id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5,306
|
cellVdec.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellVdec.cpp
|
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/perf_meter.hpp"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "Emu/Cell/lv2/sys_ppu_thread.h"
#include "Emu/Cell/lv2/sys_process.h"
#include "Emu/savestate_utils.hpp"
#include "sysPrxForUser.h"
#include "util/media_utils.h"
#ifdef _MSC_VER
#pragma warning(push, 0)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
}
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
#include "cellPamf.h"
#include "cellVdec.h"
#include <mutex>
#include <queue>
#include <cmath>
#include "Utilities/lockless.h"
#include <variant>
#include "util/asm.hpp"
std::mutex g_mutex_avcodec_open2;
LOG_CHANNEL(cellVdec);
// Formatter specialization: renders CellVdecError values as their enum names in logs.
template<>
void fmt_class_string<CellVdecError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_VDEC_ERROR_ARG);
		STR_CASE(CELL_VDEC_ERROR_SEQ);
		STR_CASE(CELL_VDEC_ERROR_BUSY);
		STR_CASE(CELL_VDEC_ERROR_EMPTY);
		STR_CASE(CELL_VDEC_ERROR_AU);
		STR_CASE(CELL_VDEC_ERROR_PIC);
		STR_CASE(CELL_VDEC_ERROR_FATAL);
		}

		// Unlisted values fall back to numeric formatting
		return unknown;
	});
}
// The general sequence control flow has these possible transitions:
// closed -> dormant
// dormant -> ready
// dormant -> closed
// ready -> ending
// ready -> resetting
// ready -> closed
// ending -> dormant
// resetting -> ready
enum class sequence_state : u32
{
	closed = 0, // Also called non-existent. Needs to be opened before anything can be done with it.
	dormant = 1, // Waiting for the next sequence. The last picture and pic-item can be acquired in this state.
	ready = 2, // Ready for decoding. Can also restart sequences in this state.
	ending = 3, // Ending a sequence. Goes to dormant afterwards.
	resetting = 4, // Stops the current sequence and starts a new one. The pictures of the old sequence are flushed.
	invalid = 5, // Any other value is invalid
};
// Formatter specialization: renders sequence_state values as their enum names in logs.
template<>
void fmt_class_string<sequence_state>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(sequence_state::closed);
		STR_CASE(sequence_state::dormant);
		STR_CASE(sequence_state::ready);
		STR_CASE(sequence_state::ending);
		STR_CASE(sequence_state::resetting);
		STR_CASE(sequence_state::invalid);
		}

		// Unlisted values fall back to numeric formatting
		return unknown;
	});
}
vm::gvar<s32> _cell_vdec_prx_ver; // TODO: this should probably specify the VDEC module that was loaded. E.g. CELL_SYSMODULE_VDEC_MPEG2
// Kinds of commands pushed into the decoder worker's queue (vdec_context::in_cmd).
enum class vdec_cmd_type : u32
{
	start_sequence, // Begin (or reset to) a new decoding sequence
	end_sequence,   // Finish the current sequence and notify SEQDONE
	close,          // Tear down: flush the output queue
	au_decode,      // Decode one access unit (carries mode + CellVdecAuInfo)
	framerate,      // Override the frame rate (carries CELL_VDEC_FRC_* code)
};
// A single command for the decoder worker thread. The three constructors enforce
// which payload fields are valid for which command type (see ensure() checks).
struct vdec_cmd
{
	// Payload-less command (start_sequence, end_sequence or close).
	explicit vdec_cmd(vdec_cmd_type _type, u64 _seq_id, u64 _id)
		: type(_type), seq_id(_seq_id), id(_id)
	{
		ensure(_type != vdec_cmd_type::au_decode);
		ensure(_type != vdec_cmd_type::framerate);
	}

	// Access-unit decode command: carries the decode mode and AU descriptor.
	explicit vdec_cmd(vdec_cmd_type _type, u64 _seq_id, u64 _id, s32 _mode, CellVdecAuInfo _au)
		: type(_type), seq_id(_seq_id), id(_id), mode(_mode), au(std::move(_au))
	{
		ensure(_type == vdec_cmd_type::au_decode);
	}

	// Frame rate override command.
	explicit vdec_cmd(vdec_cmd_type _type, u64 _seq_id, u64 _id, s32 _framerate)
		: type(_type), seq_id(_seq_id), id(_id), framerate(_framerate)
	{
		ensure(_type == vdec_cmd_type::framerate);
	}

	vdec_cmd_type type{};
	u64 seq_id{};    // Sequence the command belongs to (stale commands are skipped)
	u64 id{};        // Monotonic per-context command ID, for tracing
	s32 mode{};      // au_decode only: CELL_VDEC_DEC_MODE_*
	s32 framerate{}; // framerate only: CELL_VDEC_FRC_*
	CellVdecAuInfo au{}; // au_decode only
};
// A decoded picture plus its PS3-side metadata, queued until the game retrieves
// it via cellVdecGetPicture/cellVdecGetPicItem.
struct vdec_frame
{
	struct frame_dtor
	{
		void operator()(AVFrame* data) const
		{
			// av_frame_free() unreferences the frame's buffers itself before
			// freeing the frame, so the previous explicit av_frame_unref()
			// call here was redundant and has been removed.
			av_frame_free(&data);
		}
	};

	u64 seq_id{};   // Sequence that produced this frame
	u64 cmd_id{};   // Decode command that produced this frame (tracing)
	std::unique_ptr<AVFrame, frame_dtor> avf; // Owned FFmpeg frame
	u64 dts{};      // Decoding timestamp in 90 kHz units
	u64 pts{};      // Presentation timestamp in 90 kHz units
	u64 userdata{}; // Copied from the AU's userData field
	u32 frc{};      // Frame rate code (CELL_VDEC_FRC_*)
	bool pic_item_received = false; // Set once the pic-item was handed to the game
	CellVdecPicAttr attr = CELL_VDEC_PICITEM_ATTR_NORMAL;

	// Convenience access to the underlying AVFrame fields.
	AVFrame* operator ->() const
	{
		return avf.get();
	}
};
// HLE video decoder context: owns the FFmpeg decoder state and serves the
// command queue (in_cmd) from a dedicated PPU interrupt thread (exec()).
// One instance exists per cellVdecOpen handle, managed by the ID manager.
struct vdec_context final
{
	static const u32 id_base = 0xf0000000;
	static const u32 id_step = 0x00000100;
	static const u32 id_count = 1024;
	SAVESTATE_INIT_POS(24);

	u32 handle = 0;

	atomic_t<u64> seq_id = 0; // The first sequence will have the ID 1
	atomic_t<u64> next_cmd_id = 0;
	atomic_t<bool> abort_decode = false; // Used for thread interaction
	atomic_t<bool> is_running = false; // Used for thread interaction
	atomic_t<sequence_state> seq_state = sequence_state::closed;

	const AVCodec* codec{};
	const AVCodecDescriptor* codec_desc{};
	AVCodecContext* ctx{};
	SwsContext* sws{};

	shared_mutex mutex; // Used for 'out' queue (TODO)

	const u32 type;     // CELL_VDEC_CODEC_TYPE_*
	const u32 mem_addr; // Guest memory block supplied by the game
	const u32 mem_size;
	const vm::ptr<CellVdecCbMsg> cb_func; // Game-registered message callback
	const u32 cb_arg;
	u32 mem_bias{};

	u32 frc_set{}; // Frame Rate Override
	u64 next_pts{};
	u64 next_dts{};
	atomic_t<u32> ppu_tid{};

	std::deque<vdec_frame> out_queue; // Decoded pictures awaiting retrieval
	const u32 out_max = 60;
	atomic_t<s32> au_count{0}; // In-flight AU decode commands (capped at 4)

	lf_queue<vdec_cmd> in_cmd; // Commands consumed by exec()

	AVRational log_time_base{}; // Used to reduce log spam

	// Sets up the FFmpeg decoder for the requested codec type.
	// Throws on any codec lookup/allocation/open failure.
	vdec_context(s32 type, u32 /*profile*/, u32 addr, u32 size, vm::ptr<CellVdecCbMsg> func, u32 arg)
		: type(type)
		, mem_addr(addr)
		, mem_size(size)
		, cb_func(func)
		, cb_arg(arg)
	{
		switch (type)
		{
		case CELL_VDEC_CODEC_TYPE_MPEG2:
		{
			codec = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
			break;
		}
		case CELL_VDEC_CODEC_TYPE_AVC:
		{
			codec = avcodec_find_decoder(AV_CODEC_ID_H264);
			break;
		}
		case CELL_VDEC_CODEC_TYPE_DIVX:
		{
			codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
			break;
		}
		default:
		{
			fmt::throw_exception("Unknown video decoder type (0x%x)", type);
		}
		}

		if (!codec)
		{
			fmt::throw_exception("avcodec_find_decoder() failed (type=0x%x)", type);
		}

		codec_desc = avcodec_descriptor_get(codec->id);

		if (!codec_desc)
		{
			fmt::throw_exception("avcodec_descriptor_get() failed (type=0x%x)", type);
		}

		ctx = avcodec_alloc_context3(codec);

		if (!ctx)
		{
			fmt::throw_exception("avcodec_alloc_context3() failed (type=0x%x)", type);
		}

		AVDictionary* opts = nullptr;

		// avcodec_open2 is not guaranteed thread-safe with other open/close calls
		std::lock_guard lock(g_mutex_avcodec_open2);

		int err = avcodec_open2(ctx, codec, &opts);

		// Leftover entries in opts mean unrecognized options => treat as failure too
		if (err || opts)
		{
			avcodec_free_context(&ctx);

			std::string dict_content;

			if (opts)
			{
				AVDictionaryEntry* tag = nullptr;

				while ((tag = av_dict_get(opts, "", tag, AV_DICT_IGNORE_SUFFIX)))
				{
					fmt::append(dict_content, "['%s': '%s']", tag->key, tag->value);
				}
			}

			fmt::throw_exception("avcodec_open2() failed (err=0x%x='%s', opts=%s)", err, utils::av_error_to_string(err), dict_content);
		}

		av_dict_free(&opts);

		seq_state = sequence_state::dormant;
	}

	~vdec_context()
	{
		avcodec_free_context(&ctx);
		sws_freeContext(sws);
	}

	// Worker loop executed on the HLE PPU thread created by vdecOpen (via vdecEntry).
	// Pops commands from in_cmd, decodes AUs with FFmpeg and dispatches the game
	// callback (cb_func) for SEQDONE/AUDONE/PICOUT events.
	void exec(ppu_thread& ppu, u32 vid)
	{
		perf_meter<"VDEC"_u32> perf0;

		ppu_tid.release(ppu.id);

		// The lambda in the "increment" slot advances to the next queued command,
		// blocking on in_cmd when the current slice is exhausted.
		for (auto slice = in_cmd.pop_all(); thread_ctrl::state() != thread_state::aborting; [&]
		{
			if (slice)
			{
				slice.pop_front();
			}

			if (slice || thread_ctrl::state() == thread_state::aborting)
			{
				return;
			}

			thread_ctrl::wait_on(in_cmd);
			slice = in_cmd.pop_all(); // Pop new command list
		}())
		{
			// pcmd can be nullptr
			auto* cmd = slice.get();

			if (!cmd)
			{
				continue;
			}

			switch (cmd->type)
			{
			case vdec_cmd_type::start_sequence:
			{
				std::lock_guard lock{mutex};

				if (seq_state == sequence_state::resetting)
				{
					cellVdec.trace("Reset sequence... (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
				}
				else
				{
					cellVdec.trace("Start sequence... (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
				}

				// Drop any state left over from the previous sequence
				avcodec_flush_buffers(ctx);

				out_queue.clear(); // Flush image queue
				log_time_base = {};

				frc_set = 0; // TODO: ???
				next_pts = 0;
				next_dts = 0;

				abort_decode = false;
				is_running = true;
				break;
			}
			case vdec_cmd_type::end_sequence:
			{
				cellVdec.trace("End sequence... (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);

				{
					std::lock_guard lock{mutex};
					seq_state = sequence_state::dormant;
				}

				cellVdec.trace("Sending CELL_VDEC_MSG_TYPE_SEQDONE (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
				cb_func(ppu, vid, CELL_VDEC_MSG_TYPE_SEQDONE, CELL_OK, cb_arg);
				lv2_obj::sleep(ppu);
				break;
			}
			case vdec_cmd_type::au_decode:
			{
				AVPacket packet{};
				packet.pos = -1;

				u64 au_usrd{};

				const u32 au_mode = cmd->mode;
				const u32 au_addr = cmd->au.startAddr;
				const u32 au_size = cmd->au.size;
				const u64 au_pts = u64{cmd->au.pts.upper} << 32 | cmd->au.pts.lower;
				const u64 au_dts = u64{cmd->au.dts.upper} << 32 | cmd->au.dts.lower;
				au_usrd = cmd->au.userData;

				packet.data = vm::_ptr<u8>(au_addr);
				packet.size = au_size;
				// umax (all-ones) timestamps mean "unknown"; map them to AV_NOPTS_VALUE (s64 minimum)
				packet.pts = au_pts != umax ? au_pts : s64{smin};
				packet.dts = au_dts != umax ? au_dts : s64{smin};

				if (next_pts == 0 && au_pts != umax)
				{
					next_pts = au_pts;
				}

				if (next_dts == 0 && au_dts != umax)
				{
					next_dts = au_dts;
				}

				const CellVdecPicAttr attr = au_mode == CELL_VDEC_DEC_MODE_NORMAL ? CELL_VDEC_PICITEM_ATTR_NORMAL : CELL_VDEC_PICITEM_ATTR_SKIPPED;

				// Map the PS3 skip modes onto FFmpeg's frame-skip discard levels
				ctx->skip_frame =
					au_mode == CELL_VDEC_DEC_MODE_NORMAL ? AVDISCARD_DEFAULT :
					au_mode == CELL_VDEC_DEC_MODE_B_SKIP ? AVDISCARD_NONREF : AVDISCARD_NONINTRA;

				std::deque<vdec_frame> decoded_frames;

				// Skip decoding entirely if the sequence was reset/aborted meanwhile
				if (!abort_decode && seq_id == cmd->seq_id)
				{
					cellVdec.trace("AU decoding: handle=0x%x, seq_id=%d, cmd_id=%d, size=0x%x, pts=0x%llx, dts=0x%llx, userdata=0x%llx", handle, cmd->seq_id, cmd->id, au_size, au_pts, au_dts, au_usrd);

					if (int ret = avcodec_send_packet(ctx, &packet); ret < 0)
					{
						fmt::throw_exception("AU queuing error (handle=0x%x, seq_id=%d, cmd_id=%d, error=0x%x): %s", handle, cmd->seq_id, cmd->id, ret, utils::av_error_to_string(ret));
					}

					while (!abort_decode && seq_id == cmd->seq_id)
					{
						// Keep receiving frames
						vdec_frame frame;
						frame.seq_id = cmd->seq_id;
						frame.cmd_id = cmd->id;
						frame.avf.reset(av_frame_alloc());

						if (!frame.avf)
						{
							fmt::throw_exception("av_frame_alloc() failed (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
						}

						if (int ret = avcodec_receive_frame(ctx, frame.avf.get()); ret < 0)
						{
							// BUGFIX: this used to compare against AVERROR(EOF), which expands to
							// -(EOF) == +1 and thus can never equal FFmpeg's end-of-stream code
							// (nor even reach this ret < 0 branch). AVERROR_EOF is the correct
							// sentinel returned by avcodec_receive_frame when the stream is drained.
							if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
							{
								break;
							}

							fmt::throw_exception("AU decoding error (handle=0x%x, seq_id=%d, cmd_id=%d, error=0x%x): %s", handle, cmd->seq_id, cmd->id, ret, utils::av_error_to_string(ret));
						}

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
						const int ticks_per_frame = ctx->ticks_per_frame;
#else
						const int ticks_per_frame = (codec_desc->props & AV_CODEC_PROP_FIELDS) ? 2 : 1;
#endif

#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(58, 29, 100)
						const bool is_interlaced = frame->interlaced_frame != 0;
#else
						const bool is_interlaced = !!(frame->flags & AV_FRAME_FLAG_INTERLACED);
#endif

						if (is_interlaced)
						{
							// NPEB01838, NPUB31260
							cellVdec.todo("Interlaced frames not supported (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
						}

						if (frame->repeat_pict)
						{
							// Typo fix: "repear_pict" -> "repeat_pict" in the message below
							fmt::throw_exception("Repeated frames not supported (handle=0x%x, seq_id=%d, cmd_id=%d, repeat_pict=0x%x)", handle, cmd->seq_id, cmd->id, frame->repeat_pict);
						}

						if (frame->pts != smin)
						{
							next_pts = frame->pts;
						}

						if (frame->pkt_dts != smin)
						{
							next_dts = frame->pkt_dts;
						}

						frame.pts = next_pts;
						frame.dts = next_dts;
						frame.userdata = au_usrd;
						frame.attr = attr;

						if (frc_set)
						{
							// A frame rate override is active: advance timestamps by the
							// corresponding 90 kHz tick count per frame.
							u64 amend = 0;

							switch (frc_set)
							{
							case CELL_VDEC_FRC_24000DIV1001: amend = 1001 * 90000 / 24000; break;
							case CELL_VDEC_FRC_24: amend = 90000 / 24; break;
							case CELL_VDEC_FRC_25: amend = 90000 / 25; break;
							case CELL_VDEC_FRC_30000DIV1001: amend = 1001 * 90000 / 30000; break;
							case CELL_VDEC_FRC_30: amend = 90000 / 30; break;
							case CELL_VDEC_FRC_50: amend = 90000 / 50; break;
							case CELL_VDEC_FRC_60000DIV1001: amend = 1001 * 90000 / 60000; break;
							case CELL_VDEC_FRC_60: amend = 90000 / 60; break;
							default:
							{
								fmt::throw_exception("Invalid frame rate code set (handle=0x%x, seq_id=%d, cmd_id=%d, frc=0x%x)", handle, cmd->seq_id, cmd->id, frc_set);
							}
							}

							next_pts += amend;
							next_dts += amend;
							frame.frc = frc_set;
						}
						else if (ctx->time_base.num == 0)
						{
							// No usable time base from the codec: log once per distinct
							// time base and assume 30 fps.
							if (log_time_base.den != ctx->time_base.den || log_time_base.num != ctx->time_base.num)
							{
								cellVdec.error("time_base.num is 0 (handle=0x%x, seq_id=%d, cmd_id=%d, %d/%d, tpf=%d framerate=%d/%d)", handle, cmd->seq_id, cmd->id, ctx->time_base.num, ctx->time_base.den, ticks_per_frame, ctx->framerate.num, ctx->framerate.den);
								log_time_base = ctx->time_base;
							}

							// Hack
							const u64 amend = u64{90000} / 30;
							frame.frc = CELL_VDEC_FRC_30;
							next_pts += amend;
							next_dts += amend;
						}
						else
						{
							// Derive the frame rate code from the codec time base,
							// matching against the known PS3 frame rate classes.
							u64 amend = u64{90000} * ctx->time_base.num * ticks_per_frame / ctx->time_base.den;
							const auto freq = 1. * ctx->time_base.den / ctx->time_base.num / ticks_per_frame;

							if (std::abs(freq - 23.976) < 0.002)
								frame.frc = CELL_VDEC_FRC_24000DIV1001;
							else if (std::abs(freq - 24.000) < 0.001)
								frame.frc = CELL_VDEC_FRC_24;
							else if (std::abs(freq - 25.000) < 0.001)
								frame.frc = CELL_VDEC_FRC_25;
							else if (std::abs(freq - 29.970) < 0.002)
								frame.frc = CELL_VDEC_FRC_30000DIV1001;
							else if (std::abs(freq - 30.000) < 0.001)
								frame.frc = CELL_VDEC_FRC_30;
							else if (std::abs(freq - 50.000) < 0.001)
								frame.frc = CELL_VDEC_FRC_50;
							else if (std::abs(freq - 59.940) < 0.002)
								frame.frc = CELL_VDEC_FRC_60000DIV1001;
							else if (std::abs(freq - 60.000) < 0.001)
								frame.frc = CELL_VDEC_FRC_60;
							else
							{
								if (log_time_base.den != ctx->time_base.den || log_time_base.num != ctx->time_base.num)
								{
									// 1/1000 usually means that the time stamps are written in 1ms units and that the frame rate may vary.
									cellVdec.error("Unsupported time_base (handle=0x%x, seq_id=%d, cmd_id=%d, %d/%d, tpf=%d framerate=%d/%d)", handle, cmd->seq_id, cmd->id, ctx->time_base.num, ctx->time_base.den, ticks_per_frame, ctx->framerate.num, ctx->framerate.den);
									log_time_base = ctx->time_base;
								}

								// Hack
								amend = u64{90000} / 30;
								frame.frc = CELL_VDEC_FRC_30;
							}

							next_pts += amend;
							next_dts += amend;
						}

						cellVdec.trace("Got picture (handle=0x%x, seq_id=%d, cmd_id=%d, pts=0x%llx[0x%llx], dts=0x%llx[0x%llx])", handle, cmd->seq_id, cmd->id, frame.pts, frame->pts, frame.dts, frame->pkt_dts);

						decoded_frames.push_back(std::move(frame));
					}
				}

				if (thread_ctrl::state() != thread_state::aborting)
				{
					// Send AUDONE even if the current sequence was reset and a new sequence was started.
					cellVdec.trace("Sending CELL_VDEC_MSG_TYPE_AUDONE (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
					ensure(au_count.try_dec(0));
					cb_func(ppu, vid, CELL_VDEC_MSG_TYPE_AUDONE, CELL_OK, cb_arg);
					lv2_obj::sleep(ppu);

					while (!decoded_frames.empty() && seq_id == cmd->seq_id)
					{
						// Wait until there is free space in the image queue.
						// Do this after pushing the frame to the queue. That way the game can consume the frame and we can move on.
						u32 elapsed = 0;

						while (thread_ctrl::state() != thread_state::aborting && !abort_decode && seq_id == cmd->seq_id)
						{
							{
								std::lock_guard lock{mutex};

								if (out_queue.size() <= out_max)
								{
									break;
								}
							}

							thread_ctrl::wait_for(10000);

							if (elapsed++ >= 500) // 5 seconds
							{
								cellVdec.error("Video au decode has been waiting for a consumer for 5 seconds. (handle=0x%x, seq_id=%d, cmd_id=%d, queue_size=%d)", handle, cmd->seq_id, cmd->id, out_queue.size());
								elapsed = 0;
							}
						}

						if (thread_ctrl::state() == thread_state::aborting || abort_decode || seq_id != cmd->seq_id)
						{
							break;
						}

						{
							std::lock_guard lock{mutex};
							out_queue.push_back(std::move(decoded_frames.front()));
							decoded_frames.pop_front();
						}

						cellVdec.trace("Sending CELL_VDEC_MSG_TYPE_PICOUT (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
						cb_func(ppu, vid, CELL_VDEC_MSG_TYPE_PICOUT, CELL_OK, cb_arg);
						lv2_obj::sleep(ppu);
					}
				}

				if (abort_decode || seq_id != cmd->seq_id)
				{
					cellVdec.warning("AU decoding: aborted (handle=0x%x, seq_id=%d, cmd_id=%d, abort_decode=%d)", handle, cmd->seq_id, cmd->id, abort_decode.load());
				}
				else
				{
					cellVdec.trace("AU decoding: done (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, cmd->seq_id, cmd->id);
				}

				break;
			}
			case vdec_cmd_type::framerate:
			{
				frc_set = cmd->framerate;
				break;
			}
			case vdec_cmd_type::close:
			{
				std::lock_guard lock{mutex};
				out_queue.clear();
				break;
			}
			default:
				fmt::throw_exception("Unknown vdec_cmd_type (handle=0x%x, seq_id=%d, cmd_id=%d, type=%d)", handle, cmd->seq_id, cmd->id, static_cast<u32>(cmd->type));
				break;
			}

			// Exit the loop once cellVdecClose marked the context closed
			std::lock_guard lock{mutex};

			if (seq_state == sequence_state::closed)
			{
				break;
			}
		}

		// Make sure the state is closed at the end
		std::lock_guard lock{mutex};
		seq_state = sequence_state::closed;
	}
};
// Returns true if at least one vdec_context is currently registered with the
// ID manager (the select callback fires once per live context).
extern bool check_if_vdec_contexts_exist()
{
	bool any_found = false;

	idm::select<vdec_context>([&any_found](u32, vdec_context&)
	{
		any_found = true;
	});

	return any_found;
}
// Entry point of the HLE decoder PPU thread created by vdecOpen: runs the
// context's worker loop, then flags the thread for exit.
extern void vdecEntry(ppu_thread& ppu, u32 vid)
{
	idm::get<vdec_context>(vid)->exec(ppu, vid);

	ppu.state += cpu_flag::exit;
}
// Computes the decoder attributes (memory requirement, version numbers) for a
// codec type/profile combination. Shared by cellVdecQueryAttr[Ex] and vdecOpen.
// The memSize constants differ depending on the title's SDK version.
static error_code vdecQueryAttr(s32 type, u32 profile, u32 spec_addr /* may be 0 */, CellVdecAttr* attr)
{
	// Write 0 at start
	attr->memSize = 0;

	u32 decoderVerLower;
	u32 memSize = 0;

	const bool new_sdk = g_ps3_process_info.sdk_ver > 0x20FFFF;

	switch (type)
	{
	case CELL_VDEC_CODEC_TYPE_AVC:
	{
		cellVdec.warning("cellVdecQueryAttr: AVC (profile=%d)", profile);

		//const vm::ptr<CellVdecAvcSpecificInfo> sinfo = vm::cast(spec_addr);

		// TODO: sinfo

		// Memory requirement grows with the AVC level
		switch (profile)
		{
		case CELL_VDEC_AVC_LEVEL_1P0: memSize = new_sdk ? 0x70167D : 0xA014FD ; break;
		case CELL_VDEC_AVC_LEVEL_1P1: memSize = new_sdk ? 0x86CB7D : 0xB6C9FD ; break;
		case CELL_VDEC_AVC_LEVEL_1P2: memSize = new_sdk ? 0x9E307D : 0xCE2D7D ; break;
		case CELL_VDEC_AVC_LEVEL_1P3: memSize = new_sdk ? 0xA057FD : 0xD054FD ; break;
		case CELL_VDEC_AVC_LEVEL_2P0: memSize = new_sdk ? 0xA057FD : 0xD054FD ; break;
		case CELL_VDEC_AVC_LEVEL_2P1: memSize = new_sdk ? 0xE90DFD : 0x1190AFD; break;
		case CELL_VDEC_AVC_LEVEL_2P2: memSize = new_sdk ? 0x14E49FD : 0x17E46FD; break;
		case CELL_VDEC_AVC_LEVEL_3P0: memSize = new_sdk ? 0x155B5FD : 0x185B17D; break;
		case CELL_VDEC_AVC_LEVEL_3P1: memSize = new_sdk ? 0x1CD327D : 0x1FD2AFD; break;
		case CELL_VDEC_AVC_LEVEL_3P2: memSize = new_sdk ? 0x2397B7D : 0x2696F7D; break;
		case CELL_VDEC_AVC_LEVEL_4P0: memSize = new_sdk ? 0x33A5FFD : 0x36A527D; break;
		case CELL_VDEC_AVC_LEVEL_4P1: memSize = new_sdk ? 0x33A5FFD : 0x36A527D; break;
		case CELL_VDEC_AVC_LEVEL_4P2: memSize = new_sdk ? 0x33A5FFD : 0x36A527D; break;
		default: return CELL_VDEC_ERROR_ARG;
		}

		decoderVerLower = 0x11300;
		break;
	}
	case CELL_VDEC_CODEC_TYPE_MPEG2:
	{
		cellVdec.warning("cellVdecQueryAttr: MPEG2 (profile=%d)", profile);

		const vm::ptr<CellVdecMpeg2SpecificInfo> sinfo = vm::cast(spec_addr);

		if (sinfo)
		{
			if (sinfo->thisSize != sizeof(CellVdecMpeg2SpecificInfo))
			{
				return CELL_VDEC_ERROR_ARG;
			}
		}

		// TODO: sinfo

		const u32 maxDecH = sinfo ? +sinfo->maxDecodedFrameHeight : 0;
		const u32 maxDecW = sinfo ? +sinfo->maxDecodedFrameWidth : 0;

		// Each MPEG2 profile caps the maximum frame dimensions
		switch (profile)
		{
		case CELL_VDEC_MPEG2_MP_LL:
		{
			if (maxDecW > 352 || maxDecH > 288)
			{
				return CELL_VDEC_ERROR_ARG;
			}

			memSize = new_sdk ? 0x11290B : 0x2A610B;
			break;
		}
		case CELL_VDEC_MPEG2_MP_ML:
		{
			if (maxDecW > 720 || maxDecH > 576)
			{
				return CELL_VDEC_ERROR_ARG;
			}

			memSize = new_sdk ? 0x2DFB8B : 0x47110B;
			break;
		}
		case CELL_VDEC_MPEG2_MP_H14:
		{
			if (maxDecW > 1440 || maxDecH > 1152)
			{
				return CELL_VDEC_ERROR_ARG;
			}

			memSize = new_sdk ? 0xA0270B : 0xB8F90B;
			break;
		}
		case CELL_VDEC_MPEG2_MP_HL:
		{
			if (maxDecW > 1920 || maxDecH > 1152)
			{
				return CELL_VDEC_ERROR_ARG;
			}

			memSize = new_sdk ? 0xD2F40B : 0xEB990B;
			break;
		}
		default: return CELL_VDEC_ERROR_ARG;
		}

		decoderVerLower = 0x1030000;
		break;
	}
	case CELL_VDEC_CODEC_TYPE_DIVX:
	{
		cellVdec.warning("cellVdecQueryAttr: DivX (profile=%d)", profile);

		const vm::ptr<CellVdecDivxSpecificInfo2> sinfo = vm::cast(spec_addr);

		if (sinfo)
		{
			if (sinfo->thisSize != sizeof(CellVdecDivxSpecificInfo2))
			{
				return CELL_VDEC_ERROR_ARG;
			}
		}

		// TODO: sinfo

		//const u32 maxDecH = sinfo ? +sinfo->maxDecodedFrameHeight : 0;
		//const u32 maxDecW = sinfo ? +sinfo->maxDecodedFrameWidth : 0;

		// Validate the requested frame buffer count (0 means "default of 4";
		// 2 is only allowed for the mobile profiles)
		u32 nrOfBuf = sinfo ? +sinfo->numberOfDecodedFrameBuffer : 0;

		if (nrOfBuf == 0)
		{
			nrOfBuf = 4;
		}
		else if (nrOfBuf == 2)
		{
			if (profile != CELL_VDEC_DIVX_QMOBILE && profile != CELL_VDEC_DIVX_MOBILE)
			{
				return CELL_VDEC_ERROR_ARG;
			}
		}
		else if (nrOfBuf != 4 && nrOfBuf != 3)
		{
			return CELL_VDEC_ERROR_ARG;
		}

		// TODO: change memSize based on buffercount.
		switch (profile)
		{
		case CELL_VDEC_DIVX_QMOBILE : memSize = new_sdk ? 0x11B720 : 0x1DEF30; break;
		case CELL_VDEC_DIVX_MOBILE : memSize = new_sdk ? 0x19A740 : 0x26DED0; break;
		case CELL_VDEC_DIVX_HOME_THEATER: memSize = new_sdk ? 0x386A60 : 0x498060; break;
		case CELL_VDEC_DIVX_HD_720 : memSize = new_sdk ? 0x692070 : 0x805690; break;
		case CELL_VDEC_DIVX_HD_1080 : memSize = new_sdk ? 0xD78100 : 0xFC9870; break;
		default: return CELL_VDEC_ERROR_ARG;
		}

		decoderVerLower = 0x30806;
		break;
	}
	default: return CELL_VDEC_ERROR_ARG;
	}

	attr->decoderVerLower = decoderVerLower;
	attr->decoderVerUpper = 0x4840010;
	// With codec specific info present, a flat 4 MiB is reported instead of the table value
	attr->memSize = !spec_addr ? ensure(memSize) : 4 * 1024 * 1024;
	attr->cmdDepth = 4;
	return CELL_OK;
}
// Queries decoder attributes for the given codec type (non-Ex variant:
// no codec specific info is available, hence spec_addr = 0).
error_code cellVdecQueryAttr(vm::cptr<CellVdecType> type, vm::ptr<CellVdecAttr> attr)
{
	cellVdec.warning("cellVdecQueryAttr(type=*0x%x, attr=*0x%x)", type, attr);

	if (type && attr)
	{
		return vdecQueryAttr(type->codecType, type->profileLevel, 0, attr.get_ptr());
	}

	// Both pointers are mandatory
	return CELL_VDEC_ERROR_ARG;
}
// Queries decoder attributes, forwarding the codec specific info address
// carried by the Ex type descriptor.
error_code cellVdecQueryAttrEx(vm::cptr<CellVdecTypeEx> type, vm::ptr<CellVdecAttr> attr)
{
	cellVdec.warning("cellVdecQueryAttrEx(type=*0x%x, attr=*0x%x)", type, attr);

	if (type && attr)
	{
		return vdecQueryAttr(type->codecType, type->profileLevel, type->codecSpecificInfo_addr, attr.get_ptr());
	}

	// Both pointers are mandatory
	return CELL_VDEC_ERROR_ARG;
}
// Shared implementation of cellVdecOpen/cellVdecOpenEx: validates arguments,
// creates the decoder context and spawns its HLE PPU interrupt thread.
// T is vm::cptr<CellVdecType[Ex]>, U is vm::cptr<CellVdecResource[Ex]>.
template <typename T, typename U>
static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb> cb, vm::ptr<u32> handle)
{
	if (!type || !res || !cb || !handle || !cb->cbFunc)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	// '+ 0u' forces unsigned comparison so negative priorities are rejected too
	if (!res->memAddr || res->ppuThreadPriority + 0u >= 3072 || res->spuThreadPriority + 0u >= 256 || res->ppuThreadStackSize < 4096
		|| type->codecType + 0u >= 0xe)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	u32 spec_addr = 0;

	// Only the Ex type descriptor carries codec specific info
	if constexpr (std::is_same_v<std::decay_t<typename T::type>, CellVdecTypeEx>)
	{
		spec_addr = type->codecSpecificInfo_addr;
	}

	// The game-provided memory block must satisfy the queried requirement
	if (CellVdecAttr attr{};
		vdecQueryAttr(type->codecType, type->profileLevel, spec_addr, &attr) != CELL_OK ||
		attr.memSize > res->memSize)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	// Create decoder context
	std::shared_ptr<vdec_context> vdec;

	if (std::unique_lock lock{g_fxo->get<hle_locks_t>(), std::try_to_lock})
	{
		vdec = idm::make_ptr<vdec_context>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg);
	}
	else
	{
		// HLE lock is contended: have the PPU retry this call later
		ppu.state += cpu_flag::again;
		return {};
	}

	const u32 vid = idm::last_id();

	ensure(vdec);

	vdec->handle = vid;

	// Run thread
	vm::var<u64> _tid;
	vm::var<char[]> _name = vm::make_str("HLE Video Decoder");
	ppu_execute<&sys_ppu_thread_create>(ppu, +_tid, 0x10000, vid, +res->ppuThreadPriority, +res->ppuThreadStackSize, SYS_PPU_THREAD_CREATE_INTERRUPT, +_name);
	*handle = vid;

	const auto thrd = idm::get<named_thread<ppu_thread>>(static_cast<u32>(*_tid));

	// Queue the thread's startup work: call vdecEntry(vid) once it runs
	thrd->cmd_list
	({
		{ ppu_cmd::set_args, 1 }, u64{vid},
		{ ppu_cmd::hle_call, FIND_FUNC(vdecEntry) },
	});

	thrd->state -= cpu_flag::stop;
	thrd->state.notify_one();

	return CELL_OK;
}
// Opens a video decoder instance (non-Ex resource descriptor).
error_code cellVdecOpen(ppu_thread& ppu, vm::cptr<CellVdecType> type, vm::cptr<CellVdecResource> res, vm::cptr<CellVdecCb> cb, vm::ptr<u32> handle)
{
	cellVdec.warning("cellVdecOpen(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);

	return vdecOpen(ppu, type, res, cb, handle);
}
// Opens a video decoder instance (Ex variant: type may carry codec specific info).
error_code cellVdecOpenEx(ppu_thread& ppu, vm::cptr<CellVdecTypeEx> type, vm::cptr<CellVdecResourceEx> res, vm::cptr<CellVdecCb> cb, vm::ptr<u32> handle)
{
	cellVdec.warning("cellVdecOpenEx(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);

	return vdecOpen(ppu, type, res, cb, handle);
}
// Closes a decoder handle: aborts any in-progress decode, queues a close
// command, disestablishes the interrupt thread and removes the context.
error_code cellVdecClose(ppu_thread& ppu, u32 handle)
{
	cellVdec.warning("cellVdecClose(handle=0x%x)", handle);

	std::unique_lock lock_hle{g_fxo->get<hle_locks_t>(), std::try_to_lock};

	if (!lock_hle)
	{
		// HLE lock is contended: have the PPU retry this call later
		ppu.state += cpu_flag::again;
		return {};
	}

	auto vdec = idm::get<vdec_context>(handle);

	if (!vdec)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	{
		std::lock_guard lock{vdec->mutex};

		if (vdec->seq_state == sequence_state::closed)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}
	}

	const u64 seq_id = vdec->seq_id;
	const u64 cmd_id = vdec->next_cmd_id++;
	cellVdec.trace("Adding close cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, seq_id, cmd_id);

	lv2_obj::sleep(ppu);
	vdec->abort_decode = true;
	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::close, seq_id, cmd_id));

	// The worker publishes its thread ID at startup; spin until it is known
	while (!vdec->ppu_tid)
	{
		thread_ctrl::wait_for(1000);
	}

	// Exchange with -1 so only one closer disestablishes the interrupt thread
	const u32 tid = vdec->ppu_tid.exchange(-1);

	if (tid != umax)
	{
		ppu_execute<&sys_interrupt_thread_disestablish>(ppu, tid);
	}

	vdec->seq_state = sequence_state::closed;

	// Synchronize with any thread still holding the context mutex
	vdec->mutex.lock_unlock();

	if (!idm::remove_verify<vdec_context>(handle, std::move(vdec)))
	{
		// Other thread removed it beforehead
		return CELL_VDEC_ERROR_ARG;
	}

	return CELL_OK;
}
// Starts a new decoding sequence (or restarts/resets one that is 'ready').
// Transitions: dormant -> ready, or ready -> resetting -> ready.
error_code cellVdecStartSeq(ppu_thread& ppu, u32 handle)
{
	ppu.state += cpu_flag::wait;

	cellVdec.warning("cellVdecStartSeq(handle=0x%x)", handle);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	sequence_state old_state{};

	{
		std::lock_guard lock{vdec->mutex};

		old_state = vdec->seq_state;

		// Starting is only valid from dormant or ready
		if (old_state != sequence_state::dormant && old_state != sequence_state::ready)
		{
			return { CELL_VDEC_ERROR_SEQ, old_state };
		}

		if (old_state == sequence_state::ready)
		{
			// Restarting an active sequence counts as a reset
			vdec->seq_state = sequence_state::resetting;
		}
	}

	// Bump the sequence ID; stale commands/frames of the old sequence are dropped
	const u64 seq_id = ++vdec->seq_id;
	const u64 cmd_id = vdec->next_cmd_id++;
	cellVdec.trace("Adding start cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, seq_id, cmd_id);

	vdec->abort_decode = false;
	vdec->is_running = false;
	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::start_sequence, seq_id, cmd_id));

	std::lock_guard lock{vdec->mutex};

	if (false) // TODO: set to old state in case of error
	{
		vdec->seq_state = old_state;
	}
	else
	{
		vdec->seq_state = sequence_state::ready;
	}

	return CELL_OK;
}
// Ends the current decoding sequence: marks the context 'ending' and queues an
// end_sequence command; the worker moves to dormant and sends SEQDONE.
error_code cellVdecEndSeq(ppu_thread& ppu, u32 handle)
{
	ppu.state += cpu_flag::wait;

	cellVdec.warning("cellVdecEndSeq(handle=0x%x)", handle);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	{
		std::lock_guard lock{vdec->mutex};

		// Ending is only valid while a sequence is active
		if (vdec->seq_state != sequence_state::ready)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}

		vdec->seq_state = sequence_state::ending;
	}

	const u64 current_seq = vdec->seq_id;
	const u64 new_cmd = vdec->next_cmd_id++;

	cellVdec.trace("Adding end cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, current_seq, new_cmd);

	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::end_sequence, current_seq, new_cmd));

	return CELL_OK;
}
// Queues an access unit (compressed video data) for asynchronous decoding.
// Returns CELL_VDEC_ERROR_BUSY once 4 AU commands are already in flight.
error_code cellVdecDecodeAu(ppu_thread& ppu, u32 handle, CellVdecDecodeMode mode, vm::cptr<CellVdecAuInfo> auInfo)
{
	ppu.state += cpu_flag::wait;

	cellVdec.trace("cellVdecDecodeAu(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
	{
		return { CELL_VDEC_ERROR_ARG, "vdec=%d, auInfo=%d, size=%d, startAddr=0x%x", !!vdec, !!auInfo, auInfo ? auInfo->size.value() : 0, auInfo ? auInfo->startAddr.value() : 0 };
	}

	{
		std::lock_guard lock{vdec->mutex};

		// Decoding is only allowed while a sequence is active
		if (vdec->seq_state != sequence_state::ready)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}
	}

	if (mode < 0 || mode > (CELL_VDEC_DEC_MODE_B_SKIP | CELL_VDEC_DEC_MODE_PB_SKIP))
	{
		return { CELL_VDEC_ERROR_ARG, "mode=%d", +mode };
	}

	// TODO: what does the 3 stand for ?
	if ((mode == (CELL_VDEC_DEC_MODE_B_SKIP | CELL_VDEC_DEC_MODE_PB_SKIP) && vdec->type != 3) ||
		(mode == CELL_VDEC_DEC_MODE_PB_SKIP && vdec->type != CELL_VDEC_CODEC_TYPE_AVC))
	{
		return { CELL_VDEC_ERROR_ARG, "mode=%d, type=%d", +mode, vdec->type };
	}

	// At most 4 AU decode commands may be queued at once
	if (!vdec->au_count.try_inc(4))
	{
		return CELL_VDEC_ERROR_BUSY;
	}

	const u64 seq_id = vdec->seq_id;
	const u64 cmd_id = vdec->next_cmd_id++;
	cellVdec.trace("Adding decode cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, seq_id, cmd_id);

	// TODO: check info
	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::au_decode, seq_id, cmd_id, mode, *auInfo));

	return CELL_OK;
}
// Ex2 variant of cellVdecDecodeAu: validates like cellVdecDecodeAu, then copies
// the Ex2 AU descriptor's common fields into a CellVdecAuInfo before queuing.
error_code cellVdecDecodeAuEx2(ppu_thread& ppu, u32 handle, CellVdecDecodeMode mode, vm::cptr<CellVdecAuInfoEx2> auInfo)
{
	ppu.state += cpu_flag::wait;

	cellVdec.todo("cellVdecDecodeAuEx2(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
	{
		return { CELL_VDEC_ERROR_ARG, "vdec=%d, auInfo=%d, size=%d, startAddr=0x%x", !!vdec, !!auInfo, auInfo ? auInfo->size.value() : 0, auInfo ? auInfo->startAddr.value() : 0 };
	}

	{
		std::lock_guard lock{vdec->mutex};

		// Decoding is only allowed while a sequence is active
		if (vdec->seq_state != sequence_state::ready)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}
	}

	if (mode < 0 || mode > (CELL_VDEC_DEC_MODE_B_SKIP | CELL_VDEC_DEC_MODE_PB_SKIP))
	{
		return { CELL_VDEC_ERROR_ARG, "mode=%d", +mode };
	}

	// TODO: what does the 3 stand for ?
	if ((mode == (CELL_VDEC_DEC_MODE_B_SKIP | CELL_VDEC_DEC_MODE_PB_SKIP) && vdec->type != 3) ||
		(mode == CELL_VDEC_DEC_MODE_PB_SKIP && vdec->type != CELL_VDEC_CODEC_TYPE_AVC))
	{
		return { CELL_VDEC_ERROR_ARG, "mode=%d, type=%d", +mode, vdec->type };
	}

	// At most 4 AU decode commands may be queued at once
	if (!vdec->au_count.try_inc(4))
	{
		return CELL_VDEC_ERROR_BUSY;
	}

	// Downconvert: only the fields shared with CellVdecAuInfo are forwarded
	CellVdecAuInfo au_info{};
	au_info.startAddr = auInfo->startAddr;
	au_info.size = auInfo->size;
	au_info.pts = auInfo->pts;
	au_info.dts = auInfo->dts;
	au_info.userData = auInfo->userData;
	au_info.codecSpecificData = auInfo->codecSpecificData;

	const u64 seq_id = vdec->seq_id;
	const u64 cmd_id = vdec->next_cmd_id++;
	cellVdec.trace("Adding decode cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, seq_id, cmd_id);

	// TODO: check info
	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::au_decode, seq_id, cmd_id, mode, au_info));

	return CELL_OK;
}
// Pop the oldest decoded frame and, if outBuff is given, convert it with swscale
// into the guest buffer using the layout requested by format. arg4 is an unknown
// extension argument (only 0, 8 and 0xc pass validation; non-zero is unimplemented).
error_code cellVdecGetPictureExt(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecPicFormat2> format, vm::ptr<u8> outBuff, u32 arg4)
{
	ppu.state += cpu_flag::wait;

	cellVdec.trace("cellVdecGetPictureExt(handle=0x%x, format=*0x%x, outBuff=*0x%x, arg4=*0x%x)", handle, format, outBuff, arg4);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec || !format)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	{
		std::lock_guard lock{vdec->mutex};

		// Only valid while a sequence is open or still draining
		if (vdec->seq_state == sequence_state::closed || vdec->seq_state > sequence_state::ending)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}
	}

	// The color matrix is only validated for the interleaved ARGB/RGBA output formats
	if (format->formatType > 4 || (format->formatType <= CELL_VDEC_PICFMT_RGBA32_ILV && format->colorMatrixType > CELL_VDEC_COLOR_MATRIX_TYPE_BT709))
	{
		return CELL_VDEC_ERROR_ARG;
	}

	if (arg4 && arg4 != 8 && arg4 != 0xc)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	// Valid but unimplemented extension arguments: bail out loudly instead of corrupting output
	if (arg4 || format->unk0 || format->unk1)
	{
		fmt::throw_exception("cellVdecGetPictureExt: Unknown arguments (arg4=*0x%x, unk0=0x%x, unk1=0x%x)", arg4, format->unk0, format->unk1);
	}

	vdec_frame frame;
	bool notify = false;
	u64 sequence_id{};

	{
		std::lock_guard lock(vdec->mutex);

		if (vdec->out_queue.empty())
		{
			return CELL_VDEC_ERROR_EMPTY;
		}

		// Take ownership of the oldest frame in the output queue
		frame = std::move(vdec->out_queue.front());
		sequence_id = vdec->seq_id;
		vdec->out_queue.pop_front();

		// Queue just transitioned from full to non-full: wake the decoder thread below
		if (vdec->out_queue.size() + 1 == vdec->out_max)
			notify = true;
	}

	if (notify)
	{
		auto vdec_ppu = idm::get<named_thread<ppu_thread>>(vdec->ppu_tid);
		if (vdec_ppu) thread_ctrl::notify(*vdec_ppu);
	}

	// Drop frames that belong to an older (restarted) sequence
	if (sequence_id != frame.seq_id)
	{
		return { CELL_VDEC_ERROR_EMPTY, "sequence_id=%d, seq_id=%d", sequence_id, frame.seq_id };
	}

	if (outBuff)
	{
		const int w = frame->width;
		const int h = frame->height;

		AVPixelFormat out_f = AV_PIX_FMT_YUV420P;

		// Constant alpha plane, only needed for the interleaved ARGB/RGBA targets
		std::unique_ptr<u8[]> alpha_plane;

		switch (const u32 type = format->formatType)
		{
		case CELL_VDEC_PICFMT_ARGB32_ILV: out_f = AV_PIX_FMT_ARGB; alpha_plane.reset(new u8[w * h]); break;
		case CELL_VDEC_PICFMT_RGBA32_ILV: out_f = AV_PIX_FMT_RGBA; alpha_plane.reset(new u8[w * h]); break;
		case CELL_VDEC_PICFMT_UYVY422_ILV: out_f = AV_PIX_FMT_UYVY422; break;
		case CELL_VDEC_PICFMT_YUV420_PLANAR: out_f = AV_PIX_FMT_YUV420P; break;
		default:
		{
			fmt::throw_exception("cellVdecGetPictureExt: Unknown formatType (handle=0x%x, seq_id=%d, cmd_id=%d, type=%d)", handle, frame.seq_id, frame.cmd_id, type);
		}
		}

		// TODO: color matrix

		// Fill the alpha plane with the constant alpha value requested by the game
		if (alpha_plane)
		{
			std::memset(alpha_plane.get(), format->alpha, w * h);
		}

		AVPixelFormat in_f = AV_PIX_FMT_YUV420P;

		switch (frame->format)
		{
		case AV_PIX_FMT_YUVJ420P:
			cellVdec.error("cellVdecGetPictureExt: experimental AVPixelFormat (handle=0x%x, seq_id=%d, cmd_id=%d, format=%d). This may cause suboptimal video quality.", handle, frame.seq_id, frame.cmd_id, frame->format);
			[[fallthrough]];
		case AV_PIX_FMT_YUV420P:
			// Treat the source as YUVA when an alpha plane was allocated above
			in_f = alpha_plane ? AV_PIX_FMT_YUVA420P : static_cast<AVPixelFormat>(frame->format);
			break;
		default:
			fmt::throw_exception("cellVdecGetPictureExt: Unknown frame format (%d)", frame->format);
		}

		cellVdec.trace("cellVdecGetPictureExt: handle=0x%x, seq_id=%d, cmd_id=%d, w=%d, h=%d, frameFormat=%d, formatType=%d, in_f=%d, out_f=%d, alpha_plane=%d, alpha=%d, colorMatrixType=%d", handle, frame.seq_id, frame.cmd_id, w, h, frame->format, format->formatType, +in_f, +out_f, !!alpha_plane, format->alpha, format->colorMatrixType);

		// Reuse the cached conversion context while the parameters stay the same
		vdec->sws = sws_getCachedContext(vdec->sws, w, h, in_f, w, h, out_f, SWS_POINT, nullptr, nullptr, nullptr);

		u8* in_data[4] = { frame->data[0], frame->data[1], frame->data[2], alpha_plane.get() };
		int in_line[4] = { frame->linesize[0], frame->linesize[1], frame->linesize[2], w * 1 };
		u8* out_data[4] = { outBuff.get_ptr() };
		int out_line[4] = { w * 4 }; // RGBA32 or ARGB32

		// TODO:
		// It's possible that we need to align the pitch to 128 here.
		// PS HOME seems to rely on this somehow in certain cases.

		if (!alpha_plane)
		{
			// YUV420P or UYVY422
			// Planar output: derive the chroma plane pointers and let FFmpeg compute line sizes
			out_data[1] = out_data[0] + w * h;
			out_data[2] = out_data[0] + w * h * 5 / 4;

			if (const int ret = av_image_fill_linesizes(out_line, out_f, w); ret < 0)
			{
				fmt::throw_exception("cellVdecGetPictureExt: av_image_fill_linesizes failed (handle=0x%x, seq_id=%d, cmd_id=%d, ret=0x%x): %s", handle, frame.seq_id, frame.cmd_id, ret, utils::av_error_to_string(ret));
			}
		}

		sws_scale(vdec->sws, in_data, in_line, 0, h, out_data, out_line);
	}

	return CELL_OK;
}
// Legacy entry point: widens the old CellVdecPicFormat descriptor into the
// extended CellVdecPicFormat2 and delegates to cellVdecGetPictureExt.
error_code cellVdecGetPicture(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecPicFormat> format, vm::ptr<u8> outBuff)
{
	ppu.state += cpu_flag::wait;

	cellVdec.trace("cellVdecGetPicture(handle=0x%x, format=*0x%x, outBuff=*0x%x)", handle, format, outBuff);

	if (!format)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	// Build the extended descriptor; the unknown extension fields stay zero
	vm::var<CellVdecPicFormat2> ext_format;
	ext_format->formatType = format->formatType;
	ext_format->colorMatrixType = format->colorMatrixType;
	ext_format->alpha = format->alpha;
	ext_format->unk0 = 0;
	ext_format->unk1 = 0;

	return cellVdecGetPictureExt(ppu, handle, ext_format, outBuff, 0);
}
// Return metadata (a picture item) for the oldest queued frame that was not reported yet.
// The item is written into the decoder's guest-memory ring buffer; *picItem points at it.
error_code cellVdecGetPicItem(ppu_thread& ppu, u32 handle, vm::pptr<CellVdecPicItem> picItem)
{
	ppu.state += cpu_flag::wait;

	cellVdec.trace("cellVdecGetPicItem(handle=0x%x, picItem=**0x%x)", handle, picItem);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec || !picItem)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	u64 sequence_id{};
	{
		std::lock_guard lock{vdec->mutex};

		if (vdec->seq_state == sequence_state::closed || vdec->seq_state > sequence_state::ending)
		{
			return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
		}

		sequence_id = vdec->seq_id;
	}

	// Layout of one ring-buffer slot: the generic item immediately followed by codec-specific info
	struct all_info_t
	{
		CellVdecPicItem picItem;
		std::aligned_union_t<0, CellVdecAvcInfo, CellVdecDivxInfo, CellVdecMpeg2Info> picInfo;
	};

	AVFrame* frame{};
	u64 seq_id{};
	u64 cmd_id{};
	u64 pts;
	u64 dts;
	u64 usrd;
	u32 frc = 0;
	CellVdecPicAttr attr = CELL_VDEC_PICITEM_ATTR_NORMAL;
	vm::ptr<CellVdecPicItem> info;
	{
		std::lock_guard lock(vdec->mutex);

		// Find the first queued picture whose item has not been handed out yet
		for (auto& picture : vdec->out_queue)
		{
			if (!picture.pic_item_received)
			{
				picture.pic_item_received = true;
				frame = picture.avf.get();
				seq_id = picture.seq_id;
				cmd_id = picture.cmd_id;
				pts = picture.pts;
				dts = picture.dts;
				usrd = picture.userdata;
				frc = picture.frc;
				attr = picture.attr;

				// Reserve the current slot of the guest-memory ring buffer
				info.set(vdec->mem_addr + vdec->mem_bias);

				constexpr u64 size_needed = sizeof(all_info_t);

				// mem_bias is always a multiple of size_needed, so equality here means this
				// was the last slot that still fits; wrap around for the next item
				if (vdec->mem_bias + size_needed >= vdec->mem_size / size_needed * size_needed)
				{
					vdec->mem_bias = 0;
					break;
				}

				vdec->mem_bias += size_needed;
				break;
			}
		}
	}

	if (!frame || seq_id != sequence_id)
	{
		// If frame is empty info was not found
		return { CELL_VDEC_ERROR_EMPTY, " frame=%d, sequence_id=%d, seq_id=%d", !!frame, sequence_id, seq_id };
	}

	// Fill the generic (codec-independent) part of the item
	info->codecType = vdec->type;
	info->startAddr = 0x00000123; // invalid value (no address for picture)
	const int buffer_size = av_image_get_buffer_size(vdec->ctx->pix_fmt, vdec->ctx->width, vdec->ctx->height, 1);
	ensure(buffer_size >= 0);
	info->size = utils::align<u32>(buffer_size, 128);
	info->auNum = 1;
	info->auPts[0].lower = static_cast<u32>(pts);
	info->auPts[0].upper = static_cast<u32>(pts >> 32);
	info->auPts[1].lower = CELL_CODEC_PTS_INVALID;
	info->auPts[1].upper = CELL_CODEC_PTS_INVALID;
	info->auDts[0].lower = static_cast<u32>(dts);
	info->auDts[0].upper = static_cast<u32>(dts >> 32);
	info->auDts[1].lower = CELL_CODEC_DTS_INVALID;
	info->auDts[1].upper = CELL_CODEC_DTS_INVALID;
	info->auUserData[0] = usrd;
	info->auUserData[1] = 0;
	info->status = CELL_OK;
	info->attr = attr;

	// Codec-specific info lives right after the generic item in the same slot
	const vm::addr_t picinfo_addr{info.addr() + ::offset32(&all_info_t::picInfo)};
	info->picInfo_addr = picinfo_addr;

	// Fields marked '???' below are hardcoded defaults, not extracted from the stream yet
	if (vdec->type == CELL_VDEC_CODEC_TYPE_AVC)
	{
		const vm::ptr<CellVdecAvcInfo> avc = picinfo_addr;

		avc->horizontalSize = frame->width;
		avc->verticalSize = frame->height;

		switch (s32 pct = frame->pict_type)
		{
		case AV_PICTURE_TYPE_I: avc->pictureType[0] = CELL_VDEC_AVC_PCT_I; break;
		case AV_PICTURE_TYPE_P: avc->pictureType[0] = CELL_VDEC_AVC_PCT_P; break;
		case AV_PICTURE_TYPE_B: avc->pictureType[0] = CELL_VDEC_AVC_PCT_B; break;
		default:
		{
			avc->pictureType[0] = CELL_VDEC_AVC_PCT_UNKNOWN;
			cellVdec.error("cellVdecGetPicItem(AVC): unknown pict_type value (handle=0x%x, seq_id=%d, cmd_id=%d, pct=0x%x)", handle, seq_id, cmd_id, pct);
			break;
		}
		}

		avc->pictureType[1] = CELL_VDEC_AVC_PCT_UNKNOWN; // ???
		avc->idrPictureFlag = false; // ???
		avc->aspect_ratio_idc = CELL_VDEC_AVC_ARI_SAR_UNSPECIFIED; // ???
		avc->sar_height = 0;
		avc->sar_width = 0;
		avc->pic_struct = CELL_VDEC_AVC_PSTR_FRAME; // ???
		avc->picOrderCount[0] = 0; // ???
		avc->picOrderCount[1] = 0;
		avc->vui_parameters_present_flag = true; // ???
		avc->frame_mbs_only_flag = true; // ??? progressive
		avc->video_signal_type_present_flag = true; // ???
		avc->video_format = CELL_VDEC_AVC_VF_COMPONENT; // ???
		avc->video_full_range_flag = false; // ???
		avc->colour_description_present_flag = true;
		avc->colour_primaries = CELL_VDEC_AVC_CP_ITU_R_BT_709_5; // ???
		avc->transfer_characteristics = CELL_VDEC_AVC_TC_ITU_R_BT_709_5;
		avc->matrix_coefficients = CELL_VDEC_AVC_MXC_ITU_R_BT_709_5; // important
		avc->timing_info_present_flag = true;

		// Translate the internal framerate code to the AVC-specific enum
		switch (frc)
		{
		case CELL_VDEC_FRC_24000DIV1001: avc->frameRateCode = CELL_VDEC_AVC_FRC_24000DIV1001; break;
		case CELL_VDEC_FRC_24: avc->frameRateCode = CELL_VDEC_AVC_FRC_24; break;
		case CELL_VDEC_FRC_25: avc->frameRateCode = CELL_VDEC_AVC_FRC_25; break;
		case CELL_VDEC_FRC_30000DIV1001: avc->frameRateCode = CELL_VDEC_AVC_FRC_30000DIV1001; break;
		case CELL_VDEC_FRC_30: avc->frameRateCode = CELL_VDEC_AVC_FRC_30; break;
		case CELL_VDEC_FRC_50: avc->frameRateCode = CELL_VDEC_AVC_FRC_50; break;
		case CELL_VDEC_FRC_60000DIV1001: avc->frameRateCode = CELL_VDEC_AVC_FRC_60000DIV1001; break;
		case CELL_VDEC_FRC_60: avc->frameRateCode = CELL_VDEC_AVC_FRC_60; break;
		default: cellVdec.error("cellVdecGetPicItem(AVC): unknown frc value (handle=0x%x, seq_id=%d, cmd_id=%d, frc=0x%x)", handle, seq_id, cmd_id, frc);
		}

		avc->fixed_frame_rate_flag = true;
		avc->low_delay_hrd_flag = true; // ???
		avc->entropy_coding_mode_flag = true; // ???
		avc->nalUnitPresentFlags = 0; // ???
		avc->ccDataLength[0] = 0;
		avc->ccDataLength[1] = 0;
		avc->reserved[0] = 0;
		avc->reserved[1] = 0;
	}
	else if (vdec->type == CELL_VDEC_CODEC_TYPE_DIVX)
	{
		const vm::ptr<CellVdecDivxInfo> dvx = picinfo_addr;

		switch (s32 pct = frame->pict_type)
		{
		case AV_PICTURE_TYPE_I: dvx->pictureType = CELL_VDEC_DIVX_VCT_I; break;
		case AV_PICTURE_TYPE_P: dvx->pictureType = CELL_VDEC_DIVX_VCT_P; break;
		case AV_PICTURE_TYPE_B: dvx->pictureType = CELL_VDEC_DIVX_VCT_B; break;
		default: cellVdec.error("cellVdecGetPicItem(DivX): unknown pict_type value (handle=0x%x, seq_id=%d, cmd_id=%d, pct=0x%x)", handle, seq_id, cmd_id, pct);
		}

		dvx->horizontalSize = frame->width;
		dvx->verticalSize = frame->height;
		dvx->pixelAspectRatio = CELL_VDEC_DIVX_ARI_PAR_1_1; // ???
		dvx->parHeight = 0;
		dvx->parWidth = 0;
		dvx->colourDescription = false; // ???
		dvx->colourPrimaries = CELL_VDEC_DIVX_CP_ITU_R_BT_709; // ???
		dvx->transferCharacteristics = CELL_VDEC_DIVX_TC_ITU_R_BT_709; // ???
		dvx->matrixCoefficients = CELL_VDEC_DIVX_MXC_ITU_R_BT_709; // ???
		dvx->pictureStruct = CELL_VDEC_DIVX_PSTR_FRAME; // ???

		switch (frc)
		{
		case CELL_VDEC_FRC_24000DIV1001: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_24000DIV1001; break;
		case CELL_VDEC_FRC_24: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_24; break;
		case CELL_VDEC_FRC_25: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_25; break;
		case CELL_VDEC_FRC_30000DIV1001: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_30000DIV1001; break;
		case CELL_VDEC_FRC_30: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_30; break;
		case CELL_VDEC_FRC_50: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_50; break;
		case CELL_VDEC_FRC_60000DIV1001: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_60000DIV1001; break;
		case CELL_VDEC_FRC_60: dvx->frameRateCode = CELL_VDEC_DIVX_FRC_60; break;
		default: cellVdec.error("cellVdecGetPicItem(DivX): unknown frc value (handle=0x%x, seq_id=%d, cmd_id=%d, frc=0x%x)", handle, seq_id, cmd_id, frc);
		}
	}
	else if (vdec->type == CELL_VDEC_CODEC_TYPE_MPEG2)
	{
		const vm::ptr<CellVdecMpeg2Info> mp2 = picinfo_addr;
		std::memset(mp2.get_ptr(), 0, sizeof(CellVdecMpeg2Info));
		mp2->horizontal_size = frame->width;
		mp2->vertical_size = frame->height;
		mp2->aspect_ratio_information = CELL_VDEC_MPEG2_ARI_SAR_1_1; // ???

		switch (frc)
		{
		case CELL_VDEC_FRC_24000DIV1001: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_24000DIV1001; break;
		case CELL_VDEC_FRC_24: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_24; break;
		case CELL_VDEC_FRC_25: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_25; break;
		case CELL_VDEC_FRC_30000DIV1001: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_30000DIV1001; break;
		case CELL_VDEC_FRC_30: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_30; break;
		case CELL_VDEC_FRC_50: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_50; break;
		case CELL_VDEC_FRC_60000DIV1001: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_60000DIV1001; break;
		case CELL_VDEC_FRC_60: mp2->frame_rate_code = CELL_VDEC_MPEG2_FRC_60; break;
		default: cellVdec.error("cellVdecGetPicItem(MPEG2): unknown frc value (handle=0x%x, seq_id=%d, cmd_id=%d, frc=0x%x)", handle, seq_id, cmd_id, frc);
		}

		mp2->progressive_sequence = true; // ???
		mp2->low_delay = true; // ???
		mp2->video_format = CELL_VDEC_MPEG2_VF_UNSPECIFIED; // ???
		mp2->colour_description = false; // ???

		switch (s32 pct = frame->pict_type)
		{
		case AV_PICTURE_TYPE_I: mp2->picture_coding_type[0] = CELL_VDEC_MPEG2_PCT_I; break;
		case AV_PICTURE_TYPE_P: mp2->picture_coding_type[0] = CELL_VDEC_MPEG2_PCT_P; break;
		case AV_PICTURE_TYPE_B: mp2->picture_coding_type[0] = CELL_VDEC_MPEG2_PCT_B; break;
		default: cellVdec.error("cellVdecGetPicItem(MPEG2): unknown pict_type value (handle=0x%x, seq_id=%d, cmd_id=%d, pct=0x%x)", handle, seq_id, cmd_id, pct);
		}

		mp2->picture_coding_type[1] = CELL_VDEC_MPEG2_PCT_FORBIDDEN; // ???
		mp2->picture_structure[0] = CELL_VDEC_MPEG2_PSTR_FRAME;
		mp2->picture_structure[1] = CELL_VDEC_MPEG2_PSTR_FRAME;

		// ...
	}

	*picItem = info;
	return CELL_OK;
}
// Queue a framerate-change command for the decoder thread.
// frameRateCode must have the 0x80 prefix; the low 3 bits select the rate.
error_code cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frameRateCode)
{
	cellVdec.trace("cellVdecSetFrameRate(handle=0x%x, frameRateCode=0x%x)", handle, +frameRateCode);

	const auto vdec = idm::get<vdec_context>(handle);

	// 0x80 seems like a common prefix
	const bool code_valid = (frameRateCode & 0xf8) == 0x80;

	if (!vdec || !code_valid)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	std::lock_guard lock{vdec->mutex};

	if (vdec->seq_state == sequence_state::closed || vdec->seq_state >= sequence_state::invalid)
	{
		return { CELL_VDEC_ERROR_SEQ, vdec->seq_state.load() };
	}

	const u64 seq_id = vdec->seq_id;
	const u64 cmd_id = vdec->next_cmd_id++;
	cellVdec.trace("Adding framerate cmd (handle=0x%x, seq_id=%d, cmd_id=%d)", handle, seq_id, cmd_id);

	vdec->in_cmd.push(vdec_cmd(vdec_cmd_type::framerate, seq_id, cmd_id, frameRateCode & 0x87));

	return CELL_OK;
}
// The following libvdec entry points are unimplemented stubs: they log a TODO
// and unconditionally report success.

error_code cellVdecOpenExt()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}

error_code cellVdecStartSeqExt()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}

error_code cellVdecGetInputStatus()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}

error_code cellVdecGetPicItemEx()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}

error_code cellVdecGetPicItemExt()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}

error_code cellVdecSetFrameRateExt()
{
	UNIMPLEMENTED_FUNC(cellVdec);
	return CELL_OK;
}
// Stub: validates the handle/argument and sequence state, but the PTS itself is ignored.
error_code cellVdecSetPts(u32 handle, vm::ptr<void> unk)
{
	cellVdec.error("cellVdecSetPts(handle=0x%x, unk=*0x%x)", handle, unk);

	const auto vdec = idm::get<vdec_context>(handle);

	if (!vdec || !unk)
	{
		return CELL_VDEC_ERROR_ARG;
	}

	std::lock_guard lock{vdec->mutex};

	// Only callable while a sequence is active
	const auto state = vdec->seq_state.load();

	if (state == sequence_state::closed || state >= sequence_state::invalid)
	{
		return { CELL_VDEC_ERROR_SEQ, state };
	}

	return CELL_OK;
}
// Register the libvdec HLE module, its exported functions and the companion
// codec-specific PRX module names that games may reference.
DECLARE(ppu_module_manager::cellVdec)("libvdec", []()
{
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)
	avcodec_register_all();
#endif

	// Codec-specific decoder modules; they share this libvdec implementation
	static ppu_static_module libavcdec("libavcdec");
	static ppu_static_module libdivx311dec("libdivx311dec");
	static ppu_static_module libdivxdec("libdivxdec");
	static ppu_static_module libmvcdec("libmvcdec");
	static ppu_static_module libsjvtd("libsjvtd");
	static ppu_static_module libsmvd2("libsmvd2");
	static ppu_static_module libsmvd4("libsmvd4");
	static ppu_static_module libsvc1d("libsvc1d");

	REG_VAR(libvdec, _cell_vdec_prx_ver); // 0x085a7ecb

	REG_FUNC(libvdec, cellVdecQueryAttr);
	REG_FUNC(libvdec, cellVdecQueryAttrEx);
	REG_FUNC(libvdec, cellVdecOpen);
	REG_FUNC(libvdec, cellVdecOpenEx);
	REG_FUNC(libvdec, cellVdecOpenExt); // 0xef4d8ad7
	REG_FUNC(libvdec, cellVdecClose);
	REG_FUNC(libvdec, cellVdecStartSeq);
	REG_FUNC(libvdec, cellVdecStartSeqExt); // 0xebb8e70a
	REG_FUNC(libvdec, cellVdecEndSeq);
	REG_FUNC(libvdec, cellVdecDecodeAu);
	REG_FUNC(libvdec, cellVdecDecodeAuEx2);
	REG_FUNC(libvdec, cellVdecGetInputStatus);
	REG_FUNC(libvdec, cellVdecGetPicture);
	REG_FUNC(libvdec, cellVdecGetPictureExt); // 0xa21aa896
	REG_FUNC(libvdec, cellVdecGetPicItem);
	REG_FUNC(libvdec, cellVdecGetPicItemEx);
	REG_FUNC(libvdec, cellVdecGetPicItemExt); // 0x2cbd9806
	REG_FUNC(libvdec, cellVdecSetFrameRate);
	REG_FUNC(libvdec, cellVdecSetFrameRateExt); // 0xcffc42a5
	REG_FUNC(libvdec, cellVdecSetPts); // 0x3ce2e4f8

	REG_HIDDEN_FUNC(vdecEntry);
});
| 49,621
|
C++
|
.cpp
| 1,407
| 31.803127
| 332
| 0.670355
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,307
|
sys_net_.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/sys_net_.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/IdManager.h"
#include "sys_net_.h"
LOG_CHANNEL(libnet);
// Temporarily
#ifndef _MSC_VER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// BSD-style socket API of the legacy sys_net library. Every function below is an
// HLE stub: it logs its arguments and reports success (0 / CELL_OK) without doing
// any actual networking.

s32 sys_net_accept(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	libnet.todo("accept(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);
	return 0;
}

s32 sys_net_bind(s32 s, vm::cptr<sys_net_sockaddr> addr, u32 addrlen)
{
	libnet.todo("bind(s=%d, addr=*0x%x, addrlen=%u)", s, addr, addrlen);
	return 0;
}

s32 sys_net_connect(s32 s, vm::ptr<sys_net_sockaddr> addr, u32 addrlen)
{
	libnet.todo("connect(s=%d, addr=*0x%x, addrlen=%u)", s, addr, addrlen);
	return 0;
}

s32 sys_net_gethostbyaddr()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_gethostbyname()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_getpeername(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_getsockname(s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_getsockopt(s32 s, s32 level, s32 optname, vm::ptr<void> optval, vm::ptr<u32> optlen)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

// Stub: always reports a parse failure (0xffffffff is the INADDR_NONE-style error value)
u32 sys_net_inet_addr(vm::cptr<char> cp)
{
	libnet.todo("inet_addr(cp=%s)", cp);
	return 0xffffffff;
}

s32 sys_net_inet_aton()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_inet_lnaof()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_inet_makeaddr()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_inet_netof()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_inet_network()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

// Stub: returns a null guest pointer instead of a formatted address string
vm::ptr<char> sys_net_inet_ntoa(u32 in)
{
	libnet.todo("inet_ntoa(in=0x%x)", in);
	return vm::null;
}

// Stub: returns a null guest pointer instead of dst
vm::cptr<char> sys_net_inet_ntop(s32 af, vm::ptr<void> src, vm::ptr<char> dst, u32 size)
{
	libnet.todo("inet_ntop(af=%d, src=%s, dst=*0x%x, size=%d)", af, src, dst, size);
	return vm::null;
}

// Stub: returns 0 ('not parseable' in the real API)
s32 sys_net_inet_pton(s32 af, vm::cptr<char> src, vm::ptr<char> dst)
{
	libnet.todo("inet_pton(af=%d, src=%s, dst=*0x%x)", af, src, dst);
	return 0;
}

s32 sys_net_listen(s32 s, s32 backlog)
{
	libnet.todo("listen(s=%d, backlog=%d)", s, backlog);
	return 0;
}

// Stub: reports 0 bytes received
s32 sys_net_recv(s32 s, vm::ptr<void> buf, u32 len, s32 flags)
{
	libnet.todo("recv(s=%d, buf=*0x%x, len=%d, flags=0x%x)", s, buf, len, flags);
	return 0;
}

s32 sys_net_recvfrom(s32 s, vm::ptr<void> buf, u32 len, s32 flags, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	libnet.todo("recvfrom(s=%d, buf=*0x%x, len=%d, flags=0x%x, addr=*0x%x, paddrlen=*0x%x)", s, buf, len, flags, addr, paddrlen);
	return 0;
}

s32 sys_net_recvmsg(s32 s, vm::ptr<sys_net_msghdr> msg, s32 flags)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

// Stub: reports 0 bytes sent
s32 sys_net_send(s32 s, vm::cptr<void> buf, u32 len, s32 flags)
{
	libnet.todo("send(s=%d, buf=*0x%x, len=%d, flags=0x%x)", s, buf, len, flags);
	return 0;
}

s32 sys_net_sendmsg(s32 s, vm::cptr<sys_net_msghdr> msg, s32 flags)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_sendto(s32 s, vm::cptr<void> buf, u32 len, s32 flags, vm::cptr<sys_net_sockaddr> addr, u32 addrlen)
{
	libnet.todo("sendto(s=%d, buf=*0x%x, len=%d, flags=0x%x, addr=*0x%x, addrlen=%d)", s, buf, len, flags, addr, addrlen);
	return 0;
}

s32 sys_net_setsockopt(s32 s, s32 level, s32 optname, vm::cptr<void> optval, u32 optlen)
{
	libnet.todo("setsockopt(s=%d, level=%d, optname=%d, optval=*0x%x, optlen=%d)", s, level, optname, optval, optlen);
	return 0;
}

s32 sys_net_shutdown(s32 s, s32 how)
{
	libnet.todo("shutdown(s=%d, how=%d)", s, how);
	return 0;
}

// Stub: always returns socket descriptor 0
s32 sys_net_socket(s32 family, s32 type, s32 protocol)
{
	libnet.todo("socket(family=%d, type=%d, protocol=%d)", family, type, protocol);
	return 0;
}

s32 sys_net_socketclose(s32 s)
{
	libnet.warning("socketclose(s=%d)", s);
	return 0;
}

s32 sys_net_socketpoll(vm::ptr<sys_net_pollfd> fds, s32 nfds, s32 ms)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

// Stub: reports 0 ready descriptors without inspecting or waiting on the sets
s32 sys_net_socketselect(s32 nfds, vm::ptr<sys_net_fd_set> readfds, vm::ptr<sys_net_fd_set> writefds, vm::ptr<sys_net_fd_set> exceptfds, vm::ptr<sys_net_timeval> timeout)
{
	libnet.todo("socketselect(nfds=%d, readfds=*0x%x, writefds=*0x%x, exceptfds=*0x%x, timeout=*0x%x)", nfds, readfds, writefds, exceptfds, timeout);
	return 0;
}
// sys_net library management / diagnostic entry points. All stubs: log and report success.

s32 sys_net_initialize_network_ex(vm::ptr<sys_net_initialize_parameter_t> param)
{
	libnet.todo("sys_net_initialize_network_ex(param=*0x%x)", param);
	return CELL_OK;
}

s32 sys_net_get_udpp2p_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_set_udpp2p_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_lib_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_if_ctl()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_if_list()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_netemu_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_routing_table_af()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

// Stub: the sockinfo output buffer is not filled
s32 sys_net_get_sockinfo(s32 s, vm::ptr<sys_net_sockinfo_t> p, s32 n)
{
	libnet.todo("sys_net_get_sockinfo(s=%d, p=*0x%x, n=%d)", s, p, n);
	return CELL_OK;
}

s32 sys_net_close_dump(s32 id, vm::ptr<s32> pflags)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_set_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_show_nameserver()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}
// Return a pointer to a fake errno slot carved out of the thread's system TLS
// area (addressed relative to the PPU r13 TLS base register).
vm::ptr<s32> _sys_net_errno_loc(ppu_thread& ppu)
{
	libnet.warning("_sys_net_errno_loc()");

	const u64 errno_addr = ppu.gpr[13] - 0x7030 + 0x2c;
	return vm::cast(errno_addr);
}
// Resolver / dump / teardown entry points. All stubs: log and report success.

s32 sys_net_set_resolver_configurations()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_show_route()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_read_dump(s32 id, vm::ptr<void> buf, s32 len, vm::ptr<s32> pflags)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_abort_resolver()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_abort_socket()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_set_lib_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_get_sockinfo_ex()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_open_dump(s32 len, s32 flags)
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_show_ifconfig()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_finalize_network()
{
	libnet.todo("sys_net_finalize_network()");
	return CELL_OK;
}
// Return a pointer to a fake h_errno slot carved out of the thread's system TLS
// area (addressed relative to the PPU r13 TLS base register).
vm::ptr<s32> _sys_net_h_errno_loc(ppu_thread& ppu)
{
	libnet.warning("_sys_net_h_errno_loc()");

	const u64 h_errno_addr = ppu.gpr[13] - 0x7030 + 0x28;
	return vm::cast(h_errno_addr);
}
// Internal library helpers (_sys_net_lib_*), netset configuration (sys_netset_*)
// and low-level sce_net setters (_sce_net_*). All stubs: log and report success.

s32 sys_net_set_netemu_test_param()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_net_free_thread_context(u64 tid, s32 flags)
{
	libnet.todo("sys_net_free_thread_context(tid=0x%x, flags=%d)", tid, flags);
	return CELL_OK;
}

s32 _sys_net_lib_abort()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_bnet_control()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 __sys_net_lib_calloc()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_free()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_get_system_time()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_if_nametoindex()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_ioctl()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 __sys_net_lib_malloc()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_rand()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 __sys_net_lib_realloc()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_reset_libnetctl_queue()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_set_libnetctl_queue()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_thread_create()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_thread_exit()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_thread_join()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sync_clear()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sync_create()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sync_destroy()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sync_signal()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sync_wait()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_sysctl()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sys_net_lib_usleep()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_abort()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_close()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_get_if_id()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_get_status()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_if_down()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_get_key_value()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_if_up()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 sys_netset_open()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_get_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_add_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_add_name_server_with_char()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_flush_route()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_set_default_gateway()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_set_ip_and_mask()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}

s32 _sce_net_set_name_server()
{
	UNIMPLEMENTED_FUNC(libnet);
	return CELL_OK;
}
// Register the sys_net HLE module. The BSD-style names are exported under their
// plain C identifiers via REG_FNID; the sys_net_* / _sys_net_* helpers use REG_FUNC.
DECLARE(ppu_module_manager::sys_net)("sys_net", []()
{
	REG_FNID(sys_net, "accept", sys_net_accept);
	REG_FNID(sys_net, "bind", sys_net_bind);
	REG_FNID(sys_net, "connect", sys_net_connect);
	REG_FNID(sys_net, "gethostbyaddr", sys_net_gethostbyaddr);
	REG_FNID(sys_net, "gethostbyname", sys_net_gethostbyname);
	REG_FNID(sys_net, "getpeername", sys_net_getpeername);
	REG_FNID(sys_net, "getsockname", sys_net_getsockname);
	REG_FNID(sys_net, "getsockopt", sys_net_getsockopt);
	REG_FNID(sys_net, "inet_addr", sys_net_inet_addr);
	REG_FNID(sys_net, "inet_aton", sys_net_inet_aton);
	REG_FNID(sys_net, "inet_lnaof", sys_net_inet_lnaof);
	REG_FNID(sys_net, "inet_makeaddr", sys_net_inet_makeaddr);
	REG_FNID(sys_net, "inet_netof", sys_net_inet_netof);
	REG_FNID(sys_net, "inet_network", sys_net_inet_network);
	REG_FNID(sys_net, "inet_ntoa", sys_net_inet_ntoa);
	REG_FNID(sys_net, "inet_ntop", sys_net_inet_ntop);
	REG_FNID(sys_net, "inet_pton", sys_net_inet_pton);
	REG_FNID(sys_net, "listen", sys_net_listen);
	REG_FNID(sys_net, "recv", sys_net_recv);
	REG_FNID(sys_net, "recvfrom", sys_net_recvfrom);
	REG_FNID(sys_net, "recvmsg", sys_net_recvmsg);
	REG_FNID(sys_net, "send", sys_net_send);
	REG_FNID(sys_net, "sendmsg", sys_net_sendmsg);
	REG_FNID(sys_net, "sendto", sys_net_sendto);
	REG_FNID(sys_net, "setsockopt", sys_net_setsockopt);
	REG_FNID(sys_net, "shutdown", sys_net_shutdown);
	REG_FNID(sys_net, "socket", sys_net_socket);
	REG_FNID(sys_net, "socketclose", sys_net_socketclose);
	REG_FNID(sys_net, "socketpoll", sys_net_socketpoll);
	REG_FNID(sys_net, "socketselect", sys_net_socketselect);

	REG_FUNC(sys_net, sys_net_initialize_network_ex);
	REG_FUNC(sys_net, sys_net_get_udpp2p_test_param);
	REG_FUNC(sys_net, sys_net_set_udpp2p_test_param);
	REG_FUNC(sys_net, sys_net_get_lib_name_server);
	REG_FUNC(sys_net, sys_net_if_ctl);
	REG_FUNC(sys_net, sys_net_get_if_list);
	REG_FUNC(sys_net, sys_net_get_name_server);
	REG_FUNC(sys_net, sys_net_get_netemu_test_param);
	REG_FUNC(sys_net, sys_net_get_routing_table_af);
	REG_FUNC(sys_net, sys_net_get_sockinfo);
	REG_FUNC(sys_net, sys_net_close_dump);
	REG_FUNC(sys_net, sys_net_set_test_param);
	REG_FUNC(sys_net, sys_net_show_nameserver);
	REG_FUNC(sys_net, _sys_net_errno_loc);
	REG_FUNC(sys_net, sys_net_set_resolver_configurations);
	REG_FUNC(sys_net, sys_net_show_route);
	REG_FUNC(sys_net, sys_net_read_dump);
	REG_FUNC(sys_net, sys_net_abort_resolver);
	REG_FUNC(sys_net, sys_net_abort_socket);
	REG_FUNC(sys_net, sys_net_set_lib_name_server);
	REG_FUNC(sys_net, sys_net_get_test_param);
	REG_FUNC(sys_net, sys_net_get_sockinfo_ex);
	REG_FUNC(sys_net, sys_net_open_dump);
	REG_FUNC(sys_net, sys_net_show_ifconfig);
	REG_FUNC(sys_net, sys_net_finalize_network);
	REG_FUNC(sys_net, _sys_net_h_errno_loc);
	REG_FUNC(sys_net, sys_net_set_netemu_test_param);
	REG_FUNC(sys_net, sys_net_free_thread_context);
	REG_FUNC(sys_net, _sys_net_lib_abort);
	REG_FUNC(sys_net, _sys_net_lib_bnet_control);
	REG_FUNC(sys_net, __sys_net_lib_calloc);
	REG_FUNC(sys_net, _sys_net_lib_free);
	REG_FUNC(sys_net, _sys_net_lib_get_system_time);
	REG_FUNC(sys_net, _sys_net_lib_if_nametoindex);
	REG_FUNC(sys_net, _sys_net_lib_ioctl);
	REG_FUNC(sys_net, __sys_net_lib_malloc);
	REG_FUNC(sys_net, _sys_net_lib_rand);
	REG_FUNC(sys_net, __sys_net_lib_realloc);
	REG_FUNC(sys_net, _sys_net_lib_reset_libnetctl_queue);
	REG_FUNC(sys_net, _sys_net_lib_set_libnetctl_queue);
	REG_FUNC(sys_net, _sys_net_lib_thread_create);
	REG_FUNC(sys_net, _sys_net_lib_thread_exit);
	REG_FUNC(sys_net, _sys_net_lib_thread_join);
	REG_FUNC(sys_net, _sys_net_lib_sync_clear);
	REG_FUNC(sys_net, _sys_net_lib_sync_create);
	REG_FUNC(sys_net, _sys_net_lib_sync_destroy);
	REG_FUNC(sys_net, _sys_net_lib_sync_signal);
	REG_FUNC(sys_net, _sys_net_lib_sync_wait);
	REG_FUNC(sys_net, _sys_net_lib_sysctl);
	REG_FUNC(sys_net, _sys_net_lib_usleep);
	REG_FUNC(sys_net, sys_netset_abort);
	REG_FUNC(sys_net, sys_netset_close);
	REG_FUNC(sys_net, sys_netset_get_if_id);
	REG_FUNC(sys_net, sys_netset_get_key_value);
	REG_FUNC(sys_net, sys_netset_get_status);
	REG_FUNC(sys_net, sys_netset_if_down);
	REG_FUNC(sys_net, sys_netset_if_up);
	REG_FUNC(sys_net, sys_netset_open);
	REG_FUNC(sys_net, _sce_net_add_name_server);
	REG_FUNC(sys_net, _sce_net_add_name_server_with_char);
	REG_FUNC(sys_net, _sce_net_flush_route);
	REG_FUNC(sys_net, _sce_net_get_name_server);
	REG_FUNC(sys_net, _sce_net_set_default_gateway);
	REG_FUNC(sys_net, _sce_net_set_ip_and_mask);
	REG_FUNC(sys_net, _sce_net_set_name_server);
});
| 14,789
|
C++
|
.cpp
| 584
| 23.611301
| 170
| 0.723643
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,308
|
cellSysutilAvc.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSysutilAvc.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/Modules/cellSysutilAvc.h"
#include "Emu/Cell/Modules/cellSysutil.h"
#include "Emu/IdManager.h"
LOG_CHANNEL(cellSysutil);
// Formatter specialization: print CellAvcError values by their enumerator names.
template<>
void fmt_class_string<CellAvcError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_AVC_ERROR_UNKNOWN);
		STR_CASE(CELL_AVC_ERROR_NOT_SUPPORTED);
		STR_CASE(CELL_AVC_ERROR_NOT_INITIALIZED);
		STR_CASE(CELL_AVC_ERROR_ALREADY_INITIALIZED);
		STR_CASE(CELL_AVC_ERROR_INVALID_ARGUMENT);
		STR_CASE(CELL_AVC_ERROR_OUT_OF_MEMORY);
		STR_CASE(CELL_AVC_ERROR_BAD_ID);
		STR_CASE(CELL_AVC_ERROR_INVALID_STATUS);
		STR_CASE(CELL_AVC_ERROR_TIMEOUT);
		STR_CASE(CELL_AVC_ERROR_NO_SESSION);
		STR_CASE(CELL_AVC_ERROR_INCOMPATIBLE_PROTOCOL);
		STR_CASE(CELL_AVC_ERROR_PEER_UNREACHABLE);
		}

		// Fall back to the numeric value for unlisted codes
		return unknown;
	});
}
// Callback handle tag type used to identify AVC registrations in the sysutil dispatcher
struct avc_cb_handle_t{};

// Global state of the AVC (audio/video chat) utility: the registered guest
// callback, its argument, and a monotonically increasing request id counter.
struct avc_settings
{
	avc_settings() = default;

	avc_settings(const avc_settings&) = delete;
	avc_settings& operator=(const avc_settings&) = delete;

	SAVESTATE_INIT_POS(53);

	// Guards avc_cb / avc_cb_arg
	shared_mutex mutex_cb;
	vm::ptr<CellSysutilAvcCallback> avc_cb{};
	vm::ptr<void> avc_cb_arg{};
	atomic_t<u32> req_id_cnt = 0;

	// Only participates in savestates once the cellSysutil serialization version exists
	static bool saveable(bool /*is_writing*/) noexcept
	{
		return GET_SERIALIZATION_VERSION(cellSysutil) != 0;
	}

	// Savestate load constructor: skips deserialization for version-0 states
	avc_settings(utils::serial& ar) noexcept
	{
		[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(cellSysutil);

		if (version == 0)
		{
			return;
		}

		save(ar);
	}

	// Serializes (or deserializes, depending on ar's mode) the callback registration
	void save(utils::serial& ar)
	{
		[[maybe_unused]] const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), cellSysutil);

		ar(avc_cb, avc_cb_arg);
	}

	// Queue a sysutil callback invocation reporting event_id/param for request req_id
	void register_cb_call(CellSysutilAvcRequestId req_id, CellSysutilAvcEvent event_id, CellSysUtilAvcEventParam param)
	{
		// This is equivalent to the dispatcher code
		sysutil_register_cb_with_id<avc_cb_handle_t>([=, this](ppu_thread& cb_ppu) -> s32
		{
			// Snapshot the registration under the lock; the call itself happens unlocked
			vm::ptr<CellSysutilAvcCallback> avc_cb{};
			vm::ptr<void> avc_cb_arg{};
			{
				std::lock_guard lock(this->mutex_cb);
				avc_cb = this->avc_cb;
				avc_cb_arg = this->avc_cb_arg;
			}

			if (avc_cb)
			{
				// TODO: lots of checks before calling the cb

				// A successful unload deregisters the callback before invoking it one last time
				if (event_id == CELL_AVC_EVENT_UNLOAD_SUCCEEDED)
				{
					std::lock_guard lock(this->mutex_cb);
					this->avc_cb = {};
					this->avc_cb_arg = {};
				}

				avc_cb(cb_ppu, req_id, event_id, param, avc_cb_arg);
			}

			return 0;
		});
	}
};
error_code cellSysutilAvcByeRequest(vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcByeRequest(request_id=*0x%x)", request_id);
if (!request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
auto& settings = g_fxo->get<avc_settings>();
const CellSysutilAvcRequestId req_id = settings.req_id_cnt.fetch_add(1);
*request_id = req_id;
settings.register_cb_call(req_id, CELL_AVC_EVENT_BYE_SUCCEEDED, static_cast<CellSysUtilAvcEventParam>(0));
return CELL_OK;
}
error_code cellSysutilAvcCancelByeRequest(vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcCancelByeRequest(request_id=*0x%x)", request_id);
if (!request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcCancelJoinRequest(vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcCancelJoinRequest(request_id=*0x%x)", request_id);
if (!request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcEnumPlayers(vm::ptr<SceNpId> players_id, vm::ptr<s32> players_num)
{
cellSysutil.todo("cellSysutilAvcEnumPlayers(players_id=*0x%x, players_num=*0x%x)", players_id, players_num);
if (!players_num)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
if (players_id)
{
// Fill players_id with players_num participants
}
else
{
// Return number of participants
*players_num = 0;
}
return CELL_OK;
}
error_code cellSysutilAvcGetAttribute(CellSysUtilAvcAttribute attr_id, vm::pptr<void> param)
{
cellSysutil.todo("cellSysutilAvcGetAttribute(attr_id=0x%x, param=*0x%x)", +attr_id, param);
if (!param)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcGetLayoutMode(vm::ptr<CellSysutilAvcLayoutMode> layout)
{
cellSysutil.todo("cellSysutilAvcGetLayoutMode(layout=*0x%x)", layout);
if (!layout)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcGetShowStatus(vm::ptr<b8> is_visible)
{
cellSysutil.todo("cellSysutilAvcGetShowStatus(is_visible=*0x%x)", is_visible);
if (!is_visible)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcGetSpeakerVolumeLevel(vm::ptr<s32> volumeLevel)
{
cellSysutil.todo("cellSysutilAvcGetSpeakerVolumeLevel(volumeLevel=*0x%x)", volumeLevel);
if (!volumeLevel)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcGetVideoMuting(vm::ptr<b8> is_muting)
{
cellSysutil.todo("cellSysutilAvcGetVideoMuting(is_muting=*0x%x)", is_muting);
if (!is_muting)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcGetVoiceMuting(vm::ptr<b8> is_muting)
{
cellSysutil.todo("cellSysutilAvcGetVoiceMuting(is_muting=*0x%x)", is_muting);
if (!is_muting)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcHidePanel()
{
cellSysutil.todo("cellSysutilAvcHidePanel()");
return CELL_OK;
}
error_code cellSysutilAvcJoinRequest(u32 ctx_id, vm::cptr<SceNpRoomId> room_id, vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcJoinRequest(ctx_id=*0x%x, room_id=*0x%x, request_id=*0x%x)", ctx_id, room_id, request_id);
if (!room_id || !request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
auto& settings = g_fxo->get<avc_settings>();
const CellSysutilAvcRequestId req_id = settings.req_id_cnt.fetch_add(1);
*request_id = req_id;
settings.register_cb_call(req_id, CELL_AVC_EVENT_JOIN_SUCCEEDED, static_cast<CellSysUtilAvcEventParam>(0));
return CELL_OK;
}
error_code cellSysutilAvcLoadAsync(vm::ptr<CellSysutilAvcCallback> func, vm::ptr<void> userdata, sys_memory_container_t container,
CellSysUtilAvcMediaType media, CellSysUtilAvcVideoQuality videoQuality, CellSysUtilAvcVoiceQuality voiceQuality, vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcLoadAsync(func=*0x%x, userdata=*0x%x, container=0x%x, media=0x%x, videoQuality=0x%x, voiceQuality=0x%x, request_id=*0x%x)",
func, userdata, container, +media, +videoQuality, +voiceQuality, request_id);
//if (sys_memory_container_get_size(.., container) != CELL_OK)
// return CELL_AVC_ERROR_INVALID_ARGUMENT;
switch (media)
{
case CELL_SYSUTIL_AVC_VOICE_CHAT:
case CELL_SYSUTIL_AVC_VIDEO_CHAT:
// TODO: return CELL_AVC_ERROR_OUT_OF_MEMORY
// TODO: return CELL_AVC_ERROR_INVALID_ARGUMENT
break;
default:
return CELL_AVC_ERROR_INVALID_ARGUMENT;
}
if (!func || !request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
if (videoQuality != CELL_SYSUTIL_AVC_VIDEO_QUALITY_DEFAULT || voiceQuality != CELL_SYSUTIL_AVC_VOICE_QUALITY_DEFAULT)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
auto& settings = g_fxo->get<avc_settings>();
{
std::lock_guard lock(settings.mutex_cb);
if (settings.avc_cb)
return CELL_AVC_ERROR_ALREADY_INITIALIZED;
settings.avc_cb = func;
settings.avc_cb_arg = userdata;
}
const CellSysutilAvcRequestId req_id = settings.req_id_cnt.fetch_add(1);
*request_id = req_id;
settings.register_cb_call(req_id, CELL_AVC_EVENT_LOAD_SUCCEEDED, static_cast<CellSysUtilAvcEventParam>(0));
return CELL_OK;
}
error_code cellSysutilAvcSetAttribute(CellSysUtilAvcAttribute attr_id, vm::ptr<void> param)
{
cellSysutil.todo("cellSysutilAvcSetAttribute(attr_id=0x%x, param=*0x%x)", +attr_id, param);
if (!param)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcSetLayoutMode(CellSysutilAvcLayoutMode layout)
{
cellSysutil.todo("cellSysutilAvcSetLayoutMode(layout=0x%x)", +layout);
if (layout > CELL_SYSUTIL_AVC_LAYOUT_BOTTOM)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcSetSpeakerVolumeLevel(s32 volumeLevel)
{
cellSysutil.todo("cellSysutilAvcSetSpeakerVolumeLevel(volumeLevel=%d)", volumeLevel);
if (volumeLevel < 0 || volumeLevel > 10)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
return CELL_OK;
}
error_code cellSysutilAvcSetVideoMuting(b8 is_muting)
{
cellSysutil.todo("cellSysutilAvcSetVideoMuting(is_muting=%d)", is_muting);
return CELL_OK;
}
error_code cellSysutilAvcSetVoiceMuting(b8 is_muting)
{
cellSysutil.todo("cellSysutilAvcSetVoiceMuting(is_muting=%d)", is_muting);
return CELL_OK;
}
error_code cellSysutilAvcShowPanel()
{
cellSysutil.todo("cellSysutilAvcShowPanel()");
return CELL_OK;
}
error_code cellSysutilAvcUnloadAsync(vm::ptr<CellSysutilAvcRequestId> request_id)
{
cellSysutil.todo("cellSysutilAvcUnloadAsync(request_id=*0x%x)", request_id);
if (!request_id)
return CELL_AVC_ERROR_INVALID_ARGUMENT;
auto& settings = g_fxo->get<avc_settings>();
const CellSysutilAvcRequestId req_id = settings.req_id_cnt.fetch_add(1);
*request_id = req_id;
settings.register_cb_call(req_id, CELL_AVC_EVENT_UNLOAD_SUCCEEDED, static_cast<CellSysUtilAvcEventParam>(0));
return CELL_OK;
}
void cellSysutil_SysutilAvc_init()
{
REG_FUNC(cellSysutil, cellSysutilAvcByeRequest);
REG_FUNC(cellSysutil, cellSysutilAvcCancelByeRequest);
REG_FUNC(cellSysutil, cellSysutilAvcCancelJoinRequest);
REG_FUNC(cellSysutil, cellSysutilAvcEnumPlayers);
REG_FUNC(cellSysutil, cellSysutilAvcGetAttribute);
REG_FUNC(cellSysutil, cellSysutilAvcGetLayoutMode);
REG_FUNC(cellSysutil, cellSysutilAvcGetShowStatus);
REG_FUNC(cellSysutil, cellSysutilAvcGetSpeakerVolumeLevel);
REG_FUNC(cellSysutil, cellSysutilAvcGetVideoMuting);
REG_FUNC(cellSysutil, cellSysutilAvcGetVoiceMuting);
REG_FUNC(cellSysutil, cellSysutilAvcHidePanel);
REG_FUNC(cellSysutil, cellSysutilAvcJoinRequest);
REG_FUNC(cellSysutil, cellSysutilAvcLoadAsync);
REG_FUNC(cellSysutil, cellSysutilAvcSetAttribute);
REG_FUNC(cellSysutil, cellSysutilAvcSetLayoutMode);
REG_FUNC(cellSysutil, cellSysutilAvcSetSpeakerVolumeLevel);
REG_FUNC(cellSysutil, cellSysutilAvcSetVideoMuting);
REG_FUNC(cellSysutil, cellSysutilAvcSetVoiceMuting);
REG_FUNC(cellSysutil, cellSysutilAvcShowPanel);
REG_FUNC(cellSysutil, cellSysutilAvcUnloadAsync);
}
| 10,428
|
C++
|
.cpp
| 289
| 33.557093
| 158
| 0.771855
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,309
|
cellSpursJq.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSpursJq.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_spu.h"
#include "cellSpursJq.h"
LOG_CHANNEL(cellSpursJq);
error_code cellSpursJobQueueAttributeInitialize()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetMaxGrab()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetSubmitWithEntryLock()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetDoBusyWaiting()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetIsHaltOnError()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetIsJobTypeMemoryCheck()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetMaxSizeJobDescriptor()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueAttributeSetGrabParameters()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSetWaitingMode()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursShutdownJobQueue()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursCreateJobQueueWithJobDescriptorPool()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursCreateJobQueue()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJoinJobQueue()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushJobListBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushJobBody2()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushJob2Body()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushAndReleaseJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueueAllocateJobDescriptorBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushSync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePushFlush()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueGetSpurs()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueGetHandleCount()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueGetError()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueGetMaxSizeJobDescriptor()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursGetJobQueueId()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueGetSuspendedJobSize()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueClose()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueOpen()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSemaphoreTryAcquire()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSemaphoreAcquire()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSemaphoreInitialize()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSendSignal()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortGetJobQueue()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushSync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushFlush()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushJobListBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushJobBody2()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortPushBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortTrySync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortSync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortInitialize()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortInitializeWithDescriptorBuffer()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePortFinalize()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortCopyPushJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortCopyPushJobBody2()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePortCopyPushBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2GetJobQueue()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2PushSync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2PushFlush()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePort2PushJobListBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2Sync()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2Create()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2Destroy()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueuePort2AllocateJobDescriptor()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePort2PushAndReleaseJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePort2CopyPushJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code _cellSpursJobQueuePort2PushJobBody()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSetExceptionEventHandler()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueSetExceptionEventHandler2()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
error_code cellSpursJobQueueUnsetExceptionEventHandler()
{
UNIMPLEMENTED_FUNC(cellSpursJq);
return CELL_OK;
}
DECLARE(ppu_module_manager::cellSpursJq)("cellSpursJq", []()
{
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeInitialize);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetMaxGrab);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetSubmitWithEntryLock);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetDoBusyWaiting);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetIsHaltOnError);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetIsJobTypeMemoryCheck);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetMaxSizeJobDescriptor);
REG_FUNC(cellSpursJq, cellSpursJobQueueAttributeSetGrabParameters);
REG_FUNC(cellSpursJq, cellSpursJobQueueSetWaitingMode);
REG_FUNC(cellSpursJq, cellSpursShutdownJobQueue);
REG_FUNC(cellSpursJq, _cellSpursCreateJobQueueWithJobDescriptorPool);
REG_FUNC(cellSpursJq, _cellSpursCreateJobQueue);
REG_FUNC(cellSpursJq, cellSpursJoinJobQueue);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushJobListBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushJobBody2);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushJob2Body);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushAndReleaseJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueueAllocateJobDescriptorBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushSync);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePushFlush);
REG_FUNC(cellSpursJq, cellSpursJobQueueGetSpurs);
REG_FUNC(cellSpursJq, cellSpursJobQueueGetHandleCount);
REG_FUNC(cellSpursJq, cellSpursJobQueueGetError);
REG_FUNC(cellSpursJq, cellSpursJobQueueGetMaxSizeJobDescriptor);
REG_FUNC(cellSpursJq, cellSpursGetJobQueueId);
REG_FUNC(cellSpursJq, cellSpursJobQueueGetSuspendedJobSize);
REG_FUNC(cellSpursJq, cellSpursJobQueueClose);
REG_FUNC(cellSpursJq, cellSpursJobQueueOpen);
REG_FUNC(cellSpursJq, cellSpursJobQueueSemaphoreTryAcquire);
REG_FUNC(cellSpursJq, cellSpursJobQueueSemaphoreAcquire);
REG_FUNC(cellSpursJq, cellSpursJobQueueSemaphoreInitialize);
REG_FUNC(cellSpursJq, cellSpursJobQueueSendSignal);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortGetJobQueue);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushSync);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushFlush);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushJobListBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushJobBody2);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortPushBody);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortTrySync);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortSync);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortInitialize);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortInitializeWithDescriptorBuffer);
REG_FUNC(cellSpursJq, cellSpursJobQueuePortFinalize);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortCopyPushJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortCopyPushJobBody2);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePortCopyPushBody);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2GetJobQueue);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2PushSync);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2PushFlush);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePort2PushJobListBody);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2Sync);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2Create);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2Destroy);
REG_FUNC(cellSpursJq, cellSpursJobQueuePort2AllocateJobDescriptor);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePort2PushAndReleaseJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePort2CopyPushJobBody);
REG_FUNC(cellSpursJq, _cellSpursJobQueuePort2PushJobBody);
REG_FUNC(cellSpursJq, cellSpursJobQueueSetExceptionEventHandler);
REG_FUNC(cellSpursJq, cellSpursJobQueueSetExceptionEventHandler2);
REG_FUNC(cellSpursJq, cellSpursJobQueueUnsetExceptionEventHandler);
});
| 10,511
|
C++
|
.cpp
| 386
| 25.569948
| 76
| 0.85257
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,310
|
sys_mmapper_.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/sys_mmapper_.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_mmapper.h"
LOG_CHANNEL(sysPrxForUser);
error_code sys_mmapper_allocate_memory(ppu_thread& ppu, u32 size, u64 flags, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_allocate_memory(size=0x%x, flags=0x%llx, mem_id=*0x%x)", size, flags, mem_id);
return sys_mmapper_allocate_shared_memory(ppu, SYS_MMAPPER_NO_SHM_KEY, size, flags, mem_id);
}
error_code sys_mmapper_allocate_memory_from_container(ppu_thread& ppu, u32 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_allocate_memory_from_container(size=0x%x, cid=0x%x, flags=0x%llx, mem_id=*0x%x)", size, cid, flags, mem_id);
return sys_mmapper_allocate_shared_memory_from_container(ppu, SYS_MMAPPER_NO_SHM_KEY, size, cid, flags, mem_id);
}
error_code sys_mmapper_map_memory(ppu_thread& ppu, u32 addr, u32 mem_id, u64 flags)
{
sysPrxForUser.notice("sys_mmapper_map_memory(addr=0x%x, mem_id=0x%x, flags=0x%llx)", addr, mem_id, flags);
return sys_mmapper_map_shared_memory(ppu, addr, mem_id, flags);
}
error_code sys_mmapper_unmap_memory(ppu_thread& ppu, u32 addr, vm::ptr<u32> mem_id)
{
sysPrxForUser.notice("sys_mmapper_unmap_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);
return sys_mmapper_unmap_shared_memory(ppu, addr, mem_id);
}
error_code sys_mmapper_free_memory(ppu_thread& ppu, u32 mem_id)
{
sysPrxForUser.notice("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);
return sys_mmapper_free_shared_memory(ppu, mem_id);
}
extern void sysPrxForUser_sys_mmapper_init()
{
REG_FUNC(sysPrxForUser, sys_mmapper_allocate_memory);
REG_FUNC(sysPrxForUser, sys_mmapper_allocate_memory_from_container);
REG_FUNC(sysPrxForUser, sys_mmapper_map_memory);
REG_FUNC(sysPrxForUser, sys_mmapper_unmap_memory);
REG_FUNC(sysPrxForUser, sys_mmapper_free_memory);
}
| 1,842
|
C++
|
.cpp
| 37
| 48.054054
| 143
| 0.756274
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,311
|
sceNpSns.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/sceNpSns.cpp
|
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "sceNpSns.h"
#include "sceNp.h"
#include "Emu/NP/np_handler.h"
LOG_CHANNEL(sceNpSns);
template<>
void fmt_class_string<sceNpSnsError>::format(std::string& out, u64 arg)
{
format_enum(out, arg, [](auto error)
{
switch (error)
{
STR_CASE(SCE_NP_SNS_ERROR_UNKNOWN);
STR_CASE(SCE_NP_SNS_ERROR_NOT_SIGN_IN);
STR_CASE(SCE_NP_SNS_ERROR_INVALID_ARGUMENT);
STR_CASE(SCE_NP_SNS_ERROR_OUT_OF_MEMORY);
STR_CASE(SCE_NP_SNS_ERROR_SHUTDOWN);
STR_CASE(SCE_NP_SNS_ERROR_BUSY);
STR_CASE(SCE_NP_SNS_FB_ERROR_ALREADY_INITIALIZED);
STR_CASE(SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED);
STR_CASE(SCE_NP_SNS_FB_ERROR_EXCEEDS_MAX);
STR_CASE(SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE);
STR_CASE(SCE_NP_SNS_FB_ERROR_ABORTED);
STR_CASE(SCE_NP_SNS_FB_ERROR_ALREADY_ABORTED);
STR_CASE(SCE_NP_SNS_FB_ERROR_CONFIG_DISABLED);
STR_CASE(SCE_NP_SNS_FB_ERROR_FBSERVER_ERROR_RESPONSE);
STR_CASE(SCE_NP_SNS_FB_ERROR_THROTTLE_CLOSED);
STR_CASE(SCE_NP_SNS_FB_ERROR_OPERATION_INTERVAL_VIOLATION);
STR_CASE(SCE_NP_SNS_FB_ERROR_UNLOADED_THROTTLE);
STR_CASE(SCE_NP_SNS_FB_ERROR_ACCESS_NOT_ALLOWED);
}
return unknown;
});
}
error_code sceNpSnsFbInit(vm::cptr<SceNpSnsFbInitParams> params)
{
sceNpSns.todo("sceNpSnsFbInit(params=*0x%x)", params);
auto& manager = g_fxo->get<sce_np_sns_manager>();
if (manager.is_initialized)
{
return SCE_NP_SNS_FB_ERROR_ALREADY_INITIALIZED;
}
if (!params)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
// TODO: Use the initialization parameters somewhere
manager.is_initialized = true;
return CELL_OK;
}
error_code sceNpSnsFbTerm()
{
sceNpSns.warning("sceNpSnsFbTerm()");
auto& manager = g_fxo->get<sce_np_sns_manager>();
if (!manager.is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
manager.is_initialized = false;
return CELL_OK;
}
error_code sceNpSnsFbCreateHandle(vm::ptr<u32> handle)
{
sceNpSns.warning("sceNpSnsFbCreateHandle(handle=*0x%x)", handle);
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
if (!handle)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
// TODO: is handle set here or after the next check ?
*handle = idm::make<sns_fb_handle_t>();
if (*handle == SCE_NP_SNS_FB_INVALID_HANDLE) // id_count > SCE_NP_SNS_FB_HANDLE_SLOT_MAX
{
return SCE_NP_SNS_FB_ERROR_EXCEEDS_MAX;
}
return CELL_OK;
}
error_code sceNpSnsFbDestroyHandle(u32 handle)
{
sceNpSns.warning("sceNpSnsFbDestroyHandle(handle=%d)", handle);
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
if (!idm::remove<sns_fb_handle_t>(handle))
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
return CELL_OK;
}
error_code sceNpSnsFbAbortHandle(u32 handle)
{
sceNpSns.todo("sceNpSnsFbAbortHandle(handle=%d)", handle);
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
if (!sfh)
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
// TODO
return CELL_OK;
}
error_code sceNpSnsFbGetAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTokenParam> param, vm::ptr<SceNpSnsFbAccessTokenResult> result)
{
sceNpSns.todo("sceNpSnsFbGetAccessToken(handle=%d, param=*0x%x, result=*0x%x)", handle, param, result);
if (!param || !result || !param->fb_app_id)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
// TODO: test the following checks
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
if (!sfh)
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
if (nph.get_psn_status() == SCE_NP_MANAGER_STATUS_OFFLINE)
{
return not_an_error(SCE_NP_SNS_ERROR_NOT_SIGN_IN);
}
// TODO
return CELL_OK;
}
s32 sceNpSnsFbStreamPublish(u32 handle) // add more arguments
{
sceNpSns.todo("sceNpSnsFbStreamPublish(handle=%d, ...)", handle);
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
if (!sfh)
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
//if (canceled)
//{
// return CELL_ECANCELED;
//}
//if (aborted)
//{
// return SCE_NP_SNS_FB_ERROR_ABORTED;
//}
return CELL_OK;
}
s32 sceNpSnsFbCheckThrottle(vm::ptr<void> arg0)
{
sceNpSns.todo("sceNpSnsFbCheckThrottle(arg0=*0x%x)", arg0);
if (!arg0)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
return CELL_OK;
}
s32 sceNpSnsFbCheckConfig(vm::ptr<void> arg0)
{
sceNpSns.todo("sceNpSnsFbCheckConfig(arg0=*0x%x)", arg0);
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
return CELL_OK;
}
s32 sceNpSnsFbLoadThrottle(u32 handle)
{
sceNpSns.todo("sceNpSnsFbLoadThrottle(handle=%d)", handle);
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
if (!sfh)
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
//if (canceled)
//{
// return CELL_ECANCELED;
//}
//if (aborted)
//{
// return SCE_NP_SNS_FB_ERROR_ABORTED;
//}
return CELL_OK;
}
error_code sceNpSnsFbGetLongAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTokenParam> param, vm::ptr<SceNpSnsFbLongAccessTokenResult> result)
{
sceNpSns.todo("sceNpSnsFbGetLongAccessToken(handle=%d, param=*0x%x, result=*0x%x)", handle, param, result);
if (!param || !result || !param->fb_app_id)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
if (!g_fxo->get<sce_np_sns_manager>().is_initialized)
{
return SCE_NP_SNS_FB_ERROR_NOT_INITIALIZED;
}
// TODO: test the following checks
if (handle == SCE_NP_SNS_FB_INVALID_HANDLE || handle > SCE_NP_SNS_FB_HANDLE_SLOT_MAX)
{
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
if (!sfh)
{
return SCE_NP_SNS_FB_ERROR_UNKNOWN_HANDLE;
}
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
if (nph.get_psn_status() == SCE_NP_MANAGER_STATUS_OFFLINE)
{
return not_an_error(SCE_NP_SNS_ERROR_NOT_SIGN_IN);
}
return CELL_OK;
}
DECLARE(ppu_module_manager::sceNpSns)("sceNpSns", []()
{
REG_FUNC(sceNpSns, sceNpSnsFbInit);
REG_FUNC(sceNpSns, sceNpSnsFbTerm);
REG_FUNC(sceNpSns, sceNpSnsFbCreateHandle);
REG_FUNC(sceNpSns, sceNpSnsFbDestroyHandle);
REG_FUNC(sceNpSns, sceNpSnsFbAbortHandle);
REG_FUNC(sceNpSns, sceNpSnsFbGetAccessToken);
REG_FUNC(sceNpSns, sceNpSnsFbGetLongAccessToken);
REG_FUNC(sceNpSns, sceNpSnsFbStreamPublish);
REG_FUNC(sceNpSns, sceNpSnsFbCheckThrottle);
REG_FUNC(sceNpSns, sceNpSnsFbCheckConfig);
REG_FUNC(sceNpSns, sceNpSnsFbLoadThrottle);
});
| 7,467
|
C++
|
.cpp
| 255
| 26.890196
| 144
| 0.729184
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,312
|
cellMusicSelectionContext.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellMusicSelectionContext.cpp
|
#include "stdafx.h"
#include "cellMusic.h"
#include "util/yaml.hpp"
#include "Emu/VFS.h"
#include <random>
// This is just a helper and not a real cell entity
LOG_CHANNEL(cellMusicSelectionContext);
bool music_selection_context::set(const CellMusicSelectionContext& in)
{
if (memcmp(in.data, magic, sizeof(magic)) != 0)
{
return false;
}
u32 pos = sizeof(magic);
hash = &in.data[pos];
return load_playlist();
}
CellMusicSelectionContext music_selection_context::get() const
{
if (hash.size() + sizeof(magic) > CELL_MUSIC_SELECTION_CONTEXT_SIZE)
{
fmt::throw_exception("Contents of music_selection_context are too large");
}
CellMusicSelectionContext out{};
u32 pos = 0;
std::memset(out.data, 0, CELL_MUSIC_SELECTION_CONTEXT_SIZE);
std::memcpy(out.data, magic, sizeof(magic));
pos += sizeof(magic);
std::memcpy(&out.data[pos], hash.c_str(), hash.size());
return out;
}
std::string music_selection_context::to_string() const
{
std::string str = fmt::format(".magic='%s', .content_type=%d, .repeat_mode=%d, .context_option=%d, .first_track=%d, .tracks=%d, .hash='%s', .playlist:",
magic, static_cast<u32>(content_type), static_cast<u32>(repeat_mode), static_cast<u32>(context_option), first_track, playlist.size(), hash);
for (usz i = 0; i < playlist.size(); i++)
{
fmt::append(str, "\n - Track %d: %s", i, ::at32(playlist, i));
}
return str;
}
std::string music_selection_context::get_next_hash()
{
static u64 hash_counter = 0;
return fmt::format("music_selection_context_%d", hash_counter++);
}
std::string music_selection_context::context_to_hex(const CellMusicSelectionContext& context)
{
std::string dahex;
for (usz i = 0; i < CELL_MUSIC_SELECTION_CONTEXT_SIZE; i++)
{
fmt::append(dahex, " %.2x", context.data[i]);
}
return dahex;
}
std::string music_selection_context::get_yaml_path() const
{
std::string path = fs::get_cache_dir() + "cache/playlists/";
if (!fs::create_path(path))
{
cellMusicSelectionContext.fatal("Failed to create path: %s (%s)", path, fs::g_tls_error);
}
return path + hash + ".yml";
}
void music_selection_context::set_playlist(const std::string& path)
{
playlist.clear();
const std::string dir_path = "/dev_hdd0/music";
const std::string vfs_dir_path = vfs::get("/dev_hdd0/music");
if (fs::is_dir(path))
{
content_type = CELL_SEARCH_CONTENTTYPE_MUSICLIST;
for (auto&& dir_entry : fs::dir{path})
{
if (dir_entry.name == "." || dir_entry.name == "..")
{
continue;
}
playlist.push_back(dir_path + std::string(path + "/" + dir_entry.name).substr(vfs_dir_path.length()));
}
}
else
{
content_type = CELL_SEARCH_CONTENTTYPE_MUSIC;
playlist.push_back(dir_path + path.substr(vfs_dir_path.length()));
}
}
void music_selection_context::create_playlist(const std::string& new_hash)
{
hash = new_hash;
const std::string yaml_path = get_yaml_path();
cellMusicSelectionContext.notice("Saving music playlist file %s", yaml_path);
YAML::Emitter out;
out << YAML::BeginMap;
out << "Version" << target_version;
out << "FileType" << target_file_type;
out << "ContentType" << content_type;
out << "ContextOption" << context_option;
out << "RepeatMode" << repeat_mode;
out << "FirstTrack" << first_track;
out << "Tracks" << YAML::BeginSeq;
for (const std::string& track : playlist)
{
out << track;
}
out << YAML::EndSeq;
out << YAML::EndMap;
fs::pending_file file(yaml_path);
if (!file.file || (file.file.write(out.c_str(), out.size()), !file.commit()))
{
cellMusicSelectionContext.error("Failed to create music playlist file %s (error=%s)", yaml_path, fs::g_tls_error);
}
}
// Restores a previously saved selection context from its YAML file (located by
// the current hash). Returns false (leaving 'valid' unset) on any missing file,
// parse error, missing entry, or version/file-type mismatch.
bool music_selection_context::load_playlist()
{
	playlist.clear();

	const std::string path = get_yaml_path();
	cellMusicSelectionContext.notice("Loading music playlist file %s", path);

	std::string content;
	{
		// Load playlist file into memory before parsing.
		fs::file file{path};

		if (!file)
		{
			cellMusicSelectionContext.error("Failed to load music playlist file %s: %s", path, fs::g_tls_error);
			return false;
		}

		content = file.to_string();
	}

	auto [root, error] = yaml_load(content);

	if (!error.empty() || !root)
	{
		cellMusicSelectionContext.error("Failed to load music playlist file %s:\n%s", path, error);
		return false;
	}

	std::string err;

	// Reject files written by an incompatible implementation version.
	const std::string version = get_yaml_node_value<std::string>(root["Version"], err);
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No Version entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	if (version != target_version)
	{
		cellMusicSelectionContext.error("Version '%s' does not match music playlist target '%s' (file: %s)", version, target_version, path);
		return false;
	}

	const std::string file_type = get_yaml_node_value<std::string>(root["FileType"], err);
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No FileType entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	if (file_type != target_file_type)
	{
		cellMusicSelectionContext.error("FileType '%s' does not match music playlist target '%s' (file: %s)", file_type, target_file_type, path);
		return false;
	}

	// Restore the enum-valued settings (stored as plain u32 in the file).
	content_type = static_cast<CellSearchContentType>(get_yaml_node_value<u32>(root["ContentType"], err));
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No ContentType entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	context_option = static_cast<CellSearchContextOption>(get_yaml_node_value<u32>(root["ContextOption"], err));
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No ContextOption entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	repeat_mode = static_cast<CellSearchRepeatMode>(get_yaml_node_value<u32>(root["RepeatMode"], err));
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No RepeatMode entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	first_track = get_yaml_node_value<u32>(root["FirstTrack"], err);
	if (!err.empty())
	{
		cellMusicSelectionContext.error("No FirstTrack entry found. Error: '%s' (file: %s)", err, path);
		return false;
	}

	// Restore the track list itself.
	const YAML::Node& track_node = root["Tracks"];

	if (!track_node || track_node.Type() != YAML::NodeType::Sequence)
	{
		cellMusicSelectionContext.error("No Tracks entry found or Tracks is not a Sequence. (file: %s)", path);
		return false;
	}

	for (usz i = 0; i < track_node.size(); i++)
	{
		playlist.push_back(track_node[i].Scalar());
	}

	valid = true;
	return true;
}
// Advances (next=true) or rewinds (next=false) the playlist according to the
// current repeat mode and returns the new track index, or umax when playback
// should stop (empty playlist, end/start reached in NONE mode, NOREPEAT1).
// In shuffle+repeat-all mode the playlist is reshuffled on each wrap-around.
u32 music_selection_context::step_track(bool next)
{
	if (playlist.empty())
	{
		cellMusicSelectionContext.error("No tracks to play...");
		current_track = umax;
		return umax;
	}

	switch (repeat_mode)
	{
	case CELL_SEARCH_REPEATMODE_NONE:
	{
		if (next)
		{
			// Try to play the next track.
			if (++current_track >= playlist.size())
			{
				// We are at the end of the playlist.
				cellMusicSelectionContext.notice("No more tracks to play in playlist...");
				current_track = umax;
				return umax;
			}
		}
		else
		{
			// Try to play the previous track.
			if (current_track == 0)
			{
				// We are at the start of the playlist.
				cellMusicSelectionContext.notice("No more tracks to play in playlist...");
				current_track = umax;
				return umax;
			}
			current_track--;
		}
		break;
	}
	case CELL_SEARCH_REPEATMODE_REPEAT1:
	{
		// Keep decoding the same track.
		break;
	}
	case CELL_SEARCH_REPEATMODE_ALL:
	{
		if (next)
		{
			// Play the next track. Start with the first track if we reached the end of the playlist.
			current_track = (current_track + 1) % playlist.size();
		}
		else
		{
			// Play the previous track. Start with the last track if we reached the start of the playlist.
			if (current_track == 0)
			{
				current_track = ::narrow<u32>(playlist.size() - 1);
			}
			else
			{
				current_track--;
			}
		}
		break;
	}
	case CELL_SEARCH_REPEATMODE_NOREPEAT1:
	{
		// We are done. We only wanted to decode a single track.
		cellMusicSelectionContext.notice("No more tracks to play...");
		current_track = umax;
		return umax;
	}
	default:
	{
		fmt::throw_exception("Unknown repeat mode %d", static_cast<u32>(repeat_mode));
	}
	}

	if (context_option == CELL_SEARCH_CONTEXTOPTION_SHUFFLE && repeat_mode == CELL_SEARCH_REPEATMODE_ALL && playlist.size() > 1)
	{
		if (next ? current_track == 0 : current_track == (playlist.size() - 1))
		{
			// We reached the first or last track again. Let's shuffle!
			cellMusicSelectionContext.notice("Shuffling playlist...");

			// Bugfix: a default-constructed engine is default-seeded, so every
			// "shuffle" produced the exact same permutation. Seed it properly.
			std::default_random_engine engine(std::random_device{}());
			std::shuffle(std::begin(playlist), std::end(playlist), engine);
		}
	}

	return current_track;
}
| 8,571
|
C++
|
.cpp
| 282
| 27.765957
| 153
| 0.691569
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,313
|
cellDmux.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellDmux.cpp
|
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellPamf.h"
#include "cellDmux.h"
#include "util/asm.hpp"
LOG_CHANNEL(cellDmux);
// Formatter specialization: prints CellDmuxError values by their enum name.
template <>
void fmt_class_string<CellDmuxError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](CellDmuxError value)
	{
		switch (value)
		{
		STR_CASE(CELL_DMUX_ERROR_ARG);
		STR_CASE(CELL_DMUX_ERROR_SEQ);
		STR_CASE(CELL_DMUX_ERROR_BUSY);
		STR_CASE(CELL_DMUX_ERROR_EMPTY);
		STR_CASE(CELL_DMUX_ERROR_FATAL);
		}

		// Unnamed values fall through to the generic "unknown" representation.
		return unknown;
	});
}
/* Demuxer Thread Classes */
// MPEG program stream start codes used by the demuxer loop below.
enum
{
	/* http://dvd.sourceforge.net/dvdinfo/mpeghdrs.html */
	PACKET_START_CODE_MASK = 0xffffff00, // mask to test for the 00 00 01 xx prefix
	PACKET_START_CODE_PREFIX = 0x00000100,

	PACK_START_CODE = 0x000001ba,
	SYSTEM_HEADER_START_CODE = 0x000001bb,
	PRIVATE_STREAM_1 = 0x000001bd, // audio and user data
	PADDING_STREAM = 0x000001be,
	PRIVATE_STREAM_2 = 0x000001bf,
};
// Lightweight cursor over the guest-memory demuxer input buffer.
// addr/size track the unread region; all reads copy out of emulated memory.
struct DemuxerStream
{
	u32 addr;          // guest address of the next unread byte
	u32 size;          // number of unread bytes
	u64 userdata;      // opaque value passed through to callbacks
	bool discontinuity;

	// Reads sizeof(T) bytes and advances the cursor.
	// Returns false (without advancing) if not enough data is left.
	template<typename T>
	bool get(T& out)
	{
		if (sizeof(T) > size) return false;

		std::memcpy(&out, vm::base(addr), sizeof(T));
		addr += sizeof(T);
		size -= sizeof(T);
		return true;
	}

	// Reads sizeof(T) bytes at offset 'shift' without advancing the cursor.
	template<typename T>
	bool peek(T& out, u32 shift = 0)
	{
		if (sizeof(T) + shift > size) return false;

		std::memcpy(&out, vm::base(addr + shift), sizeof(T));
		return true;
	}

	// Advances the cursor by count bytes (size is clamped at 0).
	void skip(u32 count)
	{
		addr += count;
		size = size > count ? size - count : 0;
	}

	// Returns true if at least count bytes remain unread.
	bool check(u32 count) const
	{
		return count <= size;
	}

	// Decodes a 33-bit MPEG timestamp: c carries the top bits (from the PES
	// header marker byte), the remaining bits are read from the stream.
	u64 get_ts(u8 c)
	{
		// Bugfix: zero-initialize so a truncated stream (get() failing) yields a
		// deterministic value instead of reading uninitialized memory (UB).
		u8 v[4]{};
		get(v);

		return
			((u64{c} & 0x0e) << 29) |
			((u64{v[0]}) << 21) |
			((u64{v[1]} & 0x7e) << 15) |
			((u64{v[2]}) << 7) | (u64{v[3]} >> 1);
	}
};
// Parsed PES packet header: optional timestamps plus the header length field.
struct PesHeader
{
	u64 pts;     // presentation timestamp, CODEC_TS_INVALID if absent
	u64 dts;     // decoding timestamp, CODEC_TS_INVALID if absent
	u8 size;     // PES header data length (bytes consumed after the length field)
	bool has_ts; // true if a PTS (and possibly DTS) was found
	bool is_ok;  // true if the header was parsed to completion without errors

	// Parses the header from the stream, advancing the cursor (defined below).
	PesHeader(DemuxerStream& stream);
};
class ElementaryStream;
class Demuxer;
// Control commands processed by the demuxer thread's task loop.
enum DemuxerJobType
{
	dmuxSetStream,              // feed a new input buffer
	dmuxResetStream,            // stop demuxing
	dmuxResetStreamAndWaitDone, // stop demuxing (caller waits for completion)
	dmuxEnableEs,               // attach an elementary stream filter
	dmuxDisableEs,              // detach an elementary stream filter
	dmuxResetEs,                // clear an elementary stream's buffers
	dmuxFlushEs,                // push out pending data and signal flush-done
	dmuxClose,                  // terminate the demuxer thread
};
// A single command for the demuxer thread; the union payload is selected by type.
struct DemuxerTask
{
	DemuxerJobType type;

	union
	{
		DemuxerStream stream; // payload for dmuxSetStream

		struct
		{
			u32 es;
			u32 auInfo_ptr_addr;
			u32 auSpec_ptr_addr;
			ElementaryStream* es_ptr;
		} es; // payload for the dmux*Es commands
	};

	// NOTE: deliberately leaves type and the union uninitialized; tasks are
	// filled by the producer (or by squeue_t::try_peek/pop) before use.
	DemuxerTask()
	{
	}

	DemuxerTask(DemuxerJobType type)
		: type(type)
	{
	}
};
// One demultiplexed elementary stream. Access units (AUs) are written into a
// guest-memory ring buffer [memAddr, memAddr+memSize); 'entries' holds the
// start address of each pushed AU. The demuxer thread produces, the HLE API
// (Get/Peek/Release) consumes.
class ElementaryStream
{
	std::mutex m_mutex;
	squeue_t<u32> entries; // AU starting addresses
	u32 put_count = 0; // number of AU written
	u32 got_count = 0; // number of AU obtained by GetAu(Ex)
	u32 released = 0; // number of AU released
	u32 put; // AU that is being written now

	// Space check for the ring buffer; caller must hold m_mutex.
	bool is_full(u32 space);

public:
	static const u32 id_base = 1;
	static const u32 id_step = 1;
	static const u32 id_count = 1023;

	SAVESTATE_INIT_POS(34);

	ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, u32 cbArg, u32 spec);

	Demuxer* dmux;
	const u32 id = idm::last_id();
	const u32 memAddr;
	const u32 memSize;
	const u32 fidMajor;  // stream filter id (major)
	const u32 fidMinor;  // stream filter id (minor)
	const u32 sup1;      // supplemental filter info 1
	const u32 sup2;      // supplemental filter info 2
	const vm::ptr<CellDmuxCbEsMsg> cbFunc;
	const u32 cbArg;
	const u32 spec; //addr

	std::vector<u8> raw_data; // demultiplexed data stream (managed by demuxer thread)
	usz raw_pos = 0; // should be <= raw_data.size()

	u64 last_dts = CODEC_TS_INVALID;
	u64 last_pts = CODEC_TS_INVALID;

	void push(DemuxerStream& stream, u32 size); // called by demuxer thread (not multithread-safe)
	bool isfull(u32 space); // thread-safe wrapper around is_full()

	void push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool rap, u32 specific);

	bool release();

	bool peek(u32& out_data, bool no_ex, u32& out_spec, bool update_index);

	void reset();
};
// HLE demuxer: a PPU thread that splits a PAMF (MPEG2-PS) input stream into
// elementary streams (AVC video, ATRAC3+ audio, ...) and notifies the guest
// through the registered callbacks.
class Demuxer : public ppu_thread
{
public:
	squeue_t<DemuxerTask, 32> job; // pending control tasks from the HLE API
	const u32 memAddr;
	const u32 memSize;
	const vm::ptr<CellDmuxCbMsg> cbFunc;
	const u32 cbArg;
	volatile bool is_finished = false;
	volatile bool is_closed = false;
	atomic_t<bool> is_running = false; // true while a stream is being demuxed
	atomic_t<bool> is_working = false; // true while a reset-and-wait is pending

	Demuxer(u32 addr, u32 size, vm::ptr<CellDmuxCbMsg> func, u32 arg)
		: ppu_thread({}, "", 0)
		, memAddr(addr)
		, memSize(size)
		, cbFunc(func)
		, cbArg(arg)
	{
	}

	// Main loop: processes control tasks from 'job' and, when idle, demuxes the
	// currently set input stream packet by packet.
	void non_task()
	{
		DemuxerTask task;
		DemuxerStream stream = {};

		// Table of enabled elementary streams, 16 slots per codec class.
		ElementaryStream* esALL[96]{};
		ElementaryStream** esAVC = &esALL[0]; // AVC (max 16 minus M2V count)
		//ElementaryStream** esM2V = &esALL[16]; // M2V (max 16 minus AVC count)
		//ElementaryStream** esDATA = &esALL[32]; // user data (max 16)
		ElementaryStream** esATX = &esALL[48]; // ATRAC3+ (max 16)
		//ElementaryStream** esAC3 = &esALL[64]; // AC3 (max 16)
		//ElementaryStream** esPCM = &esALL[80]; // LPCM (max 16)

		// Callback messages alternate between two 16-byte slots at memAddr (cb_add ^= 16).
		u32 cb_add = 0;

		while (true)
		{
			if (Emu.IsStopped() || is_closed)
			{
				break;
			}

			if (!job.try_peek(task) && is_running && stream.addr)
			{
				// default task (demuxing) (if there is no other work)
				be_t<u32> code;
				be_t<u16> len;

				if (!stream.peek(code))
				{
					// demuxing finished
					is_running = false;

					// callback
					auto dmuxMsg = vm::ptr<CellDmuxMsg>::make(memAddr + (cb_add ^= 16));
					dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
					dmuxMsg->supplementalInfo = stream.userdata;
					cbFunc(*this, id, dmuxMsg, cbArg);
					lv2_obj::sleep(*this);

					is_working = false;

					stream = {};

					continue;
				}

				// Dispatch on the MPEG-PS start code at the cursor.
				switch (code)
				{
				case PACK_START_CODE:
				{
					if (!stream.check(14))
					{
						fmt::throw_exception("End of stream (PACK_START_CODE)");
					}
					stream.skip(14);
					break;
				}

				case SYSTEM_HEADER_START_CODE:
				{
					if (!stream.check(18))
					{
						fmt::throw_exception("End of stream (SYSTEM_HEADER_START_CODE)");
					}
					stream.skip(18);
					break;
				}

				case PADDING_STREAM:
				{
					if (!stream.check(6))
					{
						fmt::throw_exception("End of stream (PADDING_STREAM)");
					}
					stream.skip(4);
					stream.get(len);

					if (!stream.check(len))
					{
						fmt::throw_exception("End of stream (PADDING_STREAM, len=%d)", len);
					}
					stream.skip(len);
					break;
				}

				case PRIVATE_STREAM_2:
				{
					if (!stream.check(6))
					{
						fmt::throw_exception("End of stream (PRIVATE_STREAM_2)");
					}
					stream.skip(4);
					stream.get(len);

					cellDmux.notice("PRIVATE_STREAM_2 (%d)", len);

					if (!stream.check(len))
					{
						fmt::throw_exception("End of stream (PRIVATE_STREAM_2, len=%d)", len);
					}
					stream.skip(len);
					break;
				}

				case PRIVATE_STREAM_1:
				{
					// audio and user data stream
					DemuxerStream backup = stream;

					if (!stream.check(6))
					{
						fmt::throw_exception("End of stream (PRIVATE_STREAM_1)");
					}
					stream.skip(4);
					stream.get(len);

					if (!stream.check(len))
					{
						fmt::throw_exception("End of stream (PRIVATE_STREAM_1, len=%d)", len);
					}

					const PesHeader pes(stream);
					if (!pes.is_ok)
					{
						fmt::throw_exception("PesHeader error (PRIVATE_STREAM_1, len=%d)", len);
					}

					if (len < pes.size + 4)
					{
						fmt::throw_exception("End of block (PRIVATE_STREAM_1, PesHeader + fid_minor, len=%d)", len);
					}
					len -= pes.size + 4;

					u8 fid_minor;
					if (!stream.get(fid_minor))
					{
						fmt::throw_exception("End of stream (PRIVATE_STREAM1, fid_minor)");
					}

					const u32 ch = fid_minor % 16;
					if ((fid_minor & -0x10) == 0 && esATX[ch])
					{
						ElementaryStream& es = *esATX[ch];
						if (es.raw_data.size() > 1024 * 1024)
						{
							// Backpressure: ES buffer too full, retry this packet later.
							stream = backup;
							std::this_thread::sleep_for(1ms); // hack
							continue;
						}

						if (len < 3 || !stream.check(3))
						{
							fmt::throw_exception("End of block (ATX, unknown header, len=%d)", len);
						}
						len -= 3;
						stream.skip(3);

						if (pes.has_ts)
						{
							es.last_dts = pes.dts;
							es.last_pts = pes.pts;
						}

						es.push(stream, len);

						// Slice the accumulated raw data into complete ATRAC3+ AUs.
						while (true)
						{
							auto const size = es.raw_data.size() - es.raw_pos; // size of available new data
							auto const data = es.raw_data.data() + es.raw_pos; // pointer to available data

							if (size < 8) break; // skip if cannot read ATS header

							if (data[0] != 0x0f || data[1] != 0xd0)
							{
								fmt::throw_exception("ATX: 0x0fd0 header not found (ats=0x%llx)", *reinterpret_cast<be_t<u64>*>(data));
							}

							u32 frame_size = (((u32{data[2]} & 0x3) << 8) | u32{data[3]}) * 8 + 8;

							if (size < frame_size + 8) break; // skip non-complete AU

							if (es.isfull(frame_size + 8)) break; // skip if cannot push AU

							es.push_au(frame_size + 8, es.last_dts, es.last_pts, stream.userdata, false /* TODO: set correct value */, 0);

							//cellDmux.notice("ATX AU pushed (ats=0x%llx, frame_size=%d)", *(be_t<u64>*)data, frame_size);

							auto esMsg = vm::ptr<CellDmuxEsMsg>::make(memAddr + (cb_add ^= 16));
							esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
							esMsg->supplementalInfo = stream.userdata;
							es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
							lv2_obj::sleep(*this);
						}
					}
					else
					{
						cellDmux.notice("PRIVATE_STREAM_1 (len=%d, fid_minor=0x%x)", len, fid_minor);
						stream.skip(len);
					}
					break;
				}

				case 0x1e0: case 0x1e1: case 0x1e2: case 0x1e3:
				case 0x1e4: case 0x1e5: case 0x1e6: case 0x1e7:
				case 0x1e8: case 0x1e9: case 0x1ea: case 0x1eb:
				case 0x1ec: case 0x1ed: case 0x1ee: case 0x1ef:
				{
					// video stream (AVC or M2V)
					DemuxerStream backup = stream;

					if (!stream.check(6))
					{
						fmt::throw_exception("End of stream (video, code=0x%x)", code);
					}
					stream.skip(4);
					stream.get(len);

					if (!stream.check(len))
					{
						fmt::throw_exception("End of stream (video, code=0x%x, len=%d)", code, len);
					}

					const PesHeader pes(stream);
					if (!pes.is_ok)
					{
						fmt::throw_exception("PesHeader error (video, code=0x%x, len=%d)", code, len);
					}

					if (len < pes.size + 3)
					{
						fmt::throw_exception("End of block (video, code=0x%x, PesHeader)", code);
					}
					len -= pes.size + 3;

					const u32 ch = code % 16;
					if (esAVC[ch])
					{
						ElementaryStream& es = *esAVC[ch];

						const u32 old_size = ::size32(es.raw_data);
						if (es.isfull(old_size))
						{
							// Backpressure: AU buffer full, retry this packet later.
							stream = backup;
							std::this_thread::sleep_for(1ms); // hack
							continue;
						}

						if ((pes.has_ts && old_size) || old_size >= 0x69800)
						{
							// push AU if it becomes too big or the next packet contains PTS/DTS
							es.push_au(old_size, es.last_dts, es.last_pts, stream.userdata, false /* TODO: set correct value */, 0);

							// callback
							auto esMsg = vm::ptr<CellDmuxEsMsg>::make(memAddr + (cb_add ^= 16));
							esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
							esMsg->supplementalInfo = stream.userdata;
							es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
							lv2_obj::sleep(*this);
						}

						if (pes.has_ts)
						{
							// preserve dts/pts for next AU
							es.last_dts = pes.dts;
							es.last_pts = pes.pts;
						}

						// reconstruction of MPEG2-PS stream for vdec module
						const u32 size = len + pes.size + 9;
						stream = backup;
						es.push(stream, size);
					}
					else
					{
						cellDmux.notice("Video stream (code=0x%x, len=%d)", code, len);
						stream.skip(len);
					}
					break;
				}

				default:
				{
					if ((code & PACKET_START_CODE_MASK) == PACKET_START_CODE_PREFIX)
					{
						fmt::throw_exception("Unknown code found (0x%x)", code);
					}

					// search
					stream.skip(1);
				}
				}

				continue;
			}

			// wait for task if no work
			if (!job.pop(task, &is_closed))
			{
				break; // Emu is stopped
			}

			// Dispatch a control task from the HLE API.
			switch (task.type)
			{
			case dmuxSetStream:
			{
				if (task.stream.discontinuity)
				{
					cellDmux.warning("dmuxSetStream (beginning)");
					for (u32 i = 0; i < std::size(esALL); i++)
					{
						if (esALL[i])
						{
							esALL[i]->reset();
						}
					}
				}

				stream = task.stream;
				//cellDmux.notice("*** stream updated(addr=0x%x, size=0x%x, discont=%d, userdata=0x%llx)",
				//stream.addr, stream.size, stream.discontinuity, stream.userdata);
				break;
			}

			case dmuxResetStream:
			case dmuxResetStreamAndWaitDone:
			{
				// demuxing stopped
				if (is_running.exchange(false))
				{
					// callback
					auto dmuxMsg = vm::ptr<CellDmuxMsg>::make(memAddr + (cb_add ^= 16));
					dmuxMsg->msgType = CELL_DMUX_MSG_TYPE_DEMUX_DONE;
					dmuxMsg->supplementalInfo = stream.userdata;
					cbFunc(*this, id, dmuxMsg, cbArg);
					lv2_obj::sleep(*this);

					stream = {};

					is_working = false;
				}

				break;
			}

			case dmuxEnableEs:
			{
				ElementaryStream& es = *task.es.es_ptr;

				// TODO: uncomment when ready to use
				//if ((es.fidMajor & -0x10) == 0xe0 && es.fidMinor == 0 && es.sup1 == 1 && !es.sup2)
				//{
				//	esAVC[es.fidMajor % 16] = task.es.es_ptr;
				//}
				//else if ((es.fidMajor & -0x10) == 0xe0 && es.fidMinor == 0 && !es.sup1 && !es.sup2)
				//{
				//	esM2V[es.fidMajor % 16] = task.es.es_ptr;
				//}
				//else if (es.fidMajor == 0xbd && (es.fidMinor & -0x10) == 0 && !es.sup1 && !es.sup2)
				//{
				//	esATX[es.fidMinor % 16] = task.es.es_ptr;
				//}
				//else if (es.fidMajor == 0xbd && (es.fidMinor & -0x10) == 0x20 && !es.sup1 && !es.sup2)
				//{
				//	esDATA[es.fidMinor % 16] = task.es.es_ptr;
				//}
				//else if (es.fidMajor == 0xbd && (es.fidMinor & -0x10) == 0x30 && !es.sup1 && !es.sup2)
				//{
				//	esAC3[es.fidMinor % 16] = task.es.es_ptr;
				//}
				//else if (es.fidMajor == 0xbd && (es.fidMinor & -0x10) == 0x40 && !es.sup1 && !es.sup2)
				//{
				//	esPCM[es.fidMinor % 16] = task.es.es_ptr;
				//}
				//else
				{
					fmt::throw_exception("dmuxEnableEs: unknown filter (0x%x, 0x%x, 0x%x, 0x%x)", es.fidMajor, es.fidMinor, es.sup1, es.sup2);
				}
				es.dmux = this;
				break;
			}

			case dmuxDisableEs:
			{
				ElementaryStream& es = *task.es.es_ptr;
				if (es.dmux != this)
				{
					fmt::throw_exception("dmuxDisableEs: invalid elementary stream");
				}

				for (u32 i = 0; i < std::size(esALL); i++)
				{
					if (esALL[i] == &es)
					{
						esALL[i] = nullptr;
					}
				}
				es.dmux = nullptr;
				idm::remove<ElementaryStream>(task.es.es);
				break;
			}

			case dmuxFlushEs:
			{
				ElementaryStream& es = *task.es.es_ptr;

				const u32 old_size = ::size32(es.raw_data);
				if (old_size && (es.fidMajor & -0x10) == 0xe0)
				{
					// TODO (it's only for AVC, some ATX data may be lost)
					while (es.isfull(old_size))
					{
						if (Emu.IsStopped() || is_closed) break;

						std::this_thread::sleep_for(1ms); // hack
					}

					es.push_au(old_size, es.last_dts, es.last_pts, stream.userdata, false, 0);

					// callback
					auto esMsg = vm::ptr<CellDmuxEsMsg>::make(memAddr + (cb_add ^= 16));
					esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_AU_FOUND;
					esMsg->supplementalInfo = stream.userdata;
					es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
					lv2_obj::sleep(*this);
				}

				if (!es.raw_data.empty())
				{
					cellDmux.error("dmuxFlushEs: 0x%x bytes lost (es_id=%d)", ::size32(es.raw_data), es.id);
				}

				// callback
				auto esMsg = vm::ptr<CellDmuxEsMsg>::make(memAddr + (cb_add ^= 16));
				esMsg->msgType = CELL_DMUX_ES_MSG_TYPE_FLUSH_DONE;
				esMsg->supplementalInfo = stream.userdata;
				es.cbFunc(*this, id, es.id, esMsg, es.cbArg);
				lv2_obj::sleep(*this);
				break;
			}

			case dmuxResetEs:
			{
				task.es.es_ptr->reset();
				break;
			}

			case dmuxClose:
			{
				break;
			}

			default:
			{
				fmt::throw_exception("Demuxer thread error: unknown task (0x%x)", +task.type);
			}
			}
		}

		is_finished = true;
	}
};
// Parses a PES header starting right after the packet length field: reads the
// 2-byte flags, the header data length, then scans the header data for PTS/DTS
// markers (0x2n = PTS only, 0x3n = PTS+DTS), skipping 0xff stuffing bytes.
// Leaves the stream positioned after the header data; sets is_ok on success.
PesHeader::PesHeader(DemuxerStream& stream)
	: pts(CODEC_TS_INVALID)
	, dts(CODEC_TS_INVALID)
	, size(0)
	, has_ts(false)
	, is_ok(false)
{
	u16 header;
	if (!stream.get(header))
	{
		fmt::throw_exception("End of stream (header)");
	}
	if (!stream.get(size))
	{
		fmt::throw_exception("End of stream (size)");
	}
	if (!stream.check(size))
	{
		fmt::throw_exception("End of stream (size=%d)", size);
	}

	u8 pos = 0;
	while (pos++ < size)
	{
		u8 v;
		if (!stream.get(v))
		{
			return; // should never occur
		}

		if (v == 0xff) // skip padding bytes
		{
			continue;
		}

		if ((v & 0xf0) == 0x20 && (size - pos) >= 4) // pts only
		{
			pos += 4;
			pts = stream.get_ts(v);
			has_ts = true;
		}
		else if ((v & 0xf0) == 0x30 && (size - pos) >= 9) // pts and dts
		{
			pos += 5;
			pts = stream.get_ts(v);
			stream.get(v);
			has_ts = true;

			// The DTS marker byte must have the 0x1n pattern.
			if ((v & 0xf0) != 0x10)
			{
				cellDmux.error("PesHeader(): dts not found (v=0x%x, size=%d, pos=%d)", v, size, pos - 1);
				stream.skip(size - pos);
				return;
			}
			pos += 4;
			dts = stream.get_ts(v);
		}
		else
		{
			// Unknown extension data: skip the remainder of the header.
			cellDmux.warning("PesHeader(): unknown code (v=0x%x, size=%d, pos=%d)", v, size, pos - 1);
			stream.skip(size - pos);
			pos = size;
			break;
		}
	}

	is_ok = true;
}
// The AU ring-buffer base is aligned up to 128 bytes; memSize shrinks by the
// alignment slack so the buffer never exceeds the guest allocation.
ElementaryStream::ElementaryStream(Demuxer* dmux, u32 addr, u32 size, u32 fidMajor, u32 fidMinor, u32 sup1, u32 sup2, vm::ptr<CellDmuxCbEsMsg> cbFunc, u32 cbArg, u32 spec)
	: put(utils::align(addr, 128))
	, dmux(dmux)
	, memAddr(utils::align(addr, 128))
	, memSize(size - (addr - memAddr))
	, fidMajor(fidMajor)
	, fidMinor(fidMinor)
	, sup1(sup1)
	, sup2(sup2)
	, cbFunc(cbFunc)
	, cbArg(cbArg)
	, spec(spec)
{
}
// Returns true if the ring buffer cannot accept an AU of 'space' bytes plus
// its 128-byte info header. Caller must hold m_mutex.
bool ElementaryStream::is_full(u32 space)
{
	if (released < put_count)
	{
		if (entries.is_full())
		{
			return true;
		}

		// 'first' is the start of the oldest un-released AU still in the buffer.
		u32 first = 0;
		if (!entries.peek(first, 0, &dmux->is_closed) || !first)
		{
			fmt::throw_exception("entries.peek() failed");
		}
		else if (first >= put)
		{
			// Write cursor is behind the oldest AU: free space ends at 'first'.
			return first - put < space + 128;
		}
		else if (put + space + 128 > memAddr + memSize)
		{
			// Write would overrun the buffer end and wrap: check space at the start.
			return first - memAddr < space + 128;
		}
		else
		{
			return false;
		}
	}
	else
	{
		// Buffer is empty (every pushed AU has been released).
		return false;
	}
}
// Thread-safe wrapper around is_full().
bool ElementaryStream::isfull(u32 space)
{
	std::lock_guard lock(m_mutex);
	return is_full(space);
}
// Moves 'size' bytes from raw_data into the guest ring buffer as one access
// unit, writing both the extended (CellDmuxAuInfoEx at put) and the classic
// (CellDmuxAuInfo at put+64) info records before the payload at put+128, then
// publishes the AU address through 'entries'.
void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool rap, u32 specific)
{
	u32 addr;
	{
		std::lock_guard lock(m_mutex);
		ensure(!is_full(size));

		// Wrap the write cursor if the AU would not fit before the buffer end.
		if (put + size + 128 > memAddr + memSize)
		{
			put = memAddr;
		}

		std::memcpy(vm::base(put + 128), raw_data.data(), size);
		raw_data.erase(raw_data.begin(), raw_data.begin() + size);

		// Extended AU info (used by GetAuEx/PeekAuEx).
		auto info = vm::ptr<CellDmuxAuInfoEx>::make(put);
		info->auAddr = put + 128;
		info->auSize = size;
		info->dts.lower = static_cast<u32>(dts);
		info->dts.upper = static_cast<u32>(dts >> 32);
		info->pts.lower = static_cast<u32>(pts);
		info->pts.upper = static_cast<u32>(pts >> 32);
		info->isRap = rap;
		info->reserved = 0;
		info->userData = userdata;

		// Stream-specific word follows the extended info record.
		auto spec = vm::ptr<u32>::make(put + u32{sizeof(CellDmuxAuInfoEx)});
		*spec = specific;

		// Classic AU info (used by GetAu/PeekAu).
		auto inf = vm::ptr<CellDmuxAuInfo>::make(put + 64);
		inf->auAddr = put + 128;
		inf->auSize = size;
		inf->dtsLower = static_cast<u32>(dts);
		inf->dtsUpper = static_cast<u32>(dts >> 32);
		inf->ptsLower = static_cast<u32>(pts);
		inf->ptsUpper = static_cast<u32>(pts >> 32);
		inf->auMaxSize = 0; // ?????
		inf->userData = userdata;

		addr = put;

		put = utils::align(put + 128 + size, 128);

		put_count++;
	}
	ensure(entries.push(addr, &dmux->is_closed));
}
// Appends 'size' bytes from the input stream to this ES's raw data buffer and
// advances the stream cursor. Demuxer-thread only (no locking).
void ElementaryStream::push(DemuxerStream& stream, u32 size)
{
	auto const old_size = raw_data.size();

	raw_data.resize(old_size + size);

	std::memcpy(raw_data.data() + old_size, vm::base(stream.addr), size); // append bytes

	stream.skip(size);
}
// Releases the oldest AU (frees its ring-buffer region). Fails if the buffer
// is empty or the AU was never obtained via GetAu(Ex) first.
bool ElementaryStream::release()
{
	std::lock_guard lock(m_mutex);
	if (released >= put_count)
	{
		cellDmux.fatal("es::release() error: buffer is empty");
		return false;
	}
	if (released >= got_count)
	{
		cellDmux.fatal("es::release() error: buffer has not been seen yet");
		return false;
	}

	u32 addr = 0;
	if (!entries.pop(addr, &dmux->is_closed) || !addr)
	{
		cellDmux.fatal("es::release() error: entries.Pop() failed");
		return false;
	}

	released++;
	return true;
}
// Retrieves the next unseen AU's info address without consuming buffer space.
// no_ex selects the classic info record (at +64) instead of the extended one;
// update_index advances the "seen" counter (GetAu vs PeekAu semantics).
bool ElementaryStream::peek(u32& out_data, bool no_ex, u32& out_spec, bool update_index)
{
	std::lock_guard lock(m_mutex);
	if (got_count < released)
	{
		cellDmux.fatal("es::peek() error: got_count(%d) < released(%d) (put_count=%d)", got_count, released, put_count);
		return false;
	}
	if (got_count >= put_count)
	{
		// No unseen AU is available.
		return false;
	}

	u32 addr = 0;
	if (!entries.peek(addr, got_count - released, &dmux->is_closed) || !addr)
	{
		cellDmux.fatal("es::peek() error: entries.Peek() failed");
		return false;
	}

	out_data = no_ex ? addr + 64 : addr;
	out_spec = addr + sizeof(CellDmuxAuInfoEx);

	if (update_index)
	{
		got_count++;
	}
	return true;
}
// Discards all buffered AUs and pending raw data, rewinding the write cursor
// to the buffer start.
void ElementaryStream::reset()
{
	std::lock_guard lock(m_mutex);
	put = memAddr;
	entries.clear();
	put_count = 0;
	got_count = 0;
	released = 0;
	raw_data.clear();
	raw_pos = 0;
}
// Fills the demuxer attribute struct with fixed HLE values.
void dmuxQueryAttr(u32 /* info_addr, may be 0 */, vm::ptr<CellDmuxAttr> attr)
{
	attr->demuxerVerLower = 0x280000; // TODO: check values
	attr->demuxerVerUpper = 0x260000;
	attr->memSize = 0x10000; // 0x3e8e6 from ps3
}
// Reports the required ES buffer size: video filters (filterIdMajor >= 0xe0)
// need a much larger buffer than audio/user-data filters.
void dmuxQueryEsAttr(u32 /* info, may be 0 */, vm::cptr<CellCodecEsFilterId> esFilterId, u32 /*esSpecificInfo*/, vm::ptr<CellDmuxEsAttr> attr)
{
	const bool is_video = esFilterId->filterIdMajor >= 0xe0;
	attr->memSize = is_video ? 0x500000 : 0x7000; // 0x45fa49 / 0x73d9 from ps3

	cellDmux.warning("*** filter(0x%x, 0x%x, 0x%x, 0x%x)", esFilterId->filterIdMajor, esFilterId->filterIdMinor, esFilterId->supplementalInfo1, esFilterId->supplementalInfo2);
}
// Queries demuxer attributes. Only the PAMF stream type is supported.
error_code cellDmuxQueryAttr(vm::cptr<CellDmuxType> type, vm::ptr<CellDmuxAttr> attr)
{
	cellDmux.warning("cellDmuxQueryAttr(type=*0x%x, attr=*0x%x)", type, attr);

	if (type->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		dmuxQueryAttr(0, attr);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Queries demuxer attributes using the extended type struct (passes the
// stream-specific info pointer along). Only PAMF is supported.
error_code cellDmuxQueryAttr2(vm::cptr<CellDmuxType2> type2, vm::ptr<CellDmuxAttr> attr)
{
	cellDmux.warning("cellDmuxQueryAttr2(demuxerType2=*0x%x, demuxerAttr=*0x%x)", type2, attr);

	if (type2->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		dmuxQueryAttr(type2->streamSpecificInfo, attr);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Opens a demuxer instance. The HLE implementation is disabled: a valid PAMF
// request intentionally aborts with a "use LLE" diagnostic.
error_code cellDmuxOpen(vm::cptr<CellDmuxType> type, vm::cptr<CellDmuxResource> res, vm::cptr<CellDmuxCb> cb, vm::ptr<u32> handle)
{
	cellDmux.warning("cellDmuxOpen(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);

	if (type->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		// TODO: check demuxerResource and demuxerCb arguments
		fmt::throw_exception("cellDmux disabled, use LLE.");
	}

	return CELL_DMUX_ERROR_ARG;
}
// Opens a demuxer instance with extended resources. The HLE implementation is
// disabled: a valid PAMF request intentionally aborts with a "use LLE" diagnostic.
error_code cellDmuxOpenEx(vm::cptr<CellDmuxType> type, vm::cptr<CellDmuxResourceEx> resEx, vm::cptr<CellDmuxCb> cb, vm::ptr<u32> handle)
{
	cellDmux.warning("cellDmuxOpenEx(type=*0x%x, resEx=*0x%x, cb=*0x%x, handle=*0x%x)", type, resEx, cb, handle);

	if (type->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		// TODO: check demuxerResourceEx and demuxerCb arguments
		fmt::throw_exception("cellDmux disabled, use LLE.");
	}

	return CELL_DMUX_ERROR_ARG;
}
// Alias of cellDmuxOpenEx (identical signature and behavior).
error_code cellDmuxOpenExt(vm::cptr<CellDmuxType> type, vm::cptr<CellDmuxResourceEx> resEx, vm::cptr<CellDmuxCb> cb, vm::ptr<u32> handle)
{
	cellDmux.warning("cellDmuxOpenExt(type=*0x%x, resEx=*0x%x, cb=*0x%x, handle=*0x%x)", type, resEx, cb, handle);

	return cellDmuxOpenEx(type, resEx, cb, handle);
}
// Opens a demuxer instance using the v2 type/resource structs. The HLE
// implementation is disabled: a valid PAMF request aborts with "use LLE".
error_code cellDmuxOpen2(vm::cptr<CellDmuxType2> type2, vm::cptr<CellDmuxResource2> res2, vm::cptr<CellDmuxCb> cb, vm::ptr<u32> handle)
{
	cellDmux.warning("cellDmuxOpen2(type2=*0x%x, res2=*0x%x, cb=*0x%x, handle=*0x%x)", type2, res2, cb, handle);

	if (type2->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		// TODO: check demuxerType2, demuxerResource2 and demuxerCb arguments
		fmt::throw_exception("cellDmux disabled, use LLE.");
	}

	return CELL_DMUX_ERROR_ARG;
}
// Signals the demuxer thread to terminate, waits for it to finish, then
// removes the thread object. Returns CELL_OK even if aborted by emulator stop.
error_code cellDmuxClose(u32 handle)
{
	cellDmux.warning("cellDmuxClose(handle=0x%x)", handle);

	const auto dmux = idm::get<Demuxer>(handle);

	if (!dmux)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	dmux->is_closed = true;
	dmux->job.try_push(DemuxerTask(dmuxClose));

	// Busy-wait for the demuxer thread to acknowledge termination.
	while (!dmux->is_finished)
	{
		if (Emu.IsStopped())
		{
			cellDmux.warning("cellDmuxClose(%d) aborted", handle);
			return CELL_OK;
		}

		std::this_thread::sleep_for(1ms); // hack
	}

	idm::remove<ppu_thread>(handle);
	return CELL_OK;
}
// Feeds a new input buffer to the demuxer. Fails with BUSY if the previous
// buffer is still being demuxed (is_running acts as the busy flag).
error_code cellDmuxSetStream(u32 handle, u32 streamAddress, u32 streamSize, b8 discontinuity, u64 userData)
{
	cellDmux.trace("cellDmuxSetStream(handle=0x%x, streamAddress=0x%x, streamSize=%d, discontinuity=%d, userData=0x%llx)", handle, streamAddress, streamSize, discontinuity, userData);

	const auto dmux = idm::get<Demuxer>(handle);

	if (!dmux)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	// Atomically claim the "running" state; if already set, a stream is active.
	if (dmux->is_running.exchange(true))
	{
		//std::this_thread::sleep_for(1ms); // hack
		return CELL_DMUX_ERROR_BUSY;
	}

	DemuxerTask task(dmuxSetStream);
	auto& info = task.stream;
	info.addr = streamAddress;
	info.size = streamSize;
	info.discontinuity = discontinuity;
	info.userdata = userData;

	dmux->job.push(task, &dmux->is_closed);
	return CELL_OK;
}
// Asynchronously requests the demuxer to stop processing the current stream.
error_code cellDmuxResetStream(u32 handle)
{
	cellDmux.warning("cellDmuxResetStream(handle=0x%x)", handle);

	if (const auto dmux = idm::get<Demuxer>(handle))
	{
		dmux->job.push(DemuxerTask(dmuxResetStream), &dmux->is_closed);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Requests a stream reset and blocks until the demuxer thread has processed it
// (is_working is cleared by the thread after the DEMUX_DONE callback).
error_code cellDmuxResetStreamAndWaitDone(u32 handle)
{
	cellDmux.warning("cellDmuxResetStreamAndWaitDone(handle=0x%x)", handle);

	const auto dmux = idm::get<Demuxer>(handle);

	if (!dmux)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	if (!dmux->is_running)
	{
		// Nothing to reset.
		return CELL_OK;
	}

	dmux->is_working = true;

	dmux->job.push(DemuxerTask(dmuxResetStreamAndWaitDone), &dmux->is_closed);

	// Busy-wait for the thread to finish the reset.
	while (dmux->is_running && dmux->is_working && !dmux->is_closed) // TODO: ensure that it is safe
	{
		if (Emu.IsStopped())
		{
			cellDmux.warning("cellDmuxResetStreamAndWaitDone(%d) aborted", handle);
			return CELL_OK;
		}

		std::this_thread::sleep_for(1ms); // hack
	}

	return CELL_OK;
}
// Queries elementary stream attributes. Only PAMF is supported.
error_code cellDmuxQueryEsAttr(vm::cptr<CellDmuxType> type, vm::cptr<CellCodecEsFilterId> esFilterId, u32 esSpecificInfo, vm::ptr<CellDmuxEsAttr> esAttr)
{
	cellDmux.warning("cellDmuxQueryEsAttr(demuxerType=*0x%x, esFilterId=*0x%x, esSpecificInfo=*0x%x, esAttr=*0x%x)", type, esFilterId, esSpecificInfo, esAttr);

	if (type->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		// TODO: check esFilterId and esSpecificInfo correctly
		dmuxQueryEsAttr(0, esFilterId, esSpecificInfo, esAttr);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Queries elementary stream attributes using the extended type struct.
// Only PAMF is supported.
error_code cellDmuxQueryEsAttr2(vm::cptr<CellDmuxType2> type2, vm::cptr<CellCodecEsFilterId> esFilterId, u32 esSpecificInfo, vm::ptr<CellDmuxEsAttr> esAttr)
{
	cellDmux.warning("cellDmuxQueryEsAttr2(type2=*0x%x, esFilterId=*0x%x, esSpecificInfo=*0x%x, esAttr=*0x%x)", type2, esFilterId, esSpecificInfo, esAttr);

	if (type2->streamType == CELL_DMUX_STREAM_TYPE_PAMF)
	{
		// TODO: check demuxerType2, esFilterId and esSpecificInfo correctly
		dmuxQueryEsAttr(type2->streamSpecificInfo, esFilterId, esSpecificInfo, esAttr);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Creates an ElementaryStream object for the given filter, returns its handle,
// and queues a dmuxEnableEs task so the demuxer thread registers it.
error_code cellDmuxEnableEs(u32 handle, vm::cptr<CellCodecEsFilterId> esFilterId, vm::cptr<CellDmuxEsResource> esResourceInfo, vm::cptr<CellDmuxEsCb> esCb, u32 esSpecificInfo, vm::ptr<u32> esHandle)
{
	cellDmux.warning("cellDmuxEnableEs(handle=0x%x, esFilterId=*0x%x, esResourceInfo=*0x%x, esCb=*0x%x, esSpecificInfo=*0x%x, esHandle=*0x%x)", handle, esFilterId, esResourceInfo, esCb, esSpecificInfo, esHandle);

	const auto dmux = idm::get<Demuxer>(handle);

	if (!dmux)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	// TODO: check esFilterId, esResourceInfo, esCb and esSpecificInfo correctly

	const auto es = idm::make_ptr<ElementaryStream>(dmux.get(), esResourceInfo->memAddr, esResourceInfo->memSize,
		esFilterId->filterIdMajor, esFilterId->filterIdMinor, esFilterId->supplementalInfo1, esFilterId->supplementalInfo2,
		esCb->cbEsMsgFunc, esCb->cbArg, esSpecificInfo);

	*esHandle = es->id;

	cellDmux.warning("*** New ES(dmux=0x%x, addr=0x%x, size=0x%x, filter={0x%x, 0x%x, 0x%x, 0x%x}, cb=0x%x, arg=0x%x, spec=0x%x): id = 0x%x",
		handle, es->memAddr, es->memSize, es->fidMajor, es->fidMinor, es->sup1, es->sup2, es->cbFunc, es->cbArg, es->spec, es->id);

	DemuxerTask task(dmuxEnableEs);
	task.es.es = es->id;
	task.es.es_ptr = es.get();

	dmux->job.push(task, &dmux->is_closed);
	return CELL_OK;
}
// Queues a dmuxDisableEs task; the demuxer thread detaches and destroys the ES.
error_code cellDmuxDisableEs(u32 esHandle)
{
	cellDmux.warning("cellDmuxDisableEs(esHandle=0x%x)", esHandle);

	if (const auto es = idm::get<ElementaryStream>(esHandle))
	{
		DemuxerTask task(dmuxDisableEs);
		task.es.es = esHandle;
		task.es.es_ptr = es.get();

		es->dmux->job.push(task, &es->dmux->is_closed);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Queues a dmuxResetEs task; the demuxer thread clears the ES buffers.
error_code cellDmuxResetEs(u32 esHandle)
{
	cellDmux.trace("cellDmuxResetEs(esHandle=0x%x)", esHandle);

	if (const auto es = idm::get<ElementaryStream>(esHandle))
	{
		DemuxerTask task(dmuxResetEs);
		task.es.es = esHandle;
		task.es.es_ptr = es.get();

		es->dmux->job.push(task, &es->dmux->is_closed);
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_ARG;
}
// Obtains the next AU (classic CellDmuxAuInfo layout), marking it as seen.
error_code cellDmuxGetAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpecificInfo)
{
	cellDmux.trace("cellDmuxGetAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);

	const auto es = idm::get<ElementaryStream>(esHandle);

	if (!es)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	u32 info_addr{};
	u32 spec_addr{};

	// no_ex=true (classic info), update_index=true (consume the "unseen" slot).
	if (!es->peek(info_addr, true, spec_addr, true))
	{
		return CELL_DMUX_ERROR_EMPTY;
	}

	*auInfo = info_addr;
	*auSpecificInfo = spec_addr;
	return CELL_OK;
}
// Inspects the next AU (classic CellDmuxAuInfo layout) without consuming it.
error_code cellDmuxPeekAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpecificInfo)
{
	cellDmux.trace("cellDmuxPeekAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);

	const auto es = idm::get<ElementaryStream>(esHandle);

	if (!es)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	u32 info_addr{};
	u32 spec_addr{};

	// no_ex=true (classic info), update_index=false (leave the AU unseen).
	if (!es->peek(info_addr, true, spec_addr, false))
	{
		return CELL_DMUX_ERROR_EMPTY;
	}

	*auInfo = info_addr;
	*auSpecificInfo = spec_addr;
	return CELL_OK;
}
// Retrieves the next access unit of an elementary stream (extended info variant).
error_code cellDmuxGetAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auSpecificInfo)
{
	cellDmux.trace("cellDmuxGetAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);

	const auto stream = idm::get<ElementaryStream>(esHandle);

	if (!stream)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	u32 info_addr;
	u32 spec_addr;

	// peek(info, false, spec, true): presumably extended-info + advance — TODO confirm flag semantics
	if (!stream->peek(info_addr, false, spec_addr, true))
	{
		return CELL_DMUX_ERROR_EMPTY;
	}

	*auInfoEx = info_addr;
	*auSpecificInfo = spec_addr;
	return CELL_OK;
}
// Inspects the next access unit without consuming it (extended info variant).
error_code cellDmuxPeekAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auSpecificInfo)
{
	cellDmux.trace("cellDmuxPeekAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);

	const auto stream = idm::get<ElementaryStream>(esHandle);

	if (!stream)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	u32 info_addr;
	u32 spec_addr;

	// peek(info, false, spec, false): extended-info, non-consuming — TODO confirm flag semantics
	if (!stream->peek(info_addr, false, spec_addr, false))
	{
		return CELL_DMUX_ERROR_EMPTY;
	}

	*auInfoEx = info_addr;
	*auSpecificInfo = spec_addr;
	return CELL_OK;
}
// Releases the most recently retrieved access unit of an elementary stream.
error_code cellDmuxReleaseAu(u32 esHandle)
{
	cellDmux.trace("cellDmuxReleaseAu(esHandle=0x%x)", esHandle);

	const auto stream = idm::get<ElementaryStream>(esHandle);

	if (!stream)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	// release() reports failure when there is no access unit to free
	if (stream->release())
	{
		return CELL_OK;
	}

	return CELL_DMUX_ERROR_SEQ;
}
// Queues a request to flush the remaining data of an elementary stream.
error_code cellDmuxFlushEs(u32 esHandle)
{
	cellDmux.warning("cellDmuxFlushEs(esHandle=0x%x)", esHandle);

	const auto stream = idm::get<ElementaryStream>(esHandle);

	if (!stream)
	{
		return CELL_DMUX_ERROR_ARG;
	}

	// The flush is executed asynchronously on the demuxer thread
	DemuxerTask request(dmuxFlushEs);
	request.es.es = esHandle;
	request.es.es_ptr = stream.get();

	stream->dmux->job.push(request, &stream->dmux->is_closed);
	return CELL_OK;
}
// HLE function registration for the cellDmux PPU module.
DECLARE(ppu_module_manager::cellDmux)("cellDmux", []()
{
	// Registered as a side effect so that games importing cellDmuxPamf resolve the module
	static ppu_static_module cellDmuxPamf("cellDmuxPamf");

	REG_FUNC(cellDmux, cellDmuxQueryAttr);
	REG_FUNC(cellDmux, cellDmuxQueryAttr2);
	REG_FUNC(cellDmux, cellDmuxOpen);
	REG_FUNC(cellDmux, cellDmuxOpenEx);
	REG_FUNC(cellDmux, cellDmuxOpenExt); // 0xe075fabc
	REG_FUNC(cellDmux, cellDmuxOpen2);
	REG_FUNC(cellDmux, cellDmuxClose);
	REG_FUNC(cellDmux, cellDmuxSetStream);
	REG_FUNC(cellDmux, cellDmuxResetStream);
	REG_FUNC(cellDmux, cellDmuxResetStreamAndWaitDone);
	REG_FUNC(cellDmux, cellDmuxQueryEsAttr);
	REG_FUNC(cellDmux, cellDmuxQueryEsAttr2);
	REG_FUNC(cellDmux, cellDmuxEnableEs);
	REG_FUNC(cellDmux, cellDmuxDisableEs);
	REG_FUNC(cellDmux, cellDmuxResetEs);
	REG_FUNC(cellDmux, cellDmuxGetAu);
	REG_FUNC(cellDmux, cellDmuxPeekAu);
	REG_FUNC(cellDmux, cellDmuxGetAuEx);
	REG_FUNC(cellDmux, cellDmuxPeekAuEx);
	REG_FUNC(cellDmux, cellDmuxReleaseAu);
	REG_FUNC(cellDmux, cellDmuxFlushEs);
});
| 32,782
|
C++
|
.cpp
| 1,137
| 25.244503
| 209
| 0.663153
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,314
|
sys_rsxaudio_.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/sys_rsxaudio_.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "sysPrxForUser.h"
LOG_CHANNEL(sysPrxForUser);
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_close_connection()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_create_connection()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_finalize()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_import_shared_memory()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_initialize()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_prepare_process()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_start_process()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_stop_process()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Stub: logs an "unimplemented" message and reports success.
error_code sys_rsxaudio_unimport_shared_memory()
{
	UNIMPLEMENTED_FUNC(sysPrxForUser);
	return CELL_OK;
}
// Registers all sys_rsxaudio_* HLE stubs with the sysPrxForUser module.
void sysPrxForUser_sys_rsxaudio_init()
{
	REG_FUNC(sysPrxForUser, sys_rsxaudio_close_connection);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_create_connection);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_finalize);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_import_shared_memory);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_initialize);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_prepare_process);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_start_process);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_stop_process);
	REG_FUNC(sysPrxForUser, sys_rsxaudio_unimport_shared_memory);
}
| 1,553
|
C++
|
.cpp
| 61
| 23.819672
| 62
| 0.810135
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,315
|
cellNetCtl.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellNetCtl.cpp
|
#include "stdafx.h"
#include "Emu/system_config.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellGame.h"
#include "cellSysutil.h"
#include "cellNetCtl.h"
#include "Utilities/StrUtil.h"
#include "Emu/NP/np_handler.h"
#include "Emu/NP/np_helpers.h"
LOG_CHANNEL(cellNetCtl);
// Formatter specialization: converts CellNetCtlError values to their enumerator names for logging.
template <>
void fmt_class_string<CellNetCtlError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_NET_CTL_ERROR_NOT_INITIALIZED);
		STR_CASE(CELL_NET_CTL_ERROR_NOT_TERMINATED);
		STR_CASE(CELL_NET_CTL_ERROR_HANDLER_MAX);
		STR_CASE(CELL_NET_CTL_ERROR_ID_NOT_FOUND);
		STR_CASE(CELL_NET_CTL_ERROR_INVALID_ID);
		STR_CASE(CELL_NET_CTL_ERROR_INVALID_CODE);
		STR_CASE(CELL_NET_CTL_ERROR_INVALID_ADDR);
		STR_CASE(CELL_NET_CTL_ERROR_NOT_CONNECTED);
		STR_CASE(CELL_NET_CTL_ERROR_NOT_AVAIL);
		STR_CASE(CELL_NET_CTL_ERROR_INVALID_TYPE);
		STR_CASE(CELL_NET_CTL_ERROR_INVALID_SIZE);
		STR_CASE(CELL_NET_CTL_ERROR_NET_DISABLED);
		STR_CASE(CELL_NET_CTL_ERROR_NET_NOT_CONNECTED);
		STR_CASE(CELL_NET_CTL_ERROR_NP_NO_ACCOUNT);
		STR_CASE(CELL_NET_CTL_ERROR_NP_RESERVED1);
		STR_CASE(CELL_NET_CTL_ERROR_NP_RESERVED2);
		STR_CASE(CELL_NET_CTL_ERROR_NET_CABLE_NOT_CONNECTED);
		STR_CASE(CELL_NET_CTL_ERROR_DIALOG_CANCELED);
		STR_CASE(CELL_NET_CTL_ERROR_DIALOG_ABORTED);
		STR_CASE(CELL_NET_CTL_ERROR_WLAN_DEAUTHED);
		STR_CASE(CELL_NET_CTL_ERROR_WLAN_KEYINFO_EXCHNAGE_TIMEOUT);
		STR_CASE(CELL_NET_CTL_ERROR_WLAN_ASSOC_FAILED);
		STR_CASE(CELL_NET_CTL_ERROR_WLAN_AP_DISAPPEARED);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_INIT);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_NO_PADO);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_NO_PADS);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_GET_PADT);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_SERVICE_NAME);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_AC_SYSTEM);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_SESSION_GENERIC);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_STATUS_AUTH);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_STATUS_NETWORK);
		STR_CASE(CELL_NET_CTL_ERROR_PPPOE_STATUS_TERMINATE);
		STR_CASE(CELL_NET_CTL_ERROR_DHCP_LEASE_TIME);
		// cellGameUpdate errors are formatted through the same enum type
		STR_CASE(CELL_GAMEUPDATE_ERROR_NOT_INITIALIZED);
		STR_CASE(CELL_GAMEUPDATE_ERROR_ALREADY_INITIALIZED);
		STR_CASE(CELL_GAMEUPDATE_ERROR_INVALID_ADDR);
		STR_CASE(CELL_GAMEUPDATE_ERROR_INVALID_SIZE);
		STR_CASE(CELL_GAMEUPDATE_ERROR_INVALID_MEMORY_CONTAINER);
		STR_CASE(CELL_GAMEUPDATE_ERROR_INSUFFICIENT_MEMORY_CONTAINER);
		STR_CASE(CELL_GAMEUPDATE_ERROR_BUSY);
		STR_CASE(CELL_GAMEUPDATE_ERROR_NOT_START);
		STR_CASE(CELL_GAMEUPDATE_ERROR_LOAD_FAILED);
		}

		return unknown;
	});
}
// Formatter specialization: converts CellNetCtlState values to human-readable strings for logging.
template <>
void fmt_class_string<CellNetCtlState>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](CellNetCtlState value)
	{
		switch (value)
		{
		case CELL_NET_CTL_STATE_Disconnected: return "Disconnected";
		case CELL_NET_CTL_STATE_Connecting: return "Connecting";
		case CELL_NET_CTL_STATE_IPObtaining: return "Obtaining IP";
		case CELL_NET_CTL_STATE_IPObtained: return "IP Obtained";
		}

		return unknown;
	});
}
// Guest-visible result structure passed to the extended game-update callbacks.
struct CellGameUpdateResult
{
	be_t<s32> status; // CellGameUpdateResultStatus
	be_t<s32> error_code;
	char app_ver[CELL_GAME_SYSP_APP_VER_SIZE];
	char padding[2]; // keeps the guest layout/alignment intact
};
// Guest-visible parameter structure for the game-update check functions.
struct CellGameUpdateParam
{
	be_t<u32> size; // size of this structure in bytes
	be_t<u32> cid;  // memory container id — presumably; verify against SDK usage
};

// Guest callback signatures for the basic and extended game-update APIs.
using CellGameUpdateCallback = void(s32 status, s32 error_code, vm::ptr<void> userdata);
using CellGameUpdateCallbackEx = void(vm::ptr<CellGameUpdateResult> result, vm::ptr<void> userdata);
// Initializes the netctl facility; fails if it is already initialized.
error_code cellNetCtlInit()
{
	cellNetCtl.warning("cellNetCtlInit()");

	// netctl state lives on the NP handler thread object
	auto& handler = g_fxo->get<named_thread<np::np_handler>>();

	// Double initialization is an error until cellNetCtlTerm is called
	if (handler.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_TERMINATED;
	}

	handler.is_netctl_init = true;
	return CELL_OK;
}
// Terminates the netctl facility; unconditionally clears the init flag.
void cellNetCtlTerm()
{
	cellNetCtl.warning("cellNetCtlTerm()");

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();
	nph.is_netctl_init = false;
}
// Writes the current emulated network connection state to *state.
error_code cellNetCtlGetState(vm::ptr<s32> state)
{
	cellNetCtl.trace("cellNetCtlGetState(state=*0x%x)", state);

	auto& handler = g_fxo->get<named_thread<np::np_handler>>();

	if (!handler.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!state)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	// Report the state maintained by the NP handler
	*state = handler.get_net_status();
	return CELL_OK;
}
// Registers a network-state change handler. Currently a stub: arguments are validated
// but the handler is never stored or invoked.
// NOTE(review): *hid is never written, so the caller receives an uninitialized handler id —
// presumably a real implementation assigns an id in [0, 3]; confirm before relying on it.
error_code cellNetCtlAddHandler(vm::ptr<cellNetCtlHandler> handler, vm::ptr<void> arg, vm::ptr<s32> hid)
{
	cellNetCtl.todo("cellNetCtlAddHandler(handler=*0x%x, arg=*0x%x, hid=*0x%x)", handler, arg, hid);

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!hid)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	return CELL_OK;
}
// Unregisters a network-state change handler. Currently a stub: the id is only validated.
error_code cellNetCtlDelHandler(s32 hid)
{
	cellNetCtl.todo("cellNetCtlDelHandler(hid=0x%x)", hid);

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	// Handler ids occupy the range [0, 3]. The previous check (hid > 3) let
	// negative ids through as if they were valid; reject them as well.
	if (hid < 0 || hid > 3)
	{
		return CELL_NET_CTL_ERROR_INVALID_ID;
	}

	return CELL_OK;
}
// Fills *info with emulated network information for the requested info code.
// Most values are fixed (wired 100BASE-T, static IP) regardless of the host setup.
error_code cellNetCtlGetInfo(s32 code, vm::ptr<CellNetCtlInfo> info)
{
	cellNetCtl.warning("cellNetCtlGetInfo(code=0x%x (%s), info=*0x%x)", code, InfoCodeToName(code), info);

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!info)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	// The MAC address is available even while disconnected, so handle it before the state check
	if (code == CELL_NET_CTL_INFO_ETHER_ADDR)
	{
		memcpy(info->ether_addr.data, nph.get_ether_addr().data(), 6);
		return CELL_OK;
	}

	// All other codes require an active connection
	if (nph.get_net_status() == CELL_NET_CTL_STATE_Disconnected)
	{
		return CELL_NET_CTL_ERROR_NOT_CONNECTED;
	}

	switch (code)
	{
	case CELL_NET_CTL_INFO_DEVICE: info->device = CELL_NET_CTL_DEVICE_WIRED; break;
	case CELL_NET_CTL_INFO_MTU: info->mtu = 1500; break;
	case CELL_NET_CTL_INFO_LINK: info->link = CELL_NET_CTL_LINK_CONNECTED; break;
	case CELL_NET_CTL_INFO_LINK_TYPE: info->link_type = CELL_NET_CTL_LINK_TYPE_100BASE_FULL; break;
	// case CELL_NET_CTL_INFO_BSSID: break;
	// case CELL_NET_CTL_INFO_SSID: break;
	// case CELL_NET_CTL_INFO_WLAN_SECURITY: break;
	// case CELL_NET_CTL_INFO_8021X_TYPE: break;
	// case CELL_NET_CTL_INFO_8021X_AUTH_NAME: break;
	case CELL_NET_CTL_INFO_RSSI: info->rssi = 100; break; // wireless: value ranges from 0-100 indicating wireless connection strength
	case CELL_NET_CTL_INFO_CHANNEL: info->channel = 1; break; // wireless: channel used to connect to the AP?
	case CELL_NET_CTL_INFO_IP_CONFIG: info->ip_config = CELL_NET_CTL_IP_STATIC; break;
	case CELL_NET_CTL_INFO_DHCP_HOSTNAME: strcpy_trunc(info->dhcp_hostname, nph.get_hostname()); break;
	// case CELL_NET_CTL_INFO_PPPOE_AUTH_NAME: break;
	case CELL_NET_CTL_INFO_IP_ADDRESS: strcpy_trunc(info->ip_address, np::ip_to_string(nph.get_local_ip_addr())); break; // verified on HW
	case CELL_NET_CTL_INFO_NETMASK: strcpy_trunc(info->netmask, "255.255.255.0"); break;
	case CELL_NET_CTL_INFO_DEFAULT_ROUTE: strcpy_trunc(info->default_route, "192.168.1.1"); break;
	case CELL_NET_CTL_INFO_PRIMARY_DNS: strcpy_trunc(info->primary_dns, np::ip_to_string(nph.get_dns_ip())); break;
	case CELL_NET_CTL_INFO_SECONDARY_DNS: strcpy_trunc(info->secondary_dns, np::ip_to_string(nph.get_dns_ip())); break;
	case CELL_NET_CTL_INFO_HTTP_PROXY_CONFIG: info->http_proxy_config = 0; break;
	// case CELL_NET_CTL_INFO_HTTP_PROXY_SERVER: break;
	// case CELL_NET_CTL_INFO_HTTP_PROXY_PORT: break;
	case CELL_NET_CTL_INFO_UPNP_CONFIG: info->upnp_config = (nph.get_upnp_status() == SCE_NP_SIGNALING_NETINFO_UPNP_STATUS_VALID) ? CELL_NET_CTL_UPNP_ON : CELL_NET_CTL_UPNP_OFF; break;
	// case CELL_NET_CTL_INFO_RESERVED1: break;
	// case CELL_NET_CTL_INFO_RESERVED2: break;
	default: cellNetCtl.error("Unsupported request: %s", InfoCodeToName(code)); break;
	}

	return CELL_OK;
}
// Helper thread that delays the NETSTART dialog completion notifications,
// giving games time to register their sysutil callback first.
struct netstart_hack
{
	static constexpr std::string_view thread_name = "NetStart Hack";

	void operator()(int) const
	{
		// Small delay before signalling that the dialog "loaded" and "finished"
		thread_ctrl::wait_for(500'000);
		sysutil_send_system_cmd(CELL_SYSUTIL_NET_CTL_NETSTART_LOADED, 0);
		sysutil_send_system_cmd(CELL_SYSUTIL_NET_CTL_NETSTART_FINISHED, 0);
	}
};
// Starts the (emulated) network-start dialog asynchronously. The dialog itself is not
// shown; completion events are delivered shortly after via the netstart_hack thread.
error_code cellNetCtlNetStartDialogLoadAsync(vm::cptr<CellNetCtlNetStartDialogParam> param)
{
	cellNetCtl.warning("cellNetCtlNetStartDialogLoadAsync(param=*0x%x)", param);

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!param)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	if (param->type >= CELL_NET_CTL_NETSTART_TYPE_MAX)
	{
		return CELL_NET_CTL_ERROR_INVALID_TYPE;
	}

	// Struct size must match the known guest layout (12 bytes)
	if (param->size != 12u)
	{
		return CELL_NET_CTL_ERROR_INVALID_SIZE;
	}

	// This is a hack for Diva F 2nd that registers the sysutil callback after calling this function.
	g_fxo->get<named_thread<netstart_hack>>()(0);

	return CELL_OK;
}
// Aborts the network-start dialog. Stub: only the init state is validated.
error_code cellNetCtlNetStartDialogAbortAsync()
{
	cellNetCtl.error("cellNetCtlNetStartDialogAbortAsync()");

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	return CELL_OK;
}
// Unloads the network-start dialog and reports its result based on the current
// connection state, then signals the UNLOADED sysutil event.
error_code cellNetCtlNetStartDialogUnloadAsync(vm::ptr<CellNetCtlNetStartDialogResult> result)
{
	cellNetCtl.warning("cellNetCtlNetStartDialogUnloadAsync(result=*0x%x)", result);

	auto& nph = g_fxo->get<named_thread<np::np_handler>>();

	if (!nph.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!result)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	// Struct size must match the known guest layout (8 bytes)
	if (result->size != 8u)
	{
		return CELL_NET_CTL_ERROR_INVALID_SIZE;
	}

	// Success only if an IP was obtained; otherwise report the dialog as canceled
	result->result = nph.get_net_status() == CELL_NET_CTL_STATE_IPObtained ? 0 : CELL_NET_CTL_ERROR_DIALOG_CANCELED;
	sysutil_send_system_cmd(CELL_SYSUTIL_NET_CTL_NETSTART_UNLOADED, 0);
	return CELL_OK;
}
// Reports NAT traversal information; always claims a well-behaved NAT type 2 setup.
error_code cellNetCtlGetNatInfo(vm::ptr<CellNetCtlNatInfo> natInfo)
{
	cellNetCtl.warning("cellNetCtlGetNatInfo(natInfo=*0x%x)", natInfo);

	auto& handler = g_fxo->get<named_thread<np::np_handler>>();

	if (!handler.is_netctl_init)
	{
		return CELL_NET_CTL_ERROR_NOT_INITIALIZED;
	}

	if (!natInfo)
	{
		return CELL_NET_CTL_ERROR_INVALID_ADDR;
	}

	// Two struct revisions exist in the wild (16 and 20 bytes); anything else is invalid
	const u32 struct_size = natInfo->size;

	if (!(struct_size == 16u || struct_size == 20u))
	{
		return CELL_NET_CTL_ERROR_INVALID_SIZE;
	}

	natInfo->nat_type = CELL_NET_CTL_NATINFO_NAT_TYPE_2;
	natInfo->stun_status = CELL_NET_CTL_NATINFO_STUN_OK;
	natInfo->upnp_status = handler.get_upnp_status();
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlAddHandlerGameInt()
{
	cellNetCtl.todo("cellNetCtlAddHandlerGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlConnectGameInt()
{
	cellNetCtl.todo("cellNetCtlConnectGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlDelHandlerGameInt()
{
	cellNetCtl.todo("cellNetCtlDelHandlerGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlDisconnectGameInt()
{
	cellNetCtl.todo("cellNetCtlDisconnectGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlGetInfoGameInt()
{
	cellNetCtl.todo("cellNetCtlGetInfoGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlGetScanInfoGameInt()
{
	cellNetCtl.todo("cellNetCtlGetScanInfoGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlGetStateGameInt()
{
	cellNetCtl.todo("cellNetCtlGetStateGameInt()");
	return CELL_OK;
}
// Stub (internal game API): logs a todo and reports success.
error_code cellNetCtlScanGameInt()
{
	cellNetCtl.todo("cellNetCtlScanGameInt()");
	return CELL_OK;
}
// Stub: logs a todo and reports success.
error_code cellGameUpdateInit()
{
	cellNetCtl.todo("cellGameUpdateInit()");
	return CELL_OK;
}
// Stub: logs a todo and reports success.
error_code cellGameUpdateTerm()
{
	cellNetCtl.todo("cellGameUpdateTerm()");
	return CELL_OK;
}
// Starts a game-update check; always reports "no update" through the sysutil callback queue.
error_code cellGameUpdateCheckStartAsync(vm::cptr<CellGameUpdateParam> param, vm::ptr<CellGameUpdateCallback> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckStartAsync(param=*0x%x, cb_func=*0x%x, userdata=*0x%x)", param, cb_func, userdata);

	// Defer the guest callback so it runs from cellSysutilCheckCallback, as on real hardware
	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		cb_func(ppu, CELL_GAMEUPDATE_RESULT_STATUS_NO_UPDATE, CELL_OK, userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// Finishes a game-update check; always reports "finished" through the sysutil callback queue.
error_code cellGameUpdateCheckFinishAsync(vm::ptr<CellGameUpdateCallback> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckFinishAsync(cb_func=*0x%x, userdata=*0x%x)", cb_func, userdata);

	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		cb_func(ppu, CELL_GAMEUPDATE_RESULT_STATUS_FINISHED, CELL_OK, userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// Dialog-less game-update check; always reports "no update" through the sysutil callback queue.
error_code cellGameUpdateCheckStartWithoutDialogAsync(vm::ptr<CellGameUpdateCallback> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckStartWithoutDialogAsync(cb_func=*0x%x, userdata=*0x%x)", cb_func, userdata);

	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		cb_func(ppu, CELL_GAMEUPDATE_RESULT_STATUS_NO_UPDATE, CELL_OK, userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// Stub: logs a todo and reports success.
error_code cellGameUpdateCheckAbort()
{
	cellNetCtl.todo("cellGameUpdateCheckAbort()");
	return CELL_OK;
}
// Extended game-update check; reports "no update" via the result-struct callback variant.
error_code cellGameUpdateCheckStartAsyncEx(vm::cptr<CellGameUpdateParam> param, vm::ptr<CellGameUpdateCallbackEx> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckStartAsyncEx(param=*0x%x, cb_func=*0x%x, userdata=*0x%x)", param, cb_func, userdata);

	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		// Result struct is allocated in guest memory for the callback
		cb_func(ppu, vm::make_var(CellGameUpdateResult{CELL_GAMEUPDATE_RESULT_STATUS_NO_UPDATE, CELL_OK}), userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// Extended game-update finish; reports "finished" via the result-struct callback variant.
error_code cellGameUpdateCheckFinishAsyncEx(vm::ptr<CellGameUpdateCallbackEx> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckFinishAsyncEx(cb_func=*0x%x, userdata=*0x%x)", cb_func, userdata);

	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		cb_func(ppu, vm::make_var(CellGameUpdateResult{CELL_GAMEUPDATE_RESULT_STATUS_FINISHED, CELL_OK}), userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// Extended dialog-less game-update check; reports "no update" via the result-struct callback variant.
error_code cellGameUpdateCheckStartWithoutDialogAsyncEx(vm::ptr<CellGameUpdateCallbackEx> cb_func, vm::ptr<void> userdata)
{
	cellNetCtl.todo("cellGameUpdateCheckStartWithoutDialogAsyncEx(cb_func=*0x%x, userdata=*0x%x)", cb_func, userdata);

	sysutil_register_cb([=](ppu_thread& ppu) -> s32
	{
		cb_func(ppu, vm::make_var(CellGameUpdateResult{CELL_GAMEUPDATE_RESULT_STATUS_NO_UPDATE, CELL_OK}), userdata);
		return CELL_OK;
	});

	return CELL_OK;
}
// HLE function registration for the cellNetCtl PPU module.
DECLARE(ppu_module_manager::cellNetCtl)("cellNetCtl", []()
{
	REG_FUNC(cellNetCtl, cellNetCtlInit);
	REG_FUNC(cellNetCtl, cellNetCtlTerm);
	REG_FUNC(cellNetCtl, cellNetCtlGetState);
	REG_FUNC(cellNetCtl, cellNetCtlAddHandler);
	REG_FUNC(cellNetCtl, cellNetCtlDelHandler);
	REG_FUNC(cellNetCtl, cellNetCtlGetInfo);
	REG_FUNC(cellNetCtl, cellNetCtlNetStartDialogLoadAsync);
	REG_FUNC(cellNetCtl, cellNetCtlNetStartDialogAbortAsync);
	REG_FUNC(cellNetCtl, cellNetCtlNetStartDialogUnloadAsync);
	REG_FUNC(cellNetCtl, cellNetCtlGetNatInfo);
	REG_FUNC(cellNetCtl, cellNetCtlAddHandlerGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlConnectGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlDelHandlerGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlDisconnectGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlGetInfoGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlGetScanInfoGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlGetStateGameInt);
	REG_FUNC(cellNetCtl, cellNetCtlScanGameInt);
	// cellGameUpdate functions are exported by this module as well
	REG_FUNC(cellNetCtl, cellGameUpdateInit);
	REG_FUNC(cellNetCtl, cellGameUpdateTerm);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckStartAsync);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckFinishAsync);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckStartWithoutDialogAsync);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckAbort);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckStartAsyncEx);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckFinishAsyncEx);
	REG_FUNC(cellNetCtl, cellGameUpdateCheckStartWithoutDialogAsyncEx);
});
| 15,593
|
C++
|
.cpp
| 437
| 33.407323
| 181
| 0.758252
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,316
|
cellGame.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellGame.cpp
|
#include "stdafx.h"
#include "Emu/localized_string.h"
#include "Emu/System.h"
#include "Emu/system_utils.hpp"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_fs.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "cellSysutil.h"
#include "cellMsgDialog.h"
#include "cellGame.h"
#include "Loader/PSF.h"
#include "Utilities/StrUtil.h"
#include "util/init_mutex.hpp"
#include "util/asm.hpp"
#include "Crypto/utils.h"
#include <span>
LOG_CHANNEL(cellGame);
vm::gvar<CellHddGameStatGet> g_stat_get;
vm::gvar<CellHddGameStatSet> g_stat_set;
vm::gvar<CellHddGameSystemFileParam> g_file_param;
vm::gvar<CellHddGameCBResult> g_cb_result;
stx::init_lock acquire_lock(stx::init_mutex& mtx, ppu_thread* ppu = nullptr);
stx::access_lock acquire_access_lock(stx::init_mutex& mtx, ppu_thread* ppu = nullptr);
stx::reset_lock acquire_reset_lock(stx::init_mutex& mtx, ppu_thread* ppu = nullptr);
// Formatter specialization: converts CellGameError values to their enumerator names for logging.
template<>
void fmt_class_string<CellGameError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_GAME_ERROR_NOTFOUND);
		STR_CASE(CELL_GAME_ERROR_BROKEN);
		STR_CASE(CELL_GAME_ERROR_INTERNAL);
		STR_CASE(CELL_GAME_ERROR_PARAM);
		STR_CASE(CELL_GAME_ERROR_NOAPP);
		STR_CASE(CELL_GAME_ERROR_ACCESS_ERROR);
		STR_CASE(CELL_GAME_ERROR_NOSPACE);
		STR_CASE(CELL_GAME_ERROR_NOTSUPPORTED);
		STR_CASE(CELL_GAME_ERROR_FAILURE);
		STR_CASE(CELL_GAME_ERROR_BUSY);
		STR_CASE(CELL_GAME_ERROR_IN_SHUTDOWN);
		STR_CASE(CELL_GAME_ERROR_INVALID_ID);
		STR_CASE(CELL_GAME_ERROR_EXIST);
		STR_CASE(CELL_GAME_ERROR_NOTPATCH);
		STR_CASE(CELL_GAME_ERROR_INVALID_THEME_FILE);
		STR_CASE(CELL_GAME_ERROR_BOOTPATH);
		}

		return unknown;
	});
}
// Formatter specialization: converts CellGameDataError values to their enumerator names for logging.
template<>
void fmt_class_string<CellGameDataError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_GAMEDATA_ERROR_CBRESULT);
		STR_CASE(CELL_GAMEDATA_ERROR_ACCESS_ERROR);
		STR_CASE(CELL_GAMEDATA_ERROR_INTERNAL);
		STR_CASE(CELL_GAMEDATA_ERROR_PARAM);
		STR_CASE(CELL_GAMEDATA_ERROR_NOSPACE);
		STR_CASE(CELL_GAMEDATA_ERROR_BROKEN);
		STR_CASE(CELL_GAMEDATA_ERROR_FAILURE);
		}

		return unknown;
	});
}
// Formatter specialization: converts CellDiscGameError values to their enumerator names for logging.
template<>
void fmt_class_string<CellDiscGameError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_DISCGAME_ERROR_INTERNAL);
		STR_CASE(CELL_DISCGAME_ERROR_NOT_DISCBOOT);
		STR_CASE(CELL_DISCGAME_ERROR_PARAM);
		}

		return unknown;
	});
}
// Formatter specialization: converts CellHddGameError values to their enumerator names for logging.
template<>
void fmt_class_string<CellHddGameError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_HDDGAME_ERROR_CBRESULT);
		STR_CASE(CELL_HDDGAME_ERROR_ACCESS_ERROR);
		STR_CASE(CELL_HDDGAME_ERROR_INTERNAL);
		STR_CASE(CELL_HDDGAME_ERROR_PARAM);
		STR_CASE(CELL_HDDGAME_ERROR_NOSPACE);
		STR_CASE(CELL_HDDGAME_ERROR_BROKEN);
		STR_CASE(CELL_HDDGAME_ERROR_FAILURE);
		}

		return unknown;
	});
}
// If dir is empty:
// contentInfo = "/dev_bdvd/PS3_GAME"
// usrdir = "/dev_bdvd/PS3_GAME/USRDIR"
// Temporary content directory (dir is not empty):
// contentInfo = "/dev_hdd0/game/_GDATA_" + time_since_epoch
// usrdir = "/dev_hdd0/game/_GDATA_" + time_since_epoch + "/USRDIR"
// Normal content directory (dir is not empty):
// contentInfo = "/dev_hdd0/game/" + dir
// usrdir = "/dev_hdd0/game/" + dir + "/USRDIR"
// Tracks the game-content directory a title currently has permission to access
// (set up by cellGameBootCheck/cellGameDataCheck and friends). On destruction the
// temporary directory, if any, is removed from the host filesystem.
struct content_permission final
{
	// Content directory name or path
	std::string dir;

	// SFO file
	psf::registry sfo;

	// Temporary directory path
	std::string temp;

	// Guards concurrent init/reset of this object
	stx::init_mutex init;

	// Which check function established the current permission
	enum class check_mode
	{
		not_set,
		game_data,
		patch,
		hdd_game,
		disc_game
	};

	atomic_t<u32> can_create = 0;    // non-zero if the content dir may be created
	atomic_t<bool> exists = false;   // whether the content dir already exists
	atomic_t<check_mode> mode = check_mode::not_set;

	content_permission() = default;

	content_permission(const content_permission&) = delete;

	content_permission& operator=(const content_permission&) = delete;

	// Returns the object to its default (no permission) state
	void reset()
	{
		dir.clear();
		sfo.clear();
		temp.clear();
		can_create = 0;
		exists = false;
		mode = check_mode::not_set;
	}

	~content_permission()
	{
		bool success = false;
		fs::g_tls_error = fs::error::ok;

		// temp.size() <= 1 means no temporary directory was ever assigned
		if (temp.size() <= 1 || fs::remove_all(temp))
		{
			success = true;
		}

		if (!success)
		{
			cellGame.fatal("Failed to clean directory '%s' (%s)", temp, fs::g_tls_error);
		}
	}
};
// Formatter specialization: converts content_permission::check_mode values to names for logging.
template<>
void fmt_class_string<content_permission::check_mode>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(content_permission::check_mode::not_set);
		STR_CASE(content_permission::check_mode::game_data);
		STR_CASE(content_permission::check_mode::patch);
		STR_CASE(content_permission::check_mode::hdd_game);
		STR_CASE(content_permission::check_mode::disc_game);
		}

		return unknown;
	});
}
// Formatter specialization: converts disc_change_manager::eject_state values to names for logging.
template<>
void fmt_class_string<disc_change_manager::eject_state>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(disc_change_manager::eject_state::unknown);
		STR_CASE(disc_change_manager::eject_state::inserted);
		STR_CASE(disc_change_manager::eject_state::ejected);
		STR_CASE(disc_change_manager::eject_state::busy);
		}

		return unknown;
	});
}
// Validates that a guest-supplied system version string has the exact "NN.NNNN" format.
// The disassembly shows that "04.83" would also be considered valid, but the initial strlen check makes this void.
static bool check_system_ver(vm::cptr<char> systemVersion)
{
	// std::isdigit invoked with a plain char is undefined behavior when char is signed
	// and holds a negative value (e.g. a byte >= 0x80 from guest memory), so promote
	// through unsigned char first.
	const auto digit = [](char c)
	{
		return std::isdigit(static_cast<unsigned char>(c)) != 0;
	};

	// Only allow something like "04.8300".
	return (
		systemVersion &&
		std::strlen(systemVersion.get_ptr()) == 7 &&
		digit(systemVersion[0]) &&
		digit(systemVersion[1]) &&
		systemVersion[2] == '.' &&
		digit(systemVersion[3]) &&
		digit(systemVersion[4]) &&
		digit(systemVersion[5]) &&
		digit(systemVersion[6])
	);
}
// Disable the UI's disc eject/insert actions until callbacks are registered.
disc_change_manager::disc_change_manager()
{
	Emu.GetCallbacks().enable_disc_eject(false);
	Emu.GetCallbacks().enable_disc_insert(false);
}
// Disable the UI's disc eject/insert actions on teardown.
disc_change_manager::~disc_change_manager()
{
	Emu.GetCallbacks().enable_disc_eject(false);
	Emu.GetCallbacks().enable_disc_insert(false);
}
// Stores the guest's disc eject/insert callbacks and enables the matching UI actions
// based on whether a disc is currently mounted.
error_code disc_change_manager::register_callbacks(vm::ptr<CellGameDiscEjectCallback> func_eject, vm::ptr<CellGameDiscInsertCallback> func_insert)
{
	std::lock_guard lock(mtx);

	eject_callback = func_eject;
	insert_callback = func_insert;

	const bool is_disc_mounted = fs::is_dir(vfs::get("/dev_bdvd/PS3_GAME"));

	// First registration determines the initial state from the mount status
	if (state == eject_state::unknown)
	{
		state = is_disc_mounted ? eject_state::inserted : eject_state::ejected;
	}

	// Eject is only meaningful with a mounted disc; insert only without one
	Emu.GetCallbacks().enable_disc_eject(!!func_eject && is_disc_mounted);
	Emu.GetCallbacks().enable_disc_insert(!!func_insert && !is_disc_mounted);

	return CELL_OK;
}
// Clears both disc-change callbacks and disables the UI actions.
// Avoids taking the mutex while an insert callback is running to prevent deadlock.
error_code disc_change_manager::unregister_callbacks()
{
	const auto unregister = [this]() -> void
	{
		eject_callback = vm::null;
		insert_callback = vm::null;

		Emu.GetCallbacks().enable_disc_eject(false);
		Emu.GetCallbacks().enable_disc_insert(false);
	};

	if (is_inserting)
	{
		// NOTE: The insert_callback is known to call cellGameUnregisterDiscChangeCallback.
		// So we keep it out of the mutex lock until it proves to be an issue.
		unregister();
	}
	else
	{
		std::lock_guard lock(mtx);
		unregister();
	}

	return CELL_OK;
}
// Initiates a disc ejection: transitions to the busy state and defers the actual
// unmount plus guest eject callback to the sysutil callback queue.
void disc_change_manager::eject_disc()
{
	cellGame.notice("Ejecting disc...");

	std::lock_guard lock(mtx);

	// Only a currently inserted disc can be ejected
	if (state != eject_state::inserted)
	{
		cellGame.fatal("Can not eject disc in the current state. (state=%s)", state.load());
		return;
	}

	state = eject_state::busy;
	Emu.GetCallbacks().enable_disc_eject(false);
	ensure(eject_callback);

	sysutil_register_cb([](ppu_thread& cb_ppu) -> s32
	{
		auto& dcm = g_fxo->get<disc_change_manager>();
		std::lock_guard lock(dcm.mtx);

		cellGame.notice("Executing eject_callback...");

		// Notify the guest first, then unmount the disc devices
		dcm.eject_callback(cb_ppu);

		ensure(vfs::unmount("/dev_bdvd"));
		ensure(vfs::unmount("/dev_ps2disc"));

		dcm.state = eject_state::ejected;

		// Re-enable disc insertion only if the callback is still registered
		Emu.GetCallbacks().enable_disc_insert(!!dcm.insert_callback);

		return CELL_OK;
	});
}
// Initiates a disc insertion: transitions to the busy state and defers the guest
// insert callback (with the title id for PS3 discs) to the sysutil callback queue.
void disc_change_manager::insert_disc(u32 disc_type, std::string title_id)
{
	cellGame.notice("Inserting disc...");

	std::lock_guard lock(mtx);

	// Only an ejected drive can accept a new disc
	if (state != eject_state::ejected)
	{
		cellGame.fatal("Can not insert disc in the current state. (state=%s)", state.load());
		return;
	}

	state = eject_state::busy;
	Emu.GetCallbacks().enable_disc_insert(false);
	ensure(insert_callback);

	// Flag used by unregister_callbacks() to avoid re-locking mtx from the callback
	is_inserting = true;

	sysutil_register_cb([disc_type, title_id = std::move(title_id)](ppu_thread& cb_ppu) -> s32
	{
		auto& dcm = g_fxo->get<disc_change_manager>();
		std::lock_guard lock(dcm.mtx);

		if (disc_type == CELL_GAME_DISCTYPE_PS3)
		{
			// PS3 discs pass the title id to the guest as a string in guest memory
			vm::var<char[]> _title_id = vm::make_str(title_id);
			cellGame.notice("Executing insert_callback for title '%s' with disc_type %d...", _title_id.get_ptr(), disc_type);
			dcm.insert_callback(cb_ppu, disc_type, _title_id);
		}
		else
		{
			cellGame.notice("Executing insert_callback with disc_type %d...", disc_type);
			dcm.insert_callback(cb_ppu, disc_type, vm::null);
		}

		dcm.state = eject_state::inserted;

		// Re-enable disc ejection only if the callback is still registered
		Emu.GetCallbacks().enable_disc_eject(!!dcm.eject_callback);

		dcm.is_inserting = false;

		return CELL_OK;
	});
}
// Puts the (current or given) PPU thread to sleep for `timeout` microseconds using
// the lv2 scheduler, preserving the thread's wait flag across the sleep.
extern void lv2_sleep(u64 timeout, ppu_thread* ppu = nullptr)
{
	if (!ppu)
	{
		// Must be called from a PPU thread if none is passed explicitly
		ppu = ensure(cpu_thread::get_current<ppu_thread>());
	}

	if (!timeout)
	{
		return;
	}

	// Remember whether the wait flag was already set so it can be restored afterwards
	const bool had_wait = ppu->state.test_and_set(cpu_flag::wait);
	lv2_obj::sleep(*ppu);
	lv2_obj::wait_timeout(timeout);
	ppu->check_state();

	if (had_wait)
	{
		ppu->state += cpu_flag::wait;
	}
}
error_code cellHddGameCheck(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellHddGameStatCallback> funcStat, u32 container)
{
cellGame.warning("cellHddGameCheck(version=%d, dirName=%s, errDialog=%d, funcStat=*0x%x, container=%d)", version, dirName, errDialog, funcStat, container);
if (version != CELL_GAMEDATA_VERSION_CURRENT || !dirName || !funcStat || sysutil_check_name_string(dirName.get_ptr(), 1, CELL_GAME_DIRNAME_SIZE) != 0)
{
return CELL_HDDGAME_ERROR_PARAM;
}
std::string game_dir = dirName.get_ptr();
// TODO: Find error code
ensure(game_dir.size() == 9);
const std::string dir = "/dev_hdd0/game/" + game_dir;
auto [sfo, psf_error] = psf::load(vfs::get(dir + "/PARAM.SFO"));
const u32 new_data = psf_error == psf::error::stream ? CELL_GAMEDATA_ISNEWDATA_YES : CELL_GAMEDATA_ISNEWDATA_NO;
if (!new_data)
{
const auto cat = psf::get_string(sfo, "CATEGORY", "");
if (!psf::is_cat_hdd(cat))
{
return { CELL_GAMEDATA_ERROR_BROKEN, "CATEGORY='%s'", cat };
}
}
const std::string usrdir = dir + "/USRDIR";
auto& get = g_stat_get;
auto& set = g_stat_set;
auto& result = g_cb_result;
std::memset(get.get_ptr(), 0, sizeof(*get));
std::memset(set.get_ptr(), 0, sizeof(*set));
std::memset(result.get_ptr(), 0, sizeof(*result));
const std::string local_dir = vfs::get(dir);
// 40 GB - 256 kilobytes. The reasoning is that many games take this number and multiply it by 1024, to get the amount of bytes. With 40GB exactly,
// this will result in an overflow, and the size would be 0, preventing the game from running. By reducing 256 kilobytes, we make sure that even
// after said overflow, the number would still be high enough to contain the game's data.
get->hddFreeSizeKB = 40 * 1024 * 1024 - 256;
get->isNewData = CELL_HDDGAME_ISNEWDATA_EXIST;
get->sysSizeKB = 0; // TODO
get->st_atime_ = 0; // TODO
get->st_ctime_ = 0; // TODO
get->st_mtime_ = 0; // TODO
get->sizeKB = CELL_HDDGAME_SIZEKB_NOTCALC;
strcpy_trunc(get->contentInfoPath, dir);
strcpy_trunc(get->gameDataPath, usrdir);
std::memset(g_file_param.get_ptr(), 0, sizeof(*g_file_param));
set->setParam = g_file_param;
if (!fs::is_dir(local_dir))
{
get->isNewData = CELL_HDDGAME_ISNEWDATA_NODIR;
get->getParam = {};
}
else
{
// TODO: Is cellHddGameCheck really responsible for writing the information in get->getParam ? (If not, delete this else)
const psf::registry psf = psf::load_object(local_dir + "/PARAM.SFO");
// Some following fields may be zero in old FW 1.00 version PARAM.SFO
if (psf.contains("PARENTAL_LEVEL")) get->getParam.parentalLevel = ::at32(psf, "PARENTAL_LEVEL").as_integer();
if (psf.contains("ATTRIBUTE")) get->getParam.attribute = ::at32(psf, "ATTRIBUTE").as_integer();
if (psf.contains("RESOLUTION")) get->getParam.resolution = ::at32(psf, "RESOLUTION").as_integer();
if (psf.contains("SOUND_FORMAT")) get->getParam.soundFormat = ::at32(psf, "SOUND_FORMAT").as_integer();
if (psf.contains("TITLE")) strcpy_trunc(get->getParam.title, ::at32(psf, "TITLE").as_string());
if (psf.contains("APP_VER")) strcpy_trunc(get->getParam.dataVersion, ::at32(psf, "APP_VER").as_string());
if (psf.contains("TITLE_ID")) strcpy_trunc(get->getParam.titleId, ::at32(psf, "TITLE_ID").as_string());
for (u32 i = 0; i < CELL_HDDGAME_SYSP_LANGUAGE_NUM; i++)
{
strcpy_trunc(get->getParam.titleLang[i], psf::get_string(psf, fmt::format("TITLE_%02d", i)));
}
}
// TODO ?
lv2_sleep(5000, &ppu);
funcStat(ppu, result, get, set);
std::string error_msg;
switch (result->result)
{
case CELL_HDDGAME_CBRESULT_OK:
{
// Game confirmed that it wants to create directory
const auto setParam = set->setParam;
lv2_sleep(2000, &ppu);
if (new_data)
{
if (!setParam)
{
return CELL_GAMEDATA_ERROR_PARAM;
}
if (!fs::create_path(vfs::get(usrdir)))
{
return {CELL_GAME_ERROR_ACCESS_ERROR, usrdir};
}
}
// Nuked until correctly reversed engineered
// if (setParam)
// {
// if (new_data)
// {
// psf::assign(sfo, "CATEGORY", psf::string(3, "HG"));
// }
// psf::assign(sfo, "TITLE_ID", psf::string(TITLEID_SFO_ENTRY_SIZE, setParam->titleId));
// psf::assign(sfo, "TITLE", psf::string(CELL_GAME_SYSP_TITLE_SIZE, setParam->title));
// psf::assign(sfo, "VERSION", psf::string(CELL_GAME_SYSP_VERSION_SIZE, setParam->dataVersion));
// psf::assign(sfo, "PARENTAL_LEVEL", +setParam->parentalLevel);
// psf::assign(sfo, "RESOLUTION", +setParam->resolution);
// psf::assign(sfo, "SOUND_FORMAT", +setParam->soundFormat);
// for (u32 i = 0; i < CELL_HDDGAME_SYSP_LANGUAGE_NUM; i++)
// {
// if (!setParam->titleLang[i][0])
// {
// continue;
// }
// psf::assign(sfo, fmt::format("TITLE_%02d", i), psf::string(CELL_GAME_SYSP_TITLE_SIZE, setParam->titleLang[i]));
// }
// psf::save_object(fs::file(vfs::get(dir + "/PARAM.SFO"), fs::rewrite), sfo);
// }
return CELL_OK;
}
case CELL_HDDGAME_CBRESULT_OK_CANCEL:
cellGame.warning("cellHddGameCheck(): callback returned CELL_HDDGAME_CBRESULT_OK_CANCEL");
return CELL_OK;
case CELL_HDDGAME_CBRESULT_ERR_NOSPACE:
cellGame.error("cellHddGameCheck(): callback returned CELL_HDDGAME_CBRESULT_ERR_NOSPACE. Space Needed: %d KB", result->errNeedSizeKB);
error_msg = get_localized_string(localized_string_id::CELL_HDD_GAME_CHECK_NOSPACE, fmt::format("%d", result->errNeedSizeKB).c_str());
break;
case CELL_HDDGAME_CBRESULT_ERR_BROKEN:
cellGame.error("cellHddGameCheck(): callback returned CELL_HDDGAME_CBRESULT_ERR_BROKEN");
error_msg = get_localized_string(localized_string_id::CELL_HDD_GAME_CHECK_BROKEN, game_dir.c_str());
break;
case CELL_HDDGAME_CBRESULT_ERR_NODATA:
cellGame.error("cellHddGameCheck(): callback returned CELL_HDDGAME_CBRESULT_ERR_NODATA");
error_msg = get_localized_string(localized_string_id::CELL_HDD_GAME_CHECK_NODATA, game_dir.c_str());
break;
case CELL_HDDGAME_CBRESULT_ERR_INVALID:
cellGame.error("cellHddGameCheck(): callback returned CELL_HDDGAME_CBRESULT_ERR_INVALID. Error message: %s", result->invalidMsg);
error_msg = get_localized_string(localized_string_id::CELL_HDD_GAME_CHECK_INVALID, fmt::format("%s", result->invalidMsg).c_str());
break;
default:
cellGame.error("cellHddGameCheck(): callback returned unknown error (code=0x%x). Error message: %s", result->invalidMsg);
error_msg = get_localized_string(localized_string_id::CELL_HDD_GAME_CHECK_INVALID, fmt::format("%s", result->invalidMsg).c_str());
break;
}
if (errDialog == CELL_GAMEDATA_ERRDIALOG_ALWAYS) // Maybe != CELL_GAMEDATA_ERRDIALOG_NONE
{
// Yield before a blocking dialog is being spawned
lv2_obj::sleep(ppu);
// Get user confirmation by opening a blocking dialog
error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK | CELL_MSGDIALOG_TYPE_DISABLE_CANCEL_ON, vm::make_str(error_msg), msg_dialog_source::_cellGame);
// Reschedule after a blocking dialog returns
if (ppu.check_state())
{
return 0;
}
if (res != CELL_OK)
{
return CELL_GAMEDATA_ERROR_INTERNAL;
}
}
else
{
lv2_sleep(2000, &ppu);
}
return CELL_HDDGAME_ERROR_CBRESULT;
}
// Newer-SDK variant of cellHddGameCheck.
// On this implementation it simply forwards all arguments to cellHddGameCheck.
error_code cellHddGameCheck2(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellHddGameStatCallback> funcStat, u32 container)
{
	cellGame.trace("cellHddGameCheck2()");

	// Identical function
	return cellHddGameCheck(ppu, version, dirName, errDialog, funcStat, container);
}
// Reports the size (in KB) of the currently running HDD game's directory.
// size: output pointer receiving the computed size in kilobytes.
// Returns CELL_HDDGAME_ERROR_PARAM for a null pointer and
// CELL_HDDGAME_ERROR_FAILURE if the host directory size could not be computed.
error_code cellHddGameGetSizeKB(ppu_thread& ppu, vm::ptr<u32> size)
{
	ppu.state += cpu_flag::wait;

	cellGame.warning("cellHddGameGetSizeKB(size=*0x%x)", size);

	if (!size)
	{
		return CELL_HDDGAME_ERROR_PARAM;
	}

	lv2_obj::sleep(ppu);

	const u64 start_sleep = ppu.start_time;

	const std::string local_dir = vfs::get(Emu.GetDir());

	// Walk the host directory tree; umax signals failure
	const auto dirsz = fs::get_dir_size(local_dir, 1024);

	// This function is very slow by nature
	// TODO: Check if after first use the result is being cached so the sleep can be reduced in this case
	// Sleep only for the remainder of the emulated duration not already spent on the host walk
	lv2_sleep(utils::sub_saturate<u64>(dirsz == umax ? 2000 : 200000, get_guest_system_time() - start_sleep), &ppu);

	if (dirsz == umax)
	{
		const auto error = fs::g_tls_error;

		// Only log when the directory exists but its size could not be computed
		if (fs::exists(local_dir))
		{
			cellGame.error("cellHddGameGetSizeKB(): Unknown failure on calculating directory '%s' size (%s)", local_dir, error);
		}

		return CELL_HDDGAME_ERROR_FAILURE;
	}

	ppu.check_state();
	*size = ::narrow<s32>(dirsz / 1024);

	return CELL_OK;
}
// Registers the system software version required by the HDD game.
// Currently only validates the version string; no state is stored (TODO).
error_code cellHddGameSetSystemVer(vm::cptr<char> systemVersion)
{
	cellGame.todo("cellHddGameSetSystemVer(systemVersion=%s)", systemVersion);

	// Reject null or malformed version strings
	if (!check_system_ver(systemVersion))
	{
		return CELL_HDDGAME_ERROR_PARAM;
	}

	return CELL_OK;
}
// Shows the localized "HDD game data is broken" dialog and requests an application exit.
error_code cellHddGameExitBroken()
{
	cellGame.warning("cellHddGameExitBroken()");
	return open_exit_dialog(get_localized_string(localized_string_id::CELL_HDD_GAME_EXIT_BROKEN), true, msg_dialog_source::_cellGame);
}
// Reports the size (in KB) of the current game data directory.
// Mirrors cellHddGameGetSizeKB, but returns CELL_GAMEDATA_* error codes.
// size: output pointer receiving the computed size in kilobytes.
error_code cellGameDataGetSizeKB(ppu_thread& ppu, vm::ptr<u32> size)
{
	ppu.state += cpu_flag::wait;

	cellGame.warning("cellGameDataGetSizeKB(size=*0x%x)", size);

	if (!size)
	{
		return CELL_GAMEDATA_ERROR_PARAM;
	}

	lv2_obj::sleep(ppu);

	const u64 start_sleep = ppu.start_time;

	const std::string local_dir = vfs::get(Emu.GetDir());

	// Walk the host directory tree; umax signals failure
	const auto dirsz = fs::get_dir_size(local_dir, 1024);

	// This function is very slow by nature
	// TODO: Check if after first use the result is being cached so the sleep can be reduced in this case
	// Sleep only for the remainder of the emulated duration not already spent on the host walk
	lv2_sleep(utils::sub_saturate<u64>(dirsz == umax ? 2000 : 200000, get_guest_system_time() - start_sleep), &ppu);

	if (dirsz == umax)
	{
		const auto error = fs::g_tls_error;

		// Only log when the directory exists but its size could not be computed
		if (fs::exists(local_dir))
		{
			cellGame.error("cellGameDataGetSizeKB(): Unknown failure on calculating directory '%s' size (%s)", local_dir, error);
		}

		return CELL_GAMEDATA_ERROR_FAILURE;
	}

	ppu.check_state();
	*size = ::narrow<s32>(dirsz / 1024);

	return CELL_OK;
}
// Registers the system software version required by the game data.
// Currently only validates the version string; no state is stored (TODO).
error_code cellGameDataSetSystemVer(vm::cptr<char> systemVersion)
{
	cellGame.todo("cellGameDataSetSystemVer(systemVersion=%s)", systemVersion);

	// Reject null or malformed version strings
	if (!check_system_ver(systemVersion))
	{
		return CELL_GAMEDATA_ERROR_PARAM;
	}

	return CELL_OK;
}
// Shows the localized "game data is broken" dialog and requests an application exit.
error_code cellGameDataExitBroken()
{
	cellGame.warning("cellGameDataExitBroken()");
	return open_exit_dialog(get_localized_string(localized_string_id::CELL_GAME_DATA_EXIT_BROKEN), true, msg_dialog_source::_cellGame);
}
// Determines the type of the currently booted content (disc game, patch, or HDD game),
// loads its PARAM.SFO and initializes the global content_permission state used by the
// other cellGame* functions.
// type/attributes: mandatory outputs.  size/dirName: optional outputs.
// Returns CELL_GAME_ERROR_BUSY if the permission state is already initialized.
error_code cellGameBootCheck(vm::ptr<u32> type, vm::ptr<u32> attributes, vm::ptr<CellGameContentSize> size, vm::ptr<char[CELL_GAME_DIRNAME_SIZE]> dirName)
{
	cellGame.warning("cellGameBootCheck(type=*0x%x, attributes=*0x%x, size=*0x%x, dirName=*0x%x)", type, attributes, size, dirName);

	if (!type || !attributes)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	auto& perm = g_fxo->get<content_permission>();

	lv2_sleep(500);

	// Acquire one-shot initialization of the shared permission state
	const auto init = acquire_lock(perm.init);

	if (!init)
	{
		return CELL_GAME_ERROR_BUSY;
	}

	std::string dir;
	psf::registry sfo;

	const std::string& cat = Emu.GetFakeCat();

	u32 _type{};

	if (cat == "DG")
	{
		// Disc game: PARAM.SFO is read from the mounted disc
		perm.mode = content_permission::check_mode::disc_game;

		_type = CELL_GAME_GAMETYPE_DISC;
		*attributes = 0; // TODO
		// TODO: dirName might be a read only string when BootCheck is called on a disc game. (e.g. Ben 10 Ultimate Alien: Cosmic Destruction)

		sfo = psf::load_object(vfs::get("/dev_bdvd/PS3_GAME/PARAM.SFO"));
	}
	else if (cat == "GD")
	{
		// Game data booted as a patch: reported to the guest as a disc game with the patch attribute
		perm.mode = content_permission::check_mode::patch;

		_type = CELL_GAME_GAMETYPE_DISC;
		*attributes = CELL_GAME_ATTRIBUTE_PATCH; // TODO

		sfo = psf::load_object(vfs::get(Emu.GetDir() + "PARAM.SFO"));
	}
	else
	{
		// HDD game: also extract its directory name (last path component of the boot dir)
		perm.mode = content_permission::check_mode::hdd_game;

		_type = CELL_GAME_GAMETYPE_HDD;
		*attributes = 0; // TODO

		sfo = psf::load_object(vfs::get(Emu.GetDir() + "PARAM.SFO"));
		dir = fmt::trim(Emu.GetDir().substr(fs::get_parent_dir_view(Emu.GetDir()).size() + 1), fs::delim);
	}

	*type = _type;

	if (size)
	{
		// TODO: Use the free space of the computer's HDD where RPCS3 is being run.
		size->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck

		// TODO: Calculate data size for HG and DG games, if necessary.
		size->sizeKB = CELL_GAME_SIZEKB_NOTCALC;
		size->sysSizeKB = 4;
	}

	if (_type == u32{CELL_GAME_GAMETYPE_HDD} && dirName)
	{
		// The directory name must fit into the guest buffer
		ensure(dir.size() < CELL_GAME_DIRNAME_SIZE);
		strcpy_trunc(*dirName, dir);
	}

	// Publish the loaded state for subsequent cellGame* calls
	perm.dir = std::move(dir);
	perm.sfo = std::move(sfo);
	perm.exists = true;

	return CELL_OK;
}
// Verifies that the running application is a patch (category "GD") and initializes
// the global content_permission state for patch access.
// size: optional output receiving placeholder size information.
// reserved: unused.
error_code cellGamePatchCheck(vm::ptr<CellGameContentSize> size, vm::ptr<void> reserved)
{
	cellGame.warning("cellGamePatchCheck(size=*0x%x, reserved=*0x%x)", size, reserved);

	lv2_sleep(5000);

	// Only valid while a patch ("GD") is running
	if (Emu.GetCat() != "GD")
	{
		return CELL_GAME_ERROR_NOTPATCH;
	}

	psf::registry sfo = psf::load_object(vfs::get(Emu.GetDir() + "PARAM.SFO"));

	auto& perm = g_fxo->get<content_permission>();

	// Acquire one-shot initialization of the shared permission state
	const auto init = acquire_lock(perm.init);

	if (!init)
	{
		return CELL_GAME_ERROR_BUSY;
	}

	if (size)
	{
		// TODO: Use the free space of the computer's HDD where RPCS3 is being run.
		size->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck

		// TODO: Calculate data size for patch data, if necessary.
		size->sizeKB = CELL_GAME_SIZEKB_NOTCALC;
		size->sysSizeKB = 0; // TODO
	}

	perm.mode = content_permission::check_mode::patch;
	perm.dir = Emu.GetTitleID();
	perm.sfo = std::move(sfo);
	perm.exists = true;

	return CELL_OK;
}
// Checks whether content of the given type exists and matches the expected PARAM.SFO
// category, then initializes the global content_permission state for it.
// type: CELL_GAME_GAMETYPE_HDD / GAMEDATA / DISC. dirName is required except for DISC.
// Returns CELL_GAME_RET_NONE (non-error) when the content directory does not exist.
error_code cellGameDataCheck(u32 type, vm::cptr<char> dirName, vm::ptr<CellGameContentSize> size)
{
	cellGame.warning("cellGameDataCheck(type=%d, dirName=%s, size=*0x%x)", type, dirName, size);

	// type must be 1..3, and dirName is mandatory for non-disc types
	if ((type - 1) >= 3 || (type != CELL_GAME_GAMETYPE_DISC && !dirName))
	{
		return {CELL_GAME_ERROR_PARAM, type};
	}

	std::string name;

	if (type != CELL_GAME_GAMETYPE_DISC)
	{
		name = dirName.get_ptr();
	}

	const std::string dir = type == CELL_GAME_GAMETYPE_DISC ? "/dev_bdvd/PS3_GAME"s : "/dev_hdd0/game/" + name;

	// TODO: not sure what should be checked there

	auto& perm = g_fxo->get<content_permission>();

	// Acquire one-shot initialization of the shared permission state
	auto init = acquire_lock(perm.init);

	if (!init)
	{
		lv2_sleep(300);
		return CELL_GAME_ERROR_BUSY;
	}

	// This function is incredibly slow, slower for DISC type and even if the game/disc data does not exist
	// Null size does not change it
	lv2_sleep(type == CELL_GAME_GAMETYPE_DISC ? 300000 : 120000);

	auto [sfo, psf_error] = psf::load(vfs::get(dir + "/PARAM.SFO"));

	// The CATEGORY entry must agree with the requested content type
	if (const std::string_view cat = psf::get_string(sfo, "CATEGORY"); [&]()
	{
		switch (type)
		{
		case CELL_GAME_GAMETYPE_HDD: return !psf::is_cat_hdd(cat);
		case CELL_GAME_GAMETYPE_GAMEDATA: return cat != "GD"sv;
		case CELL_GAME_GAMETYPE_DISC: return cat != "DG"sv;
		default: fmt::throw_exception("Unreachable");
		}
	}())
	{
		// A missing PARAM.SFO (stream error) is tolerated; any other mismatch is a broken content error
		if (psf_error != psf::error::stream)
		{
			init.cancel();
			return {CELL_GAME_ERROR_BROKEN, "psf::error='%s', type='%d' CATEGORY='%s'", psf_error, type, cat};
		}
	}

	if (size)
	{
		// TODO: Use the free space of the computer's HDD where RPCS3 is being run.
		size->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck

		// TODO: Calculate data size for game data, if necessary.
		size->sizeKB = sfo.empty() ? 0 : CELL_GAME_SIZEKB_NOTCALC;
		size->sysSizeKB = 0; // TODO
	}

	perm.dir = std::move(name);
	perm.can_create = type == CELL_GAME_GAMETYPE_GAMEDATA;
	perm.mode = content_permission::check_mode::game_data;

	if (sfo.empty())
	{
		// Content does not exist yet; the lock stays initialized so the game may create it
		cellGame.warning("cellGameDataCheck(): directory '%s' not found", dir);
		return not_an_error(CELL_GAME_RET_NONE);
	}

	perm.exists = true;
	perm.sfo = std::move(sfo);

	return CELL_OK;
}
// Finalizes a content check/create sequence: commits a temporary game-data directory
// (created by cellGameCreateGameData) or updates PARAM.SFO, then returns the final
// contentInfo/USRDIR paths to the guest and resets the permission state.
// contentInfoPath/usrdirPath: mandatory output buffers.
error_code cellGameContentPermit(ppu_thread& ppu, vm::ptr<char[CELL_GAME_PATH_MAX]> contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> usrdirPath)
{
	cellGame.warning("cellGameContentPermit(contentInfoPath=*0x%x, usrdirPath=*0x%x)", contentInfoPath, usrdirPath);

	if (!contentInfoPath || !usrdirPath)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	auto& perm = g_fxo->get<content_permission>();

	// Take the lock and mark the state for reset on release
	const auto init = acquire_reset_lock(perm.init);

	if (!init)
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	const std::string dir = perm.dir.empty() ? "/dev_bdvd/PS3_GAME"s : "/dev_hdd0/game/" + perm.dir;

	// Nothing was created and nothing exists: return empty paths
	if (perm.temp.empty() && !perm.exists)
	{
		perm.reset();
		strcpy_trunc(*contentInfoPath, "");
		strcpy_trunc(*usrdirPath, "");
		return CELL_OK;
	}

	lv2_obj::sleep(ppu);
	const u64 start_sleep = ppu.start_time;

	if (!perm.temp.empty())
	{
		std::vector<std::shared_ptr<lv2_file>> lv2_files;

		const std::string real_dir = vfs::get(dir) + "/";

		std::lock_guard lock(g_mp_sys_dev_hdd0.mutex);

		// Create PARAM.SFO
		fs::pending_file temp(perm.temp + "/PARAM.SFO");
		temp.file.write(psf::save_object(perm.sfo));
		ensure(temp.commit());

		// Collect writable open files under the destination so they can be flushed first
		idm::select<lv2_fs_object, lv2_file>([&](u32 id, lv2_file& file)
		{
			if (file.mp != &g_mp_sys_dev_hdd0)
			{
				return;
			}

			if (real_dir.starts_with(file.real_path))
			{
				if (!file.file)
				{
					return;
				}

				if (file.flags & CELL_FS_O_ACCMODE)
				{
					// Synchronize outside IDM lock scope
					lv2_files.emplace_back(ensure(idm::get_unlocked<lv2_fs_object, lv2_file>(id)));
				}
			}
		});

		for (auto& file : lv2_files)
		{
			// For atomicity
			file->file.sync();
		}

		// Make temporary directory persistent (atomically)
		if (vfs::host::rename(perm.temp, real_dir, &g_mp_sys_dev_hdd0, false, false))
		{
			cellGame.success("cellGameContentPermit(): directory '%s' has been created", dir);

			// Prevent cleanup
			perm.temp.clear();
		}
		else
		{
			cellGame.error("cellGameContentPermit(): failed to initialize directory '%s' (%s)", dir, fs::g_tls_error);
		}
	}
	else if (perm.can_create)
	{
		// Update PARAM.SFO
		fs::pending_file temp(vfs::get(dir + "/PARAM.SFO"));
		temp.file.write(psf::save_object(perm.sfo));
		ensure(temp.commit());
	}

	// This function is very slow by nature
	// Sleep only for the remainder of the emulated duration not already spent above
	lv2_sleep(utils::sub_saturate<u64>(!perm.temp.empty() || perm.can_create ? 200000 : 2000, get_guest_system_time() - start_sleep), &ppu);

	// Cleanup
	perm.reset();

	strcpy_trunc(*contentInfoPath, dir);
	strcpy_trunc(*usrdirPath, dir + "/USRDIR");
	return CELL_OK;
}
// Checks the game-data directory for dirName and lets the game create or update it
// through the supplied status callback.
// version: must be CELL_GAMEDATA_VERSION_CURRENT.
// dirName: directory name under /dev_hdd0/game.
// errDialog: CELL_GAMEDATA_ERRDIALOG_* policy for showing error dialogs.
// funcStat: guest callback receiving the current state (get), a parameter block to
//           fill (set) and returning its decision in result.
// container: memory container id (unused here).
error_code cellGameDataCheckCreate2(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellGameDataStatCallback> funcStat, u32 container)
{
	cellGame.success("cellGameDataCheckCreate2(version=0x%x, dirName=%s, errDialog=0x%x, funcStat=*0x%x, container=%d)", version, dirName, errDialog, funcStat, container);

	// older sdk. it might not care about game type.
	if (version != CELL_GAMEDATA_VERSION_CURRENT || !funcStat || !dirName || sysutil_check_name_string(dirName.get_ptr(), 1, CELL_GAME_DIRNAME_SIZE) != 0)
	{
		return CELL_GAMEDATA_ERROR_PARAM;
	}

	const std::string game_dir = dirName.get_ptr();
	const std::string dir = "/dev_hdd0/game/"s + game_dir;

	auto [sfo, psf_error] = psf::load(vfs::get(dir + "/PARAM.SFO"));

	// A missing PARAM.SFO (stream error) means the game data does not exist yet
	const u32 new_data = psf_error == psf::error::stream ? CELL_GAMEDATA_ISNEWDATA_YES : CELL_GAMEDATA_ISNEWDATA_NO;

	if (!new_data)
	{
		const auto cat = psf::get_string(sfo, "CATEGORY", "");
		if (cat != "GD" && cat != "DG")
		{
			return CELL_GAMEDATA_ERROR_BROKEN;
		}
	}

	const std::string usrdir = dir + "/USRDIR";

	// Reuse the globally allocated guest buffers for the callback arguments
	auto& cbResult = g_cb_result;
	auto& cbGet = g_stat_get;
	auto& cbSet = g_stat_set;

	std::memset(cbGet.get_ptr(), 0, sizeof(*cbGet));
	std::memset(cbSet.get_ptr(), 0, sizeof(*cbSet));
	std::memset(cbResult.get_ptr(), 0, sizeof(*cbResult));

	cbGet->isNewData = new_data;

	// TODO: Use the free space of the computer's HDD where RPCS3 is being run.
	cbGet->hddFreeSizeKB = 40 * 1024 * 1024 - 256; // Read explanation in cellHddGameCheck

	strcpy_trunc(cbGet->contentInfoPath, dir);
	strcpy_trunc(cbGet->gameDataPath, usrdir);

	// TODO: set correct time
	cbGet->st_atime_ = 0;
	cbGet->st_ctime_ = 0;
	cbGet->st_mtime_ = 0;

	// TODO: calculate data size, if necessary
	cbGet->sizeKB = CELL_GAMEDATA_SIZEKB_NOTCALC;
	cbGet->sysSizeKB = 0; // TODO

	cbGet->getParam.attribute = CELL_GAMEDATA_ATTR_NORMAL;
	cbGet->getParam.parentalLevel = psf::get_integer(sfo, "PARENTAL_LEVEL", 0);
	strcpy_trunc(cbGet->getParam.dataVersion, psf::get_string(sfo, "APP_VER", psf::get_string(sfo, "VERSION", ""))); // Old games do not have APP_VER key
	strcpy_trunc(cbGet->getParam.titleId, psf::get_string(sfo, "TITLE_ID", ""));
	strcpy_trunc(cbGet->getParam.title, psf::get_string(sfo, "TITLE", ""));

	// Localized titles TITLE_00 .. TITLE_NN
	for (u32 i = 0; i < CELL_HDDGAME_SYSP_LANGUAGE_NUM; i++)
	{
		strcpy_trunc(cbGet->getParam.titleLang[i], psf::get_string(sfo, fmt::format("TITLE_%02d", i)));
	}

	lv2_sleep(5000, &ppu);

	funcStat(ppu, cbResult, cbGet, cbSet);

	std::string error_msg;

	switch (cbResult->result)
	{
	case CELL_GAMEDATA_CBRESULT_OK_CANCEL:
	{
		cellGame.warning("cellGameDataCheckCreate2(): callback returned CELL_GAMEDATA_CBRESULT_OK_CANCEL");
		return CELL_OK;
	}
	case CELL_GAMEDATA_CBRESULT_OK:
	{
		// Game confirmed that it wants to create directory
		const auto setParam = cbSet->setParam;

		lv2_sleep(2000, &ppu);

		if (new_data)
		{
			// Creating new data requires the parameter block to be filled
			if (!setParam)
			{
				return CELL_GAMEDATA_ERROR_PARAM;
			}

			if (!fs::create_path(vfs::get(usrdir)))
			{
				return {CELL_GAME_ERROR_ACCESS_ERROR, usrdir};
			}
		}

		if (setParam)
		{
			if (new_data)
			{
				psf::assign(sfo, "CATEGORY", psf::string(3, "GD"));
			}

			psf::assign(sfo, "TITLE_ID", psf::string(TITLEID_SFO_ENTRY_SIZE, setParam->titleId, true));
			psf::assign(sfo, "TITLE", psf::string(CELL_GAME_SYSP_TITLE_SIZE, setParam->title));
			psf::assign(sfo, "VERSION", psf::string(CELL_GAME_SYSP_VERSION_SIZE, setParam->dataVersion));
			psf::assign(sfo, "PARENTAL_LEVEL", +setParam->parentalLevel);

			for (u32 i = 0; i < CELL_HDDGAME_SYSP_LANGUAGE_NUM; i++)
			{
				// Skip empty localized titles
				if (!setParam->titleLang[i][0])
				{
					continue;
				}

				psf::assign(sfo, fmt::format("TITLE_%02d", i), psf::string(CELL_GAME_SYSP_TITLE_SIZE, setParam->titleLang[i]));
			}

			if (!psf::check_registry(sfo))
			{
				// This results in CELL_OK, broken SFO and CELL_GAMEDATA_ERROR_BROKEN on the next load
				// Avoid creation for now
				cellGame.error("Broken SFO parameters: %s", sfo);
				return CELL_OK;
			}

			// Write the updated PARAM.SFO atomically
			fs::pending_file temp(vfs::get(dir + "/PARAM.SFO"));
			temp.file.write(psf::save_object(sfo));
			ensure(temp.commit());
		}

		return CELL_OK;
	}
	case CELL_GAMEDATA_CBRESULT_ERR_NOSPACE:
		cellGame.error("cellGameDataCheckCreate2(): callback returned CELL_GAMEDATA_CBRESULT_ERR_NOSPACE. Space Needed: %d KB", cbResult->errNeedSizeKB);
		error_msg = get_localized_string(localized_string_id::CELL_GAMEDATA_CHECK_NOSPACE, fmt::format("%d", cbResult->errNeedSizeKB).c_str());
		break;
	case CELL_GAMEDATA_CBRESULT_ERR_BROKEN:
		cellGame.error("cellGameDataCheckCreate2(): callback returned CELL_GAMEDATA_CBRESULT_ERR_BROKEN");
		error_msg = get_localized_string(localized_string_id::CELL_GAMEDATA_CHECK_BROKEN, game_dir.c_str());
		break;
	case CELL_GAMEDATA_CBRESULT_ERR_NODATA:
		cellGame.error("cellGameDataCheckCreate2(): callback returned CELL_GAMEDATA_CBRESULT_ERR_NODATA");
		error_msg = get_localized_string(localized_string_id::CELL_GAMEDATA_CHECK_NODATA, game_dir.c_str());
		break;
	case CELL_GAMEDATA_CBRESULT_ERR_INVALID:
		cellGame.error("cellGameDataCheckCreate2(): callback returned CELL_GAMEDATA_CBRESULT_ERR_INVALID. Error message: %s", cbResult->invalidMsg);
		error_msg = get_localized_string(localized_string_id::CELL_GAMEDATA_CHECK_INVALID, fmt::format("%s", cbResult->invalidMsg).c_str());
		break;
	default:
		// Pass the result code as well: the format string expects two arguments
		cellGame.error("cellGameDataCheckCreate2(): callback returned unknown error (code=0x%x). Error message: %s", cbResult->result, cbResult->invalidMsg);
		error_msg = get_localized_string(localized_string_id::CELL_GAMEDATA_CHECK_INVALID, fmt::format("%s", cbResult->invalidMsg).c_str());
		break;
	}

	if (errDialog == CELL_GAMEDATA_ERRDIALOG_ALWAYS)
	{
		// Yield before a blocking dialog is being spawned
		lv2_obj::sleep(ppu);

		// Get user confirmation by opening a blocking dialog
		error_code res = open_msg_dialog(true, CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR | CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK | CELL_MSGDIALOG_TYPE_DISABLE_CANCEL_ON, vm::make_str(error_msg), msg_dialog_source::_cellGame);

		// Reschedule after a blocking dialog returns
		if (ppu.check_state())
		{
			return 0;
		}

		if (res != CELL_OK)
		{
			return CELL_GAMEDATA_ERROR_INTERNAL;
		}
	}
	else
	{
		lv2_sleep(2000, &ppu);
	}

	return CELL_GAMEDATA_ERROR_CBRESULT;
}
// Older SDK variant of cellGameDataCheckCreate2; forwards all arguments to it.
error_code cellGameDataCheckCreate(ppu_thread& ppu, u32 version, vm::cptr<char> dirName, u32 errDialog, vm::ptr<CellGameDataStatCallback> funcStat, u32 container)
{
	cellGame.warning("cellGameDataCheckCreate(version=0x%x, dirName=%s, errDialog=0x%x, funcStat=*0x%x, container=%d)", version, dirName, errDialog, funcStat, container);

	// TODO: almost identical, the only difference is that this function will always calculate the size of game data
	return cellGameDataCheckCreate2(ppu, version, dirName, errDialog, funcStat, container);
}
// Creates a temporary game-data directory (committed later by cellGameContentPermit)
// and seeds its PARAM.SFO from the guest-provided initial parameters.
// init: mandatory initial PARAM.SFO values.
// tmp_contentInfoPath/tmp_usrdirPath: optional outputs receiving the temporary paths.
error_code cellGameCreateGameData(vm::ptr<CellGameSetInitParams> init, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_contentInfoPath, vm::ptr<char[CELL_GAME_PATH_MAX]> tmp_usrdirPath)
{
	cellGame.success("cellGameCreateGameData(init=*0x%x, tmp_contentInfoPath=*0x%x, tmp_usrdirPath=*0x%x)", init, tmp_contentInfoPath, tmp_usrdirPath);

	if (!init)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	auto& perm = g_fxo->get<content_permission>();

	// Requires a prior successful cellGameDataCheck
	const auto _init = acquire_access_lock(perm.init);

	lv2_sleep(2000);

	if (!_init || perm.dir.empty())
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	if (!perm.can_create)
	{
		return CELL_GAME_ERROR_NOTSUPPORTED;
	}

	if (perm.exists)
	{
		return CELL_GAME_ERROR_EXIST;
	}

	// Account for filesystem operations
	lv2_sleep(50'000);

	// Unique temporary name to keep the final move atomic
	std::string dirname = "_GDATA_" + std::to_string(steady_clock::now().time_since_epoch().count());
	std::string tmp_contentInfo = "/dev_hdd0/game/" + dirname;
	std::string tmp_usrdir = "/dev_hdd0/game/" + dirname + "/USRDIR";

	if (!fs::create_dir(vfs::get(tmp_contentInfo)))
	{
		cellGame.error("cellGameCreateGameData(): failed to create directory '%s' (%s)", tmp_contentInfo, fs::g_tls_error);
		return CELL_GAME_ERROR_ACCESS_ERROR; // ???
	}

	// cellGameContentPermit should then move files in non-temporary location and return their non-temporary displacement
	if (tmp_contentInfoPath) strcpy_trunc(*tmp_contentInfoPath, tmp_contentInfo);

	if (!fs::create_dir(vfs::get(tmp_usrdir)))
	{
		cellGame.error("cellGameCreateGameData(): failed to create directory '%s' (%s)", tmp_usrdir, fs::g_tls_error);
		return CELL_GAME_ERROR_ACCESS_ERROR; // ???
	}

	if (tmp_usrdirPath) strcpy_trunc(*tmp_usrdirPath, tmp_usrdir);

	// Remember the temporary host path so cellGameContentPermit can commit or clean it up
	perm.temp = vfs::get(tmp_contentInfo);
	cellGame.success("cellGameCreateGameData(): temporary directory '%s' has been created", tmp_contentInfo);

	// Initial PARAM.SFO parameters (overwrite)
	perm.sfo =
	{
		{ "CATEGORY", psf::string(3, "GD") },
		{ "TITLE_ID", psf::string(TITLEID_SFO_ENTRY_SIZE, init->titleId) },
		{ "TITLE", psf::string(CELL_GAME_SYSP_TITLE_SIZE, init->title) },
		{ "VERSION", psf::string(CELL_GAME_SYSP_VERSION_SIZE, init->version) },
	};

	return CELL_OK;
}
// Deletes the game data directory dirName from /dev_hdd0/game after validating that
// it is actually "GD" content and does not belong to the running boot patch.
error_code cellGameDeleteGameData(vm::cptr<char> dirName)
{
	cellGame.warning("cellGameDeleteGameData(dirName=%s)", dirName);

	if (!dirName)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	const std::string name = dirName.get_ptr();
	const std::string dir = vfs::get("/dev_hdd0/game/"s + name);

	auto& perm = g_fxo->get<content_permission>();

	// Validates the target and removes it from the host filesystem
	auto remove_gd = [&]() -> error_code
	{
		if (Emu.GetCat() == "GD" && Emu.GetDir().substr(Emu.GetDir().find_last_of('/') + 1) == vfs::escape(name))
		{
			// Boot patch cannot delete its own directory
			return CELL_GAME_ERROR_NOTSUPPORTED;
		}

		const auto [sfo, psf_error] = psf::load(dir + "/PARAM.SFO");

		// Only "GD" content may be removed (a missing SFO is tolerated here)
		if (psf::get_string(sfo, "CATEGORY") != "GD" && psf_error != psf::error::stream)
		{
			return {CELL_GAME_ERROR_NOTSUPPORTED, psf_error};
		}

		if (sfo.empty())
		{
			// Nothing to remove
			return CELL_GAME_ERROR_NOTFOUND;
		}

		if (auto id = psf::get_string(sfo, "TITLE_ID"); !id.empty() && id != Emu.GetTitleID())
		{
			cellGame.error("cellGameDeleteGameData(%s): Attempts to delete GameData with TITLE ID which does not match the program's (%s)", id, Emu.GetTitleID());
		}

		// Actually remove game data
		if (!vfs::host::remove_all(dir, rpcs3::utils::get_hdd0_dir(), &g_mp_sys_dev_hdd0, true))
		{
			return {CELL_GAME_ERROR_ACCESS_ERROR, dir};
		}

		return CELL_OK;
	};

	// Retry until either the exclusive init lock or an access lock is obtained
	while (true)
	{
		// Obtain exclusive lock and cancel init
		auto _init = perm.init.init();

		if (!_init)
		{
			// Or access it
			if (auto access = acquire_access_lock(perm.init); access)
			{
				// Cannot remove it when it is accessed by cellGameDataCheck
				// If it is HG data then resort to remove_gd for ERROR_BROKEN
				if (perm.dir == name && perm.can_create)
				{
					return CELL_GAME_ERROR_NOTSUPPORTED;
				}

				return remove_gd();
			}
			else
			{
				// Reacquire lock
				continue;
			}
		}

		auto err = remove_gd();
		_init.cancel();
		return err;
	}
}
// Reads an integer PARAM.SFO entry of the checked content.
// id: CELL_GAME_PARAMID_PARENTAL_LEVEL / RESOLUTION / SOUND_FORMAT.
// value: output pointer; set to 0 when the key is absent.
error_code cellGameGetParamInt(s32 id, vm::ptr<s32> value)
{
	cellGame.warning("cellGameGetParamInt(id=%d, value=*0x%x)", id, value);

	if (!value)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	lv2_sleep(2000);

	auto& perm = g_fxo->get<content_permission>();

	// Requires a prior successful cellGameBootCheck/cellGameDataCheck/cellGamePatchCheck
	const auto init = acquire_access_lock(perm.init);

	if (!init)
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	std::string key;

	// Map the parameter id to its PARAM.SFO key name
	switch(id)
	{
	case CELL_GAME_PARAMID_PARENTAL_LEVEL: key = "PARENTAL_LEVEL"; break;
	case CELL_GAME_PARAMID_RESOLUTION: key = "RESOLUTION"; break;
	case CELL_GAME_PARAMID_SOUND_FORMAT: key = "SOUND_FORMAT"; break;
	default:
	{
		return CELL_GAME_ERROR_INVALID_ID;
	}
	}

	if (!perm.sfo.count(key))
	{
		// TODO: Check if special values need to be set here
		cellGame.warning("cellGameGetParamInt(): id=%d was not found", id);
	}

	*value = psf::get_integer(perm.sfo, key, 0);
	return CELL_OK;
}
// String key flags
// Bitset flags describing which PARAM.SFO access operations a string key permits,
// depending on the active content check mode.
enum class strkey_flag : u32
{
	get_game_data, // reading is allowed for game data PARAM.SFO
	set_game_data, // writing is allowed for game data PARAM.SFO
	get_other,     // reading is allowed for other types of PARAM.SFO

	//set_other, // writing is allowed for other types of PARAM.SFO (not possible)

	__bitset_enum_max
};
// Describes one PARAM.SFO string entry accessible through cellGameGet/SetParamString:
// its key name, maximum stored size and the permitted access operations.
struct string_key_info
{
public:
	string_key_info() = default;

	string_key_info(std::string_view _name, u32 _max_size, bs_t<strkey_flag> _flags)
		: name(_name), max_size(_max_size), flags(_flags)
	{}

	std::string_view name; // PARAM.SFO key name
	u32 max_size = 0;      // maximum size of the stored string

	// Returns true when the requested operation (get or set) is allowed for this key
	// under the given content check mode.
	inline bool is_supported(bool is_setter, content_permission::check_mode mode) const
	{
		// An unchecked state must never reach this point
		if (mode == content_permission::check_mode::not_set)
		{
			fmt::throw_exception("This should never happen!");
		}

		// TODO: it's unclear if patch mode should also support these flags
		if (mode == content_permission::check_mode::game_data || mode == content_permission::check_mode::patch)
		{
			return !!(flags & (is_setter ? strkey_flag::set_game_data : strkey_flag::get_game_data));
		}

		// hdd_game / disc_game: read-only access at most
		return !is_setter && (flags & (strkey_flag::get_other));
	}

private:
	bs_t<strkey_flag> flags{}; // allowed operations
};
// Maps a CELL_GAME_PARAMID_* string parameter id to its PARAM.SFO key descriptor
// (key name, maximum size and allowed operations).
// Returns a default-constructed (empty-name) descriptor for unknown ids.
static string_key_info get_param_string_key(s32 id)
{
	switch (id)
	{
	// TITLE and its localized TITLE_00..TITLE_19 variants
	case CELL_GAME_PARAMID_TITLE: return string_key_info("TITLE", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::get_other + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_DEFAULT: return string_key_info("TITLE", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::get_other + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_JAPANESE: return string_key_info("TITLE_00", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_ENGLISH: return string_key_info("TITLE_01", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_FRENCH: return string_key_info("TITLE_02", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_SPANISH: return string_key_info("TITLE_03", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_GERMAN: return string_key_info("TITLE_04", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_ITALIAN: return string_key_info("TITLE_05", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_DUTCH: return string_key_info("TITLE_06", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_PORTUGUESE: return string_key_info("TITLE_07", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_RUSSIAN: return string_key_info("TITLE_08", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_KOREAN: return string_key_info("TITLE_09", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_CHINESE_T: return string_key_info("TITLE_10", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_CHINESE_S: return string_key_info("TITLE_11", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_FINNISH: return string_key_info("TITLE_12", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_SWEDISH: return string_key_info("TITLE_13", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_DANISH: return string_key_info("TITLE_14", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_NORWEGIAN: return string_key_info("TITLE_15", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_POLISH: return string_key_info("TITLE_16", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_PORTUGUESE_BRAZIL: return string_key_info("TITLE_17", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_ENGLISH_UK: return string_key_info("TITLE_18", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);
	case CELL_GAME_PARAMID_TITLE_TURKISH: return string_key_info("TITLE_19", CELL_GAME_SYSP_TITLE_SIZE, strkey_flag::get_game_data + strkey_flag::set_game_data);

	// Miscellaneous read-only/read-mostly keys
	case CELL_GAME_PARAMID_TITLE_ID: return string_key_info("TITLE_ID", CELL_GAME_SYSP_TITLEID_SIZE, strkey_flag::get_game_data + strkey_flag::get_other);
	case CELL_GAME_PARAMID_VERSION: return string_key_info("VERSION", CELL_GAME_SYSP_VERSION_SIZE, strkey_flag::get_game_data);
	case CELL_GAME_PARAMID_PS3_SYSTEM_VER: return string_key_info("PS3_SYSTEM_VER", CELL_GAME_SYSP_PS3_SYSTEM_VER_SIZE, {}); // TODO
	case CELL_GAME_PARAMID_APP_VER: return string_key_info("APP_VER", CELL_GAME_SYSP_APP_VER_SIZE, strkey_flag::get_game_data + strkey_flag::get_other);
	}

	return {};
}
// Reads a string PARAM.SFO entry of the checked content into a guest buffer.
// id: CELL_GAME_PARAMID_* string parameter id.
// buf/bufsize: destination buffer; the value is truncated to fit.
error_code cellGameGetParamString(s32 id, vm::ptr<char> buf, u32 bufsize)
{
	cellGame.warning("cellGameGetParamString(id=%d, buf=*0x%x, bufsize=%d)", id, buf, bufsize);

	if (!buf || bufsize == 0)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	auto& perm = g_fxo->get<content_permission>();

	lv2_sleep(2000);

	// Requires a prior successful cellGameBootCheck/cellGameDataCheck/cellGamePatchCheck
	const auto init = acquire_access_lock(perm.init);

	if (!init || perm.mode == content_permission::check_mode::not_set)
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	const auto key = get_param_string_key(id);

	if (key.name.empty())
	{
		return CELL_GAME_ERROR_INVALID_ID;
	}

	// Key must be readable in the current check mode
	if (!key.is_supported(false, perm.mode))
	{
		// TODO: this error is possibly only returned during debug mode
		return { CELL_GAME_ERROR_NOTSUPPORTED, "id %d is not supported in the current check mode: %s", id, perm.mode.load() };
	}

	const auto value = psf::get_string(perm.sfo, key.name);

	if (value.empty() && !perm.sfo.count(std::string(key.name)))
	{
		// TODO: Check if special values need to be set here
		cellGame.warning("cellGameGetParamString(): id=%d was not found", id);
	}

	std::span dst(buf.get_ptr(), bufsize);
	strcpy_trunc(dst, value);
	return CELL_OK;
}
// Writes the string parameter identified by `id` into the in-memory PARAM.SFO of the
// currently checked content. Requires create permission and a writable key/mode.
error_code cellGameSetParamString(s32 id, vm::cptr<char> buf)
{
	cellGame.warning("cellGameSetParamString(id=%d, buf=*0x%x %s)", id, buf, buf);

	if (!buf)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// Fixed sleep — presumably emulates firmware call latency; TODO confirm against HW timing
	lv2_sleep(2000);

	auto& perm = g_fxo->get<content_permission>();

	const auto init = acquire_access_lock(perm.init);

	// Fails unless a content check (boot/patch/data) has been performed first
	if (!init || perm.mode == content_permission::check_mode::not_set)
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	const auto key = get_param_string_key(id);

	if (key.name.empty())
	{
		return CELL_GAME_ERROR_INVALID_ID;
	}

	// `true` = write access; also requires game-data create permission
	if (!perm.can_create || !key.is_supported(true, perm.mode))
	{
		return CELL_GAME_ERROR_NOTSUPPORTED;
	}

	// Store the value, clamped to the key's maximum size
	psf::assign(perm.sfo, key.name, psf::string(key.max_size, buf.get_ptr()));
	return CELL_OK;
}
// Computes the on-disk size (in KiB, rounded up to 1 KiB clusters) of the currently
// checked content directory. Writes 0 and returns CELL_OK if the directory is absent.
error_code cellGameGetSizeKB(ppu_thread& ppu, vm::ptr<s32> size)
{
	cellGame.warning("cellGameGetSizeKB(size=*0x%x)", size);

	if (!size)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// Always reset to 0 at start
	*size = 0;

	ppu.state += cpu_flag::wait;

	auto& perm = g_fxo->get<content_permission>();

	const auto init = acquire_access_lock(perm.init);

	if (!init)
	{
		return CELL_GAME_ERROR_FAILURE;
	}

	lv2_obj::sleep(ppu);

	// Remember when we went to sleep so the host-side directory scan time can be
	// subtracted from the emulated call duration below
	const u64 start_sleep = ppu.start_time;

	// Temporary (not yet committed) game data takes precedence over the installed dir
	const std::string local_dir = !perm.temp.empty() ? perm.temp : vfs::get("/dev_hdd0/game/" + perm.dir);

	// Cluster size 1024: sizes are rounded up per file to 1 KiB
	const auto dirsz = fs::get_dir_size(local_dir, 1024);

	// This function is very slow by nature
	// TODO: Check if after first use the result is being cached so the sleep can be reduced in this case
	lv2_sleep(utils::sub_saturate<u64>(dirsz == umax ? 1000 : 200000, get_guest_system_time() - start_sleep), &ppu);

	if (dirsz == umax)
	{
		const auto error = fs::g_tls_error;

		if (!fs::exists(local_dir))
		{
			// Missing directory is not an error: report size 0
			return CELL_OK;
		}
		else
		{
			cellGame.error("cellGameGetSizeKb(): Unknown failure on calculating directory size '%s' (%s)", local_dir, error);
			return CELL_GAME_ERROR_ACCESS_ERROR;
		}
	}

	ppu.check_state();
	*size = ::narrow<s32>(dirsz / 1024);

	return CELL_OK;
}
// Stub: validates the output pointer only; the update path is not filled in yet.
error_code cellGameGetDiscContentInfoUpdatePath(vm::ptr<char> updatePath)
{
	cellGame.todo("cellGameGetDiscContentInfoUpdatePath(updatePath=*0x%x)", updatePath);

	if (updatePath)
	{
		return CELL_OK;
	}

	return CELL_GAME_ERROR_PARAM;
}
// Stub: validates the output pointer only; the web content path is not filled in yet.
error_code cellGameGetLocalWebContentPath(vm::ptr<char> contentPath)
{
	cellGame.todo("cellGameGetLocalWebContentPath(contentPath=*0x%x)", contentPath);

	if (contentPath)
	{
		return CELL_OK;
	}

	return CELL_GAME_ERROR_PARAM;
}
// Shows the system error dialog for broken/insufficient game content. The *_EXIT
// dialog types additionally request application termination after the dialog closes.
error_code cellGameContentErrorDialog(s32 type, s32 errNeedSizeKB, vm::cptr<char> dirName)
{
	cellGame.warning("cellGameContentErrorDialog(type=%d, errNeedSizeKB=%d, dirName=%s)", type, errNeedSizeKB, dirName);

	std::string error_msg;

	switch (type)
	{
	case CELL_GAME_ERRDIALOG_BROKEN_GAMEDATA:
		// Game data is corrupted. The application will continue.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_BROKEN_GAMEDATA);
		break;
	case CELL_GAME_ERRDIALOG_BROKEN_HDDGAME:
		// HDD boot game is corrupted. The application will continue.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_BROKEN_HDDGAME);
		break;
	case CELL_GAME_ERRDIALOG_NOSPACE:
		// Not enough available space. The application will continue.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_NOSPACE, fmt::format("%d", errNeedSizeKB).c_str());
		break;
	case CELL_GAME_ERRDIALOG_BROKEN_EXIT_GAMEDATA:
		// Game data is corrupted. The application will be terminated.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_BROKEN_EXIT_GAMEDATA);
		break;
	case CELL_GAME_ERRDIALOG_BROKEN_EXIT_HDDGAME:
		// HDD boot game is corrupted. The application will be terminated.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_BROKEN_EXIT_HDDGAME);
		break;
	case CELL_GAME_ERRDIALOG_NOSPACE_EXIT:
		// Not enough available space. The application will be terminated.
		error_msg = get_localized_string(localized_string_id::CELL_GAME_ERROR_NOSPACE_EXIT, fmt::format("%d", errNeedSizeKB).c_str());
		break;
	default:
		return CELL_GAME_ERROR_PARAM;
	}

	if (dirName)
	{
		// The directory name must be null-terminated within CELL_GAME_DIRNAME_SIZE
		if (!memchr(dirName.get_ptr(), '\0', CELL_GAME_DIRNAME_SIZE))
		{
			return CELL_GAME_ERROR_PARAM;
		}

		// Append the directory name on a separate line of the dialog message
		error_msg += '\n';
		error_msg += get_localized_string(localized_string_id::CELL_GAME_ERROR_DIR_NAME, fmt::format("%s", dirName).c_str());
	}

	// Types greater than CELL_GAME_ERRDIALOG_NOSPACE are the *_EXIT variants (see switch above)
	return open_exit_dialog(error_msg, type > CELL_GAME_ERRDIALOG_NOSPACE, msg_dialog_source::_cellGame);
}
// Installs a theme file (.p3t) from the game's USRDIR into /dev_hdd0/theme.
//
// @param usrdirPath  Guest path of the directory containing the theme file
// @param fileName    Name of the theme file (expected to end in ".p3t")
// @param option      CELL_GAME_THEME_OPTION_NONE or CELL_GAME_THEME_OPTION_APPLY
// @return CELL_OK on success, CELL_GAME_ERROR_* otherwise
error_code cellGameThemeInstall(vm::cptr<char> usrdirPath, vm::cptr<char> fileName, u32 option)
{
	cellGame.todo("cellGameThemeInstall(usrdirPath=%s, fileName=%s, option=0x%x)", usrdirPath, fileName, option);

	if (!usrdirPath || !fileName || !memchr(usrdirPath.get_ptr(), '\0', CELL_GAME_PATH_MAX) || option > CELL_GAME_THEME_OPTION_APPLY)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	const std::string src_path = vfs::get(fmt::format("%s/%s", usrdirPath, fileName));

	// Use hash to get a hopefully unique filename
	std::string hash;

	if (fs::file theme = fs::file(src_path))
	{
		u32 magic{};

		// A valid theme must have the ".p3t" extension AND begin with the "P3TF" magic.
		// Fixed: the extension check was inverted and rejected every correctly named theme file.
		if (!src_path.ends_with(".p3t") || !theme.read(magic) || magic != "P3TF"_u32)
		{
			return CELL_GAME_ERROR_INVALID_THEME_FILE;
		}

		hash = sha256_get_hash(theme.to_string().c_str(), theme.size(), true);
	}
	else
	{
		return CELL_GAME_ERROR_NOTFOUND;
	}

	// Content-hash based destination name avoids duplicate installs of the same theme
	const std::string dst_path = vfs::get(fmt::format("/dev_hdd0/theme/%s_%s.p3t", Emu.GetTitleID(), hash)); // TODO: this is renamed with some other scheme

	if (fs::is_file(dst_path))
	{
		cellGame.notice("cellGameThemeInstall: theme already installed: '%s'", dst_path);
	}
	else
	{
		cellGame.notice("cellGameThemeInstall: copying theme from '%s' to '%s'", src_path, dst_path);

		if (!fs::copy_file(src_path, dst_path, false)) // TODO: new file is write protected
		{
			cellGame.error("cellGameThemeInstall: failed to copy theme from '%s' to '%s' (error=%s)", src_path, dst_path, fs::g_tls_error);
			return CELL_GAME_ERROR_ACCESS_ERROR;
		}
	}

	// The real firmware removes the source file after install; intentionally disabled here
	if (false && !fs::remove_file(src_path)) // TODO: disabled for now
	{
		cellGame.error("cellGameThemeInstall: failed to remove source theme from '%s' (error=%s)", src_path, fs::g_tls_error);
	}

	if (option == CELL_GAME_THEME_OPTION_APPLY)
	{
		// TODO: apply new theme
	}

	return CELL_OK;
}
// Installs a theme from a guest memory buffer into /dev_hdd0/theme. If the whole file
// does not fit in the buffer, `func` is invoked after each chunk so the guest can
// refill the buffer (and may cancel the installation).
error_code cellGameThemeInstallFromBuffer(ppu_thread& ppu, u32 fileSize, u32 bufSize, vm::ptr<void> buf, vm::ptr<CellGameThemeInstallCallback> func, u32 option)
{
	cellGame.todo("cellGameThemeInstallFromBuffer(fileSize=%d, bufSize=%d, buf=*0x%x, func=*0x%x, option=0x%x)", fileSize, bufSize, buf, func, option);

	// A callback is mandatory when the file is larger than the buffer
	if (!buf || !fileSize || (fileSize > bufSize && !func) || bufSize < CELL_GAME_THEMEINSTALL_BUFSIZE_MIN || option > CELL_GAME_THEME_OPTION_APPLY)
	{
		return CELL_GAME_ERROR_PARAM;
	}

	// NOTE(review): hash covers only the first fileSize bytes of the initial buffer
	// contents even in the chunked path — confirm this matches the intended naming scheme
	const std::string hash = sha256_get_hash(reinterpret_cast<char*>(buf.get_ptr()), fileSize, true);

	const std::string dst_path = vfs::get(fmt::format("/dev_hdd0/theme/%s_%s.p3t", Emu.GetTitleID(), hash)); // TODO: this is renamed with some scheme

	if (fs::file theme = fs::file(dst_path, fs::write_new + fs::isfile)) // TODO: new file is write protected
	{
		// The buffer must start with the "P3TF" theme magic
		const u32 magic = *reinterpret_cast<u32*>(buf.get_ptr());

		if (magic != "P3TF"_u32)
		{
			return CELL_GAME_ERROR_INVALID_THEME_FILE;
		}

		if (func && bufSize < fileSize)
		{
			// Chunked path: write bufSize-sized pieces, reporting progress via func
			cellGame.notice("cellGameThemeInstallFromBuffer: writing theme with func callback to '%s'", dst_path);

			for (u32 file_offset = 0; file_offset < fileSize;)
			{
				const u32 read_size = std::min(bufSize, fileSize - file_offset);
				cellGame.notice("cellGameThemeInstallFromBuffer: writing %d bytes at pos %d", read_size, file_offset);

				// NOTE(review): reads from buf + file_offset, which exceeds the guest buffer
				// (bufSize bytes) once file_offset >= bufSize — presumably the callback should
				// refill buf and the write should start at offset 0; verify against LLE behavior
				if (theme.write(reinterpret_cast<u8*>(buf.get_ptr()) + file_offset, read_size) != read_size)
				{
					cellGame.error("cellGameThemeInstallFromBuffer: failed to write to destination file '%s' (error=%s)", dst_path, fs::g_tls_error);

					if (fs::g_tls_error == fs::error::nospace)
					{
						return CELL_GAME_ERROR_NOSPACE;
					}

					return CELL_GAME_ERROR_ACCESS_ERROR;
				}

				file_offset += read_size;

				// Report status with callback
				cellGame.notice("cellGameThemeInstallFromBuffer: func(fileOffset=%d, readSize=%d, buf=0x%x)", file_offset, read_size, buf);
				const s32 result = func(ppu, file_offset, read_size, buf);

				if (result == CELL_GAME_RET_CANCEL) // same as CELL_GAME_CBRESULT_CANCEL
				{
					cellGame.notice("cellGameThemeInstallFromBuffer: theme installation was cancelled");
					return not_an_error(CELL_GAME_RET_CANCEL);
				}
			}
		}
		else
		{
			// Single-shot path: the whole file is already in the buffer
			cellGame.notice("cellGameThemeInstallFromBuffer: writing theme to '%s'", dst_path);

			if (theme.write(buf.get_ptr(), fileSize) != fileSize)
			{
				cellGame.error("cellGameThemeInstallFromBuffer: failed to write to destination file '%s' (error=%s)", dst_path, fs::g_tls_error);

				if (fs::g_tls_error == fs::error::nospace)
				{
					return CELL_GAME_ERROR_NOSPACE;
				}

				return CELL_GAME_ERROR_ACCESS_ERROR;
			}
		}
	}
	else if (fs::g_tls_error == fs::error::exist) // Do not overwrite files, but continue.
	{
		cellGame.notice("cellGameThemeInstallFromBuffer: theme already installed: '%s'", dst_path);
	}
	else
	{
		cellGame.error("cellGameThemeInstallFromBuffer: failed to open destination file '%s' (error=%s)", dst_path, fs::g_tls_error);
		return CELL_GAME_ERROR_ACCESS_ERROR;
	}

	if (option == CELL_GAME_THEME_OPTION_APPLY)
	{
		// TODO: apply new theme
	}

	return CELL_OK;
}
// Fills the caller's CellDiscGameSystemFileParam with the parental level and title ID
// read from the boot disc's PARAM.SFO (/dev_bdvd/PS3_GAME).
error_code cellDiscGameGetBootDiscInfo(vm::ptr<CellDiscGameSystemFileParam> getParam)
{
	cellGame.warning("cellDiscGameGetBootDiscInfo(getParam=*0x%x)", getParam);

	if (!getParam)
	{
		return CELL_DISCGAME_ERROR_PARAM;
	}

	// Always sets 0 at first dword (clears the first 4 bytes of titleId)
	write_to_ptr<u32>(getParam->titleId, 0);

	// Fixed sleep — presumably emulates firmware call latency; TODO confirm against HW timing
	lv2_sleep(2000);

	// This is also called by non-disc games, see NPUB90029
	static const std::string dir = "/dev_bdvd/PS3_GAME"s;

	if (!fs::is_dir(vfs::get(dir)))
	{
		return CELL_DISCGAME_ERROR_NOT_DISCBOOT;
	}

	// Copy only the fields present in the disc's PARAM.SFO; absent keys leave the output unchanged
	const psf::registry psf = psf::load_object(vfs::get(dir + "/PARAM.SFO"));

	if (psf.contains("PARENTAL_LEVEL")) getParam->parentalLevel = ::at32(psf, "PARENTAL_LEVEL").as_integer();
	if (psf.contains("TITLE_ID")) strcpy_trunc(getParam->titleId, ::at32(psf, "TITLE_ID").as_string());

	return CELL_OK;
}
// Registers guest eject/insert handlers for disc change events (cellDiscGame variant).
error_code cellDiscGameRegisterDiscChangeCallback(vm::ptr<CellDiscGameDiscEjectCallback> funcEject, vm::ptr<CellDiscGameDiscInsertCallback> funcInsert)
{
	cellGame.warning("cellDiscGameRegisterDiscChangeCallback(funcEject=*0x%x, funcInsert=*0x%x)", funcEject, funcInsert);

	auto& manager = g_fxo->get<disc_change_manager>();
	return manager.register_callbacks(funcEject, funcInsert);
}
// Removes any previously registered disc change handlers (cellDiscGame variant).
error_code cellDiscGameUnregisterDiscChangeCallback()
{
	cellGame.warning("cellDiscGameUnregisterDiscChangeCallback()");

	auto& manager = g_fxo->get<disc_change_manager>();
	return manager.unregister_callbacks();
}
// Registers guest eject/insert handlers for disc change events (cellGame variant).
error_code cellGameRegisterDiscChangeCallback(vm::ptr<CellGameDiscEjectCallback> funcEject, vm::ptr<CellGameDiscInsertCallback> funcInsert)
{
	cellGame.warning("cellGameRegisterDiscChangeCallback(funcEject=*0x%x, funcInsert=*0x%x)", funcEject, funcInsert);

	auto& manager = g_fxo->get<disc_change_manager>();
	return manager.register_callbacks(funcEject, funcInsert);
}
// Removes any previously registered disc change handlers (cellGame variant).
error_code cellGameUnregisterDiscChangeCallback()
{
	cellGame.warning("cellGameUnregisterDiscChangeCallback()");

	auto& manager = g_fxo->get<disc_change_manager>();
	return manager.unregister_callbacks();
}
// Registers the game-data related HLE functions that live in the cellSysutil module
// (called from the cellSysutil module initializer).
void cellSysutil_GameData_init()
{
	REG_FUNC(cellSysutil, cellHddGameCheck);
	REG_FUNC(cellSysutil, cellHddGameCheck2);
	REG_FUNC(cellSysutil, cellHddGameGetSizeKB);
	REG_FUNC(cellSysutil, cellHddGameSetSystemVer);
	REG_FUNC(cellSysutil, cellHddGameExitBroken);
	REG_FUNC(cellSysutil, cellGameDataGetSizeKB);
	REG_FUNC(cellSysutil, cellGameDataSetSystemVer);
	REG_FUNC(cellSysutil, cellGameDataExitBroken);
	REG_FUNC(cellSysutil, cellGameDataCheckCreate);
	REG_FUNC(cellSysutil, cellGameDataCheckCreate2);
	REG_FUNC(cellSysutil, cellDiscGameGetBootDiscInfo);
	REG_FUNC(cellSysutil, cellDiscGameRegisterDiscChangeCallback);
	REG_FUNC(cellSysutil, cellDiscGameUnregisterDiscChangeCallback);
	REG_FUNC(cellSysutil, cellGameRegisterDiscChangeCallback);
	REG_FUNC(cellSysutil, cellGameUnregisterDiscChangeCallback);
}
// Static module definition: registers all cellGame HLE functions and the hidden
// module-internal variables used by the check/create callback machinery.
DECLARE(ppu_module_manager::cellGame)("cellGame", []()
{
	REG_FUNC(cellGame, cellGameBootCheck);
	REG_FUNC(cellGame, cellGamePatchCheck);
	REG_FUNC(cellGame, cellGameDataCheck);
	REG_FUNC(cellGame, cellGameContentPermit);
	REG_FUNC(cellGame, cellGameCreateGameData);
	REG_FUNC(cellGame, cellGameDeleteGameData);
	REG_FUNC(cellGame, cellGameGetParamInt);
	REG_FUNC(cellGame, cellGameGetParamString);
	REG_FUNC(cellGame, cellGameSetParamString);
	REG_FUNC(cellGame, cellGameGetSizeKB);
	REG_FUNC(cellGame, cellGameGetDiscContentInfoUpdatePath);
	REG_FUNC(cellGame, cellGameGetLocalWebContentPath);
	REG_FUNC(cellGame, cellGameContentErrorDialog);
	REG_FUNC(cellGame, cellGameThemeInstall);
	REG_FUNC(cellGame, cellGameThemeInstallFromBuffer);

	// Hidden variables are not exported to the guest symbol table
	REG_VAR(cellGame, g_stat_get).flag(MFF_HIDDEN);
	REG_VAR(cellGame, g_stat_set).flag(MFF_HIDDEN);
	REG_VAR(cellGame, g_file_param).flag(MFF_HIDDEN);
	REG_VAR(cellGame, g_cb_result).flag(MFF_HIDDEN);
});
| 59,787
|
C++
|
.cpp
| 1,556
| 35.71144
| 208
| 0.713636
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,317
|
cellSailRec.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSailRec.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "cellSail.h"
LOG_CHANNEL(cellSailRec);
// ---------------------------------------------------------------------------
// cellSailRec API surface. Every function below is an unimplemented stub that
// logs via UNIMPLEMENTED_FUNC and reports CELL_OK so titles can keep running.
// ---------------------------------------------------------------------------

// --- Recording profile setup ---

error_code cellSailProfileSetEsAudioParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailProfileSetEsVideoParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailProfileSetStreamParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

// --- Video converter ---

error_code cellSailVideoConverterCanProcess()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailVideoConverterProcess()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailVideoConverterCanGetResult()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailVideoConverterGetResult()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

// --- Audio feeder ---

error_code cellSailFeederAudioInitialize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederAudioFinalize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederAudioNotifyCallCompleted()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederAudioNotifyFrameOut()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederAudioNotifySessionEnd()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederAudioNotifySessionError()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

// --- Video feeder ---

error_code cellSailFeederVideoInitialize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederVideoFinalize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederVideoNotifyCallCompleted()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederVideoNotifyFrameOut()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederVideoNotifySessionEnd()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailFeederVideoNotifySessionError()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

// --- Recorder lifecycle and control ---

error_code cellSailRecorderInitialize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderFinalize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderSetFeederAudio()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderSetFeederVideo()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderSetParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderGetParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderSubscribeEvent()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderUnsubscribeEvent()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderReplaceEventHandler()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderBoot()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderCreateProfile()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderDestroyProfile()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderCreateVideoConverter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderDestroyVideoConverter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderOpenStream()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderCloseStream()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderStart()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderStop()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderCancel()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderRegisterComposer()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderUnregisterComposer()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailRecorderDumpImage()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

// --- Composer ---

error_code cellSailComposerInitialize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerFinalize()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetStreamParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsAudioParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsUserParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsVideoParameter()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsAudioAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsUserAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerGetEsVideoAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerTryGetEsAudioAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerTryGetEsUserAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerTryGetEsVideoAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerReleaseEsAudioAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerReleaseEsUserAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerReleaseEsVideoAu()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerNotifyCallCompleted()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}

error_code cellSailComposerNotifySessionError()
{
	UNIMPLEMENTED_FUNC(cellSailRec);
	return CELL_OK;
}
// Static module definition: registers every cellSailRec stub above with the HLE
// function table.
DECLARE(ppu_module_manager::cellSailRec)("cellSailRec", []()
{
	// Related modules with no functions of their own — declared so the linker/loader
	// recognizes them; presumably pulled in alongside cellSailRec by titles (TODO confirm)
	static ppu_static_module cellMp4("cellMp4");
	static ppu_static_module cellApostSrcMini("cellApostSrcMini");

	REG_FUNC(cellSailRec, cellSailProfileSetEsAudioParameter);
	REG_FUNC(cellSailRec, cellSailProfileSetEsVideoParameter);
	REG_FUNC(cellSailRec, cellSailProfileSetStreamParameter);

	REG_FUNC(cellSailRec, cellSailVideoConverterCanProcess);
	REG_FUNC(cellSailRec, cellSailVideoConverterProcess);
	REG_FUNC(cellSailRec, cellSailVideoConverterCanGetResult);
	REG_FUNC(cellSailRec, cellSailVideoConverterGetResult);

	REG_FUNC(cellSailRec, cellSailFeederAudioInitialize);
	REG_FUNC(cellSailRec, cellSailFeederAudioFinalize);
	REG_FUNC(cellSailRec, cellSailFeederAudioNotifyCallCompleted);
	REG_FUNC(cellSailRec, cellSailFeederAudioNotifyFrameOut);
	REG_FUNC(cellSailRec, cellSailFeederAudioNotifySessionEnd);
	REG_FUNC(cellSailRec, cellSailFeederAudioNotifySessionError);

	REG_FUNC(cellSailRec, cellSailFeederVideoInitialize);
	REG_FUNC(cellSailRec, cellSailFeederVideoFinalize);
	REG_FUNC(cellSailRec, cellSailFeederVideoNotifyCallCompleted);
	REG_FUNC(cellSailRec, cellSailFeederVideoNotifyFrameOut);
	REG_FUNC(cellSailRec, cellSailFeederVideoNotifySessionEnd);
	REG_FUNC(cellSailRec, cellSailFeederVideoNotifySessionError);

	REG_FUNC(cellSailRec, cellSailRecorderInitialize);
	REG_FUNC(cellSailRec, cellSailRecorderFinalize);
	REG_FUNC(cellSailRec, cellSailRecorderSetFeederAudio);
	REG_FUNC(cellSailRec, cellSailRecorderSetFeederVideo);
	REG_FUNC(cellSailRec, cellSailRecorderSetParameter);
	REG_FUNC(cellSailRec, cellSailRecorderGetParameter);
	REG_FUNC(cellSailRec, cellSailRecorderSubscribeEvent);
	REG_FUNC(cellSailRec, cellSailRecorderUnsubscribeEvent);
	REG_FUNC(cellSailRec, cellSailRecorderReplaceEventHandler);
	REG_FUNC(cellSailRec, cellSailRecorderBoot);
	REG_FUNC(cellSailRec, cellSailRecorderCreateProfile);
	REG_FUNC(cellSailRec, cellSailRecorderDestroyProfile);
	REG_FUNC(cellSailRec, cellSailRecorderCreateVideoConverter);
	REG_FUNC(cellSailRec, cellSailRecorderDestroyVideoConverter);
	REG_FUNC(cellSailRec, cellSailRecorderOpenStream);
	REG_FUNC(cellSailRec, cellSailRecorderCloseStream);
	REG_FUNC(cellSailRec, cellSailRecorderStart);
	REG_FUNC(cellSailRec, cellSailRecorderStop);
	REG_FUNC(cellSailRec, cellSailRecorderCancel);
	REG_FUNC(cellSailRec, cellSailRecorderRegisterComposer);
	REG_FUNC(cellSailRec, cellSailRecorderUnregisterComposer);
	REG_FUNC(cellSailRec, cellSailRecorderDumpImage);

	REG_FUNC(cellSailRec, cellSailComposerInitialize);
	REG_FUNC(cellSailRec, cellSailComposerFinalize);
	REG_FUNC(cellSailRec, cellSailComposerGetStreamParameter);
	REG_FUNC(cellSailRec, cellSailComposerGetEsAudioParameter);
	REG_FUNC(cellSailRec, cellSailComposerGetEsUserParameter);
	REG_FUNC(cellSailRec, cellSailComposerGetEsVideoParameter);
	REG_FUNC(cellSailRec, cellSailComposerGetEsAudioAu);
	REG_FUNC(cellSailRec, cellSailComposerGetEsUserAu);
	REG_FUNC(cellSailRec, cellSailComposerGetEsVideoAu);
	REG_FUNC(cellSailRec, cellSailComposerTryGetEsAudioAu);
	REG_FUNC(cellSailRec, cellSailComposerTryGetEsUserAu);
	REG_FUNC(cellSailRec, cellSailComposerTryGetEsVideoAu);
	REG_FUNC(cellSailRec, cellSailComposerReleaseEsAudioAu);
	REG_FUNC(cellSailRec, cellSailComposerReleaseEsUserAu);
	REG_FUNC(cellSailRec, cellSailComposerReleaseEsVideoAu);
	REG_FUNC(cellSailRec, cellSailComposerNotifyCallCompleted);
	REG_FUNC(cellSailRec, cellSailComposerNotifySessionError);
});
| 9,402
|
C++
|
.cpp
| 357
| 24.655462
| 63
| 0.851192
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,318
|
cellAtracXdec.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellAtracXdec.cpp
|
#include "stdafx.h"
#include "Emu/perf_meter.hpp"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "Emu/Cell/lv2/sys_ppu_thread.h"
#include "Emu/savestate_utils.hpp"
#include "sysPrxForUser.h"
#include "util/asm.hpp"
#include "util/media_utils.h"
#include "cellAtracXdec.h"
// ADEC core-ops tables exported to guest memory, one per ATRAC3plus variant
// (2ch/6ch/8ch plus the generic entry)
vm::gvar<CellAdecCoreOps> g_cell_adec_core_ops_atracx2ch;
vm::gvar<CellAdecCoreOps> g_cell_adec_core_ops_atracx6ch;
vm::gvar<CellAdecCoreOps> g_cell_adec_core_ops_atracx8ch;
vm::gvar<CellAdecCoreOps> g_cell_adec_core_ops_atracx;

LOG_CHANNEL(cellAtracXdec);
// Formatter specialization: renders CellAtracXdecError values as their enum names
// in log output (unknown values fall through to the generic representation).
template <>
void fmt_class_string<CellAtracXdecError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](CellAtracXdecError value)
	{
		switch (value)
		{
		STR_CASE(CELL_ADEC_ERROR_ATX_OK); // CELL_ADEC_ERROR_ATX_OFFSET, CELL_ADEC_ERROR_ATX_NONE
		STR_CASE(CELL_ADEC_ERROR_ATX_BUSY);
		STR_CASE(CELL_ADEC_ERROR_ATX_EMPTY);
		STR_CASE(CELL_ADEC_ERROR_ATX_ATSHDR);
		STR_CASE(CELL_ADEC_ERROR_ATX_NON_FATAL);
		STR_CASE(CELL_ADEC_ERROR_ATX_NOT_IMPLE);
		STR_CASE(CELL_ADEC_ERROR_ATX_PACK_CE_OVERFLOW);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILLEGAL_NPROCQUS);
		STR_CASE(CELL_ADEC_ERROR_ATX_FATAL);
		STR_CASE(CELL_ADEC_ERROR_ATX_ENC_OVERFLOW);
		STR_CASE(CELL_ADEC_ERROR_ATX_PACK_CE_UNDERFLOW);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDCT);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GAINADJ);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDSF);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_SPECTRA);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GHWAVE);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_SHEADER);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL_A);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL_B);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL_C);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL_D);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDWL_E);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDSF_A);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDSF_B);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDSF_C);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDSF_D);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_IDCT_A);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GC_NGC);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GC_IDLEV_A);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GC_IDLOC_A);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GC_IDLEV_B);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_GC_IDLOC_B);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_SN_NWVS);
		STR_CASE(CELL_ADEC_ERROR_ATX_FATAL_HANDLE);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_SAMPLING_FREQ);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_CH_CONFIG_INDEX);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_NBYTES);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_BLOCK_NUM);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_BLOCK_ID);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_CHANNELS);
		STR_CASE(CELL_ADEC_ERROR_ATX_UNINIT_BLOCK_SPECIFIED);
		STR_CASE(CELL_ADEC_ERROR_ATX_POSCFG_PRESENT);
		STR_CASE(CELL_ADEC_ERROR_ATX_BUFFER_OVERFLOW);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_BLK_TYPE_ID);
		STR_CASE(CELL_ADEC_ERROR_ATX_UNPACK_CHANNEL_BLK_FAILED);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_BLK_ID_USED_1);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_BLK_ID_USED_2);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILLEGAL_ENC_SETTING);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILLEGAL_DEC_SETTING);
		STR_CASE(CELL_ADEC_ERROR_ATX_ASSERT_NSAMPLES);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_SYNCWORD);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_SAMPLING_FREQ);
		STR_CASE(CELL_ADEC_ERROR_ATX_ILL_CH_CONFIG_INDEX);
		STR_CASE(CELL_ADEC_ERROR_ATX_RAW_DATA_FRAME_SIZE_OVER);
		STR_CASE(CELL_ADEC_ERROR_ATX_SYNTAX_ENHANCE_LENGTH_OVER);
		STR_CASE(CELL_ADEC_ERROR_ATX_SPU_INTERNAL_FAIL);
		}

		return unknown;
	});
}
// Returns the SPURS work memory size required for the given number of input
// channels, or 0xFFFFFFFF (-1 as u32) for unsupported counts (0, 5, or > 8).
constexpr u32 atracXdecGetSpursMemSize(u32 nch_in)
{
	constexpr u32 unsupported = static_cast<u32>(-1);

	// Indexed by input channel count; indices 0 and 5 have no valid configuration
	constexpr u32 mem_sizes[]
	{
		unsupported, // 0
		0x6000,      // 1
		0x6000,      // 2
		0x12880,     // 3
		0x19c80,     // 4
		unsupported, // 5
		0x23080,     // 6
		0x2a480,     // 7
		0x2c480,     // 8
	};

	return nch_in <= 8 ? mem_sizes[nch_in] : unsupported;
}
// Allocates the FFmpeg ATRAC3plus decoder objects (context, packet, frame) and
// installs a custom get_buffer2() so decoded samples land directly in guest work
// memory instead of FFmpeg-owned buffers. Throws on any allocation failure.
void AtracXdecDecoder::alloc_avcodec()
{
	codec = avcodec_find_decoder(AV_CODEC_ID_ATRAC3P);

	if (!codec)
	{
		fmt::throw_exception("avcodec_find_decoder() failed");
	}

	ensure(!(codec->capabilities & AV_CODEC_CAP_SUBFRAMES));

	ctx = avcodec_alloc_context3(codec);

	if (!ctx)
	{
		fmt::throw_exception("avcodec_alloc_context3() failed");
	}

	// Allows FFmpeg to output directly into guest memory
	ctx->opaque = this;
	ctx->thread_type = FF_THREAD_SLICE; // Silences a warning by FFmpeg about requesting frame threading with a custom get_buffer2(). Default is FF_THREAD_FRAME & FF_THREAD_SLICE

	ctx->get_buffer2 = [](AVCodecContext* s, AVFrame* frame, int /*flags*/) -> int
	{
		// Planar output: each channel plane is placed in work_mem after the input
		// frame area (ATXDEC_MAX_FRAME_LENGTH bytes), one f32 plane per channel
		for (s32 i = 0; i < frame->ch_layout.nb_channels; i++)
		{
			frame->data[i] = static_cast<AtracXdecDecoder*>(s->opaque)->work_mem.get_ptr() + ATXDEC_MAX_FRAME_LENGTH + ATXDEC_SAMPLES_PER_FRAME * sizeof(f32) * i;
			frame->linesize[i] = ATXDEC_SAMPLES_PER_FRAME * sizeof(f32);
		}

		// No-op deleter: the memory belongs to the guest, FFmpeg must not free it
		frame->buf[0] = av_buffer_create(frame->data[0], ATXDEC_SAMPLES_PER_FRAME * sizeof(f32) * frame->ch_layout.nb_channels, [](void*, uint8_t*){}, nullptr, 0);

		return 0;
	};

	packet = av_packet_alloc();

	if (!packet)
	{
		fmt::throw_exception("av_packet_alloc() failed");
	}

	frame = av_frame_alloc();

	if (!frame)
	{
		fmt::throw_exception("av_frame_alloc() failed");
	}
}
// Releases all FFmpeg objects allocated by alloc_avcodec(); each av_*_free()
// nulls its pointer, so repeated calls are safe.
void AtracXdecDecoder::free_avcodec()
{
	av_packet_free(&packet);
	av_frame_free(&frame);
	avcodec_free_context(&ctx);
}
// (Re)opens the FFmpeg codec context with the current stream parameters
// (nbytes/nch_in/sampling_freq) and points the reusable packet at the guest
// work memory where input frames are staged. Throws on FFmpeg errors.
void AtracXdecDecoder::init_avcodec()
{
	// Close first so the context can be reconfigured for a new sequence
	if (int err = avcodec_close(ctx); err)
	{
		fmt::throw_exception("avcodec_close() failed (err=0x%x='%s')", err, utils::av_error_to_string(err));
	}

	ctx->block_align = nbytes;
	ctx->ch_layout.nb_channels = nch_in;
	ctx->sample_rate = sampling_freq;

	if (int err = avcodec_open2(ctx, codec, nullptr); err)
	{
		fmt::throw_exception("avcodec_open2() failed (err=0x%x='%s')", err, utils::av_error_to_string(err));
	}

	// Input packet reads straight from guest work memory; no-op deleter since
	// FFmpeg does not own that buffer
	packet->data = work_mem.get_ptr();
	packet->size = nbytes;
	packet->buf = av_buffer_create(work_mem.get_ptr(), nbytes, [](void*, uint8_t*){}, nullptr, 0);
}
// Validates and stores the stream configuration (sample rate, channel config
// index, frame length). On any invalid value, clears config_is_set and returns
// 0x80004005. Validation order mirrors the LLE SPU-side checks.
error_code AtracXdecDecoder::set_config_info(u32 sampling_freq, u32 ch_config_idx, u32 nbytes)
{
	cellAtracXdec.notice("AtracXdecDecoder::set_config_info(sampling_freq=%d, ch_config_idx=%d, nbytes=0x%x)", sampling_freq, ch_config_idx, nbytes);

	this->sampling_freq = sampling_freq;
	this->ch_config_idx = ch_config_idx;
	this->nbytes = nbytes;
	this->nbytes_128_aligned = utils::align(nbytes, 0x80);

	// Channel config index maps to input channel count: 1..4 -> 1..4 channels,
	// 5..7 -> 6..8 channels (index 5 and up skip the 5-channel layout)
	this->nch_in = ch_config_idx <= 4 ? ch_config_idx : ch_config_idx + 1;

	// Must be checked before indexing ATXDEC_NCH_BLOCKS_MAP below
	if (ch_config_idx > 7u)
	{
		this->config_is_set = false;
		return { 0x80004005, "AtracXdecDecoder::set_config_info() failed: Invalid channel configuration: %d", ch_config_idx };
	}

	this->nch_blocks = ATXDEC_NCH_BLOCKS_MAP[ch_config_idx];

	// These checks are performed on the LLE SPU thread
	if (ch_config_idx == 0u)
	{
		this->config_is_set = false;
		return { 0x80004005, "AtracXdecDecoder::set_config_info() failed: Invalid channel configuration: %d", ch_config_idx };
	}

	if (sampling_freq != 48000u && sampling_freq != 44100u) // 32kHz is not supported, even though official docs claim it is
	{
		this->config_is_set = false;
		return { 0x80004005, "AtracXdecDecoder::set_config_info() failed: Invalid sample rate: %d", sampling_freq };
	}

	if (nbytes == 0u || nbytes > ATXDEC_MAX_FRAME_LENGTH)
	{
		this->config_is_set = false;
		return { 0x80004005, "AtracXdecDecoder::set_config_info() failed: Invalid frame length: 0x%x", nbytes };
	}

	this->config_is_set = true;
	return CELL_OK;
}
// Stores the PCM output format and output channel count, then (re)initializes
// the FFmpeg context. Returns 0x80004005 for an invalid PCM word size.
error_code AtracXdecDecoder::init_decode(u32 bw_pcm, u32 nch_out)
{
	// Valid word sizes: 16/24/32-bit integer range plus float
	if (bw_pcm < CELL_ADEC_ATRACX_WORD_SZ_16BIT || (bw_pcm > CELL_ADEC_ATRACX_WORD_SZ_32BIT && bw_pcm != CELL_ADEC_ATRACX_WORD_SZ_FLOAT))
	{
		return { 0x80004005, "AtracXdecDecoder::init_decode() failed: Invalid PCM output format" };
	}

	this->bw_pcm = bw_pcm;
	this->nch_out = nch_out; // Not checked for invalid values on LLE

	// NOTE(review): sized by nch_in (input channels), not nch_out; all non-16-bit
	// formats use 4 bytes per sample — presumably matches LLE, confirm
	this->pcm_output_size = (bw_pcm == CELL_ADEC_ATRACX_WORD_SZ_16BIT ? sizeof(s16) : sizeof(f32)) * nch_in * ATXDEC_SAMPLES_PER_FRAME;

	init_avcodec();

	return CELL_OK;
}
// Parses the 8-byte ATS header at the start of an access unit and applies the
// stream parameters it encodes (sync word 0x0fd0, sample-rate index, channel
// config index, frame length in 8-byte units minus one).
error_code AtracXdecDecoder::parse_ats_header(vm::cptr<u8> au_start_addr)
{
	const auto ats = std::bit_cast<AtracXdecAtsHeader>(vm::read64(au_start_addr.addr()));

	if (ats.sync_word != 0x0fd0)
	{
		return { CELL_ADEC_ERROR_ATX_ATSHDR, "AtracXdecDecoder::parse_ats_header() failed: Invalid sync word: 0x%x", ats.sync_word };
	}

	// Bit layout of params: [15:13] sample rate index, [12:10] channel config
	// index, [9:0] (frame length / 8) - 1
	const u8 sample_rate_idx = ats.params >> 13;
	const u8 ch_config_idx = ats.params >> 10 & 7;
	const u16 nbytes = ((ats.params & 0x3ff) + 1) * 8;

	if (ch_config_idx == 0u)
	{
		return { CELL_ADEC_ERROR_ATX_ATSHDR, "AtracXdecDecoder::parse_ats_header() failed: Invalid channel configuration: %d", ch_config_idx };
	}

	u32 sampling_freq;

	switch (sample_rate_idx)
	{
	case 1: sampling_freq = 44100; break;
	case 2: sampling_freq = 48000; break;
	default: return { CELL_ADEC_ERROR_ATX_ATSHDR, "AtracXdecDecoder::parse_ats_header() failed: Invalid sample rate index: %d", sample_rate_idx };
	}

	return set_config_info(sampling_freq, ch_config_idx, nbytes); // Cannot return error here, values were already checked
}
// Decoder thread main loop: pops commands (start_seq, end_seq, decode_au,
// close) from cmd_queue and executes them. Whenever a savestate is requested
// mid-command, the function returns early with cpu_flag::again set;
// skip_getting_command ensures the interrupted command is re-executed after
// the state is loaded instead of popping a new one.
void AtracXdecContext::exec(ppu_thread& ppu)
{
	perf_meter<"ATXDEC"_u64> perf0;

	// Savestates: recreate the FFmpeg decoder if a stream configuration was active when the state was saved
	if (decoder.config_is_set)
	{
		decoder.init_avcodec();
	}

	for (;;cmd_counter++)
	{
		cellAtracXdec.trace("Command counter: %llu, waiting for next command...", cmd_counter);

		if (!skip_getting_command)
		{
			lv2_obj::sleep(ppu);
			std::lock_guard lock{queue_mutex};

			// Wait (with periodic wakeups) until a command arrives or the emulator stops
			while (cmd_queue.empty() && !ppu.is_stopped())
			{
				lv2_obj::sleep(ppu);
				queue_not_empty.wait(queue_mutex, 20000);
			}

			if (ppu.is_stopped())
			{
				// Emulator pause/savestate: request that this function runs again on resume
				ppu.state += cpu_flag::again;
				return;
			}

			cmd_queue.pop(cmd);

			if (!run_thread)
			{
				return;
			}
		}

		cellAtracXdec.trace("Command type: %d", static_cast<u32>(cmd.type.get()));

		switch (cmd.type)
		{
		case AtracXdecCmdType::start_seq:
		{
			first_decode = true;
			skip_next_frame = true;

			// Skip if access units contain an ATS header, the parameters are included in the header and we need to wait for the first decode command to parse them
			if (cmd.atracx_param.au_includes_ats_hdr_flg == CELL_ADEC_ATRACX_ATS_HDR_NOTINC)
			{
				if (decoder.set_config_info(cmd.atracx_param.sampling_freq, cmd.atracx_param.ch_config_idx, cmd.atracx_param.nbytes) == static_cast<s32>(0x80004005))
				{
					break;
				}

				if (decoder.init_decode(cmd.atracx_param.bw_pcm, cmd.atracx_param.nch_out) == static_cast<s32>(0x80004005))
				{
					break;
				}
			}

			atracx_param = cmd.atracx_param;
			break;
		}
		case AtracXdecCmdType::end_seq:
		{
			skip_getting_command = true;

			// Block savestate creation during callbacks
			std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
			if (!savestate_lock.owns_lock())
			{
				ppu.state += cpu_flag::again;
				return;
			}
			skip_getting_command = false;

			// Doesn't do anything else
			notify_seq_done.cbFunc(ppu, notify_seq_done.cbArg);
			break;
		}
		case AtracXdecCmdType::decode_au:
		{
			skip_getting_command = true;

			ensure(!!cmd.au_start_addr); // Not checked on LLE

			// The previous output buffer must be released (releasePcm) before decoding the next frame
			cellAtracXdec.trace("Waiting for output to be consumed...");
			lv2_obj::sleep(ppu);
			std::unique_lock output_mutex_lock{output_mutex};

			while (output_locked && !ppu.is_stopped())
			{
				lv2_obj::sleep(ppu);
				output_consumed.wait(output_mutex, 20000);
			}

			if (ppu.is_stopped())
			{
				ppu.state += cpu_flag::again;
				return;
			}

			if (!run_thread)
			{
				return;
			}

			cellAtracXdec.trace("Output consumed");

			u32 error = CELL_OK;

			// Only the first valid ATS header after starting a sequence is parsed. It is ignored on all subsequent access units
			if (first_decode && atracx_param.au_includes_ats_hdr_flg == CELL_ADEC_ATRACX_ATS_HDR_INC)
			{
				// Block savestate creation during callbacks
				std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
				if (!savestate_lock.owns_lock())
				{
					ppu.state += cpu_flag::again;
					return;
				}

				if (error = decoder.parse_ats_header(cmd.au_start_addr); error != CELL_OK)
				{
					notify_error.cbFunc(ppu, error, notify_error.cbArg);
				}
				else if (decoder.init_decode(atracx_param.bw_pcm, atracx_param.nch_out) != CELL_OK)
				{
					notify_error.cbFunc(ppu, CELL_ADEC_ERROR_ATX_FATAL, notify_error.cbArg);
				}
			}

			// LLE does not initialize the output address if parsing the ATS header fails
			vm::ptr<void> output = vm::null;

			u32 decoded_samples_num = 0;

			if (error != CELL_ADEC_ERROR_ATX_ATSHDR)
			{
				// The LLE SPU thread would crash if you attempt to decode without a valid configuration
				ensure(decoder.config_is_set, "Attempted to decode with invalid configuration");

				// PCM output is placed in work memory, after the area reserved for SPURS structures
				output.set(work_mem.addr() + atracXdecGetSpursMemSize(decoder.nch_in));

				// Copy the raw frame (skipping the ATS header, if present) into work memory for FFmpeg
				const auto au_start_addr = atracx_param.au_includes_ats_hdr_flg == CELL_ADEC_ATRACX_ATS_HDR_INC ? cmd.au_start_addr.get_ptr() + sizeof(AtracXdecAtsHeader) : cmd.au_start_addr.get_ptr();
				std::memcpy(work_mem.get_ptr(), au_start_addr, decoder.nbytes);

				if (int err = avcodec_send_packet(decoder.ctx, decoder.packet); err)
				{
					// These errors should never occur
					if (err == AVERROR(EAGAIN) || err == averror_eof || err == AVERROR(EINVAL) || err == AVERROR(ENOMEM))
					{
						fmt::throw_exception("avcodec_send_packet() failed (err=0x%x='%s')", err, utils::av_error_to_string(err));
					}

					// Game sent invalid data
					cellAtracXdec.error("avcodec_send_packet() failed (err=0x%x='%s')", err, utils::av_error_to_string(err));
					error = CELL_ADEC_ERROR_ATX_NON_FATAL; // Not accurate, FFmpeg doesn't provide detailed errors like LLE
					av_frame_unref(decoder.frame);
				}
				else if ((err = avcodec_receive_frame(decoder.ctx, decoder.frame)))
				{
					fmt::throw_exception("avcodec_receive_frame() failed (err=0x%x='%s')", err, utils::av_error_to_string(err));
				}

				decoded_samples_num = decoder.frame->nb_samples;
				ensure(decoded_samples_num == 0u || decoded_samples_num == ATXDEC_SAMPLES_PER_FRAME);

				// The first frame after starting a new sequence or after an error is replaced with silence
				if (skip_next_frame && error == CELL_OK)
				{
					skip_next_frame = false;
					decoded_samples_num = 0;
					std::memset(output.get_ptr(), 0, ATXDEC_SAMPLES_PER_FRAME * (decoder.bw_pcm & 0x7full) * decoder.nch_out);
				}

				// Convert FFmpeg output to LLE output
				const auto output_f32 = vm::static_ptr_cast<f32>(output).get_ptr();
				const auto output_s16 = vm::static_ptr_cast<s16>(output).get_ptr();
				const auto output_s32 = vm::static_ptr_cast<s32>(output).get_ptr();

				// FFmpeg emits planar channels; ch_map interleaves them in LLE channel order
				const u8* const ch_map = ATXDEC_AVCODEC_CH_MAP[decoder.ch_config_idx - 1];
				const u32 nch_in = decoder.nch_in;

				// Each case clamps samples to [-1, 1] and converts to the requested PCM word size
				switch (decoder.bw_pcm)
				{
				case CELL_ADEC_ATRACX_WORD_SZ_FLOAT:
					for (u32 channel_idx = 0; channel_idx < nch_in; channel_idx++)
					{
						const f32* samples = reinterpret_cast<f32*>(decoder.frame->data[channel_idx]);

						for (u32 in_sample_idx = 0, out_sample_idx = ch_map[channel_idx]; in_sample_idx < decoded_samples_num; in_sample_idx++, out_sample_idx += nch_in)
						{
							const f32 sample = samples[in_sample_idx];

							// Clamp to the largest float strictly below 1.0
							if (sample >= std::bit_cast<f32>(std::bit_cast<u32>(1.f) - 1))
							{
								output_f32[out_sample_idx] = std::bit_cast<be_t<f32>>("\x3f\x7f\xff\xff"_u32); // Prevents an unnecessary endian swap
							}
							else if (sample <= -1.f)
							{
								output_f32[out_sample_idx] = -1.f;
							}
							else
							{
								output_f32[out_sample_idx] = sample;
							}
						}
					}
					break;

				case CELL_ADEC_ATRACX_WORD_SZ_16BIT:
					for (u32 channel_idx = 0; channel_idx < nch_in; channel_idx++)
					{
						const f32* samples = reinterpret_cast<f32*>(decoder.frame->data[channel_idx]);

						for (u32 in_sample_idx = 0, out_sample_idx = ch_map[channel_idx]; in_sample_idx < decoded_samples_num; in_sample_idx++, out_sample_idx += nch_in)
						{
							const f32 sample = samples[in_sample_idx];

							if (sample >= 1.f)
							{
								output_s16[out_sample_idx] = INT16_MAX;
							}
							else if (sample <= -1.f)
							{
								output_s16[out_sample_idx] = INT16_MIN;
							}
							else
							{
								output_s16[out_sample_idx] = static_cast<s16>(std::floor(sample * 0x8000u));
							}
						}
					}
					break;

				case CELL_ADEC_ATRACX_WORD_SZ_24BIT:
					for (u32 channel_idx = 0; channel_idx < nch_in; channel_idx++)
					{
						const f32* samples = reinterpret_cast<f32*>(decoder.frame->data[channel_idx]);

						for (u32 in_sample_idx = 0, out_sample_idx = ch_map[channel_idx]; in_sample_idx < decoded_samples_num; in_sample_idx++, out_sample_idx += nch_in)
						{
							const f32 sample = samples[in_sample_idx];

							// 24-bit samples are stored in the low three bytes of a 32-bit word
							if (sample >= 1.f)
							{
								output_s32[out_sample_idx] = 0x007fffff;
							}
							else if (sample <= -1.f)
							{
								output_s32[out_sample_idx] = 0x00800000;
							}
							else
							{
								output_s32[out_sample_idx] = static_cast<s32>(std::floor(sample * 0x00800000u)) & 0x00ffffff;
							}
						}
					}
					break;

				case CELL_ADEC_ATRACX_WORD_SZ_32BIT:
					for (u32 channel_idx = 0; channel_idx < nch_in; channel_idx++)
					{
						const f32* samples = reinterpret_cast<f32*>(decoder.frame->data[channel_idx]);

						for (u32 in_sample_idx = 0, out_sample_idx = ch_map[channel_idx]; in_sample_idx < decoded_samples_num; in_sample_idx++, out_sample_idx += nch_in)
						{
							const f32 sample = samples[in_sample_idx];

							if (sample >= 1.f)
							{
								output_s32[out_sample_idx] = INT32_MAX;
							}
							else if (sample <= -1.f)
							{
								output_s32[out_sample_idx] = INT32_MIN;
							}
							else
							{
								output_s32[out_sample_idx] = static_cast<s32>(std::floor(sample * 0x80000000u));
							}
						}
					}
				}

				first_decode = false;

				if (error != CELL_OK)
				{
					// Block savestate creation during callbacks
					std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
					if (!savestate_lock.owns_lock())
					{
						ppu.state += cpu_flag::again;
						return;
					}

					skip_next_frame = true;
					notify_error.cbFunc(ppu, error, notify_error.cbArg);
				}
			}

			// Block savestate creation during callbacks
			std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
			if (!savestate_lock.owns_lock())
			{
				ppu.state += cpu_flag::again;
				return;
			}
			skip_getting_command = false;

			// au_done and pcm_out callbacks are always called after a decode command, even if an error occurred
			// The output always has to be consumed as well
			notify_au_done.cbFunc(ppu, cmd.pcm_handle, notify_au_done.cbArg);

			output_locked = true;
			output_mutex_lock.unlock();

			const u32 output_size = decoded_samples_num * (decoder.bw_pcm & 0x7fu) * decoder.nch_out;

			const vm::var<CellAdecAtracXInfo> bsi_info{{ decoder.sampling_freq, decoder.ch_config_idx, decoder.nbytes }};

			const AdecCorrectPtsValueType correct_pts_type = [&]
			{
				switch (decoder.sampling_freq)
				{
				case 32000u: return ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_32000Hz;
				case 44100u: return ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_44100Hz;
				case 48000u: return ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_48000Hz;
				default: return ADEC_CORRECT_PTS_VALUE_TYPE_UNSPECIFIED;
				}
			}();

			notify_pcm_out.cbFunc(ppu, cmd.pcm_handle, output, output_size, notify_pcm_out.cbArg, vm::make_var<vm::bcptr<void>>(bsi_info), correct_pts_type, error);
			break;
		}
		default:
			fmt::throw_exception("Invalid command");
		}
	}
}
// Queues a command for the decoder thread and wakes it up.
// Returns CELL_ADEC_ERROR_ATX_BUSY if the command queue is full. A close
// command is only enqueued when the queue is empty, mirroring LLE; otherwise
// it is silently dropped (returns 0).
template <AtracXdecCmdType type>
error_code AtracXdecContext::send_command(ppu_thread& ppu, auto&&... args)
{
	ppu.state += cpu_flag::wait;

	{
		std::lock_guard lock{queue_mutex};

		if constexpr (type == AtracXdecCmdType::close)
		{
			// Close command is only sent if the queue is empty on LLE
			if (!cmd_queue.empty())
			{
				return {};
			}
		}

		if (cmd_queue.full())
		{
			return CELL_ADEC_ERROR_ATX_BUSY;
		}

		// "type" is a non-type template parameter (a prvalue enum constant);
		// wrapping it in std::forward was a misuse and has been removed
		cmd_queue.emplace(type, std::forward<decltype(args)>(args)...);
	}

	queue_not_empty.notify_one();

	return CELL_OK;
}
// PPU thread entry point of the HLE decoder. Allocates the FFmpeg objects,
// runs the command loop, then frees them. If the loop bailed out for a
// savestate (cpu_flag::again), the context address is stashed in the syscall
// arguments so the thread can be re-entered on load instead of exiting.
void atracXdecEntry(ppu_thread& ppu, vm::ptr<AtracXdecContext> atxdec)
{
	atxdec->decoder.alloc_avcodec();
	atxdec->exec(ppu);
	atxdec->decoder.free_avcodec();
	if (ppu.state & cpu_flag::again)
	{
		// For savestates, save argument
		ppu.syscall_args[0] = atxdec.addr();
		return;
	}
	ppu_execute<&sys_ppu_thread_exit>(ppu, CELL_OK);
}
// Computes the amount of work memory required by the ATRAC3plus decoder for
// the given input channel count and stores it in attr->workMemSize, rounded
// up to a multiple of 128 bytes.
template <u32 nch_in>
error_code _CellAdecCoreOpGetMemSize_atracx(vm::ptr<CellAdecAttr> attr)
{
	cellAtracXdec.notice("_CellAdecCoreOpGetMemSize_atracx<nch_in=%d>(attr=*0x%x)", nch_in, attr);

	ensure(!!attr); // Not checked on LLE

	// Decoder context (plus alignment slack), SPURS structures, decoder work
	// area, and one frame of f32 samples per input channel
	constexpr u32 context_size = sizeof(AtracXdecContext) + 0x7f;
	constexpr u32 spurs_size = ATXDEC_SPURS_STRUCTS_SIZE + 0x1d8 + atracXdecGetSpursMemSize(nch_in);
	constexpr u32 frame_size = ATXDEC_SAMPLES_PER_FRAME * sizeof(f32) * nch_in;

	attr->workMemSize = utils::align(context_size + spurs_size + frame_size, 0x80);

	return CELL_OK;
}
// Constructs the decoder context inside caller-provided work memory and spawns
// the decoder PPU thread. spursRes is accepted but never read in this HLE
// implementation. Bails out with cpu_flag::again if a savestate is in progress.
error_code _CellAdecCoreOpOpenExt_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle, vm::ptr<AdecNotifyAuDone> notifyAuDone, vm::ptr<void> notifyAuDoneArg, vm::ptr<AdecNotifyPcmOut> notifyPcmOut, vm::ptr<void> notifyPcmOutArg,
	vm::ptr<AdecNotifyError> notifyError, vm::ptr<void> notifyErrorArg, vm::ptr<AdecNotifySeqDone> notifySeqDone, vm::ptr<void> notifySeqDoneArg, vm::cptr<CellAdecResource> res, vm::cptr<CellAdecResourceSpurs> spursRes)
{
	// Block savestate creation while the context and thread are being set up
	std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
	if (!savestate_lock.owns_lock())
	{
		ppu.state += cpu_flag::again;
		return {};
	}
	cellAtracXdec.notice("_CellAdecCoreOpOpenExt_atracx(handle=*0x%x, notifyAuDone=*0x%x, notifyAuDoneArg=*0x%x, notifyPcmOut=*0x%x, notifyPcmOutArg=*0x%x, notifyError=*0x%x, notifyErrorArg=*0x%x, notifySeqDone=*0x%x, notifySeqDoneArg=*0x%x, res=*0x%x, spursRes=*0x%x)",
		handle, notifyAuDone, notifyAuDoneArg, notifyPcmOut, notifyPcmOutArg, notifyError, notifyErrorArg, notifySeqDone, notifySeqDoneArg, res, spursRes);
	ensure(!!handle && !!res); // Not checked on LLE
	ensure(handle.aligned(0x80)); // On LLE, this function doesn't check the alignment or aligns the address itself. The address should already be aligned to 128 bytes by cellAdec
	ensure(!!notifyAuDone && !!notifyAuDoneArg && !!notifyPcmOut && !!notifyPcmOutArg && !!notifyError && !!notifyErrorArg && !!notifySeqDone && !!notifySeqDoneArg); // These should always be set by cellAdec
	// Placement-construct the context; work memory starts after the context and SPURS structures
	write_to_ptr(handle.get_ptr(), AtracXdecContext(notifyAuDone, notifyAuDoneArg, notifyPcmOut, notifyPcmOutArg, notifyError, notifyErrorArg, notifySeqDone, notifySeqDoneArg,
		vm::bptr<u8>::make(handle.addr() + utils::align(static_cast<u32>(sizeof(AtracXdecContext)), 0x80) + ATXDEC_SPURS_STRUCTS_SIZE)));
	const vm::var<char[]> _name = vm::make_str("HLE ATRAC3plus decoder");
	const auto entry = g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(atracXdecEntry));
	ppu_execute<&sys_ppu_thread_create>(ppu, handle.ptr(&AtracXdecContext::thread_id), entry, handle.addr(), +res->ppuThreadPriority, +res->ppuThreadStackSize, SYS_PPU_THREAD_CREATE_JOINABLE, +_name);
	return CELL_OK;
}
// Non-SPURS open entry point: forwards to the extended variant with a null
// SPURS resource descriptor.
error_code _CellAdecCoreOpOpen_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle, vm::ptr<AdecNotifyAuDone> notifyAuDone, vm::ptr<void> notifyAuDoneArg, vm::ptr<AdecNotifyPcmOut> notifyPcmOut, vm::ptr<void> notifyPcmOutArg,
	vm::ptr<AdecNotifyError> notifyError, vm::ptr<void> notifyErrorArg, vm::ptr<AdecNotifySeqDone> notifySeqDone, vm::ptr<void> notifySeqDoneArg, vm::cptr<CellAdecResource> res)
{
	cellAtracXdec.notice("_CellAdecCoreOpOpen_atracx(handle=*0x%x, notifyAuDone=*0x%x, notifyAuDoneArg=*0x%x, notifyPcmOut=*0x%x, notifyPcmOutArg=*0x%x, notifyError=*0x%x, notifyErrorArg=*0x%x, notifySeqDone=*0x%x, notifySeqDoneArg=*0x%x, res=*0x%x)",
		handle, notifyAuDone, notifyAuDoneArg, notifyPcmOut, notifyPcmOutArg, notifyError, notifyErrorArg, notifySeqDone, notifySeqDoneArg, res);
	return _CellAdecCoreOpOpenExt_atracx(ppu, handle, notifyAuDone, notifyAuDoneArg, notifyPcmOut, notifyPcmOutArg, notifyError, notifyErrorArg, notifySeqDone, notifySeqDoneArg, res, vm::null);
}
// Shuts the decoder down: signals the decoder thread to stop, releases any
// pending output so the thread cannot stay blocked on it, then joins the
// thread. Bails out with cpu_flag::again if a savestate is in progress.
error_code _CellAdecCoreOpClose_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle)
{
	std::unique_lock savestate_lock{g_fxo->get<hle_locks_t>(), std::try_to_lock};
	if (!savestate_lock.owns_lock())
	{
		ppu.state += cpu_flag::again;
		return {};
	}
	ppu.state += cpu_flag::wait;
	cellAtracXdec.notice("_CellAdecCoreOpClose_atracx(handle=*0x%x)", handle);
	ensure(!!handle); // Not checked on LLE
	// run_thread is cleared first so the thread exits as soon as it wakes up
	handle->run_thread = false;
	handle->send_command<AtracXdecCmdType::close>(ppu);
	{
		std::lock_guard lock{handle->output_mutex};
		handle->output_locked = false;
	}
	handle->output_consumed.notify_one();
	if (vm::var<u64> ret; sys_ppu_thread_join(ppu, static_cast<u32>(handle->thread_id), +ret) != CELL_OK)
	{
		// Other thread already closed the decoder
		return CELL_ADEC_ERROR_FATAL;
	}
	return CELL_OK;
}
// Queues a start-sequence command carrying the new stream parameters.
error_code _CellAdecCoreOpStartSeq_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle, vm::cptr<CellAdecParamAtracX> atracxParam)
{
	cellAtracXdec.notice("_CellAdecCoreOpStartSeq_atracx(handle=*0x%x, atracxParam=*0x%x)", handle, atracxParam);
	ensure(!!handle && !!atracxParam); // Not checked on LLE
	cellAtracXdec.notice("_CellAdecCoreOpStartSeq_atracx(): sampling_freq=%d, ch_config_idx=%d, nch_out=%d, nbytes=0x%x, extra_config_data=0x%08x, bw_pcm=0x%x, downmix_flag=%d, au_includes_ats_hdr_flg=%d",
		atracxParam->sampling_freq, atracxParam->ch_config_idx, atracxParam->nch_out, atracxParam->nbytes, std::bit_cast<u32>(atracxParam->extra_config_data), atracxParam->bw_pcm, atracxParam->downmix_flag, atracxParam->au_includes_ats_hdr_flg);
	return handle->send_command<AtracXdecCmdType::start_seq>(ppu, *atracxParam);
}
// Queues an end-sequence command; the decoder thread fires the seq_done callback.
error_code _CellAdecCoreOpEndSeq_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle)
{
	cellAtracXdec.notice("_CellAdecCoreOpEndSeq_atracx(handle=*0x%x)", handle);
	ensure(!!handle); // Not checked on LLE
	return handle->send_command<AtracXdecCmdType::end_seq>(ppu);
}
// Queues a decode command for one access unit; results are delivered via the
// au_done and pcm_out callbacks from the decoder thread.
error_code _CellAdecCoreOpDecodeAu_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle, s32 pcmHandle, vm::cptr<CellAdecAuInfo> auInfo)
{
	cellAtracXdec.trace("_CellAdecCoreOpDecodeAu_atracx(handle=*0x%x, pcmHandle=%d, auInfo=*0x%x)", handle, pcmHandle, auInfo);
	ensure(!!handle && !!auInfo); // Not checked on LLE
	cellAtracXdec.trace("_CellAdecCoreOpDecodeAu_atracx(): startAddr=*0x%x, size=0x%x, pts=%lld, userData=0x%llx", auInfo->startAddr, auInfo->size, std::bit_cast<be_t<u64>>(auInfo->pts), auInfo->userData);
	return handle->send_command<AtracXdecCmdType::decode_au>(ppu, pcmHandle, *auInfo);
}
// Reports the decoder core version (0x01020000).
void _CellAdecCoreOpGetVersion_atracx(vm::ptr<be_t<u32, 1>> version)
{
	cellAtracXdec.notice("_CellAdecCoreOpGetVersion_atracx(version=*0x%x)", version);
	ensure(!!version); // Not checked on LLE
	*version = 0x01020000;
}
// Copies the decoded PCM frame from work memory into the caller's output
// buffer. A null outBuffer is a no-op that still reports success.
error_code _CellAdecCoreOpRealign_atracx(vm::ptr<AtracXdecContext> handle, vm::ptr<void> outBuffer, vm::cptr<void> pcmStartAddr)
{
	cellAtracXdec.trace("_CellAdecCoreOpRealign_atracx(handle=*0x%x, outBuffer=*0x%x, pcmStartAddr=*0x%x)", handle, outBuffer, pcmStartAddr);

	if (!outBuffer)
	{
		return CELL_OK;
	}

	ensure(!!handle && !!pcmStartAddr); // Not checked on LLE

	const u32 copy_size = handle->decoder.pcm_output_size;
	ensure(vm::check_addr(outBuffer.addr(), vm::page_info_t::page_writable, copy_size));
	std::memcpy(outBuffer.get_ptr(), pcmStartAddr.get_ptr(), copy_size);

	return CELL_OK;
}
// Releases the output buffer of a previous decode, allowing the decoder
// thread (which waits on output_consumed) to process the next access unit.
error_code _CellAdecCoreOpReleasePcm_atracx(ppu_thread& ppu, vm::ptr<AtracXdecContext> handle, s32 pcmHandle, vm::cptr<void> outBuffer)
{
	ppu.state += cpu_flag::wait;
	cellAtracXdec.trace("_CellAdecCoreOpReleasePcm_atracx(handle=*0x%x, pcmHandle=%d, outBuffer=*0x%x)", handle, pcmHandle, outBuffer);
	ensure(!!handle); // Not checked on LLE
	std::lock_guard lock{handle->output_mutex};
	handle->output_locked = false;
	// Notified while still holding the lock; the waiter re-checks output_locked under the same mutex
	handle->output_consumed.notify_one();
	return CELL_OK;
}
// Number of PCM handles the core supports (fixed at 3).
s32 _CellAdecCoreOpGetPcmHandleNum_atracx()
{
	cellAtracXdec.notice("_CellAdecCoreOpGetPcmHandleNum_atracx()");
	return 3;
}
// Size of the bitstream info structure passed to the pcm_out callback.
u32 _CellAdecCoreOpGetBsiInfoSize_atracx()
{
	cellAtracXdec.notice("_CellAdecCoreOpGetBsiInfoSize_atracx()");
	return sizeof(CellAdecAtracXInfo);
}
// Fills the shared entries of a CellAdecCoreOps vtable with the HLE function
// addresses. getMemSize depends on the channel count and is set by the caller.
static void init_gvar(vm::gvar<CellAdecCoreOps>& var)
{
	auto& hle_funcs = g_fxo->get<ppu_function_manager>();

	var->open.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpOpen_atracx)));
	var->close.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpClose_atracx)));
	var->startSeq.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpStartSeq_atracx)));
	var->endSeq.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpEndSeq_atracx)));
	var->decodeAu.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpDecodeAu_atracx)));
	var->getVersion.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpGetVersion_atracx)));
	var->realign.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpRealign_atracx)));
	var->releasePcm.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpReleasePcm_atracx)));
	var->getPcmHandleNum.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpGetPcmHandleNum_atracx)));
	var->getBsiInfoSize.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpGetBsiInfoSize_atracx)));
	var->openExt.set(hle_funcs.func_addr(FIND_FUNC(_CellAdecCoreOpOpenExt_atracx)));
}
// Module registration: exports the four CellAdecCoreOps variable tables (2ch,
// 6ch, 8ch, and the generic one, which uses the 8ch memory size) and registers
// the HLE functions so FIND_FUNC can resolve their PPU addresses.
DECLARE(ppu_module_manager::cellAtracXdec)("cellAtracXdec", []()
{
	REG_VNID(cellAtracXdec, 0x076b33ab, g_cell_adec_core_ops_atracx2ch).init = []()
	{
		g_cell_adec_core_ops_atracx2ch->getMemSize.set(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(_CellAdecCoreOpGetMemSize_atracx<2>)));
		init_gvar(g_cell_adec_core_ops_atracx2ch);
	};
	REG_VNID(cellAtracXdec, 0x1d210eaa, g_cell_adec_core_ops_atracx6ch).init = []()
	{
		g_cell_adec_core_ops_atracx6ch->getMemSize.set(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(_CellAdecCoreOpGetMemSize_atracx<6>)));
		init_gvar(g_cell_adec_core_ops_atracx6ch);
	};
	REG_VNID(cellAtracXdec, 0xe9a86e54, g_cell_adec_core_ops_atracx8ch).init = []()
	{
		g_cell_adec_core_ops_atracx8ch->getMemSize.set(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(_CellAdecCoreOpGetMemSize_atracx<8>)));
		init_gvar(g_cell_adec_core_ops_atracx8ch);
	};
	REG_VNID(cellAtracXdec, 0x4944af9a, g_cell_adec_core_ops_atracx).init = []()
	{
		g_cell_adec_core_ops_atracx->getMemSize.set(g_fxo->get<ppu_function_manager>().func_addr(FIND_FUNC(_CellAdecCoreOpGetMemSize_atracx<8>)));
		init_gvar(g_cell_adec_core_ops_atracx);
	};
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetMemSize_atracx<2>);
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetMemSize_atracx<6>);
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetMemSize_atracx<8>);
	REG_HIDDEN_FUNC(_CellAdecCoreOpOpen_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpClose_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpStartSeq_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpEndSeq_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpDecodeAu_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetVersion_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpRealign_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpReleasePcm_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetPcmHandleNum_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpGetBsiInfoSize_atracx);
	REG_HIDDEN_FUNC(_CellAdecCoreOpOpenExt_atracx);
	REG_HIDDEN_FUNC(atracXdecEntry);
});
| 31,457
|
C++
|
.cpp
| 726
| 39.710744
| 267
| 0.712482
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,319
|
sys_lv2dbg.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/sys_lv2dbg.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "sys_lv2dbg.h"
LOG_CHANNEL(sys_lv2dbg);
// Maps CellLv2DbgError values to their identifier names for log formatting.
// Values without a case fall through to the generic "unknown" formatter.
template <>
void fmt_class_string<CellLv2DbgError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](CellLv2DbgError value)
	{
		switch (value)
		{
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDPROCESSID);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDTHREADID);
		STR_CASE(CELL_LV2DBG_ERROR_DEILLEGALREGISTERTYPE);
		STR_CASE(CELL_LV2DBG_ERROR_DEILLEGALREGISTERNUMBER);
		STR_CASE(CELL_LV2DBG_ERROR_DEILLEGALTHREADSTATE);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDEFFECTIVEADDRESS);
		STR_CASE(CELL_LV2DBG_ERROR_DENOTFOUNDPROCESSID);
		STR_CASE(CELL_LV2DBG_ERROR_DENOMEM);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS);
		STR_CASE(CELL_LV2DBG_ERROR_DENOTFOUNDFILE);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDFILETYPE);
		STR_CASE(CELL_LV2DBG_ERROR_DENOTFOUNDTHREADID);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDTHREADSTATUS);
		STR_CASE(CELL_LV2DBG_ERROR_DENOAVAILABLEPROCESSID);
		STR_CASE(CELL_LV2DBG_ERROR_DENOTFOUNDEVENTHANDLER);
		STR_CASE(CELL_LV2DBG_ERROR_DESPNOROOM);
		STR_CASE(CELL_LV2DBG_ERROR_DESPNOTFOUND);
		STR_CASE(CELL_LV2DBG_ERROR_DESPINPROCESS);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDPRIMARYSPUTHREADID);
		STR_CASE(CELL_LV2DBG_ERROR_DETHREADSTATEISNOTSTOPPED);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDTHREADTYPE);
		STR_CASE(CELL_LV2DBG_ERROR_DECONTINUEFAILED);
		STR_CASE(CELL_LV2DBG_ERROR_DESTOPFAILED);
		STR_CASE(CELL_LV2DBG_ERROR_DENOEXCEPTION);
		STR_CASE(CELL_LV2DBG_ERROR_DENOMOREEVENTQUE);
		STR_CASE(CELL_LV2DBG_ERROR_DEEVENTQUENOTCREATED);
		STR_CASE(CELL_LV2DBG_ERROR_DEEVENTQUEOVERFLOWED);
		STR_CASE(CELL_LV2DBG_ERROR_DENOTIMPLEMENTED);
		STR_CASE(CELL_LV2DBG_ERROR_DEQUENOTREGISTERED);
		STR_CASE(CELL_LV2DBG_ERROR_DENOMOREEVENTPROCESS);
		STR_CASE(CELL_LV2DBG_ERROR_DEPROCESSNOTREGISTERED);
		STR_CASE(CELL_LV2DBG_ERROR_DEEVENTDISCARDED);
		STR_CASE(CELL_LV2DBG_ERROR_DENOMORESYNCID);
		STR_CASE(CELL_LV2DBG_ERROR_DESYNCIDALREADYADDED);
		STR_CASE(CELL_LV2DBG_ERROR_DESYNCIDNOTFOUND);
		STR_CASE(CELL_LV2DBG_ERROR_DESYNCIDNOTACQUIRED);
		STR_CASE(CELL_LV2DBG_ERROR_DEPROCESSALREADYREGISTERED);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDLSADDRESS);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDOPERATION);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDMODULEID);
		STR_CASE(CELL_LV2DBG_ERROR_DEHANDLERALREADYREGISTERED);
		STR_CASE(CELL_LV2DBG_ERROR_DEINVALIDHANDLER);
		STR_CASE(CELL_LV2DBG_ERROR_DEHANDLENOTREGISTERED);
		STR_CASE(CELL_LV2DBG_ERROR_DEOPERATIONDENIED);
		STR_CASE(CELL_LV2DBG_ERROR_DEHANDLERNOTINITIALIZED);
		STR_CASE(CELL_LV2DBG_ERROR_DEHANDLERALREADYINITIALIZED);
		STR_CASE(CELL_LV2DBG_ERROR_DEILLEGALCOREDUMPPARAMETER);
		}
		return unknown;
	});
}
// Temporarily
#ifndef _MSC_VER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// The sys_dbg_* syscall wrappers below are unimplemented stubs: each one logs
// a TODO message and reports CELL_OK without performing any work. Output
// parameters (contexts, info structs, id lists, names, statuses) are left
// untouched; callers relying on them will read uninitialized data.
error_code sys_dbg_read_ppu_thread_context(u64 id, vm::ptr<sys_dbg_ppu_thread_context_t> ppu_context)
{
	sys_lv2dbg.todo("sys_dbg_read_ppu_thread_context()");
	return CELL_OK;
}
error_code sys_dbg_read_spu_thread_context(u32 id, vm::ptr<sys_dbg_spu_thread_context_t> spu_context)
{
	sys_lv2dbg.todo("sys_dbg_read_spu_thread_context()");
	return CELL_OK;
}
error_code sys_dbg_read_spu_thread_context2(u32 id, vm::ptr<sys_dbg_spu_thread_context2_t> spu_context)
{
	sys_lv2dbg.todo("sys_dbg_read_spu_thread_context2()");
	return CELL_OK;
}
error_code sys_dbg_set_stacksize_ppu_exception_handler(u32 stacksize)
{
	sys_lv2dbg.todo("sys_dbg_set_stacksize_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_initialize_ppu_exception_handler(s32 prio)
{
	sys_lv2dbg.todo("sys_dbg_initialize_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_finalize_ppu_exception_handler()
{
	sys_lv2dbg.todo("sys_dbg_finalize_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_register_ppu_exception_handler(vm::ptr<dbg_exception_handler_t> callback, u64 ctrl_flags)
{
	sys_lv2dbg.todo("sys_dbg_register_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_unregister_ppu_exception_handler()
{
	sys_lv2dbg.todo("sys_dbg_unregister_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_signal_to_ppu_exception_handler(u64 flags)
{
	sys_lv2dbg.todo("sys_dbg_signal_to_ppu_exception_handler()");
	return CELL_OK;
}
error_code sys_dbg_get_mutex_information(u32 id, vm::ptr<sys_dbg_mutex_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_mutex_information()");
	return CELL_OK;
}
error_code sys_dbg_get_cond_information(u32 id, vm::ptr<sys_dbg_cond_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_cond_information()");
	return CELL_OK;
}
error_code sys_dbg_get_rwlock_information(u32 id, vm::ptr<sys_dbg_rwlock_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_rwlock_information()");
	return CELL_OK;
}
error_code sys_dbg_get_event_queue_information(u32 id, vm::ptr<sys_dbg_event_queue_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_event_queue_information()");
	return CELL_OK;
}
error_code sys_dbg_get_semaphore_information(u32 id, vm::ptr<sys_dbg_semaphore_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_semaphore_information()");
	return CELL_OK;
}
error_code sys_dbg_get_lwmutex_information(u32 id, vm::ptr<sys_dbg_lwmutex_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_lwmutex_information()");
	return CELL_OK;
}
error_code sys_dbg_get_lwcond_information(u32 id, vm::ptr<sys_dbg_lwcond_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_lwcond_information()");
	return CELL_OK;
}
error_code sys_dbg_get_event_flag_information(u32 id, vm::ptr<sys_dbg_event_flag_information_t> info)
{
	sys_lv2dbg.todo("sys_dbg_get_event_flag_information()");
	return CELL_OK;
}
error_code sys_dbg_get_ppu_thread_ids(vm::ptr<u64> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
{
	sys_lv2dbg.todo("sys_dbg_get_ppu_thread_ids()");
	return CELL_OK;
}
error_code sys_dbg_get_spu_thread_group_ids(vm::ptr<u32> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
{
	sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_ids()");
	return CELL_OK;
}
error_code sys_dbg_get_spu_thread_ids(u32 group_id, vm::ptr<u32> ids, vm::ptr<u64> ids_num, vm::ptr<u64> all_ids_num)
{
	sys_lv2dbg.todo("sys_dbg_get_spu_thread_ids()");
	return CELL_OK;
}
error_code sys_dbg_get_ppu_thread_name(u64 id, vm::ptr<char> name)
{
	sys_lv2dbg.todo("sys_dbg_get_ppu_thread_name()");
	return CELL_OK;
}
error_code sys_dbg_get_spu_thread_name(u32 id, vm::ptr<char> name)
{
	sys_lv2dbg.todo("sys_dbg_get_spu_thread_name()");
	return CELL_OK;
}
error_code sys_dbg_get_spu_thread_group_name(u32 id, vm::ptr<char> name)
{
	sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_name()");
	return CELL_OK;
}
error_code sys_dbg_get_ppu_thread_status(u64 id, vm::ptr<u32> status)
{
	sys_lv2dbg.todo("sys_dbg_get_ppu_thread_status()");
	return CELL_OK;
}
error_code sys_dbg_get_spu_thread_group_status(u32 id, vm::ptr<u32> status)
{
	sys_lv2dbg.todo("sys_dbg_get_spu_thread_group_status()");
	return CELL_OK;
}
error_code sys_dbg_enable_floating_point_enabled_exception(u64 id, u64 flags, u64 opt1, u64 opt2)
{
	sys_lv2dbg.todo("sys_dbg_enable_floating_point_enabled_exception()");
	return CELL_OK;
}
error_code sys_dbg_disable_floating_point_enabled_exception(u64 id, u64 flags, u64 opt1, u64 opt2)
{
	sys_lv2dbg.todo("sys_dbg_disable_floating_point_enabled_exception()");
	return CELL_OK;
}
error_code sys_dbg_vm_get_page_information(u32 addr, u32 num, vm::ptr<sys_vm_page_information_t> pageinfo)
{
	sys_lv2dbg.todo("sys_dbg_vm_get_page_information()");
	return CELL_OK;
}
error_code sys_dbg_set_address_to_dabr(u64 addr, u64 ctrl_flag)
{
	sys_lv2dbg.todo("sys_dbg_set_address_to_dabr()");
	return CELL_OK;
}
error_code sys_dbg_get_address_from_dabr(vm::ptr<u64> addr, vm::ptr<u64> ctrl_flag)
{
	sys_lv2dbg.todo("sys_dbg_get_address_from_dabr()");
	return CELL_OK;
}
error_code sys_dbg_signal_to_coredump_handler(u64 data1, u64 data2, u64 data3)
{
	sys_lv2dbg.todo("sys_dbg_signal_to_coredump_handler()");
	return CELL_OK;
}
error_code sys_dbg_mat_set_condition(u32 addr, u64 cond)
{
	sys_lv2dbg.todo("sys_dbg_mat_set_condition()");
	return CELL_OK;
}
error_code sys_dbg_mat_get_condition(u32 addr, vm::ptr<u64> condp)
{
	sys_lv2dbg.todo("sys_dbg_mat_get_condition()");
	return CELL_OK;
}
error_code sys_dbg_get_coredump_params(vm::ptr<s32> param)
{
	sys_lv2dbg.todo("sys_dbg_get_coredump_params()");
	return CELL_OK;
}
error_code sys_dbg_set_mask_to_ppu_exception_handler(u64 mask, u64 flags)
{
	sys_lv2dbg.todo("sys_dbg_set_mask_to_ppu_exception_handler()");
	return CELL_OK;
}
// Module registration: exports all sys_dbg_* stubs under the sys_lv2dbg module.
DECLARE(ppu_module_manager::sys_lv2dbg)("sys_lv2dbg", []
{
	REG_FUNC(sys_lv2dbg, sys_dbg_read_ppu_thread_context);
	REG_FUNC(sys_lv2dbg, sys_dbg_read_spu_thread_context);
	REG_FUNC(sys_lv2dbg, sys_dbg_read_spu_thread_context2);
	REG_FUNC(sys_lv2dbg, sys_dbg_set_stacksize_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_initialize_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_finalize_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_register_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_unregister_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_signal_to_ppu_exception_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_mutex_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_cond_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_rwlock_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_event_queue_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_semaphore_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_lwmutex_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_lwcond_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_event_flag_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_ppu_thread_ids);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_spu_thread_group_ids);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_spu_thread_ids);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_ppu_thread_name);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_spu_thread_name);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_spu_thread_group_name);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_ppu_thread_status);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_spu_thread_group_status);
	REG_FUNC(sys_lv2dbg, sys_dbg_enable_floating_point_enabled_exception);
	REG_FUNC(sys_lv2dbg, sys_dbg_disable_floating_point_enabled_exception);
	REG_FUNC(sys_lv2dbg, sys_dbg_vm_get_page_information);
	REG_FUNC(sys_lv2dbg, sys_dbg_set_address_to_dabr);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_address_from_dabr);
	REG_FUNC(sys_lv2dbg, sys_dbg_signal_to_coredump_handler);
	REG_FUNC(sys_lv2dbg, sys_dbg_mat_set_condition);
	REG_FUNC(sys_lv2dbg, sys_dbg_mat_get_condition);
	REG_FUNC(sys_lv2dbg, sys_dbg_get_coredump_params);
	REG_FUNC(sys_lv2dbg, sys_dbg_set_mask_to_ppu_exception_handler);
});
| 10,592
|
C++
|
.cpp
| 279
| 36.039427
| 117
| 0.767462
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,320
|
cellStorage.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellStorage.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "cellSysutil.h"
#include "cellStorage.h"
LOG_CHANNEL(cellSysutil);
// Maps CellStorageError values to their identifier names for log formatting.
// Values without a case fall through to the generic "unknown" formatter.
template <>
void fmt_class_string<CellStorageError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_STORAGEDATA_ERROR_ACCESS_ERROR);
		STR_CASE(CELL_STORAGEDATA_ERROR_INTERNAL);
		STR_CASE(CELL_STORAGEDATA_ERROR_PARAM);
		STR_CASE(CELL_STORAGEDATA_ERROR_FAILURE);
		STR_CASE(CELL_STORAGEDATA_ERROR_BUSY);
		}
		return unknown;
	});
}
error_code cellStorageDataImportMove(u32 version, vm::ptr<char> srcMediaFile, vm::ptr<char> dstHddDir, vm::ptr<CellStorageDataSetParam> param, vm::ptr<CellStorageDataFinishCallback> funcFinish, u32 container, vm::ptr<void> userdata)
{
cellSysutil.todo("cellStorageDataImportMove(version=0x%x, srcMediaFile=%s, dstHddDir=%s, param=*0x%x, funcFinish=*0x%x, container=0x%x, userdata=*0x%x)", version, srcMediaFile, dstHddDir, param, funcFinish, container, userdata);
sysutil_register_cb([=](ppu_thread& ppu) -> s32
{
funcFinish(ppu, CELL_OK, userdata);
return CELL_OK;
});
return CELL_OK;
}
error_code cellStorageDataImport(u32 version, vm::ptr<char> srcMediaFile, vm::ptr<char> dstHddDir, vm::ptr<CellStorageDataSetParam> param, vm::ptr<CellStorageDataFinishCallback> funcFinish, u32 container, vm::ptr<void> userdata)
{
cellSysutil.todo("cellStorageDataImport(version=0x%x, srcMediaFile=%s, dstHddDir=%s, param=*0x%x, funcFinish=*0x%x, container=0x%x, userdata=*0x%x)", version, srcMediaFile, dstHddDir, param, funcFinish, container, userdata);
sysutil_register_cb([=](ppu_thread& ppu) -> s32
{
funcFinish(ppu, CELL_OK, userdata);
return CELL_OK;
});
return CELL_OK;
}
error_code cellStorageDataExport(u32 version, vm::ptr<char> srcHddFile, vm::ptr<char> dstMediaDir, vm::ptr<CellStorageDataSetParam> param, vm::ptr<CellStorageDataFinishCallback> funcFinish, u32 container, vm::ptr<void> userdata)
{
cellSysutil.todo("cellStorageDataExport(version=0x%x, srcHddFile=%s, dstMediaDir=%s, param=*0x%x, funcFinish=*0x%x, container=0x%x, userdata=*0x%x)", version, srcHddFile, dstMediaDir, param, funcFinish, container, userdata);
sysutil_register_cb([=](ppu_thread& ppu) -> s32
{
funcFinish(ppu, CELL_OK, userdata);
return CELL_OK;
});
return CELL_OK;
}
void cellSysutil_Storage_init()
{
REG_FUNC(cellSysutil, cellStorageDataImportMove);
REG_FUNC(cellSysutil, cellStorageDataImport);
REG_FUNC(cellSysutil, cellStorageDataExport);
}
| 2,498
|
C++
|
.cpp
| 57
| 41.701754
| 232
| 0.766063
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,321
|
cellAtrac.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellAtrac.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
#include "cellAtrac.h"
LOG_CHANNEL(cellAtrac);
template <>
void fmt_class_string<CellAtracError>::format(std::string& out, u64 arg)
{
format_enum(out, arg, [](CellAtracError value)
{
switch (value)
{
STR_CASE(CELL_ATRAC_ERROR_API_FAIL);
STR_CASE(CELL_ATRAC_ERROR_READSIZE_OVER_BUFFER);
STR_CASE(CELL_ATRAC_ERROR_UNKNOWN_FORMAT);
STR_CASE(CELL_ATRAC_ERROR_READSIZE_IS_TOO_SMALL);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_SAMPLING_RATE);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_DATA);
STR_CASE(CELL_ATRAC_ERROR_NO_DECODER);
STR_CASE(CELL_ATRAC_ERROR_UNSET_DATA);
STR_CASE(CELL_ATRAC_ERROR_DECODER_WAS_CREATED);
STR_CASE(CELL_ATRAC_ERROR_ALLDATA_WAS_DECODED);
STR_CASE(CELL_ATRAC_ERROR_NODATA_IN_BUFFER);
STR_CASE(CELL_ATRAC_ERROR_NOT_ALIGNED_OUT_BUFFER);
STR_CASE(CELL_ATRAC_ERROR_NEED_SECOND_BUFFER);
STR_CASE(CELL_ATRAC_ERROR_ALLDATA_IS_ONMEMORY);
STR_CASE(CELL_ATRAC_ERROR_ADD_DATA_IS_TOO_BIG);
STR_CASE(CELL_ATRAC_ERROR_NONEED_SECOND_BUFFER);
STR_CASE(CELL_ATRAC_ERROR_UNSET_LOOP_NUM);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_SAMPLE);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_RESET_BYTE);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_PPU_THREAD_PRIORITY);
STR_CASE(CELL_ATRAC_ERROR_ILLEGAL_SPU_THREAD_PRIORITY);
}
return unknown;
});
}
error_code cellAtracSetDataAndGetMemSize(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u8> pucBufferAddr, u32 uiReadByte, u32 uiBufferByte, vm::ptr<u32> puiWorkMemByte)
{
cellAtrac.warning("cellAtracSetDataAndGetMemSize(pHandle=*0x%x, pucBufferAddr=*0x%x, uiReadByte=0x%x, uiBufferByte=0x%x, puiWorkMemByte=*0x%x)", pHandle, pucBufferAddr, uiReadByte, uiBufferByte, puiWorkMemByte);
*puiWorkMemByte = 0x1000;
return CELL_OK;
}
error_code cellAtracCreateDecoder(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u8> pucWorkMem, u32 uiPpuThreadPriority, u32 uiSpuThreadPriority)
{
cellAtrac.warning("cellAtracCreateDecoder(pHandle=*0x%x, pucWorkMem=*0x%x, uiPpuThreadPriority=%d, uiSpuThreadPriority=%d)", pHandle, pucWorkMem, uiPpuThreadPriority, uiSpuThreadPriority);
std::memcpy(pHandle->ucWorkMem, pucWorkMem.get_ptr(), CELL_ATRAC_HANDLE_SIZE);
return CELL_OK;
}
error_code cellAtracCreateDecoderExt(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u8> pucWorkMem, u32 uiPpuThreadPriority, vm::ptr<CellAtracExtRes> pExtRes)
{
cellAtrac.warning("cellAtracCreateDecoderExt(pHandle=*0x%x, pucWorkMem=*0x%x, uiPpuThreadPriority=%d, pExtRes=*0x%x)", pHandle, pucWorkMem, uiPpuThreadPriority, pExtRes);
std::memcpy(pHandle->ucWorkMem, pucWorkMem.get_ptr(), CELL_ATRAC_HANDLE_SIZE);
return CELL_OK;
}
error_code cellAtracDeleteDecoder(vm::ptr<CellAtracHandle> pHandle)
{
cellAtrac.warning("cellAtracDeleteDecoder(pHandle=*0x%x)", pHandle);
return CELL_OK;
}
error_code cellAtracDecode(vm::ptr<CellAtracHandle> pHandle, vm::ptr<float> pfOutAddr, vm::ptr<u32> puiSamples, vm::ptr<u32> puiFinishflag, vm::ptr<s32> piRemainFrame)
{
cellAtrac.warning("cellAtracDecode(pHandle=*0x%x, pfOutAddr=*0x%x, puiSamples=*0x%x, puiFinishFlag=*0x%x, piRemainFrame=*0x%x)", pHandle, pfOutAddr, puiSamples, puiFinishflag, piRemainFrame);
*puiSamples = 0;
*puiFinishflag = 1;
*piRemainFrame = CELL_ATRAC_ALLDATA_IS_ON_MEMORY;
return CELL_OK;
}
error_code cellAtracGetStreamDataInfo(vm::ptr<CellAtracHandle> pHandle, vm::pptr<u8> ppucWritePointer, vm::ptr<u32> puiWritableByte, vm::ptr<u32> puiReadPosition)
{
cellAtrac.warning("cellAtracGetStreamDataInfo(pHandle=*0x%x, ppucWritePointer=**0x%x, puiWritableByte=*0x%x, puiReadPosition=*0x%x)", pHandle, ppucWritePointer, puiWritableByte, puiReadPosition);
ppucWritePointer->set(pHandle.addr());
*puiWritableByte = 0x1000;
*puiReadPosition = 0;
return CELL_OK;
}
error_code cellAtracAddStreamData(vm::ptr<CellAtracHandle> pHandle, u32 uiAddByte)
{
cellAtrac.warning("cellAtracAddStreamData(pHandle=*0x%x, uiAddByte=0x%x)", pHandle, uiAddByte);
return CELL_OK;
}
error_code cellAtracGetRemainFrame(vm::ptr<CellAtracHandle> pHandle, vm::ptr<s32> piRemainFrame)
{
cellAtrac.warning("cellAtracGetRemainFrame(pHandle=*0x%x, piRemainFrame=*0x%x)", pHandle, piRemainFrame);
*piRemainFrame = CELL_ATRAC_ALLDATA_IS_ON_MEMORY;
return CELL_OK;
}
error_code cellAtracGetVacantSize(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiVacantSize)
{
cellAtrac.warning("cellAtracGetVacantSize(pHandle=*0x%x, puiVacantSize=*0x%x)", pHandle, puiVacantSize);
*puiVacantSize = 0x1000;
return CELL_OK;
}
error_code cellAtracIsSecondBufferNeeded(vm::ptr<CellAtracHandle> pHandle)
{
cellAtrac.warning("cellAtracIsSecondBufferNeeded(pHandle=*0x%x)", pHandle);
return 0;
}
error_code cellAtracGetSecondBufferInfo(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiReadPosition, vm::ptr<u32> puiDataByte)
{
cellAtrac.warning("cellAtracGetSecondBufferInfo(pHandle=*0x%x, puiReadPosition=*0x%x, puiDataByte=*0x%x)", pHandle, puiReadPosition, puiDataByte);
*puiReadPosition = 0;
*puiDataByte = 0; // write to null block will occur
return CELL_OK;
}
error_code cellAtracSetSecondBuffer(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u8> pucSecondBufferAddr, u32 uiSecondBufferByte)
{
cellAtrac.warning("cellAtracSetSecondBuffer(pHandle=*0x%x, pucSecondBufferAddr=*0x%x, uiSecondBufferByte=0x%x)", pHandle, pucSecondBufferAddr, uiSecondBufferByte);
return CELL_OK;
}
error_code cellAtracGetChannel(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiChannel)
{
cellAtrac.warning("cellAtracGetChannel(pHandle=*0x%x, puiChannel=*0x%x)", pHandle, puiChannel);
*puiChannel = 2;
return CELL_OK;
}
error_code cellAtracGetMaxSample(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiMaxSample)
{
cellAtrac.warning("cellAtracGetMaxSample(pHandle=*0x%x, puiMaxSample=*0x%x)", pHandle, puiMaxSample);
*puiMaxSample = 512;
return CELL_OK;
}
error_code cellAtracGetNextSample(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiNextSample)
{
cellAtrac.warning("cellAtracGetNextSample(pHandle=*0x%x, puiNextSample=*0x%x)", pHandle, puiNextSample);
*puiNextSample = 0;
return CELL_OK;
}
error_code cellAtracGetSoundInfo(vm::ptr<CellAtracHandle> pHandle, vm::ptr<s32> piEndSample, vm::ptr<s32> piLoopStartSample, vm::ptr<s32> piLoopEndSample)
{
cellAtrac.warning("cellAtracGetSoundInfo(pHandle=*0x%x, piEndSample=*0x%x, piLoopStartSample=*0x%x, piLoopEndSample=*0x%x)", pHandle, piEndSample, piLoopStartSample, piLoopEndSample);
*piEndSample = 0;
*piLoopStartSample = 0;
*piLoopEndSample = 0;
return CELL_OK;
}
error_code cellAtracGetNextDecodePosition(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiSamplePosition)
{
cellAtrac.warning("cellAtracGetNextDecodePosition(pHandle=*0x%x, puiSamplePosition=*0x%x)", pHandle, puiSamplePosition);
*puiSamplePosition = 0;
return CELL_ATRAC_ERROR_ALLDATA_WAS_DECODED;
}
error_code cellAtracGetBitrate(vm::ptr<CellAtracHandle> pHandle, vm::ptr<u32> puiBitrate)
{
cellAtrac.warning("cellAtracGetBitrate(pHandle=*0x%x, puiBitrate=*0x%x)", pHandle, puiBitrate);
*puiBitrate = 128;
return CELL_OK;
}
error_code cellAtracGetLoopInfo(vm::ptr<CellAtracHandle> pHandle, vm::ptr<s32> piLoopNum, vm::ptr<u32> puiLoopStatus)
{
cellAtrac.warning("cellAtracGetLoopInfo(pHandle=*0x%x, piLoopNum=*0x%x, puiLoopStatus=*0x%x)", pHandle, piLoopNum, puiLoopStatus);
*piLoopNum = 0;
*puiLoopStatus = 0;
return CELL_OK;
}
error_code cellAtracSetLoopNum(vm::ptr<CellAtracHandle> pHandle, s32 iLoopNum)
{
cellAtrac.warning("cellAtracSetLoopNum(pHandle=*0x%x, iLoopNum=%d)", pHandle, iLoopNum);
return CELL_OK;
}
error_code cellAtracGetBufferInfoForResetting(vm::ptr<CellAtracHandle> pHandle, u32 uiSample, vm::ptr<CellAtracBufferInfo> pBufferInfo)
{
cellAtrac.warning("cellAtracGetBufferInfoForResetting(pHandle=*0x%x, uiSample=0x%x, pBufferInfo=*0x%x)", pHandle, uiSample, pBufferInfo);
pBufferInfo->pucWriteAddr.set(pHandle.addr());
pBufferInfo->uiWritableByte = 0x1000;
pBufferInfo->uiMinWriteByte = 0;
pBufferInfo->uiReadPosition = 0;
return CELL_OK;
}
error_code cellAtracResetPlayPosition(vm::ptr<CellAtracHandle> pHandle, u32 uiSample, u32 uiWriteByte)
{
cellAtrac.warning("cellAtracResetPlayPosition(pHandle=*0x%x, uiSample=0x%x, uiWriteByte=0x%x)", pHandle, uiSample, uiWriteByte);
return CELL_OK;
}
error_code cellAtracGetInternalErrorInfo(vm::ptr<CellAtracHandle> pHandle, vm::ptr<s32> piResult)
{
cellAtrac.warning("cellAtracGetInternalErrorInfo(pHandle=*0x%x, piResult=*0x%x)", pHandle, piResult);
*piResult = 0;
return CELL_OK;
}
error_code cellAtracGetSamplingRate()
{
UNIMPLEMENTED_FUNC(cellAtrac);
return CELL_OK;
}
DECLARE(ppu_module_manager::cellAtrac)("cellAtrac", []()
{
REG_FUNC(cellAtrac, cellAtracSetDataAndGetMemSize);
REG_FUNC(cellAtrac, cellAtracCreateDecoder);
REG_FUNC(cellAtrac, cellAtracCreateDecoderExt);
REG_FUNC(cellAtrac, cellAtracDeleteDecoder);
REG_FUNC(cellAtrac, cellAtracDecode);
REG_FUNC(cellAtrac, cellAtracGetStreamDataInfo);
REG_FUNC(cellAtrac, cellAtracAddStreamData);
REG_FUNC(cellAtrac, cellAtracGetRemainFrame);
REG_FUNC(cellAtrac, cellAtracGetVacantSize);
REG_FUNC(cellAtrac, cellAtracIsSecondBufferNeeded);
REG_FUNC(cellAtrac, cellAtracGetSecondBufferInfo);
REG_FUNC(cellAtrac, cellAtracSetSecondBuffer);
REG_FUNC(cellAtrac, cellAtracGetChannel);
REG_FUNC(cellAtrac, cellAtracGetMaxSample);
REG_FUNC(cellAtrac, cellAtracGetNextSample);
REG_FUNC(cellAtrac, cellAtracGetSoundInfo);
REG_FUNC(cellAtrac, cellAtracGetNextDecodePosition);
REG_FUNC(cellAtrac, cellAtracGetBitrate);
REG_FUNC(cellAtrac, cellAtracGetLoopInfo);
REG_FUNC(cellAtrac, cellAtracSetLoopNum);
REG_FUNC(cellAtrac, cellAtracGetBufferInfoForResetting);
REG_FUNC(cellAtrac, cellAtracResetPlayPosition);
REG_FUNC(cellAtrac, cellAtracGetInternalErrorInfo);
REG_FUNC(cellAtrac, cellAtracGetSamplingRate);
});
| 9,750
|
C++
|
.cpp
| 211
| 44.199052
| 212
| 0.798818
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,322
|
cellSysmodule.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSysmodule.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
LOG_CHANNEL(cellSysmodule);
constexpr auto CELL_SYSMODULE_LOADED = CELL_OK;
enum CellSysmoduleError : u32
{
CELL_SYSMODULE_ERROR_DUPLICATED = 0x80012001,
CELL_SYSMODULE_ERROR_UNKNOWN = 0x80012002,
CELL_SYSMODULE_ERROR_UNLOADED = 0x80012003,
CELL_SYSMODULE_ERROR_INVALID_MEMCONTAINER = 0x80012004,
CELL_SYSMODULE_ERROR_FATAL = 0x800120ff,
};
template<>
void fmt_class_string<CellSysmoduleError>::format(std::string& out, u64 arg)
{
format_enum(out, arg, [](auto error)
{
switch (error)
{
STR_CASE(CELL_SYSMODULE_ERROR_DUPLICATED);
STR_CASE(CELL_SYSMODULE_ERROR_UNKNOWN);
STR_CASE(CELL_SYSMODULE_ERROR_UNLOADED);
STR_CASE(CELL_SYSMODULE_ERROR_INVALID_MEMCONTAINER);
STR_CASE(CELL_SYSMODULE_ERROR_FATAL);
}
return unknown;
});
}
static const char* get_module_name(u16 id)
{
switch (id)
{
case 0x0000: return "sys_net";
case 0x0001: return "cellHttp";
case 0x0002: return "cellHttpUtil";
case 0x0003: return "cellSsl";
case 0x0004: return "cellHttps";
case 0x0005: return "libvdec";
case 0x0006: return "cellAdec";
case 0x0007: return "cellDmux";
case 0x0008: return "cellVpost";
case 0x0009: return "cellRtc";
case 0x000a: return "cellSpurs";
case 0x000b: return "cellOvis";
case 0x000c: return "cellSheap";
case 0x000d: return "cellSync";
case 0x000e: return "sys_fs";
case 0x000f: return "cellJpgDec";
case 0x0010: return "cellGcmSys";
case 0x0011: return "cellAudio";
case 0x0012: return "cellPamf";
case 0x0013: return "cellAtrac";
case 0x0014: return "cellNetCtl";
case 0x0015: return "cellSysutil";
case 0x0016: return "sceNp";
case 0x0017: return "sys_io";
case 0x0018: return "cellPngDec";
case 0x0019: return "cellFont";
case 0x001a: return "cellFontFT";
case 0x001b: return "cell_FreeType2";
case 0x001c: return "cellUsbd";
case 0x001d: return "cellSail";
case 0x001e: return "cellL10n";
case 0x001f: return "cellResc";
case 0x0020: return "cellDaisy";
case 0x0021: return "cellKey2char";
case 0x0022: return "cellMic";
case 0x0023: return "cellCamera";
case 0x0024: return "cellVdecMpeg2";
case 0x0025: return "cellVdecAvc";
case 0x0026: return "cellAdecLpcm";
case 0x0027: return "cellAdecAc3";
case 0x0028: return "cellAdecAtx";
case 0x0029: return "cellAdecAt3";
case 0x002a: return "cellDmuxPamf";
case 0x002b: return nullptr;
case 0x002c: return nullptr;
case 0x002d: return nullptr;
case 0x002e: return "sys_lv2dbg";
case 0x002f: return "cellSysutilAvcExt";
case 0x0030: return "cellUsbPspcm";
case 0x0031: return "cellSysutilAvconfExt";
case 0x0032: return "cellUserInfo";
case 0x0033: return "cellSaveData";
case 0x0034: return "cellSubDisplay";
case 0x0035: return "cellRec";
case 0x0036: return "cellVideoExportUtility";
case 0x0037: return "cellGameExec";
case 0x0038: return "sceNp2";
case 0x0039: return "cellSysutilAp";
case 0x003a: return "sceNpClans";
case 0x003b: return "cellOskExtUtility";
case 0x003c: return "cellVdecDivx";
case 0x003d: return "cellJpgEnc";
case 0x003e: return "cellGame";
case 0x003f: return "cellBGDLUtility";
case 0x0040: return "cell_FreeType2";
case 0x0041: return "cellVideoUpload";
case 0x0042: return "cellSysconfExtUtility";
case 0x0043: return "cellFiber";
case 0x0044: return "sceNpCommerce2";
case 0x0045: return "sceNpTus";
case 0x0046: return "cellVoice";
case 0x0047: return "cellAdecCelp8";
case 0x0048: return "cellCelp8Enc";
case 0x0049: return "cellSysutilMisc";
case 0x004a: return "cellMusicUtility";
// TODO: Check if those libad are correctly matched.
// They belong to those IDs but actual order is unknown.
case 0x004b: return "libad_core";
case 0x004c: return "libad_async";
case 0x004d: return "libad_billboard_util";
case 0x004e: return "cellScreenShotUtility";
case 0x004f: return "cellMusicDecodeUtility";
case 0x0050: return "cellSpursJq";
case 0x0052: return "cellPngEnc";
case 0x0053: return "cellMusicDecodeUtility";
case 0x0054: return "libmedi";
case 0x0055: return "cellSync2";
case 0x0056: return "sceNpUtil";
case 0x0057: return "cellRudp";
case 0x0059: return "sceNpSns";
case 0x005a: return "libgem";
case 0x005c: return "cellCrossController";
case 0xf00a: return "cellCelpEnc";
case 0xf010: return "cellGifDec";
case 0xf019: return "cellAdecCelp";
case 0xf01b: return "cellAdecM2bc";
case 0xf01d: return "cellAdecM4aac";
case 0xf01e: return "cellAdecMp3";
case 0xf023: return "cellImeJpUtility";
case 0xf028: return "cellMusicUtility";
case 0xf029: return "cellPhotoUtility";
case 0xf02a: return "cellPrintUtility";
case 0xf02b: return "cellPhotoImportUtil";
case 0xf02c: return "cellMusicExportUtility";
case 0xf02e: return "cellPhotoDecodeUtil";
case 0xf02f: return "cellSearchUtility";
case 0xf030: return "cellSysutilAvc2";
case 0xf034: return "cellSailRec";
case 0xf035: return "sceNpTrophy";
case 0xf044: return "cellSysutilNpEula";
case 0xf053: return "cellAdecAt3multi";
case 0xf054: return "cellAtracMulti";
}
return nullptr;
}
static const char* get_module_id(u16 id)
{
static thread_local char tls_id_name[8]; // for test
switch (id)
{
case 0x0000: return "CELL_SYSMODULE_NET";
case 0x0001: return "CELL_SYSMODULE_HTTP";
case 0x0002: return "CELL_SYSMODULE_HTTP_UTIL";
case 0x0003: return "CELL_SYSMODULE_SSL";
case 0x0004: return "CELL_SYSMODULE_HTTPS";
case 0x0005: return "CELL_SYSMODULE_VDEC";
case 0x0006: return "CELL_SYSMODULE_ADEC";
case 0x0007: return "CELL_SYSMODULE_DMUX";
case 0x0008: return "CELL_SYSMODULE_VPOST";
case 0x0009: return "CELL_SYSMODULE_RTC";
case 0x000a: return "CELL_SYSMODULE_SPURS";
case 0x000b: return "CELL_SYSMODULE_OVIS";
case 0x000c: return "CELL_SYSMODULE_SHEAP";
case 0x000d: return "CELL_SYSMODULE_SYNC";
case 0x000e: return "CELL_SYSMODULE_FS";
case 0x000f: return "CELL_SYSMODULE_JPGDEC";
case 0x0010: return "CELL_SYSMODULE_GCM_SYS";
case 0x0011: return "CELL_SYSMODULE_AUDIO";
case 0x0012: return "CELL_SYSMODULE_PAMF";
case 0x0013: return "CELL_SYSMODULE_ATRAC3PLUS";
case 0x0014: return "CELL_SYSMODULE_NETCTL";
case 0x0015: return "CELL_SYSMODULE_SYSUTIL";
case 0x0016: return "CELL_SYSMODULE_SYSUTIL_NP";
case 0x0017: return "CELL_SYSMODULE_IO";
case 0x0018: return "CELL_SYSMODULE_PNGDEC";
case 0x0019: return "CELL_SYSMODULE_FONT";
case 0x001a: return "CELL_SYSMODULE_FONTFT";
case 0x001b: return "CELL_SYSMODULE_FREETYPE";
case 0x001c: return "CELL_SYSMODULE_USBD";
case 0x001d: return "CELL_SYSMODULE_SAIL";
case 0x001e: return "CELL_SYSMODULE_L10N";
case 0x001f: return "CELL_SYSMODULE_RESC";
case 0x0020: return "CELL_SYSMODULE_DAISY";
case 0x0021: return "CELL_SYSMODULE_KEY2CHAR";
case 0x0022: return "CELL_SYSMODULE_MIC";
case 0x0023: return "CELL_SYSMODULE_CAMERA";
case 0x0024: return "CELL_SYSMODULE_VDEC_MPEG2";
case 0x0025: return "CELL_SYSMODULE_VDEC_AVC";
case 0x0026: return "CELL_SYSMODULE_ADEC_LPCM";
case 0x0027: return "CELL_SYSMODULE_ADEC_AC3";
case 0x0028: return "CELL_SYSMODULE_ADEC_ATX";
case 0x0029: return "CELL_SYSMODULE_ADEC_AT3";
case 0x002a: return "CELL_SYSMODULE_DMUX_PAMF";
case 0x002b: return "CELL_SYSMODULE_VDEC_AL";
case 0x002c: return "CELL_SYSMODULE_ADEC_AL";
case 0x002d: return "CELL_SYSMODULE_DMUX_AL";
case 0x002e: return "CELL_SYSMODULE_LV2DBG";
case 0x002f: return "CELL_SYSMODULE_SYSUTIL_AVCHAT";
case 0x0030: return "CELL_SYSMODULE_USBPSPCM";
case 0x0031: return "CELL_SYSMODULE_AVCONF_EXT";
case 0x0032: return "CELL_SYSMODULE_SYSUTIL_USERINFO";
case 0x0033: return "CELL_SYSMODULE_SYSUTIL_SAVEDATA";
case 0x0034: return "CELL_SYSMODULE_SUBDISPLAY";
case 0x0035: return "CELL_SYSMODULE_SYSUTIL_REC";
case 0x0036: return "CELL_SYSMODULE_VIDEO_EXPORT";
case 0x0037: return "CELL_SYSMODULE_SYSUTIL_GAME_EXEC";
case 0x0038: return "CELL_SYSMODULE_SYSUTIL_NP2";
case 0x0039: return "CELL_SYSMODULE_SYSUTIL_AP";
case 0x003a: return "CELL_SYSMODULE_SYSUTIL_NP_CLANS";
case 0x003b: return "CELL_SYSMODULE_SYSUTIL_OSK_EXT";
case 0x003c: return "CELL_SYSMODULE_VDEC_DIVX";
case 0x003d: return "CELL_SYSMODULE_JPGENC";
case 0x003e: return "CELL_SYSMODULE_SYSUTIL_GAME";
case 0x003f: return "CELL_SYSMODULE_BGDL";
case 0x0040: return "CELL_SYSMODULE_FREETYPE_TT";
case 0x0041: return "CELL_SYSMODULE_SYSUTIL_VIDEO_UPLOAD";
case 0x0042: return "CELL_SYSMODULE_SYSUTIL_SYSCONF_EXT";
case 0x0043: return "CELL_SYSMODULE_FIBER";
case 0x0044: return "CELL_SYSMODULE_SYSUTIL_NP_COMMERCE2";
case 0x0045: return "CELL_SYSMODULE_SYSUTIL_NP_TUS";
case 0x0046: return "CELL_SYSMODULE_VOICE";
case 0x0047: return "CELL_SYSMODULE_ADEC_CELP8";
case 0x0048: return "CELL_SYSMODULE_CELP8ENC";
case 0x0049: return "CELL_SYSMODULE_SYSUTIL_LICENSEAREA";
case 0x004a: return "CELL_SYSMODULE_SYSUTIL_MUSIC2";
// TODO: Check if those libad are correctly matched.
// They belong to those IDs but actual order is unknown.
case 0x004b: return "CELL_SYSMODULE_AD_CORE";
case 0x004c: return "CELL_SYSMODULE_AD_ASYNC";
case 0x004d: return "CELL_SYSMODULE_AD_BILLBOARD_UTIL";
case 0x004e: return "CELL_SYSMODULE_SYSUTIL_SCREENSHOT";
case 0x004f: return "CELL_SYSMODULE_SYSUTIL_MUSIC_DECODE";
case 0x0050: return "CELL_SYSMODULE_SPURS_JQ";
case 0x0052: return "CELL_SYSMODULE_PNGENC";
case 0x0053: return "CELL_SYSMODULE_SYSUTIL_MUSIC_DECODE2";
case 0x0054: return "CELL_SYSMODULE_MEDI";
case 0x0055: return "CELL_SYSMODULE_SYNC2";
case 0x0056: return "CELL_SYSMODULE_SYSUTIL_NP_UTIL";
case 0x0057: return "CELL_SYSMODULE_RUDP";
case 0x0059: return "CELL_SYSMODULE_SYSUTIL_NP_SNS";
case 0x005a: return "CELL_SYSMODULE_GEM";
case 0x005c: return "CELL_SYSMODULE_SYSUTIL_CROSS_CONTROLLER";
case 0xf00a: return "CELL_SYSMODULE_CELPENC";
case 0xf010: return "CELL_SYSMODULE_GIFDEC";
case 0xf019: return "CELL_SYSMODULE_ADEC_CELP";
case 0xf01b: return "CELL_SYSMODULE_ADEC_M2BC";
case 0xf01d: return "CELL_SYSMODULE_ADEC_M4AAC";
case 0xf01e: return "CELL_SYSMODULE_ADEC_MP3";
case 0xf023: return "CELL_SYSMODULE_IMEJP";
case 0xf028: return "CELL_SYSMODULE_SYSUTIL_MUSIC";
case 0xf029: return "CELL_SYSMODULE_PHOTO_EXPORT";
case 0xf02a: return "CELL_SYSMODULE_PRINT";
case 0xf02b: return "CELL_SYSMODULE_PHOTO_IMPORT";
case 0xf02c: return "CELL_SYSMODULE_MUSIC_EXPORT";
case 0xf02e: return "CELL_SYSMODULE_PHOTO_DECODE";
case 0xf02f: return "CELL_SYSMODULE_SYSUTIL_SEARCH";
case 0xf030: return "CELL_SYSMODULE_SYSUTIL_AVCHAT2";
case 0xf034: return "CELL_SYSMODULE_SAIL_REC";
case 0xf035: return "CELL_SYSMODULE_SYSUTIL_NP_TROPHY";
case 0xf044: return "CELL_SYSMODULE_SYSUTIL_NP_EULA";
case 0xf053: return "CELL_SYSMODULE_ADEC_AT3MULTI";
case 0xf054: return "CELL_SYSMODULE_LIBATRAC3MULTI";
case 0xffff: return "CELL_SYSMODULE_INVALID";
}
std::snprintf(tls_id_name, sizeof(tls_id_name), "0x%04X", id);
return tls_id_name;
}
error_code cellSysmoduleInitialize()
{
cellSysmodule.warning("cellSysmoduleInitialize()");
return CELL_OK;
}
error_code cellSysmoduleFinalize()
{
cellSysmodule.warning("cellSysmoduleFinalize()");
return CELL_OK;
}
error_code cellSysmoduleSetMemcontainer(u32 ct_id)
{
cellSysmodule.todo("cellSysmoduleSetMemcontainer(ct_id=0x%x)", ct_id);
return CELL_OK;
}
error_code cellSysmoduleLoadModule(u16 id)
{
cellSysmodule.warning("cellSysmoduleLoadModule(id=0x%04X=%s)", id, get_module_id(id));
const auto name = get_module_name(id);
if (!name)
{
return CELL_SYSMODULE_ERROR_UNKNOWN;
}
//if (Module<>* m = Emu.GetModuleManager().GetModuleById(id))
//{
// // CELL_SYSMODULE_ERROR_DUPLICATED shouldn't be returned
// m->Load();
//}
return CELL_OK;
}
error_code cellSysmoduleUnloadModule(u16 id)
{
cellSysmodule.warning("cellSysmoduleUnloadModule(id=0x%04X=%s)", id, get_module_id(id));
const auto name = get_module_name(id);
if (!name)
{
return CELL_SYSMODULE_ERROR_UNKNOWN;
}
//if (Module<>* m = Emu.GetModuleManager().GetModuleById(id))
//{
// if (!m->IsLoaded())
// {
// cellSysmodule.error("cellSysmoduleUnloadModule() failed: module not loaded (id=0x%04x)", id);
// return CELL_SYSMODULE_ERROR_FATAL;
// }
// m->Unload();
//}
return CELL_OK;
}
error_code cellSysmoduleIsLoaded(u16 id)
{
cellSysmodule.warning("cellSysmoduleIsLoaded(id=0x%04X=%s)", id, get_module_id(id));
const auto name = get_module_name(id);
if (!name)
{
return CELL_SYSMODULE_ERROR_UNKNOWN;
}
//if (Module<>* m = Emu.GetModuleManager().GetModuleById(id))
//{
// if (!m->IsLoaded())
// {
// cellSysmodule.warning("cellSysmoduleIsLoaded(): module not loaded (id=0x%04x)", id);
// return CELL_SYSMODULE_ERROR_UNLOADED;
// }
//}
return CELL_SYSMODULE_LOADED;
}
error_code cellSysmoduleGetImagesize()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleFetchImage()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleUnloadModuleInternal()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleLoadModuleInternal()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleUnloadModuleEx()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleLoadModuleEx()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
error_code cellSysmoduleIsLoadedEx()
{
UNIMPLEMENTED_FUNC(cellSysmodule);
return CELL_OK;
}
DECLARE(ppu_module_manager::cellSysmodule)("cellSysmodule", []()
{
REG_FUNC(cellSysmodule, cellSysmoduleInitialize);
REG_FUNC(cellSysmodule, cellSysmoduleFinalize);
REG_FUNC(cellSysmodule, cellSysmoduleSetMemcontainer);
REG_FUNC(cellSysmodule, cellSysmoduleLoadModule);
REG_FUNC(cellSysmodule, cellSysmoduleUnloadModule);
REG_FUNC(cellSysmodule, cellSysmoduleIsLoaded);
REG_FUNC(cellSysmodule, cellSysmoduleGetImagesize);
REG_FUNC(cellSysmodule, cellSysmoduleFetchImage);
REG_FUNC(cellSysmodule, cellSysmoduleUnloadModuleInternal);
REG_FUNC(cellSysmodule, cellSysmoduleLoadModuleInternal);
REG_FUNC(cellSysmodule, cellSysmoduleUnloadModuleEx);
REG_FUNC(cellSysmodule, cellSysmoduleLoadModuleEx);
REG_FUNC(cellSysmodule, cellSysmoduleIsLoadedEx);
});
| 14,101
|
C++
|
.cpp
| 387
| 34.449612
| 98
| 0.772245
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,323
|
libad_core.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/libad_core.cpp
|
#include "stdafx.h"
#include "Emu/Cell/PPUModule.h"
LOG_CHANNEL(libad_core);
error_code sceAdOpenContext()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdFlushReports()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdGetAssetInfo()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdCloseContext()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdGetSpaceInfo()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdGetConnectionInfo()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
error_code sceAdConnectContext()
{
UNIMPLEMENTED_FUNC(libad_core);
return CELL_OK;
}
DECLARE(ppu_module_manager::libad_core)("libad_core", []()
{
REG_FUNC(libad_core, sceAdOpenContext);
REG_FUNC(libad_core, sceAdFlushReports);
REG_FUNC(libad_core, sceAdGetAssetInfo);
REG_FUNC(libad_core, sceAdCloseContext);
REG_FUNC(libad_core, sceAdGetSpaceInfo);
REG_FUNC(libad_core, sceAdGetConnectionInfo);
REG_FUNC(libad_core, sceAdConnectContext);
});
| 1,052
|
C++
|
.cpp
| 48
| 20.291667
| 58
| 0.792965
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,324
|
sys_overlay.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_overlay.cpp
|
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"
#include "Emu/system_config.h"
#include "Crypto/unself.h"
#include "Crypto/unedat.h"
#include "Loader/ELF.h"
#include "sys_process.h"
#include "sys_overlay.h"
#include "sys_fs.h"
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);
extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
LOG_CHANNEL(sys_overlay);
static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vpath, u64 /*flags*/, vm::ptr<u32> entry, fs::file src = {}, s64 file_offset = 0)
{
if (!src)
{
auto [fs_error, ppath, path, lv2_file, type] = lv2_file::open(vpath, 0, 0);
if (fs_error)
{
return {fs_error, vpath};
}
src = std::move(lv2_file);
}
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
src = decrypt_self(std::move(src), reinterpret_cast<u8*>(&klic), nullptr, true);
if (!src)
{
return {CELL_ENOEXEC, +"Failed to decrypt file"};
}
ppu_exec_object obj = std::move(src);
src.close();
if (obj != elf_error::ok)
{
return {CELL_ENOEXEC, obj.operator elf_error()};
}
const auto [ovlm, error] = ppu_load_overlay(obj, false, vfs::get(vpath), file_offset);
obj.clear();
if (error)
{
if (error == CELL_CANCEL + 0u)
{
// Emulation stopped
return {};
}
return error;
}
ppu_initialize(*ovlm);
sys_overlay.success(u8"Loaded overlay: “%s” (id=0x%x)", vpath, idm::last_id());
*ovlmid = idm::last_id();
*entry = ovlm->entry;
return CELL_OK;
}
fs::file make_file_view(fs::file&& file, u64 offset, u64 size);
std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
{
const std::string vpath = ar.pop<std::string>();
const std::string path = vfs::get(vpath);
const s64 offset = ar.pop<s64>();
sys_overlay.success("lv2_overlay::load(): vpath='%s', path='%s', offset=0x%x", vpath, path, offset);
std::shared_ptr<lv2_overlay> ovlm;
fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};
if (file)
{
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
file = make_file_view(std::move(file), offset, umax);
ovlm = ppu_load_overlay(ppu_exec_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, false, path, 0, &ar).first;
if (!ovlm)
{
fmt::throw_exception("lv2_overlay::load(): ppu_load_overlay() failed. (vpath='%s', offset=0x%x)", vpath, offset);
}
}
else if (!g_cfg.savestate.state_inspection_mode.get())
{
fmt::throw_exception("lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)", vpath, offset);
}
else
{
sys_overlay.error("lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)", vpath, offset);
}
return ovlm;
}
void lv2_overlay::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_prx_overlay);
const std::string vpath = vfs::retrieve(path);
(vpath.empty() ? sys_overlay.error : sys_overlay.success)("lv2_overlay::save(): vpath='%s', offset=0x%x", vpath, offset);
ar(vpath, offset);
}
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path, u64 flags, vm::ptr<u32> entry)
{
sys_overlay.warning("sys_overlay_load_module(ovlmid=*0x%x, path=%s, flags=0x%x, entry=*0x%x)", ovlmid, path, flags, entry);
if (!g_ps3_process_info.ppc_seg)
{
// Process not permitted
return CELL_ENOSYS;
}
if (!path)
{
return CELL_EFAULT;
}
return overlay_load_module(ovlmid, path.get_ptr(), flags, entry);
}
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd, u64 offset, u64 flags, vm::ptr<u32> entry)
{
sys_overlay.warning("sys_overlay_load_module_by_fd(ovlmid=*0x%x, fd=%d, offset=0x%llx, flags=0x%x, entry=*0x%x)", ovlmid, fd, offset, flags, entry);
if (!g_ps3_process_info.ppc_seg)
{
// Process not permitted
return CELL_ENOSYS;
}
if (static_cast<s64>(offset) < 0)
{
return CELL_EINVAL;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
if (!file)
{
return CELL_EBADF;
}
std::lock_guard lock(file->mp->mutex);
if (!file->file)
{
return CELL_EBADF;
}
return overlay_load_module(ovlmid, offset ? fmt::format("%s_x%x", file->name.data(), offset) : file->name.data(), flags, entry, lv2_file::make_view(file, offset), offset);
}
// Unload an overlay module: withdraw it from the ID manager, free its segments,
// and finalize its PPU translation state.
error_code sys_overlay_unload_module(u32 ovlmid)
{
    sys_overlay.warning("sys_overlay_unload_module(ovlmid=0x%x)", ovlmid);

    if (!g_ps3_process_info.ppc_seg)
    {
        // Process not permitted
        return CELL_ENOSYS;
    }

    const auto _main = idm::withdraw<lv2_obj, lv2_overlay>(ovlmid);

    if (!_main)
    {
        return CELL_ESRCH;
    }

    // Release the guest memory backing each loaded segment
    for (auto& seg : _main->segs)
    {
        vm::dealloc(seg.addr);
    }

    ppu_finalize(*_main);
    return CELL_OK;
}
| 4,939
|
C++
|
.cpp
| 150
| 30.533333
| 189
| 0.683777
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,325
|
sys_timer.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_timer.cpp
|
#include "stdafx.h"
#include "sys_timer.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "util/asm.hpp"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "sys_event.h"
#include "sys_process.h"
#include <thread>
#include <deque>
LOG_CHANNEL(sys_timer);
// Background worker that services all lv2 timers: it periodically checks for
// expirations and delivers the corresponding event-queue notifications.
struct lv2_timer_thread
{
    // Protects the 'timers' list below
    shared_mutex mutex;
    std::deque<std::shared_ptr<lv2_timer>> timers;

    lv2_timer_thread();
    void operator()();

    //SAVESTATE_INIT_POS(46); // FREE SAVESTATE_INIT_POS number

    static constexpr auto thread_name = "Timer Thread"sv;
};
// Deserializing constructor: field order must match lv2_timer::save exactly.
lv2_timer::lv2_timer(utils::serial& ar)
    : lv2_obj{1}
    , state(ar)
    , port(lv2_event_queue::load_ptr(ar, port, "timer"))
    , source(ar)
    , data1(ar)
    , data2(ar)
    , expire(ar)
    , period(ar)
{
}
// Serialize the timer; the field order here is the contract read back by the
// deserializing constructor.
void lv2_timer::save(utils::serial& ar)
{
    USING_SERIALIZATION_VERSION(lv2_sync);
    ar(state), lv2_event_queue::save_ptr(ar, port.get()), ar(source, data1, data2, expire, period);
}
// Lock-free fast path for the timer thread: returns the remaining time until
// expiration, or umax when the timer is not running. When the timer has
// expired, takes the lock and performs the authoritative check.
u64 lv2_timer::check(u64 _now) noexcept
{
    if (state == SYS_TIMER_STATE_RUN)
    {
        const u64 deadline = expire;

        if (_now < deadline)
        {
            // Not expired yet: report how long the caller may sleep
            return deadline - _now;
        }

        // Expired (or aborting): perform the last accurate check for event under the lock
        lv2_obj::notify_all_t notify;
        std::lock_guard lock(mutex);
        return check_unlocked(_now);
    }

    return umax;
}
// Authoritative expiration check; must be called with the timer mutex held.
// Returns the time until the next expiration, or umax when nothing is pending.
u64 lv2_timer::check_unlocked(u64 _now) noexcept
{
    const u64 next = expire;

    // Nothing to do if not yet expired or not running
    if (_now < next || state != SYS_TIMER_STATE_RUN)
    {
        return umax;
    }

    // Deliver the timer event if an event queue is connected
    if (port)
    {
        port->send(source, data1, data2, next);
    }

    if (period)
    {
        // Set next expiration time and check again
        const u64 expire0 = utils::add_saturate<u64>(next, period);
        expire.release(expire0);
        return utils::sub_saturate<u64>(expire0, _now);
    }

    // Stop after oneshot
    state.release(SYS_TIMER_STATE_STOP);
    return umax;
}
// On construction (e.g. after savestate load), repopulate the timers list with all
// lv2_timer objects registered in the ID manager. Deferred via PostponeInitCode
// because the ID manager may not be fully populated yet at this point.
lv2_timer_thread::lv2_timer_thread()
{
    Emu.PostponeInitCode([this]()
    {
        idm::select<lv2_obj, lv2_timer>([&](u32 id, lv2_timer&)
        {
            timers.emplace_back(idm::get_unlocked<lv2_obj, lv2_timer>(id));
        });
    });
}
// Timer thread main loop: sleep until the nearest expiration (scaled by the
// configured clock multiplier), then service every registered timer.
void lv2_timer_thread::operator()()
{
    u64 sleep_time = 0;

    while (true)
    {
        if (sleep_time != umax)
        {
            // Scale time
            sleep_time = std::min(sleep_time, u64{umax} / 100) * 100 / g_cfg.core.clocks_scale;
        }

        thread_ctrl::wait_for(sleep_time);

        if (thread_ctrl::state() == thread_state::aborting)
        {
            break;
        }

        // Default: sleep indefinitely unless a timer advises an earlier wakeup
        sleep_time = umax;

        if (Emu.IsPaused())
        {
            // While paused, poll at a coarse 10ms interval instead of servicing timers
            sleep_time = 10000;
            continue;
        }

        const u64 _now = get_guest_system_time();

        reader_lock lock(mutex);

        for (const auto& timer : timers)
        {
            // lv2_obj::check() re-validates the timer still exists before each check
            while (lv2_obj::check(timer))
            {
                if (thread_ctrl::state() == thread_state::aborting)
                {
                    break;
                }

                if (const u64 advised_sleep_time = timer->check(_now))
                {
                    // Track the smallest advised wakeup across all timers
                    if (sleep_time > advised_sleep_time)
                    {
                        sleep_time = advised_sleep_time;
                    }
                    break;
                }
            }
        }
    }
}
// Create a new lv2 timer and register it with the timer thread.
error_code sys_timer_create(ppu_thread& ppu, vm::ptr<u32> timer_id)
{
    ppu.state += cpu_flag::wait;

    sys_timer.warning("sys_timer_create(timer_id=*0x%x)", timer_id);

    if (auto ptr = idm::make_ptr<lv2_obj, lv2_timer>())
    {
        auto& thread = g_fxo->get<named_thread<lv2_timer_thread>>();
        {
            std::lock_guard lock(thread.mutex);

            // Theoretically could have been destroyed by sys_timer_destroy by now
            if (auto it = std::find(thread.timers.begin(), thread.timers.end(), ptr); it == thread.timers.end())
            {
                thread.timers.emplace_back(std::move(ptr));
            }
        }

        // Write the result only after returning to a stable PPU state
        ppu.check_state();
        *timer_id = idm::last_id();
        return CELL_OK;
    }

    return CELL_EAGAIN;
}
// Destroy a timer. Fails with CELL_EISCONN if an event queue is still connected.
error_code sys_timer_destroy(ppu_thread& ppu, u32 timer_id)
{
    ppu.state += cpu_flag::wait;

    sys_timer.warning("sys_timer_destroy(timer_id=0x%x)", timer_id);

    auto timer = idm::withdraw<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
    {
        // A connected event queue blocks destruction
        if (reader_lock lock(timer.mutex); lv2_obj::check(timer.port))
        {
            return CELL_EISCONN;
        }

        timer.exists--;
        return {};
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    if (timer.ret)
    {
        return timer.ret;
    }

    // Also remove it from the timer thread's service list
    auto& thread = g_fxo->get<named_thread<lv2_timer_thread>>();
    std::lock_guard lock(thread.mutex);

    if (auto it = std::find(thread.timers.begin(), thread.timers.end(), timer.ptr); it != thread.timers.end())
    {
        thread.timers.erase(it);
    }

    return CELL_OK;
}
// Query a timer's current state/expiration/period into guest memory.
error_code sys_timer_get_information(ppu_thread& ppu, u32 timer_id, vm::ptr<sys_timer_information_t> info)
{
    ppu.state += cpu_flag::wait;

    sys_timer.trace("sys_timer_get_information(timer_id=0x%x, info=*0x%x)", timer_id, info);

    sys_timer_information_t _info{};

    const u64 now = get_guest_system_time();

    const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer)
    {
        std::lock_guard lock(timer.mutex);

        // Service a pending expiration first so the reported state is up to date
        timer.check_unlocked(now);
        timer.get_information(_info);
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    // Copy to guest memory only after returning to a stable PPU state
    ppu.check_state();
    std::memcpy(info.get_ptr(), &_info, info.size());
    return CELL_OK;
}
// Start a timer. period == 0 requests a oneshot firing at 'base_time';
// period != 0 requests a periodic timer whose phase is aligned to 'base_time'.
error_code _sys_timer_start(ppu_thread& ppu, u32 timer_id, u64 base_time, u64 period)
{
    ppu.state += cpu_flag::wait;

    (period ? sys_timer.warning : sys_timer.trace)("_sys_timer_start(timer_id=0x%x, base_time=0x%llx, period=0x%llx)", timer_id, base_time, period);

    const u64 start_time = get_guest_system_time();

    if (period && period < 100)
    {
        // Invalid periodic timer
        return CELL_EINVAL;
    }

    const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
    {
        std::lock_guard lock(timer.mutex);

        // LV2 Disassembly: Simple nullptr check (assignment test, do not use lv2_obj::check here)
        if (!timer.port)
        {
            return CELL_ENOTCONN;
        }

        // Service any pending expiration before inspecting the state
        timer.check_unlocked(start_time);

        if (timer.state != SYS_TIMER_STATE_STOP)
        {
            return CELL_EBUSY;
        }

        if (!period && start_time >= base_time)
        {
            // Invalid oneshot
            return CELL_ETIMEDOUT;
        }

        const u64 expire = period == 0 ? base_time : // oneshot
            base_time == 0 ? utils::add_saturate(start_time, period) : // periodic timer with no base (using start time as base)
            start_time < utils::add_saturate(base_time, period) ? utils::add_saturate(base_time, period) : // periodic with base time over start time
            [&]() -> u64 // periodic timer base before start time (align to be at least a period over start time)
        {
            // Optimized from a loop in LV2:
            // do
            // {
            //     base_time += period;
            // }
            // while (base_time < start_time);
            const u64 start_time_with_base_time_reminder = utils::add_saturate(start_time - start_time % period, base_time % period);

            return utils::add_saturate(start_time_with_base_time_reminder, start_time_with_base_time_reminder < start_time ? period : 0);
        }();

        timer.expire = expire;
        timer.period = period;
        timer.state = SYS_TIMER_STATE_RUN;
        return {};
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    if (timer.ret)
    {
        if (timer.ret == CELL_ETIMEDOUT)
        {
            // ETIMEDOUT is a defined outcome for an already-passed oneshot, not a failure
            return not_an_error(timer.ret);
        }

        return timer.ret;
    }

    // Kick the timer thread so it recomputes its sleep deadline
    g_fxo->get<named_thread<lv2_timer_thread>>()([]{});
    return CELL_OK;
}
// Stop a timer (servicing any final pending expiration first).
error_code sys_timer_stop(ppu_thread& ppu, u32 timer_id)
{
    ppu.state += cpu_flag::wait;

    sys_timer.trace("sys_timer_stop()");

    // NOTE: notify_all_t is constructed in the capture list, before the lock is taken
    const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [now = get_guest_system_time(), notify = lv2_obj::notify_all_t()](lv2_timer& timer)
    {
        std::lock_guard lock(timer.mutex);
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    return CELL_OK;
}
// Connect a timer to an event queue; fired timers will send events there.
// 'name' of 0 generates a default event source from the process id and timer id.
error_code sys_timer_connect_event_queue(ppu_thread& ppu, u32 timer_id, u32 queue_id, u64 name, u64 data1, u64 data2)
{
    ppu.state += cpu_flag::wait;

    sys_timer.warning("sys_timer_connect_event_queue(timer_id=0x%x, queue_id=0x%x, name=0x%llx, data1=0x%llx, data2=0x%llx)", timer_id, queue_id, name, data1, data2);

    const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
    {
        // Resolve the target queue before locking the timer
        const auto found = idm::find_unlocked<lv2_obj, lv2_event_queue>(queue_id);

        if (!found)
        {
            return CELL_ESRCH;
        }

        std::lock_guard lock(timer.mutex);

        if (lv2_obj::check(timer.port))
        {
            return CELL_EISCONN;
        }

        // Connect event queue
        timer.port   = std::static_pointer_cast<lv2_event_queue>(found->second);
        timer.source = name ? name : (u64{process_getpid() + 0u} << 32) | u64{timer_id};
        timer.data1  = data1;
        timer.data2  = data2;
        return {};
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    if (timer.ret)
    {
        return timer.ret;
    }

    return CELL_OK;
}
// Disconnect the event queue from a timer; also stops the timer.
error_code sys_timer_disconnect_event_queue(ppu_thread& ppu, u32 timer_id)
{
    ppu.state += cpu_flag::wait;

    sys_timer.warning("sys_timer_disconnect_event_queue(timer_id=0x%x)", timer_id);

    const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [now = get_guest_system_time(), notify = lv2_obj::notify_all_t()](lv2_timer& timer) -> CellError
    {
        std::lock_guard lock(timer.mutex);

        // Service a final pending expiration, then force-stop
        timer.check_unlocked(now);
        timer.state = SYS_TIMER_STATE_STOP;

        if (!lv2_obj::check(timer.port))
        {
            return CELL_ENOTCONN;
        }

        timer.port.reset();
        return {};
    });

    if (!timer)
    {
        return CELL_ESRCH;
    }

    if (timer.ret)
    {
        return timer.ret;
    }

    return CELL_OK;
}
// Sleep the calling PPU thread for 'sleep_time' seconds.
// Implemented by converting to microseconds and delegating to sys_timer_usleep.
error_code sys_timer_sleep(ppu_thread& ppu, u32 sleep_time)
{
    ppu.state += cpu_flag::wait;

    sys_timer.trace("sys_timer_sleep(sleep_time=%d)", sleep_time);

    const u64 useconds = u64{1000000} * sleep_time;
    return sys_timer_usleep(ppu, useconds);
}
// Sleep the calling PPU thread for 'sleep_time' microseconds (guest time).
// A configurable addend compensates for scheduling overhead; a zero sleep yields.
error_code sys_timer_usleep(ppu_thread& ppu, u64 sleep_time)
{
    ppu.state += cpu_flag::wait;

    sys_timer.trace("sys_timer_usleep(sleep_time=0x%llx)", sleep_time);

    if (sleep_time)
    {
        const s64 add_time = g_cfg.core.usleep_addend;

        // Over/underflow checks
        if (add_time >= 0)
        {
            sleep_time = utils::add_saturate<u64>(sleep_time, add_time);
        }
        else
        {
            // Never sleep less than 1us even with a negative addend
            sleep_time = std::max<u64>(1, utils::sub_saturate<u64>(sleep_time, -add_time));
        }

        lv2_obj::sleep(ppu, g_cfg.core.sleep_timers_accuracy < sleep_timers_accuracy_level::_usleep ? sleep_time : 0);

        if (!lv2_obj::wait_timeout(sleep_time, &ppu, true, true))
        {
            // Interrupted (e.g. savestate): re-run this syscall later
            ppu.state += cpu_flag::again;
        }
    }
    else
    {
        std::this_thread::yield();
    }

    return CELL_OK;
}
| 10,122
|
C++
|
.cpp
| 376
| 24.087766
| 163
| 0.675174
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,326
|
sys_mutex.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_mutex.cpp
|
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
#include "sys_mutex.h"
LOG_CHANNEL(sys_mutex);
// Deserializing constructor; field order must match lv2_mutex::save.
lv2_mutex::lv2_mutex(utils::serial& ar)
    : protocol(ar)
    , recursive(ar)
    , adaptive(ar)
    , key(ar)
    , name(ar)
{
    ar(lock_count, control.raw().owner);

    // For backwards compatibility
    control.raw().owner >>= 1;
}
// Reconstruct a mutex from a savestate and register it under its IPC key (if any).
std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
{
    const auto restored = std::make_shared<lv2_mutex>(ar);
    return lv2_obj::load(restored->key, restored);
}
// Serialize the mutex; the owner field is shifted left by one for backwards
// compatibility with the format expected by the deserializing constructor.
void lv2_mutex::save(utils::serial& ar)
{
    ar(protocol, recursive, adaptive, key, name, lock_count, control.raw().owner << 1);
}
// Create an lv2 mutex from the given attributes (protocol, recursion, IPC key).
error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
{
    ppu.state += cpu_flag::wait;

    sys_mutex.trace("sys_mutex_create(mutex_id=*0x%x, attr=*0x%x)", mutex_id, attr);

    if (!mutex_id || !attr)
    {
        return CELL_EFAULT;
    }

    // Copy attributes out of guest memory once
    const auto _attr = *attr;

    const u64 ipc_key = lv2_obj::get_key(_attr);

    if (ipc_key)
    {
        sys_mutex.warning("sys_mutex_create(mutex_id=*0x%x, attr=*0x%x): IPC=0x%016x", mutex_id, attr, ipc_key);
    }

    switch (_attr.protocol)
    {
    case SYS_SYNC_FIFO: break;
    case SYS_SYNC_PRIORITY: break;
    case SYS_SYNC_PRIORITY_INHERIT:
        // Accepted, but priority inheritance is not fully implemented
        sys_mutex.warning("sys_mutex_create(): SYS_SYNC_PRIORITY_INHERIT");
        break;
    default:
    {
        sys_mutex.error("sys_mutex_create(): unknown protocol (0x%x)", _attr.protocol);
        return CELL_EINVAL;
    }
    }

    switch (_attr.recursive)
    {
    case SYS_SYNC_RECURSIVE: break;
    case SYS_SYNC_NOT_RECURSIVE: break;
    default:
    {
        sys_mutex.error("sys_mutex_create(): unknown recursive (0x%x)", _attr.recursive);
        return CELL_EINVAL;
    }
    }

    if (_attr.adaptive != SYS_SYNC_NOT_ADAPTIVE)
    {
        sys_mutex.todo("sys_mutex_create(): unexpected adaptive (0x%x)", _attr.adaptive);
    }

    if (auto error = lv2_obj::create<lv2_mutex>(_attr.pshared, _attr.ipc_key, _attr.flags, [&]()
    {
        return std::make_shared<lv2_mutex>(
            _attr.protocol,
            _attr.recursive,
            _attr.adaptive,
            ipc_key,
            _attr.name_u64);
    }))
    {
        return error;
    }

    // Write the result only after returning to a stable PPU state
    ppu.check_state();
    *mutex_id = idm::last_id();
    return CELL_OK;
}
// Destroy a mutex. Fails if it is currently owned (EBUSY) or has attached
// condition variables (EPERM).
error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id)
{
    ppu.state += cpu_flag::wait;

    sys_mutex.trace("sys_mutex_destroy(mutex_id=0x%x)", mutex_id);

    const auto mutex = idm::withdraw<lv2_obj, lv2_mutex>(mutex_id, [](lv2_mutex& mutex) -> CellError
    {
        std::lock_guard lock(mutex.mutex);

        // An owned mutex cannot be destroyed
        if (atomic_storage<u32>::load(mutex.control.raw().owner))
        {
            return CELL_EBUSY;
        }

        // Condition variables still reference this mutex
        if (mutex.cond_count)
        {
            return CELL_EPERM;
        }

        lv2_obj::on_id_destroy(mutex, mutex.key);
        return {};
    });

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (mutex->key)
    {
        sys_mutex.warning("sys_mutex_destroy(mutex_id=0x%x): IPC=0x%016x", mutex_id, mutex->key);
    }

    if (mutex.ret)
    {
        return mutex.ret;
    }

    return CELL_OK;
}
// Lock a mutex with an optional timeout (0 = infinite).
// Fast path: try_lock (with brief busy-waiting). Slow path: enqueue as a waiter,
// sleep, and handle signal / timeout / emulation-stop wakeups.
error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
{
    ppu.state += cpu_flag::wait;

    sys_mutex.trace("sys_mutex_lock(mutex_id=0x%x, timeout=0x%llx)", mutex_id, timeout);

    const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex& mutex)
    {
        CellError result = mutex.try_lock(ppu);

        if (result == CELL_EBUSY && !atomic_storage<ppu_thread*>::load(mutex.control.raw().sq))
        {
            // Try busy waiting a bit if advantageous
            for (u32 i = 0, end = lv2_obj::has_ppus_in_running_state() ? 3 : 10; id_manager::g_mutex.is_lockable() && i < end; i++)
            {
                busy_wait(300);
                result = mutex.try_lock(ppu);

                // Stop spinning once locked or once other waiters appear
                if (!result || atomic_storage<ppu_thread*>::load(mutex.control.raw().sq))
                {
                    break;
                }
            }
        }

        if (result == CELL_EBUSY)
        {
            lv2_obj::prepare_for_sleep(ppu);

            ppu.cancel_sleep = 1;

            // try_own may still succeed here (owner released in the meantime);
            // otherwise go to sleep as a waiter
            if (mutex.try_own(ppu) || !mutex.sleep(ppu, timeout))
            {
                result = {};
            }

            if (ppu.cancel_sleep != 1)
            {
                notify.cleanup();
            }

            ppu.cancel_sleep = 0;
        }

        return result;
    });

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (mutex.ret)
    {
        if (mutex.ret != CELL_EBUSY)
        {
            return mutex.ret;
        }
    }
    else
    {
        // Acquired on the fast path
        return CELL_OK;
    }

    // Slow path: we are enqueued as a waiter; wait for a signal or timeout
    ppu.gpr[3] = CELL_OK;

    while (auto state = +ppu.state)
    {
        if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
        {
            // Ownership was handed to us by the previous owner
            break;
        }

        if (is_stopped(state))
        {
            // Emulation is stopping: if still queued, request this syscall to re-run later
            std::lock_guard lock(mutex->mutex);

            for (auto cpu = atomic_storage<ppu_thread*>::load(mutex->control.raw().sq); cpu; cpu = cpu->next_cpu)
            {
                if (cpu == &ppu)
                {
                    ppu.state += cpu_flag::again;
                    return {};
                }
            }

            break;
        }

        // Short spin before falling back to a real wait
        for (usz i = 0; cpu_flag::signal - ppu.state && i < 40; i++)
        {
            busy_wait(500);
        }

        if (ppu.state & cpu_flag::signal)
        {
            continue;
        }

        if (timeout)
        {
            if (lv2_obj::wait_timeout(timeout, &ppu))
            {
                // Wait for rescheduling
                if (ppu.check_state())
                {
                    continue;
                }

                ppu.state += cpu_flag::wait;

                if (!atomic_storage<ppu_thread*>::load(mutex->control.raw().sq))
                {
                    // Waiters queue is empty, so the thread must have been signaled
                    mutex->mutex.lock_unlock();
                    break;
                }

                // Timed out: atomically remove ourselves from the waiters queue.
                // If removal fails, a signal raced with the timeout and won.
                std::lock_guard lock(mutex->mutex);

                bool success = false;

                mutex->control.fetch_op([&](lv2_mutex::control_data_t& data)
                {
                    success = false;

                    ppu_thread* sq = static_cast<ppu_thread*>(data.sq);

                    const bool retval = &ppu == sq;

                    if (!mutex->unqueue<false>(sq, &ppu))
                    {
                        return false;
                    }

                    success = true;

                    if (!retval)
                    {
                        return false;
                    }

                    data.sq = sq;
                    return true;
                });

                if (success)
                {
                    ppu.next_cpu = nullptr;
                    ppu.gpr[3] = CELL_ETIMEDOUT;
                }

                break;
            }
        }
        else
        {
            ppu.state.wait(state);
        }
    }

    return not_an_error(ppu.gpr[3]);
}
// Attempt to lock a mutex without blocking.
// CELL_EBUSY (already owned) is an expected outcome, reported via not_an_error.
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)
{
    ppu.state += cpu_flag::wait;

    sys_mutex.trace("sys_mutex_trylock(mutex_id=0x%x)", mutex_id);

    const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
    {
        return mutex.try_lock(ppu);
    });

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (!mutex.ret)
    {
        return CELL_OK;
    }

    if (mutex.ret == CELL_EBUSY)
    {
        return not_an_error(CELL_EBUSY);
    }

    return mutex.ret;
}
// Unlock a mutex. If waiters exist, ownership is transferred to the next waiter
// and that thread is awoken.
error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
{
    ppu.state += cpu_flag::wait;

    sys_mutex.trace("sys_mutex_unlock(mutex_id=0x%x)", mutex_id);

    const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_mutex& mutex) -> CellError
    {
        auto result = mutex.try_unlock(ppu);

        if (result == CELL_EBUSY)
        {
            // Waiters are present: hand ownership over under the lock
            std::lock_guard lock(mutex.mutex);

            if (auto cpu = mutex.reown<ppu_thread>())
            {
                if (cpu->state & cpu_flag::again)
                {
                    // The waiter is being rewound (savestate); retry this syscall later
                    ppu.state += cpu_flag::again;
                    return {};
                }

                mutex.awake(cpu);
            }

            result = {};
        }

        notify.cleanup();
        return result;
    });

    if (!mutex)
    {
        return CELL_ESRCH;
    }

    if (mutex.ret)
    {
        return mutex.ret;
    }

    return CELL_OK;
}
| 6,982
|
C++
|
.cpp
| 302
| 19.817881
| 129
| 0.640594
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,327
|
sys_storage.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_storage.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_event.h"
#include "sys_fs.h"
#include "util/shared_ptr.hpp"
#include "sys_storage.h"
LOG_CHANNEL(sys_storage);
namespace
{
    // File-local singleton state for the sys_storage async API
    struct storage_manager
    {
        // This is probably wrong and should be assigned per fd or something
        atomic_ptr<std::shared_ptr<lv2_event_queue>> asyncequeue;
    };
}
// Open a storage device handle. Mostly a stub: registers an lv2_storage object
// with an empty backing file.
error_code sys_storage_open(u64 device, u64 mode, vm::ptr<u32> fd, u64 flags)
{
    sys_storage.todo("sys_storage_open(device=0x%x, mode=0x%x, fd=*0x%x, flags=0x%x)", device, mode, fd, flags);

    if (device == 0)
    {
        return CELL_ENOENT;
    }

    if (!fd)
    {
        return CELL_EFAULT;
    }

    // Mask out the per-device index bits (bits 40-47); currently unused
    [[maybe_unused]] u64 storage_id = device & 0xFFFFF00FFFFFFFF;

    // No actual backing file is opened yet
    fs::file file;

    if (const u32 id = idm::make<lv2_storage>(device, std::move(file), mode, flags))
    {
        *fd = id;
        return CELL_OK;
    }

    return CELL_EAGAIN;
}
// Close a storage handle by removing it from the ID manager.
// NOTE: always returns CELL_OK, even if the handle did not exist.
error_code sys_storage_close(u32 fd)
{
    sys_storage.todo("sys_storage_close(fd=0x%x)", fd);

    idm::remove<lv2_storage>(fd);
    return CELL_OK;
}
// Read sectors from a storage handle into a bounce buffer.
// Sectors are assumed to be 0x200 bytes here; the buffer is zeroed first.
error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors, vm::ptr<void> bounce_buf, vm::ptr<u32> sectors_read, u64 flags)
{
    sys_storage.todo("sys_storage_read(fd=0x%x, mode=0x%x, start_sector=0x%x, num_sectors=0x%x, bounce_buf=*0x%x, sectors_read=*0x%x, flags=0x%x)", fd, mode, start_sector, num_sectors, bounce_buf, sectors_read, flags);

    if (!bounce_buf || !sectors_read)
    {
        return CELL_EFAULT;
    }

    // Zero the destination so unbacked reads return zeros
    std::memset(bounce_buf.get_ptr(), 0, num_sectors * 0x200ull);

    const auto handle = idm::get<lv2_storage>(fd);

    if (!handle)
    {
        return CELL_ESRCH;
    }

    if (handle->file)
    {
        handle->file.seek(start_sector * 0x200ull);
        const u64 size = num_sectors * 0x200ull;
        const u64 result = lv2_file::op_read(handle->file, bounce_buf, size);

        // Report only the sectors actually read from the backing file
        num_sectors = ::narrow<u32>(result / 0x200ull);
    }

    *sectors_read = num_sectors;

    return CELL_OK;
}
// Write sectors to a storage handle. Stub: no data is actually persisted; the
// full request is reported as written.
// Fix: the log format string contained a malformed token "data=*=0x%x".
error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector, u32 num_sectors, vm::ptr<void> data, vm::ptr<u32> sectors_wrote, u64 flags)
{
    sys_storage.todo("sys_storage_write(fd=0x%x, mode=0x%x, start_sector=0x%x, num_sectors=0x%x, data=*0x%x, sectors_wrote=*0x%x, flags=0x%llx)", fd, mode, start_sector, num_sectors, data, sectors_wrote, flags);

    if (!sectors_wrote)
    {
        return CELL_EFAULT;
    }

    const auto handle = idm::get<lv2_storage>(fd);

    if (!handle)
    {
        return CELL_ESRCH;
    }

    // Pretend everything was written (no device emulation yet)
    *sectors_wrote = num_sectors;

    return CELL_OK;
}
// Send a raw command to a storage device. Stub: command is ignored.
// Fix: the log format string had a broken specifier "in=*0x%," (missing 'x').
error_code sys_storage_send_device_command(u32 dev_handle, u64 cmd, vm::ptr<void> in, u64 inlen, vm::ptr<void> out, u64 outlen)
{
    sys_storage.todo("sys_storage_send_device_command(dev_handle=0x%x, cmd=0x%llx, in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x)", dev_handle, cmd, in, inlen, out, outlen);

    return CELL_OK;
}
// Configure async storage notifications: remember the event queue that async
// completions should be posted to.
error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id, u32 unk)
{
    sys_storage.todo("sys_storage_async_configure(fd=0x%x, io_buf=0x%x, equeue_id=0x%x, unk=*0x%x)", fd, io_buf, equeue_id, unk);

    auto& manager = g_fxo->get<storage_manager>();

    if (auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id))
    {
        manager.asyncequeue.store(queue);
    }
    else
    {
        return CELL_ESRCH;
    }

    return CELL_OK;
}
// Async device command stub: immediately posts a completion event to the queue
// registered by sys_storage_async_configure (if any).
error_code sys_storage_async_send_device_command(u32 dev_handle, u64 cmd, vm::ptr<void> in, u64 inlen, vm::ptr<void> out, u64 outlen, u64 unk)
{
    sys_storage.todo("sys_storage_async_send_device_command(dev_handle=0x%x, cmd=0x%llx, in=*0x%x, inlen=0x%x, out=*0x%x, outlen=0x%x, unk=0x%x)", dev_handle, cmd, in, inlen, out, outlen, unk);

    auto& manager = g_fxo->get<storage_manager>();

    // NOTE(review): event payload semantics are guessed ('unk' repeated) — verify against real hw
    if (auto q = *manager.asyncequeue.load())
    {
        q->send(0, unk, unk, unk);
    }

    return CELL_OK;
}
// Async read stub: no-op.
error_code sys_storage_async_read()
{
    sys_storage.todo("sys_storage_async_read()");
    return CELL_OK;
}

// Async write stub: no-op.
error_code sys_storage_async_write()
{
    sys_storage.todo("sys_storage_async_write()");
    return CELL_OK;
}

// Async cancel stub: no-op.
error_code sys_storage_async_cancel()
{
    sys_storage.todo("sys_storage_async_cancel()");
    return CELL_OK;
}
// Report per-device geometry/flags for a storage device id.
// The device id encodes a storage type (low/high bits) and a device index
// (bits 32-39). Sector counts were taken from a kernel dump (see comments).
// Returns -5 (as not_an_error) for an out-of-range device index.
error_code sys_storage_get_device_info(u64 device, vm::ptr<StorageDeviceInfo> buffer)
{
    sys_storage.todo("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)", device, buffer);

    if (!buffer)
    {
        return CELL_EFAULT;
    }

    memset(buffer.get_ptr(), 0, sizeof(StorageDeviceInfo));

    // Split the id: storage type (index bits masked out) and device index
    u64 storage = device & 0xFFFFF00FFFFFFFF;
    u32 dev_num = (device >> 32) & 0xFF;

    if (storage == ATA_HDD) // dev_hdd?
    {
        if (dev_num > 2)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        buffer->sector_size = 0x200;
        buffer->one = 1;
        buffer->flags[1] = 1;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;

        // set partition size based on dev_num
        // stole these sizes from kernel dump, unknown if they are 100% correct
        // vsh reports only 2 partitions even though there is 3 sizes
        switch (dev_num)
        {
        case 0:
            buffer->sector_count = 0x2542EAB0; // possibly total size
            break;
        case 1:
            buffer->sector_count = 0x24FAEA98; // which makes this hdd0
            break;
        case 2:
            buffer->sector_count = 0x3FFFF8; // and this one hdd1
            break;
        }
    }
    else if (storage == BDVD_DRIVE) // dev_bdvd?
    {
        if (dev_num > 0)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        buffer->sector_count = 0x4D955;
        buffer->sector_size = 0x800; // optical media uses 2048-byte sectors
        buffer->one = 1;
        buffer->flags[1] = 0;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;
    }
    else if (storage == USB_MASS_STORAGE_1(0))
    {
        if (dev_num > 0)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        /*buffer->sector_count = 0x4D955;*/
        buffer->sector_size = 0x200;
        buffer->one = 1;
        buffer->flags[1] = 0;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;
    }
    else if (storage == NAND_FLASH)
    {
        if (dev_num > 6)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        buffer->sector_size = 0x200;
        buffer->one = 1;
        buffer->flags[1] = 1;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;

        // see ata_hdd for explanation
        switch (dev_num)
        {
        case 0: buffer->sector_count = 0x80000;
            break;
        case 1: buffer->sector_count = 0x75F8;
            break;
        case 2: buffer->sector_count = 0x63E00;
            break;
        case 3: buffer->sector_count = 0x8000;
            break;
        case 4: buffer->sector_count = 0x400;
            break;
        case 5: buffer->sector_count = 0x2000;
            break;
        case 6: buffer->sector_count = 0x200;
            break;
        }
    }
    else if (storage == NOR_FLASH)
    {
        if (dev_num > 3)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        buffer->sector_size = 0x200;
        buffer->one = 1;
        buffer->flags[1] = 0;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;

        // see ata_hdd for explanation
        switch (dev_num)
        {
        case 0: buffer->sector_count = 0x8000;
            break;
        case 1: buffer->sector_count = 0x77F8;
            break;
        case 2: buffer->sector_count = 0x100; // offset, 0x20000
            break;
        case 3: buffer->sector_count = 0x400;
            break;
        }
    }
    else if (storage == NAND_UNK)
    {
        if (dev_num > 1)
        {
            return not_an_error(-5);
        }
        std::string u = "unnamed";
        memcpy(buffer->name, u.c_str(), u.size());
        buffer->sector_size = 0x800;
        buffer->one = 1;
        buffer->flags[1] = 0;
        buffer->flags[2] = 1;
        buffer->flags[7] = 1;

        // see ata_hdd for explanation
        switch (dev_num)
        {
        case 0: buffer->sector_count = 0x7FFFFFFF;
            break;
        }
    }
    else
    {
        // Unknown storage type: leave the zeroed buffer and log
        sys_storage.error("sys_storage_get_device_info(device=0x%x, buffer=*0x%x)", device, buffer);
    }

    return CELL_OK;
}
// Report the number of storage types and total devices known to the system.
// Fix: validate both output pointers before writing anything, so an EFAULT on
// 'devices' cannot leave '*storages' already modified (partial write).
error_code sys_storage_get_device_config(vm::ptr<u32> storages, vm::ptr<u32> devices)
{
    sys_storage.todo("sys_storage_get_device_config(storages=*0x%x, devices=*0x%x)", storages, devices);

    if (!storages || !devices)
    {
        return CELL_EFAULT;
    }

    *storages = 6;  // number of storage types
    *devices = 17;  // total device count (matches sys_storage_report_devices)

    return CELL_OK;
}
// Copy a window [start, start+devices) of the fixed 17-entry device id table
// into guest memory.
error_code sys_storage_report_devices(u32 storages, u32 start, u32 devices, vm::ptr<u64> device_ids)
{
    sys_storage.todo("sys_storage_report_devices(storages=0x%x, start=0x%x, devices=0x%x, device_ids=0x%x)", storages, start, devices, device_ids);

    if (!device_ids)
    {
        return CELL_EFAULT;
    }

    // Compile-time table of all known device ids (type in the low/high bits,
    // device index in bits 32-39)
    static constexpr std::array<u64, 0x11> all_devs = []
    {
        std::array<u64, 0x11> all_devs{};
        all_devs[0] = 0x10300000000000A;

        for (int i = 0; i < 7; ++i)
        {
            all_devs[i + 1] = 0x100000000000001 | (static_cast<u64>(i) << 32);
        }

        for (int i = 0; i < 3; ++i)
        {
            all_devs[i + 8] = 0x101000000000007 | (static_cast<u64>(i) << 32);
        }

        all_devs[11] = 0x101000000000006;

        for (int i = 0; i < 4; ++i)
        {
            all_devs[i + 12] = 0x100000000000004 | (static_cast<u64>(i) << 32);
        }

        all_devs[16] = 0x100000000000003;
        return all_devs;
    }();

    // Bounds check: the requested window must lie entirely inside the table
    if (!devices || start >= all_devs.size() || devices > all_devs.size() - start)
    {
        return CELL_EINVAL;
    }

    std::copy_n(all_devs.begin() + start, devices, device_ids.get_ptr());
    return CELL_OK;
}
// Configure medium-change (insert/eject) event notification. Stub: no-op.
error_code sys_storage_configure_medium_event(u32 fd, u32 equeue_id, u32 c)
{
    sys_storage.todo("sys_storage_configure_medium_event(fd=0x%x, equeue_id=0x%x, c=0x%x)", fd, equeue_id, c);
    return CELL_OK;
}

// Set medium polling interval. Stub: no-op.
error_code sys_storage_set_medium_polling_interval()
{
    sys_storage.todo("sys_storage_set_medium_polling_interval()");
    return CELL_OK;
}

// Create a storage region. Stub: no-op.
error_code sys_storage_create_region()
{
    sys_storage.todo("sys_storage_create_region()");
    return CELL_OK;
}

// Delete a storage region. Stub: no-op.
error_code sys_storage_delete_region()
{
    sys_storage.todo("sys_storage_delete_region()");
    return CELL_OK;
}
// Execute a device command with separate command/data buffers. Stub: no-op.
error_code sys_storage_execute_device_command(u32 fd, u64 cmd, vm::ptr<char> cmdbuf, u64 cmdbuf_size, vm::ptr<char> databuf, u64 databuf_size, vm::ptr<u32> driver_status)
{
    sys_storage.todo("sys_storage_execute_device_command(fd=0x%x, cmd=0x%llx, cmdbuf=*0x%x, cmdbuf_size=0x%llx, databuf=*0x%x, databuf_size=0x%llx, driver_status=*0x%x)", fd, cmd, cmdbuf, cmdbuf_size, databuf, databuf_size, driver_status);

    // cmd == 2 is get device info,
    // databuf, first byte 0 == status ok?
    // byte 1, if < 0 , not ata device
    return CELL_OK;
}
// Check region access control. Stub: no-op.
error_code sys_storage_check_region_acl()
{
    sys_storage.todo("sys_storage_check_region_acl()");
    return CELL_OK;
}

// Set region access control. Stub: no-op.
error_code sys_storage_set_region_acl()
{
    sys_storage.todo("sys_storage_set_region_acl()");
    return CELL_OK;
}

// Get region offset. Stub: no-op.
error_code sys_storage_get_region_offset()
{
    sys_storage.todo("sys_storage_get_region_offset()");
    return CELL_OK;
}

// Set emulated drive speed. Not implemented (debug-kernel only syscall).
error_code sys_storage_set_emulated_speed()
{
    sys_storage.todo("sys_storage_set_emulated_speed()");

    // todo: only debug kernel has this
    return CELL_ENOSYS;
}
| 10,627
|
C++
|
.cpp
| 368
| 26.345109
| 236
| 0.681581
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,328
|
sys_gamepad.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_gamepad.cpp
|
#include "stdafx.h"
#include "sys_gamepad.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_gamepad);
// The sys_gamepad_ycon_* functions below are sub-command handlers for syscall
// 621 (sys_gamepad_ycon_if). All are stubs that log and return CELL_OK.
// 'in'/'out' are raw guest request/response buffers whose layout is unknown.

// Sub-command 0: initialize. Stub.
u32 sys_gamepad_ycon_initalize(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_initalize(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 1: finalize. Stub.
u32 sys_gamepad_ycon_finalize(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_finalize(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 2: query input ownership. Stub.
u32 sys_gamepad_ycon_has_input_ownership(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_has_input_ownership(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 3: enumerate devices. Stub.
u32 sys_gamepad_ycon_enumerate_device(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_enumerate_device(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 4: get device info. Stub.
u32 sys_gamepad_ycon_get_device_info(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_get_device_info(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 5: read raw HID report. Stub.
u32 sys_gamepad_ycon_read_raw_report(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_read_raw_report(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 6: write raw HID report. Stub.
u32 sys_gamepad_ycon_write_raw_report(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_write_raw_report(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 7: get feature report. Stub.
u32 sys_gamepad_ycon_get_feature(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_get_feature(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 8: set feature report. Stub.
u32 sys_gamepad_ycon_set_feature(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_set_feature(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}

// Sub-command 9: check whether the device is a PS Move (gem). Stub.
u32 sys_gamepad_ycon_is_gem(vm::ptr<u8> in, vm::ptr<u8> out)
{
    sys_gamepad.todo("sys_gamepad_ycon_is_gem(in=%d, out=%d) -> CELL_OK", in, out);
    return CELL_OK;
}
// syscall(621,packet_id,u8 *in,u8 *out) Talk:LV2_Functions_and_Syscalls#Syscall_621_.280x26D.29 gamepad_if usage
// Dispatcher for syscall 621: routes 'packet_id' to the matching sub-command.
// Unknown ids are logged and ignored (CELL_OK).
// Fix: the error log had a stray '*' before %d ("packet_id=*%d") although
// packet_id is a plain u8, not a pointer.
u32 sys_gamepad_ycon_if(u8 packet_id, vm::ptr<u8> in, vm::ptr<u8> out)
{
    switch (packet_id)
    {
    case 0:
        return sys_gamepad_ycon_initalize(in, out);
    case 1:
        return sys_gamepad_ycon_finalize(in, out);
    case 2:
        return sys_gamepad_ycon_has_input_ownership(in, out);
    case 3:
        return sys_gamepad_ycon_enumerate_device(in, out);
    case 4:
        return sys_gamepad_ycon_get_device_info(in, out);
    case 5:
        return sys_gamepad_ycon_read_raw_report(in, out);
    case 6:
        return sys_gamepad_ycon_write_raw_report(in, out);
    case 7:
        return sys_gamepad_ycon_get_feature(in, out);
    case 8:
        return sys_gamepad_ycon_set_feature(in, out);
    case 9:
        return sys_gamepad_ycon_is_gem(in, out);
    default:
        sys_gamepad.error("sys_gamepad_ycon_if(packet_id=%d, in=%d, out=%d), unknown packet id", packet_id, in, out);
        break;
    }
    return CELL_OK;
}
| 2,830
|
C++
|
.cpp
| 85
| 31.435294
| 113
| 0.698279
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,329
|
sys_hid.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_hid.cpp
|
#include "stdafx.h"
#include "sys_hid.h"
#include "Emu/Memory/vm.h"
#include "Emu/Memory/vm_var.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/Modules/cellPad.h"
#include "sys_process.h"
LOG_CHANNEL(sys_hid);
// Open a HID manager handle for a device class and port.
// device_type: 1 = pad, 2 = keyboard, 3 = mouse.
error_code sys_hid_manager_open(ppu_thread& ppu, u64 device_type, u64 port_no, vm::ptr<u32> handle)
{
    sys_hid.todo("sys_hid_manager_open(device_type=0x%llx, port_no=0x%llx, handle=*0x%llx)", device_type, port_no, handle);

    //device type == 1 = pad, 2 = kb, 3 = mouse
    if (device_type > 3)
    {
        return CELL_EINVAL;
    }

    if (!handle)
    {
        return CELL_EFAULT;
    }

    // 'handle' starts at 0x100 in realhw, and increments every time sys_hid_manager_open is called
    // however, sometimes the handle is reused when opening sys_hid_manager again (even when the previous one hasn't been closed yet) - maybe when processes/threads get killed/finish they also release their handles?
    static u32 ctr = 0x100;
    *handle = ctr++;

    if (device_type == 1)
    {
        // Pad: initialize the cellPad layer and enable LDD/pressure/sensor modes on the port
        cellPadInit(ppu, 7);
        cellPadSetPortSetting(::narrow<u32>(port_no) /* 0 */, CELL_PAD_SETTING_LDD | CELL_PAD_SETTING_PRESS_ON | CELL_PAD_SETTING_SENSOR_ON);
    }

    return CELL_OK;
}
error_code sys_hid_manager_ioctl(u32 hid_handle, u32 pkg_id, vm::ptr<void> buf, u64 buf_size)
{
sys_hid.todo("sys_hid_manager_ioctl(hid_handle=0x%x, pkg_id=0x%llx, buf=*0x%x, buf_size=0x%llx)", hid_handle, pkg_id, buf, buf_size);
// From realhw syscall dump when vsh boots
// SC count | handle | pkg_id | *buf (in) | *buf (out) | size -> ret
// ---------|--------|--------|---------------------------------------------------------------------------|---------------------------------------------------------------------------|------------
// 28893 | 0x101 | 0x2 | 000000000000000000000000000000000000000000 | 054c02680102020000000000000008035000001c1f | 21 -> 0
// 28894 | 0x101 | 0x3 | 00000000 | 00000000 | 4 -> 0
// 28895 | 0x101 | 0x5 | 00000000 | 00000000 | 4 -> 0
// 28896 | 0x101 | 0x68 | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 01000000d0031cb020169e502006b7f80000000000606098000000000000000000000000d | 64 -> 0
// | | | 0031c90000000002006bac400000000d0031cb0000000002006b4d0 | 0031c90000000002006bac400000000d0031cb0000000002006b4d0 |
// 28898 | 0x102 | 0x2 | 000000000000000000000000000000000000000000 | 054c02680102020000000000000008035000001c1f | 21 -> 0
// 28901 | 0x100 | 0x64 | 00000001 | 00000001 | 4 -> 0xffffffff80010002 # x3::hidportassign
// 2890 | 0x100 | 0x65 | 6b49d200 | 6b49d200 | 4 -> 0xffffffff80010002 # x3::hidportassign
// 28903 | 0x100 | 0x66 | 00000001 | 00000001 | 4 -> 0 # x3::hidportassign
// 28904 | 0x100 | 0x0 | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 00000001000000ff000000ff000000ff000000ff000000010000000100000001000000010 | 68 -> 0 # x3::hidportassign
// | | | 000000000000000000000000000000000000001000000010000000100000001 | 000000000000000000000000000000000000001000000010000000100000001 |
// 28907 | 0x101 | 0x3 | 00000001 | 00000001 | 4 -> 0
// 28908 | 0x101 | 0x5 | 00000001 | 00000001 | 4 -> 0
// 29404 | 0x100 | 0x4 | 00 | ee | 1 -> 0
// *** repeats 30600, 31838, 33034, 34233, 35075 (35075 is x3::hidportassign) ***
// 35076 | 0x100 | 0x0 | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 00000001000000ff000000ff000000ff000000ff000000320000003200000032000000320 | 68 -> 0
// | | | 000003200000032000000320000003200002710000027100000271000002710 | 000003200000032000000320000003200002710000027100000271000002710 |
// *** more 0x4 that have buf(in)=00 and buf(out)=ee ***
if (pkg_id == 2)
{
// Return what realhw seems to return
// TODO: Figure out what this corresponds to
auto info = vm::static_ptr_cast<sys_hid_info_2>(buf);
info->vid = 0x054C;
info->pid = 0x0268;
u8 realhw[17] = { 0x01, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x03, 0x50, 0x00, 0x00, 0x1c, 0x1f };
memcpy(info->unk, &realhw, 17);
}
else if (pkg_id == 5)
{
auto info = vm::static_ptr_cast<sys_hid_info_5>(buf);
info->vid = 0x054C;
info->pid = 0x0268;
}
// pkg_id == 6 == setpressmode?
else if (pkg_id == 0x68)
{
[[maybe_unused]] auto info = vm::static_ptr_cast<sys_hid_ioctl_68>(buf);
//info->unk2 = 0;
}
return CELL_OK;
}
error_code sys_hid_manager_check_focus()
{
// spammy sys_hid.todo("sys_hid_manager_check_focus()");
return not_an_error(1);
}
error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf, u64 buf_size)
{
sys_hid.todo("sys_hid_manager_513(%llx, %llx, buf=%llx, buf_size=%llx)", a1, a2, buf, buf_size);
return CELL_OK;
}
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size)
{
if (pkg_id == 0xE)
{
sys_hid.trace("sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id, buf, buf_size);
}
else
{
sys_hid.todo("sys_hid_manager_514(pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", pkg_id, buf, buf_size);
}
if (pkg_id == 0xE)
{
// buf holds device_type
// auto device_type = vm::static_ptr_cast<u8>(buf);
// spammy sys_hid.todo("device_type: 0x%x", device_type[0]);
// return 1 or 0? look like almost like another check_focus type check, returning 0 looks to keep system focus
}
else if (pkg_id == 0xD)
{
auto inf = vm::static_ptr_cast<sys_hid_manager_514_pkg_d>(buf);
// unk1 = (pad# << 24) | pad# | 0x100
// return value doesn't seem to be used again
sys_hid.todo("unk1: 0x%x, unk2:0x%x", inf->unk1, inf->unk2);
}
return CELL_OK;
}
error_code sys_hid_manager_is_process_permission_root(u32 pid)
{
sys_hid.todo("sys_hid_manager_is_process_permission_root(pid=0x%x)", pid);
return not_an_error(g_ps3_process_info.has_root_perm());
}
error_code sys_hid_manager_add_hot_key_observer(u32 event_queue, vm::ptr<u32> unk)
{
sys_hid.todo("sys_hid_manager_add_hot_key_observer(event_queue=0x%x, unk=*0x%x)", event_queue, unk);
return CELL_OK;
}
error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf, u64 buf_size)
{
if (!buf)
{
return CELL_EFAULT;
}
(pkg_id == 2 || pkg_id == 0x81 ? sys_hid.trace : sys_hid.todo)
("sys_hid_manager_read(handle=0x%x, pkg_id=0x%x, buf=*0x%x, buf_size=0x%llx)", handle, pkg_id, buf, buf_size);
if (pkg_id == 2)
{
// cellPadGetData
// it returns just button array from 'CellPadData'
//auto data = vm::static_ptr_cast<u16[64]>(buf);
// todo: use handle and dont call cellpad here
vm::var<CellPadData> tmpData;
if ((cellPadGetData(0, +tmpData) == CELL_OK) && tmpData->len > 0)
{
u64 cpySize = std::min(static_cast<u64>(tmpData->len) * sizeof(u16), buf_size * sizeof(u16));
memcpy(buf.get_ptr(), &tmpData->button, cpySize);
return not_an_error(cpySize);
}
}
else if (pkg_id == 0x81)
{
// cellPadGetDataExtra?
vm::var<CellPadData> tmpData;
if ((cellPadGetData(0, +tmpData) == CELL_OK) && tmpData->len > 0)
{
u64 cpySize = std::min(static_cast<u64>(tmpData->len) * sizeof(u16), buf_size * sizeof(u16));
memcpy(buf.get_ptr(), &tmpData->button, cpySize);
return not_an_error(cpySize / 2);
}
}
return CELL_OK;
}
| 8,669
|
C++
|
.cpp
| 161
| 51.52795
| 232
| 0.55916
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,330
|
sys_prx.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_prx.cpp
|
#include "stdafx.h"
#include "sys_prx.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"
#include "Crypto/unself.h"
#include "Loader/ELF.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Crypto/unedat.h"
#include "Utilities/StrUtil.h"
#include "sys_fs.h"
#include "sys_process.h"
#include "sys_memory.h"
#include <span>
extern void dump_executable(std::span<const u8> data, const ppu_module* _module, std::string_view title_id);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx& prx);
extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags);
LOG_CHANNEL(sys_prx);
// <string: firmware sprx, int: should hle if 1>
extern const std::map<std::string_view, int> g_prx_list
{
{ "/dev_flash/sys/internal/libfs_utility_init.sprx", 1 },
{ "libaacenc.sprx", 0 },
{ "libaacenc_spurs.sprx", 0 },
{ "libac3dec.sprx", 0 },
{ "libac3dec2.sprx", 0 },
{ "libadec.sprx", 0 },
{ "libadec2.sprx", 0 },
{ "libadec_internal.sprx", 0 },
{ "libad_async.sprx", 0 },
{ "libad_billboard_util.sprx", 0 },
{ "libad_core.sprx", 0 },
{ "libapostsrc_mini.sprx", 0 },
{ "libasfparser2_astd.sprx", 0 },
{ "libat3dec.sprx", 0 },
{ "libat3multidec.sprx", 0 },
{ "libatrac3multi.sprx", 0 },
{ "libatrac3plus.sprx", 0 },
{ "libatxdec.sprx", 1 },
{ "libatxdec2.sprx", 0 },
{ "libaudio.sprx", 1 },
{ "libavcdec.sprx", 0 },
{ "libavcenc.sprx", 0 },
{ "libavcenc_small.sprx", 0 },
{ "libavchatjpgdec.sprx", 0 },
{ "libbeisobmf.sprx", 0 },
{ "libbemp2sys.sprx", 0 },
{ "libcamera.sprx", 1 },
{ "libcelp8dec.sprx", 0 },
{ "libcelp8enc.sprx", 0 },
{ "libcelpdec.sprx", 0 },
{ "libcelpenc.sprx", 0 },
{ "libddpdec.sprx", 0 },
{ "libdivxdec.sprx", 0 },
{ "libdmux.sprx", 0 },
{ "libdmuxpamf.sprx", 0 },
{ "libdtslbrdec.sprx", 0 },
{ "libfiber.sprx", 0 },
{ "libfont.sprx", 0 },
{ "libfontFT.sprx", 0 },
{ "libfreetype.sprx", 0 },
{ "libfreetypeTT.sprx", 0 },
{ "libfs.sprx", 0 },
{ "libfs_155.sprx", 0 },
{ "libgcm_sys.sprx", 0 },
{ "libgem.sprx", 1 },
{ "libgifdec.sprx", 0 },
{ "libhttp.sprx", 0 },
{ "libio.sprx", 1 },
{ "libjpgdec.sprx", 0 },
{ "libjpgenc.sprx", 0 },
{ "libkey2char.sprx", 0 },
{ "libl10n.sprx", 0 },
{ "liblv2.sprx", 0 },
{ "liblv2coredump.sprx", 0 },
{ "liblv2dbg_for_cex.sprx", 0 },
{ "libm2bcdec.sprx", 0 },
{ "libm4aacdec.sprx", 0 },
{ "libm4aacdec2ch.sprx", 0 },
{ "libm4hdenc.sprx", 0 },
{ "libm4venc.sprx", 0 },
{ "libmedi.sprx", 1 },
{ "libmic.sprx", 1 },
{ "libmp3dec.sprx", 0 },
{ "libmp4.sprx", 0 },
{ "libmpl1dec.sprx", 0 },
{ "libmvcdec.sprx", 0 },
{ "libnet.sprx", 0 },
{ "libnetctl.sprx", 1 },
{ "libpamf.sprx", 1 },
{ "libpngdec.sprx", 0 },
{ "libpngenc.sprx", 0 },
{ "libresc.sprx", 0 },
{ "librtc.sprx", 1 },
{ "librudp.sprx", 0 },
{ "libsail.sprx", 0 },
{ "libsail_avi.sprx", 0 },
{ "libsail_rec.sprx", 0 },
{ "libsjvtd.sprx", 0 },
{ "libsmvd2.sprx", 0 },
{ "libsmvd4.sprx", 0 },
{ "libspurs_jq.sprx", 0 },
{ "libsre.sprx", 0 },
{ "libssl.sprx", 0 },
{ "libsvc1d.sprx", 0 },
{ "libsync2.sprx", 0 },
{ "libsysmodule.sprx", 0 },
{ "libsysutil.sprx", 1 },
{ "libsysutil_ap.sprx", 1 },
{ "libsysutil_authdialog.sprx", 1 },
{ "libsysutil_avc2.sprx", 1 },
{ "libsysutil_avconf_ext.sprx", 1 },
{ "libsysutil_avc_ext.sprx", 1 },
{ "libsysutil_bgdl.sprx", 1 },
{ "libsysutil_cross_controller.sprx", 1 },
{ "libsysutil_dec_psnvideo.sprx", 1 },
{ "libsysutil_dtcp_ip.sprx", 1 },
{ "libsysutil_game.sprx", 1 },
{ "libsysutil_game_exec.sprx", 1 },
{ "libsysutil_imejp.sprx", 1 },
{ "libsysutil_misc.sprx", 1 },
{ "libsysutil_music.sprx", 1 },
{ "libsysutil_music_decode.sprx", 1 },
{ "libsysutil_music_export.sprx", 1 },
{ "libsysutil_np.sprx", 1 },
{ "libsysutil_np2.sprx", 1 },
{ "libsysutil_np_clans.sprx", 1 },
{ "libsysutil_np_commerce2.sprx", 1 },
{ "libsysutil_np_eula.sprx", 1 },
{ "libsysutil_np_installer.sprx", 1 },
{ "libsysutil_np_sns.sprx", 1 },
{ "libsysutil_np_trophy.sprx", 1 },
{ "libsysutil_np_tus.sprx", 1 },
{ "libsysutil_np_util.sprx", 1 },
{ "libsysutil_oskdialog_ext.sprx", 1 },
{ "libsysutil_pesm.sprx", 1 },
{ "libsysutil_photo_decode.sprx", 1 },
{ "libsysutil_photo_export.sprx", 1 },
{ "libsysutil_photo_export2.sprx", 1 },
{ "libsysutil_photo_import.sprx", 1 },
{ "libsysutil_photo_network_sharing.sprx", 1 },
{ "libsysutil_print.sprx", 1 },
{ "libsysutil_rec.sprx", 1 },
{ "libsysutil_remoteplay.sprx", 1 },
{ "libsysutil_rtcalarm.sprx", 1 },
{ "libsysutil_savedata.sprx", 1 },
{ "libsysutil_savedata_psp.sprx", 1 },
{ "libsysutil_screenshot.sprx", 1 },
{ "libsysutil_search.sprx", 1 },
{ "libsysutil_storagedata.sprx", 1 },
{ "libsysutil_subdisplay.sprx", 1 },
{ "libsysutil_syschat.sprx", 1 },
{ "libsysutil_sysconf_ext.sprx", 1 },
{ "libsysutil_userinfo.sprx", 1 },
{ "libsysutil_video_export.sprx", 1 },
{ "libsysutil_video_player.sprx", 1 },
{ "libsysutil_video_upload.sprx", 1 },
{ "libusbd.sprx", 0 },
{ "libusbpspcm.sprx", 0 },
{ "libvdec.sprx", 1 },
{ "libvoice.sprx", 1 },
{ "libvpost.sprx", 0 },
{ "libvpost2.sprx", 0 },
{ "libwmadec.sprx", 0 },
};
bool ppu_register_library_lock(std::string_view libname, bool lock_lib);
static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<sys_prx_load_module_option_t> /*pOpt*/, fs::file src = {}, s64 file_offset = 0)
{
if (flags != 0)
{
if (flags & SYS_PRX_LOAD_MODULE_FLAGS_INVALIDMASK)
{
return CELL_EINVAL;
}
if (flags & SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR && !g_ps3_process_info.ppc_seg)
{
return CELL_ENOSYS;
}
fmt::throw_exception("sys_prx: Unimplemented fixed address allocations");
}
std::string vpath0;
std::string path = vfs::get(vpath, nullptr, &vpath0);
std::string name = vpath0.substr(vpath0.find_last_of('/') + 1);
bool ignore = false;
constexpr std::string_view firmware_sprx_dir = "/dev_flash/sys/external/";
const bool is_firmware_sprx = vpath0.starts_with(firmware_sprx_dir) && g_prx_list.count(std::string_view(vpath0).substr(firmware_sprx_dir.size()));
if (is_firmware_sprx)
{
if (g_cfg.core.libraries_control.get_set().count(name + ":lle"))
{
// Force LLE
ignore = false;
}
else if (g_cfg.core.libraries_control.get_set().count(name + ":hle"))
{
// Force HLE
ignore = true;
}
else
{
// Use list
ignore = ::at32(g_prx_list, name) != 0;
}
}
else if (vpath0.starts_with("/"))
{
// Special case : HLE for files outside of "/dev_flash/sys/external/"
// Have to specify full path for them
ignore = g_prx_list.count(vpath0) && ::at32(g_prx_list, vpath0);
}
auto hle_load = [&]()
{
const auto prx = idm::make_ptr<lv2_obj, lv2_prx>();
prx->name = std::move(name);
prx->path = std::move(path);
sys_prx.warning(u8"Ignored module: “%s” (id=0x%x)", vpath, idm::last_id());
return not_an_error(idm::last_id());
};
if (ignore)
{
return hle_load();
}
if (!src)
{
auto [fs_error, ppath, path0, lv2_file, type] = lv2_file::open(vpath, 0, 0);
if (fs_error)
{
if (fs_error + 0u == CELL_ENOENT && is_firmware_sprx)
{
sys_prx.error(u8"firmware SPRX not found: “%s” (forcing HLE implementation)", vpath, idm::last_id());
return hle_load();
}
return {fs_error, vpath};
}
src = std::move(lv2_file);
}
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
src = decrypt_self(std::move(src), reinterpret_cast<u8*>(&klic), nullptr, true);
if (!src)
{
return {CELL_PRX_ERROR_UNSUPPORTED_PRX_TYPE, +"Failed to decrypt file"};
}
const auto src_data = g_cfg.core.ppu_debug ? src.to_vector<u8>() : std::vector<u8>{};
ppu_prx_object obj = std::move(src);
src.close();
if (obj != elf_error::ok)
{
return {CELL_PRX_ERROR_UNSUPPORTED_PRX_TYPE, obj.get_error()};
}
const auto prx = ppu_load_prx(obj, false, path, file_offset);
if (g_cfg.core.ppu_debug)
{
dump_executable({src_data.data(), src_data.size()}, prx.get(), Emu.GetTitleID());
}
obj.clear();
if (!prx)
{
return CELL_PRX_ERROR_ILLEGAL_LIBRARY;
}
ppu_initialize(*prx);
sys_prx.success(u8"Loaded module: “%s” (id=0x%x)", vpath, idm::last_id());
return not_an_error(idm::last_id());
}
fs::file make_file_view(fs::file&& file, u64 offset, u64 size);
std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
{
[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_prx_overlay);
const std::string path = vfs::get(ar.pop<std::string>());
const s64 offset = ar;
const u32 state = ar;
usz seg_count = 0;
ar.deserialize_vle(seg_count);
std::shared_ptr<lv2_prx> prx;
auto hle_load = [&]()
{
prx = std::make_shared<lv2_prx>();
prx->path = path;
prx->name = path.substr(path.find_last_of(fs::delim) + 1);
};
if (seg_count)
{
std::basic_string<char> loaded_flags, external_flags;
ar(loaded_flags, external_flags);
fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};
if (file)
{
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
file = make_file_view(std::move(file), offset, umax);
prx = ppu_load_prx(ppu_prx_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, false, path, 0, &ar);
prx->m_loaded_flags = std::move(loaded_flags);
prx->m_external_loaded_flags = std::move(external_flags);
if (state <= PRX_STATE_STARTED)
{
prx->restore_exports();
}
ensure(prx);
}
else
{
ensure(g_cfg.savestate.state_inspection_mode.get());
hle_load();
// Partially recover information
for (usz i = 0; i < seg_count; i++)
{
auto& seg = prx->segs.emplace_back();
seg.addr = ar;
seg.size = 1; // TODO
}
}
}
else
{
hle_load();
}
prx->state = state;
return prx;
}
void lv2_prx::save(utils::serial& ar)
{
USING_SERIALIZATION_VERSION(lv2_prx_overlay);
ar(vfs::retrieve(path), offset, state);
// Save segments count
ar.serialize_vle(segs.size());
if (!segs.empty())
{
ar(m_loaded_flags);
ar(m_external_loaded_flags);
}
for (const ppu_segment& seg : segs)
{
if (seg.type == 0x1u && seg.size) ar(seg.addr);
}
}
error_code sys_prx_get_ppu_guid(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("sys_prx_get_ppu_guid()");
return CELL_OK;
}
error_code _sys_prx_load_module_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module_by_fd(fd=%d, offset=0x%x, flags=0x%x, pOpt=*0x%x)", fd, offset, flags, pOpt);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
if (!file)
{
return CELL_EBADF;
}
std::lock_guard lock(file->mp->mutex);
if (!file->file)
{
return CELL_EBADF;
}
return prx_load_module(offset ? fmt::format("%s_x%x", file->name.data(), offset) : file->name.data(), flags, pOpt, lv2_file::make_view(file, offset), offset);
}
error_code _sys_prx_load_module_on_memcontainer_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module_on_memcontainer_by_fd(fd=%d, offset=0x%x, mem_ct=0x%x, flags=0x%x, pOpt=*0x%x)", fd, offset, mem_ct, flags, pOpt);
return _sys_prx_load_module_by_fd(ppu, fd, offset, flags, pOpt);
}
static error_code prx_load_module_list(ppu_thread& ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u32 /*mem_ct*/, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list)
{
if (flags != 0)
{
if (flags & SYS_PRX_LOAD_MODULE_FLAGS_INVALIDMASK)
{
return CELL_EINVAL;
}
if (flags & SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR && !g_ps3_process_info.ppc_seg)
{
return CELL_ENOSYS;
}
fmt::throw_exception("sys_prx: Unimplemented fixed address allocations");
}
for (s32 i = 0; i < count; ++i)
{
const auto result = prx_load_module(path_list[i].get_ptr(), flags, pOpt);
if (result < 0)
{
while (--i >= 0)
{
// Unload already loaded modules
_sys_prx_unload_module(ppu, id_list[i], 0, vm::null);
}
// Fill with -1
std::memset(id_list.get_ptr(), -1, count * sizeof(id_list[0]));
return result;
}
id_list[i] = result;
}
return CELL_OK;
}
error_code _sys_prx_load_module_list(ppu_thread& ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module_list(count=%d, path_list=**0x%x, flags=0x%x, pOpt=*0x%x, id_list=*0x%x)", count, path_list, flags, pOpt, id_list);
return prx_load_module_list(ppu, count, path_list, SYS_MEMORY_CONTAINER_ID_INVALID, flags, pOpt, id_list);
}
error_code _sys_prx_load_module_list_on_memcontainer(ppu_thread& ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module_list_on_memcontainer(count=%d, path_list=**0x%x, mem_ct=0x%x, flags=0x%x, pOpt=*0x%x, id_list=*0x%x)", count, path_list, mem_ct, flags, pOpt, id_list);
return prx_load_module_list(ppu, count, path_list, mem_ct, flags, pOpt, id_list);
}
error_code _sys_prx_load_module_on_memcontainer(ppu_thread& ppu, vm::cptr<char> path, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module_on_memcontainer(path=%s, mem_ct=0x%x, flags=0x%x, pOpt=*0x%x)", path, mem_ct, flags, pOpt);
return prx_load_module(path.get_ptr(), flags, pOpt);
}
error_code _sys_prx_load_module(ppu_thread& ppu, vm::cptr<char> path, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_load_module(path=%s, flags=0x%x, pOpt=*0x%x)", path, flags, pOpt);
return prx_load_module(path.get_ptr(), flags, pOpt);
}
error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_start_stop_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_start_module(id=0x%x, flags=0x%x, pOpt=*0x%x)", id, flags, pOpt);
if (id == 0 || !pOpt)
{
return CELL_EINVAL;
}
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
if (!prx)
{
return CELL_ESRCH;
}
switch (pOpt->cmd & 0xf)
{
case 1:
{
std::lock_guard lock(prx->mutex);
if (!prx->state.compare_and_swap_test(PRX_STATE_INITIALIZED, PRX_STATE_STARTING))
{
if (prx->state == PRX_STATE_DESTROYED)
{
return CELL_ESRCH;
}
return CELL_PRX_ERROR_ERROR;
}
prx->load_exports();
break;
}
case 2:
{
switch (const u64 res = pOpt->res)
{
case SYS_PRX_RESIDENT:
{
// No error code on invalid state, so throw on unexpected state
ensure(prx->state.compare_and_swap_test(PRX_STATE_STARTING, PRX_STATE_STARTED));
return CELL_OK;
}
default:
{
if (res & 0xffff'ffffu)
{
// Unload the module (SYS_PRX_NO_RESIDENT expected)
sys_prx.warning("_sys_prx_start_module(): Start entry function returned SYS_PRX_NO_RESIDENT (res=0x%llx)", res);
// Thread-safe if called from liblv2.sprx, due to internal lwmutex lock before it
prx->state = PRX_STATE_STOPPED;
prx->unload_exports();
_sys_prx_unload_module(ppu, id, 0, vm::null);
// Return the exact value returned by the start function (as an error)
return static_cast<s32>(res);
}
// Return type of start entry function is s32
// And according to RE this path results in weird behavior
sys_prx.error("_sys_prx_start_module(): Start entry function returned weird value (res=0x%llx)", res);
return CELL_OK;
}
}
}
default:
return CELL_PRX_ERROR_ERROR;
}
ppu.check_state();
pOpt->entry.set(prx->start ? prx->start.addr() : ~0ull);
// This check is probably for older fw
if (pOpt->size != 0x20u)
{
pOpt->entry2.set(prx->prologue ? prx->prologue.addr() : ~0ull);
}
return CELL_OK;
}
error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_start_stop_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_stop_module(id=0x%x, flags=0x%x, pOpt=*0x%x)", id, flags, pOpt);
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
if (!prx)
{
return CELL_ESRCH;
}
if (!pOpt)
{
return CELL_EINVAL;
}
auto set_entry2 = [&](u64 addr)
{
if (pOpt->size != 0x20u)
{
pOpt->entry2.set(addr);
}
};
switch (pOpt->cmd & 0xf)
{
case 1:
{
switch (const auto old = prx->state.compare_and_swap(PRX_STATE_STARTED, PRX_STATE_STOPPING))
{
case PRX_STATE_INITIALIZED: return CELL_PRX_ERROR_NOT_STARTED;
case PRX_STATE_STOPPED: return CELL_PRX_ERROR_ALREADY_STOPPED;
case PRX_STATE_STOPPING: return CELL_PRX_ERROR_ALREADY_STOPPING; // Internal error
case PRX_STATE_STARTING: return CELL_PRX_ERROR_ERROR; // Internal error
case PRX_STATE_DESTROYED: return CELL_ESRCH;
case PRX_STATE_STARTED: break;
default:
fmt::throw_exception("Invalid prx state (%d)", old);
}
ppu.check_state();
pOpt->entry.set(prx->stop ? prx->stop.addr() : ~0ull);
set_entry2(prx->epilogue ? prx->epilogue.addr() : ~0ull);
return CELL_OK;
}
case 2:
{
switch (pOpt->res)
{
case 0:
{
// No error code on invalid state, so throw on unexpected state
std::lock_guard lock(prx->mutex);
ensure(prx->exports_end <= prx->exports_start || (prx->state == PRX_STATE_STOPPING));
prx->unload_exports();
ensure(prx->state.compare_and_swap_test(PRX_STATE_STOPPING, PRX_STATE_STOPPED));
return CELL_OK;
}
case 1:
return CELL_PRX_ERROR_CAN_NOT_STOP; // Internal error
default:
// Nothing happens (probably unexpected value)
return CELL_OK;
}
}
// These commands are not used by liblv2.sprx
case 4: // Get start entry and stop functions
case 8: // Disable stop function execution
{
switch (const auto old = +prx->state)
{
case PRX_STATE_INITIALIZED: return CELL_PRX_ERROR_NOT_STARTED;
case PRX_STATE_STOPPED: return CELL_PRX_ERROR_ALREADY_STOPPED;
case PRX_STATE_STOPPING: return CELL_PRX_ERROR_ALREADY_STOPPING; // Internal error
case PRX_STATE_STARTING: return CELL_PRX_ERROR_ERROR; // Internal error
case PRX_STATE_DESTROYED: return CELL_ESRCH;
case PRX_STATE_STARTED: break;
default:
fmt::throw_exception("Invalid prx state (%d)", old);
}
if (pOpt->cmd == 4u)
{
ppu.check_state();
pOpt->entry.set(prx->stop ? prx->stop.addr() : ~0ull);
set_entry2(prx->epilogue ? prx->epilogue.addr() : ~0ull);
}
else
{
// Disables stop function execution (but the real value can be read through _sys_prx_get_module_info)
sys_prx.todo("_sys_prx_stop_module(): cmd is 8 (stop function = *0x%x)", prx->stop);
//prx->stop = vm::null;
}
return CELL_OK;
}
default:
return CELL_PRX_ERROR_ERROR;
}
}
error_code _sys_prx_unload_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_unload_module_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
// Get the PRX, free the used memory and delete the object and its ID
const auto prx = idm::withdraw<lv2_obj, lv2_prx>(id, [](lv2_prx& prx) -> CellPrxError
{
switch (prx.state.fetch_op([](u32& value)
{
if (value == PRX_STATE_INITIALIZED || value == PRX_STATE_STOPPED)
{
value = PRX_STATE_DESTROYED;
return true;
}
return false;
}).first)
{
case PRX_STATE_INITIALIZED:
case PRX_STATE_STOPPED:
return {};
default: break;
}
return CELL_PRX_ERROR_NOT_REMOVABLE;
});
if (!prx)
{
return {CELL_PRX_ERROR_UNKNOWN_MODULE, id};
}
if (prx.ret)
{
return {prx.ret, "%s (id=%s)", prx->name, id};
}
sys_prx.success("_sys_prx_unload_module(id=0x%x, flags=0x%x, pOpt=*0x%x): name='%s'", id, flags, pOpt, prx->name);
prx->mutex.lock_unlock();
ppu_unload_prx(*prx);
ppu_finalize(*prx);
//s32 result = prx->exit ? prx->exit() : CELL_OK;
return CELL_OK;
}
void lv2_prx::load_exports()
{
if (exports_end <= exports_start)
{
// Nothing to load
return;
}
if (!m_loaded_flags.empty())
{
// Already loaded
return;
}
ppu_manual_load_imports_exports(0, 0, exports_start, exports_end - exports_start, m_loaded_flags);
}
void lv2_prx::restore_exports()
{
constexpr usz sizeof_export_data = 0x1C;
std::basic_string<char> loaded_flags_empty;
for (u32 start = exports_start, i = 0; start < exports_end; i++, start += vm::read8(start) ? vm::read8(start) : sizeof_export_data)
{
if (::at32(m_external_loaded_flags, i) || (!m_loaded_flags.empty() && ::at32(m_loaded_flags, i)))
{
loaded_flags_empty.clear();
ppu_manual_load_imports_exports(0, 0, start, sizeof_export_data, loaded_flags_empty);
}
}
}
void lv2_prx::unload_exports()
{
if (m_loaded_flags.empty())
{
// Not loaded
return;
}
std::basic_string<char> merged = m_loaded_flags;
for (usz i = 0; i < merged.size(); i++)
{
merged[i] |= ::at32(m_external_loaded_flags, i);
}
ppu_manual_load_imports_exports(0, 0, exports_start, exports_end - exports_start, merged);
}
error_code _sys_prx_register_module(ppu_thread& ppu, vm::cptr<char> name, vm::ptr<void> opt)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_register_module(name=%s, opt=*0x%x)", name, opt);
if (!opt)
{
return CELL_EINVAL;
}
sys_prx_register_module_0x30_type_1_t info{};
switch (const u64 size_struct = vm::read64(opt.addr()))
{
case 0x1c:
case 0x20:
{
const auto _info = vm::static_ptr_cast<sys_prx_register_module_0x20_t>(opt);
sys_prx.todo("_sys_prx_register_module(): opt size is 0x%x", size_struct);
// Rebuild info with corresponding members of old structures
// Weird that type is set to 0 because 0 means NO-OP in this syscall
info.size = 0x30;
info.lib_stub_size = _info->stubs_size;
info.lib_stub_ea = _info->stubs_ea;
info.error_handler = _info->error_handler;
info.type = 0;
break;
}
case 0x30:
{
std::memcpy(&info, opt.get_ptr(), sizeof(info));
break;
}
default: return CELL_EINVAL;
}
sys_prx.warning("opt: size=0x%x, type=0x%x, unk3=0x%x, unk4=0x%x, lib_entries_ea=%s, lib_entries_size=0x%x"
", lib_stub_ea=%s, lib_stub_size=0x%x, error_handler=%s", info.size, info.type, info.unk3, info.unk4
, info.lib_entries_ea, info.lib_entries_size, info.lib_stub_ea, info.lib_stub_size, info.error_handler);
if (info.type & 0x1)
{
if (Emu.IsVsh())
{
ppu_manual_load_imports_exports(info.lib_stub_ea.addr(), info.lib_stub_size, info.lib_entries_ea.addr(), info.lib_entries_size, *std::make_unique<std::basic_string<char>>());
}
else
{
// Only VSH is allowed to load it manually
return not_an_error(CELL_PRX_ERROR_ELF_IS_REGISTERED);
}
}
return CELL_OK;
}
error_code _sys_prx_query_module(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_query_module()");
return CELL_OK;
}
error_code _sys_prx_register_library(ppu_thread& ppu, vm::ptr<void> library)
{
ppu.state += cpu_flag::wait;
sys_prx.notice("_sys_prx_register_library(library=*0x%x)", library);
if (!vm::check_addr(library.addr()))
{
return CELL_EFAULT;
}
constexpr u32 sizeof_lib = 0x1c;
std::array<char, sizeof_lib> mem_copy{};
std::memcpy(mem_copy.data(), library.get_ptr(), sizeof_lib);
std::basic_string<char> flags;
ppu_manual_load_imports_exports(0, 0, library.addr(), sizeof_lib, flags);
if (flags.front())
{
const bool success = idm::select<lv2_obj, lv2_prx>([&](u32 /*id*/, lv2_prx& prx)
{
if (prx.state == PRX_STATE_INITIALIZED)
{
for (u32 lib_addr = prx.exports_start, index = 0; lib_addr < prx.exports_end; index++, lib_addr += vm::read8(lib_addr) ? vm::read8(lib_addr) : sizeof_lib)
{
if (std::memcpy(vm::base(lib_addr), mem_copy.data(), sizeof_lib) == 0)
{
atomic_storage<char>::release(prx.m_external_loaded_flags[index], true);
return true;
}
}
}
return false;
}).ret;
if (!success)
{
sys_prx.error("_sys_prx_register_library(): Failed to associate library to PRX!");
}
}
return CELL_OK;
}
error_code _sys_prx_unregister_library(ppu_thread& ppu, vm::ptr<void> library)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_unregister_library(library=*0x%x)", library);
return CELL_OK;
}
error_code _sys_prx_link_library(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_link_library()");
return CELL_OK;
}
error_code _sys_prx_unlink_library(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_unlink_library()");
return CELL_OK;
}
error_code _sys_prx_query_library(ppu_thread& ppu)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_query_library()");
return CELL_OK;
}
error_code _sys_prx_get_module_list(ppu_thread& ppu, u64 flags, vm::ptr<sys_prx_get_module_list_option_t> pInfo)
{
ppu.state += cpu_flag::wait;
if (flags & 0x1)
{
sys_prx.todo("_sys_prx_get_module_list(flags=%d, pInfo=*0x%x)", flags, pInfo);
}
else
{
sys_prx.warning("_sys_prx_get_module_list(flags=%d, pInfo=*0x%x)", flags, pInfo);
}
// TODO: Some action occurs if LSB of flags is set here
if (!(flags & 0x2))
{
// Do nothing
return CELL_OK;
}
if (pInfo->size == pInfo.size())
{
const u32 max_count = pInfo->max;
const auto idlist = +pInfo->idlist;
u32 count = 0;
if (max_count)
{
const std::string liblv2_path = vfs::get("/dev_flash/sys/external/liblv2.sprx");
idm::select<lv2_obj, lv2_prx>([&](u32 id, lv2_prx& prx)
{
if (count >= max_count)
{
return true;
}
if (prx.path == liblv2_path)
{
// Hide liblv2.sprx for now
return false;
}
idlist[count++] = id;
return false;
});
}
pInfo->count = count;
}
else
{
// TODO: A different structure should be served here with sizeof == 0x18
sys_prx.todo("_sys_prx_get_module_list(): Unknown structure specified (size=0x%llx)", pInfo->size);
}
return CELL_OK;
}
error_code _sys_prx_get_module_info(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_module_info_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.warning("_sys_prx_get_module_info(id=0x%x, flags=%d, pOpt=*0x%x)", id, flags, pOpt);
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
if (!pOpt)
{
return CELL_EFAULT;
}
if (pOpt->size != pOpt.size())
{
return CELL_EINVAL;
}
if (!pOpt->info)
{
return CELL_EFAULT;
}
if (pOpt->info->size != pOpt->info.size())
{
return CELL_EINVAL;
}
if (!prx)
{
return CELL_PRX_ERROR_UNKNOWN_MODULE;
}
strcpy_trunc(pOpt->info->name, prx->module_info_name);
pOpt->info->version[0] = prx->module_info_version[0];
pOpt->info->version[1] = prx->module_info_version[1];
pOpt->info->modattribute = prx->module_info_attributes;
pOpt->info->start_entry = prx->start.addr();
pOpt->info->stop_entry = prx->stop.addr();
pOpt->info->all_segments_num = ::size32(prx->segs);
if (pOpt->info->filename)
{
std::span dst(pOpt->info->filename.get_ptr(), pOpt->info->filename_size);
strcpy_trunc(dst, vfs::retrieve(prx->path));
}
if (pOpt->info->segments)
{
u32 i = 0;
for (; i < prx->segs.size() && i < pOpt->info->segments_num; i++)
{
if (!prx->segs[i].addr) continue; // TODO: Check this
pOpt->info->segments[i].index = i;
pOpt->info->segments[i].base = prx->segs[i].addr;
pOpt->info->segments[i].filesz = prx->segs[i].filesz;
pOpt->info->segments[i].memsz = prx->segs[i].size;
pOpt->info->segments[i].type = prx->segs[i].type;
}
pOpt->info->segments_num = i;
}
return CELL_OK;
}
error_code _sys_prx_get_module_id_by_name(ppu_thread& ppu, vm::cptr<char> name, u64 flags, vm::ptr<sys_prx_get_module_id_by_name_option_t> pOpt)
{
ppu.state += cpu_flag::wait;
sys_prx.todo("_sys_prx_get_module_id_by_name(name=%s, flags=%d, pOpt=*0x%x)", name, flags, pOpt);
//if (realName == "?") ...
return not_an_error(CELL_PRX_ERROR_UNKNOWN_MODULE);
}
// Resolves a guest address to the ID of the PRX module whose segment contains it.
error_code _sys_prx_get_module_id_by_address(ppu_thread& ppu, u32 addr)
{
	ppu.state += cpu_flag::wait;

	sys_prx.warning("_sys_prx_get_module_id_by_address(addr=0x%x)", addr);

	if (!vm::check_addr(addr))
	{
		// Fast check for an invalid argument
		return {CELL_PRX_ERROR_UNKNOWN_MODULE, addr};
	}

	// Scan every loaded PRX for a non-empty segment covering the address
	const auto [found_prx, found_id] = idm::select<lv2_obj, lv2_prx>([&](u32 cur_id, lv2_prx& cur_prx) -> u32
	{
		for (const ppu_segment& segment : cur_prx.segs)
		{
			if (!segment.size)
			{
				continue;
			}

			if (addr >= segment.addr && addr < segment.addr + segment.size)
			{
				return cur_id;
			}
		}

		return 0;
	});

	if (!found_id)
	{
		return {CELL_PRX_ERROR_UNKNOWN_MODULE, addr};
	}

	// Positive module ID doubles as the "success" return value
	return not_an_error(found_id);
}
// Stub: logged only, unconditionally reports success.
error_code _sys_prx_start(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_prx.todo("sys_prx_start()");
	return CELL_OK;
}
// Stub: logged only, unconditionally reports success.
error_code _sys_prx_stop(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_prx.todo("sys_prx_stop()");
	return CELL_OK;
}
// Formatter specialization: renders CellPrxError values by enumerator name.
template <>
void fmt_class_string<CellPrxError>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](CellPrxError value)
	{
		switch (value)
		{
		STR_CASE(CELL_PRX_ERROR_ERROR);
		STR_CASE(CELL_PRX_ERROR_ILLEGAL_PERM);
		STR_CASE(CELL_PRX_ERROR_UNKNOWN_MODULE);
		STR_CASE(CELL_PRX_ERROR_ALREADY_STARTED);
		STR_CASE(CELL_PRX_ERROR_NOT_STARTED);
		STR_CASE(CELL_PRX_ERROR_ALREADY_STOPPED);
		STR_CASE(CELL_PRX_ERROR_CAN_NOT_STOP);
		STR_CASE(CELL_PRX_ERROR_NOT_REMOVABLE);
		STR_CASE(CELL_PRX_ERROR_LIBRARY_NOT_YET_LINKED);
		STR_CASE(CELL_PRX_ERROR_LIBRARY_FOUND);
		STR_CASE(CELL_PRX_ERROR_LIBRARY_NOTFOUND);
		STR_CASE(CELL_PRX_ERROR_ILLEGAL_LIBRARY);
		STR_CASE(CELL_PRX_ERROR_LIBRARY_INUSE);
		STR_CASE(CELL_PRX_ERROR_ALREADY_STOPPING);
		STR_CASE(CELL_PRX_ERROR_UNSUPPORTED_PRX_TYPE);
		STR_CASE(CELL_PRX_ERROR_INVAL);
		STR_CASE(CELL_PRX_ERROR_ILLEGAL_PROCESS);
		STR_CASE(CELL_PRX_ERROR_NO_LIBLV2);
		STR_CASE(CELL_PRX_ERROR_UNSUPPORTED_ELF_TYPE);
		STR_CASE(CELL_PRX_ERROR_UNSUPPORTED_ELF_CLASS);
		STR_CASE(CELL_PRX_ERROR_UNDEFINED_SYMBOL);
		STR_CASE(CELL_PRX_ERROR_UNSUPPORTED_RELOCATION_TYPE);
		STR_CASE(CELL_PRX_ERROR_ELF_IS_REGISTERED);
		STR_CASE(CELL_PRX_ERROR_NO_EXIT_ENTRY);
		}

		// Unrecognized value: fall back to the generic representation
		return unknown;
	});
}
| 30,206
|
C++
|
.cpp
| 963
| 28.692627
| 206
| 0.66708
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,331
|
sys_semaphore.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_semaphore.cpp
|
#include "stdafx.h"
#include "sys_semaphore.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_semaphore);
// Deserializing constructor: field order (protocol, key, name, max, then val)
// must stay in sync with lv2_sema::save.
lv2_sema::lv2_sema(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, name(ar)
	, max(ar)
{
	// Current semaphore value is read last
	ar(val);
}
// Savestate hook: reconstruct the semaphore and register it (honoring its IPC key).
std::shared_ptr<void> lv2_sema::load(utils::serial& ar)
{
	const auto restored = std::make_shared<lv2_sema>(ar);

	return lv2_obj::load(restored->key, restored);
}
// Savestate hook: serialize in the same field order the constructor reads.
// The value is clamped to >= 0 before saving — a negative value encodes
// sleeping waiters, which are not persisted across a savestate.
void lv2_sema::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(protocol, key, name, max, std::max<s32>(+val, 0));
}
// Creates a semaphore object. Validates pointers, value range and protocol,
// then registers the object (optionally under an IPC key) and returns its ID.
error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sys_semaphore_attribute_t> attr, s32 initial_val, s32 max_val)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, initial_val=%d, max_val=%d)", sem_id, attr, initial_val, max_val);

	// Output pointer and attribute pointer must both be valid
	if (!sem_id || !attr)
	{
		return CELL_EFAULT;
	}

	// Required: 0 <= initial_val <= max_val and max_val > 0
	if (max_val <= 0 || initial_val > max_val || initial_val < 0)
	{
		sys_semaphore.error("sys_semaphore_create(): invalid parameters (initial_val=%d, max_val=%d)", initial_val, max_val);
		return CELL_EINVAL;
	}

	// Snapshot the guest attribute structure once
	const auto attr_data = *attr;

	const u32 proto = attr_data.protocol;

	if (proto != SYS_SYNC_FIFO && proto != SYS_SYNC_PRIORITY)
	{
		sys_semaphore.error("sys_semaphore_create(): unknown protocol (0x%x)", proto);
		return CELL_EINVAL;
	}

	const u64 ipc_key = lv2_obj::get_key(attr_data);

	if (ipc_key)
	{
		sys_semaphore.warning("sys_semaphore_create(sem_id=*0x%x, attr=*0x%x, initial_val=%d, max_val=%d): IPC=0x%016x", sem_id, attr, initial_val, max_val, ipc_key);
	}

	// Factory invoked only when a fresh object is actually needed
	const auto make_sema = [&]
	{
		return std::make_shared<lv2_sema>(proto, ipc_key, attr_data.name_u64, max_val, initial_val);
	};

	if (const auto error = lv2_obj::create<lv2_sema>(attr_data.pshared, ipc_key, attr_data.flags, make_sema))
	{
		return error;
	}

	static_cast<void>(ppu.test_stopped());

	// Publish the freshly allocated ID to the guest
	*sem_id = idm::last_id();
	return CELL_OK;
}
// Destroys a semaphore unless threads are currently waiting on it.
error_code sys_semaphore_destroy(ppu_thread& ppu, u32 sem_id)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_destroy(sem_id=0x%x)", sem_id);

	// Withdraw the ID only when no waiters are present
	const auto removed = idm::withdraw<lv2_obj, lv2_sema>(sem_id, [](lv2_sema& sema) -> CellError
	{
		// A negative value means threads are sleeping on this semaphore
		if (sema.val < 0)
		{
			return CELL_EBUSY;
		}

		lv2_obj::on_id_destroy(sema, sema.key);
		return {};
	});

	if (!removed)
	{
		return CELL_ESRCH;
	}

	if (removed->key)
	{
		sys_semaphore.warning("sys_semaphore_destroy(sem_id=0x%x): IPC=0x%016x", sem_id, removed->key);
	}

	if (removed.ret)
	{
		return removed.ret;
	}

	return CELL_OK;
}
// Decrements the semaphore, sleeping (with optional timeout) when the count
// is exhausted. The atomic value doubles as a waiter counter: positive =
// available count, negative = number of sleeping waiters.
error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_wait(sem_id=0x%x, timeout=0x%llx)", sem_id, timeout);

	const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&, notify = lv2_obj::notify_all_t()](lv2_sema& sema)
	{
		// Fast path: lock-free decrement while the count is positive
		const s32 val = sema.val;

		if (val > 0)
		{
			if (sema.val.compare_and_swap_test(val, val - 1))
			{
				return true;
			}
		}

		// Slow path: take the mutex and enqueue this thread as a waiter
		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(sema.mutex);

		if (sema.val-- <= 0)
		{
			// No count available: sleep with the requested timeout
			sema.sleep(ppu, timeout);
			lv2_obj::emplace(sema.sq, &ppu);
			return false;
		}

		// Count became available between the CAS attempt and the lock
		return true;
	});

	if (!sem)
	{
		return CELL_ESRCH;
	}

	if (sem.ret)
	{
		// Acquired without blocking
		return CELL_OK;
	}

	// Default result if woken normally
	ppu.gpr[3] = CELL_OK;

	// Wait loop: exits on signal, emulator stop, or timeout
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			// Emulator is stopping: if still queued, defer the syscall so it
			// can be re-issued after a savestate load
			std::lock_guard lock(sem->mutex);

			for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		// Brief busy-wait before a full sleep, in case the signal is imminent
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				ppu.state += cpu_flag::wait;

				std::lock_guard lock(sem->mutex);

				// No longer queued: a post signaled us right at the deadline
				if (!sem->unqueue(sem->sq, &ppu))
				{
					break;
				}

				// Timed out while queued: undo this waiter's contribution to
				// the negative count (must still be negative here)
				ensure(0 > sem->val.fetch_op([](s32& val)
				{
					if (val < 0)
					{
						val++;
					}
				}));

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			// Infinite wait: sleep until the state word changes
			ppu.state.wait(state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Non-blocking decrement: succeeds only when the count is currently positive.
error_code sys_semaphore_trywait(ppu_thread& ppu, u32 sem_id)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_trywait(sem_id=0x%x)", sem_id);

	// Lock-free attempt; try_dec fails once the value reaches 0 (or below)
	const auto sema = idm::check<lv2_obj, lv2_sema>(sem_id, [](lv2_sema& obj)
	{
		return obj.val.try_dec(0);
	});

	if (!sema)
	{
		return CELL_ESRCH;
	}

	if (!sema.ret)
	{
		// Would block: report busy without logging it as a failure
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
// Increments the semaphore by 'count', waking up to 'count' sleeping waiters.
error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_post(sem_id=0x%x, count=%d)", sem_id, count);

	// Fast path: lock-free add when there are no waiters (val >= 0) and the
	// new total stays within the maximum
	const auto sem = idm::get<lv2_obj, lv2_sema>(sem_id, [&](lv2_sema& sema)
	{
		const s32 val = sema.val;

		if (val >= 0 && count > 0 && count <= sema.max - val)
		{
			if (sema.val.compare_and_swap_test(val, val + count))
			{
				return true;
			}
		}

		return false;
	});

	if (!sem)
	{
		return CELL_ESRCH;
	}

	// Note: count is only validated after the ID lookup succeeds
	if (count <= 0)
	{
		return CELL_EINVAL;
	}

	lv2_obj::notify_all_t notify;

	if (sem.ret)
	{
		return CELL_OK;
	}
	else
	{
		// Slow path: waiters may be present, serialize with the mutex
		std::lock_guard lock(sem->mutex);

		// If any queued thread was deferred for a savestate, defer this call too
		for (auto cpu = +sem->sq; cpu; cpu = cpu->next_cpu)
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return {};
			}
		}

		// Unsigned comparison also rejects out-of-range combinations when
		// val is negative (waiters present)
		const auto [val, ok] = sem->val.fetch_op([&](s32& val)
		{
			if (count + 0u <= sem->max + 0u - val)
			{
				val += count;
				return true;
			}

			return false;
		});

		if (!ok)
		{
			// Would exceed the maximum count
			return not_an_error(CELL_EBUSY);
		}

		// Wake threads
		// One per unit of (previously) negative count, at most 'count' of them
		const s32 to_awake = std::min<s32>(-std::min<s32>(val, 0), count);

		for (s32 i = 0; i < to_awake; i++)
		{
			sem->append((ensure(sem->schedule<ppu_thread>(sem->sq, sem->protocol))));
		}

		if (to_awake > 0)
		{
			lv2_obj::awake_all();
		}
	}

	return CELL_OK;
}
// Reads the current semaphore count into guest memory.
error_code sys_semaphore_get_value(ppu_thread& ppu, u32 sem_id, vm::ptr<s32> count)
{
	ppu.state += cpu_flag::wait;

	sys_semaphore.trace("sys_semaphore_get_value(sem_id=0x%x, count=*0x%x)", sem_id, count);

	// Negative internal values (waiter bookkeeping) are reported as zero
	const auto checked = idm::check<lv2_obj, lv2_sema>(sem_id, [](lv2_sema& sema)
	{
		return std::max<s32>(0, sema.val);
	});

	if (!checked)
	{
		return CELL_ESRCH;
	}

	if (!count)
	{
		return CELL_EFAULT;
	}

	static_cast<void>(ppu.test_stopped());

	*count = checked.ret;
	return CELL_OK;
}
| 6,585
|
C++
|
.cpp
| 286
| 20.006993
| 160
| 0.633644
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,332
|
lv2.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/lv2.cpp
|
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Memory/vm_reservation.h"
#include "Emu/Memory/vm_locking.h"
#include "Emu/Cell/PPUFunction.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/MFC.h"
#include "sys_sync.h"
#include "sys_lwmutex.h"
#include "sys_lwcond.h"
#include "sys_mutex.h"
#include "sys_cond.h"
#include "sys_event.h"
#include "sys_event_flag.h"
#include "sys_game.h"
#include "sys_interrupt.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_net.h"
#include "sys_overlay.h"
#include "sys_ppu_thread.h"
#include "sys_process.h"
#include "sys_prx.h"
#include "sys_rsx.h"
#include "sys_rwlock.h"
#include "sys_semaphore.h"
#include "sys_spu.h"
#include "sys_time.h"
#include "sys_timer.h"
#include "sys_trace.h"
#include "sys_tty.h"
#include "sys_usbd.h"
#include "sys_vm.h"
#include "sys_fs.h"
#include "sys_dbg.h"
#include "sys_gamepad.h"
#include "sys_ss.h"
#include "sys_gpio.h"
#include "sys_config.h"
#include "sys_bdemu.h"
#include "sys_btsetting.h"
#include "sys_console.h"
#include "sys_hid.h"
#include "sys_io.h"
#include "sys_rsxaudio.h"
#include "sys_sm.h"
#include "sys_storage.h"
#include "sys_uart.h"
#include "sys_crypto_engine.h"
#include <algorithm>
#include <optional>
#include <deque>
#include "util/tsc.hpp"
#include "util/sysinfo.hpp"
#include "util/init_mutex.hpp"
#if defined(ARCH_X64)
#ifdef _MSC_VER
#include <intrin.h>
#include <immintrin.h>
#else
#include <x86intrin.h>
#endif
#endif
extern std::string ppu_get_syscall_name(u64 code);
namespace rsx
{
void set_rsx_yield_flag() noexcept;
}
// Formatter specialization: prints a syscall number as its known name.
template <>
void fmt_class_string<ppu_syscall_code>::format(std::string& out, u64 arg)
{
	out.append(ppu_get_syscall_name(arg));
}
// Formatter specialization: short names for lv2 scheduling protocols.
template <>
void fmt_class_string<lv2_protocol>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_SYNC_FIFO:
			return "FIFO";
		case SYS_SYNC_PRIORITY:
			return "PRIO";
		case SYS_SYNC_PRIORITY_INHERIT:
			return "PRIO-INHER";
		case SYS_SYNC_RETRY:
			return "RETRY";
		}

		// Any other value uses the generic representation
		return unknown;
	});
}
// Formatter specialization: decodes a packed 64-bit lv2 object name.
template <>
void fmt_class_string<lv2_obj::name_64>::format(std::string& out, u64 arg)
{
	out += lv2_obj::name64(get_object(arg).data);
}
// Handler for known-but-unimplemented syscalls: logs every argument register,
// forces a CELL_OK (0) result in r3, and advances the PC past the instruction.
static void null_func_(ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func*)
{
	ppu_log.todo("Unimplemented syscall %s -> CELL_OK (r3=0x%llx, r4=0x%llx, r5=0x%llx, r6=0x%llx, r7=0x%llx, r8=0x%llx, r9=0x%llx, r10=0x%llx)", ppu_syscall_code(ppu.gpr[11]),
		ppu.gpr[3], ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7], ppu.gpr[8], ppu.gpr[9], ppu.gpr[10]);

	ppu.gpr[3] = 0;
	// Resume execution at the instruction following the syscall
	ppu.cia = vm::get_addr(this_op) + 4;
}
// Handler for unused syscall slots: returns ENOSYS and skips the instruction.
static void uns_func_(ppu_thread& ppu, ppu_opcode_t, be_t<u32>* this_op, ppu_intrp_func*)
{
	ppu_log.trace("Unused syscall %d -> ENOSYS", ppu.gpr[11]);

	// Resume at the next instruction with ENOSYS as the result
	ppu.cia = vm::get_addr(this_op) + 4;
	ppu.gpr[3] = CELL_ENOSYS;
}
// Bind Syscall
// Table-entry helpers: pair an implementation with its name for logging.
#define BIND_SYSC(func) {BIND_FUNC(func), #func}
#define NULL_FUNC(name) {null_func_, #name}

// Fallback entries: null_func logs "unimplemented" and returns CELL_OK,
// uns_func returns ENOSYS for unused slots.
constexpr std::pair<ppu_intrp_func_t, std::string_view> null_func{null_func_, ""};
constexpr std::pair<ppu_intrp_func_t, std::string_view> uns_func{uns_func_, ""};
// UNS = Unused
// ROOT = Root
// DBG = Debug
// DEX..DECR = Unavailable on retail consoles
// PM = Product Mode
// AuthID = Authentication ID
const std::array<std::pair<ppu_intrp_func_t, std::string_view>, 1024> g_ppu_syscall_table
{
null_func,
BIND_SYSC(sys_process_getpid), //1 (0x001)
BIND_SYSC(sys_process_wait_for_child), //2 (0x002) ROOT
BIND_SYSC(sys_process_exit3), //3 (0x003)
BIND_SYSC(sys_process_get_status), //4 (0x004) DBG
BIND_SYSC(sys_process_detach_child), //5 (0x005) DBG
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //6-11 UNS
BIND_SYSC(sys_process_get_number_of_object), //12 (0x00C)
BIND_SYSC(sys_process_get_id), //13 (0x00D)
BIND_SYSC(sys_process_is_spu_lock_line_reservation_address), //14 (0x00E)
uns_func, uns_func, uns_func, //15-17 UNS
BIND_SYSC(sys_process_getppid), //18 (0x012)
BIND_SYSC(sys_process_kill), //19 (0x013)
uns_func, //20 (0x014) UNS
NULL_FUNC(_sys_process_spawn), //21 (0x015) DBG
BIND_SYSC(_sys_process_exit), //22 (0x016)
BIND_SYSC(sys_process_wait_for_child2), //23 (0x017) DBG
null_func,//BIND_SYSC(), //24 (0x018) DBG
BIND_SYSC(sys_process_get_sdk_version), //25 (0x019)
BIND_SYSC(_sys_process_exit2), //26 (0x01A)
BIND_SYSC(sys_process_spawns_a_self2), //27 (0x01B) DBG
NULL_FUNC(_sys_process_get_number_of_object), //28 (0x01C) ROOT
BIND_SYSC(sys_process_get_id2), //29 (0x01D) ROOT
BIND_SYSC(_sys_process_get_paramsfo), //30 (0x01E)
NULL_FUNC(sys_process_get_ppu_guid), //31 (0x01F)
uns_func, uns_func ,uns_func , uns_func ,uns_func, uns_func ,uns_func, uns_func ,uns_func, //32-40 UNS
BIND_SYSC(_sys_ppu_thread_exit), //41 (0x029)
uns_func, //42 (0x02A) UNS
BIND_SYSC(sys_ppu_thread_yield), //43 (0x02B)
BIND_SYSC(sys_ppu_thread_join), //44 (0x02C)
BIND_SYSC(sys_ppu_thread_detach), //45 (0x02D)
BIND_SYSC(sys_ppu_thread_get_join_state), //46 (0x02E)
BIND_SYSC(sys_ppu_thread_set_priority), //47 (0x02F) DBG
BIND_SYSC(sys_ppu_thread_get_priority), //48 (0x030)
BIND_SYSC(sys_ppu_thread_get_stack_information), //49 (0x031)
BIND_SYSC(sys_ppu_thread_stop), //50 (0x032) ROOT
BIND_SYSC(sys_ppu_thread_restart), //51 (0x033) ROOT
BIND_SYSC(_sys_ppu_thread_create), //52 (0x034) DBG
BIND_SYSC(sys_ppu_thread_start), //53 (0x035)
null_func,//BIND_SYSC(sys_ppu_...), //54 (0x036) ROOT
null_func,//BIND_SYSC(sys_ppu_...), //55 (0x037) ROOT
BIND_SYSC(sys_ppu_thread_rename), //56 (0x038)
BIND_SYSC(sys_ppu_thread_recover_page_fault), //57 (0x039)
BIND_SYSC(sys_ppu_thread_get_page_fault_context), //58 (0x03A)
uns_func, //59 (0x03B) UNS
BIND_SYSC(sys_trace_create), //60 (0x03C)
BIND_SYSC(sys_trace_start), //61 (0x03D)
BIND_SYSC(sys_trace_stop), //62 (0x03E)
BIND_SYSC(sys_trace_update_top_index), //63 (0x03F)
BIND_SYSC(sys_trace_destroy), //64 (0x040)
BIND_SYSC(sys_trace_drain), //65 (0x041)
BIND_SYSC(sys_trace_attach_process), //66 (0x042)
BIND_SYSC(sys_trace_allocate_buffer), //67 (0x043)
BIND_SYSC(sys_trace_free_buffer), //68 (0x044)
BIND_SYSC(sys_trace_create2), //69 (0x045)
BIND_SYSC(sys_timer_create), //70 (0x046)
BIND_SYSC(sys_timer_destroy), //71 (0x047)
BIND_SYSC(sys_timer_get_information), //72 (0x048)
BIND_SYSC(_sys_timer_start), //73 (0x049)
BIND_SYSC(sys_timer_stop), //74 (0x04A)
BIND_SYSC(sys_timer_connect_event_queue), //75 (0x04B)
BIND_SYSC(sys_timer_disconnect_event_queue), //76 (0x04C)
NULL_FUNC(sys_trace_create2_in_cbepm), //77 (0x04D)
null_func,//BIND_SYSC(sys_trace_...), //78 (0x04E)
uns_func, //79 (0x04F) UNS
NULL_FUNC(sys_interrupt_tag_create), //80 (0x050)
BIND_SYSC(sys_interrupt_tag_destroy), //81 (0x051)
BIND_SYSC(sys_event_flag_create), //82 (0x052)
BIND_SYSC(sys_event_flag_destroy), //83 (0x053)
BIND_SYSC(_sys_interrupt_thread_establish), //84 (0x054)
BIND_SYSC(sys_event_flag_wait), //85 (0x055)
BIND_SYSC(sys_event_flag_trywait), //86 (0x056)
BIND_SYSC(sys_event_flag_set), //87 (0x057)
BIND_SYSC(sys_interrupt_thread_eoi), //88 (0x058)
BIND_SYSC(_sys_interrupt_thread_disestablish), //89 (0x059)
BIND_SYSC(sys_semaphore_create), //90 (0x05A)
BIND_SYSC(sys_semaphore_destroy), //91 (0x05B)
BIND_SYSC(sys_semaphore_wait), //92 (0x05C)
BIND_SYSC(sys_semaphore_trywait), //93 (0x05D)
BIND_SYSC(sys_semaphore_post), //94 (0x05E)
BIND_SYSC(_sys_lwmutex_create), //95 (0x05F)
BIND_SYSC(_sys_lwmutex_destroy), //96 (0x060)
BIND_SYSC(_sys_lwmutex_lock), //97 (0x061)
BIND_SYSC(_sys_lwmutex_unlock), //98 (0x062)
BIND_SYSC(_sys_lwmutex_trylock), //99 (0x063)
BIND_SYSC(sys_mutex_create), //100 (0x064)
BIND_SYSC(sys_mutex_destroy), //101 (0x065)
BIND_SYSC(sys_mutex_lock), //102 (0x066)
BIND_SYSC(sys_mutex_trylock), //103 (0x067)
BIND_SYSC(sys_mutex_unlock), //104 (0x068)
BIND_SYSC(sys_cond_create), //105 (0x069)
BIND_SYSC(sys_cond_destroy), //106 (0x06A)
BIND_SYSC(sys_cond_wait), //107 (0x06B)
BIND_SYSC(sys_cond_signal), //108 (0x06C)
BIND_SYSC(sys_cond_signal_all), //109 (0x06D)
BIND_SYSC(sys_cond_signal_to), //110 (0x06E)
BIND_SYSC(_sys_lwcond_create), //111 (0x06F)
BIND_SYSC(_sys_lwcond_destroy), //112 (0x070)
BIND_SYSC(_sys_lwcond_queue_wait), //113 (0x071)
BIND_SYSC(sys_semaphore_get_value), //114 (0x072)
BIND_SYSC(_sys_lwcond_signal), //115 (0x073)
BIND_SYSC(_sys_lwcond_signal_all), //116 (0x074)
BIND_SYSC(_sys_lwmutex_unlock2), //117 (0x075)
BIND_SYSC(sys_event_flag_clear), //118 (0x076)
BIND_SYSC(sys_time_get_rtc), //119 (0x077) ROOT
BIND_SYSC(sys_rwlock_create), //120 (0x078)
BIND_SYSC(sys_rwlock_destroy), //121 (0x079)
BIND_SYSC(sys_rwlock_rlock), //122 (0x07A)
BIND_SYSC(sys_rwlock_tryrlock), //123 (0x07B)
BIND_SYSC(sys_rwlock_runlock), //124 (0x07C)
BIND_SYSC(sys_rwlock_wlock), //125 (0x07D)
BIND_SYSC(sys_rwlock_trywlock), //126 (0x07E)
BIND_SYSC(sys_rwlock_wunlock), //127 (0x07F)
BIND_SYSC(sys_event_queue_create), //128 (0x080)
BIND_SYSC(sys_event_queue_destroy), //129 (0x081)
BIND_SYSC(sys_event_queue_receive), //130 (0x082)
BIND_SYSC(sys_event_queue_tryreceive), //131 (0x083)
BIND_SYSC(sys_event_flag_cancel), //132 (0x084)
BIND_SYSC(sys_event_queue_drain), //133 (0x085)
BIND_SYSC(sys_event_port_create), //134 (0x086)
BIND_SYSC(sys_event_port_destroy), //135 (0x087)
BIND_SYSC(sys_event_port_connect_local), //136 (0x088)
BIND_SYSC(sys_event_port_disconnect), //137 (0x089)
BIND_SYSC(sys_event_port_send), //138 (0x08A)
BIND_SYSC(sys_event_flag_get), //139 (0x08B)
BIND_SYSC(sys_event_port_connect_ipc), //140 (0x08C)
BIND_SYSC(sys_timer_usleep), //141 (0x08D)
BIND_SYSC(sys_timer_sleep), //142 (0x08E)
BIND_SYSC(sys_time_set_timezone), //143 (0x08F) ROOT
BIND_SYSC(sys_time_get_timezone), //144 (0x090)
BIND_SYSC(sys_time_get_current_time), //145 (0x091)
BIND_SYSC(sys_time_set_current_time), //146 (0x092) ROOT
BIND_SYSC(sys_time_get_timebase_frequency), //147 (0x093)
BIND_SYSC(_sys_rwlock_trywlock), //148 (0x094)
NULL_FUNC(sys_time_get_system_time), //149 (0x095)
BIND_SYSC(sys_raw_spu_create_interrupt_tag), //150 (0x096)
BIND_SYSC(sys_raw_spu_set_int_mask), //151 (0x097)
BIND_SYSC(sys_raw_spu_get_int_mask), //152 (0x098)
BIND_SYSC(sys_raw_spu_set_int_stat), //153 (0x099)
BIND_SYSC(sys_raw_spu_get_int_stat), //154 (0x09A)
BIND_SYSC(_sys_spu_image_get_information), //155 (0x09B)
BIND_SYSC(sys_spu_image_open), //156 (0x09C)
BIND_SYSC(_sys_spu_image_import), //157 (0x09D)
BIND_SYSC(_sys_spu_image_close), //158 (0x09E)
BIND_SYSC(_sys_spu_image_get_segments), //159 (0x09F)
BIND_SYSC(sys_raw_spu_create), //160 (0x0A0)
BIND_SYSC(sys_raw_spu_destroy), //161 (0x0A1)
uns_func, //162 (0x0A2) UNS
BIND_SYSC(sys_raw_spu_read_puint_mb), //163 (0x0A3)
uns_func, //164 (0x0A4) UNS
BIND_SYSC(sys_spu_thread_get_exit_status), //165 (0x0A5)
BIND_SYSC(sys_spu_thread_set_argument), //166 (0x0A6)
NULL_FUNC(sys_spu_thread_group_start_on_exit), //167 (0x0A7)
uns_func, //168 (0x0A8) UNS
BIND_SYSC(sys_spu_initialize), //169 (0x0A9)
BIND_SYSC(sys_spu_thread_group_create), //170 (0x0AA)
BIND_SYSC(sys_spu_thread_group_destroy), //171 (0x0AB)
BIND_SYSC(sys_spu_thread_initialize), //172 (0x0AC)
BIND_SYSC(sys_spu_thread_group_start), //173 (0x0AD)
BIND_SYSC(sys_spu_thread_group_suspend), //174 (0x0AE)
BIND_SYSC(sys_spu_thread_group_resume), //175 (0x0AF)
BIND_SYSC(sys_spu_thread_group_yield), //176 (0x0B0)
BIND_SYSC(sys_spu_thread_group_terminate), //177 (0x0B1)
BIND_SYSC(sys_spu_thread_group_join), //178 (0x0B2)
BIND_SYSC(sys_spu_thread_group_set_priority), //179 (0x0B3)
BIND_SYSC(sys_spu_thread_group_get_priority), //180 (0x0B4)
BIND_SYSC(sys_spu_thread_write_ls), //181 (0x0B5)
BIND_SYSC(sys_spu_thread_read_ls), //182 (0x0B6)
uns_func, //183 (0x0B7) UNS
BIND_SYSC(sys_spu_thread_write_snr), //184 (0x0B8)
BIND_SYSC(sys_spu_thread_group_connect_event), //185 (0x0B9)
BIND_SYSC(sys_spu_thread_group_disconnect_event), //186 (0x0BA)
BIND_SYSC(sys_spu_thread_set_spu_cfg), //187 (0x0BB)
BIND_SYSC(sys_spu_thread_get_spu_cfg), //188 (0x0BC)
uns_func, //189 (0x0BD) UNS
BIND_SYSC(sys_spu_thread_write_spu_mb), //190 (0x0BE)
BIND_SYSC(sys_spu_thread_connect_event), //191 (0x0BF)
BIND_SYSC(sys_spu_thread_disconnect_event), //192 (0x0C0)
BIND_SYSC(sys_spu_thread_bind_queue), //193 (0x0C1)
BIND_SYSC(sys_spu_thread_unbind_queue), //194 (0x0C2)
uns_func, //195 (0x0C3) UNS
BIND_SYSC(sys_raw_spu_set_spu_cfg), //196 (0x0C4)
BIND_SYSC(sys_raw_spu_get_spu_cfg), //197 (0x0C5)
BIND_SYSC(sys_spu_thread_recover_page_fault), //198 (0x0C6)
BIND_SYSC(sys_raw_spu_recover_page_fault), //199 (0x0C7)
null_func, null_func, null_func, null_func, null_func, //204 UNS?
null_func, null_func, null_func, null_func, null_func, //209 UNS?
null_func, null_func, null_func, //212 UNS?
BIND_SYSC(sys_console_write2), //213 (0x0D5)
null_func, //214 UNS?
NULL_FUNC(sys_dbg_mat_set_condition), //215 (0x0D7)
NULL_FUNC(sys_dbg_mat_get_condition), //216 (0x0D8)
uns_func,//BIND_SYSC(sys_dbg_...), //217 (0x0D9) DBG UNS?
uns_func,//BIND_SYSC(sys_dbg_...), //218 (0x0DA) DBG UNS?
uns_func,//BIND_SYSC(sys_dbg_...), //219 (0x0DB) DBG UNS?
null_func, null_func, null_func, null_func, null_func, //224 UNS
null_func, null_func, null_func, null_func, null_func, //229 UNS?
BIND_SYSC(sys_isolated_spu_create), //230 (0x0E6) ROOT
BIND_SYSC(sys_isolated_spu_destroy), //231 (0x0E7) ROOT
BIND_SYSC(sys_isolated_spu_start), //232 (0x0E8) ROOT
BIND_SYSC(sys_isolated_spu_create_interrupt_tag), //233 (0x0E9) ROOT
BIND_SYSC(sys_isolated_spu_set_int_mask), //234 (0x0EA) ROOT
BIND_SYSC(sys_isolated_spu_get_int_mask), //235 (0x0EB) ROOT
BIND_SYSC(sys_isolated_spu_set_int_stat), //236 (0x0EC) ROOT
BIND_SYSC(sys_isolated_spu_get_int_stat), //237 (0x0ED) ROOT
BIND_SYSC(sys_isolated_spu_set_spu_cfg), //238 (0x0EE) ROOT
BIND_SYSC(sys_isolated_spu_get_spu_cfg), //239 (0x0EF) ROOT
BIND_SYSC(sys_isolated_spu_read_puint_mb), //240 (0x0F0) ROOT
uns_func, uns_func, uns_func, //241-243 ROOT UNS
NULL_FUNC(sys_spu_thread_group_system_set_next_group), //244 (0x0F4) ROOT
NULL_FUNC(sys_spu_thread_group_system_unset_next_group),//245 (0x0F5) ROOT
NULL_FUNC(sys_spu_thread_group_system_set_switch_group),//246 (0x0F6) ROOT
NULL_FUNC(sys_spu_thread_group_system_unset_switch_group),//247 (0x0F7) ROOT
null_func,//BIND_SYSC(sys_spu_thread_group...), //248 (0x0F8) ROOT
null_func,//BIND_SYSC(sys_spu_thread_group...), //249 (0x0F9) ROOT
BIND_SYSC(sys_spu_thread_group_set_cooperative_victims),//250 (0x0FA)
BIND_SYSC(sys_spu_thread_group_connect_event_all_threads), //251 (0x0FB)
BIND_SYSC(sys_spu_thread_group_disconnect_event_all_threads), //252 (0x0FC)
BIND_SYSC(sys_spu_thread_group_syscall_253), //253 (0x0FD)
BIND_SYSC(sys_spu_thread_group_log), //254 (0x0FE)
uns_func, uns_func, uns_func, uns_func, uns_func, //255-259 UNS
NULL_FUNC(sys_spu_image_open_by_fd), //260 (0x104)
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //261-269 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //270-279 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //280-289 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //290-299 UNS
BIND_SYSC(sys_vm_memory_map), //300 (0x12C)
BIND_SYSC(sys_vm_unmap), //301 (0x12D)
BIND_SYSC(sys_vm_append_memory), //302 (0x12E)
BIND_SYSC(sys_vm_return_memory), //303 (0x12F)
BIND_SYSC(sys_vm_lock), //304 (0x130)
BIND_SYSC(sys_vm_unlock), //305 (0x131)
BIND_SYSC(sys_vm_touch), //306 (0x132)
BIND_SYSC(sys_vm_flush), //307 (0x133)
BIND_SYSC(sys_vm_invalidate), //308 (0x134)
BIND_SYSC(sys_vm_store), //309 (0x135)
BIND_SYSC(sys_vm_sync), //310 (0x136)
BIND_SYSC(sys_vm_test), //311 (0x137)
BIND_SYSC(sys_vm_get_statistics), //312 (0x138)
BIND_SYSC(sys_vm_memory_map_different), //313 (0x139)
null_func,//BIND_SYSC(sys_...), //314 (0x13A)
null_func,//BIND_SYSC(sys_...), //315 (0x13B)
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //316-323 UNS
BIND_SYSC(sys_memory_container_create), //324 (0x144) DBG
BIND_SYSC(sys_memory_container_destroy), //325 (0x145) DBG
BIND_SYSC(sys_mmapper_allocate_fixed_address), //326 (0x146)
BIND_SYSC(sys_mmapper_enable_page_fault_notification), //327 (0x147)
BIND_SYSC(sys_mmapper_allocate_shared_memory_from_container_ext), //328 (0x148)
BIND_SYSC(sys_mmapper_free_shared_memory), //329 (0x149)
BIND_SYSC(sys_mmapper_allocate_address), //330 (0x14A)
BIND_SYSC(sys_mmapper_free_address), //331 (0x14B)
BIND_SYSC(sys_mmapper_allocate_shared_memory), //332 (0x14C)
NULL_FUNC(sys_mmapper_set_shared_memory_flag), //333(0x14D)
BIND_SYSC(sys_mmapper_map_shared_memory), //334 (0x14E)
BIND_SYSC(sys_mmapper_unmap_shared_memory), //335 (0x14F)
BIND_SYSC(sys_mmapper_change_address_access_right), //336 (0x150)
BIND_SYSC(sys_mmapper_search_and_map), //337 (0x151)
NULL_FUNC(sys_mmapper_get_shared_memory_attribute), //338 (0x152)
BIND_SYSC(sys_mmapper_allocate_shared_memory_ext), //339 (0x153)
null_func,//BIND_SYSC(sys_...), //340 (0x154)
BIND_SYSC(sys_memory_container_create), //341 (0x155)
BIND_SYSC(sys_memory_container_destroy), //342 (0x156)
BIND_SYSC(sys_memory_container_get_size), //343 (0x157)
NULL_FUNC(sys_memory_budget_set), //344 (0x158)
BIND_SYSC(sys_memory_container_destroy_parent_with_childs), //345 (0x159)
null_func,//BIND_SYSC(sys_memory_...), //346 (0x15A)
uns_func, //347 (0x15B) UNS
BIND_SYSC(sys_memory_allocate), //348 (0x15C)
BIND_SYSC(sys_memory_free), //349 (0x15D)
BIND_SYSC(sys_memory_allocate_from_container), //350 (0x15E)
BIND_SYSC(sys_memory_get_page_attribute), //351 (0x15F)
BIND_SYSC(sys_memory_get_user_memory_size), //352 (0x160)
BIND_SYSC(sys_memory_get_user_memory_stat), //353 (0x161)
null_func,//BIND_SYSC(sys_memory_...), //354 (0x162)
null_func,//BIND_SYSC(sys_memory_...), //355 (0x163)
NULL_FUNC(sys_memory_allocate_colored), //356 (0x164)
null_func,//BIND_SYSC(sys_memory_...), //357 (0x165)
null_func,//BIND_SYSC(sys_memory_...), //358 (0x166)
null_func,//BIND_SYSC(sys_memory_...), //359 (0x167)
null_func,//BIND_SYSC(sys_memory_...), //360 (0x168)
NULL_FUNC(sys_memory_allocate_from_container_colored), //361 (0x169)
BIND_SYSC(sys_mmapper_allocate_shared_memory_from_container),//362 (0x16A)
null_func,//BIND_SYSC(sys_mmapper_...), //363 (0x16B)
null_func,//BIND_SYSC(sys_mmapper_...), //364 (0x16C)
uns_func, uns_func, //366 (0x16E) UNS
BIND_SYSC(sys_uart_initialize), //367 (0x16F) ROOT
BIND_SYSC(sys_uart_receive), //368 (0x170) ROOT
BIND_SYSC(sys_uart_send), //369 (0x171) ROOT
BIND_SYSC(sys_uart_get_params), //370 (0x172) ROOT
uns_func, //371 (0x173) UNS
BIND_SYSC(_sys_game_watchdog_start), //372 (0x174)
BIND_SYSC(_sys_game_watchdog_stop), //373 (0x175)
BIND_SYSC(_sys_game_watchdog_clear), //374 (0x176)
BIND_SYSC(_sys_game_set_system_sw_version), //375 (0x177) ROOT
BIND_SYSC(_sys_game_get_system_sw_version), //376 (0x178) ROOT
BIND_SYSC(sys_sm_set_shop_mode), //377 (0x179) ROOT
BIND_SYSC(sys_sm_get_ext_event2), //378 (0x17A) ROOT
BIND_SYSC(sys_sm_shutdown), //379 (0x17B) ROOT
BIND_SYSC(sys_sm_get_params), //380 (0x17C) DBG
NULL_FUNC(sys_sm_get_inter_lpar_parameter), //381 (0x17D) ROOT
NULL_FUNC(sys_sm_initialize), //382 (0x17E) ROOT
NULL_FUNC(sys_game_get_temperature), //383 (0x17F) ROOT
NULL_FUNC(sys_sm_get_tzpb), //384 (0x180) ROOT
NULL_FUNC(sys_sm_request_led), //385 (0x181) ROOT
BIND_SYSC(sys_sm_control_led), //386 (0x182) ROOT
NULL_FUNC(sys_sm_get_system_info), //387 (0x183) DBG
BIND_SYSC(sys_sm_ring_buzzer2), //388 (0x184) ROOT
NULL_FUNC(sys_sm_set_fan_policy), //389 (0x185) PM
NULL_FUNC(sys_sm_request_error_log), //390 (0x186) ROOT
NULL_FUNC(sys_sm_request_be_count), //391 (0x187) ROOT
BIND_SYSC(sys_sm_ring_buzzer), //392 (0x188) ROOT
NULL_FUNC(sys_sm_get_hw_config), //393 (0x189) ROOT
NULL_FUNC(sys_sm_request_scversion), //394 (0x18A) ROOT
NULL_FUNC(sys_sm_request_system_event_log), //395 (0x18B) PM
NULL_FUNC(sys_sm_set_rtc_alarm), //396 (0x18C) ROOT
NULL_FUNC(sys_sm_get_rtc_alarm), //397 (0x18D) ROOT
BIND_SYSC(sys_console_write), //398 (0x18E) ROOT
uns_func, //399 (0x18F) UNS
null_func,//BIND_SYSC(sys_sm_...), //400 (0x190) PM
null_func,//BIND_SYSC(sys_sm_...), //401 (0x191) ROOT
BIND_SYSC(sys_tty_read), //402 (0x192)
BIND_SYSC(sys_tty_write), //403 (0x193)
null_func,//BIND_SYSC(sys_...), //404 (0x194) ROOT
null_func,//BIND_SYSC(sys_...), //405 (0x195) PM
null_func,//BIND_SYSC(sys_...), //406 (0x196) PM
null_func,//BIND_SYSC(sys_...), //407 (0x197) PM
NULL_FUNC(sys_sm_get_tzpb), //408 (0x198) PM
NULL_FUNC(sys_sm_get_fan_policy), //409 (0x199) PM
BIND_SYSC(_sys_game_board_storage_read), //410 (0x19A)
BIND_SYSC(_sys_game_board_storage_write), //411 (0x19B)
BIND_SYSC(_sys_game_get_rtc_status), //412 (0x19C)
null_func,//BIND_SYSC(sys_...), //413 (0x19D) ROOT
null_func,//BIND_SYSC(sys_...), //414 (0x19E) ROOT
null_func,//BIND_SYSC(sys_...), //415 (0x19F) ROOT
uns_func, uns_func, uns_func, uns_func, //416-419 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //420-429 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //430-439 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //440-449 UNS
BIND_SYSC(sys_overlay_load_module), //450 (0x1C2)
BIND_SYSC(sys_overlay_unload_module), //451 (0x1C3)
NULL_FUNC(sys_overlay_get_module_list), //452 (0x1C4)
NULL_FUNC(sys_overlay_get_module_info), //453 (0x1C5)
BIND_SYSC(sys_overlay_load_module_by_fd), //454 (0x1C6)
NULL_FUNC(sys_overlay_get_module_info2), //455 (0x1C7)
NULL_FUNC(sys_overlay_get_sdk_version), //456 (0x1C8)
NULL_FUNC(sys_overlay_get_module_dbg_info), //457 (0x1C9)
NULL_FUNC(sys_overlay_get_module_dbg_info), //458 (0x1CA)
uns_func, //459 (0x1CB) UNS
NULL_FUNC(sys_prx_dbg_get_module_id_list), //460 (0x1CC) ROOT
BIND_SYSC(_sys_prx_get_module_id_by_address), //461 (0x1CD)
uns_func, //462 (0x1CE) DEX
BIND_SYSC(_sys_prx_load_module_by_fd), //463 (0x1CF)
BIND_SYSC(_sys_prx_load_module_on_memcontainer_by_fd), //464 (0x1D0)
BIND_SYSC(_sys_prx_load_module_list), //465 (0x1D1)
BIND_SYSC(_sys_prx_load_module_list_on_memcontainer), //466 (0x1D2)
BIND_SYSC(sys_prx_get_ppu_guid), //467 (0x1D3)
null_func,//BIND_SYSC(sys_...), //468 (0x1D4) ROOT
uns_func, //469 (0x1D5) UNS
NULL_FUNC(sys_npdrm_check_ekc), //470 (0x1D6) ROOT
NULL_FUNC(sys_npdrm_regist_ekc), //471 (0x1D7) ROOT
null_func,//BIND_SYSC(sys_...), //472 (0x1D8) ROOT
null_func,//BIND_SYSC(sys_...), //473 (0x1D9)
null_func,//BIND_SYSC(sys_...), //474 (0x1DA)
null_func,//BIND_SYSC(sys_...), //475 (0x1DB) ROOT
null_func,//BIND_SYSC(sys_...), //476 (0x1DC) ROOT
uns_func, uns_func, uns_func, //477-479 UNS
BIND_SYSC(_sys_prx_load_module), //480 (0x1E0)
BIND_SYSC(_sys_prx_start_module), //481 (0x1E1)
BIND_SYSC(_sys_prx_stop_module), //482 (0x1E2)
BIND_SYSC(_sys_prx_unload_module), //483 (0x1E3)
BIND_SYSC(_sys_prx_register_module), //484 (0x1E4)
BIND_SYSC(_sys_prx_query_module), //485 (0x1E5)
BIND_SYSC(_sys_prx_register_library), //486 (0x1E6)
BIND_SYSC(_sys_prx_unregister_library), //487 (0x1E7)
BIND_SYSC(_sys_prx_link_library), //488 (0x1E8)
BIND_SYSC(_sys_prx_unlink_library), //489 (0x1E9)
BIND_SYSC(_sys_prx_query_library), //490 (0x1EA)
uns_func, //491 (0x1EB) UNS
NULL_FUNC(sys_prx_dbg_get_module_list), //492 (0x1EC) DBG
NULL_FUNC(sys_prx_dbg_get_module_info), //493 (0x1ED) DBG
BIND_SYSC(_sys_prx_get_module_list), //494 (0x1EE)
BIND_SYSC(_sys_prx_get_module_info), //495 (0x1EF)
BIND_SYSC(_sys_prx_get_module_id_by_name), //496 (0x1F0)
BIND_SYSC(_sys_prx_load_module_on_memcontainer), //497 (0x1F1)
BIND_SYSC(_sys_prx_start), //498 (0x1F2)
BIND_SYSC(_sys_prx_stop), //499 (0x1F3)
BIND_SYSC(sys_hid_manager_open), //500 (0x1F4)
NULL_FUNC(sys_hid_manager_close), //501 (0x1F5)
BIND_SYSC(sys_hid_manager_read), //502 (0x1F6) ROOT
BIND_SYSC(sys_hid_manager_ioctl), //503 (0x1F7)
NULL_FUNC(sys_hid_manager_map_logical_id_to_port_id), //504 (0x1F8) ROOT
NULL_FUNC(sys_hid_manager_unmap_logical_id_to_port_id), //505 (0x1F9) ROOT
BIND_SYSC(sys_hid_manager_add_hot_key_observer), //506 (0x1FA) ROOT
NULL_FUNC(sys_hid_manager_remove_hot_key_observer), //507 (0x1FB) ROOT
NULL_FUNC(sys_hid_manager_grab_focus), //508 (0x1FC) ROOT
NULL_FUNC(sys_hid_manager_release_focus), //509 (0x1FD) ROOT
BIND_SYSC(sys_hid_manager_check_focus), //510 (0x1FE)
NULL_FUNC(sys_hid_manager_set_master_process), //511 (0x1FF) ROOT
BIND_SYSC(sys_hid_manager_is_process_permission_root), //512 (0x200) ROOT
BIND_SYSC(sys_hid_manager_513), //513 (0x201)
BIND_SYSC(sys_hid_manager_514), //514 (0x202)
uns_func, //515 (0x203) UNS
BIND_SYSC(sys_config_open), //516 (0x204)
BIND_SYSC(sys_config_close), //517 (0x205)
BIND_SYSC(sys_config_get_service_event), //518 (0x206)
BIND_SYSC(sys_config_add_service_listener), //519 (0x207)
BIND_SYSC(sys_config_remove_service_listener), //520 (0x208)
BIND_SYSC(sys_config_register_service), //521 (0x209)
BIND_SYSC(sys_config_unregister_service), //522 (0x20A)
BIND_SYSC(sys_config_get_io_event), //523 (0x20B)
BIND_SYSC(sys_config_register_io_error_listener), //524 (0x20C)
BIND_SYSC(sys_config_unregister_io_error_listener), //525 (0x20D)
uns_func, uns_func, uns_func, uns_func, //526-529 UNS
BIND_SYSC(sys_usbd_initialize), //530 (0x212)
BIND_SYSC(sys_usbd_finalize), //531 (0x213)
BIND_SYSC(sys_usbd_get_device_list), //532 (0x214)
BIND_SYSC(sys_usbd_get_descriptor_size), //533 (0x215)
BIND_SYSC(sys_usbd_get_descriptor), //534 (0x216)
BIND_SYSC(sys_usbd_register_ldd), //535 (0x217)
BIND_SYSC(sys_usbd_unregister_ldd), //536 (0x218)
BIND_SYSC(sys_usbd_open_pipe), //537 (0x219)
BIND_SYSC(sys_usbd_open_default_pipe), //538 (0x21A)
BIND_SYSC(sys_usbd_close_pipe), //539 (0x21B)
BIND_SYSC(sys_usbd_receive_event), //540 (0x21C)
BIND_SYSC(sys_usbd_detect_event), //541 (0x21D)
BIND_SYSC(sys_usbd_attach), //542 (0x21E)
BIND_SYSC(sys_usbd_transfer_data), //543 (0x21F)
BIND_SYSC(sys_usbd_isochronous_transfer_data), //544 (0x220)
BIND_SYSC(sys_usbd_get_transfer_status), //545 (0x221)
BIND_SYSC(sys_usbd_get_isochronous_transfer_status), //546 (0x222)
BIND_SYSC(sys_usbd_get_device_location), //547 (0x223)
BIND_SYSC(sys_usbd_send_event), //548 (0x224)
BIND_SYSC(sys_usbd_event_port_send), //549 (0x225)
BIND_SYSC(sys_usbd_allocate_memory), //550 (0x226)
BIND_SYSC(sys_usbd_free_memory), //551 (0x227)
null_func,//BIND_SYSC(sys_usbd_...), //552 (0x228)
null_func,//BIND_SYSC(sys_usbd_...), //553 (0x229)
null_func,//BIND_SYSC(sys_usbd_...), //554 (0x22A)
null_func,//BIND_SYSC(sys_usbd_...), //555 (0x22B)
BIND_SYSC(sys_usbd_get_device_speed), //556 (0x22C)
null_func,//BIND_SYSC(sys_usbd_...), //557 (0x22D)
BIND_SYSC(sys_usbd_unregister_extra_ldd), //558 (0x22E)
BIND_SYSC(sys_usbd_register_extra_ldd), //559 (0x22F)
null_func,//BIND_SYSC(sys_...), //560 (0x230) ROOT
null_func,//BIND_SYSC(sys_...), //561 (0x231) ROOT
null_func,//BIND_SYSC(sys_...), //562 (0x232) ROOT
null_func,//BIND_SYSC(sys_...), //563 (0x233)
null_func,//BIND_SYSC(sys_...), //564 (0x234)
null_func,//BIND_SYSC(sys_...), //565 (0x235)
null_func,//BIND_SYSC(sys_...), //566 (0x236)
null_func,//BIND_SYSC(sys_...), //567 (0x237)
null_func,//BIND_SYSC(sys_...), //568 (0x238)
null_func,//BIND_SYSC(sys_...), //569 (0x239)
NULL_FUNC(sys_pad_ldd_register_controller), //570 (0x23A)
NULL_FUNC(sys_pad_ldd_unregister_controller), //571 (0x23B)
NULL_FUNC(sys_pad_ldd_data_insert), //572 (0x23C)
NULL_FUNC(sys_pad_dbg_ldd_set_data_insert_mode), //573 (0x23D)
NULL_FUNC(sys_pad_ldd_register_controller), //574 (0x23E)
NULL_FUNC(sys_pad_ldd_get_port_no), //575 (0x23F)
uns_func, //576 (0x240) UNS
null_func,//BIND_SYSC(sys_pad_manager_...), //577 (0x241) ROOT PM
null_func,//BIND_SYSC(sys_bluetooth_...), //578 (0x242)
null_func,//BIND_SYSC(sys_bluetooth_aud_serial_...), //579 (0x243)
null_func,//BIND_SYSC(sys_bluetooth_...), //580 (0x244) ROOT
null_func,//BIND_SYSC(sys_bluetooth_...), //581 (0x245) ROOT
null_func,//BIND_SYSC(sys_bluetooth_...), //582 (0x246) ROOT
NULL_FUNC(sys_bt_read_firmware_version), //583 (0x247) ROOT
NULL_FUNC(sys_bt_complete_wake_on_host), //584 (0x248) ROOT
NULL_FUNC(sys_bt_disable_bluetooth), //585 (0x249)
NULL_FUNC(sys_bt_enable_bluetooth), //586 (0x24A)
NULL_FUNC(sys_bt_bccmd), //587 (0x24B) ROOT
NULL_FUNC(sys_bt_read_hq), //588 (0x24C)
NULL_FUNC(sys_bt_hid_get_remote_status), //589 (0x24D)
NULL_FUNC(sys_bt_register_controller), //590 (0x24E) ROOT
NULL_FUNC(sys_bt_clear_registered_contoller), //591 (0x24F)
NULL_FUNC(sys_bt_connect_accept_controller), //592 (0x250)
NULL_FUNC(sys_bt_get_local_bdaddress), //593 (0x251) ROOT
NULL_FUNC(sys_bt_hid_get_data), //594 (0x252)
NULL_FUNC(sys_bt_hid_set_report), //595 (0x253)
NULL_FUNC(sys_bt_sched_log), //596 (0x254)
NULL_FUNC(sys_bt_cancel_connect_accept_controller), //597 (0x255)
null_func,//BIND_SYSC(sys_bluetooth_...), //598 (0x256) ROOT
null_func,//BIND_SYSC(sys_bluetooth_...), //599 (0x257) ROOT
BIND_SYSC(sys_storage_open), //600 (0x258) ROOT
BIND_SYSC(sys_storage_close), //601 (0x259)
BIND_SYSC(sys_storage_read), //602 (0x25A)
BIND_SYSC(sys_storage_write), //603 (0x25B)
BIND_SYSC(sys_storage_send_device_command), //604 (0x25C)
BIND_SYSC(sys_storage_async_configure), //605 (0x25D)
BIND_SYSC(sys_storage_async_read), //606 (0x25E)
BIND_SYSC(sys_storage_async_write), //607 (0x25F)
BIND_SYSC(sys_storage_async_cancel), //608 (0x260)
BIND_SYSC(sys_storage_get_device_info), //609 (0x261) ROOT
BIND_SYSC(sys_storage_get_device_config), //610 (0x262) ROOT
BIND_SYSC(sys_storage_report_devices), //611 (0x263) ROOT
BIND_SYSC(sys_storage_configure_medium_event), //612 (0x264) ROOT
BIND_SYSC(sys_storage_set_medium_polling_interval), //613 (0x265)
BIND_SYSC(sys_storage_create_region), //614 (0x266)
BIND_SYSC(sys_storage_delete_region), //615 (0x267)
BIND_SYSC(sys_storage_execute_device_command), //616 (0x268)
BIND_SYSC(sys_storage_check_region_acl), //617 (0x269)
BIND_SYSC(sys_storage_set_region_acl), //618 (0x26A)
BIND_SYSC(sys_storage_async_send_device_command), //619 (0x26B)
null_func,//BIND_SYSC(sys_...), //620 (0x26C) ROOT
BIND_SYSC(sys_gamepad_ycon_if), //621 (0x26D)
BIND_SYSC(sys_storage_get_region_offset), //622 (0x26E)
BIND_SYSC(sys_storage_set_emulated_speed), //623 (0x26F)
BIND_SYSC(sys_io_buffer_create), //624 (0x270)
BIND_SYSC(sys_io_buffer_destroy), //625 (0x271)
BIND_SYSC(sys_io_buffer_allocate), //626 (0x272)
BIND_SYSC(sys_io_buffer_free), //627 (0x273)
uns_func, uns_func, //629 (0x275) UNS
BIND_SYSC(sys_gpio_set), //630 (0x276)
BIND_SYSC(sys_gpio_get), //631 (0x277)
uns_func, //632 (0x278) UNS
NULL_FUNC(sys_fsw_connect_event), //633 (0x279)
NULL_FUNC(sys_fsw_disconnect_event), //634 (0x27A)
BIND_SYSC(sys_btsetting_if), //635 (0x27B)
null_func,//BIND_SYSC(sys_...), //636 (0x27C)
null_func,//BIND_SYSC(sys_...), //637 (0x27D)
null_func,//BIND_SYSC(sys_...), //638 (0x27E)
null_func,//BIND_SYSC(sys...), //639 DEPRECATED
NULL_FUNC(sys_usbbtaudio_initialize), //640 DEPRECATED
NULL_FUNC(sys_usbbtaudio_finalize), //641 DEPRECATED
NULL_FUNC(sys_usbbtaudio_discovery), //642 DEPRECATED
NULL_FUNC(sys_usbbtaudio_cancel_discovery), //643 DEPRECATED
NULL_FUNC(sys_usbbtaudio_pairing), //644 DEPRECATED
NULL_FUNC(sys_usbbtaudio_set_passkey), //645 DEPRECATED
NULL_FUNC(sys_usbbtaudio_connect), //646 DEPRECATED
NULL_FUNC(sys_usbbtaudio_disconnect), //647 DEPRECATED
null_func,//BIND_SYSC(sys_...), //648 DEPRECATED
null_func,//BIND_SYSC(sys_...), //649 DEPRECATED
BIND_SYSC(sys_rsxaudio_initialize), //650 (0x28A)
BIND_SYSC(sys_rsxaudio_finalize), //651 (0x28B)
BIND_SYSC(sys_rsxaudio_import_shared_memory), //652 (0x28C)
BIND_SYSC(sys_rsxaudio_unimport_shared_memory), //653 (0x28D)
BIND_SYSC(sys_rsxaudio_create_connection), //654 (0x28E)
BIND_SYSC(sys_rsxaudio_close_connection), //655 (0x28F)
BIND_SYSC(sys_rsxaudio_prepare_process), //656 (0x290)
BIND_SYSC(sys_rsxaudio_start_process), //657 (0x291)
BIND_SYSC(sys_rsxaudio_stop_process), //658 (0x292)
BIND_SYSC(sys_rsxaudio_get_dma_param), //659 (0x293)
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //660-665 UNS
BIND_SYSC(sys_rsx_device_open), //666 (0x29A)
BIND_SYSC(sys_rsx_device_close), //667 (0x29B)
BIND_SYSC(sys_rsx_memory_allocate), //668 (0x29C)
BIND_SYSC(sys_rsx_memory_free), //669 (0x29D)
BIND_SYSC(sys_rsx_context_allocate), //670 (0x29E)
BIND_SYSC(sys_rsx_context_free), //671 (0x29F)
BIND_SYSC(sys_rsx_context_iomap), //672 (0x2A0)
BIND_SYSC(sys_rsx_context_iounmap), //673 (0x2A1)
BIND_SYSC(sys_rsx_context_attribute), //674 (0x2A2)
BIND_SYSC(sys_rsx_device_map), //675 (0x2A3)
BIND_SYSC(sys_rsx_device_unmap), //676 (0x2A4)
BIND_SYSC(sys_rsx_attribute), //677 (0x2A5)
null_func,//BIND_SYSC(sys_...), //678 (0x2A6)
null_func,//BIND_SYSC(sys_...), //679 (0x2A7) ROOT
null_func,//BIND_SYSC(sys_...), //680 (0x2A8) ROOT
null_func,//BIND_SYSC(sys_...), //681 (0x2A9) ROOT
null_func,//BIND_SYSC(sys_...), //682 (0x2AA) ROOT
null_func,//BIND_SYSC(sys_...), //683 (0x2AB) ROOT
null_func,//BIND_SYSC(sys_...), //684 (0x2AC) ROOT
null_func,//BIND_SYSC(sys_...), //685 (0x2AD) ROOT
null_func,//BIND_SYSC(sys_...), //686 (0x2AE) ROOT
null_func,//BIND_SYSC(sys_...), //687 (0x2AF) ROOT
null_func,//BIND_SYSC(sys_...), //688 (0x2B0) ROOT
null_func,//BIND_SYSC(sys_...), //689 (0x2B1) ROOT
null_func,//BIND_SYSC(sys_...), //690 (0x2B2) ROOT
null_func,//BIND_SYSC(sys_...), //691 (0x2B3) ROOT
null_func,//BIND_SYSC(sys_...), //692 (0x2B4) ROOT
null_func,//BIND_SYSC(sys_...), //693 (0x2B5) ROOT
null_func,//BIND_SYSC(sys_...), //694 (0x2B6) DEPRECATED
null_func,//BIND_SYSC(sys_...), //695 (0x2B7) DEPRECATED
null_func,//BIND_SYSC(sys_...), //696 (0x2B8) ROOT
uns_func,//BIND_SYSC(sys_...), //697 (0x2B9) UNS
uns_func,//BIND_SYSC(sys_...), //698 (0x2BA) UNS
BIND_SYSC(sys_bdemu_send_command), //699 (0x2BB)
BIND_SYSC(sys_net_bnet_accept), //700 (0x2BC)
BIND_SYSC(sys_net_bnet_bind), //701 (0x2BD)
BIND_SYSC(sys_net_bnet_connect), //702 (0x2BE)
BIND_SYSC(sys_net_bnet_getpeername), //703 (0x2BF)
BIND_SYSC(sys_net_bnet_getsockname), //704 (0x2C0)
BIND_SYSC(sys_net_bnet_getsockopt), //705 (0x2C1)
BIND_SYSC(sys_net_bnet_listen), //706 (0x2C2)
BIND_SYSC(sys_net_bnet_recvfrom), //707 (0x2C3)
BIND_SYSC(sys_net_bnet_recvmsg), //708 (0x2C4)
BIND_SYSC(sys_net_bnet_sendmsg), //709 (0x2C5)
BIND_SYSC(sys_net_bnet_sendto), //710 (0x2C6)
BIND_SYSC(sys_net_bnet_setsockopt), //711 (0x2C7)
BIND_SYSC(sys_net_bnet_shutdown), //712 (0x2C8)
BIND_SYSC(sys_net_bnet_socket), //713 (0x2C9)
BIND_SYSC(sys_net_bnet_close), //714 (0x2CA)
BIND_SYSC(sys_net_bnet_poll), //715 (0x2CB)
BIND_SYSC(sys_net_bnet_select), //716 (0x2CC)
BIND_SYSC(_sys_net_open_dump), //717 (0x2CD)
BIND_SYSC(_sys_net_read_dump), //718 (0x2CE)
BIND_SYSC(_sys_net_close_dump), //719 (0x2CF)
BIND_SYSC(_sys_net_write_dump), //720 (0x2D0)
BIND_SYSC(sys_net_abort), //721 (0x2D1)
BIND_SYSC(sys_net_infoctl), //722 (0x2D2)
BIND_SYSC(sys_net_control), //723 (0x2D3)
BIND_SYSC(sys_net_bnet_ioctl), //724 (0x2D4)
BIND_SYSC(sys_net_bnet_sysctl), //725 (0x2D5)
BIND_SYSC(sys_net_eurus_post_command), //726 (0x2D6)
uns_func, uns_func, uns_func, //727-729 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //730-739 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //740-749 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //750-759 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //760-769 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //770-779 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //780-789 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //790-799 UNS
BIND_SYSC(sys_fs_test), //800 (0x320)
BIND_SYSC(sys_fs_open), //801 (0x321)
BIND_SYSC(sys_fs_read), //802 (0x322)
BIND_SYSC(sys_fs_write), //803 (0x323)
BIND_SYSC(sys_fs_close), //804 (0x324)
BIND_SYSC(sys_fs_opendir), //805 (0x325)
BIND_SYSC(sys_fs_readdir), //806 (0x326)
BIND_SYSC(sys_fs_closedir), //807 (0x327)
BIND_SYSC(sys_fs_stat), //808 (0x328)
BIND_SYSC(sys_fs_fstat), //809 (0x329)
BIND_SYSC(sys_fs_link), //810 (0x32A)
BIND_SYSC(sys_fs_mkdir), //811 (0x32B)
BIND_SYSC(sys_fs_rename), //812 (0x32C)
BIND_SYSC(sys_fs_rmdir), //813 (0x32D)
BIND_SYSC(sys_fs_unlink), //814 (0x32E)
BIND_SYSC(sys_fs_utime), //815 (0x32F)
BIND_SYSC(sys_fs_access), //816 (0x330)
BIND_SYSC(sys_fs_fcntl), //817 (0x331)
BIND_SYSC(sys_fs_lseek), //818 (0x332)
BIND_SYSC(sys_fs_fdatasync), //819 (0x333)
BIND_SYSC(sys_fs_fsync), //820 (0x334)
BIND_SYSC(sys_fs_fget_block_size), //821 (0x335)
BIND_SYSC(sys_fs_get_block_size), //822 (0x336)
BIND_SYSC(sys_fs_acl_read), //823 (0x337)
BIND_SYSC(sys_fs_acl_write), //824 (0x338)
BIND_SYSC(sys_fs_lsn_get_cda_size), //825 (0x339)
BIND_SYSC(sys_fs_lsn_get_cda), //826 (0x33A)
BIND_SYSC(sys_fs_lsn_lock), //827 (0x33B)
BIND_SYSC(sys_fs_lsn_unlock), //828 (0x33C)
BIND_SYSC(sys_fs_lsn_read), //829 (0x33D)
BIND_SYSC(sys_fs_lsn_write), //830 (0x33E)
BIND_SYSC(sys_fs_truncate), //831 (0x33F)
BIND_SYSC(sys_fs_ftruncate), //832 (0x340)
BIND_SYSC(sys_fs_symbolic_link), //833 (0x341)
BIND_SYSC(sys_fs_chmod), //834 (0x342)
BIND_SYSC(sys_fs_chown), //835 (0x343)
BIND_SYSC(sys_fs_newfs), //836 (0x344)
BIND_SYSC(sys_fs_mount), //837 (0x345)
BIND_SYSC(sys_fs_unmount), //838 (0x346)
NULL_FUNC(sys_fs_sync), //839 (0x347)
BIND_SYSC(sys_fs_disk_free), //840 (0x348)
BIND_SYSC(sys_fs_get_mount_info_size), //841 (0x349)
BIND_SYSC(sys_fs_get_mount_info), //842 (0x34A)
NULL_FUNC(sys_fs_get_fs_info_size), //843 (0x34B)
NULL_FUNC(sys_fs_get_fs_info), //844 (0x34C)
BIND_SYSC(sys_fs_mapped_allocate), //845 (0x34D)
BIND_SYSC(sys_fs_mapped_free), //846 (0x34E)
BIND_SYSC(sys_fs_truncate2), //847 (0x34F)
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //848-853 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //854-859 UNS
NULL_FUNC(sys_ss_get_cache_of_analog_sunset_flag), //860 (0x35C) AUTHID
NULL_FUNC(sys_ss_protected_file_db), //861 ROOT
BIND_SYSC(sys_ss_virtual_trm_manager), //862 ROOT
BIND_SYSC(sys_ss_update_manager), //863 ROOT
NULL_FUNC(sys_ss_sec_hw_framework), //864 DBG
BIND_SYSC(sys_ss_random_number_generator), //865 (0x361)
BIND_SYSC(sys_ss_secure_rtc), //866 ROOT
BIND_SYSC(sys_ss_appliance_info_manager), //867 ROOT
BIND_SYSC(sys_ss_individual_info_manager), //868 ROOT / DBG AUTHID
NULL_FUNC(sys_ss_factory_data_manager), //869 ROOT
BIND_SYSC(sys_ss_get_console_id), //870 (0x366)
BIND_SYSC(sys_ss_access_control_engine), //871 (0x367) DBG
BIND_SYSC(sys_ss_get_open_psid), //872 (0x368)
BIND_SYSC(sys_ss_get_cache_of_product_mode), //873 (0x369)
BIND_SYSC(sys_ss_get_cache_of_flash_ext_flag), //874 (0x36A)
BIND_SYSC(sys_ss_get_boot_device), //875 (0x36B)
NULL_FUNC(sys_ss_disc_access_control), //876 (0x36C)
null_func, //BIND_SYSC(sys_ss_~utoken_if), //877 (0x36D) ROOT
NULL_FUNC(sys_ss_ad_sign), //878 (0x36E)
NULL_FUNC(sys_ss_media_id), //879 (0x36F)
NULL_FUNC(sys_deci3_open), //880 (0x370)
NULL_FUNC(sys_deci3_create_event_path), //881 (0x371)
NULL_FUNC(sys_deci3_close), //882 (0x372)
NULL_FUNC(sys_deci3_send), //883 (0x373)
NULL_FUNC(sys_deci3_receive), //884 (0x374)
NULL_FUNC(sys_deci3_open2), //885 (0x375)
uns_func, uns_func, uns_func, //886-888 UNS
null_func,//BIND_SYSC(sys_...), //889 (0x379) ROOT
NULL_FUNC(sys_deci3_initialize), //890 (0x37A)
NULL_FUNC(sys_deci3_terminate), //891 (0x37B)
NULL_FUNC(sys_deci3_debug_mode), //892 (0x37C)
NULL_FUNC(sys_deci3_show_status), //893 (0x37D)
NULL_FUNC(sys_deci3_echo_test), //894 (0x37E)
NULL_FUNC(sys_deci3_send_dcmp_packet), //895 (0x37F)
NULL_FUNC(sys_deci3_dump_cp_register), //896 (0x380)
NULL_FUNC(sys_deci3_dump_cp_buffer), //897 (0x381)
uns_func, //898 (0x382) UNS
NULL_FUNC(sys_deci3_test), //899 (0x383)
NULL_FUNC(sys_dbg_stop_processes), //900 (0x384)
NULL_FUNC(sys_dbg_continue_processes), //901 (0x385)
NULL_FUNC(sys_dbg_stop_threads), //902 (0x386)
NULL_FUNC(sys_dbg_continue_threads), //903 (0x387)
BIND_SYSC(sys_dbg_read_process_memory), //904 (0x388)
BIND_SYSC(sys_dbg_write_process_memory), //905 (0x389)
NULL_FUNC(sys_dbg_read_thread_register), //906 (0x38A)
NULL_FUNC(sys_dbg_write_thread_register), //907 (0x38B)
NULL_FUNC(sys_dbg_get_process_list), //908 (0x38C)
NULL_FUNC(sys_dbg_get_thread_list), //909 (0x38D)
NULL_FUNC(sys_dbg_get_thread_info), //910 (0x38E)
NULL_FUNC(sys_dbg_spu_thread_read_from_ls), //911 (0x38F)
NULL_FUNC(sys_dbg_spu_thread_write_to_ls), //912 (0x390)
NULL_FUNC(sys_dbg_kill_process), //913 (0x391)
NULL_FUNC(sys_dbg_get_process_info), //914 (0x392)
NULL_FUNC(sys_dbg_set_run_control_bit_to_spu), //915 (0x393)
NULL_FUNC(sys_dbg_spu_thread_get_exception_cause), //916 (0x394)
NULL_FUNC(sys_dbg_create_kernel_event_queue), //917 (0x395)
NULL_FUNC(sys_dbg_read_kernel_event_queue), //918 (0x396)
NULL_FUNC(sys_dbg_destroy_kernel_event_queue), //919 (0x397)
NULL_FUNC(sys_dbg_get_process_event_ctrl_flag), //920 (0x398)
NULL_FUNC(sys_dbg_set_process_event_cntl_flag), //921 (0x399)
NULL_FUNC(sys_dbg_get_spu_thread_group_event_cntl_flag),//922 (0x39A)
NULL_FUNC(sys_dbg_set_spu_thread_group_event_cntl_flag),//923 (0x39B)
NULL_FUNC(sys_dbg_get_module_list), //924 (0x39C)
NULL_FUNC(sys_dbg_get_raw_spu_list), //925 (0x39D)
NULL_FUNC(sys_dbg_initialize_scratch_executable_area), //926 (0x39E)
NULL_FUNC(sys_dbg_terminate_scratch_executable_area), //927 (0x3A0)
NULL_FUNC(sys_dbg_initialize_scratch_data_area), //928 (0x3A1)
NULL_FUNC(sys_dbg_terminate_scratch_data_area), //929 (0x3A2)
NULL_FUNC(sys_dbg_get_user_memory_stat), //930 (0x3A3)
NULL_FUNC(sys_dbg_get_shared_memory_attribute_list), //931 (0x3A4)
NULL_FUNC(sys_dbg_get_mutex_list), //932 (0x3A4)
NULL_FUNC(sys_dbg_get_mutex_information), //933 (0x3A5)
NULL_FUNC(sys_dbg_get_cond_list), //934 (0x3A6)
NULL_FUNC(sys_dbg_get_cond_information), //935 (0x3A7)
NULL_FUNC(sys_dbg_get_rwlock_list), //936 (0x3A8)
NULL_FUNC(sys_dbg_get_rwlock_information), //937 (0x3A9)
NULL_FUNC(sys_dbg_get_lwmutex_list), //938 (0x3AA)
NULL_FUNC(sys_dbg_get_address_from_dabr), //939 (0x3AB)
NULL_FUNC(sys_dbg_set_address_to_dabr), //940 (0x3AC)
NULL_FUNC(sys_dbg_get_lwmutex_information), //941 (0x3AD)
NULL_FUNC(sys_dbg_get_event_queue_list), //942 (0x3AE)
NULL_FUNC(sys_dbg_get_event_queue_information), //943 (0x3AF)
NULL_FUNC(sys_dbg_initialize_ppu_exception_handler), //944 (0x3B0)
NULL_FUNC(sys_dbg_finalize_ppu_exception_handler), //945 (0x3B1) DBG
NULL_FUNC(sys_dbg_get_semaphore_list), //946 (0x3B2)
NULL_FUNC(sys_dbg_get_semaphore_information), //947 (0x3B3)
NULL_FUNC(sys_dbg_get_kernel_thread_list), //948 (0x3B4)
NULL_FUNC(sys_dbg_get_kernel_thread_info), //949 (0x3B5)
NULL_FUNC(sys_dbg_get_lwcond_list), //950 (0x3B6)
NULL_FUNC(sys_dbg_get_lwcond_information), //951 (0x3B7)
NULL_FUNC(sys_dbg_create_scratch_data_area_ext), //952 (0x3B8)
NULL_FUNC(sys_dbg_vm_get_page_information), //953 (0x3B9)
NULL_FUNC(sys_dbg_vm_get_info), //954 (0x3BA)
NULL_FUNC(sys_dbg_enable_floating_point_enabled_exception),//955 (0x3BB)
NULL_FUNC(sys_dbg_disable_floating_point_enabled_exception),//956 (0x3BC)
NULL_FUNC(sys_dbg_get_process_memory_container_information),//957 (0x3BD)
uns_func, //958 (0x3BE) UNS
null_func,//BIND_SYSC(sys_dbg_...), //959 (0x3BF)
NULL_FUNC(sys_control_performance_monitor), //960 (0x3C0)
NULL_FUNC(sys_performance_monitor_hidden), //961 (0x3C1)
NULL_FUNC(sys_performance_monitor_bookmark), //962 (0x3C2)
NULL_FUNC(sys_lv1_pc_trace_create), //963 (0x3C3)
NULL_FUNC(sys_lv1_pc_trace_start), //964 (0x3C4)
NULL_FUNC(sys_lv1_pc_trace_stop), //965 (0x3C5)
NULL_FUNC(sys_lv1_pc_trace_get_status), //966 (0x3C6)
NULL_FUNC(sys_lv1_pc_trace_destroy), //967 (0x3C7)
NULL_FUNC(sys_rsx_trace_ioctl), //968 (0x3C8)
null_func,//BIND_SYSC(sys_dbg_...), //969 (0x3C9)
NULL_FUNC(sys_dbg_get_event_flag_list), //970 (0x3CA)
NULL_FUNC(sys_dbg_get_event_flag_information), //971 (0x3CB)
null_func,//BIND_SYSC(sys_dbg_...), //972 (0x3CC)
uns_func,//BIND_SYSC(sys_dbg_...), //973 (0x3CD)
null_func,//BIND_SYSC(sys_dbg_...), //974 (0x3CE)
NULL_FUNC(sys_dbg_read_spu_thread_context2), //975 (0x3CF)
BIND_SYSC(sys_crypto_engine_create), //976 (0x3D0)
BIND_SYSC(sys_crypto_engine_destroy), //977 (0x3D1)
NULL_FUNC(sys_crypto_engine_hasher_prepare), //978 (0x3D2) ROOT
NULL_FUNC(sys_crypto_engine_hasher_run), //979 (0x3D3)
NULL_FUNC(sys_crypto_engine_hasher_get_hash), //980 (0x3D4)
NULL_FUNC(sys_crypto_engine_cipher_prepare), //981 (0x3D5) ROOT
NULL_FUNC(sys_crypto_engine_cipher_run), //982 (0x3D6)
NULL_FUNC(sys_crypto_engine_cipher_get_hash), //983 (0x3D7)
BIND_SYSC(sys_crypto_engine_random_generate), //984 (0x3D8)
NULL_FUNC(sys_dbg_get_console_type), //985 (0x3D9) ROOT
null_func,//BIND_SYSC(sys_dbg_...), //986 (0x3DA) ROOT DBG
null_func,//BIND_SYSC(sys_dbg_...), //987 (0x3DB) ROOT
null_func,//BIND_SYSC(sys_dbg_..._ppu_exception_handler) //988 (0x3DC)
null_func,//BIND_SYSC(sys_dbg_...), //989 (0x3DD)
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //990-998 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //999-1007 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //1008-1016 UNS
uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, uns_func, //1020-1023 UNS
};
#undef BIND_SYSC
#undef NULL_FUNC
// Forward declarations of HLE-module error enums; their fmt_class_string
// formatters are referenced by s_error_codes_formatting_by_type below.
// TODO: more enums
enum CellAdecError : u32;
enum CellAtracError : u32;
enum CellAtracMultiError : u32;
enum CellAudioError : u32;
enum CellAudioOutError : u32;
enum CellAudioInError : u32;
enum CellVideoOutError : u32;
enum CellSpursCoreError : u32;
enum CellSpursPolicyModuleError : u32;
enum CellSpursTaskError : u32;
enum CellSpursJobError : u32;
enum CellSyncError : u32;
enum CellGameError : u32;
enum CellGameDataError : u32;
enum CellDiscGameError : u32;
enum CellHddGameError : u32;
enum SceNpTrophyError : u32;
enum SceNpError : u32;
// Helper: pairs the base (lowest) value of an error-code range with the
// formatter function of the enum type E that covers that range.
template <u64 EnumMin, typename E>
constexpr auto formatter_of = std::make_pair(EnumMin, &fmt_class_string<E>::format);

// Map from error-code range base -> enum formatter. Lookup is done with
// upper_bound/prev (see fmt_class_string<CellError>::format below), so each
// entry covers values from its key up to (but excluding) the next key.
// Note: CellAdecError intentionally appears twice (two distinct ranges).
const std::map<u64, void(*)(std::string&, u64)> s_error_codes_formatting_by_type
{
	formatter_of<0x80610000, CellAdecError>,
	formatter_of<0x80612100, CellAdecError>,
	formatter_of<0x80610300, CellAtracError>,
	formatter_of<0x80610b00, CellAtracMultiError>,
	formatter_of<0x80310700, CellAudioError>,
	formatter_of<0x8002b240, CellAudioOutError>,
	formatter_of<0x8002b260, CellAudioInError>,
	formatter_of<0x8002b220, CellVideoOutError>,
	formatter_of<0x80410100, CellSyncError>,
	formatter_of<0x80410700, CellSpursCoreError>,
	formatter_of<0x80410800, CellSpursPolicyModuleError>,
	formatter_of<0x80410900, CellSpursTaskError>,
	formatter_of<0x80410A00, CellSpursJobError>,
	formatter_of<0x8002cb00, CellGameError>,
	formatter_of<0x8002b600, CellGameDataError>,
	formatter_of<0x8002bd00, CellDiscGameError>,
	formatter_of<0x8002ba00, CellHddGameError>,
	formatter_of<0x80022900, SceNpTrophyError>,
	formatter_of<0x80029500, SceNpError>,
};
// Formatter for CellError values. LV2 cell errors (0x800100xx) are formatted
// by name here; any other value is delegated to the module-specific enum
// formatter whose range base is the closest one not exceeding the value.
template<>
void fmt_class_string<CellError>::format(std::string& out, u64 arg)
{
	// Test if can be formatted by this formatter
	const bool lv2_cell_error = (arg >> 8) == 0x800100u;

	if (!lv2_cell_error)
	{
		// Format by external enum formatters
		// upper_bound: first entry whose base is strictly greater than arg
		auto upper = s_error_codes_formatting_by_type.upper_bound(arg);

		if (upper == s_error_codes_formatting_by_type.begin())
		{
			// arg is below every known range base
			// Format as unknown by another enum formatter
			upper->second(out, arg);
			return;
		}

		// Find the formatter whose base is the highest that is not more than arg
		const auto found = std::prev(upper);
		found->second(out, arg);
		return;
	}

	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(CELL_EAGAIN);
		STR_CASE(CELL_EINVAL);
		STR_CASE(CELL_ENOSYS);
		STR_CASE(CELL_ENOMEM);
		STR_CASE(CELL_ESRCH);
		STR_CASE(CELL_ENOENT);
		STR_CASE(CELL_ENOEXEC);
		STR_CASE(CELL_EDEADLK);
		STR_CASE(CELL_EPERM);
		STR_CASE(CELL_EBUSY);
		STR_CASE(CELL_ETIMEDOUT);
		STR_CASE(CELL_EABORT);
		STR_CASE(CELL_EFAULT);
		STR_CASE(CELL_ENOCHILD);
		STR_CASE(CELL_ESTAT);
		STR_CASE(CELL_EALIGN);
		STR_CASE(CELL_EKRESOURCE);
		STR_CASE(CELL_EISDIR);
		STR_CASE(CELL_ECANCELED);
		STR_CASE(CELL_EEXIST);
		STR_CASE(CELL_EISCONN);
		STR_CASE(CELL_ENOTCONN);
		STR_CASE(CELL_EAUTHFAIL);
		STR_CASE(CELL_ENOTMSELF);
		STR_CASE(CELL_ESYSVER);
		STR_CASE(CELL_EAUTHFATAL);
		STR_CASE(CELL_EDOM);
		STR_CASE(CELL_ERANGE);
		STR_CASE(CELL_EILSEQ);
		STR_CASE(CELL_EFPOS);
		STR_CASE(CELL_EINTR);
		STR_CASE(CELL_EFBIG);
		STR_CASE(CELL_EMLINK);
		STR_CASE(CELL_ENFILE);
		STR_CASE(CELL_ENOSPC);
		STR_CASE(CELL_ENOTTY);
		STR_CASE(CELL_EPIPE);
		STR_CASE(CELL_EROFS);
		STR_CASE(CELL_ESPIPE);
		STR_CASE(CELL_E2BIG);
		STR_CASE(CELL_EACCES);
		STR_CASE(CELL_EBADF);
		STR_CASE(CELL_EIO);
		STR_CASE(CELL_EMFILE);
		STR_CASE(CELL_ENODEV);
		STR_CASE(CELL_ENOTDIR);
		STR_CASE(CELL_ENXIO);
		STR_CASE(CELL_EXDEV);
		STR_CASE(CELL_EBADMSG);
		STR_CASE(CELL_EINPROGRESS);
		STR_CASE(CELL_EMSGSIZE);
		STR_CASE(CELL_ENAMETOOLONG);
		STR_CASE(CELL_ENOLCK);
		STR_CASE(CELL_ENOTEMPTY);
		STR_CASE(CELL_ENOTSUP);
		STR_CASE(CELL_EFSSPECIFIC);
		STR_CASE(CELL_EOVERFLOW);
		STR_CASE(CELL_ENOTMOUNTED);
		STR_CASE(CELL_ENOTSDATA);
		STR_CASE(CELL_ESDKVER);
		STR_CASE(CELL_ENOLICDISC);
		STR_CASE(CELL_ENOLICENT);
		}

		return unknown;
	});
}
// Acquire the init lock of mtx on behalf of a PPU thread, putting it into the
// LV2 sleep state while waiting and restoring it afterwards.
// When ppu is null, the calling thread must itself be a PPU thread.
stx::init_lock acquire_lock(stx::init_mutex& mtx, ppu_thread* ppu)
{
	ppu_thread* const caller = ppu ? ppu : ensure(cpu_thread::get_current<ppu_thread>());

	return mtx.init([](int invoke_count, const stx::init_lock&, ppu_thread* thread)
	{
		if (invoke_count == 0)
		{
			// First callback: sleep before waiting on the lock
			lv2_obj::sleep(*thread);
		}
		else
		{
			// Later callback: wake up after acquisition or failure to acquire
			thread->check_state();
		}
	}, caller);
}
// Acquire a (non-exclusive) access lock of mtx for a PPU thread.
// When ppu is null, asserts that the calling thread is a PPU thread.
stx::access_lock acquire_access_lock(stx::init_mutex& mtx, ppu_thread* ppu)
{
	ppu_thread* const caller = ppu ? ppu : ensure(cpu_thread::get_current<ppu_thread>());
	static_cast<void>(caller);

	// TODO: Check if needs to wait
	return mtx.access();
}
// Acquire the reset lock of mtx on behalf of a PPU thread, sleeping in the
// LV2 scheduler while waiting (mirrors acquire_lock above).
stx::reset_lock acquire_reset_lock(stx::init_mutex& mtx, ppu_thread* ppu)
{
	ppu_thread* const caller = ppu ? ppu : ensure(cpu_thread::get_current<ppu_thread>());

	return mtx.reset([](int invoke_count, const stx::init_lock&, ppu_thread* thread)
	{
		if (invoke_count == 0)
		{
			// First callback: sleep before waiting on the lock
			lv2_obj::sleep(*thread);
		}
		else
		{
			// Later callback: wake up after acquisition or failure to acquire
			thread->check_state();
		}
	}, caller);
}
// Background worker thread that collects per-syscall invocation counters and
// periodically logs the ones that changed, sorted by usage count.
class ppu_syscall_usage
{
	// Internal buffer
	std::string m_stats;

	// Snapshot of `stat` taken at the last print; used to detect new activity
	u64 m_old_stat[1024]{};

public:
	// Public info collection buffers
	atomic_t<u64> stat[1024]{};

	// Log all syscalls whose counters changed since the last call.
	// With force_print, additionally dumps every non-zero counter.
	void print_stats(bool force_print) noexcept
	{
		// Ordered by count, descending (multimap: equal counts are all kept)
		std::multimap<u64, u64, std::greater<u64>> usage;

		for (u32 i = 0; i < 1024; i++)
		{
			if (u64 v = stat[i]; m_old_stat[i] != v || (force_print && v))
			{
				// Only add syscalls with non-zero usage counter and only if caught new calls since last print
				usage.emplace(v, i);
				m_old_stat[i] = v;
			}
		}

		m_stats.clear();

		for (auto&& pair : usage)
		{
			// NOTE(review): the marker glyph in this literal appears mojibake'd — verify the source file encoding
			fmt::append(m_stats, u8"\n\tâ‚ %s [%u]", ppu_get_syscall_name(pair.second), pair.first);
		}

		if (!m_stats.empty())
		{
			ppu_log.notice("PPU Syscall Usage Stats:%s", m_stats);
		}
	}

	// Thread entry point: wakes ~once per second; prints every 10th iteration,
	// or immediately on the pause transition (running -> paused).
	void operator()()
	{
		bool was_paused = false;
		u64 sleep_until = get_system_time();

		for (u32 i = 1; thread_ctrl::state() != thread_state::aborting; i++)
		{
			thread_ctrl::wait_until(&sleep_until, 1'000'000);
			const bool is_paused = Emu.IsPaused();

			// Force-print all if paused
			const bool force_print = is_paused && !was_paused;

			if (force_print || i % 10 == 0)
			{
				was_paused = is_paused;
				print_stats(force_print);
			}
		}
	}

	// Final dump of all non-zero counters on shutdown
	~ppu_syscall_usage()
	{
		print_stats(true);
	}

	static constexpr auto thread_name = "PPU Syscall Usage Thread"sv;
};
// Dispatch a PPU syscall by number; throws on out-of-range or unimplemented entries.
extern void ppu_execute_syscall(ppu_thread& ppu, u64 code)
{
	if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm)
	{
		// The LLVM recompiler passes the syscall number in r11
		code = ppu.gpr[11];
	}

	if (code < g_ppu_syscall_table.size())
	{
		// Count the invocation for the usage-statistics thread
		g_fxo->get<named_thread<ppu_syscall_usage>>().stat[code]++;

		const auto handler = g_ppu_syscall_table[code].first;

		if (handler)
		{
#ifdef __APPLE__
			pthread_jit_write_protect_np(false);
#endif
			handler(ppu, {}, vm::_ptr<u32>(ppu.cia), nullptr);
			ppu_log.trace("Syscall '%s' (%llu) finished, r3=0x%llx", ppu_syscall_code(code), code, ppu.gpr[3]);
#ifdef __APPLE__
			pthread_jit_write_protect_np(true);
			// No need to flush cache lines after a syscall, since we didn't generate any code.
#endif
			return;
		}
	}

	fmt::throw_exception("Invalid syscall number (%llu)", code);
}
// Return the interpreter function bound to a syscall number, or nullptr when
// the number is out of range.
extern ppu_intrp_func_t ppu_get_syscall(u64 code)
{
	return code < g_ppu_syscall_table.size() ? g_ppu_syscall_table[code].first : nullptr;
}
// Return the known name of a syscall, or a generated "syscall_N" placeholder
// for out-of-range or unnamed entries.
std::string ppu_get_syscall_name(u64 code)
{
	if (code >= g_ppu_syscall_table.size() || g_ppu_syscall_table[code].second.empty())
	{
		return fmt::format("syscall_%u", code);
	}

	return std::string(g_ppu_syscall_table[code].second);
}
// LV2 scheduler global state (declared in lv2_obj)
DECLARE(lv2_obj::g_mutex);
DECLARE(lv2_obj::g_ppu){};
DECLARE(lv2_obj::g_pending){};
DECLARE(lv2_obj::g_priority_order_tag){};

// Per-thread state: batched reservation notifications and awake requests
thread_local DECLARE(lv2_obj::g_to_notify){};
thread_local DECLARE(lv2_obj::g_postpone_notify_barrier){};
thread_local DECLARE(lv2_obj::g_to_awake);

// Scheduler queue for timeouts (wait until -> thread)
static std::deque<std::pair<u64, class cpu_thread*>> g_waiting;

// Threads which must call lv2_obj::sleep before the scheduler starts
static std::deque<class cpu_thread*> g_to_sleep;
static atomic_t<bool> g_scheduler_ready = false;

// Host-yield pacing counters (TSC based); presumably used by the preemption
// logic further down this file — consumers not visible in this chunk
static atomic_t<u64> s_yield_frequency = 0;
static atomic_t<u64> s_max_allowed_yield_tsc = 0;
static u64 s_last_yield_tsc = 0;
atomic_t<u32> g_lv2_preempts_taken = 0;

namespace cpu_counter
{
	// Forward declaration; defined elsewhere
	void remove(cpu_thread*) noexcept;
}
// Decode a u64-packed object name into a printable string.
// At most 7 characters are considered (NTS); the scan stops at the first
// NUL and non-printable characters are dropped.
// Example: "lv2\n\0tx" will be printed as "lv2"
std::string lv2_obj::name64(u64 name_u64)
{
	const char* chars = reinterpret_cast<const char*>(&name_u64);

	std::string result;

	for (int i = 0; i < 7 && chars[i] != '\0'; i++)
	{
		if (std::isprint(static_cast<uchar>(chars[i])))
		{
			result += chars[i];
		}
	}

	return result;
}
// Put a CPU thread to sleep in the LV2 scheduler, optionally with a timeout.
// Returns the result of sleep_unlocked (false e.g. when the sleep was
// cancelled or the thread was already signaled — see sleep_unlocked).
bool lv2_obj::sleep(cpu_thread& cpu, const u64 timeout)
{
	// Should already be performed when using this flag
	if (!g_postpone_notify_barrier)
	{
		prepare_for_sleep(cpu);
	}

	if (cpu.get_class() == thread_class::ppu)
	{
		// Flush a pending reservation-notification request before sleeping
		if (u32 addr = static_cast<ppu_thread&>(cpu).res_notify)
		{
			static_cast<ppu_thread&>(cpu).res_notify = 0;

			// Compare against the reservation value (low 7 bits masked off)
			if (static_cast<ppu_thread&>(cpu).res_notify_time != (vm::reservation_acquire(addr) & -128))
			{
				// Ignore outdated notification request
			}
			else if (auto it = std::find(g_to_notify, std::end(g_to_notify), std::add_pointer_t<const void>{}); it != std::end(g_to_notify))
			{
				// Free slot available: defer the notification (batched in g_to_notify)
				*it++ = vm::reservation_notifier_notify(addr, true);

				if (it < std::end(g_to_notify))
				{
					// Null-terminate the list if it ends before last slot
					*it = nullptr;
				}
			}
			else
			{
				// No free slot: notify immediately
				vm::reservation_notifier_notify(addr);
			}
		}
	}

	bool result = false;
	const u64 current_time = get_guest_system_time();
	{
		// All scheduler-state mutation happens under g_mutex
		std::lock_guard lock{g_mutex};
		result = sleep_unlocked(cpu, timeout, current_time);

		if (!g_to_awake.empty())
		{
			// Schedule pending entries
			awake_unlocked({});
		}

		schedule_all(current_time);
	}

	// Deferred notifications are sent outside the lock
	if (!g_postpone_notify_barrier)
	{
		notify_all();
	}

	g_to_awake.clear();
	return result;
}
// Wake a thread (or apply a priority/yield command, depending on prio) in the
// LV2 scheduler. Returns the result of awake_unlocked.
bool lv2_obj::awake(cpu_thread* thread, s32 prio)
{
	// Flush the calling PPU thread's pending reservation-notification request
	// first (same logic as in lv2_obj::sleep)
	if (ppu_thread* ppu = cpu_thread::get_current<ppu_thread>())
	{
		if (u32 addr = ppu->res_notify)
		{
			ppu->res_notify = 0;

			// Compare against the reservation value (low 7 bits masked off)
			if (ppu->res_notify_time != (vm::reservation_acquire(addr) & -128))
			{
				// Ignore outdated notification request
			}
			else if (auto it = std::find(g_to_notify, std::end(g_to_notify), std::add_pointer_t<const void>{}); it != std::end(g_to_notify))
			{
				// Free slot available: defer the notification (batched in g_to_notify)
				*it++ = vm::reservation_notifier_notify(addr, true);

				if (it < std::end(g_to_notify))
				{
					// Null-terminate the list if it ends before last slot
					*it = nullptr;
				}
			}
			else
			{
				// No free slot: notify immediately
				vm::reservation_notifier_notify(addr);
			}
		}
	}

	bool result = false;
	{
		// All scheduler-state mutation happens under g_mutex
		std::lock_guard lock(g_mutex);
		result = awake_unlocked(thread, prio);
		schedule_all();
	}

	if (result)
	{
		if (auto cpu = cpu_thread::get_current(); cpu && cpu->is_paused())
		{
			// Current thread is pause-pending: release the vm lock temporarily
			vm::temporary_unlock();
		}
	}

	// Deferred notifications are sent outside the lock
	if (!g_postpone_notify_barrier)
	{
		notify_all();
	}

	return result;
}
// Yield the thread's position in the scheduler to other runnable PPU threads.
// Returns false when there is no other queued PPU thread to yield to.
bool lv2_obj::yield(cpu_thread& thread)
{
	const auto ppu = thread.try_get<ppu_thread>();

	if (ppu)
	{
		// Clear reservation
		ppu->raddr = 0;

		if (atomic_storage<ppu_thread*>::load(ppu->next_ppu) == nullptr)
		{
			// Nothing to do
			return false;
		}
	}

	return awake(&thread, yield_cmd);
}
// Put a thread to sleep: remove it from the run queue, set the suspend flag and
// register an optional timeout. Must be called with g_mutex held.
// current_time is the guest time snapshot taken by the caller.
// Returns false if sleeping was not performed (signaled/cancelled/already asleep).
bool lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout, u64 current_time)
{
	const u64 start_time = current_time;
	// Logs boot-time sleep bookkeeping; when the last expected thread goes to
	// sleep it finalizes the emulator run request on the main thread
	auto on_to_sleep_update = [&]()
	{
		if (g_to_sleep.size() > 5u)
		{
			ppu_log.warning("Threads (%d)", g_to_sleep.size());
		}
		else if (!g_to_sleep.empty())
		{
			// In case there is a deadlock (PPU threads not sleeping)
			// Print-out their IDs for further inspection (focus at 5 at max for now to avoid log spam)
			std::string out = fmt::format("Threads (%d):", g_to_sleep.size());
			for (auto thread : g_to_sleep)
			{
				fmt::append(out, " 0x%x,", thread->id);
			}
			out.resize(out.size() - 1);
			ppu_log.warning("%s", out);
		}
		else
		{
			ppu_log.warning("Final Thread");
			// All threads are ready, wake threads
			Emu.CallFromMainThread([]
			{
				if (Emu.IsStarting())
				{
					// It uses lv2_obj::g_mutex, run it on main thread
					Emu.FinalizeRunRequest();
				}
			});
		}
	};
	bool return_val = true;
	if (auto ppu = thread.try_get<ppu_thread>())
	{
		ppu_log.trace("sleep() - waiting (%zu)", g_pending);
		// Consume a pending suspend acknowledgement, if any
		if (ppu->ack_suspend)
		{
			ppu->ack_suspend = false;
			g_pending--;
		}
		if (std::exchange(ppu->cancel_sleep, 0) == 2)
		{
			// Signal that the underlying LV2 operation has been cancelled and replaced with a short yield
			return_val = false;
		}
		// Atomically set suspend unless a signal already arrived
		const auto [_, ok] = ppu->state.fetch_op([&](bs_t<cpu_flag>& val)
		{
			if (!(val & cpu_flag::signal))
			{
				val += cpu_flag::suspend;
				// Flag used for forced timeout notification
				ensure(!timeout || !(val & cpu_flag::notify));
				return true;
			}
			return false;
		});
		if (!ok)
		{
			ppu_log.fatal("sleep() failed (signaled) (%s)", ppu->current_function);
			return false;
		}
		// Find and remove the thread
		if (!unqueue(g_ppu, ppu, &ppu_thread::next_ppu))
		{
			// Not in the run queue: may be a boot-time "future sleep" entry
			if (auto it = std::find(g_to_sleep.begin(), g_to_sleep.end(), ppu); it != g_to_sleep.end())
			{
				g_to_sleep.erase(it);
				ppu->start_time = start_time;
				on_to_sleep_update();
				return true;
			}
			// Already sleeping
			ppu_log.trace("sleep(): called on already sleeping thread.");
			return false;
		}
		ppu->raddr = 0; // Clear reservation
		ppu->start_time = start_time;
		// end_time is guest time; ~start_time clamps against u64 overflow
		ppu->end_time = timeout ? start_time + std::min<u64>(timeout, ~start_time) : u64{umax};
	}
	else if (auto spu = thread.try_get<spu_thread>())
	{
		// SPU threads only participate in the boot-time sleep list
		if (auto it = std::find(g_to_sleep.begin(), g_to_sleep.end(), spu); it != g_to_sleep.end())
		{
			g_to_sleep.erase(it);
			on_to_sleep_update();
			return true;
		}
		return false;
	}
	if (timeout)
	{
		const u64 wait_until = start_time + std::min<u64>(timeout, ~start_time);
		// Register timeout if necessary (g_waiting is kept sorted by deadline)
		for (auto it = g_waiting.cbegin(), end = g_waiting.cend();; it++)
		{
			if (it == end || it->first > wait_until)
			{
				g_waiting.emplace(it, wait_until, &thread);
				break;
			}
		}
	}
	return return_val;
}
// Core run-queue mutation (g_mutex held): handles priority changes, yield
// rotation and (re)insertion of awoken threads, then suspends any threads
// pushed beyond the emulated hardware-thread count.
// prio is either a real priority value, yield_cmd, or enqueue_cmd.
// Returns true if the run queue changed.
bool lv2_obj::awake_unlocked(cpu_thread* cpu, s32 prio)
{
	// Check thread type
	AUDIT(!cpu || cpu->get_class() == thread_class::ppu);
	bool push_first = false;
	switch (prio)
	{
	default:
	{
		// Priority set
		// Stores the new priority; the order tag keeps FIFO fairness among
		// equal priorities (~tag sorts first, tag sorts last)
		auto set_prio = [](atomic_t<ppu_thread::ppu_prio_t>& prio, s32 value, bool increment_order_last, bool increment_order_first)
		{
			s64 tag = 0;
			if (increment_order_first || increment_order_last)
			{
				tag = ++g_priority_order_tag;
			}
			prio.atomic_op([&](ppu_thread::ppu_prio_t& prio)
			{
				prio.prio = value;
				if (increment_order_first)
				{
					prio.order = ~tag;
				}
				else if (increment_order_last)
				{
					prio.order = tag;
				}
			});
		};
		const s64 old_prio = static_cast<ppu_thread*>(cpu)->prio.load().prio;
		// If priority is the same, push ONPROC/RUNNABLE thread to the back of the priority list if it is not the current thread
		if (old_prio == prio && cpu == cpu_thread::get_current())
		{
			set_prio(static_cast<ppu_thread*>(cpu)->prio, prio, false, false);
			return true;
		}
		if (!unqueue(g_ppu, static_cast<ppu_thread*>(cpu), &ppu_thread::next_ppu))
		{
			// Thread is not queued: only adjust its priority/order
			set_prio(static_cast<ppu_thread*>(cpu)->prio, prio, old_prio > prio, old_prio < prio);
			return true;
		}
		// Queued thread: set priority, fall through to re-insertion below
		set_prio(static_cast<ppu_thread*>(cpu)->prio, prio, false, false);
		break;
	}
	case yield_cmd:
	{
		usz i = 0;
		// Yield command
		for (auto ppu_next = &g_ppu;; i++)
		{
			const auto ppu = +*ppu_next;
			if (!ppu)
			{
				return false;
			}
			if (ppu == cpu)
			{
				auto ppu2 = ppu->next_ppu;
				if (!ppu2 || ppu2->prio.load().prio != ppu->prio.load().prio)
				{
					// Empty 'same prio' threads list
					return false;
				}
				// Find the last thread of the 'same prio' run
				for (i++;; i++)
				{
					const auto next = ppu2->next_ppu;
					if (!next || next->prio.load().prio != ppu->prio.load().prio)
					{
						break;
					}
					ppu2 = next;
				}
				// Rotate current thread to the last position of the 'same prio' threads list
				// Exchange forward pointers
				*ppu_next = std::exchange(ppu->next_ppu, std::exchange(ppu2->next_ppu, ppu));
				if (i < g_cfg.core.ppu_threads + 0u)
				{
					// Threads were rotated, but no context switch was made
					return false;
				}
				ppu->start_time = get_guest_system_time();
				break;
			}
			ppu_next = &ppu->next_ppu;
		}
		break;
	}
	case enqueue_cmd:
	{
		break;
	}
	}
	// Inserts a thread into the run queue by priority (FIFO within a priority).
	// Returns false if it was already queued.
	const auto emplace_thread = [push_first](cpu_thread* const cpu)
	{
		for (auto it = &g_ppu;;)
		{
			const auto next = +*it;
			if (next == cpu)
			{
				ppu_log.trace("sleep() - suspended (p=%zu)", g_pending);
				if (static_cast<ppu_thread*>(cpu)->cancel_sleep == 1)
				{
					// The next sleep call of the thread is cancelled
					static_cast<ppu_thread*>(cpu)->cancel_sleep = 2;
				}
				return false;
			}
			// Use priority, also preserve FIFO order
			if (!next || (push_first ? next->prio.load().prio >= static_cast<ppu_thread*>(cpu)->prio.load().prio : next->prio.load().prio > static_cast<ppu_thread*>(cpu)->prio.load().prio))
			{
				atomic_storage<ppu_thread*>::release(static_cast<ppu_thread*>(cpu)->next_ppu, next);
				atomic_storage<ppu_thread*>::release(*it, static_cast<ppu_thread*>(cpu));
				break;
			}
			it = &next->next_ppu;
		}
		// Unregister timeout if necessary
		for (auto it = g_waiting.cbegin(), end = g_waiting.cend(); it != end; it++)
		{
			if (it->second == cpu)
			{
				g_waiting.erase(it);
				break;
			}
		}
		ppu_log.trace("awake(): %s", cpu->id);
		return true;
	};
	// Yield changed the queue before
	bool changed_queue = prio == yield_cmd;
	s32 lowest_new_priority = smax;
	// Snapshot taken BEFORE emplacement: is there a free emulated HW thread slot?
	const bool has_free_hw_thread_space = count_non_sleeping_threads().onproc_count < g_cfg.core.ppu_threads + 0u;
	if (cpu && prio != yield_cmd)
	{
		// Emplace current thread
		if (emplace_thread(cpu))
		{
			changed_queue = true;
			lowest_new_priority = std::min<s32>(static_cast<ppu_thread*>(cpu)->prio.load().prio, lowest_new_priority);
		}
	}
	else for (const auto _cpu : g_to_awake)
	{
		// Emplace threads from list
		if (emplace_thread(_cpu))
		{
			changed_queue = true;
			lowest_new_priority = std::min<s32>(static_cast<ppu_thread*>(_cpu)->prio.load().prio, lowest_new_priority);
		}
	}
	auto target = +g_ppu;
	usz i = 0;
	// Suspend threads if necessary (those pushed beyond the ONPROC window)
	for (usz thread_count = g_cfg.core.ppu_threads; target; target = target->next_ppu, i++)
	{
		if (i >= thread_count && cpu_flag::suspend - target->state)
		{
			ppu_log.trace("suspend(): %s", target->id);
			target->ack_suspend = true;
			g_pending++;
			ensure(!target->state.test_and_set(cpu_flag::suspend));
			if (is_paused(target->state - cpu_flag::suspend))
			{
				target->state.notify_one();
			}
		}
	}
	const auto current_ppu = cpu_thread::get_current<ppu_thread>();
	// Remove pending if necessary
	if (current_ppu)
	{
		if (std::exchange(current_ppu->ack_suspend, false))
		{
			ensure(g_pending)--;
		}
	}
	// In real PS3 (it seems), when a thread with a higher priority than the caller is signaled and -
	// - that there is available space on the running queue for the other hardware thread to start
	// It prioritizes signaled thread - caller's hardware thread switches instantly to the new thread code
	// While signaling to the other hardware thread to execute the caller's code.
	// Resulting in a delay to the caller after such thread is signaled
	if (current_ppu && changed_queue && has_free_hw_thread_space)
	{
		if (current_ppu->prio.load().prio > lowest_new_priority)
		{
			// gpr[11] holds the syscall number; 0x35 is sys_ppu_thread_create
			const bool is_create_thread = current_ppu->gpr[11] == 0x35;
			// When not being set to All timers - activate only for sys_ppu_thread_start
			if (is_create_thread || g_cfg.core.sleep_timers_accuracy == sleep_timers_accuracy_level::_all_timers)
			{
				if (!current_ppu->state.test_and_set(cpu_flag::yield) || current_ppu->hw_sleep_time != 0)
				{
					// Emulated hardware-switch delay in microseconds (empirical values)
					current_ppu->hw_sleep_time += (is_create_thread ? 51 : 35);
				}
				else
				{
					current_ppu->hw_sleep_time = 30000; // In addition to another flag's use (TODO: Refactor and clean this)
				}
			}
		}
	}
	return changed_queue;
}
// Reset all scheduler state back to its initial (empty) configuration.
void lv2_obj::cleanup()
{
	// Tear down scheduler bookkeeping
	g_scheduler_ready = false;
	g_ppu = nullptr;
	g_pending = 0;
	s_yield_frequency = 0;

	// Drop all queued/waiting thread records
	g_to_sleep.clear();
	g_waiting.clear();
}
// Run the scheduler pass (g_mutex held): resume suspended ONPROC threads,
// fire expired timeouts, and optionally trigger a periodic RSX preemption.
// Notifications are batched into g_to_notify where possible, with local
// notify_one() as the overflow fallback.
void lv2_obj::schedule_all(u64 current_time)
{
	// First free slot of the deferred-notification list
	auto it = std::find(g_to_notify, std::end(g_to_notify), std::add_pointer_t<const void>{});
	if (!g_pending && g_scheduler_ready)
	{
		auto target = +g_ppu;
		// Wake up threads
		for (usz x = g_cfg.core.ppu_threads; target && x; target = target->next_ppu, x--)
		{
			if (target->state & cpu_flag::suspend)
			{
				ppu_log.trace("schedule(): %s", target->id);
				// Remove yield if it was sleeping until now
				const bs_t<cpu_flag> remove_yield = target->start_time == 0 ? +cpu_flag::suspend : (cpu_flag::yield + cpu_flag::preempt);
				target->start_time = 0;
				if ((target->state.fetch_op(AOFN(x += cpu_flag::signal, x -= cpu_flag::suspend, x-= remove_yield, void())) & (cpu_flag::wait + cpu_flag::signal)) != cpu_flag::wait)
				{
					// Not actually blocked (or already signaled): no wakeup needed
					continue;
				}
				if (it == std::end(g_to_notify))
				{
					// Out of notification slots, notify locally (resizable container is not worth it)
					target->state.notify_one();
				}
				else
				{
					*it++ = &target->state;
				}
			}
		}
	}
	// Check registered timeouts
	while (!g_waiting.empty())
	{
		const auto pair = &g_waiting.front();
		if (!current_time)
		{
			// Lazily sample the clock only when a timeout entry exists
			current_time = get_guest_system_time();
		}
		if (pair->first <= current_time)
		{
			const auto target = pair->second;
			g_waiting.pop_front();
			if (target != cpu_thread::get_current())
			{
				// Change cpu_thread::state for the lightweight notification to work
				ensure(!target->state.test_and_set(cpu_flag::notify));
				// Otherwise notify it to wake itself
				if (it == std::end(g_to_notify))
				{
					// Out of notification slots, notify locally (resizable container is not worth it)
					target->state.notify_one();
				}
				else
				{
					*it++ = &target->state;
				}
			}
		}
		else
		{
			// The list is sorted so assume no more timeouts
			break;
		}
	}
	if (it < std::end(g_to_notify))
	{
		// Null-terminate the list if it ends before last slot
		*it = nullptr;
	}
	// Periodic forced preemption (used to yield time to RSX emulation)
	if (const u64 freq = s_yield_frequency)
	{
		const u64 tsc = utils::get_tsc();
		const u64 last_tsc = s_last_yield_tsc;
		if (tsc >= last_tsc && tsc <= s_max_allowed_yield_tsc && tsc - last_tsc >= freq)
		{
			auto target = +g_ppu;
			cpu_thread* cpu = nullptr;
			// Pick a preemption victim: prefer an ONPROC PPU thread without wait/preempt flags
			for (usz x = g_cfg.core.ppu_threads;; target = target->next_ppu, x--)
			{
				if (!target || !x)
				{
					if (g_ppu && cpu_flag::preempt - g_ppu->state)
					{
						// Don't be picky, pick up any running PPU thread even it has a wait flag
						cpu = g_ppu;
					}
					// TODO: If this case is common enough it may be valuable to iterate over all CPU threads to find a perfect candidate (one without a wait or suspend flag)
					else if (auto current = cpu_thread::get_current(); current && cpu_flag::suspend - current->state)
					{
						// May be an SPU or RSX thread, use them as a last resort
						cpu = current;
					}
					break;
				}
				if (target->state.none_of(cpu_flag::preempt + cpu_flag::wait))
				{
					cpu = target;
					break;
				}
			}
			if (cpu && cpu_flag::preempt - cpu->state && !cpu->state.test_and_set(cpu_flag::preempt))
			{
				s_last_yield_tsc = tsc;
				g_lv2_preempts_taken.release(g_lv2_preempts_taken.load() + 1); // Has a minor race but performance is more important
				rsx::set_rsx_yield_flag();
			}
		}
	}
}
// Mark the scheduler as operational and kick all queued threads.
// The flag must be published before awake_all so the wakeup pass sees it.
void lv2_obj::make_scheduler_ready()
{
	g_scheduler_ready.release(true);
	lv2_obj::awake_all();
}
// Compute the LV2-visible status of a PPU thread plus its run-queue position.
// lock_idm/lock_lv2 select which global mutexes to acquire here (callers that
// already hold them pass false). Position is 0 when not applicable.
std::pair<ppu_thread_status, u32> lv2_obj::ppu_state(ppu_thread* ppu, bool lock_idm, bool lock_lv2)
{
	std::optional<reader_lock> opt_lock[2];
	if (lock_idm)
	{
		opt_lock[0].emplace(id_manager::g_mutex);
	}
	// Before boot completes, a stopped thread counts as IDLE
	if (!Emu.IsReady() ? ppu->state.all_of(cpu_flag::stop) : ppu->stop_flag_removal_protection)
	{
		return { PPU_THREAD_STATUS_IDLE, 0};
	}
	switch (ppu->joiner)
	{
	case ppu_join_status::zombie: return { PPU_THREAD_STATUS_ZOMBIE, 0};
	case ppu_join_status::exited: return { PPU_THREAD_STATUS_DELETED, 0};
	default: break;
	}
	if (lock_lv2)
	{
		opt_lock[1].emplace(lv2_obj::g_mutex);
	}
	// Locate the thread in the run queue
	u32 pos = umax;
	u32 i = 0;
	for (auto target = +g_ppu; target; target = target->next_ppu, i++)
	{
		if (target == ppu)
		{
			pos = i;
			break;
		}
	}
	if (pos == umax)
	{
		// Not queued: either sleeping on an interrupt or fully stopped
		if (!ppu->interrupt_thread_executing)
		{
			return { PPU_THREAD_STATUS_STOP, 0};
		}
		return { PPU_THREAD_STATUS_SLEEP, 0 };
	}
	// Queue positions beyond the emulated HW thread count are RUNNABLE
	if (pos >= g_cfg.core.ppu_threads + 0u)
	{
		return { PPU_THREAD_STATUS_RUNNABLE, pos };
	}
	return { PPU_THREAD_STATUS_ONPROC, pos};
}
// Register a thread that is expected to go to sleep during boot
// (consumed by sleep_unlocked's g_to_sleep handling).
void lv2_obj::set_future_sleep(cpu_thread* cpu)
{
	g_to_sleep.emplace_back(cpu);
}
// True once every boot-time "future sleep" thread has actually gone to sleep.
bool lv2_obj::is_scheduler_ready()
{
	reader_lock lock(g_mutex);
	return g_to_sleep.empty();
}
// Count the ONPROC threads at the head of the run queue (lock-free walk).
// has_running is set when more threads are queued than emulated HW threads,
// i.e. at least one RUNNABLE thread exists beyond the ONPROC window.
ppu_non_sleeping_count_t lv2_obj::count_non_sleeping_threads()
{
	ppu_non_sleeping_count_t result{};

	const usz hw_threads = g_cfg.core.ppu_threads;
	auto node = atomic_storage<ppu_thread*>::load(g_ppu);

	while (node)
	{
		if (result.onproc_count == hw_threads)
		{
			result.has_running = true;
			break;
		}

		result.onproc_count++;
		node = atomic_storage<ppu_thread*>::load(node->next_ppu);
	}

	return result;
}
// Configure the periodic preemption used by schedule_all():
// freq is the minimum TSC delta between forced yields, max_allowed_tsc is the
// TSC deadline past which no more forced yields are taken.
void lv2_obj::set_yield_frequency(u64 freq, u64 max_allowed_tsc)
{
	s_yield_frequency.release(freq);
	s_max_allowed_yield_tsc.release(max_allowed_tsc);
	g_lv2_preempts_taken.release(0);
}
// Per-function target attributes: MSVC enables these ISA extensions implicitly,
// GCC/Clang require explicit __target__ annotations.
#if defined(_MSC_VER)
#define mwaitx_func
#define waitpkg_func
#else
#define mwaitx_func __attribute__((__target__("mwaitx")))
#define waitpkg_func __attribute__((__target__("waitpkg")))
#endif
#if defined(ARCH_X64)
// Waits for a number of TSC clock cycles in power optimized state
// Cstate is represented in bits [7:4]+1 cstate. So C0 requires bits [7:4] to be set to 0xf, C1 requires bits [7:4] to be set to 0.
mwaitx_func static void __mwaitx(u32 cycles, u32 cstate)
{
	constexpr u32 timer_enable = 0x2;
	// monitorx will wake if the cache line is written to. We don't want this, so place the monitor value on it's own cache line.
	alignas(64) u64 monitor_var{};
	_mm_monitorx(&monitor_var, 0, 0);
	_mm_mwaitx(timer_enable, cstate, cycles);
}
// First bit indicates cstate, 0x0 for C.02 state (lower power) or 0x1 for C.01 state (higher power)
waitpkg_func static void __tpause(u32 cycles, u32 cstate)
{
	// TPAUSE takes an absolute TSC deadline, not a relative cycle count
	const u64 tsc = utils::get_tsc() + cycles;
	_tpause(cstate, tsc);
}
#endif
// Host-side wait for an emulated timeout of `usec` microseconds.
// cpu: optional thread whose state flags interrupt the wait (signal/notify/stop);
//      its end_time, when set, overrides usec.
// scale: apply the guest clock scale to usec; is_usleep: selects the accuracy
//        config threshold used for sys_usleep-like waits.
// Returns true when the timeout elapsed, false when interrupted by a signal.
bool lv2_obj::wait_timeout(u64 usec, ppu_thread* cpu, bool scale, bool is_usleep)
{
	static_assert(u64{umax} / max_timeout >= 100, "max timeout is not valid for scaling");
	const u64 start_time = get_system_time();
	if (cpu)
	{
		// A registered deadline on the thread takes precedence over usec
		if (u64 end_time = cpu->end_time; end_time != umax)
		{
			const u64 guest_start = get_guest_system_time(start_time);
			if (end_time <= guest_start)
			{
				return true;
			}
			usec = end_time - guest_start;
			scale = true;
		}
	}
	if (scale)
	{
		// Scale time
		usec = std::min<u64>(usec, u64{umax} / 100) * 100 / g_cfg.core.clocks_scale;
	}
	// Clamp
	usec = std::min<u64>(usec, max_timeout);
	u64 passed = 0;
	// With no thread, wait on a dummy state that is never signaled
	atomic_bs_t<cpu_flag> dummy{};
	const auto& state = cpu ? cpu->state : dummy;
	auto old_state = +state;
	auto wait_for = [&](u64 timeout)
	{
		thread_ctrl::wait_on(state, old_state, timeout);
	};
	for (;; old_state = state)
	{
		if (old_state & cpu_flag::notify)
		{
			// Timeout notification has been forced
			break;
		}
		if (old_state & cpu_flag::signal)
		{
			return false;
		}
		if (::is_stopped(old_state) || thread_ctrl::state() == thread_state::aborting)
		{
			return passed >= usec;
		}
		if (passed >= usec)
		{
			break;
		}
		u64 remaining = usec - passed;
#ifdef __linux__
		// NOTE: Assumption that timer initialization has succeeded
		constexpr u64 host_min_quantum = 10;
#else
		// Host scheduler quantum for windows (worst case)
		// NOTE: On ps3 this function has very high accuracy
		constexpr u64 host_min_quantum = 500;
#endif
		// TODO: Tune for other non windows operating sytems
		if (g_cfg.core.sleep_timers_accuracy < (is_usleep ? sleep_timers_accuracy_level::_usleep : sleep_timers_accuracy_level::_all_timers))
		{
			// Low-accuracy mode: a single OS-level wait is good enough
			wait_for(remaining);
		}
		else
		{
			if (remaining > host_min_quantum)
			{
#ifdef __linux__
				// With timerslack set low, Linux is precise for all values above
				wait_for(remaining);
#else
				// Wait on multiple of min quantum for large durations to avoid overloading low thread cpus
				wait_for(remaining - (remaining % host_min_quantum));
#endif
			}
			// TODO: Determine best value for yield delay
#if defined(ARCH_X64)
			else if (utils::has_appropriate_um_wait())
			{
				// Sub-quantum wait via user-mode wait instructions (TPAUSE/MWAITX)
				const u32 us_in_tsc_clocks = ::narrow<u32>(remaining * (utils::get_tsc_freq() / 1000000ULL));
				if (utils::has_waitpkg())
				{
					__tpause(us_in_tsc_clocks, 0x1);
				}
				else
				{
					__mwaitx(us_in_tsc_clocks, 0xf0);
				}
			}
#endif
			else
			{
				// Try yielding. May cause long wake latency but helps weaker CPUs a lot by alleviating resource pressure
				std::this_thread::yield();
			}
		}
		passed = get_system_time() - start_time;
	}
	return true;
}
// Release host-side resources a thread must not hold while blocked:
// drop any VM lock and remove it from the CPU counter pool.
void lv2_obj::prepare_for_sleep(cpu_thread& cpu)
{
	vm::temporary_unlock(cpu);
	cpu_counter::remove(&cpu);
}
| 91,024
|
C++
|
.cpp
| 1,973
| 43.526102
| 180
| 0.566234
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,333
|
sys_vm.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_vm.cpp
|
#include "stdafx.h"
#include "sys_vm.h"
#include "sys_process.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Memory/vm_locking.h"
// Construct a sys_vm allocation record and publish its IDM id into the
// per-256MB-region slot table (indexed by addr >> 28).
sys_vm_t::sys_vm_t(u32 _addr, u32 vsize, lv2_memory_container* ct, u32 psize)
	: ct(ct)
	, addr(_addr)
	, size(vsize)
	, psize(psize)
{
	// Write ID
	g_ids[addr >> 28].release(idm::last_id());
}
// Serialize this allocation for savestates (container by id, then geometry).
void sys_vm_t::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_vm);
	ar(ct->id, addr, size, psize);
}
// Invalidate the region-slot entry so lookups by address no longer resolve.
sys_vm_t::~sys_vm_t()
{
	// Free ID
	g_ids[addr >> 28].release(id_manager::id_traits<sys_vm_t>::invalid);
}
LOG_CHANNEL(sys_vm);
// Process-wide accounting of total sys_vm virtual memory currently mapped.
struct sys_vm_global_t
{
	atomic_t<u32> total_vsize = 0;
};
// Savestate deserialization: restore fields, re-publish the id slot and
// re-account the virtual size in the global counter.
sys_vm_t::sys_vm_t(utils::serial& ar)
	: ct(lv2_memory_container::search(ar))
	, addr(ar)
	, size(ar)
	, psize(ar)
{
	g_ids[addr >> 28].release(idm::last_id());
	g_fxo->need<sys_vm_global_t>();
	g_fxo->get<sys_vm_global_t>().total_vsize += size;
}
// Syscall: map a sys_vm region of vsize virtual bytes backed by psize physical
// bytes taken from memory container cid. Writes the base address to *addr.
// Errors: EINVAL (bad sizes/policy), ESRCH (bad container), EBUSY (per-process
// virtual limit exceeded), ENOMEM (container or address space exhausted).
error_code sys_vm_memory_map(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_memory_map(vsize=0x%x, psize=0x%x, cid=0x%x, flags=0x%x, policy=0x%x, addr=*0x%x)", vsize, psize, cid, flag, policy, addr);
	// vsize: multiple of 32MB up to 256MB; psize: multiple of 64KB
	if (!vsize || !psize || vsize % 0x200'0000 || vsize > 0x1000'0000 || psize % 0x1'0000 || policy != SYS_VM_POLICY_AUTO_RECOMMENDED)
	{
		return CELL_EINVAL;
	}
	// Syscall 300 (this variant) requires at least 1MB of physical backing
	if (ppu.gpr[11] == 300 && psize < 0x10'0000)
	{
		return CELL_EINVAL;
	}
	const auto idm_ct = idm::get<lv2_memory_container>(cid);
	// Invalid id selects the default (global) container
	const auto ct = cid == SYS_MEMORY_CONTAINER_ID_INVALID ? &g_fxo->get<lv2_memory_container>() : idm_ct.get();
	if (!ct)
	{
		return CELL_ESRCH;
	}
	// Atomically reserve vsize against the per-process virtual limit
	if (!g_fxo->get<sys_vm_global_t>().total_vsize.fetch_op([vsize, has_root = g_ps3_process_info.has_root_perm()](u32& size)
	{
		// A single process can hold up to 256MB of virtual memory, even on DECR
		// VSH can hold more
		if ((has_root ? 0x1E000000 : 0x10000000) - size < vsize)
		{
			return false;
		}
		size += static_cast<u32>(vsize);
		return true;
	}).second)
	{
		return CELL_EBUSY;
	}
	if (!ct->take(psize))
	{
		// Roll back the virtual-size reservation
		g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
		return CELL_ENOMEM;
	}
	// Look for unmapped space
	if (const auto area = vm::find_map(0x10000000, 0x10000000, 2 | (flag & SYS_MEMORY_PAGE_SIZE_MASK)))
	{
		// NOTE(review): logs the guest out-pointer, not area->addr — presumably intentional legacy format
		sys_vm.warning("sys_vm_memory_map(): Found VM 0x%x area (vsize=0x%x)", addr, vsize);
		// Alloc all memory (shall not fail)
		ensure(area->alloc(static_cast<u32>(vsize)));
		vm::lock_sudo(area->addr, static_cast<u32>(vsize));
		idm::make<sys_vm_t>(area->addr, static_cast<u32>(vsize), ct, static_cast<u32>(psize));
		// Write a pointer for the allocated memory
		ppu.check_state();
		*addr = area->addr;
		return CELL_OK;
	}
	// No address space: roll back both reservations
	ct->free(psize);
	g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
	return CELL_ENOMEM;
}
// Syscall: variant of sys_vm_memory_map; currently forwarded unchanged.
error_code sys_vm_memory_map_different(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_memory_map_different(vsize=0x%x, psize=0x%x, cid=0x%x, flags=0x%llx, policy=0x%llx, addr=*0x%x)", vsize, psize, cid, flag, policy, addr);
	// TODO: if needed implement different way to map memory, unconfirmed.
	return sys_vm_memory_map(ppu, vsize, psize, cid, flag, policy, addr);
}
// Syscall: unmap a sys_vm region by its base address, returning its physical
// memory to the container and its virtual size to the global counter.
error_code sys_vm_unmap(ppu_thread& ppu, u32 addr)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_unmap(addr=0x%x)", addr);
	// Special case, check if its a start address by alignment
	if (addr % 0x10000000)
	{
		return CELL_EINVAL;
	}
	// Free block and info
	const auto vmo = idm::withdraw<sys_vm_t>(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo)
	{
		// Free block
		ensure(vm::unmap(addr).second);
		// Return memory
		vmo.ct->free(vmo.psize);
		g_fxo->get<sys_vm_global_t>().total_vsize -= vmo.size;
	});
	if (!vmo)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: grow the physical backing of a sys_vm region by `size` bytes,
// taken from its memory container. size must be a nonzero multiple of 1MB.
error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u64 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_append_memory(addr=0x%x, size=0x%x)", addr, size);
	if (!size || size % 0x100000)
	{
		return CELL_EINVAL;
	}
	// addr must be the exact base of an existing region
	const auto block = idm::check<sys_vm_t>(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo) -> CellError
	{
		if (vmo.addr != addr)
		{
			return CELL_EINVAL;
		}
		if (!vmo.ct->take(size))
		{
			return CELL_ENOMEM;
		}
		vmo.psize += static_cast<u32>(size);
		return {};
	});
	if (!block)
	{
		return CELL_EINVAL;
	}
	if (block.ret)
	{
		return block.ret;
	}
	return CELL_OK;
}
// Syscall: shrink the physical backing of a sys_vm region by `size` bytes,
// returning them to its container. At least 1MB of backing must remain.
error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u64 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_return_memory(addr=0x%x, size=0x%x)", addr, size);
	if (!size || size % 0x100000)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::check<sys_vm_t>(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo) -> CellError
	{
		if (vmo.addr != addr)
		{
			return CELL_EINVAL;
		}
		// Atomically reduce psize, refusing if less than 1MB would remain
		auto [_, ok] = vmo.psize.fetch_op([&](u32& value)
		{
			if (value <= size || value - size < 0x100000ull)
			{
				return false;
			}
			value -= static_cast<u32>(size);
			return true;
		});
		if (!ok)
		{
			return CELL_EBUSY;
		}
		vmo.ct->free(size);
		return {};
	});
	if (!block)
	{
		return CELL_EINVAL;
	}
	if (block.ret)
	{
		return block.ret;
	}
	return CELL_OK;
}
// Syscall: validate a lock request on a sys_vm range. The emulator only checks
// that the range lies inside an existing allocation; no actual pinning occurs.
error_code sys_vm_lock(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;

	sys_vm.warning("sys_vm_lock(addr=0x%x, size=0x%x)", addr, size);

	// A zero-sized range is rejected outright
	if (size == 0)
	{
		return CELL_EINVAL;
	}

	// The range must fall entirely inside the allocation that owns addr
	if (const auto vm_block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr)))
	{
		const u64 range_end = u64{addr} + size;
		const u64 block_end = u64{vm_block->addr} + vm_block->size;

		if (range_end <= block_end)
		{
			return CELL_OK;
		}
	}

	return CELL_EINVAL;
}
// Syscall: validate an unlock request on a sys_vm range. Only bounds checking
// is performed; nothing was actually locked by sys_vm_lock.
error_code sys_vm_unlock(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;

	sys_vm.warning("sys_vm_unlock(addr=0x%x, size=0x%x)", addr, size);

	// A zero-sized range is rejected outright
	if (size == 0)
	{
		return CELL_EINVAL;
	}

	// The range must fall entirely inside the allocation that owns addr
	if (const auto vm_block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr)))
	{
		const u64 range_end = u64{addr} + size;
		const u64 block_end = u64{vm_block->addr} + vm_block->size;

		if (range_end <= block_end)
		{
			return CELL_OK;
		}
	}

	return CELL_EINVAL;
}
// Syscall: paging hint (prefetch). Only validates the range; the emulator has
// no backing-store paging, so no further action is needed.
error_code sys_vm_touch(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_touch(addr=0x%x, size=0x%x)", addr, size);
	if (!size)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: paging hint (flush). Validation-only stub, as with sys_vm_touch.
error_code sys_vm_flush(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_flush(addr=0x%x, size=0x%x)", addr, size);
	if (!size)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: paging hint (invalidate). Validation-only stub.
error_code sys_vm_invalidate(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_invalidate(addr=0x%x, size=0x%x)", addr, size);
	if (!size)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: paging hint (store to backing). Validation-only stub.
error_code sys_vm_store(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_store(addr=0x%x, size=0x%x)", addr, size);
	if (!size)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: paging hint (synchronize). Validation-only stub.
error_code sys_vm_sync(ppu_thread& ppu, u32 addr, u32 size)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_sync(addr=0x%x, size=0x%x)", addr, size);
	if (!size)
	{
		return CELL_EINVAL;
	}
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	return CELL_OK;
}
// Syscall: query the paging state of a range. Since everything is always
// resident in the emulator, always reports SYS_VM_STATE_ON_MEMORY.
error_code sys_vm_test(ppu_thread& ppu, u32 addr, u32 size, vm::ptr<u64> result)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_test(addr=0x%x, size=0x%x, result=*0x%x)", addr, size, result);
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// Range must lie fully inside the owning allocation
	if (!block || u64{addr} + size > u64{block->addr} + block->size)
	{
		return CELL_EINVAL;
	}
	ppu.check_state();
	*result = SYS_VM_STATE_ON_MEMORY;
	return CELL_OK;
}
// Syscall: report sys_vm paging statistics. With no real paging, all counters
// are zero except the total physical backing size and a fresh timestamp.
error_code sys_vm_get_statistics(ppu_thread& ppu, u32 addr, vm::ptr<sys_vm_statistics_t> stat)
{
	ppu.state += cpu_flag::wait;
	sys_vm.warning("sys_vm_get_statistics(addr=0x%x, stat=*0x%x)", addr, stat);
	const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
	// addr must be the exact base of an existing region
	if (!block || block->addr != addr)
	{
		return CELL_EINVAL;
	}
	ppu.check_state();
	stat->page_fault_ppu = 0;
	stat->page_fault_spu = 0;
	stat->page_in = 0;
	stat->page_out = 0;
	stat->pmem_total = block->psize;
	stat->pmem_used = 0;
	stat->timestamp = get_timebased_time();
	return CELL_OK;
}
DECLARE(sys_vm_t::g_ids){};
| 9,100
|
C++
|
.cpp
| 336
| 24.6875
| 161
| 0.663278
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,334
|
sys_trace.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_trace.cpp
|
#include "stdafx.h"
#include "sys_trace.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_trace);
// TODO: DEX/DECR mode support?
// All sys_trace syscalls below are unimplemented stubs: each logs a TODO and
// reports CELL_ENOSYS to the guest.
s32 sys_trace_create()
{
	sys_trace.todo("sys_trace_create()");
	return CELL_ENOSYS;
}
s32 sys_trace_start()
{
	sys_trace.todo("sys_trace_start()");
	return CELL_ENOSYS;
}
s32 sys_trace_stop()
{
	sys_trace.todo("sys_trace_stop()");
	return CELL_ENOSYS;
}
s32 sys_trace_update_top_index()
{
	sys_trace.todo("sys_trace_update_top_index()");
	return CELL_ENOSYS;
}
s32 sys_trace_destroy()
{
	sys_trace.todo("sys_trace_destroy()");
	return CELL_ENOSYS;
}
s32 sys_trace_drain()
{
	sys_trace.todo("sys_trace_drain()");
	return CELL_ENOSYS;
}
s32 sys_trace_attach_process()
{
	sys_trace.todo("sys_trace_attach_process()");
	return CELL_ENOSYS;
}
s32 sys_trace_allocate_buffer()
{
	sys_trace.todo("sys_trace_allocate_buffer()");
	return CELL_ENOSYS;
}
s32 sys_trace_free_buffer()
{
	sys_trace.todo("sys_trace_free_buffer()");
	return CELL_ENOSYS;
}
s32 sys_trace_create2()
{
	sys_trace.todo("sys_trace_create2()");
	return CELL_ENOSYS;
}
| 1,077
|
C++
|
.cpp
| 55
| 17.945455
| 48
| 0.729891
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,335
|
sys_event.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_event.cpp
|
#include "stdafx.h"
#include "sys_event.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "sys_process.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_event);
// Construct an event queue; narrow parameters are stored in compact u8 fields.
lv2_event_queue::lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept
	: id(idm::last_id())
	, protocol{static_cast<u8>(protocol)}
	, type(static_cast<u8>(type))
	, size(static_cast<u8>(size))
	, name(name)
	, key(ipc_key)
{
}
// Savestate deserialization: field order must match save() below.
lv2_event_queue::lv2_event_queue(utils::serial& ar) noexcept
	: id(idm::last_id())
	, protocol(ar)
	, type(ar)
	, size(ar)
	, name(ar)
	, key(ar)
{
	// Pending (undelivered) events are restored last
	ar(events);
}
// Savestate factory: construct the queue and register it under its IPC key.
std::shared_ptr<void> lv2_event_queue::load(utils::serial& ar)
{
	auto queue = std::make_shared<lv2_event_queue>(ar);
	return lv2_obj::load(queue->key, queue);
}
// Serialize queue state (must mirror the deserializing constructor's order).
void lv2_event_queue::save(utils::serial& ar)
{
	ar(protocol, type, size, name, key, events);
}
// Serialize a (possibly null/dead) queue reference as its IDM id; 0 = none.
void lv2_event_queue::save_ptr(utils::serial& ar, lv2_event_queue* q)
{
	if (!lv2_obj::check(q))
	{
		ar(u32{0});
		return;
	}
	ar(q->id);
}
// Deserialize a queue reference saved by save_ptr(). If the target queue has
// not been created yet, resolution is deferred to a post-init callback that
// fills `queue` in place; in that case nullptr is returned for now.
std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg)
{
	const u32 id = ar.pop<u32>();
	if (!id)
	{
		// Saved as "no queue"
		return nullptr;
	}
	if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id))
	{
		// Already initialized
		return q;
	}
	// Sanity check: the id must belong to the event queue id range
	if (id >> 24 != id_base >> 24)
	{
		fmt::throw_exception("Failed in event queue pointer deserialization (invalid ID): location: %s, id=0x%x", msg, id);
	}
	Emu.PostponeInitCode([id, &queue, msg_str = std::string{msg}]()
	{
		// Defer resolving
		queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(id);
		if (!queue)
		{
			fmt::throw_exception("Failed in event queue pointer deserialization (not found): location: %s, id=0x%x", msg_str, id);
		}
	});
	// Null until resolved
	return nullptr;
}
// Savestate deserialization of a port, including its connected queue (if any).
lv2_event_port::lv2_event_port(utils::serial& ar)
	: type(ar)
	, name(ar)
	, queue(lv2_event_queue::load_ptr(ar, queue, "eventport"))
{
}
// Serialize the port and a reference to its connected queue.
void lv2_event_port::save(utils::serial& ar)
{
	ar(type, name);
	lv2_event_queue::save_ptr(ar, queue.get());
}
// Resolve a process-shared event queue by its IPC key.
// Returns an empty pointer for local (non-shared) keys or unknown keys.
std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
{
	// Local (non-shared) queues are never registered in the IPC table
	const bool is_local = ipc_key == SYS_EVENT_QUEUE_LOCAL;

	if (is_local)
	{
		return std::shared_ptr<lv2_event_queue>{};
	}

	// Look the key up in the global IPC manager
	auto& manager = g_fxo->get<ipc_manager<lv2_event_queue, u64>>();
	return manager.get(ipc_key);
}
extern void resume_spu_thread_group_from_waiting(spu_thread& spu);
// Deliver an event to this queue: either buffer it (no waiters) or hand it
// directly to the next waiting PPU/SPU thread per the queue protocol.
// notified_thread (optional out): set when a lower-priority PPU was awoken and
// the port was marked busy. Returns {} on success or a CellError.
CellError lv2_event_queue::send(lv2_event event, bool* notified_thread, lv2_event_port* port)
{
	if (notified_thread)
	{
		*notified_thread = false;
	}
	std::lock_guard lock(mutex);
	if (!exists)
	{
		// Queue was destroyed
		return CELL_ENOTCONN;
	}
	if (!pq && !sq)
	{
		// No waiters: buffer the event if there is capacity
		if (events.size() < this->size + 0u)
		{
			// Save event
			events.emplace_back(event);
			return {};
		}
		return CELL_EBUSY;
	}
	if (type == SYS_PPU_QUEUE)
	{
		// Store event in registers
		auto& ppu = static_cast<ppu_thread&>(*schedule<ppu_thread>(pq, protocol));
		if (ppu.state & cpu_flag::again)
		{
			// Target is being re-run for savestate purposes; abort delivery
			if (auto cpu = get_current_cpu_thread())
			{
				cpu->state += cpu_flag::again;
				cpu->state += cpu_flag::exit;
			}
			sys_event.warning("Ignored event!");
			// Fake error for abort
			return CELL_EAGAIN;
		}
		// Event tuple is returned in gpr[4..7] of the waiter
		std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = event;
		awake(&ppu);
		if (port && ppu.prio.load().prio < ensure(cpu_thread::get_current<ppu_thread>())->prio.load().prio)
		{
			// Block event port disconnection for the time being of sending events
			// PPU -> lower prio PPU is the only case that can cause thread blocking
			port->is_busy++;
			ensure(notified_thread);
			*notified_thread = true;
		}
	}
	else
	{
		// Store event in In_MBox
		auto& spu = static_cast<spu_thread&>(*schedule<spu_thread>(sq, protocol));
		if (spu.state & cpu_flag::again)
		{
			if (auto cpu = get_current_cpu_thread())
			{
				cpu->state += cpu_flag::exit + cpu_flag::again;
			}
			sys_event.warning("Ignored event!");
			// Fake error for abort
			return CELL_EAGAIN;
		}
		// The source field is replaced by CELL_OK for the SPU mailbox layout
		const u32 data1 = static_cast<u32>(std::get<1>(event));
		const u32 data2 = static_cast<u32>(std::get<2>(event));
		const u32 data3 = static_cast<u32>(std::get<3>(event));
		spu.ch_in_mbox.set_values(4, CELL_OK, data1, data2, data3);
		resume_spu_thread_group_from_waiting(spu);
	}
	return {};
}
// Syscall: create an event queue (PPU or SPU type, FIFO or priority protocol)
// with 1..127 buffered-event capacity; writes the new id to *equeue_id.
error_code sys_event_queue_create(cpu_thread& cpu, vm::ptr<u32> equeue_id, vm::ptr<sys_event_queue_attribute_t> attr, u64 ipc_key, s32 size)
{
	cpu.state += cpu_flag::wait;
	sys_event.warning("sys_event_queue_create(equeue_id=*0x%x, attr=*0x%x, ipc_key=0x%llx, size=%d)", equeue_id, attr, ipc_key, size);
	if (size <= 0 || size > 127)
	{
		return CELL_EINVAL;
	}
	const u32 protocol = attr->protocol;
	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY)
	{
		sys_event.error("sys_event_queue_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}
	const u32 type = attr->type;
	if (type != SYS_PPU_QUEUE && type != SYS_SPU_QUEUE)
	{
		sys_event.error("sys_event_queue_create(): unknown type (0x%x)", type);
		return CELL_EINVAL;
	}
	// The special local key selects a non-process-shared queue
	const u32 pshared = ipc_key == SYS_EVENT_QUEUE_LOCAL ? SYS_SYNC_NOT_PROCESS_SHARED : SYS_SYNC_PROCESS_SHARED;
	constexpr u32 flags = SYS_SYNC_NEWLY_CREATED;
	const u64 name = attr->name_u64;
	if (const auto error = lv2_obj::create<lv2_event_queue>(pshared, ipc_key, flags, [&]()
	{
		return std::make_shared<lv2_event_queue>(protocol, type, size, name, ipc_key);
	}))
	{
		return error;
	}
	cpu.check_state();
	*equeue_id = idm::last_id();
	return CELL_OK;
}
// Destroy an lv2 event queue. Without SYS_EVENT_QUEUE_DESTROY_FORCE the call
// fails with EBUSY if any thread is still waiting; with force mode all waiters
// are awakened with CELL_ECANCELED. Undelivered events are only logged, not delivered.
error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode)
{
	ppu.state += cpu_flag::wait;

	sys_event.warning("sys_event_queue_destroy(equeue_id=0x%x, mode=%d)", equeue_id, mode);

	if (mode && mode != SYS_EVENT_QUEUE_DESTROY_FORCE)
	{
		return CELL_EINVAL;
	}

	std::vector<lv2_event> events;

	// Queue mutex is taken inside the withdraw callback and may deliberately
	// remain held (owns_lock) after withdraw returns, so force-mode wakeups
	// below happen atomically with the id removal.
	std::unique_lock<shared_mutex> qlock;

	cpu_thread* head{};

	const auto queue = idm::withdraw<lv2_obj, lv2_event_queue>(equeue_id, [&](lv2_event_queue& queue) -> CellError
	{
		qlock = std::unique_lock{queue.mutex};

		// Head of the waiter list: PPU or SPU list depending on queue type
		head = queue.type == SYS_PPU_QUEUE ? static_cast<cpu_thread*>(+queue.pq) : +queue.sq;

		if (!mode && head)
		{
			return CELL_EBUSY;
		}

		if (!queue.events.empty())
		{
			// Copy events for logging, does not empty
			events.insert(events.begin(), queue.events.begin(), queue.events.end());
		}

		lv2_obj::on_id_destroy(queue, queue.key);

		if (!head)
		{
			// No waiters to wake: nothing more needs the queue lock
			qlock.unlock();
		}
		else
		{
			// If any waiter must be re-run (savestate), abort and retry the whole syscall
			for (auto cpu = head; cpu; cpu = cpu->get_next_cpu())
			{
				if (cpu->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return CELL_EAGAIN;
				}
			}
		}

		return {};
	});

	if (!queue)
	{
		return CELL_ESRCH;
	}

	if (ppu.state & cpu_flag::again)
	{
		// Syscall will be re-executed (savestate path)
		return {};
	}

	if (queue.ret)
	{
		return queue.ret;
	}

	std::string lost_data;

	if (qlock.owns_lock())
	{
		// Only build the waiter dump when the warning log level is enabled
		if (sys_event.warning)
		{
			u32 size = 0;

			for (auto cpu = head; cpu; cpu = cpu->get_next_cpu())
			{
				size++;
			}

			fmt::append(lost_data, "Forcefully awaken waiters (%u):\n", size);

			for (auto cpu = head; cpu; cpu = cpu->get_next_cpu())
			{
				lost_data += cpu->get_name();
				lost_data += '\n';
			}
		}

		if (queue->type == SYS_PPU_QUEUE)
		{
			// Cancel every sleeping PPU waiter and requeue it for scheduling
			for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu)
			{
				cpu->gpr[3] = CELL_ECANCELED;
				queue->append(cpu);
			}

			atomic_storage<ppu_thread*>::release(queue->pq, nullptr);
			lv2_obj::awake_all();
		}
		else
		{
			// Cancel every sleeping SPU waiter via its inbound mailbox
			for (auto cpu = +queue->sq; cpu; cpu = cpu->next_cpu)
			{
				cpu->ch_in_mbox.set_values(1, CELL_ECANCELED);
				resume_spu_thread_group_from_waiting(*cpu);
			}

			atomic_storage<spu_thread*>::release(queue->sq, nullptr);
		}

		qlock.unlock();
	}

	if (sys_event.warning)
	{
		if (!events.empty())
		{
			fmt::append(lost_data, "Unread queue events (%u):\n", events.size());
		}

		for (const lv2_event& evt : events)
		{
			fmt::append(lost_data, "data0=0x%x, data1=0x%x, data2=0x%x, data3=0x%x\n"
				, std::get<0>(evt), std::get<1>(evt), std::get<2>(evt), std::get<3>(evt));
		}

		if (!lost_data.empty())
		{
			sys_event.warning("sys_event_queue_destroy(): %s", lost_data);
		}
	}

	return CELL_OK;
}
// Non-blocking receive: pop up to `size` pending events from a PPU-type queue
// into event_array, writing the actual count to `number`. Never sleeps.
error_code sys_event_queue_tryreceive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_event_t> event_array, s32 size, vm::ptr<u32> number)
{
	ppu.state += cpu_flag::wait;

	sys_event.trace("sys_event_queue_tryreceive(equeue_id=0x%x, event_array=*0x%x, size=%d, number=*0x%x)", equeue_id, event_array, size, number);

	const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id);

	if (!queue)
	{
		return CELL_ESRCH;
	}

	if (queue->type != SYS_PPU_QUEUE)
	{
		return CELL_EINVAL;
	}

	// Staging buffer sized at the queue depth maximum (127, see queue creation),
	// so events can be copied to guest memory outside the lock
	std::array<sys_event_t, 127> events;

	std::unique_lock lock(queue->mutex);

	// Queue may have been destroyed between idm::get and taking the lock
	if (!queue->exists)
	{
		return CELL_ESRCH;
	}

	s32 count = 0;

	while (count < size && !queue->events.empty())
	{
		auto& dest = events[count++];
		const auto event = queue->events.front();
		queue->events.pop_front();

		std::tie(dest.source, dest.data1, dest.data2, dest.data3) = event;
	}

	lock.unlock();

	// Stabilize thread state before touching guest memory
	ppu.check_state();
	std::copy_n(events.begin(), count, event_array.get_ptr());
	*number = count;

	return CELL_OK;
}
// Blocking receive on a PPU-type queue. If an event is already pending it is
// returned immediately in gpr[4..7]; otherwise the thread is placed on the
// queue's sleep list and waits for a signal, timeout, or cancellation.
// dummy_event is unused by the kernel side (the event is delivered in registers).
error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_event_t> dummy_event, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_event.trace("sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx)", equeue_id, dummy_event, timeout);

	// Default result; overwritten with CELL_ETIMEDOUT/CELL_ECANCELED on those paths
	ppu.gpr[3] = CELL_OK;

	const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id, [&, notify = lv2_obj::notify_all_t()](lv2_event_queue& queue) -> CellError
	{
		if (queue.type != SYS_PPU_QUEUE)
		{
			return CELL_EINVAL;
		}

		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(queue.mutex);

		// "/dev_flash/vsh/module/msmw2.sprx" seems to rely on some cryptic shared memory behaviour that we don't emulate correctly
		// This is a hack to avoid waiting for 1m40s every time we boot vsh
		if (queue.key == 0x8005911000000012 && Emu.IsVsh())
		{
			sys_event.todo("sys_event_queue_receive(equeue_id=0x%x, *0x%x, timeout=0x%llx) Bypassing timeout for msmw2.sprx", equeue_id, dummy_event, timeout);
			timeout = 1;
		}

		if (queue.events.empty())
		{
			// No event available: go to sleep on the queue's PPU wait list
			queue.sleep(ppu, timeout);
			lv2_obj::emplace(queue.pq, &ppu);
			// EBUSY here is internal only; it selects the wait loop below
			return CELL_EBUSY;
		}

		// Event available immediately: deliver it in gpr[4..7]
		std::tie(ppu.gpr[4], ppu.gpr[5], ppu.gpr[6], ppu.gpr[7]) = queue.events.front();
		queue.events.pop_front();
		return {};
	});

	if (!queue)
	{
		return CELL_ESRCH;
	}

	if (queue.ret)
	{
		if (queue.ret != CELL_EBUSY)
		{
			return queue.ret;
		}
	}
	else
	{
		// Fast path: event was delivered without sleeping
		return CELL_OK;
	}

	// If cancelled, gpr[3] will be non-zero. Other registers must contain event data.
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			// Signaled: the sender already stored the event in gpr[4..7]
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock_rsx(queue->mutex);

			// If still on the wait list, mark the syscall for re-execution (savestate)
			for (auto cpu = +queue->pq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		// Briefly spin for a signal before committing to a full wait
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				ppu.state += cpu_flag::wait;

				if (!atomic_storage<ppu_thread*>::load(queue->pq))
				{
					// Waiters queue is empty, so the thread must have been signaled
					queue->mutex.lock_unlock();
					break;
				}

				std::lock_guard lock(queue->mutex);

				if (!queue->unqueue(queue->pq, &ppu))
				{
					// Already removed by a sender: treat as signaled, not timed out
					break;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			// Infinite timeout: sleep until the state word changes
			ppu.state.wait(state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Discard every pending event of the given queue without waking any waiter.
error_code sys_event_queue_drain(ppu_thread& ppu, u32 equeue_id)
{
	ppu.state += cpu_flag::wait;

	sys_event.trace("sys_event_queue_drain(equeue_id=0x%x)", equeue_id);

	// Locate the queue and clear its event list under the queue lock
	if (!idm::check<lv2_obj, lv2_event_queue>(equeue_id, [](lv2_event_queue& q)
	{
		std::lock_guard lock(q.mutex);
		q.events.clear();
	}))
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Create an lv2 event port of the requested type and return its id.
error_code sys_event_port_create(cpu_thread& cpu, vm::ptr<u32> eport_id, s32 port_type, u64 name)
{
	cpu.state += cpu_flag::wait;

	sys_event.warning("sys_event_port_create(eport_id=*0x%x, port_type=%d, name=0x%llx)", eport_id, port_type, name);

	// Only local ports and type 3 (IPC) are recognized
	if (port_type != SYS_EVENT_PORT_LOCAL && port_type != 3)
	{
		sys_event.error("sys_event_port_create(): unknown port type (%d)", port_type);
		return CELL_EINVAL;
	}

	const u32 id = idm::make<lv2_obj, lv2_event_port>(port_type, name);

	if (!id)
	{
		// Out of object ids
		return CELL_EAGAIN;
	}

	// Stabilize thread state before writing the id back to guest memory
	cpu.check_state();
	*eport_id = id;
	return CELL_OK;
}
// Destroy an event port. Fails with EISCONN while the port is still connected.
error_code sys_event_port_destroy(ppu_thread& ppu, u32 eport_id)
{
	ppu.state += cpu_flag::wait;

	sys_event.warning("sys_event_port_destroy(eport_id=0x%x)", eport_id);

	const auto port = idm::withdraw<lv2_obj, lv2_event_port>(eport_id, [](lv2_event_port& eport) -> CellError
	{
		// A port still attached to a live queue cannot be destroyed
		if (lv2_obj::check(eport.queue))
		{
			return CELL_EISCONN;
		}

		return {};
	});

	if (!port)
	{
		return CELL_ESRCH;
	}

	if (port.ret)
	{
		return port.ret;
	}

	return CELL_OK;
}
// Connect a local event port to a queue in the same process.
error_code sys_event_port_connect_local(cpu_thread& cpu, u32 eport_id, u32 equeue_id)
{
	cpu.state += cpu_flag::wait;

	sys_event.warning("sys_event_port_connect_local(eport_id=0x%x, equeue_id=0x%x)", eport_id, equeue_id);

	// Both objects are looked up and linked under the global id lock
	std::lock_guard lock(id_manager::g_mutex);

	const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

	if (!port)
	{
		return CELL_ESRCH;
	}

	if (!idm::check_unlocked<lv2_obj, lv2_event_queue>(equeue_id))
	{
		return CELL_ESRCH;
	}

	if (port->type != SYS_EVENT_PORT_LOCAL)
	{
		return CELL_EINVAL;
	}

	// Reject double connection
	if (lv2_obj::check(port->queue))
	{
		return CELL_EISCONN;
	}

	port->queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);

	return CELL_OK;
}
// Connect an IPC-type event port to a queue identified by its IPC key
// (possibly created by another process).
error_code sys_event_port_connect_ipc(ppu_thread& ppu, u32 eport_id, u64 ipc_key)
{
	ppu.state += cpu_flag::wait;

	sys_event.warning("sys_event_port_connect_ipc(eport_id=0x%x, ipc_key=0x%x)", eport_id, ipc_key);

	if (ipc_key == 0)
	{
		return CELL_EINVAL;
	}

	// Resolve the queue by IPC key before taking the global id lock
	auto queue = lv2_event_queue::find(ipc_key);

	std::lock_guard lock(id_manager::g_mutex);

	const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

	if (!port || !queue)
	{
		return CELL_ESRCH;
	}

	if (port->type != SYS_EVENT_PORT_IPC)
	{
		return CELL_EINVAL;
	}

	// Reject double connection
	if (lv2_obj::check(port->queue))
	{
		return CELL_EISCONN;
	}

	port->queue = std::move(queue);

	return CELL_OK;
}
// Detach a port from its queue. Fails while a send through this port is in flight.
error_code sys_event_port_disconnect(ppu_thread& ppu, u32 eport_id)
{
	ppu.state += cpu_flag::wait;

	sys_event.warning("sys_event_port_disconnect(eport_id=0x%x)", eport_id);

	std::lock_guard lock(id_manager::g_mutex);

	const auto port = idm::check_unlocked<lv2_obj, lv2_event_port>(eport_id);

	if (!port)
	{
		return CELL_ESRCH;
	}

	if (!lv2_obj::check(port->queue))
	{
		// Nothing to disconnect
		return CELL_ENOTCONN;
	}

	if (port->is_busy)
	{
		// A sender is currently using this port
		return CELL_EBUSY;
	}

	port->queue.reset();

	return CELL_OK;
}
// Send an event through a port to its connected queue. Callable from PPU or SPU
// context (or none). When delivery had to wake a lower-priority PPU thread, the
// port is kept busy until this sender is requeued, blocking disconnection meanwhile.
error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
{
	const auto cpu = cpu_thread::get_current();
	const auto ppu = cpu ? cpu->try_get<ppu_thread>() : nullptr;

	if (cpu)
	{
		cpu->state += cpu_flag::wait;
	}

	sys_event.trace("sys_event_port_send(eport_id=0x%x, data1=0x%llx, data2=0x%llx, data3=0x%llx)", eport_id, data1, data2, data3);

	// Set when send() woke a PPU thread and marked the port busy (see queue->send)
	bool notified_thread = false;

	const auto port = idm::check<lv2_obj, lv2_event_port>(eport_id, [&, notify = lv2_obj::notify_all_t()](lv2_event_port& port) -> CellError
	{
		if (ppu && ppu->loaded_from_savestate)
		{
			// Restored mid-send: re-enter the busy/requeue path below without resending
			port.is_busy++;
			notified_thread = true;
			return {};
		}

		if (lv2_obj::check(port.queue))
		{
			// Unnamed ports encode (pid | port id) as the event source
			const u64 source = port.name ? port.name : (u64{process_getpid() + 0u} << 32) | u64{eport_id};
			return port.queue->send(source, data1, data2, data3, &notified_thread, ppu && port.queue->type == SYS_PPU_QUEUE ? &port : nullptr);
		}

		return CELL_ENOTCONN;
	});

	if (!port)
	{
		return CELL_ESRCH;
	}

	if (ppu && notified_thread)
	{
		// Wait to be requeued
		if (ppu->test_stopped())
		{
			// Wait again on savestate load
			ppu->state += cpu_flag::again;
		}

		// Release the disconnect block taken while delivering
		port->is_busy--;
		return CELL_OK;
	}

	if (port.ret)
	{
		if (port.ret == CELL_EAGAIN)
		{
			// Not really an error code exposed to games (thread has raised cpu_flag::again)
			return not_an_error(CELL_EAGAIN);
		}

		if (port.ret == CELL_EBUSY)
		{
			return not_an_error(CELL_EBUSY);
		}

		return port.ret;
	}

	return CELL_OK;
}
| 16,591
|
C++
|
.cpp
| 625
| 23.6368
| 150
| 0.66219
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,336
|
sys_interrupt.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_interrupt.cpp
|
#include "stdafx.h"
#include "sys_interrupt.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/PPUOpcodes.h"
LOG_CHANNEL(sys_interrupt);
// Default constructor: records the id assigned by the id manager at creation time.
lv2_int_tag::lv2_int_tag() noexcept
	: lv2_obj{1}
	, id(idm::last_id())
{
}
// Savestate constructor: restores the attached handler by its serialized id.
// If the handler object has not been deserialized yet, resolution is postponed
// to the emulator's init-code phase.
lv2_int_tag::lv2_int_tag(utils::serial& ar) noexcept
	: lv2_obj{1}
	, id(idm::last_id())
	, handler([&]()
	{
		const u32 id = ar;

		auto ptr = idm::get_unlocked<lv2_obj, lv2_int_serv>(id);

		// Non-zero id but no object yet: resolve later, once all ids are restored
		if (!ptr && id)
		{
			Emu.PostponeInitCode([id, &handler = this->handler]()
			{
				handler = ensure(idm::get_unlocked<lv2_obj, lv2_int_serv>(id));
			});
		}

		return ptr;
	}())
{
}
// Serialize the tag: stores the handler's id, or 0 when no live handler is attached.
void lv2_int_tag::save(utils::serial& ar)
{
	ar(lv2_obj::check(handler) ? handler->id : 0);
}
// Construct an interrupt service bound to a PPU thread with two user arguments.
lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept
	: lv2_obj{1}
	, id(idm::last_id())
	, thread(thread)
	, arg1(arg1)
	, arg2(arg2)
{
}
// Savestate constructor: restores the bound thread by its serialized id, then
// the two user arguments (order must match save() below).
lv2_int_serv::lv2_int_serv(utils::serial& ar) noexcept
	: lv2_obj{1}
	, id(idm::last_id())
	, thread(idm::get_unlocked<named_thread<ppu_thread>>(ar))
	, arg1(ar)
	, arg2(ar)
{
}
// Serialize the service: thread id (0 if the thread no longer exists) and both arguments.
void lv2_int_serv::save(utils::serial& ar)
{
	ar(thread && idm::check_unlocked<named_thread<ppu_thread>>(thread->id) ? thread->id : 0, arg1, arg2);
}
void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
// Queue one interrupt invocation on the bound PPU thread: reset its stack,
// load arg1/arg2, call the thread's entry, then sleep and return to the
// interrupt dispatch loop (ppu_interrupt_thread_entry).
void lv2_int_serv::exec() const
{
	thread->cmd_list
	({
		{ ppu_cmd::reset_stack, 0 },
		{ ppu_cmd::set_args, 2 }, arg1, arg2,
		{ ppu_cmd::entry_call, 0 },
		{ ppu_cmd::sleep, 0 },
		{ ppu_cmd::ptr_call, 0 },
		std::bit_cast<u64>(&ppu_interrupt_thread_entry)
	});
}
void ppu_thread_exit(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
// Ask the interrupt thread to exit, wake it, join it, and remove it from the
// id manager (verifying the id still maps to this exact thread).
void lv2_int_serv::join() const
{
	thread->cmd_list
	({
		{ ppu_cmd::ptr_call, 0 },
		std::bit_cast<u64>(&ppu_thread_exit)
	});

	// Wake the thread out of its cmd_notify wait
	thread->cmd_notify.store(1);
	thread->cmd_notify.notify_one();

	// Join the named_thread
	(*thread)();

	idm::remove_verify<named_thread<ppu_thread>>(thread->id, static_cast<std::weak_ptr<named_thread<ppu_thread>>>(thread));
}
// Destroy an interrupt tag. Fails with EBUSY while a handler is still established on it.
error_code sys_interrupt_tag_destroy(ppu_thread& ppu, u32 intrtag)
{
	ppu.state += cpu_flag::wait;

	sys_interrupt.warning("sys_interrupt_tag_destroy(intrtag=0x%x)", intrtag);

	const auto tag = idm::withdraw<lv2_obj, lv2_int_tag>(intrtag, [](lv2_int_tag& tag) -> CellError
	{
		if (lv2_obj::check(tag.handler))
		{
			return CELL_EBUSY;
		}

		// Mark the object dead before the id is released
		tag.exists.release(0);
		return {};
	});

	if (!tag)
	{
		return CELL_ESRCH;
	}

	if (tag.ret)
	{
		return tag.ret;
	}

	return CELL_OK;
}
// Establish an interrupt handler: binds a (currently stopped) PPU thread to an
// interrupt tag and starts the thread in the interrupt dispatch loop.
// Returns the new lv2_int_serv id through `ih`.
error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32 intrtag, u32 intrthread, u64 arg1, u64 arg2)
{
	ppu.state += cpu_flag::wait;

	sys_interrupt.warning("_sys_interrupt_thread_establish(ih=*0x%x, intrtag=0x%x, intrthread=0x%x, arg1=0x%llx, arg2=0x%llx)", ih, intrtag, intrthread, arg1, arg2);

	// Default error when idm::import fails without the lambda setting a specific one
	CellError error = CELL_EAGAIN;

	const u32 id = idm::import<lv2_obj, lv2_int_serv>([&]()
	{
		std::shared_ptr<lv2_int_serv> result;

		// Get interrupt tag
		const auto tag = idm::check_unlocked<lv2_obj, lv2_int_tag>(intrtag);

		if (!tag)
		{
			error = CELL_ESRCH;
			return result;
		}

		// Get interrupt thread
		const auto it = idm::get_unlocked<named_thread<ppu_thread>>(intrthread);

		if (!it)
		{
			error = CELL_ESRCH;
			return result;
		}

		// If interrupt thread is running, it's already established on another interrupt tag
		if (cpu_flag::stop - it->state)
		{
			error = CELL_EAGAIN;
			return result;
		}

		// It's unclear if multiple handlers can be established on single interrupt tag
		if (lv2_obj::check(tag->handler))
		{
			error = CELL_ESTAT;
			return result;
		}

		result = std::make_shared<lv2_int_serv>(it, arg1, arg2);
		tag->handler = result;

		// Run the thread in the interrupt dispatch loop
		it->cmd_list
		({
			{ ppu_cmd::ptr_call, 0 },
			std::bit_cast<u64>(&ppu_interrupt_thread_entry)
		});

		it->state -= cpu_flag::stop;
		it->state.notify_one();

		return result;
	});

	if (id)
	{
		// Stabilize thread state before writing back to guest memory
		ppu.check_state();
		*ih = id;
		return CELL_OK;
	}

	return error;
}
// Tear down an interrupt handler: withdraw the lv2_int_serv, join its thread
// and return the thread's TLS base (r13) to the caller.
error_code _sys_interrupt_thread_disestablish(ppu_thread& ppu, u32 ih, vm::ptr<u64> r13)
{
	ppu.state += cpu_flag::wait;

	sys_interrupt.warning("_sys_interrupt_thread_disestablish(ih=0x%x, r13=*0x%x)", ih, r13);

	const auto handler = idm::withdraw<lv2_obj, lv2_int_serv>(ih, [](lv2_obj& obj)
	{
		// Mark the object dead before the id is released
		obj.exists.release(0);
	});

	if (!handler)
	{
		// Fallback: `ih` may name a raw PPU thread id instead of a service id
		if (const auto thread = idm::withdraw<named_thread<ppu_thread>>(ih))
		{
			*r13 = thread->gpr[13];

			// It is detached from IDM now so join must be done explicitly now
			*thread = thread_state::finished;
			return CELL_OK;
		}

		return CELL_ESRCH;
	}

	lv2_obj::sleep(ppu);

	// Wait for sys_interrupt_thread_eoi() and destroy interrupt thread
	handler->join();

	// Save TLS base
	*r13 = handler->thread->gpr[13];

	return CELL_OK;
}
// End-of-interrupt: returns the interrupt thread from its handler invocation
// (cpu_flag::ret unwinds the call) and puts it back to sleep.
void sys_interrupt_thread_eoi(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_interrupt.trace("sys_interrupt_thread_eoi()");

	ppu.state += cpu_flag::ret;

	lv2_obj::sleep(ppu);

	ppu.interrupt_thread_executing = false;
}
// Dispatch loop run by interrupt PPU threads: scans raw SPUs for pending class-2
// interrupts whose tag handler is bound to this thread and invokes it, otherwise
// sleeps until notified (or until the thread is asked to stop).
void ppu_interrupt_thread_entry(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*)
{
	while (true)
	{
		std::shared_ptr<lv2_int_serv> serv = nullptr;

		// Loop endlessly trying to invoke an interrupt if required
		idm::select<named_thread<spu_thread>>([&](u32, spu_thread& spu)
		{
			// Only raw (non-threaded) SPUs raise class-2 interrupts handled here
			if (spu.get_type() != spu_type::threaded)
			{
				auto& ctrl = spu.int_ctrl[2];

				if (lv2_obj::check(ctrl.tag))
				{
					auto& handler = ctrl.tag->handler;

					if (lv2_obj::check(handler))
					{
						// Only service tags whose handler is bound to this PPU thread
						if (handler->thread.get() == &ppu)
						{
							// Reflect a pending outbound-interrupt mailbox into the status word
							if (spu.ch_out_intr_mbox.get_count() && ctrl.mask & SPU_INT2_STAT_MAILBOX_INT)
							{
								ctrl.stat |= SPU_INT2_STAT_MAILBOX_INT;
							}

							if (ctrl.mask & ctrl.stat)
							{
								// At most one pending service is expected per scan
								ensure(!serv);
								serv = handler;
							}
						}
					}
				}
			}
		});

		if (serv)
		{
			// Queue interrupt, after the interrupt has finished the PPU returns to this loop
			serv->exec();
			return;
		}

		const auto state = +ppu.state;

		// Exit on stop request or when a command was queued meanwhile
		if (::is_stopped(state) || ppu.cmd_notify.exchange(0))
		{
			return;
		}

		thread_ctrl::wait_on(ppu.cmd_notify, 0);
	}
}
| 6,237
|
C++
|
.cpp
| 238
| 23.210084
| 162
| 0.662622
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,337
|
sys_dbg.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_dbg.cpp
|
#include "stdafx.h"
#include "sys_dbg.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUInterpreter.h"
#include "Emu/Cell/Modules/sys_lv2dbg.h"
#include "Emu/Memory/vm_locking.h"
#include "util/asm.hpp"
void ppu_register_function_at(u32 addr, u32 size, ppu_intrp_func_t ptr = nullptr);
LOG_CHANNEL(sys_dbg);
// Debug syscall: copy `size` bytes of guest memory at `address` into `data`.
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size, vm::ptr<void> data)
{
	sys_dbg.warning("sys_dbg_read_process_memory(pid=0x%x, address=0x%llx, size=0x%x, data=*0x%x)", pid, address, size, data);

	// Todo(TGEnigma): Process lookup (only 1 process exists right now)
	if (pid != 1)
	{
		return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
	}

	if (!size || !data)
	{
		return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
	}

	vm::writer_lock lock;

	// Destination must be writable and source readable over the whole range
	const bool dst_ok = vm::check_addr(data.addr(), vm::page_writable, size);
	const bool src_ok = vm::check_addr(address, vm::page_readable, size);

	if (!dst_ok || !src_ok)
	{
		return CELL_EFAULT;
	}

	std::memmove(data.get_ptr(), vm::base(address), size);

	return CELL_OK;
}
// Debug syscall: copy `size` bytes from `data` into guest memory at `address`.
// Writes through read-only protections; executable pages are updated through
// super pointers and re-registered with the PPU recompiler.
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size, vm::cptr<void> data)
{
	sys_dbg.warning("sys_dbg_write_process_memory(pid=0x%x, address=0x%llx, size=0x%x, data=*0x%x)", pid, address, size, data);

	// Todo(TGEnigma): Process lookup (only 1 process exists right now)
	if (pid != 1)
	{
		return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
	}

	if (!size || !data)
	{
		return CELL_LV2DBG_ERROR_DEINVALIDARGUMENTS;
	}

	// Check if data source is readable
	if (!vm::check_addr(data.addr(), vm::page_readable, size))
	{
		return CELL_EFAULT;
	}

	// Check destination (can be read-only actually)
	if (!vm::check_addr(address, vm::page_readable, size))
	{
		return CELL_EFAULT;
	}

	vm::writer_lock lock;

	// Again (pages may have changed before the lock was taken)
	if (!vm::check_addr(data.addr(), vm::page_readable, size) || !vm::check_addr(address, vm::page_readable, size))
	{
		return CELL_EFAULT;
	}

	const u8* data_ptr = static_cast<const u8*>(data.get_ptr());

	if ((address >> 28) == 0xDu)
	{
		// Stack pages (4k pages is the exception here)
		std::memmove(vm::base(address), data_ptr, size);
		return CELL_OK;
	}

	const u32 end = address + size;

	// Walk the range in 64k-aligned chunks, batching consecutive executable
	// pages into a single super-pointer copy + recompiler re-registration
	for (u32 i = address, exec_update_size = 0; i < end;)
	{
		const u32 op_size = std::min<u32>(utils::align<u32>(i + 1, 0x10000), end) - i;

		const bool is_exec = vm::check_addr(i, vm::page_executable | vm::page_readable);

		if (is_exec)
		{
			// Accumulate the executable run; committed when it ends or at range end
			exec_update_size += op_size;
			i += op_size;
		}

		if (!is_exec || i >= end)
		{
			// Commit executable data update
			// The read memory is also super ptr so memmove can work correctly on all implementations
			const u32 before_addr = i - exec_update_size;
			std::memmove(vm::get_super_ptr(before_addr), vm::get_super_ptr(data.addr() + (before_addr - address)), exec_update_size);
			ppu_register_function_at(before_addr, exec_update_size);
			exec_update_size = 0;

			if (i >= end)
			{
				break;
			}
		}

		if (!is_exec)
		{
			// Plain data chunk: ordinary copy
			std::memmove(vm::base(i), data_ptr + (i - address), op_size);
			i += op_size;
		}
	}

	return CELL_OK;
}
| 3,120
|
C++
|
.cpp
| 101
| 28.227723
| 124
| 0.685514
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,338
|
sys_usbd.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_usbd.cpp
|
#include "stdafx.h"
#include "sys_usbd.h"
#include "sys_ppu_thread.h"
#include "sys_sync.h"
#include <queue>
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/Memory/vm.h"
#include "Emu/IdManager.h"
#include "Emu/vfs_config.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Io/usb_device.h"
#include "Emu/Io/usb_vfs.h"
#include "Emu/Io/Skylander.h"
#include "Emu/Io/Infinity.h"
#include "Emu/Io/Dimensions.h"
#include "Emu/Io/GHLtar.h"
#include "Emu/Io/ghltar_config.h"
#include "Emu/Io/guncon3_config.h"
#include "Emu/Io/topshotelite_config.h"
#include "Emu/Io/topshotfearmaster_config.h"
#include "Emu/Io/Buzz.h"
#include "Emu/Io/buzz_config.h"
#include "Emu/Io/GameTablet.h"
#include "Emu/Io/GunCon3.h"
#include "Emu/Io/TopShotElite.h"
#include "Emu/Io/TopShotFearmaster.h"
#include "Emu/Io/Turntable.h"
#include "Emu/Io/turntable_config.h"
#include "Emu/Io/RB3MidiKeyboard.h"
#include "Emu/Io/RB3MidiGuitar.h"
#include "Emu/Io/RB3MidiDrums.h"
#include "Emu/Io/rb3drums_config.h"
#include "Emu/Io/usio.h"
#include "Emu/Io/usio_config.h"
#include "Emu/Io/midi_config_types.h"
#include <libusb.h>
LOG_CHANNEL(sys_usbd);
cfg_buzz g_cfg_buzz;
cfg_ghltars g_cfg_ghltar;
cfg_turntables g_cfg_turntable;
cfg_usios g_cfg_usio;
cfg_guncon3 g_cfg_guncon3;
cfg_topshotelite g_cfg_topshotelite;
cfg_topshotfearmaster g_cfg_topshotfearmaster;
// Log formatter for libusb_transfer: prints status, actual length, and the
// payload as hex (skipping the 8-byte setup packet for control transfers).
template <>
void fmt_class_string<libusb_transfer>::format(std::string& out, u64 arg)
{
	const auto& transfer = get_object(arg);

	// Control transfers carry the setup packet at the start of the buffer
	const int data_start = transfer.type == LIBUSB_TRANSFER_TYPE_CONTROL ? LIBUSB_CONTROL_SETUP_SIZE : 0;

	fmt::append(out, "TR[r:%d][sz:%d] => %s", +transfer.status, transfer.actual_length, fmt::buf_to_hexstring(&transfer.buffer[data_start], transfer.actual_length));
}
// Logical device driver registration: matches devices by vendor id and an
// inclusive product-id range.
struct UsbLdd
{
	u16 id_vendor{};
	u16 id_product_min{};
	u16 id_product_max{};
};
// An open transfer pipe: a device plus the endpoint address used for transfers.
struct UsbPipe
{
	std::shared_ptr<usb_device> device = nullptr;
	u8 endpoint = 0;
};
// Central sys_usbd service thread: owns the libusb context, tracks connected
// (real and emulated) devices, open pipes, transfers and pending usbd events.
class usb_handler_thread
{
public:
	usb_handler_thread();
	~usb_handler_thread();

	SAVESTATE_INIT_POS(14);

	// Savestate restore: delegates to the default ctor, then restores init state
	usb_handler_thread(utils::serial& ar) : usb_handler_thread()
	{
		is_init = !!ar.pop<u8>();
	}

	void save(utils::serial& ar)
	{
		ar(u8{is_init.load()});
	}

	// Thread loop
	void operator()();

	// Called by the libusb callback function to notify transfer completion
	void transfer_complete(libusb_transfer* transfer);

	// LDDs handling functions
	bool add_ldd(std::string_view product, u16 id_vendor, u16 id_product_min, u16 id_product_max);
	bool remove_ldd(std::string_view product);

	// Pipe functions
	u32 open_pipe(u32 device_handle, u8 endpoint);
	bool close_pipe(u32 pipe_id);
	bool is_pipe(u32 pipe_id) const;
	const UsbPipe& get_pipe(u32 pipe_id) const;

	// Events related functions
	bool get_event(vm::ptr<u64>& arg1, vm::ptr<u64>& arg2, vm::ptr<u64>& arg3);
	void add_event(u64 arg1, u64 arg2, u64 arg3);

	// Transfers related functions
	std::pair<u32, UsbTransfer&> get_free_transfer();
	std::pair<u32, u32> get_transfer_status(u32 transfer_id);
	std::pair<u32, UsbDeviceIsoRequest> get_isochronous_transfer_status(u32 transfer_id);
	void push_fake_transfer(UsbTransfer* transfer);

	const std::array<u8, 7>& get_new_location();
	void connect_usb_device(std::shared_ptr<usb_device> dev, bool update_usb_devices = false);
	void disconnect_usb_device(std::shared_ptr<usb_device> dev, bool update_usb_devices = false);

	// Map of devices actively handled by the ps3(device_id, device)
	std::map<u32, std::pair<UsbInternalDevice, std::shared_ptr<usb_device>>> handled_devices;
	std::map<u8, std::pair<input::product_type, std::shared_ptr<usb_device>>> pad_to_usb;

	shared_mutex mutex;
	atomic_t<bool> is_init = false;

	// sys_usbd_receive_event PPU Threads
	shared_mutex mutex_sq;
	ppu_thread* sq{};

	static constexpr auto thread_name = "Usb Manager Thread"sv;

private:
	// Lock free functions for internal use(ie make sure to lock before using those)
	UsbTransfer& get_transfer(u32 transfer_id);
	u32 get_free_transfer_id();
	void send_message(u32 message, u32 tr_id);

private:
	// Counters for device IDs, transfer IDs and pipe IDs
	atomic_t<u8> dev_counter = 1;
	u32 transfer_counter = 0;
	u32 pipe_counter = 0x10; // Start at 0x10 only for tracing purposes

	// List of device drivers
	std::unordered_map<std::string, UsbLdd, fmt::string_hash, std::equal_to<>> ldds;

	// List of pipes
	std::map<u32, UsbPipe> open_pipes;

	// Transfers infos
	shared_mutex mutex_transfers;
	std::array<UsbTransfer, MAX_SYS_USBD_TRANSFERS> transfers;
	std::vector<UsbTransfer*> fake_transfers;

	// Queue of pending usbd events
	std::queue<std::tuple<u64, u64, u64>> usbd_events;

	// List of devices "connected" to the ps3
	std::array<u8, 7> location{};
	std::vector<std::shared_ptr<usb_device>> usb_devices;

	libusb_context* ctx = nullptr;
};
// libusb completion callback: forwards finished transfers to the usb handler
// thread, ignoring callbacks that arrive before the handler is initialized.
void LIBUSB_CALL callback_transfer(struct libusb_transfer* transfer)
{
	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();

	if (usbh.is_init)
	{
		usbh.transfer_complete(transfer);
	}
}
#if LIBUSB_API_VERSION >= 0x0100010A
// libusb log callback: routes libusb's messages into our sys_usbd log channel
// at the matching severity.
static void LIBUSB_CALL log_cb(libusb_context* /*ctx*/, enum libusb_log_level level, const char* str)
{
	if (!str)
		return;

	// libusb messages come with surrounding whitespace/newlines
	const std::string msg = fmt::trim(str, " \t\n");

	switch (level)
	{
	case LIBUSB_LOG_LEVEL_ERROR:
		sys_usbd.error("libusb log: %s", msg);
		break;
	case LIBUSB_LOG_LEVEL_WARNING:
		sys_usbd.warning("libusb log: %s", msg);
		break;
	case LIBUSB_LOG_LEVEL_INFO:
		sys_usbd.notice("libusb log: %s", msg);
		break;
	case LIBUSB_LOG_LEVEL_DEBUG:
		sys_usbd.trace("libusb log: %s", msg);
		break;
	default:
		// LIBUSB_LOG_LEVEL_NONE and future levels: drop silently
		break;
	}
}
#endif
usb_handler_thread::usb_handler_thread()
{
#if LIBUSB_API_VERSION >= 0x0100010A
libusb_init_option log_lv_opt{};
log_lv_opt.option = LIBUSB_OPTION_LOG_LEVEL;
log_lv_opt.value.ival = LIBUSB_LOG_LEVEL_WARNING;// You can also set the LIBUSB_DEBUG env variable instead
libusb_init_option log_cb_opt{};
log_cb_opt.option = LIBUSB_OPTION_LOG_CB;
log_cb_opt.value.log_cbval = &log_cb;
std::vector<libusb_init_option> options = {
std::move(log_lv_opt),
std::move(log_cb_opt)
};
if (int res = libusb_init_context(&ctx, options.data(), static_cast<int>(options.size())); res < 0)
#else
if (int res = libusb_init(&ctx); res < 0)
#endif
{
sys_usbd.error("Failed to initialize sys_usbd: %s", libusb_error_name(res));
return;
}
for (u32 index = 0; index < MAX_SYS_USBD_TRANSFERS; index++)
{
transfers[index].transfer = libusb_alloc_transfer(8);
transfers[index].transfer_id = index;
}
// look if any device which we could be interested in is actually connected
libusb_device** list = nullptr;
ssize_t ndev = libusb_get_device_list(ctx, &list);
if (ndev < 0)
{
sys_usbd.error("Failed to get device list: %s", libusb_error_name(static_cast<s32>(ndev)));
return;
}
bool found_skylander = false;
bool found_infinity = false;
bool found_dimension = false;
bool found_usj = false;
for (ssize_t index = 0; index < ndev; index++)
{
libusb_device_descriptor desc;
if (int res = libusb_get_device_descriptor(list[index], &desc); res < 0)
{
sys_usbd.error("Failed to get device descriptor: %s", libusb_error_name(res));
continue;
}
auto check_device = [&](const u16 id_vendor, const u16 id_product_min, const u16 id_product_max, const char* s_name) -> bool
{
if (desc.idVendor == id_vendor && desc.idProduct >= id_product_min && desc.idProduct <= id_product_max)
{
sys_usbd.success("Found device: %s", s_name);
libusb_ref_device(list[index]);
std::shared_ptr<usb_device_passthrough> usb_dev = std::make_shared<usb_device_passthrough>(list[index], desc, get_new_location());
usb_devices.push_back(usb_dev);
return true;
}
return false;
};
// Portals
if (check_device(0x1430, 0x0150, 0x0150, "Skylanders Portal"))
{
found_skylander = true;
}
if (check_device(0x0E6F, 0x0129, 0x0129, "Disney Infinity Base"))
{
found_infinity = true;
}
if (check_device(0x0E6F, 0x0241, 0x0241, "Lego Dimensions Portal"))
{
found_dimension = true;
}
check_device(0x0E6F, 0x200A, 0x200A, "Kamen Rider Summonride Portal");
// Cameras
// check_device(0x1415, 0x0020, 0x2000, "Sony Playstation Eye"); // TODO: verifiy
// Music devices
check_device(0x1415, 0x0000, 0x0000, "SingStar Microphone");
// check_device(0x1415, 0x0020, 0x0020, "SingStar Microphone Wireless"); // TODO: verifiy
check_device(0x12BA, 0x0100, 0x0100, "Guitar Hero Guitar");
check_device(0x12BA, 0x0120, 0x0120, "Guitar Hero Drums");
check_device(0x12BA, 0x074B, 0x074B, "Guitar Hero Live Guitar");
check_device(0x12BA, 0x0140, 0x0140, "DJ Hero Turntable");
check_device(0x12BA, 0x0200, 0x020F, "Harmonix Guitar");
check_device(0x12BA, 0x0210, 0x021F, "Harmonix Drums");
check_device(0x12BA, 0x2330, 0x233F, "Harmonix Keyboard");
check_device(0x12BA, 0x2430, 0x243F, "Harmonix Button Guitar");
check_device(0x12BA, 0x2530, 0x253F, "Harmonix Real Guitar");
check_device(0x1BAD, 0x0004, 0x0004, "Harmonix RB1 Guitar - Wii");
check_device(0x1BAD, 0x0005, 0x0005, "Harmonix RB1 Drums - Wii");
check_device(0x1BAD, 0x3010, 0x301F, "Harmonix RB2 Guitar - Wii");
check_device(0x1BAD, 0x3110, 0x313F, "Harmonix RB2 Drums - Wii");
check_device(0x1BAD, 0x3330, 0x333F, "Harmonix Keyboard - Wii");
check_device(0x1BAD, 0x3430, 0x343F, "Harmonix Button Guitar - Wii");
check_device(0x1BAD, 0x3530, 0x353F, "Harmonix Real Guitar - Wii");
if (desc.idVendor == 0x1209 && desc.idProduct == 0x2882)
{
sys_usbd.success("Found device: Santroller");
// Send the device a specific control transfer so that it jumps to a RPCS3 compatible mode
libusb_device_handle* lusb_handle;
if (libusb_open(list[index], &lusb_handle) == LIBUSB_SUCCESS)
{
#ifdef __linux__
libusb_set_auto_detach_kernel_driver(lusb_handle, true);
libusb_claim_interface(lusb_handle, 2);
#endif
libusb_control_transfer(lusb_handle, +LIBUSB_ENDPOINT_IN | +LIBUSB_REQUEST_TYPE_CLASS | +LIBUSB_RECIPIENT_INTERFACE, 0x01, 0x03f2, 2, nullptr, 0, 5000);
libusb_close(lusb_handle);
}
else
{
sys_usbd.error("Unable to open Santroller device, make sure Santroller isn't open in the background.");
}
}
// Top Shot Elite controllers
check_device(0x12BA, 0x04A0, 0x04A0, "Top Shot Elite");
check_device(0x12BA, 0x04A1, 0x04A1, "Top Shot Fearmaster");
check_device(0x12BA, 0x04B0, 0x04B0, "Rapala Fishing Rod");
// GT5 Wheels&co
check_device(0x046D, 0xC283, 0xC29B, "lgFF_c283_c29b");
check_device(0x044F, 0xB653, 0xB653, "Thrustmaster RGT FFB Pro");
check_device(0x044F, 0xB65A, 0xB65A, "Thrustmaster F430");
check_device(0x044F, 0xB65D, 0xB65D, "Thrustmaster FFB");
check_device(0x044F, 0xB65E, 0xB65E, "Thrustmaster TRS");
check_device(0x044F, 0xB660, 0xB660, "Thrustmaster T500 RS Gear Shift");
// GT6
check_device(0x2833, 0x0001, 0x0001, "Oculus");
check_device(0x046D, 0xCA03, 0xCA03, "lgFF_ca03_ca03");
// Buzz controllers
check_device(0x054C, 0x1000, 0x1040, "buzzer0");
check_device(0x054C, 0x0001, 0x0041, "buzzer1");
check_device(0x054C, 0x0042, 0x0042, "buzzer2");
check_device(0x046D, 0xC220, 0xC220, "buzzer9");
// GunCon3 Gun
check_device(0x0B9A, 0x0800, 0x0800, "GunCon3");
// uDraw GameTablet
check_device(0x20D6, 0xCB17, 0xCB17, "uDraw GameTablet");
// DVB-T
check_device(0x1415, 0x0003, 0x0003, "PlayTV SCEH-0036");
// 0x0900: "H050 USJ(C) PCB rev00", 0x0910: "USIO PCB rev00"
if (check_device(0x0B9A, 0x0900, 0x0910, "PS3A-USJ"))
{
found_usj = true;
}
// Densha de GO! controller
check_device(0x0AE4, 0x0004, 0x0004, "Densha de GO! Type 2 Controller");
// EA Active 2 dongle for connecting wristbands & legband
check_device(0x21A4, 0xAC27, 0xAC27, "EA Active 2 Dongle");
// Tony Hawk RIDE Skateboard
check_device(0x12BA, 0x0400, 0x0400, "Tony Hawk RIDE Skateboard Controller");
// PSP in UsbPspCm mode
check_device(0x054C, 0x01CB, 0x01CB, "UsbPspcm");
}
libusb_free_device_list(list, 1);
for (int i = 0; i < 8; i++) // Add VFS USB mass storage devices (/dev_usbXXX) to the USB device list
{
const auto usb_info = g_cfg_vfs.get_device(g_cfg_vfs.dev_usb, fmt::format("/dev_usb%03d", i));
if (fs::is_dir(usb_info.path))
usb_devices.push_back(std::make_shared<usb_device_vfs>(usb_info, get_new_location()));
}
if (!found_skylander)
{
sys_usbd.notice("Adding emulated skylander");
usb_devices.push_back(std::make_shared<usb_device_skylander>(get_new_location()));
}
if (!found_infinity)
{
sys_usbd.notice("Adding emulated infinity base");
usb_devices.push_back(std::make_shared<usb_device_infinity>(get_new_location()));
}
if (!found_dimension)
{
sys_usbd.notice("Adding emulated dimension toypad");
usb_devices.push_back(std::make_shared<usb_device_dimensions>(get_new_location()));
}
if (!found_usj)
{
if (!g_cfg_usio.load())
{
sys_usbd.notice("Could not load usio config. Using defaults.");
}
sys_usbd.notice("Adding emulated USIO");
usb_devices.push_back(std::make_shared<usb_device_usio>(get_new_location()));
sys_usbd.notice("USIO config=\n", g_cfg_usio.to_string());
}
const std::vector<std::string> devices_list = fmt::split(g_cfg.io.midi_devices.to_string(), { "@@@" });
for (usz index = 0; index < std::min(max_midi_devices, devices_list.size()); index++)
{
const midi_device device = midi_device::from_string(::at32(devices_list, index));
if (device.name.empty()) continue;
sys_usbd.notice("Adding Emulated Midi Pro Adapter (type=%s, name=%s)", device.type, device.name);
switch (device.type)
{
case midi_device_type::guitar:
usb_devices.push_back(std::make_shared<usb_device_rb3_midi_guitar>(get_new_location(), device.name, false));
break;
case midi_device_type::guitar_22fret:
usb_devices.push_back(std::make_shared<usb_device_rb3_midi_guitar>(get_new_location(), device.name, true));
break;
case midi_device_type::keyboard:
usb_devices.push_back(std::make_shared<usb_device_rb3_midi_keyboard>(get_new_location(), device.name));
break;
case midi_device_type::drums:
if (!g_cfg_rb3drums.load())
{
sys_usbd.notice("Could not load rb3drums config. Using defaults.");
}
usb_devices.push_back(std::make_shared<usb_device_rb3_midi_drums>(get_new_location(), device.name));
sys_usbd.notice("RB3 drums config=\n", g_cfg_rb3drums.to_string());
break;
}
}
if (g_cfg.io.ghltar == ghltar_handler::one_controller || g_cfg.io.ghltar == ghltar_handler::two_controllers)
{
if (!g_cfg_ghltar.load())
{
sys_usbd.notice("Could not load ghltar config. Using defaults.");
}
sys_usbd.notice("Adding emulated GHLtar (1 player)");
usb_devices.push_back(std::make_shared<usb_device_ghltar>(0, get_new_location()));
if (g_cfg.io.ghltar == ghltar_handler::two_controllers)
{
sys_usbd.notice("Adding emulated GHLtar (2 players)");
usb_devices.push_back(std::make_shared<usb_device_ghltar>(1, get_new_location()));
}
sys_usbd.notice("Ghltar config=\n", g_cfg_ghltar.to_string());
}
if (g_cfg.io.turntable == turntable_handler::one_controller || g_cfg.io.turntable == turntable_handler::two_controllers)
{
if (!g_cfg_turntable.load())
{
sys_usbd.notice("Could not load turntable config. Using defaults.");
}
sys_usbd.notice("Adding emulated turntable (1 player)");
usb_devices.push_back(std::make_shared<usb_device_turntable>(0, get_new_location()));
if (g_cfg.io.turntable == turntable_handler::two_controllers)
{
sys_usbd.notice("Adding emulated turntable (2 players)");
usb_devices.push_back(std::make_shared<usb_device_turntable>(1, get_new_location()));
}
sys_usbd.notice("Turntable config=\n", g_cfg_turntable.to_string());
}
if (g_cfg.io.buzz == buzz_handler::one_controller || g_cfg.io.buzz == buzz_handler::two_controllers)
{
if (!g_cfg_buzz.load())
{
sys_usbd.notice("Could not load buzz config. Using defaults.");
}
sys_usbd.notice("Adding emulated Buzz! buzzer (1-4 players)");
usb_devices.push_back(std::make_shared<usb_device_buzz>(0, 3, get_new_location()));
if (g_cfg.io.buzz == buzz_handler::two_controllers)
{
// The current buzz emulation piggybacks on the pad input.
// Since there can only be 7 pads connected on a PS3 the 8th player is currently not supported
sys_usbd.notice("Adding emulated Buzz! buzzer (5-7 players)");
usb_devices.push_back(std::make_shared<usb_device_buzz>(4, 6, get_new_location()));
}
sys_usbd.notice("Buzz config=\n", g_cfg_buzz.to_string());
}
}
usb_handler_thread::~usb_handler_thread()
{
	// Ensures shared_ptr<usb_device> are all cleared before terminating libusb
	handled_devices.clear();
	open_pipes.clear();
	usb_devices.clear();

	// Free every libusb transfer object that was allocated for a slot
	for (u32 index = 0; index < MAX_SYS_USBD_TRANSFERS; index++)
	{
		if (transfers[index].transfer)
			libusb_free_transfer(transfers[index].transfer);
	}

	// ctx is only valid if libusb initialization succeeded earlier
	if (ctx)
		libusb_exit(ctx);
}
void usb_handler_thread::operator()()
{
	// Zero timeout: poll libusb without blocking so the loop stays responsive
	timeval lusb_tv{0, 0};

	while (ctx && thread_ctrl::state() != thread_state::aborting)
	{
		// Todo: Hotplug here?

		// Process asynchronous requests that are pending
		libusb_handle_events_timeout_completed(ctx, &lusb_tv, nullptr);

		// Process fake transfers (emulated devices complete transfers at a scheduled time
		// instead of through libusb callbacks)
		if (!fake_transfers.empty())
		{
			std::lock_guard lock_tf(mutex_transfers);

			// Deadlines are compared against emulated time, i.e. pause time is excluded
			u64 timestamp = get_system_time() - Emu.GetPauseTime();

			for (auto it = fake_transfers.begin(); it != fake_transfers.end();)
			{
				auto transfer = *it;

				ensure(transfer->busy && transfer->fake);

				// Not due yet: keep it queued for a later pass
				if (transfer->expected_time > timestamp)
				{
					++it;
					continue;
				}

				// Publish the prearranged result and mark the slot reusable
				transfer->result = transfer->expected_result;
				transfer->count = transfer->expected_count;
				transfer->fake = false;
				transfer->busy = false;

				send_message(SYS_USBD_TRANSFER_COMPLETE, transfer->transfer_id);

				it = fake_transfers.erase(it); // if we've processed this, then we erase this entry (replacing the iterator with the new reference)
			}
		}

		// If there is no handled devices usb thread is not actively needed
		if (handled_devices.empty())
			thread_ctrl::wait_for(500'000);
		else
			thread_ctrl::wait_for(1'000);
	}
}
// Convenience wrapper: forward a message as a 2-argument event (third event arg unused)
void usb_handler_thread::send_message(u32 message, u32 tr_id)
{
	add_event(message, tr_id, 0x00);
}
// libusb completion callback: translate the libusb result into the EHCI-style codes
// the guest expects, then notify the guest via SYS_USBD_TRANSFER_COMPLETE.
void usb_handler_thread::transfer_complete(struct libusb_transfer* transfer)
{
	std::lock_guard lock_tf(mutex_transfers);

	UsbTransfer* usbd_transfer = static_cast<UsbTransfer*>(transfer->user_data);

	if (transfer->status != 0)
	{
		sys_usbd.error("Transfer Error: %d", +transfer->status);
	}

	// Map libusb transfer status to host-controller completion codes
	switch (transfer->status)
	{
	case LIBUSB_TRANSFER_COMPLETED: usbd_transfer->result = HC_CC_NOERR; break;
	case LIBUSB_TRANSFER_TIMED_OUT: usbd_transfer->result = EHCI_CC_XACT; break;
	case LIBUSB_TRANSFER_OVERFLOW: usbd_transfer->result = EHCI_CC_BABBLE; break;
	case LIBUSB_TRANSFER_NO_DEVICE:
		usbd_transfer->result = EHCI_CC_HALTED;
		// The physical device is gone: detach the matching emulated handle as well
		for (const auto& dev : usb_devices)
		{
			if (dev->assigned_number == usbd_transfer->assigned_number)
			{
				disconnect_usb_device(dev, true);
				break;
			}
		}
		break;
	case LIBUSB_TRANSFER_ERROR:
	case LIBUSB_TRANSFER_CANCELLED:
	case LIBUSB_TRANSFER_STALL:
	default:
		usbd_transfer->result = EHCI_CC_HALTED;
		break;
	}

	usbd_transfer->count = transfer->actual_length;

	// Per-packet isochronous results: pack status (high nibble area) and
	// actual length (low 12 bits) into one u16 per packet
	for (s32 index = 0; index < transfer->num_iso_packets; index++)
	{
		u8 iso_status;

		switch (transfer->iso_packet_desc[index].status)
		{
		case LIBUSB_TRANSFER_COMPLETED: iso_status = USBD_HC_CC_NOERR; break;
		case LIBUSB_TRANSFER_TIMED_OUT: iso_status = USBD_HC_CC_XACT; break;
		case LIBUSB_TRANSFER_OVERFLOW: iso_status = USBD_HC_CC_BABBLE; break;
		case LIBUSB_TRANSFER_ERROR:
		case LIBUSB_TRANSFER_CANCELLED:
		case LIBUSB_TRANSFER_STALL:
		case LIBUSB_TRANSFER_NO_DEVICE:
		default: iso_status = USBD_HC_CC_MISSMF; break;
		}

		usbd_transfer->iso_request.packets[index] = ((iso_status & 0xF) << 12 | (transfer->iso_packet_desc[index].actual_length & 0xFFF));
	}

	// For IN control transfers the payload sits after the 8-byte setup packet;
	// copy it back to the guest-visible buffer
	if (transfer->type == LIBUSB_TRANSFER_TYPE_CONTROL && usbd_transfer->control_destbuf)
	{
		memcpy(usbd_transfer->control_destbuf, transfer->buffer + LIBUSB_CONTROL_SETUP_SIZE, transfer->actual_length);
		usbd_transfer->control_destbuf = nullptr;
	}

	usbd_transfer->busy = false;

	send_message(SYS_USBD_TRANSFER_COMPLETE, usbd_transfer->transfer_id);

	sys_usbd.trace("Transfer complete(0x%x): %s", usbd_transfer->transfer_id, *transfer);
}
// Register a logical device driver under its product name and immediately attach
// any known, unassigned devices whose VID/PID falls into the registered range.
// Returns false if an LDD with this product name already exists.
bool usb_handler_thread::add_ldd(std::string_view product, u16 id_vendor, u16 id_product_min, u16 id_product_max)
{
	const auto [iter, inserted] = ldds.try_emplace(std::string(product), UsbLdd{id_vendor, id_product_min, id_product_max});

	if (!inserted)
		return false;

	for (const auto& dev : usb_devices)
	{
		// Skip devices that already belong to another LDD
		if (dev->assigned_number)
			continue;

		const auto& desc = dev->device._device;

		if (desc.idVendor == id_vendor && desc.idProduct >= id_product_min && desc.idProduct <= id_product_max)
		{
			connect_usb_device(dev);
		}
	}

	return true;
}
bool usb_handler_thread::remove_ldd(std::string_view product)
{
if (const auto iterator = ldds.find(product); iterator != ldds.end())
{
for (const auto& dev : usb_devices)
{
if (!dev->assigned_number)
continue;
if (dev->device._device.idVendor == iterator->second.id_vendor && dev->device._device.idProduct >= iterator->second.id_product_min && dev->device._device.idProduct <= iterator->second.id_product_max)
{
disconnect_usb_device(dev);
}
}
ldds.erase(iterator);
return true;
}
return false;
}
// Allocate the next pipe id and remember which device/endpoint it maps to.
u32 usb_handler_thread::open_pipe(u32 device_handle, u8 endpoint)
{
	const u32 pipe_id = pipe_counter++;
	open_pipes.emplace(pipe_id, UsbPipe{handled_devices[device_handle].second, endpoint});
	return pipe_id;
}
// Close a pipe; erase() reports how many entries were removed, so a
// non-zero count means the pipe existed.
bool usb_handler_thread::close_pipe(u32 pipe_id)
{
	return open_pipes.erase(pipe_id) > 0;
}
// Membership test: does a pipe with this id currently exist?
bool usb_handler_thread::is_pipe(u32 pipe_id) const
{
	return open_pipes.find(pipe_id) != open_pipes.end();
}
// Look up a pipe by id; ::at32 aborts on an unknown id, so callers must
// validate with is_pipe() first.
const UsbPipe& usb_handler_thread::get_pipe(u32 pipe_id) const
{
	return ::at32(open_pipes, pipe_id);
}
// Pop the oldest queued usbd event, writing its three payload words to the
// guest-visible pointers. Returns false when no event is pending.
bool usb_handler_thread::get_event(vm::ptr<u64>& arg1, vm::ptr<u64>& arg2, vm::ptr<u64>& arg3)
{
	if (usbd_events.empty())
		return false;

	const auto [ev1, ev2, ev3] = usbd_events.front();
	usbd_events.pop();

	*arg1 = ev1;
	*arg2 = ev2;
	*arg3 = ev3;

	sys_usbd.trace("Received event: arg1=0x%x arg2=0x%x arg3=0x%x", *arg1, *arg2, *arg3);
	return true;
}
// Deliver a usbd event: if a PPU thread is blocked in sys_usbd_receive_event,
// hand the payload to it directly via its GPRs and wake it; otherwise queue the
// event for a later get_event() call.
void usb_handler_thread::add_event(u64 arg1, u64 arg2, u64 arg3)
{
	// sys_usbd events use an internal event queue with SYS_SYNC_PRIORITY protocol
	std::lock_guard lock_sq(mutex_sq);

	if (const auto cpu = lv2_obj::schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY))
	{
		// A waiter exists: pass the event through its registers (see receive_event)
		sys_usbd.trace("Sending event(queue): arg1=0x%x arg2=0x%x arg3=0x%x", arg1, arg2, arg3);
		cpu->gpr[4] = arg1;
		cpu->gpr[5] = arg2;
		cpu->gpr[6] = arg3;
		lv2_obj::awake(cpu);
	}
	else
	{
		// No waiter: buffer the event until it is polled
		sys_usbd.trace("Sending event: arg1=0x%x arg2=0x%x arg3=0x%x", arg1, arg2, arg3);
		usbd_events.emplace(arg1, arg2, arg3);
	}
}
// Scan the ring of transfer slots (starting after the last handed-out id)
// for one that is not busy. Caller must hold mutex_transfers.
u32 usb_handler_thread::get_free_transfer_id()
{
	u32 attempts = 0;

	while (true)
	{
		attempts++;
		transfer_counter++;

		// Wrap around the fixed-size slot array
		if (transfer_counter >= MAX_SYS_USBD_TRANSFERS)
		{
			transfer_counter = 0;
		}

		// A full pass without a free slot: keep spinning, but log loudly
		if (attempts > MAX_SYS_USBD_TRANSFERS)
		{
			sys_usbd.fatal("Usb transfers are saturated!");
		}

		if (!transfers[transfer_counter].busy)
			return transfer_counter;
	}
}
// Direct slot access; transfer_id must come from get_free_transfer_id()
// (no bounds check is performed here).
UsbTransfer& usb_handler_thread::get_transfer(u32 transfer_id)
{
	return transfers[transfer_id];
}
std::pair<u32, UsbTransfer&> usb_handler_thread::get_free_transfer()
{
std::lock_guard lock_tf(mutex_transfers);
u32 transfer_id = get_free_transfer_id();
auto& transfer = get_transfer(transfer_id);
transfer.busy = true;
return {transfer_id, transfer};
}
// Snapshot a transfer's (result, byte count) under the transfer lock.
std::pair<u32, u32> usb_handler_thread::get_transfer_status(u32 transfer_id)
{
	std::lock_guard lock_tf(mutex_transfers);

	const UsbTransfer& transfer = get_transfer(transfer_id);
	return std::pair<u32, u32>(transfer.result, transfer.count);
}
// Snapshot a transfer's (result, iso request block) under the transfer lock.
std::pair<u32, UsbDeviceIsoRequest> usb_handler_thread::get_isochronous_transfer_status(u32 transfer_id)
{
	std::lock_guard lock_tf(mutex_transfers);

	const UsbTransfer& transfer = get_transfer(transfer_id);
	return std::pair<u32, UsbDeviceIsoRequest>(transfer.result, transfer.iso_request);
}
// Queue a transfer for completion by the usb thread's fake-transfer pass.
void usb_handler_thread::push_fake_transfer(UsbTransfer* transfer)
{
	std::lock_guard lock(mutex_transfers);
	fake_transfers.emplace_back(transfer);
}
// Hand out a distinct bus location for each emulated device by bumping the
// first byte. NOTE(review): wraps after 256 calls (u8 overflow) - presumably
// never reached in practice; confirm.
const std::array<u8, 7>& usb_handler_thread::get_new_location()
{
	location[0]++;
	return location;
}
// Try to attach a device to a registered LDD: on a VID/PID match the device is
// opened, its descriptors are read, a handle is assigned and the guest is
// notified via SYS_USBD_ATTACH.
void usb_handler_thread::connect_usb_device(std::shared_ptr<usb_device> dev, bool update_usb_devices)
{
	if (update_usb_devices)
		usb_devices.push_back(dev);

	for (const auto& [name, ldd] : ldds)
	{
		if (dev->device._device.idVendor == ldd.id_vendor && dev->device._device.idProduct >= ldd.id_product_min && dev->device._device.idProduct <= ldd.id_product_max)
		{
			if (!dev->open_device())
			{
				sys_usbd.error("Failed to open USB device(VID=0x%04x, PID=0x%04x) for LDD <%s>", dev->device._device.idVendor, dev->device._device.idProduct, name);
				return;
			}

			dev->read_descriptors();
			dev->assigned_number = dev_counter++; // assign current dev_counter, and atomically increment

			handled_devices.emplace(dev->assigned_number, std::pair(UsbInternalDevice{0x00, narrow<u8>(dev->assigned_number), 0x02, 0x40}, dev));
			send_message(SYS_USBD_ATTACH, dev->assigned_number);
			sys_usbd.success("USB device(VID=0x%04x, PID=0x%04x) matches up with LDD <%s>, assigned as handled_device=0x%x", dev->device._device.idVendor, dev->device._device.idProduct, name, dev->assigned_number);

			// NOTE(review): the loop keeps scanning the remaining LDDs after a successful
			// match; a device matching overlapping ranges would be re-opened and
			// re-assigned - presumably LDD ranges never overlap, confirm.
		}
	}
}
// Detach a device: release its guest handle (notifying via SYS_USBD_DETACH)
// and optionally drop it from the known-device list entirely.
void usb_handler_thread::disconnect_usb_device(std::shared_ptr<usb_device> dev, bool update_usb_devices)
{
	if (dev->assigned_number && handled_devices.erase(dev->assigned_number))
	{
		send_message(SYS_USBD_DETACH, dev->assigned_number);
		sys_usbd.success("USB device(VID=0x%04x, PID=0x%04x) unassigned, handled_device=0x%x", dev->device._device.idVendor, dev->device._device.idProduct, dev->assigned_number);
		// Clear the handle so the device can be re-assigned later
		dev->assigned_number = 0;
	}

	if (update_usb_devices)
		usb_devices.erase(find(usb_devices.begin(), usb_devices.end(), dev));
}
// Attach (or re-attach) an emulated USB peripheral to the given pad slot.
// If the slot already carries a device of the same type nothing happens;
// a device of a different type is disconnected first and replaced.
void connect_usb_controller(u8 index, input::product_type type)
{
	// The USB handler may not exist (yet); nothing to do then
	auto usbh = g_fxo->try_get<named_thread<usb_handler_thread>>();
	if (!usbh)
	{
		return;
	}

	bool already_connected = false;

	if (const auto it = usbh->pad_to_usb.find(index); it != usbh->pad_to_usb.end())
	{
		if (it->second.first == type)
		{
			already_connected = true;
		}
		else
		{
			// Different device type on this slot: tear it down before attaching the new one
			usbh->disconnect_usb_device(it->second.second, true);
			usbh->pad_to_usb.erase(it); // erase via iterator: avoids a second lookup by key
		}
	}

	if (!already_connected)
	{
		switch (type)
		{
		case input::product_type::guncon_3:
		{
			if (!g_cfg_guncon3.load())
			{
				sys_usbd.notice("Could not load GunCon3 config. Using defaults.");
			}
			sys_usbd.success("Adding emulated GunCon3 (controller %d)", index);
			std::shared_ptr<usb_device> dev = std::make_shared<usb_device_guncon3>(index, usbh->get_new_location());
			usbh->connect_usb_device(dev, true);
			usbh->pad_to_usb.emplace(index, std::pair(type, dev));
			// Fixed: the format string lacked "%s", so the config dump was never printed
			sys_usbd.notice("GunCon3 config=\n%s", g_cfg_guncon3.to_string());
			break;
		}
		case input::product_type::top_shot_elite:
		{
			if (!g_cfg_topshotelite.load())
			{
				sys_usbd.notice("Could not load Top Shot Elite config. Using defaults.");
			}
			sys_usbd.success("Adding emulated Top Shot Elite (controller %d)", index);
			std::shared_ptr<usb_device> dev = std::make_shared<usb_device_topshotelite>(index, usbh->get_new_location());
			usbh->connect_usb_device(dev, true);
			usbh->pad_to_usb.emplace(index, std::pair(type, dev));
			// Fixed: the format string lacked "%s", so the config dump was never printed
			sys_usbd.notice("Top shot elite config=\n%s", g_cfg_topshotelite.to_string());
			break;
		}
		case input::product_type::top_shot_fearmaster:
		{
			if (!g_cfg_topshotfearmaster.load())
			{
				sys_usbd.notice("Could not load Top Shot Fearmaster config. Using defaults.");
			}
			sys_usbd.success("Adding emulated Top Shot Fearmaster (controller %d)", index);
			std::shared_ptr<usb_device> dev = std::make_shared<usb_device_topshotfearmaster>(index, usbh->get_new_location());
			usbh->connect_usb_device(dev, true);
			usbh->pad_to_usb.emplace(index, std::pair(type, dev));
			// Fixed: the format string lacked "%s", so the config dump was never printed
			sys_usbd.notice("Top shot fearmaster config=\n%s", g_cfg_topshotfearmaster.to_string());
			break;
		}
		case input::product_type::udraw_gametablet:
		{
			sys_usbd.success("Adding emulated uDraw GameTablet (controller %d)", index);
			std::shared_ptr<usb_device> dev = std::make_shared<usb_device_gametablet>(index, usbh->get_new_location());
			usbh->connect_usb_device(dev, true);
			usbh->pad_to_usb.emplace(index, std::pair(type, dev));
			break;
		}
		default:
			break;
		}
	}
}
// lv2 syscall: initialize the usbd subsystem and return a handle to the guest.
error_code sys_usbd_initialize(ppu_thread& ppu, vm::ptr<u32> handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.warning("sys_usbd_initialize(handle=*0x%x)", handle);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();

	{
		std::lock_guard lock(usbh.mutex);

		// Must not occur (lv2 allows multiple handles, cellUsbd does not)
		ensure(!usbh.is_init.exchange(true));
	}

	ppu.check_state();

	// Fixed handle value; the emulation only supports one usbd instance
	*handle = 0x115B;

	// TODO
	return CELL_OK;
}
// lv2 syscall: shut the usbd subsystem down and release any threads blocked
// in sys_usbd_receive_event.
error_code sys_usbd_finalize(ppu_thread& ppu, u32 handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.warning("sys_usbd_finalize(handle=0x%x)", handle);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);
	usbh.is_init = false;

	// Forcefully awake all waiters
	while (auto cpu = lv2_obj::schedule<ppu_thread>(usbh.sq, SYS_SYNC_FIFO))
	{
		// Special termination signal value (interpreted by libusbd_callback_thread)
		cpu->gpr[4] = 4;
		cpu->gpr[5] = 0;
		cpu->gpr[6] = 0;
		lv2_obj::awake(cpu);
	}

	// TODO
	return CELL_OK;
}
// lv2 syscall: copy up to max_devices descriptors of currently handled devices
// into the guest buffer; returns the number copied.
error_code sys_usbd_get_device_list(ppu_thread& ppu, u32 handle, vm::ptr<UsbInternalDevice> device_list, u32 max_devices)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.warning("sys_usbd_get_device_list(handle=0x%x, device_list=*0x%x, max_devices=0x%x)", handle, device_list, max_devices);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	// TODO: was std::min<s32>
	u32 i_tocopy = std::min<u32>(max_devices, ::size32(usbh.handled_devices));

	for (u32 index = 0; index < i_tocopy; index++)
	{
		// NOTE(review): handled_devices is keyed by assigned_number (which starts at 1,
		// see connect_usb_device), yet it is indexed here with positions 0..n-1 via
		// operator[] - this looks like it default-inserts empty entries and copies the
		// wrong elements. Confirm the container type and intended iteration.
		device_list[index] = usbh.handled_devices[index].first;
	}

	return not_an_error(i_tocopy);
}
// lv2 syscall: register an LDD for a VID/PID range; CELL_EEXIST if the
// product name is already registered.
error_code sys_usbd_register_extra_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product, u16 id_vendor, u16 id_product_min, u16 id_product_max)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_register_extra_ldd(handle=0x%x, s_product=%s, slen_product=%d, id_vendor=0x%04x, id_product_min=0x%04x, id_product_max=0x%04x)", handle, s_product, slen_product, id_vendor, id_product_min, id_product_max);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	const std::string_view product{s_product.get_ptr(), slen_product};

	// add_ldd refuses duplicate product names
	if (!usbh.add_ldd(product, id_vendor, id_product_min, id_product_max))
		return CELL_EEXIST;

	return CELL_OK;
}
// lv2 syscall: unregister an LDD by product name; CELL_ESRCH if unknown.
error_code sys_usbd_unregister_extra_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_unregister_extra_ldd(handle=0x%x, s_product=%s, slen_product=%d)", handle, s_product, slen_product);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	const std::string_view product{s_product.get_ptr(), slen_product};

	if (!usbh.remove_ldd(product))
		return CELL_ESRCH;

	return CELL_OK;
}
// lv2 syscall: return the total size of the device's descriptor data.
error_code sys_usbd_get_descriptor_size(ppu_thread& ppu, u32 handle, u32 device_handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_get_descriptor_size(handle=0x%x, deviceNumber=0x%x)", handle, device_handle);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	// Single find() instead of count() + operator[], which searched the map twice
	const auto found = usbh.handled_devices.find(device_handle);

	if (!usbh.is_init || found == usbh.handled_devices.end())
	{
		return CELL_EINVAL;
	}

	return not_an_error(found->second.second->device.get_size());
}
// lv2 syscall: copy up to desc_size bytes of the device's descriptor data
// into the guest buffer.
error_code sys_usbd_get_descriptor(ppu_thread& ppu, u32 handle, u32 device_handle, vm::ptr<void> descriptor, u32 desc_size)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_get_descriptor(handle=0x%x, deviceNumber=0x%x, descriptor=0x%x, desc_size=0x%x)", handle, device_handle, descriptor, desc_size);

	if (!descriptor)
	{
		return CELL_EINVAL;
	}

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	// Single find() instead of count() + operator[], which searched the map twice
	const auto found = usbh.handled_devices.find(device_handle);

	if (!usbh.is_init || found == usbh.handled_devices.end())
	{
		return CELL_EINVAL;
	}

	if (!desc_size)
	{
		return CELL_ENOMEM;
	}

	found->second.second->device.write_data(reinterpret_cast<u8*>(descriptor.get_ptr()), desc_size);

	return CELL_OK;
}
// This function is used for psp(cellUsbPspcm), ps3 arcade usj io(PS3A-USJ), ps2 cam(eyetoy), generic usb camera?(sample_usb2cam)
error_code sys_usbd_register_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product)
{
	ppu.state += cpu_flag::wait;

	std::string_view product{s_product.get_ptr(), slen_product};

	// slightly hacky way of getting Namco GCon3 gun to work.
	// The register_ldd appears to be a more promiscuous mode function, where all device 'inserts' would be presented to the cellUsbd for Probing.
	// Unsure how many more devices might need similar treatment (i.e. just a compare and force VID/PID add), or if it's worth adding a full promiscuous capability
	static const std::unordered_map<std::string, UsbLdd, fmt::string_hash, std::equal_to<>> predefined_ldds
	{
		{"cellUsbPspcm", {0x054C, 0x01CB, 0x01CB}},
		{"guncon3", {0x0B9A, 0x0800, 0x0800}},
		{"PS3A-USJ", {0x0B9A, 0x0900, 0x0910}}
	};

	// Known product names get their VID/PID range filled in and are forwarded
	if (const auto iterator = predefined_ldds.find(product); iterator != predefined_ldds.end())
	{
		sys_usbd.trace("sys_usbd_register_ldd(handle=0x%x, s_product=%s, slen_product=%d) -> Redirecting to sys_usbd_register_extra_ldd()", handle, s_product, slen_product);
		return sys_usbd_register_extra_ldd(ppu, handle, s_product, slen_product, iterator->second.id_vendor, iterator->second.id_product_min, iterator->second.id_product_max);
	}

	// Unknown products are accepted but ignored for now
	sys_usbd.todo("sys_usbd_register_ldd(handle=0x%x, s_product=%s, slen_product=%d)", handle, s_product, slen_product);
	return CELL_OK;
}
// Thin wrapper: unregistering an LDD is identical to unregistering an extra LDD.
error_code sys_usbd_unregister_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_unregister_ldd(handle=0x%x, s_product=%s, slen_product=%d) -> Redirecting to sys_usbd_unregister_extra_ldd()", handle, s_product, slen_product);
	return sys_usbd_unregister_extra_ldd(ppu, handle, s_product, slen_product);
}
// TODO: determine what the unknown params are
// attributes (bmAttributes) : 2=Bulk, 3=Interrupt
// lv2 syscall: open a pipe to the given endpoint of a handled device;
// returns the pipe id on success.
error_code sys_usbd_open_pipe(ppu_thread& ppu, u32 handle, u32 device_handle, u32 unk1, u64 unk2, u64 unk3, u32 endpoint, u64 attributes)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.warning("sys_usbd_open_pipe(handle=0x%x, device_handle=0x%x, unk1=0x%x, unk2=0x%x, unk3=0x%x, endpoint=0x%x, attributes=0x%x)", handle, device_handle, unk1, unk2, unk3, endpoint, attributes);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init || !usbh.handled_devices.count(device_handle))
	{
		return CELL_EINVAL;
	}

	return not_an_error(usbh.open_pipe(device_handle, static_cast<u8>(endpoint)));
}
// lv2 syscall: open the default (control) pipe, i.e. endpoint 0.
error_code sys_usbd_open_default_pipe(ppu_thread& ppu, u32 handle, u32 device_handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_open_default_pipe(handle=0x%x, device_handle=0x%x)", handle, device_handle);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init || !usbh.handled_devices.count(device_handle))
	{
		return CELL_EINVAL;
	}

	return not_an_error(usbh.open_pipe(device_handle, 0));
}
// lv2 syscall: close a previously opened pipe.
error_code sys_usbd_close_pipe(ppu_thread& ppu, u32 handle, u32 pipe_handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_close_pipe(handle=0x%x, pipe_handle=0x%x)", handle, pipe_handle);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init || !usbh.is_pipe(pipe_handle))
	{
		return CELL_EINVAL;
	}

	usbh.close_pipe(pipe_handle);

	return CELL_OK;
}
// From RE:
// In libusbd_callback_thread
// *arg1 = 4 will terminate CellUsbd libusbd_callback_thread
// *arg1 = 3 will do some extra processing right away(notification of transfer finishing)
// *arg1 < 1 || *arg1 > 4 are ignored(rewait instantly for event)
// *arg1 == 1 || *arg1 == 2 will send a sys_event to internal CellUsbd event queue with same parameters as received and loop(attach and detach event)
error_code sys_usbd_receive_event(ppu_thread& ppu, u32 handle, vm::ptr<u64> arg1, vm::ptr<u64> arg2, vm::ptr<u64> arg3)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_receive_event(handle=0x%x, arg1=*0x%x, arg2=*0x%x, arg3=*0x%x)", handle, arg1, arg2, arg3);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();

	{
		std::lock_guard lock_sq(usbh.mutex_sq);

		if (!usbh.is_init)
			return CELL_EINVAL;

		// Fast path: an event is already queued, return it immediately
		if (usbh.get_event(arg1, arg2, arg3))
		{
			// hack for Guitar Hero Live
			// Attaching the device too fast seems to result in a nullptr along the way
			if (*arg1 == SYS_USBD_ATTACH)
				lv2_obj::sleep(ppu), lv2_obj::wait_timeout(5000);

			return CELL_OK;
		}

		// No event pending: park this thread on the sleep queue; add_event()
		// will fill gpr[4..6] and wake it
		lv2_obj::sleep(ppu);
		lv2_obj::emplace(usbh.sq, &ppu);
	}

	while (auto state = +ppu.state)
	{
		// Woken by add_event (payload already in gpr[4..6])
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			sys_usbd.trace("Received event(queued): arg1=0x%x arg2=0x%x arg3=0x%x", ppu.gpr[4], ppu.gpr[5], ppu.gpr[6]);
			break;
		}

		// Emulator is stopping: if we are still queued, mark the syscall for re-execution
		if (is_stopped(state))
		{
			std::lock_guard lock(usbh.mutex);

			for (auto cpu = +usbh.sq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					sys_usbd.trace("sys_usbd_receive_event: aborting");
					return {};
				}
			}

			break;
		}

		ppu.state.wait(state);
	}

	ppu.check_state();

	// Copy the event payload delivered through the registers back to guest memory
	*arg1 = ppu.gpr[4];
	*arg2 = ppu.gpr[5];
	*arg3 = ppu.gpr[6];

	// Same Guitar Hero Live workaround as the fast path above
	if (*arg1 == SYS_USBD_ATTACH)
		lv2_obj::sleep(ppu), lv2_obj::wait_timeout(5000);

	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_detect_event(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_detect_event()");
	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_attach(ppu_thread& ppu, u32 handle, u32 unk1, u32 unk2, u32 device_handle)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_attach(handle=0x%x, unk1=0x%x, unk2=0x%x, device_handle=0x%x)", handle, unk1, unk2, device_handle);
	return CELL_OK;
}
// lv2 syscall: start an asynchronous transfer on a pipe. Endpoint 0 carries a
// control transfer described by 'request'; any other endpoint is treated as an
// interrupt transfer of buf_size bytes. Returns the transfer id used to poll
// completion via sys_usbd_get_transfer_status.
error_code sys_usbd_transfer_data(ppu_thread& ppu, u32 handle, u32 id_pipe, vm::ptr<u8> buf, u32 buf_size, vm::ptr<UsbDeviceRequest> request, u32 type_transfer)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_transfer_data(handle=0x%x, id_pipe=0x%x, buf=*0x%x, buf_length=0x%x, request=*0x%x, type=0x%x)", handle, id_pipe, buf, buf_size, request, type_transfer);

	if (sys_usbd.trace && request)
	{
		sys_usbd.trace("RequestType:0x%02x, Request:0x%02x, wValue:0x%04x, wIndex:0x%04x, wLength:0x%04x", request->bmRequestType, request->bRequest, request->wValue, request->wIndex, request->wLength);
		// Host-to-device (OUT) control payloads are dumped for debugging
		if ((request->bmRequestType & 0x80) == 0 && buf && buf_size != 0)
			sys_usbd.trace("Control sent:\n%s", fmt::buf_to_hexstring(buf.get_ptr(), buf_size));
	}

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init || !usbh.is_pipe(id_pipe))
	{
		return CELL_EINVAL;
	}

	const auto& pipe = usbh.get_pipe(id_pipe);
	auto&& [transfer_id, transfer] = usbh.get_free_transfer();

	// Remember which device this transfer belongs to (used on NO_DEVICE errors)
	transfer.assigned_number = pipe.device->assigned_number;

	// Default endpoint is control endpoint
	if (pipe.endpoint == 0)
	{
		if (!request)
		{
			sys_usbd.error("Tried to use control pipe without proper request pointer");
			return CELL_EINVAL;
		}

		// Claiming interface
		switch (request->bmRequestType)
		{
		case 0U /*silences warning*/ | LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_STANDARD | LIBUSB_RECIPIENT_DEVICE:
		{
			switch (request->bRequest)
			{
			case LIBUSB_REQUEST_SET_CONFIGURATION:
			{
				// SET_CONFIGURATION implies (re)claiming interface 0
				pipe.device->set_configuration(static_cast<u8>(+request->wValue));
				pipe.device->set_interface(0);
				break;
			}
			default: break;
			}
			break;
		}
		case 0U /*silences warning*/ | LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_STANDARD | LIBUSB_RECIPIENT_DEVICE:
		{
			// IN transfers need somewhere to put the data
			if (!buf)
			{
				sys_usbd.error("Invalid buffer for control_transfer");
				return CELL_EFAULT;
			}
			break;
		}
		default: break;
		}

		pipe.device->control_transfer(request->bmRequestType, request->bRequest, request->wValue, request->wIndex, request->wLength, buf_size, buf.get_ptr(), &transfer);
	}
	else
	{
		// If output endpoint
		if (!(pipe.endpoint & 0x80))
			sys_usbd.trace("Write Int(s: %d):\n%s", buf_size, fmt::buf_to_hexstring(buf.get_ptr(), buf_size));

		pipe.device->interrupt_transfer(buf_size, buf.get_ptr(), pipe.endpoint, &transfer);
	}

	// Emulated devices complete via the usb thread's fake-transfer pass
	if (transfer.fake)
	{
		usbh.push_fake_transfer(&transfer);
	}

	// returns an identifier specific to the transfer
	return not_an_error(transfer_id);
}
// lv2 syscall: start an asynchronous isochronous transfer on a pipe.
// NOTE(review): unlike sys_usbd_transfer_data, 'iso_request' is not passed to
// the device and transfer.assigned_number is not set - presumably incomplete
// (function is still logged as todo).
error_code sys_usbd_isochronous_transfer_data(ppu_thread& ppu, u32 handle, u32 id_pipe, vm::ptr<UsbDeviceIsoRequest> iso_request)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_isochronous_transfer_data(handle=0x%x, id_pipe=0x%x, iso_request=*0x%x)", handle, id_pipe, iso_request);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init || !usbh.is_pipe(id_pipe))
	{
		return CELL_EINVAL;
	}

	const auto& pipe = usbh.get_pipe(id_pipe);
	auto&& [transfer_id, transfer] = usbh.get_free_transfer();

	pipe.device->isochronous_transfer(&transfer);

	// returns an identifier specific to the transfer
	return not_an_error(transfer_id);
}
// lv2 syscall: report the completion status and byte count of a transfer.
error_code sys_usbd_get_transfer_status(ppu_thread& ppu, u32 handle, u32 id_transfer, u32 unk1, vm::ptr<u32> result, vm::ptr<u32> count)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.trace("sys_usbd_get_transfer_status(handle=0x%x, id_transfer=0x%x, unk1=0x%x, result=*0x%x, count=*0x%x)", handle, id_transfer, unk1, result, count);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	const auto [tr_result, tr_count] = usbh.get_transfer_status(id_transfer);
	*result = tr_result;
	*count = tr_count;

	return CELL_OK;
}
// lv2 syscall: report the completion status and iso request block of an
// isochronous transfer.
error_code sys_usbd_get_isochronous_transfer_status(ppu_thread& ppu, u32 handle, u32 id_transfer, u32 unk1, vm::ptr<UsbDeviceIsoRequest> request, vm::ptr<u32> result)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_get_isochronous_transfer_status(handle=0x%x, id_transfer=0x%x, unk1=0x%x, request=*0x%x, result=*0x%x)", handle, id_transfer, unk1, request, result);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	const auto [tr_result, tr_request] = usbh.get_isochronous_transfer_status(id_transfer);
	*result = tr_result;
	*request = tr_request;

	return CELL_OK;
}
// lv2 syscall: write the 7-byte bus location of a handled device to guest memory.
error_code sys_usbd_get_device_location(ppu_thread& ppu, u32 handle, u32 device_handle, vm::ptr<u8> location)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.notice("sys_usbd_get_device_location(handle=0x%x, device_handle=0x%x, location=*0x%x)", handle, device_handle, location);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	// Single find() instead of count() + operator[], which searched the map twice
	const auto found = usbh.handled_devices.find(device_handle);

	if (!usbh.is_init || found == usbh.handled_devices.end())
		return CELL_EINVAL;

	found->second.second->get_location(location.get_ptr());

	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_send_event(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_send_event()");
	return CELL_OK;
}
// lv2 syscall: inject an event into the usbd event queue (or directly into a
// waiting receiver - see usb_handler_thread::add_event).
error_code sys_usbd_event_port_send(ppu_thread& ppu, u32 handle, u64 arg1, u64 arg2, u64 arg3)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.warning("sys_usbd_event_port_send(handle=0x%x, arg1=0x%x, arg2=0x%x, arg3=0x%x)", handle, arg1, arg2, arg3);

	auto& usbh = g_fxo->get<named_thread<usb_handler_thread>>();
	std::lock_guard lock(usbh.mutex);

	if (!usbh.is_init)
		return CELL_EINVAL;

	usbh.add_event(arg1, arg2, arg3);

	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_allocate_memory(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_allocate_memory()");
	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_free_memory(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_free_memory()");
	return CELL_OK;
}
// Stub: not implemented yet, always succeeds.
error_code sys_usbd_get_device_speed(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_usbd.todo("sys_usbd_get_device_speed()");
	return CELL_OK;
}
| 45,007
|
C++
|
.cpp
| 1,185
| 35.277637
| 231
| 0.712837
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,339
|
sys_tty.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_tty.cpp
|
#include "stdafx.h"
#include "Emu/system_config.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "sys_tty.h"
#include <deque>
#include <mutex>
LOG_CHANNEL(sys_tty);
extern fs::file g_tty;
extern atomic_t<s64> g_tty_size;
extern std::array<std::deque<std::string>, 16> g_tty_input;
extern std::mutex g_tty_mutex;
// Reads up to 'len' characters from the emulated TTY input queue of channel 'ch'
// (only available in debug console mode). Stops at a newline, at 'len', or at the
// end of the queued string; the consumed text is removed from the queue.
// Writes the number of characters read to *preadlen and copies them into 'buf'.
error_code sys_tty_read(s32 ch, vm::ptr<char> buf, u32 len, vm::ptr<u32> preadlen)
{
	sys_tty.trace("sys_tty_read(ch=%d, buf=*0x%x, len=%d, preadlen=*0x%x)", ch, buf, len, preadlen);

	// TTY input is a debug-console-only feature
	if (!g_cfg.core.debug_console_mode)
	{
		return CELL_EIO;
	}

	if (ch > 15 || ch < 0 || !buf)
	{
		return CELL_EINVAL;
	}

	if (ch < SYS_TTYP_USER1)
	{
		sys_tty.warning("sys_tty_read called with system channel %d", ch);
	}

	usz chars_to_read = 0; // number of chars that will be read from the input string
	std::string tty_read; // string for storage of read chars

	if (len > 0)
	{
		std::lock_guard lock(g_tty_mutex);

		if (!g_tty_input[ch].empty())
		{
			// reference to our first queue element
			std::string& input = g_tty_input[ch].front();

			// we have to stop reading at either a new line, the param len, or our input string size
			usz new_line_pos = input.find_first_of('\n');

			if (new_line_pos != input.npos)
			{
				chars_to_read = std::min(new_line_pos, static_cast<usz>(len));
			}
			else
			{
				chars_to_read = std::min(input.size(), static_cast<usz>(len));
			}

			// read the previously calculated number of chars from the beginning of the input string
			tty_read = input.substr(0, chars_to_read);

			// remove the just read text from the input string
			input = input.substr(chars_to_read, input.size() - 1);

			if (input.empty())
			{
				// pop the first queue element if it was completely consumed
				g_tty_input[ch].pop_front();
			}
		}
	}

	// NOTE(review): this check runs after input has already been consumed from the
	// queue, so a null preadlen silently drops the read text — confirm whether real
	// firmware validates preadlen before consuming input
	if (!preadlen)
	{
		return CELL_EFAULT;
	}

	*preadlen = static_cast<u32>(chars_to_read);

	if (chars_to_read > 0)
	{
		std::memcpy(buf.get_ptr(), tty_read.c_str(), chars_to_read);
		sys_tty.success("sys_tty_read(ch=%d, len=%d) read %s with length %d", ch, len, tty_read, *preadlen);
	}

	return CELL_OK;
}
std::string dump_useful_thread_info();
// Writes guest text to the emulator TTY (log channel and optional g_tty file).
// ch: channel 0-15 (only validated in debug console mode); buf/len: guest buffer;
// pwritelen: receives the written length. Messages containing failure keywords are
// logged at warning severity, and a thread dump is emitted on bursty writes.
// Fix: the "<< endl" log message used two opening typographic quotes (“%s“);
// the closing quote now matches the sibling messages (“%s”).
error_code sys_tty_write([[maybe_unused]] ppu_thread& ppu, s32 ch, vm::cptr<char> buf, u32 len, vm::ptr<u32> pwritelen)
{
	ppu.state += cpu_flag::wait;

	sys_tty.notice("sys_tty_write(ch=%d, buf=*0x%x, len=%d, pwritelen=*0x%x)", ch, buf, len, pwritelen);

	std::string msg;

	if (static_cast<s32>(len) > 0 && vm::check_addr(buf.addr(), vm::page_readable, len))
	{
		msg.resize(len);

		if (!vm::try_access(buf.addr(), msg.data(), len, false))
		{
			msg.clear();
		}
	}

	// Case-tolerant keyword search used to escalate log severity for failure-ish text
	auto find_word = [](std::string_view msg, std::string_view word) -> bool
	{
		// Match uppercase and lowercase starting words
		const usz index = msg.find(word.substr(1));

		if (index != umax && index >= 1u)
		{
			return std::tolower(static_cast<u8>(msg[index - 1])) == word[0];
		}

		return false;
	};

	std::string_view sample = std::string_view(msg).substr(0, 1024);

	const bool warning = find_word(sample, "failed"sv) || find_word(sample, "abort"sv) || find_word(sample, "crash"sv)
		|| find_word(sample, "error"sv) || find_word(sample, "unexpected"sv) || find_word(sample, "0x8001"sv);

	sample = {}; // Remove reference to string

	if (msg.size() >= 2u && [&]()
	{
		static thread_local u64 last_write = 0;

		// Dump thread about every period which TTY was not being touched for about half a second
		const u64 current = get_system_time();
		return current - std::exchange(last_write, current) >= (warning ? 500'000 : 3'000'000);
	}())
	{
		ppu_log.notice("\n%s", dump_useful_thread_info());
	}

	// Hack: write to tty even on CEX mode, but disable all error checks
	if (ch < 0 || ch > 15)
	{
		if (g_cfg.core.debug_console_mode)
		{
			return CELL_EINVAL;
		}
		else
		{
			msg.clear();
		}
	}

	if (g_cfg.core.debug_console_mode)
	{
		// Don't modify it in CEX mode
		len = static_cast<s32>(len) > 0 ? len : 0;
	}

	if (static_cast<s32>(len) > 0)
	{
		if (!msg.empty())
		{
			if (msg.ends_with("\n"))
			{
				// Avoid logging trailing newlines, log them verbosely instead
				const std::string_view msg_clear = std::string_view(msg).substr(0, msg.find_last_not_of('\n') + 1);

				if (msg.size() - 1 == msg_clear.size())
				{
					(warning ? sys_tty.warning : sys_tty.notice)(u8"sys_tty_write(): “%s” << endl", msg_clear);
				}
				else
				{
					(warning ? sys_tty.warning : sys_tty.notice)(u8"sys_tty_write(): “%s” << endl(%u)", msg_clear, msg.size() - msg_clear.size());
				}
			}
			else
			{
				(warning ? sys_tty.warning : sys_tty.notice)(u8"sys_tty_write(): “%s”", msg);
			}

			if (g_tty)
			{
				// Lock size by making it negative
				g_tty_size -= (1ll << 48);
				g_tty.write(msg);
				g_tty_size += (1ll << 48) + len;
			}
		}
		else if (g_cfg.core.debug_console_mode)
		{
			return {CELL_EFAULT, buf.addr()};
		}
	}

	if (!pwritelen.try_write(len))
	{
		return {CELL_EFAULT, pwritelen};
	}

	return CELL_OK;
}
| 4,992
|
C++
|
.cpp
| 164
| 27.243902
| 131
| 0.643351
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,340
|
sys_ss.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_ss.cpp
|
#include "stdafx.h"
#include "sys_ss.h"
#include "sys_process.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/system_config.h"
#include "util/sysinfo.hpp"
#include <charconv>
#include <shared_mutex>
#include <unordered_set>
#ifdef _WIN32
#include <Windows.h>
#include <bcrypt.h>
#endif
// Emulated LV2 software update manager state: cached firmware version,
// emulated EEPROM byte map, and a registry of guest allocations.
// Fix: the std::from_chars result test was inverted — from_chars reports
// success with ec == std::errc{}, and only then is system_sw_version valid
// to shift; the old code shifted an *unparsed* (uninitialized) value on
// failure and zeroed the parsed value on success.
struct lv2_update_manager
{
	lv2_update_manager()
	{
		std::string version_str = utils::get_firmware_version();

		// For example, 4.90 should be converted to 0x4900000000000
		std::erase(version_str, '.');

		if (std::from_chars(version_str.data(), version_str.data() + version_str.size(), system_sw_version, 16).ec == std::errc{})
			system_sw_version <<= 40;
		else
			system_sw_version = 0;
	}

	lv2_update_manager(const lv2_update_manager&) = delete;
	lv2_update_manager& operator=(const lv2_update_manager&) = delete;
	~lv2_update_manager() = default;

	// Firmware version packed as 0xVVVV0000000000 (e.g. 0x4900000000000 for 4.90)
	u64 system_sw_version;

	std::unordered_map<u32, u8> eeprom_map // offset, value
	{
		// system language
		// *i think* this gives english
		{0x48C18, 0x00},
		{0x48C19, 0x00},
		{0x48C1A, 0x00},
		{0x48C1B, 0x01},
		// system language end

		// vsh target (seems it can be 0xFFFFFFFE, 0xFFFFFFFF, 0x00000001 default: 0x00000000 / vsh sets it to 0x00000000 on boot if it isn't 0x00000000)
		{0x48C1C, 0x00},
		{0x48C1D, 0x00},
		{0x48C1E, 0x00},
		{0x48C1F, 0x00}
		// vsh target end
	};
	mutable std::shared_mutex eeprom_mutex;

	// Guest addresses handed out by allocate(), so deallocate() only frees our own
	std::unordered_set<u32> malloc_set;
	mutable std::shared_mutex malloc_mutex;

	// Allocates 'size' bytes of guest memory; returns the address or 0 on failure
	u32 allocate(u32 size)
	{
		std::unique_lock unique_lock(malloc_mutex);

		if (const auto addr = vm::alloc(size, vm::main); addr)
		{
			malloc_set.emplace(addr);
			return addr;
		}

		return 0;
	}

	// Frees a previously allocated address; returns the freed size or 0 if unknown
	u32 deallocate(u32 addr)
	{
		std::unique_lock unique_lock(malloc_mutex);

		if (malloc_set.count(addr))
		{
			return vm::dealloc(addr, vm::main);
		}

		return 0;
	}
};
// Formatter specialization: renders sys_ss_rng_error values as their enum names.
template<>
void fmt_class_string<sys_ss_rng_error>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
			STR_CASE(SYS_SS_RNG_ERROR_INVALID_PKG);
			STR_CASE(SYS_SS_RNG_ERROR_ENOMEM);
			STR_CASE(SYS_SS_RNG_ERROR_EAGAIN);
			STR_CASE(SYS_SS_RNG_ERROR_EFAULT);
			STR_CASE(SYS_SS_RTC_ERROR_UNK);
		}

		return unknown;
	});
}
LOG_CHANNEL(sys_ss);
// Fills a guest buffer with random bytes.
// pkg_id 2: general RNG (BCryptGenRandom on Windows, /dev/urandom elsewhere).
// pkg_id 1: root-only variant, currently stubbed to zero-fill 0x18 bytes.
// Any other pkg_id fails with SYS_SS_RNG_ERROR_INVALID_PKG.
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf, u64 size)
{
	sys_ss.warning("sys_ss_random_number_generator(pkg_id=%u, buf=*0x%x, size=0x%x)", pkg_id, buf, size);

	if (pkg_id != 2)
	{
		if (pkg_id == 1)
		{
			if (!g_ps3_process_info.has_root_perm())
			{
				return CELL_ENOSYS;
			}

			sys_ss.todo("sys_ss_random_number_generator(): pkg_id=1");
			std::memset(buf.get_ptr(), 0, 0x18);
			return CELL_OK;
		}

		return SYS_SS_RNG_ERROR_INVALID_PKG;
	}

	// TODO: arbitrary sanity cap on the requested size
	if (size > 0x10000000)
	{
		return SYS_SS_RNG_ERROR_ENOMEM;
	}

	// Generate into a host-side scratch buffer first, then copy into guest memory
	std::unique_ptr<u8[]> temp(new u8[size]);

#ifdef _WIN32
	if (auto ret = BCryptGenRandom(nullptr, temp.get(), static_cast<ULONG>(size), BCRYPT_USE_SYSTEM_PREFERRED_RNG))
	{
		fmt::throw_exception("sys_ss_random_number_generator(): BCryptGenRandom failed (0x%08x)", ret);
	}
#else
	fs::file rnd{"/dev/urandom"};

	if (!rnd || rnd.read(temp.get(), size) != size)
	{
		fmt::throw_exception("sys_ss_random_number_generator(): Failed to generate pseudo-random numbers");
	}
#endif

	std::memcpy(buf.get_ptr(), temp.get(), size);
	return CELL_OK;
}
// Access-control engine dispatcher: exposes the process' program authority id.
// pkg 0x1: debug/root only, a2 must be the current PID, writes authid to *a3.
// pkg 0x2: writes authid to *a2. pkg 0x3: debug/root permission check only.
error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3)
{
	sys_ss.success("sys_ss_access_control_engine(pkg_id=0x%llx, a2=0x%llx, a3=0x%llx)", pkg_id, a2, a3);

	// Authority id from the loaded SELF header, or 0 if no valid SELF info
	const u64 authid = g_ps3_process_info.self_info.valid ?
		g_ps3_process_info.self_info.prog_id_hdr.program_authority_id : 0;

	switch (pkg_id)
	{
	case 0x1:
	{
		if (!g_ps3_process_info.debug_or_root())
		{
			return not_an_error(CELL_ENOSYS);
		}

		if (!a2)
		{
			return CELL_ESRCH;
		}

		ensure(a2 == static_cast<u64>(process_getpid()));
		vm::write64(vm::cast(a3), authid);
		break;
	}
	case 0x2:
	{
		vm::write64(vm::cast(a2), authid);
		break;
	}
	case 0x3:
	{
		if (!g_ps3_process_info.debug_or_root())
		{
			return CELL_ENOSYS;
		}

		break;
	}
	default:
		return 0x8001051du;
	}

	return CELL_OK;
}
// Thin wrapper: fetches the console IDPS via the appliance info manager (code 0x19003).
error_code sys_ss_get_console_id(vm::ptr<u8> buf)
{
	sys_ss.notice("sys_ss_get_console_id(buf=*0x%x)", buf);
	return sys_ss_appliance_info_manager(0x19003, buf);
}
// Returns the configured open PSID pair from the emulator settings.
error_code sys_ss_get_open_psid(vm::ptr<CellSsOpenPSID> psid)
{
	sys_ss.notice("sys_ss_get_open_psid(psid=*0x%x)", psid);

	psid->low = g_cfg.sys.console_psid_low;
	psid->high = g_cfg.sys.console_psid_high;

	return CELL_OK;
}
// Root-only appliance info queries: fills 'buffer' with emulated identity data
// (product code, IDPS, PS code, open PSID) depending on 'code'. DECR variants
// patch the relevant bytes when debug console mode is enabled.
error_code sys_ss_appliance_info_manager(u32 code, vm::ptr<u8> buffer)
{
	sys_ss.notice("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code, buffer);

	if (!g_ps3_process_info.has_root_perm())
		return CELL_ENOSYS;

	if (!buffer)
		return CELL_EFAULT;

	switch (code)
	{
	case 0x19002:
	{
		// AIM_get_device_type
		constexpr u8 product_code[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89 };
		std::memcpy(buffer.get_ptr(), product_code, 16);

		if (g_cfg.core.debug_console_mode)
			buffer[15] = 0x81; // DECR

		break;
	}
	case 0x19003:
	{
		// AIM_get_device_id
		constexpr u8 idps[] = { 0x00, 0x00, 0x00, 0x01, 0x00, 0x89, 0x00, 0x0B, 0x14, 0x00, 0xEF, 0xDD, 0xCA, 0x25, 0x52, 0x66 };
		std::memcpy(buffer.get_ptr(), idps, 16);

		if (g_cfg.core.debug_console_mode)
		{
			buffer[5] = 0x81; // DECR
			buffer[7] = 0x09; // DECR-1400
		}

		break;
	}
	case 0x19004:
	{
		// AIM_get_ps_code
		constexpr u8 pscode[] = { 0x00, 0x01, 0x00, 0x85, 0x00, 0x07, 0x00, 0x04 };
		std::memcpy(buffer.get_ptr(), pscode, 8);
		break;
	}
	case 0x19005:
	{
		// AIM_get_open_ps_id
		be_t<u64> psid[2] = { +g_cfg.sys.console_psid_high, +g_cfg.sys.console_psid_low };
		std::memcpy(buffer.get_ptr(), psid, 16);
		break;
	}
	case 0x19006:
	{
		// qa values (dex only) ??
		[[fallthrough]];
	}
	default: sys_ss.todo("sys_ss_appliance_info_manager(code=0x%x, buffer=*0x%x)", code, buffer);
	}

	return CELL_OK;
}
// Reports the cached "product mode" flag to the guest.
error_code sys_ss_get_cache_of_product_mode(vm::ptr<u8> ptr)
{
	sys_ss.todo("sys_ss_get_cache_of_product_mode(ptr=*0x%x)", ptr);

	if (!ptr)
	{
		return CELL_EINVAL;
	}

	// 0xff Happens when hypervisor call returns an error
	// 0 - disabled
	// 1 - enabled
	// except something segfaults when using 0, so error it is!
	constexpr u8 hv_error_result = 0xFF;
	*ptr = hv_error_result;

	return CELL_OK;
}
// Secure RTC service dispatcher.
// cmd 0x3001: open/handshake (a3 must equal 0x20). cmd 0x3002: read time —
// writes the timebase counter to *a3 and a zero status to *a4. cmd 0x3003: no-op.
// Anything else is rejected as a bad packet id (0x80010500).
error_code sys_ss_secure_rtc(u64 cmd, u64 a2, u64 a3, u64 a4)
{
	sys_ss.todo("sys_ss_secure_rtc(cmd=0x%llx, a2=0x%x, a3=0x%llx, a4=0x%llx)", cmd, a2, a3, a4);

	if (cmd == 0x3001)
	{
		if (a3 != 0x20)
			return 0x80010500; // bad packet id

		return CELL_OK;
	}
	else if (cmd == 0x3002)
	{
		// Get time
		if (a2 > 1)
			return 0x80010500; // bad packet id

		// a3 is actual output, not 100% sure, but best guess is its tb val
		vm::write64(::narrow<u32>(a3), get_timebased_time());
		// a4 is a pointer to status, non 0 on error
		vm::write64(::narrow<u32>(a4), 0);
		return CELL_OK;
	}
	else if (cmd == 0x3003)
	{
		return CELL_OK;
	}

	return 0x80010500; // bad packet id
}
// Reports the cached flash-extension flag to the guest.
error_code sys_ss_get_cache_of_flash_ext_flag(vm::ptr<u64> flag)
{
	sys_ss.todo("sys_ss_get_cache_of_flash_ext_flag(flag=*0x%x)", flag);

	if (!flag)
	{
		return CELL_EFAULT;
	}

	constexpr u64 flash_ext_flag = 0xFE; // nand vs nor from lsb
	*flag = flash_ext_flag;

	return CELL_OK;
}
// Reports the boot device identifier to the guest.
error_code sys_ss_get_boot_device(vm::ptr<u64> dev)
{
	sys_ss.todo("sys_ss_get_boot_device(dev=*0x%x)", dev);

	if (!dev)
	{
		return CELL_EINVAL;
	}

	constexpr u64 boot_device = 0x190;
	*dev = boot_device;

	return CELL_OK;
}
// Root-only update manager dispatcher. Packet ids select an operation: most are
// accepted as no-ops; the implemented ones are 0x6003 (installed version query),
// 0x600B/0x600C (emulated EEPROM read/write), 0x600E/0x600F/0x6012 (guest buffer
// allocate/free) and 0x6011 (applicable version query). Unknown ids → CELL_EINVAL.
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6)
{
	sys_ss.notice("sys_ss_update_manager(pkg=0x%x, a1=0x%x, a2=0x%x, a3=0x%x, a4=0x%x, a5=0x%x, a6=0x%x)", pkg_id, a1, a2, a3, a4, a5, a6);

	if (!g_ps3_process_info.has_root_perm())
		return CELL_ENOSYS;

	auto& update_manager = g_fxo->get<lv2_update_manager>();

	switch (pkg_id)
	{
	case 0x6001:
	{
		// update package async
		break;
	}
	case 0x6002:
	{
		// inspect package async
		break;
	}
	case 0x6003:
	{
		// get installed package info: writes the cached firmware version to *a2
		[[maybe_unused]] const auto type = ::narrow<u32>(a1);
		const auto info_ptr = ::narrow<u32>(a2);

		if (!info_ptr)
			return CELL_EFAULT;

		vm::write64(info_ptr, update_manager.system_sw_version);
		break;
	}
	case 0x6004:
	{
		// get fix instruction
		break;
	}
	case 0x6005:
	{
		// extract package async
		break;
	}
	case 0x6006:
	{
		// get extract package
		break;
	}
	case 0x6007:
	{
		// get flash initialized
		break;
	}
	case 0x6008:
	{
		// set flash initialized
		break;
	}
	case 0x6009:
	{
		// get seed token
		break;
	}
	case 0x600A:
	{
		// set seed token
		break;
	}
	case 0x600B:
	{
		// read eeprom: a1 = offset, a2 = guest pointer for the byte
		const auto offset = ::narrow<u32>(a1);
		const auto value_ptr = ::narrow<u32>(a2);

		if (!value_ptr)
			return CELL_EFAULT;

		std::shared_lock shared_lock(update_manager.eeprom_mutex);

		if (const auto iterator = update_manager.eeprom_map.find(offset); iterator != update_manager.eeprom_map.end())
			vm::write8(value_ptr, iterator->second);
		else
			vm::write8(value_ptr, 0xFF); // 0xFF if not set

		break;
	}
	case 0x600C:
	{
		// write eeprom: a1 = offset, a2 = byte value (0xFF erases the entry)
		const auto offset = ::narrow<u32>(a1);
		const auto value = ::narrow<u8>(a2);

		std::unique_lock unique_lock(update_manager.eeprom_mutex);

		if (value != 0xFF)
			update_manager.eeprom_map[offset] = value;
		else
			update_manager.eeprom_map.erase(offset); // 0xFF: unset

		break;
	}
	case 0x600D:
	{
		// get async status
		break;
	}
	case 0x600E:
	{
		// allocate buffer: a1 = size, a2 = guest pointer receiving the address
		const auto size = ::narrow<u32>(a1);
		const auto addr_ptr = ::narrow<u32>(a2);

		if (!addr_ptr)
			return CELL_EFAULT;

		const auto addr = update_manager.allocate(size);

		if (!addr)
			return CELL_ENOMEM;

		vm::write32(addr_ptr, addr);
		break;
	}
	case 0x600F:
	{
		// release buffer: a1 = previously allocated address
		const auto addr = ::narrow<u32>(a1);

		if (!update_manager.deallocate(addr))
			return CELL_ENOMEM;

		break;
	}
	case 0x6010:
	{
		// check integrity
		break;
	}
	case 0x6011:
	{
		// get applicable version
		const auto addr_ptr = ::narrow<u32>(a2);

		if (!addr_ptr)
			return CELL_EFAULT;

		vm::write64(addr_ptr, 0x30040ULL << 32); // 3.40
		break;
	}
	case 0x6012:
	{
		// allocate buffer from memory container (container id currently ignored)
		[[maybe_unused]] const auto mem_ct = ::narrow<u32>(a1);
		const auto size = ::narrow<u32>(a2);
		const auto addr_ptr = ::narrow<u32>(a3);

		if (!addr_ptr)
			return CELL_EFAULT;

		const auto addr = update_manager.allocate(size);

		if (!addr)
			return CELL_ENOMEM;

		vm::write32(addr_ptr, addr);
		break;
	}
	case 0x6013:
	{
		// unknown
		break;
	}
	default:
	{
		sys_ss.error("sys_ss_update_manager(): invalid packet id 0x%x ", pkg_id);
		return CELL_EINVAL;
	}
	}

	return CELL_OK;
}
// Unimplemented stub: logs a TODO and reports success.
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4)
{
	sys_ss.todo("sys_ss_virtual_trm_manager(pkg=0x%llx, a1=0x%llx, a2=0x%llx, a3=0x%llx, a4=0x%llx)", pkg_id, a1, a2, a3, a4);
	return CELL_OK;
}
// Individual (per-console) info manager, partially stubbed.
// pkg 0x17001: reports the EID size (0x100). pkg 0x17002: "reads" the EID —
// currently only writes the caller's buffer size back to *a5.
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2, vm::ptr<u64> out_size, u64 a4, u64 a5, u64 a6)
{
	sys_ss.todo("sys_ss_individual_info_manager(pkg=0x%llx, a2=0x%llx, out_size=*0x%llx, a4=0x%llx, a5=0x%llx, a6=0x%llx)", pkg_id, a2, out_size, a4, a5, a6);

	switch (pkg_id)
	{
	// Read EID
	case 0x17002:
	{
		// TODO
		vm::_ref<u64>(a5) = a4; // Write back size of buffer
		break;
	}
	// Get EID size
	case 0x17001: *out_size = 0x100; break;
	default: break;
	}

	return CELL_OK;
}
| 11,433
|
C++
|
.cpp
| 470
| 21.742553
| 155
| 0.67636
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,341
|
sys_rwlock.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp
|
#include "stdafx.h"
#include "sys_rwlock.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_rwlock);
// Savestate deserialization constructor: restores protocol/key/name, then the owner word.
lv2_rwlock::lv2_rwlock(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, name(ar)
{
	ar(owner);
}
// Savestate loader: reconstructs the rwlock and registers it under its IPC key.
std::shared_ptr<void> lv2_rwlock::load(utils::serial& ar)
{
	auto rwlock = std::make_shared<lv2_rwlock>(ar);
	return lv2_obj::load(rwlock->key, rwlock);
}
// Savestate writer: must serialize the same fields, in the same order, as the ar constructor.
void lv2_rwlock::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(protocol, key, name, owner);
}
// Creates a reader/writer lock. Validates pointers and the wait protocol
// (FIFO or PRIORITY only), then registers the object and returns its id.
error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> attr)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.warning("sys_rwlock_create(rw_lock_id=*0x%x, attr=*0x%x)", rw_lock_id, attr);

	if (!rw_lock_id || !attr)
	{
		return CELL_EFAULT;
	}

	const auto _attr = *attr;

	const u32 protocol = _attr.protocol;

	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY)
	{
		sys_rwlock.error("sys_rwlock_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}

	const u64 ipc_key = lv2_obj::get_key(_attr);

	if (auto error = lv2_obj::create<lv2_rwlock>(_attr.pshared, ipc_key, _attr.flags, [&]
	{
		return std::make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
	}))
	{
		return error;
	}

	ppu.check_state();
	*rw_lock_id = idm::last_id();
	return CELL_OK;
}
// Destroys a reader/writer lock; fails with EBUSY while it is owned or contested.
error_code sys_rwlock_destroy(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.warning("sys_rwlock_destroy(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::withdraw<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rw) -> CellError
	{
		// Non-zero owner word means readers/writer hold it or waiters exist
		if (rw.owner)
		{
			return CELL_EBUSY;
		}

		lv2_obj::on_id_destroy(rw, rw.key);
		return {};
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret)
	{
		return rwlock.ret;
	}

	return CELL_OK;
}
// Acquires a reader lock, sleeping up to 'timeout' microseconds (0 = infinite).
// The owner word encodes state: 0 = free, negative even = reader count * -2,
// positive = writer (id << 1), bit 0 = waiters present. Readers are admitted
// while no writer owns the lock and the wait bit is clear.
error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_rlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Fast path: no writer and no waiters — try to add a reader lock-free
		if (val <= 0 && !(val & 1))
		{
			if (rwlock.owner.compare_and_swap_test(val, val - 2))
			{
				return true;
			}
		}

		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(rwlock.mutex);

		// Slow path under the mutex: either add a reader or set the wait bit
		const s64 _old = rwlock.owner.fetch_op([&](s64& val)
		{
			if (val <= 0 && !(val & 1))
			{
				val -= 2;
			}
			else
			{
				val |= 1;
			}
		});

		if (_old > 0 || _old & 1)
		{
			// Writer owned or waiters present: enqueue on the reader queue and sleep
			rwlock.sleep(ppu, timeout);
			lv2_obj::emplace(rwlock.rq, &ppu);
			return false;
		}

		return true;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret)
	{
		// Acquired without sleeping
		return CELL_OK;
	}

	ppu.gpr[3] = CELL_OK;

	// Wait loop: spins briefly, then blocks; handles emulation stop and timeout
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(rwlock->mutex);

			// If still queued, mark the thread for re-execution of this syscall
			for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				ppu.state += cpu_flag::wait;

				if (!atomic_storage<ppu_thread*>::load(rwlock->rq))
				{
					// Waiters queue is empty, so the thread must have been signaled
					rwlock->mutex.lock_unlock();
					break;
				}

				std::lock_guard lock(rwlock->mutex);

				// Not found in the queue anymore: signaled concurrently, treat as success
				if (!rwlock->unqueue(rwlock->rq, &ppu))
				{
					break;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Non-blocking reader acquire: succeeds only when no writer owns the lock and
// the wait bit is clear; otherwise returns EBUSY immediately.
error_code sys_rwlock_tryrlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_tryrlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rwlock)
	{
		auto [_, ok] = rwlock.owner.fetch_op([](s64& val)
		{
			if (val <= 0 && !(val & 1))
			{
				val -= 2;
				return true;
			}

			return false;
		});

		return ok;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (!rwlock.ret)
	{
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
// Releases one reader lock. When the releasing thread is the last reader and
// waiters exist, hands ownership to a queued writer (or clears the lock).
error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_runlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Fast path: more than one reader and no wait bit — just drop one reader
		if (val < 0 && !(val & 1))
		{
			if (rwlock.owner.compare_and_swap_test(val, val + 2))
			{
				return true;
			}
		}

		return false;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	lv2_obj::notify_all_t notify;

	if (rwlock.ret)
	{
		return CELL_OK;
	}
	else
	{
		std::lock_guard lock(rwlock->mutex);

		// Remove one reader
		const s64 _old = rwlock->owner.fetch_op([](s64& val)
		{
			if (val < -1)
			{
				val += 2;
			}
		});

		// Non-negative owner word means this thread held no reader lock
		if (_old >= 0)
		{
			return CELL_EPERM;
		}

		if (_old == -1)
		{
			// Last reader with wait bit set: wake one writer if queued
			if (const auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return {};
				}

				rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;

				rwlock->awake(cpu);
			}
			else
			{
				rwlock->owner = 0;

				ensure(!rwlock->rq);
			}
		}
	}

	return CELL_OK;
}
// Acquires the writer lock, sleeping up to 'timeout' microseconds (0 = infinite).
// Returns EDEADLK if the calling thread already owns it. On timeout, if this was
// the last queued writer, any blocked readers are released in a batch.
error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_wlock(rw_lock_id=0x%x, timeout=0x%llx)", rw_lock_id, timeout);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&, notify = lv2_obj::notify_all_t()](lv2_rwlock& rwlock) -> s64
	{
		const s64 val = rwlock.owner;

		// Fast path: lock is completely free — claim it for this writer
		if (val == 0)
		{
			if (rwlock.owner.compare_and_swap_test(0, ppu.id << 1))
			{
				return 0;
			}
		}
		else if (val >> 1 == ppu.id)
		{
			// Recursive write attempt — reported as deadlock by the caller
			return val;
		}

		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(rwlock.mutex);

		// Slow path: claim if free, otherwise set the wait bit
		const s64 _old = rwlock.owner.fetch_op([&](s64& val)
		{
			if (val == 0)
			{
				val = ppu.id << 1;
			}
			else
			{
				val |= 1;
			}
		});

		if (_old != 0)
		{
			rwlock.sleep(ppu, timeout);
			lv2_obj::emplace(rwlock.wq, &ppu);
		}

		return _old;
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret == 0)
	{
		return CELL_OK;
	}

	if (rwlock.ret >> 1 == ppu.id)
	{
		return CELL_EDEADLK;
	}

	ppu.gpr[3] = CELL_OK;

	// Wait loop: spins briefly, then blocks; handles emulation stop and timeout
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			std::lock_guard lock(rwlock->mutex);

			// If still queued, mark the thread for re-execution of this syscall
			for (auto cpu = +rwlock->wq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				std::lock_guard lock(rwlock->mutex);

				// Not found in the queue anymore: signaled concurrently, treat as success
				if (!rwlock->unqueue(rwlock->wq, &ppu))
				{
					break;
				}

				// If the last waiter quit the writer sleep queue, wake blocked readers
				if (rwlock->rq && !rwlock->wq && rwlock->owner < 0)
				{
					s64 size = 0;

					// Protocol doesn't matter here since they are all enqueued anyways
					while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO))
					{
						size++;
						rwlock->append(cpu);
					}

					rwlock->owner.atomic_op([&](s64& owner)
					{
						owner -= 2 * size; // Add readers to value
						owner &= -2; // Clear wait bit
					});

					lv2_obj::awake_all();
				}
				else if (!rwlock->rq && !rwlock->wq)
				{
					// No waiters left at all: clear the wait bit
					rwlock->owner &= -2;
				}

				ppu.gpr[3] = CELL_ETIMEDOUT;
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Non-blocking writer acquire: claims the lock only when completely free.
// Returns EDEADLK on a recursive attempt, EBUSY when otherwise held.
error_code sys_rwlock_trywlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_trywlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::check<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Return previous value
		return val ? val : rwlock.owner.compare_and_swap(0, ppu.id << 1);
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret != 0)
	{
		if (rwlock.ret >> 1 == ppu.id)
		{
			return CELL_EDEADLK;
		}

		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
// Releases the writer lock. With waiters present, either hands ownership to the
// next writer or releases all queued readers at once; EPERM if not the owner.
error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
{
	ppu.state += cpu_flag::wait;

	sys_rwlock.trace("sys_rwlock_wunlock(rw_lock_id=0x%x)", rw_lock_id);

	const auto rwlock = idm::get<lv2_obj, lv2_rwlock>(rw_lock_id, [&](lv2_rwlock& rwlock)
	{
		const s64 val = rwlock.owner;

		// Return previous value; fast-clear only when owned by us without waiters
		return val != ppu.id << 1 ? val : rwlock.owner.compare_and_swap(val, 0);
	});

	if (!rwlock)
	{
		return CELL_ESRCH;
	}

	if (rwlock.ret >> 1 != ppu.id)
	{
		return CELL_EPERM;
	}

	// Wait bit set: a slow-path handoff under the mutex is required
	if (lv2_obj::notify_all_t notify; rwlock.ret & 1)
	{
		std::lock_guard lock(rwlock->mutex);

		if (auto cpu = rwlock->schedule<ppu_thread>(rwlock->wq, rwlock->protocol))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return {};
			}

			// Hand the lock to the next writer, preserving the wait bit if queues remain
			rwlock->owner = cpu->id << 1 | !!rwlock->wq | !!rwlock->rq;

			rwlock->awake(cpu);
		}
		else if (rwlock->rq)
		{
			for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu->state & cpu_flag::again)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			s64 size = 0;

			// Protocol doesn't matter here since they are all enqueued anyways
			while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO))
			{
				size++;
				rwlock->append(cpu);
			}

			// All readers admitted at once: owner becomes -2 * reader count
			rwlock->owner.release(-2 * static_cast<s64>(size));
			lv2_obj::awake_all();
		}
		else
		{
			rwlock->owner = 0;
		}
	}

	return CELL_OK;
}
| 10,383
|
C++
|
.cpp
| 472
| 18.557203
| 127
| 0.614913
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,342
|
sys_cond.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_cond.cpp
|
#include "stdafx.h"
#include "util/serialization.hpp"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_cond.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_cond);
// Savestate deserialization constructor. The associated mutex may not have been
// loaded yet, so the lookup can yield nullptr; on_id_create() resolves it later.
lv2_cond::lv2_cond(utils::serial& ar)
	: key(ar)
	, name(ar)
	, mtx_id(ar)
	, mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
{
}
// Registers the condvar with its mutex (incrementing the mutex's cond_count).
// During savestate loading the mutex may not exist yet; in that case the
// binding is deferred via Emu.PostponeInitCode().
CellError lv2_cond::on_id_create()
{
	exists++;

	static auto do_it = [](lv2_cond* _this) -> CellError
	{
		if (lv2_obj::check(_this->mutex))
		{
			_this->mutex->cond_count++;
			return {};
		}

		// Mutex has been destroyed, cannot create conditional variable
		return CELL_ESRCH;
	};

	if (mutex)
	{
		return do_it(this);
	}

	// Only reachable while deserializing a savestate
	ensure(!!Emu.DeserialManager());

	Emu.PostponeInitCode([this]()
	{
		if (!mutex)
		{
			mutex = ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id));
		}

		// Defer function
		ensure(CellError{} == do_it(this));
	});

	return {};
}
// Savestate loader: reconstructs the condvar and registers it under its IPC key.
std::shared_ptr<void> lv2_cond::load(utils::serial& ar)
{
	auto cond = std::make_shared<lv2_cond>(ar);
	return lv2_obj::load(cond->key, cond);
}
// Savestate writer: must serialize the same fields, in the same order, as the ar constructor.
void lv2_cond::save(utils::serial& ar)
{
	ar(key, name, mtx_id);
}
// Creates a condition variable bound to an existing mutex and returns its id.
// NOTE(review): attr is dereferenced without a null check, unlike
// sys_rwlock_create — confirm whether real firmware faults here too.
error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);

	auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	const auto _attr = *attr;

	const u64 ipc_key = lv2_obj::get_key(_attr);

	if (ipc_key)
	{
		sys_cond.warning("sys_cond_create(cond_id=*0x%x, attr=*0x%x): IPC=0x%016x", cond_id, attr, ipc_key);
	}

	if (const auto error = lv2_obj::create<lv2_cond>(_attr.pshared, ipc_key, _attr.flags, [&]
	{
		return std::make_shared<lv2_cond>(
			ipc_key,
			_attr.name_u64,
			mutex_id,
			std::move(mutex));
	}))
	{
		return error;
	}

	ppu.check_state();
	*cond_id = idm::last_id();
	return CELL_OK;
}
// Destroys a condition variable; fails with EBUSY while threads are waiting on it.
// Decrements the owning mutex's cond_count on success.
error_code sys_cond_destroy(ppu_thread& ppu, u32 cond_id)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_destroy(cond_id=0x%x)", cond_id);

	const auto cond = idm::withdraw<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> CellError
	{
		std::lock_guard lock(cond.mutex->mutex);

		// Non-empty sleep queue: still in use
		if (atomic_storage<ppu_thread*>::load(cond.sq))
		{
			return CELL_EBUSY;
		}

		cond.mutex->cond_count--;
		lv2_obj::on_id_destroy(cond, cond.key);
		return {};
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (cond->key)
	{
		sys_cond.warning("sys_cond_destroy(cond_id=0x%x): IPC=0x%016x", cond_id, cond->key);
	}

	if (cond.ret)
	{
		return cond.ret;
	}

	return CELL_OK;
}
// Wakes at most one thread waiting on the condition variable, transferring it
// to the bound mutex's owner/wait chain. Retries the whole operation if this
// thread gets suspended mid-signal (to keep results deterministic).
error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);

	while (true)
	{
		if (ppu.test_stopped())
		{
			ppu.state += cpu_flag::again;
			return {};
		}

		bool finished = true;

		ppu.state += cpu_flag::wait;

		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
		{
			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				std::lock_guard lock(cond.mutex->mutex);

				if (ppu.state & cpu_flag::suspend)
				{
					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
					finished = false;
					return;
				}

				if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
				{
					if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
					{
						ppu.state += cpu_flag::again;
						return;
					}

					// TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
					if (cond.mutex->try_own(*cpu))
					{
						cond.awake(cpu);
					}
				}
			}
			else
			{
				// Nobody waiting: still synchronize against concurrent waiters
				cond.mutex->mutex.lock_unlock();

				if (ppu.state & cpu_flag::suspend)
				{
					finished = false;
				}
			}
		});

		if (!finished)
		{
			continue;
		}

		if (!cond)
		{
			return CELL_ESRCH;
		}

		return CELL_OK;
	}
}
// Wakes every thread waiting on the condition variable: all are moved onto the
// bound mutex; only the thread that actually obtains the mutex is awoken now.
// Retries the whole operation if this thread gets suspended mid-signal.
error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);

	while (true)
	{
		if (ppu.test_stopped())
		{
			ppu.state += cpu_flag::again;
			return {};
		}

		bool finished = true;

		ppu.state += cpu_flag::wait;

		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
		{
			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				std::lock_guard lock(cond.mutex->mutex);

				if (ppu.state & cpu_flag::suspend)
				{
					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
					finished = false;
					return;
				}

				// If any waiter must re-run its syscall, so must this one
				for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
				{
					if (cpu->state & cpu_flag::again)
					{
						ppu.state += cpu_flag::again;
						return;
					}
				}

				cpu_thread* result = nullptr;
				auto sq = cond.sq;
				atomic_storage<ppu_thread*>::release(cond.sq, nullptr);

				// Drain the sleep queue; exactly one waiter can own the mutex immediately
				while (const auto cpu = cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY))
				{
					if (cond.mutex->try_own(*cpu))
					{
						ensure(!std::exchange(result, cpu));
					}
				}

				if (result)
				{
					cond.awake(result);
				}
			}
			else
			{
				// Nobody waiting: still synchronize against concurrent waiters
				cond.mutex->mutex.lock_unlock();

				if (ppu.state & cpu_flag::suspend)
				{
					finished = false;
				}
			}
		});

		if (!finished)
		{
			continue;
		}

		if (!cond)
		{
			return CELL_ESRCH;
		}

		return CELL_OK;
	}
}
// Wakes one specific thread (by id) waiting on the condition variable.
// Returns ESRCH for unknown cond/thread ids, EPERM when the target thread is
// not currently waiting. Retries if this thread gets suspended mid-signal.
error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);

	while (true)
	{
		if (ppu.test_stopped())
		{
			ppu.state += cpu_flag::again;
			return {};
		}

		bool finished = true;

		ppu.state += cpu_flag::wait;

		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
		{
			// -1 maps to ESRCH: target thread does not exist
			if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id))
			{
				return -1;
			}

			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				std::lock_guard lock(cond.mutex->mutex);

				if (ppu.state & cpu_flag::suspend)
				{
					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
					finished = false;
					return 0;
				}

				for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
				{
					if (cpu->id == thread_id)
					{
						if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
						{
							ppu.state += cpu_flag::again;
							return 0;
						}

						ensure(cond.unqueue(cond.sq, cpu));

						if (cond.mutex->try_own(*cpu))
						{
							cond.awake(cpu);
						}

						return 1;
					}
				}
			}
			else
			{
				// Nobody waiting: still synchronize against concurrent waiters
				cond.mutex->mutex.lock_unlock();

				if (ppu.state & cpu_flag::suspend)
				{
					finished = false;
					return 0;
				}
			}

			return 0;
		});

		if (!finished)
		{
			continue;
		}

		if (!cond || cond.ret == -1)
		{
			return CELL_ESRCH;
		}

		if (!cond.ret)
		{
			// Target thread exists but was not waiting on this condvar
			return not_an_error(CELL_EPERM);
		}

		return CELL_OK;
	}
}
// Block the calling PPU thread on a condition variable, atomically releasing
// the associated mutex. On wakeup (signal or timeout) the mutex is re-acquired
// and its recursive lock count restored. Supports savestate suspend/resume.
error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_cond.trace("sys_cond_wait(cond_id=0x%x, timeout=%lld)", cond_id, timeout);

	// Further function result
	ppu.gpr[3] = CELL_OK;

	// Savestate side-channel used when the emulator suspends mid-wait
	auto& sstate = *ppu.optional_savestate_state;

	// Lambda return protocol: -1 = caller doesn't own the mutex (EPERM),
	// otherwise the mutex's saved recursive lock count
	const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond) -> s64
	{
		if (!ppu.loaded_from_savestate && atomic_storage<u32>::load(cond.mutex->control.raw().owner) != ppu.id)
		{
			return -1;
		}

		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(cond.mutex->mutex);

		const u64 syscall_state = sstate.try_read<u64>().second;
		sstate.clear();

		if (ppu.loaded_from_savestate)
		{
			// Bit 0 of the saved state: thread was sleeping on the mutex, not the cond
			if (syscall_state & 1)
			{
				// Mutex sleep
				ensure(!cond.mutex->try_own(ppu));
			}
			else
			{
				lv2_obj::emplace(cond.sq, &ppu);
			}

			cond.sleep(ppu, timeout);

			// Upper 32 bits hold the saved recursive lock count
			return static_cast<u32>(syscall_state >> 32);
		}

		// Register waiter
		lv2_obj::emplace(cond.sq, &ppu);

		// Unlock the mutex
		const u32 count = cond.mutex->lock_count.exchange(0);

		if (const auto cpu = cond.mutex->reown<ppu_thread>())
		{
			if (cpu->state & cpu_flag::again)
			{
				// New mutex owner is mid-suspend: back out and repeat the syscall
				ensure(cond.unqueue(cond.sq, &ppu));
				ppu.state += cpu_flag::again;
				return 0;
			}

			cond.mutex->append(cpu);
		}

		// Sleep current thread and schedule mutex waiter
		cond.sleep(ppu, timeout);

		// Save the recursive value
		return count;
	});

	if (!cond)
	{
		return CELL_ESRCH;
	}

	if (ppu.state & cpu_flag::again)
	{
		// Syscall will be re-executed (suspend path)
		return {};
	}

	if (cond.ret < 0)
	{
		// Caller did not own the mutex
		return CELL_EPERM;
	}

	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			// Emulator stopping: record where this thread sleeps for the savestate
			std::lock_guard lock(cond->mutex->mutex);

			bool mutex_sleep = false;
			bool cond_sleep = false;

			// Determine whether we are queued on the cond...
			for (auto cpu = atomic_storage<ppu_thread*>::load(cond->sq); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					cond_sleep = true;
					break;
				}
			}

			// ...or on the mutex
			for (auto cpu = atomic_storage<ppu_thread*>::load(cond->mutex->control.raw().sq); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					mutex_sleep = true;
					break;
				}
			}

			if (!cond_sleep && !mutex_sleep)
			{
				break;
			}

			// Pack sleep location (bit 0) and recursive count (high 32 bits)
			const u64 optional_syscall_state = u32{mutex_sleep} | (u64{static_cast<u32>(cond.ret)} << 32);
			sstate(optional_syscall_state);

			ppu.state += cpu_flag::again;
			return {};
		}

		// Brief spin in case the signal is imminent, before a full sleep
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				const u64 start_time = ppu.start_time;

				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				ppu.state += cpu_flag::wait;

				std::lock_guard lock(cond->mutex->mutex);

				// Try to cancel the waiting
				if (cond->unqueue(cond->sq, &ppu))
				{
					// TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
					ppu.gpr[3] = CELL_ETIMEDOUT;

					// Own or requeue
					if (cond->mutex->try_own(ppu))
					{
						break;
					}
				}
				else if (atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id)
				{
					// Already granted the mutex by a signaller
					break;
				}

				// Timed out on the cond but still must wait for the mutex (untimed)
				cond->mutex->sleep(ppu);
				ppu.start_time = start_time; // Restore start time because awake has been called
				timeout = 0;
				continue;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	// Verify ownership
	ensure(atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id);

	// Restore the recursive value
	cond->mutex->lock_count.release(static_cast<u32>(cond.ret));

	return not_an_error(ppu.gpr[3]);
}
| 11,107
|
C++
|
.cpp
| 461
| 20.277657
| 182
| 0.627684
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,343
|
sys_fs.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_fs.cpp
|
#include "stdafx.h"
#include "sys_sync.h"
#include "sys_fs.h"
#include "sys_memory.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/PPUThread.h"
#include "Crypto/unedat.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/VFS.h"
#include "Emu/vfs_config.h"
#include "Emu/IdManager.h"
#include "Emu/system_utils.hpp"
#include "Emu/Cell/lv2/sys_process.h"
#include <filesystem>
#include <span>
#include <shared_mutex>
LOG_CHANNEL(sys_fs);
// Static table of pre-defined PS3 mount points, chained via the last member
// (g_mp_sys_dev_root is the head; the lv2_fs_mount_info_map constructor walks
// this chain). The numeric fields appear to be sector size / sector count /
// block size — TODO confirm against lv2_fs_mount_point's declaration.
lv2_fs_mount_point g_mp_sys_dev_usb{"/dev_usb", "CELL_FS_FAT", "CELL_FS_IOS:USB_MASS_STORAGE", 512, 0x100, 4096, lv2_mp_flag::no_uid_gid};
lv2_fs_mount_point g_mp_sys_dev_dvd{"/dev_ps2disc", "CELL_FS_ISO9660", "CELL_FS_IOS:PATA1_BDVD_DRIVE", 2048, 0x100, 32768, lv2_mp_flag::read_only + lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_usb};
lv2_fs_mount_point g_mp_sys_dev_bdvd{"/dev_bdvd", "CELL_FS_ISO9660", "CELL_FS_IOS:PATA0_BDVD_DRIVE", 2048, 0x4D955, 65536, lv2_mp_flag::read_only + lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_dvd};
lv2_fs_mount_point g_mp_sys_dev_hdd1{"/dev_hdd1", "CELL_FS_FAT", "CELL_FS_UTILITY:HDD1", 512, 0x3FFFF8, 32768, lv2_mp_flag::no_uid_gid + lv2_mp_flag::cache, &g_mp_sys_dev_bdvd};
lv2_fs_mount_point g_mp_sys_dev_hdd0{"/dev_hdd0", "CELL_FS_UFS", "CELL_FS_UTILITY:HDD0", 512, 0x24FAEA98, 4096, {}, &g_mp_sys_dev_hdd1};
lv2_fs_mount_point g_mp_sys_dev_flash3{"/dev_flash3", "CELL_FS_FAT", "CELL_FS_IOS:BUILTIN_FLSH3", 512, 0x400, 8192, lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_hdd0}; // TODO confirm
lv2_fs_mount_point g_mp_sys_dev_flash2{"/dev_flash2", "CELL_FS_FAT", "CELL_FS_IOS:BUILTIN_FLSH2", 512, 0x8000, 8192, lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_flash3}; // TODO confirm
lv2_fs_mount_point g_mp_sys_dev_flash{"/dev_flash", "CELL_FS_FAT", "CELL_FS_IOS:BUILTIN_FLSH1", 512, 0x63E00, 8192, lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_flash2};
lv2_fs_mount_point g_mp_sys_host_root{"/host_root", "CELL_FS_DUMMYFS", "CELL_FS_DUMMY:/", 512, 0x100, 512, lv2_mp_flag::strict_get_block_size + lv2_mp_flag::no_uid_gid, &g_mp_sys_dev_flash};
lv2_fs_mount_point g_mp_sys_app_home{"/app_home", "CELL_FS_DUMMYFS", "CELL_FS_DUMMY:", 512, 0x100, 512, lv2_mp_flag::strict_get_block_size + lv2_mp_flag::no_uid_gid, &g_mp_sys_host_root};
lv2_fs_mount_point g_mp_sys_dev_root{"/", "CELL_FS_ADMINFS", "CELL_FS_ADMINFS:", 512, 0x100, 512, lv2_mp_flag::read_only + lv2_mp_flag::strict_get_block_size + lv2_mp_flag::no_uid_gid, &g_mp_sys_app_home};
// Sentinel mount point / mount info for "not found" results
lv2_fs_mount_point g_mp_sys_no_device{};
lv2_fs_mount_info g_mi_sys_not_found{}; // wrapper for &g_mp_sys_no_device
template<>
void fmt_class_string<lv2_file_type>::format(std::string& out, u64 arg)
{
	// Render an lv2_file_type enumerator as a human-readable name
	format_enum(out, arg, [](lv2_file_type value)
	{
		if (value == lv2_file_type::regular)
		{
			return "Regular file";
		}

		if (value == lv2_file_type::sdata)
		{
			return "SDATA";
		}

		if (value == lv2_file_type::edata)
		{
			return "EDATA";
		}

		return unknown;
	});
}
template<>
void fmt_class_string<lv2_file>::format(std::string& out, u64 arg)
{
	// Render an lv2_file descriptor for logging: type, virtual path, mode/flags
	// and position/size both humanized and in raw hex
	const auto& file = get_object(arg);

	// Humanize a byte count; umax marks "not available" (no backing file)
	auto get_size = [](u64 size) -> std::string
	{
		if (size == umax)
		{
			return "N/A";
		}
		std::string size_str;
		// Choose a unit bucket from the value's bit width (roughly powers of 1024);
		// anything with bit_width >= 30 falls through to GB
		switch (std::bit_width(size) / 10 * 10)
		{
		case 0: fmt::append(size_str, "%u", size); break;
		case 10: fmt::append(size_str, "%gKB", size / 1024.); break;
		case 20: fmt::append(size_str, "%gMB", size / (1024. * 1024)); break;
		default:
		case 30: fmt::append(size_str, "%gGB", size / (1024. * 1024 * 1024)); break;
		}
		return size_str;
	};

	// A closed/absent backing file reports umax ("N/A")
	const usz pos = file.file ? file.file.pos() : umax;
	const usz size = file.file ? file.file.size() : umax;
	fmt::append(out, u8"%s, “%s”, Mode: 0x%x, Flags: 0x%x, Pos/Size: %s/%s (0x%x/0x%x)", file.type, file.name.data(), file.mode, file.flags, get_size(pos), get_size(size), pos, size);
}
template<>
void fmt_class_string<lv2_dir>::format(std::string& out, u64 arg)
{
	// Render an lv2_dir descriptor: virtual path plus read-position / entry count
	const auto& dir = get_object(arg);

	const u64 total = dir.entries.size();
	const u64 current = std::min<u64>(dir.pos, total);

	fmt::append(out, u8"Directory, “%s”, Entries: %u/%u", dir.name.data(), current, total);
}
// Coarse write-permission check for a virtual path.
// The real PS3 has full unix permissions; this only shields the top-level HDD
// directories from rogue writes (e.g. NPUB30003 => NPUB30008).
bool has_fs_write_rights(std::string_view vpath)
{
	// VSH (root permissions) may write anywhere
	if (g_ps3_process_info.has_root_perm())
	{
		return true;
	}

	const std::string normalized = lv2_fs_object::get_normalized_path(vpath);
	const std::string_view parent = fs::get_parent_dir_view(normalized);

	const bool is_protected = parent == "/dev_hdd0" || parent == "/dev_hdd0/game";
	return !is_protected;
}
// Validate the MSELF container header at the start of the file.
// On success the file position is rewound to 0; on failure it is left as-is.
bool verify_mself(const fs::file& mself_file)
{
	FsMselfHeader header;

	const bool read_ok = mself_file.read<FsMselfHeader>(header);

	if (!read_ok)
	{
		sys_fs.error("verify_mself: Didn't read expected bytes for header.");
		return false;
	}

	// 0x4D534600 == "MSF\0"
	if (header.m_magic != 0x4D534600u)
	{
		sys_fs.error("verify_mself: Header magic is incorrect.");
		return false;
	}

	if (header.m_format_version != 1u)
	{
		sys_fs.error("verify_mself: Unexpected header format version.");
		return false;
	}

	// sanity check
	if (header.m_entry_size != sizeof(FsMselfEntry))
	{
		sys_fs.error("verify_mself: Unexpected header entry size.");
		return false;
	}

	// Rewind so subsequent readers see the file from the beginning
	mself_file.seek(0);
	return true;
}
lv2_fs_mount_info_map::lv2_fs_mount_info_map()
{
	// Walk the static mount-point chain and register every device that is
	// actually mapped in the VFS
	for (auto mp = &g_mp_sys_dev_root; mp; mp = mp->next) // Scan and keep track of pre-mounted devices
	{
		if (mp == &g_mp_sys_dev_usb)
		{
			// USB is special: up to 8 numbered sub-devices (/dev_usb000 .. /dev_usb007)
			for (int i = 0; i < 8; i++)
			{
				if (!vfs::get(fmt::format("%s%03d", mp->root, i)).empty())
				{
					add(fmt::format("%s%03d", mp->root, i), mp, fmt::format("%s%03d", mp->device, i), mp->file_system, false);
				}
			}
		}
		else if (mp == &g_mp_sys_dev_root || !vfs::get(mp->root).empty())
		{
			// The root mount is always registered; others only when mapped
			add(std::string(mp->root), mp, mp->device, mp->file_system, mp == &g_mp_sys_dev_flash); // /dev_flash is mounted in read only mode initially
		}
	}
}
lv2_fs_mount_info_map::~lv2_fs_mount_info_map()
{
	// Unmount everything on destruction; pass remove_from_map=false because we
	// are iterating the map itself
	for (const auto& [path, info] : map)
		vfs_unmount(path, false); // Do not remove the value from the map we are iterating over.
}
// Erase the mount entry with the exact mount path; returns whether it existed
bool lv2_fs_mount_info_map::remove(std::string_view path)
{
	const auto found = map.find(path);

	if (found == map.end())
	{
		return false;
	}

	map.erase(found);
	return true;
}
// Resolve the deepest registered mount entry whose path is a prefix of `path`.
// With no_cell_fs_path set, "CELL_FS_PATH:"-backed mounts are chased
// recursively to their real device. Optionally reports the matched mount path.
// Returns g_mi_sys_not_found when nothing matches.
const lv2_fs_mount_info& lv2_fs_mount_info_map::lookup(std::string_view path, bool no_cell_fs_path, std::string* mount_path) const
{
	if (path.starts_with("/"sv))
	{
		constexpr std::string_view cell_fs_path = "CELL_FS_PATH:"sv;
		const std::string normalized_path = lv2_fs_object::get_normalized_path(path);
		std::string_view parent_dir;
		u32 parent_level = 0;
		// Try the path itself (level 0), then each successive parent directory
		do
		{
			parent_dir = fs::get_parent_dir_view(normalized_path, parent_level++);
			if (const auto iterator = map.find(parent_dir); iterator != map.end())
			{
				// Only an exact "/" may match the root mount, never as a parent
				if (iterator->second == &g_mp_sys_dev_root && parent_level > 1)
					break;
				if (no_cell_fs_path && iterator->second.device.starts_with(cell_fs_path))
					return lookup(iterator->second.device.substr(cell_fs_path.size()), no_cell_fs_path, mount_path); // Recursively look up the parent mount info
				if (mount_path)
					*mount_path = iterator->first;
				return iterator->second;
			}
		} while (parent_dir.length() > 1); // Exit the loop when parent_dir == "/" or empty
	}
	return g_mi_sys_not_found;
}
// Copy up to `len` mount descriptions into `info`; returns the number written.
// With info == nullptr, returns the total number of mounts instead.
u64 lv2_fs_mount_info_map::get_all(CellFsMountInfo* info, u64 len) const
{
	if (!info)
	{
		return map.size();
	}

	u64 written = 0;

	for (const auto& [path, mount_info] : map)
	{
		if (written >= len)
		{
			break;
		}

		CellFsMountInfo& out = info[written];
		strcpy_trunc(out.mount_path, path);
		strcpy_trunc(out.filesystem, mount_info.file_system);
		strcpy_trunc(out.dev_name, mount_info.device);

		// Read-only mounts are flagged in the unknown field block
		if (mount_info.read_only)
		{
			out.unk[4] |= 0x10000000;
		}

		written++;
	}

	return written;
}
// Linear scan: true if any mount entry references the given device name
bool lv2_fs_mount_info_map::is_device_mounted(std::string_view device_name) const
{
	for (const auto& [path, info] : map)
	{
		if (info.device == device_name)
		{
			return true;
		}
	}

	return false;
}
// Unmount a virtual path. Simplefs mounts (backed by a single host file) have
// their backing file deleted first. Returns whether the VFS unmount succeeded.
bool lv2_fs_mount_info_map::vfs_unmount(std::string_view vpath, bool remove_from_map)
{
	const std::string local_path = vfs::get(vpath);

	// An unmapped path cannot be unmounted
	if (local_path.empty())
	{
		return false;
	}

	if (fs::is_file(local_path))
	{
		const bool removed = fs::remove_file(local_path);

		if (removed)
		{
			sys_fs.notice("Removed simplefs file \"%s\"", local_path);
		}
		else
		{
			sys_fs.error("Failed to remove simplefs file \"%s\"", local_path);
		}
	}

	const bool result = vfs::unmount(vpath);

	// Drop the bookkeeping entry unless the caller is iterating the map
	if (result && remove_from_map)
	{
		g_fxo->get<lv2_fs_mount_info_map>().remove(vpath);
	}

	return result;
}
// Canonicalize a path: collapse "." / ".." components and redundant
// separators; result never ends with '/' and is never empty (root is "/").
std::string lv2_fs_object::get_normalized_path(std::string_view path)
{
	std::string result = std::filesystem::path(path).lexically_normal().string();

#ifdef _WIN32
	// lexically_normal() emits backslashes on Windows; convert to forward slashes
	std::replace(result.begin(), result.end(), '\\', '/');
#endif

	// Strip a single trailing separator
	if (result.ends_with('/'))
	{
		result.pop_back();
	}

	if (result.empty())
	{
		return "/";
	}

	return result;
}
// Extract the first path component (the device root, e.g. "/dev_hdd0") from a
// path, with normalization so ".." tricks cannot escape the device.
std::string lv2_fs_object::get_device_root(std::string_view filename)
{
	std::string path = get_normalized_path(filename); // Prevent getting fooled by ".." trick such as "/dev_usb000/../dev_flash"
	if (const auto first = path.find_first_not_of("/"sv); first != umax)
	{
		// Cut at the separator that follows the first component
		if (const auto pos = path.substr(first).find_first_of("/"sv); pos != umax)
			path = path.substr(0, first + pos);
		// When first == 0, the unsigned wrap of (first - 1) becomes -1 as signed,
		// so max() clamps the start offset to 0
		path = path.substr(std::max<std::make_signed_t<usz>>(0, first - 1)); // Remove duplicate leading '/' while keeping only one
	}
	else
	{
		// Path consists solely of '/' characters (or is empty after normalization)
		path = path.substr(0, 1);
	}
	return path;
}
// Resolve a mount point from either a virtual path ("/dev_hdd0/...") or a
// device name ("CELL_FS_UTILITY:HDD0"). Optionally also resolves the host-side
// VFS path for the mount. Falls back to g_mp_sys_no_device when unmatched.
lv2_fs_mount_point* lv2_fs_object::get_mp(std::string_view filename, std::string* vfs_path)
{
	constexpr std::string_view cell_fs_path = "CELL_FS_PATH:"sv;
	const bool is_cell_fs_path = filename.starts_with(cell_fs_path);
	// "CELL_FS_PATH:" prefixes a nested virtual path; strip it before matching
	if (is_cell_fs_path)
		filename.remove_prefix(cell_fs_path.size());
	const bool is_path = filename.starts_with("/"sv);
	// mp_name holds the device root (path form) or the raw device name
	std::string mp_name = is_path ? get_device_root(filename) : std::string(filename);
	const auto check_mp = [&]()
	{
		for (auto mp = &g_mp_sys_dev_root; mp; mp = mp->next)
		{
			// Some devices are addressable under alternative IOS names
			const auto& device_alias_check = !is_path && (
				(mp == &g_mp_sys_dev_hdd0 && mp_name == "CELL_FS_IOS:PATA0_HDD_DRIVE"sv) ||
				(mp == &g_mp_sys_dev_hdd1 && mp_name == "CELL_FS_IOS:PATA1_HDD_DRIVE"sv) ||
				(mp == &g_mp_sys_dev_flash2 && mp_name == "CELL_FS_IOS:BUILTIN_FLASH"sv)); // TODO confirm
			if (mp == &g_mp_sys_dev_usb)
			{
				// USB matches by prefix: /dev_usbNNN (or device name with numeric suffix)
				if (mp_name.starts_with(is_path ? mp->root : mp->device))
				{
					if (!is_path)
						mp_name = fmt::format("%s%s", mp->root, mp_name.substr(mp->device.size()));
					return mp;
				}
			}
			else if ((is_path ? mp->root : mp->device) == mp_name || device_alias_check)
			{
				// Canonicalize mp_name to the path form for the vfs_path logic below
				if (!is_path)
					mp_name = mp->root;
				return mp;
			}
		}
		return &g_mp_sys_no_device; // Default fallback
	};
	const auto result = check_mp();
	if (vfs_path)
	{
		// Map the matched mount point to its configured host directory
		if (is_cell_fs_path)
			*vfs_path = vfs::get(filename);
		else if (result == &g_mp_sys_dev_hdd0)
			*vfs_path = g_cfg_vfs.get(g_cfg_vfs.dev_hdd0, rpcs3::utils::get_emu_dir());
		else if (result == &g_mp_sys_dev_hdd1)
			*vfs_path = g_cfg_vfs.get(g_cfg_vfs.dev_hdd1, rpcs3::utils::get_emu_dir());
		else if (result == &g_mp_sys_dev_usb)
			*vfs_path = g_cfg_vfs.get_device(g_cfg_vfs.dev_usb, mp_name, rpcs3::utils::get_emu_dir()).path;
		else if (result == &g_mp_sys_dev_bdvd)
			*vfs_path = g_cfg_vfs.get(g_cfg_vfs.dev_bdvd, rpcs3::utils::get_emu_dir());
		else if (result == &g_mp_sys_dev_dvd)
			*vfs_path = g_cfg_vfs.get(g_cfg_vfs.dev_bdvd, rpcs3::utils::get_emu_dir()); // For compatibility
		else if (result == &g_mp_sys_app_home)
			*vfs_path = g_cfg_vfs.get(g_cfg_vfs.app_home, rpcs3::utils::get_emu_dir());
		else if (result == &g_mp_sys_host_root && g_cfg.vfs.host_root)
			*vfs_path = "/";
		else if (result == &g_mp_sys_dev_flash)
			*vfs_path = g_cfg_vfs.get_dev_flash();
		else if (result == &g_mp_sys_dev_flash2)
			*vfs_path = g_cfg_vfs.get_dev_flash2();
		else if (result == &g_mp_sys_dev_flash3)
			*vfs_path = g_cfg_vfs.get_dev_flash3();
		else
			*vfs_path = {}; // No host mapping (e.g. unmatched device or disabled host_root)
		// Append the remainder of the virtual path below the device root
		if (is_path && !is_cell_fs_path && !vfs_path->empty())
			vfs_path->append(filename.substr(mp_name.size()));
	}
	return result;
}
// Construct from a virtual path: store the canonical name and resolve mount info
lv2_fs_object::lv2_fs_object(std::string_view filename)
	: name(get_name(filename))
	, mp(g_fxo->get<lv2_fs_mount_info_map>().lookup(name.data()))
{
}

// Savestate constructor: the name is deserialized, mount info re-resolved
lv2_fs_object::lv2_fs_object(utils::serial& ar, bool)
	: name(ar)
	, mp(g_fxo->get<lv2_fs_mount_info_map>().lookup(name.data()))
{
}
// Read `size` bytes from `file` into guest memory at `buf`.
// opt_pos == umax means "read at the file's current position"; otherwise an
// absolute offset is used (position-independent read). Returns bytes read.
u64 lv2_file::op_read(const fs::file& file, vm::ptr<void> buf, u64 size, u64 opt_pos)
{
	// NOTE(review): this fast-path predicate compares a 28-bit-shifted address
	// with an (offset + size) sum, and `region >> 28` on an already-shifted
	// value — looks suspicious; verify against the intended "same 256MB region
	// and region is safe" check before relying on it
	if (u64 region = buf.addr() >> 28, region_end = (buf.addr() & 0xfff'ffff) + (size & 0xfff'ffff); region == region_end && ((region >> 28) == 0 || region >= 0xC))
	{
		// Optimize reads from safe memory
		return (opt_pos == umax ? file.read(buf.get_ptr(), size) : file.read_at(opt_pos, buf.get_ptr(), size));
	}
	// Copy data from intermediate buffer (avoid passing vm pointer to a native API)
	std::vector<uchar> local_buf(std::min<u64>(size, 65536));
	u64 result = 0;
	while (result < size)
	{
		const u64 block = std::min<u64>(size - result, local_buf.size());
		const u64 nread = (opt_pos == umax ? file.read(local_buf.data(), block) : file.read_at(opt_pos + result, local_buf.data(), block));
		std::memcpy(static_cast<uchar*>(buf.get_ptr()) + result, local_buf.data(), nread);
		result += nread;
		if (nread < block)
		{
			// Short read: EOF or error — stop
			break;
		}
	}
	return result;
}
// Write `size` bytes from guest memory at `buf` to `file`, staging through a
// host-side buffer (native file APIs must not receive a vm pointer directly).
// Returns the number of bytes actually written.
u64 lv2_file::op_write(const fs::file& file, vm::cptr<void> buf, u64 size)
{
	// Chunk size is capped at 64 KiB
	std::vector<uchar> staging(std::min<u64>(size, 65536));

	u64 total = 0;

	while (total < size)
	{
		const u64 chunk = std::min<u64>(size - total, staging.size());
		std::memcpy(staging.data(), static_cast<const uchar*>(buf.get_ptr()) + total, chunk);

		const u64 written = file.write(+staging.data(), chunk);
		total += written;

		// Short write (e.g. disk full): stop early
		if (written < chunk)
		{
			break;
		}
	}

	return total;
}
// Savestate deserialization: re-open the file on disk (decrypting SDATA/EDATA
// when necessary) or restore an in-memory snapshot, then restore the position.
lv2_file::lv2_file(utils::serial& ar)
	: lv2_fs_object(ar, false)
	, mode(ar)
	, flags(ar)
	, type(ar)
{
	[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_fs);
	ar(lock);
	// Reconstruct the open() "arg" blob for encrypted types
	be_t<u64> arg = 0;
	u64 size = 0;
	switch (type)
	{
	case lv2_file_type::regular: break;
	case lv2_file_type::sdata: arg = 0x18000000010, size = 8; break; // TODO: Fix
	case lv2_file_type::edata: arg = 0x2, size = 8; break;
	}
	const std::string retrieve_real = ar.pop<std::string>();
	// Version 2+ savestates carry the one-time EDATA key
	if (type == lv2_file_type::edata && version >= 2)
	{
		ar(g_fxo->get<loaded_npdrm_keys>().one_time_key);
	}
	open_result_t res = lv2_file::open(retrieve_real, flags & CELL_FS_O_ACCMODE, mode, size ? &arg : nullptr, size);
	file = std::move(res.file);
	real_path = std::move(res.real_path);
	// Restore the NPDRM descriptor count; the one-time key is consumed
	g_fxo->get<loaded_npdrm_keys>().npdrm_fds.raw() += type != lv2_file_type::regular;
	g_fxo->get<loaded_npdrm_keys>().one_time_key = {};
	if (ar.pop<bool>()) // see lv2_file::save in_mem
	{
		// The savestate embedded the full file contents; restore from memory
		const fs::stat_t stat = ar;
		std::vector<u8> buf(stat.size);
		ar(std::span<u8>(buf.data(), buf.size()));
		file = fs::make_stream<std::vector<u8>>(std::move(buf), stat);
	}
	if (!file)
	{
		sys_fs.error("Failed to load \'%s\' file for savestates (res=%s, vpath=\'%s\', real-path=\'%s\', type=%s, flags=0x%x)", name.data(), res.error, retrieve_real, real_path, type, flags);
		// Skip the serialized file position we cannot apply
		ar.pos += sizeof(u64);
		ensure(!!g_cfg.savestate.state_inspection_mode);
		return;
	}
	else
	{
		sys_fs.success("Loaded file descriptor \'%s\' file for savestates (vpath=\'%s\', type=%s, flags=0x%x, id=%d)", name.data(), retrieve_real, type, flags, idm::last_id());
	}
	// Restore the file position
	file.seek(ar);
}
// Savestate serialization: store descriptor metadata, and embed the full file
// contents when the on-disk file no longer matches the open handle (deleted or
// replaced while open — UNIX semantics).
void lv2_file::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_fs);
	ar(name, mode, flags, type, lock, ensure(vfs::retrieve(real_path), FN(!x.empty())));
	if (type == lv2_file_type::edata)
	{
		// Persist the EDATA decryption key alongside the descriptor
		auto file_ptr = file.release();
		ar(static_cast<EDATADecrypter*>(file_ptr.get())->get_key());
		file.reset(std::move(file_ptr));
	}
	if (!mp.read_only && flags & CELL_FS_O_ACCMODE)
	{
		// Ensure accurate timestamps and content on disk
		file.sync(); // For cellGameContentPermit atomicity
	}
	// UNIX allows deletion of files while descriptors are still opened
	// descriptors shall keep the data in memory in this case
	const bool in_mem = [&]()
	{
		if (mp.read_only)
		{
			return false;
		}
		fs::file test{real_path};
		if (!test)
		{
			if (fs::is_file(real_path + ".66600"))
			{
				// May be a split-files descriptor, don't even bother
				return false;
			}
			return true;
		}
		// Compare host file identity: a mismatch means the path was replaced
		fs::file_id test_s = test.get_id();
		fs::file_id file_s = file.get_id();
		return !test_s.is_coherent_with(file_s);
	}();
	ar(in_mem);
	if (in_mem)
	{
		fs::stat_t stats = file.get_stat();
		sys_fs.error("Saving \'%s\' LV2 file descriptor in memory! (exists=%s, type=%s, flags=0x%x, size=0x%x)", name.data(), fs::is_file(real_path), type, flags, stats.size);
		// Remember where the stat block lives so it can be patched on short read
		const usz patch_stats_pos = ar.seek_end();
		ar(stats);
		const usz old_end = ar.pad_from_end(stats.size);
		if (usz read_size = file.read_at(0, &ar.data[old_end], stats.size); read_size != stats.size)
		{
			// File shrank while saving: trim the blob and patch the stat block
			ensure(read_size < stats.size);
			sys_fs.error("Read less than expected! (new-size=0x%x)", read_size);
			stats.size = read_size;
			ar.data.resize(old_end + stats.size);
			write_to_ptr<fs::stat_t>(&ar.data[patch_stats_pos], stats);
		}
	}
	// Always persist the current file position last (see the loading ctor)
	ar(file.pos());
}
// Savestate deserialization: restore the snapshotted directory listing and
// the read position
lv2_dir::lv2_dir(utils::serial& ar)
	: lv2_fs_object(ar, false)
	, entries([&]
	{
		// Entry count is stored as a variable-length integer
		std::vector<fs::dir_entry> entries;
		u64 size = 0;
		ar.deserialize_vle(size);
		entries.resize(size);
		for (auto& entry : entries)
		{
			ar(entry.name, static_cast<fs::stat_t&>(entry));
		}
		return entries;
	}())
	, pos(ar)
{
}
// Savestate serialization: mirror of the lv2_dir(utils::serial&) constructor —
// name, variable-length entry count, entries, then read position
void lv2_dir::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_fs);
	ar(name);
	ar.serialize_vle(entries.size());
	for (auto& entry : entries)
	{
		ar(entry.name, static_cast<const fs::stat_t&>(entry));
	}
	ar(pos);
}
// Savestate constructor: save() is bidirectional (utils::serial reads or
// writes depending on its mode), so loading simply reuses it
loaded_npdrm_keys::loaded_npdrm_keys(utils::serial& ar)
{
	save(ar);
}
// Serialize the registered NPDRM key ring: position counter, then only the
// slots actually in use (the ring may not be full)
void loaded_npdrm_keys::save(utils::serial& ar)
{
	ar(dec_keys_pos);
	ar(std::span(dec_keys, std::min<usz>(std::size(dec_keys), dec_keys_pos)));
}
// Read-only window over another lv2_file, shifted by a fixed byte offset.
// Shares the underlying fs::file (keeps the lv2_file alive via shared_ptr) but
// maintains its own position; all writes are rejected.
struct lv2_file::file_view : fs::file_base
{
	const std::shared_ptr<lv2_file> m_file; // keeps the viewed descriptor alive
	const u64 m_off;                        // fixed offset of this view
	u64 m_pos;                              // view-local read position
	explicit file_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
		: m_file(_file)
		, m_off(offset)
		, m_pos(0)
	{
	}
	~file_view() override
	{
	}
	fs::stat_t get_stat() override
	{
		fs::stat_t stat = m_file->file.get_stat();
		// TODO: Check this on realhw
		//stat.size = utils::sub_saturate<u64>(stat.size, m_off);
		// Views are never writable
		stat.is_writable = false;
		return stat;
	}
	bool trunc(u64) override
	{
		// Read-only: truncation always fails
		return false;
	}
	u64 read(void* buffer, u64 size) override
	{
		// Sequential read advances the view-local position
		const u64 result = file_view::read_at(m_pos, buffer, size);
		m_pos += result;
		return result;
	}
	u64 read_at(u64 offset, void* buffer, u64 size) override
	{
		// Translate the view offset to the underlying file's coordinates
		return m_file->file.read_at(m_off + offset, buffer, size);
	}
	u64 write(const void*, u64) override
	{
		// Read-only: writes report 0 bytes written
		return 0;
	}
	u64 seek(s64 offset, fs::seek_mode whence) override
	{
		const s64 new_pos =
			whence == fs::seek_set ? offset :
			whence == fs::seek_cur ? offset + m_pos :
			whence == fs::seek_end ? offset + size() : -1;
		if (new_pos < 0)
		{
			// Negative target (or invalid whence): report invalid argument
			fs::g_tls_error = fs::error::inval;
			return -1;
		}
		m_pos = new_pos;
		return m_pos;
	}
	u64 size() override
	{
		// Size of the view: underlying size minus the offset, clamped at 0
		return utils::sub_saturate<u64>(m_file->file.size(), m_off);
	}
	fs::file_id get_id() override
	{
		// Derive identity from the underlying file, mixing in the view offset
		fs::file_id id = m_file->file.get_id();
		be_t<u64> off = m_off;
		const auto ptr = reinterpret_cast<u8*>(&off);
		id.data.insert(id.data.end(), ptr, ptr + sizeof(off));
		id.type.insert(0, "lv2_file::file_view: "sv);
		return id;
	}
};
// Produce a read-only fs::file facade over _file, offset by `offset` bytes
fs::file lv2_file::make_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
{
	auto view = std::make_unique<lv2_file::file_view>(_file, offset);

	fs::file result;
	result.reset(std::move(view));
	return result;
}
// Copy a guest C-string, validating its length and (optionally) that it is an
// absolute path. Returns {error, string}; the error is empty on success.
std::pair<CellError, std::string> translate_to_str(vm::cptr<char> ptr, bool is_path = true)
{
	constexpr usz max_length = CELL_FS_MAX_FS_PATH_LENGTH + 1;

	std::string result;

	const bool read_ok = vm::read_string(ptr.addr(), max_length, result, true);

	if (!read_ok)
	{
		// Hit invalid guest memory before finding the terminator
		return {CELL_EFAULT, std::move(result)};
	}

	if (result.size() == max_length)
	{
		// No terminator within the permitted length
		return {CELL_ENAMETOOLONG, {}};
	}

	if (is_path && !result.starts_with("/"sv))
	{
		// Paths must be absolute
		return {CELL_ENOENT, std::move(result)};
	}

	return {{}, std::move(result)};
}
// Debug/test syscall: copies the virtual path of the descriptor referenced by
// *arg3 into the guest buffer `buf` (NUL-terminated, truncated to buf_size).
// Only the (arg1=6, arg2=0, arg4=4) variant is understood.
error_code sys_fs_test(ppu_thread&, u32 arg1, u32 arg2, vm::ptr<u32> arg3, u32 arg4, vm::ptr<char> buf, u32 buf_size)
{
	sys_fs.trace("sys_fs_test(arg1=0x%x, arg2=0x%x, arg3=*0x%x, arg4=0x%x, buf=*0x%x, buf_size=0x%x)", arg1, arg2, arg3, arg4, buf, buf_size);

	if (arg1 != 6 || arg2 != 0 || arg4 != sizeof(u32))
	{
		sys_fs.todo("sys_fs_test: unknown arguments (arg1=0x%x, arg2=0x%x, arg3=*0x%x, arg4=0x%x)", arg1, arg2, arg3, arg4);
	}

	if (!arg3)
	{
		return CELL_EFAULT;
	}

	const auto file = idm::get<lv2_fs_object>(*arg3);

	if (!file)
	{
		return CELL_EBADF;
	}

	// Copy the descriptor's name; stop early at its terminator
	for (u32 i = 0; i < buf_size; i++)
	{
		if (!(buf[i] = file->name[i]))
		{
			return CELL_OK;
		}
	}

	// Name was truncated: force NUL termination. Guard against buf_size == 0 —
	// previously `buf[buf_size - 1]` wrapped around and wrote far outside the buffer.
	if (buf_size)
	{
		buf[buf_size - 1] = 0;
	}

	return CELL_OK;
}
// Open a host file for an LV2 descriptor: translates CELL_FS_O_* flags to host
// open modes, enforces read-only mount policy, supports split files
// (name.66600..66699), MSELF verification, and SDATA/EDATA decryption.
// Returns {error} on failure or {no error, file} on success.
lv2_file::open_raw_result_t lv2_file::open_raw(const std::string& local_path, s32 flags, s32 /*mode*/, lv2_file_type type, const lv2_fs_mount_info& mp)
{
	// TODO: other checks for path
	if (fs::is_dir(local_path))
	{
		return {CELL_EISDIR};
	}
	// Translate the access mode bits
	bs_t<fs::open_mode> open_mode{};
	switch (flags & CELL_FS_O_ACCMODE)
	{
	case CELL_FS_O_RDONLY: open_mode += fs::read; break;
	case CELL_FS_O_WRONLY: open_mode += fs::write; break;
	case CELL_FS_O_RDWR: open_mode += fs::read + fs::write; break;
	default: break;
	}
	if (mp.read_only)
	{
		// Writing an existing file on a read-only mount is a permission error
		if ((flags & CELL_FS_O_ACCMODE) != CELL_FS_O_RDONLY && fs::is_file(local_path))
		{
			return {CELL_EPERM};
		}
	}
	if (flags & CELL_FS_O_CREAT)
	{
		open_mode += fs::create;
		if (flags & CELL_FS_O_EXCL)
		{
			open_mode += fs::excl;
		}
	}
	if (flags & CELL_FS_O_TRUNC)
	{
		open_mode += fs::trunc;
	}
	if (flags & CELL_FS_O_MSELF)
	{
		open_mode = fs::read;
		// mself can be mself or mself | rdonly
		if (flags & ~(CELL_FS_O_MSELF | CELL_FS_O_RDONLY))
		{
			open_mode = {};
		}
	}
	if (flags & CELL_FS_O_UNK)
	{
		sys_fs.warning("lv2_file::open() called with CELL_FS_O_UNK flag enabled. FLAGS: %#o", flags);
	}
	if (mp.read_only)
	{
		// Deactivate mutating flags on read-only FS
		open_mode = fs::read;
	}
	// Tests have shown that invalid combinations get resolved internally (without exceptions), but that would complicate code with minimal accuracy gains.
	// For example, no games are known to try and call TRUNCATE | APPEND | RW, or APPEND | READ, which currently would cause an exception.
	if (flags & ~(CELL_FS_O_UNK | CELL_FS_O_ACCMODE | CELL_FS_O_CREAT | CELL_FS_O_TRUNC | CELL_FS_O_APPEND | CELL_FS_O_EXCL | CELL_FS_O_MSELF))
	{
		open_mode = {}; // error
	}
	if ((flags & CELL_FS_O_ACCMODE) == CELL_FS_O_ACCMODE)
	{
		open_mode = {}; // error
	}
	if (!open_mode)
	{
		fmt::throw_exception("lv2_file::open_raw(): Invalid or unimplemented flags: %#o", flags);
	}
	// Serialize file operations on this mount point
	std::lock_guard lock(mp->mutex);
	fs::file file(local_path, open_mode);
	if (!file && open_mode == fs::read && fs::g_tls_error == fs::error::noent)
	{
		// Try to gather split file (TODO)
		std::vector<fs::file> fragments;
		// Fragments are numbered name.66600 through name.66699, contiguously
		for (u32 i = 66600; i <= 66699; i++)
		{
			if (fs::file fragment{fmt::format("%s.%u", local_path, i)})
			{
				fragments.emplace_back(std::move(fragment));
			}
			else
			{
				break;
			}
		}
		if (!fragments.empty())
		{
			file = fs::make_gather(std::move(fragments));
		}
	}
	if (!file)
	{
		if (mp.read_only)
		{
			// Failed to create file on read-only FS (file doesn't exist)
			if (flags & CELL_FS_O_ACCMODE && flags & CELL_FS_O_CREAT)
			{
				return {CELL_EPERM};
			}
		}
		if (open_mode & fs::excl && fs::g_tls_error == fs::error::exist)
		{
			return {CELL_EEXIST};
		}
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent: return {CELL_ENOENT};
		default: sys_fs.error("lv2_file::open(): unknown error %s", error);
		}
		return {CELL_EIO};
	}
	if (flags & CELL_FS_O_MSELF && !verify_mself(file))
	{
		return {CELL_ENOTMSELF};
	}
	// Wrap encrypted file types in a decrypting stream
	if (type >= lv2_file_type::sdata)
	{
		// check for sdata
		switch (type)
		{
		case lv2_file_type::sdata:
		{
			// check if the file has the NPD header, or else assume its not encrypted
			u32 magic;
			file.read<u32>(magic);
			file.seek(0);
			if (magic == "NPD\0"_u32)
			{
				auto sdata_file = std::make_unique<EDATADecrypter>(std::move(file));
				if (!sdata_file->ReadHeader())
				{
					return {CELL_EFSSPECIFIC};
				}
				file.reset(std::move(sdata_file));
			}
			break;
		}
		// edata
		case lv2_file_type::edata:
		{
			// check if the file has the NPD header, or else assume its not encrypted
			u32 magic;
			file.read<u32>(magic);
			file.seek(0);
			if (magic == "NPD\0"_u32)
			{
				auto& edatkeys = g_fxo->get<loaded_npdrm_keys>();
				const u64 init_pos = edatkeys.dec_keys_pos;
				const auto& dec_keys = edatkeys.dec_keys;
				const u64 max_i = std::min<u64>(std::size(dec_keys), init_pos);
				// A one-time key (if set) takes priority over the key ring
				if (edatkeys.one_time_key)
				{
					auto edata_file = std::make_unique<EDATADecrypter>(std::move(file), edatkeys.one_time_key);
					edatkeys.one_time_key = {};
					if (!edata_file->ReadHeader())
					{
						// Read failure
						return {CELL_EFSSPECIFIC};
					}
					file.reset(std::move(edata_file));
					break;
				}
				// Otherwise try the registered keys, newest first
				for (u64 i = 0;; i++)
				{
					if (i == max_i)
					{
						// Run out of keys to try
						return {CELL_EFSSPECIFIC};
					}
					// Try all registered keys
					auto edata_file = std::make_unique<EDATADecrypter>(std::move(file), dec_keys[(init_pos - i - 1) % std::size(dec_keys)].load());
					if (!edata_file->ReadHeader())
					{
						// Prepare file for the next iteration
						file = std::move(edata_file->m_edata_file);
						continue;
					}
					file.reset(std::move(edata_file));
					break;
				}
			}
			break;
		}
		default: break;
		}
	}
	return {.error = {}, .file = std::move(file)};
}
// Open a file by virtual path: resolves the mount, enforces basic access
// policy, detects the SDATA/EDATA type from the `arg` blob and delegates the
// host-side work to open_raw().
lv2_file::open_result_t lv2_file::open(std::string_view vpath, s32 flags, s32 mode, const void* arg, u64 size)
{
	if (vpath.empty())
	{
		return {CELL_ENOENT};
	}
	std::string path;
	std::string local_path = vfs::get(vpath, nullptr, &path);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	if (mp == &g_mp_sys_dev_root)
	{
		// "/" itself (and direct children of the admin FS root) cannot be opened as files
		return {CELL_EISDIR, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	// File creation in protected top-level HDD dirs is denied for non-VSH
	if (flags & CELL_FS_O_CREAT && !has_fs_write_rights(vpath) && !fs::is_dir(local_path))
	{
		return {CELL_EACCES};
	}
	lv2_file_type type = lv2_file_type::regular;
	if (size == 8)
	{
		// see lv2_file::open_raw
		switch (*static_cast<const be_t<u64, 1>*>(arg))
		{
		case 0x18000000010: type = lv2_file_type::sdata; break;
		case 0x2: type = lv2_file_type::edata; break;
		default:
			break;
		}
	}
	auto [error, file] = open_raw(local_path, flags, mode, type, mp);
	return {.error = error, .ppath = std::move(path), .real_path = std::move(local_path), .file = std::move(file), .type = type};
}
// Syscall: open a file and register it as an LV2 file descriptor.
// Writes the new fd id to *fd on success.
error_code sys_fs_open(ppu_thread& ppu, vm::cptr<char> path, s32 flags, vm::ptr<u32> fd, s32 mode, vm::cptr<void> arg, u64 size)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_open(path=%s, flags=%#o, fd=*0x%x, mode=%#o, arg=*0x%x, size=0x%llx)", path, flags, fd, mode, arg, size);
	// Validate and copy the guest path string
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	auto [error, ppath, real, file, type] = lv2_file::open(vpath, flags, mode, arg.get_ptr(), size);
	if (error)
	{
		if (error == CELL_EEXIST)
		{
			// Expected outcome of O_CREAT|O_EXCL on an existing file: not logged as error
			return not_an_error(CELL_EEXIST);
		}
		// /dev_hdd1 (cache) misses are routine, log them less severely
		return {g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath) == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, error, path};
	}
	if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&ppath = ppath, &file = file, mode, flags, &real = real, &type = type]() -> std::shared_ptr<lv2_file>
	{
		std::shared_ptr<lv2_file> result;
		// NPDRM (sdata/edata) descriptors are limited to 16 simultaneously open
		if (type >= lv2_file_type::sdata && !g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
		{
			return result;
		}
		result = std::make_shared<lv2_file>(ppath, std::move(file), mode, flags, real, type);
		sys_fs.warning("sys_fs_open(): fd=%u, %s", idm::last_id(), *result);
		return result;
	}))
	{
		ppu.check_state();
		*fd = id;
		return CELL_OK;
	}
	// Out of file descriptors
	return {CELL_EMFILE, path};
}
// Syscall: read up to nbytes from a descriptor into guest memory, storing the
// byte count in *nread. nread is written best-effort on error paths.
error_code sys_fs_read(ppu_thread& ppu, u32 fd, vm::ptr<void> buf, u64 nbytes, vm::ptr<u64> nread)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.trace("sys_fs_read(fd=%d, buf=*0x%x, nbytes=0x%llx, nread=*0x%x)", fd, buf, nbytes, nread);
	if (!nread)
	{
		return CELL_EFAULT;
	}
	if (!buf)
	{
		nread.try_write(0);
		return CELL_EFAULT;
	}
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	// Reading a write-only descriptor fails (except for zero-length reads)
	if (!file || (nbytes && file->flags & CELL_FS_O_WRONLY))
	{
		nread.try_write(0); // nread writing is allowed to fail, error code is unchanged
		return CELL_EBADF;
	}
	if (!nbytes)
	{
		// Whole function is skipped, only EBADF and EBUSY are checked
		if (file->lock == 1)
		{
			nread.try_write(0);
			return CELL_EBUSY;
		}
		ppu.check_state();
		*nread = 0;
		return CELL_OK;
	}
	// Serialize against other file operations on this mount point
	std::unique_lock lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	// lock == 2 marks a descriptor invalidated by the emulator (I/O error state)
	if (file->lock == 2)
	{
		nread.try_write(0);
		return CELL_EIO;
	}
	const u64 read_bytes = file->op_read(buf, nbytes);
	// Zero bytes read before EOF indicates a decryption/data failure
	const bool failure = !read_bytes && file->file.pos() < file->file.size();
	lock.unlock();
	ppu.check_state();
	*nread = read_bytes;
	if (failure)
	{
		// EDATA corruption perhaps
		return CELL_EFSSPECIFIC;
	}
	return CELL_OK;
}
// Syscall: write nbytes from guest memory to a descriptor, storing the byte
// count in *nwrite. nwrite is written best-effort on error paths.
error_code sys_fs_write(ppu_thread& ppu, u32 fd, vm::cptr<void> buf, u64 nbytes, vm::ptr<u64> nwrite)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.trace("sys_fs_write(fd=%d, buf=*0x%x, nbytes=0x%llx, nwrite=*0x%x)", fd, buf, nbytes, nwrite);
	if (!nwrite)
	{
		return CELL_EFAULT;
	}
	if (!buf)
	{
		nwrite.try_write(0);
		return CELL_EFAULT;
	}
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	// Writing requires an access mode with write permission (except zero-length writes)
	if (!file || (nbytes && !(file->flags & CELL_FS_O_ACCMODE)))
	{
		nwrite.try_write(0); // nwrite writing is allowed to fail, error code is unchanged
		return CELL_EBADF;
	}
	if (!nbytes)
	{
		// Whole function is skipped, only EBADF and EBUSY are checked
		if (file->lock == 1)
		{
			nwrite.try_write(0);
			return CELL_EBUSY;
		}
		ppu.check_state();
		*nwrite = 0;
		return CELL_OK;
	}
	if (file->type != lv2_file_type::regular)
	{
		// Fixed: the format string has four specifiers but the fd argument was
		// missing, shifting the path into FD=%d and leaving path=%s unfilled
		sys_fs.error("%s type: Writing %u bytes to FD=%d (path=%s)", file->type, nbytes, fd, file->name.data());
	}
	if (file->mp.read_only)
	{
		nwrite.try_write(0);
		return CELL_EROFS;
	}
	// Serialize against other file operations on this mount point
	std::unique_lock lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	if (file->lock)
	{
		// lock == 2 marks an invalidated descriptor (I/O error), lock == 1 is busy
		if (file->lock == 2)
		{
			nwrite.try_write(0);
			return CELL_EIO;
		}
		nwrite.try_write(0);
		return CELL_EBUSY;
	}
	if (file->flags & CELL_FS_O_APPEND)
	{
		// O_APPEND: every write goes to the end of the file
		file->file.seek(0, fs::seek_end);
	}
	const u64 written = file->op_write(buf, nbytes);
	lock.unlock();
	ppu.check_state();
	*nwrite = written;
	return CELL_OK;
}
// Close an open file descriptor: sync if writable, release any memory
// container allocation, close the host handle, then withdraw the ID.
// Note: EBUSY is reported after closing when the fd was lock==1.
error_code sys_fs_close(ppu_thread& ppu, u32 fd)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	if (!file)
	{
		return {CELL_EBADF, fd};
	}
	std::string FD_state_log;
	// Only build the log prefix if the warning level is enabled
	if (sys_fs.warning)
	{
		FD_state_log = fmt::format("sys_fs_close(fd=%u)", fd);
	}
	{
		std::lock_guard lock(file->mp->mutex);
		if (!file->file)
		{
			sys_fs.warning("%s", FD_state_log);
			return {CELL_EBADF, fd};
		}
		if (!(file->mp.read_only && file->mp->flags & lv2_mp_flag::cache) && file->flags & CELL_FS_O_ACCMODE)
		{
			// Special: Ensure temporary directory for gamedata writes will remain on disk before final gamedata commitment
			file->file.sync(); // For cellGameContentPermit atomicity
		}
		if (!FD_state_log.empty())
		{
			sys_fs.warning("%s: %s", FD_state_log, *file);
		}
		// Free memory associated with fd if any
		if (file->ct_id && file->ct_used)
		{
			auto& default_container = g_fxo->get<default_sys_fs_container>();
			std::lock_guard lock(default_container.mutex);
			if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
			{
				ct->free(file->ct_used);
				if (default_container.id == file->ct_id)
				{
					default_container.used -= file->ct_used;
				}
			}
		}
		// Ensure Host file handle won't be kept open after this syscall
		file->file.close();
	}
	// Withdraw the ID; sdata/edata descriptors also decrement the npdrm fd counter
	ensure(idm::withdraw<lv2_fs_object, lv2_file>(fd, [&](lv2_file& _file) -> CellError
	{
		if (_file.type >= lv2_file_type::sdata)
		{
			g_fxo->get<loaded_npdrm_keys>().npdrm_fds--;
		}
		return {};
	}));
	if (file->lock == 1)
	{
		return {CELL_EBUSY, fd};
	}
	return CELL_OK;
}
// Open a directory for reading: snapshot its entries (plus synthesized
// split-file candidates and virtual mount points) into an lv2_dir object.
error_code sys_fs_opendir(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u32> fd)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_opendir(path=%s, fd=*0x%x)", path, fd);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	std::string processed_path;
	// ext receives virtual sub-mount names that should appear as directories
	std::vector<std::string> ext;
	const std::string local_path = vfs::get(vpath, &ext, &processed_path);
	processed_path += "/";
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	if (local_path.empty() && ext.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	// TODO: other checks for path
	if (fs::is_file(local_path))
	{
		return {CELL_ENOTDIR, path};
	}
	std::unique_lock lock(mp->mutex);
	const fs::dir dir(local_path);
	if (!dir)
	{
		switch (const auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			// Missing host dir is only an error when there are no virtual entries to expose
			if (ext.empty())
			{
				return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
			}
			break;
		}
		default:
		{
			sys_fs.error("sys_fs_opendir(): unknown error %s", error);
			return {CELL_EIO, path};
		}
		}
	}
	// Build directory as a vector of entries
	std::vector<fs::dir_entry> data;
	if (dir)
	{
		// Add real directories
		while (dir.read(data.emplace_back()))
		{
			// Preprocess entries
			data.back().name = vfs::unescape(data.back().name);
			if (!data.back().is_directory && data.back().name == "."sv)
			{
				// Files hidden from emulation
				data.resize(data.size() - 1);
				continue;
			}
			// Add additional entries for split file candidates (while ends with .66600)
			while (data.back().name.ends_with(".66600"))
			{
				data.emplace_back(data.back()).name.resize(data.back().name.size() - 6);
			}
		}
		// Drop the trailing default-constructed entry from the failed read
		data.resize(data.size() - 1);
	}
	else
	{
		// Directory exists only virtually: synthesize "." and ".."
		data.emplace_back().name += '.';
		data.back().is_directory = true;
		data.emplace_back().name = "..";
		data.back().is_directory = true;
	}
	// Add mount points (TODO)
	for (auto&& ex : ext)
	{
		data.emplace_back().name = std::move(ex);
		data.back().is_directory = true;
	}
	// Sort files, keeping . and ..
	// NOTE(review): begin() + 2 assumes the first two entries are always "." and ".." — verify for all host dir listings
	std::stable_sort(data.begin() + 2, data.end(), FN(x.name < y.name));
	// Remove duplicates
	data.erase(std::unique(data.begin(), data.end(), FN(x.name == y.name)), data.end());
	if (const u32 id = idm::make<lv2_fs_object, lv2_dir>(processed_path, std::move(data)))
	{
		lock.unlock();
		ppu.check_state();
		*fd = id;
		return CELL_OK;
	}
	// Out of file descriptors
	return CELL_EMFILE;
}
// Read the next entry from an open directory descriptor.
// When the listing is exhausted, *nread is 0 but *dir is still written
// (zeroed header on most partitions), mirroring observed hardware behavior.
error_code sys_fs_readdir(ppu_thread& ppu, u32 fd, vm::ptr<CellFsDirent> dir, vm::ptr<u64> nread)
{
	ppu.state += cpu_flag::wait;
	sys_fs.warning("sys_fs_readdir(fd=%d, dir=*0x%x, nread=*0x%x)", fd, dir, nread);
	if (!dir || !nread)
	{
		return CELL_EFAULT;
	}
	const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
	if (!directory)
	{
		return CELL_EBADF;
	}
	ppu.check_state();
	auto* info = directory->dir_read();
	u64 nread_to_write = 0;
	if (info)
	{
		nread_to_write = sizeof(CellFsDirent);
	}
	else
	{
		// It does actually write polling the last entry. Seems consistent across HDD0 and HDD1 (TODO: check more partitions)
		info = &directory->entries.back();
		nread_to_write = 0;
	}
	CellFsDirent dir_write{};
	dir_write.d_type = info->is_directory ? CELL_FS_TYPE_DIRECTORY : CELL_FS_TYPE_REGULAR;
	dir_write.d_namlen = u8(std::min<usz>(info->name.size(), CELL_FS_MAX_FS_FILE_NAME_LENGTH));
	strcpy_trunc(dir_write.d_name, info->name);
	// TODO: Check more partitions (HDD1 is known to differ in actual filesystem implementation)
	if (directory->mp != &g_mp_sys_dev_hdd1 && nread_to_write == 0)
	{
		// First 3 bytes are being set to 0 here
		dir_write.d_type = 0;
		dir_write.d_namlen = 0;
		dir_write.d_name[0] = '\0';
	}
	*dir = dir_write;
	// Write after dir
	*nread = nread_to_write;
	return CELL_OK;
}
// Close a directory descriptor previously opened by sys_fs_opendir.
error_code sys_fs_closedir(ppu_thread& ppu, u32 fd)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);

	sys_fs.warning("sys_fs_closedir(fd=%d)", fd);

	// Withdraw the lv2_dir instance; removal fails if the fd is invalid
	// or does not refer to a directory object
	const bool removed = idm::remove<lv2_fs_object, lv2_dir>(fd);

	if (removed)
	{
		return CELL_OK;
	}

	return CELL_EBADF;
}
// Fill a CellFsStat for the given virtual path. Handles the "/" pseudo-root,
// split files (.66600 fragments) and per-mount uid/gid and block-size rules.
error_code sys_fs_stat(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<CellFsStat> sb)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_stat(path=%s, sb=*0x%x)", path, sb);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	if (mp == &g_mp_sys_dev_root)
	{
		// Hardcoded values for the virtual root directory
		sb->mode = CELL_FS_S_IFDIR | 0711;
		sb->uid = -1;
		sb->gid = -1;
		sb->atime = -1;
		sb->mtime = -1;
		sb->ctime = -1;
		sb->size = 258;
		sb->blksize = 512;
		return CELL_OK;
	}
	if (local_path.empty())
	{
		// This syscall can be used by games and VSH to test the presence of dev_usb000 ~ dev_usb127
		// Thus there is no need to fuss about CELL_ENOTMOUNTED in this case
		return {sys_fs.warning, CELL_ENOTMOUNTED, path};
	}
	std::unique_lock lock(mp->mutex);
	fs::stat_t info{};
	if (!fs::get_stat(local_path, info))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			// Try to analyse split file (TODO)
			// Sum the sizes of consecutive .66601.. fragments
			u64 total_size = 0;
			for (u32 i = 66601; i <= 66699; i++)
			{
				if (fs::get_stat(fmt::format("%s.%u", local_path, i), info) && !info.is_directory)
				{
					total_size += info.size;
				}
				else
				{
					break;
				}
			}
			// Use attributes from the first fragment (consistently with sys_fs_open+fstat)
			if (fs::get_stat(local_path + ".66600", info) && !info.is_directory)
			{
				// Success
				info.size += total_size;
				break;
			}
			return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
		}
		default:
		{
			sys_fs.error("sys_fs_stat(): unknown error %s", error);
			return {CELL_EIO, path};
		}
		}
	}
	lock.unlock();
	// check_state must precede the guest memory writes
	ppu.check_state();
	s32 mode = info.is_directory ? CELL_FS_S_IFDIR | 0777 : CELL_FS_S_IFREG | 0666;
	if (mp.read_only)
	{
		// Remove write permissions
		mode &= ~0222;
	}
	sb->mode = mode;
	sb->uid = mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
	sb->gid = mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
	sb->atime = info.atime;
	sb->mtime = info.mtime;
	sb->ctime = info.ctime;
	sb->size = info.is_directory ? mp->block_size : info.size;
	sb->blksize = mp->block_size;
	return CELL_OK;
}
// Fill a CellFsStat from an open file descriptor's host file handle.
error_code sys_fs_fstat(ppu_thread& ppu, u32 fd, vm::ptr<CellFsStat> sb)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_fstat(fd=%d, sb=*0x%x)", fd, sb);
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	if (!file)
	{
		return CELL_EBADF;
	}
	std::unique_lock lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	// lock == 2 means the host file is in a forcibly invalidated state
	if (file->lock == 2)
	{
		return CELL_EIO;
	}
	const fs::stat_t info = file->file.get_stat();
	lock.unlock();
	// check_state must precede the guest memory writes
	ppu.check_state();
	s32 mode = info.is_directory ? CELL_FS_S_IFDIR | 0777 : CELL_FS_S_IFREG | 0666;
	if (file->mp.read_only)
	{
		// Remove write permissions
		mode &= ~0222;
	}
	sb->mode = mode;
	sb->uid = file->mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
	sb->gid = file->mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
	sb->atime = info.atime;
	sb->mtime = info.mtime;
	sb->ctime = info.ctime; // ctime may be incorrect
	sb->size = info.size;
	sb->blksize = file->mp->block_size;
	return CELL_OK;
}
// Stub: hard links are not implemented; reports success unconditionally.
error_code sys_fs_link(ppu_thread&, vm::cptr<char> from, vm::cptr<char> to)
{
	sys_fs.todo("sys_fs_link(from=%s, to=%s)", from, to);
	return CELL_OK;
}
// Create a directory at the given virtual path on a writable mount.
error_code sys_fs_mkdir(ppu_thread& ppu, vm::cptr<char> path, s32 mode)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_mkdir(path=%s, mode=%#o)", path, mode);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	// The virtual root always "exists"
	if (mp == &g_mp_sys_dev_root)
	{
		return {CELL_EEXIST, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	if (mp.read_only)
	{
		return {CELL_EROFS, path};
	}
	// Creating a new entry additionally requires write rights on the path
	if (!fs::exists(local_path) && !has_fs_write_rights(path.get_ptr()))
	{
		return {CELL_EACCES, path};
	}
	std::lock_guard lock(mp->mutex);
	if (!fs::create_dir(local_path))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
		}
		case fs::error::exist:
		{
			return {sys_fs.warning, CELL_EEXIST, path};
		}
		default: sys_fs.error("sys_fs_mkdir(): unknown error %s", error);
		}
		return {CELL_EIO, path}; // ???
	}
	sys_fs.notice("sys_fs_mkdir(): directory %s created", path);
	return CELL_OK;
}
// Rename a file or directory. Both paths must resolve to the same writable
// mount point (no cross-device renames).
error_code sys_fs_rename(ppu_thread& ppu, vm::cptr<char> from, vm::cptr<char> to)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_rename(from=%s, to=%s)", from, to);
	const auto [from_error, vfrom] = translate_to_str(from);
	if (from_error)
	{
		return {from_error, vfrom};
	}
	const auto [to_error, vto] = translate_to_str(to);
	if (to_error)
	{
		return {to_error, vto};
	}
	const std::string local_from = vfs::get(vfrom);
	const std::string local_to = vfs::get(vto);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vfrom);
	const auto& mp_to = g_fxo->get<lv2_fs_mount_info_map>().lookup(vto);
	if (mp == &g_mp_sys_dev_root || mp_to == &g_mp_sys_dev_root)
	{
		return CELL_EPERM;
	}
	if (local_from.empty() || local_to.empty())
	{
		return CELL_ENOTMOUNTED;
	}
	// Renames across different mount points are rejected
	if (mp != mp_to)
	{
		return CELL_EXDEV;
	}
	if (mp.read_only)
	{
		return CELL_EROFS;
	}
	// Done in vfs::host::rename
	//std::lock_guard lock(mp->mutex);
	if (!vfs::host::rename(local_from, local_to, mp.mp, false))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent: return {CELL_ENOENT, from};
		case fs::error::exist: return {CELL_EEXIST, to};
		default: sys_fs.error("sys_fs_rename(): unknown error %s", error);
		}
		return {CELL_EIO, from}; // ???
	}
	sys_fs.notice("sys_fs_rename(): %s renamed to %s", from, to);
	return CELL_OK;
}
// Remove an (empty) directory from a writable mount.
error_code sys_fs_rmdir(ppu_thread& ppu, vm::cptr<char> path)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_rmdir(path=%s)", path);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	if (mp == &g_mp_sys_dev_root)
	{
		return {CELL_EPERM, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	if (mp.read_only)
	{
		return {CELL_EROFS, path};
	}
	// Removing an existing directory requires write rights on the path
	if (fs::is_dir(local_path) && !has_fs_write_rights(path.get_ptr()))
	{
		return {CELL_EACCES};
	}
	std::lock_guard lock(mp->mutex);
	if (!fs::remove_dir(local_path))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent: return {CELL_ENOENT, path};
		case fs::error::notempty: return {CELL_ENOTEMPTY, path};
		default: sys_fs.error("sys_fs_rmdir(): unknown error %s", error);
		}
		return {CELL_EIO, path}; // ???
	}
	sys_fs.notice("sys_fs_rmdir(): directory %s removed", path);
	return CELL_OK;
}
// Delete a regular file from a writable mount (directories are rejected).
error_code sys_fs_unlink(ppu_thread& ppu, vm::cptr<char> path)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_unlink(path=%s)", path);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	std::string mount_path = fs::get_parent_dir(vpath); // Use its parent directory as fallback
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath, true, &mount_path);
	if (mp == &g_mp_sys_dev_root)
	{
		return {CELL_EISDIR, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	if (fs::is_dir(local_path))
	{
		return {CELL_EISDIR, path};
	}
	if (mp.read_only)
	{
		return {CELL_EROFS, path};
	}
	std::lock_guard lock(mp->mutex);
	// vfs::host::unlink handles host-specific removal details relative to the mount root
	if (!vfs::host::unlink(local_path, vfs::get(mount_path)))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
		}
		default: sys_fs.error("sys_fs_unlink(): unknown error %s", error);
		}
		return {CELL_EIO, path}; // ???
	}
	sys_fs.notice("sys_fs_unlink(): file %s deleted", path);
	return CELL_OK;
}
// Stub: access checks are not implemented; reports success unconditionally.
error_code sys_fs_access(ppu_thread&, vm::cptr<char> path, s32 mode)
{
	sys_fs.todo("sys_fs_access(path=%s, mode=%#o)", path, mode);
	return CELL_OK;
}
// Generic fs control syscall: dispatches on the opcode to a variety of
// sub-operations (positioned read/write, sdata open, container assignment,
// USB device queries, directory batch read, etc.). Unknown opcodes fall
// through to a logged error and CELL_OK.
// Fix: the non-regular-file warning in the 0x8000000b path was missing the
// `fd` argument for its "FD=%d" placeholder (4 specifiers, 3 arguments).
error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32 _size)
{
	ppu.state += cpu_flag::wait;
	sys_fs.trace("sys_fs_fcntl(fd=%d, op=0x%x, arg=*0x%x, size=0x%x)", fd, op, _arg, _size);
	switch (op)
	{
	case 0x80000004: // Unknown
	{
		if (_size > 4)
		{
			return CELL_EINVAL;
		}
		const auto arg = vm::static_ptr_cast<u32>(_arg);
		*arg = 0;
		break;
	}
	case 0x80000006: // cellFsAllocateFileAreaByFdWithInitialData
	{
		break;
	}
	case 0x80000007: // cellFsAllocateFileAreaByFdWithoutZeroFill
	{
		break;
	}
	case 0x80000008: // cellFsChangeFileSizeByFdWithoutAllocation
	{
		break;
	}
	case 0x8000000a: // cellFsReadWithOffset
	case 0x8000000b: // cellFsWriteWithOffset
	{
		lv2_obj::sleep(ppu);
		const auto arg = vm::static_ptr_cast<lv2_file_op_rw>(_arg);
		if (_size < arg.size())
		{
			return CELL_EINVAL;
		}
		const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
		if (!file)
		{
			return CELL_EBADF;
		}
		// Access mode validation mirrors sys_fs_read/sys_fs_write
		if (op == 0x8000000a && file->flags & CELL_FS_O_WRONLY)
		{
			return CELL_EBADF;
		}
		if (op == 0x8000000b && !(file->flags & CELL_FS_O_ACCMODE))
		{
			return CELL_EBADF;
		}
		if (op == 0x8000000b && file->flags & CELL_FS_O_APPEND)
		{
			return CELL_EBADF;
		}
		if (op == 0x8000000b && file->mp.read_only)
		{
			return CELL_EROFS;
		}
		if (op == 0x8000000b && file->type != lv2_file_type::regular && arg->size)
		{
			// Diagnostic only: writing to special (e.g. sdata) files is unusual
			sys_fs.error("%s type: Writing %u bytes to FD=%d (path=%s)", file->type, arg->size, fd, file->name.data());
		}
		std::unique_lock wlock(file->mp->mutex, std::defer_lock);
		std::shared_lock rlock(file->mp->mutex, std::defer_lock);
		if (op == 0x8000000b)
		{
			// Writer lock
			wlock.lock();
		}
		else
		{
			// Reader lock (not needing exclusivity in this special case because the state should not change)
			rlock.lock();
		}
		if (!file->file)
		{
			return CELL_EBADF;
		}
		if (file->lock == 2)
		{
			return CELL_EIO;
		}
		if (op == 0x8000000b && file->lock)
		{
			return CELL_EBUSY;
		}
		// For writes: seek to the requested offset and restore the old position afterwards
		u64 old_pos = umax;
		const u64 op_pos = arg->offset;
		if (op == 0x8000000b)
		{
			old_pos = file->file.pos();
			file->file.seek(op_pos);
		}
		arg->out_size = op == 0x8000000a
			? file->op_read(arg->buf, arg->size, op_pos)
			: file->op_write(arg->buf, arg->size);
		if (op == 0x8000000b)
		{
			ensure(old_pos == file->file.seek(old_pos));
		}
		// TODO: EDATA corruption detection
		arg->out_code = CELL_OK;
		return CELL_OK;
	}
	case 0x80000009: // cellFsSdataOpenByFd
	{
		lv2_obj::sleep(ppu);
		const auto arg = vm::static_ptr_cast<lv2_file_op_09>(_arg);
		if (_size < arg.size())
		{
			return CELL_EINVAL;
		}
		const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
		if (!file)
		{
			return {CELL_EBADF, "fd=%u", fd};
		}
		sys_fs.warning("sys_fs_fcntl(0x80000009): fd=%d, arg->offset=0x%x, size=0x%x (file: %s)", fd, arg->offset, _size, *file);
		std::lock_guard lock(file->mp->mutex);
		if (!file->file)
		{
			return {CELL_EBADF, "fd=%u", fd};
		}
		// Wrap the existing fd in an EDATA decrypter view starting at arg->offset
		auto sdata_file = std::make_unique<EDATADecrypter>(lv2_file::make_view(file, arg->offset));
		if (!sdata_file->ReadHeader())
		{
			return {CELL_EFSSPECIFIC, "fd=%u", fd};
		}
		fs::file stream;
		stream.reset(std::move(sdata_file));
		if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&file = *file, &stream = stream]() -> std::shared_ptr<lv2_file>
		{
			// The number of simultaneously open npdrm descriptors is capped at 16
			if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
			{
				return nullptr;
			}
			return std::make_shared<lv2_file>(file, std::move(stream), file.mode, CELL_FS_O_RDONLY, file.real_path, lv2_file_type::sdata);
		}))
		{
			arg->out_code = CELL_OK;
			arg->out_fd = id;
			return CELL_OK;
		}
		// Out of file descriptors
		return CELL_EMFILE;
	}
	case 0xc0000002: // cellFsGetFreeSize (TODO)
	{
		lv2_obj::sleep(ppu);
		const auto arg = vm::static_ptr_cast<lv2_file_c0000002>(_arg);
		const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup("/dev_hdd0");
		arg->out_block_size = mp->block_size;
		arg->out_block_count = (40ull * 1024 * 1024 * 1024 - 1) / mp->block_size; // Read explanation in cellHddGameCheck
		return CELL_OK;
	}
	case 0xc0000003: // cellFsUtilitySetFakeSize
	{
		break;
	}
	case 0xc0000004: // cellFsUtilityGetFakeSize
	{
		break;
	}
	case 0xc0000006: // Unknown
	{
		const auto arg = vm::static_ptr_cast<lv2_file_c0000006>(_arg);
		if (arg->size != 0x20u)
		{
			sys_fs.error("sys_fs_fcntl(0xc0000006): invalid size (0x%x)", arg->size);
			break;
		}
		if (arg->_x4 != 0x10u || arg->_x8 != 0x18u)
		{
			sys_fs.error("sys_fs_fcntl(0xc0000006): invalid args (0x%x, 0x%x)", arg->_x4, arg->_x8);
			break;
		}
		// Load mountpoint (doesn't support multiple // at the start)
		std::string_view vpath{arg->name.get_ptr(), arg->name_size};
		sys_fs.notice("sys_fs_fcntl(0xc0000006): %s", vpath);
		// Check only mountpoint
		vpath = vpath.substr(0, vpath.find_first_of("/", 1));
		// Some mountpoints seem to be handled specially
		if (false)
		{
			// TODO: /dev_hdd1, /dev_usb000, /dev_flash
			//arg->out_code = CELL_OK;
			//arg->out_id = 0x1b5;
		}
		arg->out_code = CELL_ENOTSUP;
		arg->out_id = 0;
		return CELL_OK;
	}
	case 0xc0000007: // cellFsArcadeHddSerialNumber
	{
		const auto arg = vm::static_ptr_cast<lv2_file_c0000007>(_arg);
		arg->out_code = CELL_OK;
		if (const auto size = arg->model_size; size > 0)
			strcpy_trunc(std::span(arg->model.get_ptr(), size),
				fmt::format("%-*s", size - 1, g_cfg.sys.hdd_model.to_string())); // Example: "TOSHIBA MK3265GSX H                     "
		if (const auto size = arg->serial_size; size > 0)
			strcpy_trunc(std::span(arg->serial.get_ptr(), size),
				fmt::format("%*s", size - 1, g_cfg.sys.hdd_serial.to_string())); // Example: "            0A1B2C3D4"
		else
			return CELL_EFAULT; // CELL_EFAULT is returned only when arg->serial_size == 0
		return CELL_OK;
	}
	case 0xc0000008: // cellFsSetDefaultContainer, cellFsSetIoBuffer, cellFsSetIoBufferFromDefaultContainer
	{
		// Allocates memory from a container/default container to a specific fd or default IO processing
		const auto arg = vm::static_ptr_cast<lv2_file_c0000008>(_arg);
		auto& default_container = g_fxo->get<default_sys_fs_container>();
		std::lock_guard def_container_lock(default_container.mutex);
		if (fd == 0xFFFFFFFF)
		{
			// No check on container is done when setting default container
			default_container.id = arg->size ? ::narrow<u32>(arg->container_id) : 0u;
			default_container.cap = arg->size;
			default_container.used = 0;
			arg->out_code = CELL_OK;
			return CELL_OK;
		}
		auto file = idm::get<lv2_fs_object, lv2_file>(fd);
		if (!file)
		{
			return CELL_EBADF;
		}
		// Release any allocation previously attached to this fd
		if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
		{
			ct->free(file->ct_used);
			if (default_container.id == file->ct_id)
			{
				default_container.used -= file->ct_used;
			}
		}
		file->ct_id = 0;
		file->ct_used = 0;
		// Aligns on lower bound
		u32 actual_size = arg->size - (arg->size % ((arg->page_type & CELL_FS_IO_BUFFER_PAGE_SIZE_64KB) ? 0x10000 : 0x100000));
		if (!actual_size)
		{
			arg->out_code = CELL_OK;
			return CELL_OK;
		}
		u32 new_container_id = arg->container_id == 0xFFFFFFFF ? default_container.id : ::narrow<u32>(arg->container_id);
		if (default_container.id == new_container_id && (default_container.used + actual_size) > default_container.cap)
		{
			return CELL_ENOMEM;
		}
		const auto ct = idm::get<lv2_memory_container>(new_container_id, [&](lv2_memory_container& ct) -> CellError
		{
			if (!ct.take(actual_size))
			{
				return CELL_ENOMEM;
			}
			return {};
		});
		if (!ct)
		{
			return CELL_ESRCH;
		}
		if (ct.ret)
		{
			return ct.ret;
		}
		if (default_container.id == new_container_id)
		{
			default_container.used += actual_size;
		}
		file->ct_id = new_container_id;
		file->ct_used = actual_size;
		arg->out_code = CELL_OK;
		return CELL_OK;
	}
	case 0xc0000015: // USB Vid/Pid query
	case 0xc000001c: // USB Vid/Pid/Serial query
	{
		const auto arg = vm::static_ptr_cast<lv2_file_c0000015>(_arg);
		const bool with_serial = op == 0xc000001c;
		if (arg->size != (with_serial ? sizeof(lv2_file_c000001c) : sizeof(lv2_file_c0000015)))
		{
			sys_fs.error("sys_fs_fcntl(0x%08x): invalid size (0x%x)", op, arg->size);
			break;
		}
		if (arg->_x4 != 0x10u || arg->_x8 != 0x18u)
		{
			sys_fs.error("sys_fs_fcntl(0x%08x): invalid args (0x%x, 0x%x)", op, arg->_x4, arg->_x8);
			break;
		}
		std::string_view vpath{arg->path.get_ptr(), arg->path_size};
		if (vpath.size() == 0)
			return CELL_ENOMEM;
		// Trim trailing '\0'
		if (const auto trim_pos = vpath.find('\0'); trim_pos != umax)
			vpath.remove_suffix(vpath.size() - trim_pos);
		arg->out_code = CELL_ENOTMOUNTED; // arg->out_code is set to CELL_ENOTMOUNTED on real hardware when the device doesn't exist or when the device isn't USB
		if (!vfs::get(vpath).empty())
		{
			if (const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath, true); mp == &g_mp_sys_dev_usb)
			{
				const cfg::device_info device = g_cfg_vfs.get_device(g_cfg_vfs.dev_usb, fmt::format("%s%s", mp->root, mp.device.substr(mp->device.size())));
				const auto usb_ids = device.get_usb_ids();
				std::tie(arg->vendorID, arg->productID) = usb_ids;
				if (with_serial)
				{
					const auto arg_c000001c = vm::static_ptr_cast<lv2_file_c000001c>(_arg);
					const std::u16string serial = utf8_to_utf16(device.serial); // Serial needs to be encoded to utf-16 BE
					std::copy_n(serial.begin(), std::min(serial.size(), sizeof(arg_c000001c->serial) / sizeof(u16)), arg_c000001c->serial);
				}
				arg->out_code = CELL_OK;
				sys_fs.trace("sys_fs_fcntl(0x%08x): found device \"%s\" (vid=0x%04x, pid=0x%04x, serial=\"%s\")", op, mp.device, usb_ids.first, usb_ids.second, device.serial);
			}
		}
		return CELL_OK;
	}
	case 0xc0000016: // ps2disc_8160A811
	{
		break;
	}
	case 0xc000001a: // cellFsSetDiscReadRetrySetting, 5731DF45
	{
		[[maybe_unused]] const auto arg = vm::static_ptr_cast<lv2_file_c000001a>(_arg);
		return CELL_OK;
	}
	case 0xc0000021: // 9FDBBA89
	{
		break;
	}
	case 0xe0000000: // Unknown (cellFsGetBlockSize)
	{
		break;
	}
	case 0xe0000001: // Unknown (cellFsStat)
	{
		break;
	}
	case 0xe0000003: // Unknown
	{
		break;
	}
	case 0xe0000004: // Unknown
	{
		break;
	}
	case 0xe0000005: // Unknown (cellFsMkdir)
	{
		break;
	}
	case 0xe0000006: // Unknown
	{
		break;
	}
	case 0xe0000007: // Unknown
	{
		break;
	}
	case 0xe0000008: // Unknown (cellFsAclRead)
	{
		break;
	}
	case 0xe0000009: // Unknown (cellFsAccess)
	{
		break;
	}
	case 0xe000000a: // Unknown (E3D28395)
	{
		break;
	}
	case 0xe000000b: // Unknown (cellFsRename, FF29F478)
	{
		break;
	}
	case 0xe000000c: // Unknown (cellFsTruncate)
	{
		break;
	}
	case 0xe000000d: // Unknown (cellFsUtime)
	{
		break;
	}
	case 0xe000000e: // Unknown (cellFsAclWrite)
	{
		break;
	}
	case 0xe000000f: // Unknown (cellFsChmod)
	{
		break;
	}
	case 0xe0000010: // Unknown (cellFsChown)
	{
		break;
	}
	case 0xe0000011: // Unknown
	{
		break;
	}
	case 0xe0000012: // cellFsGetDirectoryEntries
	{
		lv2_obj::sleep(ppu);
		const auto arg = vm::static_ptr_cast<lv2_file_op_dir::dir_info>(_arg);
		if (_size < arg.size())
		{
			return CELL_EINVAL;
		}
		const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
		if (!directory)
		{
			return CELL_EBADF;
		}
		ppu.check_state();
		u32 read_count = 0;
		// NOTE: This function is actually capable of reading only one entry at a time
		if (const u32 max = arg->max)
		{
			const auto arg_ptr = +arg->ptr;
			if (auto* info = directory->dir_read())
			{
				auto& entry = arg_ptr[read_count++];
				s32 mode = info->is_directory ? CELL_FS_S_IFDIR | 0777 : CELL_FS_S_IFREG | 0666;
				if (directory->mp.read_only)
				{
					// Remove write permissions
					mode &= ~0222;
				}
				entry.attribute.mode = mode;
				entry.attribute.uid = directory->mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
				entry.attribute.gid = directory->mp->flags & lv2_mp_flag::no_uid_gid ? -1 : 0;
				entry.attribute.atime = info->atime;
				entry.attribute.mtime = info->mtime;
				entry.attribute.ctime = info->ctime;
				entry.attribute.size = info->size;
				entry.attribute.blksize = directory->mp->block_size;
				entry.entry_name.d_type = info->is_directory ? CELL_FS_TYPE_DIRECTORY : CELL_FS_TYPE_REGULAR;
				entry.entry_name.d_namlen = u8(std::min<usz>(info->name.size(), CELL_FS_MAX_FS_FILE_NAME_LENGTH));
				strcpy_trunc(entry.entry_name.d_name, info->name);
			}
			// Apparently all this function does to additional buffer elements is to zeroize them
			std::memset(arg_ptr.get_ptr() + read_count, 0, (max - read_count) * arg->ptr.size());
		}
		arg->_size = read_count;
		arg->_code = CELL_OK;
		return CELL_OK;
	}
	case 0xe0000015: // Unknown
	{
		break;
	}
	case 0xe0000016: // cellFsAllocateFileAreaWithInitialData
	{
		break;
	}
	case 0xe0000017: // cellFsAllocateFileAreaWithoutZeroFill
	{
		const auto arg = vm::static_ptr_cast<lv2_file_e0000017>(_arg);
		if (_size < arg->size || arg->_x4 != 0x10u || arg->_x8 != 0x20u)
		{
			return CELL_EINVAL;
		}
		// Implemented via truncation of the target path
		arg->out_code = sys_fs_truncate(ppu, arg->file_path, arg->file_size);
		return CELL_OK;
	}
	case 0xe0000018: // cellFsChangeFileSizeWithoutAllocation
	{
		break;
	}
	case 0xe0000019: // Unknown
	{
		break;
	}
	case 0xe000001b: // Unknown
	{
		break;
	}
	case 0xe000001d: // Unknown
	{
		break;
	}
	case 0xe000001e: // Unknown
	{
		break;
	}
	case 0xe000001f: // Unknown
	{
		break;
	}
	case 0xe0000020: // Unknown
	{
		break;
	}
	case 0xe0000025: // cellFsSdataOpenWithVersion
	{
		const auto arg = vm::static_ptr_cast<lv2_file_e0000025>(_arg);
		if (arg->size != 0x30u)
		{
			sys_fs.error("sys_fs_fcntl(0xe0000025): invalid size (0x%x)", arg->size);
			break;
		}
		if (arg->_x4 != 0x10u || arg->_x8 != 0x28u)
		{
			sys_fs.error("sys_fs_fcntl(0xe0000025): invalid args (0x%x, 0x%x)", arg->_x4, arg->_x8);
			break;
		}
		std::string_view vpath{ arg->name.get_ptr(), arg->name_size };
		vpath = vpath.substr(0, vpath.find_first_of('\0'));
		sys_fs.notice("sys_fs_fcntl(0xe0000025): %s", vpath);
		be_t<u64> sdata_identifier = 0x18000000010;
		lv2_file::open_result_t result = lv2_file::open(vpath, 0, 0, &sdata_identifier, 8);
		if (result.error)
		{
			return result.error;
		}
		if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&]() -> std::shared_ptr<lv2_file>
		{
			// The number of simultaneously open npdrm descriptors is capped at 16
			if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
			{
				return nullptr;
			}
			return std::make_shared<lv2_file>(result.ppath, std::move(result.file), 0, 0, std::move(result.real_path), lv2_file_type::sdata);
		}))
		{
			arg->out_code = CELL_OK;
			arg->fd = id;
			return CELL_OK;
		}
		// Out of file descriptors
		return CELL_EMFILE;
	}
	}
	sys_fs.error("sys_fs_fcntl(): Unknown operation 0x%08x (fd=%d, arg=*0x%x, size=0x%x)", op, fd, _arg, _size);
	return CELL_OK;
}
// Reposition the file offset of an open descriptor and report the new
// position through *pos.
error_code sys_fs_lseek(ppu_thread& ppu, u32 fd, s64 offset, s32 whence, vm::ptr<u64> pos)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.trace("sys_fs_lseek(fd=%d, offset=0x%llx, whence=0x%x, pos=*0x%x)", fd, offset, whence, pos);
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	if (!file)
	{
		return CELL_EBADF;
	}
	std::unique_lock lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	// Unsigned comparison trick: rejects negative whence as well as >= 3
	if (whence + 0u >= 3)
	{
		return {CELL_EINVAL, whence};
	}
	const u64 result = file->file.seek(offset, static_cast<fs::seek_mode>(whence));
	if (result == umax)
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::inval: return {CELL_EINVAL, "fd=%u, offset=0x%x, whence=%d", fd, offset, whence};
		default: sys_fs.error("sys_fs_lseek(): unknown error %s", error);
		}
		return CELL_EIO; // ???
	}
	lock.unlock();
	// check_state must precede the guest memory write
	ppu.check_state();
	*pos = result;
	return CELL_OK;
}
// Flush buffered data of a writable file descriptor to the host filesystem.
// Fix: trace message was misspelled "sys_fs_fdadasync".
error_code sys_fs_fdatasync(ppu_thread& ppu, u32 fd)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.trace("sys_fs_fdatasync(fd=%d)", fd);
	// The fd must exist and have some write access mode
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	if (!file || !(file->flags & CELL_FS_O_ACCMODE))
	{
		return CELL_EBADF;
	}
	std::lock_guard lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	file->file.sync();
	return CELL_OK;
}
// Flush a writable file descriptor's buffers to the host filesystem.
error_code sys_fs_fsync(ppu_thread& ppu, u32 fd)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);

	sys_fs.trace("sys_fs_fsync(fd=%d)", fd);

	const auto handle = idm::get<lv2_fs_object, lv2_file>(fd);

	// The descriptor must exist and carry some write access mode
	if (!handle || !(handle->flags & CELL_FS_O_ACCMODE))
	{
		return CELL_EBADF;
	}

	std::lock_guard lock(handle->mp->mutex);

	// The host handle may have been invalidated (e.g. by unmount)
	if (!handle->file)
	{
		return CELL_EBADF;
	}

	handle->file.sync();
	return CELL_OK;
}
// Report the mount point's sector/block sizes and the fd's open flags.
error_code sys_fs_fget_block_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> sector_size, vm::ptr<u64> block_size, vm::ptr<u64> arg4, vm::ptr<s32> out_flags)
{
	ppu.state += cpu_flag::wait;

	sys_fs.warning("sys_fs_fget_block_size(fd=%d, sector_size=*0x%x, block_size=*0x%x, arg4=*0x%x, out_flags=*0x%x)", fd, sector_size, block_size, arg4, out_flags);

	const auto handle = idm::get<lv2_fs_object, lv2_file>(fd);

	if (!handle)
	{
		return CELL_EBADF;
	}

	static_cast<void>(ppu.test_stopped());

	// TODO
	const auto& mount = handle->mp;
	*sector_size = mount->sector_size;
	*block_size = mount->block_size;
	*arg4 = mount->sector_size;
	*out_flags = handle->flags;

	return CELL_OK;
}
// Report sector/block sizes for the mount point owning the given path.
error_code sys_fs_get_block_size(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u64> sector_size, vm::ptr<u64> block_size, vm::ptr<u64> arg4)
{
	ppu.state += cpu_flag::wait;
	sys_fs.warning("sys_fs_get_block_size(path=%s, sector_size=*0x%x, block_size=*0x%x, arg4=*0x%x)", path, sector_size, block_size, arg4);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	// A path consisting only of '/' characters refers to the root directory
	if (vpath.find_first_not_of('/') == umax)
	{
		return {CELL_EISDIR, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	// It appears that /dev_hdd0 mount point is special in this function
	if (mp != &g_mp_sys_dev_hdd0 && (mp->flags & lv2_mp_flag::strict_get_block_size ? !fs::is_file(local_path) : !fs::exists(local_path)))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::exist: return {CELL_EISDIR, path};
		case fs::error::noent: return {CELL_ENOENT, path};
		default: sys_fs.error("sys_fs_get_block_size(): unknown error %s", error);
		}
		return {CELL_EIO, path}; // ???
	}
	static_cast<void>(ppu.test_stopped());
	// TODO
	*sector_size = mp->sector_size;
	*block_size = mp->block_size;
	*arg4 = mp->sector_size;
	return CELL_OK;
}
// Truncate (or extend) the file at the given virtual path to `size` bytes.
error_code sys_fs_truncate(ppu_thread& ppu, vm::cptr<char> path, u64 size)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_truncate(path=%s, size=0x%llx)", path, size);
	const auto [path_error, vpath] = translate_to_str(path);
	if (path_error)
	{
		return {path_error, vpath};
	}
	const std::string local_path = vfs::get(vpath);
	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	if (mp == &g_mp_sys_dev_root)
	{
		return {CELL_EISDIR, path};
	}
	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}
	if (mp.read_only)
	{
		return {CELL_EROFS, path};
	}
	std::lock_guard lock(mp->mutex);
	if (!fs::truncate_file(local_path, size))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
		}
		default: sys_fs.error("sys_fs_truncate(): unknown error %s", error);
		}
		return {CELL_EIO, path}; // ???
	}
	return CELL_OK;
}
// Truncate (or extend) an open, writable file descriptor to `size` bytes.
error_code sys_fs_ftruncate(ppu_thread& ppu, u32 fd, u64 size)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);
	sys_fs.warning("sys_fs_ftruncate(fd=%d, size=0x%llx)", fd, size);
	// The fd must exist and have some write access mode
	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
	if (!file || !(file->flags & CELL_FS_O_ACCMODE))
	{
		return CELL_EBADF;
	}
	if (file->mp.read_only)
	{
		return CELL_EROFS;
	}
	std::lock_guard lock(file->mp->mutex);
	if (!file->file)
	{
		return CELL_EBADF;
	}
	// lock == 2: invalidated host file; any other lock state: busy
	if (file->lock == 2)
	{
		return CELL_EIO;
	}
	if (file->lock)
	{
		return CELL_EBUSY;
	}
	if (!file->file.trunc(size))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::ok:
		default: sys_fs.error("sys_fs_ftruncate(): unknown error %s", error);
		}
		return CELL_EIO; // ???
	}
	return CELL_OK;
}
// lv2 syscall: create a symbolic link — unimplemented stub, always succeeds.
error_code sys_fs_symbolic_link(ppu_thread&, vm::cptr<char> target, vm::cptr<char> linkpath)
{
	sys_fs.todo("sys_fs_symbolic_link(target=%s, linkpath=%s)", target, linkpath);

	return CELL_OK;
}
// lv2 syscall: change file mode. The mode change itself is not implemented;
// the function only validates the path and reports existence-related errors.
// On ENOENT it additionally probes for split-file parts (`name.66600` plus
// `name.66601`..`name.66699`) before giving up.
error_code sys_fs_chmod(ppu_thread&, vm::cptr<char> path, s32 mode)
{
	sys_fs.todo("sys_fs_chmod(path=%s, mode=%#o)", path, mode);

	const auto [path_error, vpath] = translate_to_str(path);

	if (path_error)
	{
		return {path_error, vpath};
	}

	const std::string local_path = vfs::get(vpath);
	const auto mp = lv2_fs_object::get_mp(vpath);

	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}

	if (mp->flags & lv2_mp_flag::read_only)
	{
		return {CELL_EROFS, path};
	}

	std::unique_lock lock(mp->mutex);

	fs::stat_t info{};

	if (!fs::get_stat(local_path, info))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			// Try to locate split files
			for (u32 i = 66601; i <= 66699; i++)
			{
				// Fixed: test the stat result without negation, matching the
				// `.66600` check below. Previously `!fs::get_stat(...)` broke
				// out of the loop on a *failed* stat while `info` was stale.
				if (fs::get_stat(fmt::format("%s.%u", local_path, i), info) && !info.is_directory)
				{
					break;
				}
			}

			if (fs::get_stat(local_path + ".66600", info) && !info.is_directory)
			{
				break;
			}

			return {CELL_ENOENT, path};
		}
		default:
		{
			sys_fs.error("sys_fs_chmod(): unknown error %s", error);
			return {CELL_EIO, path};
		}
		}
	}

	return CELL_OK;
}
// lv2 syscall: change file ownership — unimplemented stub, always succeeds.
error_code sys_fs_chown(ppu_thread&, vm::cptr<char> path, s32 uid, s32 gid)
{
	sys_fs.todo("sys_fs_chown(path=%s, uid=%d, gid=%d)", path, uid, gid);

	return CELL_OK;
}
// lv2 syscall: report total and available free space for the device that
// contains `path`. The reported sizes are synthetic (see HACK note below),
// tuned so that cellFsGetFreeSize behaves plausibly.
error_code sys_fs_disk_free(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u64> total_free, vm::ptr<u64> avail_free)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);

	sys_fs.warning("sys_fs_disk_free(path=%s total_free=*0x%x avail_free=*0x%x)", path, total_free, avail_free);

	if (!path)
		return CELL_EFAULT;

	if (!path[0])
		return CELL_EINVAL;

	const std::string_view vpath = path.get_ptr();

	if (vpath == "/"sv)
	{
		return CELL_ENOTSUP;
	}

	// It seems max length is 31, and multiple / at the start aren't supported
	if (vpath.size() > CELL_FS_MAX_MP_LENGTH)
	{
		return {CELL_ENAMETOOLONG, path};
	}

	if (vpath.find_first_not_of('/') != 1)
	{
		return {CELL_EINVAL, path};
	}

	// Get only device path
	const std::string local_path = vfs::get(vpath.substr(0, vpath.find_first_of('/', 1)));

	if (local_path.empty())
	{
		return {CELL_EINVAL, path};
	}

	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);

	if (mp->flags & lv2_mp_flag::strict_get_block_size)
	{
		// TODO:
		return {CELL_ENOTSUP, path};
	}

	if (mp.read_only)
	{
		// TODO: check /dev_bdvd
		// Read-only devices report zero free space
		ppu.check_state();
		*total_free = 0;
		*avail_free = 0;
		return CELL_OK;
	}

	u64 available = 0;

	// avail_free is the only value used by cellFsGetFreeSize
	if (mp == &g_mp_sys_dev_hdd1)
	{
		available = (1u << 31) - mp->sector_size; // 2GB (TODO: Should be the total size)
	}
	else //if (mp == &g_mp_sys_dev_hdd0)
	{
		available = (40ull * 1024 * 1024 * 1024 - mp->sector_size); // Read explanation in cellHddGameCheck
	}

	// HACK: Hopefully nothing uses this value or once at max because its hacked here:
	// The total size can change based on the size of the directory
	const u64 total = available + fs::get_dir_size(local_path, mp->sector_size);

	ppu.check_state();
	*total_free = total;
	*avail_free = available;

	return CELL_OK;
}
// lv2 syscall: set the access/modification times of a file.
// NOTE(review): `timep` is dereferenced for logging before any validation —
// presumably callers always pass a valid pointer; confirm against real usage.
error_code sys_fs_utime(ppu_thread& ppu, vm::cptr<char> path, vm::cptr<CellFsUtimbuf> timep)
{
	ppu.state += cpu_flag::wait;
	lv2_obj::sleep(ppu);

	sys_fs.warning("sys_fs_utime(path=%s, timep=*0x%x)", path, timep);
	sys_fs.warning("** actime=%u, modtime=%u", timep->actime, timep->modtime);

	const auto [path_error, vpath] = translate_to_str(path);

	if (path_error)
	{
		return {path_error, vpath};
	}

	const std::string local_path = vfs::get(vpath);

	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);

	if (mp == &g_mp_sys_dev_root)
	{
		return {CELL_EISDIR, path};
	}

	if (local_path.empty())
	{
		return {CELL_ENOTMOUNTED, path};
	}

	if (mp.read_only)
	{
		return {CELL_EROFS, path};
	}

	// Serialize host filesystem access on this mount point
	std::lock_guard lock(mp->mutex);

	if (!fs::utime(local_path, timep->actime, timep->modtime))
	{
		switch (auto error = fs::g_tls_error)
		{
		case fs::error::noent:
		{
			// Misses on /dev_hdd1 are logged less loudly (warning instead of error)
			return {mp == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, CELL_ENOENT, path};
		}
		default: sys_fs.error("sys_fs_utime(): unknown error %s", error);
		}

		return {CELL_EIO, path}; // ???
	}

	return CELL_OK;
}
// lv2 syscall: read file ACL — unimplemented stub, always succeeds.
error_code sys_fs_acl_read(ppu_thread&, vm::cptr<char> path, vm::ptr<void> ptr)
{
	sys_fs.todo("sys_fs_acl_read(path=%s, ptr=*0x%x)", path, ptr);

	return CELL_OK;
}
// lv2 syscall: write file ACL — unimplemented stub, always succeeds.
error_code sys_fs_acl_write(ppu_thread&, vm::cptr<char> path, vm::ptr<void> ptr)
{
	sys_fs.todo("sys_fs_acl_write(path=%s, ptr=*0x%x)", path, ptr);

	return CELL_OK;
}
// lv2 syscall: query CDA size for a descriptor — partially implemented:
// validates the descriptor, then always reports 0.
error_code sys_fs_lsn_get_cda_size(ppu_thread&, u32 fd, vm::ptr<u64> ptr)
{
	sys_fs.warning("sys_fs_lsn_get_cda_size(fd=%d, ptr=*0x%x)", fd, ptr);

	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);

	if (!file)
	{
		return CELL_EBADF;
	}

	// TODO
	*ptr = 0;
	return CELL_OK;
}
// lv2 syscall: get CDA data — unimplemented stub, always succeeds.
error_code sys_fs_lsn_get_cda(ppu_thread&, u32 fd, vm::ptr<void> arg2, u64 arg3, vm::ptr<u64> arg4)
{
	sys_fs.todo("sys_fs_lsn_get_cda(fd=%d, arg2=*0x%x, arg3=0x%x, arg4=*0x%x)", fd, arg2, arg3, arg4);

	return CELL_OK;
}
// lv2 syscall: place an LSN lock on a descriptor (lock state 0 -> 1).
error_code sys_fs_lsn_lock(ppu_thread&, u32 fd)
{
	sys_fs.trace("sys_fs_lsn_lock(fd=%d)", fd);

	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);

	if (!file)
	{
		return CELL_EBADF;
	}

	// TODO: seems to do nothing on /dev_hdd0 or /host_root
	if (file->mp == &g_mp_sys_dev_hdd0 || file->mp->flags & lv2_mp_flag::strict_get_block_size)
	{
		return CELL_OK;
	}

	std::lock_guard lock(file->mp->mutex);

	// Only transitions 0 -> 1; other lock states are left untouched
	file->lock.compare_and_swap(0, 1);
	return CELL_OK;
}
// lv2 syscall: release an LSN lock on a descriptor (lock state 1 -> 0).
error_code sys_fs_lsn_unlock(ppu_thread&, u32 fd)
{
	sys_fs.trace("sys_fs_lsn_unlock(fd=%d)", fd);

	const auto file = idm::get<lv2_fs_object, lv2_file>(fd);

	if (!file)
	{
		return CELL_EBADF;
	}

	// See sys_fs_lsn_lock
	if (file->mp == &g_mp_sys_dev_hdd0 || file->mp->flags & lv2_mp_flag::strict_get_block_size)
	{
		return CELL_OK;
	}

	// Unlock unconditionally
	std::lock_guard lock(file->mp->mutex);

	// Only transitions 1 -> 0; other lock states are left untouched
	file->lock.compare_and_swap(1, 0);
	return CELL_OK;
}
// lv2 syscall: LSN read — unimplemented stub, always succeeds.
error_code sys_fs_lsn_read(ppu_thread&, u32 fd, vm::cptr<void> ptr, u64 size)
{
	sys_fs.todo("sys_fs_lsn_read(fd=%d, ptr=*0x%x, size=0x%x)", fd, ptr, size);

	return CELL_OK;
}
// lv2 syscall: LSN write — unimplemented stub, always succeeds.
error_code sys_fs_lsn_write(ppu_thread&, u32 fd, vm::cptr<void> ptr, u64 size)
{
	sys_fs.todo("sys_fs_lsn_write(fd=%d, ptr=*0x%x, size=0x%x)", fd, ptr, size);

	return CELL_OK;
}
// lv2 syscall: allocate mapped file memory — unimplemented stub, always succeeds.
error_code sys_fs_mapped_allocate(ppu_thread&, u32 fd, u64 size, vm::pptr<void> out_ptr)
{
	sys_fs.todo("sys_fs_mapped_allocate(fd=%d, arg2=0x%x, out_ptr=**0x%x)", fd, size, out_ptr);

	return CELL_OK;
}
// lv2 syscall: free mapped file memory — unimplemented stub, always succeeds.
error_code sys_fs_mapped_free(ppu_thread&, u32 fd, vm::ptr<void> ptr)
{
	sys_fs.todo("sys_fs_mapped_free(fd=%d, ptr=0x%#x)", fd, ptr);

	return CELL_OK;
}
// lv2 syscall: truncate variant — unimplemented stub, always succeeds.
error_code sys_fs_truncate2(ppu_thread&, u32 fd, u64 size)
{
	sys_fs.todo("sys_fs_truncate2(fd=%d, size=0x%x)", fd, size);

	return CELL_OK;
}
// lv2 syscall: report how many mount-point entries exist (for sizing the
// buffer passed to sys_fs_get_mount_info).
error_code sys_fs_get_mount_info_size(ppu_thread&, vm::ptr<u64> len)
{
	sys_fs.warning("sys_fs_get_mount_info_size(len=*0x%x)", len);

	if (!len)
	{
		return CELL_EFAULT;
	}

	// get_all() with no output buffer returns only the entry count
	*len = g_fxo->get<lv2_fs_mount_info_map>().get_all();
	return CELL_OK;
}
// lv2 syscall: copy up to `len` mount-point entries into `info` and report
// how many were written via `out_len`.
error_code sys_fs_get_mount_info(ppu_thread&, vm::ptr<CellFsMountInfo> info, u64 len, vm::ptr<u64> out_len)
{
	sys_fs.warning("sys_fs_get_mount_info(info=*0x%x, len=0x%x, out_len=*0x%x)", info, len, out_len);

	if (!info || !out_len)
	{
		return CELL_EFAULT;
	}

	*out_len = g_fxo->get<lv2_fs_mount_info_map>().get_all(info.get_ptr(), len);
	return CELL_OK;
}
// lv2 syscall: "format" a device. Emulated by clearing the device's backing
// directory on the host. Requires root permission except for USB devices,
// and the device must not be currently mounted.
error_code sys_fs_newfs(ppu_thread& ppu, vm::cptr<char> dev_name, vm::cptr<char> file_system, s32 unk1, vm::cptr<char> str1)
{
	ppu.state += cpu_flag::wait;

	sys_fs.warning("sys_fs_newfs(dev_name=%s, file_system=%s, unk1=0x%x, str1=%s)", dev_name, file_system, unk1, str1);

	const auto [dev_error, device_name] = translate_to_str(dev_name, false);

	if (dev_error)
	{
		return {dev_error, device_name};
	}

	std::string vfs_path;
	const auto mp = lv2_fs_object::get_mp(device_name, &vfs_path);
	// Deferred: the lock is only attempted below; failure to acquire => EBUSY
	std::unique_lock lock(mp->mutex, std::defer_lock);

	if (!g_ps3_process_info.has_root_perm() && mp != &g_mp_sys_dev_usb)
		return {CELL_EPERM, device_name};

	if (mp == &g_mp_sys_no_device)
		return {CELL_ENXIO, device_name};

	if (g_fxo->get<lv2_fs_mount_info_map>().is_device_mounted(device_name) || !lock.try_lock())
		return {CELL_EBUSY, device_name};

	if (vfs_path.empty())
		return {CELL_ENOTSUP, device_name};

	if (mp->flags & lv2_mp_flag::read_only)
		return {CELL_EROFS, device_name};

	if (mp == &g_mp_sys_dev_hdd1)
	{
		// /dev_hdd1 is backed by a per-application cache directory
		const std::string_view appname = g_ps3_process_info.get_cellos_appname();
		vfs_path = fmt::format("%s/caches/%s", vfs_path, appname.substr(0, appname.find_last_of('.')));
	}

	// Clear the directory contents but keep the directory itself (second arg = false)
	if (!fs::remove_all(vfs_path, false))
	{
		sys_fs.error("sys_fs_newfs(): Failed to clear \"%s\" at \"%s\"", device_name, vfs_path);
		return {CELL_EIO, vfs_path};
	}

	sys_fs.success("sys_fs_newfs(): Successfully cleared \"%s\" at \"%s\"", device_name, vfs_path);
	return CELL_OK;
}
// lv2 syscall: mount a device's filesystem at a guest path.
// Requires root permission except for USB devices. For CELL_FS_SIMPLEFS a
// placeholder backing file is created; the mount is then registered in the
// global mount info map.
error_code sys_fs_mount(ppu_thread& ppu, vm::cptr<char> dev_name, vm::cptr<char> file_system, vm::cptr<char> path, s32 unk1, s32 prot, s32 unk2, vm::cptr<char> str1, u32 str_len)
{
	ppu.state += cpu_flag::wait;

	sys_fs.warning("sys_fs_mount(dev_name=%s, file_system=%s, path=%s, unk1=0x%x, prot=%d, unk3=0x%x, str1=%s, str_len=%d)", dev_name, file_system, path, unk1, prot, unk2, str1, str_len);

	const auto [dev_error, device_name] = translate_to_str(dev_name, false);

	if (dev_error)
	{
		return {dev_error, device_name};
	}

	const auto [fs_error, filesystem] = translate_to_str(file_system, false);

	if (fs_error)
	{
		return {fs_error, filesystem};
	}

	const auto [path_error, path_sv] = translate_to_str(path);

	if (path_error)
	{
		return {path_error, path_sv};
	}

	const std::string vpath = lv2_fs_object::get_normalized_path(path_sv);

	std::string vfs_path;
	const auto mp = lv2_fs_object::get_mp(device_name, &vfs_path);
	// Deferred: the lock is only attempted below; failure to acquire => EBUSY
	std::unique_lock lock(mp->mutex, std::defer_lock);

	if (!g_ps3_process_info.has_root_perm() && mp != &g_mp_sys_dev_usb)
		return {CELL_EPERM, device_name};

	if (mp == &g_mp_sys_no_device)
		return {CELL_ENXIO, device_name};

	if (g_fxo->get<lv2_fs_mount_info_map>().is_device_mounted(device_name) || !lock.try_lock())
		return {CELL_EBUSY, device_name};

	if (vfs_path.empty())
		return {CELL_ENOTSUP, device_name};

	// Reject the root path and any path that is already mounted
	if (vpath.find_first_not_of('/') == umax || !vfs::get(vpath).empty())
		return {CELL_EEXIST, vpath};

	if (mp == &g_mp_sys_dev_hdd1)
	{
		// /dev_hdd1 is backed by a per-application cache directory
		const std::string_view appname = g_ps3_process_info.get_cellos_appname();
		vfs_path = fmt::format("%s/caches/%s", vfs_path, appname.substr(0, appname.find_last_of('.')));
	}

	if (!vfs_path.ends_with('/'))
		vfs_path += '/';

	if (!fs::is_dir(vfs_path) && !fs::create_dir(vfs_path))
	{
		sys_fs.error("Failed to create directory \"%s\"", vfs_path);
		return {CELL_EIO, vfs_path};
	}

	const bool is_simplefs = filesystem == "CELL_FS_SIMPLEFS"sv;

	if (is_simplefs)
	{
		// Simplefs mounts are backed by a single pre-sized placeholder file
		vfs_path += "simplefs.tmp";
		if (fs::file simplefs_file; simplefs_file.open(vfs_path, fs::create + fs::read + fs::write + fs::trunc + fs::lock))
		{
			const u64 file_size = mp->sector_size; // One sector's size is enough for VSH's simplefs check
			simplefs_file.trunc(file_size);
			sys_fs.notice("Created a simplefs file at \"%s\"", vfs_path);
		}
		else
		{
			sys_fs.error("Failed to create simplefs file \"%s\"", vfs_path);
			return {CELL_EIO, vfs_path};
		}
	}

	if (!vfs::mount(vpath, vfs_path, !is_simplefs))
	{
		// Roll back the placeholder file on mount failure
		if (is_simplefs)
		{
			if (fs::remove_file(vfs_path))
			{
				sys_fs.notice("Removed simplefs file \"%s\"", vfs_path);
			}
			else
			{
				sys_fs.error("Failed to remove simplefs file \"%s\"", vfs_path);
			}
		}

		return CELL_EIO;
	}

	g_fxo->get<lv2_fs_mount_info_map>().add(vpath, mp, device_name, filesystem, prot);

	return CELL_OK;
}
// lv2 syscall: unmount the filesystem at a guest path. `force` allows
// unmounting even when the mount-point mutex cannot be acquired.
error_code sys_fs_unmount(ppu_thread& ppu, vm::cptr<char> path, s32 unk1, s32 force)
{
	ppu.state += cpu_flag::wait;

	sys_fs.warning("sys_fs_unmount(path=%s, unk1=0x%x, force=%d)", path, unk1, force);

	const auto [path_error, vpath] = translate_to_str(path);

	if (path_error)
	{
		return {path_error, vpath};
	}

	const auto& mp = g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath);
	std::unique_lock lock(mp->mutex, std::defer_lock);

	if (!g_ps3_process_info.has_root_perm() && mp != &g_mp_sys_dev_usb)
		return {CELL_EPERM, vpath};

	if (mp == &g_mp_sys_no_device)
		return {CELL_EINVAL, vpath};

	// The root device can never be unmounted; a busy device only with force
	if (mp == &g_mp_sys_dev_root || (!lock.try_lock() && !force))
		return {CELL_EBUSY, vpath};

	if (!lv2_fs_mount_info_map::vfs_unmount(vpath))
		return {CELL_EIO, vpath};

	return CELL_OK;
}
| 78,615
|
C++
|
.cpp
| 2,659
| 26.714554
| 205
| 0.654851
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,344
|
sys_console.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_console.cpp
|
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_console.h"
LOG_CHANNEL(sys_console);
// lv2 syscall: write to the system console — unimplemented stub, always succeeds.
error_code sys_console_write(vm::cptr<char> buf, u32 len)
{
	sys_console.todo("sys_console_write(buf=*0x%x, len=0x%x)", buf, len);

	return CELL_OK;
}
| 259
|
C++
|
.cpp
| 9
| 27
| 70
| 0.730612
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,345
|
sys_crypto_engine.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_crypto_engine.cpp
|
#include "stdafx.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_crypto_engine.h"
LOG_CHANNEL(sys_crypto_engine);
// lv2 syscall: create a crypto engine instance — unimplemented stub, always succeeds.
error_code sys_crypto_engine_create(vm::ptr<u32> id)
{
	sys_crypto_engine.todo("sys_crypto_engine_create(id=*0x%x)", id);

	return CELL_OK;
}
// lv2 syscall: destroy a crypto engine instance — unimplemented stub, always succeeds.
error_code sys_crypto_engine_destroy(u32 id)
{
	sys_crypto_engine.todo("sys_crypto_engine_destroy(id=0x%x)", id);

	return CELL_OK;
}
// lv2 syscall: fill a buffer with random data — unimplemented stub, always succeeds.
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer, u64 buffer_size)
{
	// Fixed: the log format string was missing its closing parenthesis
	sys_crypto_engine.todo("sys_crypto_engine_random_generate(buffer=*0x%x, buffer_size=0x%x)", buffer, buffer_size);

	return CELL_OK;
}
| 618
|
C++
|
.cpp
| 19
| 30.736842
| 113
| 0.755932
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,346
|
sys_gpio.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_gpio.cpp
|
#include "stdafx.h"
#include "sys_gpio.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_gpio);
// lv2 syscall: read the state of a GPIO device (LEDs or DIP switches).
// Retail hardware exposes neither, so a valid device always reads back as 0.
error_code sys_gpio_get(u64 device_id, vm::ptr<u64> value)
{
	sys_gpio.trace("sys_gpio_get(device_id=0x%llx, value=*0x%x)", device_id, value);

	switch (device_id)
	{
	case SYS_GPIO_LED_DEVICE_ID:
	case SYS_GPIO_DIP_SWITCH_DEVICE_ID:
		break;
	default:
		return CELL_ESRCH;
	}

	// Retail consoles have neither LEDs nor DIP switches, so 0 is always reported
	if (value.try_write(0))
	{
		return CELL_OK;
	}

	return CELL_EFAULT;
}
// lv2 syscall: set the state of a GPIO device.
// Retail consoles have no LEDs or DIP switches, so nothing is ever modified:
// LED writes are silently accepted, DIP switches are read-only.
error_code sys_gpio_set(u64 device_id, u64 mask, u64 value)
{
	sys_gpio.trace("sys_gpio_set(device_id=0x%llx, mask=0x%llx, value=0x%llx)", device_id, mask, value);

	if (device_id == SYS_GPIO_LED_DEVICE_ID)
	{
		return CELL_OK;
	}

	if (device_id == SYS_GPIO_DIP_SWITCH_DEVICE_ID)
	{
		return CELL_EINVAL;
	}

	return CELL_ESRCH;
}
| 940
|
C++
|
.cpp
| 29
| 30.37931
| 101
| 0.72949
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,347
|
sys_rsx.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rsx.cpp
|
#include "stdafx.h"
#include "sys_rsx.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Memory/vm_locking.h"
#include "Emu/RSX/Core/RSXEngLock.hpp"
#include "Emu/RSX/Core/RSXReservationLock.hpp"
#include "Emu/RSX/RSXThread.h"
#include "util/asm.hpp"
#include "sys_event.h"
#include "sys_vm.h"
LOG_CHANNEL(sys_rsx);
// Unknown error code returned by sys_rsx_context_attribute
// (observed value; the official name/meaning is not documented)
enum sys_rsx_error : s32
{
	SYS_RSX_CONTEXT_ATTRIBUTE_ERROR = -17
};
// Formatter specialization so sys_rsx_error values print by name in logs
template<>
void fmt_class_string<sys_rsx_error>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (error)
		{
		STR_CASE(SYS_RSX_CONTEXT_ATTRIBUTE_ERROR);
		}

		return unknown;
	});
}
// Timestamp source used for RSX reports/notifications (timebase-derived)
static u64 rsx_timeStamp()
{
	return get_timebased_time();
}
// Publish a new GET/PUT pair to the RSX FIFO via new_get_put and wait for the
// RSX thread to consume it. The handshake is cooperative: if the calling CPU
// thread is being torn down (cpu_flag::exit), the operation is withdrawn and
// flagged for retry (cpu_flag::again). Statement order is load-bearing here.
static void set_rsx_dmactl(rsx::thread* render, u64 get_put)
{
	{
		rsx::eng_lock rlock(render);
		render->fifo_ctrl->abort();

		// Unconditional set
		while (!render->new_get_put.compare_and_swap_test(u64{umax}, get_put))
		{
			// Wait for the first store to complete (or be aborted)
			if (auto cpu = cpu_thread::get_current())
			{
				if (cpu->state & cpu_flag::exit)
				{
					// Retry
					cpu->state += cpu_flag::again;
					return;
				}
			}

			utils::pause();
		}

		// Schedule FIFO interrupt to deal with this immediately
		render->m_eng_interrupt_mask |= rsx::dma_control_interrupt;
	}

	if (auto cpu = cpu_thread::get_current())
	{
		// Wait for the first store to complete (or be aborted)
		while (render->new_get_put != usz{umax})
		{
			if (cpu->state & cpu_flag::exit)
			{
				// Withdraw our pending value only if it is still the one stored
				if (render->new_get_put.compare_and_swap_test(get_put, umax))
				{
					// Retry
					cpu->state += cpu_flag::again;
					return;
				}
			}

			thread_ctrl::wait_for(1000);
		}
	}
}
// Deliver a GCM event to the guest via the RSX event port.
// Returns true on success (or when there is nothing to send), false when the
// send was aborted (EAGAIN) and the flags were parked in unsent_gcm_events.
// EBUSY is retried with backoff; ENOTCONN is tolerated; anything else is fatal.
bool rsx::thread::send_event(u64 data1, u64 event_flags, u64 data3)
{
	// Filter event bits, send them only if they are masked by gcm
	// Except the upper 32-bits, they are reserved for unmapped io events and execute unconditionally
	event_flags &= vm::_ref<RsxDriverInfo>(driver_info).handlers | 0xffff'ffffull << 32;

	if (!event_flags)
	{
		// Nothing to do
		return true;
	}

	auto error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);

	while (error + 0u == CELL_EBUSY)
	{
		auto cpu = get_current_cpu_thread();

		if (cpu && cpu->get_class() == thread_class::ppu)
		{
			// Deschedule
			lv2_obj::sleep(*cpu, 100);
		}

		// Wait a bit before resending event
		thread_ctrl::wait_for(100);

		if (cpu && cpu->get_class() == thread_class::rsx)
			cpu->cpu_wait({});

		// Give up (treat as sent) on emulator stop or pending thread state change
		if (Emu.IsStopped() || (cpu && cpu->check_state()))
		{
			error = 0;
			break;
		}

		error = sys_event_port_send(rsx_event_port, data1, event_flags, data3);
	}

	if (error + 0u == CELL_EAGAIN)
	{
		// Thread has aborted when sending event (VBLANK duplicates are allowed)
		ensure((unsent_gcm_events.fetch_or(event_flags) & event_flags & ~(SYS_RSX_EVENT_VBLANK | SYS_RSX_EVENT_SECOND_VBLANK_BASE | SYS_RSX_EVENT_SECOND_VBLANK_BASE * 2)) == 0);
		return false;
	}

	if (error && error + 0u != CELL_ENOTCONN)
	{
		fmt::throw_exception("rsx::thread::send_event() Failed to send event! (error=%x)", +error);
	}

	return true;
}
// lv2 syscall 666: open the RSX device — unimplemented stub, always succeeds.
error_code sys_rsx_device_open(cpu_thread& cpu)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.todo("sys_rsx_device_open()");

	return CELL_OK;
}
// lv2 syscall 667: close the RSX device — unimplemented stub, always succeeds.
error_code sys_rsx_device_close(cpu_thread& cpu)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.todo("sys_rsx_device_close()");

	return CELL_OK;
}
/*
 * lv2 SysCall 668 (0x29C): sys_rsx_memory_allocate
 * @param mem_handle (OUT): Context / ID, which is used by sys_rsx_memory_free to free allocated memory.
 * @param mem_addr (OUT): Returns the local memory base address, usually 0xC0000000.
 * @param size (IN): Local memory size. E.g. 0x0F900000 (249 MB). (changes with sdk version)
 * @param flags (IN): E.g. Immediate value passed in cellGcmSys is 8.
 * @param a5 (IN): E.g. Immediate value passed in cellGcmSys is 0x00300000 (3 MB?).
 * @param a6 (IN): E.g. Immediate value passed in cellGcmSys is 16.
 * @param a7 (IN): E.g. Immediate value passed in cellGcmSys is 8.
 */
error_code sys_rsx_memory_allocate(cpu_thread& cpu, vm::ptr<u32> mem_handle, vm::ptr<u64> mem_addr, u32 size, u64 flags, u64 a5, u64 a6, u64 a7)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_memory_allocate(mem_handle=*0x%x, mem_addr=*0x%x, size=0x%x, flags=0x%llx, a5=0x%llx, a6=0x%llx, a7=0x%llx)", mem_handle, mem_addr, size, flags, a5, a6, a7);

	// Back the RSX local memory region with emulator video memory
	if (vm::falloc(rsx::constants::local_mem_base, size, vm::video))
	{
		rsx::get_current_renderer()->local_mem_size = size;

		// Keep the driver info block (if already allocated) in sync
		if (u32 addr = rsx::get_current_renderer()->driver_info)
		{
			vm::_ref<RsxDriverInfo>(addr).memory_size = size;
		}

		*mem_addr = rsx::constants::local_mem_base;
		*mem_handle = 0x5a5a5a5b; // Fixed dummy handle, paired with sys_rsx_memory_free
		return CELL_OK;
	}

	return CELL_ENOMEM;
}
/*
 * lv2 SysCall 669 (0x29D): sys_rsx_memory_free
 * @param mem_handle (OUT): Context / ID, for allocated local memory generated by sys_rsx_memory_allocate
 */
error_code sys_rsx_memory_free(cpu_thread& cpu, u32 mem_handle)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_memory_free(mem_handle=0x%x)", mem_handle);

	// Nothing to free if local memory was never allocated
	if (!vm::check_addr(rsx::constants::local_mem_base))
	{
		return CELL_ENOMEM;
	}

	// A live dma_address means a context still references this memory
	if (rsx::get_current_renderer()->dma_address)
	{
		fmt::throw_exception("Attempting to dealloc rsx memory when the context is still being used");
	}

	if (!vm::dealloc(rsx::constants::local_mem_base))
	{
		return CELL_ENOMEM;
	}

	return CELL_OK;
}
/*
 * lv2 SysCall 670 (0x29E): sys_rsx_context_allocate
 * @param context_id (OUT): RSX context, E.g. 0x55555555 (in vsh.self)
 * @param lpar_dma_control (OUT): Control register area. E.g. 0x60100000 (in vsh.self)
 * @param lpar_driver_info (OUT): RSX data like frequencies, sizes, version... E.g. 0x60200000 (in vsh.self)
 * @param lpar_reports (OUT): Report data area. E.g. 0x60300000 (in vsh.self)
 * @param mem_ctx (IN): mem_ctx given by sys_rsx_memory_allocate
 * @param system_mode (IN):
 */
error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm::ptr<u64> lpar_dma_control, vm::ptr<u64> lpar_driver_info, vm::ptr<u64> lpar_reports, u64 mem_ctx, u64 system_mode)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_context_allocate(context_id=*0x%x, lpar_dma_control=*0x%x, lpar_driver_info=*0x%x, lpar_reports=*0x%x, mem_ctx=0x%llx, system_mode=0x%llx)",
		context_id, lpar_dma_control, lpar_driver_info, lpar_reports, mem_ctx, system_mode);

	// Local memory must have been allocated first (sys_rsx_memory_allocate)
	if (!vm::check_addr(rsx::constants::local_mem_base))
	{
		return CELL_EINVAL;
	}

	const auto render = rsx::get_current_renderer();

	std::lock_guard lock(render->sys_rsx_mtx);

	if (render->dma_address)
	{
		// We currently do not support multiple contexts
		fmt::throw_exception("sys_rsx_context_allocate was called twice");
	}

	// Reserve a 3 MiB window: DMA control, driver info, reports (1 MiB each)
	const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
	const u32 dma_address = area ? area->alloc(0x300000) : 0;

	if (!dma_address)
	{
		return CELL_ENOMEM;
	}

	sys_rsx.warning("sys_rsx_context_allocate(): Mapped address 0x%x", dma_address);

	*lpar_dma_control = dma_address;
	*lpar_driver_info = dma_address + 0x100000;
	*lpar_reports = dma_address + 0x200000;

	// Initialize the reports area with the magic/sentinel values VSH expects
	auto &reports = vm::_ref<RsxReports>(vm::cast(*lpar_reports));
	std::memset(&reports, 0, sizeof(RsxReports));

	for (usz i = 0; i < std::size(reports.notify); ++i)
		reports.notify[i].timestamp = -1;

	for (usz i = 0; i < std::size(reports.semaphore); i += 4)
	{
		reports.semaphore[i + 0].val.raw() = 0x1337C0D3;
		reports.semaphore[i + 1].val.raw() = 0x1337BABE;
		reports.semaphore[i + 2].val.raw() = 0x1337BEEF;
		reports.semaphore[i + 3].val.raw() = 0x1337F001;
	}

	for (usz i = 0; i < std::size(reports.report); ++i)
	{
		reports.report[i].val = 0;
		reports.report[i].timestamp = -1;
		reports.report[i].pad = -1;
	}

	// Populate the driver info block read by the guest GCM library
	auto &driverInfo = vm::_ref<RsxDriverInfo>(vm::cast(*lpar_driver_info));

	std::memset(&driverInfo, 0, sizeof(RsxDriverInfo));

	driverInfo.version_driver = 0x211;
	driverInfo.version_gpu = 0x5c;
	driverInfo.memory_size = render->local_mem_size;
	driverInfo.nvcore_frequency = 500000000; // 0x1DCD6500
	driverInfo.memory_frequency = 650000000; // 0x26BE3680
	driverInfo.reportsNotifyOffset = 0x1000;
	driverInfo.reportsOffset = 0;
	driverInfo.reportsReportOffset = 0x1400;
	driverInfo.systemModeFlags = static_cast<u32>(system_mode);
	driverInfo.hardware_channel = 1; // * i think* this 1 for games, 0 for vsh

	render->driver_info = vm::cast(*lpar_driver_info);

	auto &dmaControl = vm::_ref<RsxDmaControl>(vm::cast(*lpar_dma_control));
	dmaControl.get = 0;
	dmaControl.put = 0;
	dmaControl.ref = 0; // Set later to -1 by cellGcmSys

	if ((false/*system_mode & something*/ || g_cfg.video.decr_memory_layout)
		&& g_cfg.core.debug_console_mode)
		rsx::get_current_renderer()->main_mem_size = 0x20000000; //512MB
	else
		rsx::get_current_renderer()->main_mem_size = 0x10000000; //256MB

	vm::var<sys_event_queue_attribute_t, vm::page_allocator<>> attr;
	attr->protocol = SYS_SYNC_PRIORITY;
	attr->type = SYS_PPU_QUEUE;
	attr->name_u64 = 0;

	// Create the event port first (its id is saved), then the queue overwrites
	// handler_queue with the queue id, and the port is connected to it
	sys_event_port_create(cpu, vm::get_addr(&driverInfo.handler_queue), SYS_EVENT_PORT_LOCAL, 0);
	render->rsx_event_port = driverInfo.handler_queue;
	sys_event_queue_create(cpu, vm::get_addr(&driverInfo.handler_queue), attr, 0, 0x20);
	sys_event_port_connect_local(cpu, render->rsx_event_port, driverInfo.handler_queue);

	render->display_buffers_count = 0;
	render->current_display_buffer = 0;
	render->label_addr = vm::cast(*lpar_reports);
	render->init(dma_address);

	*context_id = 0x55555555;

	return CELL_OK;
}
/*
 * lv2 SysCall 671 (0x29F): sys_rsx_context_free
 * @param context_id (IN): RSX context generated by sys_rsx_context_allocate to free the context.
 */
error_code sys_rsx_context_free(ppu_thread& ppu, u32 context_id)
{
	ppu.state += cpu_flag::wait;

	sys_rsx.todo("sys_rsx_context_free(context_id=0x%x)", context_id);

	const auto render = rsx::get_current_renderer();

	rsx::eng_lock fifo_lock(render);
	std::scoped_lock lock(render->sys_rsx_mtx);

	// NOTE(review): dma_address is cleared before the validity check below,
	// so an invalid call still zeroes it — confirm this is intended
	const u32 dma_address = render->dma_address;
	render->dma_address = 0;

	if (context_id != 0x55555555 || !dma_address || render->state & cpu_flag::ret)
	{
		return CELL_EINVAL;
	}

	g_fxo->get<rsx::vblank_thread>() = thread_state::finished;

	const u32 queue_id = vm::_ptr<RsxDriverInfo>(render->driver_info)->handler_queue;

	// Request RSX thread shutdown and wait for it to acknowledge
	render->state += cpu_flag::ret;
	while (render->state & cpu_flag::ret)
	{
		thread_ctrl::wait_for(1000);
	}

	// Tear down the event plumbing created in sys_rsx_context_allocate
	sys_event_port_disconnect(ppu, render->rsx_event_port);
	sys_event_port_destroy(ppu, render->rsx_event_port);
	sys_event_queue_destroy(ppu, queue_id, SYS_EVENT_QUEUE_DESTROY_FORCE);

	render->label_addr = 0;
	render->driver_info = 0;
	render->main_mem_size = 0;
	render->rsx_event_port = 0;
	render->display_buffers_count = 0;
	render->current_display_buffer = 0;
	render->ctrl = nullptr;
	render->rsx_thread_running = false;
	render->serialized = false;

	ensure(vm::get(vm::rsx_context)->dealloc(dma_address));
	return CELL_OK;
}
/*
 * lv2 SysCall 672 (0x2A0): sys_rsx_context_iomap
 * @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
 * @param io (IN): IO offset mapping area. E.g. 0x00600000
 * @param ea (IN): Start address of mapping area. E.g. 0x20400000
 * @param size (IN): Size of mapping area in bytes. E.g. 0x00200000
 * @param flags (IN):
 */
error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea, u32 size, u64 flags)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_context_iomap(context_id=0x%x, io=0x%x, ea=0x%x, size=0x%x, flags=0x%llx)", context_id, io, ea, size, flags);

	const auto render = rsx::get_current_renderer();

	// All offsets/sizes must be 1 MiB aligned and within local/main memory limits
	if (!size || io & 0xFFFFF || ea + u64{size} > rsx::constants::local_mem_base || ea & 0xFFFFF || size & 0xFFFFF ||
		context_id != 0x55555555 || render->main_mem_size < io + u64{size})
	{
		return CELL_EINVAL;
	}

	if (!render->is_fifo_idle())
	{
		sys_rsx.warning("sys_rsx_context_iomap(): RSX is not idle while mapping io");
	}

	// Wait until we have no active RSX locks and reserve iomap for use. Must do so before acquiring vm lock to avoid deadlocks
	rsx::reservation_lock<true> rsx_lock(ea, size);
	vm::writer_lock rlock;

	for (u32 addr = ea, end = ea + size; addr < end; addr += 0x100000)
	{
		if (!vm::check_addr(addr, vm::page_readable | (addr < 0x20000000 ? 0 : vm::page_1m_size)))
		{
			return CELL_EINVAL;
		}

		if ((addr == ea || !(addr % 0x1000'0000)) && idm::check<sys_vm_t>(sys_vm_t::find_id(addr)))
		{
			// Virtual memory is disallowed
			return CELL_EINVAL;
		}
	}

	// Work in 1 MiB page units from here on
	io >>= 20, ea >>= 20, size >>= 20;

	rsx::eng_lock fifo_lock(render);
	std::scoped_lock lock(render->sys_rsx_mtx);

	for (u32 i = 0; i < size; i++)
	{
		auto& table = render->iomap_table;

		// TODO: Investigate relaxed memory ordering
		const u32 prev_ea = table.ea[io + i];
		table.ea[io + i].release((ea + i) << 20);
		if (prev_ea + 1) table.io[prev_ea >> 20].release(-1); // Clear previous mapping if exists
		table.io[ea + i].release((io + i) << 20);
	}

	return CELL_OK;
}
/*
 * lv2 SysCall 673 (0x2A1): sys_rsx_context_iounmap
 * @param context_id (IN): RSX context, E.g. 0x55555555 (in vsh.self)
 * @param io (IN): IO address. E.g. 0x00600000 (Start page 6)
 * @param size (IN): Size to unmap in byte. E.g. 0x00200000
 */
error_code sys_rsx_context_iounmap(cpu_thread& cpu, u32 context_id, u32 io, u32 size)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_context_iounmap(context_id=0x%x, io=0x%x, size=0x%x)", context_id, io, size);

	const auto render = rsx::get_current_renderer();

	// Offset and size must be 1 MiB aligned and within main memory limits
	if (!size || size & 0xFFFFF || io & 0xFFFFF || context_id != 0x55555555 ||
		render->main_mem_size < io + u64{size})
	{
		return CELL_EINVAL;
	}

	if (!render->is_fifo_idle())
	{
		sys_rsx.warning("sys_rsx_context_iounmap(): RSX is not idle while unmapping io");
	}

	vm::writer_lock rlock;
	std::scoped_lock lock(render->sys_rsx_mtx);

	// Clear both directions of the mapping, one 1 MiB page at a time
	for (const u32 end = (io >>= 20) + (size >>= 20); io < end;)
	{
		auto& table = render->iomap_table;

		const u32 ea_entry = table.ea[io];
		table.ea[io++].release(-1);
		if (ea_entry + 1) table.io[ea_entry >> 20].release(-1);
	}

	return CELL_OK;
}
/*
* lv2 SysCall 674 (0x2A2): sys_rsx_context_attribute
* @param context_id (IN): RSX context, e.g. 0x55555555
* @param package_id (IN):
* @param a3 (IN):
* @param a4 (IN):
* @param a5 (IN):
* @param a6 (IN):
*/
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 a4, u64 a5, u64 a6)
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::wait;
}
// Flip/queue/reset flip/flip event/user command/vblank as trace to help with log spam
const bool trace_log = (package_id == 0x102 || package_id == 0x103 || package_id == 0x10a || package_id == 0xFEC || package_id == 0xFED || package_id == 0xFEF);
(trace_log ? sys_rsx.trace : sys_rsx.warning)("sys_rsx_context_attribute(context_id=0x%x, package_id=0x%x, a3=0x%llx, a4=0x%llx, a5=0x%llx, a6=0x%llx)", context_id, package_id, a3, a4, a5, a6);
// todo: these event ports probly 'shouldnt' be here as i think its supposed to be interrupts that are sent from rsx somewhere in lv1
const auto render = rsx::get_current_renderer();
if (!render->dma_address || context_id != 0x55555555)
{
return CELL_EINVAL;
}
auto &driverInfo = vm::_ref<RsxDriverInfo>(render->driver_info);
switch (package_id)
{
case 0x001: // FIFO
{
const u64 get = static_cast<u32>(a3);
const u64 put = static_cast<u32>(a4);
const u64 get_put = put << 32 | get;
std::lock_guard lock(render->sys_rsx_mtx);
set_rsx_dmactl(render, get_put);
break;
}
case 0x100: // Display mode set
break;
case 0x101: // Display sync set, cellGcmSetFlipMode
// a4 == 2 is vsync, a4 == 1 is hsync
render->requested_vsync.store(a4 == 2);
break;
case 0x102: // Display flip
{
u32 flip_idx = ~0u;
// high bit signifys grabbing a queued buffer
// otherwise it contains a display buffer offset
if ((a4 & 0x80000000) != 0)
{
// NOTE: There currently seem to only be 2 active heads on PS3
ensure(a3 < 2);
// last half byte gives buffer, 0xf seems to trigger just last queued
u8 idx_check = a4 & 0xf;
if (idx_check > 7)
flip_idx = driverInfo.head[a3].lastQueuedBufferId;
else
flip_idx = idx_check;
// fyi -- u32 hardware_channel = (a4 >> 8) & 0xFF;
// sanity check, the head should have a 'queued' buffer on it, and it should have been previously 'queued'
const u32 sanity_check = 0x40000000 & (1 << flip_idx);
if ((driverInfo.head[a3].flipFlags & sanity_check) != sanity_check)
rsx_log.error("Display Flip Queued: Flipping non previously queued buffer 0x%llx", a4);
}
else
{
for (u32 i = 0; i < render->display_buffers_count; ++i)
{
if (render->display_buffers[i].offset == a4)
{
flip_idx = i;
break;
}
}
if (flip_idx == ~0u)
{
rsx_log.error("Display Flip: Couldn't find display buffer offset, flipping 0. Offset: 0x%x", a4);
flip_idx = 0;
}
}
if (!render->request_emu_flip(flip_idx))
{
if (auto cpu = get_current_cpu_thread())
{
cpu->state += cpu_flag::exit;
cpu->state += cpu_flag::again;
}
return {};
}
}
break;
case 0x103: // Display Queue
{
// NOTE: There currently seem to only be 2 active heads on PS3
ensure(a3 < 2);
driverInfo.head[a3].lastQueuedBufferId = static_cast<u32>(a4);
driverInfo.head[a3].flipFlags |= 0x40000000 | (1 << a4);
render->on_frame_end(static_cast<u32>(a4));
if (!render->send_event(0, SYS_RSX_EVENT_QUEUE_BASE << a3, 0))
{
break;
}
if (g_cfg.video.frame_limit == frame_limit_type::infinite)
{
render->post_vblank_event(get_system_time());
}
}
break;
case 0x104: // Display buffer
{
const u8 id = a3 & 0xFF;
if (id > 7)
{
return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
}
std::lock_guard lock(render->sys_rsx_mtx);
// Note: no error checking is being done
const u32 width = (a4 >> 32) & 0xFFFFFFFF;
const u32 height = a4 & 0xFFFFFFFF;
const u32 pitch = (a5 >> 32) & 0xFFFFFFFF;
const u32 offset = a5 & 0xFFFFFFFF;
render->display_buffers[id].width = width;
render->display_buffers[id].height = height;
render->display_buffers[id].pitch = pitch;
render->display_buffers[id].offset = offset;
render->display_buffers_count = std::max<u32>(id + 1, render->display_buffers_count);
}
break;
case 0x105: // destroy buffer?
break;
case 0x106: // ? (Used by cellGcmInitPerfMon)
break;
case 0x108: // cellGcmSetVBlankFrequency, cellGcmSetSecondVFrequency
// a4 == 3, CELL_GCM_DISPLAY_FREQUENCY_59_94HZ
// a4 == 2, CELL_GCM_DISPLAY_FREQUENCY_SCANOUT
// a4 == 4, CELL_GCM_DISPLAY_FREQUENCY_DISABLE
if (a5 == 1u)
{
// This function resets vsync state to enabled
render->requested_vsync = true;
// TODO: Set vblank frequency
}
else if (ensure(a5 == 2u))
{
// TODO: Implement its frequency as well
render->enable_second_vhandler.store(a4 != 4);
}
break;
case 0x10a: // ? Involved in managing flip status through cellGcmResetFlipStatus
{
if (a3 > 7)
{
return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
}
// NOTE: There currently seem to only be 2 active heads on PS3
ensure(a3 < 2);
driverInfo.head[a3].flipFlags.atomic_op([&](be_t<u32>& flipStatus)
{
flipStatus = (flipStatus & static_cast<u32>(a4)) | static_cast<u32>(a5);
});
}
break;
case 0x10D: // Called by cellGcmInitCursor
break;
case 0x300: // Tiles
{
//a4 high bits = ret.tile = (location + 1) | (bank << 4) | ((offset / 0x10000) << 16) | (location << 31);
//a4 low bits = ret.limit = ((offset + size - 1) / 0x10000) << 16 | (location << 31);
//a5 high bits = ret.pitch = (pitch / 0x100) << 8;
//a5 low bits = ret.format = base | ((base + ((size - 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30);
ensure(a3 < std::size(render->tiles));
if (!render->is_fifo_idle())
{
sys_rsx.warning("sys_rsx_context_attribute(): RSX is not idle while setting tile");
}
auto& tile = render->tiles[a3];
const u32 location = ((a4 >> 32) & 0x3) - 1;
const u32 offset = ((((a4 >> 32) & 0x7FFFFFFF) >> 16) * 0x10000);
const u32 size = ((((a4 & 0x7FFFFFFF) >> 16) + 1) * 0x10000) - offset;
const u32 pitch = (((a5 >> 32) & 0xFFFFFFFF) >> 8) * 0x100;
const u32 comp = ((a5 & 0xFFFFFFFF) >> 26) & 0xF;
const u32 base = (a5 & 0xFFFFFFFF) & 0x7FF;
//const u32 bank = (((a4 >> 32) & 0xFFFFFFFF) >> 4) & 0xF;
const bool bound = ((a4 >> 32) & 0x3) != 0;
const auto range = utils::address_range::start_length(offset, size);
if (bound)
{
if (!size || !pitch)
{
return CELL_EINVAL;
}
u32 limit = -1;
switch (location)
{
case CELL_GCM_LOCATION_MAIN: limit = render->main_mem_size; break;
case CELL_GCM_LOCATION_LOCAL: limit = render->local_mem_size; break;
default: fmt::throw_exception("sys_rsx_context_attribute(): Unexpected location value (location=0x%x)", location);
}
if (!range.valid() || range.end >= limit)
{
return CELL_EINVAL;
}
// Hardcoded value in gcm
ensure(a5 & (1 << 30));
}
std::lock_guard lock(render->sys_rsx_mtx);
// When tile is going to be unbound, we can use it as a hint that the address will no longer be used as a surface and can be removed/invalidated
// Todo: There may be more checks such as format/size/width can could be done
if (tile.bound && !bound)
render->notify_tile_unbound(static_cast<u32>(a3));
if (location == CELL_GCM_LOCATION_MAIN && bound)
{
vm::writer_lock rlock;
for (u32 io = (offset >> 20), end = (range.end >> 20); io <= end; io++)
{
if (render->iomap_table.ea[io] == umax)
{
return CELL_EINVAL;
}
}
}
tile.location = location;
tile.offset = offset;
tile.size = size;
tile.pitch = pitch;
tile.comp = comp;
tile.base = base;
tile.bank = base;
tile.bound = bound;
}
break;
case 0x301: // Depth-buffer (Z-cull)
{
//a4 high = region = (1 << 0) | (zFormat << 4) | (aaFormat << 8);
//a4 low = size = ((width >> 6) << 22) | ((height >> 6) << 6);
//a5 high = start = cullStart&(~0xFFF);
//a5 low = offset = offset;
//a6 high = status0 = (zcullDir << 1) | (zcullFormat << 2) | ((sFunc & 0xF) << 12) | (sRef << 16) | (sMask << 24);
//a6 low = status1 = (0x2000 << 0) | (0x20 << 16);
if (a3 >= std::size(render->zculls))
{
return SYS_RSX_CONTEXT_ATTRIBUTE_ERROR;
}
if (!render->is_fifo_idle())
{
sys_rsx.warning("sys_rsx_context_attribute(): RSX is not idle while setting zcull");
}
const u32 width = ((a4 & 0xFFFFFFFF) >> 22) << 6;
const u32 height = ((a4 & 0x0000FFFF) >> 6) << 6;
const u32 cullStart = (a5 >> 32) & ~0xFFF;
const u32 offset = (a5 & 0x0FFFFFFF);
const bool bound = (a6 & 0xFFFFFFFF) != 0;
if (bound)
{
const auto cull_range = utils::address_range::start_length(cullStart, width * height);
// cullStart is an offset inside ZCULL RAM which is 3MB long, check bounds
// width and height are not allowed to be zero (checked by range.valid())
if (!cull_range.valid() || cull_range.end >= 3u << 20 || offset >= render->local_mem_size)
{
return CELL_EINVAL;
}
if (a5 & 0xF0000000)
{
sys_rsx.warning("sys_rsx_context_attribute(): ZCULL offset greater than 256MB (offset=0x%x)", offset);
}
// Hardcoded values in gcm
ensure(a4 & (1ull << 32));
ensure((a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16)));
}
std::lock_guard lock(render->sys_rsx_mtx);
auto &zcull = render->zculls[a3];
zcull.zFormat = ((a4 >> 32) >> 4) & 0xF;
zcull.aaFormat = ((a4 >> 32) >> 8) & 0xF;
zcull.width = width;
zcull.height = height;
zcull.cullStart = cullStart;
zcull.offset = offset;
zcull.zcullDir = ((a6 >> 32) >> 1) & 0x1;
zcull.zcullFormat = ((a6 >> 32) >> 2) & 0x3FF;
zcull.sFunc = ((a6 >> 32) >> 12) & 0xF;
zcull.sRef = ((a6 >> 32) >> 16) & 0xFF;
zcull.sMask = ((a6 >> 32) >> 24) & 0xFF;
zcull.bound = bound;
}
break;
case 0x302: // something with zcull
break;
case 0x600: // Framebuffer setup
break;
case 0x601: // Framebuffer blit
break;
case 0x602: // Framebuffer blit sync
break;
case 0x603: // Framebuffer close
break;
case 0xFEC: // hack: flip event notification
{
// we only ever use head 1 for now
driverInfo.head[1].flipFlags |= 0x80000000;
driverInfo.head[1].lastFlipTime = rsx_timeStamp(); // should rsxthread set this?
driverInfo.head[1].flipBufferId = static_cast<u32>(a3);
// seems gcmSysWaitLabel uses this offset, so lets set it to 0 every flip
// NOTE: Realhw resets 16 bytes of this semaphore for some reason
vm::_ref<atomic_t<u128>>(render->label_addr + 0x10).store(u128{});
render->send_event(0, SYS_RSX_EVENT_FLIP_BASE << 1, 0);
break;
}
case 0xFED: // hack: vblank command
{
if (cpu_thread::get_current<ppu_thread>())
{
// VBLANK/RSX thread only
return CELL_EINVAL;
}
// NOTE: There currently seem to only be 2 active heads on PS3
ensure(a3 < 2);
// todo: this is wrong and should be 'second' vblank handler and freq, but since currently everything is reported as being 59.94, this should be fine
driverInfo.head[a3].lastSecondVTime.atomic_op([&](be_t<u64>& time)
{
a4 = std::max<u64>(a4, time + 1);
time = a4;
});
// Time point is supplied in argument 4 (todo: convert it to MFTB rate and use it)
const u64 current_time = rsx_timeStamp();
// Note: not atomic
driverInfo.head[a3].lastVTimeLow = static_cast<u32>(current_time);
driverInfo.head[a3].lastVTimeHigh = static_cast<u32>(current_time >> 32);
driverInfo.head[a3].vBlankCount++;
u64 event_flags = SYS_RSX_EVENT_VBLANK;
if (render->enable_second_vhandler)
event_flags |= SYS_RSX_EVENT_SECOND_VBLANK_BASE << a3; // second vhandler
render->send_event(0, event_flags, 0);
break;
}
case 0xFEF: // hack: user command
{
// 'custom' invalid package id for now
// as i think we need custom lv1 interrupts to handle this accurately
// this also should probly be set by rsxthread
driverInfo.userCmdParam = static_cast<u32>(a4);
render->send_event(0, SYS_RSX_EVENT_USER_CMD, 0);
break;
}
default:
return CELL_EINVAL;
}
return CELL_OK;
}
/*
* lv2 SysCall 675 (0x2A3): sys_rsx_device_map
* @param a1 (OUT): rsx device map address : 0x40000000, 0x50000000.. 0xB0000000
* @param a2 (OUT): Unused
* @param dev_id (IN): An immediate value and always 8. (cellGcmInitPerfMon uses 11, 10, 9, 7, 12 successively).
*/
error_code sys_rsx_device_map(cpu_thread& cpu, vm::ptr<u64> dev_addr, vm::ptr<u64> a2, u32 dev_id)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_device_map(dev_addr=*0x%x, a2=*0x%x, dev_id=0x%x)", dev_addr, a2, dev_id);

	// Only device id 8 is handled here; other ids belong to lv1-level features
	if (dev_id != 8)
	{
		// TODO: lv1 related
		fmt::throw_exception("sys_rsx_device_map: Invalid dev_id %d", dev_id);
	}

	const auto render = rsx::get_current_renderer();

	std::scoped_lock lock(render->sys_rsx_mtx);

	// Subsequent calls return the address established by the first mapping
	if (render->device_addr)
	{
		*dev_addr = render->device_addr;
		return CELL_OK;
	}

	// First call: reserve the RSX context area and carve out the device block
	const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403);
	const u32 addr = area ? area->alloc(0x100000) : 0;

	if (!addr)
	{
		return CELL_ENOMEM;
	}

	sys_rsx.warning("sys_rsx_device_map(): Mapped address 0x%x", addr);

	*dev_addr = addr;
	render->device_addr = addr;
	return CELL_OK;
}
/*
* lv2 SysCall 676 (0x2A4): sys_rsx_device_unmap
* @param dev_id (IN): An immediate value and always 8.
*/
// Stub: the device mapping established by sys_rsx_device_map is never released.
error_code sys_rsx_device_unmap(cpu_thread& cpu, u32 dev_id)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.todo("sys_rsx_device_unmap(dev_id=0x%x)", dev_id);

	return CELL_OK;
}
/*
* lv2 SysCall 677 (0x2A5): sys_rsx_attribute
*/
// Stub: the attribute packet is only logged; no state is changed.
error_code sys_rsx_attribute(cpu_thread& cpu, u32 packageId, u32 a2, u32 a3, u32 a4, u32 a5)
{
	cpu.state += cpu_flag::wait;

	sys_rsx.warning("sys_rsx_attribute(packageId=0x%x, a2=0x%x, a3=0x%x, a4=0x%x, a5=0x%x)", packageId, a2, a3, a4, a5);

	return CELL_OK;
}
| 27,906
|
C++
|
.cpp
| 787
| 32.584498
| 196
| 0.674568
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,348
|
sys_io.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_io.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_io.h"
LOG_CHANNEL(sys_io);
error_code sys_io_buffer_create(u32 block_count, u32 block_size, u32 blocks, u32 unk1, vm::ptr<u32> handle)
{
	sys_io.todo("sys_io_buffer_create(block_count=0x%x, block_size=0x%x, blocks=0x%x, unk1=0x%x, handle=*0x%x)", block_count, block_size, blocks, unk1, handle);

	// The output pointer must be valid
	if (!handle)
	{
		return CELL_EFAULT;
	}

	// Register a new io buffer object and hand its id back to the guest
	const auto io_id = idm::make<lv2_io_buf>(block_count, block_size, blocks, unk1);

	if (!io_id)
	{
		return CELL_ESRCH;
	}

	*handle = io_id;
	return CELL_OK;
}
// Remove the io buffer object for this handle; the removal result is ignored.
error_code sys_io_buffer_destroy(u32 handle)
{
	sys_io.todo("sys_io_buffer_destroy(handle=0x%x)", handle);

	idm::remove<lv2_io_buf>(handle);

	return CELL_OK;
}
error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block)
{
	sys_io.todo("sys_io_buffer_allocate(handle=0x%x, block=*0x%x)", handle, block);

	// The output pointer must be valid
	if (!block)
	{
		return CELL_EFAULT;
	}

	const auto io = idm::get<lv2_io_buf>(handle);

	if (!io)
	{
		return CELL_ESRCH;
	}

	// no idea what we actually need to allocate
	const u32 addr = vm::alloc(io->block_count * io->block_size, vm::main);

	if (!addr)
	{
		return CELL_ENOMEM;
	}

	*block = addr;
	return CELL_OK;
}
error_code sys_io_buffer_free(u32 handle, u32 block)
{
	sys_io.todo("sys_io_buffer_free(handle=0x%x, block=0x%x)", handle, block);

	// Validate the buffer handle before releasing the memory block
	if (!idm::get<lv2_io_buf>(handle))
	{
		return CELL_ESRCH;
	}

	vm::dealloc(block);
	return CELL_OK;
}
| 1,480
|
C++
|
.cpp
| 56
| 24.160714
| 157
| 0.696797
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,349
|
sys_process.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_process.cpp
|
#include "stdafx.h"
#include "sys_process.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/System.h"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"
#include "Crypto/unedat.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
#include "sys_lwcond.h"
#include "sys_mutex.h"
#include "sys_cond.h"
#include "sys_event.h"
#include "sys_event_flag.h"
#include "sys_interrupt.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_prx.h"
#include "sys_overlay.h"
#include "sys_rwlock.h"
#include "sys_semaphore.h"
#include "sys_timer.h"
#include "sys_fs.h"
#include "sys_vm.h"
#include "sys_spu.h"
// Check all flags known to be related to extended permissions (TODO)
// It's possible anything which has root flags implicitly has debug perm as well
// But I haven't confirmed it.
// True when any of the upper control-flag bits 0xe0000000 is set,
// i.e. the process has debug and/or root rights.
bool ps3_process_info_t::debug_or_root() const
{
	return (ctrl_flags1 & (0xe << 28)) != 0;
}
// True when either of the root-permission bits (0xc0000000) is set.
bool ps3_process_info_t::has_root_perm() const
{
	return (ctrl_flags1 & (0xc << 28)) != 0;
}
// True when either of the debug-permission bits (0xa0000000) is set.
bool ps3_process_info_t::has_debug_perm() const
{
	return (ctrl_flags1 & (0xa << 28)) != 0;
}
// If a SELF file is of CellOS return its filename, otheriwse return an empty string
std::string_view ps3_process_info_t::get_cellos_appname() const
{
if (!has_root_perm() || !Emu.GetTitleID().empty())
{
return {};
}
return std::string_view(Emu.GetBoot()).substr(Emu.GetBoot().find_last_of('/') + 1);
}
LOG_CHANNEL(sys_process);
ps3_process_info_t g_ps3_process_info;
// Only one guest process exists under emulation, so the pid is fixed at 1.
s32 process_getpid()
{
	// TODO: get current process id
	return 1;
}
// Syscall wrapper around process_getpid (always 1 under emulation).
s32 sys_process_getpid()
{
	sys_process.trace("sys_process_getpid() -> 1");
	return process_getpid();
}
// Stub: always reports parent pid 0.
s32 sys_process_getppid()
{
	sys_process.todo("sys_process_getppid() -> 0");
	return 0;
}
// Count registered objects of the given idm type.
// The value returned by idm::select is used directly as the object count here
// (presumably the number of ids the no-op visitor was invoked for).
template <typename T, typename Get>
u32 idm_get_count()
{
	return idm::select<T, Get>([&](u32, Get&) {});
}
// Report how many kernel objects of the requested type currently exist.
// SYS_TRACE_OBJECT and SYS_SPUPORT_OBJECT are not tracked and report 0.
error_code sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump)
{
	sys_process.error("sys_process_get_number_of_object(object=0x%x, nump=*0x%x)", object, nump);

	switch(object)
	{
	case SYS_MEM_OBJECT: *nump = idm_get_count<lv2_obj, lv2_memory>(); break;
	case SYS_MUTEX_OBJECT: *nump = idm_get_count<lv2_obj, lv2_mutex>(); break;
	case SYS_COND_OBJECT: *nump = idm_get_count<lv2_obj, lv2_cond>(); break;
	case SYS_RWLOCK_OBJECT: *nump = idm_get_count<lv2_obj, lv2_rwlock>(); break;
	case SYS_INTR_TAG_OBJECT: *nump = idm_get_count<lv2_obj, lv2_int_tag>(); break;
	case SYS_INTR_SERVICE_HANDLE_OBJECT: *nump = idm_get_count<lv2_obj, lv2_int_serv>(); break;
	case SYS_EVENT_QUEUE_OBJECT: *nump = idm_get_count<lv2_obj, lv2_event_queue>(); break;
	case SYS_EVENT_PORT_OBJECT: *nump = idm_get_count<lv2_obj, lv2_event_port>(); break;
	case SYS_TRACE_OBJECT: sys_process.error("sys_process_get_number_of_object: object = SYS_TRACE_OBJECT"); *nump = 0; break;
	case SYS_SPUIMAGE_OBJECT: *nump = idm_get_count<lv2_obj, lv2_spu_image>(); break;
	case SYS_PRX_OBJECT: *nump = idm_get_count<lv2_obj, lv2_prx>(); break;
	case SYS_SPUPORT_OBJECT: sys_process.error("sys_process_get_number_of_object: object = SYS_SPUPORT_OBJECT"); *nump = 0; break;
	case SYS_OVERLAY_OBJECT: *nump = idm_get_count<lv2_obj, lv2_overlay>(); break;
	case SYS_LWMUTEX_OBJECT: *nump = idm_get_count<lv2_obj, lv2_lwmutex>(); break;
	case SYS_TIMER_OBJECT: *nump = idm_get_count<lv2_obj, lv2_timer>(); break;
	case SYS_SEMAPHORE_OBJECT: *nump = idm_get_count<lv2_obj, lv2_sema>(); break;
	case SYS_FS_FD_OBJECT: *nump = idm_get_count<lv2_fs_object, lv2_fs_object>(); break;
	case SYS_LWCOND_OBJECT: *nump = idm_get_count<lv2_obj, lv2_lwcond>(); break;
	case SYS_EVENT_FLAG_OBJECT: *nump = idm_get_count<lv2_obj, lv2_event_flag>(); break;
	default:
	{
		// Unknown object type
		return CELL_EINVAL;
	}
	}

	return CELL_OK;
}
#include <set>
// Collect the ids of all registered objects of the given idm type
// into an ordered set.
template <typename T, typename Get>
void idm_get_set(std::set<u32>& out)
{
	idm::select<T, Get>([&](u32 obj_id, Get&)
	{
		out.emplace(obj_id);
	});
}
// Shared implementation for sys_process_get_id/sys_process_get_id2:
// collect the ids of the requested object type in ascending order and
// copy up to 'size' of them into the guest buffer.
static error_code process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size)
{
	std::set<u32> objects;

	switch (object)
	{
	case SYS_MEM_OBJECT: idm_get_set<lv2_obj, lv2_memory>(objects); break;
	case SYS_MUTEX_OBJECT: idm_get_set<lv2_obj, lv2_mutex>(objects); break;
	case SYS_COND_OBJECT: idm_get_set<lv2_obj, lv2_cond>(objects); break;
	case SYS_RWLOCK_OBJECT: idm_get_set<lv2_obj, lv2_rwlock>(objects); break;
	case SYS_INTR_TAG_OBJECT: idm_get_set<lv2_obj, lv2_int_tag>(objects); break;
	case SYS_INTR_SERVICE_HANDLE_OBJECT: idm_get_set<lv2_obj, lv2_int_serv>(objects); break;
	case SYS_EVENT_QUEUE_OBJECT: idm_get_set<lv2_obj, lv2_event_queue>(objects); break;
	case SYS_EVENT_PORT_OBJECT: idm_get_set<lv2_obj, lv2_event_port>(objects); break;
	case SYS_TRACE_OBJECT: fmt::throw_exception("SYS_TRACE_OBJECT");
	case SYS_SPUIMAGE_OBJECT: idm_get_set<lv2_obj, lv2_spu_image>(objects); break;
	case SYS_PRX_OBJECT: idm_get_set<lv2_obj, lv2_prx>(objects); break;
	case SYS_OVERLAY_OBJECT: idm_get_set<lv2_obj, lv2_overlay>(objects); break;
	case SYS_LWMUTEX_OBJECT: idm_get_set<lv2_obj, lv2_lwmutex>(objects); break;
	case SYS_TIMER_OBJECT: idm_get_set<lv2_obj, lv2_timer>(objects); break;
	case SYS_SEMAPHORE_OBJECT: idm_get_set<lv2_obj, lv2_sema>(objects); break;
	case SYS_FS_FD_OBJECT: idm_get_set<lv2_fs_object, lv2_fs_object>(objects); break;
	case SYS_LWCOND_OBJECT: idm_get_set<lv2_obj, lv2_lwcond>(objects); break;
	case SYS_EVENT_FLAG_OBJECT: idm_get_set<lv2_obj, lv2_event_flag>(objects); break;
	case SYS_SPUPORT_OBJECT: fmt::throw_exception("SYS_SPUPORT_OBJECT");
	default:
	{
		// Unknown object type
		return CELL_EINVAL;
	}
	}

	u32 i = 0;

	// NOTE: Treats negative and 0 values as 1 due to signed checks and "do-while" behavior of fw
	for (auto id = objects.begin(); i < std::max<s32>(size, 1) + 0u && id != objects.end(); id++, i++)
	{
		buffer[i] = *id;
	}

	// Report how many ids were actually written
	*set_size = i;

	return CELL_OK;
}
error_code sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size)
{
	sys_process.error("sys_process_get_id(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)", object, buffer, size, set_size);

	// SPU ports cannot be enumerated through this variant of the syscall
	if (object == SYS_SPUPORT_OBJECT)
	{
		// Unallowed for this syscall
		return CELL_EINVAL;
	}

	// Delegate to the shared enumeration helper
	return process_get_id(object, buffer, size, set_size);
}
error_code sys_process_get_id2(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size)
{
	sys_process.error("sys_process_get_id2(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)", object, buffer, size, set_size);

	// This syscall is more capable than sys_process_get_id but also needs a root perm check
	if (!g_ps3_process_info.has_root_perm())
	{
		return CELL_ENOSYS;
	}

	// Delegate to the shared enumeration helper
	return process_get_id(object, buffer, size, set_size);
}
// Validate that 'addr' may be used for SPU lock-line reservations under the
// requested access rights. Returns an empty CellError when the address is ok.
CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags)
{
	// At least one known access-right flag must be set and no unknown bits allowed
	if (!flags || flags & ~(SYS_MEMORY_ACCESS_RIGHT_SPU_THR | SYS_MEMORY_ACCESS_RIGHT_RAW_SPU))
	{
		return CELL_EINVAL;
	}

	// TODO: respect sys_mmapper region's access rights
	// Classify the address by its 256MB region
	switch (addr >> 28)
	{
	case 0x0: // Main memory
	case 0x1: // Main memory
	case 0x2: // User 64k (sys_memory)
	case 0xc: // RSX Local memory
	case 0xe: // RawSPU MMIO
		break;

	case 0xf: // Private SPU MMIO
	{
		if (flags & SYS_MEMORY_ACCESS_RIGHT_RAW_SPU)
		{
			// Cannot be accessed by RawSPU
			return CELL_EPERM;
		}

		break;
	}
	case 0xd: // PPU Stack area
		return CELL_EPERM;
	default:
	{
		// Remaining regions: check whether a sys_vm area owns this 256MB block
		if (auto vm0 = idm::get<sys_vm_t>(sys_vm_t::find_id(addr)))
		{
			// sys_vm area was not covering the address specified but made a reservation on the entire 256mb region
			if (vm0->addr + vm0->size - 1 < addr)
			{
				return CELL_EINVAL;
			}

			// sys_vm memory is not allowed
			return CELL_EPERM;
		}

		// Reject addresses in completely unmapped 256MB regions
		if (!vm::get(vm::any, addr & -0x1000'0000))
		{
			return CELL_EINVAL;
		}

		break;
	}
	}

	// Empty error value: the address is acceptable
	return {};
}
error_code sys_process_is_spu_lock_line_reservation_address(u32 addr, u64 flags)
{
	sys_process.warning("sys_process_is_spu_lock_line_reservation_address(addr=0x%x, flags=0x%llx)", addr, flags);

	// Forward to the checker and propagate any error it reports
	const auto err = process_is_spu_lock_line_reservation_address(addr, flags);

	if (err)
	{
		return err;
	}

	return CELL_OK;
}
// Copy the running title's id into a 0x40-byte guest buffer.
error_code _sys_process_get_paramsfo(vm::ptr<char> buffer)
{
	sys_process.warning("_sys_process_get_paramsfo(buffer=0x%x)", buffer);

	// No title id means there is no PARAM.SFO information to report
	if (!Emu.GetTitleID().length())
	{
		return CELL_ENOENT;
	}

	// Zero the whole output area, then copy at most 9 chars of the title id
	// starting at offset 1 (the first byte stays zero)
	memset(buffer.get_ptr(), 0, 0x40);
	memcpy(buffer.get_ptr() + 1, Emu.GetTitleID().c_str(), std::min<usz>(Emu.GetTitleID().length(), 9));

	return CELL_OK;
}
// Report the SDK version of the (single) emulated process; pid is ignored.
s32 process_get_sdk_version(u32 /*pid*/, s32& ver)
{
	// get correct SDK version for selected pid
	ver = g_ps3_process_info.sdk_ver;
	return CELL_OK;
}
error_code sys_process_get_sdk_version(u32 pid, vm::ptr<s32> version)
{
	sys_process.warning("sys_process_get_sdk_version(pid=0x%x, version=*0x%x)", pid, version);

	s32 sdk_ver;

	// Fetch the version, forwarding any error as-is
	if (const s32 ret = process_get_sdk_version(pid, sdk_ver); ret != CELL_OK)
	{
		return CellError{ret + 0u}; // error code
	}

	*version = sdk_ver;
	return CELL_OK;
}
// Stub: killing other processes is not supported under emulation.
error_code sys_process_kill(u32 pid)
{
	sys_process.todo("sys_process_kill(pid=0x%x)", pid);
	return CELL_OK;
}
// Stub: child process management is not supported under emulation.
error_code sys_process_wait_for_child(u32 pid, vm::ptr<u32> status, u64 unk)
{
	// Fixed: the log format string was missing its closing parenthesis
	sys_process.todo("sys_process_wait_for_child(pid=0x%x, status=*0x%x, unk=0x%llx)", pid, status, unk);
	return CELL_OK;
}
// Stub: parameters are unknown; they are only logged for reverse engineering.
error_code sys_process_wait_for_child2(u64 unk1, u64 unk2, u64 unk3, u64 unk4, u64 unk5, u64 unk6)
{
	sys_process.todo("sys_process_wait_for_child2(unk1=0x%llx, unk2=0x%llx, unk3=0x%llx, unk4=0x%llx, unk5=0x%llx, unk6=0x%llx)",
		unk1, unk2, unk3, unk4, unk5, unk6);
	return CELL_OK;
}
// Stub: process status reporting is not implemented.
error_code sys_process_get_status(u64 unk)
{
	sys_process.todo("sys_process_get_status(unk=0x%llx)", unk);
	//vm::write32(CPU.gpr[4], GetPPUThreadStatus(CPU));
	return CELL_OK;
}
// Stub: child process management is not supported under emulation.
error_code sys_process_detach_child(u64 unk)
{
	sys_process.todo("sys_process_detach_child(unk=0x%llx)", unk);
	return CELL_OK;
}
extern void signal_system_cache_can_stay();
// Terminate the emulated process: schedule emulator shutdown on the main
// thread, then park the calling PPU thread until emulation stops.
void _sys_process_exit(ppu_thread& ppu, s32 status, u32 arg2, u32 arg3)
{
	ppu.state += cpu_flag::wait;

	sys_process.warning("_sys_process_exit(status=%d, arg2=0x%x, arg3=0x%x)", status, arg2, arg3);

	// Shutdown must be performed from the main (GUI) thread
	Emu.CallFromMainThread([]()
	{
		sys_process.success("Process finished");
		signal_system_cache_can_stay();
		Emu.Kill();
	});

	// Wait for GUI thread
	while (auto state = +ppu.state)
	{
		if (is_stopped(state))
		{
			break;
		}

		ppu.state.wait(state);
	}
}
// exitspawn-style process exit: parse argv/envp from the guest parameter
// block and reboot into the specified program, or fall back to a plain exit.
void _sys_process_exit2(ppu_thread& ppu, s32 status, vm::ptr<sys_exit2_param> arg, u32 arg_size, u32 arg4)
{
	ppu.state += cpu_flag::wait;

	sys_process.warning("_sys_process_exit2(status=%d, arg=*0x%x, arg_size=0x%x, arg4=0x%x)", status, arg, arg_size, arg4);

	// arg->args holds two consecutive null-terminated pointer lists:
	// argv strings first, then envp strings
	auto pstr = +arg->args;

	std::vector<std::string> argv;
	std::vector<std::string> envp;

	while (auto ptr = *pstr++)
	{
		argv.emplace_back(ptr.get_ptr());
		sys_process.notice(" *** arg: %s", ptr);
	}

	while (auto ptr = *pstr++)
	{
		envp.emplace_back(ptr.get_ptr());
		sys_process.notice(" *** env: %s", ptr);
	}

	// Oversized parameter blocks carry a trailing 0x1000-byte data payload
	std::vector<u8> data;

	if (arg_size > 0x1030)
	{
		data.resize(0x1000);
		std::memcpy(data.data(), vm::base(arg.addr() + arg_size - 0x1000), 0x1000);
	}

	// Without a program to spawn, behave like a plain process exit
	if (argv.empty())
	{
		return _sys_process_exit(ppu, status, 0, 0);
	}

	// TODO: set prio, flags
	lv2_exitspawn(ppu, argv, envp, data);
}
// Reboot emulation into the program given by argv[0], carrying over the
// environment, extra payload data, mounted paths and LV2 memory containers.
void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<std::string>& envp, std::vector<u8>& data)
{
	ppu.state += cpu_flag::wait;

	// sys_sm_shutdown (detected via the syscall number in r11)
	const bool is_real_reboot = (ppu.gpr[11] == 379);

	// All teardown/reboot work runs on the main (GUI) thread
	Emu.CallFromMainThread([is_real_reboot, argv = std::move(argv), envp = std::move(envp), data = std::move(data)]() mutable
	{
		sys_process.success("Process finished -> %s", argv[0]);

		// Determine the disc path that should stay mounted across the reboot
		std::string disc;

		if (Emu.GetCat() == "DG" || Emu.GetCat() == "GD")
			disc = vfs::get("/dev_bdvd/");

		if (disc.empty() && !Emu.GetTitleID().empty())
			disc = vfs::get(Emu.GetDir());

		std::string path = vfs::get(argv[0]);
		std::string hdd1 = vfs::get("/dev_hdd1/");

		// Preserve the last NPDRM key so encrypted content stays bootable
		const u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();

		using namespace id_manager;

		// Snapshot LV2 memory container state; replayed after the new boot
		auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, vec = (reader_lock{g_mutex}, g_fxo->get<id_map<lv2_memory_container>>().vec)](u32 sdk_suggested_mem) mutable
		{
			if (is_real_reboot)
			{
				// Do not save containers on actual reboot
				vec.clear();
			}

			// Save LV2 memory containers
			ensure(g_fxo->init<id_map<lv2_memory_container>>())->vec = std::move(vec);

			// Empty the containers, accumulate their total size
			u32 total_size = 0;

			idm::select<lv2_memory_container>([&](u32, lv2_memory_container& ctr)
			{
				ctr.used = 0;
				total_size += ctr.size;
			});

			// The default memory container capacity can only decrease after exitspawn
			// 1. If newer SDK version suggests higher memory capacity - it is ignored
			// 2. If newer SDK version suggests lower memory capacity - it is lowered
			// And if 2. happens while user memory containers exist, the left space can be spent on user memory containers
			ensure(g_fxo->init<lv2_memory_container>(std::min(old_size - total_size, sdk_suggested_mem) + total_size));
		};

		// Executed after the current emulation instance has been killed:
		// install the saved state and boot the spawned program
		Emu.after_kill_callback = [func = std::move(func), argv = std::move(argv), envp = std::move(envp), data = std::move(data),
			disc = std::move(disc), path = std::move(path), hdd1 = std::move(hdd1), old_config = Emu.GetUsedConfig(), klic]() mutable
		{
			Emu.argv = std::move(argv);
			Emu.envp = std::move(envp);
			Emu.data = std::move(data);
			Emu.disc = std::move(disc);
			Emu.hdd1 = std::move(hdd1);
			Emu.init_mem_containers = std::move(func);

			if (klic)
			{
				Emu.klic.emplace_back(klic);
			}

			Emu.SetForceBoot(true);

			auto res = Emu.BootGame(path, "", true, cfg_mode::continuous, old_config);

			if (res != game_boot_result::no_errors)
			{
				sys_process.fatal("Failed to boot from exitspawn! (path=\"%s\", error=%s)", path, res);
			}
		};

		signal_system_cache_can_stay();
		Emu.Kill(false);
	});

	// Wait for GUI thread
	while (auto state = +ppu.state)
	{
		if (is_stopped(state))
		{
			break;
		}

		ppu.state.wait(state);
	}
}
// Variant of process exit taking only a status code.
void sys_process_exit3(ppu_thread& ppu, s32 status)
{
	ppu.state += cpu_flag::wait;

	// Fixed: log under the actual syscall name (was "_sys_process_exit3")
	sys_process.warning("sys_process_exit3(status=%d)", status);

	return _sys_process_exit(ppu, status, 0, 0);
}
// Stub: spawning a secondary SELF is not implemented.
error_code sys_process_spawns_a_self2(vm::ptr<u32> pid, u32 primary_prio, u64 flags, vm::ptr<void> stack, u32 stack_size, u32 mem_id, vm::ptr<void> param_sfo, vm::ptr<void> dbg_data)
{
	// Fixed: the log format string was missing its closing parenthesis
	sys_process.todo("sys_process_spawns_a_self2(pid=*0x%x, primary_prio=0x%x, flags=0x%llx, stack=*0x%x, stack_size=0x%x, mem_id=0x%x, param_sfo=*0x%x, dbg_data=*0x%x)"
		, pid, primary_prio, flags, stack, stack_size, mem_id, param_sfo, dbg_data);
	return CELL_OK;
}
| 14,683
|
C++
|
.cpp
| 420
| 32.604762
| 191
| 0.693964
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,350
|
sys_uart.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_uart.cpp
|
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "Emu/Cell/lv2/sys_rsxaudio.h"
#include "Emu/Cell/lv2/sys_process.h"
#include "sys_uart.h"
LOG_CHANNEL(sys_uart);
// Pretty-printer specialization so UartAudioCtrlID values appear by name in logs.
template <>
void fmt_class_string<UartAudioCtrlID>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](UartAudioCtrlID value)
	{
		switch (value)
		{
		STR_CASE(UartAudioCtrlID::DAC_RESET);
		STR_CASE(UartAudioCtrlID::DAC_DE_EMPHASIS);
		STR_CASE(UartAudioCtrlID::AVCLK);
		}

		// Fall through for values without a string mapping
		return unknown;
	});
}
// Handles the AV init packet: records the protocol version and the set of
// HDMI events the guest wants to receive.
struct av_init_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_av_init);
	}

	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_av_init *>(pkt_buf);

		// The reply buffer must at least fit a bare reply header
		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		vuart.av_cmd_ver = pkt->hdr.version;
		vuart.hdmi_events_bitmask |= pkt->event_bit;

		if (pkt->event_bit & PS3AV_EVENT_BIT_UNK)
		{
			// 0 or 255, probably ps2 backwards compatibility (inverted)
			const ps3av_pkt_av_init_reply reply = { 0 };
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS, &reply, sizeof(ps3av_pkt_av_init_reply));
			return;
		}

		vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
// Handles the AV fini packet: clears the HDMI event mask set by av_init_cmd.
struct av_fini_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_header);
	}

	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_header *>(pkt_buf);

		// The reply buffer must at least fit a bare reply header
		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		vuart.hdmi_events_bitmask = 0;
		vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS);
	}
};
// Handles the "get monitor info" packet: reports the capabilities of the
// virtual display attached to the requested AV port (AVMULTI or HDMI).
struct av_get_monitor_info_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_get_monitor_info);
	}

	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_get_monitor_info *>(pkt_buf);

		// The reply buffer must at least fit a bare reply header
		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		ps3av_get_monitor_info_reply cfg{};

		if (pkt->avport == static_cast<u16>(UartAudioAvport::AVMULTI_0))
		{
			// AV MULTI: analog output, every resolution/colorspace reported as supported
			cfg.avport = static_cast<u8>(UartAudioAvport::AVMULTI_0);
			cfg.monitor_type = PS3AV_MONITOR_TYPE_AVMULTI;
			cfg.res_60.res_bits = UINT32_MAX;
			cfg.res_50.res_bits = UINT32_MAX;
			cfg.res_vesa.res_bits = UINT32_MAX;
			cfg.cs.rgb = PS3AV_CS_SUPPORTED;
			cfg.cs.yuv444 = PS3AV_CS_SUPPORTED;
			cfg.cs.yuv422 = PS3AV_CS_SUPPORTED;
			cfg.speaker_info = 1;
			cfg.num_of_audio_block = 1;
			cfg.audio_info[0].sbit = 7;
			cfg.audio_info[0].max_num_of_ch = 2;
			cfg.audio_info[0].type = PS3AV_MON_INFO_AUDIO_TYPE_LPCM;
			cfg.audio_info[0].fs = 127;
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS, &cfg, sizeof(ps3av_get_monitor_info_reply));
		}
		else if (pkt->avport <= static_cast<u16>(UartAudioAvport::HDMI_1))
		{
			// HDMI_1 only exists in debug console mode
			if (pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode)
			{
				vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL);
				return;
			}

			set_hdmi_display_cfg(vuart, cfg, static_cast<u8>(pkt->avport));
			vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS, &cfg, sizeof(ps3av_get_monitor_info_reply) - 4); // Length is different for some reason
		}
		else
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_INVALID_PORT);
		}
	}

	// Fill 'cfg' with the capabilities of the emulated HDMI display on 'avport'.
	static void set_hdmi_display_cfg(vuart_av_thread &vuart, ps3av_get_monitor_info_reply &cfg, u8 avport)
	{
		// EDID-passthrough behavior modes only report the monitor type
		if (vuart.hdmi_behavior_mode != PS3AV_HDMI_BEHAVIOR_NORMAL && (vuart.hdmi_behavior_mode & PS3AV_HDMI_BEHAVIOR_EDID_PASS))
		{
			cfg.monitor_type = vuart.hdmi_behavior_mode & PS3AV_HDMI_BEHAVIOR_DVI ? PS3AV_MONITOR_TYPE_DVI : PS3AV_MONITOR_TYPE_HDMI;
			return;
		}

		// Report maximum support
		static constexpr u8 mon_id[sizeof(cfg.monitor_id)] = { 0x4A, 0x13, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15 };
		static constexpr u8 mon_name[sizeof(cfg.monitor_name)] = { 'R', 'P', 'C', 'S', '3', ' ', 'V', 'i', 'r', 't', 'M', 'o', 'n', '\0', '\0', '\0' };
		static constexpr ps3av_info_audio audio_info[PS3AV_MON_INFO_AUDIO_BLK_MAX] =
		{
			{ PS3AV_MON_INFO_AUDIO_TYPE_LPCM, 8, 0x7F, 0x07 },
			{ PS3AV_MON_INFO_AUDIO_TYPE_AC3, 8, 0x7F, 0xFF },
			{ PS3AV_MON_INFO_AUDIO_TYPE_AAC, 8, 0x7F, 0xFF },
			{ PS3AV_MON_INFO_AUDIO_TYPE_DTS, 8, 0x7F, 0xFF },
			{ PS3AV_MON_INFO_AUDIO_TYPE_DDP, 8, 0x7F, 0xFF },
			{ PS3AV_MON_INFO_AUDIO_TYPE_DTS_HD, 8, 0x7F, 0xFF },
			{ PS3AV_MON_INFO_AUDIO_TYPE_DOLBY_THD, 8, 0x7F, 0xFF },
		};

		cfg.avport = avport;
		memcpy(cfg.monitor_id, mon_id, sizeof(cfg.monitor_id));
		cfg.monitor_type = PS3AV_MONITOR_TYPE_HDMI;
		memcpy(cfg.monitor_name, mon_name, sizeof(cfg.monitor_name));

		// Derive the "native" resolution bit from the configured video resolution
		const u32 native_res = [&]()
		{
			switch (g_cfg.video.resolution)
			{
			case video_resolution::_1080p:
				return PS3AV_RESBIT_1920x1080P;
			case video_resolution::_1080i:
				return PS3AV_RESBIT_1920x1080I;
			case video_resolution::_1600x1080p:
			case video_resolution::_1440x1080p:
			case video_resolution::_1280x1080p:
			case video_resolution::_720p:
				return PS3AV_RESBIT_1280x720P;
			case video_resolution::_576p:
				return PS3AV_RESBIT_720x576P;
			default:
				return PS3AV_RESBIT_720x480P;
			}
		}();

		cfg.res_60.res_bits = UINT32_MAX;
		cfg.res_60.native = native_res;
		cfg.res_50.res_bits = UINT32_MAX;
		cfg.res_50.native = native_res;
		cfg.res_other.res_bits = UINT32_MAX;
		cfg.res_vesa.res_bits = 1; // Always one mode at a time

		cfg.cs.rgb = PS3AV_CS_SUPPORTED | PS3AV_RGB_SELECTABLE_QAUNTIZATION_RANGE | PS3AV_12BIT_COLOR;
		cfg.cs.yuv444 = PS3AV_CS_SUPPORTED | PS3AV_12BIT_COLOR;
		cfg.cs.yuv422 = PS3AV_CS_SUPPORTED;
		cfg.cs.colorimetry_data = PS3AV_COLORIMETRY_xvYCC_601 | PS3AV_COLORIMETRY_xvYCC_709 | PS3AV_COLORIMETRY_MD0 | PS3AV_COLORIMETRY_MD1 | PS3AV_COLORIMETRY_MD2;

		// Chromaticity / gamma values of the virtual monitor
		cfg.color.red_x = 1023;
		cfg.color.red_y = 0;
		cfg.color.green_x = 0;
		cfg.color.green_y = 1023;
		cfg.color.blue_x = 0;
		cfg.color.blue_y = 0;
		cfg.color.white_x = 341;
		cfg.color.white_y = 341;
		cfg.color.gamma = 100;

		cfg.supported_ai = 1;
		cfg.speaker_info = 0x4F;

		// Audio formats
		cfg.num_of_audio_block = 7;
		memcpy(cfg.audio_info, audio_info, sizeof(cfg.audio_info));

		// 16:9 27-inch (as a default)
		cfg.hor_screen_size = 60;
		cfg.ver_screen_size = 34;

		cfg.supported_content_types = 0b1111; // Graphics, cinema, photo, game

		// 3D modes, no native formats
		cfg.res_60_packed_3D.res_bits = UINT32_MAX;
		cfg.res_50_packed_3D.res_bits = UINT32_MAX;
		cfg.res_other_3D.res_bits = UINT32_MAX;
		cfg.res_60_sbs_3D.res_bits = UINT32_MAX;
		cfg.res_50_sbs_3D.res_bits = UINT32_MAX;

		cfg.vendor_specific_flags = 0; // values from 0-3 (unk)
	}
};
struct av_get_bksv_list_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_get_bksv);
	}

	// Reports the HDCP BKSV (receiver key selection vector) list for the sink on the
	// requested HDMI port. A single fixed BKSV entry is returned while HDCP is enabled.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_get_bksv *>(pkt_buf);

		// Reply must fit into the guest-visible reply buffer.
		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Only HDMI ports carry HDCP data.
		if (pkt->avport > static_cast<u16>(UartAudioAvport::HDMI_1))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		// The second HDMI port is only present in debug console mode.
		if (pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode)
		{
			vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL);
			return;
		}

		ps3av_pkt_get_bksv_reply reply{};
		reply.avport = pkt->avport;

		// Reply size without any KSV entries.
		u16 pkt_size = offsetof(ps3av_pkt_get_bksv_reply, ksv_arr);

		if (vuart.hdmi_behavior_mode == PS3AV_HDMI_BEHAVIOR_NORMAL || !(vuart.hdmi_behavior_mode & PS3AV_HDMI_BEHAVIOR_HDCP_OFF))
		{
			// HDCP active: report one 5-byte BKSV entry and round the packet size
			// up to the next 4-byte boundary (the & 0xFFFC masks off the low bits).
			reply.ksv_cnt = 1;
			memcpy(reply.ksv_arr[0], PS3AV_BKSV_VALUE, sizeof(PS3AV_BKSV_VALUE));
			pkt_size = (pkt_size + 5 * reply.ksv_cnt + 3) & 0xFFFC;
		}

		vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS, &reply, pkt_size);
	}
};
struct av_enable_event_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_enable_event);
	}

	// Subscribes the guest to the requested HDMI event bits.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_enable_event*>(pkt_buf);

		const bool reply_fits = vuart.get_reply_buf_free_size() >= sizeof(ps3av_pkt_reply_hdr);

		if (!reply_fits)
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Accumulate the requested bits into the active event mask.
		vuart.hdmi_events_bitmask |= request->event_bit;
		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_disable_event_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_enable_event);
	}

	// Unsubscribes the guest from the requested HDMI event bits.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_enable_event*>(pkt_buf);

		const bool reply_fits = vuart.get_reply_buf_free_size() >= sizeof(ps3av_pkt_reply_hdr);

		if (!reply_fits)
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Remove the requested bits from the active event mask.
		vuart.hdmi_events_bitmask &= ~request->event_bit;
		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_tv_mute_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_pkt_av_audio_mute);
}
// Behavior is unknown, but it seems that this pkt could be ignored
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_pkt_av_audio_mute *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
if (pkt->avport < (g_cfg.core.debug_console_mode ? 2 : 1))
{
sys_uart.notice("[av_tv_mute_cmd] tv mute set to %u", pkt->mute > 0);
}
vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
}
};
struct av_null_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return 12;
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS);
}
};
struct av_get_aksv_list_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_header);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr) + sizeof(ps3av_pkt_get_aksv_reply))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
ps3av_pkt_get_aksv_reply reply{};
memcpy(reply.ksv_arr[0], PS3AV_AKSV_VALUE, sizeof(PS3AV_AKSV_VALUE));
if (g_cfg.core.debug_console_mode)
{
memcpy(reply.ksv_arr[1], PS3AV_AKSV_VALUE, sizeof(PS3AV_AKSV_VALUE));
reply.ksv_size = 2 * sizeof(PS3AV_AKSV_VALUE);
}
else
{
reply.ksv_size = sizeof(PS3AV_AKSV_VALUE);
}
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS, &reply, sizeof(ps3av_pkt_get_aksv_reply));
}
};
struct video_disable_signal_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_video_disable_sig);
	}

	// Disables the video signal on the given AV port.
	// NOTE(review): the original comment here ("Cross color reduction filter setting in vsh")
	// looks copy-pasted from av_video_ytrapcontrol_cmd - the code below clears HDMI
	// resolution state and updates AV mute instead; confirm against RE notes.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_video_disable_sig *>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		if (pkt->avport <= static_cast<u16>(UartAudioAvport::HDMI_1))
		{
			// HDMI port: update the AV mute state before the port validity check
			// (deliberate ordering - the mute update happens even for the failing
			// HDMI_1-without-debug-mode case below).
			g_fxo->get<rsx_audio_data>().update_av_mute_state(vuart.avport_to_idx(static_cast<UartAudioAvport>(pkt->avport.get())), false, true);

			// HDMI_1 only exists in debug console mode.
			if (pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode)
			{
				vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL);
				return;
			}

			// Mark this HDMI port as no longer having a configured video mode.
			vuart.hdmi_res_set[pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1)] = false;
			vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
		}
		else if (pkt->avport == static_cast<u16>(UartAudioAvport::AVMULTI_0))
		{
			// AVMULTI: a reply is only sent once head B has been initialized;
			// otherwise the command is silently dropped.
			if (vuart.head_b_initialized)
			{
				vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
			}
		}
		else
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
		}
	}
};
struct av_video_ytrapcontrol_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_av_video_ytrapcontrol);
	}

	// Cross color reduction filter setting in vsh. (AVMULTI)
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_av_video_ytrapcontrol*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_get_hw_info_reply) + sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Only values 0 and 5 are handled; anything else is silently accepted.
		const bool known_mode = request->unk1 == 0U || request->unk1 == 5U;

		if (!known_mode)
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
			return;
		}

		sys_uart.notice("[av_video_ytrapcontrol_cmd] unk1=0x%04x unk2=0x%04x", request->unk1, request->unk2);
		vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_audio_mute_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_av_audio_mute);
	}

	// Sets the audio mute state of a single AV output port.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_av_audio_mute*>(pkt_buf);

		if (request->avport == static_cast<u16>(UartAudioAvport::AVMULTI_1))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		// Everything except HDMI_0/HDMI_1/AVMULTI_0 (and HDMI_1 without debug console
		// mode) cannot be reached through syscon.
		const bool syscon_fail =
			(request->avport > static_cast<u16>(UartAudioAvport::HDMI_1) && request->avport != static_cast<u16>(UartAudioAvport::AVMULTI_0)) ||
			(request->avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode);

		if (syscon_fail)
		{
			vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL);
			return;
		}

		g_fxo->get<rsx_audio_data>().update_av_mute_state(vuart.avport_to_idx(static_cast<UartAudioAvport>(request->avport.get())), true, false, request->mute);
		vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_acp_ctrl_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_acp_ctrl);
	}

	// Controls transmission of the ACP data island packet on an HDMI port. Only logged.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_acp_ctrl*>(pkt_buf);

		// Only HDMI ports are valid, and HDMI_1 exists only in debug console mode.
		const bool port_ok = request->avport < static_cast<u8>(UartAudioAvport::HDMI_1) ||
			(request->avport == static_cast<u8>(UartAudioAvport::HDMI_1) && g_cfg.core.debug_console_mode);

		if (!port_ok)
		{
			vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		sys_uart.notice("[av_acp_ctrl_cmd] HDMI_%u data island ctrl pkt ctrl=0x%02x", request->avport, request->packetctl);
		vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_set_acp_packet_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_set_acp_packet);
	}

	// Sets the contents of an HDMI data island packet. Only logged.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_set_acp_packet*>(pkt_buf);

		// Only HDMI ports are valid; HDMI_1 requires debug console mode.
		const bool port_ok = request->avport < static_cast<u8>(UartAudioAvport::HDMI_1) ||
			(request->avport == static_cast<u8>(UartAudioAvport::HDMI_1) && g_cfg.core.debug_console_mode);

		// Packet type must lie in [0x00, 0x0A] or [0x81, 0x85].
		const bool type_ok = request->pkt_type <= 0x0A || (request->pkt_type >= 0x81 && request->pkt_type <= 0x85);

		if (!port_ok || !type_ok)
		{
			vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		sys_uart.notice("[av_set_acp_packet_cmd] HDMI_%u data island pkt type=0x%02x", request->avport, request->pkt_type);
		vuart.write_resp<true>(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_add_signal_ctl_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_add_signal_ctl);
	}

	// Adds a signal control setting; only valid for the AVMULTI output. Only logged.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_add_signal_ctl*>(pkt_buf);

		const bool reply_fits = vuart.get_reply_buf_free_size() >= sizeof(ps3av_pkt_reply_hdr);

		if (!reply_fits)
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		if (request->avport != static_cast<u16>(UartAudioAvport::AVMULTI_0))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		sys_uart.notice("[av_add_signal_ctl_cmd] signal_ctl=0x%04x", request->signal_ctl);
		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_set_cgms_wss_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_av_set_cgms_wss);
	}

	// Something related to copy control on AVMULTI. Only logged.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_av_set_cgms_wss*>(pkt_buf);

		const bool reply_fits = vuart.get_reply_buf_free_size() >= sizeof(ps3av_pkt_reply_hdr);

		if (!reply_fits)
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// CGMS/WSS only applies to the analog AVMULTI output.
		if (request->avport != static_cast<u16>(UartAudioAvport::AVMULTI_0))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_INVALID_PORT);
			return;
		}

		sys_uart.notice("[av_set_cgms_wss_cmd] cgms_wss=0x%08x", request->cgms_wss);
		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_get_hw_conf_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_header);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_get_hw_info_reply) + sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
ps3av_get_hw_info_reply out{};
out.num_of_hdmi = g_cfg.core.debug_console_mode ? 2 : 1;
out.num_of_avmulti = 1;
out.num_of_spdif = 1;
out.extra_bistream_support = 1;
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS, &out, sizeof(ps3av_get_hw_info_reply));
}
};
struct av_set_hdmi_mode_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_set_hdmi_mode);
	}

	// Sets the HDMI behavior mode (HDCP off / EDID passthrough / DVI) and replays
	// unplug + plug/HDCP-done events so the guest observes both ports re-negotiating.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_set_hdmi_mode *>(pkt_buf);

		if (pkt->mode != PS3AV_HDMI_BEHAVIOR_NORMAL)
		{
			// Turning HDCP off is only permitted on debug consoles.
			if ((pkt->mode & PS3AV_HDMI_BEHAVIOR_HDCP_OFF) && !g_cfg.core.debug_console_mode)
			{
				vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_UNSUPPORTED_HDMI_MODE);
				return;
			}

			// Unknown mode bits are accepted but logged.
			if (pkt->mode & ~(PS3AV_HDMI_BEHAVIOR_HDCP_OFF | PS3AV_HDMI_BEHAVIOR_EDID_PASS | PS3AV_HDMI_BEHAVIOR_DVI))
			{
				sys_uart.warning("[av_set_hdmi_mode_cmd] Unknown bits in hdmi mode: 0x%02x", pkt->mode);
			}
		}

		vuart.hdmi_behavior_mode = pkt->mode;

		// For each HDMI port emit UNPLUGGED followed by HDCP_DONE if a resolution was
		// already configured on it, otherwise just PLUGGED.
		vuart.add_hdmi_events(UartHdmiEvent::UNPLUGGED, vuart.hdmi_res_set[0] ? UartHdmiEvent::HDCP_DONE : UartHdmiEvent::PLUGGED, true, false);
		vuart.add_hdmi_events(UartHdmiEvent::UNPLUGGED, vuart.hdmi_res_set[1] ? UartHdmiEvent::HDCP_DONE : UartHdmiEvent::PLUGGED, false, true);

		vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct av_get_cec_status_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_header);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_av_get_cec_config_reply) + sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
const ps3av_pkt_av_get_cec_config_reply reply{1};
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS, &reply, sizeof(ps3av_pkt_av_get_cec_config_reply));
}
};
struct video_init_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_header);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS);
}
};
struct video_set_format_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_video_format);
	}

	// Validates the requested scanout format parameters; no further action is needed.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_video_format*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		const bool params_ok =
			request->video_head <= PS3AV_HEAD_B_ANALOG &&
			request->video_order <= 1 &&
			request->video_format <= 16;

		vuart.write_resp(request->hdr.cid, params_ok ? PS3AV_STATUS_SUCCESS : PS3AV_STATUS_INVALID_VIDEO_PARAM);
	}
};
struct video_set_route_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return 24;
	}

	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* hdr = static_cast<const ps3av_header*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(hdr->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Video routing is only selectable from PS2_GX_LPAR, so always report "no selection".
		vuart.write_resp(hdr->cid, PS3AV_STATUS_NO_SEL);
	}
};
struct video_set_pitch_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_video_set_pitch);
	}

	// Validates the framebuffer pitch for the given display head.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_video_set_pitch*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Pitch must be 8-byte aligned and fit into 16 bits.
		const bool pitch_ok =
			request->video_head <= PS3AV_HEAD_B_ANALOG &&
			(request->pitch & 7) == 0U &&
			request->pitch <= UINT16_MAX;

		vuart.write_resp(request->hdr.cid, pitch_ok ? PS3AV_STATUS_SUCCESS : PS3AV_STATUS_INVALID_VIDEO_PARAM);
	}
};
struct video_get_hw_cfg_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
{
return sizeof(ps3av_header);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_header *>(pkt_buf);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_video_get_hw_cfg_reply) + sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
ps3av_pkt_video_get_hw_cfg_reply reply{};
reply.gx_available = 0; // Set to 1 only in PS2_GX_LPAR
vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS, &reply, sizeof(ps3av_pkt_video_get_hw_cfg_reply));
}
};
struct audio_init_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_header);
	}

	// Resets the emulated rsxaudio hardware to its initial state.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* hdr = static_cast<const ps3av_header*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(hdr->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		g_fxo->get<rsx_audio_data>().reset_hw();
		vuart.write_resp(hdr->cid, PS3AV_STATUS_SUCCESS);
	}
};
struct audio_set_mode_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_audio_mode);
	}

	// Configures the audio routing, sample rate and sample width for one AV port.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_audio_mode *>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		if (!set_mode(*pkt))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_INVALID_AUDIO_PARAM);
		}
		else
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
		}
	}

private:

	// Maps the packet's avport/audio_source pair onto an rsxaudio input unit
	// (serial or one of the SPDIF units) plus the avport index it feeds, then
	// commits the derived parameters. Returns false on invalid avport/frequency.
	bool set_mode(const ps3av_pkt_audio_mode& pkt)
	{
		bool spdif_use_serial_buf = false;
		RsxaudioPort avport_src, rsxaudio_port;
		RsxaudioAvportIdx avport_idx;

		switch (pkt.avport)
		{
		case UartAudioAvport::HDMI_0:
		{
			// HDMI can be fed either from SPDIF_1 or from the serial input.
			avport_idx = RsxaudioAvportIdx::HDMI_0;

			if (pkt.audio_source == UartAudioSource::SPDIF)
			{
				avport_src = rsxaudio_port = RsxaudioPort::SPDIF_1;
			}
			else
			{
				avport_src = rsxaudio_port = RsxaudioPort::SERIAL;
			}
			break;
		}
		case UartAudioAvport::HDMI_1:
		{
			avport_idx = RsxaudioAvportIdx::HDMI_1;

			if (pkt.audio_source == UartAudioSource::SPDIF)
			{
				avport_src = rsxaudio_port = RsxaudioPort::SPDIF_1;
			}
			else
			{
				avport_src = rsxaudio_port = RsxaudioPort::SERIAL;
			}
			break;
		}
		case UartAudioAvport::AVMULTI_0:
		{
			// AVMULTI is always fed from the serial input.
			avport_idx = RsxaudioAvportIdx::AVMULTI;
			avport_src = rsxaudio_port = RsxaudioPort::SERIAL;
			break;
		}
		case UartAudioAvport::SPDIF_0:
		{
			// SPDIF outputs may mirror the serial buffer instead of their own unit.
			avport_idx = RsxaudioAvportIdx::SPDIF_0;
			rsxaudio_port = RsxaudioPort::SPDIF_0;

			if (pkt.audio_source == UartAudioSource::SERIAL)
			{
				spdif_use_serial_buf = true;
				avport_src = RsxaudioPort::SERIAL;
			}
			else
			{
				avport_src = RsxaudioPort::SPDIF_0;
			}
			break;
		}
		case UartAudioAvport::SPDIF_1:
		{
			avport_idx = RsxaudioAvportIdx::SPDIF_1;
			rsxaudio_port = RsxaudioPort::SPDIF_1;

			if (pkt.audio_source == UartAudioSource::SERIAL)
			{
				spdif_use_serial_buf = true;
				avport_src = RsxaudioPort::SERIAL;
			}
			else
			{
				avport_src = RsxaudioPort::SPDIF_1;
			}
			break;
		}
		default:
		{
			return false;
		}
		}

		// Reject sample rates above 192 kHz.
		if (static_cast<u32>(pkt.audio_fs.value()) > static_cast<u32>(UartAudioFreq::_192K)) return false;

		// Non-PCM streams on SPDIF units and explicit 16-bit requests use 16-bit
		// samples; everything else is treated as 24-bit.
		const auto bit_cnt = [&]()
		{
			if ((rsxaudio_port != RsxaudioPort::SERIAL && pkt.audio_format != UartAudioFormat::PCM) ||
				pkt.audio_word_bits == UartAudioSampleSize::_16BIT)
			{
				return UartAudioSampleSize::_16BIT;
			}
			else
			{
				return UartAudioSampleSize::_24BIT;
			}
		}();

		return commit_param(rsxaudio_port, avport_idx, avport_src, pkt.audio_fs, bit_cnt, spdif_use_serial_buf, pkt.audio_cs_info);
	}

	// Writes the derived clock/format parameters into the rsxaudio hw state for the
	// selected input unit. cs_data is the SPDIF channel status block from the packet.
	bool commit_param(RsxaudioPort rsxaudio_port, RsxaudioAvportIdx avport, RsxaudioPort avport_src, UartAudioFreq freq,
					  UartAudioSampleSize bit_cnt, bool spdif_use_serial_buf, const u8 *cs_data)
	{
		auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();

		const auto avport_idx = static_cast<std::underlying_type_t<decltype(avport)>>(avport);
		const auto rsxaudio_word_depth = bit_cnt == UartAudioSampleSize::_16BIT ? RsxaudioSampleSize::_16BIT : RsxaudioSampleSize::_32BIT;

		// Express the sample rate as {divider, base clock}: 44.1k family divides the
		// 352.8 kHz base, 48k family divides the 384 kHz base.
		const auto freq_param = [&]()
		{
			switch (freq)
			{
			case UartAudioFreq::_44K:  return std::make_pair(8, SYS_RSXAUDIO_FREQ_BASE_352K);
			default:
			case UartAudioFreq::_48K:  return std::make_pair(8, SYS_RSXAUDIO_FREQ_BASE_384K);
			case UartAudioFreq::_88K:  return std::make_pair(4, SYS_RSXAUDIO_FREQ_BASE_352K);
			case UartAudioFreq::_96K:  return std::make_pair(4, SYS_RSXAUDIO_FREQ_BASE_384K);
			case UartAudioFreq::_176K: return std::make_pair(2, SYS_RSXAUDIO_FREQ_BASE_352K);
			case UartAudioFreq::_192K: return std::make_pair(2, SYS_RSXAUDIO_FREQ_BASE_384K);
			}
		}();

		switch (rsxaudio_port)
		{
		case RsxaudioPort::SERIAL:
		{
			rsxaudio_thread.update_hw_param([&](auto& obj)
			{
				obj.serial_freq_base = freq_param.second;
				obj.serial.freq_div = freq_param.first;
				obj.serial.depth = rsxaudio_word_depth;
				obj.serial.buf_empty_en = true;
				obj.avport_src[avport_idx] = avport_src;
			});
			break;
		}
		case RsxaudioPort::SPDIF_0:
		case RsxaudioPort::SPDIF_1:
		{
			const u8 spdif_idx = rsxaudio_port == RsxaudioPort::SPDIF_1;

			rsxaudio_thread.update_hw_param([&](auto& obj)
			{
				obj.spdif_freq_base = freq_param.second;
				obj.spdif[spdif_idx].freq_div = freq_param.first;
				obj.spdif[spdif_idx].depth = rsxaudio_word_depth;
				obj.spdif[spdif_idx].use_serial_buf = spdif_use_serial_buf;
				obj.spdif[spdif_idx].buf_empty_en = true;
				obj.avport_src[avport_idx] = avport_src;
				memcpy(obj.spdif[spdif_idx].cs_data.data(), cs_data, sizeof(obj.spdif[spdif_idx].cs_data));
			});
			break;
		}
		default:
		{
			return false;
		}
		}

		return true;
	}
};
struct audio_mute_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		// From RE
		return 0;
	}

	// Mutes/unmutes the rsxaudio input that feeds the given AV port.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_audio_mute*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		auto& rsxaudio = g_fxo->get<rsx_audio_data>();

		// SPDIF ports have dedicated inputs; HDMI and AVMULTI ports share the serial input.
		if (request->avport == UartAudioAvport::SPDIF_0)
		{
			rsxaudio.update_mute_state(RsxaudioPort::SPDIF_0, request->mute);
		}
		else if (request->avport == UartAudioAvport::SPDIF_1)
		{
			rsxaudio.update_mute_state(RsxaudioPort::SPDIF_1, request->mute);
		}
		else if (request->avport == UartAudioAvport::HDMI_0 || request->avport == UartAudioAvport::HDMI_1 ||
				 request->avport == UartAudioAvport::AVMULTI_0 || request->avport == UartAudioAvport::AVMULTI_1)
		{
			rsxaudio.update_mute_state(RsxaudioPort::SERIAL, request->mute);
		}

		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct audio_set_active_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_audio_set_active);
	}

	// Activates audio output on the requested AV ports by enabling whichever
	// rsxaudio input (serial or SPDIF) currently feeds each port.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_audio_set_active *>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Decode the avport bitmask into per-port flags, indexed like RsxaudioAvportIdx.
		const bool requested_avports[SYS_RSXAUDIO_AVPORT_CNT] =
		{
			(pkt->audio_port & PS3AV_AUDIO_PORT_HDMI_0) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_HDMI_1) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_AVMULTI) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_SPDIF_0) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_SPDIF_1) != 0U
		};

		g_fxo->get<rsx_audio_data>().update_hw_param([&](auto &obj)
		{
			for (u8 avport_idx = 0; avport_idx < SYS_RSXAUDIO_AVPORT_CNT; avport_idx++)
			{
				if (requested_avports[avport_idx])
				{
					switch (obj.avport_src[avport_idx])
					{
					case RsxaudioPort::SERIAL:
						obj.serial.en = true;
						break;
					case RsxaudioPort::SPDIF_0:
					case RsxaudioPort::SPDIF_1:
					{
						const u8 spdif_idx = obj.avport_src[avport_idx] == RsxaudioPort::SPDIF_1;

						// SPDIF units mirroring the serial buffer are not enabled on their own.
						if (!obj.spdif[spdif_idx].use_serial_buf)
						{
							obj.spdif[spdif_idx].en = true;
						}
						break;
					}
					default:
						break;
					}
				}
			}

			// Unmute on activation. NOTE(review): only spdif[1] is unmuted here,
			// mirroring audio_set_inactive_cmd; spdif[0] mute appears to be managed
			// elsewhere - confirm against RE notes.
			obj.serial.muted = false;
			obj.spdif[1].muted = false;
		});

		vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct audio_set_inactive_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_audio_set_active);
	}

	// Deactivates audio output: disables and mutes the rsxaudio inputs and,
	// unless bit 31 of audio_port is set, also drops the port -> source routing.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_audio_set_active *>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		g_fxo->get<rsx_audio_data>().update_hw_param([&](auto &obj)
		{
			// Bit 31 appears to request a partial deactivation that keeps the
			// routing table intact - TODO confirm semantics against RE notes.
			if ((pkt->audio_port & 0x8000'0000) == 0U)
			{
				obj.avport_src.fill(RsxaudioPort::INVALID);
			}

			obj.serial.en = false;
			obj.serial.muted = true;
			obj.spdif[1].muted = true;

			// SPDIF units mirroring the serial buffer are left enabled, matching
			// audio_set_active_cmd which never enabled them independently.
			for (u8 spdif_idx = 0; spdif_idx < SYS_RSXAUDIO_SPDIF_CNT; spdif_idx++)
			{
				if (!obj.spdif[spdif_idx].use_serial_buf)
				{
					obj.spdif[spdif_idx].en = false;
				}
			}
		});

		vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct audio_spdif_bit_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_audio_spdif_bit);
	}

	// Receives SPDIF user-bit data for the requested ports. The payload is only
	// logged (once, if any requested port is currently sourced from SPDIF_0); the
	// hw-param lambda is used to read avport_src under the proper lock.
	void execute(vuart_av_thread &vuart, const void *pkt_buf) override
	{
		const auto pkt = static_cast<const ps3av_pkt_audio_spdif_bit *>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		// Decode the avport bitmask into per-port flags, indexed like RsxaudioAvportIdx.
		const bool requested_avports[SYS_RSXAUDIO_AVPORT_CNT] =
		{
			(pkt->audio_port & PS3AV_AUDIO_PORT_HDMI_0) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_HDMI_1) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_AVMULTI) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_SPDIF_0) != 0U,
			(pkt->audio_port & PS3AV_AUDIO_PORT_SPDIF_1) != 0U
		};

		g_fxo->get<rsx_audio_data>().update_hw_param([&](auto &obj)
		{
			for (u8 avport_idx = 0; avport_idx < SYS_RSXAUDIO_AVPORT_CNT; avport_idx++)
			{
				if (requested_avports[avport_idx] && obj.avport_src[avport_idx] == RsxaudioPort::SPDIF_0)
				{
					auto &b_data = pkt->spdif_bit_data;
					sys_uart.notice("[audio_spdif_bit_cmd] Data 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
									b_data[0], b_data[1], b_data[2], b_data[3], b_data[4], b_data[5], b_data[6], b_data[7],
									b_data[8], b_data[9], b_data[10], b_data[11]);
					break;
				}
			}
		});

		vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct audio_ctrl_cmd : public ps3av_cmd
{
	u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
	{
		return sizeof(ps3av_pkt_audio_ctrl);
	}

	// Audio control knob: known options are only logged, unknown ones are ignored.
	void execute(vuart_av_thread& vuart, const void* pkt_buf) override
	{
		const auto* request = static_cast<const ps3av_pkt_audio_ctrl*>(pkt_buf);

		if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
		{
			vuart.write_resp(request->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
			return;
		}

		const bool known_ctrl =
			request->audio_ctrl_id == UartAudioCtrlID::DAC_RESET ||
			request->audio_ctrl_id == UartAudioCtrlID::DAC_DE_EMPHASIS ||
			request->audio_ctrl_id == UartAudioCtrlID::AVCLK;

		if (known_ctrl)
		{
			sys_uart.notice("[audio_ctrl_cmd] Option 0x%x", request->audio_ctrl_id);
		}

		vuart.write_resp(request->hdr.cid, PS3AV_STATUS_SUCCESS);
	}
};
struct inc_avset_cmd : public ps3av_cmd
{
u16 get_size(vuart_av_thread& /*vuart*/, const void* pkt_buf) override
{
const auto pkt = static_cast<const ps3av_pkt_inc_avset *>(pkt_buf);
if (pkt->num_of_video_pkt > 2 || pkt->num_of_av_video_pkt > 4 || pkt->num_of_av_audio_pkt > 4)
{
return -1;
}
const auto data_start = static_cast<const u8 *>(pkt_buf) + sizeof(ps3av_pkt_inc_avset);
u64 video_pkt_sec_size = 0;
u64 av_video_pkt_sec_size = 0;
u64 av_audio_pkt_sec_size = 0;
for (u16 pkt_idx = 0; pkt_idx < pkt->num_of_video_pkt; pkt_idx++)
{
video_pkt_sec_size += reinterpret_cast<const ps3av_header *>(&data_start[video_pkt_sec_size])->length + 4ULL;
}
for (u16 pkt_idx = 0; pkt_idx < pkt->num_of_av_video_pkt; pkt_idx++)
{
av_video_pkt_sec_size += reinterpret_cast<const ps3av_header *>(&data_start[video_pkt_sec_size + av_video_pkt_sec_size])->length + 4ULL;
}
for (u16 pkt_idx = 0; pkt_idx < pkt->num_of_av_audio_pkt; pkt_idx++)
{
av_audio_pkt_sec_size += reinterpret_cast<const ps3av_header *>(&data_start[video_pkt_sec_size + av_video_pkt_sec_size + av_audio_pkt_sec_size])->length + 4ULL;
}
return static_cast<u16>(sizeof(ps3av_pkt_inc_avset) + video_pkt_sec_size + av_video_pkt_sec_size + av_audio_pkt_sec_size);
}
void execute(vuart_av_thread &vuart, const void *pkt_buf) override
{
const auto pkt = static_cast<const ps3av_pkt_inc_avset *>(pkt_buf);
auto pkt_data_addr = static_cast<const u8 *>(pkt_buf) + sizeof(ps3av_pkt_inc_avset);
if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
{
vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_BUFFER_OVERFLOW);
return;
}
bool syscon_check_passed = true;
// Video
u32 video_cmd_status = PS3AV_STATUS_SUCCESS;
for (u32 video_pkt_idx = 0; video_pkt_idx < pkt->num_of_video_pkt; ++video_pkt_idx)
{
const auto video_pkt = reinterpret_cast<const ps3av_pkt_video_mode *>(pkt_data_addr);
const u32 subcmd_status = video_pkt_parse(*video_pkt);
if (video_pkt->video_head == PS3AV_HEAD_B_ANALOG)
{
vuart.head_b_initialized = true;
}
if (subcmd_status != PS3AV_STATUS_SUCCESS)
{
video_cmd_status = subcmd_status;
}
pkt_data_addr += video_pkt->hdr.length + 4ULL;
}
if (pkt->num_of_av_video_pkt == 0U && pkt->num_of_av_audio_pkt == 0U)
{
vuart.write_resp(pkt->hdr.cid, video_cmd_status);
return;
}
bool hdcp_done[2]{};
// AV Video
for (u32 video_av_pkt_idx = 0; video_av_pkt_idx < pkt->num_of_av_video_pkt; video_av_pkt_idx++)
{
const auto av_video_pkt = reinterpret_cast<const ps3av_pkt_av_video_cs *>(pkt_data_addr);
const av_video_resp subcmd_resp = av_video_pkt_parse(*av_video_pkt, syscon_check_passed);
if (subcmd_resp.status != PS3AV_STATUS_SUCCESS)
{
vuart.write_resp(pkt->hdr.cid, subcmd_resp.status);
return;
}
if (syscon_check_passed)
{
hdcp_done[0] |= subcmd_resp.hdcp_done_event[0];
hdcp_done[1] |= subcmd_resp.hdcp_done_event[1];
}
pkt_data_addr += av_video_pkt->hdr.length + 4ULL;
}
vuart.hdmi_res_set[0] = hdcp_done[0];
vuart.hdmi_res_set[1] = hdcp_done[1];
vuart.add_hdmi_events(UartHdmiEvent::HDCP_DONE, vuart.hdmi_res_set[0], vuart.hdmi_res_set[1]);
if (vuart.hdmi_res_set[0])
{
g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_0, false, true);
}
if (vuart.hdmi_res_set[1])
{
g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_1, false, true);
}
bool valid_av_audio_pkt = false;
// AV Audio
for (u32 audio_av_pkt_idx = 0; audio_av_pkt_idx < pkt->num_of_av_audio_pkt; audio_av_pkt_idx++)
{
const auto av_audio_pkt = reinterpret_cast<const ps3av_pkt_av_audio_param *>(pkt_data_addr);
if (av_audio_pkt->avport <= static_cast<u16>(UartAudioAvport::HDMI_1))
{
valid_av_audio_pkt = true;
if (!syscon_check_passed || (av_audio_pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode))
{
syscon_check_passed = false;
break;
}
const u8 hdmi_idx = av_audio_pkt->avport == static_cast<u16>(UartAudioAvport::HDMI_1);
g_fxo->get<rsx_audio_data>().update_hw_param([&](auto &obj)
{
auto &hdmi = obj.hdmi[hdmi_idx];
hdmi.init = true;
const std::array<u8, SYS_RSXAUDIO_SERIAL_STREAM_CNT> fifomap =
{
static_cast<u8>((av_audio_pkt->fifomap >> 0) & 3U),
static_cast<u8>((av_audio_pkt->fifomap >> 2) & 3U),
static_cast<u8>((av_audio_pkt->fifomap >> 4) & 3U),
static_cast<u8>((av_audio_pkt->fifomap >> 6) & 3U)
};
const std::array<bool, SYS_RSXAUDIO_SERIAL_STREAM_CNT> en_streams =
{
static_cast<bool>(av_audio_pkt->enable & 0x10),
static_cast<bool>(av_audio_pkt->enable & 0x20),
static_cast<bool>(av_audio_pkt->enable & 0x40),
static_cast<bool>(av_audio_pkt->enable & 0x80)
};
// Might be wrong
const std::array<bool, SYS_RSXAUDIO_SERIAL_STREAM_CNT> swap_lr =
{
static_cast<bool>(av_audio_pkt->swaplr & 0x10),
static_cast<bool>(av_audio_pkt->swaplr & 0x20),
static_cast<bool>(av_audio_pkt->swaplr & 0x40),
static_cast<bool>(av_audio_pkt->swaplr & 0x80)
};
memcpy(hdmi.info_frame.data(), av_audio_pkt->info, sizeof(av_audio_pkt->info));
memcpy(hdmi.chstat.data(), av_audio_pkt->chstat, sizeof(av_audio_pkt->chstat));
hdmi.ch_cfg = hdmi_param_conv(fifomap, en_streams, swap_lr);
});
}
pkt_data_addr += av_audio_pkt->hdr.length + 4ULL;
}
if (pkt->num_of_av_video_pkt || valid_av_audio_pkt)
{
if (!syscon_check_passed)
{
vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL);
return;
}
vuart.write_resp<true>(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
}
else
{
vuart.write_resp(pkt->hdr.cid, PS3AV_STATUS_SUCCESS);
}
}
private:
// Per-video-mode size constraints used to validate guest-requested resolutions.
struct video_sce_param
{
    u32 width_div; // divisor applied to 'width' to obtain the maximum configurable width (pixel-repetition modes use >1)
    u32 width;     // maximum active width of the mode in pixels (before division)
    u32 height;    // maximum active height of the mode in lines
};
// Result of parsing one AV video packet.
struct av_video_resp
{
    u32 status = PS3AV_STATUS_SUCCESS; // reply status code to send back to the guest
    bool hdcp_done_event[2]{};         // per-port flag: queue an HDCP_DONE event for HDMI_0 / HDMI_1
};
// Validate a guest video-mode request against the constraints of the selected
// SCE video mode. Returns PS3AV_STATUS_SUCCESS when the configuration is
// acceptable, PS3AV_STATUS_INVALID_VIDEO_PARAM otherwise.
u32 video_pkt_parse(const ps3av_pkt_video_mode &video_head_cfg)
{
    // Size limits per internal SCE mode index (entry 0 is a dummy for "invalid")
    static constexpr video_sce_param sce_param_arr[28] =
    {
        { 0, 0, 0 },
        { 4, 2880, 480 },
        { 4, 2880, 480 },
        { 4, 2880, 576 },
        { 4, 2880, 576 },
        { 2, 1440, 480 },
        { 2, 1440, 576 },
        { 1, 1920, 1080 },
        { 1, 1920, 1080 },
        { 1, 1920, 1080 },
        { 1, 1280, 720 },
        { 1, 1280, 720 },
        { 1, 1920, 1080 },
        { 1, 1920, 1080 },
        { 1, 1920, 1080 },
        { 1, 1920, 1080 },
        { 1, 1280, 768 },
        { 1, 1280, 1024 },
        { 1, 1920, 1200 },
        { 1, 1360, 768 },
        { 1, 1280, 1470 },
        { 1, 1280, 1470 },
        { 1, 1920, 1080 },
        { 1, 1920, 2205 },
        { 1, 1920, 2205 },
        { 1, 1280, 721 },
        { 1, 720, 481 },
        { 1, 720, 577 }
    };

    // Map the guest-visible video id onto the internal SCE mode index
    const auto sce_idx = [&]() -> u8
    {
        switch (video_head_cfg.video_vid)
        {
        case 16: return 1;
        case 1: return 2;
        case 3: return 4;
        case 17: return 4;
        case 5: return 5;
        case 6: return 6;
        case 18: return 7;
        case 7: return 8;
        case 33: return 8;
        case 8: return 9;
        case 34: return 9;
        case 9: return 10;
        case 31: return 10;
        case 10: return 11;
        case 32: return 11;
        case 11: return 12;
        case 35: return 12;
        case 37: return 12;
        case 12: return 13;
        case 36: return 13;
        case 38: return 13;
        case 19: return 14;
        case 20: return 15;
        case 13: return 16;
        case 14: return 17;
        case 15: return 18;
        case 21: return 19;
        case 22: return 20;
        case 27: return 20;
        case 23: return 21;
        case 28: return 21;
        case 24: return 22;
        case 25: return 23;
        case 29: return 23;
        case 26: return 24;
        case 30: return 24;
        case 39: return 25;
        case 40: return 26;
        case 41: return 27;
        default: return umax;
        }
    }();

    // BUGFIX: validate the index before indexing the table. The previous code
    // formed a reference to sce_param_arr[0xFF] for unknown video ids (array
    // has 28 entries), which is out-of-bounds.
    if (sce_idx == umax)
    {
        return PS3AV_STATUS_INVALID_VIDEO_PARAM;
    }

    const video_sce_param &sce_param = sce_param_arr[sce_idx];

    if (video_head_cfg.video_head > PS3AV_HEAD_B_ANALOG ||
        video_head_cfg.video_order > 1 ||
        video_head_cfg.video_format > 16 ||
        video_head_cfg.video_out_format > 16 ||
        ((1ULL << video_head_cfg.video_out_format) & 0x1CE07) == 0U || // bitmask of supported output formats
        video_head_cfg.unk2 > 3 ||
        video_head_cfg.pitch & 7 ||                                    // pitch must be 8-byte aligned
        video_head_cfg.pitch > UINT16_MAX ||
        (video_head_cfg.width != 1280U && ((video_head_cfg.width & 7) != 0U || video_head_cfg.width > UINT16_MAX)) ||
        (sce_param.width != 720 && video_head_cfg.width > sce_param.width / sce_param.width_div) ||
        !((video_head_cfg.height == 1470U && (sce_param.height == 721 || sce_param.height == 481 || sce_param.height == 577)) || (video_head_cfg.height <= sce_param.height && video_head_cfg.height <= UINT16_MAX)))
    {
        return PS3AV_STATUS_INVALID_VIDEO_PARAM;
    }

    sys_uart.notice("[inc_avset_cmd] new resolution on HEAD_%c width=%u height=%u", video_head_cfg.video_head == PS3AV_HEAD_A_HDMI ? 'A' : 'B', video_head_cfg.width, video_head_cfg.height);
    return PS3AV_STATUS_SUCCESS;
}
// Validate a single AV video packet and decide whether an HDCP_DONE event
// should be scheduled. 'syscon_pkt_valid' is cleared when the packet targets
// a port that is not allowed in the current configuration (HDMI_1 outside
// debug-console mode, AVMULTI_1).
av_video_resp av_video_pkt_parse(const ps3av_pkt_av_video_cs &pkt, bool &syscon_pkt_valid)
{
    if (pkt.avport <= static_cast<u16>(UartAudioAvport::HDMI_1))
    {
        // HDMI port
        if (pkt.av_vid > 23)
        {
            return {PS3AV_STATUS_INVALID_AV_PARAM};
        }

        if (pkt.avport == static_cast<u16>(UartAudioAvport::HDMI_1) && !g_cfg.core.debug_console_mode)
        {
            // HDMI_1 exists only on debug consoles
            syscon_pkt_valid = false;
        }
        else if (syscon_pkt_valid)
        {
            // HDMI setup, code 0x80
            av_video_resp resp{};
            resp.hdcp_done_event[pkt.avport] = true;
            return resp;
        }
    }
    else
    {
        // Non-HDMI port: only AVMULTI is acceptable, with a reduced set of video ids
        if ((pkt.avport != static_cast<u16>(UartAudioAvport::AVMULTI_0) && pkt.avport != static_cast<u16>(UartAudioAvport::AVMULTI_1)) ||
            pkt.av_vid > 23 ||
            (pkt.av_vid > 12 && pkt.av_vid != 18U))
        {
            return {PS3AV_STATUS_INVALID_AV_PARAM};
        }

        if (pkt.avport == static_cast<u16>(UartAudioAvport::AVMULTI_1))
        {
            syscon_pkt_valid = false;
        }
        else if (syscon_pkt_valid)
        {
            // AVMULTI setup
        }
    }

    return {};
}
// Convert the guest HDMI stream mapping (fifomap / enable / swaplr bits) into
// the internal per-channel map + channel-count configuration.
// NOTE(review): 'en' and 'swap' are indexed with the *mapped* stream position
// (map[stream_idx]) rather than the stream index itself — presumably matches
// real hardware behavior, but this is unverified (original code already
// carries a "Might be wrong" note for swaplr).
static rsxaudio_hw_param_t::hdmi_param_t::hdmi_ch_cfg_t hdmi_param_conv(const std::array<u8, SYS_RSXAUDIO_SERIAL_STREAM_CNT> &map,
    const std::array<bool, SYS_RSXAUDIO_SERIAL_STREAM_CNT> &en,
    const std::array<bool, SYS_RSXAUDIO_SERIAL_STREAM_CNT> &swap)
{
    std::array<u8, SYS_RSXAUDIO_SERIAL_MAX_CH> result{};
    u8 ch_cnt = 0;

    for (usz stream_idx = 0; stream_idx < SYS_RSXAUDIO_SERIAL_STREAM_CNT; stream_idx++)
    {
        const u8 stream_pos = map[stream_idx];

        if (en[stream_pos])
        {
            // Each stream carries a stereo pair; swaplr exchanges L and R
            result[stream_idx * 2 + 0] = stream_pos * 2 + swap[stream_pos];
            result[stream_idx * 2 + 1] = stream_pos * 2 + !swap[stream_pos];
            // Channel count extends to the highest enabled stream pair
            ch_cnt = static_cast<u8>((stream_idx + 1) * 2);
        }
        else
        {
            result[stream_idx * 2 + 0] = rsxaudio_hw_param_t::hdmi_param_t::MAP_SILENT_CH;
            result[stream_idx * 2 + 1] = rsxaudio_hw_param_t::hdmi_param_t::MAP_SILENT_CH;
        }
    }

    // Round the raw channel count to the nearest supported output layout
    const AudioChannelCnt ch_cnt_conv = [&]()
    {
        switch (ch_cnt)
        {
        default:
        case 0:
        case 2:
            return AudioChannelCnt::STEREO;
        case 4:
        case 6:
            return AudioChannelCnt::SURROUND_5_1;
        case 8:
            return AudioChannelCnt::SURROUND_7_1;
        }
    }();

    return { result, ch_cnt_conv };
}
};
// Fallback handler for PS3AV commands with no specific implementation:
// logs the command id and replies with a generic success status.
struct generic_reply_cmd : public ps3av_cmd
{
    u16 get_size(vuart_av_thread& /*vuart*/, const void* /*pkt_buf*/) override
    {
        return 0; // 0 = skip the packet-size consistency check
    }

    void execute(vuart_av_thread &vuart, const void *pkt_buf) override
    {
        const auto pkt = static_cast<const ps3av_header *>(pkt_buf);

        // Reply header must fit into the pending reply buffer
        if (vuart.get_reply_buf_free_size() < sizeof(ps3av_pkt_reply_hdr))
        {
            vuart.write_resp(pkt->cid, PS3AV_STATUS_BUFFER_OVERFLOW);
            return;
        }

        sys_uart.todo("Unimplemented cid=0x%08x", pkt->cid);
        vuart.write_resp(pkt->cid, PS3AV_STATUS_SUCCESS);
    }
};
// lv2 syscall: initialize the AV virtual UART. Root permission required;
// only the first call succeeds (subsequent calls return EPERM).
error_code sys_uart_initialize(ppu_thread &ppu)
{
    ppu.state += cpu_flag::wait;

    sys_uart.trace("sys_uart_initialize()");

    if (!g_ps3_process_info.has_root_perm())
    {
        return CELL_ENOSYS;
    }

    auto &vuart_thread = g_fxo->get<vuart_av>();

    // test_and_set guards against double initialization
    if (vuart_thread.initialized.test_and_set())
    {
        return CELL_EPERM;
    }

    return CELL_OK;
}
// lv2 syscall: read up to 'size' bytes from the AV UART RX buffer into guest
// memory. mode selects blocking (BLOCKING_BIG_OP: wait until data arrives)
// or non-blocking (NOT_BLOCKING_BIG_OP) behavior. On success returns the
// number of bytes read wrapped in not_an_error().
error_code sys_uart_receive(ppu_thread &ppu, vm::ptr<void> buffer, u64 size, u32 mode)
{
    sys_uart.trace("sys_uart_receive(buffer=*0x%x, size=0x%llx, mode=0x%x)", buffer, size, mode);

    if (!g_ps3_process_info.has_root_perm())
    {
        return CELL_ENOSYS;
    }

    if (!size)
    {
        return CELL_OK;
    }

    if (mode & ~(BLOCKING_BIG_OP | NOT_BLOCKING_BIG_OP))
    {
        return CELL_EINVAL;
    }

    auto &vuart_thread = g_fxo->get<vuart_av>();

    if (!vuart_thread.initialized)
    {
        return CELL_ESRCH;
    }

    if (size > 0x20000U)
    {
        // kmalloc restriction
        fmt::throw_exception("Buffer is too big");
    }

    // Intermediate buffer: guest memory is only touched after the read succeeds
    const std::unique_ptr<u8[]> data = std::make_unique<u8[]>(size);
    u32 read_size = 0;

    // Drain the RX ring buffer in 4 KiB chunks; returns bytes read or CELL_EBUSY
    // if another reader holds rx_mutex
    auto vuart_read = [&](u8 *buf, u32 buf_size) -> s32
    {
        constexpr u32 ITER_SIZE = 4096;
        std::unique_lock lock(vuart_thread.rx_mutex, std::defer_lock);

        if (!lock.try_lock())
        {
            return CELL_EBUSY;
        }

        u32 read_size = 0;
        u32 remaining = buf_size;

        while (read_size < buf_size)
        {
            const u32 packet_size = std::min(remaining, ITER_SIZE);
            const u32 nread = vuart_thread.read_rx_data(buf + read_size, packet_size);
            read_size += nread;
            remaining -= nread;

            // Short read means the ring buffer is empty
            if (nread < packet_size)
                break;
        }

        return read_size;
    };

    if (mode & BLOCKING_BIG_OP)
    {
        // Yield before checking for packets
        lv2_obj::sleep(ppu);

        for (;;)
        {
            if (ppu.is_stopped())
            {
                return {};
            }

            std::unique_lock<shared_mutex> lock(vuart_thread.rx_wake_m);
            const s32 read_result = vuart_read(data.get(), static_cast<u32>(size));

            if (read_result > CELL_OK)
            {
                read_size = read_result;
                break;
            }

            // No data yet — wait for the AV thread to signal arrival (5 ms timeout)
            vuart_thread.rx_wake_c.wait_unlock(5000, lock);
        }

        ppu.check_state();
    }
    else // NOT_BLOCKING_BIG_OP
    {
        const s32 read_result = vuart_read(data.get(), static_cast<u32>(size));

        if (read_result <= CELL_OK)
        {
            // Propagate CELL_EBUSY / 0 bytes as-is
            return read_result;
        }

        read_size = read_result;
    }

    if (!vm::check_addr(buffer.addr(), vm::page_writable, read_size))
    {
        return CELL_EFAULT;
    }

    memcpy(buffer.get_ptr(), data.get(), read_size);
    return not_an_error(read_size);
}
// lv2 syscall: write 'size' bytes from guest memory to the AV UART TX buffer.
// mode selects fully blocking (BLOCKING_BIG_OP), single-shot non-blocking
// (NOT_BLOCKING_OP) or best-effort non-blocking (NOT_BLOCKING_BIG_OP)
// behavior. Returns the number of bytes accepted wrapped in not_an_error().
error_code sys_uart_send(ppu_thread &ppu, vm::cptr<void> buffer, u64 size, u32 mode)
{
    sys_uart.trace("sys_uart_send(buffer=0x%x, size=0x%llx, mode=0x%x)", buffer, size, mode);

    if (!g_ps3_process_info.has_root_perm())
    {
        return CELL_ENOSYS;
    }

    if (!size)
    {
        return CELL_OK;
    }

    if (mode & ~(BLOCKING_BIG_OP | NOT_BLOCKING_OP | NOT_BLOCKING_BIG_OP))
    {
        return CELL_EINVAL;
    }

    auto &vuart_thread = g_fxo->get<vuart_av>();

    if (!vuart_thread.initialized)
    {
        return CELL_ESRCH;
    }

    if (size > 0x20000U)
    {
        // kmalloc restriction
        fmt::throw_exception("Buffer is too big");
    }

    if (!vm::check_addr(buffer.addr(), vm::page_readable, static_cast<u32>(size)))
    {
        return CELL_EFAULT;
    }

    // Copy guest data up front so TX is done from emulator memory
    const std::unique_ptr<u8[]> data = std::make_unique<u8[]>(size);
    memcpy(data.get(), buffer.get_ptr(), size);

    std::unique_lock lock(vuart_thread.tx_mutex, std::defer_lock);
    constexpr u32 ITER_SIZE = 4096;

    if (mode & BLOCKING_BIG_OP)
    {
        // Yield before sending packets
        lv2_obj::sleep(ppu);
        lock.lock();

        // Push the whole chunk, sleeping while the TX ring buffer is full
        auto vuart_send_all = [&](const u8 *data, u32 data_sz)
        {
            u32 rem_size = data_sz;

            while (rem_size)
            {
                if (ppu.is_stopped())
                {
                    return false;
                }

                std::unique_lock<shared_mutex> lock(vuart_thread.tx_rdy_m);

                if (vuart_thread.get_tx_bytes() >= PS3AV_TX_BUF_SIZE)
                {
                    // Buffer full — wait for the AV thread to drain it (5 ms timeout)
                    vuart_thread.tx_rdy_c.wait_unlock(5000, lock);
                }
                else
                {
                    lock.unlock();
                }

                rem_size -= vuart_thread.enque_tx_data(data + data_sz - rem_size, rem_size);
            }

            return true;
        };

        u32 sent_size = 0;
        u32 remaining = static_cast<u32>(size);

        while (remaining)
        {
            const u32 packet_size = std::min(remaining, ITER_SIZE);
            if (!vuart_send_all(data.get() + sent_size, packet_size)) return {};
            sent_size += packet_size;
            remaining -= packet_size;
        }

        ppu.check_state();
    }
    else if (mode & NOT_BLOCKING_OP)
    {
        if (!lock.try_lock())
        {
            return CELL_EBUSY;
        }

        // All-or-nothing: fail if the whole buffer does not fit
        if (PS3AV_TX_BUF_SIZE - vuart_thread.get_tx_bytes() < size)
        {
            return CELL_EAGAIN;
        }

        return not_an_error(vuart_thread.enque_tx_data(data.get(), static_cast<u32>(size)));
    }
    else // NOT_BLOCKING_BIG_OP
    {
        if (!lock.try_lock())
        {
            return CELL_EBUSY;
        }

        u32 sent_size = 0;
        u32 remaining = static_cast<u32>(size);

        while (sent_size < size)
        {
            const u32 packet_size = std::min(remaining, ITER_SIZE);
            const u32 nsent = vuart_thread.enque_tx_data(data.get() + sent_size, packet_size);
            remaining -= nsent;

            if (nsent < packet_size)
            {
                // Based on RE (the peculiar partial-send return values below
                // reproduce real firmware behavior)
                if (sent_size == 0)
                {
                    return not_an_error(packet_size); // First iteration
                }
                else if (sent_size + nsent < size)
                {
                    return not_an_error(sent_size + nsent);
                }
                else
                {
                    break; // Last iteration
                }
            }

            sent_size += nsent;
        }
    }

    return not_an_error(size);
}
// lv2 syscall: report the fixed RX/TX buffer sizes of the AV virtual UART.
error_code sys_uart_get_params(vm::ptr<vuart_params> buffer)
{
    sys_uart.trace("sys_uart_get_params(buffer=0x%x)", buffer);

    if (!g_ps3_process_info.has_root_perm())
    {
        return CELL_ENOSYS;
    }

    auto &vuart_thread = g_fxo->get<vuart_av>();

    if (!vuart_thread.initialized)
    {
        return CELL_ESRCH;
    }

    if (!vm::check_addr(buffer.addr(), vm::page_writable, sizeof(vuart_params)))
    {
        return CELL_EFAULT;
    }

    buffer->rx_buf_size = PS3AV_RX_BUF_SIZE;
    buffer->tx_buf_size = PS3AV_TX_BUF_SIZE;

    return CELL_OK;
}
// Main loop of the AV virtual UART thread: dispatches due HDMI plug/HDCP
// events, then waits for guest TX data (or the next scheduled event) and
// parses any received command packets.
void vuart_av_thread::operator()()
{
    while (thread_ctrl::state() != thread_state::aborting)
    {
        if (Emu.IsPaused())
        {
            thread_ctrl::wait_for(5000);
            continue;
        }

        // Time (in us) until the next scheduled event on each HDMI port; 0 = due now
        const u64 hdmi_event_dist[2] = { hdmi_event_handler[0].time_until_next(), hdmi_event_handler[1].time_until_next() };
        bool update_dist = false;

        if (hdmi_event_dist[0] == 0)
        {
            dispatch_hdmi_event(hdmi_event_handler[0].get_occured_event(), UartAudioAvport::HDMI_0);
            update_dist |= hdmi_event_handler[0].events_available();
        }

        if (hdmi_event_dist[1] == 0)
        {
            dispatch_hdmi_event(hdmi_event_handler[1].get_occured_event(), UartAudioAvport::HDMI_1);
            update_dist |= hdmi_event_handler[1].events_available();
        }

        // More events pending — recompute distances before sleeping
        if (update_dist)
        {
            continue;
        }

        // Sleep until the nearest pending event; 0 on both ports means "no events"
        const u64 wait_time = [&]()
        {
            if (hdmi_event_dist[0] != 0 && hdmi_event_dist[1] != 0)
                return std::min(hdmi_event_dist[0], hdmi_event_dist[1]);
            else
                return std::max(hdmi_event_dist[0], hdmi_event_dist[1]);
        }();

        std::unique_lock<shared_mutex> lock(tx_wake_m);

        if (!tx_buf.get_used_size())
        {
            // wait_time == 0 -> wait indefinitely (-1) until woken by a sender
            tx_wake_c.wait_unlock(wait_time ? wait_time : -1, lock);
        }
        else
        {
            lock.unlock();
        }

        if (u32 read_size = read_tx_data(temp_tx_buf, PS3AV_TX_BUF_SIZE))
        {
            parse_tx_buffer(read_size);
            // Give vsh some time
            thread_ctrl::wait_for(1000 * 100 / g_cfg.core.clocks_scale);
            commit_rx_buf(false);
            commit_rx_buf(true);
        }
    }
}
// Split the received TX data into PS3AV command packets, validate each
// header and dispatch it to the matching command handler.
void vuart_av_thread::parse_tx_buffer(u32 buf_size)
{
    if (buf_size >= PS3AV_TX_BUF_SIZE)
    {
        // Buffer overflowed — drain everything and reply with an overflow status
        // using the cid of the first packet (offset 6 = 4th be_t<u16>)
        while (read_tx_data(temp_tx_buf, PS3AV_TX_BUF_SIZE) >= PS3AV_TX_BUF_SIZE);
        write_resp(reinterpret_cast<be_t<u16, 1>*>(temp_tx_buf)[3], PS3AV_STATUS_BUFFER_OVERFLOW);
        return;
    }

    u32 read_ptr = 0;

    while (buf_size)
    {
        const ps3av_header* const hdr = reinterpret_cast<ps3av_header*>(&temp_tx_buf[read_ptr]);
        // length excludes the 4-byte version+length prefix
        const u16 pkt_size = hdr->length + 4;

        if (hdr->length == 0xFFFCU)
        {
            // Malformed length sentinel
            write_resp(0xDEAD, PS3AV_STATUS_FAILURE);
            return;
        }

        if (hdr->version != PS3AV_VERSION)
        {
            if (hdr->version >= 0x100 && hdr->version < PS3AV_VERSION)
            {
                sys_uart.todo("Unimplemented AV version: 0x%04x", hdr->version);
            }

            write_resp(static_cast<u16>(hdr->cid.get()), PS3AV_STATUS_INVALID_COMMAND);
            return;
        }

        const void* const pkt_storage = &temp_tx_buf[read_ptr];
        read_ptr += pkt_size;
        // Clamp to zero if the last packet is truncated
        buf_size = buf_size < pkt_size ? 0 : buf_size - pkt_size;

        auto cmd = get_cmd(hdr->cid);

        if (!cmd.get())
        {
            sys_uart.error("Unknown AV cmd: 0x%08x", hdr->cid);
            continue;
        }

        // cmd_size == 0 means the handler skips the size check
        const auto cmd_size = cmd->get_size(*this, pkt_storage);

        if (cmd_size != pkt_size && cmd_size)
        {
            sys_uart.error("Invalid size for cid=0x%x, expected=0x%x, got=0x%x", static_cast<const ps3av_header *>(pkt_storage)->cid, cmd_size, pkt_size);
            write_resp(static_cast<u16>(hdr->cid.get()), PS3AV_STATUS_INVALID_SAMPLE_SIZE);
            return;
        }

        cmd->execute(*this, pkt_storage);
    }
}
// Thread shutdown: wake any sender blocked on tx_wake_c. The empty locked
// scope synchronizes with waiters before the notification.
vuart_av_thread &vuart_av_thread::operator=(thread_state)
{
    {
        std::lock_guard lock(tx_wake_m);
    }

    tx_wake_c.notify_all();
    return *this;
}
// Push guest data into the TX ring buffer and wake the AV thread.
// Returns the number of bytes actually enqueued (0 when the buffer is full).
u32 vuart_av_thread::enque_tx_data(const void *data, u32 data_sz)
{
    std::unique_lock<shared_mutex> lock(tx_wake_m);

    if (u32 size = static_cast<u32>(tx_buf.push(data, data_sz, true)))
    {
        // Unlock before notifying so the woken thread can take the mutex immediately
        lock.unlock();
        tx_wake_c.notify_all();
        return size;
    }

    return 0;
}
// Number of bytes currently queued in the guest->AV TX ring buffer.
u32 vuart_av_thread::get_tx_bytes()
{
    const auto queued = tx_buf.get_used_size();
    return static_cast<u32>(queued);
}
// Pop up to data_sz bytes of pending RX (AV -> guest) data into 'data'.
// Returns the number of bytes actually copied.
u32 vuart_av_thread::read_rx_data(void *data, u32 data_sz)
{
    const auto popped = rx_buf.pop(data, data_sz, true);
    return static_cast<u32>(popped);
}
// Pop queued TX data for processing and notify senders waiting for buffer
// space. Returns the number of bytes read (0 when the buffer is empty).
u32 vuart_av_thread::read_tx_data(void *data, u32 data_sz)
{
    std::unique_lock<shared_mutex> lock(tx_rdy_m);

    if (u32 size = static_cast<u32>(tx_buf.pop(data, data_sz, true)))
    {
        // Space was freed — wake blocked sys_uart_send callers
        lock.unlock();
        tx_rdy_c.notify_all();
        return size;
    }

    return 0;
}
// Remaining capacity (in bytes) of the temporary non-syscon reply buffer.
u32 vuart_av_thread::get_reply_buf_free_size()
{
    const u32 capacity = sizeof(temp_rx_buf.buf);
    return capacity - temp_rx_buf.crnt_size;
}
// Append a PS3AV reply packet (header + optional payload) to the pending
// reply buffer. UseScBuffer selects the syscon reply buffer over the regular
// one. A reply that does not fit is silently dropped (buffer stays intact).
template<bool UseScBuffer>
void vuart_av_thread::write_resp(u32 cid, u32 status, const void *data, u16 data_size)
{
    const ps3av_pkt_reply_hdr pkt_hdr =
    {
        PS3AV_VERSION,
        data_size + 8U, // length field excludes version+length prefix
        cid | PS3AV_REPLY_BIT,
        status
    };

    if (status != PS3AV_STATUS_SUCCESS)
    {
        sys_uart.error("Packet failed cid=0x%08x status=0x%02x", cid, status);
    }

    temp_buf &buf = UseScBuffer ? temp_rx_sc_buf : temp_rx_buf;
    const u32 total_size = sizeof(pkt_hdr) + data_size;

    if (buf.crnt_size + total_size <= sizeof(buf.buf))
    {
        memcpy(&buf.buf[buf.crnt_size], &pkt_hdr, sizeof(pkt_hdr));

        // BUGFIX: many callers send header-only replies (data == nullptr,
        // data_size == 0); memcpy with a null source is UB even for size 0
        if (data && data_size)
        {
            memcpy(&buf.buf[buf.crnt_size + sizeof(pkt_hdr)], data, data_size);
        }

        buf.crnt_size += total_size;
    }
}
// Map a PS3AV command id onto its handler object. Returns an empty pointer
// for unknown command ids (caller logs and skips the packet).
std::shared_ptr<ps3av_cmd> vuart_av_thread::get_cmd(u32 cid)
{
    switch (cid)
    {
    // Commands acknowledged but not implemented
    case PS3AV_CID_AV_CEC_MESSAGE:
    case PS3AV_CID_AV_UNK11:
    case PS3AV_CID_AV_UNK12:
        return std::make_shared<generic_reply_cmd>();

    // AV commands
    case PS3AV_CID_AV_INIT: return std::make_shared<av_init_cmd>();
    case PS3AV_CID_AV_FIN: return std::make_shared<av_fini_cmd>();
    case PS3AV_CID_AV_GET_HW_CONF: return std::make_shared<av_get_hw_conf_cmd>();
    case PS3AV_CID_AV_GET_MONITOR_INFO: return std::make_shared<av_get_monitor_info_cmd>();
    case PS3AV_CID_AV_GET_BKSV_LIST: return std::make_shared<av_get_bksv_list_cmd>();
    case PS3AV_CID_AV_ENABLE_EVENT: return std::make_shared<av_enable_event_cmd>();
    case PS3AV_CID_AV_DISABLE_EVENT: return std::make_shared<av_disable_event_cmd>();
    case PS3AV_CID_AV_TV_MUTE: return std::make_shared<av_tv_mute_cmd>();
    case PS3AV_CID_AV_NULL_CMD: return std::make_shared<av_null_cmd>();
    case PS3AV_CID_AV_GET_AKSV: return std::make_shared<av_get_aksv_list_cmd>();
    case PS3AV_CID_AV_VIDEO_DISABLE_SIG: return std::make_shared<video_disable_signal_cmd>();
    case PS3AV_CID_AV_VIDEO_YTRAPCONTROL: return std::make_shared<av_video_ytrapcontrol_cmd>();
    case PS3AV_CID_AV_AUDIO_MUTE: return std::make_shared<av_audio_mute_cmd>();
    case PS3AV_CID_AV_ACP_CTRL: return std::make_shared<av_acp_ctrl_cmd>();
    case PS3AV_CID_AV_SET_ACP_PACKET: return std::make_shared<av_set_acp_packet_cmd>();
    case PS3AV_CID_AV_ADD_SIGNAL_CTL: return std::make_shared<av_add_signal_ctl_cmd>();
    case PS3AV_CID_AV_SET_CGMS_WSS: return std::make_shared<av_set_cgms_wss_cmd>();
    case PS3AV_CID_AV_HDMI_MODE: return std::make_shared<av_set_hdmi_mode_cmd>();
    case PS3AV_CID_AV_GET_CEC_CONFIG: return std::make_shared<av_get_cec_status_cmd>();

    // Video commands
    case PS3AV_CID_VIDEO_INIT: return std::make_shared<video_init_cmd>();
    case PS3AV_CID_VIDEO_ROUTE: return std::make_shared<video_set_route_cmd>();
    case PS3AV_CID_VIDEO_FORMAT: return std::make_shared<video_set_format_cmd>();
    case PS3AV_CID_VIDEO_PITCH: return std::make_shared<video_set_pitch_cmd>();
    case PS3AV_CID_VIDEO_GET_HW_CONF: return std::make_shared<video_get_hw_cfg_cmd>();

    // Audio commands
    case PS3AV_CID_AUDIO_INIT: return std::make_shared<audio_init_cmd>();
    case PS3AV_CID_AUDIO_MODE: return std::make_shared<audio_set_mode_cmd>();
    case PS3AV_CID_AUDIO_MUTE: return std::make_shared<audio_mute_cmd>();
    case PS3AV_CID_AUDIO_ACTIVE: return std::make_shared<audio_set_active_cmd>();
    case PS3AV_CID_AUDIO_INACTIVE: return std::make_shared<audio_set_inactive_cmd>();
    case PS3AV_CID_AUDIO_SPDIF_BIT: return std::make_shared<audio_spdif_bit_cmd>();
    case PS3AV_CID_AUDIO_CTRL: return std::make_shared<audio_ctrl_cmd>();

    // Multipacket
    case PS3AV_CID_AVB_PARAM: return std::make_shared<inc_avset_cmd>();

    default: return {};
    }
}
// Flush one of the temporary reply buffers (regular or syscon) into the RX
// ring buffer and wake any blocked sys_uart_receive caller.
void vuart_av_thread::commit_rx_buf(bool syscon_buf)
{
    temp_buf &buf = syscon_buf ? temp_rx_sc_buf : temp_rx_buf;

    std::unique_lock<shared_mutex> lock(rx_wake_m);
    rx_buf.push(buf.buf, buf.crnt_size, true);
    buf.crnt_size = 0;

    if (rx_buf.get_used_size())
    {
        lock.unlock();
        rx_wake_c.notify_all();
    }
}
// Schedule a range of HDMI state-machine events on the selected ports.
// HDMI_1 only exists in debug-console mode.
void vuart_av_thread::add_hdmi_events(UartHdmiEvent first_event, UartHdmiEvent last_event, bool hdmi_0, bool hdmi_1)
{
    if (hdmi_0)
    {
        hdmi_event_handler[0].set_target_state(first_event, last_event);
    }

    if (hdmi_1 && g_cfg.core.debug_console_mode)
    {
        hdmi_event_handler[1].set_target_state(first_event, last_event);
    }
}
// Convenience overload: schedule a single HDMI event (range of one).
void vuart_av_thread::add_hdmi_events(UartHdmiEvent last_event, bool hdmi_0, bool hdmi_1)
{
    add_hdmi_events(last_event, last_event, hdmi_0, hdmi_1);
}
// Translate a fired HDMI state-machine event into the matching guest
// notification for the given port. Events other than the three handled
// states (e.g. NONE) require no action.
void vuart_av_thread::dispatch_hdmi_event(UartHdmiEvent event, UartAudioAvport hdmi)
{
    const bool hdmi_0 = hdmi == UartAudioAvport::HDMI_0;
    const bool hdmi_1 = hdmi == UartAudioAvport::HDMI_1;

    if (event == UartHdmiEvent::UNPLUGGED)
    {
        add_unplug_event(hdmi_0, hdmi_1);
    }
    else if (event == UartHdmiEvent::PLUGGED)
    {
        add_plug_event(hdmi_0, hdmi_1);
    }
    else if (event == UartHdmiEvent::HDCP_DONE)
    {
        add_hdcp_done_event(hdmi_0, hdmi_1);
    }
}
// Convert a UART-protocol audio avport id to the internal rsxaudio port
// index. Both AVMULTI ids map to the single AVMULTI output; any other value
// is a caller bug (ensure fires).
RsxaudioAvportIdx vuart_av_thread::avport_to_idx(UartAudioAvport avport)
{
    switch (avport)
    {
    case UartAudioAvport::HDMI_0:
        return RsxaudioAvportIdx::HDMI_0;
    case UartAudioAvport::HDMI_1:
        return RsxaudioAvportIdx::HDMI_1;
    case UartAudioAvport::AVMULTI_0:
        return RsxaudioAvportIdx::AVMULTI;
    case UartAudioAvport::SPDIF_0:
        return RsxaudioAvportIdx::SPDIF_0;
    case UartAudioAvport::SPDIF_1:
        return RsxaudioAvportIdx::SPDIF_1;
    default:
        ensure(false);
        return RsxaudioAvportIdx::HDMI_0; // unreachable, silences the compiler
    }
}
// Queue an UNPLUGGED event packet for the selected HDMI port(s), mute audio
// on those ports and rearm the first-HDCP-authentication flag.
void vuart_av_thread::add_unplug_event(bool hdmi_0, bool hdmi_1)
{
    // Events are only delivered if the guest subscribed to them
    if ((hdmi_events_bitmask & PS3AV_EVENT_BIT_UNPLUGGED) == 0) return;

    ps3av_header pkt{};
    pkt.version = av_cmd_ver;
    pkt.length = sizeof(ps3av_header) - 4;
    pkt.cid = PS3AV_CID_EVENT_UNPLUGGED;

    if (hdmi_0)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_0, false, true);
        hdcp_first_auth[0] = true;
        commit_event_data(&pkt, sizeof(pkt));
    }

    if (hdmi_1)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_1, false, true);
        hdcp_first_auth[1] = true;
        pkt.cid |= 0x10000; // cid bit 16 distinguishes HDMI_1 events
        commit_event_data(&pkt, sizeof(pkt));
    }
}
// Queue a PLUGGED event packet (including the emulated monitor info) for the
// selected HDMI port(s) and mute audio while the link comes up.
void vuart_av_thread::add_plug_event(bool hdmi_0, bool hdmi_1)
{
    if ((hdmi_events_bitmask & PS3AV_EVENT_BIT_PLUGGED) == 0) return;

    ps3av_pkt_hdmi_plugged_event pkt{};
    pkt.hdr.version = av_cmd_ver;
    pkt.hdr.length = sizeof(ps3av_pkt_hdmi_plugged_event) - 8;
    pkt.hdr.cid = PS3AV_CID_EVENT_PLUGGED;

    if (hdmi_0)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_0, false, true);
        av_get_monitor_info_cmd::set_hdmi_display_cfg(*this, pkt.minfo, static_cast<u8>(UartAudioAvport::HDMI_0));
        commit_event_data(&pkt, sizeof(pkt) - 4);
    }

    if (hdmi_1)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_1, false, true);
        // Reset monitor info in case the HDMI_0 branch above populated it
        memset(&pkt.minfo, 0, sizeof(pkt.minfo));
        pkt.hdr.cid |= 0x10000; // cid bit 16 distinguishes HDMI_1 events
        av_get_monitor_info_cmd::set_hdmi_display_cfg(*this, pkt.minfo, static_cast<u8>(UartAudioAvport::HDMI_1));
        commit_event_data(&pkt, sizeof(pkt) - 4);
    }
}
// Queue an HDCP_DONE (first authentication after plug) or HDCP_REAUTH
// (subsequent authentications) event for the selected HDMI port(s) and lift
// the audio mute on those ports.
void vuart_av_thread::add_hdcp_done_event(bool hdmi_0, bool hdmi_1)
{
    u16 pkt_size = offsetof(ps3av_pkt_hdmi_hdcp_done_event, ksv_arr);

    ps3av_pkt_hdmi_hdcp_done_event pkt{};
    pkt.hdr.version = av_cmd_ver;

    if (hdmi_behavior_mode == PS3AV_HDMI_BEHAVIOR_NORMAL || !(hdmi_behavior_mode & PS3AV_HDMI_BEHAVIOR_HDCP_OFF))
    {
        // HDCP enabled: attach the fixed receiver KSV; packet size is padded
        // to a 4-byte boundary (5 bytes per KSV entry)
        pkt.ksv_cnt = 1;
        memcpy(&pkt.ksv_arr[0], PS3AV_BKSV_VALUE, sizeof(PS3AV_BKSV_VALUE));
        pkt_size = (pkt_size + 5 * pkt.ksv_cnt + 3) & 0xFFFC;
    }

    pkt.hdr.length = pkt_size - 4;

    if (hdmi_0)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_0, false, true, false);

        if (hdcp_first_auth[0])
        {
            if (hdmi_events_bitmask & PS3AV_EVENT_BIT_HDCP_DONE)
            {
                hdcp_first_auth[0] = false;
                pkt.hdr.cid = PS3AV_CID_EVENT_HDCP_DONE;
                commit_event_data(&pkt, pkt_size);
            }
        }
        else if (hdmi_events_bitmask & PS3AV_EVENT_BIT_HDCP_REAUTH)
        {
            pkt.hdr.cid = PS3AV_CID_EVENT_HDCP_REAUTH;
            commit_event_data(&pkt, pkt_size);
        }
    }

    if (hdmi_1)
    {
        g_fxo->get<rsx_audio_data>().update_av_mute_state(RsxaudioAvportIdx::HDMI_1, false, true, false);

        if (hdcp_first_auth[1])
        {
            if (hdmi_events_bitmask & PS3AV_EVENT_BIT_HDCP_DONE)
            {
                hdcp_first_auth[1] = false;
                // cid bit 16 distinguishes HDMI_1 events
                pkt.hdr.cid = PS3AV_CID_EVENT_HDCP_DONE | 0x10000;
                commit_event_data(&pkt, pkt_size);
            }
        }
        else if (hdmi_events_bitmask & PS3AV_EVENT_BIT_HDCP_REAUTH)
        {
            pkt.hdr.cid = PS3AV_CID_EVENT_HDCP_REAUTH | 0x10000;
            commit_event_data(&pkt, pkt_size);
        }
    }
}
// Push an event packet directly into the RX ring buffer (bypassing the
// temporary reply buffers) and wake any blocked receiver.
void vuart_av_thread::commit_event_data(const void *data, u16 data_size)
{
    std::unique_lock<shared_mutex> lock(rx_wake_m);
    rx_buf.push(data, data_size, true);

    if (rx_buf.get_used_size())
    {
        lock.unlock();
        rx_wake_c.notify_all();
    }
}
// time_offset: extra per-port delay (us, pre-scaling) added to every scheduled
// event so the two HDMI ports do not fire simultaneously.
vuart_hdmi_event_handler::vuart_hdmi_event_handler(u64 time_offset) : time_offset(time_offset)
{
}
// Request that the state machine walk through [start_state, end_state].
// The base state rolls back so already-passed states can be replayed; if no
// event is currently in flight, scheduling starts immediately.
void vuart_hdmi_event_handler::set_target_state(UartHdmiEvent start_state, UartHdmiEvent end_state)
{
    ensure(start_state != UartHdmiEvent::NONE && static_cast<u8>(start_state) <= static_cast<u8>(end_state));

    base_state = static_cast<UartHdmiEvent>(std::min<u8>(static_cast<u8>(start_state) - 1, static_cast<u8>(current_to_state)));
    target_state = end_state;

    if (!events_available())
    {
        advance_state();
    }
}
// True when an event is scheduled (a zero timestamp means "idle").
bool vuart_hdmi_event_handler::events_available()
{
    return time_of_next_event != 0;
}
// Microseconds until the next scheduled event; 0 when an event is due now
// (within EVENT_TIME_THRESHOLD) or when no event is scheduled.
u64 vuart_hdmi_event_handler::time_until_next()
{
    const u64 current_time = get_system_time();

    if (!events_available() || current_time + EVENT_TIME_THRESHOLD >= time_of_next_event)
    {
        return 0;
    }

    return time_of_next_event - current_time;
}
// Pop the event that just fired (advancing the state machine to schedule the
// next one), or NONE if nothing is due yet.
UartHdmiEvent vuart_hdmi_event_handler::get_occured_event()
{
    if (events_available() && time_until_next() == 0)
    {
        const UartHdmiEvent occured = current_to_state;
        advance_state();
        return occured;
    }

    return UartHdmiEvent::NONE;
}
// Arm the timer for the next event; the delay scales inversely with the
// configured clock scale so emulated time stays consistent.
void vuart_hdmi_event_handler::schedule_next()
{
    time_of_next_event = get_system_time() + (EVENT_TIME_DURATION + time_offset) * 100 / g_cfg.core.clocks_scale;
}
// Advance the plug/HDCP state machine: commit the event that just fired and
// schedule the next state between base_state and target_state, skipping
// transitions that would be no-ops from the current state. When the target
// is reached, the timer is disarmed (time_of_next_event = 0).
void vuart_hdmi_event_handler::advance_state()
{
    current_state = current_to_state;

    while (base_state != target_state)
    {
        base_state = static_cast<UartHdmiEvent>(static_cast<u8>(base_state) + 1);

        // Already unplugged — re-sending UNPLUGGED would be redundant
        if (current_state == UartHdmiEvent::UNPLUGGED && base_state == UartHdmiEvent::UNPLUGGED)
        {
            continue;
        }

        // Already plugged — skip the duplicate PLUGGED step
        if (current_state == UartHdmiEvent::PLUGGED && base_state == UartHdmiEvent::PLUGGED)
        {
            continue;
        }

        // HDCP already completed implies the link is plugged
        if (current_state == UartHdmiEvent::HDCP_DONE && base_state == UartHdmiEvent::PLUGGED)
        {
            continue;
        }

        current_to_state = base_state;
        schedule_next();
        return;
    }

    // Target reached: disarm the timer
    time_of_next_event = 0;
}
| 67,436
|
C++
|
.cpp
| 2,086
| 29.053212
| 208
| 0.677096
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,351
|
sys_memory.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_memory.cpp
|
#include "stdafx.h"
#include "sys_memory.h"
#include "Emu/Memory/vm_locking.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/IdManager.h"
#include "util/vm.hpp"
#include "util/asm.hpp"
LOG_CHANNEL(sys_memory);

// Protects memory-container statistics during create/destroy/query
static shared_mutex s_memstats_mtx;
// Construct a container of 'size' bytes. When created through IDM, the id is
// the freshly assigned IDM id; otherwise the invalid sentinel marks the
// global default container.
lv2_memory_container::lv2_memory_container(u32 size, bool from_idm) noexcept
    : size(size)
    , id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
{
}
// Savestate deserialization constructor: restores size and used counters
// from the serial archive.
lv2_memory_container::lv2_memory_container(utils::serial& ar, bool from_idm) noexcept
    : size(ar)
    , id{from_idm ? idm::last_id() : SYS_MEMORY_CONTAINER_ID_INVALID}
    , used(ar)
{
}
// Savestate factory used by IDM when restoring containers.
std::shared_ptr<void> lv2_memory_container::load(utils::serial& ar)
{
    // Use idm::last_id() only for the instances at IDM
    return std::make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true);
}
// Savestate serialization: persist capacity and current usage.
void lv2_memory_container::save(utils::serial& ar)
{
    ar(size, used);
}
// Resolve a container id: the invalid sentinel maps to the global default
// container, any other id is looked up in IDM (may return null).
lv2_memory_container* lv2_memory_container::search(u32 id)
{
    if (id == SYS_MEMORY_CONTAINER_ID_INVALID)
    {
        return &g_fxo->get<lv2_memory_container>();
    }

    return idm::check<lv2_memory_container>(id);
}
// Maps every 64 KiB page of the address space to the container that owns it,
// so sys_memory_free can return the pages to the right container.
struct sys_memory_address_table
{
    atomic_t<lv2_memory_container*> addrs[65536]{}; // indexed by (address >> 16)

    sys_memory_address_table() = default;

    SAVESTATE_INIT_POS(id_manager::id_map<lv2_memory_container>::savestate_init_pos + 0.1);

    sys_memory_address_table(utils::serial& ar)
    {
        // First: address, second: container ID (SYS_MEMORY_CONTAINER_ID_INVALID for global FXO memory container)
        std::unordered_map<u16, u32> mm;
        ar(mm);

        for (const auto& [addr, id] : mm)
        {
            addrs[addr] = ensure(lv2_memory_container::search(id));
        }
    }

    void save(utils::serial& ar)
    {
        // Only non-null entries are persisted, keyed by page index
        std::unordered_map<u16, u32> mm;

        for (auto& ctr : addrs)
        {
            if (const auto ptr = +ctr)
            {
                mm[static_cast<u16>(&ctr - addrs)] = ptr->id;
            }
        }

        ar(mm);
    }
};
// Reserve a virtual memory block for a user allocation. 64 KiB-aligned
// requests go to the 64k user area (fixed 512 MiB block); 1 MiB-aligned
// requests go to the 1m user area sized up to a 256 MiB multiple.
std::shared_ptr<vm::block_t> reserve_map(u32 alloc_size, u32 align)
{
    return vm::reserve_map(align == 0x10000 ? vm::user64k : vm::user1m, 0, align == 0x10000 ? 0x20000000 : utils::align(alloc_size, 0x10000000)
        , align == 0x10000 ? (vm::page_size_64k | vm::bf0_0x1) : (vm::page_size_1m | vm::bf0_0x1));
}
// Todo: fix order of error checks
// lv2 syscall: allocate user memory from the default container.
// flags selects the page size (1 MiB default / 64 KiB); size must be a
// multiple of that page size. The resulting address is written to alloc_addr.
error_code sys_memory_allocate(cpu_thread& cpu, u64 size, u64 flags, vm::ptr<u32> alloc_addr)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr);

    if (!size)
    {
        return {CELL_EALIGN, size};
    }

    // Check allocation size
    const u32 align =
        flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
        flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
        flags == 0 ? 0x100000 : 0;

    if (!align)
    {
        return {CELL_EINVAL, flags};
    }

    if (size % align)
    {
        return {CELL_EALIGN, size};
    }

    // Get "default" memory container
    auto& dct = g_fxo->get<lv2_memory_container>();

    // Try to get "physical memory"
    if (!dct.take(size))
    {
        return {CELL_ENOMEM, dct.size - dct.used};
    }

    if (const auto area = reserve_map(static_cast<u32>(size), align))
    {
        if (const u32 addr = area->alloc(static_cast<u32>(size), nullptr, align))
        {
            // Record ownership so sys_memory_free can credit the right container
            ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(&dct));

            if (alloc_addr)
            {
                sys_memory.notice("sys_memory_allocate(): Allocated 0x%x address (size=0x%x)", addr, size);

                vm::lock_sudo(addr, static_cast<u32>(size));
                cpu.check_state();
                *alloc_addr = addr;
                return CELL_OK;
            }

            // Dealloc using the syscall
            sys_memory_free(cpu, addr);
            return CELL_EFAULT;
        }
    }

    // Reservation or allocation failed — return the "physical memory"
    dct.free(size);
    return CELL_ENOMEM;
}
// lv2 syscall: like sys_memory_allocate, but the "physical memory" is taken
// from a user-created container identified by cid.
error_code sys_memory_allocate_from_container(cpu_thread& cpu, u64 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr);

    if (!size)
    {
        return {CELL_EALIGN, size};
    }

    // Check allocation size
    const u32 align =
        flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
        flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
        flags == 0 ? 0x100000 : 0;

    if (!align)
    {
        return {CELL_EINVAL, flags};
    }

    if (size % align)
    {
        return {CELL_EALIGN, size};
    }

    // Look the container up and reserve the memory under the IDM lock
    const auto ct = idm::get<lv2_memory_container>(cid, [&](lv2_memory_container& ct) -> CellError
    {
        // Try to get "physical memory"
        if (!ct.take(size))
        {
            return CELL_ENOMEM;
        }

        return {};
    });

    if (!ct)
    {
        return CELL_ESRCH;
    }

    if (ct.ret)
    {
        return {ct.ret, ct->size - ct->used};
    }

    if (const auto area = reserve_map(static_cast<u32>(size), align))
    {
        if (const u32 addr = area->alloc(static_cast<u32>(size)))
        {
            // Record ownership so sys_memory_free can credit the right container
            ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(ct.ptr.get()));

            if (alloc_addr)
            {
                vm::lock_sudo(addr, static_cast<u32>(size));
                cpu.check_state();
                *alloc_addr = addr;
                return CELL_OK;
            }

            // Dealloc using the syscall
            sys_memory_free(cpu, addr);
            return CELL_EFAULT;
        }
    }

    // Reservation or allocation failed — return the "physical memory"
    ct->free(size);
    return CELL_ENOMEM;
}
// lv2 syscall: free memory previously allocated by sys_memory_allocate*.
// The owning container is found (and atomically cleared) via the address
// table, then credited with the deallocated size.
error_code sys_memory_free(cpu_thread& cpu, u32 addr)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_free(addr=0x%x)", addr);

    // Address must be 64 KiB aligned and registered in the ownership table
    const auto ct = addr % 0x10000 ? nullptr : g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(nullptr);

    if (!ct)
    {
        return {CELL_EINVAL, addr};
    }

    const auto size = (ensure(vm::dealloc(addr)));
    // Credit the container under the IDM reader lock so it cannot be
    // destroyed concurrently
    reader_lock{id_manager::g_mutex}, ct->free(size);
    return CELL_OK;
}
// lv2 syscall: query page attributes (protection, access rights, page size)
// for a mapped address.
error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_page_attr_t> attr)
{
    cpu.state += cpu_flag::wait;

    sys_memory.trace("sys_memory_get_page_attribute(addr=0x%x, attr=*0x%x)", addr, attr);

    vm::writer_lock rlock;

    if (!vm::check_addr(addr) || addr >= SPU_FAKE_BASE_ADDR)
    {
        return CELL_EINVAL;
    }

    // NOTE(review): the output pointer is validated with page_readable even
    // though it is written below — confirm whether page_writable was intended
    if (!vm::check_addr(attr.addr(), vm::page_readable, attr.size()))
    {
        return CELL_EFAULT;
    }

    attr->attribute = 0x40000ull; // SYS_MEMORY_PROT_READ_WRITE (TODO)
    attr->access_right = addr >> 28 == 0xdu ? SYS_MEMORY_ACCESS_RIGHT_PPU_THR : SYS_MEMORY_ACCESS_RIGHT_ANY;// (TODO)

    if (vm::check_addr(addr, vm::page_1m_size))
    {
        attr->page_size = 0x100000;
    }
    else if (vm::check_addr(addr, vm::page_64k_size))
    {
        attr->page_size = 0x10000;
    }
    else
    {
        attr->page_size = 4096;
    }

    attr->pad = 0; // Always write 0
    return CELL_OK;
}
// lv2 syscall: report total/available user memory of the default container.
// Memory reserved by user-created containers is subtracted from the total.
error_code sys_memory_get_user_memory_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_get_user_memory_size(mem_info=*0x%x)", mem_info);

    // Get "default" memory container
    auto& dct = g_fxo->get<lv2_memory_container>();

    sys_memory_info_t out{};

    {
        ::reader_lock lock(s_memstats_mtx);

        out.total_user_memory = dct.size;
        out.available_user_memory = dct.size - dct.used;

        // Scan other memory containers
        idm::select<lv2_memory_container>([&](u32, lv2_memory_container& ct)
        {
            out.total_user_memory -= ct.size;
        });
    }

    cpu.check_state();
    *mem_info = out;
    return CELL_OK;
}
// lv2 syscall: unimplemented stub — always succeeds without writing mem_stat.
error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat)
{
    cpu.state += cpu_flag::wait;

    sys_memory.todo("sys_memory_get_user_memory_stat(mem_stat=*0x%x)", mem_stat);

    return CELL_OK;
}
// lv2 syscall: create a memory container of 'size' bytes (rounded down to
// 1 MiB), carved out of the default container. The new container id is
// written to cid.
error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u64 size)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_container_create(cid=*0x%x, size=0x%x)", cid, size);

    // Round down to 1 MB granularity
    size &= ~0xfffff;

    if (!size)
    {
        return CELL_ENOMEM;
    }

    auto& dct = g_fxo->get<lv2_memory_container>();

    std::lock_guard lock(s_memstats_mtx);

    // Try to obtain "physical memory" from the default container
    if (!dct.take(size))
    {
        return CELL_ENOMEM;
    }

    // Create the memory container
    if (const u32 id = idm::make<lv2_memory_container>(static_cast<u32>(size), true))
    {
        cpu.check_state();
        *cid = id;
        return CELL_OK;
    }

    // IDM exhausted — return the reserved memory
    dct.free(size);
    return CELL_EAGAIN;
}
// lv2 syscall: destroy a memory container. Fails with EBUSY while any of its
// memory is still allocated; its capacity returns to the default container.
error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid)
{
    cpu.state += cpu_flag::wait;

    sys_memory.warning("sys_memory_container_destroy(cid=0x%x)", cid);

    std::lock_guard lock(s_memstats_mtx);

    const auto ct = idm::withdraw<lv2_memory_container>(cid, [](lv2_memory_container& ct) -> CellError
    {
        // Check if some memory is not deallocated (the container cannot be destroyed in this case)
        // The CAS to 'size' also poisons 'used' so concurrent frees cannot succeed
        if (!ct.used.compare_and_swap_test(0, ct.size))
        {
            return CELL_EBUSY;
        }

        return {};
    });

    if (!ct)
    {
        return CELL_ESRCH;
    }

    if (ct.ret)
    {
        return ct.ret;
    }

    // Return "physical memory" to the default container
    g_fxo->get<lv2_memory_container>().free(ct->size);

    return CELL_OK;
}
// Query the total and currently available memory of a specific container.
error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid)
{
	cpu.state += cpu_flag::wait;

	sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid);

	const auto ct = idm::get<lv2_memory_container>(cid);

	if (!ct)
	{
		return CELL_ESRCH;
	}

	// Sync with scheduler before writing to guest memory
	cpu.check_state();
	mem_info->total_user_memory = ct->size; // Total container memory
	mem_info->available_user_memory = ct->size - ct->used; // Available container memory
	return CELL_OK;
}
// Destroy a container together with its children. must_0 is a reserved argument
// which must be zero. Child containers are meaningless without multi-process
// support, so this forwards to plain container destruction.
error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child)
{
	sys_memory.warning("sys_memory_container_destroy_parent_with_childs(cid=0x%x, must_0=%d, mc_child=*0x%x)", cid, must_0, mc_child);

	if (must_0)
	{
		return CELL_EINVAL;
	}

	// Multi-process is not supported yet so child containers mean nothing at the moment
	// Simply destroy parent
	return sys_memory_container_destroy(cpu, cid);
}
| 9,660
|
C++
|
.cpp
| 329
| 26.841945
| 141
| 0.690058
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,352
|
sys_ppu_thread.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp
|
#include "stdafx.h"
#include "sys_ppu_thread.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/perf_meter.hpp"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Memory/vm_locking.h"
#include "sys_event.h"
#include "sys_process.h"
#include "sys_mmapper.h"
#include "sys_memory.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_ppu_thread);
// Simple structure to cleanup previous thread, because can't remove its own thread
// Simple structure to cleanup previous thread, because can't remove its own thread.
// Holds ownership of the most recently exited PPU thread object; the next exiting
// thread (or emulator state change) joins/destroys it.
struct ppu_thread_cleaner
{
	// Owning pointer to the previously exited thread (type-erased named_thread<ppu_thread>)
	std::shared_ptr<void> old;

	// Swap in a new zombie thread; returns the previous one so the caller can join it
	std::shared_ptr<void> clean(std::shared_ptr<void> ptr)
	{
		return std::exchange(old, std::move(ptr));
	}

	ppu_thread_cleaner() = default;

	ppu_thread_cleaner(const ppu_thread_cleaner&) = delete;

	ppu_thread_cleaner& operator=(const ppu_thread_cleaner&) = delete;

	// Propagate a thread_state (e.g. on emulator stop) to the held zombie thread
	ppu_thread_cleaner& operator=(thread_state state) noexcept
	{
		reader_lock lock(id_manager::g_mutex);

		if (old)
		{
			// It is detached from IDM now so join must be done explicitly now
			*static_cast<named_thread<ppu_thread>*>(old.get()) = state;
		}

		return *this;
	}
};
// Final teardown of a PPU thread: frees its stack, returns the stack reservation
// to the default memory container, and flushes debug call/syscall histories.
void ppu_thread_exit(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*)
{
	ppu.state += cpu_flag::exit + cpu_flag::wait;

	// Deallocate Stack Area
	ensure(vm::dealloc(ppu.stack_addr, vm::stack) == ppu.stack_size);

	// Return stack "physical memory" to the default container (if it still exists)
	if (auto dct = g_fxo->try_get<lv2_memory_container>())
	{
		dct->free(ppu.stack_size);
	}

	if (ppu.call_history.index)
	{
		ppu_log.notice("Calling history: %s", ppu.call_history);
		ppu.call_history.index = 0;
	}

	if (ppu.syscall_history.index)
	{
		ppu_log.notice("HLE/LV2 history: %s", ppu.syscall_history);
		ppu.syscall_history.index = 0;
	}
}
constexpr u32 c_max_ppu_name_size = 28;
// Syscall: terminate the calling PPU thread. If the thread is joinable it becomes
// a zombie and waits here for a joiner; otherwise it detaches itself from IDM and
// hands its owning pointer to ppu_thread_cleaner (a thread cannot destroy itself).
void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
{
	ppu.state += cpu_flag::wait;

	u64 writer_mask = 0;

	sys_ppu_thread.trace("_sys_ppu_thread_exit(errorcode=0x%llx)", errorcode);

	ppu_join_status old_status;

	// Avoid cases where cleaning causes the destructor to be called inside IDM lock scope (for performance)
	std::shared_ptr<void> old_ppu;
	{
		lv2_obj::notify_all_t notify;
		lv2_obj::prepare_for_sleep(ppu);

		std::lock_guard lock(id_manager::g_mutex);

		// Get joiner ID
		old_status = ppu.joiner.fetch_op([](ppu_join_status& status)
		{
			if (status == ppu_join_status::joinable)
			{
				// Joinable, not joined
				status = ppu_join_status::zombie;
				return;
			}

			// Set deleted thread status
			status = ppu_join_status::exited;
		});

		if (old_status >= ppu_join_status::max)
		{
			// Values >= max encode the joiner's thread id: wake the joiner
			lv2_obj::append(idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(old_status)));
		}

		if (old_status != ppu_join_status::joinable)
		{
			// Remove self ID from IDM, move owning ptr
			old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(std::move(idm::find_unlocked<named_thread<ppu_thread>>(ppu.id)->second));
		}

		// Get writers mask (wait for all current writers to quit)
		writer_mask = vm::g_range_lock_bits[1];

		// Unqueue
		lv2_obj::sleep(ppu);
		notify.cleanup();

		// Remove suspend state (TODO)
		ppu.state -= cpu_flag::suspend;
	}

	// Zombie state: park until a joiner consumes the thread
	while (ppu.joiner == ppu_join_status::zombie)
	{
		if (ppu.is_stopped() && ppu.joiner.compare_and_swap_test(ppu_join_status::zombie, ppu_join_status::joinable))
		{
			// Abort (emulator stopping): restore joinable so the syscall can be retried
			ppu.state += cpu_flag::again;
			return;
		}

		// Wait for termination
		thread_ctrl::wait_on(ppu.joiner, ppu_join_status::zombie);
	}

	ppu_thread_exit(ppu, {}, nullptr, nullptr);

	if (old_ppu)
	{
		// It is detached from IDM now so join must be done explicitly now
		*static_cast<named_thread<ppu_thread>*>(old_ppu.get()) = thread_state::finished;
	}

	// Need to wait until the current writers finish
	if (ppu.state & cpu_flag::memory)
	{
		for (; writer_mask; writer_mask &= vm::g_range_lock_bits[1])
		{
			busy_wait(200);
		}
	}
}
// Syscall: yield the calling PPU thread's timeslice.
// Returns 0 when a context switch actually happened, 1 otherwise.
s32 sys_ppu_thread_yield(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.trace("sys_ppu_thread_yield()");

	const bool switched = lv2_obj::yield(ppu);
	return switched ? 0 : 1;
}
// Syscall: join another PPU thread, optionally retrieving its exit value.
// Registers this thread as the joiner (or consumes an already-zombie target),
// waits for the target to finish, then removes it from IDM.
error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr)
{
	lv2_obj::prepare_for_sleep(ppu);

	sys_ppu_thread.trace("sys_ppu_thread_join(thread_id=0x%x, vptr=*0x%x)", thread_id, vptr);

	// Self-join would deadlock
	if (thread_id == ppu.id)
	{
		return CELL_EDEADLK;
	}

	auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread& thread) -> CellError
	{
		CellError result = thread.joiner.atomic_op([&](ppu_join_status& value) -> CellError
		{
			switch (value)
			{
			case ppu_join_status::joinable:
				// Claim the join: store our thread id in the joiner slot
				value = ppu_join_status{ppu.id};
				return {};
			case ppu_join_status::zombie:
				// Target already finished; consume it (EAGAIN marks this internal path)
				value = ppu_join_status::exited;
				return CELL_EAGAIN;
			case ppu_join_status::exited:
				return CELL_ESRCH;
			case ppu_join_status::detached:
			default:
				return CELL_EINVAL;
			}
		});

		if (!result)
		{
			// Join claimed: go to sleep until the target exits
			lv2_obj::prepare_for_sleep(ppu);
			lv2_obj::sleep(ppu);
		}

		notify.cleanup();
		return result;
	});

	if (!thread)
	{
		return CELL_ESRCH;
	}

	if (thread.ret && thread.ret != CELL_EAGAIN)
	{
		return thread.ret;
	}

	if (thread.ret == CELL_EAGAIN)
	{
		// Notify thread if waiting for a joiner
		thread->joiner.notify_one();
	}

	// Wait for cleanup (joins the underlying host thread)
	(*thread.ptr)();

	if (thread->joiner != ppu_join_status::exited)
	{
		// Thread aborted, log it later
		ppu.state += cpu_flag::again;
		return {};
	}

	static_cast<void>(ppu.test_stopped());

	// Get the exit status from the register
	const u64 vret = thread->gpr[3];

	if (thread.ret == CELL_EAGAIN)
	{
		// Cleanup
		ensure(idm::remove_verify<named_thread<ppu_thread>>(thread_id, std::move(thread.ptr)));
	}

	if (!vptr)
	{
		// Join succeeded but there is nowhere to store the exit value
		return not_an_error(CELL_EFAULT);
	}

	*vptr = vret;
	return CELL_OK;
}
// Syscall: detach a PPU thread so it cleans itself up on exit.
// If the target is already a zombie, it is withdrawn from IDM and joined here.
error_code sys_ppu_thread_detach(ppu_thread& ppu, u32 thread_id)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.warning("sys_ppu_thread_detach(thread_id=0x%x)", thread_id);

	CellError result = CELL_ESRCH;

	auto [ptr, _] = idm::withdraw<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
	{
		result = thread.joiner.atomic_op([](ppu_join_status& value) -> CellError
		{
			switch (value)
			{
			case ppu_join_status::joinable:
				value = ppu_join_status::detached;
				return {};
			case ppu_join_status::detached:
				return CELL_EINVAL;
			case ppu_join_status::zombie:
				// Already exited: consume it (EAGAIN marks this internal path)
				value = ppu_join_status::exited;
				return CELL_EAGAIN;
			case ppu_join_status::exited:
				return CELL_ESRCH;
			default:
				// A joiner id is stored: thread is being joined
				return CELL_EBUSY;
			}
		});

		// Remove ID on EAGAIN (keep the thread in IDM otherwise)
		return result != CELL_EAGAIN;
	});

	if (result)
	{
		if (result == CELL_EAGAIN)
		{
			// Join and notify thread (it is detached from IDM now so it must be done explicitly now)
			*ptr = thread_state::finished;
		}

		return result;
	}

	return CELL_OK;
}
// Syscall: report whether the calling thread is joinable (1) or detached (0).
error_code sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ptr<s32> isjoinable)
{
	sys_ppu_thread.trace("sys_ppu_thread_get_join_state(isjoinable=*0x%x)", isjoinable);

	if (!isjoinable)
	{
		return CELL_EFAULT;
	}

	const bool joinable = ppu.joiner != ppu_join_status::detached;
	*isjoinable = joinable ? 1 : 0;
	return CELL_OK;
}
// Syscall: change a PPU thread's scheduling priority.
// Valid range is 0..3071, extended down to -512 for debug/root processes.
error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.trace("sys_ppu_thread_set_priority(thread_id=0x%x, prio=%d)", thread_id, prio);

	if (prio < (g_ps3_process_info.debug_or_root() ? -512 : 0) || prio > 3071)
	{
		return CELL_EINVAL;
	}

	if (thread_id == ppu.id)
	{
		// Fast path for self
		if (ppu.prio.load().prio != prio)
		{
			lv2_obj::set_priority(ppu, prio);
		}

		return CELL_OK;
	}

	const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread& thread)
	{
		lv2_obj::set_priority(thread, prio);
	});

	if (!thread)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Syscall: read a PPU thread's scheduling priority.
// The read is retried until it can be taken under the scheduler lock while this
// thread is not suspended, to get a consistent value.
error_code sys_ppu_thread_get_priority(ppu_thread& ppu, u32 thread_id, vm::ptr<s32> priop)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.trace("sys_ppu_thread_get_priority(thread_id=0x%x, priop=*0x%x)", thread_id, priop);

	u32 prio{};

	if (thread_id == ppu.id)
	{
		// Fast path for self
		for (; !ppu.is_stopped(); std::this_thread::yield())
		{
			if (reader_lock lock(lv2_obj::g_mutex); cpu_flag::suspend - ppu.state)
			{
				prio = ppu.prio.load().prio;
				break;
			}

			// Suspended: process state and retry
			ppu.check_state();
			ppu.state += cpu_flag::wait;
		}

		ppu.check_state();
		*priop = prio;
		return CELL_OK;
	}

	for (; !ppu.is_stopped(); std::this_thread::yield())
	{
		bool check_state = false;

		const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
		{
			if (reader_lock lock(lv2_obj::g_mutex); cpu_flag::suspend - ppu.state)
			{
				prio = thread.prio.load().prio;
			}
			else
			{
				check_state = true;
			}
		});

		if (check_state)
		{
			// Could not read under lock this iteration: process state and retry
			ppu.check_state();
			ppu.state += cpu_flag::wait;
			continue;
		}

		if (!thread)
		{
			return CELL_ESRCH;
		}

		ppu.check_state();
		*priop = prio;
		break;
	}

	return CELL_OK;
}
// Syscall: report the calling thread's stack base address and size.
error_code sys_ppu_thread_get_stack_information(ppu_thread& ppu, vm::ptr<sys_ppu_thread_stack_t> sp)
{
	sys_ppu_thread.trace("sys_ppu_thread_get_stack_information(sp=*0x%x)", sp);

	sp->pst_addr = ppu.stack_addr;
	sp->pst_size = ppu.stack_size;

	return CELL_OK;
}
// Syscall stub (TODO): validate permissions and the target thread id only;
// the actual stop operation is not implemented.
error_code sys_ppu_thread_stop(ppu_thread& ppu, u32 thread_id)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);

	// Root permission is required for this syscall
	if (!g_ps3_process_info.has_root_perm())
	{
		return CELL_ENOSYS;
	}

	// Only verify that the thread exists
	if (!idm::check<named_thread<ppu_thread>>(thread_id))
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Syscall stub (TODO): permission check only; restart is not implemented.
error_code sys_ppu_thread_restart(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.todo("sys_ppu_thread_restart()");

	// Root permission is required for this syscall
	return g_ps3_process_info.has_root_perm() ? CELL_OK : CELL_ENOSYS;
}
// Syscall: create a new PPU thread (not yet started).
// Allocates the stack from the default memory container, reads the optional
// name from guest memory and registers the thread in IDM.
error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> param, u64 arg, u64 unk, s32 prio, u32 _stacksz, u64 flags, vm::cptr<char> threadname)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.warning("_sys_ppu_thread_create(thread_id=*0x%x, param=*0x%x, arg=0x%llx, unk=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname=*0x%x)",
		thread_id, param, arg, unk, prio, _stacksz, flags, threadname);

	// thread_id is checked for null in stub -> CELL_ENOMEM
	// unk is set to 0 in sys_ppu_thread_create stub

	if (!param || !param->entry)
	{
		return CELL_EFAULT;
	}

	if (prio < (g_ps3_process_info.debug_or_root() ? -512 : 0) || prio > 3071)
	{
		return CELL_EINVAL;
	}

	if ((flags & 3) == 3) // Check two flags: joinable + interrupt not allowed
	{
		return CELL_EPERM;
	}

	const ppu_func_opd_t entry = param->entry.opd();
	const u32 tls = param->tls;

	// Compute actual stack size and allocate
	const u32 stack_size = utils::align<u32>(std::max<u32>(_stacksz, 4096), 4096);

	auto& dct = g_fxo->get<lv2_memory_container>();

	// Try to obtain "physical memory" from the default container
	if (!dct.take(stack_size))
	{
		return {CELL_ENOMEM, dct.size - dct.used};
	}

	const vm::addr_t stack_base{vm::alloc(stack_size, vm::stack, 4096)};

	if (!stack_base)
	{
		dct.free(stack_size);
		return CELL_ENOMEM;
	}

	std::string ppu_name;

	if (threadname)
	{
		constexpr u32 max_size = c_max_ppu_name_size - 1; // max size excluding null terminator

		if (!vm::read_string(threadname.addr(), max_size, ppu_name, true))
		{
			// Note: the stack allocation itself is not freed here; only the container
			// reservation is returned — presumably matching firmware behavior (TODO confirm)
			dct.free(stack_size);
			return CELL_EFAULT;
		}
	}

	const u32 tid = idm::import<named_thread<ppu_thread>>([&]()
	{
		ppu_thread_params p;
		p.stack_addr = stack_base;
		p.stack_size = stack_size;
		p.tls_addr = tls;
		p.entry = entry;
		p.arg0 = arg;
		p.arg1 = unk;

		// Flag bits 0/1 encode joinable/interrupt-disallowed; mapped to the detached argument
		return std::make_shared<named_thread<ppu_thread>>(p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
	});

	if (!tid)
	{
		// IDM full: roll back stack allocation and container reservation
		vm::dealloc(stack_base);
		dct.free(stack_size);
		return CELL_EAGAIN;
	}

	sys_ppu_thread.warning(u8"_sys_ppu_thread_create(): Thread “%s” created (id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)", ppu_name, tid, entry.addr, entry.rtoc, tls);

	// Sync with scheduler before writing to guest memory
	ppu.check_state();
	*thread_id = tid;
	return CELL_OK;
}
// Syscall: start a previously created PPU thread by clearing its stop flag,
// queuing the entry call and waking the host thread.
error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.trace("sys_ppu_thread_start(thread_id=0x%x)", thread_id);

	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&, notify = lv2_obj::notify_all_t()](ppu_thread& thread) -> CellError
	{
		if (!thread.state.test_and_reset(cpu_flag::stop))
		{
			// Already started
			return CELL_EBUSY;
		}

		ensure(lv2_obj::awake(&thread));

		// Queue the command that makes the thread enter its entry point
		thread.cmd_list
		({
			{ppu_cmd::entry_call, 0},
		});

		return {};
	});

	if (!thread)
	{
		return CELL_ESRCH;
	}

	if (thread.ret)
	{
		return thread.ret;
	}
	else
	{
		// Wake the host thread so it picks up the queued command
		thread->cmd_notify.store(1);
		thread->cmd_notify.notify_one();
	}

	return CELL_OK;
}
// Syscall: rename a PPU thread. Reads the new name from guest memory
// (truncated to 27 characters) and updates the debugger-visible name.
error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char> name)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);

	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);

	if (!thread)
	{
		return CELL_ESRCH;
	}

	if (!name)
	{
		return CELL_EFAULT;
	}

	constexpr u32 max_size = c_max_ppu_name_size - 1; // max size excluding null terminator

	// Make valid name
	std::string out_str;

	if (!vm::read_string(name.addr(), max_size, out_str, true))
	{
		return CELL_EFAULT;
	}

	auto _name = make_single<std::string>(std::move(out_str));

	// thread_ctrl name is not changed (TODO)
	sys_ppu_thread.warning(u8"sys_ppu_thread_rename(): Thread renamed to “%s”", *_name);
	thread->ppu_tname.store(std::move(_name));
	thread_ctrl::set_name(*thread, thread->thread_name); // TODO: Currently sets debugger thread name only for local thread

	return CELL_OK;
}
// Syscall: resume a PPU thread that was suspended by a page fault event.
error_code sys_ppu_thread_recover_page_fault(ppu_thread& ppu, u32 thread_id)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)", thread_id);

	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);

	if (!thread)
	{
		return CELL_ESRCH;
	}

	// Delegate the actual recovery to the mmapper page fault machinery
	return mmapper_thread_recover_page_fault(thread.get());
}
// Syscall (partially implemented): obtain the interrupt context of a thread
// suspended on a page fault. Currently validates only; ctxt is never filled.
error_code sys_ppu_thread_get_page_fault_context(ppu_thread& ppu, u32 thread_id, vm::ptr<sys_ppu_thread_icontext_t> ctxt)
{
	ppu.state += cpu_flag::wait;

	sys_ppu_thread.todo("sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)", thread_id, ctxt);

	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);

	if (!thread)
	{
		return CELL_ESRCH;
	}

	// We can only get a context if the thread is being suspended for a page fault.
	auto& pf_events = g_fxo->get<page_fault_event_entries>();
	reader_lock lock(pf_events.pf_mutex);

	const auto evt = pf_events.events.find(thread.get());
	if (evt == pf_events.events.end())
	{
		return CELL_EINVAL;
	}

	// TODO: Fill ctxt with proper information.

	return CELL_OK;
}
| 14,942
|
C++
|
.cpp
| 522
| 25.862069
| 189
| 0.686624
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,353
|
sys_net.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net.cpp
|
#include "stdafx.h"
#include "sys_net.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUThread.h"
#include "Utilities/Thread.h"
#include "sys_sync.h"
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "Emu/NP/np_handler.h"
#include "Emu/NP/np_helpers.h"
#include "Emu/NP/np_dnshook.h"
#include <chrono>
#include <shared_mutex>
#include "sys_net/network_context.h"
#include "sys_net/lv2_socket.h"
#include "sys_net/lv2_socket_native.h"
#include "sys_net/lv2_socket_raw.h"
#include "sys_net/lv2_socket_p2p.h"
#include "sys_net/lv2_socket_p2ps.h"
#include "sys_net/sys_net_helpers.h"
LOG_CHANNEL(sys_net);
LOG_CHANNEL(sys_net_dump);
// Log formatter for sys_net_error: prints the enumerator name, with a leading
// '-' for the negated variants used as syscall return values.
template <>
void fmt_class_string<sys_net_error>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto error)
	{
		switch (s32 _error = error)
		{
		// Maps both x and -x of each error code to its printable name
#define SYS_NET_ERROR_CASE(x) \
	case -x: return "-" #x; \
	case x: \
		return #x

		SYS_NET_ERROR_CASE(SYS_NET_ENOENT);
		SYS_NET_ERROR_CASE(SYS_NET_EINTR);
		SYS_NET_ERROR_CASE(SYS_NET_EBADF);
		SYS_NET_ERROR_CASE(SYS_NET_ENOMEM);
		SYS_NET_ERROR_CASE(SYS_NET_EACCES);
		SYS_NET_ERROR_CASE(SYS_NET_EFAULT);
		SYS_NET_ERROR_CASE(SYS_NET_EBUSY);
		SYS_NET_ERROR_CASE(SYS_NET_EINVAL);
		SYS_NET_ERROR_CASE(SYS_NET_EMFILE);
		SYS_NET_ERROR_CASE(SYS_NET_ENOSPC);
		SYS_NET_ERROR_CASE(SYS_NET_EPIPE);
		SYS_NET_ERROR_CASE(SYS_NET_EAGAIN);
		static_assert(SYS_NET_EWOULDBLOCK == SYS_NET_EAGAIN);
		SYS_NET_ERROR_CASE(SYS_NET_EINPROGRESS);
		SYS_NET_ERROR_CASE(SYS_NET_EALREADY);
		SYS_NET_ERROR_CASE(SYS_NET_EDESTADDRREQ);
		SYS_NET_ERROR_CASE(SYS_NET_EMSGSIZE);
		SYS_NET_ERROR_CASE(SYS_NET_EPROTOTYPE);
		SYS_NET_ERROR_CASE(SYS_NET_ENOPROTOOPT);
		SYS_NET_ERROR_CASE(SYS_NET_EPROTONOSUPPORT);
		SYS_NET_ERROR_CASE(SYS_NET_EOPNOTSUPP);
		SYS_NET_ERROR_CASE(SYS_NET_EPFNOSUPPORT);
		SYS_NET_ERROR_CASE(SYS_NET_EAFNOSUPPORT);
		SYS_NET_ERROR_CASE(SYS_NET_EADDRINUSE);
		SYS_NET_ERROR_CASE(SYS_NET_EADDRNOTAVAIL);
		SYS_NET_ERROR_CASE(SYS_NET_ENETDOWN);
		SYS_NET_ERROR_CASE(SYS_NET_ENETUNREACH);
		SYS_NET_ERROR_CASE(SYS_NET_ECONNABORTED);
		SYS_NET_ERROR_CASE(SYS_NET_ECONNRESET);
		SYS_NET_ERROR_CASE(SYS_NET_ENOBUFS);
		SYS_NET_ERROR_CASE(SYS_NET_EISCONN);
		SYS_NET_ERROR_CASE(SYS_NET_ENOTCONN);
		SYS_NET_ERROR_CASE(SYS_NET_ESHUTDOWN);
		SYS_NET_ERROR_CASE(SYS_NET_ETOOMANYREFS);
		SYS_NET_ERROR_CASE(SYS_NET_ETIMEDOUT);
		SYS_NET_ERROR_CASE(SYS_NET_ECONNREFUSED);
		SYS_NET_ERROR_CASE(SYS_NET_EHOSTDOWN);
		SYS_NET_ERROR_CASE(SYS_NET_EHOSTUNREACH);
#undef SYS_NET_ERROR_CASE
		}

		return unknown;
	});
}
// Log formatter for lv2_socket_type: prints a short human-readable socket type.
template <>
void fmt_class_string<lv2_socket_type>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_SOCK_STREAM: return "STREAM";
		case SYS_NET_SOCK_DGRAM: return "DGRAM";
		case SYS_NET_SOCK_RAW: return "RAW";
		case SYS_NET_SOCK_DGRAM_P2P: return "DGRAM-P2P";
		case SYS_NET_SOCK_STREAM_P2P: return "STREAM-P2P";
		}

		return unknown;
	});
}
// Log formatter for lv2_socket_family: prints the address family name.
template <>
void fmt_class_string<lv2_socket_family>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_AF_UNSPEC: return "UNSPEC";
		case SYS_NET_AF_LOCAL: return "LOCAL";
		case SYS_NET_AF_INET: return "INET";
		case SYS_NET_AF_INET6: return "INET6";
		}

		return unknown;
	});
}
// Log formatter for lv2_ip_protocol: prints the IP protocol constant name.
template <>
void fmt_class_string<lv2_ip_protocol>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_IPPROTO_IP: return "IPPROTO_IP";
		case SYS_NET_IPPROTO_ICMP: return "IPPROTO_ICMP";
		case SYS_NET_IPPROTO_IGMP: return "IPPROTO_IGMP";
		case SYS_NET_IPPROTO_TCP: return "IPPROTO_TCP";
		case SYS_NET_IPPROTO_UDP: return "IPPROTO_UDP";
		case SYS_NET_IPPROTO_ICMPV6: return "IPPROTO_ICMPV6";
		}

		return unknown;
	});
}
// Log formatter for lv2_tcp_option: prints the TCP socket option name.
template <>
void fmt_class_string<lv2_tcp_option>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_TCP_NODELAY: return "TCP_NODELAY";
		case SYS_NET_TCP_MAXSEG: return "TCP_MAXSEG";
		case SYS_NET_TCP_MSS_TO_ADVERTISE: return "TCP_MSS_TO_ADVERTISE";
		}

		return unknown;
	});
}
// Log formatter for lv2_socket_option: prints the SOL_SOCKET-level option name.
template <>
void fmt_class_string<lv2_socket_option>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_SO_SNDBUF: return "SO_SNDBUF";
		case SYS_NET_SO_RCVBUF: return "SO_RCVBUF";
		case SYS_NET_SO_SNDLOWAT: return "SO_SNDLOWAT";
		case SYS_NET_SO_RCVLOWAT: return "SO_RCVLOWAT";
		case SYS_NET_SO_SNDTIMEO: return "SO_SNDTIMEO";
		case SYS_NET_SO_RCVTIMEO: return "SO_RCVTIMEO";
		case SYS_NET_SO_ERROR: return "SO_ERROR";
		case SYS_NET_SO_TYPE: return "SO_TYPE";
		case SYS_NET_SO_NBIO: return "SO_NBIO";
		case SYS_NET_SO_TPPOLICY: return "SO_TPPOLICY";
		case SYS_NET_SO_REUSEADDR: return "SO_REUSEADDR";
		case SYS_NET_SO_KEEPALIVE: return "SO_KEEPALIVE";
		case SYS_NET_SO_BROADCAST: return "SO_BROADCAST";
		case SYS_NET_SO_LINGER: return "SO_LINGER";
		case SYS_NET_SO_OOBINLINE: return "SO_OOBINLINE";
		case SYS_NET_SO_REUSEPORT: return "SO_REUSEPORT";
		case SYS_NET_SO_ONESBCAST: return "SO_ONESBCAST";
		case SYS_NET_SO_USECRYPTO: return "SO_USECRYPTO";
		case SYS_NET_SO_USESIGNATURE: return "SO_USESIGNATURE";
		case SYS_NET_SOL_SOCKET: return "SOL_SOCKET";
		}

		return unknown;
	});
}
// Log formatter for lv2_ip_option: prints the IPPROTO_IP-level option name.
template <>
void fmt_class_string<lv2_ip_option>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_NET_IP_HDRINCL: return "IP_HDRINCL";
		case SYS_NET_IP_TOS: return "IP_TOS";
		case SYS_NET_IP_TTL: return "IP_TTL";
		case SYS_NET_IP_MULTICAST_IF: return "IP_MULTICAST_IF";
		case SYS_NET_IP_MULTICAST_TTL: return "IP_MULTICAST_TTL";
		case SYS_NET_IP_MULTICAST_LOOP: return "IP_MULTICAST_LOOP";
		case SYS_NET_IP_ADD_MEMBERSHIP: return "IP_ADD_MEMBERSHIP";
		case SYS_NET_IP_DROP_MEMBERSHIP: return "IP_DROP_MEMBERSHIP";
		case SYS_NET_IP_TTLCHK: return "IP_TTLCHK";
		case SYS_NET_IP_MAXTTL: return "IP_MAXTTL";
		case SYS_NET_IP_DONTFRAG: return "IP_DONTFRAG";
		}

		return unknown;
	});
}
// Log formatter for in_addr: prints the IPv4 address in dotted-decimal form,
// reading the 4 octets directly in memory order.
template <>
void fmt_class_string<struct in_addr>::format(std::string& out, u64 arg)
{
	const u8* data = reinterpret_cast<const u8*>(&get_object(arg));

	fmt::append(out, "%u.%u.%u.%u", data[0], data[1], data[2], data[3]);
}
// Deserialization constructor: restores an lv2_socket from a savestate.
// The field read order must match lv2_socket::save exactly.
lv2_socket::lv2_socket(utils::serial& ar, lv2_socket_type _type)
	: family(ar)
	, type(_type)
	, protocol(ar)
	, so_nbio(ar)
	, so_error(ar)
	, so_tcp_maxseg(ar)
#ifdef _WIN32
	, so_reuseaddr(ar)
	, so_reuseport(ar)
{
#else
{
	// Try to match structure between different platforms
	// (Windows serializes two extra option fields; skip their 8 bytes here)
	ar.pos += 8;
#endif
	[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_net);

	ar(so_rcvtimeo, so_sendtimeo);

	// The socket keeps the same IDM id it had when the state was saved
	lv2_id = idm::last_id();

	ar(last_bound_addr);
}
// Savestate factory: reads the socket type tag and constructs the matching
// lv2_socket subclass, then re-binds it if it was bound when saved.
std::shared_ptr<void> lv2_socket::load(utils::serial& ar)
{
	const lv2_socket_type type{ar};

	std::shared_ptr<lv2_socket> sock_lv2;

	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	{
		// Native sockets need a fresh host socket after restore
		auto lv2_native = std::make_shared<lv2_socket_native>(ar, type);
		ensure(lv2_native->create_socket() >= 0);
		sock_lv2 = std::move(lv2_native);
		break;
	}
	case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(ar, type); break;
	case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(ar, type); break;
	case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(ar, type); break;
	}

	// A non-zero saved address means the socket was bound: restore the binding
	if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
	{
		// NOTE: It is allowed fail
		sock_lv2->bind(sock_lv2->last_bound_addr);
	}

	return sock_lv2;
}
// Savestate serialization. With save_only_this_class set, writes only the base
// class fields (called by subclasses); otherwise writes the type tag and
// dispatches to the concrete subclass save.
void lv2_socket::save(utils::serial& ar, bool save_only_this_class)
{
	USING_SERIALIZATION_VERSION(lv2_net);

	if (save_only_this_class)
	{
		ar(family, protocol, so_nbio, so_error, so_tcp_maxseg);
#ifdef _WIN32
		ar(so_reuseaddr, so_reuseport);
#else
		// Pad 8 bytes so the layout matches the Windows serialization
		ar(std::array<char, 8>{});
#endif
		ar(so_rcvtimeo, so_sendtimeo);
		ar(last_bound_addr);
		return;
	}

	ar(type);

	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	{
		static_cast<lv2_socket_native*>(this)->save(ar);
		break;
	}
	case SYS_NET_SOCK_RAW: static_cast<lv2_socket_raw*>(this)->save(ar); break;
	case SYS_NET_SOCK_DGRAM_P2P: static_cast<lv2_socket_p2p*>(this)->save(ar); break;
	case SYS_NET_SOCK_STREAM_P2P: static_cast<lv2_socket_p2ps*>(this)->save(ar); break;
	}
}
// Trace-log a network payload as hex, prefixed with the P2P peer address when one is given.
void sys_net_dump_data(std::string_view desc, const u8* data, s32 len, const void* addr)
{
	if (const auto* p2p = static_cast<const sys_net_sockaddr_in_p2p*>(addr))
	{
		sys_net_dump.trace("%s(%s:%d:%d): %s", desc, np::ip_to_string(std::bit_cast<u32>(p2p->sin_addr)), p2p->sin_port, p2p->sin_vport, fmt::buf_to_hexstring(data, len));
	}
	else
	{
		sys_net_dump.trace("%s: %s", desc, fmt::buf_to_hexstring(data, len));
	}
}
// Syscall: accept a connection on a listening socket. Tries a non-blocking
// accept first; otherwise queues a poll callback and sleeps until a connection
// arrives, then registers the new socket in IDM and returns its id.
error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_accept(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);

	// addr and paddrlen must be both present or both absent
	if (addr.operator bool() != paddrlen.operator bool() || (paddrlen && *paddrlen < addr.size()))
	{
		return -SYS_NET_EINVAL;
	}

	s32 result = 0;
	sys_net_sockaddr sn_addr{};
	std::shared_ptr<lv2_socket> new_socket{};

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
	{
		// Immediate attempt (non-blocking path)
		auto [success, res, res_socket, res_addr] = sock.accept();

		if (success)
		{
			result = res;
			sn_addr = res_addr;
			new_socket = std::move(res_socket);
			return true;
		}

		// Would block: register a read-poll callback and put the PPU to sleep
		auto lock = sock.lock();

		sock.poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), lv2_socket::poll_t::read, [&](bs_t<lv2_socket::poll_t> events) -> bool
		{
			if (events & lv2_socket::poll_t::read)
			{
				auto [success, res, res_socket, res_addr] = sock.accept(false);
				if (success)
				{
					result = res;
					sn_addr = res_addr;
					new_socket = std::move(res_socket);
					lv2_obj::awake(&ppu);
					return success;
				}
			}

			sock.set_poll_event(lv2_socket::poll_t::read);
			return false;
		});

		lv2_obj::prepare_for_sleep(ppu);
		lv2_obj::sleep(ppu);
		return false;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (!sock.ret)
	{
		// Blocking path: wait for the poll callback to signal us
		while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
		{
			if (is_stopped(state))
			{
				// Emulator stopping: return without a result
				return {};
			}

			if (state & cpu_flag::signal)
			{
				break;
			}

			ppu.state.wait(state);
		}

		if (ppu.gpr[3] == static_cast<u64>(-SYS_NET_EINTR))
		{
			return -SYS_NET_EINTR;
		}

		if (result < 0)
		{
			return sys_net_error{result};
		}
	}

	if (result < 0)
	{
		return sys_net_error{result};
	}

	s32 id_ps3 = result;

	if (!id_ps3)
	{
		// No pre-assigned id: register the accepted socket in IDM
		ensure(new_socket);
		id_ps3 = idm::import_existing<lv2_socket>(new_socket);

		if (id_ps3 == id_manager::id_traits<lv2_socket>::invalid)
		{
			return -SYS_NET_EMFILE;
		}
	}

	static_cast<void>(ppu.test_stopped());

	if (addr)
	{
		*paddrlen = sizeof(sys_net_sockaddr_in);
		*addr = sn_addr;
	}

	// Socket ID
	return not_an_error(id_ps3);
}
// Syscall: bind a socket to a local address. Only AF_INET (and AF_UNSPEC,
// treated as INET) is supported.
error_code sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr> addr, u32 addrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_bind(s=%d, addr=*0x%x, addrlen=%u)", s, addr, addrlen);

	if (!addr || addrlen < addr.size())
	{
		return -SYS_NET_EINVAL;
	}

	// Early existence check before reading guest memory
	if (!idm::check<lv2_socket>(s))
	{
		return -SYS_NET_EBADF;
	}

	const sys_net_sockaddr sn_addr = *addr;

	// 0 presumably defaults to AF_INET(to check?)
	if (sn_addr.sa_family != SYS_NET_AF_INET && sn_addr.sa_family != SYS_NET_AF_UNSPEC)
	{
		sys_net.error("sys_net_bnet_bind: unsupported sa_family (%d)", sn_addr.sa_family);
		return -SYS_NET_EAFNOSUPPORT;
	}

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		return sock.bind(sn_addr);
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Syscall: connect a socket to a remote address. A connect that cannot complete
// immediately registers a write-poll callback and puts the PPU to sleep until
// the connection resolves (or returns EINPROGRESS for non-blocking sockets).
error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr, u32 addrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_connect(s=%d, addr=*0x%x, addrlen=%u)", s, addr, addrlen);

	if (!addr || addrlen < addr.size())
	{
		return -SYS_NET_EINVAL;
	}

	if (addr->sa_family != SYS_NET_AF_INET)
	{
		sys_net.error("sys_net_bnet_connect(s=%d): unsupported sa_family (%d)", s, addr->sa_family);
		return -SYS_NET_EAFNOSUPPORT;
	}

	// Early existence check before reading guest memory
	if (!idm::check<lv2_socket>(s))
	{
		return -SYS_NET_EBADF;
	}

	s32 result = 0;
	sys_net_sockaddr sn_addr = *addr;

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
	{
		// Immediate attempt; returns a value when the connect resolved synchronously
		const auto success = sock.connect(sn_addr);

		if (success)
		{
			result = *success;
			return true;
		}

		// In progress: register a write-poll callback and put the PPU to sleep
		auto lock = sock.lock();

		sock.poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), lv2_socket::poll_t::write, [&](bs_t<lv2_socket::poll_t> events) -> bool
		{
			if (events & lv2_socket::poll_t::write)
			{
				result = sock.connect_followup();
				lv2_obj::awake(&ppu);
				return true;
			}

			sock.set_poll_event(lv2_socket::poll_t::write);
			return false;
		});

		lv2_obj::prepare_for_sleep(ppu);
		lv2_obj::sleep(ppu);
		return false;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret)
	{
		// Synchronous completion: negative is an error, positive is passed through
		if (result < 0)
		{
			return sys_net_error{result};
		}

		return not_an_error(result);
	}

	if (!sock.ret)
	{
		// Blocking path: wait for the poll callback to signal us
		while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
		{
			if (is_stopped(state))
			{
				// Emulator stopping: return without a result
				return {};
			}

			if (state & cpu_flag::signal)
			{
				break;
			}

			ppu.state.wait(state);
		}

		if (ppu.gpr[3] == static_cast<u64>(-SYS_NET_EINTR))
		{
			return -SYS_NET_EINTR;
		}

		if (result)
		{
			if (result < 0)
			{
				return sys_net_error{result};
			}

			return not_an_error(result);
		}
	}

	return CELL_OK;
}
// Syscall: retrieve the remote address of a connected socket.
error_code sys_net_bnet_getpeername(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_getpeername(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);

	// Note: paddrlen is both an input and output argument
	if (!addr || !paddrlen || *paddrlen < addr.size())
	{
		return -SYS_NET_EINVAL;
	}

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		auto [res, sn_addr] = sock.getpeername();

		if (res == CELL_OK)
		{
			*paddrlen = sizeof(sys_net_sockaddr);
			*addr = sn_addr;
		}

		return res;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Syscall: retrieve the local address a socket is bound to.
error_code sys_net_bnet_getsockname(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_getsockname(s=%d, addr=*0x%x, paddrlen=*0x%x)", s, addr, paddrlen);

	// Note: paddrlen is both an input and output argument
	if (!addr || !paddrlen || *paddrlen < addr.size())
	{
		return -SYS_NET_EINVAL;
	}

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		auto [res, sn_addr] = sock.getsockname();

		if (res == CELL_OK)
		{
			*paddrlen = sizeof(sys_net_sockaddr);
			*addr = sn_addr;
		}

		return res;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Syscall: read a socket option value. The option name is logged with the
// enum type matching the level for readability; the actual lookup is delegated
// to the socket implementation.
error_code sys_net_bnet_getsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optname, vm::ptr<void> optval, vm::ptr<u32> optlen)
{
	ppu.state += cpu_flag::wait;

	switch (level)
	{
	case SYS_NET_SOL_SOCKET:
		sys_net.warning("sys_net_bnet_getsockopt(s=%d, level=SYS_NET_SOL_SOCKET, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_socket_option>(optname), optval, optlen);
		break;
	case SYS_NET_IPPROTO_TCP:
		sys_net.warning("sys_net_bnet_getsockopt(s=%d, level=SYS_NET_IPPROTO_TCP, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_tcp_option>(optname), optval, optlen);
		break;
	case SYS_NET_IPPROTO_IP:
		sys_net.warning("sys_net_bnet_getsockopt(s=%d, level=SYS_NET_IPPROTO_IP, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_ip_option>(optname), optval, optlen);
		break;
	default:
		sys_net.warning("sys_net_bnet_getsockopt(s=%d, level=0x%x, optname=0x%x, optval=*0x%x, optlen=%u)", s, level, optname, optval, optlen);
		break;
	}

	if (!optval || !optlen)
	{
		return -SYS_NET_EINVAL;
	}

	const u32 len = *optlen;

	if (!len)
	{
		return -SYS_NET_EINVAL;
	}

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		// Output buffer must fit at least an s32 value
		if (len < sizeof(s32))
		{
			return -SYS_NET_EINVAL;
		}

		const auto& [res, out_val, out_len] = sock.getsockopt(level, optname, *optlen);

		if (res == CELL_OK)
		{
			std::memcpy(optval.get_ptr(), out_val.ch, out_len);
			*optlen = out_len;
		}

		return res;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Put a stream socket into the listening (passive) state.
error_code sys_net_bnet_listen(ppu_thread& ppu, s32 s, s32 backlog)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_listen(s=%d, backlog=%d)", s, backlog);

	// A non-positive backlog is rejected outright
	if (backlog <= 0)
	{
		return -SYS_NET_EINVAL;
	}

	const auto do_listen = [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		return sock.listen(backlog);
	};

	const auto sock = idm::check<lv2_socket>(s, do_listen);

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Receive data from socket 's' into guest buffer 'buf' (lv2 syscall).
// If no data is immediately available the PPU thread is placed on the
// socket's poll queue and put to sleep until data arrives, the receive
// timeout (so_rcvtimeo) elapses, or the wait is aborted (EINTR via gpr[3]).
error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr<void> buf, u32 len, s32 flags, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.trace("sys_net_bnet_recvfrom(s=%d, buf=*0x%x, len=%u, flags=0x%x, addr=*0x%x, paddrlen=*0x%x)", s, buf, len, flags, addr, paddrlen);

	// If addr is null, paddrlen must be null as well
	if (!buf || !len || addr.operator bool() != paddrlen.operator bool())
	{
		return -SYS_NET_EINVAL;
	}

	if (flags & ~(SYS_NET_MSG_PEEK | SYS_NET_MSG_DONTWAIT | SYS_NET_MSG_WAITALL | SYS_NET_MSG_USECRYPTO | SYS_NET_MSG_USESIGNATURE))
	{
		fmt::throw_exception("sys_net_bnet_recvfrom(s=%d): unknown flags (0x%x)", flags);
	}

	s32 result = 0;
	sys_net_sockaddr sn_addr{};

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
	{
		// Fast path: try a non-blocking receive first
		const auto success = sock.recvfrom(flags, len);

		if (success)
		{
			const auto& [res, vec, res_addr] = *success;

			if (res > 0)
			{
				sn_addr = res_addr;
				std::memcpy(buf.get_ptr(), vec.data(), res);
				sys_net_dump_data("recvfrom", vec.data(), res, &res_addr);
			}

			result = res;
			return true;
		}

		// Slow path: register a wake-up callback on the socket's poll queue
		// and put this thread to sleep
		auto lock = sock.lock();

		sock.poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), lv2_socket::poll_t::read, [&](bs_t<lv2_socket::poll_t> events) -> bool
		{
			if (events & lv2_socket::poll_t::read)
			{
				const auto success = sock.recvfrom(flags, len, false);

				if (success)
				{
					const auto& [res, vec, res_addr] = *success;

					if (res > 0)
					{
						sn_addr = res_addr;
						std::memcpy(buf.get_ptr(), vec.data(), res);
						sys_net_dump_data("recvfrom", vec.data(), res, &res_addr);
					}

					result = res;
					lv2_obj::awake(&ppu);
					return true;
				}
			}

			// Give up with EWOULDBLOCK once the receive timeout has elapsed
			if (sock.so_rcvtimeo && get_guest_system_time() - ppu.start_time > sock.so_rcvtimeo)
			{
				result = -SYS_NET_EWOULDBLOCK;
				lv2_obj::awake(&ppu);
				return true;
			}

			// Keep waiting for readability
			sock.set_poll_event(lv2_socket::poll_t::read);
			return false;
		});

		lv2_obj::prepare_for_sleep(ppu);
		lv2_obj::sleep(ppu);
		return false;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	// sock.ret == false means this thread went to sleep; wait for the signal
	if (!sock.ret)
	{
		while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
		{
			if (is_stopped(state))
			{
				return {};
			}

			if (state & cpu_flag::signal)
			{
				break;
			}

			ppu.state.wait(state);
		}

		// abort_socket() delivers EINTR through gpr[3]
		if (ppu.gpr[3] == static_cast<u64>(-SYS_NET_EINTR))
		{
			return -SYS_NET_EINTR;
		}
	}

	static_cast<void>(ppu.test_stopped());

	if (result == -SYS_NET_EWOULDBLOCK)
	{
		return not_an_error(result);
	}

	if (result >= 0)
	{
		if (addr)
		{
			// NOTE(review): writes sizeof(sys_net_sockaddr_in) here while the
			// getsockname/getpeername paths write sizeof(sys_net_sockaddr) —
			// presumably the same size; confirm against the struct definitions
			*paddrlen = sizeof(sys_net_sockaddr_in);
			*addr = sn_addr;
		}

		return not_an_error(result);
	}

	return sys_net_error{result};
}
// TODO: scatter/gather receive is not implemented — the call is logged and
// reports success without touching the message header.
error_code sys_net_bnet_recvmsg(ppu_thread& ppu, s32 s, vm::ptr<sys_net_msghdr> msg, s32 flags)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_bnet_recvmsg(s=%d, msg=*0x%x, flags=0x%x)", s, msg, flags);
	return CELL_OK;
}
// Send a message described by a msghdr structure (lv2 syscall). Blocks on the
// socket's poll queue when the send cannot complete immediately.
// NOTE(review): unlike sendto/recvfrom, this path takes no sock.lock(), no
// lv2_obj::notify_all_t() guard and no prepare_for_sleep() before sleeping,
// and reads the msghdr from guest memory inside the callback — confirm this
// is intentional.
error_code sys_net_bnet_sendmsg(ppu_thread& ppu, s32 s, vm::cptr<sys_net_msghdr> msg, s32 flags)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_sendmsg(s=%d, msg=*0x%x, flags=0x%x)", s, msg, flags);

	if (flags & ~(SYS_NET_MSG_DONTWAIT | SYS_NET_MSG_WAITALL | SYS_NET_MSG_USECRYPTO | SYS_NET_MSG_USESIGNATURE))
	{
		fmt::throw_exception("sys_net_bnet_sendmsg(s=%d): unknown flags (0x%x)", flags);
	}

	s32 result{};

	const auto sock = idm::check<lv2_socket>(s, [&](lv2_socket& sock)
	{
		// Fast path: attempt the send without blocking
		auto netmsg = msg.get_ptr();
		const auto success = sock.sendmsg(flags, *netmsg);

		if (success)
		{
			result = *success;
			return true;
		}

		// Slow path: retry from the poll callback once the socket is writable
		sock.poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), lv2_socket::poll_t::write, [&](bs_t<lv2_socket::poll_t> events) -> bool
		{
			if (events & lv2_socket::poll_t::write)
			{
				const auto success = sock.sendmsg(flags, *netmsg, false);

				if (success)
				{
					result = *success;
					lv2_obj::awake(&ppu);
					return true;
				}
			}

			sock.set_poll_event(lv2_socket::poll_t::write);
			return false;
		});

		lv2_obj::sleep(ppu);
		return false;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	// Sleep until the poll callback (or an abort) signals us
	if (!sock.ret)
	{
		while (true)
		{
			const auto state = ppu.state.fetch_sub(cpu_flag::signal);

			if (is_stopped(state) || state & cpu_flag::signal)
			{
				break;
			}

			thread_ctrl::wait_on(ppu.state, state);
		}

		// abort_socket() delivers EINTR through gpr[3]
		if (ppu.gpr[3] == static_cast<u64>(-SYS_NET_EINTR))
		{
			return -SYS_NET_EINTR;
		}
	}

	if (result >= 0 || result == -SYS_NET_EWOULDBLOCK)
	{
		return not_an_error(result);
	}

	return sys_net_error{result};
}
// Send data on socket 's' (lv2 syscall), optionally to an explicit
// destination address. The payload and address are copied out of guest
// memory up front so the poll callback never touches guest pointers.
// Blocks (with so_sendtimeo as the limit) when the socket is not writable.
error_code sys_net_bnet_sendto(ppu_thread& ppu, s32 s, vm::cptr<void> buf, u32 len, s32 flags, vm::cptr<sys_net_sockaddr> addr, u32 addrlen)
{
	ppu.state += cpu_flag::wait;

	sys_net.trace("sys_net_bnet_sendto(s=%d, buf=*0x%x, len=%u, flags=0x%x, addr=*0x%x, addrlen=%u)", s, buf, len, flags, addr, addrlen);

	if (flags & ~(SYS_NET_MSG_DONTWAIT | SYS_NET_MSG_WAITALL | SYS_NET_MSG_USECRYPTO | SYS_NET_MSG_USESIGNATURE))
	{
		fmt::throw_exception("sys_net_bnet_sendto(s=%d): unknown flags (0x%x)", flags);
	}

	// A destination address, when given, must be at least 8 bytes and IPv4
	if (addr && addrlen < 8)
	{
		sys_net.error("sys_net_bnet_sendto(s=%d): bad addrlen (%u)", s, addrlen);
		return -SYS_NET_EINVAL;
	}

	if (addr && addr->sa_family != SYS_NET_AF_INET)
	{
		sys_net.error("sys_net_bnet_sendto(s=%d): unsupported sa_family (%d)", s, addr->sa_family);
		return -SYS_NET_EAFNOSUPPORT;
	}

	sys_net_dump_data("sendto", static_cast<const u8*>(buf.get_ptr()), len, addr ? addr.get_ptr() : nullptr);

	// Snapshot destination and payload so they stay valid while we wait
	const std::optional<sys_net_sockaddr> sn_addr = addr ? std::optional<sys_net_sockaddr>(*addr) : std::nullopt;
	const std::vector<u8> buf_copy(vm::_ptr<const char>(buf.addr()), vm::_ptr<const char>(buf.addr()) + len);

	s32 result{};

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
	{
		// Fast path: attempt the send without blocking
		auto success = sock.sendto(flags, buf_copy, sn_addr);

		if (success)
		{
			result = *success;
			return true;
		}

		auto lock = sock.lock();

		// Enable write event
		sock.poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), lv2_socket::poll_t::write, [&](bs_t<lv2_socket::poll_t> events) -> bool
		{
			if (events & lv2_socket::poll_t::write)
			{
				auto success = sock.sendto(flags, buf_copy, sn_addr, false);

				if (success)
				{
					result = *success;
					lv2_obj::awake(&ppu);
					return true;
				}
			}

			// Give up with EWOULDBLOCK once the send timeout has elapsed
			if (sock.so_sendtimeo && get_guest_system_time() - ppu.start_time > sock.so_sendtimeo)
			{
				result = -SYS_NET_EWOULDBLOCK;
				lv2_obj::awake(&ppu);
				return true;
			}

			sock.set_poll_event(lv2_socket::poll_t::write);
			return false;
		});

		lv2_obj::prepare_for_sleep(ppu);
		lv2_obj::sleep(ppu);
		return false;
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	// Sleep until the poll callback (or an abort) signals us
	if (!sock.ret)
	{
		while (true)
		{
			const auto state = ppu.state.fetch_sub(cpu_flag::signal);

			if (is_stopped(state) || state & cpu_flag::signal)
			{
				break;
			}

			ppu.state.wait(state);
		}

		// abort_socket() delivers EINTR through gpr[3]
		if (ppu.gpr[3] == static_cast<u64>(-SYS_NET_EINTR))
		{
			return -SYS_NET_EINTR;
		}
	}

	if (result >= 0 || result == -SYS_NET_EWOULDBLOCK)
	{
		return not_an_error(result);
	}

	return sys_net_error{result};
}
// Set a socket option (lv2 syscall). The option value is copied out of guest
// memory before being handed to the socket backend.
error_code sys_net_bnet_setsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optname, vm::cptr<void> optval, u32 optlen)
{
	ppu.state += cpu_flag::wait;

	// Log with the decoded option name where the level is known
	switch (level)
	{
	case SYS_NET_SOL_SOCKET:
		sys_net.warning("sys_net_bnet_setsockopt(s=%d, level=SYS_NET_SOL_SOCKET, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_socket_option>(optname), optval, optlen);
		break;
	case SYS_NET_IPPROTO_TCP:
		sys_net.warning("sys_net_bnet_setsockopt(s=%d, level=SYS_NET_IPPROTO_TCP, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_tcp_option>(optname), optval, optlen);
		break;
	case SYS_NET_IPPROTO_IP:
		sys_net.warning("sys_net_bnet_setsockopt(s=%d, level=SYS_NET_IPPROTO_IP, optname=%s, optval=*0x%x, optlen=%u)", s, static_cast<lv2_ip_option>(optname), optval, optlen);
		break;
	default:
		sys_net.warning("sys_net_bnet_setsockopt(s=%d, level=0x%x, optname=0x%x, optval=*0x%x, optlen=%u)", s, level, optname, optval, optlen);
		break;
	}

	// Reject a null option buffer before it is dereferenced below for logging
	// and copying (consistent with sys_net_bnet_getsockopt, which also
	// returns EINVAL for a null optval)
	if (!optval)
	{
		return -SYS_NET_EINVAL;
	}

	// Dump the raw option value for common sizes
	switch (optlen)
	{
	case 1:
		sys_net.warning("optval: 0x%02X", *static_cast<const u8*>(optval.get_ptr()));
		break;
	case 2:
		sys_net.warning("optval: 0x%04X", *static_cast<const be_t<u16>*>(optval.get_ptr()));
		break;
	case 4:
		sys_net.warning("optval: 0x%08X", *static_cast<const be_t<u32>*>(optval.get_ptr()));
		break;
	case 8:
		sys_net.warning("optval: 0x%016X", *static_cast<const be_t<u64>*>(optval.get_ptr()));
		break;
	}

	// Every supported option is at least 4 bytes wide
	if (optlen < sizeof(s32))
	{
		return -SYS_NET_EINVAL;
	}

	// Snapshot the option bytes so the backend never touches guest memory
	std::vector<u8> optval_copy(vm::_ptr<u8>(optval.addr()), vm::_ptr<u8>(optval.addr() + optlen));

	const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		return sock.setsockopt(level, optname, optval_copy);
	});

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return not_an_error(sock.ret);
}
// Shut down part of a full-duplex connection.
// 'how': 0 = receive side, 1 = send side, 2 = both.
error_code sys_net_bnet_shutdown(ppu_thread& ppu, s32 s, s32 how)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_shutdown(s=%d, how=%d)", s, how);

	// Anything outside [0, 2] is invalid
	if (how > 2 || how < 0)
	{
		return -SYS_NET_EINVAL;
	}

	const auto do_shutdown = [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock) -> s32
	{
		return sock.shutdown(how);
	};

	const auto sock = idm::check<lv2_socket>(s, do_shutdown);

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	if (sock.ret < 0)
	{
		return sys_net_error{sock.ret};
	}

	return CELL_OK;
}
// Create a new lv2 socket of the given family/type/protocol and register it
// with the ID manager. Returns the new socket descriptor on success.
error_code sys_net_bnet_socket(ppu_thread& ppu, lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_socket(family=%s, type=%s, protocol=%s)", family, type, protocol);

	// Only AF_INET is expected; log but proceed for anything else
	if (family != SYS_NET_AF_INET)
	{
		sys_net.error("sys_net_bnet_socket(): unknown family (%d)", family);
	}

	// Validate the socket type before constructing anything
	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	case SYS_NET_SOCK_RAW:
	case SYS_NET_SOCK_DGRAM_P2P:
	case SYS_NET_SOCK_STREAM_P2P:
		break;
	default:
		sys_net.error("sys_net_bnet_socket(): unsupported type (%d)", type);
		return -SYS_NET_EPROTONOSUPPORT;
	}

	std::shared_ptr<lv2_socket> new_socket;

	switch (type)
	{
	case SYS_NET_SOCK_STREAM:
	case SYS_NET_SOCK_DGRAM:
	{
		// Native sockets need an OS-level socket; creation can fail
		auto native = std::make_shared<lv2_socket_native>(family, type, protocol);

		if (const s32 err = native->create_socket(); err < 0)
		{
			return sys_net_error{err};
		}

		new_socket = std::move(native);
		break;
	}
	case SYS_NET_SOCK_RAW: new_socket = std::make_shared<lv2_socket_raw>(family, type, protocol); break;
	case SYS_NET_SOCK_DGRAM_P2P: new_socket = std::make_shared<lv2_socket_p2p>(family, type, protocol); break;
	case SYS_NET_SOCK_STREAM_P2P: new_socket = std::make_shared<lv2_socket_p2ps>(family, type, protocol); break;
	default: break;
	}

	const s32 fd = idm::import_existing<lv2_socket>(new_socket);

	// Can't allocate more than 1000 sockets
	if (fd == id_manager::id_traits<lv2_socket>::invalid)
	{
		return -SYS_NET_EMFILE;
	}

	new_socket->set_lv2_id(fd);

	return not_an_error(fd);
}
// Close a socket descriptor (lv2 syscall). Any threads still waiting on the
// socket are aborted first, and the last reference is dropped under the
// network-thread lock so no stale copy survives there.
error_code sys_net_bnet_close(ppu_thread& ppu, s32 s)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_bnet_close(s=%d)", s);

	// Remove the ID-manager entry; we now hold the (possibly last) reference
	auto sock = idm::withdraw<lv2_socket>(s);

	if (!sock)
	{
		return -SYS_NET_EBADF;
	}

	// Wake any threads still blocked on this socket with EINTR
	if (sock->get_queue_size())
	{
		sock->abort_socket(0);
	}

	sock->close();

	{
		// Ensures the socket has no lingering copy from the network thread
		std::lock_guard nw_lock(g_fxo->get<network_context>().mutex_thread_loop);
		sock.reset();
	}

	return CELL_OK;
}
// poll() over lv2 sockets (lv2 syscall).
// Phase 1: a single non-blocking host poll; if anything is ready (or ms == 0)
// the results are returned immediately. Phase 2: otherwise each socket gets a
// wake-up callback registered on its poll queue and the thread sleeps until
// an event fires, the timeout expires, or the wait is aborted (EINTR).
error_code sys_net_bnet_poll(ppu_thread& ppu, vm::ptr<sys_net_pollfd> fds, s32 nfds, s32 ms)
{
	ppu.state += cpu_flag::wait;

	sys_net.trace("sys_net_bnet_poll(fds=*0x%x, nfds=%d, ms=%d)", fds, nfds, ms);

	if (nfds <= 0)
	{
		return not_an_error(0);
	}

	// Incremented by poll callbacks running on other threads
	atomic_t<s32> signaled{0};

	// ms < 0 means wait forever (timeout 0 == infinite below)
	u64 timeout = ms < 0 ? 0 : ms * 1000ull;

	// Work on a local copy of the guest pollfd array
	std::vector<sys_net_pollfd> fds_buf;

	{
		fds_buf.assign(fds.get_ptr(), fds.get_ptr() + nfds);

		lv2_obj::prepare_for_sleep(ppu);

		std::unique_lock nw_lock(g_fxo->get<network_context>().mutex_thread_loop);
		std::shared_lock lock(id_manager::g_mutex);

		std::vector<::pollfd> _fds(nfds);
#ifdef _WIN32
		std::vector<bool> connecting(nfds);
#endif

		// Translate guest pollfds to host pollfds; invalid ids get POLLNVAL
		for (s32 i = 0; i < nfds; i++)
		{
			_fds[i].fd = -1;
			fds_buf[i].revents = 0;

			if (fds_buf[i].fd < 0)
			{
				continue;
			}

			if (auto sock = idm::check_unlocked<lv2_socket>(fds_buf[i].fd))
			{
				signaled += sock->poll(fds_buf[i], _fds[i]);
#ifdef _WIN32
				connecting[i] = sock->is_connecting();
#endif
			}
			else
			{
				fds_buf[i].revents |= SYS_NET_POLLNVAL;
				signaled++;
			}
		}

		// Phase 1: non-blocking host-level poll
#ifdef _WIN32
		windows_poll(_fds, nfds, 0, connecting);
#else
		::poll(_fds.data(), nfds, 0);
#endif

		// Map host revents back onto the guest flags
		for (s32 i = 0; i < nfds; i++)
		{
			if (_fds[i].revents & (POLLIN | POLLHUP))
				fds_buf[i].revents |= SYS_NET_POLLIN;
			if (_fds[i].revents & POLLOUT)
				fds_buf[i].revents |= SYS_NET_POLLOUT;
			if (_fds[i].revents & POLLERR)
				fds_buf[i].revents |= SYS_NET_POLLERR;

			if (fds_buf[i].revents)
			{
				signaled++;
			}
		}

		// Immediate return: zero timeout or something is already signaled
		if (ms == 0 || signaled)
		{
			lock.unlock();
			nw_lock.unlock();
			std::memcpy(fds.get_ptr(), fds_buf.data(), nfds * sizeof(sys_net_pollfd));
			return not_an_error(signaled);
		}

		// Phase 2: register wake-up callbacks on each polled socket
		for (s32 i = 0; i < nfds; i++)
		{
			if (fds_buf[i].fd < 0)
			{
				continue;
			}

			if (auto sock = idm::check_unlocked<lv2_socket>(fds_buf[i].fd))
			{
				auto lock = sock->lock();

#ifdef _WIN32
				sock->set_connecting(connecting[i]);
#endif
				// Error events are always watched in addition to the requested set
				bs_t<lv2_socket::poll_t> selected = +lv2_socket::poll_t::error;

				if (fds_buf[i].events & SYS_NET_POLLIN)
					selected += lv2_socket::poll_t::read;
				if (fds_buf[i].events & SYS_NET_POLLOUT)
					selected += lv2_socket::poll_t::write;
				// if (fds_buf[i].events & SYS_NET_POLLPRI) // Unimplemented
				//	selected += lv2_socket::poll::error;

				sock->poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), selected, [sock, selected, &fds_buf, i, &signaled, &ppu](bs_t<lv2_socket::poll_t> events)
				{
					if (events & selected)
					{
						if (events & selected & lv2_socket::poll_t::read)
							fds_buf[i].revents |= SYS_NET_POLLIN;
						if (events & selected & lv2_socket::poll_t::write)
							fds_buf[i].revents |= SYS_NET_POLLOUT;
						if (events & selected & lv2_socket::poll_t::error)
							fds_buf[i].revents |= SYS_NET_POLLERR;

						signaled++;
						sock->queue_wake(&ppu);
						return true;
					}

					sock->set_poll_event(selected);
					return false;
				});
			}
		}

		lv2_obj::sleep(ppu, timeout);
	}

	// Wait until a callback signals us, the timeout fires, or emulation stops
	bool has_timedout = false;

	while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
	{
		if (is_stopped(state))
		{
			return {};
		}

		if (state & cpu_flag::signal)
		{
			break;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					return {};
				}

				has_timedout = network_clear_queue(ppu);
				ppu.state -= cpu_flag::signal;
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	// Woken without a timeout and without any event: the wait was aborted
	if (!has_timedout && !signaled)
	{
		return -SYS_NET_EINTR;
	}

	std::memcpy(fds.get_ptr(), fds_buf.data(), nfds * sizeof(fds[0]));
	return not_an_error(signaled);
}
// select() over lv2 sockets (lv2 syscall). Socket ids map to fd_set bit
// positions through the low bits of the lv2 id. Like sys_net_bnet_poll this
// runs a non-blocking first pass and falls back to per-socket wake-up
// callbacks plus a timed sleep. exceptfds is accepted but not implemented.
error_code sys_net_bnet_select(ppu_thread& ppu, s32 nfds, vm::ptr<sys_net_fd_set> readfds, vm::ptr<sys_net_fd_set> writefds, vm::ptr<sys_net_fd_set> exceptfds, vm::ptr<sys_net_timeval> _timeout)
{
	ppu.state += cpu_flag::wait;

	sys_net.trace("sys_net_bnet_select(nfds=%d, readfds=*0x%x, writefds=*0x%x, exceptfds=*0x%x, timeout=*0x%x(%d:%d))", nfds, readfds, writefds, exceptfds, _timeout, _timeout ? _timeout->tv_sec.value() : 0, _timeout ? _timeout->tv_usec.value() : 0);

	// Incremented by poll callbacks running on other threads
	atomic_t<s32> signaled{0};

	if (exceptfds)
	{
		// Warn once per emulation session about the unimplemented parameter
		struct log_t
		{
			atomic_t<bool> logged = false;
		};

		if (!g_fxo->get<log_t>().logged.exchange(true))
		{
			sys_net.error("sys_net_bnet_select(): exceptfds not implemented");
		}
	}

	// r* accumulate the result sets, _* hold local copies of the inputs
	sys_net_fd_set rread{}, _readfds{};
	sys_net_fd_set rwrite{}, _writefds{};
	sys_net_fd_set rexcept{}, _exceptfds{};

	// A null timeout means wait forever (0 == infinite below)
	u64 timeout = !_timeout ? 0 : _timeout->tv_sec * 1000000ull + _timeout->tv_usec;

	if (nfds > 0 && nfds <= 1024)
	{
		if (readfds)
			_readfds = *readfds;
		if (writefds)
			_writefds = *writefds;
		if (exceptfds)
			_exceptfds = *exceptfds;

		std::lock_guard nw_lock(g_fxo->get<network_context>().mutex_thread_loop);
		reader_lock lock(id_manager::g_mutex);

		std::vector<::pollfd> _fds(nfds);
#ifdef _WIN32
		std::vector<bool> connecting(nfds);
#endif

		// First pass: gather immediately-available events per watched bit
		for (s32 i = 0; i < nfds; i++)
		{
			_fds[i].fd = -1;
			bs_t<lv2_socket::poll_t> selected{};

			if (readfds && _readfds.bit(i))
				selected += lv2_socket::poll_t::read;
			if (writefds && _writefds.bit(i))
				selected += lv2_socket::poll_t::write;
			// if (exceptfds && _exceptfds.bit(i))
			//	selected += lv2_socket::poll::error;

			if (selected)
			{
				// Errors are always watched in addition to the requested set
				selected += lv2_socket::poll_t::error;
			}
			else
			{
				continue;
			}

			// Reconstruct the lv2 socket id from the fd_set bit index
			if (auto sock = idm::check_unlocked<lv2_socket>((lv2_socket::id_base & -1024) + i))
			{
				auto [read_set, write_set, except_set] = sock->select(selected, _fds[i]);

				if (read_set || write_set || except_set)
				{
					signaled++;
				}

				if (read_set)
				{
					rread.set(i);
				}

				if (write_set)
				{
					rwrite.set(i);
				}

				if (except_set)
				{
					rexcept.set(i);
				}

#ifdef _WIN32
				connecting[i] = sock->is_connecting();
#endif
			}
			else
			{
				return -SYS_NET_EBADF;
			}
		}

		// Non-blocking host-level poll for the native sockets
#ifdef _WIN32
		windows_poll(_fds, nfds, 0, connecting);
#else
		::poll(_fds.data(), nfds, 0);
#endif

		for (s32 i = 0; i < nfds; i++)
		{
			bool sig = false;
			if (_fds[i].revents & (POLLIN | POLLHUP | POLLERR))
				sig = true, rread.set(i);
			if (_fds[i].revents & (POLLOUT | POLLERR))
				sig = true, rwrite.set(i);

			if (sig)
			{
				signaled++;
			}
		}

		// Immediate return: zero timeout or something is already signaled
		if ((_timeout && !timeout) || signaled)
		{
			if (readfds)
				*readfds = rread;
			if (writefds)
				*writefds = rwrite;
			if (exceptfds)
				*exceptfds = rexcept;
			return not_an_error(signaled);
		}

		// Second pass: register wake-up callbacks on each watched socket
		for (s32 i = 0; i < nfds; i++)
		{
			bs_t<lv2_socket::poll_t> selected{};

			if (readfds && _readfds.bit(i))
				selected += lv2_socket::poll_t::read;
			if (writefds && _writefds.bit(i))
				selected += lv2_socket::poll_t::write;
			// if (exceptfds && _exceptfds.bit(i))
			//	selected += lv2_socket::poll_t::error;

			if (selected)
			{
				selected += lv2_socket::poll_t::error;
			}
			else
			{
				continue;
			}

			if (auto sock = idm::check_unlocked<lv2_socket>((lv2_socket::id_base & -1024) + i))
			{
				auto lock = sock->lock();

#ifdef _WIN32
				sock->set_connecting(connecting[i]);
#endif

				sock->poll_queue(idm::get_unlocked<named_thread<ppu_thread>>(ppu.id), selected, [sock, selected, i, &rread, &rwrite, &rexcept, &signaled, &ppu](bs_t<lv2_socket::poll_t> events)
				{
					if (events & selected)
					{
						if (selected & lv2_socket::poll_t::read && events & (lv2_socket::poll_t::read + lv2_socket::poll_t::error))
							rread.set(i);
						if (selected & lv2_socket::poll_t::write && events & (lv2_socket::poll_t::write + lv2_socket::poll_t::error))
							rwrite.set(i);
						// if (events & (selected & lv2_socket::poll::error))
						//	rexcept.set(i);

						signaled++;
						sock->queue_wake(&ppu);
						return true;
					}

					sock->set_poll_event(selected);
					return false;
				});
			}
			else
			{
				return -SYS_NET_EBADF;
			}
		}

		lv2_obj::sleep(ppu, timeout);
	}
	else
	{
		return -SYS_NET_EINVAL;
	}

	// Wait until a callback signals us, the timeout fires, or emulation stops
	bool has_timedout = false;

	while (auto state = ppu.state.fetch_sub(cpu_flag::signal))
	{
		if (is_stopped(state))
		{
			return {};
		}

		if (state & cpu_flag::signal)
		{
			break;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					return {};
				}

				has_timedout = network_clear_queue(ppu);
				ppu.state -= cpu_flag::signal;
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	// Woken without a timeout and without any event: the wait was aborted
	if (!has_timedout && !signaled)
	{
		return -SYS_NET_EINTR;
	}

	if (readfds)
		*readfds = rread;
	if (writefds)
		*writefds = rwrite;
	if (exceptfds)
		*exceptfds = rexcept;

	return not_an_error(signaled);
}
// TODO: network packet-dump facility — not implemented, stubbed to succeed.
error_code _sys_net_open_dump(ppu_thread& ppu, s32 len, s32 flags)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("_sys_net_open_dump(len=%d, flags=0x%x)", len, flags);
	return CELL_OK;
}
// TODO: read from a packet dump — not implemented, stubbed to succeed.
error_code _sys_net_read_dump(ppu_thread& ppu, s32 id, vm::ptr<void> buf, s32 len, vm::ptr<s32> pflags)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("_sys_net_read_dump(id=0x%x, buf=*0x%x, len=%d, pflags=*0x%x)", id, buf, len, pflags);
	return CELL_OK;
}
// TODO: close a packet dump — not implemented, stubbed to succeed.
error_code _sys_net_close_dump(ppu_thread& ppu, s32 id, vm::ptr<s32> pflags)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("_sys_net_close_dump(id=0x%x, pflags=*0x%x)", id, pflags);
	return CELL_OK;
}
// TODO: write to a packet dump — not implemented, stubbed to succeed.
error_code _sys_net_write_dump(ppu_thread& ppu, s32 id, vm::cptr<void> buf, s32 len, u32 unknown)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("_sys_net_write_dump(id=0x%x, buf=*0x%x, len=%d, unk=0x%x)", id, buf, len, unknown);
	return CELL_OK;
}
// Abort every PPU thread currently blocked on this socket: each waiter is
// removed from the poll queue and woken with EINTR delivered through gpr[3].
// Called from sys_net_abort and from sys_net_bnet_close.
error_code lv2_socket::abort_socket(s32 flags)
{
	// Detach the wait queue under the socket lock, then wake the threads
	// outside of it
	decltype(queue) qcopy;
	{
		std::lock_guard lock(mutex);

		if (queue.empty())
		{
			if (flags & SYS_NET_ABORT_STRICT_CHECK)
			{
				// Strict error checking: ENOENT if nothing happened
				return -SYS_NET_ENOENT;
			}

			// TODO: Abort the subsequent function called on this socket (need to investigate correct behaviour)
			return CELL_OK;
		}

		qcopy = std::move(queue);
		queue = {};
		events.store({});
	}

	for (auto& [ppu, _] : qcopy)
	{
		if (!ppu)
			continue;

		sys_net.warning("lv2_socket::abort_socket(): waking up \"%s\": (func: %s, r3=0x%x, r4=0x%x, r5=0x%x, r6=0x%x)", ppu->get_name(), ppu->current_function, ppu->gpr[3], ppu->gpr[4], ppu->gpr[5], ppu->gpr[6]);
		// The blocked syscall inspects gpr[3] on wakeup and returns EINTR
		ppu->gpr[3] = static_cast<u64>(-SYS_NET_EINTR);
		lv2_obj::append(ppu.get());
	}

	const u32 num_waiters = ::size32(qcopy);

	// Keep the network context's poll counter in sync with the removed waiters
	if (num_waiters && (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM))
	{
		auto& nc = g_fxo->get<network_context>();
		const u32 prev_value = nc.num_polls.fetch_sub(num_waiters);
		ensure(prev_value >= num_waiters);
	}

	lv2_obj::awake_all();
	return CELL_OK;
}
// Abort in-flight network operations (lv2 syscall).
// type == _socket aborts waiters on one socket (arg = socket id);
// type == all force-closes every socket. The resolver/type_2/type_3 variants
// are accepted but currently do nothing.
error_code sys_net_abort(ppu_thread& ppu, s32 type, u64 arg, s32 flags)
{
	ppu.state += cpu_flag::wait;

	sys_net.warning("sys_net_abort(type=%d, arg=0x%x, flags=0x%x)", type, arg, flags);

	enum abort_type : s32
	{
		_socket,
		resolver,
		type_2, // ??
		type_3, // ??
		all,
	};

	switch (type)
	{
	case _socket:
	{
		// Hold the network-thread lock so the abort cannot race its poll loop
		std::lock_guard nw_lock(g_fxo->get<network_context>().mutex_thread_loop);

		const auto sock = idm::get<lv2_socket>(static_cast<u32>(arg));

		if (!sock)
		{
			return -SYS_NET_EBADF;
		}

		return sock->abort_socket(flags);
	}
	case all:
	{
		// Snapshot the ids first, then withdraw and close each socket
		std::vector<u32> sockets;
		idm::select<lv2_socket>([&](u32 id, lv2_socket&)
		{
			sockets.emplace_back(id);
		});

		s32 failed = 0;

		for (u32 id : sockets)
		{
			const auto sock = idm::withdraw<lv2_socket>(id);

			if (!sock)
			{
				// Already removed by someone else between select and withdraw
				failed++;
				continue;
			}

			if (sock->get_queue_size())
				sys_net.error("ABORT 4");

			sock->close();
			sys_net.success("lv2_socket::handle_abort(): Closed socket %d", id);
		}

		// Ensures the socket has no lingering copy from the network thread
		g_fxo->get<network_context>().mutex_thread_loop.lock_unlock();

		// Return the number of sockets actually closed
		return not_an_error(::narrow<s32>(sockets.size()) - failed);
	}
	case resolver:
	case type_2:
	case type_3:
	{
		break;
	}
	default: return -SYS_NET_EINVAL;
	}

	return CELL_OK;
}
// Guest-side argument layout for sys_net_infoctl command 9 (DNS server query).
struct net_infoctl_cmd_9_t
{
	be_t<u32> zero;              // written as 0 by the handler
	vm::bptr<char> server_name;  // guest buffer receiving the "nameserver <ip>" string
	// More (TODO)
};
// Network information control (lv2 syscall). Only command 9 is handled: it
// writes a resolv.conf-style "nameserver <ip>" line into the guest buffer.
error_code sys_net_infoctl(ppu_thread& ppu, s32 cmd, vm::ptr<void> arg)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_infoctl(cmd=%d, arg=*0x%x)", cmd, arg);

	// TODO
	switch (cmd)
	{
	case 9:
	{
		// The literal carries an explicit '\0' so nameserver.size() includes it;
		// the IP string is written over that terminator at size() - 1
		constexpr auto nameserver = "nameserver \0"sv;

		// Zero-initialized scratch buffer: bytes past the IP remain '\0'
		char buffer[nameserver.size() + 80]{};
		std::memcpy(buffer, nameserver.data(), nameserver.size());

		auto& nph = g_fxo->get<named_thread<np::np_handler>>();
		const auto dns_str = np::ip_to_string(nph.get_dns_ip());
		std::memcpy(buffer + nameserver.size() - 1, dns_str.data(), dns_str.size());

		std::string_view name{buffer};
		vm::static_ptr_cast<net_infoctl_cmd_9_t>(arg)->zero = 0;
		// NOTE(review): copies name.size() bytes without a trailing '\0' —
		// confirm the guest does not expect a null-terminated string here
		std::memcpy(vm::static_ptr_cast<net_infoctl_cmd_9_t>(arg)->server_name.get_ptr(), name.data(), name.size());
		break;
	}
	default: break;
	}

	return CELL_OK;
}
// TODO: unknown network control syscall — logged and stubbed to succeed.
error_code sys_net_control(ppu_thread& ppu, u32 arg1, s32 arg2, vm::ptr<void> arg3, s32 arg4)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_control(0x%x, %d, *0x%x, %d)", arg1, arg2, arg3, arg4);
	return CELL_OK;
}
// TODO: socket ioctl — not implemented, stubbed to succeed.
error_code sys_net_bnet_ioctl(ppu_thread& ppu, s32 arg1, u32 arg2, u32 arg3)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_bnet_ioctl(%d, 0x%x, 0x%x)", arg1, arg2, arg3);
	return CELL_OK;
}
// TODO: network sysctl — not implemented, stubbed to succeed.
error_code sys_net_bnet_sysctl(ppu_thread& ppu, u32 arg1, u32 arg2, u32 arg3, vm::ptr<void> arg4, u32 arg5, u32 arg6)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_bnet_sysctl(0x%x, 0x%x, 0x%x, *0x%x, 0x%x, 0x%x)", arg1, arg2, arg3, arg4, arg5, arg6);
	return CELL_OK;
}
// TODO: EURUS (wireless controller chip) command — not implemented, stubbed.
error_code sys_net_eurus_post_command(ppu_thread& ppu, s32 arg1, u32 arg2, u32 arg3)
{
	ppu.state += cpu_flag::wait;

	sys_net.todo("sys_net_eurus_post_command(%d, 0x%x, 0x%x)", arg1, arg2, arg3);
	return CELL_OK;
}
| 43,876
|
C++
|
.cpp
| 1,548
| 24.982558
| 246
| 0.648484
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,354
|
sys_rsxaudio.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rsxaudio.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/IdManager.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu//Cell/Modules/cellAudioOut.h"
#include "util/video_provider.h"
#include "sys_process.h"
#include "sys_rsxaudio.h"
#include <cmath>
#include <bitset>
#include <optional>
#ifdef __linux__
#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <unistd.h>
#elif defined(BSD) || defined(__APPLE__)
#include <unistd.h>
#endif
LOG_CHANNEL(sys_rsxaudio);
extern atomic_t<recording_mode> g_recording_mode;
// Helpers for reading the rsxaudio ring buffers that live in guest-shared
// memory. Because the control fields (read_idx, rw_max_idx, ...) are guest
// writable, every index derived from them is clamped into the bounds of the
// entries[] array before use.
namespace rsxaudio_ringbuf_reader
{
	// Reset a ring buffer to its pristine state (no valid entries).
	static constexpr void clean_buf(rsxaudio_shmem::ringbuf_t& ring_buf)
	{
		ring_buf.unk2 = 100;
		ring_buf.read_idx = 0;
		ring_buf.write_idx = 0;
		ring_buf.queue_notify_idx = 0;
		ring_buf.next_blk_idx = 0;

		for (auto& ring_entry : ring_buf.entries)
		{
			ring_entry.valid = 0;
			ring_entry.audio_blk_idx = 0;
			ring_entry.timestamp = 0;
		}
	}

	// Record the consumption timestamp on the most recently consumed entry.
	static void set_timestamp(rsxaudio_shmem::ringbuf_t& ring_buf, u64 timestamp)
	{
		const s32 entry_idx_raw = (ring_buf.read_idx + ring_buf.rw_max_idx - (ring_buf.rw_max_idx > 2) - 1) % ring_buf.rw_max_idx;
		// BUGFIX: clamp to the last valid index (SZ - 1); clamping to SZ
		// inclusive allowed a one-past-the-end access into entries[]
		const s32 entry_idx = std::clamp<s32>(entry_idx_raw, 0, SYS_RSXAUDIO_RINGBUF_SZ - 1);
		ring_buf.entries[entry_idx].timestamp = convert_to_timebased_time(timestamp);
	}

	// Consume the entry at read_idx if it is valid. Returns whether the guest
	// should be notified, plus the block index and timestamp of the entry
	// that completes with this read.
	static std::tuple<bool /*notify*/, u64 /*blk_idx*/, u64 /*timestamp*/> update_status(rsxaudio_shmem::ringbuf_t& ring_buf)
	{
		const s32 read_idx = std::clamp<s32>(ring_buf.read_idx, 0, SYS_RSXAUDIO_RINGBUF_SZ - 1);

		if ((ring_buf.entries[read_idx].valid & 1) == 0U)
		{
			return {};
		}

		const s32 entry_idx_raw = (ring_buf.read_idx + ring_buf.rw_max_idx - (ring_buf.rw_max_idx > 2)) % ring_buf.rw_max_idx;
		const s32 entry_idx = std::clamp<s32>(entry_idx_raw, 0, SYS_RSXAUDIO_RINGBUF_SZ - 1);

		ring_buf.entries[read_idx].valid = 0;
		ring_buf.queue_notify_idx = (ring_buf.queue_notify_idx + 1) % ring_buf.queue_notify_step;
		ring_buf.read_idx = (ring_buf.read_idx + 1) % ring_buf.rw_max_idx;

		return std::make_tuple(((ring_buf.rw_max_idx > 2) ^ ring_buf.queue_notify_idx) == 0, ring_buf.entries[entry_idx].audio_blk_idx, ring_buf.entries[entry_idx].timestamp);
	}

	// Get the DMA address of the current entry, or of the silence region if
	// the current entry is not valid.
	static std::pair<bool /*entry_valid*/, u32 /*addr*/> get_addr(const rsxaudio_shmem::ringbuf_t& ring_buf)
	{
		const s32 read_idx = std::clamp<s32>(ring_buf.read_idx, 0, SYS_RSXAUDIO_RINGBUF_SZ - 1);

		if (ring_buf.entries[read_idx].valid & 1)
		{
			return std::make_pair(true, ring_buf.entries[read_idx].dma_addr);
		}

		return std::make_pair(false, ring_buf.dma_silence_addr);
	}

	// Pop one 64-bit S/PDIF channel-status word for the given port, if any
	// transmission cycles remain pending.
	[[maybe_unused]]
	static std::optional<u64> get_spdif_channel_data(RsxaudioPort dst, rsxaudio_shmem& shmem)
	{
		if (dst == RsxaudioPort::SPDIF_0)
		{
			if (shmem.ctrl.spdif_ch0_channel_data_tx_cycles)
			{
				shmem.ctrl.spdif_ch0_channel_data_tx_cycles--;
				return static_cast<u64>(shmem.ctrl.spdif_ch0_channel_data_hi) << 32 | shmem.ctrl.spdif_ch0_channel_data_lo;
			}
		}
		else
		{
			if (shmem.ctrl.spdif_ch1_channel_data_tx_cycles)
			{
				shmem.ctrl.spdif_ch1_channel_data_tx_cycles--;
				return static_cast<u64>(shmem.ctrl.spdif_ch1_channel_data_hi) << 32 | shmem.ctrl.spdif_ch1_channel_data_lo;
			}
		}

		return std::nullopt;
	}
}
// Savestate deserialization constructor: restores the shared-memory address
// and the three event-queue references only when the object was initialized
// at save time.
lv2_rsxaudio::lv2_rsxaudio(utils::serial& ar) noexcept
	: lv2_obj{1}
	, init(ar)
{
	if (init)
	{
		ar(shmem);

		for (auto& port : event_queue)
		{
			port = lv2_event_queue::load_ptr(ar, port, "rsxaudio");
		}
	}
}
// Savestate serialization: mirrors the deserialization constructor above.
void lv2_rsxaudio::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(LLE);

	ar(init);

	if (init)
	{
		ar(shmem);

		for (const auto& port : event_queue)
		{
			lv2_event_queue::save_ptr(ar, port.get());
		}
	}
}
// Create the (single) rsxaudio context: allocate its id, its shared-memory
// page, and initialize the control block. Every failure path rolls back the
// 'context allocated' flag so a later attempt can succeed.
error_code sys_rsxaudio_initialize(vm::ptr<u32> handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_initialize(handle=*0x%x)", handle);

	auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();

	// Only one rsxaudio context may exist at a time
	if (rsxaudio_thread.rsxaudio_ctx_allocated.test_and_set())
	{
		return CELL_EINVAL;
	}

	if (!vm::check_addr(handle.addr(), vm::page_writable, sizeof(u32)))
	{
		rsxaudio_thread.rsxaudio_ctx_allocated = false;
		return CELL_EFAULT;
	}

	const u32 id = idm::make<lv2_obj, lv2_rsxaudio>();

	if (!id)
	{
		rsxaudio_thread.rsxaudio_ctx_allocated = false;
		return CELL_ENOMEM;
	}

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(id);

	std::lock_guard lock(rsxaudio_obj->mutex);

	// Allocate the guest page shared between lv2 and the guest audio driver
	rsxaudio_obj->shmem = vm::addr_t{vm::alloc(sizeof(rsxaudio_shmem), vm::main)};

	if (!rsxaudio_obj->shmem)
	{
		idm::remove<lv2_obj, lv2_rsxaudio>(id);
		rsxaudio_thread.rsxaudio_ctx_allocated = false;
		return CELL_ENOMEM;
	}

	// Page stays locked until sys_rsxaudio_import_shared_memory()
	rsxaudio_obj->page_lock();

	rsxaudio_shmem* sh_page = rsxaudio_obj->get_rw_shared_page();
	sh_page->ctrl = {};

	for (auto& uf : sh_page->ctrl.channel_uf)
	{
		uf.uf_event_cnt = 0;
		uf.unk1 = 0;
	}

	sh_page->ctrl.unk4 = 0x8000;
	sh_page->ctrl.intr_thread_prio = 0xDEADBEEF;
	sh_page->ctrl.unk5 = 0;

	rsxaudio_obj->init = true;
	*handle = id;

	return CELL_OK;
}
// Tear down the rsxaudio context: detach it from the audio backend thread,
// free the shared page, remove the id and clear the allocation flag.
error_code sys_rsxaudio_finalize(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_finalize(handle=0x%x)", handle);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();

	{
		// Drop the backend thread's reference before freeing anything
		std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};
		rsxaudio_thread.rsxaudio_obj_ptr = {};
	}

	rsxaudio_obj->init = false;
	vm::dealloc(rsxaudio_obj->shmem, vm::main);
	idm::remove<lv2_obj, lv2_rsxaudio>(handle);

	// Allow a new context to be created
	rsxaudio_thread.rsxaudio_ctx_allocated = false;

	return CELL_OK;
}
// Expose the shared-memory page to the guest: report its address and unlock
// the page so the guest driver can access it.
error_code sys_rsxaudio_import_shared_memory(u32 handle, vm::ptr<u64> addr)
{
	sys_rsxaudio.trace("sys_rsxaudio_import_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);

	const auto obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(obj->mutex);

	if (!obj->init)
	{
		return CELL_ESRCH;
	}

	if (!vm::check_addr(addr.addr(), vm::page_writable, sizeof(u64)))
	{
		return CELL_EFAULT;
	}

	*addr = obj->shmem;
	obj->page_unlock();

	return CELL_OK;
}
// Revoke guest access to the shared-memory page ('addr' is unused).
error_code sys_rsxaudio_unimport_shared_memory(u32 handle, vm::ptr<u64> addr /* unused */)
{
	sys_rsxaudio.trace("sys_rsxaudio_unimport_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);

	const auto obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(obj->mutex);

	if (!obj->init)
	{
		return CELL_ESRCH;
	}

	// Lock the page again; the guest may no longer touch it
	obj->page_lock();

	return CELL_OK;
}
// Bind the rsxaudio context to the three guest-created event queues (whose
// ids the guest wrote into the shared page) and fill in the per-port DMA
// addresses of the ring-buffer entries.
error_code sys_rsxaudio_create_connection(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_create_connection(handle=0x%x)", handle);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	rsxaudio_shmem* sh_page = rsxaudio_obj->get_rw_shared_page();

	// Resolve all three queues; fail with ESRCH if any of them is missing
	const error_code port_create_status = [&]() -> error_code
	{
		if (auto queue1 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_1_id))
		{
			rsxaudio_obj->event_queue[0] = queue1;

			if (auto queue2 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_2_id))
			{
				rsxaudio_obj->event_queue[1] = queue2;

				if (auto queue3 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_3_id))
				{
					rsxaudio_obj->event_queue[2] = queue3;
					return CELL_OK;
				}
			}
		}

		return CELL_ESRCH;
	}();

	if (port_create_status != CELL_OK)
	{
		return port_create_status;
	}

	// Every ring buffer falls back to the shared silence region
	for (auto& rb : sh_page->ctrl.ringbuf)
	{
		rb.dma_silence_addr = rsxaudio_obj->dma_io_base + offsetof(rsxaudio_shmem, dma_silence_region);
		rb.unk2 = 100;
	}

	// Precompute each entry's DMA address inside the port's data region
	for (u32 entry_idx = 0; entry_idx < SYS_RSXAUDIO_RINGBUF_SZ; entry_idx++)
	{
		sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SERIAL)].entries[entry_idx].dma_addr = rsxaudio_obj->dma_io_base + u32{offsetof(rsxaudio_shmem, dma_serial_region)} + SYS_RSXAUDIO_RINGBUF_BLK_SZ_SERIAL * entry_idx;
		sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SPDIF_0)].entries[entry_idx].dma_addr = rsxaudio_obj->dma_io_base + u32{offsetof(rsxaudio_shmem, dma_spdif_0_region)} + SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF * entry_idx;
		sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SPDIF_1)].entries[entry_idx].dma_addr = rsxaudio_obj->dma_io_base + u32{offsetof(rsxaudio_shmem, dma_spdif_1_region)} + SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF * entry_idx;
	}

	return CELL_OK;
}
// Detach the context from the audio backend and drop the event-queue
// references acquired by sys_rsxaudio_create_connection().
error_code sys_rsxaudio_close_connection(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_close_connection(handle=0x%x)", handle);

	const auto obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(obj->mutex);

	if (!obj->init)
	{
		return CELL_ESRCH;
	}

	{
		// Drop the backend thread's reference to this context
		auto& ra_data = g_fxo->get<rsx_audio_data>();
		std::lock_guard obj_upd_lock{ra_data.rsxaudio_obj_upd_m};
		ra_data.rsxaudio_obj_ptr = {};
	}

	// Release all event queue references
	for (auto& queue : obj->event_queue)
	{
		queue.reset();
	}

	return CELL_OK;
}
// Syscall: registers this rsxaudio object with the data thread. Only one
// object may be registered at a time.
error_code sys_rsxaudio_prepare_process(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_prepare_process(handle=0x%x)", handle);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();
	std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};

	if (rsxaudio_thread.rsxaudio_obj_ptr)
	{
		// Another object already registered. NOTE(review): raw -1 rather than
		// a named CELL_* constant - presumably matches lv1; confirm.
		return -1;
	}

	rsxaudio_thread.rsxaudio_obj_ptr = rsxaudio_obj;
	return CELL_OK;
}
// Syscall: starts audio DMA. Cleans active ringbuffers, resets underflow
// counters, enables DMA for active ports and kicks the guest with an initial
// event on each active port's queue.
error_code sys_rsxaudio_start_process(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_start_process(handle=0x%x)", handle);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	rsxaudio_shmem* sh_page = rsxaudio_obj->get_rw_shared_page();

	// Discard any stale blocks from previously active ringbuffers.
	for (auto& rb : sh_page->ctrl.ringbuf)
	{
		if (rb.active) rsxaudio_ringbuf_reader::clean_buf(rb);
	}

	// Reset per-channel underflow bookkeeping.
	for (auto& uf : sh_page->ctrl.channel_uf)
	{
		uf.uf_event_cnt = 0;
		uf.unk1 = 0;
	}

	// Enable DMA only for the ports whose ringbuffers the guest activated.
	auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();
	rsxaudio_thread.update_hw_param([&](auto& param)
	{
		if (sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SERIAL)].active) param.serial.dma_en = true;
		if (sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SPDIF_0)].active) param.spdif[0].dma_en = true;
		if (sh_page->ctrl.ringbuf[static_cast<u32>(RsxaudioPort::SPDIF_1)].active) param.spdif[1].dma_en = true;
	});

	// Notify the guest that each active port has started.
	for (u32 q_idx = 0; q_idx < SYS_RSXAUDIO_PORT_CNT; q_idx++)
	{
		if (const auto& queue = rsxaudio_obj->event_queue[q_idx]; queue && sh_page->ctrl.ringbuf[q_idx].active)
		{
			queue->send(rsxaudio_obj->event_port_name[q_idx], q_idx, 0, 0);
		}
	}

	return CELL_OK;
}
// Syscall: stops audio DMA. Disables and mutes the ports (SPDIF ports slaved
// to the serial buffer keep their enable bit) and cleans active ringbuffers.
error_code sys_rsxaudio_stop_process(u32 handle)
{
	sys_rsxaudio.trace("sys_rsxaudio_stop_process(handle=0x%x)", handle);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard<shared_mutex> lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();
	rsxaudio_thread.update_hw_param([&](auto& param)
	{
		param.serial.dma_en = false;
		param.serial.muted = true;
		param.serial.en = false;

		for (auto& spdif : param.spdif)
		{
			spdif.dma_en = false;

			// Ports fed from the serial buffer have no independent enable.
			if (!spdif.use_serial_buf)
			{
				spdif.en = false;
			}
		}

		// NOTE(review): only spdif[1] is muted here, not spdif[0] -
		// presumably matches lv1 behavior; confirm on hardware.
		param.spdif[1].muted = true;
	});

	rsxaudio_shmem* sh_page = rsxaudio_obj->get_rw_shared_page();

	// Drop any blocks still queued in active ringbuffers.
	for (auto& rb : sh_page->ctrl.ringbuf)
	{
		if (rb.active) rsxaudio_ringbuf_reader::clean_buf(rb);
	}

	return CELL_OK;
}
// Syscall: queries DMA mapping parameters (IO id or IO base address).
// Unknown flags leave *out untouched but still return CELL_OK.
error_code sys_rsxaudio_get_dma_param(u32 handle, u32 flag, vm::ptr<u64> out)
{
	sys_rsxaudio.trace("sys_rsxaudio_get_dma_param(handle=0x%x, flag=0x%x, out=0x%x)", handle, flag, out);

	const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);

	if (!rsxaudio_obj)
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		return CELL_ESRCH;
	}

	// Validate the guest output pointer before writing through it.
	if (!vm::check_addr(out.addr(), vm::page_writable, sizeof(u64)))
	{
		return CELL_EFAULT;
	}

	if (flag == rsxaudio_dma_flag::IO_ID)
	{
		*out = rsxaudio_obj->dma_io_id;
	}
	else if (flag == rsxaudio_dma_flag::IO_BASE)
	{
		*out = rsxaudio_obj->dma_io_base;
	}

	return CELL_OK;
}
// Builds the per-avport data-availability map for one batch of decoded audio.
// SPDIF ports may be slaved to the serial buffer (use_serial_buf) and HDMI
// ports may source either serial or SPDIF_1 (use_spdif_1); availability is
// propagated accordingly. hdmi_stream_cnt is derived from the configured
// channel count (2 channels per stream).
rsxaudio_data_container::rsxaudio_data_container(const rsxaudio_hw_param_t& hw_param, const buf_t& buf, bool serial_rdy, bool spdif_0_rdy, bool spdif_1_rdy) : hwp(hw_param), out_buf(buf)
{
	if (serial_rdy)
	{
		avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::AVMULTI)] = true;

		// Serial data also feeds SPDIF ports configured to reuse it.
		if (hwp.spdif[0].use_serial_buf)
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_0)] = true;
		}

		if (hwp.spdif[1].use_serial_buf)
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)] = true;
		}
	}

	// Dedicated SPDIF buffers count only when the port is not serial-slaved.
	if (spdif_0_rdy && !hwp.spdif[0].use_serial_buf)
	{
		avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_0)] = true;
	}

	if (spdif_1_rdy && !hwp.spdif[1].use_serial_buf)
	{
		avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)] = true;
	}

	if (hwp.hdmi[0].init)
	{
		if (hwp.hdmi[0].use_spdif_1)
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::HDMI_0)] = avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)];
		}
		else
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::HDMI_0)] = serial_rdy;
		}

		hdmi_stream_cnt[0] = static_cast<u8>(hwp.hdmi[0].ch_cfg.total_ch_cnt) / SYS_RSXAUDIO_CH_PER_STREAM;
	}

	if (hwp.hdmi[1].init)
	{
		if (hwp.hdmi[1].use_spdif_1)
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::HDMI_1)] = avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)];
		}
		else
		{
			avport_data_avail[static_cast<u8>(RsxaudioAvportIdx::HDMI_1)] = serial_rdy;
		}

		hdmi_stream_cnt[1] = static_cast<u8>(hwp.hdmi[1].ch_cfg.total_ch_cnt) / SYS_RSXAUDIO_CH_PER_STREAM;
	}
}
// Returns the number of output bytes available for the given avport, or 0 if
// no data is pending. 16-bit source samples expand to twice the stream size
// once converted to float; HDMI scales by its stream count.
u32 rsxaudio_data_container::get_data_size(RsxaudioAvportIdx avport)
{
	if (!avport_data_avail[static_cast<u8>(avport)])
	{
		return 0;
	}

	switch (avport)
	{
	case RsxaudioAvportIdx::HDMI_0:
	{
		// Sample depth comes from whichever source port feeds this HDMI output.
		const RsxaudioSampleSize depth = hwp.hdmi[0].use_spdif_1 ? hwp.spdif[1].depth : hwp.serial.depth;
		return (depth == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SIZE * 2 : SYS_RSXAUDIO_STREAM_SIZE) * hdmi_stream_cnt[0];
	}
	case RsxaudioAvportIdx::HDMI_1:
	{
		const RsxaudioSampleSize depth = hwp.hdmi[1].use_spdif_1 ? hwp.spdif[1].depth : hwp.serial.depth;
		return (depth == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SIZE * 2 : SYS_RSXAUDIO_STREAM_SIZE) * hdmi_stream_cnt[1];
	}
	case RsxaudioAvportIdx::AVMULTI:
	{
		// AVMULTI always sources the serial port (stereo).
		return hwp.serial.depth == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SIZE * 2 : SYS_RSXAUDIO_STREAM_SIZE;
	}
	case RsxaudioAvportIdx::SPDIF_0:
	{
		const RsxaudioSampleSize depth = hwp.spdif[0].use_serial_buf ? hwp.serial.depth : hwp.spdif[0].depth;
		return depth == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SIZE * 2 : SYS_RSXAUDIO_STREAM_SIZE;
	}
	case RsxaudioAvportIdx::SPDIF_1:
	{
		const RsxaudioSampleSize depth = hwp.spdif[1].use_serial_buf ? hwp.serial.depth : hwp.spdif[1].depth;
		return depth == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SIZE * 2 : SYS_RSXAUDIO_STREAM_SIZE;
	}
	default:
	{
		return 0;
	}
	}
}
// Mixes the source buffer selected for `avport` into `data_out`, applying the
// port's channel map. The mix<N>() channel count is picked from the HDMI
// stream count (1 -> 2ch, 3 -> 6ch, 4 -> 8ch); SPDIF/AVMULTI are always 2ch.
// Marks the container as consumed on success.
void rsxaudio_data_container::get_data(RsxaudioAvportIdx avport, data_blk_t& data_out)
{
	if (!avport_data_avail[static_cast<u8>(avport)])
	{
		return;
	}

	data_was_written = true;

	// When an HDMI port sources SPDIF_1 which itself reuses the serial buffer,
	// channel map entries pointing beyond the SPDIF channel range must be
	// silenced - SPDIF only carries SYS_RSXAUDIO_SPDIF_MAX_CH channels.
	auto spdif_filter_map = [&](u8 hdmi_idx)
	{
		std::array<u8, SYS_RSXAUDIO_SERIAL_MAX_CH> result;

		for (u64 i = 0; i < SYS_RSXAUDIO_SERIAL_MAX_CH; i++)
		{
			const u8 old_val = hwp.hdmi[hdmi_idx].ch_cfg.map[i];
			result[i] = old_val >= SYS_RSXAUDIO_SPDIF_MAX_CH ? rsxaudio_hw_param_t::hdmi_param_t::MAP_SILENT_CH : old_val;
		}

		return result;
	};

	switch (avport)
	{
	case RsxaudioAvportIdx::HDMI_0:
	case RsxaudioAvportIdx::HDMI_1:
	{
		const u8 hdmi_idx = avport == RsxaudioAvportIdx::HDMI_1;

		switch (hdmi_stream_cnt[hdmi_idx])
		{
		default:
		case 0:
		{
			return;
		}
		case 1: // 2 channels
		{
			if (hwp.hdmi[hdmi_idx].use_spdif_1)
			{
				if (hwp.spdif[1].use_serial_buf)
				{
					mix<2>(spdif_filter_map(hdmi_idx), hwp.serial.depth, out_buf.serial, data_out);
				}
				else
				{
					mix<2>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.spdif[1].depth, out_buf.spdif[1], data_out);
				}
			}
			else
			{
				mix<2>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.serial.depth, out_buf.serial, data_out);
			}
			break;
		}
		case 3: // 6 channels
		{
			if (hwp.hdmi[hdmi_idx].use_spdif_1)
			{
				if (hwp.spdif[1].use_serial_buf)
				{
					mix<6>(spdif_filter_map(hdmi_idx), hwp.serial.depth, out_buf.serial, data_out);
				}
				else
				{
					mix<6>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.spdif[1].depth, out_buf.spdif[1], data_out);
				}
			}
			else
			{
				mix<6>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.serial.depth, out_buf.serial, data_out);
			}
			break;
		}
		case 4: // 8 channels
		{
			if (hwp.hdmi[hdmi_idx].use_spdif_1)
			{
				if (hwp.spdif[1].use_serial_buf)
				{
					mix<8>(spdif_filter_map(hdmi_idx), hwp.serial.depth, out_buf.serial, data_out);
				}
				else
				{
					mix<8>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.spdif[1].depth, out_buf.spdif[1], data_out);
				}
			}
			else
			{
				mix<8>(hwp.hdmi[hdmi_idx].ch_cfg.map, hwp.serial.depth, out_buf.serial, data_out);
			}
			break;
		}
		}
		break;
	}
	case RsxaudioAvportIdx::AVMULTI:
	{
		// AVMULTI takes serial streams 2/3 as its stereo pair.
		mix<2>({2, 3}, hwp.serial.depth, out_buf.serial, data_out);
		break;
	}
	case RsxaudioAvportIdx::SPDIF_0:
	case RsxaudioAvportIdx::SPDIF_1:
	{
		const u8 spdif_idx = avport == RsxaudioAvportIdx::SPDIF_1;

		if (hwp.spdif[spdif_idx].use_serial_buf)
		{
			mix<2>({0, 1}, hwp.serial.depth, out_buf.serial, data_out);
		}
		else
		{
			mix<2>({0, 1}, hwp.spdif[spdif_idx].depth, out_buf.spdif[spdif_idx], data_out);
		}
		break;
	}
	default:
	{
		return;
	}
	}
}
bool rsxaudio_data_container::data_was_used()
{
return data_was_written;
}
rsxaudio_data_thread::rsxaudio_data_thread() {}
// Thread entry point: repeatedly waits on the periodic timer and extracts
// guest audio data on every tick until the thread is asked to abort.
void rsxaudio_data_thread::operator()()
{
	thread_ctrl::scoped_priority high_prio(+1);

	// Timer callback that drains the guest ringbuffers on every period.
	// Fix: this used to be a function-local `static`, which captured `this`
	// of the first instance only - a stale (potentially dangling) pointer if
	// the thread object is ever destroyed and recreated (e.g. on emulation
	// restart). A plain local built once per thread run is correct and costs
	// one construction per thread lifetime.
	const std::function<void()> tmr_callback = [this]() { extract_audio_data(); };

	while (thread_ctrl::state() != thread_state::aborting)
	{
		switch (timer.wait(tmr_callback))
		{
		case rsxaudio_periodic_tmr::wait_result::SUCCESS:
		case rsxaudio_periodic_tmr::wait_result::TIMEOUT:
		case rsxaudio_periodic_tmr::wait_result::TIMER_CANCELED:
		{
			continue;
		}
		case rsxaudio_periodic_tmr::wait_result::INVALID_PARAM:
		case rsxaudio_periodic_tmr::wait_result::TIMER_ERROR:
		default:
		{
			// Unrecoverable timer failure - abort emulation loudly.
			fmt::throw_exception("rsxaudio_periodic_tmr::wait() failed");
		}
		}
	}
}
// Thread state transition hook: on stop, break out of any pending timer wait
// so operator()() can observe the aborting state promptly.
rsxaudio_data_thread& rsxaudio_data_thread::operator=(thread_state /* state */)
{
	timer.cancel_wait();
	return *this;
}
void rsxaudio_data_thread::advance_all_timers()
{
const u64 crnt_time = get_system_time();
timer.vtimer_skip_periods(static_cast<u32>(RsxaudioPort::SERIAL), crnt_time);
timer.vtimer_skip_periods(static_cast<u32>(RsxaudioPort::SPDIF_0), crnt_time);
timer.vtimer_skip_periods(static_cast<u32>(RsxaudioPort::SPDIF_1), crnt_time);
}
// Timer tick body: for each port whose virtual timer is due, reads one block
// from the guest ringbuffer, pushes it to the backend and notifies the guest.
// On failure (no consumer, invalid buffer) the port's timer periods are
// skipped instead of incremented to avoid drift.
void rsxaudio_data_thread::extract_audio_data()
{
	// Accessing timer state is safe here, since we're in timer::wait()

	// Take a local reference so the object cannot be swapped out mid-tick.
	const auto rsxaudio_obj = [&]()
	{
		std::lock_guard ra_obj_lock{rsxaudio_obj_upd_m};
		return rsxaudio_obj_ptr;
	}();

	if (Emu.IsPaused() || !rsxaudio_obj)
	{
		advance_all_timers();
		return;
	}

	std::lock_guard<shared_mutex> rsxaudio_lock(rsxaudio_obj->mutex);

	if (!rsxaudio_obj->init)
	{
		advance_all_timers();
		return;
	}

	rsxaudio_shmem* sh_page = rsxaudio_obj->get_rw_shared_page();
	const auto hw_cfg = hw_param_ts.get_current();
	const u64 crnt_time = get_system_time();

	auto process_rb = [&](RsxaudioPort dst, bool dma_en)
	{
		// SPDIF channel data and underflow events are always disabled by lv1
		const u32 dst_raw = static_cast<u32>(dst);

		// Stamp the ringbuffer with the scheduled (not actual) tick time.
		rsxaudio_ringbuf_reader::set_timestamp(sh_page->ctrl.ringbuf[dst_raw], timer.vtimer_get_sched_time(dst_raw));
		const auto [data_present, rb_addr] = get_ringbuf_addr(dst, *rsxaudio_obj);

		// nullptr address -> feed silence; enqueue failure -> resync timer.
		bool reset_periods = !enqueue_data(dst, rb_addr == nullptr, rb_addr, *hw_cfg);

		if (dma_en)
		{
			if (const auto [notify, blk_idx, timestamp] = rsxaudio_ringbuf_reader::update_status(sh_page->ctrl.ringbuf[dst_raw]); notify)
			{
				// Too late to recover
				reset_periods = true;

				// Tell the guest which block was consumed and when.
				if (const auto& queue = rsxaudio_obj->event_queue[dst_raw])
				{
					queue->send(rsxaudio_obj->event_port_name[dst_raw], dst_raw, blk_idx, timestamp);
				}
			}
		}

		if (reset_periods)
		{
			timer.vtimer_skip_periods(dst_raw, crnt_time);
		}
		else
		{
			timer.vtimer_incr(dst_raw, crnt_time);
		}
	};

	if (timer.is_vtimer_behind(static_cast<u32>(RsxaudioPort::SERIAL), crnt_time))
	{
		process_rb(RsxaudioPort::SERIAL, hw_cfg->serial.dma_en);
	}

	if (timer.is_vtimer_behind(static_cast<u32>(RsxaudioPort::SPDIF_0), crnt_time))
	{
		process_rb(RsxaudioPort::SPDIF_0, hw_cfg->spdif[0].dma_en);
	}

	if (timer.is_vtimer_behind(static_cast<u32>(RsxaudioPort::SPDIF_1), crnt_time))
	{
		process_rb(RsxaudioPort::SPDIF_1, hw_cfg->spdif[1].dma_en);
	}
}
// Translates the current ringbuffer block's guest IO address into a host
// pointer within the shared page. Returns {data_present, host_ptr}, or
// {false, nullptr} if the guest-provided address falls outside the shared
// page (the caller then feeds silence).
std::pair<bool, void*> rsxaudio_data_thread::get_ringbuf_addr(RsxaudioPort dst, const lv2_rsxaudio& rsxaudio_obj)
{
	ensure(dst <= RsxaudioPort::SPDIF_1);

	rsxaudio_shmem* sh_page = rsxaudio_obj.get_rw_shared_page();
	const auto [data_present, addr] = rsxaudio_ringbuf_reader::get_addr(sh_page->ctrl.ringbuf[static_cast<u32>(dst)]);
	const u32 buf_size = dst == RsxaudioPort::SERIAL ? SYS_RSXAUDIO_RINGBUF_BLK_SZ_SERIAL : SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF;

	// Reject addresses whose block would run past the end of the shared page.
	if (addr >= rsxaudio_obj.dma_io_base && addr < rsxaudio_obj.dma_io_base + sizeof(rsxaudio_shmem) - buf_size)
	{
		return std::make_pair(data_present, reinterpret_cast<u8*>(rsxaudio_obj.get_rw_shared_page()) + addr - rsxaudio_obj.dma_io_base);
	}

	// Buffer address is invalid
	return std::make_pair(false, nullptr);
}
void rsxaudio_data_thread::reset_hw()
{
update_hw_param([&](rsxaudio_hw_param_t& current)
{
const bool serial_dma_en = current.serial.dma_en;
current.serial = {};
current.serial.dma_en = serial_dma_en;
for (auto& spdif : current.spdif)
{
const bool spdif_dma_en = spdif.dma_en;
spdif = {};
spdif.dma_en = spdif_dma_en;
}
current.serial_freq_base = SYS_RSXAUDIO_FREQ_BASE_384K;
current.spdif_freq_base = SYS_RSXAUDIO_FREQ_BASE_352K;
current.avport_src.fill(RsxaudioPort::INVALID);
});
}
// Applies a caller-supplied mutation to a copy of the current hw parameters,
// then derives and publishes the consequences: per-avport stream configs and
// mute states to the backend thread, and virtual timer rates for each active
// port. The whole update is performed inside the timestamped-state add_op.
void rsxaudio_data_thread::update_hw_param(std::function<void(rsxaudio_hw_param_t&)> update_callback)
{
	ensure(update_callback);

	hw_param_ts.add_op([&]()
	{
		// Copy-on-write: mutate a fresh copy of the current parameters.
		auto new_hw_param = std::make_shared<rsxaudio_hw_param_t>(*hw_param_ts.get_current());
		update_callback(*new_hw_param);

		const bool serial_active = calc_port_active_state(RsxaudioPort::SERIAL, *new_hw_param);
		const bool spdif_active[SYS_RSXAUDIO_SPDIF_CNT] =
		{
			calc_port_active_state(RsxaudioPort::SPDIF_0, *new_hw_param),
			calc_port_active_state(RsxaudioPort::SPDIF_1, *new_hw_param)
		};

		// Derive the output frequency/channel config for every avport.
		std::array<rsxaudio_backend_thread::port_config, SYS_RSXAUDIO_AVPORT_CNT> port_cfg{};
		port_cfg[static_cast<u8>(RsxaudioAvportIdx::AVMULTI)] = {static_cast<AudioFreq>(new_hw_param->serial_freq_base / new_hw_param->serial.freq_div), AudioChannelCnt::STEREO};

		// SPDIF ports inherit the serial config when slaved to its buffer.
		auto gen_spdif_port_cfg = [&](u8 spdif_idx)
		{
			if (new_hw_param->spdif[spdif_idx].use_serial_buf)
			{
				return port_cfg[static_cast<u8>(RsxaudioAvportIdx::AVMULTI)];
			}

			return rsxaudio_backend_thread::port_config{static_cast<AudioFreq>(new_hw_param->spdif_freq_base / new_hw_param->spdif[spdif_idx].freq_div), AudioChannelCnt::STEREO};
		};

		port_cfg[static_cast<u8>(RsxaudioAvportIdx::SPDIF_0)] = gen_spdif_port_cfg(0);
		port_cfg[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)] = gen_spdif_port_cfg(1);

		// HDMI ports take the frequency of their source (SPDIF_1 or serial)
		// but their own configured channel count.
		auto gen_hdmi_port_cfg = [&](u8 hdmi_idx)
		{
			if (new_hw_param->hdmi[hdmi_idx].use_spdif_1)
			{
				return rsxaudio_backend_thread::port_config{port_cfg[static_cast<u8>(RsxaudioAvportIdx::SPDIF_1)].freq, new_hw_param->hdmi[hdmi_idx].ch_cfg.total_ch_cnt};
			}

			return rsxaudio_backend_thread::port_config{port_cfg[static_cast<u8>(RsxaudioAvportIdx::AVMULTI)].freq, new_hw_param->hdmi[hdmi_idx].ch_cfg.total_ch_cnt};
		};

		port_cfg[static_cast<u8>(RsxaudioAvportIdx::HDMI_0)] = gen_hdmi_port_cfg(0);
		port_cfg[static_cast<u8>(RsxaudioAvportIdx::HDMI_1)] = gen_hdmi_port_cfg(1);

		// TODO: ideally, old data must be flushed from backend buffers if channel became inactive or its src changed
		g_fxo->get<rsx_audio_backend>().set_new_stream_param(port_cfg, calc_avport_mute_state(*new_hw_param));

		// Reprogram the virtual timers to match the new byte rates.
		timer.vtimer_access_sec([&]()
		{
			const u64 crnt_time = get_system_time();

			if (serial_active)
			{
				// 2 channels per stream, streams go in parallel
				const u32 new_timer_rate = static_cast<u32>(port_cfg[static_cast<u8>(RsxaudioAvportIdx::AVMULTI)].freq) *
										   static_cast<u8>(new_hw_param->serial.depth) *
										   SYS_RSXAUDIO_CH_PER_STREAM;
				timer.enable_vtimer(static_cast<u32>(RsxaudioPort::SERIAL), new_timer_rate, crnt_time);
			}
			else
			{
				timer.disable_vtimer(static_cast<u32>(RsxaudioPort::SERIAL));
			}

			for (u8 spdif_idx = 0; spdif_idx < SYS_RSXAUDIO_SPDIF_CNT; spdif_idx++)
			{
				const u32 vtimer_id = static_cast<u32>(RsxaudioPort::SPDIF_0) + spdif_idx;

				// Serial-slaved SPDIF ports tick with the serial timer instead.
				if (spdif_active[spdif_idx] && !new_hw_param->spdif[spdif_idx].use_serial_buf)
				{
					// 2 channels per stream, single stream
					const u32 new_timer_rate = static_cast<u32>(port_cfg[static_cast<u8>(RsxaudioAvportIdx::SPDIF_0) + spdif_idx].freq) *
											   static_cast<u8>(new_hw_param->spdif[spdif_idx].depth) *
											   SYS_RSXAUDIO_CH_PER_STREAM;
					timer.enable_vtimer(vtimer_id, new_timer_rate, crnt_time);
				}
				else
				{
					timer.disable_vtimer(vtimer_id);
				}
			}
		});

		return new_hw_param;
	});
}
// Sets the mute flag of a single port in a fresh copy of the hw parameters
// and pushes the resulting per-avport mute mask to the backend thread.
void rsxaudio_data_thread::update_mute_state(RsxaudioPort port, bool muted)
{
	hw_param_ts.add_op([&]()
	{
		auto hwp = std::make_shared<rsxaudio_hw_param_t>(*hw_param_ts.get_current());

		if (port == RsxaudioPort::SERIAL)
		{
			hwp->serial.muted = muted;
		}
		else if (port == RsxaudioPort::SPDIF_0 || port == RsxaudioPort::SPDIF_1)
		{
			hwp->spdif[port == RsxaudioPort::SPDIF_1].muted = muted;
		}
		else
		{
			fmt::throw_exception("Invalid RSXAudio port: %u", static_cast<u8>(port));
		}

		g_fxo->get<rsx_audio_backend>().set_mute_state(calc_avport_mute_state(*hwp));
		return hwp;
	});
}
// Sets or clears AV-level mute flags on an avport. `muted`/`force_mute` select
// WHICH flags to touch; `set` is the value written. Only HDMI ports have a
// force-mute flag; AVMULTI has a single AV mute. Publishes the resulting mute
// mask to the backend.
void rsxaudio_data_thread::update_av_mute_state(RsxaudioAvportIdx avport, bool muted, bool force_mute, bool set)
{
	hw_param_ts.add_op([&]()
	{
		// Copy-on-write update of the current hw parameters.
		auto new_hw_param = std::make_shared<rsxaudio_hw_param_t>(*hw_param_ts.get_current());

		switch (avport)
		{
		case RsxaudioAvportIdx::HDMI_0:
		case RsxaudioAvportIdx::HDMI_1:
		{
			const u32 hdmi_idx = avport == RsxaudioAvportIdx::HDMI_1;

			if (muted)
			{
				new_hw_param->hdmi[hdmi_idx].muted = set;
			}

			if (force_mute)
			{
				new_hw_param->hdmi[hdmi_idx].force_mute = set;
			}
			break;
		}
		case RsxaudioAvportIdx::AVMULTI:
		{
			if (muted)
			{
				new_hw_param->avmulti_av_muted = set;
			}
			break;
		}
		default:
		{
			fmt::throw_exception("Invalid RSXAudio avport: %u", static_cast<u8>(avport));
		}
		}

		g_fxo->get<rsx_audio_backend>().set_mute_state(calc_avport_mute_state(*new_hw_param));
		return new_hw_param;
	});
}
// Computes the effective mute bit for every avport: a port is muted if its
// source is inactive, explicitly muted, AV-muted, or (HDMI) not initialized.
rsxaudio_backend_thread::avport_bit rsxaudio_data_thread::calc_avport_mute_state(const rsxaudio_hw_param_t& hwp)
{
	const bool serial_active = calc_port_active_state(RsxaudioPort::SERIAL, hwp);
	const bool spdif_active[SYS_RSXAUDIO_SPDIF_CNT] =
	{
		calc_port_active_state(RsxaudioPort::SPDIF_0, hwp),
		calc_port_active_state(RsxaudioPort::SPDIF_1, hwp)
	};

	const bool avmulti = !serial_active || hwp.serial.muted || hwp.avmulti_av_muted;

	auto spdif_muted = [&](u8 spdif_idx)
	{
		const u8 spdif_port = spdif_idx == 1;

		// A serial-slaved SPDIF port is muted if serial is, or clocks diverge.
		if (hwp.spdif[spdif_port].use_serial_buf)
		{
			// TODO: HW test if both serial and spdif mutes are used in serial mode for spdif
			return !serial_active || hwp.spdif[spdif_port].freq_div != hwp.serial.freq_div || hwp.serial.muted || hwp.spdif[spdif_port].muted;
		}

		return !spdif_active[spdif_port] || hwp.spdif[spdif_port].muted;
	};

	auto hdmi_muted = [&](u8 hdmi_idx)
	{
		const u8 hdmi_port = hdmi_idx == 1;

		// HDMI inherits its source's mute state plus its own flags.
		if (hwp.hdmi[hdmi_idx].use_spdif_1)
		{
			return spdif_muted(1) || hwp.hdmi[hdmi_port].muted || hwp.hdmi[hdmi_port].force_mute || !hwp.hdmi[hdmi_port].init;
		}

		return !serial_active || hwp.serial.muted || hwp.hdmi[hdmi_port].muted || hwp.hdmi[hdmi_port].force_mute || !hwp.hdmi[hdmi_port].init;
	};

	return { hdmi_muted(0), hdmi_muted(1), avmulti, spdif_muted(0), spdif_muted(1) };
}
// A port is "active" when DMA, the buffer-empty notification and the port
// enable bit are all set. An SPDIF port slaved to the serial buffer is active
// only when the serial port is active and both run at the same clock divider.
bool rsxaudio_data_thread::calc_port_active_state(RsxaudioPort port, const rsxaudio_hw_param_t& hwp)
{
	const bool serial_on = hwp.serial.dma_en && hwp.serial.buf_empty_en && hwp.serial.en;

	if (port == RsxaudioPort::SERIAL)
	{
		return serial_on;
	}

	if (port == RsxaudioPort::SPDIF_0 || port == RsxaudioPort::SPDIF_1)
	{
		const auto& spdif = hwp.spdif[port == RsxaudioPort::SPDIF_1];

		if (spdif.use_serial_buf)
		{
			return serial_on && (spdif.freq_div == hwp.serial.freq_div);
		}

		return spdif.dma_en && spdif.buf_empty_en && spdif.en;
	}

	return false;
}
// Converts a signed 32-bit PCM sample to float in [-1.0, 1.0). The scale is a
// power-of-two reciprocal, so the multiply is bit-exact with a division.
f32 rsxaudio_data_thread::pcm_to_float(s32 sample)
{
	constexpr f32 scale = 1.0f / 2147483648.0f;
	return static_cast<f32>(sample) * scale;
}
// Converts a signed 16-bit PCM sample to float in [-1.0, 1.0) (exact scaling
// by the power-of-two reciprocal 1/2^15).
f32 rsxaudio_data_thread::pcm_to_float(s16 sample)
{
	constexpr f32 scale = 1.0f / 32768.0f;
	return static_cast<f32>(sample) * scale;
}
// Deinterleaves one serial stream's big-endian PCM into two float channel
// buffers. The serial layout interleaves streams block-by-block: each data
// block holds the left channel in its first half and the right channel in its
// second half; `src_stream` selects which stream's blocks to read.
void rsxaudio_data_thread::pcm_serial_process_channel(RsxaudioSampleSize word_bits, ra_stream_blk_t& buf_out_l, ra_stream_blk_t& buf_out_r, const void* buf_in, u8 src_stream)
{
	const u8 input_word_sz = static_cast<u8>(word_bits);
	u64 ch_dst = 0;

	for (u64 blk_idx = 0; blk_idx < SYS_RSXAUDIO_STREAM_DATA_BLK_CNT; blk_idx++)
	{
		for (u64 offset = 0; offset < SYS_RSXAUDIO_DATA_BLK_SIZE / 2; offset += input_word_sz, ch_dst++)
		{
			// Index (in samples) of the left sample; right is half a block later.
			const u64 left_ch_src = (blk_idx * SYS_RSXAUDIO_STREAM_SIZE + src_stream * SYS_RSXAUDIO_DATA_BLK_SIZE + offset) / input_word_sz;
			const u64 right_ch_src = left_ch_src + (SYS_RSXAUDIO_DATA_BLK_SIZE / 2) / input_word_sz;

			if (word_bits == RsxaudioSampleSize::_16BIT)
			{
				buf_out_l[ch_dst] = pcm_to_float(static_cast<const be_t<s16>*>(buf_in)[left_ch_src]);
				buf_out_r[ch_dst] = pcm_to_float(static_cast<const be_t<s16>*>(buf_in)[right_ch_src]);
			}
			else
			{
				// Looks like rsx treats 20bit/24bit samples as 32bit ones
				buf_out_l[ch_dst] = pcm_to_float(static_cast<const be_t<s32>*>(buf_in)[left_ch_src]);
				buf_out_r[ch_dst] = pcm_to_float(static_cast<const be_t<s32>*>(buf_in)[right_ch_src]);
			}
		}
	}
}
// Deinterleaves an SPDIF block (samples interleaved L,R,L,R,... big-endian)
// into two float channel buffers.
void rsxaudio_data_thread::pcm_spdif_process_channel(RsxaudioSampleSize word_bits, ra_stream_blk_t& buf_out_l, ra_stream_blk_t& buf_out_r, const void* buf_in)
{
	const u8 input_word_sz = static_cast<u8>(word_bits);

	// One iteration per stereo frame in the block.
	for (u64 offset = 0; offset < SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF / (input_word_sz * SYS_RSXAUDIO_SPDIF_MAX_CH); offset++)
	{
		const u64 left_ch_src = offset * SYS_RSXAUDIO_SPDIF_MAX_CH;
		const u64 right_ch_src = left_ch_src + 1;

		if (word_bits == RsxaudioSampleSize::_16BIT)
		{
			buf_out_l[offset] = pcm_to_float(static_cast<const be_t<s16>*>(buf_in)[left_ch_src]);
			buf_out_r[offset] = pcm_to_float(static_cast<const be_t<s16>*>(buf_in)[right_ch_src]);
		}
		else
		{
			// Looks like rsx treats 20bit/24bit samples as 32bit ones
			buf_out_l[offset] = pcm_to_float(static_cast<const be_t<s32>*>(buf_in)[left_ch_src]);
			buf_out_r[offset] = pcm_to_float(static_cast<const be_t<s32>*>(buf_in)[right_ch_src]);
		}
	}
}
// Converts one block from the given port into float channel buffers (or fills
// them with silence), wraps them in a container and hands it to the backend.
// Returns true if the backend actually consumed the data.
bool rsxaudio_data_thread::enqueue_data(RsxaudioPort dst, bool silence, const void* src_addr, const rsxaudio_hw_param_t& hwp)
{
	auto& backend_thread = g_fxo->get<rsx_audio_backend>();

	// Shared tail: build a container flagging which port has fresh data.
	const auto submit = [&](bool serial_rdy, bool spdif_0_rdy, bool spdif_1_rdy)
	{
		rsxaudio_data_container cont{hwp, output_buf, serial_rdy, spdif_0_rdy, spdif_1_rdy};
		backend_thread.add_data(cont);
		return cont.data_was_used();
	};

	switch (dst)
	{
	case RsxaudioPort::SERIAL:
	{
		if (silence)
		{
			output_buf.serial.fill({});
		}
		else
		{
			// Each serial stream yields one stereo pair of channel buffers.
			for (u8 stream_idx = 0; stream_idx < SYS_RSXAUDIO_SERIAL_STREAM_CNT; stream_idx++)
			{
				pcm_serial_process_channel(hwp.serial.depth, output_buf.serial[stream_idx * 2], output_buf.serial[stream_idx * 2 + 1], src_addr, stream_idx);
			}
		}

		return submit(true, false, false);
	}
	case RsxaudioPort::SPDIF_0:
	case RsxaudioPort::SPDIF_1:
	{
		const u8 idx = dst == RsxaudioPort::SPDIF_1;

		if (silence)
		{
			output_buf.spdif[idx].fill({});
		}
		else
		{
			pcm_spdif_process_channel(hwp.spdif[idx].depth, output_buf.spdif[idx][0], output_buf.spdif[idx][1], src_addr);
		}

		return submit(false, idx == 0, idx == 1);
	}
	default:
	{
		return false;
	}
	}
}
namespace audio
{
	// Propagates emulator audio-config changes to the rsxaudio backend thread,
	// but only when the rsxaudio provider is selected and the backend exists.
	extern void configure_rsxaudio()
	{
		const bool provider_active = g_cfg.audio.provider == audio_provider::rsxaudio;

		if (provider_active && g_fxo->is_init<rsx_audio_backend>())
		{
			g_fxo->get<rsx_audio_backend>().update_emu_cfg();
		}
	}
}
// Captures the initial emulator audio config and seeds the volume ramp from
// the configured volume (percent mapped onto the nominal scale).
rsxaudio_backend_thread::rsxaudio_backend_thread()
{
	new_emu_cfg = get_emu_cfg();

	const u64 cfg_volume = g_cfg.audio.volume;

	callback_cfg.atomic_op([&](callback_config& cb)
	{
		cb.target_volume = static_cast<u16>(cfg_volume / 100.0 * callback_config::VOL_NOMINAL);
		cb.initial_volume = cb.current_volume;
	});
}
// Shuts the backend down. Callbacks are cleared before releasing the backend
// so no write/state callback can fire into a dying object.
rsxaudio_backend_thread::~rsxaudio_backend_thread()
{
	if (backend)
	{
		backend->Close();
		backend->SetWriteCallback(nullptr);
		backend->SetStateCallback(nullptr);
		backend = nullptr;
	}
}
// Re-reads emulator audio settings. The volume target is always refreshed;
// the worker thread is woken only when the rest of the config changed.
void rsxaudio_backend_thread::update_emu_cfg()
{
	std::unique_lock lock(state_update_m);
	const emu_audio_cfg _new_emu_cfg = get_emu_cfg();
	const u64 new_vol = g_cfg.audio.volume;

	// Volume is consumed directly by the audio callback - no wakeup needed.
	callback_cfg.atomic_op([&](callback_config& val)
	{
		val.target_volume = static_cast<u16>(new_vol / 100.0 * callback_config::VOL_NOMINAL);
		val.initial_volume = val.current_volume;
	});

	if (new_emu_cfg != _new_emu_cfg)
	{
		new_emu_cfg = _new_emu_cfg;
		emu_cfg_changed = true;
		// Unlock before notifying to avoid waking the worker into a held lock.
		lock.unlock();
		state_update_c.notify_all();
	}
}
// Returns the sample rate currently configured for the audio callback.
u32 rsxaudio_backend_thread::get_sample_rate() const
{
	const callback_config cb = callback_cfg.load();
	return cb.freq;
}
// Returns the input channel count currently configured for the audio callback.
u8 rsxaudio_backend_thread::get_channel_count() const
{
	const callback_config cb = callback_cfg.load();
	return cb.input_ch_cnt;
}
// Snapshots the emulator audio settings into an emu_audio_cfg, normalizing
// the derived flags (buffering requires a real renderer; time stretching
// requires buffering and a positive threshold).
rsxaudio_backend_thread::emu_audio_cfg rsxaudio_backend_thread::get_emu_cfg()
{
	// Get max supported channel count
	AudioChannelCnt out_ch_cnt = AudioBackend::get_max_channel_count(0); // CELL_AUDIO_OUT_PRIMARY

	emu_audio_cfg cfg =
	{
		.audio_device = g_cfg.audio.audio_device,
		.desired_buffer_duration = g_cfg.audio.desired_buffer_duration,
		.time_stretching_threshold = g_cfg.audio.time_stretching_threshold / 100.0,
		.buffering_enabled = static_cast<bool>(g_cfg.audio.enable_buffering),
		.convert_to_s16 = static_cast<bool>(g_cfg.audio.convert_to_s16),
		.enable_time_stretching = static_cast<bool>(g_cfg.audio.enable_time_stretching),
		.dump_to_file = static_cast<bool>(g_cfg.audio.dump_to_file),
		.channels = out_ch_cnt,
		.channel_layout = g_cfg.audio.channel_layout,
		.renderer = g_cfg.audio.renderer,
		.provider = g_cfg.audio.provider,
		.avport = convert_avport(g_cfg.audio.rsxaudio_port)
	};

	cfg.buffering_enabled = cfg.buffering_enabled && cfg.renderer != audio_renderer::null;
	cfg.enable_time_stretching = cfg.buffering_enabled && cfg.enable_time_stretching && cfg.time_stretching_threshold > 0.0;

	return cfg;
}
// Backend worker loop. Waits for state changes (config, device, errors),
// (re)initializes the audio backend when needed, and - in buffered mode -
// periodically moves data from the aux ringbuffer through the optional time
// stretcher and dumper into the playback ringbuffer.
void rsxaudio_backend_thread::operator()()
{
	if (g_cfg.audio.provider != audio_provider::rsxaudio)
	{
		return;
	}

	// NOTE(review): these function-local statics persist across thread
	// restarts (stale last-known state is intentional or at least relied
	// upon here) - confirm before converting them to members.
	static rsxaudio_state ra_state{};
	static emu_audio_cfg emu_cfg{};
	static bool backend_failed = false;

	for (;;)
	{
		bool should_update_backend = false;
		bool reset_backend = false;
		bool checkDefaultDevice = false;
		bool should_service_stream = false;

		{
			std::unique_lock lock(state_update_m);

			// Event-collection loop: coalesce all pending state changes, then
			// break out to act on them without holding the lock.
			for (;;)
			{
				// Unsafe to access backend under lock (state_changed_callback uses state_update_m -> possible deadlock)
				if (thread_ctrl::state() == thread_state::aborting)
				{
					lock.unlock();
					backend_stop();
					return;
				}

				if (backend_device_changed)
				{
					should_update_backend = true;
					checkDefaultDevice = true;
					backend_device_changed = false;
				}

				// Emulated state changed
				if (ra_state_changed)
				{
					const callback_config cb_cfg = callback_cfg.observe();
					ra_state_changed = false;
					ra_state = new_ra_state;

					if (cb_cfg.cfg_changed)
					{
						should_update_backend = true;
						checkDefaultDevice = false;

						callback_cfg.atomic_op([&](callback_config& val)
						{
							val.cfg_changed = false; // Acknowledge cfg update
						});
					}
				}

				// Update emu config
				if (emu_cfg_changed)
				{
					// Changing the renderer requires a full backend rebuild.
					reset_backend |= emu_cfg.renderer != new_emu_cfg.renderer;
					emu_cfg_changed = false;
					emu_cfg = new_emu_cfg;
					should_update_backend = true;
					checkDefaultDevice = false;
				}

				// Handle backend error notification
				if (backend_error_occured)
				{
					reset_backend = true;
					should_update_backend = true;
					checkDefaultDevice = false;
					backend_error_occured = false;
				}

				if (should_update_backend)
				{
					backend_current_cfg.cfg = ra_state.port[static_cast<u8>(emu_cfg.avport)];
					backend_current_cfg.avport = emu_cfg.avport;
					break;
				}

				if (backend_failed)
				{
					// Retry recovery at a fixed interval.
					state_update_c.wait(state_update_m, ERROR_SERVICE_PERIOD);
					break;
				}

				if (use_aux_ringbuf)
				{
					// Buffered mode: wake up shortly before the next service slot.
					const u64 next_period_time = get_time_until_service();
					should_service_stream = next_period_time <= SERVICE_THRESHOLD;

					if (should_service_stream)
					{
						break;
					}

					state_update_c.wait(state_update_m, next_period_time);
				}
				else
				{
					// Nothing to do - wait for events
					state_update_c.wait(state_update_m, umax);
				}
			}
		}

		// Reconfigure the backend. A default-device change is only acted on
		// when the backend actually reports a new default.
		if (should_update_backend && (!checkDefaultDevice || backend->DefaultDeviceChanged()))
		{
			backend_init(ra_state, emu_cfg, reset_backend);

			if (emu_cfg.enable_time_stretching)
			{
				resampler.set_params(backend_current_cfg.cfg.ch_cnt, backend_current_cfg.cfg.freq);
				resampler.set_tempo(RESAMPLER_MAX_FREQ_VAL);
			}

			if (emu_cfg.dump_to_file)
			{
				dumper.Open(backend_current_cfg.cfg.ch_cnt, backend_current_cfg.cfg.freq, AudioSampleSize::FLOAT);
			}
			else
			{
				dumper.Close();
			}
		}

		if (!backend->Operational())
		{
			if (!backend_failed)
			{
				sys_rsxaudio.warning("Backend stopped unexpectedly (likely device change). Attempting to recover...");
			}

			backend_init(ra_state, emu_cfg);
			backend_failed = true;
			continue;
		}

		if (backend_failed)
		{
			sys_rsxaudio.warning("Backend recovered");
			backend_failed = false;
		}

		if (!Emu.IsPaused() || !use_aux_ringbuf) // Don't pause if thread is in direct mode
		{
			if (!backend_playing())
			{
				backend_start();
				reset_service_time();
				continue;
			}

			if (should_service_stream)
			{
				// Pull as much data from the aux ringbuffer as the playback
				// ringbuffer can currently accept.
				void* crnt_buf = thread_tmp_buf.data();
				const u64 bytes_req = ringbuf.get_free_size();
				const u64 bytes_read = aux_ringbuf.pop(crnt_buf, bytes_req, true);
				u64 crnt_buf_size = bytes_read;

				if (emu_cfg.enable_time_stretching)
				{
					const u64 input_ch_cnt = static_cast<u64>(ra_state.port[static_cast<u8>(emu_cfg.avport)].ch_cnt);
					const u64 bytes_per_sample = static_cast<u32>(AudioSampleSize::FLOAT) * input_ch_cnt;
					const u64 samples_req = bytes_req / bytes_per_sample;
					const u64 samples_avail = crnt_buf_size / bytes_per_sample;

					// Slow playback down (tempo < 1.0) when the buffer level
					// drops below the configured threshold.
					const f64 resampler_ratio = resampler.get_resample_ratio();
					f64 fullness_ratio = static_cast<f64>(samples_avail + resampler.samples_available()) / samples_req;

					if (fullness_ratio < emu_cfg.time_stretching_threshold)
					{
						fullness_ratio /= emu_cfg.time_stretching_threshold;
						// Smooth tempo changes by averaging with the current ratio.
						const f64 new_resampler_ratio = (resampler_ratio + fullness_ratio) / 2.0;

						if (std::abs(new_resampler_ratio - resampler_ratio) >= TIME_STRETCHING_STEP)
						{
							resampler.set_tempo(new_resampler_ratio);
						}
					}
					else if (resampler_ratio != RESAMPLER_MAX_FREQ_VAL)
					{
						resampler.set_tempo(RESAMPLER_MAX_FREQ_VAL);
					}

					resampler.put_samples(static_cast<f32*>(crnt_buf), static_cast<u32>(samples_avail));
					const auto [resampled_data, sample_cnt] = resampler.get_samples(static_cast<u32>(samples_req));
					crnt_buf = resampled_data;
					crnt_buf_size = sample_cnt * bytes_per_sample;
				}

				// Dump audio if enabled
				if (emu_cfg.dump_to_file)
				{
					dumper.WriteData(crnt_buf, static_cast<u32>(crnt_buf_size));
				}

				ringbuf.push(crnt_buf, crnt_buf_size);
				update_service_time();
			}
		}
		else
		{
			if (backend_playing())
			{
				backend_stop();
			}

			// Keep the service clock advancing while paused.
			if (should_service_stream)
			{
				update_service_time();
			}
		}
	}
}
// Thread state transition hook: wakes the worker so it can observe the
// aborting state. The empty lock scope acts as a barrier, guaranteeing the
// worker is either outside the wait or will see the notification.
rsxaudio_backend_thread& rsxaudio_backend_thread::operator=(thread_state /* state */)
{
	{
		std::lock_guard lock(state_update_m);
	}
	state_update_c.notify_all();

	return *this;
}
// Receives new per-avport stream configs plus mute bits from the data thread.
// If the currently-selected avport's config changed, playback is gated off
// until the worker reconfigures the backend; the worker is woken only when
// the stored state actually differs.
void rsxaudio_backend_thread::set_new_stream_param(const std::array<port_config, SYS_RSXAUDIO_AVPORT_CNT> &cfg, avport_bit muted_avports)
{
	std::unique_lock lock(state_update_m);

	const auto new_mute_state = gen_mute_state(muted_avports);
	const bool should_update = backend_current_cfg.cfg != cfg[static_cast<u8>(backend_current_cfg.avport)];

	callback_cfg.atomic_op([&](callback_config& val)
	{
		val.mute_state = new_mute_state;

		if (should_update)
		{
			val.ready = false; // Prevent audio playback until backend is reconfigured
			val.cfg_changed = true;
		}
	});

	if (new_ra_state.port != cfg)
	{
		new_ra_state.port = cfg;
		ra_state_changed = true;
		// Unlock before notifying so the worker doesn't wake into a held lock.
		lock.unlock();
		state_update_c.notify_all();
	}
}
// Atomically publishes a new packed per-avport mute mask to the audio callback.
void rsxaudio_backend_thread::set_mute_state(avport_bit muted_avports)
{
	const u8 packed_state = gen_mute_state(muted_avports);

	callback_cfg.atomic_op([packed_state](callback_config& cb)
	{
		cb.mute_state = packed_state;
	});
}
// Packs the per-avport mute flags into a bitmask indexed by RsxaudioAvportIdx.
u8 rsxaudio_backend_thread::gen_mute_state(avport_bit avports)
{
	u8 result = 0;

	const auto set_bit = [&result](bool flag, RsxaudioAvportIdx idx)
	{
		if (flag)
		{
			result |= static_cast<u8>(1u << static_cast<u8>(idx));
		}
	};

	set_bit(avports.hdmi_0, RsxaudioAvportIdx::HDMI_0);
	set_bit(avports.hdmi_1, RsxaudioAvportIdx::HDMI_1);
	set_bit(avports.avmulti, RsxaudioAvportIdx::AVMULTI);
	set_bit(avports.spdif_0, RsxaudioAvportIdx::SPDIF_0);
	set_bit(avports.spdif_1, RsxaudioAvportIdx::SPDIF_1);

	return result;
}
// Feed freshly produced rsxaudio data into the ring buffer feeding the backend.
// Non-blocking by design: if the ringbuffer mutex is contended (backend being
// reconfigured) or the buffer is full, the chunk is silently dropped - the
// producer side must never stall.
void rsxaudio_backend_thread::add_data(rsxaudio_data_container& cont)
{
	std::unique_lock lock(ringbuf_mutex, std::try_to_lock);
	if (!lock.owns_lock())
	{
		return;
	}
	const callback_config cb_cfg = callback_cfg.observe();
	if (!cb_cfg.ready || !cb_cfg.callback_active)
	{
		// Backend not configured yet or callback not running - discard
		return;
	}
	// Scratch extraction buffer. Static to avoid a large per-call allocation;
	// safe because it is only touched while ringbuf_mutex is held.
	static rsxaudio_data_container::data_blk_t in_data_blk{};
	if (u32 len = cont.get_data_size(cb_cfg.avport_idx))
	{
		if (use_aux_ringbuf)
		{
			// Time-stretching / file-dump path: data is staged in aux_ringbuf first
			if (aux_ringbuf.get_free_size() >= len)
			{
				cont.get_data(cb_cfg.avport_idx, in_data_blk);
				aux_ringbuf.push(in_data_blk.data(), len);
			}
		}
		else
		{
			if (ringbuf.get_free_size() >= len)
			{
				cont.get_data(cb_cfg.avport_idx, in_data_blk);
				ringbuf.push(in_data_blk.data(), len);
			}
		}
	}
}
// Map the emulator-side avport enum to the rsxaudio avport index.
// Throws on values outside the known set.
RsxaudioAvportIdx rsxaudio_backend_thread::convert_avport(audio_avport avport)
{
	if (avport == audio_avport::hdmi_0) return RsxaudioAvportIdx::HDMI_0;
	if (avport == audio_avport::hdmi_1) return RsxaudioAvportIdx::HDMI_1;
	if (avport == audio_avport::avmulti) return RsxaudioAvportIdx::AVMULTI;
	if (avport == audio_avport::spdif_0) return RsxaudioAvportIdx::SPDIF_0;
	if (avport == audio_avport::spdif_1) return RsxaudioAvportIdx::SPDIF_1;

	fmt::throw_exception("Invalid RSXAudio avport: %u", static_cast<u8>(avport));
}
// (Re)open the audio backend for the avport selected in emu_cfg and size all
// ring/temp buffers accordingly. Called from the worker thread whenever the
// stream parameters or emulator audio settings change.
// reset_backend forces destruction and re-creation of the backend object.
void rsxaudio_backend_thread::backend_init(const rsxaudio_state& ra_state, const emu_audio_cfg& emu_cfg, bool reset_backend)
{
	if (reset_backend || !backend)
	{
		// Destroy the old backend before creating the new one
		backend = nullptr;
		backend = Emu.GetCallbacks().get_audio();
		backend->SetWriteCallback(std::bind(&rsxaudio_backend_thread::write_data_callback, this, std::placeholders::_1, std::placeholders::_2));
		backend->SetStateCallback(std::bind(&rsxaudio_backend_thread::state_changed_callback, this, std::placeholders::_1));
	}
	const port_config& port_cfg = ra_state.port[static_cast<u8>(emu_cfg.avport)];
	const AudioSampleSize sample_size = emu_cfg.convert_to_s16 ? AudioSampleSize::S16 : AudioSampleSize::FLOAT;
	// Never request more output channels than the port provides
	const AudioChannelCnt ch_cnt = static_cast<AudioChannelCnt>(std::min<u32>(static_cast<u32>(port_cfg.ch_cnt), static_cast<u32>(emu_cfg.channels)));
	f64 cb_frame_len = 0.0;
	audio_channel_layout backend_channel_layout = audio_channel_layout::stereo;
	if (backend->Open(emu_cfg.audio_device, port_cfg.freq, sample_size, ch_cnt, emu_cfg.channel_layout))
	{
		cb_frame_len = backend->GetCallbackFrameLen();
		backend_channel_layout = backend->get_channel_layout();
		sys_rsxaudio.notice("Opened audio backend (sampling_rate=%d, sample_size=%d, channels=%d, layout=%s)", backend->get_sampling_rate(), backend->get_sample_size(), backend->get_channels(), backend->get_channel_layout());
	}
	else
	{
		sys_rsxaudio.error("Failed to open audio backend. Make sure that no other application is running that might block audio access (e.g. Netflix).");
	}
	// Extra slack added to every buffer to absorb scheduling jitter
	static constexpr f64 _10ms = 512.0 / 48000.0;
	const f64 buffering_len = emu_cfg.buffering_enabled ? (emu_cfg.desired_buffer_duration / 1000.0) : 0.0;
	// Input side is always 32-bit float at the port's channel count / frequency
	const u64 bytes_per_sec = static_cast<u32>(AudioSampleSize::FLOAT) * static_cast<u32>(port_cfg.ch_cnt) * static_cast<u32>(port_cfg.freq);
	{
		std::lock_guard lock(ringbuf_mutex);
		use_aux_ringbuf = emu_cfg.enable_time_stretching || emu_cfg.dump_to_file;
		if (use_aux_ringbuf)
		{
			// Two-stage pipeline (aux_ringbuf -> resampler/dumper -> ringbuf)
			const f64 frame_len = std::max<f64>(buffering_len * 0.5, SERVICE_PERIOD_SEC) + cb_frame_len + _10ms;
			const u64 frame_len_bytes = static_cast<u64>(std::round(frame_len * bytes_per_sec));
			aux_ringbuf.set_buf_size(frame_len_bytes);
			ringbuf.set_buf_size(frame_len_bytes);
			thread_tmp_buf.resize(frame_len_bytes);
		}
		else
		{
			// Direct path: single ring buffer, no worker-side temp storage
			const f64 frame_len = std::max<f64>(buffering_len, cb_frame_len) + _10ms;
			ringbuf.set_buf_size(static_cast<u64>(std::round(frame_len * bytes_per_sec)));
			thread_tmp_buf.resize(0);
		}
		callback_tmp_buf.resize(static_cast<usz>((cb_frame_len + _10ms) * static_cast<u32>(AudioSampleSize::FLOAT) * static_cast<u32>(port_cfg.ch_cnt) * static_cast<u32>(port_cfg.freq)));
	}
	callback_cfg.atomic_op([&](callback_config& val)
	{
		val.callback_active = false; // Backend may take some time to activate. This prevents overflows on input side.
		if (!val.cfg_changed)
		{
			// Only publish the new parameters if no further reconfiguration is already pending
			val.freq = static_cast<u32>(port_cfg.freq);
			val.input_ch_cnt = static_cast<u32>(port_cfg.ch_cnt);
			val.output_channel_layout = static_cast<u8>(backend_channel_layout);
			val.convert_to_s16 = emu_cfg.convert_to_s16;
			val.avport_idx = emu_cfg.avport;
			val.ready = true;
		}
	});
}
// Start backend playback. Resets the time stretcher and flushes stale ring
// buffer contents first so playback begins from fresh data.
void rsxaudio_backend_thread::backend_start()
{
	ensure(backend != nullptr);
	if (use_aux_ringbuf)
	{
		resampler.set_tempo(RESAMPLER_MAX_FREQ_VAL);
		resampler.flush();
		aux_ringbuf.reader_flush();
	}
	ringbuf.reader_flush();
	backend->Play();
}
void rsxaudio_backend_thread::backend_stop()
{
if (backend == nullptr)
{
return;
}
backend->Pause();
callback_cfg.atomic_op([&](callback_config& val)
{
val.callback_active = false;
});
}
// True when a backend exists and it reports active playback.
bool rsxaudio_backend_thread::backend_playing()
{
	return backend != nullptr && backend->IsPlaying();
}
// Real-time audio callback: fill 'buf' with up to 'bytes' bytes of output.
// Pops float samples from the ring buffer, optionally records them, downmixes
// to the backend layout, applies (ramped) volume and converts to s16 if
// requested. Returns the number of bytes actually written; on mute/not-ready
// it outputs silence instead.
u32 rsxaudio_backend_thread::write_data_callback(u32 bytes, void* buf)
{
	const callback_config cb_cfg = callback_cfg.atomic_op([&](callback_config& val)
	{
		val.callback_active = true; // First invocation marks the callback as live
		return val;
	});
	const std::bitset<SYS_RSXAUDIO_AVPORT_CNT> mute_state{cb_cfg.mute_state};
	if (cb_cfg.ready && !mute_state[static_cast<u8>(cb_cfg.avport_idx)] && Emu.IsRunning())
	{
		const audio_channel_layout output_channel_layout = static_cast<audio_channel_layout>(cb_cfg.output_channel_layout);
		const u32 output_ch_cnt = AudioBackend::default_layout_channel_count(output_channel_layout);
		// Translate the requested output size into the equivalent amount of
		// input data (input is always float at input_ch_cnt channels)
		const u32 bytes_ch_adjusted = bytes / output_ch_cnt * cb_cfg.input_ch_cnt;
		const u32 bytes_from_rb = cb_cfg.convert_to_s16 ? bytes_ch_adjusted / static_cast<u32>(AudioSampleSize::S16) * static_cast<u32>(AudioSampleSize::FLOAT) : bytes_ch_adjusted;
		ensure(callback_tmp_buf.size() * static_cast<u32>(AudioSampleSize::FLOAT) >= bytes_from_rb);
		const u32 byte_cnt = static_cast<u32>(ringbuf.pop(callback_tmp_buf.data(), bytes_from_rb, true));
		const u32 sample_cnt = byte_cnt / static_cast<u32>(AudioSampleSize::FLOAT);
		const u32 sample_cnt_out = sample_cnt / cb_cfg.input_ch_cnt * output_ch_cnt;
		// Buffer is in weird state - drop acquired data
		if (sample_cnt == 0 || sample_cnt % cb_cfg.input_ch_cnt != 0)
		{
			memset(buf, 0, bytes);
			return bytes;
		}
		// Record audio if enabled
		if (g_recording_mode != recording_mode::stopped)
		{
			utils::video_provider& provider = g_fxo->get<utils::video_provider>();
			provider.present_samples(reinterpret_cast<u8*>(callback_tmp_buf.data()), sample_cnt / cb_cfg.input_ch_cnt, cb_cfg.input_ch_cnt);
		}
		// Downmix if necessary
		AudioBackend::downmix(sample_cnt, cb_cfg.input_ch_cnt, output_channel_layout, callback_tmp_buf.data(), callback_tmp_buf.data());
		if (cb_cfg.target_volume != cb_cfg.current_volume)
		{
			// Volume change in progress: ramp towards the target to avoid clicks
			const AudioBackend::VolumeParam param =
			{
				.initial_volume = cb_cfg.initial_volume * callback_config::VOL_NOMINAL_INV,
				.current_volume = cb_cfg.current_volume * callback_config::VOL_NOMINAL_INV,
				.target_volume = cb_cfg.target_volume * callback_config::VOL_NOMINAL_INV,
				.freq = cb_cfg.freq,
				.ch_cnt = cb_cfg.input_ch_cnt
			};
			const u16 new_vol = static_cast<u16>(std::round(AudioBackend::apply_volume(param, sample_cnt_out, callback_tmp_buf.data(), callback_tmp_buf.data()) * callback_config::VOL_NOMINAL));
			callback_cfg.atomic_op([&](callback_config& val)
			{
				if (val.target_volume != cb_cfg.target_volume)
				{
					// Target changed concurrently - restart the ramp from where we are
					val.initial_volume = new_vol;
				}
				// We don't care about proper volume adjustment if underflow has occured
				val.current_volume = bytes_from_rb != byte_cnt ? val.target_volume : new_vol;
			});
		}
		else if (cb_cfg.current_volume != callback_config::VOL_NOMINAL)
		{
			// Steady-state non-nominal volume: simple scale
			AudioBackend::apply_volume_static(cb_cfg.current_volume * callback_config::VOL_NOMINAL_INV, sample_cnt_out, callback_tmp_buf.data(), callback_tmp_buf.data());
		}
		if (cb_cfg.convert_to_s16)
		{
			AudioBackend::convert_to_s16(sample_cnt_out, callback_tmp_buf.data(), buf);
			return sample_cnt_out * static_cast<u32>(AudioSampleSize::S16);
		}
		AudioBackend::normalize(sample_cnt_out, callback_tmp_buf.data(), static_cast<f32*>(buf));
		return sample_cnt_out * static_cast<u32>(AudioSampleSize::FLOAT);
	}
	// Muted / not ready: discard queued input and output silence
	ringbuf.reader_flush();
	memset(buf, 0, bytes);
	return bytes;
}
// Backend state-event hook: record the event under the state mutex, then
// wake the worker thread so it can react (reopen backend, switch device).
void rsxaudio_backend_thread::state_changed_callback(AudioStateEvent event)
{
	{
		std::lock_guard lock(state_update_m);

		if (event == AudioStateEvent::UNSPECIFIED_ERROR)
		{
			backend_error_occured = true;
		}
		else if (event == AudioStateEvent::DEFAULT_DEVICE_MAYBE_CHANGED)
		{
			backend_device_changed = true;
		}
		else
		{
			fmt::throw_exception("Unknown audio state event");
		}
	}

	state_update_c.notify_all();
}
// Microseconds remaining until the next periodic service deadline (0 if due).
u64 rsxaudio_backend_thread::get_time_until_service()
{
	const u64 deadline = start_time + time_period_idx * SERVICE_PERIOD;
	const u64 now = get_system_time();

	if (now >= deadline)
	{
		return 0;
	}

	return deadline - now;
}
// Advance to the next service period once the current deadline is (nearly) reached.
void rsxaudio_backend_thread::update_service_time()
{
	if (get_time_until_service() <= SERVICE_THRESHOLD)
	{
		time_period_idx++;
	}
}
// Restart the service schedule: first deadline is one SERVICE_PERIOD from now.
void rsxaudio_backend_thread::reset_service_time()
{
	time_period_idx = 1;
	start_time = get_system_time();
}
// Arm the OS timer for the nearest pending virtual-timer deadline.
// get_rel_next_time() returns 0 when a deadline is already due (handled via
// the zero_period fast path) and UINT64_MAX when no vtimer is active (the OS
// timer is then disarmed by passing interval 0 to the platform code below).
void rsxaudio_periodic_tmr::sched_timer()
{
	u64 interval = get_rel_next_time();
	if (interval == 0)
	{
		// Deadline already passed: skip the OS wait entirely on the next wait()
		zero_period = true;
	}
	else if (interval == UINT64_MAX)
	{
		// No active vtimer: disarm
		interval = 0;
		zero_period = false;
	}
	else
	{
		zero_period = false;
	}
#if defined(_WIN32)
	if (interval)
	{
		// Negative due time = relative interval, in 100ns units
		LARGE_INTEGER due_time{};
		due_time.QuadPart = -static_cast<s64>(interval * 10);
		ensure(SetWaitableTimerEx(timer_handle, &due_time, 0, nullptr, nullptr, nullptr, 0));
	}
	else
	{
		ensure(CancelWaitableTimer(timer_handle));
	}
#elif defined(__linux__)
	// One-shot timerfd; an all-zero itimerspec (interval == 0) disarms it
	const time_t secs = interval / 1'000'000;
	const long nsecs = (interval - secs * 1'000'000) * 1000;
	const itimerspec tspec = {{}, { secs, nsecs }};
	ensure(timerfd_settime(timer_handle, 0, &tspec, nullptr) == 0);
#elif defined(BSD) || defined(__APPLE__)
	// kqueue EVFILT_TIMER: data is the period (here in nanoseconds per TMR_CFG)
	handle[TIMER_ID].data = interval * 1000;
	if (interval)
	{
		handle[TIMER_ID].flags = (handle[TIMER_ID].flags & ~EV_DISABLE) | EV_ENABLE;
	}
	else
	{
		handle[TIMER_ID].flags = (handle[TIMER_ID].flags & ~EV_ENABLE) | EV_DISABLE;
	}
	ensure(kevent(kq, &handle[TIMER_ID], 1, nullptr, 0, nullptr) >= 0);
#else
#error "Implement"
#endif
}
// Disarm the OS timer and, if a wait() is currently blocked, fire the cancel
// event so it returns immediately. Caller must hold 'mutex'.
void rsxaudio_periodic_tmr::cancel_timer_unlocked()
{
	zero_period = false;
#if defined(_WIN32)
	ensure(CancelWaitableTimer(timer_handle));
	if (in_wait)
	{
		ensure(SetEvent(cancel_event));
	}
#elif defined(__linux__)
	// All-zero itimerspec disarms the timerfd
	const itimerspec tspec{};
	ensure(timerfd_settime(timer_handle, 0, &tspec, nullptr) == 0);
	if (in_wait)
	{
		// Signal the eventfd; EAGAIN means the counter is already non-zero, which is fine
		const u64 flag = 1;
		const auto wr_res = write(cancel_event, &flag, sizeof(flag));
		ensure(wr_res == sizeof(flag) || wr_res == -EAGAIN);
	}
#elif defined(BSD) || defined(__APPLE__)
	handle[TIMER_ID].flags = (handle[TIMER_ID].flags & ~EV_ENABLE) | EV_DISABLE;
	handle[TIMER_ID].data = 0;
	if (in_wait)
	{
		// Submit both the disabled timer and the (pre-triggered) user cancel event
		ensure(kevent(kq, handle, 2, nullptr, 0, nullptr) >= 0);
	}
	else
	{
		ensure(kevent(kq, &handle[TIMER_ID], 1, nullptr, 0, nullptr) >= 0);
	}
#else
#error "Implement"
#endif
}
// Clear a consumed cancellation so the next wait() is not spuriously woken.
void rsxaudio_periodic_tmr::reset_cancel_flag()
{
#if defined(_WIN32)
	ensure(ResetEvent(cancel_event));
#elif defined(__linux__)
	// Drain the eventfd counter; the read may legitimately fail with EAGAIN
	u64 tmp_buf{};
	[[maybe_unused]] const auto nread = read(cancel_event, &tmp_buf, sizeof(tmp_buf));
#elif defined(BSD) || defined(__APPLE__)
	// Cancel event is reset automatically
#else
#error "Implement"
#endif
}
// Create the per-platform timer and cancellation primitives:
//  - Win32: waitable timer + manual event
//  - Linux: timerfd + eventfd, both registered with one epoll instance
//  - BSD/macOS: a single kqueue carrying an EVFILT_TIMER and an EVFILT_USER
rsxaudio_periodic_tmr::rsxaudio_periodic_tmr()
{
#if defined(_WIN32)
	ensure(cancel_event = CreateEvent(nullptr, false, false, nullptr));
	ensure(timer_handle = CreateWaitableTimer(nullptr, false, nullptr));
#elif defined(__linux__)
	timer_handle = timerfd_create(CLOCK_MONOTONIC, 0);
	ensure((epoll_fd = epoll_create(2)) >= 0);
	epoll_event evnt{ EPOLLIN, {} };
	evnt.data.fd = timer_handle;
	ensure(timer_handle >= 0 && epoll_ctl(epoll_fd, EPOLL_CTL_ADD, timer_handle, &evnt) == 0);
	cancel_event = eventfd(0, EFD_NONBLOCK);
	evnt.data.fd = cancel_event;
	ensure(cancel_event >= 0 && epoll_ctl(epoll_fd, EPOLL_CTL_ADD, cancel_event, &evnt) == 0);
#elif defined(BSD) || defined(__APPLE__)
#if defined(__APPLE__)
	// NOTE_CRITICAL requests higher timer precision on macOS
	static constexpr unsigned int TMR_CFG = NOTE_NSECONDS | NOTE_CRITICAL;
#else
	static constexpr unsigned int TMR_CFG = NOTE_NSECONDS;
#endif
	ensure((kq = kqueue()) >= 0);
	EV_SET(&handle[TIMER_ID], TIMER_ID, EVFILT_TIMER, EV_ADD | EV_ENABLE | EV_ONESHOT, TMR_CFG, 0, nullptr);
	EV_SET(&handle[CANCEL_ID], CANCEL_ID, EVFILT_USER, EV_ADD | EV_ENABLE | EV_CLEAR, NOTE_FFNOP, 0, nullptr);
	ensure(kevent(kq, &handle[CANCEL_ID], 1, nullptr, 0, nullptr) >= 0);
	// Pre-set the trigger flag so submitting this kevent later fires the cancel
	handle[CANCEL_ID].fflags |= NOTE_TRIGGER;
#else
#error "Implement"
#endif
}
// Release the platform timer/cancel handles (closing the kqueue also
// releases its registered kevents on BSD/macOS).
rsxaudio_periodic_tmr::~rsxaudio_periodic_tmr()
{
#if defined(_WIN32)
	CloseHandle(timer_handle);
	CloseHandle(cancel_event);
#elif defined(__linux__)
	close(epoll_fd);
	close(timer_handle);
	close(cancel_event);
#elif defined(BSD) || defined(__APPLE__)
	close(kq);
#else
#error "Implement"
#endif
}
// Block until the armed OS timer fires or cancel_wait() is called, then run
// 'callback' (on normal expiry only) and re-arm the timer for the next
// deadline. The zero_period fast path skips the OS wait entirely when
// sched_timer() saw an already-due deadline. Not reentrant: a second
// concurrent wait() returns INVALID_PARAM.
rsxaudio_periodic_tmr::wait_result rsxaudio_periodic_tmr::wait(const std::function<void()> &callback)
{
	std::unique_lock lock(mutex);
	if (in_wait || !callback)
	{
		return wait_result::INVALID_PARAM;
	}
	in_wait = true;
	bool tmr_error = false;
	bool timeout = false;
	bool wait_canceled = false;
	if (!zero_period)
	{
		// Drop the lock during the OS wait so cancel_wait() can run concurrently
		lock.unlock();
		constexpr u8 obj_wait_cnt = 2;
#if defined(_WIN32)
		const HANDLE wait_arr[obj_wait_cnt] = { timer_handle, cancel_event };
		const auto wait_status = WaitForMultipleObjects(obj_wait_cnt, wait_arr, false, INFINITE);
		if (wait_status == WAIT_FAILED || (wait_status >= WAIT_ABANDONED_0 && wait_status < WAIT_ABANDONED_0 + obj_wait_cnt))
		{
			tmr_error = true;
		}
		else if (wait_status == WAIT_TIMEOUT)
		{
			timeout = true;
		}
		else if (wait_status == WAIT_OBJECT_0 + 1)
		{
			// Index 1 is the cancel event
			wait_canceled = true;
		}
#elif defined(__linux__)
		epoll_event event[obj_wait_cnt]{};
		int wait_status = 0;
		do
		{
			// Retry on signal interruption
			wait_status = epoll_wait(epoll_fd, event, obj_wait_cnt, -1);
		}
		while (wait_status == -1 && errno == EINTR);
		if (wait_status < 0 || wait_status > obj_wait_cnt)
		{
			tmr_error = true;
		}
		else if (wait_status == 0)
		{
			timeout = true;
		}
		else
		{
			// Cancellation wins if both fds are ready
			for (int i = 0; i < wait_status; i++)
			{
				if (event[i].data.fd == cancel_event)
				{
					wait_canceled = true;
					break;
				}
			}
		}
#elif defined(BSD) || defined(__APPLE__)
		struct kevent event[obj_wait_cnt]{};
		int wait_status = 0;
		do
		{
			// Retry on signal interruption
			wait_status = kevent(kq, nullptr, 0, event, obj_wait_cnt, nullptr);
		}
		while (wait_status == -1 && errno == EINTR);
		if (wait_status < 0 || wait_status > obj_wait_cnt)
		{
			tmr_error = true;
		}
		else if (wait_status == 0)
		{
			timeout = true;
		}
		else
		{
			// Cancellation wins if both events are ready
			for (int i = 0; i < wait_status; i++)
			{
				if (event[i].ident == CANCEL_ID)
				{
					wait_canceled = true;
					break;
				}
			}
		}
#else
#error "Implement"
#endif
		lock.lock();
	}
	else
	{
		// Deadline was already due when the timer was scheduled - no OS wait
		zero_period = false;
	}
	in_wait = false;
	if (wait_canceled)
	{
		reset_cancel_flag();
	}
	if (tmr_error)
	{
		return wait_result::TIMER_ERROR;
	}
	if (timeout)
	{
		return wait_result::TIMEOUT;
	}
	if (wait_canceled)
	{
		// Re-arm but skip the callback on cancellation
		sched_timer();
		return wait_result::TIMER_CANCELED;
	}
	callback();
	sched_timer();
	return wait_result::SUCCESS;
}
// Compute the relative delay (microseconds) until the earliest active
// virtual-timer deadline. Returns 0 if any deadline is already due and
// UINT64_MAX if no vtimer is active. Timers that fell too far behind are
// fast-forwarded so at most MAX_BURST_PERIODS periods are delivered in a burst.
u64 rsxaudio_periodic_tmr::get_rel_next_time()
{
	const u64 crnt_time = get_system_time();
	u64 next_time = UINT64_MAX;
	for (vtimer& vtimer : vtmr_pool)
	{
		if (!vtimer.active) continue;
		u64 next_blk_time = static_cast<u64>(vtimer.blk_cnt * vtimer.blk_time);
		if (crnt_time >= next_blk_time)
		{
			// Timer is behind: clamp the backlog to MAX_BURST_PERIODS periods
			const u64 crnt_blk = get_crnt_blk(crnt_time, vtimer.blk_time);
			if (crnt_blk > vtimer.blk_cnt + MAX_BURST_PERIODS)
			{
				vtimer.blk_cnt = std::max(vtimer.blk_cnt, crnt_blk - MAX_BURST_PERIODS);
				next_blk_time = static_cast<u64>(vtimer.blk_cnt * vtimer.blk_time);
			}
		}
		if (crnt_time >= next_blk_time)
		{
			// Still due even after clamping - fire immediately
			next_time = 0;
		}
		else
		{
			next_time = std::min(next_time, next_blk_time - crnt_time);
		}
	}
	return next_time;
}
// Thread-safe cancellation entry point: interrupts an in-progress wait().
void rsxaudio_periodic_tmr::cancel_wait()
{
	std::scoped_lock lock(mutex);

	cancel_timer_unlocked();
}
// Activate a virtual timer at the given data rate. If the timer is already
// running at the same period, its block counter is preserved so the phase of
// the tick stream is not disturbed (the f64 equality compare is intentional:
// identical rates yield bit-identical periods).
void rsxaudio_periodic_tmr::enable_vtimer(u32 vtimer_id, u32 rate, u64 crnt_time)
{
	ensure(vtimer_id < VTIMER_MAX && rate);
	vtimer& vtimer = vtmr_pool[vtimer_id];
	const f64 new_blk_time = get_blk_time(rate);
	// Avoid timer reset when possible
	if (!vtimer.active || new_blk_time != vtimer.blk_time)
	{
		vtimer.blk_cnt = get_crnt_blk(crnt_time, new_blk_time);
	}
	vtimer.blk_time = new_blk_time;
	vtimer.active = true;
}
// Deactivate a virtual timer; its period/counter are left untouched.
void rsxaudio_periodic_tmr::disable_vtimer(u32 vtimer_id)
{
	ensure(vtimer_id < VTIMER_MAX);

	vtmr_pool[vtimer_id].active = false;
}
// Id-based wrapper: true if the given vtimer's deadline has already passed.
bool rsxaudio_periodic_tmr::is_vtimer_behind(u32 vtimer_id, u64 crnt_time) const
{
	ensure(vtimer_id < VTIMER_MAX);

	return is_vtimer_behind(vtmr_pool[vtimer_id], crnt_time);
}
// Fast-forward a lagging vtimer past all missed periods up to 'crnt_time'.
void rsxaudio_periodic_tmr::vtimer_skip_periods(u32 vtimer_id, u64 crnt_time)
{
	ensure(vtimer_id < VTIMER_MAX);

	vtimer& tmr = vtmr_pool[vtimer_id];

	if (!is_vtimer_behind(tmr, crnt_time))
	{
		return;
	}

	tmr.blk_cnt = get_crnt_blk(crnt_time, tmr.blk_time);
}
// Acknowledge a single elapsed period of a lagging vtimer.
void rsxaudio_periodic_tmr::vtimer_incr(u32 vtimer_id, u64 crnt_time)
{
	ensure(vtimer_id < VTIMER_MAX);

	vtimer& tmr = vtmr_pool[vtimer_id];

	if (!is_vtimer_behind(tmr, crnt_time))
	{
		return;
	}

	tmr.blk_cnt++;
}
// Whether the given vtimer slot is currently enabled.
bool rsxaudio_periodic_tmr::is_vtimer_active(u32 vtimer_id) const
{
	ensure(vtimer_id < VTIMER_MAX);

	return vtmr_pool[vtimer_id].active;
}
// Absolute time (microseconds) of the vtimer's next scheduled tick.
u64 rsxaudio_periodic_tmr::vtimer_get_sched_time(u32 vtimer_id) const
{
	ensure(vtimer_id < VTIMER_MAX);

	const vtimer& tmr = vtmr_pool[vtimer_id];
	const f64 sched_time = tmr.blk_cnt * tmr.blk_time;

	return static_cast<u64>(sched_time);
}
// A vtimer is "behind" when active and its block counter trails the block
// index corresponding to 'crnt_time'.
bool rsxaudio_periodic_tmr::is_vtimer_behind(const vtimer& vtimer, u64 crnt_time) const
{
	if (!vtimer.active)
	{
		return false;
	}

	return vtimer.blk_cnt < get_crnt_blk(crnt_time, vtimer.blk_time);
}
// Index of the block period that ends strictly after 'crnt_time'.
u64 rsxaudio_periodic_tmr::get_crnt_blk(u64 crnt_time, f64 blk_time) const
{
	const f64 elapsed_blocks = std::floor(static_cast<f64>(crnt_time) / blk_time);

	return static_cast<u64>(elapsed_blocks) + 1;
}
// Duration in microseconds of one rsxaudio stream block at the given byte rate.
f64 rsxaudio_periodic_tmr::get_blk_time(u32 data_rate) const
{
	const auto block_us_scaled = SYS_RSXAUDIO_STREAM_SIZE * 1'000'000;

	return static_cast<f64>(block_us_scaled) / data_rate;
}
| 62,144
|
C++
|
.cpp
| 1,970
| 28.576142
| 221
| 0.694523
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,355
|
sys_lwcond.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
|
#include "stdafx.h"
#include "sys_lwcond.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_lwmutex.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwcond);
// Deserialize an lwcond from a savestate. Field order must match save() below.
lv2_lwcond::lv2_lwcond(utils::serial& ar)
	: name(ar.pop<be_t<u64>>()) // Object name, stored big-endian as the guest sees it
	, lwid(ar) // Id of the paired lwmutex
	, protocol(ar) // Wakeup scheduling protocol
	, control(ar.pop<decltype(control)>()) // Guest pointer to the userspace sys_lwcond_t
{
}
// Serialize the lwcond to a savestate (field order must match the ctor above).
void lv2_lwcond::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_sync);
	ar(name, lwid, protocol, control);
}
// Syscall: create a lightweight condition variable bound to an existing
// lwmutex. The lwcond inherits the lwmutex's scheduling protocol (except
// SYS_SYNC_RETRY, which is not valid for conds and is downgraded to PRIORITY).
// Returns CELL_ESRCH if the lwmutex does not exist, CELL_EAGAIN if the id
// manager is out of ids.
error_code _sys_lwcond_create(ppu_thread& ppu, vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name)
{
	ppu.state += cpu_flag::wait;
	sys_lwcond.trace(u8"_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, control=*0x%x, name=0x%llx (“%s”))", lwcond_id, lwmutex_id, control, name, lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});
	// Only written inside the idm::check lambda; the early return below
	// guarantees it is never read uninitialized
	u32 protocol;
	// Extract protocol from lwmutex
	if (!idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&protocol](lv2_lwmutex& mutex)
	{
		protocol = mutex.protocol;
	}))
	{
		return CELL_ESRCH;
	}
	if (protocol == SYS_SYNC_RETRY)
	{
		// Lwcond can't have SYS_SYNC_RETRY protocol
		protocol = SYS_SYNC_PRIORITY;
	}
	if (const u32 id = idm::make<lv2_obj, lv2_lwcond>(name, lwmutex_id, protocol, control))
	{
		ppu.check_state();
		*lwcond_id = id;
		return CELL_OK;
	}
	return CELL_EAGAIN;
}
// Syscall: destroy an lwcond. Fails with CELL_EBUSY if threads are sleeping
// on it. If threads are still inside _sys_lwcond_queue_wait (lwmutex_waiters),
// the calling thread blocks until the last of them leaves, then retries the
// withdrawal - hence the outer loop.
error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
{
	ppu.state += cpu_flag::wait;
	sys_lwcond.trace("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
	std::shared_ptr<lv2_lwcond> _cond;
	while (true)
	{
		s32 old_val = 0;
		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
		{
			// Ignore check on first iteration
			if (_cond && std::addressof(cond) != _cond.get())
			{
				// Other thread has destroyed the lwcond earlier
				return CELL_ESRCH;
			}
			std::lock_guard lock(cond.mutex);
			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				// Sleep queue non-empty: cannot destroy
				return CELL_EBUSY;
			}
			// Setting the sign bit marks the cond as dying; remaining waiter
			// count (if any) is returned in old_val
			old_val = cond.lwmutex_waiters.or_fetch(smin);
			if (old_val != smin)
			{
				// De-schedule if waiters were found
				lv2_obj::sleep(ppu);
				// Repeat loop: there are lwmutex waiters inside _sys_lwcond_queue_wait
				return CELL_EAGAIN;
			}
			return {};
		});
		if (!ptr)
		{
			return CELL_ESRCH;
		}
		if (ret)
		{
			if (ret != CELL_EAGAIN)
			{
				return ret;
			}
		}
		else
		{
			break;
		}
		_cond = std::move(ptr);
		// Wait for all lwcond waiters to quit
		while (old_val + 0u > 1u << 31)
		{
			thread_ctrl::wait_on(_cond->lwmutex_waiters, old_val);
			if (ppu.is_stopped())
			{
				// Emulation is pausing/stopping: retry this syscall later
				ppu.state += cpu_flag::again;
				return {};
			}
			old_val = _cond->lwmutex_waiters;
		}
		// Wake up from sleep
		ppu.check_state();
	}
	return CELL_OK;
}
// Syscall: wake one thread waiting on the lwcond (a specific thread if
// ppu_thread_id != umax, otherwise the next per the protocol). The mode
// controls how lwmutex ownership is handed over; see comments below.
// Loops while another signal has suspended the current thread, to keep
// signal ordering deterministic for savestates.
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode)
{
	ppu.state += cpu_flag::wait;
	sys_lwcond.trace("_sys_lwcond_signal(lwcond_id=0x%x, lwmutex_id=0x%x, ppu_thread_id=0x%llx, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);
	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
	// Mode 3: lwmutex was forcefully owned by the calling thread
	if (mode < 1 || mode > 3)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}
	while (true)
	{
		if (ppu.test_stopped())
		{
			ppu.state += cpu_flag::again;
			return {};
		}
		bool finished = true;
		ppu.state += cpu_flag::wait;
		// Lambda returns: -1 = ESRCH, 0 = no thread woken, 1 = one thread woken
		const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> int
		{
			ppu_thread* cpu = nullptr;
			if (ppu_thread_id != u32{umax})
			{
				// Target a specific thread
				cpu = idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(ppu_thread_id));
				if (!cpu)
				{
					return -1;
				}
			}
			lv2_lwmutex* mutex = nullptr;
			if (mode != 2)
			{
				// Modes 1/3 hand the mutex over, so it must exist
				mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
				if (!mutex)
				{
					return -1;
				}
			}
			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				std::lock_guard lock(cond.mutex);
				if (ppu.state & cpu_flag::suspend)
				{
					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
					finished = false;
					return 0;
				}
				if (cpu)
				{
					if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
					{
						// Target thread must be saved in a savestate: retry later
						ppu.state += cpu_flag::again;
						return 0;
					}
				}
				// Pick the waiter: the explicit target or the protocol's choice
				auto result = cpu ? cond.unqueue(cond.sq, cpu) :
					cond.schedule<ppu_thread>(cond.sq, cond.protocol);
				if (result)
				{
					if (static_cast<ppu_thread*>(result)->state & cpu_flag::again)
					{
						ppu.state += cpu_flag::again;
						return 0;
					}
					if (mode == 2)
					{
						// Waiter re-acquires the lwmutex itself and sees EBUSY
						static_cast<ppu_thread*>(result)->gpr[3] = CELL_EBUSY;
					}
					else if (mode == 3 && mutex->load_sq()) [[unlikely]]
					{
						std::lock_guard lock(mutex->mutex);
						// Respect ordering of the sleep queue
						mutex->try_own(result, true);
						auto result2 = mutex->reown<ppu_thread>();
						if (result2->state & cpu_flag::again)
						{
							ppu.state += cpu_flag::again;
							return 0;
						}
						if (result2 != result)
						{
							// A different thread won the mutex; wake it instead
							cond.awake(result2);
							result = nullptr;
						}
					}
					else if (mode == 1)
					{
						// Queue the waiter on the lwmutex; it wakes when the mutex is released
						mutex->try_own(result, true);
						result = nullptr;
					}
					if (result)
					{
						cond.awake(result);
					}
					return 1;
				}
			}
			else
			{
				// Empty queue: still synchronize on the cond mutex for ordering
				cond.mutex.lock_unlock();
				if (ppu.state & cpu_flag::suspend)
				{
					finished = false;
					return 0;
				}
			}
			return 0;
		});
		if (!finished)
		{
			// Wait until the suspension clears, then re-run the signal
			continue;
		}
		if (!cond || cond.ret == -1)
		{
			return CELL_ESRCH;
		}
		if (!cond.ret)
		{
			// No thread was woken: error code depends on mode and targeting
			if (ppu_thread_id == u32{umax})
			{
				if (mode == 3)
				{
					return not_an_error(CELL_ENOENT);
				}
				else if (mode == 2)
				{
					return CELL_OK;
				}
			}
			return not_an_error(CELL_EPERM);
		}
		return CELL_OK;
	}
}
// Syscall: wake all threads waiting on the lwcond. In mode 1 the waiters are
// moved onto the lwmutex sleep queue (mutex was owned by the caller); in
// mode 2 each waiter is resumed directly with EBUSY so it re-acquires the
// mutex itself. Loops like _sys_lwcond_signal while the caller is suspended.
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
	ppu.state += cpu_flag::wait;
	sys_lwcond.trace("_sys_lwcond_signal_all(lwcond_id=0x%x, lwmutex_id=0x%x, mode=%d)", lwcond_id, lwmutex_id, mode);
	// Mode 1: lwmutex was initially owned by the calling thread
	// Mode 2: lwmutex was not owned by the calling thread and waiter hasn't been increased
	if (mode < 1 || mode > 2)
	{
		fmt::throw_exception("Unknown mode (%d)", mode);
	}
	while (true)
	{
		if (ppu.test_stopped())
		{
			ppu.state += cpu_flag::again;
			return {};
		}
		bool finished = true;
		ppu.state += cpu_flag::wait;
		// Lambda returns: -1 = ESRCH, otherwise the number of threads woken
		const auto cond = idm::check<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond) -> int
		{
			lv2_lwmutex* mutex{};
			if (mode != 2)
			{
				mutex = idm::check_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
				if (!mutex)
				{
					return -1;
				}
			}
			if (atomic_storage<ppu_thread*>::load(cond.sq))
			{
				std::lock_guard lock(cond.mutex);
				if (ppu.state & cpu_flag::suspend)
				{
					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
					finished = false;
					return 0;
				}
				u32 result = 0;
				// Abort if any waiter is being captured by a savestate
				for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
				{
					if (cpu->state & cpu_flag::again)
					{
						ppu.state += cpu_flag::again;
						return 0;
					}
				}
				// Detach the whole sleep queue atomically, then drain it
				auto sq = cond.sq;
				atomic_storage<ppu_thread*>::release(cond.sq, nullptr);
				while (const auto cpu = cond.schedule<ppu_thread>(sq, cond.protocol))
				{
					if (mode == 2)
					{
						static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
					}
					if (mode == 1)
					{
						// Queue waiter on the lwmutex instead of waking it
						mutex->try_own(cpu, true);
					}
					else
					{
						lv2_obj::append(cpu);
					}
					result++;
				}
				if (result && mode == 2)
				{
					lv2_obj::awake_all();
				}
				return result;
			}
			else
			{
				// Empty queue: still synchronize on the cond mutex for ordering
				cond.mutex.lock_unlock();
				if (ppu.state & cpu_flag::suspend)
				{
					finished = false;
					return 0;
				}
			}
			return 0;
		});
		if (!finished)
		{
			continue;
		}
		if (!cond || cond.ret == -1)
		{
			return CELL_ESRCH;
		}
		if (mode == 1)
		{
			// Mode 1: return the amount of threads (TODO)
			return not_an_error(cond.ret);
		}
		return CELL_OK;
	}
}
// Syscall: atomically release the lwmutex and sleep on the lwcond, with an
// optional timeout (0 = infinite). On timeout the thread removes itself from
// whichever queue it ended up on (cond or mutex) and returns ETIMEDOUT.
// Contains savestate support: on emulator stop the thread records whether it
// was sleeping on the mutex or the cond so the wait can be resumed on load.
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;
	sys_lwcond.trace("_sys_lwcond_queue_wait(lwcond_id=0x%x, lwmutex_id=0x%x, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);
	// Default result; may be overwritten with ETIMEDOUT below
	ppu.gpr[3] = CELL_OK;
	std::shared_ptr<lv2_lwmutex> mutex;
	auto& sstate = *ppu.optional_savestate_state;
	const auto cond = idm::get<lv2_obj, lv2_lwcond>(lwcond_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwcond& cond)
	{
		mutex = idm::get_unlocked<lv2_obj, lv2_lwmutex>(lwmutex_id);
		if (!mutex)
		{
			return;
		}
		// Increment lwmutex's lwcond's waiters count
		mutex->lwcond_waiters++;
		lv2_obj::prepare_for_sleep(ppu);
		std::lock_guard lock(cond.mutex);
		cond.lwmutex_waiters++;
		// Savestate resume info: was this thread on the mutex sleep queue?
		const bool mutex_sleep = sstate.try_read<bool>().second;
		sstate.clear();
		if (mutex_sleep)
		{
			// Special: loading state from the point of waiting on lwmutex sleep queue
			mutex->try_own(&ppu, true);
		}
		else
		{
			// Add a waiter
			lv2_obj::emplace(cond.sq, &ppu);
		}
		if (!ppu.loaded_from_savestate && !mutex->try_unlock(false))
		{
			std::lock_guard lock2(mutex->mutex);
			// Process lwmutex sleep queue
			if (const auto cpu = mutex->reown<ppu_thread>())
			{
				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
				{
					// New owner is being captured by a savestate: back out
					ensure(cond.unqueue(cond.sq, &ppu));
					ppu.state += cpu_flag::again;
					return;
				}
				// Put the current thread to sleep and schedule lwmutex waiter atomically
				cond.append(cpu);
				cond.sleep(ppu, timeout);
				return;
			}
		}
		cond.sleep(ppu, timeout);
	});
	if (!cond || !mutex)
	{
		return CELL_ESRCH;
	}
	if (ppu.state & cpu_flag::again)
	{
		return CELL_OK;
	}
	// Main wait loop: runs until signaled, timed out, or the emulator stops
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}
		if (is_stopped(state))
		{
			// Emulator stopping: find which queue (if any) we're still on and
			// persist that in the savestate for resume
			std::scoped_lock lock(cond->mutex, mutex->mutex);
			bool mutex_sleep = false;
			bool cond_sleep = false;
			for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					mutex_sleep = true;
					break;
				}
			}
			for (auto cpu = atomic_storage<ppu_thread*>::load(cond->sq); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					cond_sleep = true;
					break;
				}
			}
			if (!cond_sleep && !mutex_sleep)
			{
				// Already woken: nothing to save, finish normally
				break;
			}
			sstate(mutex_sleep);
			ppu.state += cpu_flag::again;
			break;
		}
		// Short spin before going through the timeout/wait machinery
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}
		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}
		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}
				ppu.state += cpu_flag::wait;
				std::lock_guard lock(cond->mutex);
				if (cond->unqueue(cond->sq, &ppu))
				{
					// Still on the cond queue: plain timeout
					ppu.gpr[3] = CELL_ETIMEDOUT;
					break;
				}
				// Signal raced with the timeout and moved us to the lwmutex
				// queue; try to remove ourselves from there instead
				std::lock_guard lock2(mutex->mutex);
				bool success = false;
				mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t& data)
				{
					success = false;
					ppu_thread* sq = static_cast<ppu_thread*>(data.sq);
					const bool retval = &ppu == sq;
					if (!mutex->unqueue<false>(sq, &ppu))
					{
						return false;
					}
					success = true;
					if (!retval)
					{
						// Queue head unchanged: no atomic update needed
						return false;
					}
					data.sq = sq;
					return true;
				});
				if (success)
				{
					ppu.next_cpu = nullptr;
					ppu.gpr[3] = CELL_ETIMEDOUT;
				}
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}
	if (--mutex->lwcond_waiters == smin)
	{
		// Notify the thread destroying lwmutex on last waiter
		mutex->lwcond_waiters.notify_all();
	}
	if (--cond->lwmutex_waiters == smin)
	{
		// Notify the thread destroying lwcond on last waiter
		cond->lwmutex_waiters.notify_all();
	}
	// Return cause
	return not_an_error(ppu.gpr[3]);
}
| 12,306
|
C++
|
.cpp
| 507
| 20.023669
| 199
| 0.616552
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,356
|
sys_time.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_time.cpp
|
#include "stdafx.h"
#include "sys_time.h"
#include "sys_process.h"
#include "Emu/system_config.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"
#include "util/asm.hpp"
#include "util/sysinfo.hpp"
static u64 timebase_offset;
static u64 systemtime_offset;
#ifdef _WIN32
#include <Windows.h>
// Win32-only auxiliary time info, captured once at startup so later queries
// can be expressed relative to a fixed origin.
struct time_aux_info_t
{
	u64 perf_freq;   // QueryPerformanceFrequency ticks per second
	u64 start_time;  // QueryPerformanceCounter value at startup
	u64 start_ftime; // time in 100ns units since Epoch
};
// Initialize time-related values
const auto s_time_aux_info = []() -> time_aux_info_t
{
	LARGE_INTEGER freq;
	if (!QueryPerformanceFrequency(&freq))
	{
		MessageBox(nullptr, L"Your hardware doesn't support a high-resolution performance counter", L"Error", MB_OK | MB_ICONERROR);
		return {};
	}
	LARGE_INTEGER start;
	QueryPerformanceCounter(&start); // get time in 1/perf_freq units from RDTSC
	FILETIME ftime;
	GetSystemTimeAsFileTime(&ftime); // get time in 100ns units since January 1, 1601 (UTC)
	time_aux_info_t result;
	result.perf_freq = freq.QuadPart;
	result.start_time = start.QuadPart;
	// Constant converts the FILETIME epoch (1601) to the Unix epoch (1970)
	result.start_ftime = (ftime.dwLowDateTime | static_cast<u64>(ftime.dwHighDateTime) << 32) - 116444736000000000;
	return result;
}();
#elif __APPLE__
// XXX only supports a single timer
#if !defined(HAVE_CLOCK_GETTIME)
#define TIMER_ABSTIME -1
// The opengroup spec isn't clear on the mapping from REALTIME to CALENDAR being appropriate or not.
// http://pubs.opengroup.org/onlinepubs/009695299/basedefs/time.h.html
#define CLOCK_REALTIME 1 // #define CALENDAR_CLOCK 1 from mach/clock_types.h
#define CLOCK_MONOTONIC 0 // #define SYSTEM_CLOCK 0
// the mach kernel uses struct mach_timespec, so struct timespec is loaded from <sys/_types/_timespec.h> for compatability
// struct timespec { time_t tv_sec; long tv_nsec; };
#include <sys/types.h>
#include <sys/_types/_timespec.h>
#include <mach/mach.h>
#include <mach/clock.h>
#include <mach/mach_time.h>
#undef CPU_STATE_MAX
#define MT_NANO (+1.0E-9)
#define MT_GIGA UINT64_C(1000000000)
// TODO create a list of timers,
// TODO create a list of timers,
// Lazily captured mach timebase (ns per tick) and start timestamp for the
// single TIMER_ABSTIME timer supported by this shim.
static double mt_timebase = 0.0;
static u64 mt_timestart = 0;
// Fallback clock_gettime() for old macOS without the native function:
// TIMER_ABSTIME is served from mach_absolute_time, other clock ids are
// forwarded to the corresponding mach clock service.
static int clock_gettime(int clk_id, struct timespec* tp)
{
	kern_return_t retval = KERN_SUCCESS;
	if (clk_id == TIMER_ABSTIME)
	{
		if (!mt_timestart)
		{
			// only one timer, initilized on the first call to the TIMER
			mach_timebase_info_data_t tb = {0};
			mach_timebase_info(&tb);
			mt_timebase = tb.numer;
			mt_timebase /= tb.denom;
			mt_timestart = mach_absolute_time();
		}
		// Elapsed nanoseconds since the first call, split into sec/nsec
		double diff = (mach_absolute_time() - mt_timestart) * mt_timebase;
		tp->tv_sec = diff * MT_NANO;
		tp->tv_nsec = diff - (tp->tv_sec * MT_GIGA);
	}
	else // other clk_ids are mapped to the coresponding mach clock_service
	{
		clock_serv_t cclock;
		mach_timespec_t mts;
		host_get_clock_service(mach_host_self(), clk_id, &cclock);
		retval = clock_get_time(cclock, &mts);
		mach_port_deallocate(mach_task_self(), cclock);
		tp->tv_sec = mts.tv_sec;
		tp->tv_nsec = mts.tv_nsec;
	}
	return retval;
}
#endif
#endif
#ifndef _WIN32
#include <sys/time.h>
// Non-Windows: capture the wall-clock startup time once; also initializes
// timezone data (tzset) as a side effect.
static struct timespec start_time = []()
{
	struct timespec ts;
	if (::clock_gettime(CLOCK_REALTIME, &ts) != 0)
	{
		// Fatal error
		std::terminate();
	}
	tzset();
	return ts;
}();
#endif
LOG_CHANNEL(sys_time);
static constexpr u64 g_timebase_freq = /*79800000*/ 80000000ull; // 80 Mhz
// Convert time is microseconds to timebased time
// Convert a time value given in microseconds into timebase ticks,
// applying the configured clock scaling and removing the boot-time offset.
// time: microseconds. Returns the corresponding timebase tick count.
u64 convert_to_timebased_time(u64 time)
{
	// Ticks per microsecond at the emulated 80 MHz timebase
	const u64 ticks_per_us = g_timebase_freq / 1000000ull;
	const u64 scaled = time * ticks_per_us * g_cfg.core.clocks_scale / 100u;

	// The offset is subtracted so the guest sees time starting near zero at boot
	ensure(scaled >= timebase_offset);
	return scaled - timebase_offset;
}
// Return the current emulated PPU timebase value (80 MHz ticks, scaled by the
// Clocks scale setting, offset so the count starts near zero at game boot).
// Prefers the invariant TSC when its frequency is known; otherwise falls back
// to QueryPerformanceCounter (Windows) or clock_gettime (POSIX).
u64 get_timebased_time()
{
	if (u64 freq = utils::get_tsc_freq())
	{
		const u64 tsc = utils::get_tsc();

#if _MSC_VER
		// 128-bit multiply avoids overflow of tsc * g_timebase_freq
		const u64 result = static_cast<u64>(u128_from_mul(tsc, g_timebase_freq) / freq) * g_cfg.core.clocks_scale / 100u;
#else
		// Split into quotient and remainder parts to avoid 64-bit overflow
		const u64 result = (tsc / freq * g_timebase_freq + tsc % freq * g_timebase_freq / freq) * g_cfg.core.clocks_scale / 100u;
#endif
		return result - timebase_offset;
	}

	// Fallback path: loop until a non-zero counter value is obtained
	while (true)
	{
#ifdef _WIN32
		LARGE_INTEGER count;
		ensure(QueryPerformanceCounter(&count));

		const u64 time = count.QuadPart;
		const u64 freq = s_time_aux_info.perf_freq;

#if _MSC_VER
		const u64 result = static_cast<u64>(u128_from_mul(time * g_cfg.core.clocks_scale, g_timebase_freq) / freq / 100u);
#else
		const u64 result = (time / freq * g_timebase_freq + time % freq * g_timebase_freq / freq) * g_cfg.core.clocks_scale / 100u;
#endif
#else
		struct timespec ts;
		ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

		const u64 result = (static_cast<u64>(ts.tv_sec) * g_timebase_freq + static_cast<u64>(ts.tv_nsec) * g_timebase_freq / 1000000000ull) * g_cfg.core.clocks_scale / 100u;
#endif

		if (result) return result - timebase_offset;
	}
}
// Add an offset to get_timebased_time to avoid leaking PC's uptime into the game
// As if PS3 starts at value 0 (base time) when the game boots
// If none-zero arg is specified it will become the base time (for savestates)
// timebased_init: desired current timebase value (0 for a fresh boot; a saved
//                 value when restoring a savestate).
// reset: when true, simply zero the offsets and return.
void initialize_timebased_time(u64 timebased_init, bool reset)
{
	// Must be zeroed BEFORE calling get_timebased_time() below, so that call
	// returns the raw (un-offset) counter value
	timebase_offset = 0;

	if (reset)
	{
		// We simply want to zero-out these values
		systemtime_offset = 0;
		return;
	}

	const u64 current = get_timebased_time();
	timebased_init = current - timebased_init;

	timebase_offset = timebased_init;
	// Keep the microsecond-based system time offset in sync with the tick offset
	systemtime_offset = timebased_init / (g_timebase_freq / 1000000);
}
// Returns some relative time in microseconds, don't change this fact
// Returns some relative time in microseconds, don't change this fact
// (callers rely on it being monotonic and relative, not an absolute epoch time).
u64 get_system_time()
{
	if (u64 freq = utils::get_tsc_freq())
	{
		const u64 tsc = utils::get_tsc();

#if _MSC_VER
		// 128-bit multiply avoids overflow of tsc * 1000000
		const u64 result = static_cast<u64>(u128_from_mul(tsc, 1000000ull) / freq);
#else
		const u64 result = (tsc / freq * 1000000ull + tsc % freq * 1000000ull / freq);
#endif
		return result;
	}

	// Fallback: loop until a non-zero value is read
	while (true)
	{
#ifdef _WIN32
		LARGE_INTEGER count;
		ensure(QueryPerformanceCounter(&count));

		const u64 time = count.QuadPart;
		const u64 freq = s_time_aux_info.perf_freq;

#if _MSC_VER
		const u64 result = static_cast<u64>(u128_from_mul(time, 1000000ull) / freq);
#else
		const u64 result = time / freq * 1000000ull + (time % freq) * 1000000ull / freq;
#endif
#else
		struct timespec ts;
		ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0);

		const u64 result = static_cast<u64>(ts.tv_sec) * 1000000ull + static_cast<u64>(ts.tv_nsec) / 1000u;
#endif

		if (result) return result;
	}
}
// As get_system_time but obeys Clocks scaling setting
// As get_system_time but obeys Clocks scaling setting.
// time: a host microsecond value to convert, or umax to sample the clock now.
u64 get_guest_system_time(u64 time)
{
	u64 base = time;

	if (base == umax)
	{
		// No explicit time supplied: sample the current system time
		base = get_system_time();
	}

	const u64 scaled = base * g_cfg.core.clocks_scale / 100;
	return scaled - systemtime_offset;
}
// Functions
// Syscall: set the system timezone. Only permitted for root processes;
// the actual value is ignored by the emulator (no-op on success).
error_code sys_time_set_timezone(s32 timezone, s32 summertime)
{
	sys_time.trace("sys_time_set_timezone(timezone=0x%x, summertime=0x%x)", timezone, summertime);

	if (g_ps3_process_info.has_root_perm())
	{
		return CELL_OK;
	}

	// Non-root callers are rejected
	return CELL_ENOSYS;
}
// Syscall: report the host timezone to the guest.
// timezone: out, minutes east of UTC. summertime: out, DST bias in minutes (0 if none).
error_code sys_time_get_timezone(vm::ptr<s32> timezone, vm::ptr<s32> summertime)
{
	sys_time.trace("sys_time_get_timezone(timezone=*0x%x, summertime=*0x%x)", timezone, summertime);

#ifdef _WIN32
	TIME_ZONE_INFORMATION tz{};

	// Windows Bias values are minutes WEST of UTC, hence the negation below
	switch (GetTimeZoneInformation(&tz))
	{
	case TIME_ZONE_ID_UNKNOWN:
	{
		*timezone = -tz.Bias;
		*summertime = 0;
		break;
	}
	case TIME_ZONE_ID_STANDARD:
	{
		*timezone = -tz.Bias;
		*summertime = -tz.StandardBias;

		if (tz.StandardBias)
		{
			sys_time.error("Unexpected timezone bias (base=%d, std=%d, daylight=%d)", tz.Bias, tz.StandardBias, tz.DaylightBias);
		}
		break;
	}
	case TIME_ZONE_ID_DAYLIGHT:
	{
		*timezone = -tz.Bias;
		*summertime = -tz.DaylightBias;
		break;
	}
	default:
	{
		ensure(0);
	}
	}
#elif __linux__
	// glibc globals filled in by tzset() (called in the start_time initializer);
	// ::timezone is seconds west of UTC
	*timezone = ::narrow<s16>(-::timezone / 60);

	*summertime = !::daylight ? 0 : []() -> s32
	{
		struct tm test{};
		ensure(&test == localtime_r(&start_time.tv_sec, &test));

		// Check bounds [0,1]
		if (test.tm_isdst & -2)
		{
			sys_time.error("No information for timezone DST bias (timezone=%.2fh, tm_gmtoff=%d)", -::timezone / 3600.0, test.tm_gmtoff);
			return 0;
		}
		else
		{
			// DST bias = total offset minus the standard offset
			return test.tm_isdst ? ::narrow<s16>((test.tm_gmtoff + ::timezone) / 60) : 0;
		}
	}();
#else
	// gettimeofday doesn't return timezone on linux anymore, but this should work on other OSes?
	struct timezone tz{};
	ensure(gettimeofday(nullptr, &tz) == 0);

	*timezone = ::narrow<s16>(-tz.tz_minuteswest);

	*summertime = !tz.tz_dsttime ? 0 : [&]() -> s32
	{
		struct tm test{};
		ensure(&test == localtime_r(&start_time.tv_sec, &test));

		return test.tm_isdst ? ::narrow<s16>(test.tm_gmtoff / 60 + tz.tz_minuteswest) : 0;
	}();
#endif

	return CELL_OK;
}
// Syscall: get the current wall-clock time, scaled by the Clocks setting and
// shifted by the configured console time offset.
// sec: out, seconds since Epoch (required). nsec: out, nanosecond remainder (optional).
// NOTE(review): *sec is written before the nsec null-check, so a null nsec still
// updates sec and then returns CELL_EFAULT — presumably mirrors LV2 behavior; verify.
error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec)
{
	sys_time.trace("sys_time_get_current_time(sec=*0x%x, nsec=*0x%x)", sec, nsec);

	if (!sec)
	{
		return CELL_EFAULT;
	}

#ifdef _WIN32
	LARGE_INTEGER count;
	ensure(QueryPerformanceCounter(&count));

	const u64 diff_base = count.QuadPart - s_time_aux_info.start_time;

	// Get time difference in nanoseconds (using 128 bit accumulator)
	const u64 diff_sl = diff_base * 1000000000ull;
	const u64 diff_sh = utils::umulh64(diff_base, 1000000000ull);
	const u64 diff = utils::udiv128(diff_sh, diff_sl, s_time_aux_info.perf_freq);

	// get time since Epoch in nanoseconds
	const u64 time = s_time_aux_info.start_ftime * 100u + (diff * g_cfg.core.clocks_scale / 100u);

	// scale to seconds, and add the console time offset (which might be negative)
	*sec = (time / 1000000000ull) + g_cfg.sys.console_time_offset;

	if (!nsec)
	{
		return CELL_EFAULT;
	}

	*nsec = time % 1000000000ull;
#else
	struct timespec ts;
	ensure(::clock_gettime(CLOCK_REALTIME, &ts) == 0);

	if (g_cfg.core.clocks_scale == 100)
	{
		// Fast path: no scaling needed
		// get the seconds from the system clock, and add the console time offset (which might be negative)
		*sec = ts.tv_sec + g_cfg.sys.console_time_offset;

		if (!nsec)
		{
			return CELL_EFAULT;
		}

		*nsec = ts.tv_nsec;
		return CELL_OK;
	}

	// Scaled path: only the time elapsed since process start is scaled,
	// so the absolute epoch base stays intact
	u64 tv_sec = ts.tv_sec, stv_sec = start_time.tv_sec;
	u64 tv_nsec = ts.tv_nsec, stv_nsec = start_time.tv_nsec;

	// Substruct time since Epoch and since start time
	tv_sec -= stv_sec;

	if (tv_nsec < stv_nsec)
	{
		// Correct value if borrow encountered
		tv_sec -= 1;
		tv_nsec = 1'000'000'000ull - (stv_nsec - tv_nsec);
	}
	else
	{
		tv_nsec -= stv_nsec;
	}

	// Scale nanocseconds
	tv_nsec = stv_nsec + (tv_nsec * g_cfg.core.clocks_scale / 100);

	// Scale seconds and add from nanoseconds / 1'000'000'000, and add the console time offset (which might be negative)
	*sec = stv_sec + (tv_sec * g_cfg.core.clocks_scale / 100u) + (tv_nsec / 1000000000ull) + g_cfg.sys.console_time_offset;

	if (!nsec)
	{
		return CELL_EFAULT;
	}

	// Set nanoseconds
	*nsec = tv_nsec % 1000000000ull;
#endif

	return CELL_OK;
}
// Syscall: set the system time. Root-only; value is ignored (no-op on success).
error_code sys_time_set_current_time(s64 sec, s64 nsec)
{
	sys_time.trace("sys_time_set_current_time(sec=0x%x, nsec=0x%x)", sec, nsec);

	if (g_ps3_process_info.has_root_perm())
	{
		return CELL_OK;
	}

	// Non-root callers are rejected
	return CELL_ENOSYS;
}
// Syscall: return the timebase frequency in Hz (constant 80 MHz here).
u64 sys_time_get_timebase_frequency()
{
	sys_time.trace("sys_time_get_timebase_frequency()");

	return g_timebase_freq;
}
// Syscall: read the real-time clock. Unimplemented stub — rtc is never written.
error_code sys_time_get_rtc(vm::ptr<u64> rtc)
{
	sys_time.todo("sys_time_get_rtc(rtc=*0x%x)", rtc);

	return CELL_OK;
}
| 11,120
|
C++
|
.cpp
| 360
| 28.666667
| 167
| 0.708704
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,357
|
sys_mmapper.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
|
#include "stdafx.h"
#include "sys_mmapper.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/lv2/sys_event.h"
#include "Emu/Memory/vm_var.h"
#include "sys_memory.h"
#include "sys_sync.h"
#include "sys_process.h"
#include <span>
#include "util/vm.hpp"
LOG_CHANNEL(sys_mmapper);
// Formatter specialization: pretty-print lv2_mem_container_id values in logs.
template <>
void fmt_class_string<lv2_mem_container_id>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		case SYS_MEMORY_CONTAINER_ID_INVALID: return "Global";
		}

		// Resort to hex formatting for other values
		return unknown;
	});
}
// Construct a fresh shared-memory object backed by a new shareable shm of `size` bytes.
// ct: owning memory container (accounting only; not owned by this object).
lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv2_memory_container* ct)
	: size(size)
	, align(align)
	, flags(flags)
	, key(key)
	, pshared(pshared)
	, ct(ct)
	, shm(std::make_shared<utils::shm>(size, 1 /* shareable flag */))
{
#ifndef _WIN32
	// Optimization that's useless on Windows :puke:
	utils::memory_lock(shm->map_self(), size);
#endif
}
// Deserialize from a savestate. Member initializer order matters: `size` must be
// read before the shm lambda runs, since the lambda uses it for allocation.
lv2_memory::lv2_memory(utils::serial& ar)
	: size(ar)
	, align(ar)
	, flags(ar)
	, key(ar)
	, pshared(ar)
	, ct(lv2_memory_container::search(ar.pop<u32>()))
	, shm([&](u32 addr)
	{
		if (addr)
		{
			// Memory was mapped when saved: reuse the shm already attached at that address
			return ensure(vm::get(vm::any, addr)->peek(addr).second);
		}

		// Otherwise the raw contents were serialized: restore them into a new shm
		const auto _shm = std::make_shared<utils::shm>(size, 1);
		ar(std::span(_shm->map_self(), size));
		return _shm;
	}(ar.pop<u32>()))
	, counter(ar)
{
#ifndef _WIN32
	// Optimization that's useless on Windows :puke:
	utils::memory_lock(shm->map_self(), size);
#endif
}
// IDM hook invoked when this object receives an ID. Charges `size` bytes to the
// memory container on first creation; returns CELL_ENOMEM if the container is full.
CellError lv2_memory::on_id_create()
{
	// `exists` is non-zero when re-importing (e.g. savestate load), in which case
	// the container was already charged
	if (!exists && !ct->take(size))
	{
		sys_mmapper.error("lv2_memory::on_id_create(): Cannot allocate 0x%x bytes (0x%x available)", size, ct->size - ct->used);
		return CELL_ENOMEM;
	}

	exists++;
	return {};
}
// Savestate factory: deserialize an lv2_memory and register it in the IPC table.
std::shared_ptr<void> lv2_memory::load(utils::serial& ar)
{
	auto mem = std::make_shared<lv2_memory>(ar);

	// Temporarily bump `exists` so on_id_create() does not re-charge the container
	mem->exists++; // Disable on_id_create()
	std::shared_ptr<void> ptr = lv2_obj::load(mem->key, mem, +mem->pshared);
	mem->exists--;
	return ptr;
}
// Serialize to a savestate. If unmapped (counter == 0), the raw contents are
// stored; otherwise only the mapped address is stored and the serial ctor
// re-attaches to the shm at that address.
void lv2_memory::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_memory);

	// Keep field order in sync with the utils::serial constructor above
	ar(size, align, flags, key, pshared, ct->id);

	ar(counter ? vm::get_shm_addr(shm) : 0);

	if (!counter)
	{
		ar(std::span(shm->map_self(), size));
	}

	ar(counter);
}
// Deserialize the page-fault notification table from a savestate.
page_fault_notification_entries::page_fault_notification_entries(utils::serial& ar)
{
	ar(entries);
}
// Serialize the page-fault notification table into a savestate.
void page_fault_notification_entries::save(utils::serial& ar)
{
	ar(entries);
}
// Create an lv2_memory object and register it with the IPC key table.
// exclusive: when true, creation fails if an object with `ipc_key` already exists
//            (used by the *_ext variants).
// pshared: process-shared flag; a non-shared object ignores the supplied ipc_key.
template <bool exclusive = false>
error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align, u64 flags, lv2_memory_container* ct)
{
	const u32 _pshared = pshared ? SYS_SYNC_PROCESS_SHARED : SYS_SYNC_NOT_PROCESS_SHARED;

	if (!pshared)
	{
		// Non-shared memory never participates in IPC lookup
		ipc_key = 0;
	}

	if (auto error = lv2_obj::create<lv2_memory>(_pshared, ipc_key, exclusive ? SYS_SYNC_NEWLY_CREATED : SYS_SYNC_NOT_CARE, [&]()
	{
		return std::make_shared<lv2_memory>(
			static_cast<u32>(size),
			align,
			flags,
			ipc_key,
			pshared,
			ct);
	}, false))
	{
		return error;
	}

	return CELL_OK;
}
// Syscall: reserve a virtual address region (no physical backing yet).
// size must be a multiple of 256 MB; alignment must be a power of two in [256 MB, 2 GB].
// alloc_addr: out, base address of the reserved region.
error_code sys_mmapper_allocate_address(ppu_thread& ppu, u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_allocate_address(size=0x%x, flags=0x%x, alignment=0x%x, alloc_addr=*0x%x)", size, flags, alignment, alloc_addr);

	if (size % 0x10000000)
	{
		return CELL_EALIGN;
	}

	if (size > u32{umax})
	{
		return CELL_ENOMEM;
	}

	// This is a workaround for psl1ght, which gives us an alignment of 0, which is technically invalid, but apparently is allowed on actual ps3
	// https://github.com/ps3dev/PSL1GHT/blob/534e58950732c54dc6a553910b653c99ba6e9edc/ppu/librt/sbrk.c#L71
	if (!alignment)
	{
		alignment = 0x10000000;
	}

	switch (alignment)
	{
	case 0x10000000:
	case 0x20000000:
	case 0x40000000:
	case 0x80000000:
	{
		if (const auto area = vm::find_map(static_cast<u32>(size), static_cast<u32>(alignment), flags & SYS_MEMORY_PAGE_SIZE_MASK))
		{
			sys_mmapper.warning("sys_mmapper_allocate_address(): Found VM 0x%x area (vsize=0x%x)", area->addr, size);

			// check_state() before writing guest memory after a potentially long operation
			ppu.check_state();
			*alloc_addr = area->addr;

			return CELL_OK;
		}

		return CELL_ENOMEM;
	}
	}

	// Any other alignment value is invalid
	return CELL_EALIGN;
}
// Syscall: reserve the fixed 256 MB region at 0xB0000000 (1 MB pages).
// Fails with CELL_EEXIST if the region is already mapped.
error_code sys_mmapper_allocate_fixed_address(ppu_thread& ppu)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_allocate_fixed_address()");

	const auto mapped = vm::map(0xB0000000, 0x10000000, SYS_MEMORY_PAGE_SIZE_1M);

	if (!mapped)
	{
		return CELL_EEXIST;
	}

	return CELL_OK;
}
// Syscall: allocate a shared memory object from the default memory container.
// ipc_key: IPC key, or SYS_MMAPPER_NO_SHM_KEY for process-local memory.
// size must match the page granularity selected in flags (1 MB default, or 64 KB).
// mem_id: out, ID of the created object.
error_code sys_mmapper_allocate_shared_memory(ppu_thread& ppu, u64 ipc_key, u64 size, u64 flags, vm::ptr<u32> mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_allocate_shared_memory(ipc_key=0x%x, size=0x%x, flags=0x%x, mem_id=*0x%x)", ipc_key, size, flags, mem_id);

	if (size == 0)
	{
		return CELL_EALIGN;
	}

	// Check page granularity
	switch (flags & SYS_MEMORY_GRANULARITY_MASK)
	{
	case 0:
	case SYS_MEMORY_GRANULARITY_1M:
	{
		if (size % 0x100000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	case SYS_MEMORY_GRANULARITY_64K:
	{
		if (size % 0x10000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	default:
	{
		return CELL_EINVAL;
	}
	}

	// Get "default" memory container
	auto& dct = g_fxo->get<lv2_memory_container>();

	if (auto error = create_lv2_shm(ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size, flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct))
	{
		return error;
	}

	// check_state() before writing guest memory
	ppu.check_state();
	*mem_id = idm::last_id();
	return CELL_OK;
}
// Syscall: allocate a shared memory object charged to a specific memory container.
// cid: memory container ID; CELL_ESRCH if it does not exist.
// Otherwise identical to sys_mmapper_allocate_shared_memory.
error_code sys_mmapper_allocate_shared_memory_from_container(ppu_thread& ppu, u64 ipc_key, u64 size, u32 cid, u64 flags, vm::ptr<u32> mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_allocate_shared_memory_from_container(ipc_key=0x%x, size=0x%x, cid=0x%x, flags=0x%x, mem_id=*0x%x)", ipc_key, size, cid, flags, mem_id);

	if (size == 0)
	{
		return CELL_EALIGN;
	}

	// Check page granularity.
	switch (flags & SYS_MEMORY_GRANULARITY_MASK)
	{
	case 0:
	case SYS_MEMORY_GRANULARITY_1M:
	{
		if (size % 0x100000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	case SYS_MEMORY_GRANULARITY_64K:
	{
		if (size % 0x10000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	default:
	{
		return CELL_EINVAL;
	}
	}

	const auto ct = idm::get<lv2_memory_container>(cid);

	if (!ct)
	{
		return CELL_ESRCH;
	}

	if (auto error = create_lv2_shm(ipc_key != SYS_MMAPPER_NO_SHM_KEY, ipc_key, size, flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, ct.get()))
	{
		return error;
	}

	// check_state() before writing guest memory
	ppu.check_state();
	*mem_id = idm::last_id();
	return CELL_OK;
}
// Syscall (partially implemented): allocate shared memory with extended attribute
// entries. The entry structure layout is unknown; entry type 5 requires 64K pages
// and debug/root permissions. Always creates the object with exclusive IPC key.
error_code sys_mmapper_allocate_shared_memory_ext(ppu_thread& ppu, u64 ipc_key, u64 size, u32 flags, vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count, vm::ptr<u32> mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.todo("sys_mmapper_allocate_shared_memory_ext(ipc_key=0x%x, size=0x%x, flags=0x%x, entries=*0x%x, entry_count=0x%x, mem_id=*0x%x)", ipc_key, size, flags, entries, entry_count, mem_id);

	if (size == 0)
	{
		return CELL_EALIGN;
	}

	// Check page granularity against size
	switch (flags & SYS_MEMORY_GRANULARITY_MASK)
	{
	case SYS_MEMORY_GRANULARITY_1M:
	case 0:
	{
		if (size % 0x100000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	case SYS_MEMORY_GRANULARITY_64K:
	{
		if (size % 0x10000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	default:
	{
		return CELL_EINVAL;
	}
	}

	// No flag bits outside the page-size mask are accepted
	if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK)
	{
		return CELL_EINVAL;
	}

	if (entry_count <= 0 || entry_count > 0x10)
	{
		return CELL_EINVAL;
	}

	// `if constexpr (init; true)` is used purely to scope to_perm_check to this section
	if constexpr (bool to_perm_check = false; true)
	{
		for (s32 i = 0; i < entry_count; i++)
		{
			const u64 type = entries[i].type;

			// The whole structure contents are unknown
			sys_mmapper.todo("sys_mmapper_allocate_shared_memory_ext(): entry type = 0x%x", type);

			switch (type)
			{
			case 0:
			case 1:
			case 3:
			{
				break;
			}
			case 5:
			{
				// Type 5 entries trigger the permission check below
				to_perm_check = true;
				break;
			}
			default:
			{
				return CELL_EPERM;
			}
			}
		}

		if (to_perm_check)
		{
			if (flags != SYS_MEMORY_PAGE_SIZE_64K || !g_ps3_process_info.debug_or_root())
			{
				return CELL_EPERM;
			}
		}
	}

	// Get "default" memory container
	auto& dct = g_fxo->get<lv2_memory_container>();

	// Exclusive creation: fails if ipc_key is already in use
	if (auto error = create_lv2_shm<true>(true, ipc_key, size, flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, &dct))
	{
		return error;
	}

	ppu.check_state();
	*mem_id = idm::last_id();
	return CELL_OK;
}
// Syscall (partially implemented): extended shared memory allocation charged to a
// specific memory container. Same entry/permission rules as the _ext variant above.
error_code sys_mmapper_allocate_shared_memory_from_container_ext(ppu_thread& ppu, u64 ipc_key, u64 size, u64 flags, u32 cid, vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count, vm::ptr<u32> mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext(ipc_key=0x%x, size=0x%x, flags=0x%x, cid=0x%x, entries=*0x%x, entry_count=0x%x, mem_id=*0x%x)", ipc_key, size, flags, cid, entries,
		entry_count, mem_id);

	// Check page granularity against size (note: no size == 0 rejection here,
	// unlike the other allocators)
	switch (flags & SYS_MEMORY_PAGE_SIZE_MASK)
	{
	case SYS_MEMORY_PAGE_SIZE_1M:
	case 0:
	{
		if (size % 0x100000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	case SYS_MEMORY_PAGE_SIZE_64K:
	{
		if (size % 0x10000)
		{
			return CELL_EALIGN;
		}

		break;
	}
	default:
	{
		return CELL_EINVAL;
	}
	}

	if (flags & ~SYS_MEMORY_PAGE_SIZE_MASK)
	{
		return CELL_EINVAL;
	}

	if (entry_count <= 0 || entry_count > 0x10)
	{
		return CELL_EINVAL;
	}

	// `if constexpr (init; true)` scopes to_perm_check to this section
	if constexpr (bool to_perm_check = false; true)
	{
		for (s32 i = 0; i < entry_count; i++)
		{
			const u64 type = entries[i].type;

			sys_mmapper.todo("sys_mmapper_allocate_shared_memory_from_container_ext(): entry type = 0x%x", type);

			switch (type)
			{
			case 0:
			case 1:
			case 3:
			{
				break;
			}
			case 5:
			{
				// Type 5 entries trigger the permission check below
				to_perm_check = true;
				break;
			}
			default:
			{
				return CELL_EPERM;
			}
			}
		}

		if (to_perm_check)
		{
			if (flags != SYS_MEMORY_PAGE_SIZE_64K || !g_ps3_process_info.debug_or_root())
			{
				return CELL_EPERM;
			}
		}
	}

	const auto ct = idm::get<lv2_memory_container>(cid);

	if (!ct)
	{
		return CELL_ESRCH;
	}

	// Exclusive creation: fails if ipc_key is already in use
	if (auto error = create_lv2_shm<true>(true, ipc_key, size, flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000, flags, ct.get()))
	{
		return error;
	}

	ppu.check_state();
	*mem_id = idm::last_id();
	return CELL_OK;
}
// Syscall: change access rights of a mapped region. Unimplemented stub (no-op).
error_code sys_mmapper_change_address_access_right(ppu_thread& ppu, u32 addr, u64 flags)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.todo("sys_mmapper_change_address_access_right(addr=0x%x, flags=0x%x)", addr, flags);

	return CELL_OK;
}
// Syscall: release an address region previously reserved with
// sys_mmapper_allocate_address. Fails with CELL_EBUSY while a page fault in the
// region is pending or while memory is still mapped inside it.
error_code sys_mmapper_free_address(ppu_thread& ppu, u32 addr)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_free_address(addr=0x%x)", addr);

	// mmapper regions live in [0x20000000, 0xC0000000)
	if (addr < 0x20000000 || addr >= 0xC0000000)
	{
		return {CELL_EINVAL, addr};
	}

	// If page fault notify exists and an address in this area is faulted, we can't free the memory.
	auto& pf_events = g_fxo->get<page_fault_event_entries>();
	std::lock_guard pf_lock(pf_events.pf_mutex);

	const auto mem = vm::get(vm::any, addr);

	// Only the exact base address of a region is accepted
	if (!mem || mem->addr != addr)
	{
		return {CELL_EINVAL, addr};
	}

	for (const auto& ev : pf_events.events)
	{
		if (addr <= ev.second && ev.second <= addr + mem->size - 1)
		{
			return CELL_EBUSY;
		}
	}

	// Try to unmap area
	const auto [area, success] = vm::unmap(addr, true, &mem);

	if (!area)
	{
		return {CELL_EINVAL, addr};
	}

	if (!success)
	{
		return CELL_EBUSY;
	}

	// If a memory block is freed, remove it from page notification table.
	auto& pf_entries = g_fxo->get<page_fault_notification_entries>();
	std::lock_guard lock(pf_entries.mutex);

	auto ind_to_remove = pf_entries.entries.begin();
	for (; ind_to_remove != pf_entries.entries.end(); ++ind_to_remove)
	{
		if (addr == ind_to_remove->start_addr)
		{
			break;
		}
	}
	if (ind_to_remove != pf_entries.entries.end())
	{
		pf_entries.entries.erase(ind_to_remove);
	}

	return CELL_OK;
}
// Syscall: destroy a shared memory object. Fails with CELL_EBUSY if it is still
// mapped anywhere (counter > 0); returns its size to the memory container when
// the last IPC reference disappears.
error_code sys_mmapper_free_shared_memory(ppu_thread& ppu, u32 mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_free_shared_memory(mem_id=0x%x)", mem_id);

	// Conditionally remove memory ID
	const auto mem = idm::withdraw<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
	{
		if (mem.counter)
		{
			// Still mapped somewhere: refuse destruction
			return CELL_EBUSY;
		}

		lv2_obj::on_id_destroy(mem, mem.key, +mem.pshared);

		if (!mem.exists)
		{
			// Return "physical memory" to the memory container
			mem.ct->free(mem.size);
		}

		return {};
	});

	if (!mem)
	{
		return CELL_ESRCH;
	}

	if (mem.ret)
	{
		return mem.ret;
	}

	return CELL_OK;
}
// Syscall: map a shared memory object at a fixed address inside a reserved region.
// The object's page alignment must satisfy the region's page size, and `addr`
// must be aligned accordingly. Increments the object's map counter on success.
error_code sys_mmapper_map_shared_memory(ppu_thread& ppu, u32 addr, u32 mem_id, u64 flags)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_map_shared_memory(addr=0x%x, mem_id=0x%x, flags=0x%x)", addr, mem_id, flags);

	const auto area = vm::get(vm::any, addr);

	if (!area || addr < 0x20000000 || addr >= 0xC0000000)
	{
		return CELL_EINVAL;
	}

	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
	{
		const u32 page_alignment = area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;

		if (mem.align < page_alignment)
		{
			return CELL_EINVAL;
		}

		if (addr % page_alignment)
		{
			return CELL_EALIGN;
		}

		// Counter bump happens under the idm lock; rolled back below on failure
		mem.counter++;
		return {};
	});

	if (!mem)
	{
		return CELL_ESRCH;
	}

	if (mem.ret)
	{
		return mem.ret;
	}

	if (!area->falloc(addr, mem->size, &mem->shm, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M))
	{
		// Mapping failed: undo the counter increment
		mem->counter--;

		if (!area->is_valid())
		{
			return {CELL_EINVAL, addr};
		}

		return CELL_EBUSY;
	}

	vm::lock_sudo(addr, mem->size);
	return CELL_OK;
}
// Syscall: map a shared memory object at any free address within the region that
// starts at start_addr. alloc_addr: out, the chosen mapping address.
error_code sys_mmapper_search_and_map(ppu_thread& ppu, u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_search_and_map(start_addr=0x%x, mem_id=0x%x, flags=0x%x, alloc_addr=*0x%x)", start_addr, mem_id, flags, alloc_addr);

	const auto area = vm::get(vm::any, start_addr);

	// start_addr must be the exact base of an existing mmapper region
	if (!area || start_addr != area->addr || start_addr < 0x20000000 || start_addr >= 0xC0000000)
	{
		return {CELL_EINVAL, start_addr};
	}

	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
	{
		const u32 page_alignment = area->flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 : 0x100000;

		if (mem.align < page_alignment)
		{
			return CELL_EALIGN;
		}

		// Counter bump under the idm lock; rolled back below on failure
		mem.counter++;
		return {};
	});

	if (!mem)
	{
		return CELL_ESRCH;
	}

	if (mem.ret)
	{
		return mem.ret;
	}

	const u32 addr = area->alloc(mem->size, &mem->shm, mem->align, mem->align == 0x10000 ? SYS_MEMORY_PAGE_SIZE_64K : SYS_MEMORY_PAGE_SIZE_1M);

	if (!addr)
	{
		// No free space: undo the counter increment
		mem->counter--;

		if (!area->is_valid())
		{
			return {CELL_EINVAL, start_addr};
		}

		return CELL_ENOMEM;
	}

	sys_mmapper.notice("sys_mmapper_search_and_map(): Found 0x%x address", addr);

	vm::lock_sudo(addr, mem->size);
	ppu.check_state();
	*alloc_addr = addr;
	return CELL_OK;
}
// Syscall: unmap the shared memory object mapped at `addr` and report its ID.
// mem_id: out, ID of the object that was mapped there. Decrements its map counter.
error_code sys_mmapper_unmap_shared_memory(ppu_thread& ppu, u32 addr, vm::ptr<u32> mem_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_unmap_shared_memory(addr=0x%x, mem_id=*0x%x)", addr, mem_id);

	const auto area = vm::get(vm::any, addr);

	if (!area || addr < 0x20000000 || addr >= 0xC0000000)
	{
		return {CELL_EINVAL, addr};
	}

	// Look up the shm attached at this exact address
	const auto shm = area->peek(addr);

	if (!shm.second)
	{
		return {CELL_EINVAL, addr};
	}

	// Find the lv2_memory object that owns this shm (linear scan over all objects)
	const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem) -> u32
	{
		if (mem.shm.get() == shm.second.get())
		{
			return id;
		}

		return 0;
	});

	if (!mem)
	{
		return {CELL_EINVAL, addr};
	}

	if (!area->dealloc(addr, &shm.second))
	{
		return {CELL_EINVAL, addr};
	}

	// Write out the ID
	ppu.check_state();
	*mem_id = mem.ret;

	// Acknowledge (release the mapping reference only after the ID was reported)
	mem->counter--;

	return CELL_OK;
}
// Syscall: register an event queue to receive page-fault notifications for the
// region starting at start_addr. Creates a local event port bound to the queue.
error_code sys_mmapper_enable_page_fault_notification(ppu_thread& ppu, u32 start_addr, u32 event_queue_id)
{
	ppu.state += cpu_flag::wait;

	sys_mmapper.warning("sys_mmapper_enable_page_fault_notification(start_addr=0x%x, event_queue_id=0x%x)", start_addr, event_queue_id);

	auto mem = vm::get(vm::any, start_addr);

	if (!mem || start_addr != mem->addr || start_addr < 0x20000000 || start_addr >= 0xC0000000)
	{
		return {CELL_EINVAL, start_addr};
	}

	// TODO: Check memory region's flags to make sure the memory can be used for page faults.
	auto queue = idm::get<lv2_obj, lv2_event_queue>(event_queue_id);

	if (!queue)
	{ // Can't connect the queue if it doesn't exist.
		return CELL_ESRCH;
	}

	vm::var<u32> port_id(0);
	error_code res = sys_event_port_create(ppu, port_id, SYS_EVENT_PORT_LOCAL, SYS_MEMORY_PAGE_FAULT_EVENT_KEY);

	// NOTE(review): the port is connected before `res` is checked, and only
	// CELL_EAGAIN is handled — presumably mirrors LV2 behavior; verify.
	sys_event_port_connect_local(ppu, *port_id, event_queue_id);

	if (res + 0u == CELL_EAGAIN)
	{
		// Not enough system resources.
		return CELL_EAGAIN;
	}

	auto& pf_entries = g_fxo->get<page_fault_notification_entries>();
	std::unique_lock lock(pf_entries.mutex);

	// Return error code if page fault notifications are already enabled
	for (const auto& entry : pf_entries.entries)
	{
		if (entry.start_addr == start_addr)
		{
			// Must drop the lock before calling back into the event port syscalls
			lock.unlock();
			sys_event_port_disconnect(ppu, *port_id);
			sys_event_port_destroy(ppu, *port_id);
			return CELL_EBUSY;
		}
	}

	page_fault_notification_entry entry{ start_addr, event_queue_id, port_id->value() };
	pf_entries.entries.emplace_back(entry);

	return CELL_OK;
}
// Wake a thread that was suspended because of a pending page fault.
// Returns CELL_EINVAL if the thread has no pending page fault event.
error_code mmapper_thread_recover_page_fault(cpu_thread* cpu)
{
	// We can only wake a thread if it is being suspended for a page fault.
	auto& pf_events = g_fxo->get<page_fault_event_entries>();
	{
		std::lock_guard pf_lock(pf_events.pf_mutex);
		const auto pf_event_ind = pf_events.events.find(cpu);

		if (pf_event_ind == pf_events.events.end())
		{
			// if not found...
			return CELL_EINVAL;
		}

		pf_events.events.erase(pf_event_ind);

		if (cpu->get_class() == thread_class::ppu)
		{
			// PPU threads go through the scheduler
			lv2_obj::awake(cpu);
		}
		else
		{
			// SPU (and other) threads are woken via the signal flag
			cpu->state += cpu_flag::signal;
		}
	}

	// Notify outside the lock to avoid waking a thread that immediately blocks on pf_mutex
	if (cpu->state & cpu_flag::signal)
	{
		cpu->state.notify_one();
	}

	return CELL_OK;
}
| 17,837
|
C++
|
.cpp
| 701
| 22.824536
| 206
| 0.678729
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,358
|
sys_spu.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_spu.cpp
|
#include "stdafx.h"
#include "sys_spu.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/VFS.h"
#include "Emu/IdManager.h"
#include "Crypto/unself.h"
#include "Crypto/unedat.h"
#include "Crypto/sha1.h"
#include "Loader/ELF.h"
#include "Utilities/bin_patch.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/RawSPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Memory/vm_reservation.h"
#include "sys_interrupt.h"
#include "sys_process.h"
#include "sys_memory.h"
#include "sys_mmapper.h"
#include "sys_event.h"
#include "sys_fs.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_spu);
// Formatter specialization: pretty-print spu_group_status values in logs.
template <>
void fmt_class_string<spu_group_status>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](spu_group_status value)
	{
		switch (value)
		{
		case SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED: return "uninitialized";
		case SPU_THREAD_GROUP_STATUS_INITIALIZED: return "initialized";
		case SPU_THREAD_GROUP_STATUS_READY: return "ready";
		case SPU_THREAD_GROUP_STATUS_WAITING: return "waiting";
		case SPU_THREAD_GROUP_STATUS_SUSPENDED: return "suspended";
		case SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED: return "waiting and suspended";
		case SPU_THREAD_GROUP_STATUS_RUNNING: return "running";
		case SPU_THREAD_GROUP_STATUS_STOPPED: return "stopped";
		case SPU_THREAD_GROUP_STATUS_DESTROYED: return "destroyed";
		case SPU_THREAD_GROUP_STATUS_UNKNOWN: break;
		}

		// Fall back to hex formatting for unexpected values
		return unknown;
	});
}
// Formatter specialization: pretty-print SPU stop-syscall codes in logs.
template <>
void fmt_class_string<spu_stop_syscall>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](spu_stop_syscall value)
	{
		switch (value)
		{
		case SYS_SPU_THREAD_STOP_YIELD: return "sys_spu_thread_yield";
		case SYS_SPU_THREAD_STOP_GROUP_EXIT: return "sys_spu_thread_group_exit";
		case SYS_SPU_THREAD_STOP_THREAD_EXIT: return "sys_spu_thread_thread_exit";
		case SYS_SPU_THREAD_STOP_RECEIVE_EVENT: return "sys_spu_thread_receive_event";
		case SYS_SPU_THREAD_STOP_TRY_RECEIVE_EVENT: return "sys_spu_thread_tryreceive_event";
		case SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE: return "sys_spu_thread_switch_system_module";
		}

		// Fall back to hex formatting for unexpected values
		return unknown;
	});
}
// Parse an SPU ELF from `stream`, copy the raw file into guest memory, build the
// segment table, and register the image as a kernel-type lv2_spu_image.
// Throws on invalid ELF or malformed segments.
void sys_spu_image::load(const fs::file& stream)
{
	const spu_exec_object obj{stream, 0, elf_opt::no_sections + elf_opt::no_data};

	if (obj != elf_error::ok)
	{
		fmt::throw_exception("Failed to load SPU image: %s", obj.get_error());
	}

	for (const auto& shdr : obj.shdrs)
	{
		spu_log.notice("** Section: sh_type=0x%x, addr=0x%llx, size=0x%llx, flags=0x%x", std::bit_cast<u32>(shdr.sh_type), shdr.sh_addr, shdr.sh_size, shdr._sh_flags);
	}

	for (const auto& prog : obj.progs)
	{
		spu_log.notice("** Segment: p_type=0x%x, p_vaddr=0x%llx, p_filesz=0x%llx, p_memsz=0x%llx, flags=0x%x", prog.p_type, prog.p_vaddr, prog.p_filesz, prog.p_memsz, prog.p_flags);

		if (prog.p_type != u32{SYS_SPU_SEGMENT_TYPE_COPY} && prog.p_type != u32{SYS_SPU_SEGMENT_TYPE_INFO})
		{
			spu_log.error("Unknown program type (0x%x)", prog.p_type);
		}
	}

	this->type = SYS_SPU_IMAGE_TYPE_KERNEL;
	const s32 nsegs = sys_spu_image::get_nsegs(obj.progs);

	// Guest allocation layout: [segment table][raw ELF file contents]
	const u32 mem_size = nsegs * sizeof(sys_spu_segment) + ::size32(stream);
	const vm::ptr<sys_spu_segment> segs = vm::cast(vm::alloc(mem_size, vm::main));

	//const u32 entry = obj.header.e_entry;

	// Copy the whole file right after the segment table
	const u32 src = (segs + nsegs).addr();

	stream.seek(0);
	stream.read(vm::base(src), stream.size());

	if (nsegs <= 0 || nsegs > 0x20 || sys_spu_image::fill(segs, nsegs, obj.progs, src) != nsegs)
	{
		fmt::throw_exception("Failed to load SPU segments (%d)", nsegs);
	}

	// Write ID and save entry
	this->entry_point = idm::make<lv2_obj, lv2_spu_image>(+obj.header.e_entry, segs, nsegs);

	// Unused and set to 0
	this->nsegs = 0;
	this->segs = vm::null;

	// Make the image read-only in guest memory
	vm::page_protect(segs.addr(), utils::align(mem_size, 4096), 0, 0, vm::page_writable);
}
// Release the guest memory owned by a kernel-type image (user-type images own nothing).
void sys_spu_image::free() const
{
	if (type == SYS_SPU_IMAGE_TYPE_KERNEL)
	{
		// TODO: Remove, should be handled by syscalls
		ensure(vm::dealloc(segs.addr(), vm::main));
	}
}
// Copy SPU image segments into local storage at `loc`, compute a SHA1-based
// patch identifier over the segments, and apply any matching patches.
// WARNING: the exact hashing order/fields define the patch hash format —
// changing them would invalidate all existing SPU patches.
void sys_spu_image::deploy(u8* loc, std::span<const sys_spu_segment> segs, bool is_verbose)
{
	// Segment info dump
	std::string dump;

	// Executable hash
	sha1_context sha;
	sha1_starts(&sha);
	u8 sha1_hash[20];

	for (const auto& seg : segs)
	{
		fmt::append(dump, "\n\t[%u] t=0x%x, ls=0x%x, size=0x%x, addr=0x%x", &seg - segs.data(), seg.type, seg.ls, seg.size, seg.addr);

		sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.type), sizeof(seg.type));

		// Hash big-endian values
		if (seg.type == SYS_SPU_SEGMENT_TYPE_COPY)
		{
			// COPY: transfer bytes from guest memory into local storage
			std::memcpy(loc + seg.ls, vm::base(seg.addr), seg.size);
			sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.size), sizeof(seg.size));
			sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.ls), sizeof(seg.ls));
			sha1_update(&sha, vm::_ptr<uchar>(seg.addr), seg.size);
		}
		else if (seg.type == SYS_SPU_SEGMENT_TYPE_FILL)
		{
			// FILL: seg.addr holds the 32-bit fill pattern, not an address
			if ((seg.ls | seg.size) % 4)
			{
				spu_log.error("Unaligned SPU FILL type segment (ls=0x%x, size=0x%x)", seg.ls, seg.size);
			}

			std::fill_n(reinterpret_cast<be_t<u32>*>(loc + seg.ls), seg.size / 4, seg.addr);
			sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.size), sizeof(seg.size));
			sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.ls), sizeof(seg.ls));
			sha1_update(&sha, reinterpret_cast<const uchar*>(&seg.addr), sizeof(seg.addr));
		}
		else if (seg.type == SYS_SPU_SEGMENT_TYPE_INFO)
		{
			const be_t<u32> size = seg.size + 0x14; // Workaround
			sha1_update(&sha, reinterpret_cast<const uchar*>(&size), sizeof(size));
		}
	}

	sha1_finish(&sha, sha1_hash);

	// Format patch name
	std::string hash("SPU-0000000000000000000000000000000000000000");
	for (u32 i = 0; i < sizeof(sha1_hash); i++)
	{
		constexpr auto pal = "0123456789abcdef";
		hash[4 + i * 2] = pal[sha1_hash[i] >> 4];
		hash[5 + i * 2] = pal[sha1_hash[i] & 15];
	}

	// Bounds-checked translation from LS offset to host pointer for the patch engine
	auto mem_translate = [loc](u32 addr, u32 size)
	{
		return utils::add_saturate<u32>(addr, size) <= SPU_LS_SIZE ? loc + addr : nullptr;
	};

	// Apply the patch
	std::vector<u32> applied;
	g_fxo->get<patch_engine>().apply(applied, hash, mem_translate);

	if (!Emu.GetTitleID().empty())
	{
		// Alternative patch
		g_fxo->get<patch_engine>().apply(applied, Emu.GetTitleID() + '-' + hash, mem_translate);
	}

	(is_verbose ? spu_log.notice : sys_spu.trace)("Loaded SPU image: %s (<- %u)%s", hash, applied.size(), dump);
}
// Deserialize an SPU thread group from a savestate. Field order must match
// lv2_spu_group::save() exactly. Also recreates member SPU threads and restores
// the group's run state (translating transient wait states to resumable ones).
lv2_spu_group::lv2_spu_group(utils::serial& ar) noexcept
	: name(ar.pop<std::string>())
	, id(idm::last_id())
	, max_num(ar)
	, mem_size(ar)
	, type(ar) // SPU Thread Group Type
	, ct(lv2_memory_container::search(ar))
	, has_scheduler_context(ar.pop<u8>())
	, max_run(ar)
	, init(ar)
	, prio([&ar]()
	{
		// prio is serialized via its raw .all representation
		std::common_type_t<decltype(lv2_spu_group::prio)> prio{};
		ar(prio.all);
		return prio;
	}())
	, run_state(ar.pop<spu_group_status>())
	, exit_status(ar)
{
	for (auto& thread : threads)
	{
		// Each slot is preceded by a bool indicating whether a thread was saved
		if (ar.pop<bool>())
		{
			ar(id_manager::g_id);
			thread = std::make_shared<named_thread<spu_thread>>(stx::launch_retainer{}, ar, this);
			ensure(idm::import_existing<named_thread<spu_thread>>(thread, idm::last_id()));
			running += !thread->stop_flag_removal_protection;
		}
	}

	ar(threads_map);
	ar(imgs);
	ar(args);

	// Restore the three event queue connections by saved queue ID
	for (auto ep : {&ep_run, &ep_exception, &ep_sysmodule})
	{
		*ep = idm::get_unlocked<lv2_obj, lv2_event_queue>(ar.pop<u32>());
	}

	waiter_spu_index = -1;

	switch (run_state)
	{
	// Commented stuff are handled by different means currently
	//case SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_INITIALIZED:
	//case SPU_THREAD_GROUP_STATUS_READY:
	case SPU_THREAD_GROUP_STATUS_WAITING:
	{
		// A waiting group resumes as running; remember which SPU was the waiter
		run_state = SPU_THREAD_GROUP_STATUS_RUNNING;
		ar(waiter_spu_index);
		[[fallthrough]];
	}
	case SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED:
	{
		if (run_state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
		{
			run_state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
		}

		[[fallthrough]];
	}
	case SPU_THREAD_GROUP_STATUS_SUSPENDED:
	{
		// Suspend all SPU threads except a thread that waits on sys_spu_thread_receive_event
		for (const auto& thread : threads)
		{
			if (thread)
			{
				if (thread->index == waiter_spu_index)
				{
					lv2_obj::set_future_sleep(thread.get());
					continue;
				}

				thread->state += cpu_flag::suspend;
			}
		}

		break;
	}
	//case SPU_THREAD_GROUP_STATUS_RUNNING:
	//case SPU_THREAD_GROUP_STATUS_STOPPED:
	//case SPU_THREAD_GROUP_STATUS_UNKNOWN:
	default:
	{
		break;
	}
	}
}
// Serialize the SPU thread group into a savestate.
// Field order must exactly match the deserialization constructor.
void lv2_spu_group::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(spu);

	ar(name, max_num, mem_size, type, ct->id, has_scheduler_context, max_run, init, prio.load().all, run_state, exit_status);

	for (const auto& thread : threads)
	{
		// Presence flag before each thread slot
		ar(u8{thread.operator bool()});

		if (thread)
		{
			ar(thread->id);
			thread->save(ar);
		}
	}

	ar(threads_map);
	ar(imgs);
	ar(args);

	// Store connected event queues by id (0 when disconnected)
	for (auto ep : {&ep_run, &ep_exception, &ep_sysmodule})
	{
		ar(lv2_obj::check(*ep) ? (*ep)->id : 0);
	}

	// Only the WAITING state carries the index of the waiting SPU thread
	if (run_state == SPU_THREAD_GROUP_STATUS_WAITING)
	{
		ar(waiter_spu_index);
	}
}
// Deserialize a kernel SPU image descriptor (entry point, segment table, segment count)
lv2_spu_image::lv2_spu_image(utils::serial& ar)
	: e_entry(ar)
	, segs(ar.pop<decltype(segs)>())
	, nsegs(ar)
{
}
// Serialize the image descriptor; order must match the deserialization constructor
void lv2_spu_image::save(utils::serial& ar)
{
	ar(e_entry, segs, nsegs);
}
// Resolve an SPU thread id to its thread pointer.
// The owning group's shared_ptr is returned alongside it so the caller keeps a reference.
std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> lv2_spu_group::get_thread(u32 id)
{
	// The thread index occupies bits 24-31 and may not exceed 5
	if (id >= 0x06000000)
	{
		return {};
	}

	// Bits 0-23 contain group id (without id base)
	std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> result{nullptr, idm::get<lv2_spu_group>((id & 0xFFFFFF) | (lv2_spu_group::id_base & ~0xFFFFFF))};

	// Bits 24-31 contain thread index within the group
	const u32 index = id >> 24;

	if (lv2_spu_group* grp = result.second.get(); grp && grp->init > index)
	{
		result.first = grp->threads[index].get();
	}

	return result;
}
// Snapshot of requested SPU usage, passed to spu_limits_t::check().
// umax in a limit field means "use the currently configured limit".
struct limits_data
{
	u32 physical = 0;     // Requested physical (non-scheduled) SPU threads
	u32 raw_spu = 0;      // Requested raw SPUs
	u32 controllable = 0; // Requested scheduler-managed SPU threads
	u32 spu_limit = umax; // Override for the SPU thread limit (umax: keep current)
	u32 raw_limit = umax; // Override for the raw SPU limit (umax: keep current)
};
// Global bookkeeping of how many SPUs may be used for thread groups vs raw SPUs
struct spu_limits_t
{
	u32 max_raw = 0; // Current raw SPU limit (set by sys_spu_initialize)
	u32 max_spu = 6; // Current SPU thread limit

	shared_mutex mutex;

	spu_limits_t() = default;

	spu_limits_t(utils::serial& ar) noexcept
	{
		ar(max_raw, max_spu);
	}

	void save(utils::serial& ar)
	{
		ar(max_raw, max_spu);
	}

	SAVESTATE_INIT_POS(47);

	// Verify that the usage requested in 'init' fits within the limits,
	// taking all currently existing SPU thread groups and raw SPUs into account.
	// Caller must hold 'mutex'.
	bool check(const limits_data& init) const
	{
		u32 physical_spus_count = init.physical;
		u32 raw_spu_count = init.raw_spu;
		u32 controllable_spu_count = init.controllable;

		// umax means "keep the currently configured limit"
		const u32 spu_limit = init.spu_limit != umax ? init.spu_limit : max_spu;
		const u32 raw_limit = init.raw_limit != umax ? init.raw_limit : max_raw;

		// Accumulate usage across all existing groups
		idm::select<lv2_spu_group>([&](u32, lv2_spu_group& group)
		{
			if (group.has_scheduler_context)
			{
				// Scheduler-managed groups time-share SPUs: only the largest such group counts
				controllable_spu_count = std::max(controllable_spu_count, group.max_num);
			}
			else
			{
				physical_spus_count += group.max_num;
			}
		});

		raw_spu_count += spu_thread::g_raw_spu_ctr;

		if (spu_limit + raw_limit > 6 || raw_spu_count > raw_limit || physical_spus_count >= spu_limit || physical_spus_count + controllable_spu_count > spu_limit)
		{
			return false;
		}

		return true;
	}
};
// Configure the global split between usable SPU threads and raw SPUs
error_code sys_spu_initialize(ppu_thread& ppu, u32 max_usable_spu, u32 max_raw_spu)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_initialize(max_usable_spu=%d, max_raw_spu=%d)", max_usable_spu, max_raw_spu);

	// At most 5 raw SPUs may be requested
	if (max_raw_spu > 5)
	{
		return CELL_EINVAL;
	}

	// NOTE: This value can be changed by VSH in theory
	max_usable_spu = 6;

	auto& limits = g_fxo->get<spu_limits_t>();

	std::lock_guard lock(limits.mutex);

	// Reject the new split if existing groups/raw SPUs would no longer fit
	if (!limits.check(limits_data{.spu_limit = max_usable_spu - max_raw_spu, .raw_limit = max_raw_spu}))
	{
		return CELL_EBUSY;
	}

	limits.max_raw = max_raw_spu;
	limits.max_spu = max_usable_spu - max_raw_spu;
	return CELL_OK;
}
// Report the entry point and segment count of a kernel-stored SPU image
error_code _sys_spu_image_get_information(ppu_thread& ppu, vm::ptr<sys_spu_image> img, vm::ptr<u32> entry_point, vm::ptr<s32> nsegs)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("_sys_spu_image_get_information(img=*0x%x, entry_point=*0x%x, nsegs=*0x%x)", img, entry_point, nsegs);

	// Only kernel-type images keep their information inside the IDM
	if (img->type != SYS_SPU_IMAGE_TYPE_KERNEL)
	{
		return CELL_EINVAL;
	}

	// For kernel images, entry_point actually holds the IDM handle
	if (const auto image = idm::get<lv2_obj, lv2_spu_image>(img->entry_point))
	{
		ppu.check_state();
		*entry_point = image->e_entry;
		*nsegs = image->nsegs;
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Open an SPU image file from the VFS, decrypt it (SELF) and parse it into *img
error_code sys_spu_image_open(ppu_thread& ppu, vm::ptr<sys_spu_image> img, vm::cptr<char> path)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_image_open(img=*0x%x, path=%s)", img, path);

	// Open the file through the lv2 file layer
	auto [fs_error, ppath, path0, file, type] = lv2_file::open(path.get_ptr(), 0, 0);

	if (fs_error)
	{
		return {fs_error, path};
	}

	// Decrypt with the most recently loaded NPDRM key (if the file is a SELF)
	u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
	const fs::file elf_file = decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic));

	if (!elf_file)
	{
		sys_spu.error("sys_spu_image_open(): file %s is illegal for SPU image!", path);
		return {CELL_ENOEXEC, path};
	}

	// Parse the decrypted ELF into the image descriptor
	img->load(elf_file);
	return CELL_OK;
}
// Parse an SPU image directly from a guest memory range [src, src+size)
error_code _sys_spu_image_import(ppu_thread& ppu, vm::ptr<sys_spu_image> img, u32 src, u32 size, u32 arg4)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("_sys_spu_image_import(img=*0x%x, src=*0x%x, size=0x%x, arg4=0x%x)", img, src, size, arg4);

	// Wrap the guest memory in a file view and load it as an SPU ELF
	img->load(fs::file{vm::base(src), size});
	return CELL_OK;
}
// Destroy a kernel-stored SPU image and free its segment buffer
error_code _sys_spu_image_close(ppu_thread& ppu, vm::ptr<sys_spu_image> img)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("_sys_spu_image_close(img=*0x%x)", img);

	// Only kernel-type images live in the IDM
	if (img->type != SYS_SPU_IMAGE_TYPE_KERNEL)
	{
		return CELL_EINVAL;
	}

	// Remove the image from the IDM (entry_point holds its handle)
	if (const auto handle = idm::withdraw<lv2_obj, lv2_spu_image>(img->entry_point))
	{
		// Release the segment table allocated in main memory
		ensure(vm::dealloc(handle->segs.addr(), vm::main));
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Copy up to nseg segment descriptors of a kernel-stored SPU image to guest memory
error_code _sys_spu_image_get_segments(ppu_thread& ppu, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_segment> segments, s32 nseg)
{
	ppu.state += cpu_flag::wait;

	sys_spu.error("_sys_spu_image_get_segments(img=*0x%x, segments=*0x%x, nseg=%d)", img, segments, nseg);

	// Segment count must be in (0, 0x20] and the image must be kernel-type
	const bool args_ok = nseg > 0 && nseg <= 0x20 && img->type == SYS_SPU_IMAGE_TYPE_KERNEL;

	if (!args_ok)
	{
		return CELL_EINVAL;
	}

	const auto handle = idm::get<lv2_obj, lv2_spu_image>(img->entry_point);

	if (!handle)
	{
		return CELL_ESRCH;
	}

	// TODO: apply SPU patches
	ppu.check_state();

	const s32 count = std::min<s32>(nseg, handle->nsegs);
	std::memcpy(segments.get_ptr(), handle->segs.get_ptr(), sizeof(sys_spu_segment) * count);
	return CELL_OK;
}
// Create and register an SPU thread in slot spu_num of a group, validating and
// recording its image and startup arguments. The thread starts on group start.
error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_initialize(thread=*0x%x, group=0x%x, spu_num=%d, img=*0x%x, attr=*0x%x, arg=*0x%x)", thread, group_id, spu_num, img, attr, arg);

	// spu_num indexes the group's thread slot map
	if (spu_num >= std::size(decltype(lv2_spu_group::threads_map){}))
	{
		return CELL_EINVAL;
	}

	if (!attr)
	{
		return CELL_EFAULT;
	}

	const sys_spu_thread_attribute attr_data = *attr;

	if (attr_data.name_len > 0x80)
	{
		return CELL_EINVAL;
	}

	if (!arg)
	{
		return CELL_EFAULT;
	}

	const sys_spu_thread_argument args = *arg;

	const u32 option = attr_data.option;

	// Only the decrementer-sync and async-interrupt option bits are valid
	if (option & ~(SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE | SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE))
	{
		return CELL_EINVAL;
	}

	if (!img)
	{
		return CELL_EFAULT;
	}

	sys_spu_image image = *img;

	switch (image.type)
	{
	case SYS_SPU_IMAGE_TYPE_KERNEL:
	{
		// For kernel images, entry_point holds the IDM handle of the stored image
		const auto handle = idm::get<lv2_obj, lv2_spu_image>(image.entry_point);

		if (!handle)
		{
			return CELL_ESRCH;
		}

		// Image information is stored in IDM
		image.entry_point = handle->e_entry;
		image.nsegs = handle->nsegs;
		image.segs = handle->segs;
		image.type = SYS_SPU_IMAGE_TYPE_KERNEL;
		break;
	}
	case SYS_SPU_IMAGE_TYPE_USER:
	{
		// User images must have a valid LS entry point and 1..0x20 segments
		if (image.entry_point > 0x3fffc || image.nsegs <= 0 || image.nsegs > 0x20)
		{
			return CELL_EINVAL;
		}

		break;
	}
	default: return CELL_EINVAL;
	}

	// Copy the segment table out of guest memory for validation and later deployment
	std::vector<sys_spu_segment> spu_segs(image.segs.get_ptr(), image.segs.get_ptr() + image.nsegs);

	bool found_info_segment = false;
	bool found_copy_segment = false;

	for (const auto& seg : spu_segs)
	{
		if (image.type == SYS_SPU_IMAGE_TYPE_KERNEL)
		{
			// Assume valid, values are coming from LV2
			found_copy_segment = true;
			break;
		}

		switch (seg.type)
		{
		case SYS_SPU_SEGMENT_TYPE_COPY:
		{
			if (seg.addr % 4)
			{
				// 4-bytes unaligned address is not valid
				return CELL_EINVAL;
			}

			found_copy_segment = true;
			break;
		}
		case SYS_SPU_SEGMENT_TYPE_FILL:
		{
			break;
		}
		case SYS_SPU_SEGMENT_TYPE_INFO:
		{
			// There can only be one INFO segment at max
			if (seg.size > 256u || found_info_segment)
			{
				return CELL_EINVAL;
			}

			found_info_segment = true;
			continue;
		}
		default: return CELL_EINVAL;
		}

		// LS target and size must be non-zero, 16-byte aligned and within local storage
		if (!seg.size || (seg.ls | seg.size) % 0x10 || seg.ls >= SPU_LS_SIZE || seg.size > SPU_LS_SIZE)
		{
			return CELL_EINVAL;
		}

		// Compare against every previously validated segment
		for (auto it = spu_segs.data(); it != &seg; it++)
		{
			if (it->type != SYS_SPU_SEGMENT_TYPE_INFO)
			{
				if (seg.ls + seg.size > it->ls && it->ls + it->size > seg.ls)
				{
					// Overlapping segments are not allowed
					return CELL_EINVAL;
				}
			}
		}
	}

	// There must be at least one COPY segment
	if (!found_copy_segment)
	{
		return CELL_EINVAL;
	}

	// Read thread name
	const std::string thread_name(attr_data.name.get_ptr(), std::max<u32>(attr_data.name_len, 1) - 1);

	const auto group = idm::get<lv2_spu_group>(group_id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	std::unique_lock lock(group->mutex);

	// Threads may only be added while the group is not yet fully initialized
	if (auto state = +group->run_state; state != SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}

		return CELL_EBUSY;
	}

	// Slot already occupied?
	if (group->threads_map[spu_num] != -1)
	{
		return CELL_EBUSY;
	}

	if (option & SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE)
	{
		sys_spu.warning("Unimplemented SPU Thread options (0x%x)", option);
	}

	// The thread id encodes its creation order (top byte) and the group id (low 24 bits)
	const u32 inited = group->init;

	const u32 tid = (inited << 24) | (group_id & 0xffffff);

	ensure(idm::import<named_thread<spu_thread>>([&]()
	{
		const auto spu = std::make_shared<named_thread<spu_thread>>(group.get(), spu_num, thread_name, tid, false, option);
		group->threads[inited] = spu;
		group->threads_map[spu_num] = static_cast<s8>(inited);
		return spu;
	}));

	// alloc_hidden indicates falloc to allocate page with no access rights in base memory
	auto& spu = group->threads[inited];
	ensure(vm::get(vm::spu)->falloc(spu->vm_offset(), SPU_LS_SIZE, &spu->shm, static_cast<u64>(vm::page_size_64k) | static_cast<u64>(vm::alloc_hidden)));
	spu->map_ls(*spu->shm, spu->ls);

	// Record startup arguments and image to deploy on group start
	group->args[inited] = {args.arg1, args.arg2, args.arg3, args.arg4};
	group->imgs[inited].first = image.entry_point;
	group->imgs[inited].second = std::move(spu_segs);

	if (++group->init == group->max_num)
	{
		// Last slot filled: mark the group as fully initialized
		const auto old = group->run_state.compare_and_swap(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED, SPU_THREAD_GROUP_STATUS_INITIALIZED);

		if (old == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}
	}

	lock.unlock();
	sys_spu.warning(u8"sys_spu_thread_initialize(): Thread “%s” created (id=0x%x)", thread_name, tid);

	ppu.check_state();
	*thread = tid;
	return CELL_OK;
}
// Replace the startup arguments of an SPU thread (takes effect on next group start)
error_code sys_spu_thread_set_argument(ppu_thread& ppu, u32 id, vm::ptr<sys_spu_thread_argument> arg)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_set_argument(id=0x%x, arg=*0x%x)", id, arg);

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(group->mutex);

	// The thread's slot within the group lives in the top byte of the id
	auto& slot = group->args[id >> 24];
	slot = {arg->arg1, arg->arg2, arg->arg3, arg->arg4};

	return CELL_OK;
}
// Read the exit status of an SPU thread, failing if it has not exited yet
error_code sys_spu_thread_get_exit_status(ppu_thread& ppu, u32 id, vm::ptr<s32> status)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_get_exit_status(id=0x%x, status=*0x%x)", id, status);

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	u32 exit_value{};

	if (!thread->exit_status.try_read(exit_value))
	{
		// No exit status available yet
		return CELL_ESTAT;
	}

	ppu.check_state();
	*status = static_cast<s32>(exit_value);
	return CELL_OK;
}
// Create an SPU thread group of 'num' threads with the given priority and type,
// reserving its memory from a container and checking it against the global SPU limits.
error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_create(id=*0x%x, num=%d, prio=%d, attr=*0x%x)", id, num, prio, attr);

	// Non-root processes may not use priorities below 16
	const s32 min_prio = g_ps3_process_info.has_root_perm() ? 0 : 16;

	const sys_spu_thread_group_attribute attr_data = *attr;

	if (attr_data.nsize > 0x80 || !num)
	{
		return CELL_EINVAL;
	}

	const s32 type = attr_data.type;

	bool use_scheduler = true;
	bool use_memct = !!(type & SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER);
	bool needs_root = false;
	u32 max_threads = 6;
	u32 min_threads = 1;
	u32 mem_size = 0;
	lv2_memory_container* ct{};

	if (type)
	{
		sys_spu.warning("sys_spu_thread_group_create(): SPU Thread Group type (0x%x)", type);
	}

	// Validate the type and derive its constraints (thread counts, root requirement)
	switch (type)
	{
	case 0x0:
	case 0x4:
	case 0x18:
	{
		break;
	}

	case 0x20:
	case 0x22:
	case 0x24:
	case 0x26:
	{
		if (type == 0x22 || type == 0x26)
		{
			needs_root = true;
		}

		min_threads = 2; // That's what appears from reversing
		break;
	}

	case 0x2:
	case 0x6:
	case 0xA:

	case 0x102:
	case 0x106:
	case 0x10A:

	case 0x202:
	case 0x206:
	case 0x20A:

	case 0x902:
	case 0x906:

	case 0xA02:
	case 0xA06:

	case 0xC02:
	case 0xC06:
	{
		if (type & 0x700)
		{
			max_threads = 1;
		}

		needs_root = true;
		break;
	}
	default: return CELL_EINVAL;
	}

	const bool is_system_coop = type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM;

	if (is_system_coop)
	{
		// Constant size, unknown what it means
		mem_size = SPU_LS_SIZE;
	}
	else if (type & SYS_SPU_THREAD_GROUP_TYPE_NON_CONTEXT)
	{
		// No memory consumed
		mem_size = 0;
		use_scheduler = false;
	}
	else
	{
		// 256kb for each spu thread, probably for saving and restoring SPU LS (used by scheduler?)
		mem_size = SPU_LS_SIZE * num;
	}

	// Thread count, root permission and priority range checks
	if (num < min_threads || num > max_threads ||
		(needs_root && min_prio == 0x10) || (use_scheduler && !is_system_coop && (prio > 255 || prio < min_prio)))
	{
		return CELL_EINVAL;
	}

	// Take the group's memory from the requested container or the global one
	if (use_memct && mem_size)
	{
		const auto sct = idm::get<lv2_memory_container>(attr_data.ct);

		if (!sct)
		{
			return CELL_ESRCH;
		}

		if (sct->take(mem_size) != mem_size)
		{
			return CELL_ENOMEM;
		}

		ct = sct.get();
	}
	else
	{
		ct = &g_fxo->get<lv2_memory_container>();

		if (ct->take(mem_size) != mem_size)
		{
			return CELL_ENOMEM;
		}
	}

	auto& limits = g_fxo->get<spu_limits_t>();

	std::unique_lock lock(limits.mutex);

	// Verify the new group against the global SPU usage limits
	if (!limits.check(use_scheduler ? limits_data{.controllable = num} : limits_data{.physical = num}))
	{
		ct->free(mem_size);
		return CELL_EBUSY;
	}

	const auto group = idm::make_ptr<lv2_spu_group>(std::string(attr_data.name.get_ptr(), std::max<u32>(attr_data.nsize, 1) - 1), num, prio, type, ct, use_scheduler, mem_size);

	if (!group)
	{
		// IDM is out of ids; roll back the memory reservation
		ct->free(mem_size);
		return CELL_EAGAIN;
	}

	lock.unlock();
	sys_spu.warning(u8"sys_spu_thread_group_create(): Thread group “%s” created (id=0x%x)", group->name, idm::last_id());

	ppu.check_state();
	*id = idm::last_id();
	return CELL_OK;
}
// Destroy an SPU thread group that is not running, releasing its memory and threads
error_code sys_spu_thread_group_destroy(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_destroy(id=0x%x)", id);

	auto& limits = g_fxo->get<spu_limits_t>();

	std::lock_guard lock(limits.mutex);

	const auto group = idm::withdraw<lv2_spu_group>(id, [](lv2_spu_group& group) -> CellError
	{
		// Atomically transition to DESTROYED, but only from a not-yet-started state
		if (!group.run_state.fetch_op([](spu_group_status& state)
		{
			if (state <= SPU_THREAD_GROUP_STATUS_INITIALIZED)
			{
				state = SPU_THREAD_GROUP_STATUS_DESTROYED;
				return true;
			}

			return false;
		}).second)
		{
			// The group is running (or otherwise busy)
			return CELL_EBUSY;
		}

		// Return the group's memory to its container
		group.ct->free(group.mem_size);
		return {};
	});

	if (!group)
	{
		return CELL_ESRCH;
	}

	if (group.ret)
	{
		return group.ret;
	}

	// Wait for any in-flight operation holding the group mutex to finish
	group->mutex.lock_unlock();

	for (const auto& t : group->threads)
	{
		if (auto thread = t.get())
		{
			// Deallocate LS
			thread->cleanup();

			// Remove ID from IDM (destruction will occur in group destructor)
			idm::remove<named_thread<spu_thread>>(thread->id);
		}
	}

	return CELL_OK;
}
// Start an initialized SPU thread group: deploy images, set up registers and wake the threads
error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_start(id=0x%x)", id);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Defers the state notifications until after the group mutex is released
	struct notify_on_exit
	{
		usz index = umax;
		std::array<spu_thread*, 8> threads; // Raw pointer suffices, as long as group is referenced its SPUs exist

		~notify_on_exit() noexcept
		{
			for (; index != umax; index--)
			{
				threads[index]->state.notify_one();
			}
		}
	} notify_threads;

	std::lock_guard lock(group->mutex);

	// SPU_THREAD_GROUP_STATUS_READY state is not used
	switch (group->run_state.compare_and_swap(SPU_THREAD_GROUP_STATUS_INITIALIZED, SPU_THREAD_GROUP_STATUS_RUNNING))
	{
	case SPU_THREAD_GROUP_STATUS_INITIALIZED: break;
	case SPU_THREAD_GROUP_STATUS_DESTROYED: return CELL_ESRCH;
	default: return CELL_ESTAT;
	}

	const u32 max_threads = group->max_num;

	group->join_state = 0;
	group->exit_status = 0;
	group->running = max_threads;
	group->set_terminate = false;

	for (auto& thread : group->threads)
	{
		if (thread)
		{
			// Look up this thread's image and arguments by its slot (top byte of lv2 id)
			auto& args = group->args[thread->lv2_id >> 24];
			auto& img = group->imgs[thread->lv2_id >> 24];

			sys_spu_image::deploy(thread->ls, std::span(img.second.data(), img.second.size()), group->stop_count < 5);

			thread->cpu_init();

			// Startup arguments are passed in r3-r6 (low 64 bits of each)
			thread->gpr[3] = v128::from64(0, args[0]);
			thread->gpr[4] = v128::from64(0, args[1]);
			thread->gpr[5] = v128::from64(0, args[2]);
			thread->gpr[6] = v128::from64(0, args[3]);

			thread->status_npc = {SPU_STATUS_RUNNING, img.first};
		}
	}

	// Because SPU_THREAD_GROUP_STATUS_READY is not possible, run event is delivered immediately
	// TODO: check data2 and data3
	group->send_run_event(id, 0, 0);

	u32 ran_threads = max_threads;

	for (auto& thread : group->threads)
	{
		if (!ran_threads)
		{
			break;
		}

		if (thread && ran_threads--)
		{
			// Clear the stop flag; actual wakeup happens in notify_threads' destructor
			thread->state -= cpu_flag::stop;
			notify_threads.threads[++notify_threads.index] = thread.get();
		}
	}

	return CELL_OK;
}
// Suspend a running (or waiting) SPU thread group
error_code sys_spu_thread_group_suspend(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_suspend(id=0x%x)", id);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Suspension requires a scheduler context and is invalid for these type bits
	if (!group->has_scheduler_context || group->type & 0xf00)
	{
		return CELL_EINVAL;
	}

	std::lock_guard lock(group->mutex);

	// Every path of the lambda below assigns 'error'; CELL_CANCEL marks a committed transition
	CellError error;

	group->run_state.fetch_op([&error](spu_group_status& state)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			error = CELL_ESRCH;
			return false;
		}

		if (state <= SPU_THREAD_GROUP_STATUS_INITIALIZED || state == SPU_THREAD_GROUP_STATUS_STOPPED)
		{
			error = CELL_ESTAT;
			return false;
		}

		// SPU_THREAD_GROUP_STATUS_READY state is not used

		if (state == SPU_THREAD_GROUP_STATUS_RUNNING)
		{
			state = SPU_THREAD_GROUP_STATUS_SUSPENDED;
		}
		else if (state == SPU_THREAD_GROUP_STATUS_WAITING)
		{
			state = SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED;
		}
		else if (state == SPU_THREAD_GROUP_STATUS_SUSPENDED || state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
		{
			// Already suspended: succeed without doing anything
			error = {};
			return false;
		}
		else
		{
			error = CELL_ESTAT;
			return false;
		}

		error = CellError{CELL_CANCEL + 0u};
		return true;
	});

	if (error != CELL_CANCEL + 0u)
	{
		if (!error)
		{
			return CELL_OK;
		}

		return error;
	}

	// The state transition was committed: actually suspend the SPU threads
	for (auto& thread : group->threads)
	{
		if (thread)
		{
			thread->state += cpu_flag::suspend;
		}
	}

	return CELL_OK;
}
// Resume a suspended SPU thread group
error_code sys_spu_thread_group_resume(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_resume(id=0x%x)", id);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Resuming requires a scheduler context and is invalid for these type bits
	if (!group->has_scheduler_context || group->type & 0xf00)
	{
		return CELL_EINVAL;
	}

	// Defers the state notifications until after the group mutex is released
	struct notify_on_exit
	{
		usz index = umax;
		std::array<spu_thread*, 8> threads; // Raw pointer suffices, as long as group is referenced its SPUs exist

		~notify_on_exit() noexcept
		{
			for (; index != umax; index--)
			{
				threads[index]->state.notify_one();
			}
		}
	} notify_threads;

	std::lock_guard lock(group->mutex);

	// Every path of the lambda below assigns 'error'; CELL_CANCEL marks a committed transition
	CellError error;

	group->run_state.fetch_op([&error](spu_group_status& state)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			error = CELL_ESRCH;
			return false;
		}

		// SPU_THREAD_GROUP_STATUS_READY state is not used

		if (state == SPU_THREAD_GROUP_STATUS_SUSPENDED)
		{
			state = SPU_THREAD_GROUP_STATUS_RUNNING;
		}
		else if (state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED)
		{
			// The group goes back to waiting; its threads stay asleep (no flag changes needed)
			state = SPU_THREAD_GROUP_STATUS_WAITING;
			error = CellError{};
			return true;
		}
		else
		{
			error = CELL_ESTAT;
			return false;
		}

		error = CellError{CELL_CANCEL + 0u};
		return true;
	});

	if (error != CELL_CANCEL + 0u)
	{
		if (error)
		{
			return error;
		}

		return CELL_OK;
	}

	// SUSPENDED -> RUNNING committed: clear the suspend flag and wake the threads
	for (auto& thread : group->threads)
	{
		if (thread)
		{
			thread->state -= cpu_flag::suspend;
			notify_threads.threads[++notify_threads.index] = thread.get();
		}
	}

	return CELL_OK;
}
// Yield the SPU thread group's execution slot (a no-op in this implementation)
error_code sys_spu_thread_group_yield(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_yield(id=0x%x)", id);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// No effect on these group types
	if (!group->has_scheduler_context || group->type & 0xf00)
	{
		return CELL_OK;
	}

	switch (+group->run_state)
	{
	case SPU_THREAD_GROUP_STATUS_RUNNING:
	{
		// SPU_THREAD_GROUP_STATUS_READY state is not used, so this function does nothing
		return CELL_OK;
	}
	case SPU_THREAD_GROUP_STATUS_DESTROYED:
	{
		return CELL_ESRCH;
	}
	default:
	{
		return CELL_ESTAT;
	}
	}
}
// Forcefully terminate a running SPU thread group with the given exit value,
// waiting until all of its threads have actually stopped
error_code sys_spu_thread_group_terminate(ppu_thread& ppu, u32 id, s32 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_terminate(id=0x%x, value=0x%x)", id, value);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	std::unique_lock lock(group->mutex);

	// There should be a small period of sleep when the PPU waits for a signal of termination
	auto short_sleep = [](ppu_thread& ppu)
	{
		lv2_obj::sleep(ppu);
		busy_wait(3000);
		ppu.check_state();
		ppu.state += cpu_flag::wait;
	};

	// Only a running/suspended group can be terminated
	if (auto state = +group->run_state;
		state <= SPU_THREAD_GROUP_STATUS_INITIALIZED ||
		state == SPU_THREAD_GROUP_STATUS_WAITING ||
		state == SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED ||
		state == SPU_THREAD_GROUP_STATUS_DESTROYED)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}

		return CELL_ESTAT;
	}

	if (group->set_terminate)
	{
		// Another thread already initiated termination:
		// Wait for termination, only then return error code
		const u32 last_stop = group->stop_count;
		group->wait_term_count++;
		lock.unlock();
		short_sleep(ppu);

		while (group->stop_count == last_stop)
		{
			group->stop_count.wait(last_stop);
		}

		group->wait_term_count--;
		return CELL_ESTAT;
	}

	group->set_terminate = true;

	// Raise stop+ret on every thread that has not already stopped itself
	for (auto& thread : group->threads)
	{
		if (thread)
		{
			thread->state.fetch_op([](bs_t<cpu_flag>& flags)
			{
				if (flags & cpu_flag::stop)
				{
					// In case the thread raised the ret flag itself at some point do not raise it again
					return false;
				}

				flags += cpu_flag::stop + cpu_flag::ret;
				return true;
			});
		}
	}

	// Kick waiting threads out of any reservation waits so they observe the stop flags
	u32 prev_resv = 0;

	for (auto& thread : group->threads)
	{
		while (thread && group->running && thread->state & cpu_flag::wait)
		{
			thread_ctrl::notify(*thread);

			if (u32 resv = atomic_storage<u32>::load(thread->raddr))
			{
				if (prev_resv && prev_resv != resv)
				{
					// Batch reservation notifications if possible
					vm::reservation_notifier_notify(prev_resv);
				}

				prev_resv = resv;
			}
		}
	}

	if (prev_resv)
	{
		vm::reservation_notifier_notify(prev_resv);
	}

	group->exit_status = value;
	group->join_state = SYS_SPU_THREAD_GROUP_JOIN_TERMINATED;

	// Wait until the threads are actually stopped
	const u32 last_stop = group->stop_count;
	group->wait_term_count++;
	lock.unlock();
	short_sleep(ppu);

	while (group->stop_count == last_stop)
	{
		group->stop_count.wait(last_stop);
	}

	group->wait_term_count--;
	return CELL_OK;
}
// Block until the SPU thread group exits/terminates, reporting the join cause and exit status
error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// do-while(false) so "already signaled" can break past the sleep/wait section
	do
	{
		lv2_obj::prepare_for_sleep(ppu);

		std::unique_lock lock(group->mutex);

		const auto state = +group->run_state;

		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}

		if (state < SPU_THREAD_GROUP_STATUS_INITIALIZED)
		{
			return CELL_ESTAT;
		}

		if (group->waiter)
		{
			// another PPU thread is joining this thread group
			return CELL_EBUSY;
		}

		if (group->join_state && state == SPU_THREAD_GROUP_STATUS_INITIALIZED)
		{
			// Already signaled
			ppu.gpr[4] = group->join_state;
			ppu.gpr[5] = group->exit_status;
			group->join_state.release(0);
			break;
		}
		else
		{
			// Subscribe to receive status in r4-r5
			group->waiter = &ppu;
		}

		{
			lv2_obj::notify_all_t notify;
			lv2_obj::sleep(ppu);
			lock.unlock();
		}

		// Wait for the signal flag to be raised (or for emulation stop)
		while (auto state = +ppu.state)
		{
			if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
			{
				break;
			}

			if (is_stopped(state))
			{
				std::lock_guard lock(group->mutex);

				if (group->waiter != &ppu)
				{
					// Already signaled concurrently; do not re-run the syscall
					break;
				}

				// Savestate/stop: mark the syscall to be executed again on resume
				ppu.state += cpu_flag::again;
				break;
			}

			ppu.state.wait(state);
		}
	}
	while (false);

	ppu.check_state();

	if (!cause)
	{
		if (status)
		{
			// Report unwritten data
			return CELL_EFAULT;
		}

		// Join succeeded but the output pointers are null (not treated as failure)
		return not_an_error(CELL_EFAULT);
	}

	// The join cause and exit status were passed via r4/r5
	*cause = static_cast<u32>(ppu.gpr[4]);

	if (!status)
	{
		return not_an_error(CELL_EFAULT);
	}

	*status = static_cast<s32>(ppu.gpr[5]);
	return CELL_OK;
}
// Change the scheduling priority of an SPU thread group
error_code sys_spu_thread_group_set_priority(ppu_thread& ppu, u32 id, s32 priority)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_set_priority(id=0x%x, priority=%d)", id, priority);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Non-root processes may not use priorities below 16
	const s32 min_prio = g_ps3_process_info.has_root_perm() ? 0 : 16;

	if (!group->has_scheduler_context || priority < min_prio || priority > 255)
	{
		return CELL_EINVAL;
	}

	group->prio.atomic_op([&](std::common_type_t<decltype(lv2_spu_group::prio)>& prio)
	{
		prio.prio = priority;
	});

	return CELL_OK;
}
// Report the scheduling priority of an SPU thread group (0 without scheduler context)
error_code sys_spu_thread_group_get_priority(ppu_thread& ppu, u32 id, vm::ptr<s32> priority)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_group_get_priority(id=0x%x, priority=*0x%x)", id, priority);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	ppu.check_state();

	s32 result = 0;

	if (group->has_scheduler_context)
	{
		result = group->prio.load().prio;
	}

	*priority = result;
	return CELL_OK;
}
// Select which threads of a system-cooperative group may be preempted (stub)
error_code sys_spu_thread_group_set_cooperative_victims(ppu_thread& ppu, u32 id, u32 threads_mask)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_set_cooperative_victims(id=0x%x, threads_mask=0x%x)", id, threads_mask);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Must be a system-cooperative group and the mask may only cover existing threads
	const bool coop = !!(group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM);

	if (!coop || threads_mask >= (1u << group->max_num))
	{
		return CELL_EINVAL;
	}

	// TODO
	return CELL_OK;
}
// Report deadline statistics of a system-cooperative group (stub: zero counters)
error_code sys_spu_thread_group_syscall_253(ppu_thread& ppu, u32 id, vm::ptr<sys_spu_thread_group_syscall_253_info> info)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_syscall_253(id=0x%x, info=*0x%x)", id, info);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Only valid for system-cooperative groups
	if (!(group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM))
	{
		return CELL_EINVAL;
	}

	// TODO
	ppu.check_state();

	// No statistics are tracked yet; only the timestamp is genuine
	info->deadlineMissCounter = 0;
	info->deadlineMeetCounter = 0;
	info->timestamp = get_timebased_time();
	return CELL_OK;
}
// Write a 1/2/4/8-byte value into an SPU thread's local storage
error_code sys_spu_thread_write_ls(ppu_thread& ppu, u32 id, u32 lsa, u64 value, u32 type)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_write_ls(id=0x%x, lsa=0x%05x, value=0x%llx, type=%d)", id, lsa, value, type);

	// check range and alignment (type must be a power of two <= 8, lsa aligned to it)
	if (lsa >= SPU_LS_SIZE || type > 8 || !type || (type | lsa) & (type - 1))
	{
		return CELL_EINVAL;
	}

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(group->mutex);

	// The group must be between WAITING and RUNNING for LS access
	if (auto state = +group->run_state;
		state < SPU_THREAD_GROUP_STATUS_WAITING || state > SPU_THREAD_GROUP_STATUS_RUNNING)
	{
		return state == SPU_THREAD_GROUP_STATUS_DESTROYED ? CELL_ESRCH : CELL_ESTAT;
	}

	switch (type)
	{
	case 1: thread->_ref<u8>(lsa) = static_cast<u8>(value); break;
	case 2: thread->_ref<u16>(lsa) = static_cast<u16>(value); break;
	case 4: thread->_ref<u32>(lsa) = static_cast<u32>(value); break;
	case 8: thread->_ref<u64>(lsa) = value; break;
	default: fmt::throw_exception("Unreachable");
	}

	return CELL_OK;
}
// Read a 1/2/4/8-byte value from an SPU thread's local storage
error_code sys_spu_thread_read_ls(ppu_thread& ppu, u32 id, u32 lsa, vm::ptr<u64> value, u32 type)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_read_ls(id=0x%x, lsa=0x%05x, value=*0x%x, type=%d)", id, lsa, value, type);

	// check range and alignment (type must be a power of two <= 8, lsa aligned to it)
	if (lsa >= SPU_LS_SIZE || type > 8 || !type || (type | lsa) & (type - 1))
	{
		return CELL_EINVAL;
	}

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	std::unique_lock lock(group->mutex);

	// The group must be between WAITING and RUNNING for LS access
	if (auto state = +group->run_state;
		state < SPU_THREAD_GROUP_STATUS_WAITING || state > SPU_THREAD_GROUP_STATUS_RUNNING)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}

		return CELL_ESTAT;
	}

	// Read LS under the lock, write the result to guest memory after releasing it
	u64 _value{};

	switch (type)
	{
	case 1: _value = thread->_ref<u8>(lsa); break;
	case 2: _value = thread->_ref<u16>(lsa); break;
	case 4: _value = thread->_ref<u32>(lsa); break;
	case 8: _value = thread->_ref<u64>(lsa); break;
	default: fmt::throw_exception("Unreachable");
	}

	lock.unlock();
	ppu.check_state();
	*value = _value;
	return CELL_OK;
}
// Push a value into an SPU thread's inbound mailbox
error_code sys_spu_thread_write_spu_mb(ppu_thread& ppu, u32 id, u32 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_write_spu_mb(id=0x%x, value=0x%x)", id, value);

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	spu_channel_op_state state{};
	{
		std::lock_guard lock(group->mutex);

		state = thread->ch_in_mbox.push(value, true);
	}

	if (!state.op_done)
	{
		// The push could not complete now; restart this syscall later
		ppu.state += cpu_flag::again;
		return {};
	}

	// Wake the SPU if it was waiting on the mailbox (done outside the lock)
	if (state.notify)
	{
		thread->ch_in_mbox.notify();
	}

	return CELL_OK;
}
// Set an SPU thread's SNR configuration value (only 0..3 are valid)
error_code sys_spu_thread_set_spu_cfg(ppu_thread& ppu, u32 id, u64 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_set_spu_cfg(id=0x%x, value=0x%x)", id, value);

	if (value > 3)
	{
		return CELL_EINVAL;
	}

	if (const auto [thread, group] = lv2_spu_group::get_thread(id); thread) [[likely]]
	{
		thread->snr_config = value;
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Read back an SPU thread's SNR configuration value
error_code sys_spu_thread_get_spu_cfg(ppu_thread& ppu, u32 id, vm::ptr<u64> value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_get_spu_cfg(id=0x%x, value=*0x%x)", id, value);

	const auto [thread, group] = lv2_spu_group::get_thread(id);

	if (thread) [[likely]]
	{
		ppu.check_state();
		*value = thread->snr_config;
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Write a value to one of an SPU thread's two signal notification registers
error_code sys_spu_thread_write_snr(ppu_thread& ppu, u32 id, u32 number, u32 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_spu_thread_write_snr(id=0x%x, number=%d, value=0x%x)", id, number, value);

	// Only SNR 0 and 1 exist
	if (number > 1)
	{
		return CELL_EINVAL;
	}

	if (const auto [thread, group] = lv2_spu_group::get_thread(id); thread) [[likely]]
	{
		thread->push_snr(number, value);
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Connect an event queue to one of the group's event ports (run/exception/sysmodule)
error_code sys_spu_thread_group_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_connect_event(id=0x%x, eq=0x%x, et=%d)", id, eq, et);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Map the event type to the group's corresponding endpoint slot
	const auto ep =
		et == SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE ? &group->ep_sysmodule :
		et == SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION ? &group->ep_exception :
		et == SYS_SPU_THREAD_GROUP_EVENT_RUN ? &group->ep_run :
		nullptr;

	if (!ep)
	{
		sys_spu.error("sys_spu_thread_group_connect_event(): unknown event type (%d)", et);
		return CELL_EINVAL;
	}

	// System-module events require a system-cooperative group
	if (et == SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE && !(group->type & SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM))
	{
		return CELL_EINVAL;
	}

	auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);

	std::lock_guard lock(group->mutex);

	if (lv2_obj::check(*ep))
	{
		// Something is already connected to this port
		return CELL_EBUSY;
	}

	// ESRCH of event queue after EBUSY
	if (!queue)
	{
		return CELL_ESRCH;
	}

	*ep = std::move(queue);
	return CELL_OK;
}
// Disconnect whatever event queue is attached to one of the group's event ports
error_code sys_spu_thread_group_disconnect_event(ppu_thread& ppu, u32 id, u32 et)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_disconnect_event(id=0x%x, et=%d)", id, et);

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	// Map the event type to the group's corresponding endpoint slot
	decltype(&group->ep_run) ep = nullptr;

	switch (et)
	{
	case SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE: ep = &group->ep_sysmodule; break;
	case SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION: ep = &group->ep_exception; break;
	case SYS_SPU_THREAD_GROUP_EVENT_RUN: ep = &group->ep_run; break;
	default: break;
	}

	if (!ep)
	{
		sys_spu.error("sys_spu_thread_group_disconnect_event(): unknown event type (%d)", et);
		return CELL_OK;
	}

	// No error checking is performed

	std::lock_guard lock(group->mutex);

	ep->reset();
	return CELL_OK;
}
// Connect an event queue to a user SPU port (spup 0..63) of a single SPU thread.
error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et, u32 spup)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);
	const auto [thread, group] = lv2_spu_group::get_thread(id);
	auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
	if (!queue || !thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	// Only user events are supported and the port number must be within 0..63
	if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63)
	{
		sys_spu.error("sys_spu_thread_connect_event(): invalid arguments (et=%d, spup=%d, queue->type=%d)", et, spup, queue->type);
		return CELL_EINVAL;
	}
	std::lock_guard lock(group->mutex);
	auto& port = thread->spup[spup];
	// The port must be free
	if (lv2_obj::check(port))
	{
		return CELL_EISCONN;
	}
	port = std::move(queue);
	return CELL_OK;
}
// Disconnect the event queue attached to a user SPU port (spup 0..63) of an SPU thread.
error_code sys_spu_thread_disconnect_event(ppu_thread& ppu, u32 id, u32 et, u32 spup)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_spu_thread_disconnect_event(id=0x%x, et=%d, spup=%d)", id, et, spup);
	const auto [thread, group] = lv2_spu_group::get_thread(id);
	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	// Only user events are supported and the port number must be within 0..63
	if (et != SYS_SPU_THREAD_EVENT_USER || spup > 63)
	{
		sys_spu.error("sys_spu_thread_disconnect_event(): invalid arguments (et=%d, spup=%d)", et, spup);
		return CELL_EINVAL;
	}
	std::lock_guard lock(group->mutex);
	auto& port = thread->spup[spup];
	// The port must actually be connected
	if (!lv2_obj::check(port))
	{
		return CELL_ENOTCONN;
	}
	port.reset();
	return CELL_OK;
}
// Bind an SPU event queue to an SPU thread under a user-chosen queue number (spuq_num).
// The binding is stored in the thread's fixed-size spuq table.
error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq_num)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
	const auto [thread, group] = lv2_spu_group::get_thread(id);
	auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
	if (!queue || !thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	// Only queues created with the SPU queue type may be bound
	if (queue->type != SYS_SPU_QUEUE)
	{
		return CELL_EINVAL;
	}
	std::lock_guard lock(group->mutex);
	// Pointer to the first free table entry (if any)
	decltype(std::data(thread->spuq)) q{};
	// Single pass: remember a free slot while checking for conflicting bindings
	for (auto& v : thread->spuq)
	{
		// Check if the entry is assigned at all
		if (!v.second)
		{
			if (!q)
			{
				q = &v;
			}
			continue;
		}
		// Reject duplicate queue number or rebinding the same queue
		if (v.first == spuq_num || v.second == queue)
		{
			return CELL_EBUSY;
		}
	}
	// Table full
	if (!q)
	{
		return CELL_EAGAIN;
	}
	q->first = spuq_num;
	q->second = std::move(queue);
	return CELL_OK;
}
// Remove an SPU queue binding identified by its queue number from an SPU thread.
error_code sys_spu_thread_unbind_queue(ppu_thread& ppu, u32 id, u32 spuq_num)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_unbind_queue(id=0x%x, spuq_num=0x%x)", id, spuq_num);

	const auto [spu, grp] = lv2_spu_group::get_thread(id);

	if (!spu) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(grp->mutex);

	// Look for an assigned entry with a matching queue number
	for (auto& entry : spu->spuq)
	{
		if (entry.first == spuq_num && entry.second)
		{
			// Drop the binding
			entry.second.reset();
			return CELL_OK;
		}
	}

	// No such binding exists
	return CELL_ESRCH;
}
// Connect an event queue to the same user SPU port of every thread in a group.
// req is a 64-bit mask of acceptable port numbers; the chosen port is written to *spup.
error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread& ppu, u32 id, u32 eq, u64 req, vm::ptr<u8> spup)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_spu_thread_group_connect_event_all_threads(id=0x%x, eq=0x%x, req=0x%llx, spup=*0x%x)", id, eq, req, spup);
	// An empty request mask is invalid
	if (!req)
	{
		return CELL_EINVAL;
	}
	const auto group = idm::get<lv2_spu_group>(id);
	const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
	if (!group || !queue)
	{
		return CELL_ESRCH;
	}
	std::unique_lock lock(group->mutex);
	// The group must be initialized and not destroyed
	if (auto state = +group->run_state;
		state < SPU_THREAD_GROUP_STATUS_INITIALIZED || state == SPU_THREAD_GROUP_STATUS_DESTROYED)
	{
		if (state == SPU_THREAD_GROUP_STATUS_DESTROYED)
		{
			return CELL_ESRCH;
		}
		return CELL_ESTAT;
	}
	u8 port = 0; // SPU Port number
	// Find the lowest requested port that is free on every thread of the group
	for (; port < 64; port++)
	{
		if (!(req & (1ull << port)))
		{
			continue;
		}
		bool found = true;
		for (auto& t : group->threads)
		{
			if (t)
			{
				// Port already connected on this thread: try the next port
				if (lv2_obj::check(t->spup[port]))
				{
					found = false;
					break;
				}
			}
		}
		if (found)
		{
			break;
		}
	}
	// No free common port among the requested ones
	if (port == 64)
	{
		return CELL_EISCONN;
	}
	// Connect the queue on the chosen port of all threads
	for (auto& t : group->threads)
	{
		if (t)
		{
			t->spup[port] = queue;
		}
	}
	// Release the lock before writing guest memory
	lock.unlock();
	ppu.check_state();
	*spup = port;
	return CELL_OK;
}
// Disconnect the given user SPU port on every thread of a group.
error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread& ppu, u32 id, u32 spup)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_group_disconnect_event_all_threads(id=0x%x, spup=%d)", id, spup);

	// Port numbers are limited to 0..63
	if (spup >= 64)
	{
		return CELL_EINVAL;
	}

	const auto group = idm::get<lv2_spu_group>(id);

	if (!group)
	{
		return CELL_ESRCH;
	}

	std::lock_guard lock(group->mutex);

	// Reset the port on every existing thread of the group
	for (const auto& spu : group->threads)
	{
		if (spu)
		{
			spu->spup[spup].reset();
		}
	}

	return CELL_OK;
}
// Query or toggle the global SPU thread group logging state.
error_code sys_spu_thread_group_log(ppu_thread& ppu, s32 command, vm::ptr<s32> stat)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_spu_thread_group_log(command=0x%x, stat=*0x%x)", command, stat);
	// Process-wide log state, lazily created via the fixed-object manager
	struct spu_group_log_state_t
	{
		atomic_t<s32> state = SYS_SPU_THREAD_GROUP_LOG_ON;
	};
	auto& state = g_fxo->get<spu_group_log_state_t>();
	switch (command)
	{
	case SYS_SPU_THREAD_GROUP_LOG_GET_STATUS:
	{
		// Output pointer is mandatory for the status query
		if (!stat)
		{
			return CELL_EFAULT;
		}
		ppu.check_state();
		*stat = state.state;
		break;
	}
	case SYS_SPU_THREAD_GROUP_LOG_ON:
	case SYS_SPU_THREAD_GROUP_LOG_OFF:
	{
		state.state.release(command);
		break;
	}
	default: return CELL_EINVAL;
	}
	return CELL_OK;
}
// Recover an SPU thread (group member) stalled on a page fault.
error_code sys_spu_thread_recover_page_fault(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_spu_thread_recover_page_fault(id=0x%x)", id);

	const auto [spu, grp] = lv2_spu_group::get_thread(id);

	if (!spu) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	return mmapper_thread_recover_page_fault(spu);
}

// Recover a raw SPU stalled on a page fault.
error_code sys_raw_spu_recover_page_fault(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;

	sys_spu.warning("sys_raw_spu_recover_page_fault(id=0x%x)", id);

	const auto spu = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));

	if (!spu) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	return mmapper_thread_recover_page_fault(spu.get());
}
// Create a raw SPU (direct MMIO-controlled SPU outside of any thread group).
// At most 5 raw SPUs may exist at a time.
error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<void> attr)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_raw_spu_create(id=*0x%x, attr=*0x%x)", id, attr);
	auto& limits = g_fxo->get<spu_limits_t>();
	std::lock_guard lock(limits.mutex);
	// Check resource limits before reserving a slot
	if (!limits.check(limits_data{.raw_spu = 1}))
	{
		return CELL_EAGAIN;
	}
	// Reserve one of the 5 global raw SPU slots
	if (!spu_thread::g_raw_spu_ctr.try_inc(5))
	{
		return CELL_EAGAIN;
	}
	u32 index = 0;
	// Find free RawSPU ID
	while (!spu_thread::g_raw_spu_id[index].try_inc(1))
	{
		if (++index == 5)
			index = 0;
	}
	const auto spu = idm::make_ptr<named_thread<spu_thread>>(nullptr, index, "", index);
	// Allocate and map the local storage for this raw SPU
	ensure(vm::get(vm::spu)->falloc(spu->vm_offset(), SPU_LS_SIZE, &spu->shm, vm::page_size_64k));
	spu->map_ls(*spu->shm, spu->ls);
	// Publish the idm id so the raw SPU index can be resolved later
	spu_thread::g_raw_spu_id[index] = idm::last_id();
	ppu.check_state();
	*id = index;
	return CELL_OK;
}
// Create an isolated SPU from an encrypted (SELF) image in guest memory
// and pass arg1..arg4 in GPR 3..6.
error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<void> image, u64 arg1, u64 arg2, u64 arg3, u64 arg4)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_create(id=*0x%x, image=*0x%x, arg1=0x%llx, arg2=0x%llx, arg3=0x%llx, arg4=0x%llx)", id, image, arg1, arg2, arg3, arg4);
	// TODO: More accurate SPU image memory size calculation
	// Probe mapped pages forward from the image address to bound the image size
	u32 max = image.addr() & -4096;
	while (max != 0u - 4096 && vm::check_addr(max))
	{
		max += 4096;
	}
	// Decrypt the SELF image in place; failure means a bad or unauthorized image
	const auto obj = decrypt_self(fs::file{image.get_ptr(), max - image.addr()});
	if (!obj)
	{
		return CELL_EAUTHFAIL;
	}
	auto& limits = g_fxo->get<spu_limits_t>();
	std::lock_guard lock(limits.mutex);
	// Isolated SPUs count against the raw SPU limit
	if (!limits.check(limits_data{.raw_spu = 1}))
	{
		return CELL_EAGAIN;
	}
	if (!spu_thread::g_raw_spu_ctr.try_inc(5))
	{
		return CELL_EAGAIN;
	}
	u32 index = 0;
	// Find free RawSPU ID
	while (!spu_thread::g_raw_spu_id[index].try_inc(1))
	{
		if (++index == 5)
			index = 0;
	}
	const u32 ls_addr = RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index;
	const auto thread = idm::make_ptr<named_thread<spu_thread>>(nullptr, index, "", index, true);
	// Arguments are placed in the low 64 bits of GPR 3..6
	thread->gpr[3] = v128::from64(0, arg1);
	thread->gpr[4] = v128::from64(0, arg2);
	thread->gpr[5] = v128::from64(0, arg3);
	thread->gpr[6] = v128::from64(0, arg4);
	spu_thread::g_raw_spu_id[index] = (ensure(thread->id));
	// Load the decrypted image segments into local storage and set the entry point (NPC)
	sys_spu_image img;
	img.load(obj);
	auto image_info = idm::get<lv2_obj, lv2_spu_image>(img.entry_point);
	img.deploy(thread->ls, std::span(image_info->segs.get_ptr(), image_info->nsegs));
	thread->write_reg(ls_addr + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, image_info->e_entry);
	ensure(idm::remove_verify<lv2_obj, lv2_spu_image>(img.entry_point, std::move(image_info)));
	*id = index;
	return CELL_OK;
}
// Common implementation for destroying a raw or isolated SPU:
// aborts the thread, tears down its interrupt handlers/tags, then withdraws the idm entry.
template <bool isolated = false>
error_code raw_spu_destroy(ppu_thread& ppu, u32 id)
{
	const u32 idm_id = spu_thread::find_raw_spu(id);
	// Mark the thread as aborting only if its type matches the requested variant
	auto thread = idm::get<named_thread<spu_thread>>(idm_id, [](named_thread<spu_thread>& thread)
	{
		if (thread.get_type() != (isolated ? spu_type::isolated : spu_type::raw))
		{
			return false;
		}
		// Stop thread
		thread = thread_state::aborting;
		return true;
	});
	if (!thread || !thread.ret) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	// TODO: CELL_EBUSY is not returned
	// Kernel objects which must be removed
	std::vector<std::pair<std::shared_ptr<lv2_obj>, u32>> to_remove;
	// Clear interrupt handlers
	for (auto& intr : thread->int_ctrl)
	{
		if (auto& tag = intr.tag; lv2_obj::check(tag))
		{
			if (auto& handler = tag->handler; lv2_obj::check(handler))
			{
				// SLEEP
				lv2_obj::sleep(ppu);
				handler->join();
				to_remove.emplace_back(handler, handler->id);
			}
			to_remove.emplace_back(tag, tag->id);
		}
	}
	// Remove IDs (0xa = interrupt tag ids, 0xb = interrupt service ids)
	for (auto&& pair : to_remove)
	{
		if (pair.second >> 24 == 0xa)
			idm::remove_verify<lv2_obj, lv2_int_tag>(pair.second, std::move(pair.first));
		if (pair.second >> 24 == 0xb)
			idm::remove_verify<lv2_obj, lv2_int_serv>(pair.second, std::move(pair.first));
	}
	// NOTE(review): presumably waits for the aborting SPU thread to finish — confirm named_thread::operator() semantics
	(*thread)();
	auto& limits = g_fxo->get<spu_limits_t>();
	std::lock_guard lock(limits.mutex);
	// Withdraw the idm entry; verify it is still the same object we aborted
	if (auto ret = idm::withdraw<named_thread<spu_thread>>(idm_id, [&](spu_thread& spu) -> CellError
	{
		if (std::addressof(spu) != std::addressof(*thread))
		{
			return CELL_ESRCH;
		}
		spu.cleanup();
		return {};
	}); !ret || ret.ret)
	{
		// Other thread destroyed it beforehand
		return CELL_ESRCH;
	}
	return CELL_OK;
}
// Syscall wrapper: destroy a raw SPU.
error_code sys_raw_spu_destroy(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_raw_spu_destroy(id=%d)", id);
	return raw_spu_destroy(ppu, id);
}
// Syscall wrapper: destroy an isolated SPU.
error_code sys_isolated_spu_destroy(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_destroy(id=%d)", id);
	return raw_spu_destroy<true>(ppu, id);
}
// Common implementation: create an interrupt tag for interrupt class 0 or 2 of a raw/isolated SPU.
template <bool isolated = false>
error_code raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 /*hwthread*/, vm::ptr<u32> intrtag)
{
	// Only interrupt classes 0 and 2 exist
	if (class_id != 0 && class_id != 2)
	{
		return CELL_EINVAL;
	}
	CellError error = {};
	// Atomically create and register the tag; errors are reported via the captured variable
	const auto tag = idm::import<lv2_obj, lv2_int_tag>([&]()
	{
		std::shared_ptr<lv2_int_tag> result;
		auto thread = idm::check_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
		// The SPU must exist, be alive and match the requested raw/isolated variant
		if (!thread || *thread == thread_state::aborting || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw))
		{
			error = CELL_ESRCH;
			return result;
		}
		auto& int_ctrl = thread->int_ctrl[class_id];
		// A tag may only be created once per interrupt class
		if (lv2_obj::check(int_ctrl.tag))
		{
			error = CELL_EAGAIN;
			return result;
		}
		result = std::make_shared<lv2_int_tag>();
		int_ctrl.tag = result;
		return result;
	});
	if (tag)
	{
		cpu_thread::get_current()->check_state();
		*intrtag = tag;
		return CELL_OK;
	}
	return error;
}
// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_create_interrupt_tag(ppu_thread& ppu, u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag)
{
	ppu.state += cpu_flag::wait;
	sys_spu.warning("sys_raw_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag=*0x%x)", id, class_id, hwthread, intrtag);
	return raw_spu_create_interrupt_tag(id, class_id, hwthread, intrtag);
}
// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_create_interrupt_tag(ppu_thread& ppu, u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_create_interrupt_tag(id=%d, class_id=%d, hwthread=0x%x, intrtag=*0x%x)", id, class_id, hwthread, intrtag);
	return raw_spu_create_interrupt_tag<true>(id, class_id, hwthread, intrtag);
}
// Common implementation: set the interrupt mask register for interrupt class 0 or 2.
template <bool isolated = false>
error_code raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask)
{
	// Only interrupt classes 0 and 2 exist
	if (!(class_id == 0 || class_id == 2))
	{
		return CELL_EINVAL;
	}

	const auto spu = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));

	// The SPU must exist and match the requested raw/isolated variant
	if (!spu || spu->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	spu->int_ctrl[class_id].mask.exchange(mask);
	return CELL_OK;
}

// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_set_int_mask(ppu_thread& ppu, u32 id, u32 class_id, u64 mask)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_raw_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask);

	return raw_spu_set_int_mask<false>(id, class_id, mask);
}

// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_set_int_mask(ppu_thread& ppu, u32 id, u32 class_id, u64 mask)
{
	ppu.state += cpu_flag::wait;

	sys_spu.todo("sys_isolated_spu_set_int_mask(id=%d, class_id=%d, mask=0x%llx)", id, class_id, mask);

	return raw_spu_set_int_mask<true>(id, class_id, mask);
}
// Common implementation: acknowledge (clear) interrupt status bits for interrupt class 0 or 2.
template <bool isolated = false>
error_code raw_spu_set_int_stat(u32 id, u32 class_id, u64 stat)
{
	// Only interrupt classes 0 and 2 exist
	if (!(class_id == 0 || class_id == 2))
	{
		return CELL_EINVAL;
	}

	const auto spu = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));

	// The SPU must exist and match the requested raw/isolated variant
	if (!spu || spu->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	spu->int_ctrl[class_id].clear(stat);
	return CELL_OK;
}

// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_set_int_stat(ppu_thread& ppu, u32 id, u32 class_id, u64 stat)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_raw_spu_set_int_stat(id=%d, class_id=%d, stat=0x%llx)", id, class_id, stat);

	return raw_spu_set_int_stat<false>(id, class_id, stat);
}

// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_set_int_stat(ppu_thread& ppu, u32 id, u32 class_id, u64 stat)
{
	ppu.state += cpu_flag::wait;

	sys_spu.todo("sys_isolated_spu_set_int_stat(id=%d, class_id=%d, stat=0x%llx)", id, class_id, stat);

	return raw_spu_set_int_stat<true>(id, class_id, stat);
}
// Common implementation: read an interrupt control register (mask or stat, selected
// via pointer-to-member) of interrupt class 0 or 2 and write it to guest memory.
template <bool isolated = false>
error_code raw_spu_get_int_control(u32 id, u32 class_id, vm::ptr<u64> value, atomic_t<u64> spu_int_ctrl_t::* control)
{
	// Only interrupt classes 0 and 2 exist
	if (class_id != 0 && class_id != 2)
	{
		return CELL_EINVAL;
	}
	const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
	// The SPU must exist and match the requested raw/isolated variant
	if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	cpu_thread::get_current()->check_state();
	*value = thread->int_ctrl[class_id].*control;
	return CELL_OK;
}
// Syscall wrapper: read interrupt mask of a raw SPU.
error_code sys_raw_spu_get_int_mask(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr<u64> mask)
{
	ppu.state += cpu_flag::wait;
	sys_spu.trace("sys_raw_spu_get_int_mask(id=%d, class_id=%d, mask=*0x%x)", id, class_id, mask);
	return raw_spu_get_int_control(id, class_id, mask, &spu_int_ctrl_t::mask);
}
// Syscall wrapper: read interrupt mask of an isolated SPU.
error_code sys_isolated_spu_get_int_mask(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr<u64> mask)
{
	ppu.state += cpu_flag::wait;
	sys_spu.trace("sys_isolated_spu_get_int_mask(id=%d, class_id=%d, mask=*0x%x)", id, class_id, mask);
	return raw_spu_get_int_control<true>(id, class_id, mask, &spu_int_ctrl_t::mask);
}
// Syscall wrapper: read interrupt status of a raw SPU.
error_code sys_raw_spu_get_int_stat(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr<u64> stat)
{
	ppu.state += cpu_flag::wait;
	sys_spu.trace("sys_raw_spu_get_int_stat(id=%d, class_id=%d, stat=*0x%x)", id, class_id, stat);
	return raw_spu_get_int_control(id, class_id, stat, &spu_int_ctrl_t::stat);
}
// Syscall wrapper: read interrupt status of an isolated SPU.
error_code sys_isolated_spu_get_int_stat(ppu_thread& ppu, u32 id, u32 class_id, vm::ptr<u64> stat)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_get_int_stat(id=%d, class_id=%d, stat=*0x%x)", id, class_id, stat);
	return raw_spu_get_int_control<true>(id, class_id, stat, &spu_int_ctrl_t::stat);
}
// Common implementation: read a value from the SPU's outbound interrupt mailbox.
template <bool isolated = false>
error_code raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value)
{
	const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
	// The SPU must exist and match the requested raw/isolated variant
	if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	cpu_thread::get_current()->check_state();
	// NOTE(review): pop() semantics (blocking vs non-blocking when the mailbox is empty)
	// are defined by the spu channel implementation — confirm before relying on it
	*value = thread->ch_out_intr_mbox.pop();
	return CELL_OK;
}
// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_read_puint_mb(ppu_thread& ppu, u32 id, vm::ptr<u32> value)
{
	ppu.state += cpu_flag::wait;
	sys_spu.trace("sys_raw_spu_read_puint_mb(id=%d, value=*0x%x)", id, value);
	return raw_spu_read_puint_mb(id, value);
}
// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_read_puint_mb(ppu_thread& ppu, u32 id, vm::ptr<u32> value)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_read_puint_mb(id=%d, value=*0x%x)", id, value);
	return raw_spu_read_puint_mb<true>(id, value);
}
// Common implementation: set the SNR configuration (valid values are 0..3).
template <bool isolated = false>
error_code raw_spu_set_spu_cfg(u32 id, u32 value)
{
	// Only the two SNR configuration bits may be set
	if (value > 3)
	{
		fmt::throw_exception("Unexpected value (0x%x)", value);
	}

	const auto spu = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));

	// The SPU must exist and match the requested raw/isolated variant
	if (!spu || spu->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}

	spu->snr_config = value;
	return CELL_OK;
}

// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_set_spu_cfg(ppu_thread& ppu, u32 id, u32 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.trace("sys_raw_spu_set_spu_cfg(id=%d, value=0x%x)", id, value);

	return raw_spu_set_spu_cfg<false>(id, value);
}

// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_set_spu_cfg(ppu_thread& ppu, u32 id, u32 value)
{
	ppu.state += cpu_flag::wait;

	sys_spu.todo("sys_isolated_spu_set_spu_cfg(id=%d, value=0x%x)", id, value);

	return raw_spu_set_spu_cfg<true>(id, value);
}
// Common implementation: read the SNR configuration and write it to guest memory.
template <bool isolated = false>
error_code raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value)
{
	const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
	// The SPU must exist and match the requested raw/isolated variant
	if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	cpu_thread::get_current()->check_state();
	*value = static_cast<u32>(thread->snr_config);
	return CELL_OK;
}
// Syscall wrapper: raw SPU variant.
error_code sys_raw_spu_get_spu_cfg(ppu_thread& ppu, u32 id, u32 value)
{
	ppu.state += cpu_flag::wait;
	// Fixed: log message previously misspelled the function name as "get_spu_afg"
	sys_spu.trace("sys_raw_spu_get_spu_cfg(id=%d, value=*0x%x)", id, value);
	return raw_spu_get_spu_cfg(id, value);
}
// Syscall wrapper: isolated SPU variant.
error_code sys_isolated_spu_get_spu_cfg(ppu_thread& ppu, u32 id, vm::ptr<u32> value)
{
	ppu.state += cpu_flag::wait;
	// Fixed: log message previously misspelled the function name as "get_spu_afg"
	sys_spu.todo("sys_isolated_spu_get_spu_cfg(id=%d, value=*0x%x)", id, value);
	return raw_spu_get_spu_cfg<true>(id, value);
}
// Start execution of an isolated SPU by writing a run request to its RunCntl problem-state register.
error_code sys_isolated_spu_start(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;
	sys_spu.todo("sys_isolated_spu_start(id=%d)", id);
	const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
	if (!thread) [[unlikely]]
	{
		return CELL_ESRCH;
	}
	// TODO: Can return ESTAT if called twice
	thread->write_reg(RAW_SPU_BASE_ADDR + thread->lv2_id * RAW_SPU_OFFSET + RAW_SPU_PROB_OFFSET + SPU_RunCntl_offs, SPU_RUNCNTL_RUN_REQUEST);
	return CELL_OK;
}
| 61,700
|
C++
|
.cpp
| 2,147
| 26.072194
| 207
| 0.676797
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,359
|
sys_event_flag.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp
|
#include "stdafx.h"
#include "sys_event_flag.h"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include <algorithm>
#include "util/asm.hpp"
LOG_CHANNEL(sys_event_flag);
// Savestate deserialization constructor. The field order here defines the on-disk
// format and must match save() below: protocol, key, type, name, pattern.
lv2_event_flag::lv2_event_flag(utils::serial& ar)
	: protocol(ar)
	, key(ar)
	, type(ar)
	, name(ar)
{
	ar(pattern);
}
// Load an event flag from a savestate; IPC-shared flags are resolved via their key.
std::shared_ptr<void> lv2_event_flag::load(utils::serial& ar)
{
	auto eflag = std::make_shared<lv2_event_flag>(ar);
	return lv2_obj::load(eflag->key, eflag);
}
// Serialize to a savestate (must mirror the deserialization order above).
void lv2_event_flag::save(utils::serial& ar)
{
	ar(protocol, key, type, name, pattern);
}
// Create an event flag object with the given attributes and initial bit pattern.
error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_event_flag_attribute_t> attr, u64 init)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.warning("sys_event_flag_create(id=*0x%x, attr=*0x%x, init=0x%llx)", id, attr, init);
	if (!id || !attr)
	{
		return CELL_EFAULT;
	}
	// Copy attributes from guest memory once
	const auto _attr = *attr;
	const u32 protocol = _attr.protocol;
	// Only FIFO and PRIORITY wakeup protocols are supported
	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_PRIORITY)
	{
		sys_event_flag.error("sys_event_flag_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}
	const u32 type = _attr.type;
	// Waiter mode: single waiter or multiple waiters allowed
	if (type != SYS_SYNC_WAITER_SINGLE && type != SYS_SYNC_WAITER_MULTIPLE)
	{
		sys_event_flag.error("sys_event_flag_create(): unknown type (0x%x)", type);
		return CELL_EINVAL;
	}
	// IPC key for process-shared flags (0 for process-local)
	const u64 ipc_key = lv2_obj::get_key(_attr);
	if (const auto error = lv2_obj::create<lv2_event_flag>(_attr.pshared, ipc_key, _attr.flags, [&]
	{
		return std::make_shared<lv2_event_flag>(
			_attr.protocol,
			ipc_key,
			_attr.type,
			_attr.name_u64,
			init);
	}))
	{
		return error;
	}
	ppu.check_state();
	*id = idm::last_id();
	return CELL_OK;
}
// Destroy an event flag. Fails with EBUSY if any thread is still waiting on it.
error_code sys_event_flag_destroy(ppu_thread& ppu, u32 id)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.warning("sys_event_flag_destroy(id=0x%x)", id);
	// Withdraw the id only if there are no sleeping waiters
	const auto flag = idm::withdraw<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& flag) -> CellError
	{
		if (flag.sq)
		{
			return CELL_EBUSY;
		}
		lv2_obj::on_id_destroy(flag, flag.key);
		return {};
	});
	if (!flag)
	{
		return CELL_ESRCH;
	}
	if (flag.ret)
	{
		return flag.ret;
	}
	return CELL_OK;
}
// Wait until the flag's bit pattern satisfies (bitptn, mode), optionally with a timeout.
// The matched pattern is written to *result; waiting threads communicate through
// ppu.gpr[3] (status), gpr[4/5] (bitptn/mode) and gpr[6] (result pattern).
error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.trace("sys_event_flag_wait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x, timeout=0x%llx)", id, bitptn, mode, result, timeout);
	// Fix function arguments for external access
	ppu.gpr[3] = -1;
	ppu.gpr[4] = bitptn;
	ppu.gpr[5] = mode;
	ppu.gpr[6] = 0;
	// Always set result
	// RAII helper: writes the final pattern to guest memory on every exit path
	struct store_result
	{
		vm::ptr<u64> ptr;
		u64 val = 0;
		~store_result() noexcept
		{
			if (ptr)
			{
				cpu_thread::get_current()->check_state();
				*ptr = val;
			}
		}
	} store{result};
	if (!lv2_event_flag::check_mode(mode))
	{
		sys_event_flag.error("sys_event_flag_wait(): unknown mode (0x%x)", mode);
		return CELL_EINVAL;
	}
	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id, [&, notify = lv2_obj::notify_all_t()](lv2_event_flag& flag) -> CellError
	{
		// Fast path: try to satisfy the wait atomically without taking the mutex
		if (flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &ppu.gpr[6]);
		}).second)
		{
			// TODO: is it possible to return EPERM in this case?
			return {};
		}
		lv2_obj::prepare_for_sleep(ppu);
		std::lock_guard lock(flag.mutex);
		// Re-check under the lock (the pattern may have changed meanwhile)
		if (flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &ppu.gpr[6]);
		}).second)
		{
			return {};
		}
		// Single-waiter flags reject a second waiter
		if (flag.type == SYS_SYNC_WAITER_SINGLE && flag.sq)
		{
			return CELL_EPERM;
		}
		// Enqueue this PPU thread and go to sleep
		flag.sleep(ppu, timeout);
		lv2_obj::emplace(flag.sq, &ppu);
		return CELL_EBUSY;
	});
	if (!flag)
	{
		return CELL_ESRCH;
	}
	if (flag.ret)
	{
		// EBUSY means we were enqueued and must wait below; anything else is a real error
		if (flag.ret != CELL_EBUSY)
		{
			return flag.ret;
		}
	}
	else
	{
		// Satisfied immediately
		store.val = ppu.gpr[6];
		return CELL_OK;
	}
	// Sleep loop: wait until signaled, stopped, or timed out
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}
		if (is_stopped(state))
		{
			// Emulation is stopping: if still queued, mark for retry after savestate resume
			std::lock_guard lock(flag->mutex);
			for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}
			break;
		}
		// Short busy-wait before sleeping, in case the signal arrives promptly
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}
		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}
		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}
				ppu.state += cpu_flag::wait;
				if (!atomic_storage<ppu_thread*>::load(flag->sq))
				{
					// Waiters queue is empty, so the thread must have been signaled
					flag->mutex.lock_unlock();
					break;
				}
				std::lock_guard lock(flag->mutex);
				// If we were already dequeued, a signal won the race against the timeout
				if (!flag->unqueue(flag->sq, &ppu))
				{
					break;
				}
				ppu.gpr[3] = CELL_ETIMEDOUT;
				ppu.gpr[6] = flag->pattern;
				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}
	// gpr[3]/gpr[6] were filled in by the signaling thread (or by the timeout path above)
	store.val = ppu.gpr[6];
	return not_an_error(ppu.gpr[3]);
}
// Non-blocking variant of sys_event_flag_wait: succeed immediately or return EBUSY.
error_code sys_event_flag_trywait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.trace("sys_event_flag_trywait(id=0x%x, bitptn=0x%llx, mode=0x%x, result=*0x%x)", id, bitptn, mode, result);
	// Always set result
	// RAII helper: writes the observed pattern to guest memory on every exit path
	struct store_result
	{
		vm::ptr<u64> ptr;
		u64 val = 0;
		~store_result() noexcept
		{
			if (ptr)
			{
				cpu_thread::get_current()->check_state();
				*ptr = val;
			}
		}
	} store{result};
	if (!lv2_event_flag::check_mode(mode))
	{
		sys_event_flag.error("sys_event_flag_trywait(): unknown mode (0x%x)", mode);
		return CELL_EINVAL;
	}
	u64 pattern{};
	// Atomically test (and possibly clear, depending on mode) the pattern
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& flag)
	{
		return flag.pattern.fetch_op([&](u64& pat)
		{
			return lv2_event_flag::check_pattern(pat, bitptn, mode, &pattern);
		}).second;
	});
	if (!flag)
	{
		return CELL_ESRCH;
	}
	// Condition not met right now
	if (!flag.ret)
	{
		return not_an_error(CELL_EBUSY);
	}
	store.val = pattern;
	return CELL_OK;
}
// Set bits in the flag's pattern and wake all waiters whose condition becomes satisfied.
// The waiter bookkeeping uses the waiters' own GPRs: gpr[4/5] hold their bitptn/mode,
// gpr[7] is a per-pass visited marker, gpr[3]/gpr[6] receive status/result.
error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
{
	cpu.state += cpu_flag::wait;
	// Warning: may be called from SPU thread.
	sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id, bitptn);
	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
	if (!flag)
	{
		return CELL_ESRCH;
	}
	// Fast path: all bits already set, nothing to do
	if ((flag->pattern & bitptn) == bitptn)
	{
		return CELL_OK;
	}
	if (lv2_obj::notify_all_t notify; true)
	{
		std::lock_guard lock(flag->mutex);
		// Abort if any queued waiter is marked for savestate retry
		for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu)
		{
			if (ppu->state & cpu_flag::again)
			{
				cpu.state += cpu_flag::again;
				// Fake error for abort
				return not_an_error(CELL_EAGAIN);
			}
		}
		u32 count = 0;
		// Process all waiters in single atomic op
		// The loop retries if the pattern was concurrently modified in a way the
		// chosen set of woken waiters depended on (checked via dependant_mask).
		for (u64 pattern = flag->pattern, to_write = pattern, dependant_mask = 0;; to_write = pattern, dependant_mask = 0)
		{
			count = 0;
			to_write |= bitptn;
			dependant_mask = 0;
			// Clear the "visited" marker on every queued waiter
			for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu)
			{
				ppu->gpr[7] = 0;
			}
			auto first = +flag->sq;
			// Select the next unvisited waiter (best priority first for PRIORITY protocol)
			auto get_next = [&]() -> ppu_thread*
			{
				s32 prio = smax;
				ppu_thread* it{};
				for (auto ppu = first; ppu; ppu = ppu->next_cpu)
				{
					if (!ppu->gpr[7] && (flag->protocol != SYS_SYNC_PRIORITY || ppu->prio.load().prio <= prio))
					{
						it = ppu;
						prio = ppu->prio.load().prio;
					}
				}
				if (it)
				{
					// Mark it so it won't reappear
					it->gpr[7] = 1;
				}
				return it;
			};
			// Simulate waking waiters in protocol order against the tentative pattern
			while (auto it = get_next())
			{
				auto& ppu = *it;
				const u64 pattern = ppu.gpr[4];
				const u64 mode = ppu.gpr[5];
				// If it's OR mode, set bits must have waken up the thread therefore no
				// dependency on old value
				const u64 dependant_mask_or = ((mode & 0xf) == SYS_EVENT_FLAG_WAIT_OR || (bitptn & pattern & to_write) == pattern ? 0 : pattern);
				if (lv2_event_flag::check_pattern(to_write, pattern, mode, &ppu.gpr[6]))
				{
					dependant_mask |= dependant_mask_or;
					ppu.gpr[3] = CELL_OK;
					count++;
					if (!to_write)
					{
						break;
					}
				}
				else
				{
					ppu.gpr[3] = -1;
				}
			}
			dependant_mask &= ~bitptn;
			// Commit: succeed only if the bits we depended on did not change concurrently
			auto [new_val, ok] = flag->pattern.fetch_op([&](u64& x)
			{
				if ((x ^ pattern) & dependant_mask)
				{
					return false;
				}
				x |= bitptn;
				// Clear the bit-wise difference
				x &= ~((pattern | bitptn) & ~to_write);
				return true;
			});
			if (ok)
			{
				break;
			}
			// Retry with the freshly observed pattern
			pattern = new_val;
		}
		if (!count)
		{
			return CELL_OK;
		}
		// Remove waiters
		// Unlink satisfied waiters from the sleep queue and schedule them for wakeup
		for (auto next_cpu = &flag->sq; *next_cpu;)
		{
			auto& ppu = **next_cpu;
			if (ppu.gpr[3] == CELL_OK)
			{
				atomic_storage<ppu_thread*>::release(*next_cpu, ppu.next_cpu);
				ppu.next_cpu = nullptr;
				flag->append(&ppu);
				continue;
			}
			next_cpu = &ppu.next_cpu;
		};
		lv2_obj::awake_all();
	}
	return CELL_OK;
}
// Clear bits of an event flag: only bits also present in bitptn survive.
error_code sys_event_flag_clear(ppu_thread& ppu, u32 id, u64 bitptn)
{
	ppu.state += cpu_flag::wait;

	sys_event_flag.trace("sys_event_flag_clear(id=0x%x, bitptn=0x%llx)", id, bitptn);

	// Atomically AND the pattern with the provided mask
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [&](lv2_event_flag& ef)
	{
		ef.pattern &= bitptn;
	});

	if (!flag)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Cancel all threads waiting on an event flag; each woken thread receives
// CELL_ECANCELED. The number of cancelled waiters is written to *num.
error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.trace("sys_event_flag_cancel(id=0x%x, num=*0x%x)", id, num);
	if (num) *num = 0;
	const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
	if (!flag)
	{
		return CELL_ESRCH;
	}
	u32 value = 0;
	{
		lv2_obj::notify_all_t notify;
		std::lock_guard lock(flag->mutex);
		// Abort if any queued waiter is marked for savestate retry
		for (auto cpu = +flag->sq; cpu; cpu = cpu->next_cpu)
		{
			if (cpu->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return {};
			}
		}
		// Get current pattern
		const u64 pattern = flag->pattern;
		// Signal all threads to return CELL_ECANCELED (protocol does not matter)
		while (auto ppu = flag->schedule<ppu_thread>(flag->sq, SYS_SYNC_FIFO))
		{
			ppu->gpr[3] = CELL_ECANCELED;
			ppu->gpr[6] = pattern;
			value++;
			flag->append(ppu);
		}
		if (value)
		{
			lv2_obj::awake_all();
		}
	}
	static_cast<void>(ppu.test_stopped());
	if (num) *num = value;
	return CELL_OK;
}
// Read the current bit pattern of an event flag without modifying it.
error_code sys_event_flag_get(ppu_thread& ppu, u32 id, vm::ptr<u64> flags)
{
	ppu.state += cpu_flag::wait;
	sys_event_flag.trace("sys_event_flag_get(id=0x%x, flags=*0x%x)", id, flags);
	const auto flag = idm::check<lv2_obj, lv2_event_flag>(id, [](lv2_event_flag& flag)
	{
		return +flag.pattern;
	});
	ppu.check_state();
	// Note error precedence: an invalid id is reported before a null output pointer
	if (!flag)
	{
		if (flags) *flags = 0;
		return CELL_ESRCH;
	}
	if (!flags)
	{
		return CELL_EFAULT;
	}
	*flags = flag.ret;
	return CELL_OK;
}
| 10,798
|
C++
|
.cpp
| 456
| 20.333333
| 145
| 0.628376
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,360
|
sys_game.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_game.cpp
|
#include "stdafx.h"
#include "util/sysinfo.hpp"
#include "util/v128.hpp"
#include "Emu/Cell/lv2/sys_process.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/System.h"
#include "Emu/system_utils.hpp"
#include "Emu/IdManager.h"
#include "Utilities/StrUtil.h"
#include "Utilities/Thread.h"
#include "sys_game.h"
LOG_CHANNEL(sys_game);
// Holds the system software (firmware) version reported to the guest,
// stored as an integer scaled by 10000 (e.g. 4.81 -> 48100).
struct system_sw_version
{
	system_sw_version()
	{
		f64 version_f = 0;
		// Parse the configured firmware version string (e.g. "4.81"); 0 on failure
		if (!try_to_float(&version_f, utils::get_firmware_version(), 0.0f, 99.9999f))
			sys_game.error("Error parsing firmware version");
		version = static_cast<usz>(version_f * 10000);
	}
	system_sw_version(const system_sw_version&) = delete;
	system_sw_version& operator=(const system_sw_version&) = delete;
	~system_sw_version() = default;
	// Current version value; may be overridden via _sys_game_set_system_sw_version
	atomic_t<u64> version;
};
// Emulates the 16-byte board storage area, backed by a file on the emulated HDD1.
// Reads/writes operate on an atomic 128-bit value; the file is only written back
// on destruction and only if a write occurred.
struct board_storage
{
public:
	// Copy the current 16-byte storage content into the caller's buffer.
	bool read(u8* buffer)
	{
		if (!buffer)
			return false;
		const auto data = storage.load();
		memcpy(buffer, &data, size);
		return true;
	}
	// Overwrite the storage with 16 bytes from the caller's buffer.
	bool write(u8* buffer)
	{
		if (!buffer)
			return false;
		storage.store(read_from_ptr<be_t<v128>>(buffer));
		written = true;
		return true;
	}
	board_storage()
	{
		// Default content is all 0xFF (erased), overridden by a previous session's file if present
		memset(&storage.raw(), -1, size);
		if (fs::file file; file.open(file_path, fs::read))
			file.read(&storage.raw(), std::min(file.size(), size));
	}
	board_storage(const board_storage&) = delete;
	board_storage& operator=(const board_storage&) = delete;
	~board_storage()
	{
		// Persist to disk only if the guest actually wrote something
		if (written)
		{
			if (fs::file file; file.open(file_path, fs::create + fs::write + fs::lock))
			{
				file.write(&storage.raw(), size);
				file.trunc(size);
			}
		}
	}
private:
	// Big-endian 128-bit storage cell
	atomic_be_t<v128> storage;
	// Set once a write happens; gates the flush in the destructor
	bool written = false;
	const std::string file_path = rpcs3::utils::get_hdd1_dir() + "/caches/board_storage.bin";
	static constexpr u64 size = sizeof(v128);
};
// LV2 watchdog thread: restarts the game when the guest fails to "pet" the
// watchdog (via _sys_game_watchdog_clear) within the configured timeout.
struct watchdog_t
{
	// Control word updated atomically as a whole (8 bytes)
	struct alignas(8) control_t
	{
		bool needs_restart = false; // set by start/clear to re-arm the countdown
		bool active = false;        // watchdog enabled
		char pad[sizeof(u32) - sizeof(bool) * 2]{};
		u32 timeout = 0;            // timeout in microseconds
	};
	atomic_t<control_t> control;
	// Thread entry point: polls every 50 ms.
	void operator()()
	{
		u64 start_time = get_system_time();
		u64 old_time = start_time;
		u64 current_time = old_time;
		constexpr u64 sleep_time = 50'000;
		while (thread_ctrl::state() != thread_state::aborting)
		{
			// While the emulator is paused, shift the start time forward so paused
			// time does not count against the watchdog
			if (Emu.GetStatus(false) == system_state::paused)
			{
				start_time += current_time - old_time;
				old_time = current_time;
				thread_ctrl::wait_for(sleep_time);
				current_time = get_system_time();
				continue;
			}
			old_time = std::exchange(current_time, get_system_time());
			// Consume a pending re-arm request, if any
			const auto old = control.fetch_op([&](control_t& data)
			{
				if (data.needs_restart)
				{
					data.needs_restart = false;
					return true;
				}
				return false;
			}).first;
			if (old.active && old.needs_restart)
			{
				// Watchdog was petted: restart the countdown
				start_time = current_time;
				old_time = current_time;
				continue;
			}
			// Timeout expired without a clear: restart the game
			if (old.active && current_time - start_time >= old.timeout)
			{
				sys_game.success("Watchdog timeout! Restarting the game...");
				Emu.CallFromMainThread([]()
				{
					Emu.Restart(false);
				});
				return;
			}
			thread_ctrl::wait_for(sleep_time);
		}
	}
	static constexpr auto thread_name = "LV2 Watchdog Thread"sv;
};
// Request termination of the LV2 watchdog thread, if one was created.
void abort_lv2_watchdog()
{
	const auto wdog = g_fxo->try_get<named_thread<watchdog_t>>();

	if (!wdog)
	{
		return;
	}

	sys_game.notice("Aborting %s...", wdog->thread_name);
	*wdog = thread_state::aborting;
}
// Arm the LV2 watchdog. `timeout` is given in seconds, converted to
// microseconds and rounded down to a multiple of 64 (matching firmware).
// Returns CELL_EABORT when the watchdog is already active.
error_code _sys_game_watchdog_start(u32 timeout)
{
	sys_game.trace("sys_game_watchdog_start(timeout=%d)", timeout);

	// According to disassembly
	// NOTE(review): the u32 multiply wraps for timeout > ~4294s - presumably matches real firmware, confirm
	timeout *= 1'000'000;
	timeout &= -64;

	if (!g_fxo->get<named_thread<watchdog_t>>().control.fetch_op([&](watchdog_t::control_t& data)
	{
		if (data.active)
		{
			// Already armed: the syscall fails
			return false;
		}

		data.needs_restart = true;
		data.active = true;
		data.timeout = timeout;
		return true;
	}).second)
	{
		return CELL_EABORT;
	}

	return CELL_OK;
}
// Disarm the LV2 watchdog; a no-op when it was not active.
error_code _sys_game_watchdog_stop()
{
	sys_game.trace("sys_game_watchdog_stop()");

	auto& wdog = g_fxo->get<named_thread<watchdog_t>>();

	wdog.control.fetch_op([](watchdog_t::control_t& ctrl)
	{
		if (!ctrl.active)
		{
			return false;
		}

		ctrl.active = false;
		return true;
	});

	return CELL_OK;
}
// Rearm the watchdog countdown; only meaningful while the watchdog is
// active and no rearm request is already pending.
error_code _sys_game_watchdog_clear()
{
	sys_game.trace("sys_game_watchdog_clear()");

	auto& wdog = g_fxo->get<named_thread<watchdog_t>>();

	wdog.control.fetch_op([](watchdog_t::control_t& ctrl)
	{
		if (!ctrl.active || ctrl.needs_restart)
		{
			return false;
		}

		ctrl.needs_restart = true;
		return true;
	});

	return CELL_OK;
}
// Set the reported system software version; requires root permission.
error_code _sys_game_set_system_sw_version(u64 version)
{
	sys_game.trace("sys_game_set_system_sw_version(version=%d)", version);

	if (!g_ps3_process_info.has_root_perm())
	{
		return CELL_ENOSYS;
	}

	g_fxo->get<system_sw_version>().version = version;

	return CELL_OK;
}
// Report the value previously set via _sys_game_set_system_sw_version.
u64 _sys_game_get_system_sw_version()
{
	sys_game.trace("sys_game_get_system_sw_version()");

	const u64 version = g_fxo->get<system_sw_version>().version;
	return version;
}
// Read the emulated board storage into guest memory.
// Status byte: 0x00 on success, 0xFF on failure.
error_code _sys_game_board_storage_read(vm::ptr<u8> buffer, vm::ptr<u8> status)
{
	sys_game.trace("sys_game_board_storage_read(buffer=*0x%x, status=*0x%x)", buffer, status);

	if (!buffer || !status)
	{
		return CELL_EFAULT;
	}

	const bool ok = g_fxo->get<board_storage>().read(buffer.get_ptr());
	*status = ok ? 0x00 : 0xFF;

	return CELL_OK;
}
// Write guest memory into the emulated board storage.
// Status byte: 0x00 on success, 0xFF on failure.
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer, vm::ptr<u8> status)
{
	sys_game.trace("sys_game_board_storage_write(buffer=*0x%x, status=*0x%x)", buffer, status);

	if (!buffer || !status)
	{
		return CELL_EFAULT;
	}

	const bool ok = g_fxo->get<board_storage>().write(buffer.get_ptr());
	*status = ok ? 0x00 : 0xFF;

	return CELL_OK;
}
// Query the RTC status; the emulator always reports 0 (OK).
error_code _sys_game_get_rtc_status(vm::ptr<s32> status)
{
	sys_game.trace("sys_game_get_rtc_status(status=*0x%x)", status);

	if (!status)
	{
		return CELL_EFAULT;
	}

	*status = 0;

	return CELL_OK;
}
| 5,723
|
C++
|
.cpp
| 228
| 22.307018
| 94
| 0.681274
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,361
|
sys_lwmutex.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp
|
#include "stdafx.h"
#include "sys_lwmutex.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "util/asm.hpp"
LOG_CHANNEL(sys_lwmutex);
// Deserialize a lwmutex from a savestate. Field order must exactly match
// lv2_lwmutex::save(): protocol, control area pointer, name, then signaled.
lv2_lwmutex::lv2_lwmutex(utils::serial& ar)
	: protocol(ar)
	, control(ar.pop<decltype(control)>())
	, name(ar.pop<be_t<u64>>())
{
	ar(lv2_control.raw().signaled);
}
// Serialize the lwmutex; order must match the deserializing constructor above.
void lv2_lwmutex::save(utils::serial& ar)
{
	ar(protocol, control, name, lv2_control.raw().signaled);
}
// Create the kernel-side object backing a userspace lightweight mutex.
// Returns the new ID via `lwmutex_id`; EINVAL on unknown protocol,
// EAGAIN when the object could not be allocated.
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace(u8"_sys_lwmutex_create(lwmutex_id=*0x%x, protocol=0x%x, control=*0x%x, has_name=0x%x, name=0x%llx (“%s”))", lwmutex_id, protocol, control, has_name, name, lv2_obj::name_64{std::bit_cast<be_t<u64>>(name)});

	if (protocol != SYS_SYNC_FIFO && protocol != SYS_SYNC_RETRY && protocol != SYS_SYNC_PRIORITY)
	{
		sys_lwmutex.error("_sys_lwmutex_create(): unknown protocol (0x%x)", protocol);
		return CELL_EINVAL;
	}

	// The name is only kept when has_name is negative (sign bit used as a flag)
	if (!(has_name < 0))
	{
		name = 0;
	}

	if (const u32 id = idm::make<lv2_obj, lv2_lwmutex>(protocol, control, name))
	{
		ppu.check_state();
		*lwmutex_id = id;
		return CELL_OK;
	}

	return CELL_EAGAIN;
}
// Destroy a lwmutex. Fails with EBUSY when threads are queued on it; if
// lwcond waiters exist, the syscall sleeps until they all quit, then retries.
error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);

	std::shared_ptr<lv2_lwmutex> _mutex;

	while (true)
	{
		s32 old_val = 0;

		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex) -> CellError
		{
			// Ignore check on first iteration
			if (_mutex && std::addressof(mutex) != _mutex.get())
			{
				// Other thread has destroyed the lwmutex earlier
				return CELL_ESRCH;
			}

			std::lock_guard lock(mutex.mutex);

			if (mutex.load_sq())
			{
				// Threads are sleeping on the mutex: refuse to destroy
				return CELL_EBUSY;
			}

			// Mark as destroyed (sign bit) and fetch remaining lwcond waiters
			old_val = mutex.lwcond_waiters.or_fetch(smin);

			if (old_val != smin)
			{
				// Deschedule if waiters were found
				lv2_obj::sleep(ppu);

				// Repeat loop: there are lwcond waiters
				return CELL_EAGAIN;
			}

			return {};
		});

		if (!ptr)
		{
			return CELL_ESRCH;
		}

		if (ret)
		{
			if (ret != CELL_EAGAIN)
			{
				return ret;
			}
		}
		else
		{
			break;
		}

		_mutex = std::move(ptr);

		// Wait for all lwcond waiters to quit
		while (old_val + 0u > 1u << 31)
		{
			thread_ctrl::wait_on(_mutex->lwcond_waiters, old_val);

			if (ppu.is_stopped())
			{
				// Savestate/stop requested: retry the syscall later
				ppu.state += cpu_flag::again;
				return {};
			}

			old_val = _mutex->lwcond_waiters;
		}

		// Wake up from sleep
		ppu.check_state();
	}

	return CELL_OK;
}
// Slow path of lwmutex locking: try to consume the signaled flag, otherwise
// queue the PPU thread and sleep with an optional timeout.
// Result is delivered through ppu.gpr[3] (OK, EBUSY or ETIMEDOUT).
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_lock(lwmutex_id=0x%x, timeout=0x%llx)", lwmutex_id, timeout);

	ppu.gpr[3] = CELL_OK;

	const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		// Fast path: consume the signaled flag if set
		if (s32 signal = mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t& data)
		{
			if (data.signaled)
			{
				data.signaled = 0;
				return true;
			}

			return false;
		}).first.signaled)
		{
			// Low bit clear indicates a "forced" unlock (unlock2): report EBUSY
			if (~signal & 1)
			{
				ppu.gpr[3] = CELL_EBUSY;
			}

			return true;
		}

		lv2_obj::prepare_for_sleep(ppu);

		ppu.cancel_sleep = 1;

		// Retry ownership while also enqueueing ourselves atomically
		if (s32 signal = mutex.try_own(&ppu))
		{
			if (~signal & 1)
			{
				ppu.gpr[3] = CELL_EBUSY;
			}

			ppu.cancel_sleep = 0;
			return true;
		}

		const bool finished = !mutex.sleep(ppu, timeout);
		notify.cleanup();
		return finished;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (mutex.ret)
	{
		// Acquired (or EBUSY) without sleeping
		return not_an_error(ppu.gpr[3]);
	}

	// Sleep loop: wait to be signaled, stopped or timed out
	while (auto state = +ppu.state)
	{
		if (state & cpu_flag::signal && ppu.state.test_and_reset(cpu_flag::signal))
		{
			break;
		}

		if (is_stopped(state))
		{
			// If still queued, the syscall must be retried after savestate/stop
			std::lock_guard lock(mutex->mutex);

			for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
			{
				if (cpu == &ppu)
				{
					ppu.state += cpu_flag::again;
					return {};
				}
			}

			break;
		}

		// Brief busy-wait for an imminent signal before a full wait
		for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
		{
			busy_wait(500);
		}

		if (ppu.state & cpu_flag::signal)
		{
			continue;
		}

		if (timeout)
		{
			if (lv2_obj::wait_timeout(timeout, &ppu))
			{
				// Wait for rescheduling
				if (ppu.check_state())
				{
					continue;
				}

				ppu.state += cpu_flag::wait;

				if (!mutex->load_sq())
				{
					// Sleep queue is empty, so the thread must have been signaled
					mutex->mutex.lock_unlock();
					break;
				}

				std::lock_guard lock(mutex->mutex);

				bool success = false;

				// Remove ourselves from the sleep queue; only report timeout if
				// we were actually still queued (not concurrently signaled)
				mutex->lv2_control.fetch_op([&](lv2_lwmutex::control_data_t& data)
				{
					success = false;

					ppu_thread* sq = static_cast<ppu_thread*>(data.sq);

					const bool retval = &ppu == sq;

					if (!mutex->unqueue<false>(sq, &ppu))
					{
						return false;
					}

					success = true;

					if (!retval)
					{
						return false;
					}

					data.sq = sq;
					return true;
				});

				if (success)
				{
					ppu.next_cpu = nullptr;
					ppu.gpr[3] = CELL_ETIMEDOUT;
				}

				break;
			}
		}
		else
		{
			ppu.state.wait(state);
		}
	}

	return not_an_error(ppu.gpr[3]);
}
// Non-blocking lwmutex acquisition: consume the signaled flag if available.
error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_trylock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
	{
		const auto [_, acquired] = mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t& data)
		{
			if (!(data.signaled & 1))
			{
				return false;
			}

			data.signaled = 0;
			return true;
		});

		return acquired;
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	if (!mutex.ret)
	{
		// Currently owned by another thread
		return not_an_error(CELL_EBUSY);
	}

	return CELL_OK;
}
// Slow path of lwmutex unlocking: hand ownership to the next queued thread,
// or set the signaled flag when the queue is empty.
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.trace("_sys_lwmutex_unlock(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		// Fast path: no sleepers, just mark signaled
		if (mutex.try_unlock(false))
		{
			return;
		}

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.reown<ppu_thread>())
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				// Waiter must retry for savestate purposes; so must we
				ppu.state += cpu_flag::again;
				return;
			}

			mutex.awake(cpu);
			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
		}
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
// Variant of unlock used by lwcond: the woken waiter receives EBUSY so that
// userspace re-attempts the lock acquisition.
error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
{
	ppu.state += cpu_flag::wait;

	sys_lwmutex.warning("_sys_lwmutex_unlock2(lwmutex_id=0x%x)", lwmutex_id);

	const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
	{
		if (mutex.try_unlock(true))
		{
			return;
		}

		std::lock_guard lock(mutex.mutex);

		if (const auto cpu = mutex.reown<ppu_thread>(true))
		{
			if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
			{
				ppu.state += cpu_flag::again;
				return;
			}

			// Force the waiter to go through the lock retry path
			static_cast<ppu_thread*>(cpu)->gpr[3] = CELL_EBUSY;
			mutex.awake(cpu);
			notify.cleanup(); // lv2_lwmutex::mutex is not really active 99% of the time, can be ignored
		}
	});

	if (!mutex)
	{
		return CELL_ESRCH;
	}

	return CELL_OK;
}
| 7,376
|
C++
|
.cpp
| 309
| 20.304207
| 224
| 0.63622
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,362
|
sys_config.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_config.cpp
|
#include "stdafx.h"
#include "Emu/System.h"
#include "Emu/Memory/vm.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/lv2/sys_event.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_config.h"
LOG_CHANNEL(sys_config);
// Enums
// Pretty-printer for sys_config_service_id values; unknown IDs fall back to
// a hex representation (negative IDs are user services with the sign bit set).
template<>
void fmt_class_string<sys_config_service_id>::format(std::string& out, u64 id)
{
	switch (static_cast<s64>(id))
	{
	case SYS_CONFIG_SERVICE_PADMANAGER : out += "SYS_CONFIG_SERVICE_PADMANAGER"; return;
	case SYS_CONFIG_SERVICE_PADMANAGER2 : out += "SYS_CONFIG_SERVICE_PADMANAGER2"; return;
	case SYS_CONFIG_SERVICE_USER_LIBPAD : out += "SYS_CONFIG_SERVICE_USER_LIBPAD"; return;
	case SYS_CONFIG_SERVICE_USER_LIBKB : out += "SYS_CONFIG_SERVICE_USER_LIBKB"; return;
	case SYS_CONFIG_SERVICE_USER_LIBMOUSE: out += "SYS_CONFIG_SERVICE_USER_LIBMOUSE"; return;
	}

	if (static_cast<s64>(id) < 0)
	{
		fmt::append(out, "SYS_CONFIG_SERVICE_USER_%llx", id & ~(1ull << 63));
	}
	else
	{
		fmt::append(out, "SYS_CONFIG_SERVICE_%llx", id);
	}
}
// Pretty-printer for sys_config_service_listener_type values
template<>
void fmt_class_string<sys_config_service_listener_type>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](auto value)
	{
		switch (value)
		{
		STR_CASE(SYS_CONFIG_SERVICE_LISTENER_ONCE);
		STR_CASE(SYS_CONFIG_SERVICE_LISTENER_REPEATING);
		}

		return unknown;
	});
}
// Utilities
// Append a hex dump of `buffer` to `out` in the form "0x<hex bytes>",
// or the literal "EMPTY" when the buffer has no contents.
void dump_buffer(std::string& out, const std::vector<u8>& buffer)
{
	if (buffer.empty())
	{
		fmt::append(out, "EMPTY");
		return;
	}

	out.reserve(out.size() + buffer.size() * 2 + 1);
	fmt::append(out, "0x");

	for (u8 byte : buffer)
	{
		fmt::append(out, "%02x", byte);
	}
}
// LV2 Config
// One-time lv2_config initialization (guarded by an atomic flag):
// registers the padmanager services so vsh sees a connected controller.
void lv2_config::initialize()
{
	// Atomic 0 -> 1 transition; all later calls are no-ops
	if (m_state || !m_state.compare_and_swap_test(0, 1))
	{
		return;
	}

	// Register padmanager service, notifying vsh that a controller is connected
	static const u8 hid_info[0x1a] = {
		0x01, 0x01, // 2 unk
		0x02, 0x02, // 4
		0x00, 0x00, // 6
		0x00, 0x00, // 8
		0x00, 0x00, // 10
		0x05, 0x4c, // 12 vid
		0x02, 0x68, // 14 pid
		0x00, 0x10, // 16 unk2
		0x91, 0x88, // 18
		0x04, 0x00, // 20
		0x00, 0x07, // 22
		0x00, 0x00, // 24
		0x00, 0x00 // 26
	};

	// user_id for the padmanager seems to signify the controller port number, and the buffer contains some sort of HID descriptor
	lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER , 0, 1, 0, hid_info, 0x1a)->notify();
	lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER2, 0, 1, 0, hid_info, 0x1a)->notify();
}
void lv2_config::add_service_event(const std::shared_ptr<lv2_config_service_event>& event)
{
std::lock_guard lock(m_mutex);
events.emplace(event->id, event);
}
// Stop tracking a service event; existing shared_ptr holders keep it alive.
void lv2_config::remove_service_event(u32 id)
{
	std::lock_guard guard(m_mutex);
	events.erase(id);
}
// LV2 Config Service Listener
// Decide whether `service` matches this listener's subscription.
bool lv2_config_service_listener::check_service(const lv2_config_service& service) const
{
	// One-shot listeners only ever receive a single event
	if (type == SYS_CONFIG_SERVICE_LISTENER_ONCE && !service_events.empty())
		return false;

	// Must be subscribed to this exact service ID
	if (service_id != service.id)
		return false;

	// The service must meet the listener's verbosity threshold
	if (min_verbosity > service.verbosity)
		return false;

	// realhw only seems to send the pad connected events to the listeners that provided 0x01 as the first byte of their data buffer
	// TODO: Figure out how this filter works more properly
	if (service_id == SYS_CONFIG_SERVICE_PADMANAGER && (data.empty() || data[0] != 0x01))
		return false;

	return true;
}
// Record the event (consulted by the ONCE-listener filter), then deliver it.
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service_event>& event)
{
	service_events.push_back(event);
	return event->notify();
}
// Build and deliver a service event for `service` if it matches this listener.
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service>& service)
{
	if (!check_service(*service))
	{
		return false;
	}

	// Create service event and notify queue!
	return notify(lv2_config_service_event::create(handle, service, *this));
}
void lv2_config_service_listener::notify_all()
{
std::vector<std::shared_ptr<lv2_config_service>> services;
// Grab all events
idm::select<lv2_config_service>([&](u32 /*id*/, lv2_config_service& service)
{
if (check_service(service))
{
services.push_back(service.get_shared_ptr());
}
});
// Sort services by timestamp
sort(services.begin(), services.end(), [](const std::shared_ptr<lv2_config_service>& s1, const std::shared_ptr<lv2_config_service>& s2)
{
return s1->timestamp < s2->timestamp;
});
// Notify listener (now with services in sorted order)
for (auto& service : services)
{
this->notify(service);
}
}
// LV2 Config Service
// Mark the service as unregistered, notify listeners of the state change,
// then withdraw it from the IDM (object may outlive this via event refs).
void lv2_config_service::unregister()
{
	registered = false;

	// Notify listeners
	notify();

	// Allow this object to be destroyed by withdrawing it from the IDM
	// Note that it won't be destroyed while there are service events that hold a reference to it
	idm::remove<lv2_config_service>(idm_id);
}
// Deliver this service's current state to every listener that matches it.
void lv2_config_service::notify() const
{
	std::vector<std::shared_ptr<lv2_config_service_listener>> listeners;

	// Shared pointer to self (stored as a weak pointer to avoid a cycle)
	auto sptr = wkptr.lock();

	idm::select<lv2_config_service_listener>([&](u32 /*id*/, lv2_config_service_listener& listener)
	{
		if (listener.check_service(*sptr))
			listeners.push_back(listener.get_shared_ptr());
	});

	for (auto& listener : listeners)
	{
		listener->notify(this->get_shared_ptr());
	}
}
// Push this event onto the owning config handle's event queue.
// Returns false when the handle has already been closed.
bool lv2_config_service_event::notify() const
{
	const auto _handle = handle.lock();

	if (!_handle)
	{
		return false;
	}

	// Send event: data3 packs the registered flag (high word) with the event id
	return _handle->notify(SYS_CONFIG_EVENT_SOURCE_SERVICE, (static_cast<u64>(service->is_registered()) << 32) | id, service->get_size());
}
// LV2 Config Service Event
// Serialize this event into the guest-visible sys_config_service_event_t.
// Payload fields (verbosity/padding/data) are only written while registered.
void lv2_config_service_event::write(sys_config_service_event_t *dst) const
{
	const auto registered = service->is_registered();

	dst->service_listener_handle = listener.get_id();
	dst->registered = registered;
	dst->service_id = service->id;
	dst->user_id = service->user_id;

	if (registered)
	{
		dst->verbosity = service->verbosity;
		dst->padding = service->padding;

		const auto size = service->data.size();
		dst->data_size = static_cast<u32>(size);
		memcpy(dst->data, service->data.data(), size);
	}
}
/*
* Syscalls
*/
// Open a sys_config handle bound to an existing event queue.
// Returns the new handle via `out_config_hdl`; ESRCH when the queue does not
// exist, EAGAIN when the handle could not be allocated.
error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl)
{
	sys_config.trace("sys_config_open(equeue_hdl=0x%x, out_config_hdl=*0x%x)", equeue_hdl, out_config_hdl);

	// Find queue with the given ID
	// (non-const so the ownership below is actually moved, not copied)
	auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_hdl);

	if (!queue)
	{
		return CELL_ESRCH;
	}

	// Initialize lv2_config global state (one-time, internally guarded)
	g_fxo->get<lv2_config>().initialize();

	// Create a lv2_config_handle object
	const auto config = lv2_config_handle::create(std::move(queue));

	if (config)
	{
		*out_config_hdl = idm::last_id();
		return CELL_OK;
	}

	// Failed to allocate sys_config object
	return CELL_EAGAIN;
}
// Close a sys_config handle previously returned by sys_config_open.
error_code sys_config_close(u32 config_hdl)
{
	sys_config.trace("sys_config_close(config_hdl=0x%x)", config_hdl);

	if (idm::remove<lv2_config_handle>(config_hdl))
	{
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Copy a previously queued service event into a guest buffer.
// EAGAIN indicates the supplied buffer is too small for the event payload.
error_code sys_config_get_service_event(u32 config_hdl, u32 event_id, vm::ptr<sys_config_service_event_t> dst, u64 size)
{
	sys_config.trace("sys_config_get_service_event(config_hdl=0x%x, event_id=0x%llx, dst=*0x%llx, size=0x%llx)", config_hdl, event_id, dst, size);

	// Find sys_config handle object with the given ID
	const auto cfg = idm::get<lv2_config_handle>(config_hdl);
	if (!cfg)
	{
		return CELL_ESRCH;
	}

	// Find service_event object
	const auto event = g_fxo->get<lv2_config>().find_event(event_id);
	if (!event)
	{
		return CELL_ESRCH;
	}

	// Check buffer fits
	if (!event->check_buffer_size(size))
	{
		return CELL_EAGAIN;
	}

	// Write event to buffer
	event->write(dst.get_ptr());

	return CELL_OK;
}
// Register a listener on a service ID; the listener is immediately replayed
// all past matching events. Returns the listener handle via out_listener_hdl.
error_code sys_config_add_service_listener(u32 config_hdl, sys_config_service_id service_id, u64 min_verbosity, vm::ptr<void> in, u64 size, sys_config_service_listener_type type, vm::ptr<u32> out_listener_hdl)
{
	sys_config.trace("sys_config_add_service_listener(config_hdl=0x%x, service_id=0x%llx, min_verbosity=0x%llx, in=*0x%x, size=%lld, type=0x%llx, out_listener_hdl=*0x%x)", config_hdl, service_id, min_verbosity, in, size, type, out_listener_hdl);

	// Find sys_config handle object with the given ID
	auto cfg = idm::get<lv2_config_handle>(config_hdl);
	if (!cfg)
	{
		return CELL_ESRCH;
	}

	// Create service listener
	const auto listener = lv2_config_service_listener::create(cfg, service_id, min_verbosity, type, static_cast<u8*>(in.get_ptr()), size);
	if (!listener)
	{
		return CELL_EAGAIN;
	}

	// The buffer semantics are not fully understood yet; log for research
	if (size > 0)
	{
		std::string buf_str;
		dump_buffer(buf_str, listener->data);
		sys_config.todo("Registered service listener for service %llx with non-zero buffer: %s", service_id, buf_str.c_str());
	}

	// Notify listener with all past events
	listener->notify_all();

	// Done!
	*out_listener_hdl = listener->get_id();
	return CELL_OK;
}
// Unregister a service listener created by sys_config_add_service_listener.
error_code sys_config_remove_service_listener(u32 config_hdl, u32 listener_hdl)
{
	sys_config.trace("sys_config_remove_service_listener(config_hdl=0x%x, listener_hdl=0x%x)", config_hdl, listener_hdl);

	if (idm::remove<lv2_config_service_listener>(listener_hdl))
	{
		return CELL_OK;
	}

	return CELL_ESRCH;
}
// Register a new service and immediately notify all matching listeners.
// Returns the service handle via out_service_hdl.
error_code sys_config_register_service(u32 config_hdl, sys_config_service_id service_id, u64 user_id, u64 verbosity, vm::ptr<u8> data_buf, u64 size, vm::ptr<u32> out_service_hdl)
{
	sys_config.trace("sys_config_register_service(config_hdl=0x%x, service_id=0x%llx, user_id=0x%llx, verbosity=0x%llx, data_but=*0x%llx, size=%lld, out_service_hdl=*0x%llx)", config_hdl, service_id, user_id, verbosity, data_buf, size, out_service_hdl);

	// Find sys_config handle object with the given ID
	const auto cfg = idm::get<lv2_config_handle>(config_hdl);
	if (!cfg)
	{
		return CELL_ESRCH;
	}

	// Create service
	const auto service = lv2_config_service::create(service_id, user_id, verbosity, 0, data_buf.get_ptr(), size);
	if (!service)
	{
		return CELL_EAGAIN;
	}

	// Notify all listeners
	service->notify();

	// Done!
	*out_service_hdl = service->get_id();
	return CELL_OK;
}
// Withdraw a service from the IDM and notify listeners of its removal.
error_code sys_config_unregister_service(u32 config_hdl, u32 service_hdl)
{
	sys_config.trace("sys_config_unregister_service(config_hdl=0x%x, service_hdl=0x%x)", config_hdl, service_hdl);

	if (auto service = idm::withdraw<lv2_config_service>(service_hdl))
	{
		service->unregister();
		return CELL_OK;
	}

	return CELL_ESRCH;
}
/*
* IO Events - TODO
*/
// IO event retrieval - unimplemented stub; parameter meanings are unconfirmed.
error_code sys_config_get_io_event(u32 config_hdl, u32 event_id /*?*/, vm::ptr<void> out_buf /*?*/, u64 size /*?*/)
{
	sys_config.todo("sys_config_get_io_event(config_hdl=0x%x, event_id=0x%x, out_buf=*0x%x, size=%lld)", config_hdl, event_id, out_buf, size);
	return CELL_OK;
}
// IO error listener registration - unimplemented stub.
error_code sys_config_register_io_error_listener(u32 config_hdl)
{
	sys_config.todo("sys_config_register_io_error_listener(config_hdl=0x%x)", config_hdl);
	return CELL_OK;
}
// IO error listener removal - unimplemented stub.
error_code sys_config_unregister_io_error_listener(u32 config_hdl)
{
	sys_config.todo("sys_config_unregister_io_error_listener(config_hdl=0x%x)", config_hdl);
	return CELL_OK;
}
| 11,143
|
C++
|
.cpp
| 356
| 29.103933
| 250
| 0.715341
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,363
|
sys_btsetting.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_btsetting.cpp
|
#include "stdafx.h"
#include "sys_btsetting.h"
#include "Emu/Cell/ErrorCodes.h"
LOG_CHANNEL(sys_btsetting);
// Bluetooth settings interface - unimplemented stub; accepts any command.
error_code sys_btsetting_if(u64 cmd, vm::ptr<void> msg)
{
	sys_btsetting.todo("sys_btsetting_if(cmd=0x%llx, msg=*0x%x)", cmd, msg);

	return CELL_OK;
}
| 263
|
C++
|
.cpp
| 9
| 27.555556
| 73
| 0.736
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,364
|
sys_sm.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_sm.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/System.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/lv2/sys_process.h"
#include "sys_sm.h"
LOG_CHANNEL(sys_sm);
// System-manager parameter query - stubbed constants.
// Note: each output is written before the NEXT pointer is validated,
// matching the original sequential check-and-write order.
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c, vm::ptr<u64> d)
{
	sys_sm.todo("sys_sm_get_params(a=*0x%x, b=*0x%x, c=*0x%x, d=*0x%x)", a, b, c, d);

	if (!a)
	{
		return CELL_EFAULT;
	}

	*a = 0;

	if (!b)
	{
		return CELL_EFAULT;
	}

	*b = 0;

	if (!c)
	{
		return CELL_EFAULT;
	}

	*c = 0x200;

	if (!d)
	{
		return CELL_EFAULT;
	}

	*d = 7;

	return CELL_OK;
}
// External system event poll - stubbed to "no event" (EAGAIN).
// a4 is a mode flag and may only be 0 or 1.
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2, vm::ptr<u64> a3, u64 a4)
{
	// Fixed log format: the original listed a4 twice with five specifiers for
	// four arguments and a malformed "0x%xll" conversion
	sys_sm.todo("sys_sm_get_ext_event2(a1=*0x%x, a2=*0x%x, a3=*0x%x, a4=0x%llx)", a1, a2, a3, a4);

	if (a4 != 0 && a4 != 1)
	{
		return CELL_EINVAL;
	}

	// a1 == 7 - 'console too hot, restart'
	// a2 looks to be used if a1 is either 5 or 3?
	// a3 looks to be ignored in vsh
	if (a1) *a1 = 0; else return CELL_EFAULT;
	if (a2) *a2 = 0; else return CELL_EFAULT;
	if (a3) *a3 = 0; else return CELL_EFAULT;

	// eagain for no event
	return not_an_error(CELL_EAGAIN);
}
// Handle system-manager shutdown/reboot requests (requires root permission).
// 0x100/0x1100 -> power off (process exit), 0x200/0x1200 -> reboot (exitspawn),
// 0x82xx LPAR operations are unsupported.
error_code sys_sm_shutdown(ppu_thread& ppu, u16 op, vm::ptr<void> param, u64 size)
{
	ppu.state += cpu_flag::wait;

	sys_sm.success("sys_sm_shutdown(op=0x%x, param=*0x%x, size=0x%x)", op, param, size);

	if (!g_ps3_process_info.has_root_perm())
	{
		return CELL_ENOSYS;
	}

	switch (op)
	{
	case 0x100:
	case 0x1100:
	{
		sys_sm.success("Received shutdown request from application");
		_sys_process_exit(ppu, 0, 0, 0);
		break;
	}
	case 0x200:
	case 0x1200:
	{
		sys_sm.success("Received reboot request from application");
		// Relaunch the current application with the same arguments
		lv2_exitspawn(ppu, Emu.argv, Emu.envp, Emu.data);
		break;
	}
	case 0x8201:
	case 0x8202:
	case 0x8204:
	{
		sys_sm.warning("Unsupported LPAR operation: 0x%x", op);
		return CELL_ENOTSUP;
	}
	default: return CELL_EINVAL;
	}

	return CELL_OK;
}
// Shop (kiosk) mode toggle - unimplemented stub.
error_code sys_sm_set_shop_mode(s32 mode)
{
	sys_sm.todo("sys_sm_set_shop_mode(mode=0x%x)", mode);
	return CELL_OK;
}
// Front-panel LED control - unimplemented stub.
error_code sys_sm_control_led(u8 led, u8 action)
{
	sys_sm.todo("sys_sm_control_led(led=0x%x, action=0x%x)", led, action);
	return CELL_OK;
}
// System buzzer - unimplemented stub.
error_code sys_sm_ring_buzzer(u64 packet, u64 a1, u64 a2)
{
	sys_sm.todo("sys_sm_ring_buzzer(packet=0x%x, a1=0x%x, a2=0x%x)", packet, a1, a2);
	return CELL_OK;
}
| 2,353
|
C++
|
.cpp
| 83
| 26.26506
| 104
| 0.673787
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,365
|
sys_bdemu.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_bdemu.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/Cell/ErrorCodes.h"
#include "sys_bdemu.h"
LOG_CHANNEL(sys_bdemu);
// BD emulator command interface - unimplemented stub.
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf, u64 buf_len)
{
	// Fixed log format: "cmd=0%llx" was missing the 'x' in the 0x prefix
	sys_bdemu.todo("sys_bdemu_send_command(cmd=0x%llx, a2=0x%x, a3=0x%x, buf=0x%x, buf_len=0x%x)", cmd, a2, a3, buf, buf_len);
	return CELL_OK;
}
| 367
|
C++
|
.cpp
| 10
| 35
| 122
| 0.704545
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,366
|
network_context.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/network_context.cpp
|
#include "stdafx.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "Emu/Cell/Modules/sceNp.h" // for SCE_NP_PORT
#include "network_context.h"
#include "Emu/system_config.h"
#include "sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Used by RPCN to send signaling packets to RPCN server(for UDP hole punching)
// Used by RPCN to send signaling packets to RPCN server(for UDP hole punching)
// Sends through the default SCE_NP_PORT P2P socket; returns sendto()'s result
// (0 if the port was not bound, -1 on send failure).
s32 send_packet_from_p2p_port(const std::vector<u8>& data, const sockaddr_in& addr)
{
	s32 res{};
	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard list_lock(nc.list_p2p_ports_mutex);
		if (nc.list_p2p_ports.contains(SCE_NP_PORT))
		{
			auto& def_port = ::at32(nc.list_p2p_ports, SCE_NP_PORT);
			res = ::sendto(def_port.p2p_socket, reinterpret_cast<const char*>(data.data()), ::size32(data), 0, reinterpret_cast<const sockaddr*>(&addr), sizeof(sockaddr_in));
			if (res == -1)
				sys_net.error("Failed to send signaling packet: %s", get_last_error(false, false));
		}
		else
		{
			sys_net.error("send_packet_from_p2p_port: port %d not present", +SCE_NP_PORT);
		}
	}

	return res;
}
// Drain and return all queued RPCN messages from the default P2P port.
std::vector<std::vector<u8>> get_rpcn_msgs()
{
	std::vector<std::vector<u8>> result;
	auto& ctx = g_fxo->get<p2p_context>();

	{
		std::lock_guard ports_lock(ctx.list_p2p_ports_mutex);

		if (!ctx.list_p2p_ports.contains(SCE_NP_PORT))
		{
			sys_net.error("get_rpcn_msgs: port %d not present", +SCE_NP_PORT);
			return result;
		}

		auto& def_port = ::at32(ctx.list_p2p_ports, SCE_NP_PORT);

		{
			std::lock_guard msgs_lock(def_port.s_rpcn_mutex);
			result = std::move(def_port.rpcn_msgs);
			def_port.rpcn_msgs.clear();
		}
	}

	return result;
}
// Drain and return all queued signaling messages from the default P2P port.
std::vector<signaling_message> get_sign_msgs()
{
	std::vector<signaling_message> result;
	auto& ctx = g_fxo->get<p2p_context>();

	{
		std::lock_guard ports_lock(ctx.list_p2p_ports_mutex);

		if (!ctx.list_p2p_ports.contains(SCE_NP_PORT))
		{
			sys_net.error("get_sign_msgs: port %d not present", +SCE_NP_PORT);
			return result;
		}

		auto& def_port = ::at32(ctx.list_p2p_ports, SCE_NP_PORT);

		{
			std::lock_guard msgs_lock(def_port.s_sign_mutex);
			result = std::move(def_port.sign_msgs);
			def_port.sign_msgs.clear();
		}
	}

	return result;
}
namespace np
{
void init_np_handler_dependencies();
}
// Wake all PPU threads queued by the poll loop and reschedule them.
void base_network_thread::wake_threads()
{
	// Drop duplicate entries
	// NOTE(review): std::unique only removes adjacent duplicates - presumably
	// duplicates are always appended consecutively here; confirm
	ppu_to_awake.erase(std::unique(ppu_to_awake.begin(), ppu_to_awake.end()), ppu_to_awake.end());
	for (ppu_thread* ppu : ppu_to_awake)
	{
		network_clear_queue(*ppu);
		lv2_obj::append(ppu);
	}

	if (!ppu_to_awake.empty())
	{
		ppu_to_awake.clear();
		lv2_obj::awake_all();
	}
}
// The NP handler must be initialized before the P2P thread starts polling.
p2p_thread::p2p_thread()
{
	np::init_np_handler_dependencies();
}
// Create the default P2P port (SCE_NP_PORT) used by RPCN/signaling.
void p2p_thread::bind_sce_np_port()
{
	std::lock_guard list_lock(list_p2p_ports_mutex);
	create_p2p_port(SCE_NP_PORT);
}
// Main network poll loop: polls every active TCP/UDP lv2 socket with a 1ms
// timeout, dispatches socket events, then rebuilds the pollfd set.
void network_thread::operator()()
{
	std::vector<std::shared_ptr<lv2_socket>> socklist;
	socklist.reserve(lv2_socket::id_count);

	ppu_to_awake.clear();

	std::vector<::pollfd> fds(lv2_socket::id_count);
#ifdef _WIN32
	// Windows needs extra tracking to emulate POSIX connect() poll semantics
	std::vector<bool> connecting(lv2_socket::id_count);
	std::vector<bool> was_connecting(lv2_socket::id_count);
#endif

	while (thread_ctrl::state() != thread_state::aborting)
	{
		// Sleep until at least one socket requests polling
		if (!num_polls)
		{
			thread_ctrl::wait_on(num_polls, 0);
			continue;
		}

		ensure(socklist.size() <= lv2_socket::id_count);

		// Wait with 1ms timeout
#ifdef _WIN32
		windows_poll(fds, ::size32(socklist), 1, connecting);
#else
		::poll(fds.data(), socklist.size(), 1);
#endif

		std::lock_guard lock(mutex_thread_loop);

		for (usz i = 0; i < socklist.size(); i++)
		{
#ifdef _WIN32
			socklist[i]->handle_events(fds[i], was_connecting[i] && !connecting[i]);
#else
			socklist[i]->handle_events(fds[i]);
#endif
		}

		wake_threads();

		socklist.clear();

		// Obtain all native active sockets
		idm::select<lv2_socket>([&](u32 id, lv2_socket& s)
		{
			if (s.get_type() == SYS_NET_SOCK_DGRAM || s.get_type() == SYS_NET_SOCK_STREAM)
			{
				socklist.emplace_back(idm::get_unlocked<lv2_socket>(id));
			}
		});

		// Rebuild the pollfd array; sockets with no requested events get fd=-1
		for (usz i = 0; i < socklist.size(); i++)
		{
			auto events = socklist[i]->get_events();

			fds[i].fd = events ? socklist[i]->get_socket() : -1;
			fds[i].events =
				(events & lv2_socket::poll_t::read ? POLLIN : 0) |
				(events & lv2_socket::poll_t::write ? POLLOUT : 0) |
				0;
			fds[i].revents = 0;
#ifdef _WIN32
			const auto cur_connecting = socklist[i]->is_connecting();
			was_connecting[i] = cur_connecting;
			connecting[i] = cur_connecting;
#endif
		}
	}
}
// Must be used under list_p2p_ports_mutex lock!
// Must be used under list_p2p_ports_mutex lock!
// Creates the UDP socket entry for `p2p_port` if it doesn't exist and wakes
// the poll loop when this is the first active port.
void p2p_thread::create_p2p_port(u16 p2p_port)
{
	if (!list_p2p_ports.contains(p2p_port))
	{
		list_p2p_ports.emplace(std::piecewise_construct, std::forward_as_tuple(p2p_port), std::forward_as_tuple(p2p_port));
		const u32 prev_value = num_p2p_ports.fetch_add(1);

		if (!prev_value)
		{
			// 0 -> 1 transition: the poll loop may be waiting on num_p2p_ports
			num_p2p_ports.notify_one();
		}
	}
}
// P2P poll loop: polls every bound P2P UDP port with a 1ms timeout and
// drains incoming datagrams, then wakes any PPU threads waiting on them.
void p2p_thread::operator()()
{
	std::vector<::pollfd> p2p_fd(lv2_socket::id_count);

	while (thread_ctrl::state() != thread_state::aborting)
	{
		// Sleep until at least one P2P port exists
		if (!num_p2p_ports)
		{
			thread_ctrl::wait_on(num_p2p_ports, 0);
			continue;
		}

		// Check P2P sockets for incoming packets
		auto num_p2p_sockets = 0;
		std::memset(p2p_fd.data(), 0, p2p_fd.size() * sizeof(::pollfd));
		{
			std::lock_guard lock(list_p2p_ports_mutex);
			for (const auto& p2p_port : list_p2p_ports)
			{
				p2p_fd[num_p2p_sockets].events = POLLIN;
				p2p_fd[num_p2p_sockets].revents = 0;
				p2p_fd[num_p2p_sockets].fd = p2p_port.second.p2p_socket;
				num_p2p_sockets++;
			}
		}

#ifdef _WIN32
		const auto ret_p2p = WSAPoll(p2p_fd.data(), num_p2p_sockets, 1);
#else
		const auto ret_p2p = ::poll(p2p_fd.data(), num_p2p_sockets, 1);
#endif

		if (ret_p2p > 0)
		{
			std::lock_guard lock(list_p2p_ports_mutex);
			auto fd_index = 0;

			for (auto& p2p_port : list_p2p_ports)
			{
				if ((p2p_fd[fd_index].revents & POLLIN) == POLLIN || (p2p_fd[fd_index].revents & POLLRDNORM) == POLLRDNORM)
				{
					// Drain all pending datagrams on this port
					while (p2p_port.second.recv_data())
						;
				}
				fd_index++;
			}

			wake_threads();
		}
		else if (ret_p2p < 0)
		{
			sys_net.error("[P2P] Error poll on master P2P socket: %d", get_last_error(false));
		}
	}
}
| 6,005
|
C++
|
.cpp
| 221
| 24.276018
| 165
| 0.662552
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,367
|
lv2_socket_native.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_native.cpp
|
#include "stdafx.h"
#include "Emu/Cell/lv2/sys_net.h"
#include "Emu/NP/np_dnshook.h"
#include "Emu/NP/np_handler.h"
#include "lv2_socket_native.h"
#include "sys_net_helpers.h"
#ifdef _WIN32
constexpr SOCKET invalid_socket = INVALID_SOCKET;
#else
constexpr int invalid_socket = -1;
#endif
LOG_CHANNEL(sys_net);
// Construct a fresh native socket wrapper; the OS socket itself is created
// later via create_socket()/set_socket().
lv2_socket_native::lv2_socket_native(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
	: lv2_socket(family, type, protocol)
{
}
// Deserialize a native socket from a savestate. The Win32-only reuse flags
// occupy a fixed 8-byte slot on every platform to keep the format portable.
lv2_socket_native::lv2_socket_native(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(stx::make_exact(ar), type)
{
	[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_net);

#ifdef _WIN32
	ar(so_reuseaddr, so_reuseport);
#else
	std::array<char, 8> dummy{};
	ar(dummy);

	if (dummy != std::array<char, 8>{})
	{
		sys_net.error("[Native] Savestate tried to load Win32 specific data, compatibility may be affected");
	}
#endif

	if (version >= 2)
	{
		// Flag to signal failure of TCP connection on socket start
		ar(feign_tcp_conn_failure);
	}
}
// Serialize the native socket; mirrors the deserializing constructor, with
// the current connection state stored as the feign-failure flag.
void lv2_socket_native::save(utils::serial& ar)
{
	USING_SERIALIZATION_VERSION(lv2_net);
	lv2_socket::save(ar, true);
#ifdef _WIN32
	ar(so_reuseaddr, so_reuseport);
#else
	// Placeholder for the Win32-only fields (fixed 8-byte slot)
	ar(std::array<char, 8>{});
#endif
	ar(is_socket_connected());
}
// Close the underlying OS socket, if one was created.
lv2_socket_native::~lv2_socket_native()
{
	std::lock_guard lock(mutex);
	if (socket)
	{
#ifdef _WIN32
		::closesocket(socket);
#else
		::close(socket);
#endif
	}
}
// Create the native OS socket matching the lv2 family/type/protocol.
// Returns CELL_OK, or a negated sys_net error on failure.
s32 lv2_socket_native::create_socket()
{
	// Only INET stream/datagram sockets are supported
	ensure(family == SYS_NET_AF_INET);
	ensure(type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM);
	ensure(protocol == SYS_NET_IPPROTO_IP || protocol == SYS_NET_IPPROTO_TCP || protocol == SYS_NET_IPPROTO_UDP);

	const int native_domain = AF_INET;

	const int native_type = type == SYS_NET_SOCK_STREAM ? SOCK_STREAM : SOCK_DGRAM;

	int native_proto = protocol == SYS_NET_IPPROTO_TCP ? IPPROTO_TCP :
	                   protocol == SYS_NET_IPPROTO_UDP ? IPPROTO_UDP :
	                                                     IPPROTO_IP;

	auto socket_res = ::socket(native_domain, native_type, native_proto);

	if (socket_res == invalid_socket)
	{
		return -get_last_error(false);
	}

	set_socket(socket_res, family, type, protocol);
	return CELL_OK;
}
void lv2_socket_native::set_socket(socket_type socket, lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
{
this->socket = socket;
this->family = family;
this->type = type;
this->protocol = protocol;
set_default_buffers();
set_non_blocking();
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_native::accept(bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
if (is_lock)
{
lock.lock();
}
::sockaddr_storage native_addr;
::socklen_t native_addrlen = sizeof(native_addr);
if (feign_tcp_conn_failure)
{
sys_net.error("Calling socket::accept() from a previously connected socket!");
}
socket_type native_socket = ::accept(socket, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen);
if (native_socket != invalid_socket)
{
auto newsock = std::make_shared<lv2_socket_native>(family, type, protocol);
newsock->set_socket(native_socket, family, type, protocol);
// Sockets inherit non blocking behaviour from their parent
newsock->so_nbio = so_nbio;
sys_net_sockaddr ps3_addr = native_addr_to_sys_net_addr(native_addr);
return {true, 0, std::move(newsock), ps3_addr};
}
if (auto result = get_last_error(!so_nbio); result)
{
return {true, -result, {}, {}};
}
return {false, {}, {}, {}};
}
s32 lv2_socket_native::bind(const sys_net_sockaddr& addr)
{
std::lock_guard lock(mutex);
const auto* psa_in = reinterpret_cast<const sys_net_sockaddr_in*>(&addr);
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
u32 saddr = nph.get_bind_ip();
if (saddr == 0)
{
// If zero use the supplied address
saddr = std::bit_cast<u32>(psa_in->sin_addr);
}
if (feign_tcp_conn_failure)
{
sys_net.error("Calling socket::bind() from a previously connected socket!");
}
::sockaddr_in native_addr{};
native_addr.sin_family = AF_INET;
native_addr.sin_port = std::bit_cast<u16>(psa_in->sin_port);
native_addr.sin_addr.s_addr = saddr;
::socklen_t native_addr_len = sizeof(native_addr);
// Note that this is a hack(TODO)
// ATM we don't support binding 3658 udp because we use it for the p2ps main socket
// Only Fat Princess is known to do this to my knowledge
if (psa_in->sin_port == 3658 && type == SYS_NET_SOCK_DGRAM)
{
native_addr.sin_port = std::bit_cast<u16, be_t<u16>>(3659);
}
sys_net.warning("[Native] Trying to bind %s:%d", native_addr.sin_addr, std::bit_cast<be_t<u16>, u16>(native_addr.sin_port));
if (::bind(socket, reinterpret_cast<struct sockaddr*>(&native_addr), native_addr_len) == 0)
{
// Only UPNP port forward binds to 0.0.0.0
if (saddr == 0)
{
if (native_addr.sin_port == 0)
{
sockaddr_in client_addr;
socklen_t client_addr_size = sizeof(client_addr);
ensure(::getsockname(socket, reinterpret_cast<struct sockaddr*>(&client_addr), &client_addr_size) == 0);
bound_port = std::bit_cast<u16, be_t<u16>>(client_addr.sin_port);
}
else
{
bound_port = std::bit_cast<u16, be_t<u16>>(native_addr.sin_port);
}
nph.upnp_add_port_mapping(bound_port, type == SYS_NET_SOCK_STREAM ? "TCP" : "UDP");
}
last_bound_addr = addr;
return CELL_OK;
}
auto error = get_last_error(false);
#ifdef __linux__
if (error == SYS_NET_EACCES && std::bit_cast<be_t<u16>, u16>(native_addr.sin_port) < 1024)
{
sys_net.error("The game tried to bind a port < 1024 which is privileged on Linux\n"
"Consider setting rpcs3 privileges for it with: setcap 'cap_net_bind_service=+ep' /path/to/rpcs3");
}
#endif
return -error;
}
std::optional<s32> lv2_socket_native::connect(const sys_net_sockaddr& addr)
{
std::lock_guard lock(mutex);
const auto* psa_in = reinterpret_cast<const sys_net_sockaddr_in*>(&addr);
::sockaddr_in native_addr = sys_net_addr_to_native_addr(addr);
::socklen_t native_addr_len = sizeof(native_addr);
sys_net.notice("[Native] Attempting to connect on %s:%d", native_addr.sin_addr, std::bit_cast<be_t<u16>, u16>(native_addr.sin_port));
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
if (!nph.get_net_status() && is_ip_public_address(native_addr))
{
return -SYS_NET_EADDRNOTAVAIL;
}
if (psa_in->sin_port == 53)
{
// Add socket to the dns hook list
sys_net.notice("[Native] sys_net_bnet_connect: using DNS...");
auto& dnshook = g_fxo->get<np::dnshook>();
dnshook.add_dns_spy(lv2_id);
}
#ifdef _WIN32
bool was_connecting = connecting;
#endif
if (feign_tcp_conn_failure)
{
// As if still connected
return -SYS_NET_EALREADY;
}
if (::connect(socket, reinterpret_cast<struct sockaddr*>(&native_addr), native_addr_len) == 0)
{
return CELL_OK;
}
sys_net_error result = get_last_error(!so_nbio);
#ifdef _WIN32
// See https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-connect
if (was_connecting && (result == SYS_NET_EINVAL || result == SYS_NET_EWOULDBLOCK))
return -SYS_NET_EALREADY;
#endif
if (result)
{
if (result == SYS_NET_EWOULDBLOCK || result == SYS_NET_EINPROGRESS)
{
result = SYS_NET_EINPROGRESS;
#ifdef _WIN32
connecting = true;
#endif
this->poll_queue(nullptr, lv2_socket::poll_t::write, [this](bs_t<lv2_socket::poll_t> events) -> bool
{
if (events & lv2_socket::poll_t::write)
{
int native_error;
::socklen_t size = sizeof(native_error);
if (::getsockopt(socket, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&native_error), &size) != 0 || size != sizeof(int))
{
so_error = 1;
}
else
{
// TODO: check error formats (both native and translated)
so_error = native_error ? convert_error(false, native_error) : 0;
}
return true;
}
events += lv2_socket::poll_t::write;
return false;
});
}
return -result;
}
#ifdef _WIN32
connecting = true;
#endif
return std::nullopt;
}
s32 lv2_socket_native::connect_followup()
{
int native_error;
::socklen_t size = sizeof(native_error);
if (::getsockopt(socket, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&native_error), &size) != 0 || size != sizeof(int))
{
return -1;
}
// TODO: check error formats (both native and translated)
return native_error ? -convert_error(false, native_error) : 0;
}
std::pair<s32, sys_net_sockaddr> lv2_socket_native::getpeername()
{
std::lock_guard lock(mutex);
::sockaddr_storage native_addr;
::socklen_t native_addrlen = sizeof(native_addr);
if (::getpeername(socket, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen) == 0)
{
ensure(native_addr.ss_family == AF_INET);
sys_net_sockaddr sn_addr = native_addr_to_sys_net_addr(native_addr);
return {CELL_OK, sn_addr};
}
return {-get_last_error(false), {}};
}
std::pair<s32, sys_net_sockaddr> lv2_socket_native::getsockname()
{
std::lock_guard lock(mutex);
::sockaddr_storage native_addr;
::socklen_t native_addrlen = sizeof(native_addr);
if (::getsockname(socket, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen) == 0)
{
ensure(native_addr.ss_family == AF_INET);
sys_net_sockaddr sn_addr = native_addr_to_sys_net_addr(native_addr);
return {CELL_OK, sn_addr};
}
#ifdef _WIN32
else
{
// windows doesn't support getsockname for sockets that are not bound
if (get_native_error() == WSAEINVAL)
{
return {CELL_OK, {}};
}
}
#endif
return {-get_last_error(false), {}};
}
std::tuple<s32, lv2_socket::sockopt_data, u32> lv2_socket_native::getsockopt(s32 level, s32 optname, u32 len)
{
std::lock_guard lock(mutex);
sockopt_data out_val;
u32 out_len = sizeof(out_val);
int native_level = -1;
int native_opt = -1;
union
{
char ch[128];
int _int = 0;
::timeval timeo;
::linger linger;
} native_val;
::socklen_t native_len = sizeof(native_val);
if (level == SYS_NET_SOL_SOCKET)
{
native_level = SOL_SOCKET;
switch (optname)
{
case SYS_NET_SO_NBIO:
{
// Special
out_val._int = so_nbio;
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
case SYS_NET_SO_ERROR:
{
// Special
out_val._int = std::exchange(so_error, 0);
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
case SYS_NET_SO_KEEPALIVE:
{
native_opt = SO_KEEPALIVE;
break;
}
case SYS_NET_SO_SNDBUF:
{
native_opt = SO_SNDBUF;
break;
}
case SYS_NET_SO_RCVBUF:
{
native_opt = SO_RCVBUF;
break;
}
case SYS_NET_SO_SNDLOWAT:
{
native_opt = SO_SNDLOWAT;
break;
}
case SYS_NET_SO_RCVLOWAT:
{
native_opt = SO_RCVLOWAT;
break;
}
case SYS_NET_SO_BROADCAST:
{
native_opt = SO_BROADCAST;
break;
}
#ifdef _WIN32
case SYS_NET_SO_REUSEADDR:
{
out_val._int = so_reuseaddr;
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
case SYS_NET_SO_REUSEPORT:
{
out_val._int = so_reuseport;
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
#else
case SYS_NET_SO_REUSEADDR:
{
native_opt = SO_REUSEADDR;
break;
}
case SYS_NET_SO_REUSEPORT:
{
native_opt = SO_REUSEPORT;
break;
}
#endif
case SYS_NET_SO_SNDTIMEO:
case SYS_NET_SO_RCVTIMEO:
{
if (len < sizeof(sys_net_timeval))
return {-SYS_NET_EINVAL, {}, {}};
native_opt = optname == SYS_NET_SO_SNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO;
break;
}
case SYS_NET_SO_LINGER:
{
if (len < sizeof(sys_net_linger))
return {-SYS_NET_EINVAL, {}, {}};
native_opt = SO_LINGER;
break;
}
default:
{
sys_net.error("sys_net_bnet_getsockopt(s=%d, SOL_SOCKET): unknown option (0x%x)", lv2_id, optname);
return {-SYS_NET_EINVAL, {}, {}};
}
}
}
else if (level == SYS_NET_IPPROTO_TCP)
{
native_level = IPPROTO_TCP;
switch (optname)
{
case SYS_NET_TCP_MAXSEG:
{
// Special (no effect)
out_val._int = so_tcp_maxseg;
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
case SYS_NET_TCP_NODELAY:
{
native_opt = TCP_NODELAY;
break;
}
default:
{
sys_net.error("sys_net_bnet_getsockopt(s=%d, IPPROTO_TCP): unknown option (0x%x)", lv2_id, optname);
return {-SYS_NET_EINVAL, {}, {}};
}
}
}
else if (level == SYS_NET_IPPROTO_IP)
{
native_level = IPPROTO_IP;
switch (optname)
{
case SYS_NET_IP_HDRINCL:
{
native_opt = IP_HDRINCL;
break;
}
case SYS_NET_IP_TOS:
{
native_opt = IP_TOS;
break;
}
case SYS_NET_IP_TTL:
{
native_opt = IP_TTL;
break;
}
case SYS_NET_IP_MULTICAST_IF:
{
native_opt = IP_MULTICAST_IF;
break;
}
case SYS_NET_IP_MULTICAST_TTL:
{
native_opt = IP_MULTICAST_TTL;
break;
}
case SYS_NET_IP_MULTICAST_LOOP:
{
native_opt = IP_MULTICAST_LOOP;
break;
}
case SYS_NET_IP_ADD_MEMBERSHIP:
{
native_opt = IP_ADD_MEMBERSHIP;
break;
}
case SYS_NET_IP_DROP_MEMBERSHIP:
{
native_opt = IP_DROP_MEMBERSHIP;
break;
}
case SYS_NET_IP_TTLCHK:
{
sys_net.error("sys_net_bnet_getsockopt(IPPROTO_IP, SYS_NET_IP_TTLCHK): stubbed option");
return {CELL_OK, out_val, out_len};
}
case SYS_NET_IP_MAXTTL:
{
sys_net.error("sys_net_bnet_getsockopt(IPPROTO_IP, SYS_NET_IP_MAXTTL): stubbed option");
return {CELL_OK, out_val, out_len};
}
case SYS_NET_IP_DONTFRAG:
{
#ifdef _WIN32
native_opt = IP_DONTFRAGMENT;
#else
native_opt = IP_DF;
#endif
break;
}
default:
{
sys_net.error("sys_net_bnet_getsockopt(s=%d, IPPROTO_IP): unknown option (0x%x)", lv2_id, optname);
return {-SYS_NET_EINVAL, {}, {}};
}
}
}
else
{
sys_net.error("sys_net_bnet_getsockopt(s=%d): unknown level (0x%x)", lv2_id, level);
return {-SYS_NET_EINVAL, {}, {}};
}
if (::getsockopt(socket, native_level, native_opt, native_val.ch, &native_len) != 0)
{
return {-get_last_error(false), {}, {}};
}
if (level == SYS_NET_SOL_SOCKET)
{
switch (optname)
{
case SYS_NET_SO_SNDTIMEO:
case SYS_NET_SO_RCVTIMEO:
{
// TODO
out_val.timeo = {::narrow<s64>(native_val.timeo.tv_sec), ::narrow<s64>(native_val.timeo.tv_usec)};
out_len = sizeof(sys_net_timeval);
return {CELL_OK, out_val, out_len};
}
case SYS_NET_SO_LINGER:
{
// TODO
out_val.linger = {::narrow<s32>(native_val.linger.l_onoff), ::narrow<s32>(native_val.linger.l_linger)};
out_len = sizeof(sys_net_linger);
return {CELL_OK, out_val, out_len};
}
default: break;
}
}
// Fallback to int
out_val._int = native_val._int;
out_len = sizeof(s32);
return {CELL_OK, out_val, out_len};
}
s32 lv2_socket_native::setsockopt(s32 level, s32 optname, const std::vector<u8>& optval)
{
std::lock_guard lock(mutex);
int native_int = 0;
int native_level = -1;
int native_opt = -1;
const void* native_val = &native_int;
::socklen_t native_len = sizeof(int);
::linger native_linger;
::ip_mreq native_mreq;
#ifdef _WIN32
u32 native_timeo;
#else
::timeval native_timeo;
#endif
native_int = *reinterpret_cast<const be_t<s32>*>(optval.data());
if (level == SYS_NET_SOL_SOCKET)
{
native_level = SOL_SOCKET;
switch (optname)
{
case SYS_NET_SO_NBIO:
{
// Special
so_nbio = native_int;
return {};
}
case SYS_NET_SO_KEEPALIVE:
{
native_opt = SO_KEEPALIVE;
break;
}
case SYS_NET_SO_SNDBUF:
{
native_opt = SO_SNDBUF;
break;
}
case SYS_NET_SO_RCVBUF:
{
native_opt = SO_RCVBUF;
break;
}
case SYS_NET_SO_SNDLOWAT:
{
native_opt = SO_SNDLOWAT;
break;
}
case SYS_NET_SO_RCVLOWAT:
{
native_opt = SO_RCVLOWAT;
break;
}
case SYS_NET_SO_BROADCAST:
{
native_opt = SO_BROADCAST;
break;
}
#ifdef _WIN32
case SYS_NET_SO_REUSEADDR:
{
native_opt = SO_REUSEADDR;
so_reuseaddr = native_int;
native_int = so_reuseaddr || so_reuseport ? 1 : 0;
break;
}
case SYS_NET_SO_REUSEPORT:
{
native_opt = SO_REUSEADDR;
so_reuseport = native_int;
native_int = so_reuseaddr || so_reuseport ? 1 : 0;
break;
}
#else
case SYS_NET_SO_REUSEADDR:
{
native_opt = SO_REUSEADDR;
break;
}
case SYS_NET_SO_REUSEPORT:
{
native_opt = SO_REUSEPORT;
break;
}
#endif
case SYS_NET_SO_SNDTIMEO:
case SYS_NET_SO_RCVTIMEO:
{
if (optval.size() < sizeof(sys_net_timeval))
return -SYS_NET_EINVAL;
native_opt = optname == SYS_NET_SO_SNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO;
native_val = &native_timeo;
native_len = sizeof(native_timeo);
const int tv_sec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_sec);
const int tv_usec = ::narrow<int>(reinterpret_cast<const sys_net_timeval*>(optval.data())->tv_usec);
#ifdef _WIN32
native_timeo = tv_sec * 1000;
native_timeo += tv_usec / 1000;
#else
native_timeo.tv_sec = tv_sec;
native_timeo.tv_usec = tv_usec;
#endif
// TODO: Overflow detection?
(optname == SYS_NET_SO_SNDTIMEO ? so_sendtimeo : so_rcvtimeo) = tv_usec + tv_sec * 1000000;
break;
}
case SYS_NET_SO_LINGER:
{
if (optval.size() < sizeof(sys_net_linger))
return -SYS_NET_EINVAL;
// TODO
native_opt = SO_LINGER;
native_val = &native_linger;
native_len = sizeof(native_linger);
native_linger.l_onoff = reinterpret_cast<const sys_net_linger*>(optval.data())->l_onoff;
native_linger.l_linger = reinterpret_cast<const sys_net_linger*>(optval.data())->l_linger;
break;
}
case SYS_NET_SO_USECRYPTO:
{
// TODO
sys_net.error("sys_net_bnet_setsockopt(s=%d, SOL_SOCKET): Stubbed option (0x%x) (SYS_NET_SO_USECRYPTO)", lv2_id, optname);
return {};
}
case SYS_NET_SO_USESIGNATURE:
{
// TODO
sys_net.error("sys_net_bnet_setsockopt(s=%d, SOL_SOCKET): Stubbed option (0x%x) (SYS_NET_SO_USESIGNATURE)", lv2_id, optname);
return {};
}
default:
{
sys_net.error("sys_net_bnet_setsockopt(s=%d, SOL_SOCKET): unknown option (0x%x)", lv2_id, optname);
return -SYS_NET_EINVAL;
}
}
}
else if (level == SYS_NET_IPPROTO_TCP)
{
native_level = IPPROTO_TCP;
switch (optname)
{
case SYS_NET_TCP_MAXSEG:
{
// Special (no effect)
so_tcp_maxseg = native_int;
return {};
}
case SYS_NET_TCP_NODELAY:
{
native_opt = TCP_NODELAY;
break;
}
default:
{
sys_net.error("sys_net_bnet_setsockopt(s=%d, IPPROTO_TCP): unknown option (0x%x)", lv2_id, optname);
return -SYS_NET_EINVAL;
}
}
}
else if (level == SYS_NET_IPPROTO_IP)
{
native_level = IPPROTO_IP;
switch (optname)
{
case SYS_NET_IP_HDRINCL:
{
native_opt = IP_HDRINCL;
break;
}
case SYS_NET_IP_TOS:
{
native_opt = IP_TOS;
break;
}
case SYS_NET_IP_TTL:
{
native_opt = IP_TTL;
break;
}
case SYS_NET_IP_MULTICAST_IF:
{
native_opt = IP_MULTICAST_IF;
break;
}
case SYS_NET_IP_MULTICAST_TTL:
{
native_opt = IP_MULTICAST_TTL;
break;
}
case SYS_NET_IP_MULTICAST_LOOP:
{
native_opt = IP_MULTICAST_LOOP;
break;
}
case SYS_NET_IP_ADD_MEMBERSHIP:
case SYS_NET_IP_DROP_MEMBERSHIP:
{
if (optval.size() < sizeof(sys_net_ip_mreq))
return -SYS_NET_EINVAL;
native_opt = optname == SYS_NET_IP_ADD_MEMBERSHIP ? IP_ADD_MEMBERSHIP : IP_DROP_MEMBERSHIP;
native_val = &native_mreq;
native_len = sizeof(::ip_mreq);
native_mreq.imr_interface.s_addr = std::bit_cast<u32>(reinterpret_cast<const sys_net_ip_mreq*>(optval.data())->imr_interface);
native_mreq.imr_multiaddr.s_addr = std::bit_cast<u32>(reinterpret_cast<const sys_net_ip_mreq*>(optval.data())->imr_multiaddr);
break;
}
case SYS_NET_IP_TTLCHK:
{
sys_net.error("sys_net_bnet_setsockopt(s=%d, IPPROTO_IP): Stubbed option (0x%x) (SYS_NET_IP_TTLCHK)", lv2_id, optname);
break;
}
case SYS_NET_IP_MAXTTL:
{
sys_net.error("sys_net_bnet_setsockopt(s=%d, IPPROTO_IP): Stubbed option (0x%x) (SYS_NET_IP_MAXTTL)", lv2_id, optname);
break;
}
case SYS_NET_IP_DONTFRAG:
{
#ifdef _WIN32
native_opt = IP_DONTFRAGMENT;
#else
native_opt = IP_DF;
#endif
break;
}
default:
{
sys_net.error("sys_net_bnet_setsockopt(s=%d, IPPROTO_IP): unknown option (0x%x)", lv2_id, optname);
return -SYS_NET_EINVAL;
}
}
}
else
{
sys_net.error("sys_net_bnet_setsockopt(s=%d): unknown level (0x%x)", lv2_id, level);
return -SYS_NET_EINVAL;
}
if (::setsockopt(socket, native_level, native_opt, static_cast<const char*>(native_val), native_len) == 0)
{
return {};
}
return -get_last_error(false);
}
s32 lv2_socket_native::listen(s32 backlog)
{
std::lock_guard lock(mutex);
if (::listen(socket, backlog) == 0)
{
return CELL_OK;
}
return -get_last_error(false);
}
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> lv2_socket_native::recvfrom(s32 flags, u32 len, bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
if (is_lock)
{
lock.lock();
}
if (feign_tcp_conn_failure)
{
// As if just lost the connection
feign_tcp_conn_failure = false;
return {{-SYS_NET_ECONNRESET, {},{}}};
}
int native_flags = 0;
::sockaddr_storage native_addr{};
::socklen_t native_addrlen = sizeof(native_addr);
std::vector<u8> res_buf(len);
auto& dnshook = g_fxo->get<np::dnshook>();
if (dnshook.is_dns(lv2_id) && dnshook.is_dns_queue(lv2_id))
{
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
const auto packet = dnshook.get_dns_packet(lv2_id);
ensure(packet.size() < len);
memcpy(res_buf.data(), packet.data(), packet.size());
native_addr.ss_family = AF_INET;
(reinterpret_cast<::sockaddr_in*>(&native_addr))->sin_port = std::bit_cast<u16, be_t<u16>>(53); // htons(53)
(reinterpret_cast<::sockaddr_in*>(&native_addr))->sin_addr.s_addr = nph.get_dns_ip();
const auto sn_addr = native_addr_to_sys_net_addr(native_addr);
return {{::narrow<s32>(packet.size()), res_buf, sn_addr}};
}
if (flags & SYS_NET_MSG_PEEK)
{
native_flags |= MSG_PEEK;
}
if (flags & SYS_NET_MSG_WAITALL)
{
native_flags |= MSG_WAITALL;
}
auto native_result = ::recvfrom(socket, reinterpret_cast<char*>(res_buf.data()), len, native_flags, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen);
if (native_result >= 0)
{
const auto sn_addr = native_addr_to_sys_net_addr(native_addr);
return {{::narrow<s32>(native_result), res_buf, sn_addr}};
}
#ifdef _WIN32
else
{
// Windows returns an error when trying to peek at a message and buffer not long enough to contain the whole message, should be ignored
if ((native_flags & MSG_PEEK) && get_native_error() == WSAEMSGSIZE)
{
const auto sn_addr = native_addr_to_sys_net_addr(native_addr);
return {{len, res_buf, sn_addr}};
}
// Windows will return WSASHUTDOWN when the connection is shutdown, POSIX just returns EOF (0) in this situation.
if (get_native_error() == WSAESHUTDOWN)
{
const auto sn_addr = native_addr_to_sys_net_addr(native_addr);
return {{0, {}, sn_addr}};
}
}
const auto result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0, connecting);
#else
const auto result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);
#endif
if (result)
{
return {{-result, {}, {}}};
}
return std::nullopt;
}
std::optional<s32> lv2_socket_native::sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
if (is_lock)
{
lock.lock();
}
int native_flags = 0;
int native_result = -1;
std::optional<sockaddr_in> native_addr = std::nullopt;
if (opt_sn_addr)
{
native_addr = sys_net_addr_to_native_addr(*opt_sn_addr);
sys_net.trace("[Native] Attempting to send to %s:%d", (*native_addr).sin_addr, std::bit_cast<be_t<u16>, u16>((*native_addr).sin_port));
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
if (!nph.get_net_status() && is_ip_public_address(*native_addr))
{
return -SYS_NET_EADDRNOTAVAIL;
}
}
else if (feign_tcp_conn_failure)
{
// As if just lost the connection
feign_tcp_conn_failure = false;
return -SYS_NET_ECONNRESET;
}
sys_net_error result{};
if (flags & SYS_NET_MSG_WAITALL)
{
native_flags |= MSG_WAITALL;
}
auto& dnshook = g_fxo->get<np::dnshook>();
if (opt_sn_addr && type == SYS_NET_SOCK_DGRAM && reinterpret_cast<const sys_net_sockaddr_in*>(&*opt_sn_addr)->sin_port == 53)
{
dnshook.add_dns_spy(lv2_id);
}
if (dnshook.is_dns(lv2_id))
{
const s32 ret_analyzer = dnshook.analyze_dns_packet(lv2_id, reinterpret_cast<const u8*>(buf.data()), ::size32(buf));
// Check if the packet is intercepted
if (ret_analyzer >= 0)
{
return {ret_analyzer};
}
}
native_result = ::sendto(socket, reinterpret_cast<const char*>(buf.data()), ::narrow<int>(buf.size()), native_flags, native_addr ? reinterpret_cast<struct sockaddr*>(&native_addr.value()) : nullptr, native_addr ? sizeof(sockaddr_in) : 0);
if (native_result >= 0)
{
return {native_result};
}
#ifdef _WIN32
result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0, connecting);
#else
result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);
#endif
if (result)
{
return {-result};
}
// Note that this can only happen if the send buffer is full
return std::nullopt;
}
std::optional<s32> lv2_socket_native::sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
if (is_lock)
{
lock.lock();
}
int native_flags = 0;
int native_result = -1;
sys_net_error result{};
if (flags & SYS_NET_MSG_WAITALL)
{
native_flags |= MSG_WAITALL;
}
if (feign_tcp_conn_failure)
{
// As if just lost the connection
feign_tcp_conn_failure = false;
return {-SYS_NET_ECONNRESET};
}
for (int i = 0; i < msg.msg_iovlen; i++)
{
auto iov_base = msg.msg_iov[i].iov_base;
const u32 len = msg.msg_iov[i].iov_len;
const std::vector<u8> buf_copy(vm::_ptr<const char>(iov_base.addr()), vm::_ptr<const char>(iov_base.addr()) + len);
native_result = ::send(socket, reinterpret_cast<const char*>(buf_copy.data()), ::narrow<int>(buf_copy.size()), native_flags);
if (native_result >= 0)
{
return {native_result};
}
}
result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);
if (result)
{
return {-result};
}
return std::nullopt;
}
void lv2_socket_native::close()
{
std::lock_guard lock(mutex);
if (socket)
{
#ifdef _WIN32
::closesocket(socket);
#else
::close(socket);
#endif
socket = {};
}
auto& dnshook = g_fxo->get<np::dnshook>();
dnshook.remove_dns_spy(lv2_id);
if (bound_port)
{
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
nph.upnp_remove_port_mapping(bound_port, type == SYS_NET_SOCK_STREAM ? "TCP" : "UDP");
bound_port = 0;
}
}
s32 lv2_socket_native::shutdown(s32 how)
{
std::lock_guard lock(mutex);
if (feign_tcp_conn_failure)
{
// As if still connected
return CELL_OK;
}
#ifdef _WIN32
const int native_how =
how == SYS_NET_SHUT_RD ? SD_RECEIVE :
how == SYS_NET_SHUT_WR ? SD_SEND :
SD_BOTH;
#else
const int native_how =
how == SYS_NET_SHUT_RD ? SHUT_RD :
how == SYS_NET_SHUT_WR ? SHUT_WR :
SHUT_RDWR;
#endif
if (::shutdown(socket, native_how) == 0)
{
return CELL_OK;
}
return -get_last_error(false);
}
s32 lv2_socket_native::poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd)
{
// Check for fake packet for dns interceptions
auto& dnshook = g_fxo->get<np::dnshook>();
if (sn_pfd.events & SYS_NET_POLLIN && dnshook.is_dns(sn_pfd.fd) && dnshook.is_dns_queue(sn_pfd.fd))
{
sn_pfd.revents |= SYS_NET_POLLIN;
return 1;
}
if (sn_pfd.events & ~(SYS_NET_POLLIN | SYS_NET_POLLOUT | SYS_NET_POLLERR))
{
sys_net.warning("sys_net_bnet_poll(fd=%d): events=0x%x", sn_pfd.fd, sn_pfd.events);
}
native_pfd.fd = socket;
if (sn_pfd.events & SYS_NET_POLLIN)
{
native_pfd.events |= POLLIN;
}
if (sn_pfd.events & SYS_NET_POLLOUT)
{
native_pfd.events |= POLLOUT;
}
return 0;
}
std::tuple<bool, bool, bool> lv2_socket_native::select(bs_t<lv2_socket::poll_t> selected, pollfd& native_pfd)
{
native_pfd.fd = socket;
if (selected & lv2_socket::poll_t::read)
{
native_pfd.events |= POLLIN;
}
if (selected & lv2_socket::poll_t::write)
{
native_pfd.events |= POLLOUT;
}
return {};
}
void lv2_socket_native::set_default_buffers()
{
// Those are the default PS3 values
u32 default_RCVBUF = (type == SYS_NET_SOCK_STREAM) ? 65535 : 9216;
if (::setsockopt(socket, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<const char*>(&default_RCVBUF), sizeof(default_RCVBUF)) != 0)
{
sys_net.error("Error setting default SO_RCVBUF on sys_net_bnet_socket socket");
}
u32 default_SNDBUF = 131072;
if (::setsockopt(socket, SOL_SOCKET, SO_SNDBUF, reinterpret_cast<const char*>(&default_SNDBUF), sizeof(default_SNDBUF)) != 0)
{
sys_net.error("Error setting default SO_SNDBUF on sys_net_bnet_socket socket");
}
}
void lv2_socket_native::set_non_blocking()
{
// Set non-blocking
// This is done to avoid having threads stuck on blocking socket functions
// Blocking functions just put the thread to sleep and delegate the waking up to network_thread which polls the sockets
#ifdef _WIN32
u_long _true = 1;
::ioctlsocket(socket, FIONBIO, &_true);
#else
::fcntl(socket, F_SETFL, ::fcntl(socket, F_GETFL, 0) | O_NONBLOCK);
#endif
}
bool lv2_socket_native::is_socket_connected()
{
if (type != SYS_NET_SOCK_STREAM)
{
return false;
}
std::lock_guard lock(mutex);
int listening = 0;
socklen_t len = sizeof(listening);
if (::getsockopt(socket, SOL_SOCKET, SO_ACCEPTCONN, reinterpret_cast<char*>(&listening), &len) == -1)
{
return false;
}
if (listening)
{
// Would be handled in other ways
return false;
}
fd_set readfds, writefds;
struct timeval timeout{0, 0}; // Zero timeout
FD_ZERO(&readfds);
FD_ZERO(&writefds);
FD_SET(socket, &readfds);
FD_SET(socket, &writefds);
// Use select to check for readability and writability
const int result = ::select(1, &readfds, &writefds, NULL, &timeout);
if (result < 0)
{
// Error occurred
return false;
}
// Socket is connected if it's readable or writable
return FD_ISSET(socket, &readfds) || FD_ISSET(socket, &writefds);
}
| 30,522
|
C++
|
.cpp
| 1,101
| 24.782925
| 239
| 0.663943
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,368
|
lv2_socket_p2ps.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_p2ps.cpp
|
#include "stdafx.h"
#include "Utilities/Thread.h"
#include "util/asm.hpp"
#include "util/atomic.hpp"
#include "lv2_socket_p2ps.h"
#include "Emu/NP/np_helpers.h"
#include "nt_p2p_port.h"
#include "network_context.h"
#include "sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Object in charge of retransmiting packets for STREAM_P2P sockets
class tcp_timeout_monitor
{
public:
void add_message(s32 sock_id, const sockaddr_in* dst, std::vector<u8> data, u64 seq)
{
{
std::lock_guard lock(data_mutex);
const auto now = steady_clock::now();
message msg;
msg.dst_addr = *dst;
msg.sock_id = sock_id;
msg.data = std::move(data);
msg.seq = seq;
msg.initial_sendtime = now;
rtt_info rtt = rtts[sock_id];
const auto expected_time = now + rtt.rtt_time;
msgs.insert(std::make_pair(expected_time, std::move(msg)));
}
wakey.release(1);
wakey.notify_one(); // TODO: Should be improved to only wake if new timeout < old timeout
}
void confirm_data_received(s32 sock_id, u64 ack)
{
std::lock_guard lock(data_mutex);
rtts[sock_id].num_retries = 0;
const auto now = steady_clock::now();
for (auto it = msgs.begin(); it != msgs.end();)
{
auto& msg = it->second;
if (msg.sock_id == sock_id && msg.seq < ack)
{
// Decreases RTT if msg is early
if (now < it->first)
{
const auto actual_rtt = std::chrono::duration_cast<std::chrono::milliseconds>(now - it->second.initial_sendtime);
const auto cur_rtt = rtts[sock_id].rtt_time;
if (cur_rtt > actual_rtt)
{
rtts[sock_id].rtt_time = (actual_rtt + cur_rtt) / 2;
}
}
it = msgs.erase(it);
continue;
}
it++;
}
}
void clear_all_messages(s32 sock_id)
{
std::lock_guard lock(data_mutex);
for (auto it = msgs.begin(); it != msgs.end();)
{
auto& msg = it->second;
if (msg.sock_id == sock_id)
{
it = msgs.erase(it);
continue;
}
it++;
}
}
void operator()()
{
atomic_wait_timeout timeout = atomic_wait_timeout::inf;
while (thread_ctrl::state() != thread_state::aborting)
{
if (!wakey)
{
wakey.wait(0, timeout);
}
wakey = 0;
if (thread_ctrl::state() == thread_state::aborting)
return;
std::lock_guard lock(data_mutex);
const auto now = steady_clock::now();
// Check for messages that haven't been acked
std::set<s32> rtt_increased;
for (auto it = msgs.begin(); it != msgs.end();)
{
if (it->first > now)
break;
// reply is late, increases rtt
auto& msg = it->second;
const auto addr = msg.dst_addr.sin_addr.s_addr;
rtt_info rtt = rtts[msg.sock_id];
// Only increases rtt once per loop(in case a big number of packets are sent at once)
if (!rtt_increased.count(msg.sock_id))
{
rtt.num_retries += 1;
// Increases current rtt by 10%
rtt.rtt_time += (rtt.rtt_time / 10);
rtts[addr] = rtt;
rtt_increased.emplace(msg.sock_id);
}
if (rtt.num_retries >= 10)
{
// Too many retries, need to notify the socket that the connection is dead
idm::check<lv2_socket>(msg.sock_id, [&](lv2_socket& sock)
{
sys_net.error("[P2PS] Too many retries, closing the stream");
ensure(sock.get_type() == SYS_NET_SOCK_STREAM_P2P);
auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(sock);
sock_p2ps.close_stream();
});
it = msgs.erase(it);
continue;
}
// resend the message
const auto res = idm::check<lv2_socket>(msg.sock_id, [&](lv2_socket& sock) -> bool
{
ensure(sock.get_type() == SYS_NET_SOCK_STREAM_P2P);
auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(sock);
while (::sendto(sock_p2ps.get_socket(), reinterpret_cast<const char*>(msg.data.data()), ::size32(msg.data), 0, reinterpret_cast<const sockaddr*>(&msg.dst_addr), sizeof(msg.dst_addr)) == -1)
{
const sys_net_error err = get_last_error(false);
// concurrency on the socket(from a sendto for example) can result in EAGAIN error in which case we try again
if (err == SYS_NET_EAGAIN)
{
continue;
}
sys_net.error("[P2PS] Resending the packet failed(%s), closing the stream", err);
sock_p2ps.close_stream();
return false;
}
return true;
});
if (!res || !res.ret)
{
it = msgs.erase(it);
continue;
}
// Update key timeout
msgs.insert(std::make_pair(now + rtt.rtt_time, std::move(msg)));
it = msgs.erase(it);
}
if (!msgs.empty())
{
const auto current_timepoint = steady_clock::now();
const auto expected_timepoint = msgs.begin()->first;
if (current_timepoint > expected_timepoint)
{
wakey = 1;
}
else
{
timeout = static_cast<atomic_wait_timeout>(std::chrono::duration_cast<std::chrono::nanoseconds>(expected_timepoint - current_timepoint).count());
}
}
else
{
timeout = atomic_wait_timeout::inf;
}
}
}
tcp_timeout_monitor& operator=(thread_state)
{
wakey.release(1);
wakey.notify_one();
return *this;
}
public:
static constexpr auto thread_name = "Tcp Over Udp Timeout Manager Thread"sv;
private:
atomic_t<u32> wakey = 0;
shared_mutex data_mutex;
// List of outgoing messages
struct message
{
s32 sock_id = 0;
::sockaddr_in dst_addr{};
std::vector<u8> data;
u64 seq = 0;
steady_clock::time_point initial_sendtime{};
};
std::map<steady_clock::time_point, message> msgs; // (wakeup time, msg)
// List of rtts
struct rtt_info
{
unsigned long num_retries = 0;
std::chrono::milliseconds rtt_time = 50ms;
};
std::unordered_map<s32, rtt_info> rtts; // (sock_id, rtt)
};
u16 u2s_tcp_checksum(const le_t<u16>* buffer, usz size)
{
u32 cksum = 0;
while (size > 1)
{
cksum += *buffer++;
size -= sizeof(u16);
}
if (size)
cksum += *reinterpret_cast<const u8*>(buffer);
cksum = (cksum >> 16) + (cksum & 0xffff);
cksum += (cksum >> 16);
return static_cast<u16>(~cksum);
}
// Builds a raw UDP payload for a P2PS packet: 6-byte vport header
// (dst_port, src_port, P2P_FLAG_P2PS — all little-endian), then the
// encapsulated-TCP header, then the optional data. The header checksum is
// computed last, over header+data with the checksum field zeroed first.
std::vector<u8> generate_u2s_packet(const p2ps_encapsulated_tcp& header, const u8* data, const u32 datasize)
{
	const u32 packet_size = (VPORT_P2P_HEADER_SIZE + sizeof(p2ps_encapsulated_tcp) + datasize);
	ensure(packet_size < 65535); // packet size shouldn't be bigger than possible UDP payload
	std::vector<u8> packet(packet_size);
	u8* packet_data = packet.data();

	// Vport header fields are serialized little-endian
	le_t<u16> dst_port_le = +header.dst_port;
	le_t<u16> src_port_le = +header.src_port;
	le_t<u16> p2p_flags_le = P2P_FLAG_P2PS;

	memcpy(packet_data, &dst_port_le, sizeof(u16));
	memcpy(packet_data + sizeof(u16), &src_port_le, sizeof(u16));
	memcpy(packet_data + sizeof(u16) + sizeof(u16), &p2p_flags_le, sizeof(u16));
	memcpy(packet_data + VPORT_P2P_HEADER_SIZE, &header, sizeof(p2ps_encapsulated_tcp));

	if (datasize)
		memcpy(packet_data + VPORT_P2P_HEADER_SIZE + sizeof(p2ps_encapsulated_tcp), data, datasize);

	// Checksum is computed with the checksum field itself set to 0
	auto* hdr_ptr = reinterpret_cast<p2ps_encapsulated_tcp*>(packet_data + VPORT_P2P_HEADER_SIZE);
	hdr_ptr->checksum = 0;
	hdr_ptr->checksum = u2s_tcp_checksum(utils::bless<le_t<u16>>(hdr_ptr), sizeof(p2ps_encapsulated_tcp) + datasize);

	return packet;
}
// Creates a fresh STREAM-P2P socket and pre-seeds the SO_TYPE sockopt cache
// so getsockopt(SOL_SOCKET, SO_TYPE) reports SYS_NET_SOCK_STREAM_P2P.
lv2_socket_p2ps::lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
	: lv2_socket_p2p(family, type, protocol)
{
	sockopt_cache so_type_entry;
	so_type_entry.len = 4;
	so_type_entry.data._int = SYS_NET_SOCK_STREAM_P2P;

	const u64 cache_key = (static_cast<u64>(SYS_NET_SOL_SOCKET) << 32ull) | SYS_NET_SO_TYPE;
	sockopts[cache_key] = so_type_entry;
}
// Constructs an already-connected stream socket; used when a listening
// socket accepts an incoming connection and the handshake state has to be
// transferred to the new socket.
lv2_socket_p2ps::lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr, u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq, s32 so_nbio)
	: lv2_socket_p2p(SYS_NET_AF_INET, SYS_NET_SOCK_STREAM_P2P, SYS_NET_IPPROTO_IP)
{
	this->socket = socket;             // native UDP socket shared by all P2P sockets of this port
	this->port = port;                 // local real UDP port
	this->vport = vport;               // local virtual port
	this->op_addr = op_addr;           // peer IPv4 address
	this->op_port = op_port;           // peer real UDP port
	this->op_vport = op_vport;         // peer virtual port
	this->cur_seq = cur_seq;           // next sequence number to send
	this->data_beg_seq = data_beg_seq; // first sequence number of the receive window
	this->so_nbio = so_nbio;           // non-blocking flag inherited from the listener

	status = p2ps_stream_status::stream_connected;
}
// Savestate deserialization: restores the base P2P state, then the
// stream-specific fields (field order must match save()).
lv2_socket_p2ps::lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type)
	: lv2_socket_p2p(ar, type)
{
	ar(status, max_backlog, backlog, op_port, op_vport, op_addr, data_beg_seq, received_data, cur_seq);
}
// Savestate serialization: field order must stay in sync with the
// utils::serial constructor.
void lv2_socket_p2ps::save(utils::serial& ar)
{
	static_cast<lv2_socket_p2p*>(this)->save(ar);
	ar(status, max_backlog, backlog, op_port, op_vport, op_addr, data_beg_seq, received_data, cur_seq);
}
// Processes a validated P2PS packet on a connected or handshaking socket.
// Returns false only when the stream gets closed (RST/FIN or invalid state),
// true otherwise (including dropped/duplicate packets).
bool lv2_socket_p2ps::handle_connected(p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr, nt_p2p_port* p2p_port)
{
	std::lock_guard lock(mutex);

	if (status != p2ps_stream_status::stream_connected && status != p2ps_stream_status::stream_handshaking)
	{
		sys_net.error("[P2PS] lv2_socket_p2ps::handle_connected() called on a non connected/handshaking socket(%d)!", static_cast<u8>(status));
		return false;
	}

	// An ACK acknowledges our outgoing data: stop retransmitting it
	if (tcp_header->flags & static_cast<u8>(p2ps_tcp_flags::ACK))
	{
		auto& tcpm = g_fxo->get<named_thread<tcp_timeout_monitor>>();
		tcpm.confirm_data_received(lv2_id, tcp_header->ack);
	}

	// Acknowledges the highest contiguous sequence received so far, updates
	// data_available accordingly, and wakes any pending read poll/select.
	auto send_ack = [&]()
	{
		// Walk received_data from the window start as long as segments are contiguous
		auto final_ack = data_beg_seq;
		while (received_data.contains(final_ack))
		{
			final_ack += ::at32(received_data, final_ack).size();
		}
		data_available = final_ack - data_beg_seq;

		p2ps_encapsulated_tcp send_hdr;
		send_hdr.src_port = tcp_header->dst_port;
		send_hdr.dst_port = tcp_header->src_port;
		send_hdr.flags = p2ps_tcp_flags::ACK;
		send_hdr.ack = final_ack;
		auto packet = generate_u2s_packet(send_hdr, nullptr, 0);
		sys_net.trace("[P2PS] Sent ack %d", final_ack);
		// Plain ACKs are fire-and-forget (no retransmission)
		send_u2s_packet(std::move(packet), reinterpret_cast<::sockaddr_in*>(op_addr), 0, false);

		// check if polling is happening
		if (data_available && events.test_and_reset(lv2_socket::poll_t::read))
		{
			bs_t<lv2_socket::poll_t> read_event = lv2_socket::poll_t::read;
			for (auto it = queue.begin(); it != queue.end();)
			{
				if (it->second(read_event))
				{
					it = queue.erase(it);
					continue;
				}
				it++;
			}

			if (queue.empty())
			{
				events.store({});
			}
		}
	};

	if (status == p2ps_stream_status::stream_handshaking)
	{
		// Only expect SYN|ACK
		if (tcp_header->flags == (p2ps_tcp_flags::SYN | p2ps_tcp_flags::ACK))
		{
			sys_net.trace("[P2PS] Received SYN|ACK, status is now connected");
			// Peer data starts right after its SYN sequence number
			data_beg_seq = tcp_header->seq + 1;
			status = p2ps_stream_status::stream_connected;
			send_ack();
		}
		else
		{
			sys_net.error("[P2PS] Unexpected U2S TCP flag received with handshaking state: 0x%02X", tcp_header->flags);
		}

		return true;
	}
	else if (status == p2ps_stream_status::stream_connected)
	{
		switch (tcp_header->flags)
		{
		case 0:
		case p2ps_tcp_flags::PSH:
		case p2ps_tcp_flags::ACK:
		case p2ps_tcp_flags::SYN:
		case p2ps_tcp_flags::SYN | p2ps_tcp_flags::ACK:
		{
			if (tcp_header->seq < data_beg_seq)
			{
				// Data has already been processed: re-ACK so the peer stops resending
				sys_net.trace("[P2PS] Data has already been processed");
				if (tcp_header->flags != p2ps_tcp_flags::ACK)
					send_ack();
				return true;
			}

			if (!received_data.count(tcp_header->seq))
			{
				// New data
				received_data.emplace(tcp_header->seq, std::vector<u8>(data, data + tcp_header->length));
			}
			else
			{
				sys_net.trace("[P2PS] Data was not new!");
			}

			send_ack();
			return true;
		}
		case p2ps_tcp_flags::RST:
		case p2ps_tcp_flags::FIN:
		{
			sys_net.error("[P2PS] Received RST/FIN packet(%d), closing the stream", tcp_header->flags);
			close_stream_nl(p2p_port);
			return false;
		}
		default:
		{
			sys_net.error("[P2PS] Unexpected U2S TCP flag received with connected state: 0x%02X", tcp_header->flags);
			return true;
		}
		}
	}

	return true;
}
// Processes a validated P2PS packet on a listening socket. A SYN creates a
// new connected lv2_socket_p2ps (registered in idm and in the port's stream
// map), replies SYN|ACK and pushes the new id on the backlog; a full backlog
// is answered with RST; anything else is logged and ignored.
bool lv2_socket_p2ps::handle_listening(p2ps_encapsulated_tcp* tcp_header, [[maybe_unused]] u8* data, ::sockaddr_storage* op_addr)
{
	std::lock_guard lock(mutex);

	if (status != p2ps_stream_status::stream_listening)
	{
		sys_net.error("[P2PS] lv2_socket_p2ps::handle_listening() called on a non listening socket(%d)!", static_cast<u8>(status));
		return false;
	}

	// Only valid packet
	if (tcp_header->flags == static_cast<u8>(p2ps_tcp_flags::SYN))
	{
		if (backlog.size() >= max_backlog)
		{
			// Send a RST packet on backlog full
			sys_net.trace("[P2PS] Backlog was full, sent a RST packet");
			p2ps_encapsulated_tcp send_hdr;
			send_hdr.src_port = tcp_header->dst_port;
			send_hdr.dst_port = tcp_header->src_port;
			send_hdr.flags = p2ps_tcp_flags::RST;
			auto packet = generate_u2s_packet(send_hdr, nullptr, 0);
			send_u2s_packet(std::move(packet), reinterpret_cast<::sockaddr_in*>(op_addr), 0, false);
			return true;
		}

		// Yes, new connection and a backlog is available, create a new lv2_socket for it and send SYN|ACK
		// Prepare reply packet
		sys_net.notice("[P2PS] Received connection on listening STREAM-P2P socket!");
		p2ps_encapsulated_tcp send_hdr;
		send_hdr.src_port = tcp_header->dst_port;
		send_hdr.dst_port = tcp_header->src_port;
		send_hdr.flags = p2ps_tcp_flags::SYN | p2ps_tcp_flags::ACK;
		send_hdr.ack = tcp_header->seq + 1;
		// Generates random starting SEQ
		send_hdr.seq = rand();

		// Create new socket, inheriting the shared UDP socket and local binding
		const u32 new_op_addr = reinterpret_cast<struct sockaddr_in*>(op_addr)->sin_addr.s_addr;
		const u16 new_op_port = std::bit_cast<u16, be_t<u16>>((reinterpret_cast<struct sockaddr_in*>(op_addr)->sin_port));
		const u16 new_op_vport = tcp_header->src_port;
		const u64 new_cur_seq = send_hdr.seq + 1;
		const u64 new_data_beg_seq = send_hdr.ack;
		auto sock_lv2 = std::make_shared<lv2_socket_p2ps>(socket, port, vport, new_op_addr, new_op_port, new_op_vport, new_cur_seq, new_data_beg_seq, so_nbio);
		const s32 new_sock_id = idm::import_existing<lv2_socket>(sock_lv2);
		sock_lv2->set_lv2_id(new_sock_id);

		// Routing key: peer IPv4 address | peer vport << 48 | our vport << 32
		const u64 key_connected = (reinterpret_cast<struct sockaddr_in*>(op_addr)->sin_addr.s_addr) | (static_cast<u64>(tcp_header->src_port) << 48) | (static_cast<u64>(tcp_header->dst_port) << 32);

		{
			auto& nc = g_fxo->get<p2p_context>();
			auto& pport = ::at32(nc.list_p2p_ports, port);
			pport.bound_p2p_streams.emplace(key_connected, new_sock_id);
		}

		auto packet = generate_u2s_packet(send_hdr, nullptr, 0);
		{
			std::lock_guard lock(sock_lv2->mutex);
			// The SYN|ACK is retransmitted by the timeout monitor until acknowledged
			sock_lv2->send_u2s_packet(std::move(packet), reinterpret_cast<::sockaddr_in*>(op_addr), send_hdr.seq, true);
		}

		backlog.push_back(new_sock_id);

		// Wake any poll/select waiting for an incoming connection
		if (events.test_and_reset(lv2_socket::poll_t::read))
		{
			bs_t<lv2_socket::poll_t> read_event = lv2_socket::poll_t::read;
			for (auto it = queue.begin(); it != queue.end();)
			{
				if (it->second(read_event))
				{
					it = queue.erase(it);
					continue;
				}
				it++;
			}

			if (queue.empty())
			{
				events.store({});
			}
		}
	}
	else
	{
		sys_net.error("[P2PS] Unexpected U2S TCP flag received on listening socket: 0x%02X", tcp_header->flags);
	}

	// Ignore other packets?
	return true;
}
// Sends a pre-built U2S packet over the shared native UDP socket.
// EAGAIN is retried in a busy loop (another thread may be sending on the
// same shared socket); other errors are logged and the packet is dropped.
// When require_ack is set, the packet is registered with the tcp timeout
// monitor, which retransmits it until an ACK covering `seq` is confirmed.
void lv2_socket_p2ps::send_u2s_packet(std::vector<u8> data, const ::sockaddr_in* dst, u64 seq, bool require_ack)
{
	char ip_str[16]; // fits "255.255.255.255" + NUL
	inet_ntop(AF_INET, &dst->sin_addr, ip_str, sizeof(ip_str));
	sys_net.trace("[P2PS] Sending U2S packet on socket %d(id:%d): data(%d, seq %d, require_ack %d) to %s:%d", socket, lv2_id, data.size(), seq, require_ack, ip_str, std::bit_cast<u16, be_t<u16>>(dst->sin_port));

	while (::sendto(socket, reinterpret_cast<char*>(data.data()), ::size32(data), 0, reinterpret_cast<const sockaddr*>(dst), sizeof(sockaddr_in)) == -1)
	{
		const sys_net_error err = get_last_error(false);
		// concurrency on the socket can result in EAGAIN error in which case we try again
		if (err == SYS_NET_EAGAIN)
		{
			continue;
		}

		sys_net.error("[P2PS] Attempting to send a u2s packet failed(%s)!", err);
		return;
	}

	// Adds to tcp timeout monitor to resend the message until an ack is received
	if (require_ack)
	{
		auto& tcpm = g_fxo->get<named_thread<tcp_timeout_monitor>>();
		tcpm.add_message(lv2_id, dst, std::move(data), seq);
	}
}
// Marks the stream closed and unregisters it ("no-lock" variant: the caller
// must already hold the relevant port/socket locks).
void lv2_socket_p2ps::close_stream_nl(nt_p2p_port* p2p_port)
{
	status = p2ps_stream_status::stream_closed;

	// Remove every stream routing entry that points at this socket
	std::erase_if(p2p_port->bound_p2p_streams, [this](const auto& entry)
		{
			return entry.second == lv2_id;
		});

	// Drop all pending retransmissions for this socket
	auto& tcpm = g_fxo->get<named_thread<tcp_timeout_monitor>>();
	tcpm.clear_all_messages(lv2_id);
}
// Closes the stream while taking the full lock hierarchy in order:
// port-list lock, then (vport lock + socket mutex) together.
void lv2_socket_p2ps::close_stream()
{
	auto& nc = g_fxo->get<p2p_context>();
	std::lock_guard lock(nc.list_p2p_ports_mutex);
	auto& p2p_port = ::at32(nc.list_p2p_ports, port);
	std::scoped_lock more_lock(p2p_port.bound_p2p_vports_mutex, mutex);
	close_stream_nl(&p2p_port);
}
// Returns the current stream state (no lock taken by this accessor).
p2ps_stream_status lv2_socket_p2ps::get_status() const
{
	return status;
}

// Overwrites the stream state (no lock taken by this mutator).
void lv2_socket_p2ps::set_status(p2ps_stream_status new_status)
{
	status = new_status;
}
// Returns the connected peer's address in PS3 layout, or -SYS_NET_ENOTCONN
// when no peer is known yet.
std::pair<s32, sys_net_sockaddr> lv2_socket_p2ps::getpeername()
{
	std::lock_guard lock(mutex);

	// A peer is only known once connect()/accept() filled in all three fields
	const bool peer_known = op_addr && op_port && op_vport;

	if (!peer_known)
	{
		return {-SYS_NET_ENOTCONN, {}};
	}

	sys_net_sockaddr res{};
	auto* p2p_addr = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&res);

	p2p_addr->sin_len = sizeof(sys_net_sockaddr_in_p2p);
	p2p_addr->sin_family = SYS_NET_AF_INET;
	p2p_addr->sin_addr = std::bit_cast<be_t<u32>, u32>(op_addr);
	// sin_port carries the peer vport and sin_vport the real port,
	// mirroring the inversion documented in connect()
	p2p_addr->sin_port = op_vport;
	p2p_addr->sin_vport = op_port;

	return {CELL_OK, res};
}
// Pops one pending connection from the backlog.
// Returns {completed, result, socket-ptr, peer address}; the new socket's id
// is returned in the s32 slot (it was already registered with idm by
// handle_listening), hence the empty shared_ptr slot.
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2ps::accept(bool is_lock)
{
	std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

	if (is_lock)
	{
		lock.lock();
	}

	if (backlog.size() == 0)
	{
		// Non-blocking sockets fail immediately; blocking ones signal "retry"
		if (so_nbio)
		{
			return {true, -SYS_NET_EWOULDBLOCK, {}, {}};
		}

		return {false, {}, {}, {}};
	}

	auto p2ps_client = backlog.front();
	backlog.pop_front();

	sys_net_sockaddr ps3_addr{};
	auto* paddr = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&ps3_addr);

	// NOTE(review): check_unlocked may return nullptr if the backlog socket
	// was deleted concurrently — confirm this cannot happen here
	lv2_socket_p2ps* sock_client = reinterpret_cast<lv2_socket_p2ps*>(idm::check_unlocked<lv2_socket>(p2ps_client));

	{
		std::lock_guard lock(sock_client->mutex);
		// Peer address in PS3 layout: sin_port carries the vport, sin_vport the real port
		paddr->sin_family = SYS_NET_AF_INET;
		paddr->sin_addr = std::bit_cast<be_t<u32>, u32>(sock_client->op_addr);
		paddr->sin_port = sock_client->op_vport;
		paddr->sin_vport = sock_client->op_port;
		paddr->sin_len = sizeof(sys_net_sockaddr_in_p2p);
	}

	return {true, p2ps_client, {}, ps3_addr};
}
// Binds the stream socket to (real UDP port, virtual port), creating or
// reusing the shared nt_p2p_port for the real port. Note the field
// inversion: sin_vport holds the real port and sin_port the vport.
// Returns -SYS_NET_EADDRINUSE if the vport is taken by non-reusable sockets.
s32 lv2_socket_p2ps::bind(const sys_net_sockaddr& addr)
{
	const auto* psa_in_p2p = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&addr);

	// For SYS_NET_SOCK_STREAM_P2P sockets, the port is the "fake" tcp port and the vport is the udp port it's bound to
	u16 p2p_port = psa_in_p2p->sin_vport;
	u16 p2p_vport = psa_in_p2p->sin_port;

	sys_net.notice("[P2PS] Trying to bind %s:%d:%d", np::ip_to_string(std::bit_cast<u32>(psa_in_p2p->sin_addr)), p2p_port, p2p_vport);

	// Default to the standard NP port when unspecified
	if (p2p_port == 0)
	{
		p2p_port = SCE_NP_PORT;
	}

	if (p2p_port != SCE_NP_PORT)
	{
		sys_net.warning("[P2PS] Attempting to bind a socket to a port != %d", +SCE_NP_PORT);
	}

	socket_type real_socket{};

	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard list_lock(nc.list_p2p_ports_mutex);
		nc.create_p2p_port(p2p_port);
		auto& pport = ::at32(nc.list_p2p_ports, p2p_port);

		real_socket = pport.p2p_socket;
		{
			// Ensures the socket & the bound list are updated at the same time to avoid races
			std::lock_guard vport_lock(pport.bound_p2p_vports_mutex);
			std::lock_guard sock_lock(mutex);

			if (p2p_vport == 0)
			{
				// Pick a free ephemeral vport
				sys_net.warning("[P2PS] vport was unassigned in bind!");
				p2p_vport = pport.get_port();

				while (pport.bound_p2ps_vports.contains(p2p_vport))
				{
					p2p_vport = pport.get_port();
				}

				std::set<s32> bound_ports{lv2_id};
				pport.bound_p2ps_vports.insert(std::make_pair(p2p_vport, std::move(bound_ports)));
			}
			else
			{
				if (pport.bound_p2ps_vports.contains(p2p_vport))
				{
					// Rebinding an occupied vport requires SO_REUSEADDR/SO_REUSEPORT on every current holder
					auto& bound_sockets = ::at32(pport.bound_p2ps_vports, p2p_vport);

					if (!sys_net_helpers::all_reusable(bound_sockets))
					{
						return -SYS_NET_EADDRINUSE;
					}

					bound_sockets.insert(lv2_id);
				}
				else
				{
					std::set<s32> bound_ports{lv2_id};
					pport.bound_p2ps_vports.insert(std::make_pair(p2p_vport, std::move(bound_ports)));
				}
			}

			port = p2p_port;
			vport = p2p_vport;
			socket = real_socket;
			bound_addr = psa_in_p2p->sin_addr;
		}
	}

	return CELL_OK;
}
std::pair<s32, sys_net_sockaddr> lv2_socket_p2ps::getsockname()
{
std::lock_guard lock(mutex);
// Unbound socket
if (!socket)
{
return {CELL_OK, {}};
}
sys_net_sockaddr sn_addr{};
sys_net_sockaddr_in_p2p* paddr = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&sn_addr);
paddr->sin_len = sizeof(sys_net_sockaddr_in);
paddr->sin_family = SYS_NET_AF_INET;
paddr->sin_port = vport;
paddr->sin_vport = port;
paddr->sin_addr = bound_addr;
return {CELL_OK, sn_addr};
}
// Initiates the U2S handshake: registers the stream routing key on the
// shared port, resets the receive state and sends a SYN (retransmitted by
// the timeout monitor until the SYN|ACK arrives, which handle_connected()
// turns into the connected state).
std::optional<s32> lv2_socket_p2ps::connect(const sys_net_sockaddr& addr)
{
	std::lock_guard lock(mutex);

	if (status != p2ps_stream_status::stream_closed)
	{
		sys_net.error("[P2PS] Called connect on a socket that is not closed!");
		return -SYS_NET_EALREADY;
	}

	p2ps_encapsulated_tcp send_hdr;
	const auto psa_in_p2p = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&addr);
	auto name = sys_net_addr_to_native_addr(addr);

	// This is purposefully inverted, not a bug
	const u16 dst_vport = psa_in_p2p->sin_port;
	const u16 dst_port = psa_in_p2p->sin_vport;

	socket_type real_socket{};

	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard list_lock(nc.list_p2p_ports_mutex);
		nc.create_p2p_port(port);
		auto& pport = ::at32(nc.list_p2p_ports, port);

		real_socket = pport.p2p_socket;
		{
			std::lock_guard lock(pport.bound_p2p_vports_mutex);
			if (vport == 0)
			{
				// Unassigned vport, assigns one
				sys_net.warning("[P2PS] vport was unassigned before connect!");
				vport = pport.get_port();

				while (pport.bound_p2p_vports.count(vport) || pport.bound_p2p_streams.count(static_cast<u64>(vport) << 32))
				{
					vport = pport.get_port();
				}
			}

			// Route incoming packets for (peer addr, our vport, peer vport) to this socket
			const u64 key = name.sin_addr.s_addr | (static_cast<u64>(vport) << 32) | (static_cast<u64>(dst_vport) << 48);
			pport.bound_p2p_streams.emplace(key, lv2_id);
		}
	}

	socket = real_socket;

	send_hdr.src_port = vport;
	send_hdr.dst_port = dst_vport;
	send_hdr.flags = p2ps_tcp_flags::SYN;
	send_hdr.seq = rand();

	// Fresh connection state
	op_addr = name.sin_addr.s_addr;
	op_port = dst_port;
	op_vport = dst_vport;
	cur_seq = send_hdr.seq + 1;
	data_beg_seq = 0;
	data_available = 0u;
	received_data.clear();
	status = p2ps_stream_status::stream_handshaking;

	std::vector<u8> packet = generate_u2s_packet(send_hdr, nullptr, 0);
	name.sin_port = std::bit_cast<u16, be_t<u16>>(dst_port); // not a bug
	send_u2s_packet(std::move(packet), reinterpret_cast<::sockaddr_in*>(&name), send_hdr.seq, true);

	return CELL_OK;
}
// Puts the socket in the listening state; backlog caps the number of
// pending connections handle_listening() will queue.
s32 lv2_socket_p2ps::listen(s32 backlog)
{
	std::lock_guard lock(mutex);
	status = p2ps_stream_status::stream_listening;
	max_backlog = backlog;
	return CELL_OK;
}
// Reads up to len bytes of contiguous, already-acknowledged stream data.
// Returns:
// - std::nullopt when the caller should block and retry,
// - {0, {}, {}} when the stream is closed (EOF semantics),
// - {-SYS_NET_EWOULDBLOCK, ...} for non-blocking reads with no data,
// - otherwise {bytes read, data, peer address in PS3 layout}.
// flags: SYS_NET_MSG_DONTWAIT forces a non-blocking read.
// Fix: dropped the misleading [[maybe_unused]] on `flags` — it IS read.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> lv2_socket_p2ps::recvfrom(s32 flags, u32 len, bool is_lock)
{
	std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

	if (is_lock)
	{
		lock.lock();
	}

	if (!data_available)
	{
		if (status == p2ps_stream_status::stream_closed)
		{
			sys_net.error("[P2PS] Called recvfrom on closed socket!");
			return {{0, {}, {}}};
		}

		if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT))
		{
			return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
		}

		return std::nullopt;
	}

	const u32 to_give = static_cast<u32>(std::min<u64>(data_available, len));
	sys_net_sockaddr addr{};
	std::vector<u8> dest_buf(to_give);
	sys_net.trace("[P2PS] STREAM-P2P socket had %u available, given %u", data_available, to_give);

	// Drain whole segments from received_data; a partially consumed segment
	// is re-inserted keyed by the sequence number of its first unread byte.
	u32 left_to_give = to_give;
	while (left_to_give)
	{
		auto& cur_data = received_data.begin()->second;
		auto to_give_for_this_packet = std::min(static_cast<u32>(cur_data.size()), left_to_give);
		memcpy(dest_buf.data() + (to_give - left_to_give), cur_data.data(), to_give_for_this_packet);
		if (cur_data.size() != to_give_for_this_packet)
		{
			auto amount_left = cur_data.size() - to_give_for_this_packet;
			std::vector<u8> new_vec(amount_left);
			memcpy(new_vec.data(), cur_data.data() + to_give_for_this_packet, amount_left);
			auto new_key = (received_data.begin()->first) + to_give_for_this_packet;
			received_data.emplace(new_key, std::move(new_vec));
		}

		received_data.erase(received_data.begin());
		left_to_give -= to_give_for_this_packet;
	}

	// Advance the receive window past the consumed bytes
	data_available -= to_give;
	data_beg_seq += to_give;

	// Peer address in PS3 layout (sin_port carries the vport)
	sys_net_sockaddr_in_p2p* addr_p2p = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&addr);
	addr_p2p->sin_family = AF_INET;
	addr_p2p->sin_addr = std::bit_cast<be_t<u32>, u32>(op_addr);
	addr_p2p->sin_port = op_vport;
	addr_p2p->sin_vport = op_port;
	addr_p2p->sin_len = sizeof(sys_net_sockaddr_in_p2p);

	return {{to_give, dest_buf, addr}};
}
// Sends buf to the connected peer: the payload is split into chunks that
// fit a single UDP datagram, each sent immediately and registered with the
// timeout monitor for retransmission until acknowledged. Always reports the
// whole buffer as sent. Any address in opt_sn_addr is overridden by the
// connected peer's address below.
// Fix: removed the unused local `stream_packets` left over from an earlier
// implementation.
std::optional<s32> lv2_socket_p2ps::sendto([[maybe_unused]] s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock)
{
	std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

	if (is_lock)
	{
		lock.lock();
	}

	if (status == p2ps_stream_status::stream_closed)
	{
		sys_net.error("[P2PS] Called sendto on a closed socket!");
		return -SYS_NET_ECONNRESET;
	}

	// Largest chunk that still fits one UDP payload after both headers
	constexpr u32 max_data_len = (65535 - (VPORT_P2P_HEADER_SIZE + sizeof(p2ps_encapsulated_tcp)));

	::sockaddr_in name{};
	if (opt_sn_addr)
	{
		name = sys_net_addr_to_native_addr(*opt_sn_addr);
	}

	// Prepare address (the connected peer, regardless of opt_sn_addr)
	name.sin_family = AF_INET;
	name.sin_port = std::bit_cast<u16, be_t<u16>>(op_port);
	name.sin_addr.s_addr = op_addr;

	// Prepares encapsulated tcp
	p2ps_encapsulated_tcp tcp_header;
	tcp_header.src_port = vport;
	tcp_header.dst_port = op_vport;

	// chop it up
	u32 cur_total_len = ::size32(buf);

	while (cur_total_len > 0)
	{
		u32 cur_data_len = std::min(cur_total_len, max_data_len);
		tcp_header.length = cur_data_len;
		tcp_header.seq = cur_seq;

		auto packet = generate_u2s_packet(tcp_header, &buf[buf.size() - cur_total_len], cur_data_len);
		send_u2s_packet(std::move(packet), &name, tcp_header.seq, true);

		cur_total_len -= cur_data_len;
		cur_seq += cur_data_len;
	}

	return {::size32(buf)};
}
// sendmsg is not implemented for stream P2P sockets yet; logs a TODO and
// returns an empty optional.
std::optional<s32> lv2_socket_p2ps::sendmsg([[maybe_unused]] s32 flags, [[maybe_unused]] const sys_net_msghdr& msg, [[maybe_unused]] bool is_lock)
{
	sys_net.todo("lv2_socket_p2ps::sendmsg");
	return {};
}
// Removes this socket from the owning nt_p2p_port's stream and vport maps,
// then drops all pending retransmissions. The native UDP socket is shared
// with other P2P sockets on the same port and is not closed here.
void lv2_socket_p2ps::close()
{
	// Never bound: nothing to unregister
	if (!port || !vport)
	{
		return;
	}

	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard lock(nc.list_p2p_ports_mutex);
		auto& p2p_port = ::at32(nc.list_p2p_ports, port);
		{
			std::lock_guard lock(p2p_port.bound_p2p_vports_mutex);

			// Drop every stream routing entry pointing at this socket
			for (auto it = p2p_port.bound_p2p_streams.begin(); it != p2p_port.bound_p2p_streams.end();)
			{
				if (it->second == lv2_id)
				{
					it = p2p_port.bound_p2p_streams.erase(it);
					continue;
				}
				it++;
			}

			// Unbind the vport, erasing the whole entry once no socket uses it
			if (p2p_port.bound_p2ps_vports.contains(vport))
			{
				auto& bound_ports = ::at32(p2p_port.bound_p2ps_vports, vport);
				bound_ports.erase(lv2_id);

				if (bound_ports.empty())
				{
					p2p_port.bound_p2ps_vports.erase(vport);
				}
			}
		}
	}

	auto& tcpm = g_fxo->get<named_thread<tcp_timeout_monitor>>();
	tcpm.clear_all_messages(lv2_id);
}
// Graceful shutdown is not implemented yet; logs a TODO and reports success.
s32 lv2_socket_p2ps::shutdown([[maybe_unused]] s32 how)
{
	sys_net.todo("[P2PS] shutdown");
	return CELL_OK;
}
// Poll support: on a connected stream, reports POLLIN when buffered data is
// available and POLLOUT unconditionally. Returns 1 if any requested event
// fired, 0 otherwise (non-connected states never report events).
// Fix: removed the redundant `status == stream_connected` re-check on the
// POLLOUT branch — it is already guaranteed by the enclosing condition.
s32 lv2_socket_p2ps::poll(sys_net_pollfd& sn_pfd, [[maybe_unused]] pollfd& native_pfd)
{
	std::lock_guard lock(mutex);
	sys_net.trace("[P2PS] poll checking for 0x%X", sn_pfd.events);

	if (status == p2ps_stream_status::stream_connected)
	{
		if ((sn_pfd.events & SYS_NET_POLLIN) && data_available)
		{
			sys_net.trace("[P2PS] p2ps has %u bytes available", data_available);
			sn_pfd.revents |= SYS_NET_POLLIN;
		}

		// Data can only be written if the socket is connected, which the
		// enclosing branch already guarantees
		if (sn_pfd.events & SYS_NET_POLLOUT)
		{
			sn_pfd.revents |= SYS_NET_POLLOUT;
		}

		if (sn_pfd.revents)
		{
			return 1;
		}
	}

	return 0;
}
// Select support: a connected stream is readable when buffered data is
// available and always writable; a listening socket is readable when the
// backlog is non-empty. The third (exception) slot is never set.
std::tuple<bool, bool, bool> lv2_socket_p2ps::select(bs_t<lv2_socket::poll_t> selected, [[maybe_unused]] pollfd& native_pfd)
{
	std::lock_guard lock(mutex);

	bool read_set = false;
	bool write_set = false;

	switch (status)
	{
	case p2ps_stream_status::stream_connected:
	{
		if ((selected & lv2_socket::poll_t::read) && data_available)
		{
			sys_net.trace("[P2PS] socket has %d bytes available", data_available);
			read_set = true;
		}

		if (selected & lv2_socket::poll_t::write)
		{
			sys_net.trace("[P2PS] socket is writeable");
			write_set = true;
		}

		break;
	}
	case p2ps_stream_status::stream_listening:
	{
		const auto bsize = backlog.size();

		if ((selected & lv2_socket::poll_t::read) && bsize)
		{
			sys_net.trace("[P2PS] socket has %d clients available", bsize);
			read_set = true;
		}

		break;
	}
	default:
	{
		break;
	}
	}

	return {read_set, write_set, false};
}
| 29,261
|
C++
|
.cpp
| 879
| 30.054608
| 208
| 0.662709
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,369
|
nt_p2p_port.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/nt_p2p_port.cpp
|
#include "stdafx.h"
#include <fcntl.h>
#include "nt_p2p_port.h"
#include "lv2_socket_native.h"
#include "lv2_socket_p2ps.h"
#include "util/asm.hpp"
#include "sys_net_helpers.h"
#include "Emu/NP/signaling_handler.h"
#include "sys_net_helpers.h"
#include "Emu/NP/vport0.h"
#include "Emu/NP/np_handler.h"
LOG_CHANNEL(sys_net);
namespace sys_net_helpers
{
	// Returns true if every socket in sock_ids has SO_REUSEADDR or
	// SO_REUSEPORT set, i.e. the bound vport may be shared by another bind.
	bool all_reusable(const std::set<s32>& sock_ids)
	{
		for (const s32 sock_id : sock_ids)
		{
			const auto [_, reusable] = idm::check<lv2_socket>(sock_id, [&](lv2_socket& sock) -> bool
				{
					auto [res_reuseaddr, optval_reuseaddr, optlen_reuseaddr] = sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEADDR, sizeof(s32));
					auto [res_reuseport, optval_reuseport, optlen_reuseport] = sock.getsockopt(SYS_NET_SOL_SOCKET, SYS_NET_SO_REUSEPORT, sizeof(s32));
					// Option counts as set only with a valid 4-byte non-zero value
					const bool reuse_addr = optlen_reuseaddr == 4 && !!optval_reuseaddr._int;
					const bool reuse_port = optlen_reuseport == 4 && !!optval_reuseport._int;
					return (reuse_addr || reuse_port);
				});

			// presumably a socket missing from idm also yields false here — verify
			if (!reusable)
			{
				return false;
			}
		}

		return true;
	}
} // namespace sys_net_helpers
// Opens, configures and binds the real non-blocking UDP socket shared by
// every P2P socket on this port, then requests a UPnP mapping for it.
// Throws fmt exceptions on socket/setsockopt/bind failure.
nt_p2p_port::nt_p2p_port(u16 port)
	: port(port)
{
	// Creates and bind P2P Socket
	p2p_socket = ::socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
#ifdef _WIN32
	if (p2p_socket == INVALID_SOCKET)
#else
	if (p2p_socket == -1)
#endif
		fmt::throw_exception("Failed to create DGRAM socket for P2P socket: %s!", get_last_error(true));

	// Make the socket non-blocking (platform-specific call)
#ifdef _WIN32
	u_long _true = 1;
	::ioctlsocket(p2p_socket, FIONBIO, &_true);
#else
	::fcntl(p2p_socket, F_SETFL, ::fcntl(p2p_socket, F_GETFL, 0) | O_NONBLOCK);
#endif

	u32 optval = 131072; // value obtained from DECR for a SOCK_DGRAM_P2P socket(should maybe be bigger for actual socket?)
	if (setsockopt(p2p_socket, SOL_SOCKET, SO_RCVBUF, reinterpret_cast<const char*>(&optval), sizeof(optval)) != 0)
		fmt::throw_exception("Error setsockopt SO_RCVBUF on P2P socket: %s", get_last_error(true));

	::sockaddr_in p2p_saddr{};
	p2p_saddr.sin_family = AF_INET;
	p2p_saddr.sin_port = std::bit_cast<u16, be_t<u16>>(port); // htons(port);
	p2p_saddr.sin_addr.s_addr = 0; // binds to 0.0.0.0
	const auto ret_bind = ::bind(p2p_socket, reinterpret_cast<sockaddr*>(&p2p_saddr), sizeof(p2p_saddr));

	if (ret_bind == -1)
		fmt::throw_exception("Failed to bind DGRAM socket to %d for P2P: %s!", port, get_last_error(true));

	// Ask the router to forward this port so external peers can reach us
	auto& nph = g_fxo->get<named_thread<np::np_handler>>();
	nph.upnp_add_port_mapping(port, "UDP");

	sys_net.notice("P2P port %d was bound!", port);
}
// Closes the underlying native UDP socket, if one was created.
nt_p2p_port::~nt_p2p_port()
{
	if (p2p_socket)
	{
#ifdef _WIN32
		::closesocket(p2p_socket);
#else
		::close(p2p_socket);
#endif
	}
}
// Trace-logs the fields of a decoded U2S TCP header.
void nt_p2p_port::dump_packet(p2ps_encapsulated_tcp* tcph)
{
	sys_net.trace("PACKET DUMP:\nsrc_port: %d\ndst_port: %d\nflags: %d\nseq: %d\nack: %d\nlen: %d", tcph->src_port, tcph->dst_port, tcph->flags, tcph->seq, tcph->ack, tcph->length);
}
// Must be used under bound_p2p_vports_mutex lock
// Hands out ephemeral virtual ports starting at 30000. When the u16 counter
// wraps past 65535 to 0, the next call re-seeds it to 30000 via the same
// zero check that performs the initial seeding.
u16 nt_p2p_port::get_port()
{
	if (binding_port == 0)
	{
		binding_port = 30000;
	}

	return binding_port++;
}
// Routes a stream packet to the connected socket identified by sock_id.
// Returns false when the socket is missing or its handler reports failure
// (i.e. the stream was closed), so the caller can drop the routing entry.
bool nt_p2p_port::handle_connected(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr)
{
	// Forward the packet under idm protection; the handler's bool result
	// propagates through idm::check's return value
	const auto forward_packet = [&](lv2_socket& sock) -> bool
	{
		ensure(sock.get_type() == SYS_NET_SOCK_STREAM_P2P);
		return reinterpret_cast<lv2_socket_p2ps&>(sock).handle_connected(tcp_header, data, op_addr, this);
	};

	const auto sock = idm::check<lv2_socket>(sock_id, forward_packet);

	if (!sock)
	{
		sys_net.error("[P2PS] Couldn't find the socket!");
		return false;
	}

	if (!sock.ret)
	{
		sys_net.error("[P2PS] handle_connected() failed!");
		return false;
	}

	return true;
}
// Routes a stream packet to a listening socket; returns false when the
// socket no longer exists (it may have been closed concurrently).
bool nt_p2p_port::handle_listening(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr)
{
	if (auto sock = idm::get<lv2_socket>(sock_id))
	{
		return reinterpret_cast<lv2_socket_p2ps&>(*sock).handle_listening(tcp_header, data, op_addr);
	}

	return false;
}
// Drains one datagram from the shared UDP socket and dispatches it:
// - vport 0: system traffic (RPCN or signaling subsets),
// - P2P_FLAG_P2P: DGRAM-P2P data, delivered to every socket bound to the vport,
// - P2P_FLAG_P2PS: U2S stream packets, validated then routed to the matching
//   connected stream or listening socket; unbound targets are answered with RST.
// Returns false when no datagram was available (or recv errored), true when
// one was consumed — even if it was then dropped as malformed/unroutable.
bool nt_p2p_port::recv_data()
{
	::sockaddr_storage native_addr{};
	::socklen_t native_addrlen = sizeof(native_addr);
	const auto recv_res = ::recvfrom(p2p_socket, reinterpret_cast<char*>(p2p_recv_data.data()), ::size32(p2p_recv_data), 0, reinterpret_cast<struct sockaddr*>(&native_addr), &native_addrlen);

	if (recv_res == -1)
	{
		auto lerr = get_last_error(false);
		// EINPROGRESS/EWOULDBLOCK just mean "no data" on this non-blocking socket
		if (lerr != SYS_NET_EINPROGRESS && lerr != SYS_NET_EWOULDBLOCK)
			sys_net.error("Error recvfrom on P2P socket: %d", lerr);

		return false;
	}

	// Need at least the 2-byte destination vport
	if (recv_res < static_cast<s32>(sizeof(u16)))
	{
		sys_net.error("Received badly formed packet on P2P port(no vport)!");
		return true;
	}

	u16 dst_vport = reinterpret_cast<le_t<u16>&>(p2p_recv_data[0]);

	// vport 0 is reserved for system traffic, tagged with a subset byte
	if (dst_vport == 0)
	{
		if (recv_res < VPORT_0_HEADER_SIZE)
		{
			sys_net.error("Bad vport 0 packet(no subset)!");
			return true;
		}

		const u8 subset = p2p_recv_data[2];
		const auto data_size = recv_res - VPORT_0_HEADER_SIZE;
		std::vector<u8> vport_0_data(p2p_recv_data.data() + VPORT_0_HEADER_SIZE, p2p_recv_data.data() + VPORT_0_HEADER_SIZE + data_size);

		switch (subset)
		{
		case SUBSET_RPCN:
		{
			// Queued for the RPCN client to consume
			std::lock_guard lock(s_rpcn_mutex);
			rpcn_msgs.push_back(std::move(vport_0_data));
			return true;
		}
		case SUBSET_SIGNALING:
		{
			// Queued for the signaling handler, which is woken immediately
			signaling_message msg;
			msg.src_addr = reinterpret_cast<struct sockaddr_in*>(&native_addr)->sin_addr.s_addr;
			msg.src_port = std::bit_cast<u16, be_t<u16>>(reinterpret_cast<struct sockaddr_in*>(&native_addr)->sin_port);
			msg.data = std::move(vport_0_data);

			{
				std::lock_guard lock(s_sign_mutex);
				sign_msgs.push_back(std::move(msg));
			}

			auto& sigh = g_fxo->get<named_thread<signaling_handler>>();
			sigh.wake_up();
			return true;
		}
		default:
		{
			sys_net.error("Invalid vport 0 subset!");
			return true;
		}
		}
	}

	// Regular P2P traffic requires the full 6-byte vport header
	if (recv_res < VPORT_P2P_HEADER_SIZE)
	{
		return true;
	}

	const u16 src_vport = *reinterpret_cast<le_t<u16>*>(p2p_recv_data.data() + sizeof(u16));
	const u16 vport_flags = *reinterpret_cast<le_t<u16>*>(p2p_recv_data.data() + sizeof(u16) + sizeof(u16));
	std::vector<u8> p2p_data(recv_res - VPORT_P2P_HEADER_SIZE);
	memcpy(p2p_data.data(), p2p_recv_data.data() + VPORT_P2P_HEADER_SIZE, p2p_data.size());

	if (vport_flags & P2P_FLAG_P2P)
	{
		// Datagram P2P: hand a copy to every socket bound to the vport
		std::lock_guard lock(bound_p2p_vports_mutex);
		if (bound_p2p_vports.contains(dst_vport))
		{
			sys_net_sockaddr_in_p2p p2p_addr{};

			p2p_addr.sin_len = sizeof(sys_net_sockaddr_in);
			p2p_addr.sin_family = SYS_NET_AF_INET;
			p2p_addr.sin_addr = std::bit_cast<be_t<u32>, u32>(reinterpret_cast<struct sockaddr_in*>(&native_addr)->sin_addr.s_addr);
			p2p_addr.sin_vport = src_vport;
			p2p_addr.sin_port = std::bit_cast<be_t<u16>, u16>(reinterpret_cast<struct sockaddr_in*>(&native_addr)->sin_port);

			auto& bound_sockets = ::at32(bound_p2p_vports, dst_vport);

			for (const auto sock_id : bound_sockets)
			{
				const auto sock = idm::check<lv2_socket>(sock_id, [&](lv2_socket& sock)
					{
						ensure(sock.get_type() == SYS_NET_SOCK_DGRAM_P2P);
						auto& sock_p2p = reinterpret_cast<lv2_socket_p2p&>(sock);

						sock_p2p.handle_new_data(p2p_addr, p2p_data);
					});

				if (!sock)
				{
					// Stale binding: drop the dead socket (and the vport if now empty)
					sys_net.error("Socket %d found in bound_p2p_vports didn't exist!", sock_id);
					bound_sockets.erase(sock_id);

					if (bound_sockets.empty())
					{
						bound_p2p_vports.erase(dst_vport);
					}
				}
			}

			return true;
		}
	}
	else if (vport_flags & P2P_FLAG_P2PS)
	{
		// Stream P2P: validate the encapsulated TCP header before routing
		if (p2p_data.size() < sizeof(p2ps_encapsulated_tcp))
		{
			sys_net.notice("Received P2P packet targeted at unbound vport(likely) or invalid(vport=%d)", dst_vport);
			return true;
		}

		auto* tcp_header = reinterpret_cast<p2ps_encapsulated_tcp*>(p2p_data.data());

		// Validate signature & length
		if (tcp_header->signature != P2PS_U2S_SIG)
		{
			sys_net.notice("Received P2P packet targeted at unbound vport(vport=%d)", dst_vport);
			return true;
		}

		if (tcp_header->length != (p2p_data.size() - sizeof(p2ps_encapsulated_tcp)))
		{
			sys_net.error("Received STREAM-P2P packet tcp length didn't match packet length");
			return true;
		}

		// Sanity check
		if (tcp_header->dst_port != dst_vport)
		{
			sys_net.error("Received STREAM-P2P packet with dst_port != vport");
			return true;
		}

		// Validate checksum (computed with the checksum field zeroed)
		u16 given_checksum = tcp_header->checksum;
		tcp_header->checksum = 0;
		if (given_checksum != u2s_tcp_checksum(reinterpret_cast<const le_t<u16>*>(p2p_data.data()), p2p_data.size()))
		{
			sys_net.error("Checksum is invalid, dropping packet!");
			return true;
		}

		// The packet is valid
		dump_packet(tcp_header);

		// Check if it's bound: key encodes peer address + both vports
		const u64 key_connected = (reinterpret_cast<struct sockaddr_in*>(&native_addr)->sin_addr.s_addr) | (static_cast<u64>(tcp_header->src_port) << 48) | (static_cast<u64>(tcp_header->dst_port) << 32);

		{
			std::lock_guard lock(bound_p2p_vports_mutex);
			if (bound_p2p_streams.contains(key_connected))
			{
				const auto sock_id = ::at32(bound_p2p_streams, key_connected);
				sys_net.trace("Received packet for connected STREAM-P2P socket(s=%d)", sock_id);
				handle_connected(sock_id, tcp_header, p2p_data.data() + sizeof(p2ps_encapsulated_tcp), &native_addr);
				return true;
			}

			if (bound_p2ps_vports.contains(tcp_header->dst_port))
			{
				const auto& bound_sockets = ::at32(bound_p2ps_vports, tcp_header->dst_port);

				for (const auto sock_id : bound_sockets)
				{
					sys_net.trace("Received packet for listening STREAM-P2P socket(s=%d)", sock_id);
					handle_listening(sock_id, tcp_header, p2p_data.data() + sizeof(p2ps_encapsulated_tcp), &native_addr);
				}
				return true;
			}

			// Never answer an RST with another RST (avoids reset loops)
			if (tcp_header->flags == p2ps_tcp_flags::RST)
			{
				sys_net.trace("[P2PS] Received RST on unbound P2PS");
				return true;
			}

			// The P2PS packet was sent to an unbound vport, send a RST packet
			p2ps_encapsulated_tcp send_hdr;
			send_hdr.src_port = tcp_header->dst_port;
			send_hdr.dst_port = tcp_header->src_port;
			send_hdr.flags = p2ps_tcp_flags::RST;
			auto packet = generate_u2s_packet(send_hdr, nullptr, 0);

			if (::sendto(p2p_socket, reinterpret_cast<char*>(packet.data()), ::size32(packet), 0, reinterpret_cast<const sockaddr*>(&native_addr), sizeof(sockaddr_in)) == -1)
			{
				sys_net.error("[P2PS] Error sending RST to sender to unbound P2PS: %s", get_last_error(false));
				return true;
			}

			sys_net.trace("[P2PS] Sent RST to sender to unbound P2PS");
			return true;
		}
	}

	sys_net.notice("Received a P2P packet with no bound target(dst_vport = %d)", dst_vport);
	return true;
}
| 10,626
|
C++
|
.cpp
| 294
| 32.914966
| 197
| 0.677344
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,370
|
lv2_socket_p2p.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_p2p.cpp
|
#include "stdafx.h"
#include "lv2_socket_p2p.h"
#include "Emu/NP/np_helpers.h"
#include "network_context.h"
#include "sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// P2P (UDP-emulated) socket constructor.
// Pre-populates the sockopt cache so a later guest getsockopt(SO_TYPE)
// reports SYS_NET_SOCK_DGRAM_P2P without touching the native socket.
lv2_socket_p2p::lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
	: lv2_socket(family, type, protocol)
{
	sockopt_cache cache_type;
	cache_type.data._int = SYS_NET_SOCK_DGRAM_P2P;
	cache_type.len = 4;

	// Cache key layout matches getsockopt/setsockopt: (level << 32) | optname
	sockopts[(static_cast<u64>(SYS_NET_SOL_SOCKET) << 32ull) | SYS_NET_SO_TYPE] = cache_type;
}
// Savestate load constructor: restores the P2P addressing state, then
// rebuilds the pending-datagram queue from the serialized deque.
lv2_socket_p2p::lv2_socket_p2p(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(stx::make_exact(ar), type)
{
	// Field order must match save() below
	ar(port, vport, bound_addr);

	auto data_dequeue = ar.pop<std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>>>();

	for (; !data_dequeue.empty(); data_dequeue.pop_front())
	{
		data.push(std::move(data_dequeue.front()));
	}
}

// Savestate store: serializes base socket state plus P2P addressing and
// any datagrams received but not yet consumed by the guest.
void lv2_socket_p2p::save(utils::serial& ar)
{
	lv2_socket::save(ar, true);
	ar(port, vport, bound_addr);

	// std::queue cannot be serialized directly; drain a copy into a deque
	std::deque<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data_dequeue;

	for (auto save_data = ::as_rvalue(data); !save_data.empty(); save_data.pop())
	{
		data_dequeue.push_back(std::move(save_data.front()));
	}

	ar(data_dequeue);
}
// Called by the network thread when a datagram arrives for this socket's
// vport: queues the payload and satisfies any waiter blocked on a read event.
void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr, std::vector<u8> p2p_data)
{
	std::lock_guard lock(mutex);

	sys_net.trace("Received a P2P packet for vport %d and saved it", p2p_addr.sin_vport);
	data.push(std::make_pair(std::move(p2p_addr), std::move(p2p_data)));

	// Check if poll is happening
	if (events.test_and_reset(lv2_socket::poll_t::read))
	{
		bs_t<lv2_socket::poll_t> read_event = lv2_socket::poll_t::read;

		for (auto it = queue.begin(); it != queue.end();)
		{
			// A callback returning true means that waiter is now satisfied
			if (it->second(read_event))
			{
				it = queue.erase(it);
				continue;
			}
			it++;
		}

		// No waiters left: clear all pending event flags
		if (queue.empty())
		{
			events.store({});
		}
	}
}
// The operations below are connection-oriented and therefore invalid on a
// datagram P2P socket; reaching them indicates an emulation bug, hence the
// fatal log. They all return empty/default values.

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2p::accept([[maybe_unused]] bool is_lock)
{
	sys_net.fatal("[P2P] accept() called on a P2P socket");
	return {};
}

std::optional<s32> lv2_socket_p2p::connect([[maybe_unused]] const sys_net_sockaddr& addr)
{
	sys_net.fatal("[P2P] connect() called on a P2P socket");
	return {};
}

s32 lv2_socket_p2p::connect_followup()
{
	sys_net.fatal("[P2P] connect_followup() called on a P2P socket");
	return {};
}

std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getpeername()
{
	sys_net.fatal("[P2P] getpeername() called on a P2P socket");
	return {};
}

s32 lv2_socket_p2p::listen([[maybe_unused]] s32 backlog)
{
	sys_net.fatal("[P2P] listen() called on a P2P socket");
	return {};
}
// Binds the socket to a (real UDP port, virtual port) pair.
// A vport of 0 requests automatic assignment starting at 30000.
// Returns -SYS_NET_EINVAL for port 0, -SYS_NET_EADDRINUSE when the vport
// is taken by a socket without SO_REUSEADDR/SO_REUSEPORT, CELL_OK otherwise.
s32 lv2_socket_p2p::bind(const sys_net_sockaddr& addr)
{
	const auto* psa_in_p2p = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&addr);
	u16 p2p_port = psa_in_p2p->sin_port;
	u16 p2p_vport = psa_in_p2p->sin_vport;

	sys_net.notice("[P2P] Trying to bind %s:%d:%d", np::ip_to_string(std::bit_cast<u32>(psa_in_p2p->sin_addr)), p2p_port, p2p_vport);

	if (p2p_port != SCE_NP_PORT)
	{
		if (p2p_port == 0)
		{
			return -SYS_NET_EINVAL;
		}
		sys_net.warning("[P2P] Attempting to bind a socket to a port != %d", +SCE_NP_PORT);
	}

	socket_type real_socket{};

	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard list_lock(nc.list_p2p_ports_mutex);
		nc.create_p2p_port(p2p_port);

		auto& pport = ::at32(nc.list_p2p_ports, p2p_port);
		real_socket = pport.p2p_socket;
		{
			std::lock_guard lock(pport.bound_p2p_vports_mutex);
			if (p2p_vport == 0)
			{
				// Find a free vport starting at 30000
				p2p_vport = 30000;
				while (pport.bound_p2p_vports.contains(p2p_vport))
				{
					p2p_vport++;
				}

				// Register the freshly assigned vport: without this the socket
				// is never added to the routing table, so incoming packets for
				// the auto-assigned vport would be dropped and a later auto-bind
				// could pick the same vport again
				std::set<s32> bound_ports{lv2_id};
				pport.bound_p2p_vports.insert(std::make_pair(p2p_vport, std::move(bound_ports)));
			}
			else if (pport.bound_p2p_vports.contains(p2p_vport))
			{
				// Check that all other sockets are SO_REUSEADDR or SO_REUSEPORT
				auto& bound_sockets = ::at32(pport.bound_p2p_vports, p2p_vport);
				if (!sys_net_helpers::all_reusable(bound_sockets))
				{
					return -SYS_NET_EADDRINUSE;
				}

				bound_sockets.insert(lv2_id);
			}
			else
			{
				std::set<s32> bound_ports{lv2_id};
				pport.bound_p2p_vports.insert(std::make_pair(p2p_vport, std::move(bound_ports)));
			}
		}
	}

	{
		std::lock_guard lock(mutex);
		port = p2p_port;
		vport = p2p_vport;
		socket = real_socket;
		bound_addr = psa_in_p2p->sin_addr;
	}

	return CELL_OK;
}
// Reports the P2P address (real port + virtual port) this socket is
// bound to; an unbound socket yields success with a zeroed address.
std::pair<s32, sys_net_sockaddr> lv2_socket_p2p::getsockname()
{
	std::lock_guard lock(mutex);

	sys_net_sockaddr sn_addr{};

	// Only fill in the address when the socket has actually been bound
	if (socket)
	{
		auto* p2p_addr = reinterpret_cast<sys_net_sockaddr_in_p2p*>(&sn_addr);

		p2p_addr->sin_len = sizeof(sys_net_sockaddr_in);
		p2p_addr->sin_family = SYS_NET_AF_INET;
		p2p_addr->sin_port = port;
		p2p_addr->sin_vport = vport;
		p2p_addr->sin_addr = bound_addr;
	}

	return {CELL_OK, sn_addr};
}
// Serves socket options from the local cache filled by setsockopt();
// no native getsockopt is ever issued for P2P sockets.
std::tuple<s32, lv2_socket::sockopt_data, u32> lv2_socket_p2p::getsockopt(s32 level, s32 optname, u32 len)
{
	std::lock_guard lock(mutex);

	const u64 option_key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);

	if (const auto it = sockopts.find(option_key); it != sockopts.end())
	{
		return {CELL_OK, it->second.data, it->second.len};
	}

	sys_net.error("Unhandled getsockopt(level=%d, optname=%d, len=%d)", level, optname, len);
	return {};
}
// Caches the option value for later getsockopt() queries; only
// SYS_NET_SO_NBIO (non-blocking mode) is acted upon directly.
s32 lv2_socket_p2p::setsockopt(s32 level, s32 optname, const std::vector<u8>& optval)
{
	std::lock_guard lock(mutex);

	// Guard against undersized option buffers before reading a 32-bit value
	if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO && optval.size() >= sizeof(be_t<s32>))
	{
		so_nbio = *reinterpret_cast<const be_t<s32>*>(optval.data());
	}

	const u64 key = (static_cast<u64>(level) << 32) | static_cast<u64>(optname);

	sockopt_cache cache{};
	if (!optval.empty())
	{
		// Clamp the copy to the cache storage so a guest-supplied oversized
		// optval cannot overflow the sockopt_data union
		memcpy(&cache.data._int, optval.data(), optval.size() < sizeof(cache.data) ? optval.size() : sizeof(cache.data));
	}
	// Remember the guest-reported length even if the data was truncated
	cache.len = ::size32(optval);

	sockopts[key] = std::move(cache);

	return CELL_OK;
}
// Dequeues one pending datagram, truncating it to 'len' bytes.
// Returns nullopt when empty and blocking (caller will retry),
// -SYS_NET_EWOULDBLOCK when empty and non-blocking.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> lv2_socket_p2p::recvfrom(s32 flags, u32 len, bool is_lock)
{
	// Caller may already hold the socket mutex (is_lock == false)
	std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

	if (is_lock)
	{
		lock.lock();
	}

	if (data.empty())
	{
		if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT))
			return {{-SYS_NET_EWOULDBLOCK, {}, {}}};

		return std::nullopt;
	}

	sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport, data.size());

	std::vector<u8> res_buf(len);

	const auto& p2p_data = data.front();
	// Bytes actually delivered; excess payload is silently discarded (UDP semantics)
	s32 native_result = std::min(len, static_cast<u32>(p2p_data.second.size()));
	memcpy(res_buf.data(), p2p_data.second.data(), native_result);

	sys_net_sockaddr sn_addr;
	// NOTE(review): copies sizeof(sys_net_sockaddr) bytes out of a
	// sys_net_sockaddr_in_p2p — assumes both structs have the same size; verify
	memcpy(&sn_addr, &p2p_data.first, sizeof(sn_addr));
	data.pop();

	return {{native_result, res_buf, sn_addr}};
}
// Sends one datagram, prefixed with the emulator's vport header
// (dst vport, src vport, P2P flag — all little-endian u16), over the
// shared native UDP socket for this port.
// Returns the guest payload size on success (header bytes excluded).
std::optional<s32> lv2_socket_p2p::sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock)
{
	std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

	if (is_lock)
	{
		lock.lock();
	}

	ensure(opt_sn_addr);
	ensure(socket);                                                        // ensures it has been bound
	ensure(buf.size() <= static_cast<usz>(65535 - VPORT_P2P_HEADER_SIZE)); // catch games using full payload for future fragmentation implementation if necessary
	const u16 p2p_port = reinterpret_cast<const sys_net_sockaddr_in*>(&*opt_sn_addr)->sin_port;
	const u16 p2p_vport = reinterpret_cast<const sys_net_sockaddr_in_p2p*>(&*opt_sn_addr)->sin_vport;

	auto native_addr = sys_net_addr_to_native_addr(*opt_sn_addr);

	char ip_str[16];
	inet_ntop(AF_INET, &native_addr.sin_addr, ip_str, sizeof(ip_str));

	sys_net.trace("[P2P] Sending a packet to %s:%d:%d", ip_str, p2p_port, p2p_vport);

	// Prepend the 6-byte vport header to the guest payload
	std::vector<u8> p2p_data(buf.size() + VPORT_P2P_HEADER_SIZE);
	const le_t<u16> p2p_vport_le = p2p_vport;
	const le_t<u16> src_vport_le = vport;
	const le_t<u16> p2p_flags_le = P2P_FLAG_P2P;
	memcpy(p2p_data.data(), &p2p_vport_le, sizeof(u16));
	memcpy(p2p_data.data() + sizeof(u16), &src_vport_le, sizeof(u16));
	memcpy(p2p_data.data() + sizeof(u16) + sizeof(u16), &p2p_flags_le, sizeof(u16));
	memcpy(p2p_data.data() + VPORT_P2P_HEADER_SIZE, buf.data(), buf.size());

	int native_flags = 0;
	if (flags & SYS_NET_MSG_WAITALL)
	{
		native_flags |= MSG_WAITALL;
	}

	auto native_result = ::sendto(socket, reinterpret_cast<const char*>(p2p_data.data()), ::size32(p2p_data), native_flags, reinterpret_cast<struct sockaddr*>(&native_addr), sizeof(native_addr));

	if (native_result >= 0)
	{
		// Report only the guest payload bytes, not the injected header
		return {std::max<s32>(native_result - VPORT_P2P_HEADER_SIZE, 0l)};
	}

	s32 result = get_last_error(!so_nbio && (flags & SYS_NET_MSG_DONTWAIT) == 0);

	if (result)
	{
		return {-result};
	}

	// Note that this can only happen if the send buffer is full
	return std::nullopt;
}
// Scatter-gather send: not implemented for P2P sockets yet.
std::optional<s32> lv2_socket_p2p::sendmsg([[maybe_unused]] s32 flags, [[maybe_unused]] const sys_net_msghdr& msg, [[maybe_unused]] bool is_lock)
{
	sys_net.todo("lv2_socket_p2p::sendmsg");
	return {};
}

// Unregisters this socket from its vport routing entry; the shared native
// UDP socket itself stays open as other sockets may use the same port.
void lv2_socket_p2p::close()
{
	// Never bound: nothing to unregister
	if (!port || !vport)
	{
		return;
	}

	auto& nc = g_fxo->get<p2p_context>();
	{
		std::lock_guard lock(nc.list_p2p_ports_mutex);
		auto& p2p_port = ::at32(nc.list_p2p_ports, port);
		{
			std::lock_guard lock(p2p_port.bound_p2p_vports_mutex);
			if (!p2p_port.bound_p2p_vports.contains(vport))
			{
				return;
			}

			auto& bound_sockets = ::at32(p2p_port.bound_p2p_vports, vport);
			bound_sockets.erase(lv2_id);

			// Drop the vport entry entirely once its last socket closes
			if (bound_sockets.empty())
			{
				p2p_port.bound_p2p_vports.erase(vport);
			}
		}
	}
}

// Shutdown is a no-op for P2P datagram sockets.
s32 lv2_socket_p2p::shutdown([[maybe_unused]] s32 how)
{
	sys_net.todo("[P2P] shutdown");
	return CELL_OK;
}
// poll(): a bound P2P socket is readable when a datagram is queued and
// always writable; the native pollfd is ignored entirely.
s32 lv2_socket_p2p::poll(sys_net_pollfd& sn_pfd, [[maybe_unused]] pollfd& native_pfd)
{
	std::lock_guard lock(mutex);
	ensure(vport);

	// Readable as soon as at least one datagram is pending
	if (!data.empty() && (sn_pfd.events & SYS_NET_POLLIN))
	{
		sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport, data.size());
		sn_pfd.revents |= SYS_NET_POLLIN;
	}

	// Writes never block on a datagram socket
	if (sn_pfd.events & SYS_NET_POLLOUT)
	{
		sn_pfd.revents |= SYS_NET_POLLOUT;
	}

	return sn_pfd.revents ? 1 : 0;
}

// select(): same readiness rules as poll(); the error slot is never set.
std::tuple<bool, bool, bool> lv2_socket_p2p::select(bs_t<lv2_socket::poll_t> selected, [[maybe_unused]] pollfd& native_pfd)
{
	std::lock_guard lock(mutex);

	bool readable = false;
	bool writable = false;

	// Only a bound socket with queued data counts as readable
	if ((selected & lv2_socket::poll_t::read) && vport && !data.empty())
	{
		sys_net.trace("[P2P] p2p_data for vport %d contains %d elements", vport, data.size());
		readable = true;
	}

	if (selected & lv2_socket::poll_t::write)
	{
		writable = true;
	}

	return {readable, writable, false};
}
| 10,514
|
C++
|
.cpp
| 320
| 30.315625
| 192
| 0.677907
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,371
|
lv2_socket.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket.cpp
|
#include "stdafx.h"
#include "lv2_socket.h"
#include "network_context.h"
LOG_CHANNEL(sys_net);
// Base socket constructor: records the (family, type, protocol) triple.
lv2_socket::lv2_socket(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
	: family(family)
	, type(type)
	, protocol(protocol)
{
}
// Acquires the socket's mutex; the caller holds it via the returned lock.
std::unique_lock<shared_mutex> lv2_socket::lock()
{
	return std::unique_lock(mutex);
}

// Simple accessors for the immutable identity of the socket.
lv2_socket_family lv2_socket::get_family() const
{
	return family;
}

lv2_socket_type lv2_socket::get_type() const
{
	return type;
}

lv2_ip_protocol lv2_socket::get_protocol() const
{
	return protocol;
}

// Number of waiters currently queued on this socket (poll/select callbacks).
std::size_t lv2_socket::get_queue_size() const
{
	return queue.size();
}

// Underlying native socket handle.
socket_type lv2_socket::get_socket() const
{
	return socket;
}

#ifdef _WIN32
// Windows only: tracks an in-flight non-blocking connect, needed to work
// around WSAPoll not reporting failed connections.
bool lv2_socket::is_connecting() const
{
	return connecting;
}

void lv2_socket::set_connecting(bool connecting)
{
	this->connecting = connecting;
}
#endif

// Records the LV2 object id assigned by the id manager.
void lv2_socket::set_lv2_id(u32 id)
{
	lv2_id = id;
}

// Snapshot of the poll event flags currently being waited on.
bs_t<lv2_socket::poll_t> lv2_socket::get_events() const
{
	return events.load();
}

// Adds event flags to the set being waited on.
void lv2_socket::set_poll_event(bs_t<lv2_socket::poll_t> event)
{
	events += event;
}
// Registers a PPU thread waiting for 'event' on this socket along with its
// wake-up callback. NOTE(review): no lock is taken here — presumably the
// caller already holds the socket mutex; confirm at call sites.
void lv2_socket::poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event, std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb)
{
	set_poll_event(event);
	queue.emplace_back(std::move(ppu), poll_cb);

	// Makes sure network_context thread is awaken
	if (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)
	{
		auto& nc = g_fxo->get<network_context>();
		const u32 prev_value = nc.num_polls.fetch_add(1);
		// Only notify on the 0 -> 1 transition; the worker sleeps when idle
		if (!prev_value)
		{
			nc.num_polls.notify_one();
		}
	}
}

// Removes all queued waiters belonging to 'ppu' (e.g. on thread abort)
// and returns how many were removed.
u32 lv2_socket::clear_queue(ppu_thread* ppu)
{
	std::lock_guard lock(mutex);

	u32 cleared = 0;

	for (auto it = queue.begin(); it != queue.end();)
	{
		if (it->first.get() == ppu)
		{
			it = queue.erase(it);
			cleared++;
			continue;
		}
		it++;
	}

	// No waiters left: clear the pending event flags as well
	if (queue.empty())
	{
		events.store({});
	}

	if (cleared && (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM))
	{
		// Makes sure network_context thread can go back to sleep if there is no active polling
		const u32 prev_value = g_fxo->get<network_context>().num_polls.fetch_sub(cleared);
		ensure(prev_value >= cleared);
	}

	return cleared;
}
// Translates native poll() results into lv2 poll events and dispatches them
// to queued waiters. Each event flag is consumed (test_and_reset) at most
// once per call; callbacks returning true are considered satisfied and removed.
void lv2_socket::handle_events(const pollfd& native_pfd, [[maybe_unused]] bool unset_connecting)
{
	bs_t<lv2_socket::poll_t> events_happening{};

	// POLLHUP also counts as readable so a closed peer unblocks readers
	if (native_pfd.revents & (POLLIN | POLLHUP) && events.test_and_reset(lv2_socket::poll_t::read))
		events_happening += lv2_socket::poll_t::read;
	if (native_pfd.revents & POLLOUT && events.test_and_reset(lv2_socket::poll_t::write))
		events_happening += lv2_socket::poll_t::write;
	if (native_pfd.revents & POLLERR && events.test_and_reset(lv2_socket::poll_t::error))
		events_happening += lv2_socket::poll_t::error;

	// Also run the callbacks with no events when timeouts are configured,
	// so waiters get a chance to expire
	if (events_happening || (!queue.empty() && (so_rcvtimeo || so_sendtimeo)))
	{
		std::lock_guard lock(mutex);
#ifdef _WIN32
		if (unset_connecting)
			set_connecting(false);
#endif
		u32 handled = 0;

		for (auto it = queue.begin(); it != queue.end();)
		{
			if (it->second(events_happening))
			{
				it = queue.erase(it);
				handled++;
				continue;
			}
			it++;
		}

		if (handled && (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM))
		{
			// Keep the network thread's active-poll counter in sync
			const u32 prev_value = g_fxo->get<network_context>().num_polls.fetch_sub(handled);
			ensure(prev_value >= handled);
		}

		if (queue.empty())
		{
			events.store({});
		}
	}
}
// Hands the PPU thread to the worker that services this socket class:
// INET sockets wake via network_context, P2P sockets via p2p_context.
void lv2_socket::queue_wake(ppu_thread* ppu)
{
	if (type == SYS_NET_SOCK_STREAM || type == SYS_NET_SOCK_DGRAM)
	{
		g_fxo->get<network_context>().ppu_to_awake.emplace_back(ppu);
	}
	else if (type == SYS_NET_SOCK_DGRAM_P2P || type == SYS_NET_SOCK_STREAM_P2P)
	{
		g_fxo->get<p2p_context>().ppu_to_awake.emplace_back(ppu);
	}
	// Other socket types have no servicing thread and are left alone
}
| 3,756
|
C++
|
.cpp
| 151
| 22.635762
| 147
| 0.691255
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,372
|
sys_net_helpers.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/sys_net_helpers.cpp
|
#include "stdafx.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUThread.h"
#include "lv2_socket.h"
#include "sys_net_helpers.h"
LOG_CHANNEL(sys_net);
// Returns the last error reported by the native socket layer
// (WSAGetLastError on Windows, errno elsewhere).
int get_native_error()
{
#ifdef _WIN32
	return WSAGetLastError();
#else
	return errno;
#endif
}
// Maps a native socket error (WSA* on Windows, errno elsewhere) to the
// matching sys_net error code. When 'is_blocking' is set, EWOULDBLOCK and
// EINPROGRESS are swallowed (returned as 0) since the blocking path retries.
// Throws on an error code with no sys_net equivalent.
sys_net_error convert_error(bool is_blocking, int native_error, [[maybe_unused]] bool is_connecting)
{
	// Convert the error code for socket functions to a one for sys_net
	sys_net_error result{};
	const char* name{};

	// ERROR_CASE maps one native constant to its SYS_NET_* twin and records
	// the textual name for logging
#ifdef _WIN32
#define ERROR_CASE(error)         \
	case WSA##error:              \
		result = SYS_NET_##error; \
		name = #error;            \
		break;
#else
#define ERROR_CASE(error)         \
	case error:                   \
		result = SYS_NET_##error; \
		name = #error;            \
		break;
#endif
	switch (native_error)
	{
#ifndef _WIN32
		ERROR_CASE(ENOENT);
		ERROR_CASE(ENOMEM);
		ERROR_CASE(EBUSY);
		ERROR_CASE(ENOSPC);
		ERROR_CASE(EPIPE);
#endif
		// TODO: We don't currently support EFAULT or EINTR
		// ERROR_CASE(EFAULT);
		// ERROR_CASE(EINTR);
		ERROR_CASE(EBADF);
		ERROR_CASE(EACCES);
		ERROR_CASE(EINVAL);
		ERROR_CASE(EMFILE);
		ERROR_CASE(EWOULDBLOCK);
		ERROR_CASE(EINPROGRESS);
		ERROR_CASE(EALREADY);
		ERROR_CASE(EDESTADDRREQ);
		ERROR_CASE(EMSGSIZE);
		ERROR_CASE(EPROTOTYPE);
		ERROR_CASE(ENOPROTOOPT);
		ERROR_CASE(EPROTONOSUPPORT);
		ERROR_CASE(EOPNOTSUPP);
		ERROR_CASE(EPFNOSUPPORT);
		ERROR_CASE(EAFNOSUPPORT);
		ERROR_CASE(EADDRINUSE);
		ERROR_CASE(EADDRNOTAVAIL);
		ERROR_CASE(ENETDOWN);
		ERROR_CASE(ENETUNREACH);
		ERROR_CASE(ECONNABORTED);
		ERROR_CASE(ECONNRESET);
		ERROR_CASE(ENOBUFS);
		ERROR_CASE(EISCONN);
		ERROR_CASE(ENOTCONN);
		ERROR_CASE(ESHUTDOWN);
		ERROR_CASE(ETOOMANYREFS);
		ERROR_CASE(ETIMEDOUT);
		ERROR_CASE(ECONNREFUSED);
		ERROR_CASE(EHOSTDOWN);
		ERROR_CASE(EHOSTUNREACH);
#ifdef _WIN32
	// Windows likes to be special with unique errors
	case WSAENETRESET:
		result = SYS_NET_ECONNRESET;
		name = "WSAENETRESET";
		break;
#endif
	default:
		fmt::throw_exception("sys_net get_last_error(is_blocking=%d, native_error=%d): Unknown/illegal socket error", is_blocking, native_error);
	}

#ifdef _WIN32
	if (is_connecting)
	{
		// Windows will return SYS_NET_ENOTCONN when recvfrom/sendto is called on a socket that is connecting but not yet connected
		if (result == SYS_NET_ENOTCONN)
			return SYS_NET_EAGAIN;
	}
#endif

	// Would-block style errors are expected and not worth a log line
	if (name && result != SYS_NET_EWOULDBLOCK && result != SYS_NET_EINPROGRESS)
	{
		sys_net.error("Socket error %s", name);
	}

	if (is_blocking && result == SYS_NET_EWOULDBLOCK)
	{
		return {};
	}

	if (is_blocking && result == SYS_NET_EINPROGRESS)
	{
		return {};
	}

	return result;
#undef ERROR_CASE
}
// Convenience wrapper: converts the most recent native socket error.
sys_net_error get_last_error(bool is_blocking, bool is_connecting)
{
	return convert_error(is_blocking, get_native_error(), is_connecting);
}
// Converts a native IPv4 sockaddr_storage into the guest sys_net layout.
// Only AF_INET (or an unset AF_UNSPEC) is supported.
sys_net_sockaddr native_addr_to_sys_net_addr(const ::sockaddr_storage& native_addr)
{
	ensure(native_addr.ss_family == AF_INET || native_addr.ss_family == AF_UNSPEC);

	sys_net_sockaddr sn_addr;
	sys_net_sockaddr_in* paddr = reinterpret_cast<sys_net_sockaddr_in*>(&sn_addr);

	paddr->sin_len = sizeof(sys_net_sockaddr_in);
	paddr->sin_family = SYS_NET_AF_INET;
	// Port/address are already in network byte order; bit_cast preserves bytes
	paddr->sin_port = std::bit_cast<be_t<u16>, u16>(reinterpret_cast<const sockaddr_in*>(&native_addr)->sin_port);
	paddr->sin_addr = std::bit_cast<be_t<u32>, u32>(reinterpret_cast<const sockaddr_in*>(&native_addr)->sin_addr.s_addr);
	paddr->sin_zero = 0;

	return sn_addr;
}

// Converts a guest sys_net IPv4 address into a native sockaddr_in.
::sockaddr_in sys_net_addr_to_native_addr(const sys_net_sockaddr& sn_addr)
{
	ensure(sn_addr.sa_family == SYS_NET_AF_INET);

	const sys_net_sockaddr_in* psa_in = reinterpret_cast<const sys_net_sockaddr_in*>(&sn_addr);

	::sockaddr_in native_addr{};
	native_addr.sin_family = AF_INET;
	// Byte-preserving copies: guest fields are stored big-endian already
	native_addr.sin_port = std::bit_cast<u16>(psa_in->sin_port);
	native_addr.sin_addr.s_addr = std::bit_cast<u32>(psa_in->sin_addr);
#ifdef _WIN32
	// Windows doesn't support sending packets to 0.0.0.0 but it works on unixes, send to 127.0.0.1 instead
	if (native_addr.sin_addr.s_addr == 0x00000000)
	{
		sys_net.warning("[Native] Redirected 0.0.0.0 to 127.0.0.1");
		native_addr.sin_addr.s_addr = std::bit_cast<u32, be_t<u32>>(0x7F000001);
	}
#endif
	return native_addr;
}
// Returns true if the address is globally routable, i.e. not a private
// (RFC 1918), loopback or link-local (RFC 3927) address.
// s_addr holds the bytes in network order, so ip[0] is the leading octet
// regardless of host endianness.
bool is_ip_public_address(const ::sockaddr_in& addr)
{
	const unsigned char* ip = reinterpret_cast<const unsigned char*>(&addr.sin_addr.s_addr);

	if ((ip[0] == 10) ||                                  // 10.0.0.0/8 private (RFC 1918)
		(ip[0] == 127) ||                                 // 127.0.0.0/8 loopback
		(ip[0] == 169 && ip[1] == 254) ||                 // 169.254.0.0/16 link-local (RFC 3927)
		(ip[0] == 172 && (ip[1] >= 16 && ip[1] <= 31)) || // 172.16.0.0/12 private (RFC 1918)
		(ip[0] == 192 && ip[1] == 168))                   // 192.168.0.0/16 private (RFC 1918)
	{
		return false;
	}

	return true;
}
// Removes 'ppu' from the wait queue of every live socket; returns the
// total number of queue entries removed.
u32 network_clear_queue(ppu_thread& ppu)
{
	u32 cleared = 0;

	idm::select<lv2_socket>([&](u32, lv2_socket& sock)
		{
			cleared += sock.clear_queue(&ppu);
		});

	return cleared;
}

#ifdef _WIN32
// Workaround function for WSAPoll not reporting failed connections
void windows_poll(std::vector<pollfd>& fds, unsigned long nfds, int timeout, std::vector<bool>& connecting)
{
	ensure(fds.size() >= nfds);
	ensure(connecting.size() >= nfds);

	// Don't call WSAPoll with zero nfds (errors 10022 or 10038)
	if (std::none_of(fds.begin(), fds.begin() + nfds, [](pollfd& pfd)
			{
				return pfd.fd != INVALID_SOCKET;
			}))
	{
		// Nothing to poll: emulate the timeout wait and bail out
		if (timeout > 0)
		{
			Sleep(timeout);
		}

		return;
	}

	int r = ::WSAPoll(fds.data(), nfds, timeout);

	if (r == SOCKET_ERROR)
	{
		sys_net.error("WSAPoll failed: %s", fmt::win_error{static_cast<unsigned long>(WSAGetLastError()), nullptr});
		return;
	}

	for (unsigned long i = 0; i < nfds; i++)
	{
		if (connecting[i])
		{
			if (!fds[i].revents)
			{
				// WSAPoll stays silent on failed connects; query SO_ERROR
				// directly to detect them and synthesize the poll result
				int error = 0;
				socklen_t intlen = sizeof(error);
				if (getsockopt(fds[i].fd, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&error), &intlen) == -1 || error != 0)
				{
					// Connection silently failed
					connecting[i] = false;
					fds[i].revents = POLLERR | POLLHUP | (fds[i].events & (POLLIN | POLLOUT));
				}
			}
			else
			{
				// Any reported event means the connect attempt resolved
				connecting[i] = false;
			}
		}
	}
}
#endif
| 5,954
|
C++
|
.cpp
| 211
| 25.677725
| 139
| 0.683842
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,373
|
lv2_socket_raw.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_raw.cpp
|
#include "stdafx.h"
#include "lv2_socket_raw.h"
#include "Emu/NP/vport0.h"
LOG_CHANNEL(sys_net);
// Tag type used with g_fxo to give each raw-socket entry point its own
// "already logged" flag, so every TODO message is emitted at most once.
template <typename T>
struct socket_raw_logging
{
	socket_raw_logging() = default;

	socket_raw_logging(const socket_raw_logging&) = delete;
	socket_raw_logging& operator=(const socket_raw_logging&) = delete;

	atomic_t<bool> logged = false;
};

// Logs 'message' once per emulator session, keyed by the unique tag class.
#define LOG_ONCE(raw_var, message) \
	if (!g_fxo->get<socket_raw_logging<class raw_var>>().logged.exchange(true)) \
	{ \
		sys_net.todo(message); \
	}

// Raw sockets are largely unimplemented stubs; no extra state to set up.
lv2_socket_raw::lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol)
	: lv2_socket(family, type, protocol)
{
}

// Savestate load: only base socket state is serialized for raw sockets.
lv2_socket_raw::lv2_socket_raw(utils::serial& ar, lv2_socket_type type)
	: lv2_socket(stx::make_exact(ar), type)
{
}

void lv2_socket_raw::save(utils::serial& ar)
{
	lv2_socket::save(ar, true);
}
// accept/connect are meaningless on raw sockets: fatal log (emulation bug).
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
{
	sys_net.fatal("[RAW] accept() called on a RAW socket");
	return {};
}

std::optional<s32> lv2_socket_raw::connect([[maybe_unused]] const sys_net_sockaddr& addr)
{
	sys_net.fatal("[RAW] connect() called on a RAW socket");
	return CELL_OK;
}

s32 lv2_socket_raw::connect_followup()
{
	sys_net.fatal("[RAW] connect_followup() called on a RAW socket");
	return CELL_OK;
}

// The remaining operations are unimplemented: log once and return defaults.
std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getpeername()
{
	LOG_ONCE(raw_getpeername, "[RAW] getpeername() called on a RAW socket");
	return {};
}

s32 lv2_socket_raw::listen([[maybe_unused]] s32 backlog)
{
	LOG_ONCE(raw_listen, "[RAW] listen() called on a RAW socket");
	return {};
}

s32 lv2_socket_raw::bind([[maybe_unused]] const sys_net_sockaddr& addr)
{
	LOG_ONCE(raw_bind, "lv2_socket_raw::bind");
	return {};
}

std::pair<s32, sys_net_sockaddr> lv2_socket_raw::getsockname()
{
	LOG_ONCE(raw_getsockname, "lv2_socket_raw::getsockname");
	return {};
}

std::tuple<s32, lv2_socket::sockopt_data, u32> lv2_socket_raw::getsockopt([[maybe_unused]] s32 level, [[maybe_unused]] s32 optname, [[maybe_unused]] u32 len)
{
	LOG_ONCE(raw_getsockopt, "lv2_socket_raw::getsockopt");
	return {};
}
// Mostly unimplemented; only SYS_NET_SO_NBIO (non-blocking mode) is honoured.
s32 lv2_socket_raw::setsockopt(s32 level, s32 optname, const std::vector<u8>& optval)
{
	LOG_ONCE(raw_setsockopt, "lv2_socket_raw::setsockopt");

	// TODO
	// Guard against undersized option buffers before reading a 32-bit value
	if (level == SYS_NET_SOL_SOCKET && optname == SYS_NET_SO_NBIO && optval.size() >= sizeof(be_t<s32>))
	{
		so_nbio = *reinterpret_cast<const be_t<s32>*>(optval.data());
	}

	return {};
}
// Receive stub: non-blocking callers get EWOULDBLOCK, blocking callers
// wait forever (nullopt) since no data is ever produced.
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> lv2_socket_raw::recvfrom(s32 flags, [[maybe_unused]] u32 len, [[maybe_unused]] bool is_lock)
{
	LOG_ONCE(raw_recvfrom, "lv2_socket_raw::recvfrom");

	if (so_nbio || (flags & SYS_NET_MSG_DONTWAIT))
	{
		return {{-SYS_NET_EWOULDBLOCK, {}, {}}};
	}

	return {};
}

// Send stub: pretends the whole buffer was transmitted.
std::optional<s32> lv2_socket_raw::sendto([[maybe_unused]] s32 flags, [[maybe_unused]] const std::vector<u8>& buf, [[maybe_unused]] std::optional<sys_net_sockaddr> opt_sn_addr, [[maybe_unused]] bool is_lock)
{
	LOG_ONCE(raw_sendto, "lv2_socket_raw::sendto");
	return ::size32(buf);
}

std::optional<s32> lv2_socket_raw::sendmsg([[maybe_unused]] s32 flags, [[maybe_unused]] const sys_net_msghdr& msg, [[maybe_unused]] bool is_lock)
{
	LOG_ONCE(raw_sendmsg, "lv2_socket_raw::sendmsg");
	return {};
}

// No native resources are held, so close/shutdown/poll/select are no-ops.
void lv2_socket_raw::close()
{
	LOG_ONCE(raw_close, "lv2_socket_raw::close");
}

s32 lv2_socket_raw::shutdown([[maybe_unused]] s32 how)
{
	LOG_ONCE(raw_shutdown, "lv2_socket_raw::shutdown");
	return {};
}

s32 lv2_socket_raw::poll([[maybe_unused]] sys_net_pollfd& sn_pfd, [[maybe_unused]] pollfd& native_pfd)
{
	LOG_ONCE(raw_poll, "lv2_socket_raw::poll");
	return {};
}

std::tuple<bool, bool, bool> lv2_socket_raw::select([[maybe_unused]] bs_t<lv2_socket::poll_t> selected, [[maybe_unused]] pollfd& native_pfd)
{
	LOG_ONCE(raw_select, "lv2_socket_raw::select");
	return {};
}
| 4,042
|
C++
|
.cpp
| 118
| 32.542373
| 207
| 0.675225
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,374
|
CPUThread.cpp
|
RPCS3_rpcs3/rpcs3/Emu/CPU/CPUThread.cpp
|
#include "stdafx.h"
#include "CPUThread.h"
#include "CPUDisAsm.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/Memory/vm_locking.h"
#include "Emu/Memory/vm_reservation.h"
#include "Emu/IdManager.h"
#include "Emu/GDB.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/perf_meter.hpp"
#include "util/asm.hpp"
#include <thread>
#include <unordered_map>
#include <map>
#if defined(ARCH_X64)
#include <emmintrin.h>
#endif
DECLARE(cpu_thread::g_threads_created){0};
DECLARE(cpu_thread::g_threads_deleted){0};
DECLARE(cpu_thread::g_suspend_counter){0};
LOG_CHANNEL(profiler);
LOG_CHANNEL(sys_log, "SYS");
static thread_local u32 s_tls_thread_slot = -1;
// Suspend counter stamp
static thread_local u64 s_tls_sctr = -1;
extern thread_local void(*g_tls_log_control)(const char* fmt, u64 progress);
extern thread_local std::string(*g_tls_log_prefix)();
// Formatter for a single cpu_flag value (used by the log/debugger output).
template <>
void fmt_class_string<cpu_flag>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](cpu_flag f)
		{
			switch (f)
			{
			case cpu_flag::stop: return "STOP";
			case cpu_flag::exit: return "EXIT";
			case cpu_flag::wait: return "w";
			case cpu_flag::temp: return "t";
			case cpu_flag::pause: return "p";
			case cpu_flag::suspend: return "s";
			case cpu_flag::ret: return "ret";
			case cpu_flag::again: return "a";
			case cpu_flag::signal: return "sig";
			case cpu_flag::memory: return "mem";
			case cpu_flag::pending: return "pend";
			case cpu_flag::pending_recheck: return "pend-re";
			case cpu_flag::notify: return "ntf";
			case cpu_flag::yield: return "y";
			case cpu_flag::preempt: return "PREEMPT";
			case cpu_flag::dbg_global_pause: return "G-PAUSE";
			case cpu_flag::dbg_pause: return "PAUSE";
			case cpu_flag::dbg_step: return "STEP";
			case cpu_flag::__bitset_enum_max: break;
			}

			return unknown;
		});
}

// Formatter for a whole cpu_flag bitset, e.g. "[w|sig]".
template<>
void fmt_class_string<bs_t<cpu_flag>>::format(std::string& out, u64 arg)
{
	format_bitset(out, arg, "[", "|", "]", &fmt_class_string<cpu_flag>::format);
}
// CPU profiler thread: periodically samples the current block hash of every
// registered PPU/SPU thread and prints per-block frequency charts.
struct cpu_prof
{
	// PPU/SPU id enqueued for registration
	lf_queue<u32> registered;

	// Per-thread sampling accumulator.
	struct sample_info
	{
		// Block occurences: name -> sample_count
		std::unordered_map<u64, u64, value_hash<u64>> freq;

		// Total number of samples
		u64 samples = 0, idle = 0;

		// Total number of sample collected in reservation operation
		u64 reservation_samples = 0;

		// Avoid printing replicas or when not much changed
		u64 new_samples = 0;
		static constexpr u64 min_print_samples = 500;
		static constexpr u64 min_print_all_samples = min_print_samples * 20;

		// Clears all counters for reuse.
		void reset()
		{
			freq.clear();
			samples = 0;
			idle = 0;
			new_samples = 0;
			reservation_samples = 0;
		}

		// Renders the (count -> block hash) chart as log text, capped at an
		// arbitrary character budget.
		static std::string format(const std::multimap<u64, u64, std::greater<u64>>& chart, u64 samples, u64 idle, bool extended_print = false)
		{
			// Print results
			std::string results;
			results.reserve(extended_print ? 10100 : 5100);

			// Fraction of non-idle samples
			const f64 busy = 1. * (samples - idle) / samples;

			for (auto& [count, name] : chart)
			{
				const f64 _frac = count / busy / samples;

				// Print only 7 hash characters out of 11 (which covers roughly 48 bits)
				fmt::append(results, "\n\t[%s", fmt::base57(be_t<u64>{name}));
				results.resize(results.size() - 4);

				// Print chunk address from lowest 16 bits
				fmt::append(results, "...chunk-0x%05x]: %.4f%% (%u)", (name & 0xffff) * 4, _frac * 100., count);

				if (results.size() >= (extended_print ? 10000 : 5000))
				{
					// Stop printing after reaching some arbitrary limit in characters
					break;
				}
			}

			return results;
		}

		// Percentage helper, saturating at 0 and 100.
		static f64 get_percent(u64 dividend, u64 divisor)
		{
			if (!dividend)
			{
				return 0;
			}

			if (dividend >= divisor)
			{
				return 100;
			}

			return 100. * dividend / divisor;
		}

		// Print info
		void print(const std::shared_ptr<cpu_thread>& ptr)
		{
			// Skip the chart if too few fresh samples or nothing but idle time
			if (new_samples < min_print_samples || samples == idle)
			{
				if (cpu_flag::exit - ptr->state)
				{
					profiler.notice("Thread \"%s\" [0x%08x]: %u samples (%.4f%% idle), %u new, %u reservation (%.4f%%): Not enough new samples have been collected since the last print.", ptr->get_name(), ptr->id, samples, get_percent(idle, samples), new_samples, reservation_samples, get_percent(reservation_samples, samples - idle));
				}
				return;
			}

			// Make reversed map: sample_count -> name
			std::multimap<u64, u64, std::greater<u64>> chart;

			for (auto& [name, count] : freq)
			{
				chart.emplace(count, name);
			}

			// Print results
			const std::string results = format(chart, samples, idle);
			profiler.notice("Thread \"%s\" [0x%08x]: %u samples (%.4f%% idle), %u new, %u reservation (%.4f%%):\n%s", ptr->get_name(), ptr->id, samples, get_percent(idle, samples), new_samples, reservation_samples, get_percent(reservation_samples, samples - idle), results);

			new_samples = 0;
		}

		// Prints each thread's chart, then an aggregate chart over all threads
		// accumulated into 'all_info'.
		static void print_all(std::unordered_map<std::shared_ptr<cpu_thread>, sample_info>& threads, sample_info& all_info)
		{
			u64 new_samples = 0;

			// Print all results and cleanup
			for (auto& [ptr, info] : threads)
			{
				new_samples += info.new_samples;
				info.print(ptr);
			}

			std::multimap<u64, u64, std::greater<u64>> chart;

			for (auto& [_, info] : threads)
			{
				// This function collects thread information regardless of 'new_samples' member state
				for (auto& [name, count] : info.freq)
				{
					all_info.freq[name] += count;
				}

				all_info.samples += info.samples;
				all_info.idle += info.idle;
				all_info.reservation_samples += info.reservation_samples;
			}

			const u64 samples = all_info.samples;
			const u64 idle = all_info.idle;
			const u64 reservation = all_info.reservation_samples;
			const auto& freq = all_info.freq;

			if (samples == idle)
			{
				return;
			}

			if (new_samples < min_print_all_samples && thread_ctrl::state() != thread_state::aborting)
			{
				profiler.notice("All Threads: %u samples (%.4f%% idle), %u new, %u reservation (%.4f%%): Not enough new samples have been collected since the last print.", samples, get_percent(idle, samples), new_samples, reservation, get_percent(reservation, samples - idle));
				return;
			}

			for (auto& [name, count] : freq)
			{
				chart.emplace(count, name);
			}

			const std::string results = format(chart, samples, idle, true);
			profiler.notice("All Threads: %u samples (%.4f%% idle), %u new, %u reservation (%.4f%%):%s", samples, get_percent(idle, samples), new_samples, reservation, get_percent(reservation, samples - idle), results);
		}
	};

	// Aggregate over all threads, filled by print_all().
	sample_info all_threads_info{};

	// Profiler thread main loop.
	void operator()()
	{
		std::unordered_map<std::shared_ptr<cpu_thread>, sample_info> threads;

		while (thread_ctrl::state() != thread_state::aborting)
		{
			bool flush = false;

			// Handle registration channel
			for (u32 id : registered.pop_all())
			{
				if (id == 0)
				{
					// Handle id zero as a command to flush results
					flush = true;
					continue;
				}

				std::shared_ptr<cpu_thread> ptr;

				// Top id byte encodes the thread class: 1 = PPU, 2 = SPU
				if (id >> 24 == 1)
				{
					ptr = idm::get<named_thread<ppu_thread>>(id);
				}
				else if (id >> 24 == 2)
				{
					ptr = idm::get<named_thread<spu_thread>>(id);
				}
				else
				{
					profiler.error("Invalid Thread ID: 0x%08x", id);
					continue;
				}

				if (ptr && cpu_flag::exit - ptr->state)
				{
					auto [found, add] = threads.try_emplace(std::move(ptr));

					if (!add)
					{
						// Overwritten (impossible?): print previous data
						found->second.print(found->first);
						found->second.reset();
					}
				}
			}

			if (threads.empty())
			{
				// Wait for messages if no work (don't waste CPU)
				thread_ctrl::wait_on(registered);
				continue;
			}

			// Sample active threads
			for (auto& [ptr, info] : threads)
			{
				if (auto state = +ptr->state; cpu_flag::exit - state)
				{
					// Get short function hash
					const u64 name = atomic_storage<u64>::load(ptr->block_hash);

					// Append occurrence
					info.samples++;

					if (cpu_flag::wait - state)
					{
						info.freq[name]++;
						info.new_samples++;

						if (auto spu = ptr->try_get<spu_thread>())
						{
							if (spu->raddr)
							{
								info.reservation_samples++;
							}
						}

						// Append verification time to fixed common name 0000000...chunk-0x3fffc
						if (name >> 16 && (name & 0xffff) == 0)
							info.freq[0xffff]++;
					}
					else
					{
						if (state & (cpu_flag::dbg_pause + cpu_flag::dbg_global_pause))
						{
							// Idle state caused by emulation pause is not accounted for
							continue;
						}

						info.idle++;
					}
				}
				else
				{
					// Thread exited: emit its final chart
					info.print(ptr);
				}
			}

			if (flush)
			{
				profiler.success("Flushing profiling results...");
				all_threads_info = {};
				sample_info::print_all(threads, all_threads_info);
			}

			if (Emu.IsPaused())
			{
				thread_ctrl::wait_for(5000);
				continue;
			}

			if (!g_cfg.core.spu_debug)
			{
				// Reduce accuracy in favor of performance when enabled alone
				thread_ctrl::wait_for(60, false);
				continue;
			}

			// Wait, roughly for 20µs
			thread_ctrl::wait_for(20, false);
		}

		// Print all remaining results
		sample_info::print_all(threads, all_threads_info);
	}

	static constexpr auto thread_name = "CPU Profiler"sv;
};
using cpu_profiler = named_thread<cpu_prof>;
// Query accumulated profiler statistics for a given program (chunk) hash.
// Returns the percentage of non-idle samples attributed to that program, or 0
// when the profiler is absent/unfinished or the emulator has not stopped yet.
extern f64 get_cpu_program_usage_percent(u64 hash)
{
	const auto prof = g_fxo->try_get<cpu_profiler>();

	if (!prof || !(*prof == thread_state::finished) || !Emu.IsStopped())
	{
		return 0;
	}

	u64 matched = 0;

	for (const auto& [name, count] : prof->all_threads_info.freq)
	{
		// Entries are grouped by hash with the low 16 bits masked off
		if ((name & -65536) == hash)
		{
			matched += count;
		}
	}

	if (!matched)
	{
		return 0;
	}

	// Never report exactly zero for a program that was actually sampled
	return std::max<f64>(0.0001, static_cast<f64>(matched) * 100 / (prof->all_threads_info.samples - prof->all_threads_info.idle));
}
// TLS pointer to the cpu_thread object running on the current OS thread (if any)
thread_local DECLARE(cpu_thread::g_tls_this_thread) = nullptr;

// Total number of CPU threads
static atomic_t<u64, 64> s_cpu_counter{0};

// List of posted tasks for suspend_all
//static atomic_t<cpu_thread::suspend_work*> s_cpu_work[128]{};

// Linked list of pushed tasks for suspend_all
static atomic_t<cpu_thread::suspend_work*> s_pushed{};

// Lock for suspend_all operations
static shared_mutex s_cpu_lock;

// Bit allocator for threads which need to be suspended
static atomic_t<u128> s_cpu_bits{};

// List of active threads which need to be suspended
static atomic_t<cpu_thread*> s_cpu_list[128]{};
// Registry of running PPU/SPU threads used by cpu_thread::suspend_all:
// each thread occupies one slot in s_cpu_list, with s_cpu_bits acting as the
// bit allocator tracking which slots are in use.
namespace cpu_counter
{
	// Register the given (current) thread in a free slot; PPU/SPU threads only.
	void add(cpu_thread* _this) noexcept
	{
		switch (_this->get_class())
		{
		case thread_class::ppu:
		case thread_class::spu:
			break;
		default: return;
		}

		std::lock_guard lock(s_cpu_lock);

		u32 id = -1;

		for (u64 i = 0;; i++)
		{
			// Try to atomically claim the lowest clear bit of the allocator
			const auto [bits, ok] = s_cpu_bits.fetch_op([](u128& bits)
			{
				if (~bits) [[likely]]
				{
					// Set lowest clear bit
					bits |= bits + 1;
					return true;
				}

				return false;
			});

			if (ok) [[likely]]
			{
				// Get actual slot number
				id = utils::ctz128(~bits);

				// Register thread
				if (s_cpu_list[id].compare_and_swap_test(nullptr, _this)) [[likely]]
				{
					break;
				}

				sys_log.fatal("Unexpected slot registration failure (%u).", id);
				id = -1;
				continue;
			}

			if (i > 50)
			{
				sys_log.fatal("Too many threads.");
				return;
			}

			busy_wait(300);
		}

		// Remember own slot index in TLS for fast removal later
		s_tls_thread_slot = id;
	}

	// Clear the allocator bit for the given slot
	static void remove_cpu_bit(u32 bit)
	{
		s_cpu_bits.atomic_op([=](u128& val)
		{
			val &= ~(u128{1} << (bit % 128));
		});
	}

	// Unregister the given (current) thread from its slot; no-op if unregistered.
	void remove(cpu_thread* _this) noexcept
	{
		// Return if not registered
		const u32 slot = s_tls_thread_slot;

		if (slot == umax)
		{
			return;
		}

		if (slot >= std::size(s_cpu_list))
		{
			sys_log.fatal("Index out of bounds (%u).", slot);
			return;
		}

		// Asynchronous unregister
		if (!s_cpu_list[slot].compare_and_swap_test(_this, nullptr))
		{
			sys_log.fatal("Inconsistency for array slot %u", slot);
			return;
		}

		remove_cpu_bit(slot);

		s_tls_thread_slot = -1;
	}

	// Invoke func for every registered thread whose bit is set in `copy`.
	// The callback may return false to drop the corresponding bit; the mask of
	// bits whose callbacks returned true (and whose slots were occupied) is returned.
	template <typename F>
	u128 for_all_cpu(/*mutable*/ u128 copy, F func) noexcept
	{
		for (u128 bits = copy; bits; bits &= bits - 1)
		{
			const u32 index = utils::ctz128(bits);

			if (cpu_thread* cpu = s_cpu_list[index].load())
			{
				if constexpr (std::is_invocable_v<F, cpu_thread*, u32>)
				{
					if (!func(cpu, index))
						copy &= ~(u128{1} << index);
					continue;
				}

				if constexpr (std::is_invocable_v<F, cpu_thread*>)
				{
					if (!func(cpu))
						copy &= ~(u128{1} << index);
					continue;
				}

				sys_log.fatal("cpu_counter::for_all_cpu: bad callback");
			}
			else
			{
				// Slot was emptied concurrently: drop it from the mask
				copy &= ~(u128{1} << index);
			}
		}

		return copy;
	}
}
// Main loop entry of a CPU thread: sets up TLS/affinity, registers with the
// profiler and thread counter, repeatedly runs cpu_task() until an exit/stop
// condition, then performs graceful cleanup.
void cpu_thread::operator()()
{
	const auto old_prefix = g_tls_log_prefix;

	g_tls_this_thread = this;

	if (g_cfg.core.thread_scheduler != thread_scheduler_mode::os)
	{
		thread_ctrl::set_thread_affinity_mask(thread_ctrl::get_affinity_mask(get_class()));
	}

	while (!g_fxo->is_init<cpu_profiler>())
	{
		if (Emu.IsStopped())
		{
			return;
		}

		// Can we have a little race, right? First thread is started concurrently with g_fxo->init()
		thread_ctrl::wait_for(1000);
	}

	switch (get_class())
	{
	case thread_class::ppu:
	{
		//g_fxo->get<cpu_profiler>().registered.push(id);
		break;
	}
	case thread_class::spu:
	{
		if (g_cfg.core.spu_prof)
		{
			g_fxo->get<cpu_profiler>().registered.push(id);
		}

		break;
	}
	default: break;
	}

	// Register thread in g_cpu_array
	s_cpu_counter++;

	// RAII guard ensuring unregistration even if the thread terminates abnormally
	static thread_local struct thread_cleanup_t
	{
		cpu_thread* _this = nullptr;
		std::string name;
		std::string(*log_prefix)() = nullptr;

		void cleanup()
		{
			if (_this == nullptr)
			{
				return;
			}

			if (auto ptr = vm::g_tls_locked)
			{
				ptr->compare_and_swap(_this, nullptr);
			}

			g_tls_log_control = [](const char*, u64){};

			if (s_tls_thread_slot != umax)
			{
				cpu_counter::remove(_this);
			}

			s_cpu_lock.lock_unlock();

			s_cpu_counter--;

			g_tls_log_prefix = log_prefix;
			g_tls_this_thread = nullptr;

			g_threads_deleted++;

			_this = nullptr;
		}

		~thread_cleanup_t()
		{
			if (_this)
			{
				sys_log.warning("CPU Thread '%s' terminated abnormally!", name);
				cleanup();
			}
		}
	} cleanup;

	cleanup._this = this;
	cleanup.name = thread_ctrl::get_name();
	cleanup.log_prefix = old_prefix;

	// Check thread status
	while (!(state & cpu_flag::exit) && thread_ctrl::state() != thread_state::aborting)
	{
		// Check stop status
		const auto state0 = +state;

		if (is_stopped(state0 - cpu_flag::stop))
		{
			break;
		}

		if (!(state0 & cpu_flag::stop))
		{
			cpu_task();
			state += cpu_flag::wait;

			if (state & cpu_flag::ret && state.test_and_reset(cpu_flag::ret))
			{
				cpu_return();
			}

			continue;
		}

		// Stopped: sleep until the state changes, then process a pending return
		state.wait(state0);

		if (state & cpu_flag::ret && state.test_and_reset(cpu_flag::ret))
		{
			cpu_return();
		}
	}

	// Complete cleanup gracefully
	cleanup.cleanup();
}
// Out-of-line destructor definition; no explicit cleanup is required here
// (unregistration happens in operator()'s cleanup path).
cpu_thread::~cpu_thread()
{
}
// Construct a CPU thread with the given ID, synchronizing the initial state
// flags with the emulator's current pause/stop status.
cpu_thread::cpu_thread(u32 id)
	: id(id)
{
	while (Emu.GetStatus() == system_state::paused)
	{
		// Solve race between Emulator::Pause and this construction of thread which most likely is guarded by IDM mutex
		state += cpu_flag::dbg_global_pause;

		if (Emu.GetStatus() != system_state::paused)
		{
			// Emulator::Resume was called inbetween
			state -= cpu_flag::dbg_global_pause;

			// Recheck if state is inconsistent
			continue;
		}

		break;
	}

	if (Emu.IsStopped())
	{
		// For similar race as above
		state += cpu_flag::exit;
	}

	g_threads_created++;

	if (u32* pc2 = get_pc2())
	{
		// Invalidate the single-step comparison PC
		*pc2 = umax;
	}
}
// Default sleep implementation: block until the state mask changes from `old`
void cpu_thread::cpu_wait(bs_t<cpu_flag> old)
{
	state.wait(old);
}
// Dummy atomic used only as a timed-wait target for the plain-yield sleep below
static atomic_t<u32> s_dummy_atomic = 0;

// Process all pending state flags for the current thread. Returns true when the
// thread must stop/return (caller leaves its execution loop), false when it may
// continue. Handles pause/suspend/debug/memory/yield flags, suspend_all
// synchronization and pending work, all while keeping state updates atomic.
bool cpu_thread::check_state() noexcept
{
	bool cpu_sleep_called = false;
	bool cpu_memory_checked = false;
	bool cpu_can_stop = true;
	bool escape{}, retval{};

	while (true)
	{
		// Process all flags in a single atomic op
		bs_t<cpu_flag> state1;
		auto state0 = state.fetch_op([&](bs_t<cpu_flag>& flags)
		{
			bool store = false;

			if (flags & cpu_flag::pause && s_tls_thread_slot != umax)
			{
				// Save value before state is saved and cpu_flag::wait is observed
				if (s_tls_sctr == umax)
				{
					u64 ctr = g_suspend_counter;

					if (flags & cpu_flag::wait)
					{
						if ((ctr & 3) == 2)
						{
							s_tls_sctr = ctr;
						}
					}
					else
					{
						s_tls_sctr = ctr;
					}
				}
			}
			else
			{
				// Cleanup after asynchronous remove()
				if (flags & cpu_flag::pause && s_tls_thread_slot == umax)
				{
					flags -= cpu_flag::pause;
					store = true;
				}

				s_tls_sctr = -1;
			}

			if (flags & cpu_flag::temp) [[unlikely]]
			{
				// Sticky flag, indicates check_state() is not allowed to return true
				flags -= cpu_flag::temp;
				cpu_can_stop = false;
				store = true;
			}

			if (cpu_can_stop && flags & cpu_flag::signal)
			{
				flags -= cpu_flag::signal;
				cpu_sleep_called = false;
				store = true;
			}

			if (flags & cpu_flag::notify)
			{
				flags -= cpu_flag::notify;
				store = true;
			}

			// Can't process dbg_step if we only paused temporarily
			if (cpu_can_stop && flags & cpu_flag::dbg_step)
			{
				if (u32 pc = get_pc(), *pc2 = get_pc2(); pc != umax && pc2)
				{
					if (pc != *pc2)
					{
						// PC advanced past the stored step point: convert to a pause
						flags -= cpu_flag::dbg_step;
						flags += cpu_flag::dbg_pause;
						store = true;
					}
				}
				else
				{
					// Can't test, ignore flag
					flags -= cpu_flag::dbg_step;
					store = true;
				}
			}

			// Atomically clean wait flag and escape
			if (!is_stopped(flags) && flags.none_of(cpu_flag::ret))
			{
				// Check pause flags which hold thread inside check_state (ignore suspend/debug flags on cpu_flag::temp)
				if (flags & cpu_flag::pause || (!cpu_memory_checked && flags & cpu_flag::memory) || (cpu_can_stop && flags & (cpu_flag::dbg_global_pause + cpu_flag::dbg_pause + cpu_flag::suspend + cpu_flag::yield + cpu_flag::preempt)))
				{
					if (!(flags & cpu_flag::wait))
					{
						flags += cpu_flag::wait;
						store = true;
					}

					if (flags & (cpu_flag::yield + cpu_flag::preempt) && cpu_can_stop)
					{
						flags -= (cpu_flag::yield + cpu_flag::preempt);
						store = true;
					}

					escape = false;
					state1 = flags;
					return store;
				}

				if (flags & (cpu_flag::wait + cpu_flag::memory))
				{
					flags -= (cpu_flag::wait + cpu_flag::memory);
					store = true;
				}

				if (s_tls_thread_slot == umax)
				{
					if (cpu_flag::wait - this->state.load())
					{
						// Force wait flag (must be set during ownership of s_cpu_lock), this makes the atomic op fail as a side effect
						this->state += cpu_flag::wait;
						store = true;
					}

					// Restore thread in the suspend list
					cpu_counter::add(this);
				}

				retval = false;
			}
			else
			{
				if (flags & cpu_flag::wait)
				{
					flags -= cpu_flag::wait;
					store = true;
				}

				retval = cpu_can_stop;
			}

			escape = true;
			state1 = flags;
			return store;
		}).first;

		if (state0 & cpu_flag::preempt && cpu_can_stop)
		{
			if (cpu_flag::wait - state0)
			{
				if (!escape || !retval)
				{
					// Yield itself
					state0 += cpu_flag::yield;
					escape = false;
				}
			}

			// Propagate yield to all other active threads
			if (const u128 bits = s_cpu_bits)
			{
				reader_lock lock(s_cpu_lock);

				cpu_counter::for_all_cpu(bits & s_cpu_bits, [](cpu_thread* cpu)
				{
					if (cpu->state.none_of(cpu_flag::wait + cpu_flag::yield))
					{
						cpu->state += cpu_flag::yield;
					}

					return true;
				});
			}
		}

		if (escape)
		{
			if (vm::g_range_lock_bits[1] && vm::g_tls_locked && *vm::g_tls_locked == this)
			{
				// A range lock is held against this thread: re-enter the memory wait path
				state += cpu_flag::wait + cpu_flag::memory;
				cpu_sleep_called = false;
				cpu_memory_checked = false;
				continue;
			}

			if (cpu_can_stop && state0 & cpu_flag::pending)
			{
				// Execute pending work
				cpu_work();

				if ((state1 ^ state) - cpu_flag::pending)
				{
					// Work could have changed flags
					// Reset internal flags as if check_state() has just been called
					cpu_sleep_called = false;
					cpu_memory_checked = false;
					continue;
				}
			}

			if (retval)
			{
				cpu_on_stop();
			}

			ensure(cpu_can_stop || !retval);
			return retval;
		}

		if (cpu_can_stop && !cpu_sleep_called && state0 & cpu_flag::suspend)
		{
			cpu_sleep();
			cpu_sleep_called = true;
			cpu_memory_checked = false;

			if (s_tls_thread_slot != umax)
			{
				// Exclude inactive threads from the suspend list (optimization)
				cpu_counter::remove(this);
			}

			continue;
		}

		if (state0 & ((cpu_can_stop ? cpu_flag::suspend : cpu_flag::dbg_pause) + cpu_flag::dbg_global_pause + cpu_flag::dbg_pause))
		{
			if (state0 & cpu_flag::dbg_pause)
			{
				g_fxo->get<gdb_server>().pause_from(this);
			}

			cpu_wait(state1);
		}
		else
		{
			if (state0 & cpu_flag::memory)
			{
				vm::passive_lock(*this);
				cpu_memory_checked = true;
				continue;
			}

			// If only cpu_flag::pause was set, wait on suspend counter instead
			if (state0 & cpu_flag::pause)
			{
				// Wait for current suspend_all operation
				for (u64 i = 0;; i++)
				{
					u64 ctr = g_suspend_counter;

					if (ctr >> 2 == s_tls_sctr >> 2 && state & cpu_flag::pause)
					{
						if (i < 20 || ctr & 1)
						{
							busy_wait(300);
						}
						else
						{
							// TODO: fix the workaround
							g_suspend_counter.wait(ctr, atomic_wait_timeout{10'000});
						}
					}
					else
					{
						s_tls_sctr = -1;
						break;
					}
				}

				continue;
			}

			if (state0 & cpu_flag::yield && cpu_can_stop)
			{
				if (auto spu = try_get<spu_thread>())
				{
					if (spu->raddr && spu->rtime == vm::reservation_acquire(spu->raddr) && spu->getllar_spin_count < 10)
					{
						// Reservation operation is a critical section (but this may result in false positives)
						continue;
					}
				}
				else if (auto ppu = try_get<ppu_thread>())
				{
					if (u32 usec = ppu->hw_sleep_time)
					{
						thread_ctrl::wait_for_accurate(usec);
						ppu->hw_sleep_time = 0;
						ppu->raddr = 0; // Also lose reservation if there is any (reservation is unsaved on hw thread switch)
						continue;
					}

					if (ppu->raddr && ppu->rtime == vm::reservation_acquire(ppu->raddr))
					{
						// Same
						continue;
					}
				}

				// Short sleep when yield flag is present alone (makes no sense when other methods which can stop thread execution have been done)
				s_dummy_atomic.wait(0, atomic_wait_timeout{80'000});
			}
		}
	}
}
void cpu_thread::notify()
{
state.notify_one();
// Downcast to correct type
switch (get_class())
{
case thread_class::ppu:
{
thread_ctrl::notify(*static_cast<named_thread<ppu_thread>*>(this));
break;
}
case thread_class::spu:
{
thread_ctrl::notify(*static_cast<named_thread<spu_thread>*>(this));
break;
}
case thread_class::rsx:
{
break;
}
default:
{
fmt::throw_exception("Invalid cpu_thread type");
}
}
}
// thread_state assignment idiom: raise the exit flag on the thread and notify
// it if it was in a waiting state so it can observe the flag promptly.
cpu_thread& cpu_thread::operator=(thread_state)
{
	if (state & cpu_flag::exit)
	{
		// Must be notified elsewhere or self-raised
		return *this;
	}

	const auto old = state.fetch_add(cpu_flag::exit);

	if (old & cpu_flag::wait && old.none_of(cpu_flag::again + cpu_flag::exit))
	{
		state.notify_one();

		if (auto thread = try_get<spu_thread>())
		{
			// Also wake an SPU that may be sleeping on its reservation address
			if (u32 resv = atomic_storage<u32>::load(thread->raddr))
			{
				vm::reservation_notifier_notify(resv);
			}
		}
	}

	return *this;
}
// Atomically add and remove state flags in a single operation, then notify the
// thread if the removal could have unblocked it.
void cpu_thread::add_remove_flags(bs_t<cpu_flag> to_add, bs_t<cpu_flag> to_remove)
{
	bs_t<cpu_flag> result{};

	if (!to_remove)
	{
		// Add-only: no notification can be needed
		state.add_fetch(to_add);
		return;
	}
	else if (!to_add)
	{
		result = state.sub_fetch(to_remove);
	}
	else
	{
		result = state.atomic_op([&](bs_t<cpu_flag>& v)
		{
			v += to_add;
			v -= to_remove;
			return v;
		});
	}

	if (!::is_paused(to_remove) && !::is_stopped(to_remove))
	{
		// No notable change requiring notification
		return;
	}

	if (::is_paused(result) || ::is_stopped(result))
	{
		// Flags that stop thread execution
		return;
	}

	state.notify_one();
}
// Resolve a human-readable name for this thread based on its concrete class
std::string cpu_thread::get_name() const
{
	const thread_class cls = get_class();

	// Downcast to correct type
	if (cls == thread_class::ppu)
	{
		return thread_ctrl::get_name(*static_cast<const named_thread<ppu_thread>*>(this));
	}

	if (cls == thread_class::spu)
	{
		return thread_ctrl::get_name(*static_cast<const named_thread<spu_thread>*>(this));
	}

	if (cpu_thread::get_current() == this && thread_ctrl::get_current())
	{
		return thread_ctrl::get_name();
	}

	if (cls == thread_class::rsx)
	{
		return fmt::format("rsx::thread");
	}

	return fmt::format("Invalid cpu_thread type (0x%x)", id_type());
}
// Read the current program counter of this thread (umax if unavailable)
u32 cpu_thread::get_pc() const
{
	switch (get_class())
	{
	case thread_class::ppu:
	{
		return atomic_storage<u32>::load(static_cast<const ppu_thread*>(this)->cia);
	}
	case thread_class::spu:
	{
		return atomic_storage<u32>::load(static_cast<const spu_thread*>(this)->pc);
	}
	case thread_class::rsx:
	{
		const auto ctrl = static_cast<const rsx::thread*>(this)->ctrl;
		return ctrl ? ctrl->get.load() : umax;
	}
	default:
	{
		return u32{umax};
	}
	}
}
// Get a pointer to the stored single-step comparison PC (nullptr if unsupported)
u32* cpu_thread::get_pc2()
{
	const thread_class cls = get_class();

	if (cls == thread_class::ppu)
	{
		return &static_cast<ppu_thread*>(this)->dbg_step_pc;
	}

	if (cls == thread_class::spu)
	{
		return &static_cast<spu_thread*>(this)->dbg_step_pc;
	}

	if (cls == thread_class::rsx)
	{
		const auto rsx = static_cast<rsx::thread*>(this);
		return rsx->ctrl ? &rsx->dbg_step_pc : nullptr;
	}

	return nullptr;
}
// Get the next thread in the intrusive scheduling list (PPU/SPU only)
cpu_thread* cpu_thread::get_next_cpu()
{
	const thread_class cls = get_class();

	if (cls == thread_class::ppu)
	{
		return static_cast<ppu_thread*>(this)->next_cpu;
	}

	if (cls == thread_class::spu)
	{
		return static_cast<spu_thread*>(this)->next_cpu;
	}

	return nullptr;
}
std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, std::shared_ptr<cpu_thread> handle);
// Append a full human-readable thread dump (misc info, registers, call stack
// and a short disassembly around the current PC) to `ret`.
void cpu_thread::dump_all(std::string& ret) const
{
	std::any func_data;

	ret += dump_misc();
	ret += '\n';
	dump_regs(ret, func_data);
	ret += '\n';
	ret += dump_callstack();
	ret += '\n';

	if (u32 cur_pc = get_pc(); cur_pc != umax)
	{
		// Dump a snippet of currently executed code (may be unreliable with non-static-interpreter decoders)
		auto disasm = make_disasm(this, nullptr);

		const auto rsx = try_get<rsx::thread>();

		// Start roughly 20 instructions/commands before the current PC, print 30
		for (u32 i = (rsx ? rsx->try_get_pc_of_x_cmds_backwards(20, cur_pc).second : cur_pc - 4 * 20), count = 0; count < 30; count++)
		{
			u32 advance = disasm->disasm(i);
			ret += disasm->last_opcode;
			// Advance at least 4 bytes even if disassembly failed
			i += std::max(advance, 4u);
			disasm->dump_pc = i;
			ret += '\n';
		}
	}
}
// Base implementation: no registers to dump (overridden by concrete thread types)
void cpu_thread::dump_regs(std::string&, std::any&) const
{
}
// Format the thread's call stack as a multi-line string
std::string cpu_thread::dump_callstack() const
{
	std::string result;
	fmt::append(result, "Call stack:\n=========\n0x%08x (0x0) called\n", get_pc());

	for (const auto& [addr, sp] : dump_callstack_list())
	{
		fmt::append(result, "> from 0x%08x (sp=0x%08x)\n", addr, sp);
	}

	return result;
}
// Base implementation: no call stack information available
std::vector<std::pair<u32, u32>> cpu_thread::dump_callstack_list() const
{
	return {};
}
// Format a one-line summary of the thread type and its state flags
std::string cpu_thread::dump_misc() const
{
	const char* type_name = "RSX";

	if (get_class() == thread_class::ppu)
	{
		type_name = "PPU";
	}
	else if (get_class() == thread_class::spu)
	{
		type_name = "SPU";
	}

	return fmt::format("Type: %s; State: %s\n", type_name, state.load());
}
// Push a suspend_all workload onto the global list. The first pusher becomes
// the owner: it pauses all registered CPU threads, executes every queued
// workload in priority order, then resumes the threads and notifies waiters.
// Later pushers (with a valid _this) just park their own thread and wait.
// Returns false only when cancel_if_not_suspended was requested and no
// suspension was in progress.
bool cpu_thread::suspend_work::push(cpu_thread* _this) noexcept
{
	// Can't allow pre-set wait bit (it'd be a problem)
	ensure(!_this || !(_this->state & cpu_flag::wait));

	do
	{
		// Load current head
		next = s_pushed.load();

		if (!next && cancel_if_not_suspended) [[unlikely]]
		{
			// Give up if not suspended
			return false;
		}

		if (!_this && next)
		{
			// If _this == nullptr, it only works if this is the first workload pushed
			s_cpu_lock.lock_unlock();
			continue;
		}
	}
	while (!s_pushed.compare_and_swap_test(next, this));

	if (!next)
	{
		// Monitor the performance only of the actual suspend processing owner
		perf_meter<"SUSPEND"_u64> perf0;

		// First thread to push the work to the workload list pauses all threads and processes it
		std::lock_guard lock(s_cpu_lock);

		u128 copy = s_cpu_bits.load();

		// Try to prefetch cpu->state earlier
		copy = cpu_counter::for_all_cpu(copy, [&](cpu_thread* cpu)
		{
			if (cpu != _this)
			{
				utils::prefetch_write(&cpu->state);
				return true;
			}

			return false;
		});

		// Initialization (first increment)
		g_suspend_counter += 2;

		// Copy snapshot for finalization
		u128 copy2 = copy;

		copy = cpu_counter::for_all_cpu(copy, [&](cpu_thread* cpu, u32 /*index*/)
		{
			if (cpu->state.fetch_add(cpu_flag::pause) & cpu_flag::wait)
			{
				// Clear bits as long as wait flag is set
				return false;
			}

			return true;
		});

		while (copy)
		{
			// Check only CPUs which haven't acknowledged their waiting state yet
			copy = cpu_counter::for_all_cpu(copy, [&](cpu_thread* cpu, u32 /*index*/)
			{
				if (cpu->state & cpu_flag::wait)
				{
					return false;
				}

				return true;
			});

			if (!copy)
			{
				break;
			}

			utils::pause();
		}

		// Second increment: all threads paused
		g_suspend_counter++;

		// Extract queue and reverse element order (FILO to FIFO) (TODO: maybe leave order as is?)
		auto* head = s_pushed.exchange(nullptr);

		u8 min_prio = head->prio;
		u8 max_prio = head->prio;

		if (auto* prev = head->next)
		{
			head->next = nullptr;

			do
			{
				auto* pre2 = prev->next;
				prev->next = head;
				head = std::exchange(prev, pre2);

				// Fill priority range
				min_prio = std::min<u8>(min_prio, head->prio);
				max_prio = std::max<u8>(max_prio, head->prio);
			}
			while (prev);
		}

		// Execute prefetch hint(s)
		for (auto work = head; work; work = work->next)
		{
			for (u32 i = 0; i < work->prf_size; i++)
			{
				// Fixed: previously prefetched prf_list[0] on every iteration,
				// ignoring the loop index and the rest of the hint list
				utils::prefetch_write(work->prf_list[i]);
			}
		}

		cpu_counter::for_all_cpu(copy2, [&](cpu_thread* cpu)
		{
			utils::prefetch_write(&cpu->state);
			return true;
		});

		// Execute all stored workload
		for (s32 prio = max_prio; prio >= min_prio; prio--)
		{
			// ... according to priorities
			for (auto work = head; work; work = work->next)
			{
				// Properly sorting single-linked list may require to optimize the loop
				if (work->prio == prio)
				{
					work->exec(work->func_ptr, work->res_buf);
				}
			}
		}

		// Finalization (last increment)
		ensure(g_suspend_counter++ & 1);

		// Resume all suspended threads
		cpu_counter::for_all_cpu(copy2, [&](cpu_thread* cpu)
		{
			cpu->state -= cpu_flag::pause;
			return true;
		});
	}
	else
	{
		// Seems safe to set pause on self because wait flag hasn't been observed yet
		s_tls_sctr = g_suspend_counter;
		_this->state += cpu_flag::pause + cpu_flag::wait + cpu_flag::temp;
		_this->check_state();
		s_tls_sctr = -1;
		return true;
	}

	g_suspend_counter.notify_all();
	return true;
}
void cpu_thread::cleanup() noexcept
{
if (u64 count = s_cpu_counter)
{
fmt::throw_exception("cpu_thread::cleanup(): %u threads are still active! (created=%u, destroyed=%u)", count, +g_threads_created, +g_threads_deleted);
}
sys_log.notice("All CPU threads have been stopped. [+: %u]", +g_threads_created);
g_threads_deleted -= g_threads_created.load();
g_threads_created = 0;
}
// Request the CPU profiler to flush its accumulated results (command id 0)
void cpu_thread::flush_profilers() noexcept
{
	if (g_fxo->is_init<cpu_profiler>())
	{
		if (g_cfg.core.spu_prof)
		{
			g_fxo->get<cpu_profiler>().registered.push(0);
		}

		return;
	}

	profiler.fatal("cpu_thread::flush_profilers() has been called incorrectly.");
}
// Base implementation of branch target computation; not used by this base class
u32 CPUDisAsm::DisAsmBranchTarget(s32 /*imm*/)
{
	// Unused
	return 0;
}
// Attempt to bring all SPU threads into a state compatible with savestates by
// pausing them (dbg_global_pause) outside of their unsavable critical sections.
// With revert_lock set, undoes a previous successful lock instead.
// On success, optionally fills out_list with the paused SPUs and their PCs.
extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock, std::vector<std::pair<std::shared_ptr<named_thread<spu_thread>>, u32>>* out_list)
{
	if (out_list)
	{
		out_list->clear();
	}

	// Lazily (re)collects the SPU thread list; returns nullptr to signal "give up for now"
	auto get_spus = [old_counter = u64{umax}, spu_list = std::vector<std::shared_ptr<named_thread<spu_thread>>>()](bool can_collect, bool force_collect) mutable
	{
		const u64 new_counter = cpu_thread::g_threads_created + cpu_thread::g_threads_deleted;

		if (old_counter != new_counter)
		{
			if (!can_collect)
			{
				return decltype(&spu_list){};
			}

			const u64 current = get_system_time();

			// Fetch SPU contexts
			spu_list.clear();

			bool give_up = false;

			idm::select<named_thread<spu_thread>>([&](u32 id, spu_thread& spu)
			{
				spu_list.emplace_back(ensure(idm::get_unlocked<named_thread<spu_thread>>(id)));

				if (spu.current_func && spu.unsavable)
				{
					const u64 start = spu.start_time;

					// Automatically give up if it is asleep 15 seconds or more
					if (start && current > start && current - start >= 15'000'000)
					{
						give_up = true;
					}
				}
			});

			if (!force_collect && give_up)
			{
				return decltype(&spu_list){};
			}

			old_counter = new_counter;
		}

		return &spu_list;
	};

	// Attempt to lock for a second, if somehow takes longer abort it
	for (u64 start = 0, passed_count = 0; passed_count < 15;)
	{
		if (revert_lock)
		{
			// Revert the operation of this function
			break;
		}

		if (!start)
		{
			start = get_system_time();
		}
		else if (get_system_time() - start >= 150'000)
		{
			passed_count++;
			start = 0;
			continue;
		}

		// Try to fetch SPUs out of critical section
		const auto spu_list = get_spus(true, false);

		if (!spu_list)
		{
			// Give up for now
			std::this_thread::sleep_for(10ms);
			passed_count++;
			start = 0;
			continue;
		}

		// Avoid using suspend_all when more than 2 threads known to be unsavable
		u32 unsavable_threads = 0;
		for (auto& spu : *spu_list)
		{
			if (spu->unsavable)
			{
				unsavable_threads++;

				if (unsavable_threads >= 3)
				{
					break;
				}
			}
		}

		if (unsavable_threads >= 3)
		{
			std::this_thread::yield();
			continue;
		}

		// Flag for optimization
		bool paused_anyone = false;

		// suspend_all callback returns true on failure (some SPU was unsavable)
		if (cpu_thread::suspend_all(nullptr, {}, [&]()
		{
			if (!get_spus(false, true))
			{
				// Avoid locking IDM here because this is a critical section
				return true;
			}

			bool failed = false;

			const bool is_emu_paused = Emu.IsPaused();

			for (auto& spu : *spu_list)
			{
				if (spu->unsavable)
				{
					failed = true;
					break;
				}

				if (is_emu_paused)
				{
					// If emulation is paused, we can only hope it's already in a state compatible with savestates
					if (!(spu->state & (cpu_flag::dbg_global_pause + cpu_flag::dbg_pause)))
					{
						failed = true;
						break;
					}
				}
				else
				{
					paused_anyone = true;
					ensure(!spu->state.test_and_set(cpu_flag::dbg_global_pause));
				}
			}

			if (failed && paused_anyone)
			{
				// For faster signalling, first remove state flags then batch notifications
				for (auto& spu : *spu_list)
				{
					spu->state -= cpu_flag::dbg_global_pause;
				}
			}

			return failed;
		}))
		{
			if (Emu.IsPaused())
			{
				return false;
			}

			if (!paused_anyone)
			{
				// Need not do anything
				std::this_thread::yield();
				continue;
			}

			// Wake the SPUs whose pause flags we removed inside the callback
			for (auto& spu : *spu_list)
			{
				if (spu->state & cpu_flag::wait)
				{
					spu->state.notify_one();
				}
			}

			std::this_thread::yield();
			continue;
		}

		if (out_list)
		{
			for (auto& spu : *spu_list)
			{
				out_list->emplace_back(spu, spu->pc);
			}
		}

		return true;
	}

	// Revert path / timeout: clear any pause flags that may have been set
	for (auto& spu : *get_spus(true, true))
	{
		if (spu->state.test_and_reset(cpu_flag::dbg_global_pause))
		{
			spu->state.notify_one();
		}
	};

	return false;
}
| 35,286
|
C++
|
.cpp
| 1,393
| 21.502513
| 319
| 0.629672
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,375
|
CPUTranslator.cpp
|
RPCS3_rpcs3/rpcs3/Emu/CPU/CPUTranslator.cpp
|
#ifdef LLVM_AVAILABLE
#include "CPUTranslator.h"
#include "util/v128.hpp"
#include "util/simd.hpp"
llvm::LLVMContext g_llvm_ctx;
// Strip any chain of bitcast instructions and return the underlying value
llvm::Value* peek_through_bitcasts(llvm::Value* arg)
{
	for (;;)
	{
		const auto cast = llvm::dyn_cast_or_null<llvm::CastInst>(arg);

		if (!cast || cast->getOpcode() != llvm::Instruction::BitCast)
		{
			return arg;
		}

		arg = cast->getOperand(0);
	}
}
// Construct the translator for the given module; registers the generic
// pseudo-intrinsics shared by all CPU backends.
cpu_translator::cpu_translator(llvm::Module* _module, bool is_be)
	: m_context(g_llvm_ctx)
	, m_module(_module)
	, m_is_be(is_be)
{
	// Byte shuffle with x86 PSHUFB semantics (lane zeroed if index MSB is set)
	register_intrinsic("x86_pshufb", [&](llvm::CallInst* ci) -> llvm::Value*
	{
		const auto data0 = ci->getOperand(0);
		const auto index = ci->getOperand(1);
		const auto zeros = llvm::ConstantAggregateZero::get(get_type<u8[16]>());

		if (m_use_ssse3)
		{
#if defined(ARCH_X64)
			return m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_ssse3_pshuf_b_128), {data0, index});
#elif defined(ARCH_ARM64)
			// Modified from sse2neon
			// movi v2.16b, #143
			// and v1.16b, v1.16b, v2.16b
			// tbl v0.16b, { v0.16b }, v1.16b
			auto mask = llvm::ConstantInt::get(get_type<u8[16]>(), 0x8F);
			auto and_mask = llvm::ConstantInt::get(get_type<bool[16]>(), true);
			auto vec_len = llvm::ConstantInt::get(get_type<u32>(), 16);

			auto index_masked = m_ir->CreateCall(get_intrinsic<u8[16]>(llvm::Intrinsic::vp_and), {index, mask, and_mask, vec_len});
			return m_ir->CreateCall(get_intrinsic<u8[16]>(llvm::Intrinsic::aarch64_neon_tbl1), {data0, index_masked});
#else
#error "Unimplemented"
#endif
		}
		else
		{
			// Emulate PSHUFB (TODO)
			// Builds a per-element select loop directly in IR.
			// NOTE(review): the backedge condition compares the pre-increment
			// index (i < 16), so the loop body appears to execute once more with
			// i == 16 (out-of-range insertelement) — confirm intended.
			const auto mask = m_ir->CreateAnd(index, 0xf);
			const auto loop = llvm::BasicBlock::Create(m_context, "", m_ir->GetInsertBlock()->getParent());
			const auto prev = ci->getParent();
			const auto next = prev->splitBasicBlock(ci->getNextNode());

			llvm::cast<llvm::BranchInst>(m_ir->GetInsertBlock()->getTerminator())->setOperand(0, loop);

			llvm::Value* result;

			//m_ir->CreateBr(loop);
			m_ir->SetInsertPoint(loop);
			const auto i = m_ir->CreatePHI(get_type<u32>(), 2);
			const auto v = m_ir->CreatePHI(get_type<u8[16]>(), 2);
			i->addIncoming(m_ir->getInt32(0), prev);
			i->addIncoming(m_ir->CreateAdd(i, m_ir->getInt32(1)), loop);
			v->addIncoming(zeros, prev);
			result = m_ir->CreateInsertElement(v, m_ir->CreateExtractElement(data0, m_ir->CreateExtractElement(mask, i)), i);
			v->addIncoming(result, loop);
			m_ir->CreateCondBr(m_ir->CreateICmpULT(i, m_ir->getInt32(16)), loop, next);
			m_ir->SetInsertPoint(next->getFirstNonPHI());
			// Zero lanes whose index byte had the sign bit set
			result = m_ir->CreateSelect(m_ir->CreateICmpSLT(index, zeros), zeros, result);
			return result;
		}
	});

	// Per-byte select: take from operand 2 where bit 4 of the selector nibble
	// (shifted into the sign position) is set, else from operand 1
	register_intrinsic("any_select_by_bit4", [&](llvm::CallInst* ci) -> llvm::Value*
	{
		const auto s = bitcast<s8[16]>(m_ir->CreateShl(bitcast<u64[2]>(ci->getOperand(0)), 3));;
		const auto a = bitcast<u8[16]>(ci->getOperand(1));
		const auto b = bitcast<u8[16]>(ci->getOperand(2));
		return m_ir->CreateSelect(m_ir->CreateICmpSLT(s, llvm::ConstantAggregateZero::get(get_type<s8[16]>())), b, a);
	});
}
// Bind the translator to an LLVM context/engine and derive the ISA feature
// flags (SSSE3/AVX/FMA/AVX-512/VNNI/GFNI) from the target CPU name.
void cpu_translator::initialize(llvm::LLVMContext& context, llvm::ExecutionEngine& engine)
{
	m_context = context;
	m_engine = &engine;

	auto cpu = m_engine->getTargetMachine()->getTargetCPU();

	if (cpu == "generic")
	{
		// Detection failed, try to guess
		cpu = fallback_cpu_detection();
	}

	// Test SSSE3 feature (TODO)
	if (cpu == "generic" ||
		cpu == "k8" ||
		cpu == "opteron" ||
		cpu == "athlon64" ||
		cpu == "athlon-fx" ||
		cpu == "k8-sse3" ||
		cpu == "opteron-sse3" ||
		cpu == "athlon64-sse3" ||
		cpu == "amdfam10" ||
		cpu == "barcelona")
	{
		m_use_ssse3 = false;
	}

	// Test AVX feature (TODO)
	if (cpu == "sandybridge" ||
		cpu == "ivybridge" ||
		cpu == "bdver1")
	{
		m_use_avx = true;
	}

	// Test FMA feature (TODO)
	if (cpu == "haswell" ||
		cpu == "broadwell" ||
		cpu == "skylake" ||
		cpu == "alderlake" ||
		cpu == "raptorlake" ||
		cpu == "meteorlake" ||
		cpu == "bdver2" ||
		cpu == "bdver3" ||
		cpu == "bdver4" ||
		cpu == "znver1" ||
		cpu == "znver2" ||
		cpu == "znver3")
	{
		m_use_fma = true;
		m_use_avx = true;
	}

	// Test AVX-512 feature (TODO)
	if (cpu == "skylake-avx512" ||
		cpu == "cascadelake" ||
		cpu == "cannonlake" ||
		cpu == "cooperlake")
	{
		m_use_avx = true;
		m_use_fma = true;
		m_use_avx512 = true;
	}

	// Test VNNI feature (TODO)
	if (cpu == "cascadelake" ||
		cpu == "cooperlake" ||
		cpu == "alderlake" ||
		cpu == "raptorlake" ||
		cpu == "meteorlake")
	{
		m_use_vnni = true;
	}

	// Test GFNI feature (TODO)
	if (cpu == "tremont" ||
		cpu == "gracemont" ||
		cpu == "alderlake" ||
		cpu == "raptorlake" ||
		cpu == "meteorlake")
	{
		m_use_gfni = true;
	}

	// Test AVX-512_icelake features (TODO)
	// znver4 and later also provide the icelake-level feature set
	if (cpu == "icelake" ||
		cpu == "icelake-client" ||
		cpu == "icelake-server" ||
		cpu == "tigerlake" ||
		cpu == "rocketlake" ||
		cpu == "sapphirerapids" ||
		(cpu.starts_with("znver") && cpu != "znver1" && cpu != "znver2" && cpu != "znver3"))
	{
		m_use_avx = true;
		m_use_fma = true;
		m_use_avx512 = true;
		m_use_avx512_icl = true;
		m_use_vnni = true;
		m_use_gfni = true;
	}

	// Aarch64 CPUs
	if (cpu == "cyclone" || cpu.contains("cortex"))
	{
		m_use_fma = true;

		// AVX does not use intrinsics so far
		m_use_avx = true;
	}
}
// Bitcast helper: verifies total bit sizes match and constant-folds the cast
// when the operand is a constant.
llvm::Value* cpu_translator::bitcast(llvm::Value* val, llvm::Type* type) const
{
	// Total bit width of a scalar or fixed vector type
	const auto bit_count = [](llvm::Type* t) -> uint
	{
		uint bits = t->getScalarSizeInBits();

		if (t->isVectorTy())
		{
			bits *= llvm::cast<llvm::FixedVectorType>(t)->getNumElements();
		}

		return bits;
	};

	const uint s1 = bit_count(type);
	const uint s2 = bit_count(val->getType());

	if (s1 != s2)
	{
		fmt::throw_exception("cpu_translator::bitcast(): incompatible type sizes (%u vs %u)", s1, s2);
	}

	if (const auto c1 = llvm::dyn_cast<llvm::Constant>(val))
	{
		// Fold the cast immediately for constant operands
		return ensure(llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, type, m_module->getDataLayout()));
	}

	return m_ir->CreateBitCast(val, type);
}
// Extract a 128-bit constant from an LLVM value into a v128.
// Returns {false, zero} when the value is not a constant; throws with the
// given position/line diagnostics on unsupported constant kinds.
template <>
std::pair<bool, v128> cpu_translator::get_const_vector<v128>(llvm::Value* c, u32 _pos, u32 _line)
{
	v128 result{};

	if (!llvm::isa<llvm::Constant>(c))
	{
		return {false, result};
	}

	const auto t = c->getType();

	if (!t->isVectorTy())
	{
		// Accept a plain i128 constant
		if (const auto ci = llvm::dyn_cast<llvm::ConstantInt>(c); ci && ci->getBitWidth() == 128)
		{
			const auto& cv = ci->getValue();
			result._u64[0] = cv.extractBitsAsZExtValue(64, 0);
			result._u64[1] = cv.extractBitsAsZExtValue(64, 64);
			return {true, result};
		}

		fmt::throw_exception("[0x%x, %u] Not a vector", _pos, _line);
	}

	if (auto v = llvm::cast<llvm::FixedVectorType>(t); v->getScalarSizeInBits() * v->getNumElements() != 128)
	{
		fmt::throw_exception("[0x%x, %u] Bad vector size: i%ux%u", _pos, _line, v->getScalarSizeInBits(), v->getNumElements());
	}

	const auto cv = llvm::dyn_cast<llvm::ConstantDataVector>(c);

	if (!cv)
	{
		if (llvm::isa<llvm::ConstantAggregateZero>(c))
		{
			return {true, result};
		}

		// Print the offending constant into the diagnostic message.
		// Fix: renamed from `result`, which shadowed the v128 declared above.
		std::string dump;
		llvm::raw_string_ostream out(dump);
		c->print(out, true);
		out.flush();

		if (llvm::isa<llvm::ConstantExpr>(c))
		{
			// Sorry, if we cannot evaluate it we cannot use it
			fmt::throw_exception("[0x%x, %u] Constant Expression!\n%s", _pos, _line, dump);
		}

		fmt::throw_exception("[0x%x, %u] Unexpected constant type!\n%s", _pos, _line, dump);
	}

	// Copy the elements into the raw v128 according to the scalar type
	const auto sct = t->getScalarType();

	if (sct->isIntegerTy(8))
	{
		for (u32 i = 0; i < 16; i++)
		{
			result._u8[i] = static_cast<u8>(cv->getElementAsInteger(i));
		}
	}
	else if (sct->isIntegerTy(16))
	{
		for (u32 i = 0; i < 8; i++)
		{
			result._u16[i] = static_cast<u16>(cv->getElementAsInteger(i));
		}
	}
	else if (sct->isIntegerTy(32))
	{
		for (u32 i = 0; i < 4; i++)
		{
			result._u32[i] = static_cast<u32>(cv->getElementAsInteger(i));
		}
	}
	else if (sct->isIntegerTy(64))
	{
		for (u32 i = 0; i < 2; i++)
		{
			result._u64[i] = cv->getElementAsInteger(i);
		}
	}
	else if (sct->isFloatTy())
	{
		for (u32 i = 0; i < 4; i++)
		{
			result._f[i] = cv->getElementAsFloat(i);
		}
	}
	else if (sct->isDoubleTy())
	{
		for (u32 i = 0; i < 2; i++)
		{
			result._d[i] = cv->getElementAsDouble(i);
		}
	}
	else
	{
		fmt::throw_exception("[0x%x, %u] Unexpected vector element type", _pos, _line);
	}

	return {true, result};
}
// Build an LLVM constant of type `t` from 16 raw bytes (v128).
// Accepts i128 or any 128-bit fixed vector type; throws otherwise.
template <>
llvm::Constant* cpu_translator::make_const_vector<v128>(v128 v, llvm::Type* t, u32 _line)
{
	// A plain 128-bit integer constant is accepted directly
	if (const auto ct = llvm::dyn_cast<llvm::IntegerType>(t); ct && ct->getBitWidth() == 128)
	{
		return llvm::ConstantInt::get(t, llvm::APInt(128, llvm::ArrayRef(reinterpret_cast<const u64*>(v._bytes), 2)));
	}

	ensure(t->isVectorTy());
	ensure(128 == t->getScalarSizeInBits() * llvm::cast<llvm::FixedVectorType>(t)->getNumElements());

	const auto scalar = t->getScalarType();

	// Reinterpret the 16 raw bytes according to the requested element type
	if (scalar->isIntegerTy(8))
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(v._bytes), 16));
	}
	else if (scalar->isIntegerTy(16))
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u16*>(v._bytes), 8));
	}
	else if (scalar->isIntegerTy(32))
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u32*>(v._bytes), 4));
	}
	else if (scalar->isIntegerTy(64))
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u64*>(v._bytes), 2));
	}
	else if (scalar->isFloatTy())
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const f32*>(v._bytes), 4));
	}
	else if (scalar->isDoubleTy())
	{
		return llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const f64*>(v._bytes), 2));
	}

	fmt::throw_exception("[line %u] No supported constant type", _line);
}
void cpu_translator::replace_intrinsics(llvm::Function& f)
{
	// Substitute calls to registered pseudo-intrinsics (looked up by callee name
	// in m_intrinsics) with the IR produced by their expansion callbacks.
	for (auto& block : f)
	{
		auto it = block.begin();
		while (it != block.end())
		{
			const auto call = llvm::dyn_cast<llvm::CallInst>(&*it);
			const auto target = call ? call->getCalledFunction() : nullptr;
			if (target)
			{
				const auto name = target->getName();
				if (const auto found = m_intrinsics.find(std::string_view(name.data(), name.size())); found != m_intrinsics.end())
				{
					// Expand in place, rewire users, then drop the original call.
					// eraseFromParent() hands back the iterator to the next instruction.
					m_ir->SetInsertPoint(call);
					call->replaceAllUsesWith(found->second(call));
					it = call->eraseFromParent();
					continue;
				}
			}

			++it;
		}
	}
}
void cpu_translator::run_transforms(llvm::Function& f)
{
	// Intrinsic replacement always comes first: the registered passes may
	// depend on the resolved names.
	replace_intrinsics(f);

	for (const auto& transform : m_transform_passes)
	{
		transform->run(m_ir, f);
	}
}
void cpu_translator::register_transform_pass(std::unique_ptr<translator_pass>& pass)
{
	// Take ownership of the pass and append it to the transform pipeline.
	// The caller's pointer is left empty.
	m_transform_passes.push_back(std::move(pass));
}
void cpu_translator::clear_transforms()
{
	// Unregister (and destroy) every transform pass.
	m_transform_passes = {};
}
void cpu_translator::reset_transforms()
{
	// Clear the per-module state of each registered pass without
	// unregistering any of them.
	for (const auto& transform : m_transform_passes)
	{
		transform->reset();
	}
}
void cpu_translator::erase_stores(llvm::ArrayRef<llvm::Value*> args)
{
	// Erase StoreInst users of each given value, looking through chains of
	// bitcasts (following the first user at each step, as before).
	//
	// Fixes vs the previous version:
	// - Dereferencing a use_iterator yields the *used* value (Use::operator Value*),
	//   not the user, so the old walk inspected 'v' itself instead of its users.
	//   We now explicitly take Use::getUser().
	// - Stores are collected first and erased afterwards: erasing a user while
	//   iterating a value's use-list invalidates the iterator.
	// - A bitcast with no users no longer dereferences an end() iterator.
	std::vector<llvm::StoreInst*> victims;

	for (auto v : args)
	{
		for (auto& use : v->uses())
		{
			llvm::Value* i = use.getUser();

			// Walk through bitcasts
			llvm::CastInst* bci = nullptr;
			while (i && (bci = llvm::dyn_cast<llvm::CastInst>(i)) && bci->getOpcode() == llvm::Instruction::BitCast)
			{
				// Follow the first user of the cast; stop if there is none
				i = bci->use_empty() ? nullptr : bci->use_begin()->getUser();
			}

			if (auto si = llvm::dyn_cast_or_null<llvm::StoreInst>(i))
			{
				victims.push_back(si);
			}
		}
	}

	for (auto si : victims)
	{
		si->eraseFromParent();
	}
}
#endif
| 11,281
|
C++
|
.cpp
| 387
| 26.372093
| 125
| 0.645191
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,376
|
AArch64Signal.cpp
|
RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64Signal.cpp
|
#include <stdafx.h>
#include "AArch64Signal.h"
namespace aarch64
{
	// Some of the EC codes we care about.
	// These are the EL1 exception class values found in ESR_EL1 bits [31:26];
	// only the ones we translate to a fault_reason are listed.
	enum class EL1_exception_class
	{
		undefined = 0,
		instr_abort_0 = 32,     // PAGE_FAULT - Execute, change in EL
		instr_abort_1 = 33,     // PAGE_FAULT - Execute, same EL
		data_abort_0 = 36,      // PAGE_FAULT - Generic, causing change in EL (e.g kernel sig handler back to EL0)
		data_abort_1 = 37,      // PAGE_FAULT - Generic, no change in EL, e.g EL1 driver fault
		illegal_execution = 14, // BUS_ERROR
		unaligned_pc = 34,      // BUS_ERROR
		unaligned_sp = 38,      // BUS_ERROR
		breakpoint = 60,        // BRK
	};

#ifdef __linux__
	// Tag identifying the ESR record ("ESR\x01") in the kernel-provided signal frame
	constexpr u32 ESR_CTX_MAGIC = 0x45535201;

	// Walk the list of tagged records stored in uc_mcontext.__reserved and
	// return the ESR record, or nullptr if the kernel did not provide one.
	const aarch64_esr_ctx* find_EL1_esr_context(const ucontext_t* ctx)
	{
		u32 offset = 0;
		const auto& mctx = ctx->uc_mcontext;

		// Each record starts with a {magic, size} header; a zero magic ends the list
		while ((offset + 4) < sizeof(mctx.__reserved))
		{
			const auto head = reinterpret_cast<const aarch64_cpu_ctx_block*>(&mctx.__reserved[offset]);
			if (!head->magic)
			{
				// End of linked list
				return nullptr;
			}

			if (head->magic == ESR_CTX_MAGIC)
			{
				return reinterpret_cast<const aarch64_esr_ctx*>(head);
			}

			offset += head->size;
		}

		return nullptr;
	}

	// Return the saved ESR_EL1 value for this signal, or 0 when unavailable
	u64 _read_ESR_EL1(const ucontext_t* uctx)
	{
		auto esr_ctx = find_EL1_esr_context(uctx);
		return esr_ctx ? esr_ctx->esr : 0;
	}
#elif defined(__APPLE__)
	u64 _read_ESR_EL1(const ucontext_t* uctx)
	{
		// Easy to read from mcontext
		const auto darwin_ctx = reinterpret_cast<aarch64_darwin_mcontext64*>(uctx->uc_mcontext);
		return darwin_ctx->es.ESR;
	}
#else
	u64 _read_ESR_EL1(const ucontext_t*)
	{
		// Unimplemented
		return 0;
	}
#endif

	// Classify the cause of a synchronous exception from the saved ESR_EL1.
	// Returns fault_reason::undefined when no ESR is available or the EC is unknown.
	fault_reason decode_fault_reason(const ucontext_t* uctx)
	{
		auto esr = _read_ESR_EL1(uctx);
		if (!esr)
		{
			return fault_reason::undefined;
		}

		// We don't really care about most of the register fields, but we can check for a few things.
		const auto exception_class = (esr >> 26) & 0b111111;
		switch (static_cast<EL1_exception_class>(exception_class))
		{
		case EL1_exception_class::breakpoint:
			// Debug break
			return fault_reason::breakpoint;
		case EL1_exception_class::illegal_execution:
		case EL1_exception_class::unaligned_pc:
		case EL1_exception_class::unaligned_sp:
			return fault_reason::illegal_instruction;
		case EL1_exception_class::instr_abort_0:
		case EL1_exception_class::instr_abort_1:
			return fault_reason::instruction_execute;
		case EL1_exception_class::data_abort_0:
		case EL1_exception_class::data_abort_1:
			// Page fault - fall through to the read/write check below
			break;
		default:
			return fault_reason::undefined;
		}

		// Check direction bit (bit 6 of the data-abort ISS, WnR per the ARM ARM):
		// set means the faulting access was a write.
		const auto direction = (esr >> 6u) & 1u;
		return direction ? fault_reason::data_write : fault_reason::data_read;
	}
}
| 3,233
|
C++
|
.cpp
| 91
| 27.076923
| 114
| 0.581202
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,377
|
AArch64Common.cpp
|
RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64Common.cpp
|
#include "stdafx.h"
#include "AArch64Common.h"
#include <thread>
#include <map>
#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif
namespace aarch64
{
#if !defined(__APPLE__)
	// One known CPU core: identified by MIDR implementer ('vendor') and part number.
	// 'arch' is the matching -march string, 'family' an optional marketing family name.
	struct cpu_entry_t
	{
		u32 vendor;
		u32 part;
		const char* arch;
		const char* family;
		const char* name;
	};

	// One known MIDR implementer id with long and short display names.
	struct cpu_vendor_t
	{
		u32 id;
		const char* name;
		const char* short_name;
	};

	// Implementer ids as reported in MIDR_EL1 bits [31:24]
	static cpu_vendor_t s_vendors_list[] =
	{
		{ 0x41, "Arm Limited.", "ARM" },
		{ 0x42, "Broadcom Corporation.", "Broadcom" },
		{ 0x43, "Cavium Inc.", "Cavium" },
		{ 0x44, "Digital Equipment Corporation.", "DEC" },
		{ 0x46, "Fujitsu Ltd.", "Fujitsu" },
		{ 0x49, "Infineon Technologies AG.", "Infineon" },
		{ 0x4D, "Motorola or Freescale Semiconductor Inc.", "Motorola" },
		{ 0x4E, "NVIDIA Corporation.", "NVIDIA" },
		{ 0x50, "Applied Micro Circuits Corporation.", "AMCC" },
		{ 0x51, "Qualcomm Inc.", "Qualcomm" },
		{ 0x56, "Marvell International Ltd.", "Marvell" },
		{ 0x69, "Intel Corporation.", "Intel" },
		{ 0xC0, "Ampere Computing", "Ampere" },
		// Unofficial but existing in the wild
		{ 0x61, "Apple Inc.", "Apple" },
	};

	// Known {implementer, part} pairs. Not exhaustive - unknown parts are
	// reported as "Unidentified cores" by get_cpu_brand().
	static cpu_entry_t s_cpu_list[] =
	{
		// ARM
		{ 0x41, 0xd01, "armv8-a+crc+simd", "", "Cortex-A32" },
		{ 0x41, 0xd04, "armv8-a+crc+simd", "", "Cortex-A35" },
		{ 0x41, 0xd03, "armv8-a+crc+simd", "", "Cortex-A53" },
		{ 0x41, 0xd07, "armv8-a+crc+simd", "", "Cortex-A57" },
		{ 0x41, 0xd08, "armv8-a+crc+simd", "", "Cortex-A72" },
		{ 0x41, 0xd09, "armv8-a+crc+simd", "", "Cortex-A73" },
		{ 0x41, 0xd05, "armv8.2-a+fp16+dotprod", "", "Cortex-A55" },
		{ 0x41, 0xd0a, "armv8.2-a+fp16+dotprod", "", "Cortex-A75" },
		{ 0x41, 0xd0b, "armv8.2-a+fp16+dotprod", "", "Cortex-A76" },
		{ 0x41, 0xd0e, "armv8.2-a+fp16+dotprod", "", "Cortex-A76ae" },
		{ 0x41, 0xd0d, "armv8.2-a+fp16+dotprod", "", "Cortex-A77" },
		{ 0x41, 0xd41, "armv8.2-a+fp16+dotprod", "", "Cortex-A78" },
		{ 0x41, 0xd42, "armv8.2-a+fp16+dotprod", "", "Cortex-A78ae" },
		{ 0x41, 0xd4b, "armv8.2-a+fp16+dotprod", "", "Cortex-A78c" },
		{ 0x41, 0xd47, "armv9-a+fp16+bf16+i8mm", "", "Cortex-A710" },
		{ 0x41, 0xd44, "armv8.2-a+fp16+dotprod", "", "Cortex-X1" },
		{ 0x41, 0xd4c, "armv8.2-a+fp16+dotprod", "", "Cortex-X1c" },
		{ 0x41, 0xd0c, "armv8.2-a+fp16+dotprod", "", "Neoverse-N1" },
		{ 0x41, 0xd40, "armv8.4-a+fp16+bf16+i8mm", "", "Neoverse-V1" },
		{ 0x41, 0xd49, "armv8.5-a+fp16+bf16+i8mm", "", "Neoverse-N2" },
		{ 0x41, 0xd23, "armv8.1-m.main+pacbti+mve.fp+fp.dp", "", "Cortex-M85" },
		{ 0x41, 0xd13, "armv8-r+crc+simd", "", "Cortex-R52" },
		{ 0x41, 0xd16, "armv8-r+crc+simd", "", "Cortex-R52+" },
		// APPLE
		{ 0x61, 0x22, "armv8.5-a", "M1", "Firestorm" },
		{ 0x61, 0x23, "armv8.5-a", "M1", "IceStorm" },
		{ 0x61, 0x28, "armv8.5-a", "M1 Max", "Firestorm" },
		{ 0x61, 0x29, "armv8.5-a", "M1 Max", "Icestorm" },
		{ 0x61, 0x24, "armv8.5-a", "M1 Pro", "Firestorm" },
		{ 0x61, 0x25, "armv8.5-a", "M1 Pro", "Icestorm" },
		{ 0x61, 0x32, "armv8.5-a", "M2", "Avalanche" },
		{ 0x61, 0x33, "armv8.5-a", "M2", "Blizzard" },
		// QUALCOMM
		{ 0x51, 0x01, "armv8.5-a", "Snapdragon", "X-Elite" },
	};
static const cpu_vendor_t* find_cpu_vendor(u64 id)
{
for (const auto& vendor : s_vendors_list)
{
if (id == vendor.id)
{
return &vendor;
}
}
return nullptr;
}
static const cpu_entry_t* find_cpu_part(u64 vendor, u64 part)
{
for (const auto& cpu : s_cpu_list)
{
if (cpu.vendor == vendor && cpu.part == part)
{
return &cpu;
}
}
return nullptr;
}
	// Read main ID register
	// On Linux the per-cpu MIDR_EL1 value is exposed through sysfs.
	// Returns umax when the sysfs node does not exist (caller treats this as
	// "stop probing") and 0 when the node exists but cannot be read.
	static u64 read_MIDR_EL1([[maybe_unused]] u32 cpu_id)
	{
#if defined(__linux__)
		const std::string path = fmt::format("/sys/devices/system/cpu/cpu%u/regs/identification/midr_el1", cpu_id);
		if (!fs::is_file(path))
		{
			return umax;
		}

		std::string value;
		if (!fs::file(path, fs::read).read(value, 18))
		{
			return 0;
		}

		// Parse the register value as hexadecimal text
		return std::stoull(value, nullptr, 16);
#else
		// Unimplemented
		return 0;
#endif
	}
	// Build a human-readable CPU brand string from the MIDR_EL1 of every core.
	// ARM SoCs are often heterogeneous (big.LITTLE), so all cores are sampled
	// and grouped by identical MIDR values, e.g. 'Apple M1 (4x"Firestorm" + 4x"IceStorm")'.
	std::string get_cpu_brand()
	{
		// Fetch vendor and part numbers. ARM CPUs often have more than 1 architecture on the SoC, so we check all of them.
		std::map<u64, int> core_layout;
		for (u32 i = 0; i < std::thread::hardware_concurrency(); ++i)
		{
			const auto midr = read_MIDR_EL1(i);
			if (midr == umax)
			{
				// Sysfs node missing - stop probing further cores
				break;
			}

			core_layout[midr]++;
		}

		if (core_layout.empty())
		{
			return "Unidentified CPU";
		}

		std::string vendor_name;
		std::string part_family;
		std::vector<std::string> core_names;

		for (const auto& [midr, count] : core_layout)
		{
			// MIDR_EL1: implementer in bits [31:24], part number in bits [15:4]
			const auto implementer_id = (midr >> 24) & 0xff;
			const auto part_id = (midr >> 4) & 0xfff;

			// First identified implementer wins the vendor prefix
			if (vendor_name.empty())
			{
				const auto vendor_info = find_cpu_vendor(implementer_id);
				vendor_name = vendor_info ? vendor_info->short_name : "Unknown";
			}

			const auto part_info = find_cpu_part(implementer_id, part_id);
			if (!part_info)
			{
				core_names.push_back(fmt::format("%dx\"Unidentified cores\"", count));
				continue;
			}

			if (part_family.empty() && part_info->family)
			{
				part_family = part_info->family;
			}

			core_names.push_back(fmt::format("%dx\"%s\"", count, part_info->name));
		}

		// Assemble everything
		std::string result = vendor_name + " ";
		std::string suffix;
		if (!part_family.empty())
		{
			// Since we have a known family name, the core layout is just extra info.
			// Wrap core layout in brackets.
			result += part_family + " (";
			suffix = ")";
		}

		result += fmt::merge(core_names, " + ");
		result += suffix;
		return result;
	}
#else
	// Read a string sysctl variable by name; returns "" on any failure.
	// NOTE(review): relies on variable_name.data() being NUL-terminated - all
	// current callers pass string literals, which satisfies this; confirm before
	// passing arbitrary views.
	static std::string sysctl_s(const std::string_view& variable_name)
	{
		// Determine required buffer size
		size_t length = 0;
		if (sysctlbyname(variable_name.data(), nullptr, &length, nullptr, 0) == -1)
		{
			return "";
		}

		// Allocate space for the variable (+1 guarantees NUL termination).
		std::vector<char> text(length + 1);
		text[length] = 0;

		if (sysctlbyname(variable_name.data(), text.data(), &length, nullptr, 0) == -1)
		{
			return "";
		}

		return text.data();
	}
static u64 sysctl_u64(const std::string_view& variable_name)
{
u64 value = 0;
size_t data_len = sizeof(value);
if (sysctlbyname(variable_name.data(), &value, &data_len, nullptr, 0) == -1)
{
return umax;
}
return value;
}
	// We can get the brand name from sysctl directly
	// Once we have windows implemented, we should probably separate the different OS-dependent bits to avoid clutter
	// Appends a "(xP+yE)" suffix when the SoC reports more than one performance level.
	std::string get_cpu_brand()
	{
		const auto brand = sysctl_s("machdep.cpu.brand_string");
		if (brand.empty())
		{
			return "Unidentified CPU";
		}

		// Parse extra core information (P and E cores)
		if (sysctl_u64("hw.nperflevels") < 2)
		{
			// Homogeneous SoC - the brand string alone is enough
			return brand;
		}

		u64 pcores = sysctl_u64("hw.perflevel0.physicalcpu");
		u64 ecores = sysctl_u64("hw.perflevel1.physicalcpu");
		if (sysctl_s("hw.perflevel0.name") == "Efficiency")
		{
			// perflevel0 is normally the performance cluster; swap when it is not
			std::swap(ecores, pcores);
		}

		return fmt::format("%s (%lluP+%lluE)", brand, pcores, ecores);
	}
#endif
}
| 8,262
|
C++
|
.cpp
| 229
| 27.200873
| 123
| 0.52231
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,378
|
AArch64JIT.cpp
|
RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64JIT.cpp
|
#include "stdafx.h"
#include "AArch64JIT.h"
#include "AArch64ASM.h"
LOG_CHANNEL(jit_log, "JIT");
#define STDOUT_DEBUG 0
#define DPRINT1(...)\
do {\
printf(__VA_ARGS__);\
printf("\n");\
fflush(stdout);\
} while (0)
#if STDOUT_DEBUG
#define DPRINT DPRINT1
#else
#define DPRINT jit_log.trace
#endif
namespace aarch64
{
using instruction_info_t = GHC_frame_preservation_pass::instruction_info_t;
using function_info_t = GHC_frame_preservation_pass::function_info_t;
	// Store the pass configuration; no other state is initialized here.
	GHC_frame_preservation_pass::GHC_frame_preservation_pass(const config_t& configuration)
		: m_config(configuration)
	{}
void GHC_frame_preservation_pass::reset()
{
m_visited_functions.clear();
}
void GHC_frame_preservation_pass::force_tail_call_terminators(llvm::Function& f)
{
// GHC functions are not call-stack preserving and can therefore never return if they make any external calls at all.
// Replace every terminator clause with a tail call explicitly. This is already done for X64 to work, but better safe than sorry.
for (auto& bb : f)
{
auto bit = bb.begin(), prev = bb.end();
for (; bit != bb.end(); prev = bit, ++bit)
{
if (prev == bb.end())
{
continue;
}
if (llvm::isa<llvm::ReturnInst>(&*bit))
{
if (auto ci = llvm::dyn_cast<llvm::CallInst>(&*prev))
{
// This is a "ret" that is coming after a "call" to another funciton.
// Enforce that it must be a tail call.
if (!ci->isTailCall())
{
ci->setTailCall();
}
}
}
}
}
}
function_info_t GHC_frame_preservation_pass::preprocess_function(const llvm::Function& f)
{
function_info_t result{};
result.instruction_count = f.getInstructionCount();
// Blanket exclusions. Stubs or dispatchers that do not compute anything themselves.
if (f.getName() == "__spu-null")
{
// Don't waste the effort processing this stub. It has no points of concern
result.num_external_calls = 1;
return result;
}
if (m_config.use_stack_frames)
{
// Stack frame estimation. SPU code can be very long and consumes several KB of stack.
u32 stack_frame_size = 128u;
// Actual ratio is usually around 1:4
const u32 expected_compiled_instr_count = f.getInstructionCount() * 4;
// Because GHC doesn't preserve stack (all stack is scratch), we know we'll start to spill once we go over the number of actual regs.
// We use a naive allocator that just assumes each instruction consumes a register slot. We "spill" every 32 instructions.
// FIXME: Aggressive spill is only really a thing with vector operations. We can detect those instead.
// A proper fix is to port this to a MF pass, but I have PTSD from working at MF level.
const u32 spill_pages = (expected_compiled_instr_count + 127u) / 128u;
stack_frame_size *= std::min(spill_pages, 32u); // 128 to 4k dynamic. It is unlikely that any frame consumes more than 4096 bytes
result.stack_frame_size = stack_frame_size;
}
result.instruction_count = f.getInstructionCount();
result.num_external_calls = 0;
// The LR is not spared by LLVM in cases where there is a lot of spilling.
// This is much easier to manage with a custom LLVM branch as we can just mark X30 as off-limits as a GPR.
// This is another thing to be moved to a MachineFunction pass. Ideally we should check the instruction stream for writes to LR and reload it on exit.
// For now, assume it is dirtied if the function is of any reasonable length.
result.clobbers_x30 = result.instruction_count > 32;
result.is_leaf = true;
for (auto& bb : f)
{
for (auto& inst : bb)
{
if (auto ci = llvm::dyn_cast<llvm::CallInst>(&inst))
{
if (llvm::isa<llvm::InlineAsm>(ci->getCalledOperand()))
{
// Inline ASM blocks are ignored
continue;
}
result.num_external_calls++;
if (ci->isTailCall())
{
// This is not a leaf if it has at least one exit point / terminator that is not a return instruction.
result.is_leaf = false;
}
else
{
// Returning calls always clobber x30
result.clobbers_x30 = true;
}
}
}
}
return result;
}
	// Classify a single IR instruction for frame-patching purposes: detect
	// direct/indirect calls and the branch forms that can leave the function,
	// recording callee identity and whether it uses the GHC calling convention.
	instruction_info_t GHC_frame_preservation_pass::decode_instruction(const llvm::Function& f, const llvm::Instruction* i)
	{
		instruction_info_t result{};
		if (auto ci = llvm::dyn_cast<llvm::CallInst>(i))
		{
			// Watch out for injected ASM blocks...
			if (llvm::isa<llvm::InlineAsm>(ci->getCalledOperand()))
			{
				// Not a real call. This is just an insert of inline asm
				return result;
			}

			result.is_call_inst = true;
			result.is_returning = true;
			result.preserve_stack = !ci->isTailCall();
			result.callee = ci->getCalledFunction();
			result.is_tail_call = ci->isTailCall();

			if (!result.callee)
			{
				// Indirect call (call from raw value).
				result.is_indirect = true;
				result.callee_is_GHC = ci->getCallingConv() == llvm::CallingConv::GHC;
				result.callee_name = "__indirect_call";
			}
			else
			{
				result.callee_is_GHC = result.callee->getCallingConv() == llvm::CallingConv::GHC;
				result.callee_name = result.callee->getName().str();
			}
			return result;
		}

		if (auto bi = llvm::dyn_cast<llvm::BranchInst>(i))
		{
			// More likely to jump out via an unconditional...
			if (!bi->isConditional())
			{
				ensure(bi->getNumSuccessors() == 1);
				auto targetbb = bi->getSuccessor(0);

				result.callee = targetbb->getParent();
				result.callee_name = result.callee->getName().str();
				// A branch within the same function is not treated as a call
				result.is_call_inst = result.callee_name != f.getName();
			}
			return result;
		}

		if (auto bi = llvm::dyn_cast<llvm::IndirectBrInst>(i))
		{
			// Very unlikely to be the same function. Can be considered a function exit.
			ensure(bi->getNumDestinations() == 1);
			auto targetbb = ensure(bi->getSuccessor(0)); // This is guaranteed to fail but I've yet to encounter this

			result.callee = targetbb->getParent();
			result.callee_name = result.callee->getName().str();
			result.is_call_inst = result.callee_name != f.getName();
			return result;
		}

		if (auto bi = llvm::dyn_cast<llvm::CallBrInst>(i))
		{
			ensure(bi->getNumSuccessors() == 1);
			auto targetbb = bi->getSuccessor(0);

			result.callee = targetbb->getParent();
			result.callee_name = result.callee->getName().str();
			result.is_call_inst = result.callee_name != f.getName();
			return result;
		}

		if (auto bi = llvm::dyn_cast<llvm::InvokeInst>(i))
		{
			ensure(bi->getNumSuccessors() == 2);
			auto targetbb = bi->getSuccessor(0);

			result.callee = targetbb->getParent();
			result.callee_name = result.callee->getName().str();
			result.is_call_inst = result.callee_name != f.getName();
			return result;
		}

		// Anything else (arithmetic, loads/stores, etc.) is not interesting here
		return result;
	}
gpr GHC_frame_preservation_pass::get_base_register_for_call(const std::string& callee_name, gpr default_reg)
{
// We go over the base_register_lookup table and find the first matching pattern
for (const auto& pattern : m_config.base_register_lookup)
{
if (callee_name.starts_with(pattern.first))
{
return pattern.second;
}
}
return default_reg;
}
	// Main pass entry point. For GHC-convention functions only: enforces
	// tail-call terminators, optionally injects a scratch stack frame, and
	// patches every outgoing tail call so control never "returns" into a GHC frame.
	void GHC_frame_preservation_pass::run(llvm::IRBuilder<>* irb, llvm::Function& f)
	{
		if (f.getCallingConv() != llvm::CallingConv::GHC)
		{
			// If we're not doing GHC, the calling conv will have stack fixup on its own via prologue/epilogue
			return;
		}

		if (f.getInstructionCount() == 0)
		{
			// Nothing to do. Happens with placeholder functions such as branch patchpoints
			return;
		}

		const auto this_name = f.getName().str();
		if (m_visited_functions.find(this_name) != m_visited_functions.end())
		{
			// Already processed. Only useful when recursing which is currently not used.
			DPRINT("Function %s was already processed. Skipping.\n", this_name.c_str());
			return;
		}

		if (this_name != "__spu-null") // This name is meaningless and doesn't uniquely identify a function
		{
			m_visited_functions.insert(this_name);
		}

		if (m_config.exclusion_callback && m_config.exclusion_callback(this_name))
		{
			// Function is explicitly excluded
			return;
		}

		// Preprocessing.
		auto function_info = preprocess_function(f);
		if (function_info.num_external_calls == 0 && function_info.stack_frame_size == 0)
		{
			// No stack frame injection and no external calls to patch up. This is a leaf function, nothing to do.
			DPRINT("Ignoring function %s", this_name.c_str());
			return;
		}

		// Force tail calls on all terminators
		force_tail_call_terminators(f);

		// Check for leaves
		if (function_info.is_leaf && !m_config.use_stack_frames)
		{
			// Sanity check. If this function had no returning calls, it should have been omitted from processing.
			ensure(function_info.clobbers_x30, "Function has no terminator and no non-tail calls but was allowed for frame processing!");
			DPRINT("Function %s is a leaf.", this_name.c_str());
			process_leaf_function(irb, f);
			return;
		}

		// Asm snippets for patching stack frame
		ASMBlock frame_prologue, frame_epilogue;

		if (function_info.stack_frame_size > 0)
		{
			// NOTE: The stack frame here is purely optional, we can pre-allocate scratch on the gateway.
			// However, that is an optimization for another time, this helps make debugging easier.
			frame_prologue.sub(sp, sp, UASM::Imm(function_info.stack_frame_size));
			frame_epilogue.add(sp, sp, UASM::Imm(function_info.stack_frame_size));

			// Emit the frame prologue. We use a BB here for extra safety as it solves the problem of backwards jumps re-executing the prologue.
			auto functionStart = &f.front();
			auto prologueBB = llvm::BasicBlock::Create(f.getContext(), "", &f, functionStart);
			irb->SetInsertPoint(prologueBB, prologueBB->begin());
			frame_prologue.insert(irb, f.getContext());
			irb->CreateBr(functionStart);
		}

		// Now we start processing
		bool terminator_found = false;
		for (auto& bb : f)
		{
			for (auto bit = bb.begin(); bit != bb.end();)
			{
				const auto instruction_info = decode_instruction(f, &(*bit));
				if (!instruction_info.is_call_inst)
				{
					++bit;
					continue;
				}

				std::string callee_name = "__unknown";
				if (const auto cf = instruction_info.callee)
				{
					callee_name = cf->getName().str();
					if (cf->hasFnAttribute(llvm::Attribute::AlwaysInline) || callee_name.starts_with("llvm."))
					{
						// Always inlined call. Likely inline Asm. Skip
						++bit;
						continue;
					}

					// Technically We should also ignore any host functions linked in, usually starting with ppu_ or spu_ prefix.
					// However, there is not much guarantee that those are safe with only rare exceptions, and it doesn't hurt to patch the frame around them that much anyway.
				}

				if (instruction_info.preserve_stack)
				{
					// Non-tail call. If we have a stack allocated, we preserve it across the call
					++bit;
					continue;
				}

				ensure(instruction_info.is_tail_call);
				terminator_found = true;

				// Now we patch the call if required. For normal calls that 'return' (i.e calls to C/C++ ABI), we do not patch them as they will manage the stack themselves (callee-managed)
				bit = patch_tail_call(irb, f, bit, instruction_info, function_info, frame_epilogue);

				// Next
				if (bit != bb.end())
				{
					++bit;
				}
			}
		}

		if (!terminator_found)
		{
			// If we got here, we must be using stack frames.
			ensure(function_info.is_leaf && function_info.stack_frame_size > 0, "Leaf function was processed without using stack frames!");

			// We want to insert a frame cleanup at the tail at every return instruction we find.
			for (auto& bb : f)
			{
				for (auto& i : bb)
				{
					if (is_ret_instruction(&i))
					{
						irb->SetInsertPoint(&i);
						frame_epilogue.insert(irb, f.getContext());
					}
				}
			}
		}
	}
	// Patch one tail call ('where' points at the CallInst, which must be followed
	// by a ReturnInst): optionally emit call-chain breadcrumbs, tear down any
	// injected frame before the call, and fix the return path after it.
	// Returns the iterator positioned at the following ReturnInst.
	llvm::BasicBlock::iterator
	GHC_frame_preservation_pass::patch_tail_call(
		llvm::IRBuilder<>* irb,
		llvm::Function& f,
		llvm::BasicBlock::iterator where,
		const instruction_info_t& instruction_info,
		const function_info_t& function_info,
		const UASM& frame_epilogue)
	{
		auto ci = llvm::dyn_cast<llvm::CallInst>(where);
		irb->SetInsertPoint(ensure(ci));

		const auto this_name = f.getName().str();

		// Insert breadcrumb info before the call
		// WARNING: This can corrupt the call because LLVM somehow ignores the clobbered register during a call instruction for some reason
		// In case of a blr on x27..x29 you can end up corrupting the binary, but it is invaluable for debugging.
		// Debug frames are disabled in shipping code so this is not a big deal.
		if (m_config.debug_info)
		{
			// Call-chain tracing
			ASMBlock c;
			c.mov(x29, x28);
			c.mov(x28, x27);
			c.adr(x27, UASM::Reg(pc));
			c.insert(irb, f.getContext());
		}

		// Clean up any injected frames before the call
		if (function_info.stack_frame_size > 0)
		{
			frame_epilogue.insert(irb, f.getContext());
		}

		// Insert the next piece after the call, before the ret
		++where;
		ensure(llvm::isa<llvm::ReturnInst>(where));
		irb->SetInsertPoint(llvm::dyn_cast<llvm::Instruction>(where));

		if (instruction_info.callee_is_GHC && // Calls to C++ ABI will always return
			!instruction_info.is_indirect && // We don't know enough when calling indirectly to know if we'll return or not
			!is_faux_function(instruction_info.callee_name)) // Ignore branch patch-points and imposter functions. Their behavior is unreliable.
		{
			// We're making a one-way call. This branch shouldn't even bother linking as it will never return here.
			ASMBlock c;
			c.brk(0x99);
			c.insert(irb, f.getContext());
			return where;
		}

		// Patch the return path. No GHC call shall ever return to another. If we reach the function endpoint, immediately abort to GW
		auto thread_base_reg = get_base_register_for_call(f.getName().str());
		auto arg_index = static_cast<int>(thread_base_reg) - static_cast<int>(x19);

		ASMBlock c;
		auto thread_arg = ensure(f.getArg(arg_index)); // Guaranteed to hold our original 'thread'
		c.mov(x30, UASM::Var(thread_arg));
		c.ldr(x30, x30, UASM::Imm(m_config.hypervisor_context_offset));
		c.insert(irb, f.getContext());

		// Next
		return where;
	}
bool GHC_frame_preservation_pass::is_ret_instruction(const llvm::Instruction* i)
{
if (llvm::isa<llvm::ReturnInst>(i))
{
return true;
}
// Check for inline asm invoking "ret". This really shouldn't be a thing, but it is present in SPULLVMRecompiler for some reason.
if (auto ci = llvm::dyn_cast<llvm::CallInst>(i))
{
if (auto asm_ = llvm::dyn_cast<llvm::InlineAsm>(ci->getCalledOperand()))
{
if (asm_->getAsmString() == "ret")
{
return true;
}
}
}
return false;
}
bool GHC_frame_preservation_pass::is_inlined_call(const llvm::CallInst* ci)
{
const auto callee = ci->getCalledFunction();
if (!callee)
{
// Indirect BLR
return false;
}
const std::string callee_name = callee->getName().str();
if (callee_name.starts_with("llvm."))
{
// Intrinsic
return true;
}
if (callee->hasFnAttribute(llvm::Attribute::AlwaysInline))
{
// Assume LLVM always obeys this
return true;
}
return false;
}
bool GHC_frame_preservation_pass::is_faux_function(const std::string& function_name)
{
// Is it a branch patch-point?
if (function_name.find("-pp-") != umax)
{
return true;
}
// Now we search the known imposters list
if (m_config.faux_function_list.empty())
{
return false;
}
const auto& x = m_config.faux_function_list;
return std::find(x.begin(), x.end(), function_name) != x.end();
}
	// Patch a leaf GHC function: before every return, optionally emit call-chain
	// breadcrumbs, then reload x30 (LR) from the hypervisor context so the 'ret'
	// aborts back to the gateway instead of into a GHC frame.
	void GHC_frame_preservation_pass::process_leaf_function(llvm::IRBuilder<>* irb, llvm::Function& f)
	{
		for (auto& bb : f)
		{
			for (auto bit = bb.begin(); bit != bb.end();)
			{
				auto i = llvm::dyn_cast<llvm::Instruction>(bit);
				if (!is_ret_instruction(i))
				{
					++bit;
					continue;
				}

				// Insert sequence before the return
				irb->SetInsertPoint(llvm::dyn_cast<llvm::Instruction>(bit));

				if (m_config.debug_info)
				{
					// We need to save the chain return point.
					ASMBlock c;
					c.mov(x29, x28);
					c.mov(x28, x27);
					c.adr(x27, UASM::Reg(pc));
					c.insert(irb, f.getContext());
				}

				// Now we need to reload LR. We abuse the function's caller arg set for this to avoid messing with regs too much
				auto thread_base_reg = get_base_register_for_call(f.getName().str());
				auto arg_index = static_cast<int>(thread_base_reg) - static_cast<int>(x19);

				ASMBlock c;
				auto thread_arg = ensure(f.getArg(arg_index)); // Guaranteed to hold our original 'thread'
				c.mov(x30, UASM::Var(thread_arg));
				c.ldr(x30, x30, UASM::Imm(m_config.hypervisor_context_offset));
				c.insert(irb, f.getContext());

				if (bit != bb.end())
				{
					++bit;
				}
			}
		}
	}
}
| 20,764
|
C++
|
.cpp
| 471
| 31.656051
| 189
| 0.548283
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,379
|
vm.cpp
|
RPCS3_rpcs3/rpcs3/Emu/Memory/vm.cpp
|
#include "stdafx.h"
#include "vm_locking.h"
#include "vm_ptr.h"
#include "vm_ref.h"
#include "vm_reservation.h"
#include "Utilities/mutex.h"
#include "Utilities/Thread.h"
#include "Utilities/address_range.h"
#include "Utilities/JIT.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/Cell/SPURecompiler.h"
#include "Emu/perf_meter.hpp"
#include <deque>
#include <span>
#include "util/vm.hpp"
#include "util/asm.hpp"
#include "util/simd.hpp"
#include "util/serialization.hpp"
LOG_CHANNEL(vm_log, "VM");
void ppu_remove_hle_instructions(u32 addr, u32 size);
extern bool is_memory_compatible_for_copy_from_executable_optimization(u32 addr, u32 size);
namespace vm
{
	// Reserve 'size' bytes of host address space somewhere above '_addr',
	// probing upwards in 4 GiB steps (up to 0x8000'0000'0000). Throws when no
	// placement is accepted by the host.
	static u8* memory_reserve_4GiB(void* _addr, u64 size = 0x100000000, bool is_memory_mapping = false)
	{
		for (u64 addr = reinterpret_cast<u64>(_addr) + 0x100000000; addr < 0x8000'0000'0000; addr += 0x100000000)
		{
			if (auto ptr = utils::memory_reserve(size, reinterpret_cast<void*>(addr), is_memory_mapping))
			{
				return static_cast<u8*>(ptr);
			}
		}

		fmt::throw_exception("Failed to reserve vm memory");
	}
	// Emulated virtual memory
	u8* const g_base_addr = memory_reserve_4GiB(reinterpret_cast<void*>(0x2'0000'0000), 0x2'0000'0000, true);

	// Unprotected virtual memory mirror
	u8* const g_sudo_addr = g_base_addr + 0x1'0000'0000;

	// Auxiliary virtual memory for executable areas
	u8* const g_exec_addr = memory_reserve_4GiB(g_sudo_addr, 0x200000000);

	// Hooks for memory R/W interception (default: zero offset to some function with only ret instructions)
	u8* const g_hook_addr = memory_reserve_4GiB(g_exec_addr, 0x800000000);

	// Stats for debugging
	u8* const g_stat_addr = memory_reserve_4GiB(g_hook_addr);

	// For SPU
	u8* const g_free_addr = g_stat_addr + 0x1'0000'0000;

	// Reservation stats (64 bytes of state per 128-byte reservation granule)
	alignas(4096) u8 g_reservations[65536 / 128 * 64]{0};

	// Pointers to shared memory mirror or zeros for "normal" memory (one entry per 64K block)
	alignas(4096) atomic_t<u64> g_shmem[65536]{0};

	// Memory locations
	alignas(64) std::vector<std::shared_ptr<block_t>> g_locations;

	// Memory mutex acknowledgement (this thread's slot in g_locks, if any)
	thread_local atomic_t<cpu_thread*>* g_tls_locked = nullptr;

	// Memory mutex: passive locks
	std::array<atomic_t<cpu_thread*>, g_cfg.core.ppu_threads.max> g_locks{};

	// Range lock slot allocation bits
	atomic_t<u64, 64> g_range_lock_bits[2]{};

	// Select the allocation bitset: [0] = normal range locks, [1] = exclusive ranges
	auto& get_range_lock_bits(bool is_exclusive_range)
	{
		return g_range_lock_bits[+is_exclusive_range];
	}

	// Memory range lock slots (sparse atomics)
	atomic_t<u64, 64> g_range_lock_set[64]{};

	// Memory pages (one descriptor per 4K page of the 4 GiB guest space)
	std::array<memory_page, 0x100000000 / 4096> g_pages;
	// Attempt a single lock-free bump (+128) of the reservation timestamp at 'addr'.
	// Returns {success, observed time}; fails when the unique-lock bit was set
	// or the CAS lost a race.
	std::pair<bool, u64> try_reservation_update(u32 addr)
	{
		// Update reservation info with new timestamp
		auto& res = reservation_acquire(addr);
		const u64 rtime = res;

		return {!(rtime & vm::rsrv_unique_lock) && res.compare_and_swap_test(rtime, rtime + 128), rtime};
	}
	// Advance the reservation timestamp at 'addr', retrying until either our CAS
	// wins (then waiters are notified) or another thread has already moved the
	// time past the value we last observed.
	void reservation_update(u32 addr)
	{
		u64 old = -1;
		const auto cpu = get_current_cpu_thread();
		const bool had_wait = cpu && cpu->state & cpu_flag::wait;

		// Flag this thread as waiting while it spins (unless it already was)
		if (cpu && !had_wait)
		{
			cpu->state += cpu_flag::wait;
		}

		while (true)
		{
			const auto [ok, rtime] = try_reservation_update(addr);

			// Done if we won, or if someone else already advanced the time (compare
			// only the timestamp part, ignoring the low lock/flag bits)
			if (ok || (old & -128) < (rtime & -128))
			{
				if (ok)
				{
					reservation_notifier_notify(addr);
				}

				if (cpu && !had_wait && cpu->test_stopped())
				{
					//
				}

				return;
			}

			old = rtime;
		}
	}
	// Claim a free slot in g_locks for '_cpu', spinning round-robin over the
	// first g_cfg.core.ppu_threads entries until one is free, and remember the
	// slot in g_tls_locked for later release.
	static void _register_lock(cpu_thread* _cpu)
	{
		for (u32 i = 0, max = g_cfg.core.ppu_threads;;)
		{
			if (!g_locks[i] && g_locks[i].compare_and_swap_test(nullptr, _cpu))
			{
				g_tls_locked = g_locks.data() + i;
				break;
			}

			// Wrap around and keep trying
			if (++i == max) i = 0;
		}
	}
	// Allocate one slot from g_range_lock_set by atomically setting the lowest
	// clear bit of the allocation mask. Throws when all usable slots are taken.
	atomic_t<u64, 64>* alloc_range_lock()
	{
		const auto [bits, ok] = get_range_lock_bits(false).fetch_op([](u64& bits)
		{
			// MSB is reserved for locking with memory setting changes
			if ((~(bits | (bits + 1))) << 1) [[likely]]
			{
				// Set the lowest clear bit
				bits |= bits + 1;
				return true;
			}

			return false;
		});

		if (!ok) [[unlikely]]
		{
			fmt::throw_exception("Out of range lock bits");
		}

		// The freshly claimed slot index is the number of trailing ones in the old mask
		return &g_range_lock_set[std::countr_one(bits)];
	}
template <typename F>
static u64 for_all_range_locks(u64 input, F func);
// Slow path of range locking: spin until [begin, begin + size) has no
// conflicting exclusive lock and is readable, keeping the range published in
// *range_lock so writers can observe it.
void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size)
{
	perf_meter<"RHW_LOCK"_u64> perf0(0);

	cpu_thread* _cpu = nullptr;

	// Publish the requested range in our slot (if not already stored)
	if (u64 to_store = begin | (u64{size} << 32); *range_lock != to_store)
	{
		range_lock->store(to_store);
	}

	for (u64 i = 0;; i++)
	{
		// Shareable-memory redirection for the locked 64K page (0 if not shared)
		const u64 is_share = g_shmem[begin >> 16].load();

		// Check all exclusive locks for overlap with our range
		const u64 busy = for_all_range_locks(get_range_lock_bits(true), [&](u64 addr_exec, u32 size_exec)
		{
			u64 addr = begin;

			if ((size_exec & (range_full_mask >> 32)) == (range_locked >> 32)) [[likely]]
			{
				// Reservation-style lock: fixed 128-byte granularity
				size_exec = 128;

				if (is_share)
				{
					// Compare in the shared-memory address space
					addr = static_cast<u16>(addr) | is_share;
				}
			}

			// Strip the flag bits from the size field
			size_exec = (size_exec << range_bits) >> range_bits;

			// TODO (currently not possible): handle 2 64K pages (inverse range), or more pages
			if (u64 is_shared = g_shmem[addr_exec >> 16]) [[unlikely]]
			{
				addr_exec = static_cast<u16>(addr_exec) | is_shared;
			}

			// Inclusive overlap test
			if (addr <= addr_exec + size_exec - 1 && addr_exec <= addr + size - 1) [[unlikely]]
			{
				return 1;
			}

			return 0;
		});

		if (!busy) [[likely]]
		{
			if (vm::check_addr(begin, vm::page_readable, size)) [[likely]]
			{
				// Success: no exclusive conflict and the range is accessible
				break;
			}

			// Locate the first unreadable page in the range (if any)
			u32 test = umax;

			for (u32 i = begin / 4096, max = (begin + size - 1) / 4096; i <= max; i++)
			{
				if (!(g_pages[i] & (vm::page_readable)))
				{
					test = i * 4096;
					break;
				}
			}

			if (test != umax)
			{
				range_lock->release(0);

				if (!perf0)
				{
					perf0.restart();
				}

				// Try triggering a page fault (write)
				// TODO: Read memory if needed
				utils::trigger_write_page_fault(vm::base(test / 4096 == begin / 4096 ? begin : test));
				continue;
			}
		}

		// Wait a bit before accessing global lock
		range_lock->release(0);

		if (!perf0)
		{
			perf0.restart();
		}

		busy_wait(200);

		if (i >= 2 && !_cpu)
		{
			// After several failed spins, enter the wait state so suspending
			// threads can make progress
			_cpu = cpu_thread::get_current();

			if (_cpu)
			{
				_cpu->state += cpu_flag::wait + cpu_flag::temp;
			}
		}

		// Re-publish the range and retry
		range_lock->store(begin | (u64{size} << 32));
	}

	if (_cpu)
	{
		_cpu->check_state();
	}
}
// Return a range lock slot obtained from alloc_range_lock() to the free pool.
void free_range_lock(atomic_t<u64, 64>* range_lock) noexcept
{
	// The pointer must belong to the slot array
	if (range_lock < std::begin(g_range_lock_set) || range_lock >= std::end(g_range_lock_set))
	{
		fmt::throw_exception("Invalid range lock");
	}

	// Clear the slot's contents before releasing its allocation bit
	range_lock->release(0);

	// Derive the slot index from the pointer offset
	const auto slot = range_lock - std::begin(g_range_lock_set);

	g_range_lock_bits[0] &= ~(1ull << slot);
}
// Invoke func(addr, size) for every non-empty range lock slot whose bit is set
// in `input`. Returns the subset of input bits whose slot was non-empty AND for
// which func returned nonzero (i.e. the still-conflicting slots).
template <typename F>
FORCE_INLINE static u64 for_all_range_locks(u64 input, F func)
{
	u64 result = input;

	// Iterate over set bits, clearing the lowest one each pass
	for (u64 bits = input; bits; bits &= bits - 1)
	{
		const u32 id = std::countr_zero(bits);

		const u64 lock_val = g_range_lock_set[id].load();

		if (const u32 size = static_cast<u32>(lock_val >> 32)) [[unlikely]]
		{
			const u32 addr = static_cast<u32>(lock_val);

			if (func(addr, size)) [[unlikely]]
			{
				// Still conflicting: keep this bit set in the result
				continue;
			}
		}

		// Empty slot or no conflict: drop the bit
		result &= ~(1ull << id);
	}

	return result;
}
// Publish an exclusive/allocation lock over [addr, addr + size) in the
// reserved (last) slot and wait until all conflicting normal range locks are
// released. Returns the reserved slot; caller must release(0) it when done.
// NOTE(review): with size == 0 this returns a null pointer after a warning —
// callers appear to dereference the result unconditionally; confirm all
// callers pass size != 0.
static atomic_t<u64, 64>* _lock_main_range_lock(u64 flags, u32 addr, u32 size)
{
	// Shouldn't really happen
	if (size == 0)
	{
		vm_log.warning("Tried to lock empty range (flags=0x%x, addr=0x%x)", flags >> 32, addr);
		return {};
	}

	// Limit to <512 MiB at once; make sure if it operates on big amount of data, it's page-aligned
	if (size >= 512 * 1024 * 1024 || (size > 65536 && size % 4096))
	{
		fmt::throw_exception("Failed to lock range (flags=0x%x, addr=0x%x, size=0x%x)", flags >> 32, addr, size);
	}

	// Block or signal new range locks
	auto range_lock = &*std::prev(std::end(vm::g_range_lock_set));
	*range_lock = addr | u64{size} << 32 | flags;

	utils::prefetch_read(g_range_lock_set + 0);
	utils::prefetch_read(g_range_lock_set + 2);
	utils::prefetch_read(g_range_lock_set + 4);

	const auto range = utils::address_range::start_length(addr, size);

	u64 to_clear = get_range_lock_bits(false).load();

	// Wait for every overlapping normal range lock to clear
	while (to_clear)
	{
		to_clear = for_all_range_locks(to_clear, [&](u32 addr2, u32 size2)
		{
			if (range.overlaps(utils::address_range::start_length(addr2, size2))) [[unlikely]]
			{
				return 1;
			}

			return 0;
		});

		if (!to_clear) [[likely]]
		{
			break;
		}

		utils::pause();
	}

	return range_lock;
}
// Register the current thread's passive memory lock (if not yet registered)
// and wait out any active exclusive range lock before returning.
void passive_lock(cpu_thread& cpu)
{
	ensure(cpu.state & cpu_flag::wait);

	bool ok = true;

	if (!g_tls_locked || *g_tls_locked != &cpu) [[unlikely]]
	{
		// First time (or slot was lost): claim a passive lock slot
		_register_lock(&cpu);

		if (!get_range_lock_bits(true))
		{
			return;
		}

		ok = false;
	}

	if (!ok || cpu.state & cpu_flag::memory)
	{
		for (u64 i = 0;; i++)
		{
			if (cpu.is_paused())
			{
				// Assume called from cpu_thread::check_state(), it can handle the pause flags better
				return;
			}

			// Done once no exclusive lock remains active
			if (!get_range_lock_bits(true)) [[likely]]
			{
				return;
			}

			if (i < 100)
				busy_wait(200);
			else
				std::this_thread::yield();

			// Re-assert the wait flag if it was consumed while spinning
			if (cpu_flag::wait - cpu.state)
			{
				cpu.state += cpu_flag::wait;
			}
		}
	}
}
// Release this thread's passive memory lock slot and clear the pending
// memory-synchronization flag, if any.
void passive_unlock(cpu_thread& cpu)
{
	if (!g_tls_locked)
	{
		return;
	}

	g_tls_locked->release(nullptr);
	g_tls_locked = nullptr;

	if (cpu.state & cpu_flag::memory)
	{
		cpu.state -= cpu_flag::memory;
	}
}
// Temporarily drop the thread's passive lock (if held) and enter the wait
// state. Returns true if any state flag was actually added.
bool temporary_unlock(cpu_thread& cpu) noexcept
{
	bs_t<cpu_flag> flags_to_add = cpu_flag::wait;

	// If this thread currently owns a passive lock slot, release it and mark
	// the thread for memory synchronization
	if (g_tls_locked && g_tls_locked->compare_and_swap_test(&cpu, nullptr))
	{
		flags_to_add += cpu_flag::memory;
	}

	if (flags_to_add - cpu.state)
	{
		cpu.state += flags_to_add;
		return true;
	}

	return false;
}
// Convenience overload: temporarily unlock the calling cpu_thread, if any.
void temporary_unlock() noexcept
{
	const auto cpu = get_current_cpu_thread();

	if (cpu)
	{
		temporary_unlock(*cpu);
	}
}
// Default constructor: take the global exclusive lock (no slot, minimal range)
writer_lock::writer_lock() noexcept
	: writer_lock(0, nullptr, 1)
{
}
// Acquire exclusive memory access. With a null range_lock the global exclusive
// lock (all bits) is taken; otherwise the given slot is registered as
// exclusive, then all conflicting readers and passively-locked threads are
// waited out.
writer_lock::writer_lock(u32 const addr, atomic_t<u64, 64>* range_lock, u32 const size, u64 const flags) noexcept
	: range_lock(range_lock)
{
	cpu_thread* cpu{};

	if (g_tls_locked)
	{
		cpu = get_current_cpu_thread();

		AUDIT(cpu);

		// Only manage the state of our own, currently-registered thread
		if (*g_tls_locked != cpu || cpu->state & cpu_flag::wait)
		{
			cpu = nullptr;
		}
		else
		{
			cpu->state += cpu_flag::wait;
		}
	}

	bool to_prepare_memory = true;

	// Acquire the exclusive lock (global or per-slot)
	for (u64 i = 0;; i++)
	{
		auto& bits = get_range_lock_bits(true);

		if (!range_lock)
		{
			// Global: claim all exclusive bits at once
			if (!bits && bits.compare_and_swap_test(0, u64{umax}))
			{
				break;
			}
		}
		else
		{
			// Publish the range, then try to set this slot's exclusive bit
			range_lock->release(addr | u64{size} << 32 | flags);

			const auto diff = range_lock - g_range_lock_set;

			if (bits != umax && !bits.bit_test_set(static_cast<u32>(diff)))
			{
				break;
			}

			range_lock->release(0);
		}

		if (i < 100)
		{
			if (to_prepare_memory)
			{
				// We have some spare time, prepare cache lines (todo: reservation tests here)
				utils::prefetch_write(vm::get_super_ptr(addr));
				utils::prefetch_write(vm::get_super_ptr(addr) + 64);
				to_prepare_memory = false;
			}

			busy_wait(200);
		}
		else
		{
			std::this_thread::yield();

			// Thread may have been switched or the cache clue has been undermined, cache needs to be prepared again
			to_prepare_memory = true;
		}
	}

	if (range_lock)
	{
		perf_meter<"SUSPEND"_u64> perf0;

		// Flag all passively-locked threads for memory synchronization
		for (auto lock = g_locks.cbegin(), end = lock + g_cfg.core.ppu_threads; lock != end; lock++)
		{
			if (auto ptr = +*lock; ptr && ptr->state.none_of(cpu_flag::wait + cpu_flag::memory))
			{
				ptr->state.test_and_set(cpu_flag::memory);
			}
		}

		u64 addr1 = addr;

		if (u64 is_shared = g_shmem[addr >> 16]) [[unlikely]]
		{
			// Reservation address in shareable memory range
			addr1 = static_cast<u16>(addr) | is_shared;
		}

		utils::prefetch_read(g_range_lock_set + 0);
		utils::prefetch_read(g_range_lock_set + 2);
		utils::prefetch_read(g_range_lock_set + 4);

		u64 to_clear = get_range_lock_bits(false);

		// The 128-byte line our lock targets
		u64 point = addr1 / 128;

		// Wait for all normal range locks overlapping that line to clear
		while (true)
		{
			to_clear = for_all_range_locks(to_clear & ~get_range_lock_bits(true), [&](u64 addr2, u32 size2)
			{
				// Split and check every 64K page separately
				for (u64 hi = addr2 >> 16, max = (addr2 + size2 - 1) >> 16; hi <= max; hi++)
				{
					u64 addr3 = addr2;
					u64 size3 = std::min<u64>(addr2 + size2, utils::align(addr2, 0x10000)) - addr2;

					if (u64 is_shared = g_shmem[hi]) [[unlikely]]
					{
						addr3 = static_cast<u16>(addr2) | is_shared;
					}

					// Unsigned trick: true when `point` lies within the lock's 128-byte line span
					if (point - (addr3 / 128) <= (addr3 + size3 - 1) / 128 - (addr3 / 128)) [[unlikely]]
					{
						return 1;
					}

					addr2 += size3;
					size2 -= static_cast<u32>(size3);
				}

				return 0;
			});

			if (!to_clear) [[likely]]
			{
				break;
			}

			if (to_prepare_memory)
			{
				utils::prefetch_write(vm::get_super_ptr(addr));
				utils::prefetch_write(vm::get_super_ptr(addr) + 64);
				to_prepare_memory = false;
			}

			utils::pause();
		}

		// Wait for all passively-locked threads to acknowledge (enter wait state)
		for (auto lock = g_locks.cbegin(), end = lock + g_cfg.core.ppu_threads; lock != end; lock++)
		{
			if (auto ptr = +*lock)
			{
				while (!(ptr->state & cpu_flag::wait))
				{
					if (to_prepare_memory)
					{
						utils::prefetch_write(vm::get_super_ptr(addr));
						utils::prefetch_write(vm::get_super_ptr(addr) + 64);
						to_prepare_memory = false;
					}

					utils::pause();
				}
			}
		}
	}

	if (cpu)
	{
		cpu->state -= cpu_flag::memory + cpu_flag::wait;
	}
}
// Slow path: acquire the unique reservation lock at addr. Returns the previous
// timestamp on success, or -1 (umax) if the page turned out not writable.
u64 reservation_lock_internal(u32 addr, atomic_t<u64>& res)
{
	for (u64 i = 0;; i++)
	{
		// The lock is free when the low 7 bits (lock counter) are all clear
		if (u64 rtime = res; !(rtime & 127) && reservation_try_lock(res, rtime)) [[likely]]
		{
			return rtime;
		}

		if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
		{
			cpu->check_state();
		}
		else if (i < 15)
		{
			busy_wait(500);
		}
		else
		{
			// TODO: Accurate locking in this case
			if (!(g_pages[addr / 4096] & page_writable))
			{
				return -1;
			}

			std::this_thread::yield();
		}
	}
}
// Slow path: acquire a shared reservation lock by incrementing the lock
// counter, spinning while the unique-lock bit is held.
void reservation_shared_lock_internal(atomic_t<u64>& res)
{
	for (u64 i = 0;; i++)
	{
		auto [_oldd, _ok] = res.fetch_op([&](u64& r)
		{
			// Cannot share while a unique lock is active
			if (r & rsrv_unique_lock)
			{
				return false;
			}

			r += 1;
			return true;
		});

		if (_ok) [[likely]]
		{
			return;
		}

		if (auto cpu = get_current_cpu_thread(); cpu && cpu->state)
		{
			cpu->check_state();
		}
		else if (i < 15)
		{
			busy_wait(500);
		}
		else
		{
			std::this_thread::yield();
		}
	}
}
// Execute func under full thread suspension while holding a share (+1) of the
// reservation lock at addr (presumably taken by the caller — confirm).
void reservation_op_internal(u32 addr, std::function<bool()> func)
{
	auto& res = vm::reservation_acquire(addr);
	auto* ptr = vm::get_super_ptr(addr & -128);

	cpu_thread::suspend_all<+1>(get_current_cpu_thread(), {ptr, ptr + 64, &res}, [&]
	{
		if (func())
		{
			// Success, release the lock and progress
			// (+127 on top of the held +1 advances the timestamp by a full generation)
			res += 127;
		}
		else
		{
			// Only release the lock on failure
			res -= 1;
		}
	});
}
// Abort the current reservation operation: escape via the runtime for SPU
// threads, otherwise terminate the thread as an emergency exit.
[[noreturn]] void reservation_escape_internal()
{
	const auto _cpu = get_current_cpu_thread();

	if (_cpu && _cpu->get_class() == thread_class::ppu)
	{
		// TODO: PPU g_escape
	}

	if (_cpu && _cpu->get_class() == thread_class::spu)
	{
		// Unwinds back into the SPU dispatcher; does not return here
		spu_runtime::g_escape(static_cast<spu_thread*>(_cpu));
	}

	thread_ctrl::emergency_exit("vm::reservation_escape");
}
// Map pages at addr with the given protection flags, optionally backed by shm.
// search_shm locates a mirror mapping of the same shm inside a block; it is
// passed as a callback because it needs private member access.
static void _page_map(u32 addr, u8 flags, u32 size, utils::shm* shm, u64 bflags, std::pair<const u32, std::pair<u32, std::shared_ptr<utils::shm>>>* (*search_shm)(vm::block_t* block, utils::shm* shm))
{
	perf_meter<"PAGE_MAP"_u64> perf0;

	if (!size || (size | addr) % 4096 || flags & page_allocated)
	{
		fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
	}

	// Verify the target range is currently unmapped
	for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
	{
		if (g_pages[i])
		{
			fmt::throw_exception("Memory already mapped (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
		}
	}

	// If native page size exceeds 4096, don't map native pages (expected to be always mapped in this case)
	const bool is_noop = bflags & page_size_4k && utils::c_page_size > 4096;

	// Lock range being mapped
	auto range_lock = _lock_main_range_lock(range_allocation, addr, size);

	if (shm && shm->flags() != 0 && shm->info++)
	{
		// Check ref counter (using unused member info for it)
		if (shm->info == 2)
		{
			// Allocate shm object for itself
			u64 shm_self = reinterpret_cast<u64>(shm->map_self()) ^ range_locked;

			// Pre-set range-locked flag (real pointers are 47 bits)
			// 1. To simplify range_lock logic
			// 2. To make sure it never overlaps with 32-bit addresses
			// Also check that it's aligned (lowest 16 bits)
			ensure((shm_self & 0xffff'0000'0000'ffff) == range_locked);

			// Find another mirror and map it as shareable too
			for (auto& ploc : g_locations)
			{
				if (auto loc = ploc.get())
				{
					if (auto pp = search_shm(loc, shm))
					{
						auto& [size2, ptr] = pp->second;

						for (u32 i = pp->first / 65536; i < pp->first / 65536 + size2 / 65536; i++)
						{
							g_shmem[i].release(shm_self);

							// Advance to the next position
							shm_self += 0x10000;
						}
					}
				}
			}

			// Unsharing only happens on deallocation currently, so make sure all further refs are shared
			shm->info = 0xffff'ffff;
		}

		// Obtain existing pointer
		u64 shm_self = reinterpret_cast<u64>(shm->get()) ^ range_locked;

		// Check (see above)
		ensure((shm_self & 0xffff'0000'0000'ffff) == range_locked);

		// Map range as shareable
		for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
		{
			g_shmem[i].release(std::exchange(shm_self, shm_self + 0x10000));
		}
	}

	// Notify rsx that range has become valid
	// Note: This must be done *before* memory gets mapped while holding the vm lock, otherwise
	// the RSX might try to invalidate memory that got unmapped and remapped
	if (const auto rsxthr = g_fxo->try_get<rsx::thread>())
	{
		rsxthr->on_notify_memory_mapped(addr, size);
	}

	// Derive native protection from the page flags
	auto prot = utils::protection::rw;
	if (~flags & page_writable)
		prot = utils::protection::ro;
	if (~flags & page_readable)
		prot = utils::protection::no;

	std::string map_error;

	auto map_critical = [&](u8* ptr, utils::protection prot)
	{
		auto [res, error] = shm->map_critical(ptr, prot);

		if (res != ptr)
		{
			map_error = std::move(error);
			return false;
		}

		return true;
	};

	if (is_noop)
	{
	}
	else if (!shm)
	{
		utils::memory_protect(g_base_addr + addr, size, prot);

		perf_meter<"PAGE_LCK"_u64> perf;
		utils::memory_lock(g_base_addr + addr, size);
		utils::memory_lock(g_sudo_addr + addr, size);
	}
	else if (!map_critical(g_base_addr + addr, prot) || !map_critical(g_sudo_addr + addr, utils::protection::rw) || (map_error = "map_self()", !shm->map_self()))
	{
		fmt::throw_exception("Memory mapping failed (addr=0x%x, size=0x%x, flags=0x%x): %s", addr, size, flags, map_error);
	}

	if (flags & page_executable && !is_noop)
	{
		// TODO (dead code)
		utils::memory_commit(g_exec_addr + addr * 2, size * 2);

		if (g_cfg.core.ppu_debug)
		{
			utils::memory_commit(g_stat_addr + addr, size);
		}
	}

	// Publish the page flags last: pages become visible as allocated here
	for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
	{
		if (g_pages[i].exchange(flags | page_allocated))
		{
			fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
		}
	}

	range_lock->release(0);
}
// Change protection flags on an allocated range. Coalesces runs of pages that
// end up with identical flags and applies native protection once per run.
bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
{
	perf_meter<"PAGE_PRO"_u64> perf0;

	vm::writer_lock lock;

	if (!size || (size | addr) % 4096)
	{
		fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
	}

	// Bits requested to be both set and cleared cancel out
	const u8 flags_both = flags_set & flags_clear;

	flags_test |= page_allocated;
	flags_set &= ~flags_both;
	flags_clear &= ~flags_both;

	if (!check_addr(addr, flags_test, size))
	{
		return false;
	}

	if (!flags_set && !flags_clear)
	{
		return true;
	}

	// Choose some impossible value (not valid without page_allocated)
	u8 start_value = page_executable;

	// One extra iteration (i == end) flushes the final run
	for (u32 start = addr / 4096, end = start + size / 4096, i = start; i < end + 1; i++)
	{
		u8 new_val = page_executable;

		if (i < end)
		{
			new_val = g_pages[i];
			new_val |= flags_set;
			new_val &= ~flags_clear;
		}

		if (new_val != start_value)
		{
			const u8 old_val = g_pages[start];

			if (u32 page_size = (i - start) * 4096; page_size && old_val != start_value)
			{
				// Advertise which accesses stay valid during the transition
				u64 safe_bits = 0;

				if (old_val & start_value & page_readable)
					safe_bits |= range_readable;
				if (old_val & start_value & page_writable && safe_bits & range_readable)
					safe_bits |= range_writable;

				// Protect range locks from observing changes in memory protection
				auto range_lock = _lock_main_range_lock(safe_bits, start * 4096, page_size);

				for (u32 j = start; j < i; j++)
				{
					g_pages[j].release(start_value);
				}

				// Only touch native protection when read/write visibility changed
				if ((old_val ^ start_value) & (page_readable | page_writable))
				{
					const auto protection = start_value & page_writable ? utils::protection::rw : (start_value & page_readable ? utils::protection::ro : utils::protection::no);
					utils::memory_protect(g_base_addr + start * 4096, page_size, protection);
				}

				range_lock->release(0);
			}

			// Start a new run
			start_value = new_val;
			start = i;
		}
	}

	return true;
}
// Unmap previously mapped pages at addr (up to max_size), collecting RSX
// notification data into unmap_events. Returns the number of bytes unmapped.
static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm, std::vector<std::pair<u64, u64>>& unmap_events)
{
	perf_meter<"PAGE_UNm"_u64> perf0;

	if (!max_size || (max_size | addr) % 4096)
	{
		fmt::throw_exception("Invalid arguments (addr=0x%x, max_size=0x%x)", addr, max_size);
	}

	// If native page size exceeds 4096, don't unmap native pages (always mapped)
	const bool is_noop = bflags & page_size_4k && utils::c_page_size > 4096;

	// Determine deallocation size: stop at the first unallocated page
	// NOTE(review): if the first page is unallocated, size stays 0 and
	// _lock_main_range_lock below returns null — confirm callers never hit this.
	u32 size = 0;
	bool is_exec = false;

	for (u32 i = addr / 4096; i < addr / 4096 + max_size / 4096; i++)
	{
		if ((g_pages[i] & page_allocated) == 0)
		{
			break;
		}

		if (size == 0)
		{
			is_exec = !!(g_pages[i] & page_executable);
		}
		else
		{
			// Must be consistent
			ensure(is_exec == !!(g_pages[i] & page_executable));
		}

		size += 4096;
	}

	// Protect range locks from actual memory protection changes
	auto range_lock = _lock_main_range_lock(range_allocation, addr, size);

	if (shm && shm->flags() != 0 && g_shmem[addr >> 16])
	{
		// Drop a shared reference and clear the shareable mapping entries
		shm->info--;

		for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
		{
			g_shmem[i].release(0);
		}
	}

	// Clear page flags first so further accesses fault
	for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
	{
		if (!(g_pages[i] & page_allocated))
		{
			fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)", addr, size, i * 4096);
		}

		g_pages[i].release(0);
	}

	// Notify rsx to invalidate range
	// Note: This must be done *before* memory gets unmapped while holding the vm lock, otherwise
	// the RSX might try to call VirtualProtect on memory that is already unmapped
	if (auto rsxthr = g_fxo->try_get<rsx::thread>())
	{
		rsxthr->on_notify_pre_memory_unmapped(addr, size, unmap_events);
	}

	// Deregister PPU related data
	ppu_remove_hle_instructions(addr, size);

	// Actually unmap memory
	if (is_noop)
	{
		std::memset(g_sudo_addr + addr, 0, size);
	}
	else if (!shm)
	{
		utils::memory_protect(g_base_addr + addr, size, utils::protection::no);
		std::memset(g_sudo_addr + addr, 0, size);
	}
	else
	{
		shm->unmap_critical(g_base_addr + addr);
#ifdef _WIN32
		shm->unmap_critical(g_sudo_addr + addr);
#endif
	}

	if (is_exec && !is_noop)
	{
		utils::memory_decommit(g_exec_addr + addr * 2, size * 2);

		if (g_cfg.core.ppu_debug)
		{
			utils::memory_decommit(g_stat_addr + addr, size);
		}
	}

	range_lock->release(0);
	return size;
}
// Check that every page in [addr, addr + size) is allocated and carries all
// requested flag bits. An empty range always passes.
bool check_addr(u32 addr, u8 flags, u32 size)
{
	if (size == 0)
	{
		return true;
	}

	// Reject ranges that would wrap past the 4 GiB address space
	if (0x10000'0000ull - addr < size)
	{
		return false;
	}

	// Allocation is implicitly required
	flags |= page_allocated;

	const u32 last_page = (addr + size - 1) / 4096;

	u32 page = addr / 4096;

	while (page <= last_page)
	{
		const auto state = +g_pages[page];

		// Fail if any requested flag is missing
		if (~state & flags) [[unlikely]]
		{
			return false;
		}

		// Large-page entries allow skipping ahead to the next boundary
		if (state & page_1m_size)
		{
			page = utils::align(page + 1, 0x100000 / 4096);
		}
		else if (state & page_64k_size)
		{
			page = utils::align(page + 1, 0x10000 / 4096);
		}
		else
		{
			page++;
		}
	}

	return true;
}
// Allocate memory at any suitable address in the given location.
// Returns 0 on failure.
u32 alloc(u32 size, memory_location_t location, u32 align)
{
	if (const auto block = get(location))
	{
		return block->alloc(size, nullptr, align);
	}

	vm_log.error("vm::alloc(): Invalid memory location (%u)", +location);
	ensure(location < memory_location_max); // The only allowed locations to fail
	return 0;
}
// Allocate memory at the fixed address in the given location.
// Returns false on failure.
bool falloc(u32 addr, u32 size, memory_location_t location, const std::shared_ptr<utils::shm>* src)
{
	if (const auto block = get(location, addr))
	{
		return block->falloc(addr, size, src);
	}

	vm_log.error("vm::falloc(): Invalid memory location (%u, addr=0x%x)", +location, addr);
	ensure(location == any || location < memory_location_max); // The only allowed locations to fail
	return false;
}
// Deallocate memory at the given address in the given location.
// Returns the deallocated size, or 0 on failure.
u32 dealloc(u32 addr, memory_location_t location, const std::shared_ptr<utils::shm>* src)
{
	if (const auto block = get(location, addr))
	{
		return block->dealloc(addr, src);
	}

	vm_log.error("vm::dealloc(): Invalid memory location (%u, addr=0x%x)", +location, addr);
	ensure(location == any || location < memory_location_max); // The only allowed locations to fail
	return 0;
}
// Pin the sudo (supervisor) mirror of a page-aligned range into physical memory.
void lock_sudo(u32 addr, u32 size)
{
	perf_meter<"PAGE_LCK"_u64> perf;

	// Both bounds must be page-aligned
	ensure(addr % 4096 == 0);
	ensure(size % 4096 == 0);

	const bool locked = utils::memory_lock(g_sudo_addr + addr, size);

	if (!locked)
	{
		vm_log.error("Failed to lock sudo memory (addr=0x%x, size=0x%x). Consider increasing your system limits.", addr, size);
	}
}
// Mapped regions: addr -> (size, shm handle); stored inside the block's typemap member
constexpr auto block_map = &auto_typemap<block_t>::get<std::map<u32, std::pair<u32, std::shared_ptr<utils::shm>>>>;
// Try to map [addr, addr + size) inside this block. Returns false if any page
// in the range is already allocated. Declared const: it mutates only the
// typemap member and global page state.
bool block_t::try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&& shm) const
{
	// Check if memory area is already mapped
	for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
	{
		if (g_pages[i])
		{
			return false;
		}
	}

	// Carve out a guard page at each end for stack allocations
	const u32 page_addr = addr + (this->flags & stack_guarded ? 0x1000 : 0);
	const u32 page_size = size - (this->flags & stack_guarded ? 0x2000 : 0);

	// No flags are default to readable/writable
	// Explicit (un...) flags are used to protect from such access
	u8 flags = 0;

	if (~bflags & alloc_hidden)
	{
		flags |= page_readable;

		if (~bflags & alloc_unwritable)
		{
			flags |= page_writable;
		}
	}

	if (bflags & alloc_executable)
	{
		flags |= page_executable;
	}

	if ((bflags & page_size_mask) == page_size_64k)
	{
		flags |= page_64k_size;
	}
	else if (!(bflags & (page_size_mask & ~page_size_1m)))
	{
		flags |= page_1m_size;
	}

	if (this->flags & stack_guarded)
	{
		// Mark overflow/underflow guard pages as allocated
		ensure(!g_pages[addr / 4096].exchange(page_allocated));
		ensure(!g_pages[addr / 4096 + size / 4096 - 1].exchange(page_allocated));
	}

	// Map "real" memory pages; provide a function to search for mirrors with private member access
	_page_map(page_addr, flags, page_size, shm.get(), this->flags, [](vm::block_t* _this, utils::shm* shm)
	{
		auto& map = (_this->m.*block_map)();

		std::remove_reference_t<decltype(map)>::value_type* result = nullptr;

		// Check eligibility
		if (!_this || !(page_size_mask & _this->flags) || _this->addr < 0x20000000 || _this->addr >= 0xC0000000)
		{
			return result;
		}

		for (auto& pp : map)
		{
			if (pp.second.second.get() == shm)
			{
				// Found match
				return &pp;
			}
		}

		return result;
	});

	// Fill stack guards with STACKGRD
	if (this->flags & stack_guarded)
	{
		auto fill64 = [](u8* ptr, u64 data, usz count)
		{
#if defined(_M_X64) && defined(_MSC_VER)
			__stosq(reinterpret_cast<u64*>(ptr), data, count);
#elif defined(ARCH_X64)
			__asm__ ("mov %0, %%rdi; mov %1, %%rax; mov %2, %%rcx; rep stosq;"
				:
				: "r" (ptr), "r" (data), "r" (count)
				: "rdi", "rax", "rcx", "memory");
#else
			for (usz i = 0; i < count; i++)
				reinterpret_cast<u64*>(ptr)[i] = data;
#endif
		};

		const u32 enda = addr + size - 4096;
		fill64(g_sudo_addr + addr, "STACKGRD"_u64, 4096 / sizeof(u64));
		fill64(g_sudo_addr + enda, "UNDERFLO"_u64, 4096 / sizeof(u64));
	}

	// Add entry
	(m.*block_map)()[addr] = std::make_pair(size, std::move(shm));

	return true;
}
// Normalize block construction flags: default to 1 MiB pages; 4K-granularity
// blocks are fully preallocated, while all others drop stack guarding.
static constexpr u64 process_block_flags(u64 flags)
{
	// Default page size when none was specified
	if (!(flags & page_size_mask))
	{
		flags |= page_size_1m;
	}

	if (flags & page_size_4k)
	{
		flags |= preallocated;
	}
	else
	{
		flags &= ~stack_guarded;
	}

	return flags;
}
// Generate a monotonic, non-zero id for a new block_t (0 marks an invalidated block).
static u64 init_block_id()
{
	static atomic_t<u64> s_next_id = 1;
	return s_next_id.fetch_add(1);
}
// Construct a memory block descriptor; preallocated blocks map their whole
// range immediately through a single internal shm (enables 4K granularity).
block_t::block_t(u32 addr, u32 size, u64 flags)
	: m_id(init_block_id())
	, addr(addr)
	, size(size)
	, flags(process_block_flags(flags))
{
	if (this->flags & preallocated)
	{
		std::string map_error;

		// Captures m_common by reference; only invoked after it is assigned below
		auto map_critical = [&](u8* ptr, utils::protection prot)
		{
			auto [res, error] = m_common->map_critical(ptr, prot);

			if (res != ptr)
			{
				map_error = std::move(error);
				return false;
			}

			return true;
		};

		// Special path for whole-allocated areas allowing 4k granularity
		m_common = std::make_shared<utils::shm>(size, fmt::format("_block_x%08x", addr));

		if (!map_critical(vm::_ptr<u8>(addr), this->flags & page_size_4k && utils::c_page_size > 4096 ? utils::protection::rw : utils::protection::no) || !map_critical(vm::get_super_ptr(addr), utils::protection::rw))
		{
			fmt::throw_exception("Memory mapping failed (addr=0x%x, size=0x%x, flags=0x%x): %s", addr, size, flags, map_error);
		}
	}
}
// Invalidate the block and unmap all of its mappings. Returns false if it was
// already invalidated. Optionally collects RSX unmap event data for the caller.
bool block_t::unmap(std::vector<std::pair<u64, u64>>* unmapped)
{
	auto& m_map = (m.*block_map)();

	// Exchange-to-zero makes unmap idempotent
	if (m_id.exchange(0))
	{
		// Deallocate all memory
		for (auto it = m_map.begin(), end = m_map.end(); it != end;)
		{
			const auto next = std::next(it);
			const auto size = it->second.first;

			// Discarded when the caller did not request event data
			std::vector<std::pair<u64, u64>> event_data;
			ensure(size == _page_unmap(it->first, size, this->flags, it->second.second.get(), unmapped ? *unmapped : event_data));
			it = next;
		}

		if (m_common)
		{
			m_common->unmap_critical(vm::base(addr));
#ifdef _WIN32
			m_common->unmap_critical(vm::get_super_ptr(addr));
#endif
		}

		return true;
	}

	return false;
}
// Blocks must be explicitly unmapped (invalidated) before destruction
block_t::~block_t()
{
	ensure(!is_valid());
}
// Allocate orig_size bytes anywhere in the block with the given alignment.
// Returns the usable address (past the guard page, if any) or 0 on failure.
u32 block_t::alloc(const u32 orig_size, const std::shared_ptr<utils::shm>* src, u32 align, u64 flags)
{
	if (!src)
	{
		// Use the block's flags (except for protection)
		flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
	}

	// Determine minimal alignment
	const u32 min_page_size = flags & page_size_4k ? 0x1000 : 0x10000;

	// Align to minimal page size (plus two guard pages for stack allocations)
	const u32 size = utils::align(orig_size, min_page_size) + (flags & stack_guarded ? 0x2000 : 0);

	// Check alignment (it's page allocation, so passing small values there is just silly)
	if (align < min_page_size || align != (0x80000000u >> std::countl_zero(align)))
	{
		fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)", size, align);
	}

	// Return if size is invalid
	if (!orig_size || !size || orig_size > size || size > this->size)
	{
		return 0;
	}

	// Create or import shared memory object
	std::shared_ptr<utils::shm> shm;

	if (m_common)
		ensure(!src);
	else if (src)
		shm = *src;
	else
	{
		shm = std::make_shared<utils::shm>(size);
	}

	// Maximum aligned start address that still fits the allocation
	const u32 max = (this->addr + this->size - size) & (0 - align);

	u32 addr = utils::align(this->addr, align);

	if (this->addr > max || addr > max)
	{
		return 0;
	}

	vm::writer_lock lock;

	if (!is_valid())
	{
		// Expired block
		return 0;
	}

	// Search for an appropriate place (unoptimized)
	for (;; addr += align)
	{
		if (try_alloc(addr, flags, size, std::move(shm)))
		{
			return addr + (flags & stack_guarded ? 0x1000 : 0);
		}

		if (addr == max)
		{
			break;
		}
	}

	return 0;
}
// Allocate at the fixed address addr (aligned down to the minimal page size).
// Returns false on invalid arguments or if the range is already occupied.
bool block_t::falloc(u32 addr, const u32 orig_size, const std::shared_ptr<utils::shm>* src, u64 flags)
{
	if (!src)
	{
		// Use the block's flags (except for protection)
		flags = (this->flags & ~alloc_prot_mask) | (flags & alloc_prot_mask);
	}

	// Determine minimal alignment
	const u32 min_page_size = flags & page_size_4k ? 0x1000 : 0x10000;

	// Take address misalignment into account
	const u32 size0 = orig_size + addr % min_page_size;

	// Align to minimal page size
	const u32 size = utils::align(size0, min_page_size);

	// Return if addr or size is invalid
	// If shared memory is provided, addr/size must be aligned
	if (!size ||
		addr < this->addr ||
		orig_size > size0 ||
		orig_size > size ||
		(addr - addr % min_page_size) + u64{size} > this->addr + u64{this->size} ||
		(src && (orig_size | addr) % min_page_size) ||
		flags & stack_guarded)
	{
		return false;
	}

	// Force aligned address
	addr -= addr % min_page_size;

	// Create or import shared memory object
	std::shared_ptr<utils::shm> shm;

	if (m_common)
		ensure(!src);
	else if (src)
		shm = *src;
	else
	{
		shm = std::make_shared<utils::shm>(size);
	}

	vm::writer_lock lock;

	if (!is_valid())
	{
		// Expired block
		return false;
	}

	if (!try_alloc(addr, flags, size, std::move(shm)))
	{
		return false;
	}

	return true;
}
// Deallocate the mapping at addr (optionally verifying the shm it was created
// with). Returns the deallocated size, or 0 if not found / mismatched.
u32 block_t::dealloc(u32 addr, const std::shared_ptr<utils::shm>* src) const
{
	auto& m_map = (m.*block_map)();
	{
		// Sends RSX post-unmap notifications on scope exit, after the writer
		// lock below has been released (destruction order)
		struct notify_t
		{
			std::vector<std::pair<u64, u64>> event_data;

			~notify_t() noexcept
			{
				if (auto rsxthr = g_fxo->try_get<rsx::thread>())
				{
					for (const auto& [event_data1, event_data2] : event_data)
					{
						rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
					}
				}
			}
		} unmap_notification;

		vm::writer_lock lock;

		// The map key is the base address including the leading guard page
		const auto found = m_map.find(addr - (flags & stack_guarded ? 0x1000 : 0));

		if (found == m_map.end())
		{
			return 0;
		}

		if (src && found->second.second.get() != src->get())
		{
			return 0;
		}

		// Get allocation size
		const auto size = found->second.first - (flags & stack_guarded ? 0x2000 : 0);

		if (flags & stack_guarded)
		{
			// Clear guard pages
			ensure(g_pages[addr / 4096 - 1].exchange(0) == page_allocated);
			ensure(g_pages[addr / 4096 + size / 4096].exchange(0) == page_allocated);
		}

		// Unmap "real" memory pages
		ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get(), unmap_notification.event_data));

		// Clear stack guards
		if (flags & stack_guarded)
		{
			std::memset(g_sudo_addr + addr - 4096, 0, 4096);
			std::memset(g_sudo_addr + addr + size, 0, 4096);
		}

		// Remove entry
		m_map.erase(found);

		return size;
	}
}
// Look up the mapping containing [addr, addr + size). With size == 0 the start
// address must match a mapping exactly. Returns {map_addr, shm} on success or
// {addr, nullptr} on failure.
std::pair<u32, std::shared_ptr<utils::shm>> block_t::peek(u32 addr, u32 size) const
{
	if (addr < this->addr || addr + u64{size} > this->addr + u64{this->size})
	{
		return {addr, nullptr};
	}

	auto& m_map = (m.*block_map)();

	vm::writer_lock lock;

	// Candidate is the last mapping starting at or before addr
	const auto upper = m_map.upper_bound(addr);

	if (upper == m_map.begin())
	{
		return {addr, nullptr};
	}

	const auto found = std::prev(upper);

	// Exact address condition (size == 0)
	if (size == 0 && found->first != addr)
	{
		return {addr, nullptr};
	}

	// Special case: preallocated blocks keep no per-mapping shm objects
	if (m_common)
	{
		return {addr, nullptr};
	}

	// Range check
	if (addr + u64{size} > found->first + u64{found->second.second->size()})
	{
		return {addr, nullptr};
	}

	return {found->first, found->second.second};
}
// Sum of usable (guard-excluded) sizes of all mappings; writer lock must be held.
u32 block_t::imp_used(const vm::writer_lock&) const
{
	const u32 guard_size = flags & stack_guarded ? 0x2000 : 0;

	u32 total = 0;

	for (const auto& [map_addr, info] : (m.*block_map)())
	{
		total += info.first - guard_size;
	}

	return total;
}
// Public wrapper: acquire the writer lock, then compute the used byte total.
u32 block_t::used()
{
	vm::writer_lock lock;

	return imp_used(lock);
}
// Append (shm, addr) pairs of all mappings to `shared`. Preallocated blocks
// own a single internal shm and contribute nothing.
void block_t::get_shared_memory(std::vector<std::pair<utils::shm*, u32>>& shared)
{
	if (flags & preallocated)
	{
		return;
	}

	auto& m_map = (m.*block_map)();

	shared.reserve(shared.size() + m_map.size());

	for (const auto& [map_addr, info] : m_map)
	{
		shared.emplace_back(info.second.get(), map_addr);
	}
}
// Find the address at which the given shm handle is mapped in this block.
// Returns 0 when not found (or for preallocated blocks).
u32 block_t::get_shm_addr(const std::shared_ptr<utils::shm>& shared)
{
	if (flags & preallocated)
	{
		return 0;
	}

	for (auto& [map_addr, info] : (m.*block_map)())
	{
		if (info.second == shared)
		{
			return map_addr;
		}
	}

	return 0;
}
static bool check_cache_line_zero(const void* ptr)
{
const auto p = reinterpret_cast<const v128*>(ptr);
const v128 _1 = p[0] | p[1];
const v128 _2 = p[2] | p[3];
const v128 _3 = p[4] | p[5];
const v128 _4 = p[6] | p[7];
const v128 _5 = _1 | _2;
const v128 _6 = _3 | _4;
const v128 _7 = _5 | _6;
return gv_testz(_7);
}
// Save or load a memory region, skipping zero 128-byte cache lines. The stream
// format is a bitmap (one bit per 128-byte line) followed by only the non-zero
// lines' data.
static void serialize_memory_bytes(utils::serial& ar, u8* ptr, usz size)
{
	ensure((size % 4096) == 0);

	// Each bitmap byte covers eight 128-byte lines (1024 bytes of memory)
	constexpr usz byte_of_pages = 128 * 8;

	std::vector<u8> bit_array(size / byte_of_pages);

	if (ar.is_writing())
	{
		auto data_ptr = ptr;

		for (usz iter_count = 0; iter_count < bit_array.size(); iter_count++, data_ptr += byte_of_pages)
		{
			u8 bitmap = 0;

			// Test two lines per pass
			for (usz i = 0; i < byte_of_pages; i += 128 * 2)
			{
				const u64 sample64_1 = read_from_ptr<u64>(data_ptr, i);
				const u64 sample64_2 = read_from_ptr<u64>(data_ptr, i + 128);

				// Speed up testing in scenarios where it is likely non-zero data
				if (sample64_1 && sample64_2)
				{
					bitmap |= 3u << (i / 128);
					continue;
				}

				bitmap |= (check_cache_line_zero(data_ptr + i + 0) ? 0 : 1) << (i / 128);
				bitmap |= (check_cache_line_zero(data_ptr + i + 128) ? 0 : 2) << (i / 128);
			}

			// bitmap of 1024 bytes (bit is 128-byte)
			ar(bitmap);
			bit_array[iter_count] = bitmap;
		}
	}
	else
	{
		// Load bitmap
		ar(std::span<u8>(bit_array.data(), bit_array.size()));
	}

	ar.breathe();

	// Serialize the data itself, consuming the bitmap 32 bits (4 KiB) at a time
	for (usz iter_count = 0; size; iter_count += sizeof(u32), ptr += byte_of_pages * sizeof(u32))
	{
		const u32 bitmap = read_from_ptr<le_t<u32>>(bit_array, iter_count);
		size -= byte_of_pages * sizeof(bitmap);

		for (usz i = 0; i < byte_of_pages * sizeof(bitmap);)
		{
			// Count the run of consecutive non-zero lines starting at offset i
			usz block_count = 0;

			for (usz bit = i / 128; bit < sizeof(bitmap) * 8 && (bitmap & (1u << bit)) != 0;)
			{
				bit++;
				block_count++;
			}

			if (!block_count)
			{
				i += 128;
				continue;
			}

			// Read/write the whole run at once
			ar(std::span<u8>(ptr + i, block_count * 128));
			i += block_count * 128;
		}

		if (iter_count % 256 == 0)
		{
			ar.breathe();
		}
	}

	ar.breathe();
}
// Serialize the block and all of its mappings. Preallocated blocks store their
// raw (zero-compressed) memory; others store an index into the shared shm table.
void block_t::save(utils::serial& ar, std::map<utils::shm*, usz>& shared)
{
	auto& m_map = (m.*block_map)();

	ar(addr, size, flags);

	for (const auto& [addr, shm] : m_map)
	{
		// Assume first page flags represent all the map
		ar(g_pages[addr / 4096 + !!(flags & stack_guarded)]);

		ar(addr);
		ar(shm.first);

		if (flags & preallocated)
		{
			// Do not save memory which matches the memory found in the executable (we can use it instead)
			if (is_memory_compatible_for_copy_from_executable_optimization(addr, shm.first))
			{
				// Revert changes (drop the page flags, addr and size written above)
				ar.data.resize(ar.data.size() - (sizeof(u32) * 2 + sizeof(memory_page)));
				ar.seek_end();
				vm_log.success("Removed memory block matching the memory of the executable from savestate. (addr=0x%x, size=0x%x)", addr, shm.first);
				continue;
			}

			// Save raw binary image
			const u32 guard_size = flags & stack_guarded ? 0x1000 : 0;
			serialize_memory_bytes(ar, vm::get_super_ptr<u8>(addr + guard_size), shm.first - guard_size * 2);
		}
		else
		{
			// Save index of shm
			ar(shared[shm.second.get()]);
		}
	}

	// Terminator
	ar(u8{0});
}
// Deserialize a block from a savestate (inverse of block_t::save).
// 'shared' is the previously loaded shared-memory table, referenced by index.
block_t::block_t(utils::serial& ar, std::vector<std::shared_ptr<utils::shm>>& shared)
	: m_id(init_block_id())
	, addr(ar)
	, size(ar)
	, flags(ar)
{
	if (flags & preallocated)
	{
		// Recreate the backing storage and map it at both the normal and "super" mirrors
		m_common = std::make_shared<utils::shm>(size, fmt::format("_block_x%08x", addr));
		m_common->map_critical(vm::base(addr), this->flags & page_size_4k && utils::c_page_size > 4096 ? utils::protection::rw : utils::protection::no);
		m_common->map_critical(vm::get_super_ptr(addr));
	}

	std::shared_ptr<utils::shm> null_shm;

	while (true)
	{
		const u8 flags0 = ar;

		if (!(flags0 & page_allocated))
		{
			// Terminator found
			break;
		}

		const u32 addr0 = ar;
		const u32 size0 = ar;

		// Translate saved page flags back into allocation flags for try_alloc()
		u64 pflags = 0;

		if (flags0 & page_executable)
		{
			pflags |= alloc_executable;
		}

		if (~flags0 & page_writable)
		{
			pflags |= alloc_unwritable;
		}

		if (~flags0 & page_readable)
		{
			pflags |= alloc_hidden;
		}

		if ((flags & page_size_64k) == page_size_64k)
		{
			pflags |= page_size_64k;
		}
		else if (!(flags & (page_size_mask & ~page_size_1m)))
		{
			pflags |= page_size_1m;
		}

		// Map the memory through the same method as alloc() and falloc()
		// Copy the shared handle unconditionally
		ensure(try_alloc(addr0, pflags, size0, ::as_rvalue(flags & preallocated ? null_shm : shared[ar.pop<usz>()])));

		if (flags & preallocated)
		{
			// Load binary image, skipping guard pages (mirrors block_t::save)
			const u32 guard_size = flags & stack_guarded ? 0x1000 : 0;
			serialize_memory_bytes(ar, vm::get_super_ptr<u8>(addr0 + guard_size), size0 - guard_size * 2);
		}
	}
}
// Thin helper: forward to block_t::unmap, optionally collecting the unmapped ranges.
bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u64, u64>>* unmapped = nullptr)
{
	const bool ok = block->unmap(unmapped);
	return ok;
}
// Check whether [addr, addr+size) is a valid range that collides with no existing block.
static bool _test_map(u32 addr, u32 size)
{
	const auto candidate = utils::address_range::start_length(addr, size);

	// Reject invalid ranges outright
	if (!candidate.valid())
	{
		return false;
	}

	// The range is usable only if no registered location overlaps it
	for (const auto& loc : g_locations)
	{
		if (loc && candidate.overlaps(utils::address_range::start_length(loc->addr, loc->size)))
		{
			return false;
		}
	}

	return true;
}
// Find a free aligned slot in the user area [0x10000000, 0xC0000000) and
// create a block there. Returns null if no slot fits.
static std::shared_ptr<block_t> _find_map(u32 size, u32 align, u64 flags)
{
	// Highest base address that could still fit the allocation, aligned down
	const u32 ceiling = (0xC0000000 - size) & (0 - align);

	if (size > 0xC0000000 - 0x10000000 || ceiling < 0x10000000)
	{
		return nullptr;
	}

	// Scan bottom-up in steps of 'align'
	u32 candidate = utils::align<u32>(0x10000000, align);

	while (true)
	{
		if (_test_map(candidate, size))
		{
			return std::make_shared<block_t>(candidate, size, flags);
		}

		if (candidate == ceiling)
		{
			return nullptr;
		}

		candidate += align;
	}
}
// Create and register a block at a fixed address (caller must hold the writer lock).
// Returns null if the range is occupied; throws on malformed arguments or corrupted page state.
static std::shared_ptr<block_t> _map(u32 addr, u32 size, u64 flags)
{
	// Both address and size must be non-zero and 4k-aligned
	if (!size || (size | addr) % 4096)
	{
		fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
	}

	if (!_test_map(addr, size))
	{
		return nullptr;
	}

	// Sanity check: a free region must not have any pages already allocated
	for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
	{
		if (g_pages[i])
		{
			fmt::throw_exception("Unexpected pages allocated (current_addr=0x%x)", i * 4096);
		}
	}

	auto block = std::make_shared<block_t>(addr, size, flags);

	g_locations.emplace_back(block);

	return block;
}
// Resolve a memory location: by fixed index when 'location' is specific,
// otherwise by searching for the block containing 'addr'.
static std::shared_ptr<block_t> _get_map(memory_location_t location, u32 addr)
{
	if (location != any)
	{
		// A specific location was requested: return it if registered
		return location < g_locations.size() ? g_locations[location] : nullptr;
	}

	// Search location by address
	for (const auto& candidate : g_locations)
	{
		if (candidate && addr >= candidate->addr && addr - candidate->addr < candidate->size)
		{
			return candidate;
		}
	}

	return nullptr;
}
// Public fixed-address mapping entry point: lock, then delegate to _map.
std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags)
{
	vm::writer_lock lock;

	auto result = _map(addr, size, flags);
	return result;
}
// Public entry point: find a free slot of at least 'orig_size' bytes with the
// given alignment, register it as a new location and return it.
std::shared_ptr<block_t> find_map(u32 orig_size, u32 align, u64 flags)
{
	vm::writer_lock lock;

	// Round the requested size up to the 64k allocation granularity
	const u32 size = utils::align(orig_size, 0x10000);

	// Alignment must be a power of two and at least 64k
	if (align < 0x10000 || align != (0x80000000u >> std::countl_zero(align)))
	{
		fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)", size, align);
	}

	// A zero size (empty or overflowed request) yields no block
	if (!size)
	{
		return nullptr;
	}

	const auto result = _find_map(size, align, flags);

	if (result)
	{
		g_locations.emplace_back(result);
	}

	return result;
}
// Remove a user-created block at 'addr' (or the exact block given by 'ptr').
// Returns {block, true} on success, {block, false} if the block is still in use,
// and {} when no matching block was found (or 'ptr' mismatched).
std::pair<std::shared_ptr<block_t>, bool> unmap(u32 addr, bool must_be_empty, const std::shared_ptr<block_t>* ptr)
{
	if (ptr)
	{
		addr = (*ptr)->addr;
	}

	std::pair<std::shared_ptr<block_t>, bool> result{};

	// Deferred RSX notifications: declared BEFORE the writer lock, so its
	// destructor fires after the lock has been released
	struct notify_t
	{
		std::vector<std::pair<u64, u64>> unmap_data;

		~notify_t() noexcept
		{
			if (auto rsxthr = g_fxo->try_get<rsx::thread>())
			{
				for (const auto& [event_data1, event_data2] : unmap_data)
				{
					rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
				}
			}
		}
	} unmap_notifications;

	vm::writer_lock lock;

	// Only dynamically created blocks (beyond the fixed memory_location_max slots) qualify
	for (auto it = g_locations.begin() + memory_location_max; it != g_locations.end(); it++)
	{
		if (*it && (*it)->addr == addr)
		{
			// Filter by block flavor depending on the caller's intent
			if (must_be_empty && (*it)->flags & bf0_mask)
			{
				continue;
			}

			if (!must_be_empty && ((*it)->flags & bf0_mask) != bf0_0x2)
			{
				continue;
			}

			if (ptr && *it != *ptr)
			{
				return {};
			}

			if (must_be_empty && (*it)->imp_used(lock))
			{
				// Block is still referenced: report it without unmapping
				result.first = *it;
				return result;
			}

			result.first = std::move(*it);
			g_locations.erase(it);
			ensure(_unmap_block(result.first, &unmap_notifications.unmap_data));
			result.second = true;
			return result;
		}
	}

	return {};
}
// Public lookup entry point: lock, then delegate to _get_map.
std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
{
	vm::writer_lock lock;

	auto result = _get_map(location, addr);
	return result;
}
// Return the map for 'location'/'addr', creating it on demand:
// - a known-but-unallocated location gets a deferred _find_map allocation;
// - otherwise a fixed-address _map(addr, area_size) is attempted.
std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags)
{
	vm::writer_lock lock;

	// Return an existing map if there is one
	if (auto area = _get_map(location, addr))
	{
		return area;
	}

	// Allocation on arbitrary address
	if (location != any && location < g_locations.size())
	{
		// return selected location
		auto& loc = g_locations[location];

		if (!loc)
		{
			// Deferred allocation
			loc = _find_map(area_size, 0x10000000, flags);
		}

		return loc;
	}

	// Fixed address allocation. The original code repeated _get_map here, but that
	// lookup is dead: the arguments are identical and g_locations cannot change
	// between the two calls while the writer lock is held.
	return _map(addr, area_size, flags);
}
// Copy 'size' bytes between guest memory at 'addr' and 'ptr' ('is_write' gives
// the direction). Returns false if the pages lack the required permission.
static bool try_access_internal(u32 addr, void* ptr, u32 size, bool is_write)
{
	if (vm::check_addr(addr, is_write ? page_writable : page_readable, size))
	{
		void* src = vm::g_sudo_addr + addr;
		void* dst = ptr;

		// For writes, swap so data flows from 'ptr' into guest memory
		if (is_write)
			std::swap(src, dst);

		// Naturally aligned power-of-two sizes up to 16 bytes get an atomic release store
		if (size <= 16 && (size & (size - 1)) == 0 && (addr & (size - 1)) == 0)
		{
			if (is_write)
			{
				switch (size)
				{
				case 1: atomic_storage<u8>::release(*static_cast<u8*>(dst), *static_cast<u8*>(src)); break;
				case 2: atomic_storage<u16>::release(*static_cast<u16*>(dst), *static_cast<u16*>(src)); break;
				case 4: atomic_storage<u32>::release(*static_cast<u32*>(dst), *static_cast<u32*>(src)); break;
				case 8: atomic_storage<u64>::release(*static_cast<u64*>(dst), *static_cast<u64*>(src)); break;
				case 16: atomic_storage<u128>::release(*static_cast<u128*>(dst), *static_cast<u128*>(src)); break;
				}

				return true;
			}
		}

		// Plain memcpy for reads and for unaligned/large writes
		std::memcpy(dst, src, size);
		return true;
	}

	return false;
}
// Safely copy between guest memory and 'ptr' while coordinating with the
// global range lock (used by RSX and other writers). Spins until the range is
// free or provably accessible; returns false if access is denied.
bool try_access(u32 begin, void* ptr, u32 size, bool is_write)
{
	auto* range_lock = alloc_range_lock(); // Released at the end of function

	auto mem_lock = &*std::prev(std::end(vm::g_range_lock_set));

	while (true)
	{
		// Publish our intended range, then validate it against the global lock
		range_lock->store(begin | (u64{size} << 32));

		const u64 lock_val = mem_lock->load();
		const u64 is_share = g_shmem[begin >> 16].load();

		u64 lock_addr = static_cast<u32>(lock_val); // -> u64
		u32 lock_size = static_cast<u32>(lock_val << range_bits >> (range_bits + 32));

		u64 addr = begin;

		// A full "range_locked" value protects a single 128-byte cache line;
		// shared memory translates the address into the shm key space
		if ((lock_val & range_full_mask) == range_locked) [[likely]]
		{
			lock_size = 128;

			if (is_share)
			{
				addr = static_cast<u16>(addr) | is_share;
				lock_addr = lock_val;
			}
		}

		if (addr + size <= lock_addr || addr >= lock_addr + lock_size) [[likely]]
		{
			// No overlap with the locked range
			if (vm::check_addr(begin, is_write ? page_writable : page_readable, size)) [[likely]]
			{
				// Re-validate: the global lock must not have changed meanwhile
				const u64 new_lock_val = mem_lock->load();

				if (!new_lock_val || new_lock_val == lock_val) [[likely]]
				{
					break;
				}
			}
			else
			{
				free_range_lock(range_lock);
				return false;
			}
		}
		else if (lock_val & range_readable && lock_val & range_writable)
		{
			// Probably a safe case of page protection change
			break;
		}
		else if (!is_write && lock_val & range_readable)
		{
			// Same but for read-only access
			break;
		}
		else if ((lock_val & range_full_mask) != range_locked)
		{
			free_range_lock(range_lock);
			return false;
		}

		// Wait a bit before accessing global lock
		range_lock->release(0);
		busy_wait(200);
	}

	const bool result = try_access_internal(begin, ptr, size, is_write);
	free_range_lock(range_lock);
	return result;
}
inline namespace ps3_
{
	// Backing store for vm::g_hook_addr (32 GiB of hook space)
	static utils::shm s_hook{0x800000000, ""};

	// Initialize the PS3 guest memory layout: log the host address ranges,
	// reset page/reservation/lock state and create the fixed memory locations.
	void init()
	{
		vm_log.notice("Guest memory bases address ranges:\n"
		"vm::g_base_addr = %p - %p\n"
		"vm::g_sudo_addr = %p - %p\n"
		"vm::g_exec_addr = %p - %p\n"
		"vm::g_hook_addr = %p - %p\n"
		"vm::g_stat_addr = %p - %p\n"
		"vm::g_reservations = %p - %p\n",
		g_base_addr, g_base_addr + 0xffff'ffff,
		g_sudo_addr, g_sudo_addr + 0xffff'ffff,
		g_exec_addr, g_exec_addr + 0x200000000 - 1,
		g_hook_addr, g_hook_addr + 0x800000000 - 1,
		g_stat_addr, g_stat_addr + 0xffff'ffff,
		g_reservations, g_reservations + sizeof(g_reservations) - 1);

		std::memset(&g_pages, 0, sizeof(g_pages));

		// Fixed location table; indices correspond to memory_location_t
		g_locations =
		{
			std::make_shared<block_t>(0x00010000, 0x0FFF0000, page_size_64k | preallocated), // main
			nullptr, // user 64k pages
			nullptr, // user 1m pages
			nullptr, // rsx context
			std::make_shared<block_t>(0xC0000000, 0x10000000, page_size_64k | preallocated), // video
			std::make_shared<block_t>(0xD0000000, 0x10000000, page_size_4k | preallocated | stack_guarded | bf0_0x1), // stack
			std::make_shared<block_t>(0xE0000000, 0x20000000, page_size_64k), // SPU reserved
		};

		std::memset(g_reservations, 0, sizeof(g_reservations));
		std::memset(g_shmem, 0, sizeof(g_shmem));
		std::memset(g_range_lock_set, 0, sizeof(g_range_lock_set));
		std::memset(g_range_lock_bits, 0, sizeof(g_range_lock_bits));

#ifdef _WIN32
		// Windows: drop the reservation so the shm can be mapped in its place
		utils::memory_release(g_hook_addr, 0x800000000);
#endif
		ensure(s_hook.map(g_hook_addr, utils::protection::rw, true));
	}
}
// Tear down all memory locations and decommit the auxiliary address ranges.
void close()
{
	{
		vm::writer_lock lock;

		for (auto& block : g_locations)
		{
			if (block) _unmap_block(block);
		}

		g_locations.clear();
	}

	utils::memory_decommit(g_exec_addr, 0x200000000);
	utils::memory_decommit(g_stat_addr, 0x100000000);

#ifdef _WIN32
	// Windows: re-reserve the hook area after unmapping its shm (mirrors init())
	s_hook.unmap(g_hook_addr);
	ensure(utils::memory_reserve(0x800000000, g_hook_addr));
#else
	utils::memory_decommit(g_hook_addr, 0x800000000);
#endif

	std::memset(g_range_lock_set, 0, sizeof(g_range_lock_set));
	std::memset(g_range_lock_bits, 0, sizeof(g_range_lock_bits));
}
// Serialize the whole virtual memory state: the deduplicated shared-memory
// table first, then every memory location (via block_t::save).
void save(utils::serial& ar)
{
	// Shared memory lookup, sample address is saved for easy memory copy
	// Just need one address for this optimization
	std::vector<std::pair<utils::shm*, u32>> shared;

	for (auto& loc : g_locations)
	{
		if (loc) loc->get_shared_memory(shared);
	}

	std::map<utils::shm*, usz> shared_map;

#ifndef _MSC_VER
	// NOTE(review): std::unique removes only *adjacent* duplicates — this assumes
	// get_shared_memory reports duplicates consecutively; verify that invariant
	shared.erase(std::unique(shared.begin(), shared.end(), [](auto& a, auto& b) { return a.first == b.first; }), shared.end());
#else
	// Workaround for bugged std::unique
	for (auto it = shared.begin(); it != shared.end();)
	{
		if (shared_map.count(it->first))
		{
			it = shared.erase(it);
			continue;
		}

		shared_map.emplace(it->first, 0);
		it++;
	}

	shared_map.clear();
#endif

	// Assign each unique shm its index in the serialized table
	for (auto& p : shared)
	{
		shared_map.emplace(p.first, &p - shared.data());
	}

	// TODO: proper serialization of std::map
	ar(static_cast<usz>(shared_map.size()));

	for (const auto& [shm, addr] : shared)
	{
		// Save shared memory: flags, size, then the raw bytes (read via the sample address)
		ar(shm->flags());
		ar(shm->size());

		serialize_memory_bytes(ar, vm::get_super_ptr<u8>(addr), shm->size());
	}

	// TODO: Serialize std::vector direcly
	ar(g_locations.size());

	for (auto& loc : g_locations)
	{
		// One presence byte per location, followed by the block payload if present
		const u8 has = loc.operator bool();
		ar(has);

		if (loc)
		{
			loc->save(ar, shared_map);
		}
	}

	is_memory_compatible_for_copy_from_executable_optimization(0, 0); // Cleanup internal data
}
// Restore the virtual memory state saved by vm::save(): load the shared-memory
// table, drop all current locations, then rebuild each block from the stream.
void load(utils::serial& ar)
{
	std::vector<std::shared_ptr<utils::shm>> shared;

	const usz shared_size = ar.pop<usz>();

	// Sanity check the count against the size of the savestate itself
	if (!shared_size || ar.get_size(umax) / 4096 < shared_size)
	{
		fmt::throw_exception("Invalid VM serialization state: shared_size=0x%x, ar=%s", shared_size, ar);
	}

	shared.resize(shared_size);

	for (auto& shm : shared)
	{
		// Load shared memory: flags and size first, then the raw image
		const u32 flags = ar.pop<u32>();
		const u64 size = ar.pop<u64>();
		shm = std::make_shared<utils::shm>(size, flags);

		// Load binary image
		// elad335: I'm not proud about it as well.. (ideal situation is to not call map_self())
		serialize_memory_bytes(ar, shm->map_self(), shm->size());
	}

	// Discard the current layout before rebuilding it from the stream
	for (auto& block : g_locations)
	{
		if (block) _unmap_block(block);
	}

	g_locations.clear();
	g_locations.resize(ar.pop<usz>());

	for (auto& loc : g_locations)
	{
		// One presence byte per location (mirrors vm::save)
		const u8 has = ar.pop<u8>();

		if (has)
		{
			loc = std::make_shared<block_t>(ar, shared);
		}
	}
}
// Find the guest address at which the given shared memory is mapped, or 0.
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared)
{
	// Query every location until one reports a mapping address
	for (const auto& loc : g_locations)
	{
		if (!loc)
		{
			continue;
		}

		if (const u32 found = loc->get_shm_addr(shared))
		{
			return found;
		}
	}

	return 0;
}
// Append a guest string at 'addr' (at most 'max_size' bytes) to out_string.
// Returns true when a null terminator was found, or when exactly max_size bytes
// were consumed; false on unreadable pages (when check_pages) or when the
// 32-bit address space ends before the limit.
bool read_string(u32 addr, u32 max_size, std::string& out_string, bool check_pages) noexcept
{
	if (!max_size)
	{
		return true;
	}

	// Prevent overflow: clamp the readable span to the end of the address space
	const u32 size = 0 - max_size < addr ? (0 - addr) : max_size;

	// Walk page by page so each page's readability can be validated
	for (u32 i = addr, end = utils::align(addr + size, 4096) - 1; i <= end;)
	{
		if (check_pages && !vm::check_addr(i, vm::page_readable))
		{
			// Invalid string termination
			return false;
		}

		const char* s_start = vm::get_super_ptr<const char>(i);
		const u32 space = std::min<u32>(end - i + 1, 4096 - (i % 4096));

		const char* s_end = s_start + space;
		const char* s_null = std::find(s_start, s_end, '\0');

		// Append string
		out_string.append(s_start, s_null);

		// Recheck for zeroes after append (guest memory may change concurrently)
		const usz old_size = out_string.size();
		out_string.erase(std::find(out_string.end() - (s_null - s_start), out_string.end(), '\0'), out_string.end());

		if (out_string.size() != old_size || s_null != s_end)
		{
			// Null terminated
			return true;
		}

		i += space;

		if (!i)
		{
			// Wrapped past the end of the 32-bit address space
			break;
		}
	}

	// Non-null terminated but terminated by size limit (so the string may continue)
	return size == max_size;
}
}
// Format a vm pointer-to-void as its raw 32-bit address value
void fmt_class_string<vm::_ptr_base<const void, u32>>::format(std::string& out, u64 arg)
{
	fmt_class_string<u32>::format(out, arg);
}
// Format a vm string pointer: print the quoted string contents if readable,
// otherwise an explicit invalid-address marker.
void fmt_class_string<vm::_ptr_base<const char, u32>>::format(std::string& out, u64 arg)
{
	// Special case (may be allowed for some arguments)
	if (arg == 0)
	{
		out += reinterpret_cast<const char*>(u8"«NULL»");
		return;
	}

	const u32 addr = ::narrow<u32>(arg);

	// Filter certainly invalid addresses
	if (!vm::check_addr(addr, vm::page_readable))
	{
		out += reinterpret_cast<const char*>(u8"«INVALID_ADDRESS:");
		fmt_class_string<u32>::format(out, arg);
		out += reinterpret_cast<const char*>(u8"»");
		return;
	}

	const auto start = out.size();

	out += reinterpret_cast<const char*>(u8"“");

	if (!vm::read_string(addr, umax, out, true))
	{
		// Revert changes: the string ran into unreadable memory
		out.resize(start);
		out += reinterpret_cast<const char*>(u8"«INVALID_ADDRESS:");
		fmt_class_string<u32>::format(out, arg);
		out += reinterpret_cast<const char*>(u8"»");
		return;
	}

	out += reinterpret_cast<const char*>(u8"”");
}
| 56,472
|
C++
|
.cpp
| 1,987
| 24.464519
| 211
| 0.618234
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,380
|
rsx_utils.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_utils.cpp
|
#include "stdafx.h"
#include "rsx_utils.h"
#include "rsx_methods.h"
#include "Emu/RSX/GCM.h"
#include "Emu/Cell/Modules/cellVideoOut.h"
#include "Overlays/overlays.h"
#ifdef _MSC_VER
#pragma warning(push, 0)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
extern "C"
{
#include "libswscale/swscale.h"
}
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
#include "util/sysinfo.hpp"
namespace rsx
{
atomic_t<u64> g_rsx_shared_tag{ 0 };
// Convert and scale an image between pixel formats using libswscale.
// src_slice_h is the number of source rows to process; 'bilinear' selects
// fast-bilinear filtering, otherwise nearest-neighbor (SWS_POINT).
void convert_scale_image(u8 *dst, AVPixelFormat dst_format, int dst_width, int dst_height, int dst_pitch,
	const u8 *src, AVPixelFormat src_format, int src_width, int src_height, int src_pitch, int src_slice_h, bool bilinear)
{
	// RAII wrapper: sws_freeContext releases the context on scope exit
	std::unique_ptr<SwsContext, void(*)(SwsContext*)> sws(sws_getContext(src_width, src_height, src_format,
		dst_width, dst_height, dst_format, bilinear ? SWS_FAST_BILINEAR : SWS_POINT, nullptr, nullptr, nullptr), sws_freeContext);
	sws_scale(sws.get(), &src, &src_pitch, 0, src_slice_h, &dst, &dst_pitch);
}
// Copy a clip_w x clip_h sub-rectangle starting at (clip_x, clip_y) from src
// into dst, honoring the distinct source and destination pitches.
void clip_image(u8 *dst, const u8 *src, int clip_x, int clip_y, int clip_w, int clip_h, int bpp, int src_pitch, int dst_pitch)
{
	const u32 bytes_per_row = clip_w * bpp;

	const u8* in_row = src + clip_y * src_pitch + clip_x * bpp;
	u8* out_row = dst;

	for (int row = 0; row < clip_h; row++)
	{
		std::memcpy(out_row, in_row, bytes_per_row);
		in_row += src_pitch;
		out_row += dst_pitch;
	}
}
// Like clip_image, but safe when src and dst ranges overlap: the clipped
// rectangle is staged through 'buffer' before being written out.
void clip_image_may_overlap(u8 *dst, const u8 *src, int clip_x, int clip_y, int clip_w, int clip_h, int bpp, int src_pitch, int dst_pitch, u8 *buffer)
{
	const u32 row_bytes = bpp * clip_w;

	// Stage the whole rectangle into the intermediate buffer first
	const u8* read_ptr = src + clip_y * src_pitch + clip_x * bpp;
	u8* stage = buffer;

	for (int row = 0; row < clip_h; row++)
	{
		std::memcpy(stage, read_ptr, row_bytes);
		read_ptr += src_pitch;
		stage += row_bytes;
	}

	// Flush the staged rows into the destination
	stage = buffer;
	u8* write_ptr = dst;

	for (int row = 0; row < clip_h; row++)
	{
		std::memcpy(write_ptr, stage, row_bytes);
		stage += row_bytes;
		write_ptr += dst_pitch;
	}
}
// Convert the decoded integer CONSTANT_BLEND_FACTOR registers into a
// normalized f32 array in the 0-1 range.
std::array<float, 4> get_constant_blend_colors()
{
	// TODO: check another color formats (probably all integer formats with > 8-bits wide channels)
	if (rsx::method_registers.surface_color() == rsx::surface_color_format::w16z16y16x16)
	{
		// 16-bit wide channels: normalize against 65535
		return
		{
			rsx::method_registers.blend_color_16b_r() / 65535.f,
			rsx::method_registers.blend_color_16b_g() / 65535.f,
			rsx::method_registers.blend_color_16b_b() / 65535.f,
			rsx::method_registers.blend_color_16b_a() / 65535.f
		};
	}

	// 8-bit wide channels: normalize against 255
	return
	{
		rsx::method_registers.blend_color_8b_r() / 255.f,
		rsx::method_registers.blend_color_8b_g() / 255.f,
		rsx::method_registers.blend_color_8b_b() / 255.f,
		rsx::method_registers.blend_color_8b_a() / 255.f
	};
}
// Fit an aspect-correct rectangle of the requested aspect inside a frame of
// the given dimensions, centering it along the constrained axis.
template <typename T>
area_base<T> convert_aspect_ratio_impl(const size2_base<T>& output_dimensions, double aspect)
{
	// Ratio between the requested aspect and the frame's own aspect
	const double frame_aspect = 1. * output_dimensions.width / output_dimensions.height;
	const double ratio = aspect / frame_aspect;

	area_base<T> fitted;

	if (ratio > 1.)
	{
		// Content is wider than the frame: letterbox (bars top and bottom)
		const auto fitted_height = static_cast<T>(output_dimensions.height / ratio);
		fitted.x1 = 0;
		fitted.x2 = output_dimensions.width;
		fitted.y1 = (output_dimensions.height - fitted_height) / 2;
		fitted.y2 = fitted.y1 + fitted_height;
	}
	else if (ratio < 1.)
	{
		// Content is narrower than the frame: pillarbox (bars left and right)
		const auto fitted_width = static_cast<T>(output_dimensions.width * ratio);
		fitted.y1 = 0;
		fitted.y2 = output_dimensions.height;
		fitted.x1 = (output_dimensions.width - fitted_width) / 2;
		fitted.x2 = fitted.x1 + fitted_width;
	}
	else
	{
		// Aspects already match: use the whole frame
		fitted = { 0, 0, output_dimensions.width, output_dimensions.height };
	}

	return fitted;
}
// Derive the initial display aspect ratio from the emulator configuration.
avconf::avconf() noexcept
{
	if (g_cfg.video.aspect_ratio == video_aspect::_4_3)
	{
		aspect = CELL_VIDEO_OUT_ASPECT_4_3;
	}
	else
	{
		// 16:9, which is also the fallback for any unknown setting
		aspect = CELL_VIDEO_OUT_ASPECT_16_9;
	}
}
// Map the display buffer color format to the matching GCM texture format.
u32 avconf::get_compatible_gcm_format() const
{
	if (format == CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_R16G16B16X16_FLOAT)
	{
		return CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT;
	}

	if (format != CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8R8G8B8 && format != CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8B8G8R8)
	{
		// Unknown formats are reported and fall back to ARGB8
		rsx_log.error("Invalid AV format 0x%x", format);
	}

	return CELL_GCM_TEXTURE_A8R8G8B8;
}
// Bytes per pixel of the display buffer format.
u8 avconf::get_bpp() const
{
	if (format == CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_R16G16B16X16_FLOAT)
	{
		return 8;
	}

	if (format != CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8R8G8B8 && format != CELL_VIDEO_OUT_BUFFER_COLOR_FORMAT_X8B8G8R8)
	{
		// Unknown formats are reported and fall back to 32bpp
		rsx_log.error("Invalid AV format 0x%x", format);
	}

	return 4;
}
// Numeric value of the configured display aspect ratio; throws on invalid state.
double avconf::get_aspect_ratio() const
{
	if (aspect == CELL_VIDEO_OUT_ASPECT_16_9)
	{
		return 16. / 9.;
	}

	if (aspect == CELL_VIDEO_OUT_ASPECT_4_3)
	{
		return 4. / 3.;
	}

	fmt::throw_exception("Invalid aspect ratio %d", aspect);
}
// Unconstrained aspect conversion: keep the image height and derive the width
// from the display aspect ratio. Empty input yields an empty result.
size2u avconf::aspect_convert_dimensions(const size2u& image_dimensions) const
{
	if (!image_dimensions.width || !image_dimensions.height)
	{
		rsx_log.trace("Empty region passed to aspect-correct conversion routine [size]. This should never happen.");
		return {};
	}

	const u32 adjusted_width = static_cast<u32>(image_dimensions.height * get_aspect_ratio());
	return size2u{ adjusted_width, image_dimensions.height };
}
// Compute the aspect-correct output rectangle for blitting 'image_dimensions'
// onto a physical display of 'output_dimensions', going through the configured
// virtual display resolution.
areau avconf::aspect_convert_region(const size2u& image_dimensions, const size2u& output_dimensions) const
{
	if (const auto test = image_dimensions * output_dimensions;
		test.width == 0 || test.height == 0)
	{
		rsx_log.trace("Empty region passed to aspect-correct conversion routine [region]. This should never happen.");
		return {};
	}

	// Fit the input image into the virtual display 'window'
	const auto source_aspect = 1. * image_dimensions.width / image_dimensions.height;
	const auto virtual_output = size2u{ resolution_x, resolution_y };
	const auto area1 = convert_aspect_ratio_impl(virtual_output, source_aspect);

	// Fit the virtual display into the physical display
	const auto area2 = convert_aspect_ratio_impl(output_dimensions, get_aspect_ratio());

	// Merge the two regions. Since aspect ratio was conserved between both transforms, a simple scale can be used
	const double stretch_x = 1. * area2.width() / virtual_output.width;
	const double stretch_y = 1. * area2.height() / virtual_output.height;

	return static_cast<areau>(static_cast<aread>(area1) * size2d { stretch_x, stretch_y }) + size2u{ area2.x1, area2.y1 };
}
#ifdef TEXTURE_CACHE_DEBUG
tex_cache_checker_t tex_cache_checker = {};
#endif
}
| 7,220
|
C++
|
.cpp
| 201
| 33.0199
| 151
| 0.708113
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,381
|
gcm_printing.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/gcm_printing.cpp
|
#include "gcm_printing.h"
#include "rsx_decode.h"
#include <unordered_map>
#include <Utilities/StrFmt.h>
namespace
{
// Table of RSX method opcodes known to this printing module. Register ranges
// (vertex arrays, textures, transform constants/programs) are expanded via the
// EXPAND_RANGE_* macros.
constexpr u32 opcode_list[] = {NV4097_SET_VIEWPORT_HORIZONTAL, NV4097_SET_VIEWPORT_VERTICAL,
	NV4097_SET_SCISSOR_HORIZONTAL, NV4097_SET_SCISSOR_VERTICAL, NV4097_SET_SURFACE_CLIP_HORIZONTAL,
	NV4097_SET_SURFACE_CLIP_VERTICAL, NV4097_SET_CLEAR_RECT_HORIZONTAL,
	NV4097_SET_CLEAR_RECT_VERTICAL, NV3089_CLIP_POINT, NV3089_CLIP_SIZE, NV3089_IMAGE_OUT_POINT,
	NV3089_IMAGE_OUT_SIZE, NV3089_IMAGE_IN_SIZE, NV3062_SET_PITCH, NV308A_POINT,
	NV4097_SET_DEPTH_TEST_ENABLE, NV4097_SET_DEPTH_MASK, NV4097_SET_ALPHA_TEST_ENABLE,
	NV4097_SET_STENCIL_TEST_ENABLE, NV4097_SET_RESTART_INDEX_ENABLE,
	NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE, NV4097_SET_LOGIC_OP_ENABLE, NV4097_SET_DITHER_ENABLE,
	NV4097_SET_BLEND_ENABLE, NV4097_SET_LINE_SMOOTH_ENABLE, NV4097_SET_POLY_OFFSET_POINT_ENABLE,
	NV4097_SET_POLY_OFFSET_LINE_ENABLE, NV4097_SET_POLY_OFFSET_FILL_ENABLE,
	NV4097_SET_CULL_FACE_ENABLE, NV4097_SET_POLY_SMOOTH_ENABLE,
	NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE, NV4097_SET_TWO_SIDE_LIGHT_EN,
	NV4097_SET_RESTART_INDEX, NV4097_SET_SURFACE_COLOR_AOFFSET, NV4097_SET_SURFACE_COLOR_BOFFSET,
	NV4097_SET_SURFACE_COLOR_COFFSET, NV4097_SET_SURFACE_COLOR_DOFFSET, NV4097_SET_SURFACE_PITCH_A,
	NV4097_SET_SURFACE_PITCH_B, NV4097_SET_SURFACE_PITCH_C, NV4097_SET_SURFACE_PITCH_D,
	NV4097_SET_SURFACE_ZETA_OFFSET, NV4097_SET_SURFACE_PITCH_Z,
	NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK, NV4097_SET_SHADER_CONTROL,
	NV4097_SET_VERTEX_DATA_BASE_OFFSET, NV4097_SET_INDEX_ARRAY_ADDRESS,
	NV4097_SET_VERTEX_DATA_BASE_INDEX, NV4097_SET_SHADER_PROGRAM,
	NV4097_SET_TRANSFORM_PROGRAM_START, NV406E_SET_CONTEXT_DMA_SEMAPHORE, NV406E_SEMAPHORE_OFFSET, NV4097_SET_SEMAPHORE_OFFSET,
	NV3089_IMAGE_IN_OFFSET, NV3062_SET_OFFSET_DESTIN, NV309E_SET_OFFSET, NV3089_DS_DX, NV3089_DT_DY,
	NV0039_PITCH_IN, NV0039_PITCH_OUT, NV0039_LINE_LENGTH_IN, NV0039_LINE_COUNT, NV0039_OFFSET_OUT,
	NV0039_OFFSET_IN, NV4097_SET_VERTEX_ATTRIB_INPUT_MASK, NV4097_SET_FREQUENCY_DIVIDER_OPERATION,
	NV4097_SET_DEPTH_BOUNDS_MIN, NV4097_SET_DEPTH_BOUNDS_MAX, NV4097_SET_FOG_PARAMS,
	NV4097_SET_FOG_PARAMS + 1, NV4097_SET_CLIP_MIN, NV4097_SET_CLIP_MAX,
	NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR, NV4097_SET_POLYGON_OFFSET_BIAS,
	NV4097_SET_VIEWPORT_SCALE, NV4097_SET_VIEWPORT_SCALE + 1, NV4097_SET_VIEWPORT_SCALE + 2,
	NV4097_SET_VIEWPORT_SCALE + 3, NV4097_SET_VIEWPORT_OFFSET, NV4097_SET_VIEWPORT_OFFSET + 1,
	NV4097_SET_VIEWPORT_OFFSET + 2, NV4097_SET_VIEWPORT_OFFSET + 3, NV4097_SET_DEPTH_FUNC,
	NV4097_SET_STENCIL_FUNC, NV4097_SET_BACK_STENCIL_FUNC, NV4097_SET_STENCIL_OP_FAIL,
	NV4097_SET_STENCIL_OP_ZFAIL, NV4097_SET_STENCIL_OP_ZPASS, NV4097_SET_BACK_STENCIL_OP_FAIL,
	NV4097_SET_BACK_STENCIL_OP_ZFAIL, NV4097_SET_BACK_STENCIL_OP_ZPASS, NV4097_SET_LOGIC_OP,
	NV4097_SET_FRONT_FACE, NV4097_SET_CULL_FACE, NV4097_SET_SURFACE_COLOR_TARGET,
	NV4097_SET_FOG_MODE, NV4097_SET_ALPHA_FUNC, NV4097_SET_BEGIN_END, NV3089_SET_OPERATION,
	NV3089_SET_COLOR_FORMAT, NV3089_SET_CONTEXT_SURFACE, NV3062_SET_COLOR_FORMAT,
	NV4097_SET_STENCIL_FUNC_REF, NV4097_SET_BACK_STENCIL_FUNC_REF, NV4097_SET_STENCIL_FUNC_MASK,
	NV4097_SET_BACK_STENCIL_FUNC_MASK, NV4097_SET_ALPHA_REF, NV4097_SET_COLOR_CLEAR_VALUE,
	NV4097_SET_STENCIL_MASK, NV4097_SET_BACK_STENCIL_MASK, NV4097_SET_BLEND_EQUATION,
	NV4097_SET_BLEND_FUNC_SFACTOR, NV4097_SET_BLEND_FUNC_DFACTOR, NV4097_SET_COLOR_MASK,
	NV4097_SET_SHADER_WINDOW, NV4097_SET_BLEND_ENABLE_MRT, NV4097_SET_USER_CLIP_PLANE_CONTROL,
	NV4097_SET_LINE_WIDTH, NV4097_SET_SURFACE_FORMAT, NV4097_SET_WINDOW_OFFSET,
	NV4097_SET_ZSTENCIL_CLEAR_VALUE, NV4097_SET_INDEX_ARRAY_DMA, NV4097_SET_CONTEXT_DMA_COLOR_A,
	NV4097_SET_CONTEXT_DMA_COLOR_B, NV4097_SET_CONTEXT_DMA_COLOR_C, NV4097_SET_CONTEXT_DMA_COLOR_D,
	NV4097_SET_CONTEXT_DMA_ZETA, NV3089_SET_CONTEXT_DMA_IMAGE, NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN,
	NV309E_SET_CONTEXT_DMA_IMAGE, NV0039_SET_CONTEXT_DMA_BUFFER_OUT,
	NV0039_SET_CONTEXT_DMA_BUFFER_IN, NV4097_SET_CONTEXT_DMA_REPORT, NV3089_IMAGE_IN_FORMAT,
	NV309E_SET_FORMAT, NV0039_FORMAT, NV4097_SET_BLEND_COLOR2, NV4097_SET_BLEND_COLOR,
	NV3089_IMAGE_IN, NV4097_NO_OPERATION, NV4097_INVALIDATE_VERTEX_CACHE_FILE,
	NV4097_INVALIDATE_VERTEX_FILE, NV4097_SET_ANTI_ALIASING_CONTROL, NV4097_SET_FRONT_POLYGON_MODE,
	NV4097_SET_BACK_POLYGON_MODE, NV406E_SET_REFERENCE, NV406E_SEMAPHORE_RELEASE, NV406E_SEMAPHORE_ACQUIRE,
	NV4097_SET_ZCULL_EN, NV4097_SET_ZCULL_STATS_ENABLE, NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE,
	EXPAND_RANGE_16(0, DECLARE_VERTEX_DATA_ARRAY_FORMAT)
	EXPAND_RANGE_16(0, DECLARE_VERTEX_DATA_ARRAY_OFFSET)
	EXPAND_RANGE_32(0, DECLARE_TRANSFORM_CONSTANT) NV4097_SET_TRANSFORM_CONSTANT_LOAD,
	NV4097_DRAW_ARRAYS, NV4097_DRAW_INDEX_ARRAY,
	EXPAND_RANGE_32(0, DECLARE_TRANSFORM_PROGRAM) NV4097_SET_TRANSFORM_PROGRAM_LOAD,
	EXPAND_RANGE_16(0, DECLARE_TEXTURE_OFFSET) EXPAND_RANGE_16(0, DECLARE_TEXTURE_FORMAT)
	EXPAND_RANGE_16(0, DECLARE_TEXTURE_IMAGE_RECT) EXPAND_RANGE_16(0, DECLARE_TEXTURE_CONTROL0)
	EXPAND_RANGE_16(0, DECLARE_TEXTURE_CONTROL3) EXPAND_RANGE_4(0, DECLARE_VERTEX_TEXTURE_CONTROL0)};
#define KEY_STR(key) { key, #key }
const std::unordered_map<u32, std::string_view> methods_name =
{
{NV406E_SET_REFERENCE, "NV406E_SET_REFERENCE"},
{NV406E_SET_CONTEXT_DMA_SEMAPHORE, "NV406E_SET_CONTEXT_DMA_SEMAPHORE"},
{NV406E_SEMAPHORE_OFFSET, "NV406E_SEMAPHORE_OFFSET"},
{NV406E_SEMAPHORE_ACQUIRE, "NV406E_SEMAPHORE_ACQUIRE"},
{NV406E_SEMAPHORE_RELEASE, "NV406E_SEMAPHORE_RELEASE"},
{NV4097_SET_OBJECT, "NV4097_SET_OBJECT"},
{NV4097_NO_OPERATION, "NV4097_NO_OPERATION"},
{NV4097_NOTIFY, "NV4097_NOTIFY"},
{NV4097_WAIT_FOR_IDLE, "NV4097_WAIT_FOR_IDLE"},
{NV4097_PM_TRIGGER, "NV4097_PM_TRIGGER"},
{NV4097_SET_CONTEXT_DMA_NOTIFIES, "NV4097_SET_CONTEXT_DMA_NOTIFIES"},
{NV4097_SET_CONTEXT_DMA_A, "NV4097_SET_CONTEXT_DMA_A"},
{NV4097_SET_CONTEXT_DMA_B, "NV4097_SET_CONTEXT_DMA_B"},
{NV4097_SET_CONTEXT_DMA_COLOR_B, "NV4097_SET_CONTEXT_DMA_COLOR_B"},
{NV4097_SET_CONTEXT_DMA_STATE, "NV4097_SET_CONTEXT_DMA_STATE"},
{NV4097_SET_CONTEXT_DMA_COLOR_A, "NV4097_SET_CONTEXT_DMA_COLOR_A"},
{NV4097_SET_CONTEXT_DMA_ZETA, "NV4097_SET_CONTEXT_DMA_ZETA"},
{NV4097_SET_CONTEXT_DMA_VERTEX_A, "NV4097_SET_CONTEXT_DMA_VERTEX_A"},
{NV4097_SET_CONTEXT_DMA_VERTEX_B, "NV4097_SET_CONTEXT_DMA_VERTEX_B"},
{NV4097_SET_CONTEXT_DMA_SEMAPHORE, "NV4097_SET_CONTEXT_DMA_SEMAPHORE"},
{NV4097_SET_CONTEXT_DMA_REPORT, "NV4097_SET_CONTEXT_DMA_REPORT"},
{NV4097_SET_CONTEXT_DMA_CLIP_ID, "NV4097_SET_CONTEXT_DMA_CLIP_ID"},
{NV4097_SET_CONTEXT_DMA_CULL_DATA, "NV4097_SET_CONTEXT_DMA_CULL_DATA"},
{NV4097_SET_CONTEXT_DMA_COLOR_C, "NV4097_SET_CONTEXT_DMA_COLOR_C"},
{NV4097_SET_CONTEXT_DMA_COLOR_D, "NV4097_SET_CONTEXT_DMA_COLOR_D"},
{NV4097_SET_SURFACE_CLIP_HORIZONTAL, "NV4097_SET_SURFACE_CLIP_HORIZONTAL"},
{NV4097_SET_SURFACE_CLIP_VERTICAL, "NV4097_SET_SURFACE_CLIP_VERTICAL"},
{NV4097_SET_SURFACE_FORMAT, "NV4097_SET_SURFACE_FORMAT"},
{NV4097_SET_SURFACE_PITCH_A, "NV4097_SET_SURFACE_PITCH_A"},
{NV4097_SET_SURFACE_COLOR_AOFFSET, "NV4097_SET_SURFACE_COLOR_AOFFSET"},
{NV4097_SET_SURFACE_ZETA_OFFSET, "NV4097_SET_SURFACE_ZETA_OFFSET"},
{NV4097_SET_SURFACE_COLOR_BOFFSET, "NV4097_SET_SURFACE_COLOR_BOFFSET"},
{NV4097_SET_SURFACE_PITCH_B, "NV4097_SET_SURFACE_PITCH_B"},
{NV4097_SET_SURFACE_COLOR_TARGET, "NV4097_SET_SURFACE_COLOR_TARGET"},
{NV4097_SET_SURFACE_PITCH_Z, "NV4097_SET_SURFACE_PITCH_Z"},
{NV4097_INVALIDATE_ZCULL, "NV4097_INVALIDATE_ZCULL"},
{NV4097_SET_CYLINDRICAL_WRAP, "NV4097_SET_CYLINDRICAL_WRAP"},
{NV4097_SET_CYLINDRICAL_WRAP1, "NV4097_SET_CYLINDRICAL_WRAP1"},
{NV4097_SET_SURFACE_PITCH_C, "NV4097_SET_SURFACE_PITCH_C"},
{NV4097_SET_SURFACE_PITCH_D, "NV4097_SET_SURFACE_PITCH_D"},
{NV4097_SET_SURFACE_COLOR_COFFSET, "NV4097_SET_SURFACE_COLOR_COFFSET"},
{NV4097_SET_SURFACE_COLOR_DOFFSET, "NV4097_SET_SURFACE_COLOR_DOFFSET"},
{NV4097_SET_WINDOW_OFFSET, "NV4097_SET_WINDOW_OFFSET"},
{NV4097_SET_WINDOW_CLIP_TYPE, "NV4097_SET_WINDOW_CLIP_TYPE"},
{NV4097_SET_WINDOW_CLIP_HORIZONTAL, "NV4097_SET_WINDOW_CLIP_HORIZONTAL"},
{NV4097_SET_WINDOW_CLIP_VERTICAL, "NV4097_SET_WINDOW_CLIP_VERTICAL"},
{NV4097_SET_DITHER_ENABLE, "NV4097_SET_DITHER_ENABLE"},
{NV4097_SET_ALPHA_TEST_ENABLE, "NV4097_SET_ALPHA_TEST_ENABLE"},
{NV4097_SET_ALPHA_FUNC, "NV4097_SET_ALPHA_FUNC"},
{NV4097_SET_ALPHA_REF, "NV4097_SET_ALPHA_REF"},
{NV4097_SET_BLEND_ENABLE, "NV4097_SET_BLEND_ENABLE"},
{NV4097_SET_BLEND_FUNC_SFACTOR, "NV4097_SET_BLEND_FUNC_SFACTOR"},
{NV4097_SET_BLEND_FUNC_DFACTOR, "NV4097_SET_BLEND_FUNC_DFACTOR"},
{NV4097_SET_BLEND_COLOR, "NV4097_SET_BLEND_COLOR"},
{NV4097_SET_BLEND_EQUATION, "NV4097_SET_BLEND_EQUATION"},
{NV4097_SET_COLOR_MASK, "NV4097_SET_COLOR_MASK"},
{NV4097_SET_STENCIL_TEST_ENABLE, "NV4097_SET_STENCIL_TEST_ENABLE"},
{NV4097_SET_STENCIL_MASK, "NV4097_SET_STENCIL_MASK"},
{NV4097_SET_STENCIL_FUNC, "NV4097_SET_STENCIL_FUNC"},
{NV4097_SET_STENCIL_FUNC_REF, "NV4097_SET_STENCIL_FUNC_REF"},
{NV4097_SET_STENCIL_FUNC_MASK, "NV4097_SET_STENCIL_FUNC_MASK"},
{NV4097_SET_STENCIL_OP_FAIL, "NV4097_SET_STENCIL_OP_FAIL"},
{NV4097_SET_STENCIL_OP_ZFAIL, "NV4097_SET_STENCIL_OP_ZFAIL"},
{NV4097_SET_STENCIL_OP_ZPASS, "NV4097_SET_STENCIL_OP_ZPASS"},
{NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE, "NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE"},
{NV4097_SET_BACK_STENCIL_MASK, "NV4097_SET_BACK_STENCIL_MASK"},
{NV4097_SET_BACK_STENCIL_FUNC, "NV4097_SET_BACK_STENCIL_FUNC"},
{NV4097_SET_BACK_STENCIL_FUNC_REF, "NV4097_SET_BACK_STENCIL_FUNC_REF"},
{NV4097_SET_BACK_STENCIL_FUNC_MASK, "NV4097_SET_BACK_STENCIL_FUNC_MASK"},
{NV4097_SET_BACK_STENCIL_OP_FAIL, "NV4097_SET_BACK_STENCIL_OP_FAIL"},
{NV4097_SET_BACK_STENCIL_OP_ZFAIL, "NV4097_SET_BACK_STENCIL_OP_ZFAIL"},
{NV4097_SET_BACK_STENCIL_OP_ZPASS, "NV4097_SET_BACK_STENCIL_OP_ZPASS"},
{NV4097_SET_SHADE_MODE, "NV4097_SET_SHADE_MODE"},
{NV4097_SET_BLEND_ENABLE_MRT, "NV4097_SET_BLEND_ENABLE_MRT"},
{NV4097_SET_COLOR_MASK_MRT, "NV4097_SET_COLOR_MASK_MRT"},
{NV4097_SET_LOGIC_OP_ENABLE, "NV4097_SET_LOGIC_OP_ENABLE"},
{NV4097_SET_LOGIC_OP, "NV4097_SET_LOGIC_OP"},
{NV4097_SET_BLEND_COLOR2, "NV4097_SET_BLEND_COLOR2"},
{NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE, "NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE"},
{NV4097_SET_DEPTH_BOUNDS_MIN, "NV4097_SET_DEPTH_BOUNDS_MIN"},
{NV4097_SET_DEPTH_BOUNDS_MAX, "NV4097_SET_DEPTH_BOUNDS_MAX"},
{NV4097_SET_CLIP_MIN, "NV4097_SET_CLIP_MIN"},
{NV4097_SET_CLIP_MAX, "NV4097_SET_CLIP_MAX"},
{NV4097_SET_CONTROL0, "NV4097_SET_CONTROL0"},
{NV4097_SET_LINE_WIDTH, "NV4097_SET_LINE_WIDTH"},
{NV4097_SET_LINE_SMOOTH_ENABLE, "NV4097_SET_LINE_SMOOTH_ENABLE"},
{NV4097_SET_ANISO_SPREAD, "NV4097_SET_ANISO_SPREAD"},
{NV4097_SET_ANISO_SPREAD + 4 / 4, "NV4097_SET_ANISO_SPREAD + 4"},
{NV4097_SET_ANISO_SPREAD + 8 / 4, "NV4097_SET_ANISO_SPREAD + 8"},
{NV4097_SET_ANISO_SPREAD + 12 / 4, "NV4097_SET_ANISO_SPREAD + 12"},
{NV4097_SET_ANISO_SPREAD + 16 / 4, "NV4097_SET_ANISO_SPREAD + 16"},
{NV4097_SET_ANISO_SPREAD + 20 / 4, "NV4097_SET_ANISO_SPREAD + 20"},
{NV4097_SET_ANISO_SPREAD + 24 / 4, "NV4097_SET_ANISO_SPREAD + 24"},
{NV4097_SET_ANISO_SPREAD + 28 / 4, "NV4097_SET_ANISO_SPREAD + 28"},
{NV4097_SET_ANISO_SPREAD + 32 / 4, "NV4097_SET_ANISO_SPREAD + 32"},
{NV4097_SET_ANISO_SPREAD + 36 / 4, "NV4097_SET_ANISO_SPREAD + 36"},
{NV4097_SET_ANISO_SPREAD + 40 / 4, "NV4097_SET_ANISO_SPREAD + 40"},
{NV4097_SET_ANISO_SPREAD + 44 / 4, "NV4097_SET_ANISO_SPREAD + 44"},
{NV4097_SET_ANISO_SPREAD + 48 / 4, "NV4097_SET_ANISO_SPREAD + 48"},
{NV4097_SET_ANISO_SPREAD + 52 / 4, "NV4097_SET_ANISO_SPREAD + 52"},
{NV4097_SET_ANISO_SPREAD + 56 / 4, "NV4097_SET_ANISO_SPREAD + 56"},
{NV4097_SET_ANISO_SPREAD + 60 / 4, "NV4097_SET_ANISO_SPREAD + 60"},
{NV4097_SET_SCISSOR_HORIZONTAL, "NV4097_SET_SCISSOR_HORIZONTAL"},
{NV4097_SET_SCISSOR_VERTICAL, "NV4097_SET_SCISSOR_VERTICAL"},
{NV4097_SET_FOG_MODE, "NV4097_SET_FOG_MODE"},
{NV4097_SET_FOG_PARAMS, "NV4097_SET_FOG_PARAMS"},
{NV4097_SET_FOG_PARAMS + 4 / 4, "NV4097_SET_FOG_PARAMS + 4"},
{NV4097_SET_FOG_PARAMS + 8 / 4, "NV4097_SET_FOG_PARAMS + 8"},
{NV4097_SET_SHADER_PROGRAM, "NV4097_SET_SHADER_PROGRAM"},
{NV4097_SET_VERTEX_TEXTURE_OFFSET, "NV4097_SET_VERTEX_TEXTURE_OFFSET"},
{NV4097_SET_VERTEX_TEXTURE_FORMAT, "NV4097_SET_VERTEX_TEXTURE_FORMAT"},
{NV4097_SET_VERTEX_TEXTURE_ADDRESS, "NV4097_SET_VERTEX_TEXTURE_ADDRESS"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL0, "NV4097_SET_VERTEX_TEXTURE_CONTROL0"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL3, "NV4097_SET_VERTEX_TEXTURE_CONTROL3"},
{NV4097_SET_VERTEX_TEXTURE_FILTER, "NV4097_SET_VERTEX_TEXTURE_FILTER"},
{NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT, "NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT"},
{NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR, "NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR"},
{NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_FILTER + 0x20 / 4, "NV4097_SET_VERTEX_TEXTURE_FILTER + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x20 / 4,
"NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x20 / 4,
"NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x20"},
{NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_FILTER + 0x40 / 4, "NV4097_SET_VERTEX_TEXTURE_FILTER + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x40 / 4,
"NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x40 / 4,
"NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x40"},
{NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_OFFSET + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_FORMAT + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_ADDRESS + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_FILTER + 0x60 / 4, "NV4097_SET_VERTEX_TEXTURE_FILTER + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x60 / 4,
"NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 0x60"},
{NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x60 / 4,
"NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 0x60"},
{NV4097_SET_VIEWPORT_HORIZONTAL, "NV4097_SET_VIEWPORT_HORIZONTAL"},
{NV4097_SET_VIEWPORT_VERTICAL, "NV4097_SET_VIEWPORT_VERTICAL"},
{NV4097_SET_POINT_CENTER_MODE, "NV4097_SET_POINT_CENTER_MODE"},
{NV4097_ZCULL_SYNC, "NV4097_ZCULL_SYNC"},
{NV4097_SET_VIEWPORT_OFFSET, "NV4097_SET_VIEWPORT_OFFSET"},
{NV4097_SET_VIEWPORT_SCALE, "NV4097_SET_VIEWPORT_SCALE"},
{NV4097_SET_POLY_OFFSET_POINT_ENABLE, "NV4097_SET_POLY_OFFSET_POINT_ENABLE"},
{NV4097_SET_POLY_OFFSET_LINE_ENABLE, "NV4097_SET_POLY_OFFSET_LINE_ENABLE"},
{NV4097_SET_POLY_OFFSET_FILL_ENABLE, "NV4097_SET_POLY_OFFSET_FILL_ENABLE"},
{NV4097_SET_DEPTH_FUNC, "NV4097_SET_DEPTH_FUNC"},
{NV4097_SET_DEPTH_MASK, "NV4097_SET_DEPTH_MASK"},
{NV4097_SET_DEPTH_TEST_ENABLE, "NV4097_SET_DEPTH_TEST_ENABLE"},
{NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR, "NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR"},
{NV4097_SET_POLYGON_OFFSET_BIAS, "NV4097_SET_POLYGON_OFFSET_BIAS"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M, "NV4097_SET_VERTEX_DATA_SCALED4S_M"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 4 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 4"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 8 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 8"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 12 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 12"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 16 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 16"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 20 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 20"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 24 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 24"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 28 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 28"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 32 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 32"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 36 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 36"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 40 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 40"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 44 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 44"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 48 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 48"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 52 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 52"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 56 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 56"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 60 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 60"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 64 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 64"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 68 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 68"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 72 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 72"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 76 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 76"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 80 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 80"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 84 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 84"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 88 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 88"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 92 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 92"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 96 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 96"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 100 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 100"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 104 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 104"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 108 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 108"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 112 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 112"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 116 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 116"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 120 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 120"},
{NV4097_SET_VERTEX_DATA_SCALED4S_M + 124 / 4, "NV4097_SET_VERTEX_DATA_SCALED4S_M + 124"},
{NV4097_SET_TEXTURE_CONTROL2, "NV4097_SET_TEXTURE_CONTROL2"},
{NV4097_SET_TEXTURE_CONTROL2 + 4 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 4"},
{NV4097_SET_TEXTURE_CONTROL2 + 8 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 8"},
{NV4097_SET_TEXTURE_CONTROL2 + 12 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 12"},
{NV4097_SET_TEXTURE_CONTROL2 + 16 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 16"},
{NV4097_SET_TEXTURE_CONTROL2 + 20 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 20"},
{NV4097_SET_TEXTURE_CONTROL2 + 24 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 24"},
{NV4097_SET_TEXTURE_CONTROL2 + 28 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 28"},
{NV4097_SET_TEXTURE_CONTROL2 + 32 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 32"},
{NV4097_SET_TEXTURE_CONTROL2 + 36 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 36"},
{NV4097_SET_TEXTURE_CONTROL2 + 40 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 40"},
{NV4097_SET_TEXTURE_CONTROL2 + 44 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 44"},
{NV4097_SET_TEXTURE_CONTROL2 + 48 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 48"},
{NV4097_SET_TEXTURE_CONTROL2 + 52 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 52"},
{NV4097_SET_TEXTURE_CONTROL2 + 56 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 56"},
{NV4097_SET_TEXTURE_CONTROL2 + 60 / 4, "NV4097_SET_TEXTURE_CONTROL2 + 60"},
{NV4097_SET_TEX_COORD_CONTROL, "NV4097_SET_TEX_COORD_CONTROL"},
{NV4097_SET_TEX_COORD_CONTROL + 4 / 4, "NV4097_SET_TEX_COORD_CONTROL + 4"},
{NV4097_SET_TEX_COORD_CONTROL + 8 / 4, "NV4097_SET_TEX_COORD_CONTROL + 8"},
{NV4097_SET_TEX_COORD_CONTROL + 12 / 4, "NV4097_SET_TEX_COORD_CONTROL + 12"},
{NV4097_SET_TEX_COORD_CONTROL + 16 / 4, "NV4097_SET_TEX_COORD_CONTROL + 16"},
{NV4097_SET_TEX_COORD_CONTROL + 20 / 4, "NV4097_SET_TEX_COORD_CONTROL + 20"},
{NV4097_SET_TEX_COORD_CONTROL + 24 / 4, "NV4097_SET_TEX_COORD_CONTROL + 24"},
{NV4097_SET_TEX_COORD_CONTROL + 28 / 4, "NV4097_SET_TEX_COORD_CONTROL + 28"},
{NV4097_SET_TEX_COORD_CONTROL + 32 / 4, "NV4097_SET_TEX_COORD_CONTROL + 32"},
{NV4097_SET_TEX_COORD_CONTROL + 36 / 4, "NV4097_SET_TEX_COORD_CONTROL + 36"},
{NV4097_SET_TRANSFORM_PROGRAM, "NV4097_SET_TRANSFORM_PROGRAM"},
{NV4097_SET_TRANSFORM_PROGRAM + 4 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 4"},
{NV4097_SET_TRANSFORM_PROGRAM + 8 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 8"},
{NV4097_SET_TRANSFORM_PROGRAM + 12 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 12"},
{NV4097_SET_TRANSFORM_PROGRAM + 16 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 16"},
{NV4097_SET_TRANSFORM_PROGRAM + 20 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 20"},
{NV4097_SET_TRANSFORM_PROGRAM + 24 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 24"},
{NV4097_SET_TRANSFORM_PROGRAM + 28 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 28"},
{NV4097_SET_TRANSFORM_PROGRAM + 32 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 32"},
{NV4097_SET_TRANSFORM_PROGRAM + 36 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 36"},
{NV4097_SET_TRANSFORM_PROGRAM + 40 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 40"},
{NV4097_SET_TRANSFORM_PROGRAM + 44 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 44"},
{NV4097_SET_TRANSFORM_PROGRAM + 48 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 48"},
{NV4097_SET_TRANSFORM_PROGRAM + 52 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 52"},
{NV4097_SET_TRANSFORM_PROGRAM + 56 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 56"},
{NV4097_SET_TRANSFORM_PROGRAM + 60 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 60"},
{NV4097_SET_TRANSFORM_PROGRAM + 64 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 64"},
{NV4097_SET_TRANSFORM_PROGRAM + 68 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 68"},
{NV4097_SET_TRANSFORM_PROGRAM + 72 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 72"},
{NV4097_SET_TRANSFORM_PROGRAM + 76 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 76"},
{NV4097_SET_TRANSFORM_PROGRAM + 80 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 80"},
{NV4097_SET_TRANSFORM_PROGRAM + 84 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 84"},
{NV4097_SET_TRANSFORM_PROGRAM + 88 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 88"},
{NV4097_SET_TRANSFORM_PROGRAM + 92 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 92"},
{NV4097_SET_TRANSFORM_PROGRAM + 96 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 96"},
{NV4097_SET_TRANSFORM_PROGRAM + 100 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 100"},
{NV4097_SET_TRANSFORM_PROGRAM + 104 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 104"},
{NV4097_SET_TRANSFORM_PROGRAM + 108 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 108"},
{NV4097_SET_TRANSFORM_PROGRAM + 112 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 112"},
{NV4097_SET_TRANSFORM_PROGRAM + 116 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 116"},
{NV4097_SET_TRANSFORM_PROGRAM + 120 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 120"},
{NV4097_SET_TRANSFORM_PROGRAM + 124 / 4, "NV4097_SET_TRANSFORM_PROGRAM + 124"},
{NV4097_SET_SPECULAR_ENABLE, "NV4097_SET_SPECULAR_ENABLE"},
{NV4097_SET_TWO_SIDE_LIGHT_EN, "NV4097_SET_TWO_SIDE_LIGHT_EN"},
{NV4097_CLEAR_ZCULL_SURFACE, "NV4097_CLEAR_ZCULL_SURFACE"},
{NV4097_SET_PERFORMANCE_PARAMS, "NV4097_SET_PERFORMANCE_PARAMS"},
{NV4097_SET_FLAT_SHADE_OP, "NV4097_SET_FLAT_SHADE_OP"},
{NV4097_SET_EDGE_FLAG, "NV4097_SET_EDGE_FLAG"},
{NV4097_SET_USER_CLIP_PLANE_CONTROL, "NV4097_SET_USER_CLIP_PLANE_CONTROL"},
{NV4097_SET_POLYGON_STIPPLE, "NV4097_SET_POLYGON_STIPPLE"},
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 1),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 2),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 3),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 4),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 5),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 6),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 7),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 8),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 9),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 10),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 11),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 12),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 13),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 14),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 15),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 16),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 17),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 18),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 19),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 20),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 21),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 22),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 23),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 24),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 25),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 26),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 28),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 29),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 30),
KEY_STR(NV4097_SET_POLYGON_STIPPLE_PATTERN + 31),
{NV4097_SET_VERTEX_DATA3F_M, "NV4097_SET_VERTEX_DATA3F_M"},
{NV4097_SET_VERTEX_DATA3F_M + 4 / 4, "NV4097_SET_VERTEX_DATA3F_M + 4"},
{NV4097_SET_VERTEX_DATA3F_M + 8 / 4, "NV4097_SET_VERTEX_DATA3F_M + 8"},
{NV4097_SET_VERTEX_DATA3F_M + 12 / 4, "NV4097_SET_VERTEX_DATA3F_M + 12"},
{NV4097_SET_VERTEX_DATA3F_M + 16 / 4, "NV4097_SET_VERTEX_DATA3F_M + 16"},
{NV4097_SET_VERTEX_DATA3F_M + 20 / 4, "NV4097_SET_VERTEX_DATA3F_M + 20"},
{NV4097_SET_VERTEX_DATA3F_M + 24 / 4, "NV4097_SET_VERTEX_DATA3F_M + 24"},
{NV4097_SET_VERTEX_DATA3F_M + 28 / 4, "NV4097_SET_VERTEX_DATA3F_M + 28"},
{NV4097_SET_VERTEX_DATA3F_M + 32 / 4, "NV4097_SET_VERTEX_DATA3F_M + 32"},
{NV4097_SET_VERTEX_DATA3F_M + 36 / 4, "NV4097_SET_VERTEX_DATA3F_M + 36"},
{NV4097_SET_VERTEX_DATA3F_M + 40 / 4, "NV4097_SET_VERTEX_DATA3F_M + 40"},
{NV4097_SET_VERTEX_DATA3F_M + 44 / 4, "NV4097_SET_VERTEX_DATA3F_M + 44"},
{NV4097_SET_VERTEX_DATA3F_M + 48 / 4, "NV4097_SET_VERTEX_DATA3F_M + 48"},
{NV4097_SET_VERTEX_DATA3F_M + 52 / 4, "NV4097_SET_VERTEX_DATA3F_M + 52"},
{NV4097_SET_VERTEX_DATA3F_M + 56 / 4, "NV4097_SET_VERTEX_DATA3F_M + 56"},
{NV4097_SET_VERTEX_DATA3F_M + 60 / 4, "NV4097_SET_VERTEX_DATA3F_M + 60"},
{NV4097_SET_VERTEX_DATA3F_M + 64 / 4, "NV4097_SET_VERTEX_DATA3F_M + 64"},
{NV4097_SET_VERTEX_DATA3F_M + 68 / 4, "NV4097_SET_VERTEX_DATA3F_M + 68"},
{NV4097_SET_VERTEX_DATA3F_M + 72 / 4, "NV4097_SET_VERTEX_DATA3F_M + 72"},
{NV4097_SET_VERTEX_DATA3F_M + 76 / 4, "NV4097_SET_VERTEX_DATA3F_M + 76"},
{NV4097_SET_VERTEX_DATA3F_M + 80 / 4, "NV4097_SET_VERTEX_DATA3F_M + 80"},
{NV4097_SET_VERTEX_DATA3F_M + 84 / 4, "NV4097_SET_VERTEX_DATA3F_M + 84"},
{NV4097_SET_VERTEX_DATA3F_M + 88 / 4, "NV4097_SET_VERTEX_DATA3F_M + 88"},
{NV4097_SET_VERTEX_DATA3F_M + 92 / 4, "NV4097_SET_VERTEX_DATA3F_M + 92"},
{NV4097_SET_VERTEX_DATA3F_M + 96 / 4, "NV4097_SET_VERTEX_DATA3F_M + 96"},
{NV4097_SET_VERTEX_DATA3F_M + 100 / 4, "NV4097_SET_VERTEX_DATA3F_M + 100"},
{NV4097_SET_VERTEX_DATA3F_M + 104 / 4, "NV4097_SET_VERTEX_DATA3F_M + 104"},
{NV4097_SET_VERTEX_DATA3F_M + 108 / 4, "NV4097_SET_VERTEX_DATA3F_M + 108"},
{NV4097_SET_VERTEX_DATA3F_M + 112 / 4, "NV4097_SET_VERTEX_DATA3F_M + 112"},
{NV4097_SET_VERTEX_DATA3F_M + 116 / 4, "NV4097_SET_VERTEX_DATA3F_M + 116"},
{NV4097_SET_VERTEX_DATA3F_M + 120 / 4, "NV4097_SET_VERTEX_DATA3F_M + 120"},
{NV4097_SET_VERTEX_DATA3F_M + 124 / 4, "NV4097_SET_VERTEX_DATA3F_M + 124"},
{NV4097_SET_VERTEX_DATA3F_M + 128 / 4, "NV4097_SET_VERTEX_DATA3F_M + 128"},
{NV4097_SET_VERTEX_DATA3F_M + 132 / 4, "NV4097_SET_VERTEX_DATA3F_M + 132"},
{NV4097_SET_VERTEX_DATA3F_M + 136 / 4, "NV4097_SET_VERTEX_DATA3F_M + 136"},
{NV4097_SET_VERTEX_DATA3F_M + 140 / 4, "NV4097_SET_VERTEX_DATA3F_M + 140"},
{NV4097_SET_VERTEX_DATA3F_M + 144 / 4, "NV4097_SET_VERTEX_DATA3F_M + 144"},
{NV4097_SET_VERTEX_DATA3F_M + 148 / 4, "NV4097_SET_VERTEX_DATA3F_M + 148"},
{NV4097_SET_VERTEX_DATA3F_M + 152 / 4, "NV4097_SET_VERTEX_DATA3F_M + 152"},
{NV4097_SET_VERTEX_DATA3F_M + 156 / 4, "NV4097_SET_VERTEX_DATA3F_M + 156"},
{NV4097_SET_VERTEX_DATA3F_M + 160 / 4, "NV4097_SET_VERTEX_DATA3F_M + 160"},
{NV4097_SET_VERTEX_DATA3F_M + 164 / 4, "NV4097_SET_VERTEX_DATA3F_M + 164"},
{NV4097_SET_VERTEX_DATA3F_M + 168 / 4, "NV4097_SET_VERTEX_DATA3F_M + 168"},
{NV4097_SET_VERTEX_DATA3F_M + 172 / 4, "NV4097_SET_VERTEX_DATA3F_M + 172"},
{NV4097_SET_VERTEX_DATA3F_M + 176 / 4, "NV4097_SET_VERTEX_DATA3F_M + 176"},
{NV4097_SET_VERTEX_DATA3F_M + 180 / 4, "NV4097_SET_VERTEX_DATA3F_M + 180"},
{NV4097_SET_VERTEX_DATA3F_M + 184 / 4, "NV4097_SET_VERTEX_DATA3F_M + 184"},
{NV4097_SET_VERTEX_DATA3F_M + 188 / 4, "NV4097_SET_VERTEX_DATA3F_M + 188"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 4 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 4"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 8 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 8"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 12 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 12"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 16 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 16"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 20 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 20"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 24 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 24"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 28 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 28"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 32 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 32"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 36 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 36"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 40 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 40"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 44 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 44"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 48 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 48"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 52 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 52"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 56 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 56"},
{NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 60 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 60"},
{NV4097_INVALIDATE_VERTEX_CACHE_FILE, "NV4097_INVALIDATE_VERTEX_CACHE_FILE"},
{NV4097_INVALIDATE_VERTEX_FILE, "NV4097_INVALIDATE_VERTEX_FILE"},
{NV4097_PIPE_NOP, "NV4097_PIPE_NOP"},
{NV4097_SET_VERTEX_DATA_BASE_OFFSET, "NV4097_SET_VERTEX_DATA_BASE_OFFSET"},
{NV4097_SET_VERTEX_DATA_BASE_INDEX, "NV4097_SET_VERTEX_DATA_BASE_INDEX"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 4 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 4"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 8 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 8"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 12 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 12"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 16 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 16"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 20 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 20"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 24 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 24"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 28 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 28"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 32 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 32"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 36 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 36"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 40 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 40"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 44 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 44"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 48 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 48"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 52 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 52"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 56 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 56"},
{NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 60 / 4, "NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 60"},
{NV4097_CLEAR_REPORT_VALUE, "NV4097_CLEAR_REPORT_VALUE"},
{NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE, "NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE"},
{NV4097_GET_REPORT, "NV4097_GET_REPORT"},
{NV4097_SET_ZCULL_STATS_ENABLE, "NV4097_SET_ZCULL_STATS_ENABLE"},
{NV4097_SET_BEGIN_END, "NV4097_SET_BEGIN_END"},
{NV4097_ARRAY_ELEMENT16, "NV4097_ARRAY_ELEMENT16"},
{NV4097_ARRAY_ELEMENT32, "NV4097_ARRAY_ELEMENT32"},
{NV4097_DRAW_ARRAYS, "NV4097_DRAW_ARRAYS"},
{NV4097_INLINE_ARRAY, "NV4097_INLINE_ARRAY"},
{NV4097_SET_INDEX_ARRAY_ADDRESS, "NV4097_SET_INDEX_ARRAY_ADDRESS"},
{NV4097_SET_INDEX_ARRAY_DMA, "NV4097_SET_INDEX_ARRAY_DMA"},
{NV4097_DRAW_INDEX_ARRAY, "NV4097_DRAW_INDEX_ARRAY"},
{NV4097_SET_FRONT_POLYGON_MODE, "NV4097_SET_FRONT_POLYGON_MODE"},
{NV4097_SET_BACK_POLYGON_MODE, "NV4097_SET_BACK_POLYGON_MODE"},
{NV4097_SET_CULL_FACE, "NV4097_SET_CULL_FACE"},
{NV4097_SET_FRONT_FACE, "NV4097_SET_FRONT_FACE"},
{NV4097_SET_POLY_SMOOTH_ENABLE, "NV4097_SET_POLY_SMOOTH_ENABLE"},
{NV4097_SET_CULL_FACE_ENABLE, "NV4097_SET_CULL_FACE_ENABLE"},
{NV4097_SET_TEXTURE_CONTROL3, "NV4097_SET_TEXTURE_CONTROL3"},
{NV4097_SET_TEXTURE_CONTROL3 + 4 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 4"},
{NV4097_SET_TEXTURE_CONTROL3 + 8 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 8"},
{NV4097_SET_TEXTURE_CONTROL3 + 12 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 12"},
{NV4097_SET_TEXTURE_CONTROL3 + 16 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 16"},
{NV4097_SET_TEXTURE_CONTROL3 + 20 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 20"},
{NV4097_SET_TEXTURE_CONTROL3 + 24 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 24"},
{NV4097_SET_TEXTURE_CONTROL3 + 28 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 28"},
{NV4097_SET_TEXTURE_CONTROL3 + 32 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 32"},
{NV4097_SET_TEXTURE_CONTROL3 + 36 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 36"},
{NV4097_SET_TEXTURE_CONTROL3 + 40 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 40"},
{NV4097_SET_TEXTURE_CONTROL3 + 44 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 44"},
{NV4097_SET_TEXTURE_CONTROL3 + 48 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 48"},
{NV4097_SET_TEXTURE_CONTROL3 + 52 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 52"},
{NV4097_SET_TEXTURE_CONTROL3 + 56 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 56"},
{NV4097_SET_TEXTURE_CONTROL3 + 60 / 4, "NV4097_SET_TEXTURE_CONTROL3 + 60"},
{NV4097_SET_VERTEX_DATA2F_M, "NV4097_SET_VERTEX_DATA2F_M"},
{NV4097_SET_VERTEX_DATA2F_M + 4 / 4, "NV4097_SET_VERTEX_DATA2F_M + 4"},
{NV4097_SET_VERTEX_DATA2F_M + 8 / 4, "NV4097_SET_VERTEX_DATA2F_M + 8"},
{NV4097_SET_VERTEX_DATA2F_M + 12 / 4, "NV4097_SET_VERTEX_DATA2F_M + 12"},
{NV4097_SET_VERTEX_DATA2F_M + 16 / 4, "NV4097_SET_VERTEX_DATA2F_M + 16"},
{NV4097_SET_VERTEX_DATA2F_M + 20 / 4, "NV4097_SET_VERTEX_DATA2F_M + 20"},
{NV4097_SET_VERTEX_DATA2F_M + 24 / 4, "NV4097_SET_VERTEX_DATA2F_M + 24"},
{NV4097_SET_VERTEX_DATA2F_M + 28 / 4, "NV4097_SET_VERTEX_DATA2F_M + 28"},
{NV4097_SET_VERTEX_DATA2F_M + 32 / 4, "NV4097_SET_VERTEX_DATA2F_M + 32"},
{NV4097_SET_VERTEX_DATA2F_M + 36 / 4, "NV4097_SET_VERTEX_DATA2F_M + 36"},
{NV4097_SET_VERTEX_DATA2F_M + 40 / 4, "NV4097_SET_VERTEX_DATA2F_M + 40"},
{NV4097_SET_VERTEX_DATA2F_M + 44 / 4, "NV4097_SET_VERTEX_DATA2F_M + 44"},
{NV4097_SET_VERTEX_DATA2F_M + 48 / 4, "NV4097_SET_VERTEX_DATA2F_M + 48"},
{NV4097_SET_VERTEX_DATA2F_M + 52 / 4, "NV4097_SET_VERTEX_DATA2F_M + 52"},
{NV4097_SET_VERTEX_DATA2F_M + 56 / 4, "NV4097_SET_VERTEX_DATA2F_M + 56"},
{NV4097_SET_VERTEX_DATA2F_M + 60 / 4, "NV4097_SET_VERTEX_DATA2F_M + 60"},
{NV4097_SET_VERTEX_DATA2F_M + 64 / 4, "NV4097_SET_VERTEX_DATA2F_M + 64"},
{NV4097_SET_VERTEX_DATA2F_M + 68 / 4, "NV4097_SET_VERTEX_DATA2F_M + 68"},
{NV4097_SET_VERTEX_DATA2F_M + 72 / 4, "NV4097_SET_VERTEX_DATA2F_M + 72"},
{NV4097_SET_VERTEX_DATA2F_M + 76 / 4, "NV4097_SET_VERTEX_DATA2F_M + 76"},
{NV4097_SET_VERTEX_DATA2F_M + 80 / 4, "NV4097_SET_VERTEX_DATA2F_M + 80"},
{NV4097_SET_VERTEX_DATA2F_M + 84 / 4, "NV4097_SET_VERTEX_DATA2F_M + 84"},
{NV4097_SET_VERTEX_DATA2F_M + 88 / 4, "NV4097_SET_VERTEX_DATA2F_M + 88"},
{NV4097_SET_VERTEX_DATA2F_M + 92 / 4, "NV4097_SET_VERTEX_DATA2F_M + 92"},
{NV4097_SET_VERTEX_DATA2F_M + 96 / 4, "NV4097_SET_VERTEX_DATA2F_M + 96"},
{NV4097_SET_VERTEX_DATA2F_M + 100 / 4, "NV4097_SET_VERTEX_DATA2F_M + 100"},
{NV4097_SET_VERTEX_DATA2F_M + 104 / 4, "NV4097_SET_VERTEX_DATA2F_M + 104"},
{NV4097_SET_VERTEX_DATA2F_M + 108 / 4, "NV4097_SET_VERTEX_DATA2F_M + 108"},
{NV4097_SET_VERTEX_DATA2F_M + 112 / 4, "NV4097_SET_VERTEX_DATA2F_M + 112"},
{NV4097_SET_VERTEX_DATA2F_M + 116 / 4, "NV4097_SET_VERTEX_DATA2F_M + 116"},
{NV4097_SET_VERTEX_DATA2F_M + 120 / 4, "NV4097_SET_VERTEX_DATA2F_M + 120"},
{NV4097_SET_VERTEX_DATA2F_M + 124 / 4, "NV4097_SET_VERTEX_DATA2F_M + 124"},
{NV4097_SET_VERTEX_DATA2S_M, "NV4097_SET_VERTEX_DATA2S_M"},
{NV4097_SET_VERTEX_DATA2S_M + 4 / 4, "NV4097_SET_VERTEX_DATA2S_M + 4"},
{NV4097_SET_VERTEX_DATA2S_M + 8 / 4, "NV4097_SET_VERTEX_DATA2S_M + 8"},
{NV4097_SET_VERTEX_DATA2S_M + 12 / 4, "NV4097_SET_VERTEX_DATA2S_M + 12"},
{NV4097_SET_VERTEX_DATA2S_M + 16 / 4, "NV4097_SET_VERTEX_DATA2S_M + 16"},
{NV4097_SET_VERTEX_DATA2S_M + 20 / 4, "NV4097_SET_VERTEX_DATA2S_M + 20"},
{NV4097_SET_VERTEX_DATA2S_M + 24 / 4, "NV4097_SET_VERTEX_DATA2S_M + 24"},
{NV4097_SET_VERTEX_DATA2S_M + 28 / 4, "NV4097_SET_VERTEX_DATA2S_M + 28"},
{NV4097_SET_VERTEX_DATA2S_M + 32 / 4, "NV4097_SET_VERTEX_DATA2S_M + 32"},
{NV4097_SET_VERTEX_DATA2S_M + 36 / 4, "NV4097_SET_VERTEX_DATA2S_M + 36"},
{NV4097_SET_VERTEX_DATA2S_M + 40 / 4, "NV4097_SET_VERTEX_DATA2S_M + 40"},
{NV4097_SET_VERTEX_DATA2S_M + 44 / 4, "NV4097_SET_VERTEX_DATA2S_M + 44"},
{NV4097_SET_VERTEX_DATA2S_M + 48 / 4, "NV4097_SET_VERTEX_DATA2S_M + 48"},
{NV4097_SET_VERTEX_DATA2S_M + 52 / 4, "NV4097_SET_VERTEX_DATA2S_M + 52"},
{NV4097_SET_VERTEX_DATA2S_M + 56 / 4, "NV4097_SET_VERTEX_DATA2S_M + 56"},
{NV4097_SET_VERTEX_DATA2S_M + 60 / 4, "NV4097_SET_VERTEX_DATA2S_M + 60"},
{NV4097_SET_VERTEX_DATA4UB_M, "NV4097_SET_VERTEX_DATA4UB_M"},
{NV4097_SET_VERTEX_DATA4UB_M + 4 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 4"},
{NV4097_SET_VERTEX_DATA4UB_M + 8 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 8"},
{NV4097_SET_VERTEX_DATA4UB_M + 12 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 12"},
{NV4097_SET_VERTEX_DATA4UB_M + 16 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 16"},
{NV4097_SET_VERTEX_DATA4UB_M + 20 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 20"},
{NV4097_SET_VERTEX_DATA4UB_M + 24 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 24"},
{NV4097_SET_VERTEX_DATA4UB_M + 28 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 28"},
{NV4097_SET_VERTEX_DATA4UB_M + 32 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 32"},
{NV4097_SET_VERTEX_DATA4UB_M + 36 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 36"},
{NV4097_SET_VERTEX_DATA4UB_M + 40 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 40"},
{NV4097_SET_VERTEX_DATA4UB_M + 44 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 44"},
{NV4097_SET_VERTEX_DATA4UB_M + 48 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 48"},
{NV4097_SET_VERTEX_DATA4UB_M + 52 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 52"},
{NV4097_SET_VERTEX_DATA4UB_M + 56 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 56"},
{NV4097_SET_VERTEX_DATA4UB_M + 60 / 4, "NV4097_SET_VERTEX_DATA4UB_M + 60"},
{NV4097_SET_VERTEX_DATA4S_M, "NV4097_SET_VERTEX_DATA4S_M"},
{NV4097_SET_VERTEX_DATA4S_M + 4 / 4, "NV4097_SET_VERTEX_DATA4S_M + 4"},
{NV4097_SET_VERTEX_DATA4S_M + 8 / 4, "NV4097_SET_VERTEX_DATA4S_M + 8"},
{NV4097_SET_VERTEX_DATA4S_M + 12 / 4, "NV4097_SET_VERTEX_DATA4S_M + 12"},
{NV4097_SET_VERTEX_DATA4S_M + 16 / 4, "NV4097_SET_VERTEX_DATA4S_M + 16"},
{NV4097_SET_VERTEX_DATA4S_M + 20 / 4, "NV4097_SET_VERTEX_DATA4S_M + 20"},
{NV4097_SET_VERTEX_DATA4S_M + 24 / 4, "NV4097_SET_VERTEX_DATA4S_M + 24"},
{NV4097_SET_VERTEX_DATA4S_M + 28 / 4, "NV4097_SET_VERTEX_DATA4S_M + 28"},
{NV4097_SET_VERTEX_DATA4S_M + 32 / 4, "NV4097_SET_VERTEX_DATA4S_M + 32"},
{NV4097_SET_VERTEX_DATA4S_M + 36 / 4, "NV4097_SET_VERTEX_DATA4S_M + 36"},
{NV4097_SET_VERTEX_DATA4S_M + 40 / 4, "NV4097_SET_VERTEX_DATA4S_M + 40"},
{NV4097_SET_VERTEX_DATA4S_M + 44 / 4, "NV4097_SET_VERTEX_DATA4S_M + 44"},
{NV4097_SET_VERTEX_DATA4S_M + 48 / 4, "NV4097_SET_VERTEX_DATA4S_M + 48"},
{NV4097_SET_VERTEX_DATA4S_M + 52 / 4, "NV4097_SET_VERTEX_DATA4S_M + 52"},
{NV4097_SET_VERTEX_DATA4S_M + 56 / 4, "NV4097_SET_VERTEX_DATA4S_M + 56"},
{NV4097_SET_VERTEX_DATA4S_M + 60 / 4, "NV4097_SET_VERTEX_DATA4S_M + 60"},
{NV4097_SET_VERTEX_DATA4S_M + 64 / 4, "NV4097_SET_VERTEX_DATA4S_M + 64"},
{NV4097_SET_VERTEX_DATA4S_M + 68 / 4, "NV4097_SET_VERTEX_DATA4S_M + 68"},
{NV4097_SET_VERTEX_DATA4S_M + 72 / 4, "NV4097_SET_VERTEX_DATA4S_M + 72"},
{NV4097_SET_VERTEX_DATA4S_M + 76 / 4, "NV4097_SET_VERTEX_DATA4S_M + 76"},
{NV4097_SET_VERTEX_DATA4S_M + 80 / 4, "NV4097_SET_VERTEX_DATA4S_M + 80"},
{NV4097_SET_VERTEX_DATA4S_M + 84 / 4, "NV4097_SET_VERTEX_DATA4S_M + 84"},
{NV4097_SET_VERTEX_DATA4S_M + 88 / 4, "NV4097_SET_VERTEX_DATA4S_M + 88"},
{NV4097_SET_VERTEX_DATA4S_M + 92 / 4, "NV4097_SET_VERTEX_DATA4S_M + 92"},
{NV4097_SET_VERTEX_DATA4S_M + 96 / 4, "NV4097_SET_VERTEX_DATA4S_M + 96"},
{NV4097_SET_VERTEX_DATA4S_M + 100 / 4, "NV4097_SET_VERTEX_DATA4S_M + 100"},
{NV4097_SET_VERTEX_DATA4S_M + 104 / 4, "NV4097_SET_VERTEX_DATA4S_M + 104"},
{NV4097_SET_VERTEX_DATA4S_M + 108 / 4, "NV4097_SET_VERTEX_DATA4S_M + 108"},
{NV4097_SET_VERTEX_DATA4S_M + 112 / 4, "NV4097_SET_VERTEX_DATA4S_M + 112"},
{NV4097_SET_VERTEX_DATA4S_M + 116 / 4, "NV4097_SET_VERTEX_DATA4S_M + 116"},
{NV4097_SET_VERTEX_DATA4S_M + 120 / 4, "NV4097_SET_VERTEX_DATA4S_M + 120"},
{NV4097_SET_VERTEX_DATA4S_M + 124 / 4, "NV4097_SET_VERTEX_DATA4S_M + 124"},
{NV4097_SET_TEXTURE_OFFSET, "NV4097_SET_TEXTURE_OFFSET"},
{NV4097_SET_TEXTURE_FORMAT, "NV4097_SET_TEXTURE_FORMAT"},
{NV4097_SET_TEXTURE_ADDRESS, "NV4097_SET_TEXTURE_ADDRESS"},
{NV4097_SET_TEXTURE_CONTROL0, "NV4097_SET_TEXTURE_CONTROL0"},
{NV4097_SET_TEXTURE_CONTROL1, "NV4097_SET_TEXTURE_CONTROL1"},
{NV4097_SET_TEXTURE_FILTER, "NV4097_SET_TEXTURE_FILTER"},
{NV4097_SET_TEXTURE_IMAGE_RECT, "NV4097_SET_TEXTURE_IMAGE_RECT"},
{NV4097_SET_TEXTURE_BORDER_COLOR, "NV4097_SET_TEXTURE_BORDER_COLOR"},
{NV4097_SET_TEXTURE_OFFSET + 0x20 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x20"},
{NV4097_SET_TEXTURE_FORMAT + 0x20 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x20"},
{NV4097_SET_TEXTURE_ADDRESS + 0x20 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x20"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x20 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x20"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x20 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x20"},
{NV4097_SET_TEXTURE_FILTER + 0x20 / 4, "NV4097_SET_TEXTURE_FILTER + 0x20"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x20 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x20"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x20 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x20"},
{NV4097_SET_TEXTURE_OFFSET + 0x40 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x40"},
{NV4097_SET_TEXTURE_FORMAT + 0x40 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x40"},
{NV4097_SET_TEXTURE_ADDRESS + 0x40 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x40"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x40 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x40"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x40 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x40"},
{NV4097_SET_TEXTURE_FILTER + 0x40 / 4, "NV4097_SET_TEXTURE_FILTER + 0x40"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x40 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x40"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x40 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x40"},
{NV4097_SET_TEXTURE_OFFSET + 0x60 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x60"},
{NV4097_SET_TEXTURE_FORMAT + 0x60 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x60"},
{NV4097_SET_TEXTURE_ADDRESS + 0x60 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x60"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x60 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x60"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x60 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x60"},
{NV4097_SET_TEXTURE_FILTER + 0x60 / 4, "NV4097_SET_TEXTURE_FILTER + 0x60"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x60 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x60"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x60 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x60"},
{NV4097_SET_TEXTURE_OFFSET + 0x80 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x80"},
{NV4097_SET_TEXTURE_FORMAT + 0x80 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x80"},
{NV4097_SET_TEXTURE_ADDRESS + 0x80 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x80"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x80 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x80"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x80 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x80"},
{NV4097_SET_TEXTURE_FILTER + 0x80 / 4, "NV4097_SET_TEXTURE_FILTER + 0x80"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x80 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x80"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x80 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x80"},
{NV4097_SET_TEXTURE_OFFSET + 0xa0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0xa0"},
{NV4097_SET_TEXTURE_FORMAT + 0xa0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0xa0"},
{NV4097_SET_TEXTURE_ADDRESS + 0xa0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0xa0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0xa0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0xa0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0xa0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0xa0"},
{NV4097_SET_TEXTURE_FILTER + 0xa0 / 4, "NV4097_SET_TEXTURE_FILTER + 0xa0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0xa0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0xa0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0xa0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0xa0"},
{NV4097_SET_TEXTURE_OFFSET + 0xc0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0xc0"},
{NV4097_SET_TEXTURE_FORMAT + 0xc0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0xc0"},
{NV4097_SET_TEXTURE_ADDRESS + 0xc0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0xc0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0xc0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0xc0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0xc0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0xc0"},
{NV4097_SET_TEXTURE_FILTER + 0xc0 / 4, "NV4097_SET_TEXTURE_FILTER + 0xc0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0xc0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0xc0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0xc0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0xc0"},
{NV4097_SET_TEXTURE_OFFSET + 0xe0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0xe0"},
{NV4097_SET_TEXTURE_FORMAT + 0xe0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0xe0"},
{NV4097_SET_TEXTURE_ADDRESS + 0xe0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0xe0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0xe0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0xe0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0xe0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0xe0"},
{NV4097_SET_TEXTURE_FILTER + 0xe0 / 4, "NV4097_SET_TEXTURE_FILTER + 0xe0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0xe0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0xe0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0xe0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0xe0"},
{NV4097_SET_TEXTURE_OFFSET + 0x100 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x100"},
{NV4097_SET_TEXTURE_FORMAT + 0x100 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x100"},
{NV4097_SET_TEXTURE_ADDRESS + 0x100 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x100"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x100 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x100"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x100 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x100"},
{NV4097_SET_TEXTURE_FILTER + 0x100 / 4, "NV4097_SET_TEXTURE_FILTER + 0x100"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x100 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x100"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x100 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x100"},
{NV4097_SET_TEXTURE_OFFSET + 0x120 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x120"},
{NV4097_SET_TEXTURE_FORMAT + 0x120 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x120"},
{NV4097_SET_TEXTURE_ADDRESS + 0x120 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x120"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x120 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x120"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x120 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x120"},
{NV4097_SET_TEXTURE_FILTER + 0x120 / 4, "NV4097_SET_TEXTURE_FILTER + 0x120"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x120 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x120"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x120 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x120"},
{NV4097_SET_TEXTURE_OFFSET + 0x140 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x140"},
{NV4097_SET_TEXTURE_FORMAT + 0x140 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x140"},
{NV4097_SET_TEXTURE_ADDRESS + 0x140 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x140"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x140 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x140"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x140 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x140"},
{NV4097_SET_TEXTURE_FILTER + 0x140 / 4, "NV4097_SET_TEXTURE_FILTER + 0x140"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x140 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x140"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x140 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x140"},
{NV4097_SET_TEXTURE_OFFSET + 0x160 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x160"},
{NV4097_SET_TEXTURE_FORMAT + 0x160 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x160"},
{NV4097_SET_TEXTURE_ADDRESS + 0x160 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x160"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x160 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x160"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x160 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x160"},
{NV4097_SET_TEXTURE_FILTER + 0x160 / 4, "NV4097_SET_TEXTURE_FILTER + 0x160"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x160 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x160"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x160 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x160"},
{NV4097_SET_TEXTURE_OFFSET + 0x180 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x180"},
{NV4097_SET_TEXTURE_FORMAT + 0x180 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x180"},
{NV4097_SET_TEXTURE_ADDRESS + 0x180 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x180"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x180 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x180"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x180 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x180"},
{NV4097_SET_TEXTURE_FILTER + 0x180 / 4, "NV4097_SET_TEXTURE_FILTER + 0x180"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x180 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x180"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x180 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x180"},
{NV4097_SET_TEXTURE_OFFSET + 0x1a0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x1a0"},
{NV4097_SET_TEXTURE_FORMAT + 0x1a0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x1a0"},
{NV4097_SET_TEXTURE_ADDRESS + 0x1a0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x1a0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x1a0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x1a0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x1a0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x1a0"},
{NV4097_SET_TEXTURE_FILTER + 0x1a0 / 4, "NV4097_SET_TEXTURE_FILTER + 0x1a0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x1a0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x1a0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x1a0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x1a0"},
{NV4097_SET_TEXTURE_OFFSET + 0x1c0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x1c0"},
{NV4097_SET_TEXTURE_FORMAT + 0x1c0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x1c0"},
{NV4097_SET_TEXTURE_ADDRESS + 0x1c0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x1c0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x1c0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x1c0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x1c0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x1c0"},
{NV4097_SET_TEXTURE_FILTER + 0x1c0 / 4, "NV4097_SET_TEXTURE_FILTER + 0x1c0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x1c0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x1c0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x1c0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x1c0"},
{NV4097_SET_TEXTURE_OFFSET + 0x1e0 / 4, "NV4097_SET_TEXTURE_OFFSET + 0x1e0"},
{NV4097_SET_TEXTURE_FORMAT + 0x1e0 / 4, "NV4097_SET_TEXTURE_FORMAT + 0x1e0"},
{NV4097_SET_TEXTURE_ADDRESS + 0x1e0 / 4, "NV4097_SET_TEXTURE_ADDRESS + 0x1e0"},
{NV4097_SET_TEXTURE_CONTROL0 + 0x1e0 / 4, "NV4097_SET_TEXTURE_CONTROL0 + 0x1e0"},
{NV4097_SET_TEXTURE_CONTROL1 + 0x1e0 / 4, "NV4097_SET_TEXTURE_CONTROL1 + 0x1e0"},
{NV4097_SET_TEXTURE_FILTER + 0x1e0 / 4, "NV4097_SET_TEXTURE_FILTER + 0x1e0"},
{NV4097_SET_TEXTURE_IMAGE_RECT + 0x1e0 / 4, "NV4097_SET_TEXTURE_IMAGE_RECT + 0x1e0"},
{NV4097_SET_TEXTURE_BORDER_COLOR + 0x1e0 / 4, "NV4097_SET_TEXTURE_BORDER_COLOR + 0x1e0"},
{NV4097_SET_VERTEX_DATA4F_M, "NV4097_SET_VERTEX_DATA4F_M"},
{NV4097_SET_VERTEX_DATA4F_M + 4 / 4, "NV4097_SET_VERTEX_DATA4F_M + 4"},
{NV4097_SET_VERTEX_DATA4F_M + 8 / 4, "NV4097_SET_VERTEX_DATA4F_M + 8"},
{NV4097_SET_VERTEX_DATA4F_M + 12 / 4, "NV4097_SET_VERTEX_DATA4F_M + 12"},
{NV4097_SET_VERTEX_DATA4F_M + 16 / 4, "NV4097_SET_VERTEX_DATA4F_M + 16"},
{NV4097_SET_VERTEX_DATA4F_M + 20 / 4, "NV4097_SET_VERTEX_DATA4F_M + 20"},
{NV4097_SET_VERTEX_DATA4F_M + 24 / 4, "NV4097_SET_VERTEX_DATA4F_M + 24"},
{NV4097_SET_VERTEX_DATA4F_M + 28 / 4, "NV4097_SET_VERTEX_DATA4F_M + 28"},
{NV4097_SET_VERTEX_DATA4F_M + 32 / 4, "NV4097_SET_VERTEX_DATA4F_M + 32"},
{NV4097_SET_VERTEX_DATA4F_M + 36 / 4, "NV4097_SET_VERTEX_DATA4F_M + 36"},
{NV4097_SET_VERTEX_DATA4F_M + 40 / 4, "NV4097_SET_VERTEX_DATA4F_M + 40"},
{NV4097_SET_VERTEX_DATA4F_M + 44 / 4, "NV4097_SET_VERTEX_DATA4F_M + 44"},
{NV4097_SET_VERTEX_DATA4F_M + 48 / 4, "NV4097_SET_VERTEX_DATA4F_M + 48"},
{NV4097_SET_VERTEX_DATA4F_M + 52 / 4, "NV4097_SET_VERTEX_DATA4F_M + 52"},
{NV4097_SET_VERTEX_DATA4F_M + 56 / 4, "NV4097_SET_VERTEX_DATA4F_M + 56"},
{NV4097_SET_VERTEX_DATA4F_M + 60 / 4, "NV4097_SET_VERTEX_DATA4F_M + 60"},
{NV4097_SET_VERTEX_DATA4F_M + 64 / 4, "NV4097_SET_VERTEX_DATA4F_M + 64"},
{NV4097_SET_VERTEX_DATA4F_M + 68 / 4, "NV4097_SET_VERTEX_DATA4F_M + 68"},
{NV4097_SET_VERTEX_DATA4F_M + 72 / 4, "NV4097_SET_VERTEX_DATA4F_M + 72"},
{NV4097_SET_VERTEX_DATA4F_M + 76 / 4, "NV4097_SET_VERTEX_DATA4F_M + 76"},
{NV4097_SET_VERTEX_DATA4F_M + 80 / 4, "NV4097_SET_VERTEX_DATA4F_M + 80"},
{NV4097_SET_VERTEX_DATA4F_M + 84 / 4, "NV4097_SET_VERTEX_DATA4F_M + 84"},
{NV4097_SET_VERTEX_DATA4F_M + 88 / 4, "NV4097_SET_VERTEX_DATA4F_M + 88"},
{NV4097_SET_VERTEX_DATA4F_M + 92 / 4, "NV4097_SET_VERTEX_DATA4F_M + 92"},
{NV4097_SET_VERTEX_DATA4F_M + 96 / 4, "NV4097_SET_VERTEX_DATA4F_M + 96"},
{NV4097_SET_VERTEX_DATA4F_M + 100 / 4, "NV4097_SET_VERTEX_DATA4F_M + 100"},
{NV4097_SET_VERTEX_DATA4F_M + 104 / 4, "NV4097_SET_VERTEX_DATA4F_M + 104"},
{NV4097_SET_VERTEX_DATA4F_M + 108 / 4, "NV4097_SET_VERTEX_DATA4F_M + 108"},
{NV4097_SET_VERTEX_DATA4F_M + 112 / 4, "NV4097_SET_VERTEX_DATA4F_M + 112"},
{NV4097_SET_VERTEX_DATA4F_M + 116 / 4, "NV4097_SET_VERTEX_DATA4F_M + 116"},
{NV4097_SET_VERTEX_DATA4F_M + 120 / 4, "NV4097_SET_VERTEX_DATA4F_M + 120"},
{NV4097_SET_VERTEX_DATA4F_M + 124 / 4, "NV4097_SET_VERTEX_DATA4F_M + 124"},
{NV4097_SET_VERTEX_DATA4F_M + 128 / 4, "NV4097_SET_VERTEX_DATA4F_M + 128"},
{NV4097_SET_VERTEX_DATA4F_M + 132 / 4, "NV4097_SET_VERTEX_DATA4F_M + 132"},
{NV4097_SET_VERTEX_DATA4F_M + 136 / 4, "NV4097_SET_VERTEX_DATA4F_M + 136"},
{NV4097_SET_VERTEX_DATA4F_M + 140 / 4, "NV4097_SET_VERTEX_DATA4F_M + 140"},
{NV4097_SET_VERTEX_DATA4F_M + 144 / 4, "NV4097_SET_VERTEX_DATA4F_M + 144"},
{NV4097_SET_VERTEX_DATA4F_M + 148 / 4, "NV4097_SET_VERTEX_DATA4F_M + 148"},
{NV4097_SET_VERTEX_DATA4F_M + 152 / 4, "NV4097_SET_VERTEX_DATA4F_M + 152"},
{NV4097_SET_VERTEX_DATA4F_M + 156 / 4, "NV4097_SET_VERTEX_DATA4F_M + 156"},
{NV4097_SET_VERTEX_DATA4F_M + 160 / 4, "NV4097_SET_VERTEX_DATA4F_M + 160"},
{NV4097_SET_VERTEX_DATA4F_M + 164 / 4, "NV4097_SET_VERTEX_DATA4F_M + 164"},
{NV4097_SET_VERTEX_DATA4F_M + 168 / 4, "NV4097_SET_VERTEX_DATA4F_M + 168"},
{NV4097_SET_VERTEX_DATA4F_M + 172 / 4, "NV4097_SET_VERTEX_DATA4F_M + 172"},
{NV4097_SET_VERTEX_DATA4F_M + 176 / 4, "NV4097_SET_VERTEX_DATA4F_M + 176"},
{NV4097_SET_VERTEX_DATA4F_M + 180 / 4, "NV4097_SET_VERTEX_DATA4F_M + 180"},
{NV4097_SET_VERTEX_DATA4F_M + 184 / 4, "NV4097_SET_VERTEX_DATA4F_M + 184"},
{NV4097_SET_VERTEX_DATA4F_M + 188 / 4, "NV4097_SET_VERTEX_DATA4F_M + 188"},
{NV4097_SET_VERTEX_DATA4F_M + 192 / 4, "NV4097_SET_VERTEX_DATA4F_M + 192"},
{NV4097_SET_VERTEX_DATA4F_M + 196 / 4, "NV4097_SET_VERTEX_DATA4F_M + 196"},
{NV4097_SET_VERTEX_DATA4F_M + 200 / 4, "NV4097_SET_VERTEX_DATA4F_M + 200"},
{NV4097_SET_VERTEX_DATA4F_M + 204 / 4, "NV4097_SET_VERTEX_DATA4F_M + 204"},
{NV4097_SET_VERTEX_DATA4F_M + 208 / 4, "NV4097_SET_VERTEX_DATA4F_M + 208"},
{NV4097_SET_VERTEX_DATA4F_M + 212 / 4, "NV4097_SET_VERTEX_DATA4F_M + 212"},
{NV4097_SET_VERTEX_DATA4F_M + 216 / 4, "NV4097_SET_VERTEX_DATA4F_M + 216"},
{NV4097_SET_VERTEX_DATA4F_M + 220 / 4, "NV4097_SET_VERTEX_DATA4F_M + 220"},
{NV4097_SET_VERTEX_DATA4F_M + 224 / 4, "NV4097_SET_VERTEX_DATA4F_M + 224"},
{NV4097_SET_VERTEX_DATA4F_M + 228 / 4, "NV4097_SET_VERTEX_DATA4F_M + 228"},
{NV4097_SET_VERTEX_DATA4F_M + 232 / 4, "NV4097_SET_VERTEX_DATA4F_M + 232"},
{NV4097_SET_VERTEX_DATA4F_M + 236 / 4, "NV4097_SET_VERTEX_DATA4F_M + 236"},
{NV4097_SET_VERTEX_DATA4F_M + 240 / 4, "NV4097_SET_VERTEX_DATA4F_M + 240"},
{NV4097_SET_VERTEX_DATA4F_M + 244 / 4, "NV4097_SET_VERTEX_DATA4F_M + 244"},
{NV4097_SET_VERTEX_DATA4F_M + 248 / 4, "NV4097_SET_VERTEX_DATA4F_M + 248"},
{NV4097_SET_VERTEX_DATA4F_M + 252 / 4, "NV4097_SET_VERTEX_DATA4F_M + 252"},
{NV4097_SET_COLOR_KEY_COLOR, "NV4097_SET_COLOR_KEY_COLOR"},
{NV4097_SET_SHADER_CONTROL, "NV4097_SET_SHADER_CONTROL"},
{NV4097_SET_INDEXED_CONSTANT_READ_LIMITS, "NV4097_SET_INDEXED_CONSTANT_READ_LIMITS"},
{NV4097_SET_SEMAPHORE_OFFSET, "NV4097_SET_SEMAPHORE_OFFSET"},
{NV4097_BACK_END_WRITE_SEMAPHORE_RELEASE, "NV4097_BACK_END_WRITE_SEMAPHORE_RELEASE"},
{NV4097_TEXTURE_READ_SEMAPHORE_RELEASE, "NV4097_TEXTURE_READ_SEMAPHORE_RELEASE"},
{NV4097_SET_ZMIN_MAX_CONTROL, "NV4097_SET_ZMIN_MAX_CONTROL"},
{NV4097_SET_ANTI_ALIASING_CONTROL, "NV4097_SET_ANTI_ALIASING_CONTROL"},
{NV4097_SET_SURFACE_COMPRESSION, "NV4097_SET_SURFACE_COMPRESSION"},
{NV4097_SET_ZCULL_EN, "NV4097_SET_ZCULL_EN"},
{NV4097_SET_SHADER_WINDOW, "NV4097_SET_SHADER_WINDOW"},
{NV4097_SET_ZSTENCIL_CLEAR_VALUE, "NV4097_SET_ZSTENCIL_CLEAR_VALUE"},
{NV4097_SET_COLOR_CLEAR_VALUE, "NV4097_SET_COLOR_CLEAR_VALUE"},
{NV4097_CLEAR_SURFACE, "NV4097_CLEAR_SURFACE"},
{NV4097_SET_CLEAR_RECT_HORIZONTAL, "NV4097_SET_CLEAR_RECT_HORIZONTAL"},
{NV4097_SET_CLEAR_RECT_VERTICAL, "NV4097_SET_CLEAR_RECT_VERTICAL"},
{NV4097_SET_CLIP_ID_TEST_ENABLE, "NV4097_SET_CLIP_ID_TEST_ENABLE"},
{NV4097_SET_RESTART_INDEX_ENABLE, "NV4097_SET_RESTART_INDEX_ENABLE"},
{NV4097_SET_RESTART_INDEX, "NV4097_SET_RESTART_INDEX"},
{NV4097_SET_LINE_STIPPLE, "NV4097_SET_LINE_STIPPLE"},
{NV4097_SET_LINE_STIPPLE_PATTERN, "NV4097_SET_LINE_STIPPLE_PATTERN"},
{NV4097_SET_VERTEX_DATA1F_M, "NV4097_SET_VERTEX_DATA1F_M"},
{NV4097_SET_VERTEX_DATA1F_M + 4 / 4, "NV4097_SET_VERTEX_DATA1F_M + 4"},
{NV4097_SET_VERTEX_DATA1F_M + 8 / 4, "NV4097_SET_VERTEX_DATA1F_M + 8"},
{NV4097_SET_VERTEX_DATA1F_M + 12 / 4, "NV4097_SET_VERTEX_DATA1F_M + 12"},
{NV4097_SET_VERTEX_DATA1F_M + 16 / 4, "NV4097_SET_VERTEX_DATA1F_M + 16"},
{NV4097_SET_VERTEX_DATA1F_M + 20 / 4, "NV4097_SET_VERTEX_DATA1F_M + 20"},
{NV4097_SET_VERTEX_DATA1F_M + 24 / 4, "NV4097_SET_VERTEX_DATA1F_M + 24"},
{NV4097_SET_VERTEX_DATA1F_M + 28 / 4, "NV4097_SET_VERTEX_DATA1F_M + 28"},
{NV4097_SET_VERTEX_DATA1F_M + 32 / 4, "NV4097_SET_VERTEX_DATA1F_M + 32"},
{NV4097_SET_VERTEX_DATA1F_M + 36 / 4, "NV4097_SET_VERTEX_DATA1F_M + 36"},
{NV4097_SET_VERTEX_DATA1F_M + 40 / 4, "NV4097_SET_VERTEX_DATA1F_M + 40"},
{NV4097_SET_VERTEX_DATA1F_M + 44 / 4, "NV4097_SET_VERTEX_DATA1F_M + 44"},
{NV4097_SET_VERTEX_DATA1F_M + 48 / 4, "NV4097_SET_VERTEX_DATA1F_M + 48"},
{NV4097_SET_VERTEX_DATA1F_M + 52 / 4, "NV4097_SET_VERTEX_DATA1F_M + 52"},
{NV4097_SET_VERTEX_DATA1F_M + 56 / 4, "NV4097_SET_VERTEX_DATA1F_M + 56"},
{NV4097_SET_VERTEX_DATA1F_M + 60 / 4, "NV4097_SET_VERTEX_DATA1F_M + 60"},
{NV4097_SET_TRANSFORM_EXECUTION_MODE, "NV4097_SET_TRANSFORM_EXECUTION_MODE"},
{NV4097_SET_RENDER_ENABLE, "NV4097_SET_RENDER_ENABLE"},
{NV4097_SET_TRANSFORM_PROGRAM_LOAD, "NV4097_SET_TRANSFORM_PROGRAM_LOAD"},
{NV4097_SET_TRANSFORM_PROGRAM_START, "NV4097_SET_TRANSFORM_PROGRAM_START"},
{NV4097_SET_ZCULL_CONTROL0, "NV4097_SET_ZCULL_CONTROL0"},
{NV4097_SET_ZCULL_CONTROL1, "NV4097_SET_ZCULL_CONTROL1"},
{NV4097_SET_SCULL_CONTROL, "NV4097_SET_SCULL_CONTROL"},
{NV4097_SET_POINT_SIZE, "NV4097_SET_POINT_SIZE"},
{NV4097_SET_POINT_PARAMS_ENABLE, "NV4097_SET_POINT_PARAMS_ENABLE"},
{NV4097_SET_POINT_SPRITE_CONTROL, "NV4097_SET_POINT_SPRITE_CONTROL"},
{NV4097_SET_TRANSFORM_TIMEOUT, "NV4097_SET_TRANSFORM_TIMEOUT"},
{NV4097_SET_TRANSFORM_CONSTANT_LOAD, "NV4097_SET_TRANSFORM_CONSTANT_LOAD"},
{NV4097_SET_FREQUENCY_DIVIDER_OPERATION, "NV4097_SET_FREQUENCY_DIVIDER_OPERATION"},
{NV4097_SET_ATTRIB_COLOR, "NV4097_SET_ATTRIB_COLOR"},
{NV4097_SET_ATTRIB_TEX_COORD, "NV4097_SET_ATTRIB_TEX_COORD"},
{NV4097_SET_ATTRIB_TEX_COORD_EX, "NV4097_SET_ATTRIB_TEX_COORD_EX"},
{NV4097_SET_ATTRIB_UCLIP0, "NV4097_SET_ATTRIB_UCLIP0"},
{NV4097_SET_ATTRIB_UCLIP1, "NV4097_SET_ATTRIB_UCLIP1"},
{NV4097_INVALIDATE_L2, "NV4097_INVALIDATE_L2"},
{NV4097_SET_REDUCE_DST_COLOR, "NV4097_SET_REDUCE_DST_COLOR"},
{NV4097_SET_NO_PARANOID_TEXTURE_FETCHES, "NV4097_SET_NO_PARANOID_TEXTURE_FETCHES"},
{NV4097_SET_SHADER_PACKER, "NV4097_SET_SHADER_PACKER"},
{NV4097_SET_VERTEX_ATTRIB_INPUT_MASK, "NV4097_SET_VERTEX_ATTRIB_INPUT_MASK"},
{NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK, "NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK"},
{NV4097_SET_TRANSFORM_BRANCH_BITS, "NV4097_SET_TRANSFORM_BRANCH_BITS"},
{NV0039_SET_OBJECT, "NV0039_SET_OBJECT"},
{NV0039_SET_CONTEXT_DMA_NOTIFIES, "NV0039_SET_CONTEXT_DMA_NOTIFIES"},
{NV0039_SET_CONTEXT_DMA_BUFFER_IN, "NV0039_SET_CONTEXT_DMA_BUFFER_IN"},
{NV0039_SET_CONTEXT_DMA_BUFFER_OUT, "NV0039_SET_CONTEXT_DMA_BUFFER_OUT"},
{NV0039_OFFSET_IN, "NV0039_OFFSET_IN"},
{NV0039_OFFSET_OUT, "NV0039_OFFSET_OUT"},
{NV0039_PITCH_IN, "NV0039_PITCH_IN"},
{NV0039_PITCH_OUT, "NV0039_PITCH_OUT"},
{NV0039_LINE_LENGTH_IN, "NV0039_LINE_LENGTH_IN"},
{NV0039_LINE_COUNT, "NV0039_LINE_COUNT"},
{NV0039_FORMAT, "NV0039_FORMAT"},
{NV0039_BUFFER_NOTIFY, "NV0039_BUFFER_NOTIFY"},
{NV3062_SET_OBJECT, "NV3062_SET_OBJECT"},
{NV3062_SET_CONTEXT_DMA_NOTIFIES, "NV3062_SET_CONTEXT_DMA_NOTIFIES"},
{NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE, "NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE"},
{NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN, "NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN"},
{NV3062_SET_COLOR_FORMAT, "NV3062_SET_COLOR_FORMAT"},
{NV3062_SET_PITCH, "NV3062_SET_PITCH"},
{NV3062_SET_OFFSET_SOURCE, "NV3062_SET_OFFSET_SOURCE"},
{NV3062_SET_OFFSET_DESTIN, "NV3062_SET_OFFSET_DESTIN"},
{NV309E_SET_OBJECT, "NV309E_SET_OBJECT"},
{NV309E_SET_CONTEXT_DMA_NOTIFIES, "NV309E_SET_CONTEXT_DMA_NOTIFIES"},
{NV309E_SET_CONTEXT_DMA_IMAGE, "NV309E_SET_CONTEXT_DMA_IMAGE"},
{NV309E_SET_FORMAT, "NV309E_SET_FORMAT"},
{NV309E_SET_OFFSET, "NV309E_SET_OFFSET"},
{NV308A_SET_OBJECT, "NV308A_SET_OBJECT"},
{NV308A_SET_CONTEXT_DMA_NOTIFIES, "NV308A_SET_CONTEXT_DMA_NOTIFIES"},
{NV308A_SET_CONTEXT_COLOR_KEY, "NV308A_SET_CONTEXT_COLOR_KEY"},
{NV308A_SET_CONTEXT_CLIP_RECTANGLE, "NV308A_SET_CONTEXT_CLIP_RECTANGLE"},
{NV308A_SET_CONTEXT_PATTERN, "NV308A_SET_CONTEXT_PATTERN"},
{NV308A_SET_CONTEXT_ROP, "NV308A_SET_CONTEXT_ROP"},
{NV308A_SET_CONTEXT_BETA1, "NV308A_SET_CONTEXT_BETA1"},
{NV308A_SET_CONTEXT_BETA4, "NV308A_SET_CONTEXT_BETA4"},
{NV308A_SET_CONTEXT_SURFACE, "NV308A_SET_CONTEXT_SURFACE"},
{NV308A_SET_COLOR_CONVERSION, "NV308A_SET_COLOR_CONVERSION"},
{NV308A_SET_OPERATION, "NV308A_SET_OPERATION"},
{NV308A_SET_COLOR_FORMAT, "NV308A_SET_COLOR_FORMAT"},
{NV308A_POINT, "NV308A_POINT"},
{NV308A_SIZE_OUT, "NV308A_SIZE_OUT"},
{NV308A_SIZE_IN, "NV308A_SIZE_IN"},
{NV308A_COLOR, "NV308A_COLOR"},
{NV3089_SET_OBJECT, "NV3089_SET_OBJECT"},
{NV3089_SET_CONTEXT_DMA_NOTIFIES, "NV3089_SET_CONTEXT_DMA_NOTIFIES"},
{NV3089_SET_CONTEXT_DMA_IMAGE, "NV3089_SET_CONTEXT_DMA_IMAGE"},
{NV3089_SET_CONTEXT_PATTERN, "NV3089_SET_CONTEXT_PATTERN"},
{NV3089_SET_CONTEXT_ROP, "NV3089_SET_CONTEXT_ROP"},
{NV3089_SET_CONTEXT_BETA1, "NV3089_SET_CONTEXT_BETA1"},
{NV3089_SET_CONTEXT_BETA4, "NV3089_SET_CONTEXT_BETA4"},
{NV3089_SET_CONTEXT_SURFACE, "NV3089_SET_CONTEXT_SURFACE"},
{NV3089_SET_COLOR_CONVERSION, "NV3089_SET_COLOR_CONVERSION"},
{NV3089_SET_COLOR_FORMAT, "NV3089_SET_COLOR_FORMAT"},
{NV3089_SET_OPERATION, "NV3089_SET_OPERATION"},
{NV3089_CLIP_POINT, "NV3089_CLIP_POINT"},
{NV3089_CLIP_SIZE, "NV3089_CLIP_SIZE"},
{NV3089_IMAGE_OUT_POINT, "NV3089_IMAGE_OUT_POINT"},
{NV3089_IMAGE_OUT_SIZE, "NV3089_IMAGE_OUT_SIZE"},
{NV3089_DS_DX, "NV3089_DS_DX"},
{NV3089_DT_DY, "NV3089_DT_DY"},
{NV3089_IMAGE_IN_SIZE, "NV3089_IMAGE_IN_SIZE"},
{NV3089_IMAGE_IN_FORMAT, "NV3089_IMAGE_IN_FORMAT"},
{NV3089_IMAGE_IN_OFFSET, "NV3089_IMAGE_IN_OFFSET"},
{NV3089_IMAGE_IN, "NV3089_IMAGE_IN"},
{GCM_SET_DRIVER_OBJECT, "SET_DRIVER_OBJECT"},
{GCM_DRIVER_QUEUE, "DRIVER_QUEUE + 0x0"},
{GCM_DRIVER_QUEUE+1, "DRIVER_QUEUE + 0x4"},
{GCM_FLIP_HEAD, "FLIP_HEAD"},
{GCM_FLIP_HEAD+1, "FLIP_HEAD + 0x4"},
{GCM_SET_USER_COMMAND, "SET_USER_COMMAND"},
{GCM_FLIP_COMMAND, "FLIP_COMMAND"},
};
#undef KEY_STR
}
// Resolve a 14-bit RSX method id to its symbolic name.
// On success returns {"CELL_GCM_", <name>} and leaves string_name untouched;
// on failure returns empty views and fills string_name with a placeholder.
std::pair<std::string_view, std::string_view> rsx::get_method_name(u32 id, std::string& string_name)
{
	if (const auto it = methods_name.find(id); it != methods_name.end())
	{
		// Known method: caller concatenates the prefix and the mapped name.
		return {std::string_view("CELL_GCM_"), it->second};
	}

	// Unknown method: synthesize a diagnostic name into the caller's buffer.
	string_name.clear();
	fmt::append(string_name, "Unnamed method 0x%04x", id);
	return {};
}
// Various parameter pretty printing functions
namespace
{
	/*
	std::string get_texture_wrap_mode(u8 wrap)
	{
		switch (rsx::to_texture_wrap_mode(wrap))
		{
		case rsx::texture_wrap_mode::wrap: return "WRAP";
		case rsx::texture_wrap_mode::mirror: return "MIRROR";
		case rsx::texture_wrap_mode::clamp_to_edge: return "CLAMP_TO_EDGE";
		case rsx::texture_wrap_mode::border: return "BORDER";
		case rsx::texture_wrap_mode::clamp: return "CLAMP";
		case rsx::texture_wrap_mode::mirror_once_clamp_to_edge: return "MIRROR_ONCE_CLAMP_TO_EDGE";
		case rsx::texture_wrap_mode::mirror_once_border: return "MIRROR_ONCE_BORDER";
		case rsx::texture_wrap_mode::mirror_once_clamp: return "MIRROR_ONCE_CLAMP";
		}
		return "Error";
	}

	std::string texture_address(usz index, u32 arg)
	{
		return "Texture " + std::to_string(index) + ": wrap_s = " + get_texture_wrap_mode(arg & 0xF) +
			" wrap_t = " + get_texture_wrap_mode((arg >> 8) & 0xF) + " wrap_r = " +
			get_texture_wrap_mode((arg >> 16) & 0xF) + " unsigned remap = " +
			std::to_string((arg >> 12) & 0xF) + " zfunc = " + get_zfunc_name((arg >> 28) & 0xF) +
			" gamma = " + std::to_string((arg >> 20) & 0xF) + " aniso bias = " +
			std::to_string((arg >> 4) & 0xF) + " signed remap = " + std::to_string((arg >> 24) & 0xF);
	}

	std::string get_remap_channel(u8 op) noexcept
	{
		switch (op)
		{
		case 0: return "A";
		case 1: return "R";
		case 2: return "G";
		case 3: return "B";
		}
		return "Error";
	}

	std::string texture_control1(usz index, u32 arg) noexcept
	{
		return "Texture " + std::to_string(index) + " Component 0 = " + get_remap_channel(arg & 0x3) +
			" Component 1 = " + get_remap_channel((arg >> 2) & 0x3) + " Component 2 = " +
			get_remap_channel((arg >> 4) & 0x3) + " Component 3 = " +
			get_remap_channel((arg >> 6) & 0x3);
	}

	std::string texture_border_color(usz index, u32 arg)
	{
		return "Texture " + std::to_string(index) + " border color = " + std::to_string(arg);
	}

	std::string texture_filter(usz index, u32 arg)
	{
		return "Texture " + std::to_string(index) + " bias = " + std::to_string(arg & 0x1fff) +
			" min_filter = " + std::to_string((arg >> 16) & 0x7) + " mag_filter = " +
			std::to_string((arg >> 24) & 0x7) + " convolution_filter = " +
			std::to_string((arg >> 13) & 0xF) + " a_signed = " + std::to_string((arg >> 28) & 0x1) +
			" r_signed = " + std::to_string((arg >> 29) & 0x1) + " g_signed = " +
			std::to_string((arg >> 30) & 0x1) + " b_signed = " + std::to_string((arg >> 31) & 0x1);
	}*/

	namespace
	{
		// Adapter: wraps rsx::registers_decoder<Opcode>::dump into the uniform
		// void(std::string& out, u32 id, u32 arg) signature stored in the table.
		template <u32 Opcode>
		void register_pretty_function(std::string& out, u32 /*id*/, u32 arg)
		{
			rsx::registers_decoder<Opcode>::dump(out, arg);
		}

		// Builds a dense dispatch table with one slot per possible 14-bit method id.
		// opcode_list is consumed five entries per fold step (hence the
		// static_assert below on its size), instantiating one pretty-printer
		// per listed opcode; unlisted slots stay null.
		// NOTE(review): the source this chunk was extracted from had
		// "&register_pretty_function" mangled into "(R)ister_pretty_function"
		// by HTML-entity decoding of "&reg"; restored to the address-of form here.
		template <typename T, T... Index>
		std::array<void(*)(std::string&, u32, u32), 1 << 14> create_printing_table(std::integer_sequence<T, Index...>)
		{
			std::array<void(*)(std::string&, u32, u32), 1 << 14> result{};

			((result[opcode_list[Index * 5 + 0]] = &register_pretty_function<opcode_list[Index * 5 + 0]>,
			result[opcode_list[Index * 5 + 1]] = &register_pretty_function<opcode_list[Index * 5 + 1]>,
			result[opcode_list[Index * 5 + 2]] = &register_pretty_function<opcode_list[Index * 5 + 2]>,
			result[opcode_list[Index * 5 + 3]] = &register_pretty_function<opcode_list[Index * 5 + 3]>,
			result[opcode_list[Index * 5 + 4]] = &register_pretty_function<opcode_list[Index * 5 + 4]>
			), ...);

			return result;
		}
	}

	// Global pretty-printer table, filled at static initialization time.
	const auto printing_functions =
		create_printing_table(std::make_index_sequence<std::size(opcode_list) / 5>());

	// create_printing_table consumes opcode_list in groups of five.
	static_assert(std::size(opcode_list) % 5 == 0);

	/* {
		{ NV4097_DRAW_ARRAYS, [](u32 arg) -> std::string { return "Draw " + std::to_string((arg >> 24) + 1) + " vertex starting from " + std::to_string(arg & 0xFFFFFF); } },
		{ NV4097_DRAW_INDEX_ARRAY, [](u32 arg) -> std::string { return "Draw " + std::to_string((arg >> 24) + 1) + " index starting from " + std::to_string(arg & 0xFFFFFF); } },
		{ NV4097_TEXTURE_READ_SEMAPHORE_RELEASE, [](u32 arg) -> std::string { return "Write semaphore value " + std::to_string(arg); } },
		{ NV4097_CLEAR_SURFACE, [](u32 arg) -> std::string { return "Clear surface " + std::string(arg & 0x1 ? "Depth " : "") + std::string(arg & 0x2 ? "Stencil " : "") + std::string(arg & 0xF0 ? "Color " : ""); } },
	};*/
}
// Return the pretty-printing function registered for the given method id,
// or a generic fallback that prints "<name>: 0x<value>".
std::add_pointer_t<void(std::string&, u32, u32)> rsx::get_pretty_printing_function(u32 id)
{
	if (id < printing_functions.size())
	{
		if (const auto func = printing_functions[id])
		{
			// A dedicated decoder exists for this opcode.
			return func;
		}
	}

	// Fallback: resolve the symbolic name (or a synthesized placeholder)
	// and append it together with the raw register value.
	return [](std::string& result, u32 id, u32 v)
	{
		std::string string_name;
		const auto [name_prefix, name] = rsx::get_method_name(id, string_name);

		if (string_name.empty())
		{
			fmt::append(result, "%s: 0x%08x", name, v);
		}
		else
		{
			fmt::append(result, "%s: 0x%08x", string_name, v);
		}
	};
}
| 70,790
|
C++
|
.cpp
| 1,024
| 66.06543
| 125
| 0.698872
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,382
|
GSRender.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/GSRender.cpp
|
#include "stdafx.h"
#include "GSRender.h"
GSRender::GSRender(utils::serial* ar) noexcept : rsx::thread(ar)
{
if (auto gs_frame = Emu.GetCallbacks().get_gs_frame())
{
m_frame = gs_frame.release();
}
else
{
m_frame = nullptr;
}
}
GSRender::~GSRender()
{
m_context = nullptr;
if (m_frame)
{
m_frame->close();
}
}
void GSRender::on_init_thread()
{
if (m_frame)
{
m_context = m_frame->make_context();
m_frame->set_current(m_context);
}
}
void GSRender::on_exit()
{
rsx::thread::on_exit();
if (m_frame)
{
m_frame->hide();
m_frame->delete_context(m_context);
m_context = nullptr;
}
}
void GSRender::flip(const rsx::display_flip_info_t&)
{
if (m_frame)
{
m_frame->flip(m_context);
}
}
f64 GSRender::get_display_refresh_rate() const
{
if (m_frame)
{
return m_frame->client_display_rate();
}
// Minimum
return 20.;
}
| 862
|
C++
|
.cpp
| 55
| 13.672727
| 64
| 0.667503
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,383
|
RSXFIFO.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXFIFO.cpp
|
#include "stdafx.h"
#include "RSXFIFO.h"
#include "RSXThread.h"
#include "Capture/rsx_capture.h"
#include "Common/time.hpp"
#include "Core/RSXReservationLock.hpp"
#include "Emu/Memory/vm_reservation.h"
#include "Emu/Cell/lv2/sys_rsx.h"
#include "NV47/HW/context.h"
#include "util/asm.hpp"
#include <bitset>
using spu_rdata_t = std::byte[128];
extern void mov_rdata(spu_rdata_t& _dst, const spu_rdata_t& _src);
extern bool cmp_rdata(const spu_rdata_t& _lhs, const spu_rdata_t& _rhs);
namespace rsx
{
namespace FIFO
{
FIFO_control::FIFO_control(::rsx::thread* pctrl)
{
m_thread = pctrl;
m_ctrl = pctrl->ctrl;
m_iotable = &pctrl->iomap_table;
}
u32 FIFO_control::translate_address(u32 address) const
{
return m_iotable->get_addr(address);
}
void FIFO_control::sync_get() const
{
m_ctrl->get.release(m_internal_get);
}
void FIFO_control::restore_state(u32 cmd, u32 count)
{
m_cmd = cmd;
m_command_inc = ((m_cmd & RSX_METHOD_NON_INCREMENT_CMD_MASK) == RSX_METHOD_NON_INCREMENT_CMD) ? 0 : 4;
m_remaining_commands = count;
m_internal_get = m_ctrl->get - 4;
m_args_ptr = m_iotable->get_addr(m_internal_get);
m_command_reg = (m_cmd & 0xffff) + m_command_inc * (((m_cmd >> 18) - count) & 0x7ff) - m_command_inc;
}
void FIFO_control::inc_get(bool wait)
{
m_internal_get += 4;
if (wait && read_put<false>() == m_internal_get)
{
// NOTE: Only supposed to be invoked to wait for a single arg on command[0] (4 bytes)
// Wait for put to allow us to procceed execution
sync_get();
invalidate_cache();
while (read_put() == m_internal_get && !Emu.IsStopped())
{
m_thread->cpu_wait({});
}
}
}
template <bool Full>
inline u32 FIFO_control::read_put() const
{
if constexpr (!Full)
{
return m_ctrl->put & ~3;
}
else
{
if (u32 put = m_ctrl->put; (put & 3) == 0) [[likely]]
{
return put;
}
return m_ctrl->put.and_fetch(~3);
}
}
std::pair<bool, u32> FIFO_control::fetch_u32(u32 addr)
{
if (addr - m_cache_addr >= m_cache_size)
{
const u32 put = read_put();
if (put == addr)
{
return {false, FIFO_EMPTY};
}
m_cache_addr = addr & -128;
const u32 addr1 = m_iotable->get_addr(m_cache_addr);
if (addr1 == umax)
{
m_cache_size = 0;
return {false, FIFO_ERROR};
}
m_cache_size = std::min<u32>((put | 0x7f) - m_cache_addr, u32{sizeof(m_cache)} - 1) + 1;
if (0x100000 - (m_cache_addr & 0xfffff) < m_cache_size)
{
// Check if memory layout changes in the next 1MB page boundary
if ((addr1 >> 20) + 1 != (m_iotable->get_addr(m_cache_addr + 0x100000) >> 20))
{
// Trim cache as needed if memory layout changes
m_cache_size = 0x100000 - (m_cache_addr & 0xfffff);
}
}
// Make mask of cache lines to fetch
u8 to_fetch = static_cast<u8>((1u << (m_cache_size / 128)) - 1);
if (addr < put && put < m_cache_addr + m_cache_size)
{
// Adjust to knownly-prepared FIFO buffer bounds
m_cache_size = put - m_cache_addr;
}
// Atomic FIFO debug options
const bool force_cache_fill = g_cfg.core.rsx_fifo_accuracy == rsx_fifo_mode::atomic_ordered;
const bool strict_fetch_ordering = g_cfg.core.rsx_fifo_accuracy >= rsx_fifo_mode::atomic_ordered;
rsx::reservation_lock<true, 1> rsx_lock(addr1, m_cache_size, true);
const auto src = vm::_ptr<spu_rdata_t>(addr1);
u64 start_time = 0;
u32 bytes_read = 0;
// Find the next set bit after every iteration
for (int i = 0;; i = (std::countr_zero<u32>(utils::rol8(to_fetch, 0 - i - 1)) + i + 1) % 8)
{
// If a reservation is being updated, try to load another
const auto& res = vm::reservation_acquire(addr1 + i * 128);
const u64 time0 = res;
if (!(time0 & 127))
{
mov_rdata(m_cache[i], src[i]);
if (time0 == res && cmp_rdata(m_cache[i], src[i]))
{
// The fetch of the cache line content has been successful, unset its bit
to_fetch &= ~(1u << i);
if (!to_fetch)
{
break;
}
bytes_read += 128;
continue;
}
}
if (!start_time)
{
if (bytes_read >= 256 && !force_cache_fill)
{
// Cut our losses if we have something to work with.
// This is the first time falling out of the reservation loop above, so we have clean data with no holes.
m_cache_size = bytes_read;
break;
}
start_time = get_system_time();
}
auto now = get_system_time();
if (now - start_time >= 50u)
{
if (m_thread->is_stopped())
{
return {};
}
m_thread->cpu_wait({});
const auto then = std::exchange(now, get_system_time());
start_time = now;
m_thread->performance_counters.idle_time += now - then;
}
else
{
busy_wait(200);
}
if (strict_fetch_ordering)
{
i = (i - 1) % 8;
}
}
}
const auto ret = read_from_ptr<be_t<u32>>(+m_cache[0], addr - m_cache_addr);
return {true, ret};
}
void FIFO_control::set_get(u32 get, u32 spin_cmd)
{
invalidate_cache();
if (spin_cmd && m_ctrl->get == get)
{
m_memwatch_addr = get;
m_memwatch_cmp = spin_cmd;
return;
}
// Update ctrl registers
m_ctrl->get.release(m_internal_get = get);
m_remaining_commands = 0;
}
std::span<const u32> FIFO_control::get_current_arg_ptr() const
{
if (g_cfg.core.rsx_fifo_accuracy)
{
// Return a pointer to the cache storage with confined access
return {reinterpret_cast<const u32*>(&m_cache) + (m_internal_get - m_cache_addr) / 4, (m_cache_size - (m_internal_get - m_cache_addr)) / 4};
}
else
{
// Return a raw pointer with no limited access
return {static_cast<const u32*>(vm::base(m_iotable->get_addr(m_internal_get))), 0x10000};
}
}
bool FIFO_control::read_unsafe(register_pair& data)
{
// Fast read with no processing, only safe inside a PACKET_BEGIN+count block
if (m_remaining_commands)
{
bool ok{};
u32 arg = 0;
if (g_cfg.core.rsx_fifo_accuracy) [[ unlikely ]]
{
std::tie(ok, arg) = fetch_u32(m_internal_get + 4);
if (!ok)
{
if (arg == FIFO_ERROR)
{
m_thread->recover_fifo();
}
return false;
}
}
else
{
if (m_internal_get + 4 == read_put<false>())
{
return false;
}
m_args_ptr += 4;
arg = vm::read32(m_args_ptr);
}
m_internal_get += 4;
m_command_reg += m_command_inc;
--m_remaining_commands;
data.set(m_command_reg, arg);
return true;
}
m_internal_get += 4;
return false;
}
// Optimization for methods which can be batched together
// Beware, can be easily misused
bool FIFO_control::skip_methods(u32 count)
{
if (m_remaining_commands > count)
{
m_command_reg += m_command_inc * count;
m_remaining_commands -= count;
m_internal_get += 4 * count;
m_args_ptr += 4 * count;
return true;
}
m_internal_get += 4 * m_remaining_commands;
m_remaining_commands = 0;
return false;
}
void FIFO_control::abort()
{
m_remaining_commands = 0;
}
void FIFO_control::read(register_pair& data)
{
if (m_remaining_commands)
{
// Previous block aborted to wait for PUT pointer
read_unsafe(data);
return;
}
if (m_memwatch_addr)
{
if (m_internal_get == m_memwatch_addr)
{
if (const u32 addr = m_iotable->get_addr(m_memwatch_addr); addr + 1)
{
if (vm::read32(addr) == m_memwatch_cmp)
{
// Still spinning in place
data.reg = FIFO_EMPTY;
return;
}
}
}
m_memwatch_addr = 0;
m_memwatch_cmp = 0;
}
if (!g_cfg.core.rsx_fifo_accuracy) [[ likely ]]
{
const u32 put = read_put();
if (put == m_internal_get)
{
// Nothing to do
data.reg = FIFO_EMPTY;
return;
}
if (const u32 addr = m_iotable->get_addr(m_internal_get); addr + 1)
{
m_cmd = vm::read32(addr);
}
else
{
data.reg = FIFO_ERROR;
return;
}
}
else
{
if (auto [ok, arg] = fetch_u32(m_internal_get); ok)
{
m_cmd = arg;
}
else
{
data.reg = arg;
return;
}
}
if (m_cmd & RSX_METHOD_NON_METHOD_CMD_MASK) [[unlikely]]
{
if ((m_cmd & RSX_METHOD_OLD_JUMP_CMD_MASK) == RSX_METHOD_OLD_JUMP_CMD ||
(m_cmd & RSX_METHOD_NEW_JUMP_CMD_MASK) == RSX_METHOD_NEW_JUMP_CMD ||
(m_cmd & RSX_METHOD_CALL_CMD_MASK) == RSX_METHOD_CALL_CMD ||
(m_cmd & RSX_METHOD_RETURN_MASK) == RSX_METHOD_RETURN_CMD)
{
// Flow control, stop reading
data.reg = m_cmd;
return;
}
// Malformed command, optional recovery
data.reg = FIFO_ERROR;
return;
}
ensure(!m_remaining_commands);
const u32 count = (m_cmd >> 18) & 0x7ff;
if (!count)
{
m_ctrl->get.release(m_internal_get += 4);
data.reg = FIFO_NOP;
return;
}
if (count > 1)
{
// Set up readback parameters
m_command_reg = m_cmd & 0xfffc;
m_command_inc = ((m_cmd & RSX_METHOD_NON_INCREMENT_CMD_MASK) == RSX_METHOD_NON_INCREMENT_CMD) ? 0 : 4;
m_remaining_commands = count - 1;
}
if (g_cfg.core.rsx_fifo_accuracy)
{
m_internal_get += 4;
auto [ok, arg] = fetch_u32(m_internal_get);
if (!ok)
{
// Optional recovery
if (arg == FIFO_ERROR)
{
data.reg = FIFO_ERROR;
}
else
{
data.reg = FIFO_EMPTY;
m_command_reg = m_cmd & 0xfffc;
m_remaining_commands++;
}
return;
}
data.set(m_cmd & 0xfffc, arg);
return;
}
inc_get(true); // Wait for data block to become available
// Validate the args ptr if the command attempts to read from it
m_args_ptr = m_iotable->get_addr(m_internal_get);
if (m_args_ptr == umax) [[unlikely]]
{
// Optional recovery
data.reg = FIFO_ERROR;
return;
}
data.set(m_cmd & 0xfffc, vm::read32(m_args_ptr));
}
void flattening_helper::reset(bool _enabled)
{
enabled = _enabled;
num_collapsed = 0;
in_begin_end = false;
}
void flattening_helper::force_disable()
{
if (enabled)
{
rsx_log.warning("FIFO optimizations have been disabled as the application is not compatible with per-frame analysis");
reset(false);
fifo_hint = optimization_hint::application_not_compatible;
}
}
void flattening_helper::evaluate_performance(u32 total_draw_count)
{
if (!enabled)
{
if (fifo_hint == optimization_hint::application_not_compatible)
{
// Not compatible, do nothing
return;
}
if (total_draw_count <= 2000)
{
// Low draw call pressure
fifo_hint = optimization_hint::load_low;
return;
}
if (fifo_hint == optimization_hint::load_unoptimizable)
{
// Nope, wait for stats to change
return;
}
}
if (enabled)
{
// Currently activated. Check if there is any benefit
if (num_collapsed < 500)
{
// Not worth it, disable
enabled = false;
fifo_hint = load_unoptimizable;
}
u32 real_total = total_draw_count + num_collapsed;
if (real_total <= 2000)
{
// Low total number of draws submitted, no need to keep trying for now
enabled = false;
fifo_hint = load_low;
}
reset(enabled);
}
else
{
// Not enabled, check if we should try enabling
ensure(total_draw_count > 2000);
if (fifo_hint != load_unoptimizable)
{
// If its set to unoptimizable, we already tried and it did not work
// If it resets to load low (usually after some kind of loading screen) we can try again
ensure(in_begin_end == false); // "Incorrect initial state"
ensure(num_collapsed == 0);
enabled = true;
}
}
}
flatten_op flattening_helper::test(register_pair& command)
{
u32 flush_cmd = ~0u;
switch (const u32 reg = (command.reg >> 2))
{
case NV4097_SET_BEGIN_END:
{
in_begin_end = !!command.value;
if (command.value)
{
// This is a BEGIN call
if (!deferred_primitive) [[likely]]
{
// New primitive block
deferred_primitive = command.value;
}
else if (deferred_primitive == command.value)
{
// Same primitive can be chanined; do nothing
command.reg = FIFO_DISABLED_COMMAND;
}
else
{
// Primitive command has changed!
// Flush
flush_cmd = command.value;
}
}
else if (deferred_primitive)
{
command.reg = FIFO_DRAW_BARRIER;
draw_count++;
}
else
{
rsx_log.error("Fifo flattener misalignment, disable FIFO reordering and report to developers");
in_begin_end = false;
flush_cmd = 0u;
}
break;
}
case NV4097_DRAW_ARRAYS:
case NV4097_DRAW_INDEX_ARRAY:
{
// TODO: Check type
break;
}
default:
{
if (draw_count) [[unlikely]]
{
if (m_register_properties[reg] & register_props::always_ignore) [[unlikely]]
{
// Always ignore
command.reg = FIFO_DISABLED_COMMAND;
}
else
{
// Flush
flush_cmd = (in_begin_end) ? deferred_primitive : 0u;
}
}
else
{
// Nothing to do
return NOTHING;
}
break;
}
}
if (flush_cmd != ~0u)
{
num_collapsed += draw_count? (draw_count - 1) : 0;
draw_count = 0;
deferred_primitive = flush_cmd;
return in_begin_end ? EMIT_BARRIER : EMIT_END;
}
return NOTHING;
}
}
void thread::run_FIFO()
{
FIFO::register_pair command;
fifo_ctrl->read(command);
const auto cmd = command.reg;
if (cmd & (0xffff0000 | RSX_METHOD_NON_METHOD_CMD_MASK)) [[unlikely]]
{
// Check for special FIFO commands
switch (cmd)
{
case FIFO::FIFO_NOP:
{
if (performance_counters.state == FIFO::state::running)
{
performance_counters.FIFO_idle_timestamp = get_system_time();
performance_counters.state = FIFO::state::nop;
}
return;
}
case FIFO::FIFO_EMPTY:
{
if (performance_counters.state == FIFO::state::running)
{
performance_counters.FIFO_idle_timestamp = get_system_time();
performance_counters.state = FIFO::state::empty;
}
else
{
std::this_thread::yield();
}
return;
}
case FIFO::FIFO_BUSY:
{
// Do something else
return;
}
case FIFO::FIFO_ERROR:
{
rsx_log.error("FIFO error: possible desync event (last cmd = 0x%x)", get_fifo_cmd());
recover_fifo();
return;
}
}
// Check for flow control
if (std::bitset<2> jump_type; jump_type
.set(0, (cmd & RSX_METHOD_OLD_JUMP_CMD_MASK) == RSX_METHOD_OLD_JUMP_CMD)
.set(1, (cmd & RSX_METHOD_NEW_JUMP_CMD_MASK) == RSX_METHOD_NEW_JUMP_CMD)
.any())
{
const u32 offs = cmd & (jump_type.test(0) ? RSX_METHOD_OLD_JUMP_OFFSET_MASK : RSX_METHOD_NEW_JUMP_OFFSET_MASK);
if (offs == fifo_ctrl->get_pos())
{
//Jump to self. Often preceded by NOP
if (performance_counters.state == FIFO::state::running)
{
performance_counters.FIFO_idle_timestamp = get_system_time();
sync_point_request.release(true);
}
performance_counters.state = FIFO::state::spinning;
}
else
{
last_known_code_start = offs;
}
//rsx_log.warning("rsx jump(0x%x) #addr=0x%x, cmd=0x%x, get=0x%x, put=0x%x", offs, m_ioAddress + get, cmd, get, put);
fifo_ctrl->set_get(offs, cmd);
return;
}
if ((cmd & RSX_METHOD_CALL_CMD_MASK) == RSX_METHOD_CALL_CMD)
{
if (fifo_ret_addr != RSX_CALL_STACK_EMPTY)
{
// Only one layer is allowed in the call stack.
rsx_log.error("FIFO: CALL found inside a subroutine (last cmd = 0x%x)", get_fifo_cmd());
recover_fifo();
return;
}
const u32 offs = cmd & RSX_METHOD_CALL_OFFSET_MASK;
fifo_ret_addr = fifo_ctrl->get_pos() + 4;
fifo_ctrl->set_get(offs);
last_known_code_start = offs;
return;
}
if ((cmd & RSX_METHOD_RETURN_MASK) == RSX_METHOD_RETURN_CMD)
{
if (fifo_ret_addr == RSX_CALL_STACK_EMPTY)
{
rsx_log.error("FIFO: RET found without corresponding CALL (last cmd = 0x%x)", get_fifo_cmd());
recover_fifo();
return;
}
// Optimize returning to another CALL
if ((ctrl->put & ~3) != fifo_ret_addr)
{
if (u32 addr = iomap_table.get_addr(fifo_ret_addr); addr != umax)
{
const u32 cmd0 = vm::read32(addr);
// Check for missing step flags, in case the user is single-stepping in the debugger
if ((cmd0 & RSX_METHOD_CALL_CMD_MASK) == RSX_METHOD_CALL_CMD && cpu_flag::dbg_step - state)
{
fifo_ctrl->set_get(cmd0 & RSX_METHOD_CALL_OFFSET_MASK);
last_known_code_start = ctrl->get;
fifo_ret_addr += 4;
return;
}
}
}
fifo_ctrl->set_get(std::exchange(fifo_ret_addr, RSX_CALL_STACK_EMPTY));
last_known_code_start = ctrl->get;
return;
}
// If we reached here, this is likely an error
fmt::throw_exception("Unexpected command 0x%x (last cmd: 0x%x)", cmd, fifo_ctrl->last_cmd());
}
if (const auto state = performance_counters.state;
state != FIFO::state::running)
{
performance_counters.state = FIFO::state::running;
// Hack: Delay FIFO wake-up according to setting
// NOTE: The typical spin setup is a NOP followed by a jump-to-self
// NOTE: There is a small delay when the jump address is dynamically edited by cell
if (state != FIFO::state::nop)
{
fifo_wake_delay();
}
// Update performance counters with time spent in idle mode
performance_counters.idle_time += (get_system_time() - performance_counters.FIFO_idle_timestamp);
}
do
{
if (capture_current_frame) [[unlikely]]
{
const u32 reg = (command.reg & 0xfffc) >> 2;
const u32 value = command.value;
frame_debug.command_queue.emplace_back(reg, value);
if (!(reg == NV406E_SET_REFERENCE || reg == NV406E_SEMAPHORE_RELEASE || reg == NV406E_SEMAPHORE_ACQUIRE))
{
// todo: handle nv406e methods better?, do we care about call/jumps?
rsx::frame_capture_data::replay_command replay_cmd;
replay_cmd.rsx_command = std::make_pair((reg << 2) | (1u << 18), value);
auto& commands = frame_capture.replay_commands;
commands.push_back(replay_cmd);
switch (reg)
{
case NV3089_IMAGE_IN:
capture::capture_image_in(this, commands.back());
break;
case NV0039_BUFFER_NOTIFY:
capture::capture_buffer_notify(this, commands.back());
break;
default:
{
static constexpr std::array<std::pair<u32, u32>, 3> ranges
{{
{NV308A_COLOR, 0x700},
{NV4097_SET_TRANSFORM_PROGRAM, 32},
{NV4097_SET_TRANSFORM_CONSTANT, 32}
}};
// Use legacy logic - enqueue leading command with count
// Then enqueue each command arg alone with a no-op command
for (const auto& range : ranges)
{
if (reg >= range.first && reg < range.first + range.second)
{
const u32 remaining = std::min<u32>(fifo_ctrl->get_remaining_args_count() + 1,
(fifo_ctrl->last_cmd() & RSX_METHOD_NON_INCREMENT_CMD_MASK) ? -1 : (range.first + range.second) - reg);
commands.back().rsx_command.first = (fifo_ctrl->last_cmd() & RSX_METHOD_NON_INCREMENT_CMD_MASK) | (reg << 2) | (remaining << 18);
for (u32 i = 1; i < remaining && fifo_ctrl->get_pos() + i * 4 != (ctrl->put & ~3); i++)
{
replay_cmd.rsx_command = std::make_pair(0, vm::read32(iomap_table.get_addr(fifo_ctrl->get_pos()) + (i * 4)));
commands.push_back(replay_cmd);
}
break;
}
}
break;
}
}
}
}
if (m_flattener.is_enabled()) [[unlikely]]
{
switch(m_flattener.test(command))
{
case FIFO::NOTHING:
{
break;
}
case FIFO::EMIT_END:
{
// Emit end command to close existing scope
AUDIT(in_begin_end);
methods[NV4097_SET_BEGIN_END](m_ctx, NV4097_SET_BEGIN_END, 0);
break;
}
case FIFO::EMIT_BARRIER:
{
AUDIT(in_begin_end);
methods[NV4097_SET_BEGIN_END](m_ctx, NV4097_SET_BEGIN_END, 0);
methods[NV4097_SET_BEGIN_END](m_ctx, NV4097_SET_BEGIN_END, m_flattener.get_primitive());
break;
}
default:
{
fmt::throw_exception("Unreachable");
}
}
if (command.reg == FIFO::FIFO_DISABLED_COMMAND)
{
// Optimized away
continue;
}
}
const u32 reg = (command.reg & 0xffff) >> 2;
const u32 value = command.value;
m_ctx->register_state->decode(reg, value);
if (auto method = methods[reg])
{
method(m_ctx, reg, value);
if (state & cpu_flag::again)
{
m_ctx->register_state->decode(reg, m_ctx->register_state->latch);
break;
}
}
else if (m_ctx->register_state->latch != value)
{
// Something changed, set signal flags if any specified
m_graphics_state |= state_signals[reg];
}
}
while (fifo_ctrl->read_unsafe(command));
fifo_ctrl->sync_get();
}
}
| 21,076
|
C++
|
.cpp
| 755
| 22.744371
| 144
| 0.60813
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,384
|
rsx_vertex_data.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_vertex_data.cpp
|
#include "stdafx.h"
#include "rsx_vertex_data.h"
#include "rsx_methods.h"
namespace rsx
{
void push_buffer_vertex_info::clear()
{
if (size)
{
data.clear();
vertex_count = 0;
dword_count = 0;
size = 0;
}
}
u8 push_buffer_vertex_info::get_vertex_size_in_dwords() const
{
// NOTE: Types are always provided to fit into 32-bits
// i.e no less than 4 8-bit values and no less than 2 16-bit values
switch (type)
{
case vertex_base_type::f:
return size;
case vertex_base_type::ub:
case vertex_base_type::ub256:
return 1;
case vertex_base_type::s1:
case vertex_base_type::s32k:
return size / 2;
default:
fmt::throw_exception("Unsupported vertex base type %d", static_cast<u8>(type));
}
}
u32 push_buffer_vertex_info::get_vertex_id() const
{
ensure(attr == 0); // Only ask ATTR0 for vertex ID
// Which is the current vertex ID to be written to?
// NOTE: Fully writing to ATTR0 closes the current block
return size ? (dword_count / get_vertex_size_in_dwords()) : 0;
}
void push_buffer_vertex_info::set_vertex_data(u32 attribute_id, u32 vertex_id, u32 sub_index, vertex_base_type type, u32 size, u32 arg)
{
if (vertex_count && (type != this->type || size != this->size))
{
// TODO: Should forcefully break the draw call on this step using an execution barrier.
// While RSX can handle this behavior without problem, it can only be the product of nonsensical game design.
rsx_log.error("Vertex attribute %u was respecced mid-draw (type = %d vs %d, size = %u vs %u). Indexed execution barrier required. Report this to developers.",
attribute_id, static_cast<int>(type), static_cast<int>(this->type), size, this->size);
}
this->type = type;
this->size = size;
this->attr = attribute_id;
const auto required_vertex_count = (vertex_id + 1);
const auto vertex_size = get_vertex_size_in_dwords();
if (vertex_count != required_vertex_count)
{
pad_to(required_vertex_count, true);
ensure(vertex_count == required_vertex_count);
}
auto current_vertex = data.data() + ((vertex_count - 1) * vertex_size);
current_vertex[sub_index] = arg;
++dword_count;
}
void push_buffer_vertex_info::pad_to(u32 required_vertex_count, bool skip_last)
{
if (vertex_count >= required_vertex_count)
{
return;
}
const auto vertex_size = get_vertex_size_in_dwords();
data.resize(vertex_size * required_vertex_count);
// For all previous verts, copy over the register contents duplicated over the stream.
// Internally it appears RSX actually executes the draw commands as they are encountered.
// You can change register data contents mid-way for example and it will pick up for the next N draws.
// This is how immediate mode is implemented internally.
u32* src = rsx::method_registers.register_vertex_info[attr].data.data();
u32* dst = data.data() + (vertex_count * vertex_size);
u32* end = data.data() + ((required_vertex_count - (skip_last ? 1 : 0)) * vertex_size);
while (dst < end)
{
std::memcpy(dst, src, vertex_size * sizeof(u32));
dst += vertex_size;
}
vertex_count = required_vertex_count;
}
}
| 3,141
|
C++
|
.cpp
| 86
| 33.453488
| 161
| 0.699013
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,385
|
RSXZCULL.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXZCULL.cpp
|
#include "stdafx.h"
#include "Core/RSXEngLock.hpp"
#include "Core/RSXReservationLock.hpp"
#include "RSXThread.h"
namespace rsx
{
namespace reports
{
ZCULL_control::ZCULL_control()
{
for (auto& query : m_occlusion_query_data)
{
m_free_occlusion_pool.push(&query);
}
for (auto& stat : m_statistics_map)
{
stat.flags = stat.result = 0;
}
}
ZCULL_control::~ZCULL_control()
{
std::scoped_lock lock(m_pages_mutex);
for (auto& block : m_locked_pages)
{
for (auto& p : block)
{
if (p.second.prot != utils::protection::rw)
{
utils::memory_protect(vm::base(p.first), utils::c_page_size, utils::protection::rw);
}
}
block.clear();
}
}
void ZCULL_control::set_active(class ::rsx::thread* ptimer, bool state, bool flush_queue)
{
if (state != host_queries_active)
{
host_queries_active = state;
if (state)
{
ensure(unit_enabled && m_current_task == nullptr);
allocate_new_query(ptimer);
begin_occlusion_query(m_current_task);
}
else
{
ensure(m_current_task);
if (m_current_task->num_draws)
{
end_occlusion_query(m_current_task);
m_current_task->active = false;
m_current_task->pending = true;
m_current_task->sync_tag = m_timer++;
m_current_task->timestamp = m_tsc;
m_pending_writes.push_back({});
m_pending_writes.back().query = m_current_task;
ptimer->async_tasks_pending++;
}
else
{
discard_occlusion_query(m_current_task);
free_query(m_current_task);
m_current_task->active = false;
}
m_current_task = nullptr;
update(ptimer, 0u, flush_queue);
}
}
}
void ZCULL_control::check_state(class ::rsx::thread* ptimer, bool flush_queue)
{
// NOTE: Only enable host queries if pixel count is active to save on resources
// Can optionally be enabled for either stats enabled or zpass enabled for accuracy
const bool data_stream_available = zpass_count_enabled; // write_enabled && (zpass_count_enabled || stats_enabled);
if (host_queries_active && !data_stream_available)
{
// Stop
set_active(ptimer, false, flush_queue);
}
else if (!host_queries_active && data_stream_available && unit_enabled)
{
// Start
set_active(ptimer, true, flush_queue);
}
}
void ZCULL_control::set_enabled(class ::rsx::thread* ptimer, bool state, bool flush_queue)
{
if (state != unit_enabled)
{
unit_enabled = state;
check_state(ptimer, flush_queue);
}
}
void ZCULL_control::set_status(class ::rsx::thread* ptimer, bool surface_active, bool zpass_active, bool zcull_stats_active, bool flush_queue)
{
write_enabled = surface_active;
zpass_count_enabled = zpass_active;
stats_enabled = zcull_stats_active;
check_state(ptimer, flush_queue);
// Disabled since only ZPASS is implemented right now
if (false) //(m_current_task && m_current_task->active)
{
// Data check
u32 expected_type = 0;
if (zpass_active) expected_type |= CELL_GCM_ZPASS_PIXEL_CNT;
if (zcull_stats_active) expected_type |= CELL_GCM_ZCULL_STATS;
if (m_current_task->data_type != expected_type) [[unlikely]]
{
rsx_log.error("ZCULL queue interrupted by data type change!");
// Stop+start the current setup
set_active(ptimer, false, false);
set_active(ptimer, true, false);
}
}
}
void ZCULL_control::read_report(::rsx::thread* ptimer, vm::addr_t sink, u32 type)
{
if (m_current_task && type == CELL_GCM_ZPASS_PIXEL_CNT)
{
m_current_task->owned = true;
end_occlusion_query(m_current_task);
m_pending_writes.push_back({});
m_current_task->active = false;
m_current_task->pending = true;
m_current_task->timestamp = m_tsc;
m_current_task->sync_tag = m_timer++;
m_pending_writes.back().query = m_current_task;
allocate_new_query(ptimer);
begin_occlusion_query(m_current_task);
}
else
{
// Spam; send null query down the pipeline to copy the last result
// Might be used to capture a timestamp (verify)
if (m_pending_writes.empty())
{
// No need to queue this if there is no pending request in the pipeline anyway
write(sink, ptimer->timestamp(), type, m_statistics_map[m_statistics_tag_id].result);
return;
}
m_pending_writes.push_back({});
}
auto forwarder = &m_pending_writes.back();
m_statistics_map[m_statistics_tag_id].flags |= 1;
for (auto It = m_pending_writes.rbegin(); It != m_pending_writes.rend(); It++)
{
if (!It->sink)
{
It->counter_tag = m_statistics_tag_id;
It->sink = sink;
It->type = type;
if (forwarder != &(*It))
{
// Not the last one in the chain, forward the writing operation to the last writer
// Usually comes from truncated queries caused by disabling the testing
ensure(It->query);
It->forwarder = forwarder;
It->query->owned = true;
}
continue;
}
break;
}
on_report_enqueued(sink);
ptimer->async_tasks_pending++;
if (m_statistics_map[m_statistics_tag_id].result != 0)
{
// Flush guaranteed results; only one positive is needed
update(ptimer);
}
}
void ZCULL_control::allocate_new_query(::rsx::thread* ptimer)
{
int retries = 0;
while (true)
{
if (!m_free_occlusion_pool.empty())
{
m_current_task = m_free_occlusion_pool.top();
m_free_occlusion_pool.pop();
m_current_task->data_type = 0;
m_current_task->num_draws = 0;
m_current_task->result = 0;
m_current_task->active = true;
m_current_task->owned = false;
m_current_task->sync_tag = 0;
m_current_task->timestamp = 0;
// Flags determine what kind of payload is carried by queries in the 'report'
if (zpass_count_enabled) m_current_task->data_type |= CELL_GCM_ZPASS_PIXEL_CNT;
if (stats_enabled) m_current_task->data_type |= CELL_GCM_ZCULL_STATS;
return;
}
if (retries > 0)
{
fmt::throw_exception("Allocation failed!");
}
// All slots are occupied, try to pop the earliest entry
if (!m_pending_writes.front().query)
{
// If this happens, the assert above will fire. There should never be a queue header with no work to be done
rsx_log.error("Close to our death.");
}
m_next_tsc = 0;
update(ptimer, m_pending_writes.front().sink);
retries++;
}
}
void ZCULL_control::free_query(occlusion_query_info* query)
{
query->pending = false;
m_free_occlusion_pool.push(query);
}
void ZCULL_control::clear(class ::rsx::thread* ptimer, u32 type)
{
if (!(type & CELL_GCM_ZPASS_PIXEL_CNT))
{
// Other types do not generate queries at the moment
return;
}
if (!m_pending_writes.empty())
{
//Remove any dangling/unclaimed queries as the information is lost anyway
auto valid_size = m_pending_writes.size();
for (auto It = m_pending_writes.rbegin(); It != m_pending_writes.rend(); ++It)
{
if (!It->sink)
{
discard_occlusion_query(It->query);
free_query(It->query);
valid_size--;
ptimer->async_tasks_pending--;
continue;
}
break;
}
m_pending_writes.resize(valid_size);
}
if (m_pending_writes.empty())
{
// Clear can be invoked from flip as a workaround to prevent query leakage.
m_statistics_map[m_statistics_tag_id].flags = 0;
}
if (m_statistics_map[m_statistics_tag_id].flags)
{
// Move to the next slot if this one is still in use.
m_statistics_tag_id = (m_statistics_tag_id + 1) % max_stat_registers;
}
auto& current_stats = m_statistics_map[m_statistics_tag_id];
if (current_stats.flags != 0)
{
// This shouldn't happen
rsx_log.error("Allocating a new ZCULL statistics slot %u overwrites previous data.", m_statistics_tag_id);
}
// Clear value before use
current_stats.result = 0;
}
void ZCULL_control::on_draw()
{
if (m_current_task)
{
m_current_task->num_draws++;
m_current_task->sync_tag = m_timer++;
}
}
void ZCULL_control::on_sync_hint(sync_hint_payload_t payload)
{
m_sync_tag = std::max(m_sync_tag, payload.query->sync_tag);
}
void ZCULL_control::write(vm::addr_t sink, u64 timestamp, u32 type, u32 value)
{
ensure(sink);
auto scale_result = [](u32 value)
{
const auto scale = rsx::get_resolution_scale_percent();
const auto result = (value * 10000ull) / (scale * scale);
return std::max(1u, static_cast<u32>(result));
};
switch (type)
{
case CELL_GCM_ZPASS_PIXEL_CNT:
if (value)
{
value = (g_cfg.video.precise_zpass_count) ?
scale_result(value) :
u16{ umax };
}
break;
case CELL_GCM_ZCULL_STATS3:
value = (value || !write_enabled || !stats_enabled) ? 0 : u16{ umax };
break;
case CELL_GCM_ZCULL_STATS2:
case CELL_GCM_ZCULL_STATS1:
case CELL_GCM_ZCULL_STATS:
default:
// Not implemented
value = (write_enabled && stats_enabled) ? -1 : 0;
break;
}
rsx::reservation_lock<true> lock(sink, 16);
auto report = vm::get_super_ptr<atomic_t<CellGcmReportData>>(sink);
report->store({timestamp, value, 0});
}
void ZCULL_control::write(queued_report_write* writer, u64 timestamp, u32 value)
{
write(writer->sink, timestamp, writer->type, value);
on_report_completed(writer->sink);
for (auto& addr : writer->sink_alias)
{
write(addr, timestamp, writer->type, value);
}
}
void ZCULL_control::retire(::rsx::thread* ptimer, queued_report_write* writer, u32 result)
{
if (!writer->forwarder)
{
// No other queries in the chain, write result
const auto value = (writer->type == CELL_GCM_ZPASS_PIXEL_CNT) ? m_statistics_map[writer->counter_tag].result : result;
write(writer, ptimer->timestamp(), value);
}
if (writer->query && writer->query->sync_tag == ptimer->cond_render_ctrl.eval_sync_tag)
{
bool eval_failed;
if (!writer->forwarder) [[likely]]
{
// Normal evaluation
eval_failed = (result == 0u);
}
else
{
// Eval was inserted while ZCULL was active but not enqueued to write to memory yet
// write(addr) -> enable_zpass_stats -> eval_condition -> write(addr)
// In this case, use what already exists in memory, not the current counter
eval_failed = (vm::_ref<CellGcmReportData>(writer->sink).value == 0u);
}
ptimer->cond_render_ctrl.set_eval_result(ptimer, eval_failed);
}
}
// Hard flush: drains every claimed (sink != 0) pending report write, blocking on
// the backend for any occlusion query whose result is not yet available. Used
// when the guest requires report memory to be fully up to date.
void ZCULL_control::sync(::rsx::thread* ptimer)
{
	if (m_pending_writes.empty())
	{
		// Nothing to do
		return;
	}
	if (!m_critical_reports_in_flight)
	{
		// Valid call, but nothing important queued up
		return;
	}
	if (g_cfg.video.relaxed_zcull_sync)
	{
		// Relaxed mode: do an opportunistic flush instead of a hard wait
		update(ptimer, 0, true);
		return;
	}
	// Quick reverse scan to push commands ahead of time
	for (auto It = m_pending_writes.rbegin(); It != m_pending_writes.rend(); ++It)
	{
		if (It->sink && It->query && It->query->num_draws)
		{
			if (It->query->sync_tag > m_sync_tag)
			{
				// rsx_log.trace("[Performance warning] Query hint emit during sync command.");
				ptimer->sync_hint(FIFO::interrupt_hint::zcull_sync, { .query = It->query });
			}
			break;
		}
	}
	u32 processed = 0;
	// A trailing entry with no sink means the queue still holds an open (unclaimed) report
	const bool has_unclaimed = (m_pending_writes.back().sink == 0);
	// Write all claimed reports unconditionally
	for (auto& writer : m_pending_writes)
	{
		if (!writer.sink)
			break;
		auto query = writer.query;
		auto& counter = m_statistics_map[writer.counter_tag];
		if (query)
		{
			ensure(query->pending);
			// Only these two report types carry real backend results here
			const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3);
			const bool have_result = counter.result && !g_cfg.video.precise_zpass_count;
			if (implemented && !have_result && query->num_draws)
			{
				// Blocking read of the backend occlusion query
				get_occlusion_query_result(query);
				counter.result += query->result;
			}
			else
			{
				// Already have a hit, no need to retest
				discard_occlusion_query(query);
			}
			free_query(query);
		}
		retire(ptimer, &writer, counter.result);
		processed++;
	}
	if (!has_unclaimed)
	{
		// Entire queue was claimed and has been flushed
		ensure(processed == m_pending_writes.size());
		m_pending_writes.clear();
	}
	else
	{
		// Keep the unclaimed tail, compacting it to the front of the queue
		auto remaining = m_pending_writes.size() - processed;
		ensure(remaining > 0);
		if (remaining == 1)
		{
			m_pending_writes[0] = std::move(m_pending_writes.back());
			m_pending_writes.resize(1);
		}
		else
		{
			std::move(m_pending_writes.begin() + processed, m_pending_writes.end(), m_pending_writes.begin());
			m_pending_writes.resize(remaining);
		}
	}
	// Delete all statistics caches but leave the current one
	const u32 current_index = m_statistics_tag_id;
	const u32 previous_index = (current_index + max_stat_registers - 1) % max_stat_registers;
	for (u32 index = previous_index; index != current_index;)
	{
		if (m_statistics_map[index].flags == 0)
		{
			// First already-free slot terminates the walk
			break;
		}
		m_statistics_map[index].flags = 0;
		index = (index + max_stat_registers - 1) % max_stat_registers;
	}
	//Decrement jobs counter
	ptimer->async_tasks_pending -= processed;
}
// Incremental, normally non-blocking flush of the pending report queue.
// If 'sync_address' is non-zero, processing is forced (blocking) up to and
// including the write targeting that address. 'hint' requests that the whole
// queue be hinted for readback ahead of time.
void ZCULL_control::update(::rsx::thread* ptimer, u32 sync_address, bool hint)
{
	if (m_pending_writes.empty())
	{
		return;
	}
	const auto& front = m_pending_writes.front();
	if (!front.sink)
	{
		// No writables in queue, abort
		return;
	}
	if (!sync_address)
	{
		if (hint || ptimer->async_tasks_pending + 0u >= max_safe_queue_depth)
		{
			// Prepare the whole queue for reading. This happens when zcull activity is disabled or queue is too long
			for (auto It = m_pending_writes.rbegin(); It != m_pending_writes.rend(); ++It)
			{
				if (It->query)
				{
					if (It->query->num_draws && It->query->sync_tag > m_sync_tag)
					{
						ptimer->sync_hint(FIFO::interrupt_hint::zcull_sync, { .query = It->query });
						ensure(It->query->sync_tag <= m_sync_tag);
					}
					break;
				}
			}
		}
		// Rate-limit opportunistic polling to one pass per min_zcull_tick_us
		if (m_tsc = get_system_time(); m_tsc < m_next_tsc)
		{
			return;
		}
		else
		{
			// Schedule ahead
			m_next_tsc = m_tsc + min_zcull_tick_us;
			// Schedule a queue flush if needed
			if (!g_cfg.video.relaxed_zcull_sync && m_critical_reports_in_flight &&
				front.query && front.query->num_draws && front.query->sync_tag > m_sync_tag)
			{
				const auto elapsed = m_tsc - front.query->timestamp;
				if (elapsed > max_zcull_delay_us)
				{
					// Head of the queue has been stale for too long; request a sync
					ptimer->sync_hint(FIFO::interrupt_hint::zcull_sync, { .query = front.query });
					ensure(front.query->sync_tag <= m_sync_tag);
				}
				return;
			}
		}
	}
	u32 processed = 0;
	for (auto& writer : m_pending_writes)
	{
		if (!writer.sink)
			break;
		auto query = writer.query;
		auto& counter = m_statistics_map[writer.counter_tag];
		const bool force_read = (sync_address != 0);
		if (force_read && writer.sink == sync_address && !writer.forwarder)
		{
			// Forced reads end here
			sync_address = 0;
		}
		if (query)
		{
			ensure(query->pending);
			const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3);
			const bool have_result = counter.result && !g_cfg.video.precise_zpass_count;
			if (!implemented || !query->num_draws || have_result)
			{
				// Result is irrelevant or already known; drop the backend query
				discard_occlusion_query(query);
			}
			else if (force_read || check_occlusion_query_status(query))
			{
				// Forced mode blocks; otherwise only harvest results that are ready
				get_occlusion_query_result(query);
				counter.result += query->result;
			}
			else
			{
				// Too early; abort
				ensure(!force_read && implemented);
				break;
			}
			free_query(query);
		}
		// Release the stat tag for this object. Slots are all or nothing.
		m_statistics_map[writer.counter_tag].flags = 0;
		retire(ptimer, &writer, counter.result);
		processed++;
	}
	if (processed)
	{
		// Compact the queue, keeping any unprocessed tail entries
		auto remaining = m_pending_writes.size() - processed;
		if (remaining == 1)
		{
			m_pending_writes[0] = std::move(m_pending_writes.back());
			m_pending_writes.resize(1);
		}
		else if (remaining)
		{
			std::move(m_pending_writes.begin() + processed, m_pending_writes.end(), m_pending_writes.begin());
			m_pending_writes.resize(remaining);
		}
		else
		{
			m_pending_writes.clear();
		}
		ptimer->async_tasks_pending -= processed;
	}
}
// Guarantees that pending reports targeting [memory_address, memory_address + memory_range)
// are written before the guest reads that memory. Returns result_zcull_intr when
// the flush is deferred (sync_defer_copy flag), result_none otherwise.
flags32_t ZCULL_control::read_barrier(::rsx::thread* ptimer, u32 memory_address, u32 memory_range, flags32_t flags)
{
	if (m_pending_writes.empty())
		return result_none;
	const auto memory_end = memory_address + memory_range;
	AUDIT(memory_end >= memory_address);
	u32 sync_address = 0;
	occlusion_query_info* query = nullptr;
	// Reverse scan: locate the newest write into the range, then the nearest
	// entry at or before it that actually carries a backend query
	for (auto It = m_pending_writes.crbegin(); It != m_pending_writes.crend(); ++It)
	{
		if (sync_address)
		{
			if (It->query)
			{
				sync_address = It->sink;
				query = It->query;
				break;
			}
			continue;
		}
		if (It->sink >= memory_address && It->sink < memory_end)
		{
			sync_address = It->sink;
			// NOTE: If application is spamming requests, there may be no query attached
			if (It->query)
			{
				query = It->query;
				break;
			}
		}
	}
	if (!sync_address || !query)
	{
		return result_none;
	}
	// Disable optimizations across the accessed range if reports reside within
	{
		std::scoped_lock lock(m_pages_mutex);
		const auto location1 = rsx::classify_location(memory_address);
		const auto location2 = rsx::classify_location(memory_end - 1);
		if (!m_pages_accessed[location1])
		{
			disable_optimizations(ptimer, location1);
		}
		if (!m_pages_accessed[location2])
		{
			disable_optimizations(ptimer, location2);
		}
	}
	if (!(flags & sync_defer_copy))
	{
		if (!(flags & sync_no_notify))
		{
			if (query->sync_tag > m_sync_tag) [[unlikely]]
			{
				ptimer->sync_hint(FIFO::interrupt_hint::zcull_sync, { .query = query });
				ensure(m_sync_tag >= query->sync_tag);
			}
		}
		// There can be multiple queries all writing to the same address, loop to flush all of them
		while (query->pending)
		{
			update(ptimer, sync_address);
		}
		return result_none;
	}
	return result_zcull_intr;
}
// Synchronous wait for a single query to retire. Internal RSX path used by
// conditional render control; unlike the ranged overload this never disables
// report-page optimizations.
flags32_t ZCULL_control::read_barrier(class ::rsx::thread* ptimer, u32 memory_address, occlusion_query_info* query)
{
	for (; query->pending;)
	{
		// Keep forcing queue processing until this query's write has landed
		update(ptimer, memory_address);
	}
	return result_none;
}
// Searches the pending queue (newest first) for queries contributing to the
// report at 'sink_address'. Once the sink entry is found, continues collecting
// queries sharing the same stat counter tag; 'all' controls whether every such
// query is gathered or only the first.
query_search_result ZCULL_control::find_query(vm::addr_t sink_address, bool all)
{
	query_search_result result{};
	u32 stat_id = 0;
	for (auto It = m_pending_writes.crbegin(); It != m_pending_writes.crend(); ++It)
	{
		if (stat_id) [[unlikely]]
		{
			// Sink already located; keep walking entries with the same counter tag
			if (It->counter_tag != stat_id)
			{
				if (result.found)
				{
					// Some result was found, return it instead
					break;
				}
				// Zcull stats were cleared between this query and the required stats, result can only be 0
				return { true, 0, {} };
			}
			if (It->query && It->query->num_draws)
			{
				result.found = true;
				result.queries.push_back(It->query);
				if (!all)
				{
					break;
				}
			}
		}
		else if (It->sink == sink_address)
		{
			if (It->query && It->query->num_draws)
			{
				result.found = true;
				result.queries.push_back(It->query);
				if (!all)
				{
					break;
				}
			}
			stat_id = It->counter_tag;
		}
	}
	return result;
}
// Registers alias addresses so that pending reports overlapping [start, start + range)
// are also written to the corresponding offsets inside 'dest' when they retire.
// NOTE(review): bytes_to_write is never incremented, so this always returns 0 —
// confirm callers rely only on the aliasing side effect, not the return value.
u32 ZCULL_control::copy_reports_to(u32 start, u32 range, u32 dest)
{
	u32 bytes_to_write = 0;
	const auto memory_range = utils::address_range::start_length(start, range);
	for (auto& writer : m_pending_writes)
	{
		if (!writer.sink)
			break;
		if (!writer.forwarder && memory_range.overlaps(writer.sink))
		{
			// Mirror this report into the destination at the same relative offset
			u32 address = (writer.sink - start) + dest;
			writer.sink_alias.push_back(vm::cast(address));
		}
	}
	return bytes_to_write;
}
// Called when a report write to 'address' is queued. While the page's location
// is still optimized, the page is reference-counted and its protection dropped
// to no-access so any guest read faults into on_access_violation. If the
// location is already de-optimized, the report counts as critical instead.
void ZCULL_control::on_report_enqueued(vm::addr_t address)
{
	const auto location = rsx::classify_location(address);
	std::scoped_lock lock(m_pages_mutex);
	if (!m_pages_accessed[location]) [[ likely ]]
	{
		const auto page_address = utils::page_start(static_cast<u32>(address));
		auto& page = m_locked_pages[location][page_address];
		page.add_ref();
		if (page.prot == utils::protection::rw)
		{
			// First outstanding report on this page; arm the access trap
			utils::memory_protect(vm::base(page_address), utils::c_page_size, utils::protection::no);
			page.prot = utils::protection::no;
		}
	}
	else
	{
		m_critical_reports_in_flight++;
	}
}
// Called when a report write to 'address' has landed. Drops the page reference
// taken in on_report_enqueued, or decrements the critical-report counter if the
// location's optimizations are disabled.
void ZCULL_control::on_report_completed(vm::addr_t address)
{
	const auto location = rsx::classify_location(address);
	if (!m_pages_accessed[location])
	{
		const auto page_address = utils::page_start(static_cast<u32>(address));
		std::scoped_lock lock(m_pages_mutex);
		if (auto found = m_locked_pages[location].find(page_address);
			found != m_locked_pages[location].end())
		{
			auto& page = found->second;
			ensure(page.has_refs());
			page.release();
		}
	}
	// Deliberate second check: disable_optimizations() may flip the flag (and
	// fold outstanding page refs into the critical counter) between the two reads
	if (m_pages_accessed[location])
	{
		m_critical_reports_in_flight--;
	}
}
// Permanently disables report-page optimizations for 'location' after the guest
// was observed accessing report memory there. Restores protection on all locked
// pages and converts their outstanding references into critical in-flight reports.
void ZCULL_control::disable_optimizations(::rsx::thread*, u32 location)
{
	// Externally synchronized
	rsx_log.warning("Reports area at location %s was accessed. ZCULL optimizations will be disabled.", location_tostring(location));
	m_pages_accessed[location] = true;
	// Unlock pages
	for (auto& p : m_locked_pages[location])
	{
		const auto this_address = p.first;
		auto& page = p.second;
		if (page.prot != utils::protection::rw)
		{
			utils::memory_protect(vm::base(this_address), utils::c_page_size, utils::protection::rw);
			page.prot = utils::protection::rw;
		}
		// Each outstanding ref becomes a critical report that must complete normally
		while (page.has_refs())
		{
			m_critical_reports_in_flight++;
			page.release();
		}
	}
	m_locked_pages[location].clear();
}
// Page-fault handler for report pages trapped by on_report_enqueued.
// Returns true when the fault belonged to a locked report page (the page has
// been unlocked and the access can be retried), false otherwise.
bool ZCULL_control::on_access_violation(u32 address)
{
	const auto page_address = utils::page_start(address);
	const auto location = rsx::classify_location(address);
	if (m_pages_accessed[location])
	{
		// Already faulted, no locks possible
		return false;
	}
	bool need_disable_optimizations = false;
	{
		reader_lock lock(m_pages_mutex);
		if (auto found = m_locked_pages[location].find(page_address);
			found != m_locked_pages[location].end())
		{
			// Page is (or was) one of ours; take the writer lock to modify it
			lock.upgrade();
			auto& fault_page = m_locked_pages[location][page_address];
			if (fault_page.prot != utils::protection::rw)
			{
				if (fault_page.has_refs())
				{
					// R/W to active block
					need_disable_optimizations = true; // Defer actual operation
					m_pages_accessed[location] = true;
				}
				else
				{
					// R/W to stale block, unload it and move on
					utils::memory_protect(vm::base(page_address), utils::c_page_size, utils::protection::rw);
					m_locked_pages[location].erase(page_address);
					return true;
				}
			}
		}
	}
	// Deadlock avoidance, do not pause RSX FIFO eng while holding the pages lock
	if (need_disable_optimizations)
	{
		auto thr = rsx::get_current_renderer();
		rsx::eng_lock rlock(thr);
		std::scoped_lock lock(m_pages_mutex);
		disable_optimizations(thr, location);
		thr->m_eng_interrupt_mask |= rsx::pipe_flush_interrupt;
		return true;
	}
	return false;
}
// Conditional rendering helpers
// Clears all pending evaluation state. Does not touch the 'enabled' flag.
void conditional_render_eval::reset()
{
	eval_failed = false;
	eval_sources.clear();
	eval_sync_tag = 0;
	eval_address = 0;
}
// True when conditional rendering is active and the last evaluation failed,
// i.e. draws should currently be suppressed.
bool conditional_render_eval::disable_rendering() const
{
	if (!enabled)
	{
		return false;
	}
	return eval_failed;
}
// True when an evaluation address has been latched but not yet resolved.
bool conditional_render_eval::eval_pending() const
{
	if (enabled)
	{
		return eval_address != 0;
	}
	return false;
}
// Arms conditional rendering against the report at 'address'. Any active
// hardware conditional region is terminated first.
void conditional_render_eval::enable_conditional_render(::rsx::thread* pthr, u32 address)
{
	if (hw_cond_active)
	{
		// The HW path can only be live while the feature is enabled
		ensure(enabled);
		pthr->end_conditional_rendering();
	}
	reset();
	eval_address = address;
	enabled = true;
}
// Turns conditional rendering off entirely, closing any active hardware
// conditional region and discarding pending evaluation state.
void conditional_render_eval::disable_conditional_render(::rsx::thread* pthr)
{
	if (hw_cond_active)
	{
		// The HW path can only be live while the feature is enabled
		ensure(enabled);
		pthr->end_conditional_rendering();
	}
	reset();
	enabled = false;
}
// Takes ownership of the candidate query list; the front query's sync tag
// becomes the tag this evaluation waits on.
void conditional_render_eval::set_eval_sources(std::vector<occlusion_query_info*>& sources)
{
	eval_sources = std::move(sources);
	const auto* lead_query = eval_sources.front();
	eval_sync_tag = lead_query->sync_tag;
}
// Records the outcome of the conditional-render evaluation, ending any active
// hardware conditional region and clearing the pending-eval state first.
void conditional_render_eval::set_eval_result(::rsx::thread* pthr, bool failed)
{
	if (hw_cond_active)
	{
		// The HW path can only be live while the feature is enabled
		ensure(enabled);
		pthr->end_conditional_rendering();
	}
	reset();
	eval_failed = failed;
}
// Resolves the latched evaluation by reading the report value straight from
// guest memory; a zero value means the condition failed.
void conditional_render_eval::eval_result(::rsx::thread* pthr)
{
	const vm::ptr<CellGcmReportData> report = vm::cast(eval_address);
	set_eval_result(pthr, report->value == 0u);
}
}
}
| 26,182
|
C++
|
.cpp
| 851
| 24.549941
| 145
| 0.615695
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,386
|
RSXDisAsm.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXDisAsm.cpp
|
#include "stdafx.h"
#include "RSXDisAsm.h"
#include "RSXThread.h"
#include "gcm_enums.h"
#include "gcm_printing.h"
#include "rsx_methods.h"
namespace rsx
{
void invalid_method(context*, u32, u32);
}
// Disassembles the RSX FIFO command located at 'pc'. Returns the number of
// bytes the command occupies (0 when the command word cannot be read). The
// textual result is accumulated into 'last_opcode' depending on m_mode.
u32 RSXDisAsm::disasm(u32 pc)
{
	last_opcode.clear();
	// Loads the big-endian command word at pc into m_op. Fails for addresses
	// below the disassembly window or for unmapped IO offsets.
	auto try_read_op = [this](u32 pc) -> bool
	{
		if (pc < m_start_pc)
		{
			return false;
		}
		if (m_offset == vm::g_sudo_addr)
		{
			// Translation needed
			pc = static_cast<const rsx::thread*>(m_cpu)->iomap_table.get_addr(pc);
			if (pc == umax) return false;
		}
		m_op = *reinterpret_cast<const atomic_be_t<u32>*>(m_offset + pc);
		return true;
	};
	if (!try_read_op(pc))
	{
		return 0;
	}
	dump_pc = pc;
	if (m_op & RSX_METHOD_NON_METHOD_CMD_MASK)
	{
		// Control-flow command (jump/call/ret) rather than a method header
		if (m_mode == cpu_disasm_mode::survey_cmd_size)
		{
			return 4;
		}
		if ((m_op & RSX_METHOD_OLD_JUMP_CMD_MASK) == RSX_METHOD_OLD_JUMP_CMD)
		{
			u32 jumpAddr = m_op & RSX_METHOD_OLD_JUMP_OFFSET_MASK;
			Write(fmt::format("jump 0x%07x", jumpAddr), -1);
		}
		else if ((m_op & RSX_METHOD_NEW_JUMP_CMD_MASK) == RSX_METHOD_NEW_JUMP_CMD)
		{
			u32 jumpAddr = m_op & RSX_METHOD_NEW_JUMP_OFFSET_MASK;
			Write(fmt::format("jump 0x%07x", jumpAddr), -1);
		}
		else if ((m_op & RSX_METHOD_CALL_CMD_MASK) == RSX_METHOD_CALL_CMD)
		{
			u32 callAddr = m_op & RSX_METHOD_CALL_OFFSET_MASK;
			Write(fmt::format("call 0x%07x", callAddr), -1);
		}
		else if ((m_op & RSX_METHOD_RETURN_MASK) == RSX_METHOD_RETURN_CMD)
		{
			Write("ret", -1);
		}
		else
		{
			Write(fmt::format("?? ?? (0x%x)", m_op), -1);
		}
		return 4;
	}
	else if ((m_op & RSX_METHOD_NOP_MASK) == RSX_METHOD_NOP_CMD)
	{
		// Coalesce a run of NOPs (up to a 16 KiB boundary) outside of list mode
		u32 i = 1;
		for (pc += 4; m_mode != cpu_disasm_mode::list && pc % (4096 * 4); i++, pc += 4)
		{
			if (!try_read_op(pc))
			{
				break;
			}
			if ((m_op & RSX_METHOD_NOP_MASK) != RSX_METHOD_NOP_CMD)
			{
				break;
			}
		}
		if (m_mode != cpu_disasm_mode::survey_cmd_size)
		{
			if (i == 1)
				Write("nop", 0);
			else
				Write(fmt::format("nop x%u", i), 0);
		}
		return i * 4;
	}
	else
	{
		// Method header: 'count' argument words follow
		const u32 count = (m_op & RSX_METHOD_COUNT_MASK) >> RSX_METHOD_COUNT_SHIFT;
		const bool non_inc = (m_op & RSX_METHOD_NON_INCREMENT_CMD_MASK) == RSX_METHOD_NON_INCREMENT_CMD && count > 1;
		const u32 id_start = (m_op & 0x3ffff) >> 2;
		if (count > 10 && id_start == NV4097_SET_OBJECT)
		{
			// Hack: 0 method with large count is unlikely to be a command
			// But is very common in floating point args, messing up debugger's code-flow
			Write(fmt::format("?? ?? (0x%x)", m_op), -1);
			return 4;
		}
		pc += 4;
		std::string str;
		for (u32 i = 0; i < count; i++, pc += 4)
		{
			if (!try_read_op(pc))
			{
				last_opcode.clear();
				Write(fmt::format("?? ?? (0x%08x:unmapped)", m_op), -1);
				return 4;
			}
			// Non-incrementing methods write every argument to the same register
			const u32 id = id_start + (non_inc ? 0 : i);
			if (rsx::methods[id] == &rsx::invalid_method)
			{
				last_opcode.clear();
				Write(fmt::format("?? ?? (0x%08x:method)", m_op), -1);
				return 4;
			}
			if (m_mode == cpu_disasm_mode::survey_cmd_size)
			{
				continue;
			}
			if (m_mode != cpu_disasm_mode::list && !last_opcode.empty())
			{
				continue;
			}
			str.clear();
			rsx::get_pretty_printing_function(id)(str, id, m_op);
			Write(str, m_mode == cpu_disasm_mode::list ? i : count, non_inc, id);
		}
		return (count + 1) * 4;
	}
}
std::pair<const void*, usz> RSXDisAsm::get_memory_span() const
{
return {m_offset + m_start_pc, (1ull << 32) - m_start_pc};
}
// Clones this disassembler behind the generic CPUDisAsm interface.
std::unique_ptr<CPUDisAsm> RSXDisAsm::copy_type_erased() const
{
	auto clone = std::make_unique<RSXDisAsm>(*this);
	return clone;
}
// Formats one decoded command into 'last_opcode'. In interpreter mode the line
// is prefixed with the PC and argument-count marker; in list mode each call
// appends a "[method] raw: text" line. Other modes produce no output.
void RSXDisAsm::Write(std::string_view str, s32 count, bool is_non_inc, u32 id)
{
	if (m_mode == cpu_disasm_mode::interpreter)
	{
		last_opcode.clear();
		if (count == 1 && !is_non_inc)
		{
			fmt::append(last_opcode, "[%08x] ( )", dump_pc);
		}
		else if (count >= 0)
		{
			fmt::append(last_opcode, "[%08x] (%s%u)", dump_pc, is_non_inc ? "+" : "", count);
		}
		else
		{
			fmt::append(last_opcode, "[%08x] (x)", dump_pc);
		}
		// Pad to a fixed column so mnemonics line up
		last_opcode.resize(7 + 11, ' ');
		last_opcode += str;
	}
	else if (m_mode == cpu_disasm_mode::list)
	{
		if (!last_opcode.empty())
		{
			last_opcode += '\n';
		}
		fmt::append(last_opcode, "[%04x] 0x%08x: %s", id, m_op, str);
	}
}
| 4,236
|
C++
|
.cpp
| 174
| 21.16092
| 111
| 0.60144
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,387
|
rsx_methods.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_methods.cpp
|
#include "stdafx.h"
#include "rsx_methods.h"
#include "RSXThread.h"
#include "rsx_utils.h"
#include "rsx_decode.h"
#include "Common/time.hpp"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/lv2/sys_rsx.h"
#include "Emu/RSX/Common/BufferUtils.h"
#include "Emu/RSX/NV47/HW/nv47.h"
#include "Emu/RSX/NV47/HW/nv47_sync.hpp"
#include "Emu/RSX/NV47/HW/context_accessors.define.h" // TODO: Context objects belong in FW not HW
namespace rsx
{
rsx_state method_registers;
std::array<rsx_method_t, 0x10000 / 4> methods{};
std::array<u32, 0x10000 / 4> state_signals{};
// Handler for unrecognized RSX methods. Collects diagnostics instead of
// throwing; garbage commands are tolerated unless strict FIFO accuracy is off,
// in which case the FIFO is recovered.
void invalid_method(context* ctx, u32 reg, u32 arg)
{
	//Don't throw, gather information and ignore broken/garbage commands
	//TODO: Investigate why these commands are executed at all. (Heap corruption? Alignment padding?)
	auto* renderer = RSX(ctx);
	const u32 cmd = renderer->get_fifo_cmd();
	rsx_log.error("Invalid RSX method 0x%x (arg=0x%x, start=0x%x, count=0x%x, non-inc=%s)", reg << 2, arg,
		cmd & 0xfffc, (cmd >> 18) & 0x7ff, !!(cmd & RSX_METHOD_NON_INCREMENT_CMD));
	if (g_cfg.core.rsx_fifo_accuracy != rsx_fifo_mode::as_ps3)
	{
		renderer->recover_fifo();
	}
}
// Fallback handler for methods that are recognized as valid but have no
// implementation; logs at trace level only.
static void trace_method(context* /*ctx*/, u32 reg, u32 arg)
{
	// For unknown yet valid methods
	rsx_log.trace("RSX method 0x%x (arg=0x%x)", reg << 2, arg);
}
// HLE display-flip command. Notifies the registered queue handler on the RSX
// interrupt thread, resets method state, ends the frame and requests an
// emulator-side flip.
void flip_command(context* ctx, u32, u32 arg)
{
	// This path is only reachable in HLE GCM mode
	ensure(RSX(ctx)->isHLE);
	if (RSX(ctx)->vblank_at_flip != umax)
	{
		RSX(ctx)->flip_notification_count++;
	}
	if (auto ptr = RSX(ctx)->queue_handler)
	{
		// Dispatch the queue callback (argument 1) on the interrupt thread
		RSX(ctx)->intr_thread->cmd_list
		({
			{ ppu_cmd::set_args, 1 }, u64{1},
			{ ppu_cmd::lle_call, ptr },
			{ ppu_cmd::sleep, 0 }
		});
		RSX(ctx)->intr_thread->cmd_notify.store(1);
		RSX(ctx)->intr_thread->cmd_notify.notify_one();
	}
	RSX(ctx)->reset();
	RSX(ctx)->on_frame_end(arg);
	RSX(ctx)->request_emu_flip(arg);
	// Clear the 128-bit value at label_addr + 0x10
	vm::_ref<atomic_t<u128>>(RSX(ctx)->label_addr + 0x10).store(u128{});
}
// GCM user command handler. In LLE mode the request is routed through the
// sys_rsx syscall layer; in HLE mode the registered user callback is
// dispatched on the RSX interrupt thread with 'arg' as its argument.
void user_command(context* ctx, u32, u32 arg)
{
	auto* renderer = RSX(ctx);
	if (!renderer->isHLE)
	{
		sys_rsx_context_attribute(0x55555555, 0xFEF, 0, arg, 0, 0);
		return;
	}
	if (const auto handler = renderer->user_handler)
	{
		renderer->intr_thread->cmd_list
		({
			{ ppu_cmd::set_args, 1 }, u64{arg},
			{ ppu_cmd::lle_call, handler },
			{ ppu_cmd::sleep, 0 }
		});
		renderer->intr_thread->cmd_notify.store(1);
		renderer->intr_thread->cmd_notify.notify_one();
	}
}
namespace gcm
{
// Forwards a GCM driver-flip request with the given template index to the
// LV2 RSX context attribute handler (command 0x102).
template<u32 index>
struct driver_flip
{
	static void impl(context*, u32 /*reg*/, u32 arg)
	{
		sys_rsx_context_attribute(0x55555555, 0x102, index, arg, 0, 0);
	}
};
// Forwards a GCM queued-flip request with the given template index to the
// LV2 RSX context attribute handler (command 0x103), tracking flips that
// occur after a vblank was observed.
template<u32 index>
struct queue_flip
{
	static void impl(context* ctx, u32 /*reg*/, u32 arg)
	{
		if (RSX(ctx)->vblank_at_flip != umax)
		{
			RSX(ctx)->flip_notification_count++;
		}
		sys_rsx_context_attribute(0x55555555, 0x103, index, arg, 0, 0);
	}
};
}
namespace fifo
{
// Only meaningful inside a begin/end bracket: for non-disjoint primitives,
// requests a primitive barrier before the next draw in the clause.
void draw_barrier(context* ctx, u32, u32)
{
	// Short-circuit preserves the original evaluation order (REGS is only
	// consulted while inside begin/end)
	if (RSX(ctx)->in_begin_end && !REGS(ctx)->current_draw_clause.is_disjoint_primitive)
	{
		// Enable primitive barrier request
		REGS(ctx)->current_draw_clause.primitive_barrier_enable = true;
	}
}
}
void rsx_state::init()
{
// Reset all regsiters
registers.fill(0);
state_signals.fill(0);
transform_program.fill(0);
transform_constants = {};
current_draw_clause = {};
register_vertex_info = {};
// Special values set at initialization, these are not set by a context reset
registers[NV4097_SET_SHADER_PROGRAM] = (0 << 2) | (CELL_GCM_LOCATION_LOCAL + 1);
for (u32 i = 0; i < 16; i++)
{
registers[NV4097_SET_TEXTURE_FORMAT + (i * 8)] = (1 << 16 /* mipmap */) | ((CELL_GCM_TEXTURE_R5G6B5 | CELL_GCM_TEXTURE_SZ | CELL_GCM_TEXTURE_NR) << 8) | (2 << 4 /* 2D */) | (CELL_GCM_LOCATION_LOCAL + 1);
}
for (u32 i = 0; i < 4; i++)
{
registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (i * 8)] = (1 << 16 /* mipmap */) | ((CELL_GCM_TEXTURE_X32_FLOAT | CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_NR) << 8) | (2 << 4 /* 2D */) | (CELL_GCM_LOCATION_LOCAL + 1);
}
registers[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = CELL_GCM_CONTEXT_DMA_SEMAPHORE_R;
registers[NV4097_SET_CONTEXT_DMA_SEMAPHORE] = CELL_GCM_CONTEXT_DMA_SEMAPHORE_RW;
{
// Commands injected by cellGcmInit
registers[NV406E_SEMAPHORE_OFFSET] = 0x30;
registers[NV406E_SEMAPHORE_ACQUIRE] = 0x1;
registers[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = 0x66616661;
registers[0x0] = 0x31337000;
registers[NV4097_SET_CONTEXT_DMA_NOTIFIES] = 0x66604200;
registers[NV4097_SET_CONTEXT_DMA_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_B] = 0xfeed0001;
registers[NV4097_SET_CONTEXT_DMA_COLOR_B] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_STATE] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_COLOR_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_ZETA] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_VERTEX_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_VERTEX_B] = 0xfeed0001;
registers[NV4097_SET_CONTEXT_DMA_SEMAPHORE] = 0x66606660;
registers[NV4097_SET_CONTEXT_DMA_REPORT] = 0x66626660;
registers[NV4097_SET_CONTEXT_DMA_CLIP_ID] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_CULL_DATA] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_COLOR_C] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_COLOR_D] = 0xfeed0000;
registers[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = 0x66616661;
registers[NV4097_SET_SURFACE_CLIP_HORIZONTAL] = 0x0;
registers[NV4097_SET_SURFACE_CLIP_VERTICAL] = 0x0;
registers[NV4097_SET_SURFACE_FORMAT] = 0x121;
registers[NV4097_SET_SURFACE_PITCH_A] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_AOFFSET] = 0x0;
registers[NV4097_SET_SURFACE_ZETA_OFFSET] = 0x0;
registers[NV4097_SET_SURFACE_COLOR_BOFFSET] = 0x0;
registers[NV4097_SET_SURFACE_PITCH_B] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_TARGET] = 0x1;
registers[0x224 / 4] = 0x80;
registers[0x228 / 4] = 0x100;
registers[NV4097_SET_SURFACE_PITCH_Z] = 0x40;
registers[0x230 / 4] = 0x0;
registers[NV4097_SET_SURFACE_PITCH_C] = 0x40;
registers[NV4097_SET_SURFACE_PITCH_D] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_COFFSET] = 0x0;
registers[NV4097_SET_SURFACE_COLOR_DOFFSET] = 0x0;
registers[0x1d80 / 4] = 0x3;
registers[NV4097_SET_WINDOW_OFFSET] = 0x0;
registers[0x2bc / 4] = 0x0;
registers[0x2c0 / 4] = 0xfff0000;
registers[0x2c4 / 4] = 0xfff0000;
registers[0x2c8 / 4] = 0xfff0000;
registers[0x2cc / 4] = 0xfff0000;
registers[0x2d0 / 4] = 0xfff0000;
registers[0x2d4 / 4] = 0xfff0000;
registers[0x2d8 / 4] = 0xfff0000;
registers[0x2dc / 4] = 0xfff0000;
registers[0x2e0 / 4] = 0xfff0000;
registers[0x2e4 / 4] = 0xfff0000;
registers[0x2e8 / 4] = 0xfff0000;
registers[0x2ec / 4] = 0xfff0000;
registers[0x2f0 / 4] = 0xfff0000;
registers[0x2f4 / 4] = 0xfff0000;
registers[0x2f8 / 4] = 0xfff0000;
registers[0x2fc / 4] = 0xfff0000;
registers[0x1d98 / 4] = 0xfff0000;
registers[0x1d9c / 4] = 0xfff0000;
registers[0x1da4 / 4] = 0x0;
registers[NV4097_SET_CONTROL0] = 0x100000;
registers[0x1454 / 4] = 0x0;
registers[NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK] = 0x3fffff;
registers[NV4097_SET_FREQUENCY_DIVIDER_OPERATION] = 0x0;
registers[NV4097_SET_ATTRIB_COLOR] = 0x6144321;
registers[NV4097_SET_ATTRIB_TEX_COORD] = 0xedcba987;
registers[NV4097_SET_ATTRIB_TEX_COORD_EX] = 0x6f;
registers[NV4097_SET_ATTRIB_UCLIP0] = 0x171615;
registers[NV4097_SET_ATTRIB_UCLIP1] = 0x1b1a19;
registers[NV4097_SET_TEX_COORD_CONTROL] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 1] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 2] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 3] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 4] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 5] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 6] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 7] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 8] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 9] = 0x0;
registers[0xa0c / 4] = 0x0;
registers[0xa60 / 4] = 0x0;
registers[NV4097_SET_POLY_OFFSET_LINE_ENABLE] = 0x0;
registers[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_BIAS] = 0x0;
registers[0x1428 / 4] = 0x1;
registers[NV4097_SET_SHADER_WINDOW] = 0x1000;
registers[0x1e94 / 4] = 0x11;
registers[0x1450 / 4] = 0x80003;
registers[0x1d64 / 4] = 0x2000000;
registers[0x145c / 4] = 0x1;
registers[NV4097_SET_REDUCE_DST_COLOR] = 0x1;
registers[NV4097_SET_TEXTURE_CONTROL2] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 1] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 2] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 3] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 4] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 5] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 6] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 7] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 8] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 9] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 10] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 11] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 12] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 13] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 14] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 15] = 0x2dc8;
registers[NV4097_SET_FOG_MODE] = 0x800;
registers[NV4097_SET_FOG_PARAMS] = 0x0;
registers[NV4097_SET_FOG_PARAMS + 1] = 0x0;
registers[NV4097_SET_FOG_PARAMS + 2] = 0x0;
registers[0x240 / 4] = 0xffff;
registers[0x244 / 4] = 0x0;
registers[0x248 / 4] = 0x0;
registers[0x24c / 4] = 0x0;
registers[NV4097_SET_ANISO_SPREAD] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 1] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 2] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 3] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 4] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 5] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 6] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 7] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 8] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 9] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 10] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 11] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 12] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 13] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 14] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 15] = 0x10101;
registers[0x400 / 4] = 0x7421;
registers[0x404 / 4] = 0x7421;
registers[0x408 / 4] = 0x7421;
registers[0x40c / 4] = 0x7421;
registers[0x410 / 4] = 0x7421;
registers[0x414 / 4] = 0x7421;
registers[0x418 / 4] = 0x7421;
registers[0x41c / 4] = 0x7421;
registers[0x420 / 4] = 0x7421;
registers[0x424 / 4] = 0x7421;
registers[0x428 / 4] = 0x7421;
registers[0x42c / 4] = 0x7421;
registers[0x430 / 4] = 0x7421;
registers[0x434 / 4] = 0x7421;
registers[0x438 / 4] = 0x7421;
registers[0x43c / 4] = 0x7421;
registers[0x440 / 4] = 0x9aabaa98;
registers[0x444 / 4] = 0x66666789;
registers[0x448 / 4] = 0x98766666;
registers[0x44c / 4] = 0x89aabaa9;
registers[0x450 / 4] = 0x99999999;
registers[0x454 / 4] = 0x88888889;
registers[0x458 / 4] = 0x98888888;
registers[0x45c / 4] = 0x99999999;
registers[0x460 / 4] = 0x56676654;
registers[0x464 / 4] = 0x33333345;
registers[0x468 / 4] = 0x54333333;
registers[0x46c / 4] = 0x45667665;
registers[0x470 / 4] = 0xaabbba99;
registers[0x474 / 4] = 0x66667899;
registers[0x478 / 4] = 0x99876666;
registers[0x47c / 4] = 0x99abbbaa;
registers[NV4097_SET_VERTEX_DATA_BASE_OFFSET] = 0x0;
registers[NV4097_SET_VERTEX_DATA_BASE_INDEX] = 0x0;
registers[0xe000 / 4] = 0xcafebabe;
registers[NV4097_SET_ALPHA_FUNC] = 0x207;
registers[NV4097_SET_ALPHA_REF] = 0x0;
registers[NV4097_SET_ALPHA_TEST_ENABLE] = 0x0;
registers[NV4097_SET_BACK_STENCIL_FUNC] = 0x207;
registers[NV4097_SET_BACK_STENCIL_FUNC_REF] = 0x0;
registers[NV4097_SET_BACK_STENCIL_FUNC_MASK] = 0xff;
registers[NV4097_SET_BACK_STENCIL_MASK] = 0xff;
registers[NV4097_SET_BACK_STENCIL_OP_FAIL] = 0x1e00;
registers[NV4097_SET_BACK_STENCIL_OP_ZFAIL] = 0x1e00;
registers[NV4097_SET_BACK_STENCIL_OP_ZPASS] = 0x1e00;
registers[NV4097_SET_BLEND_COLOR] = 0x0;
registers[NV4097_SET_BLEND_COLOR2] = 0x0;
registers[NV4097_SET_BLEND_ENABLE] = 0x0;
registers[NV4097_SET_BLEND_ENABLE_MRT] = 0x0;
registers[NV4097_SET_BLEND_EQUATION] = 0x80068006;
registers[NV4097_SET_BLEND_FUNC_SFACTOR] = 0x10001;
registers[NV4097_SET_BLEND_FUNC_DFACTOR] = 0x0;
registers[NV4097_SET_ZSTENCIL_CLEAR_VALUE] = 0xffffff00;
registers[NV4097_CLEAR_SURFACE] = 0x0;
registers[NV4097_NO_OPERATION] = 0x0;
registers[NV4097_SET_COLOR_MASK] = 0x1010101;
registers[NV4097_SET_CULL_FACE_ENABLE] = 0x0;
registers[NV4097_SET_CULL_FACE] = 0x405;
registers[NV4097_SET_DEPTH_BOUNDS_MIN] = 0x0;
registers[NV4097_SET_DEPTH_BOUNDS_MAX] = 0x3f800000;
registers[NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE] = 0x0;
registers[NV4097_SET_DEPTH_FUNC] = 0x201;
registers[NV4097_SET_DEPTH_MASK] = 0x1;
registers[NV4097_SET_DEPTH_TEST_ENABLE] = 0x0;
registers[NV4097_SET_DITHER_ENABLE] = 0x1;
registers[NV4097_SET_SHADER_PACKER] = 0x0;
registers[NV4097_SET_FREQUENCY_DIVIDER_OPERATION] = 0x0;
registers[NV4097_SET_FRONT_FACE] = 0x901;
registers[NV4097_SET_LINE_WIDTH] = 0x8;
registers[NV4097_SET_LOGIC_OP_ENABLE] = 0x0;
registers[NV4097_SET_LOGIC_OP] = 0x1503;
registers[NV4097_SET_POINT_SIZE] = 0x3f800000;
registers[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_BIAS] = 0x0;
registers[NV4097_SET_RESTART_INDEX_ENABLE] = 0x0;
registers[NV4097_SET_RESTART_INDEX] = 0xffffffff;
registers[NV4097_SET_SCISSOR_HORIZONTAL] = 0x10000000;
registers[NV4097_SET_SCISSOR_VERTICAL] = 0x10000000;
registers[NV4097_SET_SHADE_MODE] = 0x1d01;
registers[NV4097_SET_STENCIL_FUNC] = 0x207;
registers[NV4097_SET_STENCIL_FUNC_REF] = 0x0;
registers[NV4097_SET_STENCIL_FUNC_MASK] = 0xff;
registers[NV4097_SET_STENCIL_MASK] = 0xff;
registers[NV4097_SET_STENCIL_OP_FAIL] = 0x1e00;
registers[NV4097_SET_STENCIL_OP_ZFAIL] = 0x1e00;
registers[NV4097_SET_STENCIL_OP_ZPASS] = 0x1e00;
registers[NV4097_SET_STENCIL_TEST_ENABLE] = 0x0;
registers[NV4097_SET_TEXTURE_ADDRESS] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 8] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 8] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 8] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 8] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 16] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 16] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 16] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 16] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 24] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 24] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 24] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 24] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 32] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 32] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 32] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 32] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 40] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 40] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 40] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 40] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 48] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 48] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 48] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 48] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 56] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 56] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 56] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 56] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 64] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 64] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 64] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 64] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 72] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 72] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 72] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 72] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 80] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 80] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 80] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 80] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 88] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 88] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 88] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 88] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 96] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 96] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 96] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 96] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 104] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 104] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 104] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 104] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 112] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 112] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 112] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 112] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 120] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 120] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 120] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 120] = 0x2052000;
registers[NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 1] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 1] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 2] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 2] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 3] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 3] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 4] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 4] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 5] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 5] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 6] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 6] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 7] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 7] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 8] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 8] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 9] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 9] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 10] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 10] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 11] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 11] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 12] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 12] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 13] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 13] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 14] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 14] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 15] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 15] = 0x0;
registers[NV4097_SET_VIEWPORT_HORIZONTAL] = 0x10000000;
registers[NV4097_SET_VIEWPORT_VERTICAL] = 0x10000000;
registers[NV4097_SET_CLIP_MIN] = 0x0;
registers[NV4097_SET_CLIP_MAX] = 0x3f800000;
registers[NV4097_SET_VIEWPORT_OFFSET + 0] = 0x45000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 1] = 0x45000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 2] = 0x3f000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 3] = 0x0;
registers[NV4097_SET_VIEWPORT_SCALE + 0] = 0x45000000;
registers[NV4097_SET_VIEWPORT_SCALE + 1] = 0x45000000;
registers[NV4097_SET_VIEWPORT_SCALE + 2] = 0x3f000000;
registers[NV4097_SET_VIEWPORT_SCALE + 3] = 0x0;
// NOTE: Realhw emits this sequence twice, likely to work around a hardware bug. Similar behavior can be seen in other buggy register blocks
//registers[NV4097_SET_VIEWPORT_OFFSET + 0] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 1] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 2] = 0x3f000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 3] = 0x0;
//registers[NV4097_SET_VIEWPORT_SCALE + 0] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 1] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 2] = 0x3f000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 3] = 0x0;
registers[NV4097_SET_ANTI_ALIASING_CONTROL] = 0xffff0000;
registers[NV4097_SET_BACK_POLYGON_MODE] = 0x1b02;
registers[NV4097_SET_COLOR_CLEAR_VALUE] = 0x0;
registers[NV4097_SET_COLOR_MASK_MRT] = 0x0;
registers[NV4097_SET_FRONT_POLYGON_MODE] = 0x1b02;
registers[NV4097_SET_LINE_SMOOTH_ENABLE] = 0x0;
registers[NV4097_SET_LINE_STIPPLE] = 0x0;
registers[NV4097_SET_POINT_PARAMS_ENABLE] = 0x0;
registers[NV4097_SET_POINT_SPRITE_CONTROL] = 0x0;
registers[NV4097_SET_POLY_SMOOTH_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_STIPPLE] = 0x0;
registers[NV4097_SET_RENDER_ENABLE] = 0x1000000;
registers[NV4097_SET_USER_CLIP_PLANE_CONTROL] = 0x0;
registers[NV4097_SET_VERTEX_ATTRIB_INPUT_MASK] = 0xffff;
registers[NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 8] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 8] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 8] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 8] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 16] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 16] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 16] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 16] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 24] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 24] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 24] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 24] = 0x0;
registers[NV4097_SET_CYLINDRICAL_WRAP] = 0x0;
registers[NV4097_SET_ZMIN_MAX_CONTROL] = 0x1;
registers[NV4097_SET_TWO_SIDE_LIGHT_EN] = 0x0;
registers[NV4097_SET_TRANSFORM_BRANCH_BITS] = 0x0;
registers[NV4097_SET_NO_PARANOID_TEXTURE_FETCHES] = 0x0;
registers[0x2000 / 4] = 0x31337303;
registers[0x2180 / 4] = 0x66604200;
registers[0x2184 / 4] = 0xfeed0001;
registers[0x2188 / 4] = 0xfeed0000;
registers[NV3062_SET_OBJECT] = 0x313371c3;
registers[NV3062_SET_CONTEXT_DMA_NOTIFIES] = 0x66604200;
registers[NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE] = 0xfeed0000;
registers[NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN] = 0xfeed0000;
registers[0xa000 / 4] = 0x31337808;
registers[0xa180 / 4] = 0x66604200;
registers[0xa184 / 4] = 0x0;
registers[0xa188 / 4] = 0x0;
registers[0xa18c / 4] = 0x0;
registers[0xa190 / 4] = 0x0;
registers[0xa194 / 4] = 0x0;
registers[0xa198 / 4] = 0x0;
registers[0xa19c / 4] = 0x313371c3;
registers[0xa2fc / 4] = 0x3;
registers[0xa300 / 4] = 0x4;
registers[0x8000 / 4] = 0x31337a73;
registers[0x8180 / 4] = 0x66604200;
registers[0x8184 / 4] = 0xfeed0000;
registers[0xc000 / 4] = 0x3137af00;
registers[0xc180 / 4] = 0x66604200;
registers[NV4097_SET_ZCULL_EN] = 0x3;
registers[NV4097_SET_ZCULL_STATS_ENABLE] = 0x0;
registers[NV4097_SET_ZCULL_CONTROL0] = 0x10;
registers[NV4097_SET_ZCULL_CONTROL1] = 0x1000100;
registers[NV4097_SET_SCULL_CONTROL] = 0xff000002;
registers[NV4097_SET_TEXTURE_OFFSET] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 8] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 8] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 8] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 1] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 8] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 16] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 16] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 16] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 2] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 16] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 24] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 24] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 24] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 3] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 24] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 32] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 32] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 32] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 4] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 32] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 40] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 40] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 40] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 5] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 40] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 48] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 48] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 48] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 6] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 48] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 56] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 56] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 56] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 7] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 56] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 64] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 64] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 64] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 8] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 64] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 72] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 72] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 72] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 9] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 72] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 80] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 80] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 80] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 10] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 80] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 88] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 88] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 88] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 11] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 88] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 96] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 96] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 96] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 12] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 96] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 104] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 104] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 104] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 13] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 104] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 112] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 112] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 112] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 14] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 112] = 0xaae4;
registers[NV4097_SET_TEXTURE_OFFSET + 120] = 0x0;
registers[NV4097_SET_TEXTURE_FORMAT + 120] = 0x18429;
registers[NV4097_SET_TEXTURE_IMAGE_RECT + 120] = 0x80008;
registers[NV4097_SET_TEXTURE_CONTROL3 + 15] = 0x100008;
registers[NV4097_SET_TEXTURE_CONTROL1 + 120] = 0xaae4;
registers[NV4097_SET_VERTEX_TEXTURE_OFFSET] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_FORMAT] = 0x1bc21;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3] = 0x8;
registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT] = 0x80008;
registers[NV4097_SET_VERTEX_TEXTURE_OFFSET + 8] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + 8] = 0x1bc21;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 8] = 0x8;
registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 8] = 0x80008;
registers[NV4097_SET_VERTEX_TEXTURE_OFFSET + 16] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + 16] = 0x1bc21;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 16] = 0x8;
registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 16] = 0x80008;
registers[NV4097_SET_VERTEX_TEXTURE_OFFSET + 24] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + 24] = 0x1bc21;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3 + 24] = 0x8;
registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + 24] = 0x80008;
registers[0x230c / 4] = 0x0;
registers[0x2310 / 4] = 0x0;
registers[0x2314 / 4] = 0x0;
registers[0x2318 / 4] = 0x0;
registers[0x231c / 4] = 0x0;
registers[0x2320 / 4] = 0x0;
registers[0x2324 / 4] = 0x101;
registers[NV3062_SET_COLOR_FORMAT] = 0xa;
registers[NV3062_SET_PITCH] = 0x400040;
registers[NV3062_SET_OFFSET_SOURCE] = 0x0;
registers[NV3062_SET_OFFSET_DESTIN] = 0x0;
registers[0x8300 / 4] = 0x1000a;
registers[0x8304 / 4] = 0x0;
registers[0xc184 / 4] = 0xfeed0000;
registers[0xc198 / 4] = 0x313371c3;
registers[0xc2fc / 4] = 0x1;
registers[0xc300 / 4] = 0x3;
registers[0xc304 / 4] = 0x3;
registers[0xc308 / 4] = 0x0;
registers[0xc30c / 4] = 0x0;
registers[0xc310 / 4] = 0x0;
registers[0xc314 / 4] = 0x0;
registers[0xc318 / 4] = 0x0;
registers[0xc31c / 4] = 0x0;
registers[0xc400 / 4] = 0x10002;
registers[0xc404 / 4] = 0x10000;
registers[0xc408 / 4] = 0x0;
registers[0xc40c / 4] = 0x0;
registers[NV308A_POINT] = 0x0;
registers[NV308A_SIZE_OUT] = 0x0;
registers[NV308A_SIZE_IN] = 0x0;
registers[NV406E_SET_REFERENCE] = umax;
if (auto rsx = Emu.IsStopped() ? nullptr : get_current_renderer(); rsx && rsx->ctrl)
{
// FIXME: Multi-context unaware
rsx->ctrl->ref = u32{ umax };
}
}
{
// Signal definitions
state_signals[NV4097_SET_SHADER_CONTROL] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 0] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 1] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 2] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 3] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 4] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 5] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 6] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 7] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 8] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TEX_COORD_CONTROL + 9] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_TWO_SIDE_LIGHT_EN] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_POINT_SPRITE_CONTROL] = rsx::fragment_program_state_dirty;
state_signals[NV4097_SET_USER_CLIP_PLANE_CONTROL] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_TRANSFORM_BRANCH_BITS] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_CLIP_MIN] = rsx::invalidate_zclip_bits;
state_signals[NV4097_SET_CLIP_MAX] = rsx::invalidate_zclip_bits;
state_signals[NV4097_SET_POINT_SIZE] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_ALPHA_FUNC] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_ALPHA_REF] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_ALPHA_TEST_ENABLE] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_ANTI_ALIASING_CONTROL] = rsx::fragment_state_dirty | rsx::pipeline_config_dirty;
state_signals[NV4097_SET_SHADER_PACKER] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_SHADER_WINDOW] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_FOG_MODE] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_SCISSOR_HORIZONTAL] = rsx::scissor_config_state_dirty;
state_signals[NV4097_SET_SCISSOR_VERTICAL] = rsx::scissor_config_state_dirty;
state_signals[NV4097_SET_VIEWPORT_HORIZONTAL] = rsx::scissor_config_state_dirty;
state_signals[NV4097_SET_VIEWPORT_VERTICAL] = rsx::scissor_config_state_dirty;
state_signals[NV4097_SET_FOG_PARAMS + 0] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_FOG_PARAMS + 1] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_VIEWPORT_SCALE + 0] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_VIEWPORT_SCALE + 1] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_VIEWPORT_SCALE + 2] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_VIEWPORT_OFFSET + 0] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_VIEWPORT_OFFSET + 1] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_VIEWPORT_OFFSET + 2] = rsx::vertex_state_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE] = rsx::fragment_state_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 0] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 1] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 2] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 3] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 4] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 5] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 6] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 7] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 8] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 9] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 10] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 11] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 12] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 13] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 14] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 15] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 16] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 17] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 18] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 19] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 20] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 21] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 22] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 23] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 24] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 25] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 26] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 27] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 28] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 29] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 30] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLYGON_STIPPLE_PATTERN + 31] = rsx::polygon_stipple_pattern_dirty;
state_signals[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = rsx::polygon_offset_state_dirty;
state_signals[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = rsx::polygon_offset_state_dirty;
state_signals[NV4097_SET_POLYGON_OFFSET_BIAS] = rsx::polygon_offset_state_dirty;
state_signals[NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE] = rsx::depth_bounds_state_dirty;
state_signals[NV4097_SET_DEPTH_BOUNDS_MIN] = rsx::depth_bounds_state_dirty;
state_signals[NV4097_SET_DEPTH_BOUNDS_MAX] = rsx::depth_bounds_state_dirty;
state_signals[NV4097_SET_CULL_FACE_ENABLE] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_ZMIN_MAX_CONTROL] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_LOGIC_OP_ENABLE] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_LOGIC_OP] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_BLEND_ENABLE] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_BLEND_ENABLE_MRT] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_STENCIL_FUNC] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_BACK_STENCIL_FUNC] = rsx::pipeline_config_dirty;
state_signals[NV4097_SET_RESTART_INDEX_ENABLE] = rsx::pipeline_config_dirty;
}
// Sanity checks
for (size_t id = 0; id < methods.size(); ++id)
{
if (methods[id] && state_signals[id])
{
rsx_log.error("FIXME: Method register 0x%x is registered as a method and signal. The signal will be ignored.");
}
}
}
void rsx_state::reset()
{
// TODO: Name unnamed registers and constants, better group methods
registers[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = 0x56616661;
registers[NV4097_SET_OBJECT] = 0x31337000;
registers[NV4097_SET_CONTEXT_DMA_NOTIFIES] = 0x66604200;
registers[NV4097_SET_CONTEXT_DMA_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_B] = 0xfeed0001;
registers[NV4097_SET_CONTEXT_DMA_COLOR_B] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_STATE] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_COLOR_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_ZETA] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_VERTEX_A] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_VERTEX_B] = 0xfeed0001;
registers[NV4097_SET_CONTEXT_DMA_SEMAPHORE] = 0x66606660;
registers[NV4097_SET_CONTEXT_DMA_REPORT] = 0x66626660;
registers[NV4097_SET_CONTEXT_DMA_CLIP_ID] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_CULL_DATA] = 0x0;
registers[NV4097_SET_CONTEXT_DMA_COLOR_C] = 0xfeed0000;
registers[NV4097_SET_CONTEXT_DMA_COLOR_D] = 0xfeed0000;
registers[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = 0x66616661;
registers[NV4097_SET_SURFACE_CLIP_HORIZONTAL] = 0x0;
registers[NV4097_SET_SURFACE_CLIP_VERTICAL] = 0x0;
registers[NV4097_SET_SURFACE_FORMAT] = 0x121;
registers[NV4097_SET_SURFACE_PITCH_A] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_AOFFSET] = 0x0;
registers[NV4097_SET_SURFACE_ZETA_OFFSET] = 0x0;
registers[NV4097_SET_SURFACE_COLOR_BOFFSET] = 0x0;
registers[NV4097_SET_SURFACE_PITCH_B] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_TARGET] = 0x1;
registers[0x224 / 4] = 0x80;
registers[0x228 / 4] = 0x100;
registers[NV4097_SET_SURFACE_PITCH_Z] = 0x40;
registers[0x230 / 4] = 0x0;
registers[NV4097_SET_SURFACE_PITCH_C] = 0x40;
registers[NV4097_SET_SURFACE_PITCH_D] = 0x40;
registers[NV4097_SET_SURFACE_COLOR_COFFSET] = 0x0;
registers[NV4097_SET_SURFACE_COLOR_DOFFSET] = 0x0;
registers[0x1d80 / 4] = 0x3;
registers[NV4097_SET_WINDOW_OFFSET] = 0x0;
registers[0x02bc / 4] = 0x0;
registers[0x02c0 / 4] = 0xfff0000;
registers[0x02c4 / 4] = 0xfff0000;
registers[0x02c8 / 4] = 0xfff0000;
registers[0x02cc / 4] = 0xfff0000;
registers[0x02d0 / 4] = 0xfff0000;
registers[0x02d4 / 4] = 0xfff0000;
registers[0x02d8 / 4] = 0xfff0000;
registers[0x02dc / 4] = 0xfff0000;
registers[0x02e0 / 4] = 0xfff0000;
registers[0x02e4 / 4] = 0xfff0000;
registers[0x02e8 / 4] = 0xfff0000;
registers[0x02ec / 4] = 0xfff0000;
registers[0x02f0 / 4] = 0xfff0000;
registers[0x02f4 / 4] = 0xfff0000;
registers[0x02f8 / 4] = 0xfff0000;
registers[0x02fc / 4] = 0xfff0000;
registers[0x1d98 / 4] = 0xfff0000;
registers[0x1d9c / 4] = 0xfff0000;
registers[0x1da4 / 4] = 0x0;
registers[NV4097_SET_CONTROL0] = 0x100000;
registers[0x1454 / 4] = 0x0;
registers[NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK] = 0x3fffff;
registers[NV4097_SET_FREQUENCY_DIVIDER_OPERATION] = 0x0;
registers[NV4097_SET_ATTRIB_COLOR] = 0x6144321;
registers[NV4097_SET_ATTRIB_TEX_COORD] = 0xedcba987;
registers[NV4097_SET_ATTRIB_TEX_COORD_EX] = 0x6f;
registers[NV4097_SET_ATTRIB_UCLIP0] = 0x171615;
registers[NV4097_SET_ATTRIB_UCLIP1] = 0x1b1a19;
registers[NV4097_SET_TEX_COORD_CONTROL] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 1] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 2] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 3] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 4] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 5] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 6] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 7] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 8] = 0x0;
registers[NV4097_SET_TEX_COORD_CONTROL + 9] = 0x0;
registers[0xa0c / 4] = 0x0;
registers[0xa60 / 4] = 0x0;
registers[NV4097_SET_POLY_OFFSET_LINE_ENABLE] = 0x0;
registers[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_BIAS] = 0x0;
registers[0x1428 / 4] = 0x1;
registers[NV4097_SET_SHADER_WINDOW] = 0x1000;
registers[0x1e94 / 4] = 0x11;
registers[0x1450 / 4] = 0x80003;
registers[0x1d64 / 4] = 0x2000000;
registers[0x145c / 4] = 0x1;
registers[NV4097_SET_REDUCE_DST_COLOR] = 0x1;
registers[NV4097_SET_TEXTURE_CONTROL2] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 1] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 2] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 3] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 4] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 5] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 6] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 7] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 8] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 9] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 10] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 11] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 12] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 13] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 14] = 0x2dc8;
registers[NV4097_SET_TEXTURE_CONTROL2 + 15] = 0x2dc8;
registers[NV4097_SET_FOG_MODE] = 0x800;
registers[NV4097_SET_FOG_PARAMS] = 0x0;
registers[NV4097_SET_FOG_PARAMS + 1] = 0x0;
registers[NV4097_SET_FOG_PARAMS + 2] = 0x0;
registers[0x240 / 4] = 0xffff;
registers[0x244 / 4] = 0x0;
registers[0x248 / 4] = 0x0;
registers[0x24c / 4] = 0x0;
registers[NV4097_SET_ANISO_SPREAD] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 1] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 2] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 3] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 4] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 5] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 6] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 7] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 8] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 9] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 10] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 11] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 12] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 13] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 14] = 0x10101;
registers[NV4097_SET_ANISO_SPREAD + 15] = 0x10101;
registers[0x400 / 4] = 0x7421;
registers[0x404 / 4] = 0x7421;
registers[0x408 / 4] = 0x7421;
registers[0x40c / 4] = 0x7421;
registers[0x410 / 4] = 0x7421;
registers[0x414 / 4] = 0x7421;
registers[0x418 / 4] = 0x7421;
registers[0x41c / 4] = 0x7421;
registers[0x420 / 4] = 0x7421;
registers[0x424 / 4] = 0x7421;
registers[0x428 / 4] = 0x7421;
registers[0x42c / 4] = 0x7421;
registers[0x430 / 4] = 0x7421;
registers[0x434 / 4] = 0x7421;
registers[0x438 / 4] = 0x7421;
registers[0x43c / 4] = 0x7421;
registers[0x440 / 4] = 0x9aabaa98;
registers[0x444 / 4] = 0x66666789;
registers[0x448 / 4] = 0x98766666;
registers[0x44c / 4] = 0x89aabaa9;
registers[0x450 / 4] = 0x99999999;
registers[0x454 / 4] = 0x88888889;
registers[0x458 / 4] = 0x98888888;
registers[0x45c / 4] = 0x99999999;
registers[0x460 / 4] = 0x56676654;
registers[0x464 / 4] = 0x33333345;
registers[0x468 / 4] = 0x54333333;
registers[0x46c / 4] = 0x45667665;
registers[0x470 / 4] = 0xaabbba99;
registers[0x474 / 4] = 0x66667899;
registers[0x478 / 4] = 0x99876666;
registers[0x47c / 4] = 0x99abbbaa;
registers[NV4097_SET_VERTEX_DATA_BASE_OFFSET] = 0x0;
registers[NV4097_SET_VERTEX_DATA_BASE_INDEX] = 0x0;
registers[GCM_SET_DRIVER_OBJECT] = 0xcafebabe;
registers[NV4097_SET_ALPHA_FUNC] = 0x207;
registers[NV4097_SET_ALPHA_REF] = 0x0;
registers[NV4097_SET_ALPHA_TEST_ENABLE] = 0x0;
registers[NV4097_SET_BACK_STENCIL_FUNC] = 0x207;
registers[NV4097_SET_BACK_STENCIL_FUNC_REF] = 0x0;
registers[NV4097_SET_BACK_STENCIL_FUNC_MASK] = 0xff;
registers[NV4097_SET_BACK_STENCIL_MASK] = 0xff;
registers[NV4097_SET_BACK_STENCIL_OP_FAIL] = 0x1e00;
registers[NV4097_SET_BACK_STENCIL_OP_ZFAIL] = 0x1e00;
registers[NV4097_SET_BACK_STENCIL_OP_ZPASS] = 0x1e00;
registers[NV4097_SET_BLEND_COLOR] = 0x0;
registers[NV4097_SET_BLEND_COLOR2] = 0x0;
registers[NV4097_SET_BLEND_ENABLE] = 0x0;
registers[NV4097_SET_BLEND_ENABLE_MRT] = 0x0;
registers[NV4097_SET_BLEND_EQUATION] = 0x80068006;
registers[NV4097_SET_BLEND_FUNC_SFACTOR] = 0x10001;
registers[NV4097_SET_BLEND_FUNC_DFACTOR] = 0x0;
registers[NV4097_SET_ZSTENCIL_CLEAR_VALUE] = 0xffffff00;
registers[NV4097_CLEAR_SURFACE] = 0x0;
registers[NV4097_NO_OPERATION] = 0x0;
registers[NV4097_SET_COLOR_MASK] = 0x1010101;
registers[NV4097_SET_CULL_FACE_ENABLE] = 0x0;
registers[NV4097_SET_CULL_FACE] = 0x405;
registers[NV4097_SET_DEPTH_BOUNDS_MIN] = 0x0;
registers[NV4097_SET_DEPTH_BOUNDS_MAX] = 0x3f800000;
registers[NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE] = 0x0;
registers[NV4097_SET_DEPTH_FUNC] = 0x201;
registers[NV4097_SET_DEPTH_MASK] = 0x1;
registers[NV4097_SET_DEPTH_TEST_ENABLE] = 0x0;
registers[NV4097_SET_DITHER_ENABLE] = 0x1;
registers[NV4097_SET_SHADER_PACKER] = 0x0;
registers[NV4097_SET_FREQUENCY_DIVIDER_OPERATION] = 0x0;
registers[NV4097_SET_FRONT_FACE] = 0x901;
registers[NV4097_SET_LINE_WIDTH] = 0x8;
registers[NV4097_SET_LOGIC_OP_ENABLE] = 0x0;
registers[NV4097_SET_LOGIC_OP] = 0x1503;
registers[NV4097_SET_POINT_SIZE] = 0x3f800000;
registers[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = 0x0;
registers[NV4097_SET_POLYGON_OFFSET_BIAS] = 0x0;
registers[NV4097_SET_RESTART_INDEX_ENABLE] = 0x0;
registers[NV4097_SET_RESTART_INDEX] = 0xffffffff;
registers[NV4097_SET_SCISSOR_HORIZONTAL] = 0x10000000;
registers[NV4097_SET_SCISSOR_VERTICAL] = 0x10000000;
registers[NV4097_SET_SHADE_MODE] = 0x1d01;
registers[NV4097_SET_STENCIL_FUNC] = 0x207;
registers[NV4097_SET_STENCIL_FUNC_REF] = 0x0;
registers[NV4097_SET_STENCIL_FUNC_MASK] = 0xff;
registers[NV4097_SET_STENCIL_MASK] = 0xff;
registers[NV4097_SET_STENCIL_OP_FAIL] = 0x1e00;
registers[NV4097_SET_STENCIL_OP_ZFAIL] = 0x1e00;
registers[NV4097_SET_STENCIL_OP_ZPASS] = 0x1e00;
registers[NV4097_SET_STENCIL_TEST_ENABLE] = 0x0;
registers[NV4097_SET_TEXTURE_ADDRESS] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 8] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 8] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 8] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 8] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 8] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 16] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 16] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 16] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 24] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 24] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 24] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 24] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 32] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 32] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 32] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 32] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 40] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 40] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 40] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 40] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 48] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 48] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 48] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 48] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 56] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 56] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 56] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 56] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 64] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 64] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 64] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 64] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 72] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 72] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 72] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 72] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 80] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 80] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 80] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 80] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 88] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 88] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 88] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 88] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 96] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 96] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 96] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 96] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 104] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 104] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 104] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 104] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 112] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 112] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 112] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 112] = 0x2052000;
registers[NV4097_SET_TEXTURE_ADDRESS + 120] = 0x30101;
registers[NV4097_SET_TEXTURE_BORDER_COLOR + 120] = 0x0;
registers[NV4097_SET_TEXTURE_CONTROL0 + 120] = 0x60000;
registers[NV4097_SET_TEXTURE_FILTER + 120] = 0x2052000;
registers[NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 1] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 1] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 2] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 2] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 3] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 3] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 4] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 4] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 5] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 5] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 6] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 6] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 7] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 7] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 8] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 8] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 9] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 9] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 10] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 10] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 11] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 11] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 12] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 12] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 13] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 13] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 14] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 14] = 0x0;
registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + 15] = 0x2;
registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + 15] = 0x0;
registers[NV4097_SET_VIEWPORT_HORIZONTAL] = 0x10000000;
registers[NV4097_SET_VIEWPORT_VERTICAL] = 0x10000000;
registers[NV4097_SET_CLIP_MIN] = 0x0;
registers[NV4097_SET_CLIP_MAX] = 0x3f800000;
registers[NV4097_SET_VIEWPORT_OFFSET + 0] = 0x45000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 1] = 0x45000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 2] = 0x3f000000;
registers[NV4097_SET_VIEWPORT_OFFSET + 3] = 0x0;
registers[NV4097_SET_VIEWPORT_SCALE + 0] = 0x45000000;
registers[NV4097_SET_VIEWPORT_SCALE + 1] = 0x45000000;
registers[NV4097_SET_VIEWPORT_SCALE + 2] = 0x3f000000;
registers[NV4097_SET_VIEWPORT_SCALE + 3] = 0x0;
// NOTE: Realhw emits this sequence twice, likely to work around a hardware bug. Similar behavior can be seen in other buggy register blocks
//registers[NV4097_SET_VIEWPORT_OFFSET + 0] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 1] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 2] = 0x3f000000;
//registers[NV4097_SET_VIEWPORT_OFFSET + 3] = 0x0;
//registers[NV4097_SET_VIEWPORT_SCALE + 0] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 1] = 0x45000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 2] = 0x3f000000;
//registers[NV4097_SET_VIEWPORT_SCALE + 3] = 0x0;
registers[NV4097_SET_ANTI_ALIASING_CONTROL] = 0xffff0000;
registers[NV4097_SET_BACK_POLYGON_MODE] = 0x1b02;
registers[NV4097_SET_COLOR_CLEAR_VALUE] = 0x0;
registers[NV4097_SET_COLOR_MASK_MRT] = 0x0;
registers[NV4097_SET_FRONT_POLYGON_MODE] = 0x1b02;
registers[NV4097_SET_LINE_SMOOTH_ENABLE] = 0x0;
registers[NV4097_SET_LINE_STIPPLE] = 0x0;
registers[NV4097_SET_POINT_PARAMS_ENABLE] = 0x0;
registers[NV4097_SET_POINT_SPRITE_CONTROL] = 0x0;
registers[NV4097_SET_POLY_SMOOTH_ENABLE] = 0x0;
registers[NV4097_SET_POLYGON_STIPPLE] = 0x0;
registers[NV4097_SET_RENDER_ENABLE] = 0x1000000;
registers[NV4097_SET_USER_CLIP_PLANE_CONTROL] = 0x0;
registers[NV4097_SET_VERTEX_ATTRIB_INPUT_MASK] = 0xffff;
registers[NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 8] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 8] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 8] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 8] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 16] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 16] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 16] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 16] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + 24] = 0x101;
registers[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + 24] = 0x0;
registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + 24] = 0x60000;
registers[NV4097_SET_VERTEX_TEXTURE_FILTER + 24] = 0x0;
registers[NV4097_SET_CYLINDRICAL_WRAP] = 0x0;
registers[NV4097_SET_ZMIN_MAX_CONTROL] = 0x1;
registers[NV4097_SET_TWO_SIDE_LIGHT_EN] = 0x0;
registers[NV4097_SET_TRANSFORM_BRANCH_BITS] = 0x0;
registers[NV4097_SET_NO_PARANOID_TEXTURE_FETCHES] = 0x0;
registers[NV0039_SET_OBJECT] = 0x31337303;
registers[NV0039_SET_CONTEXT_DMA_NOTIFIES] = 0x66604200;
registers[NV0039_SET_CONTEXT_DMA_BUFFER_IN] = 0xfeed0001;
registers[NV0039_SET_CONTEXT_DMA_BUFFER_OUT] = 0xfeed0000;
registers[NV3062_SET_OBJECT] = 0x313371c3;
registers[NV3062_SET_CONTEXT_DMA_NOTIFIES] = 0x66604200;
registers[NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE] = 0xfeed0000;
registers[NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN] = 0xfeed0000;
registers[0xa000 / 4] = 0x31337808;
registers[0xa180 / 4] = 0x66604200;
registers[0xa184 / 4] = 0x0;
registers[0xa188 / 4] = 0x0;
registers[0xa18c / 4] = 0x0;
registers[0xa190 / 4] = 0x0;
registers[0xa194 / 4] = 0x0;
registers[0xa198 / 4] = 0x0;
registers[0xa19c / 4] = 0x313371c3;
registers[0xa2fc / 4] = 0x3;
registers[0xa300 / 4] = 0x4;
registers[0x8000 / 4] = 0x31337a73;
registers[0x8180 / 4] = 0x66604200;
registers[0x8184 / 4] = 0xfeed0000;
registers[0xc000 / 4] = 0x3137af00;
registers[0xc180 / 4] = 0x66604200;
registers[NV406E_SEMAPHORE_OFFSET] = 0x10;
}
void rsx_state::decode(u32 reg, u32 value)
{
	// Commit the incoming write to the register file, keeping the value it
	// displaced in `latch` so callers can inspect the previous state.
	const u32 previous = registers[reg];
	registers[reg] = value;
	latch = previous;
}
bool rsx_state::test(u32 reg, u32 value) const
{
	// True when the register's current contents match `value` exactly.
	const u32 current = registers[reg];
	return current == value;
}
namespace method_detail
{
template <u32 Id, u32 Step, u32 Count, template<u32> class T, u32 Index = 0>
struct bind_range_impl_t
{
static inline void impl()
{
methods[Id] = &T<Index>::impl;
if constexpr (Count > 1)
{
bind_range_impl_t<Id + Step, Step, Count - 1, T, Index + 1>::impl();
}
}
};
template <u32 Id, u32 Step, u32 Count, template<u32> class T, u32 Index = 0>
static inline void bind_range()
{
static_assert(Step && Count && Id + u64{Step} * (Count - 1) < 0x10000 / 4);
bind_range_impl_t<Id, Step, Count, T, Index>::impl();
}
}
// TODO: implement this as virtual function: rsx::thread::init_methods() or something
// TODO: this is unused
// One-shot initializer for the global RSX method dispatch table (`methods`).
// Pass 1: fill the whole table with &invalid_method, then explicitly mark every
// register known to exist as nullptr ("valid id, no dedicated handler").
// Pass 2: bind real handler callbacks over selected entries. Later assignments
// override earlier ones, so ordering inside this lambda is significant.
static const bool s_methods_init = []() -> bool
{
using namespace method_detail;
methods.fill(&invalid_method);
// Bind a single method id to a handler (bounds-checked via ::at32)
auto bind = [](u32 id, rsx_method_t func)
{
::at32(methods, id) = func;
};
// Bind `count` entries starting at `id`, spaced `step` registers apart;
// the ensure() guards against ranges running past the 0x10000-byte space
auto bind_array = [](u32 id, u32 step, u32 count, rsx_method_t func)
{
ensure(step && count && id + u64{step} * (count - 1) < 0x10000 / 4);
for (u32 i = id; i < id + count * step; i += step)
{
methods[i] = func;
}
};
// NV40_CHANNEL_DMA (NV406E)
methods[NV406E_SET_REFERENCE] = nullptr;
methods[NV406E_SET_CONTEXT_DMA_SEMAPHORE] = nullptr;
methods[NV406E_SEMAPHORE_OFFSET] = nullptr;
methods[NV406E_SEMAPHORE_ACQUIRE] = nullptr;
methods[NV406E_SEMAPHORE_RELEASE] = nullptr;
// NV40_CURIE_PRIMITIVE (NV4097)
methods[NV4097_SET_OBJECT] = nullptr;
methods[NV4097_NO_OPERATION] = nullptr;
methods[NV4097_NOTIFY] = nullptr;
methods[NV4097_WAIT_FOR_IDLE] = nullptr;
methods[NV4097_PM_TRIGGER] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_A] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_B] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_COLOR_B] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_STATE] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_COLOR_A] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_ZETA] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_VERTEX_A] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_VERTEX_B] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_SEMAPHORE] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_REPORT] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_CLIP_ID] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_CULL_DATA] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_COLOR_C] = nullptr;
methods[NV4097_SET_CONTEXT_DMA_COLOR_D] = nullptr;
methods[NV4097_SET_SURFACE_CLIP_HORIZONTAL] = nullptr;
methods[NV4097_SET_SURFACE_CLIP_VERTICAL] = nullptr;
methods[NV4097_SET_SURFACE_FORMAT] = nullptr;
methods[NV4097_SET_SURFACE_PITCH_A] = nullptr;
methods[NV4097_SET_SURFACE_COLOR_AOFFSET] = nullptr;
methods[NV4097_SET_SURFACE_ZETA_OFFSET] = nullptr;
methods[NV4097_SET_SURFACE_COLOR_BOFFSET] = nullptr;
methods[NV4097_SET_SURFACE_PITCH_B] = nullptr;
methods[NV4097_SET_SURFACE_COLOR_TARGET] = nullptr;
// Raw register offsets below have no named constant in the headers;
// marked valid so writes to them are not reported as invalid methods
methods[0x224 >> 2] = nullptr;
methods[0x228 >> 2] = nullptr;
methods[0x230 >> 2] = nullptr;
methods[NV4097_SET_SURFACE_PITCH_Z] = nullptr;
methods[NV4097_INVALIDATE_ZCULL] = nullptr;
methods[NV4097_SET_CYLINDRICAL_WRAP] = nullptr;
methods[NV4097_SET_CYLINDRICAL_WRAP1] = nullptr;
methods[0x240 >> 2] = nullptr;
methods[0x244 >> 2] = nullptr;
methods[0x248 >> 2] = nullptr;
methods[0x24C >> 2] = nullptr;
methods[NV4097_SET_SURFACE_PITCH_C] = nullptr;
methods[NV4097_SET_SURFACE_PITCH_D] = nullptr;
methods[NV4097_SET_SURFACE_COLOR_COFFSET] = nullptr;
methods[NV4097_SET_SURFACE_COLOR_DOFFSET] = nullptr;
methods[NV4097_SET_WINDOW_OFFSET] = nullptr;
methods[NV4097_SET_WINDOW_CLIP_TYPE] = nullptr;
methods[NV4097_SET_WINDOW_CLIP_HORIZONTAL] = nullptr;
methods[NV4097_SET_WINDOW_CLIP_VERTICAL] = nullptr;
methods[0x2c8 >> 2] = nullptr;
methods[0x2cc >> 2] = nullptr;
methods[0x2d0 >> 2] = nullptr;
methods[0x2d4 >> 2] = nullptr;
methods[0x2d8 >> 2] = nullptr;
methods[0x2dc >> 2] = nullptr;
methods[0x2e0 >> 2] = nullptr;
methods[0x2e4 >> 2] = nullptr;
methods[0x2e8 >> 2] = nullptr;
methods[0x2ec >> 2] = nullptr;
methods[0x2f0 >> 2] = nullptr;
methods[0x2f4 >> 2] = nullptr;
methods[0x2f8 >> 2] = nullptr;
methods[0x2fc >> 2] = nullptr;
methods[NV4097_SET_DITHER_ENABLE] = nullptr;
methods[NV4097_SET_ALPHA_TEST_ENABLE] = nullptr;
methods[NV4097_SET_ALPHA_FUNC] = nullptr;
methods[NV4097_SET_ALPHA_REF] = nullptr;
methods[NV4097_SET_BLEND_ENABLE] = nullptr;
methods[NV4097_SET_BLEND_FUNC_SFACTOR] = nullptr;
methods[NV4097_SET_BLEND_FUNC_DFACTOR] = nullptr;
methods[NV4097_SET_BLEND_COLOR] = nullptr;
methods[NV4097_SET_BLEND_EQUATION] = nullptr;
methods[NV4097_SET_COLOR_MASK] = nullptr;
methods[NV4097_SET_STENCIL_TEST_ENABLE] = nullptr;
methods[NV4097_SET_STENCIL_MASK] = nullptr;
methods[NV4097_SET_STENCIL_FUNC] = nullptr;
methods[NV4097_SET_STENCIL_FUNC_REF] = nullptr;
methods[NV4097_SET_STENCIL_FUNC_MASK] = nullptr;
methods[NV4097_SET_STENCIL_OP_FAIL] = nullptr;
methods[NV4097_SET_STENCIL_OP_ZFAIL] = nullptr;
methods[NV4097_SET_STENCIL_OP_ZPASS] = nullptr;
methods[NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE] = nullptr;
methods[NV4097_SET_BACK_STENCIL_MASK] = nullptr;
methods[NV4097_SET_BACK_STENCIL_FUNC] = nullptr;
methods[NV4097_SET_BACK_STENCIL_FUNC_REF] = nullptr;
methods[NV4097_SET_BACK_STENCIL_FUNC_MASK] = nullptr;
methods[NV4097_SET_BACK_STENCIL_OP_FAIL] = nullptr;
methods[NV4097_SET_BACK_STENCIL_OP_ZFAIL] = nullptr;
methods[NV4097_SET_BACK_STENCIL_OP_ZPASS] = nullptr;
methods[NV4097_SET_SHADE_MODE] = nullptr;
methods[NV4097_SET_BLEND_ENABLE_MRT] = nullptr;
methods[NV4097_SET_COLOR_MASK_MRT] = nullptr;
methods[NV4097_SET_LOGIC_OP_ENABLE] = nullptr;
methods[NV4097_SET_LOGIC_OP] = nullptr;
methods[NV4097_SET_BLEND_COLOR2] = nullptr;
methods[NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE] = nullptr;
methods[NV4097_SET_DEPTH_BOUNDS_MIN] = nullptr;
methods[NV4097_SET_DEPTH_BOUNDS_MAX] = nullptr;
methods[NV4097_SET_CLIP_MIN] = nullptr;
methods[NV4097_SET_CLIP_MAX] = nullptr;
methods[NV4097_SET_CONTROL0] = nullptr;
methods[NV4097_SET_LINE_WIDTH] = nullptr;
methods[NV4097_SET_LINE_SMOOTH_ENABLE] = nullptr;
methods[NV4097_SET_ANISO_SPREAD] = nullptr;
methods[NV4097_SET_SCISSOR_HORIZONTAL] = nullptr;
methods[NV4097_SET_SCISSOR_VERTICAL] = nullptr;
methods[NV4097_SET_FOG_MODE] = nullptr;
methods[NV4097_SET_FOG_PARAMS] = nullptr;
methods[NV4097_SET_FOG_PARAMS + 1] = nullptr;
methods[0x8d8 >> 2] = nullptr;
methods[NV4097_SET_SHADER_PROGRAM] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_OFFSET] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_FORMAT] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_ADDRESS] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_CONTROL0] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_CONTROL3] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_FILTER] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT] = nullptr;
methods[NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR] = nullptr;
methods[NV4097_SET_VIEWPORT_HORIZONTAL] = nullptr;
methods[NV4097_SET_VIEWPORT_VERTICAL] = nullptr;
methods[NV4097_SET_POINT_CENTER_MODE] = nullptr;
methods[NV4097_ZCULL_SYNC] = nullptr;
methods[NV4097_SET_VIEWPORT_OFFSET] = nullptr;
methods[NV4097_SET_VIEWPORT_OFFSET + 1] = nullptr;
methods[NV4097_SET_VIEWPORT_OFFSET + 2] = nullptr;
methods[NV4097_SET_VIEWPORT_OFFSET + 3] = nullptr;
methods[NV4097_SET_VIEWPORT_SCALE] = nullptr;
methods[NV4097_SET_VIEWPORT_SCALE + 1] = nullptr;
methods[NV4097_SET_VIEWPORT_SCALE + 2] = nullptr;
methods[NV4097_SET_VIEWPORT_SCALE + 3] = nullptr;
methods[NV4097_SET_POLY_OFFSET_POINT_ENABLE] = nullptr;
methods[NV4097_SET_POLY_OFFSET_LINE_ENABLE] = nullptr;
methods[NV4097_SET_POLY_OFFSET_FILL_ENABLE] = nullptr;
methods[NV4097_SET_DEPTH_FUNC] = nullptr;
methods[NV4097_SET_DEPTH_MASK] = nullptr;
methods[NV4097_SET_DEPTH_TEST_ENABLE] = nullptr;
methods[NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR] = nullptr;
methods[NV4097_SET_POLYGON_OFFSET_BIAS] = nullptr;
methods[NV4097_SET_VERTEX_DATA_SCALED4S_M] = nullptr;
methods[NV4097_SET_TEXTURE_CONTROL2] = nullptr;
methods[NV4097_SET_TEX_COORD_CONTROL] = nullptr;
methods[NV4097_SET_TRANSFORM_PROGRAM] = nullptr;
methods[NV4097_SET_SPECULAR_ENABLE] = nullptr;
methods[NV4097_SET_TWO_SIDE_LIGHT_EN] = nullptr;
methods[NV4097_CLEAR_ZCULL_SURFACE] = nullptr;
methods[NV4097_SET_PERFORMANCE_PARAMS] = nullptr;
methods[NV4097_SET_FLAT_SHADE_OP] = nullptr;
methods[NV4097_SET_EDGE_FLAG] = nullptr;
methods[NV4097_SET_USER_CLIP_PLANE_CONTROL] = nullptr;
methods[NV4097_SET_POLYGON_STIPPLE] = nullptr;
methods[NV4097_SET_POLYGON_STIPPLE_PATTERN] = nullptr;
methods[NV4097_SET_VERTEX_DATA3F_M] = nullptr;
methods[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET] = nullptr;
methods[NV4097_INVALIDATE_VERTEX_CACHE_FILE] = nullptr;
methods[NV4097_INVALIDATE_VERTEX_FILE] = nullptr;
methods[NV4097_PIPE_NOP] = nullptr;
methods[NV4097_SET_VERTEX_DATA_BASE_OFFSET] = nullptr;
methods[NV4097_SET_VERTEX_DATA_BASE_INDEX] = nullptr;
methods[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT] = nullptr;
methods[NV4097_CLEAR_REPORT_VALUE] = nullptr;
methods[NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE] = nullptr;
methods[NV4097_GET_REPORT] = nullptr;
methods[NV4097_SET_ZCULL_STATS_ENABLE] = nullptr;
methods[NV4097_SET_BEGIN_END] = nullptr;
methods[NV4097_ARRAY_ELEMENT16] = nullptr;
methods[NV4097_ARRAY_ELEMENT32] = nullptr;
methods[NV4097_DRAW_ARRAYS] = nullptr;
methods[NV4097_INLINE_ARRAY] = nullptr;
methods[NV4097_SET_INDEX_ARRAY_ADDRESS] = nullptr;
methods[NV4097_SET_INDEX_ARRAY_DMA] = nullptr;
methods[NV4097_DRAW_INDEX_ARRAY] = nullptr;
methods[NV4097_SET_FRONT_POLYGON_MODE] = nullptr;
methods[NV4097_SET_BACK_POLYGON_MODE] = nullptr;
methods[NV4097_SET_CULL_FACE] = nullptr;
methods[NV4097_SET_FRONT_FACE] = nullptr;
methods[NV4097_SET_POLY_SMOOTH_ENABLE] = nullptr;
methods[NV4097_SET_CULL_FACE_ENABLE] = nullptr;
methods[NV4097_SET_TEXTURE_CONTROL3] = nullptr;
methods[NV4097_SET_VERTEX_DATA2F_M] = nullptr;
methods[NV4097_SET_VERTEX_DATA2S_M] = nullptr;
methods[NV4097_SET_VERTEX_DATA4UB_M] = nullptr;
methods[NV4097_SET_VERTEX_DATA4S_M] = nullptr;
methods[NV4097_SET_TEXTURE_OFFSET] = nullptr;
methods[NV4097_SET_TEXTURE_FORMAT] = nullptr;
methods[NV4097_SET_TEXTURE_ADDRESS] = nullptr;
methods[NV4097_SET_TEXTURE_CONTROL0] = nullptr;
methods[NV4097_SET_TEXTURE_CONTROL1] = nullptr;
methods[NV4097_SET_TEXTURE_FILTER] = nullptr;
methods[NV4097_SET_TEXTURE_IMAGE_RECT] = nullptr;
methods[NV4097_SET_TEXTURE_BORDER_COLOR] = nullptr;
methods[NV4097_SET_VERTEX_DATA4F_M] = nullptr;
methods[NV4097_SET_COLOR_KEY_COLOR] = nullptr;
methods[0x1d04 >> 2] = nullptr;
methods[NV4097_SET_SHADER_CONTROL] = nullptr;
methods[NV4097_SET_INDEXED_CONSTANT_READ_LIMITS] = nullptr;
methods[NV4097_SET_SEMAPHORE_OFFSET] = nullptr;
methods[NV4097_BACK_END_WRITE_SEMAPHORE_RELEASE] = nullptr;
methods[NV4097_TEXTURE_READ_SEMAPHORE_RELEASE] = nullptr;
methods[NV4097_SET_ZMIN_MAX_CONTROL] = nullptr;
methods[NV4097_SET_ANTI_ALIASING_CONTROL] = nullptr;
methods[NV4097_SET_SURFACE_COMPRESSION] = nullptr;
methods[NV4097_SET_ZCULL_EN] = nullptr;
methods[NV4097_SET_SHADER_WINDOW] = nullptr;
methods[NV4097_SET_ZSTENCIL_CLEAR_VALUE] = nullptr;
methods[NV4097_SET_COLOR_CLEAR_VALUE] = nullptr;
methods[NV4097_CLEAR_SURFACE] = nullptr;
methods[NV4097_SET_CLEAR_RECT_HORIZONTAL] = nullptr;
methods[NV4097_SET_CLEAR_RECT_VERTICAL] = nullptr;
methods[NV4097_SET_CLIP_ID_TEST_ENABLE] = nullptr;
methods[NV4097_SET_RESTART_INDEX_ENABLE] = nullptr;
methods[NV4097_SET_RESTART_INDEX] = nullptr;
methods[NV4097_SET_LINE_STIPPLE] = nullptr;
methods[NV4097_SET_LINE_STIPPLE_PATTERN] = nullptr;
methods[NV4097_SET_VERTEX_DATA1F_M] = nullptr;
methods[NV4097_SET_TRANSFORM_EXECUTION_MODE] = nullptr;
methods[NV4097_SET_RENDER_ENABLE] = nullptr;
methods[NV4097_SET_TRANSFORM_PROGRAM_LOAD] = nullptr;
methods[NV4097_SET_TRANSFORM_PROGRAM_START] = nullptr;
methods[NV4097_SET_ZCULL_CONTROL0] = nullptr;
methods[NV4097_SET_ZCULL_CONTROL1] = nullptr;
methods[NV4097_SET_SCULL_CONTROL] = nullptr;
methods[NV4097_SET_POINT_SIZE] = nullptr;
methods[NV4097_SET_POINT_PARAMS_ENABLE] = nullptr;
methods[NV4097_SET_POINT_SPRITE_CONTROL] = nullptr;
methods[NV4097_SET_TRANSFORM_TIMEOUT] = nullptr;
methods[NV4097_SET_TRANSFORM_CONSTANT_LOAD] = nullptr;
methods[NV4097_SET_TRANSFORM_CONSTANT] = nullptr;
methods[NV4097_SET_FREQUENCY_DIVIDER_OPERATION] = nullptr;
methods[NV4097_SET_ATTRIB_COLOR] = nullptr;
methods[NV4097_SET_ATTRIB_TEX_COORD] = nullptr;
methods[NV4097_SET_ATTRIB_TEX_COORD_EX] = nullptr;
methods[NV4097_SET_ATTRIB_UCLIP0] = nullptr;
methods[NV4097_SET_ATTRIB_UCLIP1] = nullptr;
methods[NV4097_INVALIDATE_L2] = nullptr;
methods[NV4097_SET_REDUCE_DST_COLOR] = nullptr;
methods[NV4097_SET_NO_PARANOID_TEXTURE_FETCHES] = nullptr;
methods[NV4097_SET_SHADER_PACKER] = nullptr;
methods[NV4097_SET_VERTEX_ATTRIB_INPUT_MASK] = nullptr;
methods[NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK] = nullptr;
methods[NV4097_SET_TRANSFORM_BRANCH_BITS] = nullptr;
// NV03_MEMORY_TO_MEMORY_FORMAT (NV0039)
methods[NV0039_SET_OBJECT] = nullptr;
// 0x2100 has no named constant here; presumably NV0039-adjacent — traced for
// investigation (TODO confirm against hardware docs)
bind(0x2100 >> 2, trace_method);
methods[NV0039_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV0039_SET_CONTEXT_DMA_BUFFER_IN] = nullptr;
methods[NV0039_SET_CONTEXT_DMA_BUFFER_OUT] = nullptr;
methods[NV0039_OFFSET_IN] = nullptr;
methods[NV0039_OFFSET_OUT] = nullptr;
methods[NV0039_PITCH_IN] = nullptr;
methods[NV0039_PITCH_OUT] = nullptr;
methods[NV0039_LINE_LENGTH_IN] = nullptr;
methods[NV0039_LINE_COUNT] = nullptr;
methods[NV0039_FORMAT] = nullptr;
methods[NV0039_BUFFER_NOTIFY] = nullptr;
// NV30_CONTEXT_SURFACES_2D (NV3062)
methods[NV3062_SET_OBJECT] = nullptr;
methods[NV3062_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE] = nullptr;
methods[NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN] = nullptr;
methods[NV3062_SET_COLOR_FORMAT] = nullptr;
methods[NV3062_SET_PITCH] = nullptr;
methods[NV3062_SET_OFFSET_SOURCE] = nullptr;
methods[NV3062_SET_OFFSET_DESTIN] = nullptr;
// NV30_CONTEXT_SURFACE_SWIZZLED (NV309E)
methods[NV309E_SET_OBJECT] = nullptr;
methods[NV309E_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV309E_SET_CONTEXT_DMA_IMAGE] = nullptr;
methods[NV309E_SET_FORMAT] = nullptr;
methods[NV309E_SET_OFFSET] = nullptr;
// NV30_IMAGE_FROM_CPU (NV308A)
methods[NV308A_SET_OBJECT] = nullptr;
methods[NV308A_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV308A_SET_CONTEXT_COLOR_KEY] = nullptr;
methods[NV308A_SET_CONTEXT_CLIP_RECTANGLE] = nullptr;
methods[NV308A_SET_CONTEXT_PATTERN] = nullptr;
methods[NV308A_SET_CONTEXT_ROP] = nullptr;
methods[NV308A_SET_CONTEXT_BETA1] = nullptr;
methods[NV308A_SET_CONTEXT_BETA4] = nullptr;
methods[NV308A_SET_CONTEXT_SURFACE] = nullptr;
methods[NV308A_SET_COLOR_CONVERSION] = nullptr;
methods[NV308A_SET_OPERATION] = nullptr;
methods[NV308A_SET_COLOR_FORMAT] = nullptr;
methods[NV308A_POINT] = nullptr;
methods[NV308A_SIZE_OUT] = nullptr;
methods[NV308A_SIZE_IN] = nullptr;
methods[NV308A_COLOR] = nullptr;
// NV30_SCALED_IMAGE_FROM_MEMORY (NV3089)
methods[NV3089_SET_OBJECT] = nullptr;
methods[NV3089_SET_CONTEXT_DMA_NOTIFIES] = nullptr;
methods[NV3089_SET_CONTEXT_DMA_IMAGE] = nullptr;
methods[NV3089_SET_CONTEXT_PATTERN] = nullptr;
methods[NV3089_SET_CONTEXT_ROP] = nullptr;
methods[NV3089_SET_CONTEXT_BETA1] = nullptr;
methods[NV3089_SET_CONTEXT_BETA4] = nullptr;
methods[NV3089_SET_CONTEXT_SURFACE] = nullptr;
methods[NV3089_SET_COLOR_CONVERSION] = nullptr;
methods[NV3089_SET_COLOR_FORMAT] = nullptr;
methods[NV3089_SET_OPERATION] = nullptr;
methods[NV3089_CLIP_POINT] = nullptr;
methods[NV3089_CLIP_SIZE] = nullptr;
methods[NV3089_IMAGE_OUT_POINT] = nullptr;
methods[NV3089_IMAGE_OUT_SIZE] = nullptr;
methods[NV3089_DS_DX] = nullptr;
methods[NV3089_DT_DY] = nullptr;
methods[NV3089_IMAGE_IN_SIZE] = nullptr;
methods[NV3089_IMAGE_IN_FORMAT] = nullptr;
methods[NV3089_IMAGE_IN_OFFSET] = nullptr;
methods[NV3089_IMAGE_IN] = nullptr;
// Some custom GCM methods
methods[GCM_SET_DRIVER_OBJECT] = nullptr;
methods[FIFO::FIFO_DRAW_BARRIER >> 2] = nullptr;
bind_array(GCM_FLIP_HEAD, 1, 2, nullptr);
bind_array(GCM_DRIVER_QUEUE, 1, 8, nullptr);
// Mark the remaining multi-register blocks as valid-but-unhandled
bind_array(0x400 >> 2, 1, 0x10, nullptr);
bind_array(0x440 >> 2, 1, 0x20, nullptr);
bind_array(NV4097_SET_ANISO_SPREAD, 1, 16, nullptr);
bind_array(NV4097_SET_VERTEX_TEXTURE_OFFSET, 1, 8 * 4, nullptr);
bind_array(NV4097_SET_VERTEX_DATA_SCALED4S_M, 1, 32, nullptr);
bind_array(NV4097_SET_TEXTURE_CONTROL2, 1, 16, nullptr);
bind_array(NV4097_SET_TEX_COORD_CONTROL, 1, 10, nullptr);
bind_array(NV4097_SET_TRANSFORM_PROGRAM, 1, 32, nullptr);
bind_array(NV4097_SET_POLYGON_STIPPLE_PATTERN, 1, 32, nullptr);
bind_array(NV4097_SET_VERTEX_DATA3F_M, 1, 64, nullptr);
bind_array(NV4097_SET_VERTEX_DATA_ARRAY_OFFSET, 1, 16, nullptr);
bind_array(NV4097_SET_VERTEX_DATA_ARRAY_FORMAT, 1, 16, nullptr);
bind_array(NV4097_SET_TEXTURE_CONTROL3, 1, 16, nullptr);
bind_array(NV4097_SET_VERTEX_DATA2F_M, 1, 32, nullptr);
bind_array(NV4097_SET_VERTEX_DATA2S_M, 1, 16, nullptr);
bind_array(NV4097_SET_VERTEX_DATA4UB_M, 1, 16, nullptr);
bind_array(NV4097_SET_VERTEX_DATA4S_M, 1, 32, nullptr);
bind_array(NV4097_SET_TEXTURE_OFFSET, 1, 8 * 16, nullptr);
bind_array(NV4097_SET_VERTEX_DATA4F_M, 1, 64, nullptr);
bind_array(NV4097_SET_VERTEX_DATA1F_M, 1, 16, nullptr);
bind_array(NV4097_SET_COLOR_KEY_COLOR, 1, 16, nullptr);
// Unknown (NV4097?)
bind(0x171c >> 2, trace_method);
// Pass 2: real handlers. These overwrite the nullptr placeholders above.
// NV406E
bind(NV406E_SET_REFERENCE, nv406e::set_reference);
bind(NV406E_SEMAPHORE_ACQUIRE, nv406e::semaphore_acquire);
bind(NV406E_SEMAPHORE_RELEASE, nv406e::semaphore_release);
// NV4097
bind(NV4097_SET_CULL_FACE, nv4097::set_face_property);
bind(NV4097_SET_FRONT_FACE, nv4097::set_face_property);
bind(NV4097_TEXTURE_READ_SEMAPHORE_RELEASE, nv4097::texture_read_semaphore_release);
bind(NV4097_BACK_END_WRITE_SEMAPHORE_RELEASE, nv4097::back_end_write_semaphore_release);
bind(NV4097_SET_BEGIN_END, nv4097::set_begin_end);
bind(NV4097_CLEAR_SURFACE, nv4097::clear);
bind(NV4097_DRAW_ARRAYS, nv4097::draw_arrays);
bind(NV4097_DRAW_INDEX_ARRAY, nv4097::draw_index_array);
bind(NV4097_INLINE_ARRAY, nv4097::draw_inline_array);
bind(NV4097_ARRAY_ELEMENT16, nv4097::set_array_element16);
bind(NV4097_ARRAY_ELEMENT32, nv4097::set_array_element32);
bind_range<NV4097_SET_VERTEX_DATA_SCALED4S_M, 1, 32, nv4097::set_vertex_data_scaled4s_m>();
bind_range<NV4097_SET_VERTEX_DATA4UB_M, 1, 16, nv4097::set_vertex_data4ub_m>();
bind_range<NV4097_SET_VERTEX_DATA1F_M, 1, 16, nv4097::set_vertex_data1f_m>();
bind_range<NV4097_SET_VERTEX_DATA2F_M, 1, 32, nv4097::set_vertex_data2f_m>();
bind_range<NV4097_SET_VERTEX_DATA3F_M, 1, 64, nv4097::set_vertex_data3f_m>();
bind_range<NV4097_SET_VERTEX_DATA4F_M, 1, 64, nv4097::set_vertex_data4f_m>();
bind_range<NV4097_SET_VERTEX_DATA2S_M, 1, 16, nv4097::set_vertex_data2s_m>();
bind_range<NV4097_SET_VERTEX_DATA4S_M, 1, 32, nv4097::set_vertex_data4s_m>();
bind(NV4097_SET_TRANSFORM_CONSTANT_LOAD, nv4097::set_transform_constant_load);
bind_array(NV4097_SET_TRANSFORM_CONSTANT, 1, 32, nv4097::set_transform_constant::impl);
bind_array(NV4097_SET_TRANSFORM_PROGRAM, 1, 32, nv4097::set_transform_program::impl);
bind(NV4097_GET_REPORT, nv4097::get_report);
bind(NV4097_CLEAR_REPORT_VALUE, nv4097::clear_report_value);
// Any write that can change render-target state flips the surface dirty bit
bind(NV4097_SET_SURFACE_CLIP_HORIZONTAL, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_CLIP_VERTICAL, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_COLOR_AOFFSET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_COLOR_BOFFSET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_COLOR_COFFSET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_COLOR_DOFFSET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_COLOR_TARGET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_ZETA_OFFSET, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_CONTEXT_DMA_COLOR_A, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_CONTEXT_DMA_COLOR_B, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_CONTEXT_DMA_COLOR_C, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_CONTEXT_DMA_COLOR_D, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_CONTEXT_DMA_ZETA, nv4097::set_surface_dirty_bit);
bind(NV4097_NOTIFY, nv4097::set_notify);
bind(NV4097_SET_SURFACE_FORMAT, nv4097::set_surface_format);
bind(NV4097_SET_SURFACE_PITCH_A, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_PITCH_B, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_PITCH_C, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_PITCH_D, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_SURFACE_PITCH_Z, nv4097::set_surface_dirty_bit);
bind(NV4097_SET_WINDOW_OFFSET, nv4097::set_surface_dirty_bit);
// Fragment textures: 16 units, 8 registers apart (except CONTROL2/3 which are packed)
bind_range<NV4097_SET_TEXTURE_OFFSET, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_FORMAT, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_ADDRESS, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_CONTROL0, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_CONTROL1, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_CONTROL2, 1, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_CONTROL3, 1, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_FILTER, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_IMAGE_RECT, 8, 16, nv4097::set_texture_dirty_bit>();
bind_range<NV4097_SET_TEXTURE_BORDER_COLOR, 8, 16, nv4097::set_texture_dirty_bit>();
// Vertex textures: 4 units, 8 registers apart
bind_range<NV4097_SET_VERTEX_TEXTURE_OFFSET, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_FORMAT, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_ADDRESS, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_CONTROL0, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_CONTROL3, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_FILTER, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind_range<NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR, 8, 4, nv4097::set_vertex_texture_dirty_bit>();
bind(NV4097_SET_RENDER_ENABLE, nv4097::set_render_mode);
bind(NV4097_SET_ZCULL_EN, nv4097::set_zcull_render_enable);
bind(NV4097_SET_ZCULL_STATS_ENABLE, nv4097::set_zcull_stats_enable);
bind(NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE, nv4097::set_zcull_pixel_count_enable);
bind(NV4097_CLEAR_ZCULL_SURFACE, nv4097::clear_zcull);
bind(NV4097_SET_DEPTH_TEST_ENABLE, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_DEPTH_FUNC, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_DEPTH_MASK, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_COLOR_MASK, nv4097::set_color_mask);
bind(NV4097_SET_COLOR_MASK_MRT, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_STENCIL_TEST_ENABLE, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_STENCIL_MASK, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_STENCIL_OP_ZPASS, nv4097::set_stencil_op);
bind(NV4097_SET_STENCIL_OP_FAIL, nv4097::set_stencil_op);
bind(NV4097_SET_STENCIL_OP_ZFAIL, nv4097::set_stencil_op);
bind(NV4097_SET_BACK_STENCIL_MASK, nv4097::set_surface_options_dirty_bit);
bind(NV4097_SET_BACK_STENCIL_OP_ZPASS, nv4097::set_stencil_op);
bind(NV4097_SET_BACK_STENCIL_OP_FAIL, nv4097::set_stencil_op);
bind(NV4097_SET_BACK_STENCIL_OP_ZFAIL, nv4097::set_stencil_op);
bind(NV4097_WAIT_FOR_IDLE, nv4097::sync);
bind(NV4097_INVALIDATE_L2, nv4097::set_shader_program_dirty);
bind(NV4097_SET_SHADER_PROGRAM, nv4097::set_shader_program_dirty);
bind(NV4097_SET_TRANSFORM_PROGRAM_START, nv4097::set_transform_program_start);
bind(NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK, nv4097::set_vertex_attribute_output_mask);
bind(NV4097_SET_VERTEX_DATA_BASE_OFFSET, nv4097::set_vertex_base_offset);
bind(NV4097_SET_VERTEX_DATA_BASE_INDEX, nv4097::set_index_base_offset);
bind_range<NV4097_SET_VERTEX_DATA_ARRAY_OFFSET, 1, 16, nv4097::set_vertex_array_offset>();
bind(NV4097_SET_INDEX_ARRAY_DMA, nv4097::check_index_array_dma);
bind(NV4097_SET_BLEND_EQUATION, nv4097::set_blend_equation);
bind(NV4097_SET_BLEND_FUNC_SFACTOR, nv4097::set_blend_factor);
bind(NV4097_SET_BLEND_FUNC_DFACTOR, nv4097::set_blend_factor);
// NV308A (0xa400..0xbffc!)
bind_array(NV308A_COLOR, 1, 256 * 7, nv308a::color::impl);
// NV3089
bind(NV3089_IMAGE_IN, nv3089::image_in);
// NV0039
bind(NV0039_BUFFER_NOTIFY, nv0039::buffer_notify);
// lv1 hypervisor
bind_array(GCM_SET_USER_COMMAND, 1, 2, user_command);
bind_range<GCM_FLIP_HEAD, 1, 2, gcm::driver_flip>();
bind_range<GCM_DRIVER_QUEUE, 1, 8, gcm::queue_flip>();
// custom methods
bind(GCM_FLIP_COMMAND, flip_command);
// FIFO
bind(FIFO::FIFO_DRAW_BARRIER >> 2, fifo::draw_barrier);
// REGS(ctx)->init();
method_registers.init();
return true;
}();
}
| 89,507
|
C++
|
.cpp
| 1,679
| 49.887433
| 216
| 0.693341
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,388
|
RSXOffload.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXOffload.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Common/BufferUtils.h"
#include "Core/RSXReservationLock.hpp"
#include "RSXOffload.h"
#include "RSXThread.h"
#include <thread>
#include "util/asm.hpp"
namespace rsx
{
// Worker thread that drains the dma_manager job queue when multithreaded RSX
// is enabled. Producers increment m_enqueued_count before pushing work; this
// thread increments m_processed_count after each completed job, so equality
// of the two counters means the queue has been fully drained.
struct dma_manager::offload_thread
{
lf_queue<transport_packet> m_work_queue;
atomic_t<u64> m_enqueued_count = 0;
atomic_t<u64> m_processed_count = 0;
// Job currently being executed; consulted by get_fault_range() during recovery
transport_packet* m_current_job = nullptr;
// Identity of this worker, used by dma_manager::is_current_thread()
thread_base* current_thread_ = nullptr;
// Thread entry point: loops until the thread state becomes 'aborting'
void operator ()()
{
if (!g_cfg.video.multithreaded_rsx)
{
// Abort if disabled
return;
}
current_thread_ = thread_ctrl::get_current();
ensure(current_thread_);
if (g_cfg.core.thread_scheduler != thread_scheduler_mode::os)
{
// Pin to the core set reserved for RSX work
thread_ctrl::set_thread_affinity_mask(thread_ctrl::get_affinity_mask(thread_class::rsx));
}
while (thread_ctrl::state() != thread_state::aborting)
{
for (auto&& job : m_work_queue.pop_all())
{
m_current_job = &job;
switch (job.type)
{
case raw_copy:
{
// Guard the guest source range with a reservation lock when strict
// rendering mode is on and the pointer maps into guest memory
const u32 vm_addr = vm::try_get_addr(job.src).first;
rsx::reservation_lock<true, 1> rsx_lock(vm_addr, job.length, g_cfg.video.strict_rendering_mode && vm_addr);
std::memcpy(job.dst, job.src, job.length);
break;
}
case vector_copy:
{
// Source bytes were captured into the packet's own storage
std::memcpy(job.dst, job.opt_storage.data(), job.length);
break;
}
case index_emulate:
{
// Generate an index buffer emulating a non-native primitive type
write_index_array_for_non_indexed_non_native_primitive_to_buffer(static_cast<char*>(job.dst), static_cast<rsx::primitive_type>(job.aux_param0), job.length);
break;
}
case callback:
{
// Forward a backend control request to the active renderer
rsx::get_current_renderer()->renderctl(job.aux_param0, job.src);
break;
}
default: fmt::throw_exception("Unreachable");
}
m_processed_count.release(m_processed_count + 1);
}
m_current_job = nullptr;
if (m_enqueued_count.load() == m_processed_count.load())
{
// Queue drained: wake any sync() waiters, then yield the CPU
m_processed_count.notify_all();
std::this_thread::yield();
}
}
// Sentinel value on shutdown; also wakes any remaining waiters
m_processed_count = -1;
m_processed_count.notify_all();
}
static constexpr auto thread_name = "RSX Offloader"sv;
};
// Spawns the offloader worker thread. The worker exits immediately if
// multithreaded RSX is disabled in the configuration.
void dma_manager::init()
{
m_thread = std::make_shared<named_thread<offload_thread>>();
}
// General transport
// Copies a byte vector to dst. Large transfers are handed to the offload
// thread when multithreaded RSX is active; small ones are done inline.
void dma_manager::copy(void *dst, std::vector<u8>& src, u32 length) const
{
	const bool offload = g_cfg.video.multithreaded_rsx && length > max_immediate_transfer_size;

	if (!offload)
	{
		std::memcpy(dst, src.data(), length);
		return;
	}

	m_thread->m_enqueued_count++;
	m_thread->m_work_queue.push(dst, src, length);
}
// Copies a raw memory range to dst. The immediate path takes a reservation
// lock over the guest range (when strict rendering is enabled and the source
// maps to guest memory); large transfers are queued for the offload thread.
void dma_manager::copy(void *dst, void *src, u32 length) const
{
	if (g_cfg.video.multithreaded_rsx && length > max_immediate_transfer_size)
	{
		// Asynchronous path
		m_thread->m_enqueued_count++;
		m_thread->m_work_queue.push(dst, src, length);
		return;
	}

	// Immediate path
	const u32 vm_addr = vm::try_get_addr(src).first;
	rsx::reservation_lock<true, 1> rsx_lock(vm_addr, length, g_cfg.video.strict_rendering_mode && vm_addr);
	std::memcpy(dst, src, length);
}
// Vertex utilities
// Generates an emulated index buffer for a non-native primitive type,
// either inline or on the offload thread depending on configuration.
void dma_manager::emulate_as_indexed(void *dst, rsx::primitive_type primitive, u32 count)
{
	if (g_cfg.video.multithreaded_rsx)
	{
		// Let the offloader produce the index data
		m_thread->m_enqueued_count++;
		m_thread->m_work_queue.push(dst, primitive, count);
		return;
	}

	write_index_array_for_non_indexed_non_native_primitive_to_buffer(
		static_cast<char*>(dst), primitive, count);
}
// Backend callback
// Queues a backend control request; the callback runs on the offload thread.
// Only valid when multithreaded RSX is active.
void dma_manager::backend_ctrl(u32 request_code, void* args)
{
	ensure(g_cfg.video.multithreaded_rsx);

	m_thread->m_enqueued_count++;
	m_thread->m_work_queue.push(request_code, args);
}
// Synchronization
// Returns true when the calling thread is the RSX offloader itself.
bool dma_manager::is_current_thread() const
{
	const auto caller = thread_ctrl::get_current();
	return caller && m_thread->current_thread_ == caller;
}
// Blocks until every queued job has been processed. Returns false only when
// called from the RSX thread while the offloader is in memory-fault recovery
// (waiting in that situation would deadlock).
bool dma_manager::sync() const
{
auto& _thr = *m_thread;
if (_thr.m_enqueued_count.load() <= _thr.m_processed_count.load()) [[likely]]
{
// Nothing to do
return true;
}
if (auto rsxthr = get_current_renderer(); rsxthr->is_current_thread())
{
if (m_mem_fault_flag)
{
// Abort if offloader is in recovery mode
return false;
}
while (_thr.m_enqueued_count.load() > _thr.m_processed_count.load())
{
// Keep servicing RSX-side duties while spinning
rsxthr->on_semaphore_acquire_wait();
utils::pause();
}
}
else
{
// Any other thread: plain busy-wait until the counters converge
while (_thr.m_enqueued_count.load() > _thr.m_processed_count.load())
utils::pause();
}
return true;
}
// Drains all outstanding work, then requests offload thread termination.
void dma_manager::join()
{
sync();
*m_thread = thread_state::aborting;
}
// Marks the offloader as being in access-violation recovery mode.
// May only be called from the offload thread itself.
void dma_manager::set_mem_fault_flag()
{
ensure(is_current_thread()); // "Access denied"
m_mem_fault_flag.release(true);
}
// Clears recovery mode after the fault has been handled.
// May only be called from the offload thread itself.
void dma_manager::clear_mem_fault_flag()
{
ensure(is_current_thread()); // "Access denied"
m_mem_fault_flag.release(false);
}
// Fault recovery
// Reconstructs the guest address range touched by the job that faulted,
// based on the job the offload thread was executing at the time.
utils::address_range dma_manager::get_fault_range(bool writing) const
{
	const auto job = ensure(m_thread->m_current_job);

	void* address = nullptr;
	u32 range = job->length;

	switch (job->type)
	{
	case raw_copy:
		// A raw copy can fault on either side depending on access direction
		address = writing ? job->dst : job->src;
		break;
	case vector_copy:
		// Source lives in packet-local storage; only the write side can fault
		ensure(writing);
		address = job->dst;
		break;
	case index_emulate:
		ensure(writing);
		address = job->dst;
		range = get_index_count(static_cast<rsx::primitive_type>(job->aux_param0), job->length);
		break;
	default:
		fmt::throw_exception("Unreachable");
	}

	return utils::address_range::start_length(vm::get_addr(address), range);
}
}
| 5,580
|
C++
|
.cpp
| 206
| 23.378641
| 162
| 0.667416
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,389
|
RSXThread.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXThread.cpp
|
#include "stdafx.h"
#include "RSXThread.h"
#include "Capture/rsx_capture.h"
#include "Common/BufferUtils.h"
#include "Common/buffer_stream.hpp"
#include "Common/texture_cache.h"
#include "Common/surface_store.h"
#include "Common/time.hpp"
#include "Core/RSXReservationLock.hpp"
#include "Core/RSXEngLock.hpp"
#include "Host/RSXDMAWriter.h"
#include "NV47/HW/context.h"
#include "Program/GLSLCommon.h"
#include "rsx_methods.h"
#include "gcm_printing.h"
#include "RSXDisAsm.h"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/Cell/lv2/sys_event.h"
#include "Emu/Cell/lv2/sys_time.h"
#include "Emu/Cell/Modules/cellGcmSys.h"
#include "util/serialization_ext.hpp"
#include "Overlays/overlay_perf_metrics.h"
#include "Overlays/overlay_debug_overlay.h"
#include "Overlays/overlay_message.h"
#include "Utilities/date_time.h"
#include "Utilities/StrUtil.h"
#include "Crypto/unzip.h"
#include "util/asm.hpp"
#include <span>
#include <sstream>
#include <thread>
#include <unordered_set>
#include <cfenv>
class GSRender;
#define CMD_DEBUG 0
// User-initiated capture/recording requests, polled by the RSX thread
atomic_t<bool> g_user_asked_for_recording = false;
atomic_t<bool> g_user_asked_for_screenshot = false;
atomic_t<bool> g_user_asked_for_frame_capture = false;
atomic_t<bool> g_disable_frame_limit = false;
// Global storage for the frame debugger and RSX capture machinery
rsx::frame_trace_data frame_debug;
rsx::frame_capture_data frame_capture;
extern CellGcmOffsetTable offsetTable;
extern thread_local std::string(*g_tls_log_prefix)();
extern atomic_t<u32> g_lv2_preempts_taken;
LOG_CHANNEL(perf_log, "PERF");
// Savestate (de)serialization of the RSX register block. Transform constants
// are only included for newer serialization versions or explicit emu-state
// captures, for compatibility with old RSX capture files.
template <>
bool serialize<rsx::rsx_state>(utils::serial& ar, rsx::rsx_state& o)
{
ar(o.transform_program);
// Work around for old RSX captures.
// RSX capture and savestates both call this method.
// We do not want to grab transform constants if it is not savestate capture.
const bool is_savestate_capture = thread_ctrl::get_current() && thread_ctrl::get_name() == "Emu State Capture Thread";
if (GET_SERIALIZATION_VERSION(global_version) || is_savestate_capture)
{
ar(o.transform_constants);
}
return ar(o.registers);
}
// (De)serializes a full RSX frame capture. The stream is rejected when the
// magic, version or endianness tag does not match this build's expectations.
template <>
bool serialize<rsx::frame_capture_data>(utils::serial& ar, rsx::frame_capture_data& o)
{
ar(o.magic, o.version, o.LE_format);
if (o.magic != rsx::c_fc_magic || o.version != rsx::c_fc_version || o.LE_format != u32{std::endian::little == std::endian::native})
{
return false;
}
return ar(o.tile_map, o.memory_map, o.memory_data_map, o.display_buffers_map, o.replay_commands, o.reg_state);
}
// Raw memory payload of one captured block.
template <>
bool serialize<rsx::frame_capture_data::memory_block_data>(utils::serial& ar, rsx::frame_capture_data::memory_block_data& o)
{
return ar(o.data);
}
// One replayed FIFO command plus the state snapshot keys attached to it.
template <>
bool serialize<rsx::frame_capture_data::replay_command>(utils::serial& ar, rsx::frame_capture_data::replay_command& o)
{
return ar(o.rsx_command, o.memory_state, o.tile_state, o.display_buffer_state);
}
// Only the IO->EA table is stored; the inverse EA->IO table is rebuilt when
// loading, from the non-unmapped entries of the EA table.
template <>
bool serialize<rsx::rsx_iomap_table>(utils::serial& ar, rsx::rsx_iomap_table& o)
{
// We do not need more than that
ar(std::span(o.ea.data(), 512));
if (!ar.is_writing())
{
// Populate o.io
for (const atomic_t<u32>& ea_addr : o.ea)
{
const u32& addr = ea_addr.raw();
if (addr != umax)
{
// Entry index encodes the IO page, entry value encodes the EA page
o.io[addr >> 20].raw() = static_cast<u32>(&ea_addr - o.ea.data()) << 20;
}
}
}
return true;
}
namespace rsx
{
// Installed by the active RSX thread's constructor; routes GPU-side access
// violations to the renderer. Reset to nullptr by the thread's destructor.
std::function<bool(u32 addr, bool is_writing)> g_access_violation_handler;
// TODO: Proper context manager
static rsx::context s_ctx{ .rsxthr = nullptr, .register_state = &method_registers };
// Both translation tables start fully unmapped (all entries set to -1).
rsx_iomap_table::rsx_iomap_table() noexcept
: ea(fill_array(-1))
, io(fill_array(-1))
{
}
// Translates an RSX offset/location pair into an effective guest address.
// When size_to_check is non-zero the whole range is validated and failures
// return 0 after logging a warning; with size 0 a failure throws instead.
u32 get_address(u32 offset, u32 location, u32 size_to_check, std::source_location src_loc)
{
const auto render = get_current_renderer();
std::string_view msg;
switch (location)
{
case CELL_GCM_CONTEXT_DMA_MEMORY_FRAME_BUFFER:
case CELL_GCM_LOCATION_LOCAL:
{
// Offset into RSX local memory (VRAM)
if (offset < render->local_mem_size && render->local_mem_size - offset >= size_to_check)
{
return rsx::constants::local_mem_base + offset;
}
msg = "Local RSX offset out of range!"sv;
break;
}
case CELL_GCM_CONTEXT_DMA_MEMORY_HOST_BUFFER:
case CELL_GCM_LOCATION_MAIN:
{
// IO offset into main memory, translated via the iomap table
if (const u32 ea = render->iomap_table.get_addr(offset); ea + 1)
{
if (!size_to_check || vm::check_addr(ea, 0, size_to_check))
{
return ea;
}
}
msg = "RSXIO memory not mapped!"sv;
break;
}
case CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_LOCAL:
{
if (offset < sizeof(RsxReports::report) /*&& (offset % 0x10) == 0*/)
{
return render->label_addr + ::offset32(&RsxReports::report) + offset;
}
msg = "Local RSX REPORT offset out of range!"sv;
break;
}
case CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_MAIN:
{
if (const u32 ea = offset < 0x1000000 ? render->iomap_table.get_addr(0x0e000000 + offset) : -1; ea + 1)
{
if (!size_to_check || vm::check_addr(ea, 0, size_to_check))
{
return ea;
}
}
msg = "RSXIO REPORT memory not mapped!"sv;
break;
}
// They are handled elsewhere for targeted methods, so it's unexpected for them to be passed here
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY0:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY1:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY2:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY3:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY4:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY5:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY6:
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY7:
msg = "CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFYx"sv; break;
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_0:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_1:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_2:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_3:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_4:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_5:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_6:
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_7:
msg = "CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_x"sv; break;
case CELL_GCM_CONTEXT_DMA_SEMAPHORE_RW:
case CELL_GCM_CONTEXT_DMA_SEMAPHORE_R:
{
if (offset < sizeof(RsxReports::semaphore) /*&& (offset % 0x10) == 0*/)
{
return render->label_addr + offset;
}
msg = "DMA SEMAPHORE offset out of range!"sv;
break;
}
case CELL_GCM_CONTEXT_DMA_DEVICE_RW:
case CELL_GCM_CONTEXT_DMA_DEVICE_R:
{
if (offset < 0x100000 /*&& (offset % 0x10) == 0*/)
{
return render->device_addr + offset;
}
// TODO: What happens here? It could wrap around or access other segments of rsx internal memory etc
// Or can simply throw access violation error
msg = "DMA DEVICE offset out of range!"sv;
break;
}
default:
{
msg = "Invalid location!"sv;
break;
}
}
if (size_to_check)
{
// Allow failure if specified size
// This is to allow accurate recovery for failures
rsx_log.warning("rsx::get_address(offset=0x%x, location=0x%x, size=0x%x): %s%s", offset, location, size_to_check, msg, src_loc);
return 0;
}
fmt::throw_exception("rsx::get_address(offset=0x%x, location=0x%x): %s%s", offset, location, msg, src_loc);
}
// Requests the RSX thread to yield, but only when CPU preemption of RSX is
// allowed by the core configuration.
extern void set_rsx_yield_flag() noexcept
{
	const auto renderer = get_current_renderer();

	if (renderer && g_cfg.core.allow_rsx_cpu_preempt)
	{
		renderer->state += cpu_flag::yield;
	}
}
// Flags a flip request on behalf of the native UI overlay, if a renderer
// is currently active.
extern void set_native_ui_flip()
{
	if (const auto renderer = rsx::get_current_renderer())
	{
		renderer->async_flip_requested |= rsx::thread::flip_request::native_ui;
	}
}
// Computes the [start, length] vertex range that must be uploaded for a draw
// of [first, first + count), accounting for per-attribute frequency dividers
// (modulo and division modes). The result is cached in vertex_range.
std::pair<u32, u32> interleaved_range_info::calculate_required_range(u32 first, u32 count)
{
if (vertex_range.second)
{
// Cached result
return vertex_range;
}
if (single_vertex)
{
return { 0, 1 };
}
const u32 max_index = (first + count) - 1;
u32 _max_index = 0;
u32 _min_index = first;
// Distinct modulo frequencies that may still bound the range; freq_count is
// set to umax once any attribute forces the full [first, max_index] range
u32 frequencies[rsx::limits::vertex_count];
u32 freq_count = rsx::method_registers.current_draw_clause.command == rsx::draw_command::indexed ? 0 : u32{umax};
u32 max_result_by_division = 0; // Guaranteed maximum
for (const auto &attrib : locations)
{
if (attrib.frequency <= 1) [[likely]]
{
freq_count = umax;
_max_index = max_index;
}
else
{
if (attrib.modulo)
{
if (max_index >= attrib.frequency)
{
// Actually uses the modulo operator
_min_index = 0;
_max_index = std::max<u32>(_max_index, attrib.frequency - 1);
if (max_result_by_division < _max_index)
{
if (freq_count != umax)
{
// Track each distinct frequency once for later index inspection
if (std::find(frequencies, frequencies + freq_count, attrib.frequency) == frequencies + freq_count)
{
frequencies[freq_count++] = attrib.frequency;
}
}
}
}
else
{
// Same as having no modulo
_max_index = max_index;
freq_count = umax;
}
}
else
{
// Division operator
_min_index = std::min(_min_index, first / attrib.frequency);
_max_index = std::max<u32>(_max_index, utils::aligned_div(max_index, attrib.frequency));
if (freq_count > 0 && freq_count != umax)
{
const u32 max = utils::aligned_div(max_index, attrib.frequency);
max_result_by_division = std::max<u32>(max_result_by_division, max);
// Discard lower frequencies because it has been proven that there are indices higher than them
const usz discard_cnt = frequencies + freq_count - std::remove_if(frequencies, frequencies + freq_count, [&max_result_by_division](u32 freq)
{
return freq <= max_result_by_division;
});
freq_count -= static_cast<u32>(discard_cnt);
}
}
}
}
// If any modulo frequencies survived, try to tighten _max_index by scanning
// the actual index buffer (only needed when the coarse bound is not readable)
while (freq_count > 0 && freq_count != umax)
{
const rsx::index_array_type index_type = rsx::method_registers.current_draw_clause.is_immediate_draw ?
rsx::index_array_type::u32 :
rsx::method_registers.index_type();
const u32 index_size = index_type == rsx::index_array_type::u32 ? 4 : 2;
const auto render = rsx::get_current_renderer();
// If we can access a bit a more memory than required - do it
// The alternative would be re-iterating again over all of them
if (get_location(real_offset_address) == CELL_GCM_LOCATION_LOCAL)
{
if (utils::add_saturate<u32>(real_offset_address - rsx::constants::local_mem_base, (_max_index + 1) * attribute_stride) <= render->local_mem_size)
{
break;
}
}
else if (real_offset_address % 0x100000 + (_max_index + 1) * attribute_stride <= 0x100000)//(vm::check_addr(real_offset_address, vm::page_readable, (_max_index + 1) * attribute_stride))
{
break;
}
_max_index = 0;
// Re-derive the maximum modulo result by walking the index data itself
auto re_evaluate = [&] <typename T> (const std::byte* ptr, T)
{
const u64 restart = rsx::method_registers.restart_index_enabled() ? rsx::method_registers.restart_index() : u64{umax};
for (u32 _index = first; _index < first + count; _index++)
{
const auto value = read_from_ptr<be_t<T>>(ptr, _index * sizeof(T));
if (value == restart)
{
continue;
}
for (u32 freq_it = 0; freq_it < freq_count; freq_it++)
{
const auto res = value % frequencies[freq_it];
if (res > _max_index)
{
_max_index = res;
}
}
}
};
if (index_size == 4)
{
if (!render->element_push_buffer.empty()) [[unlikely]]
{
// Indices provided via immediate mode
re_evaluate(reinterpret_cast<const std::byte*>(render->element_push_buffer.data()), u32{});
}
else
{
const u32 address = (0 - index_size) & get_address(rsx::method_registers.index_array_address(), rsx::method_registers.index_array_location());
re_evaluate(vm::get_super_ptr<std::byte>(address), u32{});
}
}
else
{
if (!render->element_push_buffer.empty()) [[unlikely]]
{
// Indices provided via immediate mode
re_evaluate(reinterpret_cast<const std::byte*>(render->element_push_buffer.data()), u16{});
}
else
{
const u32 address = (0 - index_size) & get_address(rsx::method_registers.index_array_address(), rsx::method_registers.index_array_location());
re_evaluate(vm::get_super_ptr<std::byte>(address), u16{});
}
}
break;
}
ensure(_max_index >= _min_index);
vertex_range = { _min_index, (_max_index - _min_index) + 1 };
return vertex_range;
}
// Returns the host-side byte size of one vertex attribute of the given base
// type and element count. 3-element vectors are padded to 4 on the host.
u32 get_vertex_type_size_on_host(vertex_base_type type, u32 size)
{
	// Validates the element count and applies 3 -> 4 padding; any count
	// outside {1, 2, 3, 4} is rejected.
	const auto padded_count = [size]() -> u32
	{
		switch (size)
		{
		case 1:
		case 2:
		case 4:
			return size;
		case 3:
			return 4;
		default:
			fmt::throw_exception("Wrong vector size");
		}
	};

	switch (type)
	{
	case vertex_base_type::s1:
	case vertex_base_type::s32k:
		return sizeof(u16) * padded_count();
	case vertex_base_type::f:
		// NOTE: element count is not validated for this format
		return sizeof(f32) * size;
	case vertex_base_type::sf:
		return sizeof(f16) * padded_count();
	case vertex_base_type::ub:
		return sizeof(u8) * padded_count();
	case vertex_base_type::cmp:
		return 4;
	case vertex_base_type::ub256:
		ensure(size == 4);
		return sizeof(u8) * 4;
	default:
		break;
	}

	fmt::throw_exception("RSXVertexData::GetTypeSize: Bad vertex data type (%d)!", static_cast<u8>(type));
}
// Writes a linear source image into this region, honoring the tile's pitch
// and compression mode. Untiled regions are copied straight through.
// NOTE(review): only DISABLED/C32_2X1 (treated as linear) and C32_2X2 are
// implemented; other modes hit the default path — verify intended behavior.
void tiled_region::write(const void *src, u32 width, u32 height, u32 pitch)
{
if (!tile)
{
memcpy(ptr, src, height * pitch);
return;
}
// Pixel offset of this region within the tile's pitch-aligned layout
const u32 offset_x = base % tile->pitch;
const u32 offset_y = base / tile->pitch;
switch (tile->comp)
{
case CELL_GCM_COMPMODE_C32_2X1:
case CELL_GCM_COMPMODE_DISABLED:
// Row-by-row copy into the tile-pitched destination
for (u32 y = 0; y < height; ++y)
{
memcpy(ptr + (offset_y + y) * tile->pitch + offset_x, static_cast<const u8*>(src) + pitch * y, pitch);
}
break;
/*
case CELL_GCM_COMPMODE_C32_2X1:
for (u32 y = 0; y < height; ++y)
{
const u32* src_line = reinterpret_cast<const u32*>(static_cast<const u8*>(src) + pitch * y);
u32* dst_line = reinterpret_cast<u32*>(ptr + (offset_y + y) * tile->pitch + offset_x);
for (u32 x = 0; x < width; ++x)
{
u32 value = src_line[x];
dst_line[x * 2 + 0] = value;
dst_line[x * 2 + 1] = value;
}
}
break;
*/
case CELL_GCM_COMPMODE_C32_2X2:
// Each source pixel is replicated into a 2x2 block of the destination
for (u32 y = 0; y < height; ++y)
{
const u32* src_line = reinterpret_cast<const u32*>(static_cast<const u8*>(src) + pitch * y);
u32* line_0 = reinterpret_cast<u32*>(ptr + (offset_y + y * 2 + 0) * tile->pitch + offset_x);
u32* line_1 = reinterpret_cast<u32*>(ptr + (offset_y + y * 2 + 1) * tile->pitch + offset_x);
for (u32 x = 0; x < width; ++x)
{
u32 value = src_line[x];
line_0[x * 2 + 0] = value;
line_0[x * 2 + 1] = value;
line_1[x * 2 + 0] = value;
line_1[x * 2 + 1] = value;
}
}
break;
default:
::narrow(tile->comp);
}
}
// Reads this region back into a linear destination buffer, inverting the
// layout transforms applied by tiled_region::write.
void tiled_region::read(void *dst, u32 width, u32 height, u32 pitch)
{
if (!tile)
{
memcpy(dst, ptr, height * pitch);
return;
}
u32 offset_x = base % tile->pitch;
u32 offset_y = base / tile->pitch;
switch (tile->comp)
{
case CELL_GCM_COMPMODE_C32_2X1:
case CELL_GCM_COMPMODE_DISABLED:
// Row-by-row copy from the tile-pitched source
for (u32 y = 0; y < height; ++y)
{
memcpy(static_cast<u8*>(dst) + pitch * y, ptr + (offset_y + y) * tile->pitch + offset_x, pitch);
}
break;
/*
case CELL_GCM_COMPMODE_C32_2X1:
for (u32 y = 0; y < height; ++y)
{
const u32* src_line = reinterpret_cast<const u32*>(ptr + (offset_y + y) * tile->pitch + offset_x);
u32* dst_line = reinterpret_cast<u32*>(static_cast<u8*>(dst) + pitch * y);
for (u32 x = 0; x < width; ++x)
{
dst_line[x] = src_line[x * 2 + 0];
}
}
break;
*/
case CELL_GCM_COMPMODE_C32_2X2:
// Sample the top-left pixel of each 2x2 block written by write()
for (u32 y = 0; y < height; ++y)
{
const u32* src_line = reinterpret_cast<const u32*>(ptr + (offset_y + y * 2 + 0) * tile->pitch + offset_x);
u32* dst_line = reinterpret_cast<u32*>(static_cast<u8*>(dst) + pitch * y);
for (u32 x = 0; x < width; ++x)
{
dst_line[x] = src_line[x * 2 + 0];
}
}
break;
default:
::narrow(tile->comp);
}
}
// Detaches the global access-violation handler installed by the constructor.
thread::~thread()
{
g_access_violation_handler = nullptr;
}
// Serializes (or deserializes) the RSX thread state for savestates.
// The order of the ar(...) calls is the on-disk format — do not reorder.
void thread::save(utils::serial& ar)
{
[[maybe_unused]] const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), rsx);
ar(rsx::method_registers);
for (auto& v : vertex_push_buffers)
{
ar(v.attr, v.size, v.type, v.vertex_count, v.dword_count, v.data);
}
ar(element_push_buffer, fifo_ret_addr, saved_fifo_ret, zcull_surface_active, m_surface_info, m_depth_surface_info, m_framebuffer_layout);
ar(dma_address, iomap_table, restore_point, tiles, zculls, display_buffers, display_buffers_count, current_display_buffer);
ar(enable_second_vhandler, requested_vsync);
ar(device_addr, label_addr, main_mem_size, local_mem_size, rsx_event_port, driver_info);
ar(in_begin_end);
// NOTE(review): display buffer fields are serialized twice (also above) —
// presumably kept for format compatibility; confirm before changing.
ar(display_buffers, display_buffers_count, current_display_buffer);
ar(unsent_gcm_events, rsx::method_registers.current_draw_clause);
if (ar.is_writing() || version >= 2)
{
ar(vblank_count);
// Persist a pending emu-requested flip so it is re-issued after load
b8 flip_pending{};
if (ar.is_writing())
{
flip_pending = !!(async_flip_requested & flip_request::emu_requested);
}
ar(flip_pending);
if (flip_pending)
{
ar(vblank_at_flip);
ar(async_flip_buffer);
if (!ar.is_writing())
{
async_flip_requested |= flip_request::emu_requested;
flip_notification_count = 1;
}
}
}
// Persist an interrupted FIFO command (cpu_flag::again) so it can be resumed
if (ar.is_writing())
{
if (fifo_ctrl && state & cpu_flag::again)
{
ar(fifo_ctrl->get_remaining_args_count() + 1);
ar(fifo_ctrl->last_cmd());
}
else
{
ar(u32{0});
}
}
else if (u32 count = ar)
{
restore_fifo_count = count;
ar(restore_fifo_cmd);
}
}
// Constructs the RSX thread; when _ar is non-null, state is restored from a
// savestate via save() (which doubles as the load path).
thread::thread(utils::serial* _ar)
: cpu_thread(0x5555'5555)
{
// Route guest access violations on RSX-owned memory to this thread
g_access_violation_handler = [this](u32 address, bool is_writing)
{
return on_access_violation(address, is_writing);
};
m_textures_dirty.fill(true);
m_vertex_textures_dirty.fill(true);
m_graphics_state |= pipeline_state::all_dirty;
g_user_asked_for_frame_capture = false;
// TODO: Proper context management in the driver
s_ctx.rsxthr = this;
m_ctx = &s_ctx;
if (g_cfg.misc.use_native_interface && (g_cfg.video.renderer == video_renderer::opengl || g_cfg.video.renderer == video_renderer::vulkan))
{
m_overlay_manager = g_fxo->init<rsx::overlays::display_manager>(0);
}
if (!_ar)
{
add_remove_flags({}, cpu_flag::stop); // TODO: Remove workaround
return;
}
// Savestate path: restore serialized state before starting
add_remove_flags(cpu_flag::suspend, cpu_flag::stop);
serialized = true;
save(*_ar);
if (dma_address)
{
ctrl = vm::_ptr<RsxDmaControl>(dma_address);
rsx_thread_running = true;
}
if (g_cfg.savestate.start_paused)
{
// Allow to render a whole frame within this emulation session so there won't be missing graphics
m_pause_after_x_flips = 2;
}
}
// Deserializing constructor: save() doubles as the load path when ar reads.
avconf::avconf(utils::serial& ar)
{
save(ar);
}
// Serializes the AV configuration. Loads from pre-version-3 states fall back
// to the old raw-bytes layout (everything up to scan_mode, plus padding).
void avconf::save(utils::serial& ar)
{
[[maybe_unused]] const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), rsx);
if (!ar.is_writing() && version < 3)
{
// Be compatible with previous bitwise serialization
ar(std::span<u8>(reinterpret_cast<u8*>(this), ::offset32(&avconf::scan_mode)))	;
ar.pos += utils::align<usz>(::offset32(&avconf::scan_mode), alignof(avconf)) - ::offset32(&avconf::scan_mode);
return;
}
ar(stereo_mode, format, aspect, resolution_id, scanline_pitch, gamma, resolution_x, resolution_y, state, scan_mode);
}
void thread::capture_frame(const std::string& name)
{
frame_trace_data::draw_state draw_state{};
draw_state.programs = get_programs();
draw_state.name = name;
frame_debug.draw_calls.emplace_back(std::move(draw_state));
}
// Prepares draw-call state at NV4097 begin: resolves pending conditional
// rendering and tracks primitive-class changes for barycentric workarounds.
void thread::begin()
{
if (cond_render_ctrl.hw_cond_active)
{
if (!cond_render_ctrl.eval_pending())
{
// End conditional rendering if still active
end_conditional_rendering();
}
// If hw cond render is enabled and evalutation is still pending, do nothing
}
else if (cond_render_ctrl.eval_pending())
{
// Evaluate conditional rendering test or enable hw cond render until results are available
if (backend_config.supports_hw_conditional_render)
{
// In this mode, it is possible to skip the cond render while the backend is still processing data.
// The backend guarantees that any draw calls emitted during this time will NOT generate any ROP writes
ensure(!cond_render_ctrl.hw_cond_active);
// Pending evaluation, use hardware test
begin_conditional_rendering(cond_render_ctrl.eval_sources);
}
else
{
// NOTE: eval_sources list is reversed with newest query first
zcull_ctrl->read_barrier(this, cond_render_ctrl.eval_address, cond_render_ctrl.eval_sources.front());
ensure(!cond_render_ctrl.eval_pending());
}
}
if (!backend_config.supports_normalized_barycentrics)
{
// Check for mode change between rasterized polys vs lines and points
// Luckily this almost never happens in real games
const auto current_mode = rsx::method_registers.current_draw_clause.classify_mode();
if (current_mode != m_current_draw_mode)
{
// Force shader regeneration for the new rasterization class
m_graphics_state |= (rsx::vertex_program_state_dirty | rsx::fragment_program_state_dirty);
m_current_draw_mode = current_mode;
}
}
in_begin_end = true;
}
// Stores one immediate-mode vertex attribute write into the push buffers,
// ignoring attributes that are masked out of the input set.
void thread::append_to_push_buffer(u32 attribute, u32 size, u32 subreg_index, vertex_base_type type, u32 value)
{
	const u32 input_mask = rsx::method_registers.vertex_attrib_input_mask();

	if (!(input_mask & (1u << attribute)))
	{
		return;
	}

	// Enforce ATTR0 as vertex attribute for push buffers.
	// This whole thing becomes a mess if we don't have a provoking attribute.
	const auto vertex_id = vertex_push_buffers[0].get_vertex_id();
	vertex_push_buffers[attribute].set_vertex_data(attribute, vertex_id, subreg_index, type, size, value);

	m_graphics_state |= rsx::pipeline_state::push_buffer_arrays_dirty;
}
// Number of vertices accumulated by immediate-mode attribute writes.
u32 thread::get_push_buffer_vertex_count() const
{
// Enforce ATTR0 as vertex attribute for push buffers.
// This whole thing becomes a mess if we don't have a provoking attribute.
return vertex_push_buffers[0].vertex_count;
}
// Appends one index to the immediate-mode element buffer.
void thread::append_array_element(u32 index)
{
// Endianness is swapped because common upload code expects input in BE
// TODO: Implement fast upload path for LE inputs and do away with this
element_push_buffer.push_back(std::bit_cast<u32, be_t<u32>>(index));
}
// Number of indices accumulated via append_array_element().
u32 thread::get_push_buffer_index_count() const
{
return ::size32(element_push_buffer);
}
// Finalizes a draw call at NV4097 end: updates stats, flushes push buffers,
// notifies the ZCULL unit and (optionally) records the draw for capture.
void thread::end()
{
if (capture_current_frame)
{
capture::capture_draw_memory(this);
}
in_begin_end = false;
m_frame_stats.draw_calls++;
method_registers.current_draw_clause.post_execute_cleanup(m_ctx);
m_graphics_state |= rsx::pipeline_state::framebuffer_reads_dirty;
m_eng_interrupt_mask |= rsx::backend_interrupt;
ROP_sync_timestamp = rsx::get_shared_tag();
if (m_graphics_state & rsx::pipeline_state::push_buffer_arrays_dirty)
{
// Reset immediate-mode vertex push buffers after consumption
for (auto& push_buf : vertex_push_buffers)
{
//Disabled, see https://github.com/RPCS3/rpcs3/issues/1932
//rsx::method_registers.register_vertex_info[index].size = 0;
push_buf.clear();
}
m_graphics_state.clear(rsx::pipeline_state::push_buffer_arrays_dirty);
}
element_push_buffer.clear();
zcull_ctrl->on_draw();
if (capture_current_frame)
{
u32 element_count = rsx::method_registers.current_draw_clause.get_elements_count();
capture_frame(fmt::format("Draw %s %d", rsx::method_registers.current_draw_clause.primitive, element_count));
}
}
// Walks every range of the current draw clause, resolving pipeline
// dependencies without emitting any geometry.
void thread::execute_nop_draw()
{
	method_registers.current_draw_clause.begin();

	while (true)
	{
		method_registers.current_draw_clause.execute_pipeline_dependencies(m_ctx);

		if (!method_registers.current_draw_clause.next())
		{
			break;
		}
	}
}
// Main entry of the RSX cpu_thread: waits for the emulator to leave the
// 'ready' state, then re-enters on_task() until a stop is requested.
void thread::cpu_task()
{
	while (Emu.IsReady())
	{
		thread_ctrl::wait_for(1000);
	}

	for (;;)
	{
		on_task();
		state -= cpu_flag::ret;

		if (is_stopped())
		{
			break;
		}
	}

	on_exit();
}
// Wait hook invoked by the cpu_thread framework; services pause requests and
// keeps overlays responsive while the emulation is globally paused.
void thread::cpu_wait(bs_t<cpu_flag> old)
{
if (external_interrupt_lock)
{
wait_pause();
}
if ((state & (cpu_flag::dbg_global_pause + cpu_flag::exit)) == cpu_flag::dbg_global_pause)
{
// Wait 16ms during emulation pause. This reduces cpu load while still giving us the chance to render overlays.
do_local_task(rsx::FIFO::state::paused);
thread_ctrl::wait_on(state, old, 16000);
}
else
{
on_semaphore_acquire_wait();
std::this_thread::yield();
}
}
// Delivers one VBLANK event: in HLE mode the registered handler is invoked on
// the interrupt PPU thread; in LLE mode the lv2 rsx context is notified.
void thread::post_vblank_event(u64 post_event_time)
{
vblank_count++;
if (isHLE)
{
if (auto ptr = vblank_handler)
{
// Queue the guest callback (arg = 1) followed by a sleep command
intr_thread->cmd_list
({
{ ppu_cmd::set_args, 1 }, u64{1},
{ ppu_cmd::lle_call, ptr },
{ ppu_cmd::sleep, 0 }
});
intr_thread->cmd_notify.store(1);
intr_thread->cmd_notify.notify_one();
}
}
else
{
sys_rsx_context_attribute(0x55555555, 0xFED, 1, get_guest_system_time(post_event_time), 0, 0);
}
}
namespace nv4097
{
// Forward declaration; the handler is defined with the NV4097 methods
void set_render_mode(context* rsx, u32, u32 arg);
}
void thread::on_task()
{
g_tls_log_prefix = []
{
const auto rsx = get_current_renderer();
return fmt::format("RSX [0x%07x]", rsx->ctrl ? +rsx->ctrl->get : 0);
};
if (!serialized) method_registers.init();
rsx::overlays::reset_performance_overlay();
rsx::overlays::reset_debug_overlay();
if (!is_initialized)
{
g_fxo->get<rsx::dma_manager>().init();
on_init_thread();
if (in_begin_end)
{
// on_init_thread should have prepared the backend resources
// Run draw call warmup again if the savestate happened mid-draw
ensure(serialized);
begin();
}
}
is_initialized = true;
is_initialized.notify_all();
if (!zcull_ctrl)
{
// Backend did not provide an implementation, provide NULL object
zcull_ctrl = std::make_unique<::rsx::reports::ZCULL_control>();
}
check_zcull_status(false);
nv4097::set_render_mode(m_ctx, 0, method_registers.registers[NV4097_SET_RENDER_ENABLE]);
performance_counters.state = FIFO::state::empty;
const u64 event_flags = unsent_gcm_events.exchange(0);
if (Emu.IsStarting())
{
Emu.CallFromMainThread([]
{
Emu.RunPPU();
});
}
// Wait for startup (TODO)
while (!rsx_thread_running || Emu.IsPaused())
{
// Execute backend-local tasks first
do_local_task(performance_counters.state);
// Update sub-units
zcull_ctrl->update(this);
if (is_stopped())
{
return;
}
thread_ctrl::wait_for(1000);
}
performance_counters.state = FIFO::state::running;
fifo_ctrl = std::make_unique<::rsx::FIFO::FIFO_control>(this);
fifo_ctrl->set_get(ctrl->get);
last_guest_flip_timestamp = get_system_time() - 1000000;
vblank_count = 0;
if (restore_fifo_count)
{
fifo_ctrl->restore_state(restore_fifo_cmd, restore_fifo_count);
}
if (!send_event(0, event_flags, 0))
{
return;
}
g_fxo->get<vblank_thread>().set_thread(std::shared_ptr<named_thread<std::function<void()>>>(new named_thread<std::function<void()>>("VBlank Thread"sv, [this]() -> void
{
#ifdef __linux__
constexpr u32 host_min_quantum = 10;
#else
constexpr u32 host_min_quantum = 500;
#endif
u64 start_time = get_system_time();
u64 vblank_rate = g_cfg.video.vblank_rate;
u64 vblank_period = 1'000'000 + u64{g_cfg.video.vblank_ntsc.get()} * 1000;
u64 local_vblank_count = 0;
// TODO: exit condition
while (!is_stopped() && !unsent_gcm_events && thread_ctrl::state() != thread_state::aborting)
{
// Get current time
const u64 current = get_system_time();
// Calculate the time at which we need to send a new VBLANK signal
const u64 post_event_time = start_time + (local_vblank_count + 1) * vblank_period / vblank_rate;
// Calculate time remaining to that time (0 if we passed it)
const u64 wait_for = current >= post_event_time ? 0 : post_event_time - current;
#ifdef __linux__
const u64 wait_sleep = wait_for;
#else
// Substract host operating system min sleep quantom to get sleep time
const u64 wait_sleep = wait_for - u64{wait_for >= host_min_quantum} * host_min_quantum;
#endif
if (!wait_for)
{
{
local_vblank_count++;
if (local_vblank_count == vblank_rate)
{
// Advance start_time to the moment of the current VBLANK
// Which is the last VBLANK event in this period
// This is in order for multiplication by ratio above to use only small numbers
start_time += vblank_period;
local_vblank_count = 0;
// We have a rare chance to update settings without losing precision whenever local_vblank_count is 0
vblank_rate = g_cfg.video.vblank_rate;
vblank_period = 1'000'000 + u64{g_cfg.video.vblank_ntsc.get()} * 1000;
}
post_vblank_event(post_event_time);
}
}
else if (wait_sleep)
{
thread_ctrl::wait_for(wait_sleep);
}
else if (wait_for >= host_min_quantum / 3 * 2)
{
std::this_thread::yield();
}
if (Emu.IsPaused())
{
// Save the difference before pause
start_time = get_system_time() - start_time;
while (Emu.IsPaused() && !is_stopped())
{
thread_ctrl::wait_for(5'000);
}
// Restore difference
start_time = get_system_time() - start_time;
}
}
})));
struct join_vblank
{
~join_vblank() noexcept
{
g_fxo->get<vblank_thread>() = thread_state::finished;
}
} join_vblank_obj{};
// Raise priority above other threads
thread_ctrl::scoped_priority high_prio(+1);
if (g_cfg.core.thread_scheduler != thread_scheduler_mode::os)
{
thread_ctrl::set_thread_affinity_mask(thread_ctrl::get_affinity_mask(thread_class::rsx));
}
while (!test_stopped())
{
// Wait for external pause events
if (external_interrupt_lock)
{
wait_pause();
if (!rsx_thread_running)
{
return;
}
}
// Note a possible rollback address
if (sync_point_request && !in_begin_end)
{
restore_point = ctrl->get;
saved_fifo_ret = fifo_ret_addr;
sync_point_request.release(false);
}
// Update sub-units every 64 cycles. The local handler is invoked for other functions externally on-demand anyway.
// This avoids expensive calls to check timestamps which involves reading some values from TLS storage on windows.
// If something is going on in the backend that requires an update, set the interrupt bit explicitly.
if ((m_cycles_counter++ & 63) == 0 || m_eng_interrupt_mask)
{
// Execute backend-local tasks first
do_local_task(performance_counters.state);
// Update other sub-units
zcull_ctrl->update(this);
if (m_host_dma_ctrl)
{
m_host_dma_ctrl->update();
}
}
// Execute FIFO queue
run_FIFO();
}
}
void thread::on_exit()
{
	// Orderly RSX thread teardown. The sequence matters: sub-units are flushed
	// before shared handlers/threads are released.

	// Flush any outstanding ZCULL reports before the backend goes away
	if (zcull_ctrl)
	{
		zcull_ctrl->sync(this);
	}

	// Deregister violation handler
	g_access_violation_handler = nullptr;

	// Clear any pending flush requests to release threads
	std::this_thread::sleep_for(10ms);
	do_local_task(rsx::FIFO::state::lock_wait);

	// Stop helper threads owned by the fixed-object store
	g_fxo->get<rsx::dma_manager>().join();
	g_fxo->get<vblank_thread>() = thread_state::finished;

	// Mark this cpu_thread as exited
	state += cpu_flag::exit;
}
void thread::fill_scale_offset_data(void *buffer, bool flip_y) const
{
	// Writes the viewport transform as four 16-byte rows (scale/offset per axis,
	// plus a trailing 1.0) into the provided uniform buffer.
	const float half_w = rsx::method_registers.surface_clip_width() / 2.f;
	const float half_h = rsx::method_registers.surface_clip_height() / 2.f;

	const float scale_x = rsx::method_registers.viewport_scale_x() / half_w;
	const float offset_x = (rsx::method_registers.viewport_offset_x() - half_w) / half_w;

	// Y axis is mirrored when rendering to a flipped target
	const float y_sign = flip_y ? -1.f : 1.f;
	const float scale_y = y_sign * (rsx::method_registers.viewport_scale_y() / half_h);
	const float offset_y = y_sign * ((rsx::method_registers.viewport_offset_y() - half_h) / half_h);

	// Z passes through unchanged
	const float scale_z = rsx::method_registers.viewport_scale_z();
	const float offset_z = rsx::method_registers.viewport_offset_z();
	const float one = 1.f;

	char* out = static_cast<char*>(buffer);
	utils::stream_vector(out, std::bit_cast<u32>(scale_x), 0, 0, std::bit_cast<u32>(offset_x));
	utils::stream_vector(out + 16, 0, std::bit_cast<u32>(scale_y), 0, std::bit_cast<u32>(offset_y));
	utils::stream_vector(out + 32, 0, 0, std::bit_cast<u32>(scale_z), std::bit_cast<u32>(offset_z));
	utils::stream_vector(out + 48, 0, 0, 0, std::bit_cast<u32>(one));
}
void thread::fill_user_clip_data(void *buffer) const
{
const rsx::user_clip_plane_op clip_plane_control[6] =
{
rsx::method_registers.clip_plane_0_enabled(),
rsx::method_registers.clip_plane_1_enabled(),
rsx::method_registers.clip_plane_2_enabled(),
rsx::method_registers.clip_plane_3_enabled(),
rsx::method_registers.clip_plane_4_enabled(),
rsx::method_registers.clip_plane_5_enabled(),
};
u8 data_block[64];
s32* clip_enabled_flags = reinterpret_cast<s32*>(data_block);
f32* clip_distance_factors = reinterpret_cast<f32*>(data_block + 32);
for (int index = 0; index < 6; ++index)
{
switch (clip_plane_control[index])
{
default:
rsx_log.error("bad clip plane control (0x%x)", static_cast<u8>(clip_plane_control[index]));
[[fallthrough]];
case rsx::user_clip_plane_op::disable:
clip_enabled_flags[index] = 0;
clip_distance_factors[index] = 0.f;
break;
case rsx::user_clip_plane_op::greater_or_equal:
clip_enabled_flags[index] = 1;
clip_distance_factors[index] = 1.f;
break;
case rsx::user_clip_plane_op::less_than:
clip_enabled_flags[index] = 1;
clip_distance_factors[index] = -1.f;
break;
}
}
memcpy(buffer, data_block, 2 * 8 * sizeof(u32));
}
/**
 * Fill buffer with vertex program constants.
 * Buffer must be at least 468 float4 wide (the full transform constants register file).
 */
void thread::fill_vertex_program_constants_data(void* buffer, const std::span<const u16>& reloc_table)
{
if (!reloc_table.empty()) [[ likely ]]
{
char* dst = reinterpret_cast<char*>(buffer);
for (const auto& index : reloc_table)
{
utils::stream_vector_from_memory(dst, &rsx::method_registers.transform_constants[index]);
dst += 16;
}
}
else
{
memcpy(buffer, rsx::method_registers.transform_constants.data(), 468 * 4 * sizeof(float));
}
}
void thread::fill_fragment_state_buffer(void* buffer, const RSXFragmentProgram& /*fragment_program*/)
{
	// Packs ROP control bits, fog parameters and window-position (wpos)
	// coefficients into 8 u32s consumed by the fragment shader.
	ROP_control_t rop_control{};

	if (rsx::method_registers.alpha_test_enabled())
	{
		const u32 alpha_func = static_cast<u32>(rsx::method_registers.alpha_func());
		rop_control.set_alpha_test_func(alpha_func);
		rop_control.enable_alpha_test();
	}

	if (rsx::method_registers.polygon_stipple_enabled())
	{
		rop_control.enable_polygon_stipple();
	}

	// Software path only taken when the backend lacks hardware A2C support
	if (rsx::method_registers.msaa_alpha_to_coverage_enabled() && !backend_config.supports_hw_a2c)
	{
		// TODO: Properly support alpha-to-coverage and alpha-to-one behavior in shaders
		// Alpha values generate a coverage mask for order independent blending
		// Requires hardware AA to work properly (or just fragment sample stage in fragment shaders)
		// Simulated using combined alpha blend and alpha test
		rop_control.enable_alpha_to_coverage();
		if (rsx::method_registers.msaa_sample_mask())
		{
			rop_control.enable_MSAA_writes();
		}

		// Sample configuration bits
		switch (rsx::method_registers.surface_antialias())
		{
		case rsx::surface_antialiasing::center_1_sample:
			break;
		case rsx::surface_antialiasing::diagonal_centered_2_samples:
			rop_control.set_msaa_control(1u);
			break;
		default:
			rop_control.set_msaa_control(3u);
			break;
		}
	}

	const f32 fog0 = rsx::method_registers.fog_params_0();
	const f32 fog1 = rsx::method_registers.fog_params_1();
	const u32 fog_mode = static_cast<u32>(rsx::method_registers.fog_equation());

	// Check if framebuffer is actually an XRGB format and not a WZYX format
	switch (rsx::method_registers.surface_color())
	{
	case rsx::surface_color_format::w16z16y16x16:
	case rsx::surface_color_format::w32z32y32x32:
	case rsx::surface_color_format::x32:
		// These behave very differently from "normal" formats.
		break;
	default:
		// Integer framebuffer formats.
		rop_control.enable_framebuffer_INT();

		// Check if we want sRGB conversion.
		if (rsx::method_registers.framebuffer_srgb_enabled())
		{
			rop_control.enable_framebuffer_sRGB();
		}
		break;
	}

	// Generate wpos coefficients
	// wpos equation is now as follows:
	// wpos.y = (frag_coord / resolution_scale) * ((window_origin!=top)?-1.: 1.) + ((window_origin!=top)? window_height : 0)
	// wpos.x = (frag_coord / resolution_scale)
	// wpos.zw = frag_coord.zw
	const auto window_origin = rsx::method_registers.shader_window_origin();
	const u32 window_height = rsx::method_registers.shader_window_height();
	// Small surfaces (below the min scalable dimension) are never upscaled
	const f32 resolution_scale = (window_height <= static_cast<u32>(g_cfg.video.min_scalable_dimension)) ? 1.f : rsx::get_resolution_scale();
	const f32 wpos_scale = (window_origin == rsx::window_origin::top) ? (1.f / resolution_scale) : (-1.f / resolution_scale);
	const f32 wpos_bias = (window_origin == rsx::window_origin::top) ? 0.f : window_height;
	const f32 alpha_ref = rsx::method_registers.alpha_ref();

	// Emit two 16-byte rows: {fog0, fog1, rop_control, alpha_ref} then {0, fog_mode, wpos_scale, wpos_bias}
	u32 *dst = static_cast<u32*>(buffer);
	utils::stream_vector(dst, std::bit_cast<u32>(fog0), std::bit_cast<u32>(fog1), rop_control.value, std::bit_cast<u32>(alpha_ref));
	utils::stream_vector(dst + 4, 0u, fog_mode, std::bit_cast<u32>(wpos_scale), std::bit_cast<u32>(wpos_bias));
}
u64 thread::timestamp()
{
	// Returns a strictly increasing nanosecond timestamp derived from the guest
	// timebase. Calls landing on the same timebase tick are disambiguated with
	// small fixed increments tracked in timestamp_subvalue.
	const u64 freq = sys_time_get_timebase_frequency();

	auto get_time_ns = [freq]()
	{
		const u64 t = get_timebased_time();
		// Split into whole-second and remainder parts to avoid u64 overflow
		return (t / freq * 1'000'000'000 + t % freq * 1'000'000'000 / freq);
	};

	const u64 t = get_time_ns();

	if (t != timestamp_ctrl)
	{
		// Clock advanced since the previous call - reset the sub-tick counter
		timestamp_ctrl = t;
		timestamp_subvalue = 0;
		return t;
	}

	// Check if we passed the limit of what fixed increments is legal for
	// Wait for the next time value reported if we passed the limit
	if ((1'000'000'000 / freq) - timestamp_subvalue <= 2)
	{
		// Sub-tick budget exhausted - spin until the clock actually advances
		u64 now = get_time_ns();

		for (; t == now; now = get_time_ns())
		{
			utils::pause();
		}

		timestamp_ctrl = now;
		timestamp_subvalue = 0;
		return now;
	}

	// Same tick as before - hand out a unique value within this tick
	timestamp_subvalue += 2;
	return t + timestamp_subvalue;
}
std::span<const std::byte> thread::get_raw_index_array(const draw_clause& draw_indexed_clause) const
{
	// Returns a non-owning view over the raw index data for an indexed draw,
	// either from the immediate-mode push buffer or from guest memory.
	if (!element_push_buffer.empty()) [[ unlikely ]]
	{
		// Indices provided via immediate mode
		return {reinterpret_cast<const std::byte*>(element_push_buffer.data()), ::narrow<u32>(element_push_buffer.size() * sizeof(u32))};
	}

	const rsx::index_array_type type = rsx::method_registers.index_type();
	const u32 type_size = get_index_type_size(type);

	// Force aligned indices as realhw
	// (0 - type_size) acts as a mask rounding the address down to a type_size boundary
	const u32 address = (0 - type_size) & get_address(rsx::method_registers.index_array_address(), rsx::method_registers.index_array_location());

	const u32 first = draw_indexed_clause.min_index();
	const u32 count = draw_indexed_clause.get_elements_count();

	const auto ptr = vm::_ptr<const std::byte>(address);
	return { ptr + first * type_size, count * type_size };
}
std::variant<draw_array_command, draw_indexed_array_command, draw_inlined_array>
thread::get_draw_command(const rsx::rsx_state& state) const
{
	// Map the current draw clause kind onto its command variant.
	switch (rsx::method_registers.current_draw_clause.command)
	{
	case rsx::draw_command::indexed:
		// Most common case: indexed geometry carries its raw index view
		return draw_indexed_array_command
		{
			get_raw_index_array(state.current_draw_clause)
		};
	case rsx::draw_command::array:
		return draw_array_command{};
	case rsx::draw_command::inlined_array:
		return draw_inlined_array{};
	default:
		fmt::throw_exception("ill-formed draw command");
	}
}
void thread::do_local_task(FIFO::state state)
{
	// Services backend-local housekeeping on the RSX thread: pending emu flips,
	// invalidated memory ranges, DMA control overrides and pipe flush requests.
	m_eng_interrupt_mask.clear(rsx::backend_interrupt);

	if (async_flip_requested & flip_request::emu_requested)
	{
		// NOTE: This has to be executed immediately
		// Delaying this operation can cause desync due to the delay in firing the flip event
		handle_emu_flip(async_flip_buffer);
	}

	if (state != FIFO::state::lock_wait)
	{
		// Cheap lock-free pre-check before taking m_mtx_task
		if (!in_begin_end && atomic_storage<u32>::load(m_invalidated_memory_range.end) != 0)
		{
			std::lock_guard lock(m_mtx_task);

			if (m_invalidated_memory_range.valid())
			{
				handle_invalidated_memory_range();
			}
		}

		if (m_eng_interrupt_mask & rsx::dma_control_interrupt && !is_stopped())
		{
			// Atomically consume a pending GET/PUT override (umax = none pending)
			if (const u64 get_put = new_get_put.exchange(u64{umax});
				get_put != umax)
			{
				// Publish the new PUT to guest-visible DMA control, then rewind the FIFO
				vm::_ref<atomic_be_t<u64>>(dma_address + ::offset32(&RsxDmaControl::put)).release(get_put);
				fifo_ctrl->set_get(static_cast<u32>(get_put));
				fifo_ctrl->abort();
				fifo_ret_addr = RSX_CALL_STACK_EMPTY;
				last_known_code_start = static_cast<u32>(get_put);
				sync_point_request.release(true);
			}

			m_eng_interrupt_mask.clear(rsx::dma_control_interrupt);
		}
	}

	if (m_eng_interrupt_mask & rsx::pipe_flush_interrupt)
	{
		sync();
	}

	if (is_stopped())
	{
		// Shutting down: flush the entire RSX-visible address range
		std::lock_guard lock(m_mtx_task);
		m_invalidated_memory_range = utils::address_range::start_end(0x2 << 28, constants::local_mem_base + local_mem_size - 1);
		handle_invalidated_memory_range();
	}
}
std::array<u32, 4> thread::get_color_surface_addresses() const
{
u32 offset_color[] =
{
rsx::method_registers.surface_offset(0),
rsx::method_registers.surface_offset(1),
rsx::method_registers.surface_offset(2),
rsx::method_registers.surface_offset(3),
};
u32 context_dma_color[] =
{
rsx::method_registers.surface_dma(0),
rsx::method_registers.surface_dma(1),
rsx::method_registers.surface_dma(2),
rsx::method_registers.surface_dma(3),
};
return
{
rsx::get_address(offset_color[0], context_dma_color[0]),
rsx::get_address(offset_color[1], context_dma_color[1]),
rsx::get_address(offset_color[2], context_dma_color[2]),
rsx::get_address(offset_color[3], context_dma_color[3]),
};
}
u32 thread::get_zeta_surface_address() const
{
	// Translate the depth (zeta) surface offset through its DMA context
	const u32 offset = rsx::method_registers.surface_z_offset();
	const u32 dma = rsx::method_registers.surface_z_dma();
	return rsx::get_address(offset, dma);
}
void thread::get_framebuffer_layout(rsx::framebuffer_creation_context context, framebuffer_layout &layout)
{
	// Decodes the RSX surface registers into a framebuffer_layout, sanitizing
	// misconfigured pitch/width combinations observed in real titles. On a
	// usable configuration rtt_config_valid is set; layout.ignore_change stays
	// true when the resulting framebuffer matches the currently bound one.
	layout = {};
	layout.ignore_change = true;

	layout.width = rsx::method_registers.surface_clip_width();
	layout.height = rsx::method_registers.surface_clip_height();

	m_graphics_state.clear(rsx::rtt_config_contested | rsx::rtt_config_valid);
	m_current_framebuffer_context = context;

	if (layout.width == 0 || layout.height == 0)
	{
		rsx_log.trace("Invalid framebuffer setup, w=%d, h=%d", layout.width, layout.height);
		return;
	}

	//const u16 clip_x = rsx::method_registers.surface_clip_origin_x();
	//const u16 clip_y = rsx::method_registers.surface_clip_origin_y();

	layout.color_addresses = get_color_surface_addresses();
	layout.zeta_address = get_zeta_surface_address();
	layout.zeta_pitch = rsx::method_registers.surface_z_pitch();
	layout.color_pitch =
	{
		rsx::method_registers.surface_pitch(0),
		rsx::method_registers.surface_pitch(1),
		rsx::method_registers.surface_pitch(2),
		rsx::method_registers.surface_pitch(3),
	};

	layout.color_format = rsx::method_registers.surface_color();
	layout.depth_format = rsx::method_registers.surface_depth_fmt();
	layout.target = rsx::method_registers.surface_color_target();

	const auto mrt_buffers = rsx::utility::get_rtt_indexes(layout.target);
	const auto aa_mode = rsx::method_registers.surface_antialias();
	// AA multiplies the stored surface dimensions per axis
	const u32 aa_factor_u = (aa_mode == rsx::surface_antialiasing::center_1_sample) ? 1 : 2;
	const u32 aa_factor_v = (aa_mode == rsx::surface_antialiasing::center_1_sample || aa_mode == rsx::surface_antialiasing::diagonal_centered_2_samples) ? 1 : 2;
	const u8 sample_count = get_format_sample_count(aa_mode);

	const auto depth_texel_size = get_format_block_size_in_bytes(layout.depth_format) * aa_factor_u;
	const auto color_texel_size = get_format_block_size_in_bytes(layout.color_format) * aa_factor_u;
	const bool stencil_test_enabled = is_depth_stencil_format(layout.depth_format) && rsx::method_registers.stencil_test_enabled();
	const bool depth_test_enabled = rsx::method_registers.depth_test_enabled();

	// Check write masks
	layout.zeta_write_enabled = (depth_test_enabled && rsx::method_registers.depth_write_enabled());
	if (!layout.zeta_write_enabled && stencil_test_enabled)
	{
		// Check if stencil data is modified
		auto mask = rsx::method_registers.stencil_mask();
		bool active_write_op = (rsx::method_registers.stencil_op_zpass() != rsx::stencil_op::keep ||
			rsx::method_registers.stencil_op_fail() != rsx::stencil_op::keep ||
			rsx::method_registers.stencil_op_zfail() != rsx::stencil_op::keep);

		if ((!mask || !active_write_op) && rsx::method_registers.two_sided_stencil_test_enabled())
		{
			// Front face did not prove a write; check the back face state too
			mask |= rsx::method_registers.back_stencil_mask();
			active_write_op |= (rsx::method_registers.back_stencil_op_zpass() != rsx::stencil_op::keep ||
				rsx::method_registers.back_stencil_op_fail() != rsx::stencil_op::keep ||
				rsx::method_registers.back_stencil_op_zfail() != rsx::stencil_op::keep);
		}

		layout.zeta_write_enabled = (mask && active_write_op);
	}

	// NOTE: surface_target_a is index 1 but is not MRT since only one surface is active
	bool color_write_enabled = false;
	for (uint i = 0; i < mrt_buffers.size(); ++i)
	{
		if (rsx::method_registers.color_write_enabled(i))
		{
			const auto real_index = mrt_buffers[i];
			layout.color_write_enabled[real_index] = true;
			color_write_enabled = true;
		}
	}

	// Determine which attachments the current operation can actually touch
	bool depth_buffer_unused = false, color_buffer_unused = false;

	switch (context)
	{
	case rsx::framebuffer_creation_context::context_clear_all:
		break;
	case rsx::framebuffer_creation_context::context_clear_depth:
		color_buffer_unused = true;
		break;
	case rsx::framebuffer_creation_context::context_clear_color:
		depth_buffer_unused = true;
		break;
	case rsx::framebuffer_creation_context::context_draw:
		// NOTE: As with all other hw, depth/stencil writes involve the corresponding depth/stencil test, i.e No test = No write
		// NOTE: Depth test is not really using the memory if its set to always or never
		// TODO: Perform similar checks for stencil test
		if (!stencil_test_enabled)
		{
			if (!depth_test_enabled)
			{
				depth_buffer_unused = true;
			}
			else if (!rsx::method_registers.depth_write_enabled())
			{
				// Depth test is enabled but depth write is disabled
				switch (rsx::method_registers.depth_func())
				{
				default:
					break;
				case rsx::comparison_function::never:
				case rsx::comparison_function::always:
					// No access to depth buffer memory
					depth_buffer_unused = true;
					break;
				}
			}

			if (depth_buffer_unused) [[unlikely]]
			{
				// Check if depth bounds is active. Depth bounds test does NOT need depth test to be enabled to access the Z buffer
				// Bind Z buffer in read mode for bounds check in this case
				if (rsx::method_registers.depth_bounds_test_enabled() &&
					(rsx::method_registers.depth_bounds_min() > 0.f || rsx::method_registers.depth_bounds_max() < 1.f))
				{
					depth_buffer_unused = false;
				}
			}
		}

		color_buffer_unused = !color_write_enabled || layout.target == rsx::surface_target::none;
		if (color_buffer_unused || depth_buffer_unused)
		{
			m_graphics_state.set(rsx::rtt_config_contested);
		}
		break;
	default:
		fmt::throw_exception("Unknown framebuffer context 0x%x", static_cast<u32>(context));
	}

	// Swizzled render does tight packing of bytes
	bool packed_render = false;
	u32 minimum_color_pitch = 64u;
	u32 minimum_zeta_pitch = 64u;

	switch (layout.raster_type = rsx::method_registers.surface_type())
	{
	default:
		rsx_log.error("Unknown raster mode 0x%x", static_cast<u32>(layout.raster_type));
		[[fallthrough]];
	case rsx::surface_raster_type::linear:
		break;
	case rsx::surface_raster_type::swizzle:
		packed_render = true;
		break;
	}

	if (!packed_render)
	{
		// Well, this is a write operation either way (clearing or drawing)
		// We can deduce a minimum pitch for which this operation is guaranteed to require by checking for the lesser of scissor or clip
		const u32 write_limit_x = std::min<u32>(layout.width, rsx::method_registers.scissor_origin_x() + rsx::method_registers.scissor_width());

		minimum_color_pitch = color_texel_size * write_limit_x;
		minimum_zeta_pitch = depth_texel_size * write_limit_x;

		// Check for size fit and attempt to correct incorrect inputs.
		// BLUS30072 is misconfigured here and renders fine on PS3. The width fails to account for AA being active in that engine.
		u16 corrected_width = umax;
		std::vector<u32*> pitch_fixups;
		if (!depth_buffer_unused)
		{
			if (layout.zeta_pitch < minimum_zeta_pitch)
			{
				// Observed in CoD3 where the depth buffer is clearly misconfigured.
				if (layout.zeta_pitch > 64)
				{
					// Derive the real width from the pitch; the pitch itself gets
					// recomputed below once the corrected width is known (via pitch_fixups)
					corrected_width = layout.zeta_pitch / depth_texel_size;
					layout.zeta_pitch = depth_texel_size;
					pitch_fixups.push_back(&layout.zeta_pitch);
				}
				else
				{
					rsx_log.warning("Misconfigured surface could not fit a depth buffer. Dropping.");
					layout.zeta_address = 0;
				}
			}
			else if (layout.width * depth_texel_size > layout.zeta_pitch)
			{
				// This is ok, misconfigured raster dimensions, but we're only writing the pitch as determined by the scissor
				corrected_width = layout.zeta_pitch / depth_texel_size;
			}
		}

		if (!color_buffer_unused)
		{
			for (const auto& index : rsx::utility::get_rtt_indexes(layout.target))
			{
				if (layout.color_pitch[index] < minimum_color_pitch)
				{
					if (layout.color_pitch[index] > 64)
					{
						corrected_width = std::min<u16>(corrected_width, layout.color_pitch[index] / color_texel_size);
						layout.color_pitch[index] = color_texel_size;
						pitch_fixups.push_back(&layout.color_pitch[index]);
					}
					else
					{
						rsx_log.warning("Misconfigured surface could not fit color buffer %d. Dropping.", index);
						layout.color_addresses[index] = 0;
					}

					continue;
				}

				if (layout.width * color_texel_size > layout.color_pitch[index])
				{
					// This is ok, misconfigured raster dimensions, but we're only writing the pitch as determined by the scissor
					corrected_width = std::min<u16>(corrected_width, layout.color_pitch[index] / color_texel_size);
				}
			}
		}

		if (corrected_width != umax)
		{
			// Apply the corrected width and rebuild the deferred pitch values
			// (each fixup currently holds just the texel size)
			layout.width = corrected_width;

			for (auto& value : pitch_fixups)
			{
				*value = *value * layout.width;
			}
		}
	}

	if (depth_buffer_unused)
	{
		layout.zeta_address = 0;
	}
	else if (packed_render)
	{
		layout.actual_zeta_pitch = (layout.width * depth_texel_size);
	}
	else
	{
		layout.actual_zeta_pitch = layout.zeta_pitch;
	}

	// Final validation pass over the color targets
	for (const auto &index : rsx::utility::get_rtt_indexes(layout.target))
	{
		if (color_buffer_unused)
		{
			layout.color_addresses[index] = 0;
			continue;
		}

		if (layout.color_pitch[index] < minimum_color_pitch)
		{
			// Unlike the depth buffer, when given a color target we know it is intended to be rendered to
			rsx_log.warning("Framebuffer setup error: Color target failed pitch check, Pitch=[%d, %d, %d, %d] + %d, target=%d, context=%d",
				layout.color_pitch[0], layout.color_pitch[1], layout.color_pitch[2], layout.color_pitch[3],
				layout.zeta_pitch, static_cast<u32>(layout.target), static_cast<u32>(context));

			// Some games (COD4) are buggy and set incorrect width + AA + pitch combo. Force fit in such scenarios.
			if (layout.color_pitch[index] > 64)
			{
				layout.width = layout.color_pitch[index] / color_texel_size;
			}
			else
			{
				layout.color_addresses[index] = 0;
				continue;
			}
		}

		if (layout.color_addresses[index] == layout.zeta_address)
		{
			rsx_log.warning("Framebuffer at 0x%X has aliasing color/depth targets, color_index=%d, zeta_pitch = %d, color_pitch=%d, context=%d",
				layout.zeta_address, index, layout.zeta_pitch, layout.color_pitch[index], static_cast<u32>(context));

			m_graphics_state.set(rsx::rtt_config_contested);

			// TODO: Research clearing both depth AND color
			// TODO: If context is creation_draw, deal with possibility of a lost buffer clear
			if (depth_test_enabled || stencil_test_enabled || (!layout.color_write_enabled[index] && layout.zeta_write_enabled))
			{
				// Use address for depth data
				layout.color_addresses[index] = 0;
				continue;
			}
			else
			{
				// Use address for color data
				layout.zeta_address = 0;
			}
		}

		ensure(layout.color_addresses[index]);

		const auto packed_pitch = (layout.width * color_texel_size);
		if (packed_render)
		{
			layout.actual_color_pitch[index] = packed_pitch;
		}
		else
		{
			layout.actual_color_pitch[index] = layout.color_pitch[index];
		}

		m_graphics_state.set(rsx::rtt_config_valid);
	}

	if (!m_graphics_state.test(rsx::rtt_config_valid) && !layout.zeta_address)
	{
		rsx_log.warning("Framebuffer setup failed. Draw calls may have been lost");
		return;
	}

	// At least one attachment exists
	m_graphics_state.set(rsx::rtt_config_valid);

	// Window (raster) offsets
	const auto window_offset_x = rsx::method_registers.window_offset_x();
	const auto window_offset_y = rsx::method_registers.window_offset_y();
	const auto window_clip_width = rsx::method_registers.window_clip_horizontal();
	const auto window_clip_height = rsx::method_registers.window_clip_vertical();

	if (window_offset_x || window_offset_y)
	{
		// Window offset is what affects the raster position!
		// Tested with Turbo: Super stunt squad that only changes the window offset to declare new framebuffers
		// Sampling behavior clearly indicates the addresses are expected to have changed
		if (auto clip_type = rsx::method_registers.window_clip_type())
			rsx_log.error("Unknown window clip type 0x%X", clip_type);

		for (const auto &index : rsx::utility::get_rtt_indexes(layout.target))
		{
			if (layout.color_addresses[index])
			{
				const u32 window_offset_bytes = (layout.actual_color_pitch[index] * window_offset_y) + (color_texel_size * window_offset_x);
				layout.color_addresses[index] += window_offset_bytes;
			}
		}

		if (layout.zeta_address)
		{
			layout.zeta_address += (layout.actual_zeta_pitch * window_offset_y) + (depth_texel_size * window_offset_x);
		}
	}

	if ((window_clip_width && window_clip_width < layout.width) ||
		(window_clip_height && window_clip_height < layout.height))
	{
		rsx_log.error("Unexpected window clip dimensions: window_clip=%dx%d, surface_clip=%dx%d",
			window_clip_width, window_clip_height, layout.width, layout.height);
	}

	layout.aa_mode = aa_mode;
	layout.aa_factors[0] = aa_factor_u;
	layout.aa_factors[1] = aa_factor_v;

	// Compare against the currently bound surfaces to detect a no-op rebind
	bool really_changed = false;

	for (u8 i = 0; i < rsx::limits::color_buffers_count; ++i)
	{
		if (m_surface_info[i].address != layout.color_addresses[i])
		{
			really_changed = true;
			break;
		}

		if (layout.color_addresses[i])
		{
			if (m_surface_info[i].width != layout.width ||
				m_surface_info[i].height != layout.height ||
				m_surface_info[i].color_format != layout.color_format ||
				m_surface_info[i].samples != sample_count)
			{
				really_changed = true;
				break;
			}
		}
	}

	if (!really_changed)
	{
		if (layout.zeta_address == m_depth_surface_info.address &&
			layout.depth_format == m_depth_surface_info.depth_format &&
			sample_count == m_depth_surface_info.samples)
		{
			// Same target is reused
			return;
		}
	}

	layout.ignore_change = false;
}
void thread::on_framebuffer_options_changed(u32 opt)
{
	// Reacts to changes of individual NV4097 render-state registers that can
	// affect the cached framebuffer layout. If a previously-unused attachment
	// becomes live, rtt_config_dirty is raised to force a full layout rebuild.
	if (m_graphics_state & rsx::rtt_config_dirty)
	{
		// Nothing to do
		return;
	}

	// Recompute zeta_write_enabled from the depth test/write registers
	auto evaluate_depth_buffer_state = [&]()
	{
		m_framebuffer_layout.zeta_write_enabled =
			(rsx::method_registers.depth_test_enabled() && rsx::method_registers.depth_write_enabled());
	};

	// Upgrade zeta_write_enabled if stencil ops can modify the buffer
	auto evaluate_stencil_buffer_state = [&]()
	{
		if (!m_framebuffer_layout.zeta_write_enabled &&
			rsx::method_registers.stencil_test_enabled() &&
			is_depth_stencil_format(m_framebuffer_layout.depth_format))
		{
			// Check if stencil data is modified
			auto mask = rsx::method_registers.stencil_mask();
			bool active_write_op = (rsx::method_registers.stencil_op_zpass() != rsx::stencil_op::keep ||
				rsx::method_registers.stencil_op_fail() != rsx::stencil_op::keep ||
				rsx::method_registers.stencil_op_zfail() != rsx::stencil_op::keep);

			if ((!mask || !active_write_op) && rsx::method_registers.two_sided_stencil_test_enabled())
			{
				// Front face inconclusive; also consider the back face registers
				mask |= rsx::method_registers.back_stencil_mask();
				active_write_op |= (rsx::method_registers.back_stencil_op_zpass() != rsx::stencil_op::keep ||
					rsx::method_registers.back_stencil_op_fail() != rsx::stencil_op::keep ||
					rsx::method_registers.back_stencil_op_zfail() != rsx::stencil_op::keep);
			}

			m_framebuffer_layout.zeta_write_enabled = (mask && active_write_op);
		}
	};

	// Refresh per-target color write masks; returns true if any target is written
	auto evaluate_color_buffer_state = [&]() -> bool
	{
		const auto mrt_buffers = rsx::utility::get_rtt_indexes(m_framebuffer_layout.target);
		bool any_found = false;

		for (uint i = 0; i < mrt_buffers.size(); ++i)
		{
			if (rsx::method_registers.color_write_enabled(i))
			{
				const auto real_index = mrt_buffers[i];
				m_framebuffer_layout.color_write_enabled[real_index] = true;
				any_found = true;
			}
		}

		return any_found;
	};

	// Returns true if a currently-dropped depth buffer is now required
	auto evaluate_depth_buffer_contested = [&]()
	{
		if (m_framebuffer_layout.zeta_address) [[likely]]
		{
			// Nothing to do, depth buffer already exists
			return false;
		}

		// Check if depth read/write is enabled
		if (m_framebuffer_layout.zeta_write_enabled ||
			rsx::method_registers.depth_test_enabled())
		{
			return true;
		}

		// Check if stencil read is enabled
		if (is_depth_stencil_format(m_framebuffer_layout.depth_format) &&
			rsx::method_registers.stencil_test_enabled())
		{
			return true;
		}

		return false;
	};

	switch (opt)
	{
	case NV4097_SET_DEPTH_TEST_ENABLE:
	case NV4097_SET_DEPTH_MASK:
	case NV4097_SET_DEPTH_FUNC:
	{
		evaluate_depth_buffer_state();

		if (m_graphics_state.test(rsx::rtt_config_contested) && evaluate_depth_buffer_contested())
		{
			m_graphics_state.set(rsx::rtt_config_dirty);
		}
		break;
	}
	case NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE:
	case NV4097_SET_STENCIL_TEST_ENABLE:
	case NV4097_SET_STENCIL_MASK:
	case NV4097_SET_STENCIL_OP_ZPASS:
	case NV4097_SET_STENCIL_OP_FAIL:
	case NV4097_SET_STENCIL_OP_ZFAIL:
	case NV4097_SET_BACK_STENCIL_MASK:
	case NV4097_SET_BACK_STENCIL_OP_ZPASS:
	case NV4097_SET_BACK_STENCIL_OP_FAIL:
	case NV4097_SET_BACK_STENCIL_OP_ZFAIL:
	{
		// Stencil takes a back seat to depth buffer stuff
		evaluate_depth_buffer_state();

		if (!m_framebuffer_layout.zeta_write_enabled)
		{
			evaluate_stencil_buffer_state();
		}

		if (m_graphics_state.test(rsx::rtt_config_contested) && evaluate_depth_buffer_contested())
		{
			m_graphics_state.set(rsx::rtt_config_dirty);
		}
		break;
	}
	case NV4097_SET_COLOR_MASK:
	case NV4097_SET_COLOR_MASK_MRT:
	{
		if (!m_graphics_state.test(rsx::rtt_config_contested)) [[likely]]
		{
			// Update write masks and continue
			evaluate_color_buffer_state();
		}
		else
		{
			// Detect a transition from "no color writes" to "some color writes"
			bool old_state = false;
			for (const auto& enabled : m_framebuffer_layout.color_write_enabled)
			{
				if (old_state = enabled; old_state) break;
			}

			const auto new_state = evaluate_color_buffer_state();
			if (!old_state && new_state)
			{
				// Color buffers now in use
				m_graphics_state.set(rsx::rtt_config_dirty);
			}
		}
		break;
	}
	default:
		rsx_log.fatal("Unhandled framebuffer option changed 0x%x", opt);
	}
}
bool thread::get_scissor(areau& region, bool clip_viewport)
{
	// Computes the active scissor rectangle (optionally intersected with the
	// viewport) in resolution-scaled coordinates. Returns true when 'region'
	// was updated; false when the cached state is still valid or the scissor
	// degenerates to an empty area.
	if (!m_graphics_state.test(rsx::pipeline_state::scissor_config_state_dirty))
	{
		if (clip_viewport == m_graphics_state.test(rsx::pipeline_state::scissor_setup_clipped))
		{
			// Nothing to do
			return false;
		}
	}

	m_graphics_state.clear(rsx::pipeline_state::scissor_config_state_dirty | rsx::pipeline_state::scissor_setup_clipped);

	u16 x1, x2, y1, y2;

	u16 scissor_x = rsx::method_registers.scissor_origin_x();
	u16 scissor_w = rsx::method_registers.scissor_width();
	u16 scissor_y = rsx::method_registers.scissor_origin_y();
	u16 scissor_h = rsx::method_registers.scissor_height();

	if (clip_viewport)
	{
		u16 raster_x = rsx::method_registers.viewport_origin_x();
		u16 raster_w = rsx::method_registers.viewport_width();
		u16 raster_y = rsx::method_registers.viewport_origin_y();
		u16 raster_h = rsx::method_registers.viewport_height();

		// Get the minimum area between these two
		x1 = std::max(scissor_x, raster_x);
		y1 = std::max(scissor_y, raster_y);
		x2 = std::min(scissor_x + scissor_w, raster_x + raster_w);
		y2 = std::min(scissor_y + scissor_h, raster_y + raster_h);

		m_graphics_state |= rsx::pipeline_state::scissor_setup_clipped;
	}
	else
	{
		x1 = scissor_x;
		x2 = scissor_x + scissor_w;
		y1 = scissor_y;
		y2 = scissor_y + scissor_h;
	}

	// Empty rectangle or fully outside the window clip -> invalid scissor
	if (x2 <= x1 ||
		y2 <= y1 ||
		x1 >= rsx::method_registers.window_clip_horizontal() ||
		y1 >= rsx::method_registers.window_clip_vertical())
	{
		m_graphics_state |= rsx::pipeline_state::scissor_setup_invalid;
		m_graphics_state.clear(rsx::rtt_config_valid);
		return false;
	}

	if (m_graphics_state & rsx::pipeline_state::scissor_setup_invalid)
	{
		// Scissor recovered; re-validate the render target configuration
		m_graphics_state.clear(rsx::pipeline_state::scissor_setup_invalid);
		m_graphics_state.set(rsx::rtt_config_valid);
	}

	// Translate to backend (resolution-scaled) coordinates
	std::tie(region.x1, region.y1) = rsx::apply_resolution_scale<false>(x1, y1, m_framebuffer_layout.width, m_framebuffer_layout.height);
	std::tie(region.x2, region.y2) = rsx::apply_resolution_scale<true>(x2, y2, m_framebuffer_layout.width, m_framebuffer_layout.height);

	return true;
}
void thread::prefetch_fragment_program()
{
	// Re-analyses the fragment program ucode when it has been marked dirty and
	// refreshes current_fragment_program plus the dependent dirty flags.
	if (!m_graphics_state.test(rsx::pipeline_state::fragment_program_ucode_dirty))
	{
		return;
	}

	m_graphics_state.clear(rsx::pipeline_state::fragment_program_ucode_dirty);

	// Request for update of fragment constants if the program block is invalidated
	m_graphics_state |= rsx::pipeline_state::fragment_constants_dirty;

	const auto [program_offset, program_location] = method_registers.shader_program_address();
	// Remember the old texture mask to detect referenced-texture changes below
	const auto prev_textures_reference_mask = current_fp_metadata.referenced_textures_mask;

	auto data_ptr = vm::base(rsx::get_address(program_offset, program_location));
	current_fp_metadata = program_hash_util::fragment_program_utils::analyse_fragment_program(data_ptr);

	// The ucode proper starts program_start_offset bytes into the program block
	current_fragment_program.data = (static_cast<u8*>(data_ptr) + current_fp_metadata.program_start_offset);
	current_fragment_program.offset = program_offset + current_fp_metadata.program_start_offset;
	current_fragment_program.ucode_length = current_fp_metadata.program_ucode_length;
	current_fragment_program.total_length = current_fp_metadata.program_ucode_length + current_fp_metadata.program_start_offset;
	current_fragment_program.texture_state.import(current_fp_texture_state, current_fp_metadata.referenced_textures_mask);
	current_fragment_program.valid = true;

	if (!m_graphics_state.test(rsx::pipeline_state::fragment_program_state_dirty))
	{
		// Verify current texture state is valid
		for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
		{
			if (!(textures_ref & 1)) continue;

			if (m_textures_dirty[i])
			{
				m_graphics_state |= rsx::pipeline_state::fragment_program_state_dirty;
				break;
			}
		}
	}

	if (!m_graphics_state.test(rsx::pipeline_state::fragment_program_state_dirty) &&
		(prev_textures_reference_mask != current_fp_metadata.referenced_textures_mask))
	{
		// If different textures are used, upload their coefficients.
		// The texture parameters transfer routine is optimized and only writes data for textures consumed by the ucode.
		m_graphics_state |= rsx::pipeline_state::fragment_texture_state_dirty;
	}
}
// Analyses the vertex program ucode starting at the current transform program
// entry point. Only runs when the ucode-dirty flag is set. Fills
// current_vp_metadata / current_vertex_program and re-flags dependent state.
void thread::prefetch_vertex_program()
{
    if (!m_graphics_state.test(rsx::pipeline_state::vertex_program_ucode_dirty))
    {
        return;
    }

    m_graphics_state.clear(rsx::pipeline_state::vertex_program_ucode_dirty);

    // Reload transform constants unconditionally for now
    m_graphics_state |= rsx::pipeline_state::transform_constants_dirty;

    const u32 transform_program_start = rsx::method_registers.transform_program_start();
    current_vertex_program.data.reserve(512 * 4); // 512 instructions, 4 words each
    current_vertex_program.jump_table.clear();

    current_vp_metadata = program_hash_util::vertex_program_utils::analyse_vertex_program
    (
        method_registers.transform_program.data(),  // Input raw block
        transform_program_start,                    // Address of entry point
        current_vertex_program                      // [out] Program object
    );

    current_vertex_program.texture_state.import(current_vp_texture_state, current_vp_metadata.referenced_textures_mask);

    if (!m_graphics_state.test(rsx::pipeline_state::vertex_program_state_dirty))
    {
        // Verify current texture state is valid; any dirty referenced vertex
        // texture invalidates the cached vertex program state
        for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
        {
            if (!(textures_ref & 1)) continue;

            if (m_vertex_textures_dirty[i])
            {
                m_graphics_state |= rsx::pipeline_state::vertex_program_state_dirty;
                break;
            }
        }
    }
}
// Runs both ucode prefetch passes for the bound pipeline.
// Vertex analysis runs before fragment analysis; each pass is a no-op
// unless its respective ucode-dirty flag is set.
void thread::analyse_current_rsx_pipeline()
{
    prefetch_vertex_program();
    prefetch_fragment_program();
}
void thread::get_current_vertex_program(const std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::vertex_textures_count>& sampler_descriptors)
{
if (!m_graphics_state.test(rsx::pipeline_state::vertex_program_dirty))
{
return;
}
ensure(!m_graphics_state.test(rsx::pipeline_state::vertex_program_ucode_dirty));
current_vertex_program.output_mask = rsx::method_registers.vertex_attrib_output_mask();
current_vertex_program.ctrl = 0; // Reserved
for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
{
if (!(textures_ref & 1)) continue;
const auto &tex = rsx::method_registers.vertex_textures[i];
if (tex.enabled() && (current_vp_metadata.referenced_textures_mask & (1 << i)))
{
current_vp_texture_state.clear(i);
current_vp_texture_state.set_dimension(sampler_descriptors[i]->image_type, i);
if (backend_config.supports_hw_msaa &&
sampler_descriptors[i]->samples > 1)
{
current_vp_texture_state.multisampled_textures |= (1 << i);
}
}
}
current_vertex_program.texture_state.import(current_vp_texture_state, current_vp_metadata.referenced_textures_mask);
}
// Builds the vertex input layout for the current draw call: classifies each
// referenced attribute as persistent (array data in guest memory), transient
// (push buffer / register / inlined array) or unused, and groups array
// attributes that share a stride and overlapping base addresses into
// interleaved blocks for efficient upload.
void thread::analyse_inputs_interleaved(vertex_input_layout& result)
{
    const rsx_state& state = rsx::method_registers;
    const u32 input_mask = state.vertex_attrib_input_mask() & current_vp_metadata.referenced_inputs_mask;

    result.clear();
    result.attribute_mask = static_cast<u16>(input_mask);

    if (state.current_draw_clause.command == rsx::draw_command::inlined_array)
    {
        // Inlined arrays pack all enabled attributes into one interleaved stream
        interleaved_range_info& info = *result.alloc_interleaved_block();
        info.interleaved = true;

        for (u8 index = 0; index < rsx::limits::vertex_count; ++index)
        {
            auto &vinfo = state.vertex_arrays_info[index];
            result.attribute_placement[index] = attribute_buffer_placement::none;

            if (vinfo.size() > 0)
            {
                // Stride must be updated even if the stream is disabled
                info.attribute_stride += rsx::get_vertex_type_size_on_host(vinfo.type(), vinfo.size());
                info.locations.push_back({ index, false, 1 });

                if (input_mask & (1u << index))
                {
                    result.attribute_placement[index] = attribute_buffer_placement::transient;
                }
            }
            else if (state.register_vertex_info[index].size > 0 && input_mask & (1u << index))
            {
                // Reads from register
                result.referenced_registers.push_back(index);
                result.attribute_placement[index] = attribute_buffer_placement::transient;
            }
        }

        if (info.attribute_stride)
        {
            // At least one array feed must be enabled for vertex input
            result.interleaved_blocks.push_back(&info);
        }

        return;
    }

    const u32 frequency_divider_mask = rsx::method_registers.frequency_divider_operation_mask();
    result.interleaved_blocks.reserve(16);
    result.referenced_registers.reserve(16);

    // Iterate only attributes referenced by the vertex program
    for (auto [ref_mask, index] = std::tuple{ input_mask, u8(0) }; ref_mask; ++index, ref_mask >>= 1)
    {
        ensure(index < rsx::limits::vertex_count);

        if (!(ref_mask & 1u))
        {
            // Nothing to do, uninitialized
            continue;
        }

        // Always reset attribute placement by default
        result.attribute_placement[index] = attribute_buffer_placement::none;

        // Check for interleaving
        if (rsx::method_registers.current_draw_clause.is_immediate_draw &&
            rsx::method_registers.current_draw_clause.command != rsx::draw_command::indexed)
        {
            // NOTE: In immediate rendering mode, all vertex setup is ignored
            // Observed with GT5, immediate render bypasses array pointers completely, even falling back to fixed-function register defaults
            if (vertex_push_buffers[index].vertex_count > 1)
            {
                // Ensure consistent number of vertices per attribute.
                vertex_push_buffers[index].pad_to(vertex_push_buffers[0].vertex_count, false);

                // Read temp buffer (register array)
                std::pair<u8, u32> volatile_range_info = std::make_pair(index, static_cast<u32>(vertex_push_buffers[index].data.size() * sizeof(u32)));
                result.volatile_blocks.push_back(volatile_range_info);
                result.attribute_placement[index] = attribute_buffer_placement::transient;
            }
            else if (state.register_vertex_info[index].size > 0)
            {
                // Reads from register
                result.referenced_registers.push_back(index);
                result.attribute_placement[index] = attribute_buffer_placement::transient;
            }

            // Fall back to the default register value if no source is specified via register
            continue;
        }

        const auto& info = state.vertex_arrays_info[index];
        if (!info.size())
        {
            if (state.register_vertex_info[index].size > 0)
            {
                //Reads from register
                result.referenced_registers.push_back(index);
                result.attribute_placement[index] = attribute_buffer_placement::transient;
                continue;
            }
        }
        else
        {
            result.attribute_placement[index] = attribute_buffer_placement::persistent;
            const u32 base_address = info.offset() & 0x7fffffff; // High bit selects memory location, masked off here
            bool alloc_new_block = true;
            bool modulo = !!(frequency_divider_mask & (1 << index));

            // Try to merge this attribute into an existing interleaved block:
            // it must share the stride and lie within one stride of the block base
            for (auto &block : result.interleaved_blocks)
            {
                if (block->single_vertex)
                {
                    //Single vertex definition, continue
                    continue;
                }

                if (block->attribute_stride != info.stride())
                {
                    //Stride does not match, continue
                    continue;
                }

                if (base_address > block->base_offset)
                {
                    const u32 diff = base_address - block->base_offset;
                    if (diff > info.stride())
                    {
                        //Not interleaved, continue
                        continue;
                    }
                }
                else
                {
                    const u32 diff = block->base_offset - base_address;
                    if (diff > info.stride())
                    {
                        //Not interleaved, continue
                        continue;
                    }

                    //Matches, and this address is lower than existing
                    block->base_offset = base_address;
                }

                alloc_new_block = false;
                block->locations.push_back({ index, modulo, info.frequency() });
                block->interleaved = true;
                break;
            }

            if (alloc_new_block)
            {
                interleaved_range_info& block = *result.alloc_interleaved_block();
                block.base_offset = base_address;
                block.attribute_stride = info.stride();
                block.memory_location = info.offset() >> 31; // Location bit (local vs main memory)
                block.locations.reserve(16);
                block.locations.push_back({ index, modulo, info.frequency() });

                if (block.attribute_stride == 0)
                {
                    // Zero stride means a single repeated element, not an array
                    block.single_vertex = true;
                    block.attribute_stride = rsx::get_vertex_type_size_on_host(info.type(), info.size());
                }

                result.interleaved_blocks.push_back(&block);
            }
        }
    }

    for (auto &info : result.interleaved_blocks)
    {
        //Calculate real data address to be used during upload
        info->real_offset_address = rsx::get_address(rsx::get_vertex_offset_from_base(state.vertex_data_base_offset(), info->base_offset), info->memory_location);
    }
}
// Refreshes the non-ucode portion of the current fragment program descriptor:
// shader control bits, texcoord control mask and the per-texture-slot control
// word (depth compare, renormalization, sign conversion/gamma, MSAA handling).
// Precondition: the fragment ucode must already be analysed (not ucode-dirty).
void thread::get_current_fragment_program(const std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::fragment_textures_count>& sampler_descriptors)
{
    if (!m_graphics_state.test(rsx::pipeline_state::fragment_program_dirty))
    {
        return;
    }

    ensure(!m_graphics_state.test(rsx::pipeline_state::fragment_program_ucode_dirty));

    m_graphics_state.clear(rsx::pipeline_state::fragment_program_dirty);

    current_fragment_program.ctrl = rsx::method_registers.shader_control() & (CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS | CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT);
    current_fragment_program.texcoord_control_mask = rsx::method_registers.texcoord_control_mask();
    current_fragment_program.two_sided_lighting = rsx::method_registers.two_side_light_en();

    if (method_registers.current_draw_clause.classify_mode() == primitive_class::polygon)
    {
        if (!backend_config.supports_normalized_barycentrics)
        {
            current_fragment_program.ctrl |= RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION;
        }
    }
    else if (method_registers.point_sprite_enabled() &&
        method_registers.current_draw_clause.primitive == primitive_type::points)
    {
        // Set high word of the control mask to store point sprite control
        current_fragment_program.texcoord_control_mask |= u32(method_registers.point_sprite_control_mask()) << 16;
    }

    // Build a control word for every texture slot referenced by the ucode
    for (u32 textures_ref = current_fp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i)
    {
        if (!(textures_ref & 1)) continue;

        auto &tex = rsx::method_registers.fragment_textures[i];
        current_fp_texture_state.clear(i);

        if (tex.enabled() && sampler_descriptors[i]->format_class != RSX_FORMAT_CLASS_UNDEFINED)
        {
            // Copy scale + bias (3 components each) from the sampler descriptor
            std::memcpy(current_fragment_program.texture_params[i].scale, sampler_descriptors[i]->texcoord_xform.scale, 6 * sizeof(f32));
            current_fragment_program.texture_params[i].remap = tex.remap();

            m_graphics_state |= rsx::pipeline_state::fragment_texture_state_dirty;

            u32 texture_control = 0;
            current_fp_texture_state.set_dimension(sampler_descriptors[i]->image_type, i);

            if (sampler_descriptors[i]->texcoord_xform.clamp)
            {
                std::memcpy(current_fragment_program.texture_params[i].clamp_min, sampler_descriptors[i]->texcoord_xform.clamp_min, 4 * sizeof(f32));
                texture_control |= (1 << rsx::texture_control_bits::CLAMP_TEXCOORDS_BIT);
            }

            if (tex.alpha_kill_enabled())
            {
                //alphakill can be ignored unless a valid comparison function is set
                texture_control |= (1 << texture_control_bits::ALPHAKILL);
            }

            //const u32 texaddr = rsx::get_address(tex.offset(), tex.location());
            const u32 raw_format = tex.format();
            const u32 format = raw_format & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);

            if (raw_format & CELL_GCM_TEXTURE_UN)
            {
                if (tex.min_filter() == rsx::texture_minify_filter::nearest ||
                    tex.mag_filter() == rsx::texture_magnify_filter::nearest)
                {
                    // Subpixel offset so that (X + bias) * scale will round correctly.
                    // This is done to work around fdiv precision issues in some GPUs (NVIDIA)
                    // We apply the simplification where (x + bias) * z = xz + zbias here.
                    constexpr auto subpixel_bias = 0.01f;
                    current_fragment_program.texture_params[i].bias[0] += (subpixel_bias * current_fragment_program.texture_params[i].scale[0]);
                    current_fragment_program.texture_params[i].bias[1] += (subpixel_bias * current_fragment_program.texture_params[i].scale[1]);
                    current_fragment_program.texture_params[i].bias[2] += (subpixel_bias * current_fragment_program.texture_params[i].scale[2]);
                }
            }

            if (backend_config.supports_hw_msaa && sampler_descriptors[i]->samples > 1)
            {
                // MSAA sources need the shader to emulate filtering/wrapping itself
                current_fp_texture_state.multisampled_textures |= (1 << i);
                texture_control |= (static_cast<u32>(tex.zfunc()) << texture_control_bits::DEPTH_COMPARE_OP);
                texture_control |= (static_cast<u32>(tex.mag_filter() != rsx::texture_magnify_filter::nearest) << texture_control_bits::FILTERED_MAG);
                texture_control |= (static_cast<u32>(tex.min_filter() != rsx::texture_minify_filter::nearest) << texture_control_bits::FILTERED_MIN);
                texture_control |= (((tex.format() & CELL_GCM_TEXTURE_UN) >> 6) << texture_control_bits::UNNORMALIZED_COORDS);

                if (rsx::is_texcoord_wrapping_mode(tex.wrap_s()))
                {
                    texture_control |= (1 << texture_control_bits::WRAP_S);
                }

                if (rsx::is_texcoord_wrapping_mode(tex.wrap_t()))
                {
                    texture_control |= (1 << texture_control_bits::WRAP_T);
                }

                if (rsx::is_texcoord_wrapping_mode(tex.wrap_r()))
                {
                    texture_control |= (1 << texture_control_bits::WRAP_R);
                }
            }

            if (sampler_descriptors[i]->format_class != RSX_FORMAT_CLASS_COLOR)
            {
                // Depth-class texture bound to this slot
                switch (sampler_descriptors[i]->format_class)
                {
                case RSX_FORMAT_CLASS_DEPTH16_FLOAT:
                case RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32:
                    texture_control |= (1 << texture_control_bits::DEPTH_FLOAT);
                    break;
                default:
                    break;
                }

                switch (format)
                {
                case CELL_GCM_TEXTURE_A8R8G8B8:
                case CELL_GCM_TEXTURE_D8R8G8B8:
                {
                    // Emulate bitcast in shader
                    current_fp_texture_state.redirected_textures |= (1 << i);
                    const auto float_en = (sampler_descriptors[i]->format_class == RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)? 1 : 0;
                    texture_control |= (float_en << texture_control_bits::DEPTH_FLOAT);
                    break;
                }
                case CELL_GCM_TEXTURE_X16:
                {
                    // A simple way to quickly read DEPTH16 data without shadow comparison
                    break;
                }
                case CELL_GCM_TEXTURE_DEPTH16:
                case CELL_GCM_TEXTURE_DEPTH24_D8:
                case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
                case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
                {
                    // Natively supported Z formats with shadow comparison feature
                    const auto compare_mode = tex.zfunc();
                    if (!tex.alpha_kill_enabled() &&
                        compare_mode < rsx::comparison_function::always &&
                        compare_mode > rsx::comparison_function::never)
                    {
                        current_fp_texture_state.shadow_textures |= (1 << i);
                    }
                    break;
                }
                default:
                    rsx_log.error("Depth texture bound to pipeline with unexpected format 0x%X", format);
                }
            }
            else if (!backend_config.supports_hw_renormalization)
            {
                // Low-precision color formats need renormalization in the shader
                switch (format)
                {
                case CELL_GCM_TEXTURE_A1R5G5B5:
                case CELL_GCM_TEXTURE_A4R4G4B4:
                case CELL_GCM_TEXTURE_D1R5G5B5:
                case CELL_GCM_TEXTURE_R5G5B5A1:
                case CELL_GCM_TEXTURE_R5G6B5:
                case CELL_GCM_TEXTURE_R6G5B5:
                    texture_control |= (1 << texture_control_bits::RENORMALIZE);
                    break;
                default:
                    break;
                }
            }

            if (rsx::is_int8_remapped_format(format))
            {
                // Special operations applied to 8-bit formats such as gamma correction and sign conversion
                // NOTE: The unsigned_remap=bias flag being set flags the texture as being compressed normal (2n-1 / BX2) (UE3)
                // NOTE: The ARGB8_signed flag means to reinterpret the raw bytes as signed. This is different than unsigned_remap=bias which does range decompression.
                // This is a separate method of setting the format to signed mode without doing so per-channel
                // Precedence = SNORM > GAMMA > UNSIGNED_REMAP (See Resistance 3 for GAMMA/BX2 relationship, UE3 for BX2 effect)

                const u32 argb8_signed = tex.argb_signed();  // _SNROM
                const u32 gamma = tex.gamma() & ~argb8_signed;  // _SRGB
                const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL)? 0u : (~(gamma | argb8_signed) & 0xF); // _BX2
                u32 argb8_convert = gamma;

                // The options are mutually exclusive
                ensure((argb8_signed & gamma) == 0);
                ensure((argb8_signed & unsigned_remap) == 0);
                ensure((gamma & unsigned_remap) == 0);

                // Helper function to apply a per-channel mask based on an input mask
                const auto apply_sign_convert_mask = [&](u32 mask, u32 bit_offset)
                {
                    // TODO: Use actual remap mask to account for 0 and 1 overrides in default mapping
                    // TODO: Replace this clusterfuck of texture control with matrix transformation
                    const auto remap_ctrl = (tex.remap() >> 8) & 0xAA;
                    if (remap_ctrl == 0xAA)
                    {
                        argb8_convert |= (mask & 0xFu) << bit_offset;
                        return;
                    }

                    // Apply per-channel only where the remap control selects the source channel
                    if ((remap_ctrl & 0x03) == 0x02) argb8_convert |= (mask & 0x1u) << bit_offset;
                    if ((remap_ctrl & 0x0C) == 0x08) argb8_convert |= (mask & 0x2u) << bit_offset;
                    if ((remap_ctrl & 0x30) == 0x20) argb8_convert |= (mask & 0x4u) << bit_offset;
                    if ((remap_ctrl & 0xC0) == 0x80) argb8_convert |= (mask & 0x8u) << bit_offset;
                };

                if (argb8_signed)
                {
                    // Apply integer sign extension from uint8 to sint8 and renormalize
                    apply_sign_convert_mask(argb8_signed, texture_control_bits::SEXT_OFFSET);
                }

                if (unsigned_remap)
                {
                    // Apply sign expansion, compressed normal-map style (2n - 1)
                    apply_sign_convert_mask(unsigned_remap, texture_control_bits::EXPAND_OFFSET);
                }

                texture_control |= argb8_convert;
            }

            current_fragment_program.texture_params[i].control = texture_control;
        }
    }

    // Update texture configuration
    current_fragment_program.texture_state.import(current_fp_texture_state, current_fp_metadata.referenced_textures_mask);

    //Sanity checks
    if (current_fragment_program.ctrl & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT)
    {
        //Check that the depth stage is not disabled
        if (!rsx::method_registers.depth_test_enabled())
        {
            rsx_log.trace("FS exports depth component but depth test is disabled (INVALID_OPERATION)");
        }
    }
}
// Checks whether a guest memory write at [dst_offset, dst_offset + size)
// in the given DMA location overlaps the currently loaded fragment shader.
// If so, flags the fragment ucode as dirty so it is re-analysed.
// Returns true when an invalidation was triggered.
bool thread::invalidate_fragment_program(u32 dst_dma, u32 dst_offset, u32 size)
{
    // Nothing to invalidate if no fragment ucode has been analysed yet
    if (current_fragment_program.total_length == 0)
    {
        return false;
    }

    const auto [shader_offset, shader_dma] = rsx::method_registers.shader_program_address();

    // The write must target the same DMA location as the bound shader
    if ((dst_dma & CELL_GCM_LOCATION_MAIN) != shader_dma)
    {
        return false;
    }

    const auto write_range = address_range::start_length(dst_offset, size);
    const auto shader_range = address_range::start_length(shader_offset, current_fragment_program.total_length);

    if (!shader_range.overlaps(write_range))
    {
        // No range overlap
        return false;
    }

    // Data overlaps. Force ucode reload.
    m_graphics_state |= rsx::pipeline_state::fragment_program_ucode_dirty;
    return true;
}
// Restores the RSX pipeline to its default state: resets all method
// registers, re-evaluates zcull binding, reapplies the render-enable mode
// and marks the entire graphics state dirty for a full rebuild.
void thread::reset()
{
    rsx::method_registers.reset();
    check_zcull_status(false);
    nv4097::set_render_mode(m_ctx, 0, method_registers.registers[NV4097_SET_RENDER_ENABLE]);
    m_graphics_state |= pipeline_state::all_dirty;
}
// Attaches the RSX to its guest-side DMA control block and initializes
// flip/FIFO state.
// @param ctrlAddress Guest address of the RsxDmaControl structure.
void thread::init(u32 ctrlAddress)
{
    dma_address = ctrlAddress;
    ctrl = vm::_ptr<RsxDmaControl>(ctrlAddress);
    flip_status = CELL_GCM_DISPLAY_FLIP_STATUS_DONE;
    fifo_ret_addr = RSX_CALL_STACK_EMPTY;

    // Write 1 at device + 0x30 — presumably a "device ready" flag polled by
    // the guest driver; TODO confirm exact semantics of this register
    vm::write32(device_addr + 0x30, 1);
    std::memset(display_buffers, 0, sizeof(display_buffers));

    rsx_thread_running = true;
}
// Computes the upload heap requirements for the current draw call.
// Returns {persistent_bytes, volatile_bytes}: persistent memory holds array
// data read from guest memory, volatile memory holds transient sources
// (registers, push buffers, inlined arrays).
std::pair<u32, u32> thread::calculate_memory_requirements(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count)
{
    // Register-sourced attributes always land in volatile memory, 16 bytes each
    u32 volatile_size = ::size32(layout.referenced_registers) * 16u;
    u32 persistent_size = 0;

    if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::inlined_array)
    {
        // Inlined arrays are unrolled entirely into volatile memory
        for (const auto& block : layout.interleaved_blocks)
        {
            volatile_size += block->attribute_stride * vertex_count;
        }
    }
    else
    {
        // NOTE: Immediate commands can be index array only or both index array and vertex data
        // Check both - but only check volatile blocks if immediate_draw flag is set
        if (rsx::method_registers.current_draw_clause.is_immediate_draw)
        {
            for (const auto& [attribute_index, byte_size] : layout.volatile_blocks)
            {
                volatile_size += byte_size;
            }
        }

        persistent_size = layout.calculate_interleaved_memory_requirements(first_vertex, vertex_count);
    }

    return { persistent_size, volatile_size };
}
// Packs the per-attribute layout descriptors consumed by the vertex fetch
// shader. For each referenced input it computes the byte offset inside the
// upload heaps and encodes stride/divisor/type/size plus storage flags into
// two 32-bit words written to 'buffer'.
void thread::fill_vertex_layout_state(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count, s32* buffer, u32 persistent_offset_base, u32 volatile_offset_base)
{
    std::array<s32, 16> offset_in_block = {};
    u32 volatile_offset = volatile_offset_base;
    u32 persistent_offset = persistent_offset_base;

    //NOTE: Order is important! Transient layout is always push_buffers followed by register data
    if (rsx::method_registers.current_draw_clause.is_immediate_draw)
    {
        for (const auto &info : layout.volatile_blocks)
        {
            offset_in_block[info.first] = volatile_offset;
            volatile_offset += info.second;
        }
    }

    // Register-sourced attributes occupy 16 bytes each in volatile memory
    for (u8 index : layout.referenced_registers)
    {
        offset_in_block[index] = volatile_offset;
        volatile_offset += 16;
    }

    if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::inlined_array)
    {
        // Inlined array data is laid out sequentially after the transient data
        const auto &block = layout.interleaved_blocks[0];
        u32 inline_data_offset = volatile_offset;
        for (const auto& attrib : block->locations)
        {
            auto &info = rsx::method_registers.vertex_arrays_info[attrib.index];

            offset_in_block[attrib.index] = inline_data_offset;
            inline_data_offset += rsx::get_vertex_type_size_on_host(info.type(), info.size());
        }
    }
    else
    {
        // Persistent data: each interleaved block occupies a contiguous range
        for (const auto &block : layout.interleaved_blocks)
        {
            for (const auto& attrib : block->locations)
            {
                const u32 local_address = (rsx::method_registers.vertex_arrays_info[attrib.index].offset() & 0x7fffffff);
                offset_in_block[attrib.index] = persistent_offset + (local_address - block->base_offset);
            }

            const auto range = block->calculate_required_range(first_vertex, vertex_count);
            persistent_offset += block->attribute_stride * range.second;
        }
    }

    // Fill the data
    // Each descriptor is a pair of 32-bit words (attrib0, attrib1):
    // attrib0: [0-8)   attribute stride
    //          [8-24)  attribute divisor (frequency)
    //          [24-27) attribute type
    //          [27-30) attribute size
    // attrib1: [0-29)  starting offset in the upload heap
    //          bit 29  swap bytes flag
    //          bit 30  volatile storage flag
    //          bit 31  modulo enable flag
    const s32 default_frequency_mask = (1 << 8);
    const s32 swap_storage_mask = (1 << 29);
    const s32 volatile_storage_mask = (1 << 30);
    const s32 modulo_op_frequency_mask = smin;

    const u32 modulo_mask = rsx::method_registers.frequency_divider_operation_mask();
    const auto max_index = (first_vertex + vertex_count) - 1;

    for (u16 ref_mask = current_vp_metadata.referenced_inputs_mask, index = 0; ref_mask; ++index, ref_mask >>= 1)
    {
        if (!(ref_mask & 1u))
        {
            // Unused input, ignore this
            continue;
        }

        if (layout.attribute_placement[index] == attribute_buffer_placement::none)
        {
            // Referenced by the program but has no data source; zero the descriptor
            static constexpr u64 zero = 0;
            std::memcpy(buffer + index * 2, &zero, sizeof(zero));
            continue;
        }

        rsx::vertex_base_type type = {};
        s32 size = 0;
        s32 attrib0 = 0;
        s32 attrib1 = 0;

        if (layout.attribute_placement[index] == attribute_buffer_placement::transient)
        {
            if (rsx::method_registers.current_draw_clause.command == rsx::draw_command::inlined_array)
            {
                const auto &info = rsx::method_registers.vertex_arrays_info[index];

                if (!info.size())
                {
                    // Register
                    const auto& reginfo = rsx::method_registers.register_vertex_info[index];
                    type = reginfo.type;
                    size = reginfo.size;

                    attrib0 = rsx::get_vertex_type_size_on_host(type, size);
                }
                else
                {
                    // Array
                    type = info.type();
                    size = info.size();

                    attrib0 = layout.interleaved_blocks[0]->attribute_stride | default_frequency_mask;
                }
            }
            else
            {
                // Data is either from an immediate render or register input
                // Immediate data overrides register input

                if (rsx::method_registers.current_draw_clause.is_immediate_draw &&
                    vertex_push_buffers[index].vertex_count > 1)
                {
                    // Push buffer
                    const auto &info = vertex_push_buffers[index];
                    type = info.type;
                    size = info.size;

                    attrib0 = rsx::get_vertex_type_size_on_host(type, size) | default_frequency_mask;
                }
                else
                {
                    // Register
                    const auto& info = rsx::method_registers.register_vertex_info[index];
                    type = info.type;
                    size = info.size;

                    attrib0 = rsx::get_vertex_type_size_on_host(type, size);
                }
            }

            attrib1 |= volatile_storage_mask;
        }
        else
        {
            auto &info = rsx::method_registers.vertex_arrays_info[index];
            type = info.type();
            size = info.size();

            auto stride = info.stride();
            attrib0 = stride;

            if (stride > 0) //when stride is 0, input is not an array but a single element
            {
                const u32 frequency = info.frequency();
                switch (frequency)
                {
                case 0:
                case 1:
                {
                    attrib0 |= default_frequency_mask;
                    break;
                }
                default:
                {
                    if (modulo_mask & (1 << index))
                    {
                        if (max_index >= frequency)
                        {
                            // Only set modulo mask if a modulo op is actually necessary!
                            // This requires that the uploaded range for this attr = [0, freq-1]
                            // Ignoring modulo op if the rendered range does not wrap allows for range optimization
                            attrib0 |= (frequency << 8);
                            attrib1 |= modulo_op_frequency_mask;
                        }
                        else
                        {
                            attrib0 |= default_frequency_mask;
                        }
                    }
                    else
                    {
                        // Division
                        attrib0 |= (frequency << 8);
                    }
                    break;
                }
                }
            }
        } //end attribute placement check

        // Special compressed 4 components into one 4-byte value. Decoded as one value.
        if (type == rsx::vertex_base_type::cmp)
        {
            size = 1;
        }

        // All data is passed in in PS3-native order (BE) so swap flag should be set
        attrib1 |= swap_storage_mask;
        attrib0 |= (static_cast<s32>(type) << 24);
        attrib0 |= (size << 27);
        attrib1 |= offset_in_block[index];

        buffer[index * 2 + 0] = attrib0;
        buffer[index * 2 + 1] = attrib1;
    }
}
// Copies vertex source data into the upload heaps described by the layout.
// Transient (volatile) data is written synchronously with memcpy; persistent
// array data is streamed through the async DMA manager.
void thread::write_vertex_data_to_memory(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count, void *persistent_data, void *volatile_data)
{
    auto transient = static_cast<char*>(volatile_data);
    auto persistent = static_cast<char*>(persistent_data);

    auto &draw_call = rsx::method_registers.current_draw_clause;

    if (transient != nullptr)
    {
        if (draw_call.command == rsx::draw_command::inlined_array)
        {
            // Register values first, then the raw inlined array payload
            for (const u8 index : layout.referenced_registers)
            {
                memcpy(transient, rsx::method_registers.register_vertex_info[index].data.data(), 16);
                transient += 16;
            }

            memcpy(transient, draw_call.inline_vertex_array.data(), draw_call.inline_vertex_array.size() * sizeof(u32));
            //Is it possible to reference data outside of the inlined array?
            return;
        }

        //NOTE: Order is important! Transient layout is always push_buffers followed by register data
        if (draw_call.is_immediate_draw)
        {
            //NOTE: It is possible for immediate draw to only contain index data, so vertex data can be in persistent memory
            for (const auto &info : layout.volatile_blocks)
            {
                memcpy(transient, vertex_push_buffers[info.first].data.data(), info.second);
                transient += info.second;
            }
        }

        for (const u8 index : layout.referenced_registers)
        {
            memcpy(transient, rsx::method_registers.register_vertex_info[index].data.data(), 16);
            transient += 16;
        }
    }

    if (persistent != nullptr)
    {
        for (interleaved_range_info* block : layout.interleaved_blocks)
        {
            // Only the vertex range actually referenced by the draw is uploaded
            auto range = block->calculate_required_range(first_vertex, vertex_count);

            const u32 data_size = range.second * block->attribute_stride;
            const u32 vertex_base = range.first * block->attribute_stride;

            g_fxo->get<rsx::dma_manager>().copy(persistent, vm::_ptr<char>(block->real_offset_address) + vertex_base, data_size);
            persistent += data_size;
        }
    }
}
// Common flip bookkeeping shared by the backends: acknowledges any deferred
// flip request matching this flip's origin, updates frame statistics for
// emulated flips and records the host flip timestamp.
void thread::flip(const display_flip_info_t& info)
{
    m_eng_interrupt_mask.clear(rsx::display_interrupt);

    if (async_flip_requested & flip_request::any)
    {
        // Deferred flip
        if (info.emu_flip)
        {
            async_flip_requested.clear(flip_request::emu_requested);
        }
        else
        {
            async_flip_requested.clear(flip_request::native_ui);
        }
    }

    if (info.emu_flip)
    {
        performance_counters.sampled_frames++;

        // Debug aid: pause emulation once the configured flip count is reached
        if (m_pause_after_x_flips && m_pause_after_x_flips-- == 1)
        {
            Emu.Pause();
        }
    }

    last_host_flip_timestamp = get_system_time();
}
// Re-evaluates the zcull unit state from the method registers. When a
// framebuffer swap occurred, also determines whether any bound zcull region
// matches the current depth surface (address, format and AA mode).
void thread::check_zcull_status(bool framebuffer_swap)
{
    const bool zcull_rendering_enabled = !!method_registers.registers[NV4097_SET_ZCULL_EN];
    const bool zcull_stats_enabled = !!method_registers.registers[NV4097_SET_ZCULL_STATS_ENABLE];
    const bool zcull_pixel_cnt_enabled = !!method_registers.registers[NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE];

    if (framebuffer_swap)
    {
        zcull_surface_active = false;
        const u32 zeta_address = m_depth_surface_info.address;

        if (zeta_address)
        {
            //Find zeta address in bound zculls
            for (const auto& zcull : zculls)
            {
                // The zcull binding must match the depth surface's format and AA mode
                if (zcull.bound &&
                    rsx::to_surface_depth_format(zcull.zFormat) == m_depth_surface_info.depth_format &&
                    rsx::to_surface_antialiasing(zcull.aaFormat) == rsx::method_registers.surface_antialias())
                {
                    const u32 rsx_address = rsx::get_address(zcull.offset, CELL_GCM_LOCATION_LOCAL);
                    if (rsx_address == zeta_address)
                    {
                        zcull_surface_active = true;
                        break;
                    }
                }
            }
        }
    }

    zcull_ctrl->set_enabled(this, zcull_rendering_enabled);
    zcull_ctrl->set_status(this, zcull_surface_active, zcull_pixel_cnt_enabled, zcull_stats_enabled);
}
// Forwards a zcull statistics clear request of the given report type
// to the zcull control unit.
void thread::clear_zcull_stats(u32 type)
{
    zcull_ctrl->clear(this, type);
}
// Writes a zcull statistics report of the given type to guest memory at
// 'sink'. Known report types are serviced asynchronously by the zcull unit;
// unknown types (or when zcull queries are disabled) write a zeroed report
// with the current timestamp.
void thread::get_zcull_stats(u32 type, vm::addr_t sink)
{
    u32 value = 0;
    if (!g_cfg.video.disable_zcull_queries)
    {
        switch (type)
        {
        case CELL_GCM_ZPASS_PIXEL_CNT:
        case CELL_GCM_ZCULL_STATS:
        case CELL_GCM_ZCULL_STATS1:
        case CELL_GCM_ZCULL_STATS2:
        case CELL_GCM_ZCULL_STATS3:
        {
            zcull_ctrl->read_report(this, sink, type);
            return;
        }
        default:
            rsx_log.error("Unknown zcull stat type %d", type);
            break;
        }
    }

    // Fallback path: atomically store a zero-valued report under reservation lock
    rsx::reservation_lock<true> lock(sink, 16);
    vm::_ref<atomic_t<CellGcmReportData>>(sink).store({timestamp(), value, 0});
}
// Copies pending zcull reports in [memory_range_start, memory_range_start +
// memory_range) to 'destination'; returns the value reported by the zcull unit.
u32 thread::copy_zcull_stats(u32 memory_range_start, u32 memory_range, u32 destination)
{
    return zcull_ctrl->copy_reports_to(memory_range_start, memory_range, destination);
}
// Enables conditional rendering gated on the report at guest address 'ref'.
// If pending occlusion queries feed that report, defers the decision to the
// backend via a sync hint; otherwise evaluates the result immediately.
void thread::enable_conditional_rendering(vm::addr_t ref)
{
    cond_render_ctrl.enable_conditional_render(this, ref);

    auto result = zcull_ctrl->find_query(ref, true);
    if (result.found)
    {
        if (!result.queries.empty())
        {
            // Queries still in flight; let the backend evaluate when they land
            cond_render_ctrl.set_eval_sources(result.queries);
            sync_hint(FIFO::interrupt_hint::conditional_render_eval, { .query = cond_render_ctrl.eval_sources.front(), .address = ref });
        }
        else
        {
            // Raw result already available; zero zpass means the test failed
            bool failed = (result.raw_zpass_result == 0);
            cond_render_ctrl.set_eval_result(this, failed);
        }
    }
    else
    {
        // No matching query; evaluate from the report value in memory
        cond_render_ctrl.eval_result(this);
    }
}
// Turns off conditional rendering in the conditional render controller.
void thread::disable_conditional_rendering()
{
    cond_render_ctrl.disable_conditional_render(this);
}
// Marks hardware conditional rendering as active and drops any pending
// evaluation sources (the backend takes over from here).
void thread::begin_conditional_rendering(const std::vector<reports::occlusion_query_info*>& /*sources*/)
{
    cond_render_ctrl.hw_cond_active = true;
    cond_render_ctrl.eval_sources.clear();
}
// Marks hardware conditional rendering as inactive.
void thread::end_conditional_rendering()
{
    cond_render_ctrl.hw_cond_active = false;
}
// Pipeline flush point: drains pending zcull work and re-flags fragment
// constants for re-upload (guest may have modified them before the sync).
void thread::sync()
{
    m_eng_interrupt_mask.clear(rsx::pipe_flush_interrupt);

    if (zcull_ctrl->has_pending())
    {
        zcull_ctrl->sync(this);
    }

    // Fragment constants may have been updated
    m_graphics_state |= rsx::pipeline_state::fragment_constants_dirty;

    // DMA sync; if you need this, don't use MTRSX
    // g_fxo->get<rsx::dma_manager>().sync();

    //TODO: On sync every sub-unit should finish any pending tasks
    //Might cause zcull lockup due to zombie 'unclaimed reports' which are not forcefully removed currently
    //ensure(async_tasks_pending.load() == 0);
}
// Base implementation of a sync hint: only notifies the zcull unit.
// Backends override/extend this to schedule their own flushes.
void thread::sync_hint(FIFO::interrupt_hint /*hint*/, rsx::reports::sync_hint_payload_t payload)
{
    zcull_ctrl->on_sync_hint(payload);
}
// Reports whether the command FIFO has no work queued: either the control
// block is not attached yet, or GET has caught up with PUT (the two lowest
// PUT bits are control flags and are masked out of the comparison).
bool thread::is_fifo_idle() const
{
    if (ctrl == nullptr)
    {
        return true;
    }

    return (ctrl->put & ~3) == ctrl->get;
}
// Publishes the current GET pointer and drops the FIFO's cached data so
// subsequent reads observe fresh guest memory.
void thread::flush_fifo()
{
    // Make sure GET value is exposed before sync points
    fifo_ctrl->sync_get();
    fifo_ctrl->invalidate_cache();
}
// Attempts to find the FIFO PC of the command 'count' entries away from
// 'get' by linearly disassembling forward from a known code start (or, on
// failure, from the hardware GET). Negative 'count' walks forward instead.
// Returns {number_of_commands_found, resulting_pc}; {0, get} on failure and
// {0, umax} if the thread is shutting down.
std::pair<u32, u32> thread::try_get_pc_of_x_cmds_backwards(s32 count, u32 get) const
{
    if (!ctrl || state & cpu_flag::exit)
    {
        return {0, umax};
    }

    if (!count)
    {
        return {0, get};
    }

    u32 true_get = ctrl->get;
    u32 start = last_known_code_start;

    RSXDisAsm disasm(cpu_disasm_mode::survey_cmd_size, vm::g_sudo_addr, 0, this);

    std::vector<u32> pcs_of_valid_cmds;

    if (get > start)
    {
        pcs_of_valid_cmds.reserve(std::min<u32>((get - start) / 16, 0x4000)); // Rough estimation of final array size
    }

    // Disassemble forward from probe_start, collecting the PC of every
    // decodable command until 'get' is reached (plus -count extra when
    // walking forward past it)
    auto probe_code_region = [&](u32 probe_start) -> std::pair<u32, u32>
    {
        if (probe_start > get)
        {
            return {0, get};
        }

        pcs_of_valid_cmds.clear();
        pcs_of_valid_cmds.push_back(probe_start);

        usz index_of_get = umax;
        usz until = umax;

        while (pcs_of_valid_cmds.size() < until)
        {
            if (u32 advance = disasm.disasm(pcs_of_valid_cmds.back()))
            {
                pcs_of_valid_cmds.push_back(utils::add_saturate<u32>(pcs_of_valid_cmds.back(), advance));
            }
            else
            {
                // Undecodable data; abandon this probe
                break;
            }

            if (index_of_get == umax && pcs_of_valid_cmds.back() >= get)
            {
                index_of_get = pcs_of_valid_cmds.size() - 1;
                until = index_of_get + 1;

                if (count < 0 && pcs_of_valid_cmds.back() == get)
                {
                    until -= count;
                }
            }
        }

        // The scan must have landed exactly on 'get' to be trusted
        if (index_of_get == umax || pcs_of_valid_cmds[index_of_get] != get)
        {
            return {0, get};
        }

        if (count < 0)
        {
            const u32 found_cmds_count = static_cast<u32>(std::min<s64>(-count, pcs_of_valid_cmds.size() - 1LL - index_of_get));

            return {found_cmds_count, pcs_of_valid_cmds[index_of_get + found_cmds_count]};
        }

        const u32 found_cmds_count = std::min<u32>(count, ::size32(pcs_of_valid_cmds) - 1);

        return {found_cmds_count, *(pcs_of_valid_cmds.end() - 1 - found_cmds_count)};
    };

    auto pair = probe_code_region(start);

    if (!pair.first)
    {
        // Retry from the live GET pointer if the known start failed
        pair = probe_code_region(true_get);
    }

    return pair;
}
// Attempts to recover from a broken FIFO state by rewinding to the last restore point.
// Tracks recovery frequency: if recoveries happen too often (20 within a shortened window),
// the state is considered hopeless and the thread aborts with a diagnostic.
void thread::recover_fifo(std::source_location src_loc)
{
	bool kill_itself = g_cfg.core.rsx_fifo_accuracy == rsx_fifo_mode::as_ps3;

	const u64 current_time = get_system_time();

	if (recovered_fifo_cmds_history.size() == 20u)
	{
		const auto cmd_info = recovered_fifo_cmds_history.front();

		// Check timestamp of last tracked cmd
		// Shorten the range of forbidden difference if driver wake-up delay is used
		if (current_time - cmd_info.timestamp < 2'000'000u - std::min<u32>(g_cfg.video.driver_wakeup_delay * 700, 1'400'000))
		{
			// Probably hopeless
			kill_itself = true;
		}

		// Erase the last command from history, keep the size of the queue the same
		recovered_fifo_cmds_history.pop();
	}

	if (kill_itself)
	{
		fmt::throw_exception("Dead FIFO commands queue state has been detected!"
			"\nTry increasing \"Driver Wake-Up Delay\" setting or setting \"RSX FIFO Accuracy\" to \"%s\", both in Advanced settings. Called from %s", std::min<rsx_fifo_mode>(rsx_fifo_mode{static_cast<u32>(g_cfg.core.rsx_fifo_accuracy.get()) + 1}, rsx_fifo_mode::atomic_ordered), src_loc);
	}

	// Error. Should reset the queue
	fifo_ctrl->set_get(restore_point);
	fifo_ret_addr = saved_fifo_ret;
	std::this_thread::sleep_for(2ms);
	fifo_ctrl->abort();

	// Abort any draw call that was interrupted mid begin/end
	if (std::exchange(in_begin_end, false) && !rsx::method_registers.current_draw_clause.empty())
	{
		execute_nop_draw();
		rsx::thread::end();
	}

	recovered_fifo_cmds_history.push({fifo_ctrl->last_cmd(), current_time});
}
// Extends the base cpu_thread debug dump with the hashes of the currently bound shader
// programs (only when the thread is paused in a waiting state, where they are stable).
std::string thread::dump_misc() const
{
	std::string ret = cpu_thread::dump_misc();

	const auto flags = +state;

	if (is_paused(flags) && flags & cpu_flag::wait)
	{
		fmt::append(ret, "\nFragment Program Hash: %X.fp", current_fragment_program.get_data() ? program_hash_util::fragment_program_utils::get_fragment_program_ucode_hash(current_fragment_program) : 0);
		fmt::append(ret, "\nVertex Program Hash: %X.vp", current_vertex_program.data.empty() ? 0 : program_hash_util::vertex_program_utils::get_vertex_program_ucode_hash(current_vertex_program));
	}
	else
	{
		fmt::append(ret, "\n");
	}

	return ret;
}
// Reports the RSX "call stack" for the debugger. RSX supports only a single saved return
// address, so the result holds at most one {address, 0} entry.
std::vector<std::pair<u32, u32>> thread::dump_callstack_list() const
{
	const u32 ret_addr = fifo_ret_addr;

	if (ret_addr == RSX_CALL_STACK_EMPTY)
	{
		return {};
	}

	return {{ ret_addr, 0 }};
}
// Sleeps for the configured driver wake-up delay (divided by `div` for cases that need only a
// fraction of it), using OS waits for coarse portions and yield/busy-wait for the remainder
// to approximate the high timing accuracy this has on real hardware.
void thread::fifo_wake_delay(u64 div)
{
	// TODO: Nanoseconds accuracy
	u64 remaining = g_cfg.video.driver_wakeup_delay;

	if (!remaining)
	{
		return;
	}

	// Some cases do not need full delay
	remaining = utils::aligned_div(remaining, div);
	const u64 until = get_system_time() + remaining;

	while (true)
	{
#ifdef __linux__
		// NOTE: Assumption that timer initialization has succeeded
		constexpr u64 host_min_quantum = 10;
#else
		// Host scheduler quantum for windows (worst case)
		// NOTE: On ps3 this function has very high accuracy
		constexpr u64 host_min_quantum = 500;
#endif
		if (remaining >= host_min_quantum)
		{
#ifdef __linux__
			thread_ctrl::wait_for(remaining, false);
#else
			// Wait on multiple of min quantum for large durations to avoid overloading low thread cpus
			thread_ctrl::wait_for(remaining - (remaining % host_min_quantum), false);
#endif
		}
		// TODO: Determine best value for yield delay
		else if (remaining >= host_min_quantum / 2)
		{
			std::this_thread::yield();
		}
		else
		{
			busy_wait(100);
		}

		const u64 current = get_system_time();

		if (current >= until)
		{
			break;
		}

		remaining = until - current;
	}
}
// Returns the most recently fetched FIFO command word.
u32 thread::get_fifo_cmd() const
{
	// Last fifo cmd for logging and utility
	return fifo_ctrl->last_cmd();
}
void invalid_method(context*, u32, u32);
// Pretty-prints the FIFO pointers and every implemented NV4097 method register into `result`,
// skipping unimplemented methods and pure-trigger registers that carry no state.
void thread::dump_regs(std::string& result, std::any& /*custom_data*/) const
{
	if (ctrl)
	{
		fmt::append(result, "FIFO: GET=0x%07x, PUT=0x%07x, REF=0x%08x\n", +ctrl->get, +ctrl->put, +ctrl->ref);
	}

	// 14-bit method index space
	for (u32 i = 0; i < 1 << 14; i++)
	{
		if (rsx::methods[i] == &invalid_method)
		{
			continue;
		}

		switch (i)
		{
		// Trigger/sync methods with no persistent register value
		case NV4097_NO_OPERATION:
		case NV4097_INVALIDATE_L2:
		case NV4097_INVALIDATE_VERTEX_FILE:
		case NV4097_INVALIDATE_VERTEX_CACHE_FILE:
		case NV4097_INVALIDATE_ZCULL:
		case NV4097_WAIT_FOR_IDLE:
		case NV4097_PM_TRIGGER:
		case NV4097_ZCULL_SYNC:
			continue;

		case NV308A_COLOR:
		{
			// Skip the inline-transfer data window entirely; resume at the next object
			i = NV3089_SET_OBJECT;
			continue;
		}

		default:
		{
			break;
		}
		}

		fmt::append(result, "[%04x] ", i);
		ensure(rsx::get_pretty_printing_function(i))(result, i, method_registers.registers[i]);
		result += '\n';
	}
}
// Issues a ZCULL read barrier over the given memory span. An unconditional barrier forbids
// deferring the report copy; otherwise the copy may be postponed.
flags32_t thread::read_barrier(u32 memory_address, u32 memory_range, bool unconditional)
{
	flags32_t zcull_flags = reports::sync_defer_copy;

	if (unconditional)
	{
		zcull_flags = reports::sync_none;
	}

	return zcull_ctrl->read_barrier(this, memory_address, memory_range, zcull_flags);
}
// Re-evaluates the ZCULL configuration after external changes (non-forced refresh).
void thread::notify_zcull_info_changed()
{
	check_zcull_status(false);
}
// Called when guest memory is mapped. If the new mapping overlaps a range that RSX has queued
// for invalidation (from a recent unmap), the invalidation is performed immediately so the
// remapped range starts clean.
void thread::on_notify_memory_mapped(u32 address, u32 size)
{
	// In the case where an unmap is followed shortly after by a remap of the same address space
	// we must block until RSX has invalidated the memory
	// or lock m_mtx_task and do it ourselves

	if (!rsx_thread_running)
		return;

	reader_lock lock(m_mtx_task);

	const auto map_range = address_range::start_length(address, size);

	if (!m_invalidated_memory_range.valid())
		return;

	if (m_invalidated_memory_range.overlaps(map_range))
	{
		// Upgrade to writer lock and flush the pending invalidation ourselves
		lock.upgrade();
		handle_invalidated_memory_range();
	}
}
// Called before guest memory is unmapped. Tears down the affected IO mappings, collects the
// unmap events to broadcast (LLE) or fixes up the GCM offset table directly (HLE), and queues
// the address range for cache invalidation on the RSX thread.
// `event_data` receives {event_id, 64-entry bitmask} pairs for the caller to send afterwards.
void thread::on_notify_pre_memory_unmapped(u32 address, u32 size, std::vector<std::pair<u64, u64>>& event_data)
{
	if (rsx_thread_running && address < rsx::constants::local_mem_base)
	{
		// Each bit represents io entry to be unmapped
		u64 unmap_status[512 / 64]{};

		// Walk the range in 1 MiB pages and clear both directions of the io<->ea mapping
		for (u32 ea = address >> 20, end = ea + (size >> 20); ea < end; ea++)
		{
			const u32 io = utils::rol32(iomap_table.io[ea], 32 - 20);

			if (io + 1)
			{
				unmap_status[io / 64] |= 1ull << (io & 63);
				iomap_table.io[ea].release(-1);
				iomap_table.ea[io].release(-1);
			}
		}

		auto& cfg = g_fxo->get<gcm_config>();

		std::unique_lock<shared_mutex> hle_lock;

		for (u32 i = 0; i < std::size(unmap_status); i++)
		{
			// TODO: Check order when sending multiple events
			if (u64 to_unmap = unmap_status[i])
			{
				if (isHLE)
				{
					// HLE: patch the GCM offset table in place instead of raising events
					if (!hle_lock)
					{
						hle_lock = std::unique_lock{cfg.gcmio_mutex};
					}

					int bit = 0;

					while (to_unmap)
					{
						bit = (std::countr_zero<u64>(utils::rol64(to_unmap, 0 - bit)) + bit);
						to_unmap &= ~(1ull << bit);

						constexpr u16 null_entry = 0xFFFF;
						const u32 ea = std::exchange(cfg.offsetTable.eaAddress[(i * 64 + bit)], null_entry);

						if (ea < (rsx::constants::local_mem_base >> 20))
						{
							cfg.offsetTable.eaAddress[ea] = null_entry;
						}
					}

					continue;
				}

				// Each 64 entries are grouped by a bit
				const u64 io_event = SYS_RSX_EVENT_UNMAPPED_BASE << i;
				event_data.emplace_back(io_event, to_unmap);
			}
		}

		if (hle_lock)
		{
			hle_lock.unlock();
		}

		// Pause RSX thread momentarily to handle unmapping
		eng_lock elock(this);

		// Queue up memory invalidation
		std::lock_guard lock(m_mtx_task);
		const bool existing_range_valid = m_invalidated_memory_range.valid();
		const auto unmap_range = address_range::start_length(address, size);

		if (existing_range_valid && m_invalidated_memory_range.touches(unmap_range))
		{
			// Merge range-to-invalidate in case of consecutive unmaps
			m_invalidated_memory_range.set_min_max(unmap_range);
		}
		else
		{
			if (existing_range_valid)
			{
				// We can only delay consecutive unmaps.
				// Otherwise, to avoid VirtualProtect failures, we need to do the invalidation here
				handle_invalidated_memory_range();
			}

			m_invalidated_memory_range = unmap_range;
		}

		m_eng_interrupt_mask |= rsx::memory_config_interrupt;
	}
}
// Called after guest memory is unmapped. HLE handles its bookkeeping during the pre-unmap
// phase, so only LLE needs the unmap event broadcast here.
void thread::on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2)
{
	if (isHLE)
	{
		return;
	}

	send_event(0, event_data1, event_data2);
}
// Flushes the pending memory invalidation range (set up by the unmap notifications) through
// the backend's cache-invalidation path, then clears it.
// NOTE: m_mtx_task lock must be acquired before calling this method
void thread::handle_invalidated_memory_range()
{
	AUDIT(!m_mtx_task.is_free());
	m_eng_interrupt_mask.clear(rsx::memory_config_interrupt);

	if (!m_invalidated_memory_range.valid())
	{
		return;
	}

	if (is_stopped())
	{
		// We only need to commit host-resident memory to the guest in case of savestates or captures.
		on_invalidate_memory_range(m_invalidated_memory_range, rsx::invalidation_cause::read);
	}

	on_invalidate_memory_range(m_invalidated_memory_range, rsx::invalidation_cause::unmap);
	m_invalidated_memory_range.invalidate();
}
// Pause/continue wrappers for the FIFO control. Never call these from the RSX thread itself!
// Requests an external pause of the RSX thread and spins until it acknowledges
// (or the thread is stopping). Must not be called from the RSX thread itself.
void thread::pause()
{
	external_interrupt_lock++;

	for (;;)
	{
		if (external_interrupt_ack || is_stopped())
		{
			break;
		}

		utils::pause();
	}
}
// Releases one external pause request; the RSX thread resumes once the count reaches zero.
void thread::unpause()
{
	// TODO: Clean this shit up
	external_interrupt_lock--;
}
// Runs on the RSX thread: acknowledges an external pause request and busy-waits until all
// pause requesters have released the lock (or the thread is asked to return).
// The outer do-while re-enters the wait if a new pause request races the ack reset.
void thread::wait_pause()
{
	do
	{
		if (g_cfg.video.multithreaded_rsx)
		{
			// Drain pending DMA work before parking, offload thread must not run behind our back
			g_fxo->get<rsx::dma_manager>().sync();
		}

		external_interrupt_ack.store(true);

		while (external_interrupt_lock && (cpu_flag::ret - state))
		{
			// TODO: Investigate non busy-spinning method
			utils::pause();
		}

		external_interrupt_ack.store(false);
	}
	while (external_interrupt_lock && (cpu_flag::ret - state));
}
// Returns the approximate RSX load percentage, recomputed from accumulated idle time roughly
// every 30 frames (or on first call), then resets the sampling window.
u32 thread::get_load()
{
	//Average load over around 30 frames
	if (!performance_counters.last_update_timestamp || performance_counters.sampled_frames > 30)
	{
		const auto timestamp = get_system_time();
		const auto idle = performance_counters.idle_time.load();
		const auto elapsed = timestamp - performance_counters.last_update_timestamp;

		// elapsed > idle also guarantees elapsed != 0 for the division below
		if (elapsed > idle)
			performance_counters.approximate_load = static_cast<u32>((elapsed - idle) * 100 / elapsed);
		else
			performance_counters.approximate_load = 0u;

		performance_counters.idle_time = 0;
		performance_counters.sampled_frames = 0;
		performance_counters.last_update_timestamp = timestamp;
	}

	return performance_counters.approximate_load;
}
// Marks the end of a frame scope GPU-side: starts/finishes frame captures, drains stale ZCULL
// work, queues the flip with this frame's stats, and updates FIFO-flattening and frame-skip
// heuristics. `forced` indicates the frame end was synthesized (e.g. flip without a queued frame).
void thread::on_frame_end(u32 buffer, bool forced)
{
	bool pause_emulator = false;

	// Start a new capture if the user requested one, otherwise finalize a running capture
	if (g_user_asked_for_frame_capture.exchange(false) && !capture_current_frame)
	{
		capture_current_frame = true;
		frame_debug.reset();
		frame_capture.reset();

		// random number just to jumpstart the size
		frame_capture.replay_commands.reserve(8000);

		// capture first tile state with nop cmd
		rsx::frame_capture_data::replay_command replay_cmd;
		replay_cmd.rsx_command = std::make_pair(NV4097_NO_OPERATION, 0);
		frame_capture.replay_commands.push_back(replay_cmd);
		capture::capture_display_tile_state(this, frame_capture.replay_commands.back());
	}
	else if (capture_current_frame)
	{
		capture_current_frame = false;

		std::string file_path = fs::get_config_dir() + "captures/" + Emu.GetTitleID() + "_" + date_time::current_time_narrow() + "_capture.rrc.gz";

		fs::pending_file temp(file_path);

		utils::serial save_manager;

		if (temp.file)
		{
			save_manager.m_file_handler = make_compressed_serialization_file_handler(temp.file);

			save_manager(frame_capture);
			save_manager.m_file_handler->finalize(save_manager);

			if (temp.commit(false))
			{
				rsx_log.success("Capture successful: %s", file_path);
				frame_capture.reset();
				pause_emulator = true;
			}
			else
			{
				rsx_log.error("Capture failed: %s (%s)", file_path, fs::g_tls_error);
			}
		}
		else
		{
			rsx_log.fatal("Capture failed: %s (%s)", file_path, fs::g_tls_error);
		}
	}

	if (zcull_ctrl->has_pending())
	{
		// NOTE: This is a workaround for buggy games.
		// Some applications leave the zpass/stats gathering active but don't use the information.
		// This can lead to the zcull unit using up all the memory queueing up operations that never get consumed.
		// Seen in Diablo III and Yakuza 5
		zcull_ctrl->clear(this, CELL_GCM_ZPASS_PIXEL_CNT | CELL_GCM_ZCULL_STATS);
	}

	// Save current state
	m_queued_flip.stats = m_frame_stats;
	m_queued_flip.push(buffer);
	m_queued_flip.skip_frame = skip_current_frame;

	if (!forced) [[likely]]
	{
		if (!g_cfg.video.disable_FIFO_reordering)
		{
			// Try to enable FIFO optimizations
			// Only rarely useful for some games like RE4
			m_flattener.evaluate_performance(m_frame_stats.draw_calls);
		}

		if (g_cfg.video.frame_skip_enabled)
		{
			// Alternate between drawing `consecutive_frames_to_draw` frames and skipping
			// `consecutive_frames_to_skip` frames (negative counter = skipping phase)
			m_skip_frame_ctr++;

			if (m_skip_frame_ctr >= g_cfg.video.consecutive_frames_to_draw)
				m_skip_frame_ctr = -g_cfg.video.consecutive_frames_to_skip;

			skip_current_frame = (m_skip_frame_ctr < 0);
		}
	}
	else
	{
		if (!g_cfg.video.disable_FIFO_reordering)
		{
			// Flattener is unusable due to forced random flips
			m_flattener.force_disable();
		}

		if (g_cfg.video.frame_skip_enabled)
		{
			rsx_log.error("Frame skip is not compatible with this application");
		}
	}

	if (pause_emulator)
	{
		// Give the user a moment after a successful capture
		Emu.Pause();
		thread_ctrl::wait_for(30'000);
	}

	// Reset current stats
	m_frame_stats = {};
	m_profiler.enabled = !!g_cfg.video.overlay;
}
// Returns the display refresh rate, re-querying the backend only once every `uses_per_query`
// calls and serving the cached value otherwise. Lock-free: m_display_rate_fetch_count acts as
// a generation counter so concurrent readers can detect a racing update and retry.
f64 thread::get_cached_display_refresh_rate()
{
	constexpr u64 uses_per_query = 512;

	f64 result = m_cached_display_rate;
	u64 count = m_display_rate_fetch_count++;

	while (true)
	{
		// Fix: use the named constant instead of a duplicated literal 512 so the query
		// period cannot silently diverge from the generation arithmetic below
		if (count % uses_per_query == 0)
		{
			result = get_display_refresh_rate();
			m_cached_display_rate.store(result);
			m_display_rate_fetch_count += uses_per_query; // Notify users of the new value
			break;
		}

		const u64 new_count = m_display_rate_fetch_count;
		const f64 new_cached = m_cached_display_rate;

		// Stable if neither the cached value nor the generation changed under us
		if (result == new_cached && count / uses_per_query == new_count / uses_per_query)
		{
			break;
		}

		// An update might have gone through
		count = new_count;
		result = new_cached;
	}

	return result;
}
// Requests a display flip to `buffer`. On the RSX thread the flip is executed synchronously;
// from other threads (PPU syscall) it is queued asynchronously and deduplicated.
// Returns false only when an async request may have been dropped during savestate exit.
bool thread::request_emu_flip(u32 buffer)
{
	if (is_current_thread()) // requested through command buffer
	{
		// NOTE: The flip will clear any queued flip requests
		handle_emu_flip(buffer);
	}
	else // requested 'manually' through ppu syscall
	{
		if (async_flip_requested & flip_request::emu_requested)
		{
			// ignore multiple requests until previous happens
			return true;
		}

		async_flip_buffer = buffer;
		async_flip_requested |= flip_request::emu_requested;
		m_eng_interrupt_mask |= rsx::display_interrupt;

		if (state & cpu_flag::exit)
		{
			// Resubmit possibly-ignored flip on savestate load
			return false;
		}
	}

	return true;
}
// Performs an emulator-side display flip to `buffer`: applies the configured frame limiter
// (sleeping or deferring to vblank as required), executes the backend flip, and delivers the
// flip notification(s) to the guest (LLE event or HLE flip handler).
void thread::handle_emu_flip(u32 buffer)
{
	if (m_queued_flip.in_progress)
	{
		// Recursion not allowed!
		return;
	}

	if (!m_queued_flip.pop(buffer))
	{
		// Frame was not queued before flipping
		on_frame_end(buffer, true);
		ensure(m_queued_flip.pop(buffer));
	}

	// Resolve the effective FPS cap from configuration
	double limit = 0.;
	const auto frame_limit = g_disable_frame_limit ? frame_limit_type::none : g_cfg.video.frame_limit;

	switch (frame_limit)
	{
	case frame_limit_type::none: limit = g_cfg.core.max_cpu_preempt_count_per_frame ? static_cast<double>(g_cfg.video.vblank_rate) : 0.; break;
	case frame_limit_type::_30: limit = 30.; break;
	case frame_limit_type::_50: limit = 50.; break;
	case frame_limit_type::_60: limit = 60.; break;
	case frame_limit_type::_120: limit = 120.; break;
	case frame_limit_type::display_rate: limit = get_cached_display_refresh_rate(); break;
	case frame_limit_type::_auto: limit = static_cast<double>(g_cfg.video.vblank_rate); break;
	case frame_limit_type::_ps3: limit = 0.; break;
	case frame_limit_type::infinite: limit = 0.; break;
	default:
		break;
	}

	if (double limit2 = g_cfg.video.second_frame_limit; limit2 >= 0.1 && (limit2 < limit || !limit))
	{
		// Apply a second limit
		limit = limit2;
	}

	if (limit)
	{
		// Sleep until the next flip deadline derived from the cap
		const u64 needed_us = static_cast<u64>(1000000 / limit);
		const u64 time = std::max<u64>(get_system_time(), target_rsx_flip_time > needed_us ? target_rsx_flip_time - needed_us : 0);

		if (int_flip_index)
		{
			if (target_rsx_flip_time > time + 1000)
			{
				const auto delay_us = target_rsx_flip_time - time;
				lv2_obj::wait_timeout(delay_us, nullptr, false);
				performance_counters.idle_time += delay_us;
			}
		}

		target_rsx_flip_time = std::max(time, target_rsx_flip_time) + needed_us;
		flip_notification_count = 1;
	}
	else if (frame_limit == frame_limit_type::_ps3)
	{
		// PS3-accurate mode: defer the flip until the vblank counter advances
		bool exit = false;

		if (vblank_at_flip == umax)
		{
			vblank_at_flip = +vblank_count;
			flip_notification_count = 1;
			exit = true;
		}

		if (requested_vsync && (exit || vblank_at_flip == vblank_count))
		{
			// Not yet signaled, handle it later
			async_flip_requested |= flip_request::emu_requested;
			async_flip_buffer = buffer;
			return;
		}

		vblank_at_flip = umax;
	}
	else
	{
		flip_notification_count = 1;
	}

	int_flip_index += flip_notification_count;

	current_display_buffer = buffer;
	m_queued_flip.emu_flip = true;
	m_queued_flip.in_progress = true;
	m_queued_flip.skip_frame |= g_cfg.video.disable_video_output && !g_cfg.video.perf_overlay.perf_overlay_enabled;

	flip(m_queued_flip);

	last_guest_flip_timestamp = get_system_time() - 1000000;
	flip_status = CELL_GCM_DISPLAY_FLIP_STATUS_DONE;
	m_queued_flip.in_progress = false;

	// Deliver one notification per accumulated flip
	while (flip_notification_count--)
	{
		if (!isHLE)
		{
			// LLE: raise the driver flip event through the RSX context
			sys_rsx_context_attribute(0x55555555, 0xFEC, buffer, 0, 0, 0);

			if (unsent_gcm_events)
			{
				// TODO: A proper fix
				return;
			}

			continue;
		}

		// HLE: invoke the registered flip handler on the interrupt PPU thread
		if (auto ptr = flip_handler)
		{
			intr_thread->cmd_list
			({
				{ ppu_cmd::set_args, 1 }, u64{ 1 },
				{ ppu_cmd::lle_call, ptr },
				{ ppu_cmd::sleep, 0 }
			});

			intr_thread->cmd_notify.store(1);
			intr_thread->cmd_notify.notify_one();
		}
	}
}
// Adaptive CPU-preemption controller, invoked once per frame. Keeps a sliding window of frame
// timestamps, estimates the average frame time and its stability, and raises or lowers the
// per-frame preemption count accordingly, feeding the result into the LV2 scheduler's yield
// frequency. Disabled entirely when max_cpu_preempt_count_per_frame is 0.
void thread::evaluate_cpu_usage_reduction_limits()
{
	const u64 max_preempt_count = g_cfg.core.max_cpu_preempt_count_per_frame;

	if (!max_preempt_count)
	{
		frame_times.clear();
		lv2_obj::set_yield_frequency(0, 0);
		return;
	}

	const u64 current_time = get_system_time();
	const u64 current_tsc = utils::get_tsc();
	u64 preempt_count = 0;

	// Need a full 60-frame window before making any decision
	if (frame_times.size() >= 60)
	{
		u64 diffs = 0;

		for (usz i = 1; i < frame_times.size(); i++)
		{
			const u64 cur_diff = frame_times[i].timestamp - frame_times[i - 1].timestamp;
			diffs += cur_diff;
		}

		const usz avg_frame_time = diffs / 59;

		u32 lowered_delay = 0;
		u32 raised_delay = 0;
		bool can_reevaluate = true;
		u64 prev_preempt_count = umax;

		// Count preempt-count changes over the last 30 frames; a change after the first
		// entry means the controller is mid-adjustment and should not re-evaluate yet
		for (usz i = frame_times.size() - 30; i < frame_times.size(); i++)
		{
			if (prev_preempt_count == umax)
			{
				prev_preempt_count = frame_times[i].preempt_count;
				continue;
			}

			if (prev_preempt_count != frame_times[i].preempt_count)
			{
				if (prev_preempt_count > frame_times[i].preempt_count)
				{
					lowered_delay++;
				}
				else if (prev_preempt_count < frame_times[i].preempt_count)
				{
					raised_delay++;
				}

				if (i > frame_times.size() - 30)
				{
					// Slow preemption count increase
					can_reevaluate = false;
				}
			}

			prev_preempt_count = frame_times[i].preempt_count;
		}

		preempt_count = std::min<u64>(frame_times.back().preempt_count, max_preempt_count);

		// Classify each frame as a (hard) pacing failure by deviation from the average
		u32 fails = 0;
		u32 hard_fails = 0;
		bool is_last_frame_a_fail = false;

		auto abs_dst = [](u64 a, u64 b)
		{
			return a >= b ? a - b : b - a;
		};

		for (u32 i = 1; i <= frame_times.size(); i++)
		{
			const u64 cur_diff = (i == frame_times.size() ? current_time : frame_times[i].timestamp) - frame_times[i - 1].timestamp;

			if (const u64 diff_of_diff = abs_dst(cur_diff, avg_frame_time);
				diff_of_diff >= avg_frame_time / 7)
			{
				if (diff_of_diff >= avg_frame_time / 3)
				{
					raised_delay++;
					hard_fails++;

					if (i == frame_times.size())
					{
						is_last_frame_a_fail = true;
					}
				}

				// NOTE(review): `fails` is u32 while umax is the widest max, so this
				// comparison is always true — presumably intended as a saturation guard; confirm
				if (fails != umax)
				{
					fails++;
				}
			}
		}

		bool hard_measures_taken = false;
		const usz fps_10 = 10'000'000 / avg_frame_time; // FPS in tenths

		auto lower_preemption_count = [&]()
		{
			if (preempt_count >= 10)
			{
				preempt_count -= 10;
			}
			else
			{
				preempt_count = 0;
			}

			if ((hard_fails > 2 || fails > 20) && is_last_frame_a_fail)
			{
				// Backoff hard and block increases for a while
				hard_measures_taken = preempt_count > 1;
				preempt_count = preempt_count * 7 / 8;
				prevent_preempt_increase_tickets = 10;
			}
			else
			{
				prevent_preempt_increase_tickets = std::max<u32>(7, prevent_preempt_increase_tickets);
			}
		};

		const u64 vblank_rate_10 = g_cfg.video.vblank_rate * 10;

		if (can_reevaluate)
		{
			// Only raise preemption when FPS sits near a recognized target (30/60/vblank/half-vblank)
			const bool is_avg_fps_ok = (abs_dst(fps_10, 300) < 3 || abs_dst(fps_10, 600) < 4 || abs_dst(fps_10, vblank_rate_10) < 4 || abs_dst(fps_10, vblank_rate_10 / 2) < 3);

			if (!hard_fails && fails < 6 && is_avg_fps_ok)
			{
				if (prevent_preempt_increase_tickets)
				{
					prevent_preempt_increase_tickets--;
				}
				else
				{
					preempt_count = std::min<u64>(preempt_count + 4, max_preempt_count);
				}
			}
			else
			{
				lower_preemption_count();
			}
		}
		// Sudden FPS drop detection
		else if ((fails > 13 || hard_fails > 2 || !(abs_dst(fps_10, 300) < 20 || abs_dst(fps_10, 600) < 30 || abs_dst(fps_10, g_cfg.video.vblank_rate * 10) < 30 || abs_dst(fps_10, g_cfg.video.vblank_rate * 10 / 2) < 20)) && lowered_delay < raised_delay && is_last_frame_a_fail)
		{
			lower_preemption_count();
		}

		perf_log.trace("CPU preemption control: reeval=%d, preempt_count=%llu, fails=%u, hard=%u, avg_frame_time=%llu, highered=%u, lowered=%u, taken=%u", can_reevaluate, preempt_count, fails, hard_fails, avg_frame_time, raised_delay, lowered_delay, ::g_lv2_preempts_taken.load());

		if (hard_measures_taken)
		{
			preempt_fail_old_preempt_count = std::max<u64>(preempt_fail_old_preempt_count, std::min<u64>(frame_times.back().preempt_count, max_preempt_count));
		}
		else if (preempt_fail_old_preempt_count)
		{
			perf_log.error("Lowering current preemption count significantly due to a performance drop, if this issue persists frequently consider lowering max preemptions count to 'new-count' or lower. (old-count=%llu, new-count=%llu)", preempt_fail_old_preempt_count, preempt_count);
			preempt_fail_old_preempt_count = 0;
		}

		// Translate the preemption budget into a TSC-based yield frequency for LV2
		const u64 tsc_diff = (current_tsc - frame_times.back().tsc);
		const u64 time_diff = (current_time - frame_times.back().timestamp);
		const u64 preempt_diff = tsc_diff * (1'000'000 / 30) / (time_diff * std::max<u64>(preempt_count, 1ull));

		if (!preempt_count)
		{
			lv2_obj::set_yield_frequency(0, 0);
		}
		else if (abs_dst(fps_10, 300) < 30)
		{
			// Set an upper limit so a backoff technique would be taken if there is a sudden performance drop
			// Allow 4% of no yield to reduce significantly the risk of stutter
			lv2_obj::set_yield_frequency(preempt_diff, current_tsc + (tsc_diff * (1'000'000 * 96 / (30 * 100)) / time_diff));
		}
		else if (abs_dst(fps_10, 600) < 40)
		{
			// 5% for 60fps
			lv2_obj::set_yield_frequency(preempt_diff, current_tsc + (tsc_diff * (1'000'000 * 94 / (60 * 100)) / time_diff));
		}
		else if (abs_dst(fps_10, vblank_rate_10) < 40)
		{
			lv2_obj::set_yield_frequency(preempt_diff, current_tsc + (tsc_diff * (1'000'000 * 94 / (vblank_rate_10 * 10)) / time_diff));
		}
		else if (abs_dst(fps_10, vblank_rate_10 / 2) < 30)
		{
			lv2_obj::set_yield_frequency(preempt_diff, current_tsc + (tsc_diff * (1'000'000 * 96 / ((vblank_rate_10 / 2) * 10)) / time_diff));
		}
		else
		{
			// Undetected case, last 12% is with no yield
			lv2_obj::set_yield_frequency(preempt_diff, current_tsc + (tsc_diff * 88 / 100));
		}

		frame_times.pop_front();
	}
	else
	{
		lv2_obj::set_yield_frequency(0, 0);
	}

	frame_times.push_back(frame_time_t{preempt_count, current_time, current_tsc});
}
// Installs a new worker thread; the previous one (now in the local parameter) is released
// when the parameter goes out of scope.
void vblank_thread::set_thread(std::shared_ptr<named_thread<std::function<void()>>> thread)
{
	std::swap(m_thread, thread);
}
// Forwards a thread lifetime command (e.g. abort) to the wrapped named_thread, if any.
vblank_thread& vblank_thread::operator=(thread_state state)
{
	if (auto* worker = m_thread.get())
	{
		*worker = state;
	}

	return *this;
}
} // namespace rsx
| 123,626
|
C++
|
.cpp
| 3,566
| 30.448121
| 281
| 0.675362
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,390
|
RSXTexture.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXTexture.cpp
|
#include "stdafx.h"
#include "RSXTexture.h"
#include "rsx_methods.h"
#include "rsx_utils.h"
#include "Emu/system_config.h"
namespace rsx
{
// Texture image offset; bit 31 of the register is masked off.
// All fragment texture registers are laid out with a stride of 8 words per texture unit.
u32 fragment_texture::offset() const
{
	return registers[NV4097_SET_TEXTURE_OFFSET + (m_index * 8)] & 0x7FFFFFFF;
}

// Memory location code (register stores it 1-based; returned 0-based).
u8 fragment_texture::location() const
{
	return (registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] & 0x3) - 1;
}

// Cubemap flag (format register bit 2).
bool fragment_texture::cubemap() const
{
	return ((registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] >> 2) & 0x1);
}

// Border type flag (format register bit 3).
u8 fragment_texture::border_type() const
{
	return ((registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] >> 3) & 0x1);
}
// Raw dimension field (format register bits 4-7).
rsx::texture_dimension fragment_texture::dimension() const
{
	return rsx::to_texture_dimension((registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] >> 4) & 0xf);
}

// Dimension refined with the cubemap flag: a 2D texture with the cubemap bit set is reported
// as a cubemap.
rsx::texture_dimension_extended fragment_texture::get_extended_texture_dimension() const
{
	switch (dimension())
	{
	case rsx::texture_dimension::dimension1d: return rsx::texture_dimension_extended::texture_dimension_1d;
	case rsx::texture_dimension::dimension3d: return rsx::texture_dimension_extended::texture_dimension_3d;
	case rsx::texture_dimension::dimension2d: return cubemap() ? rsx::texture_dimension_extended::texture_dimension_cubemap : rsx::texture_dimension_extended::texture_dimension_2d;
	default: fmt::throw_exception("Unreachable");
	}
}
// Format code including the LN/UN layout flags (format register bits 8-15).
u8 fragment_texture::format() const
{
	return ((registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] >> 8) & 0xff);
}
// True when the texture uses one of the DXT block-compressed formats.
bool fragment_texture::is_compressed_format() const
{
	// Strip the linear/unnormalized layout flags before comparing format codes
	switch (format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN))
	{
	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
		return true;
	default:
		return false;
	}
}
// Raw mipmap level count as programmed by the guest (format register bits 16-31).
u16 fragment_texture::mipmap() const
{
	return ((registers[NV4097_SET_TEXTURE_FORMAT + (m_index * 8)] >> 16) & 0xffff);
}

// Programmed mipmap count clamped to what the texture dimensions actually allow.
u16 fragment_texture::get_exact_mipmap_count() const
{
	u16 max_mipmap_count;
	if (is_compressed_format())
	{
		// OpenGL considers that highest mipmap level for DXTC format is when either width or height is 1
		// not both. Assume it's the same for others backend.
		max_mipmap_count = floor_log2(static_cast<u32>(std::min(width() / 4, height() / 4))) + 1;
	}
	else
		max_mipmap_count = floor_log2(static_cast<u32>(std::max(width(), height()))) + 1;

	// ensure() rejects a programmed mipmap count of 0
	return std::min(ensure(mipmap()), max_mipmap_count);
}
// Address register bitfields: wrap modes for S (bits 0-3), T (bits 8-11), R (bits 16-19)
// and the depth-compare function (bits 28-31).
rsx::texture_wrap_mode fragment_texture::wrap_s() const
{
	return rsx::to_texture_wrap_mode((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)]) & 0xf);
}

rsx::texture_wrap_mode fragment_texture::wrap_t() const
{
	return rsx::to_texture_wrap_mode((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 8) & 0xf);
}

rsx::texture_wrap_mode fragment_texture::wrap_r() const
{
	return rsx::to_texture_wrap_mode((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 16) & 0xf);
}

rsx::comparison_function fragment_texture::zfunc() const
{
	return rsx::to_comparison_function((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 28) & 0xf);
}
// Unsigned-remap control (address register bits 12-15).
u8 fragment_texture::unsigned_remap() const
{
	return ((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 12) & 0xf);
}

// Per-channel sRGB/gamma mask (address register bits 20-23).
u8 fragment_texture::gamma() const
{
	// Converts gamma mask from RGBA to ARGB for compatibility with other per-channel mask registers
	const u32 rgba8_ctrl = ((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 20) & 0xf);
	return ((rgba8_ctrl << 1) & 0xF) | (rgba8_ctrl >> 3);
}

// Anisotropy bias (address register bits 4-7).
u8 fragment_texture::aniso_bias() const
{
	return ((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 4) & 0xf);
}

// Signed-remap control (address register bits 24-27).
u8 fragment_texture::signed_remap() const
{
	return ((registers[NV4097_SET_TEXTURE_ADDRESS + (m_index * 8)] >> 24) & 0xf);
}
// Texture enable bit (control0 register bit 31).
bool fragment_texture::enabled() const
{
	return ((registers[NV4097_SET_TEXTURE_CONTROL0 + (m_index * 8)] >> 31) & 0x1);
}

// Min/max LOD as 4.8 fixed-point values (control0 bits 19-30 and 7-18 respectively).
f32 fragment_texture::min_lod() const
{
	return rsx::decode_fxp<4, 8, false>((registers[NV4097_SET_TEXTURE_CONTROL0 + (m_index * 8)] >> 19) & 0xfff);
}

f32 fragment_texture::max_lod() const
{
	return rsx::decode_fxp<4, 8, false>((registers[NV4097_SET_TEXTURE_CONTROL0 + (m_index * 8)] >> 7) & 0xfff);
}
// Maximum anisotropy. A user override from config takes precedence (unless strict rendering
// mode forces hardware-accurate behavior); otherwise decode control0 bits 4-6.
rsx::texture_max_anisotropy fragment_texture::max_aniso() const
{
	switch (g_cfg.video.strict_rendering_mode ? 0 : g_cfg.video.anisotropic_level_override)
	{
	case 1: return rsx::texture_max_anisotropy::x1;
	case 2: return rsx::texture_max_anisotropy::x2;
	case 4: return rsx::texture_max_anisotropy::x4;
	case 6: return rsx::texture_max_anisotropy::x6;
	case 8: return rsx::texture_max_anisotropy::x8;
	case 10: return rsx::texture_max_anisotropy::x10;
	case 12: return rsx::texture_max_anisotropy::x12;
	case 16: return rsx::texture_max_anisotropy::x16;
	default: break;
	}

	return rsx::to_texture_max_anisotropy((registers[NV4097_SET_TEXTURE_CONTROL0 + (m_index * 8)] >> 4) & 0x7);
}

// Alpha-kill enable (control0 register bit 2).
bool fragment_texture::alpha_kill_enabled() const
{
	return ((registers[NV4097_SET_TEXTURE_CONTROL0 + (m_index * 8)] >> 2) & 0x1);
}
// Raw channel-remap control register (control1).
u32 fragment_texture::remap() const
{
	return (registers[NV4097_SET_TEXTURE_CONTROL1 + (m_index * 8)]);
}

// Decodes the channel remap vector, first patching the low byte for formats whose remap
// encoding is restricted or fixed on real hardware (float formats, 16-bit two-channel and
// HILO formats).
rsx::texture_channel_remap_t fragment_texture::decoded_remap() const
{
	u32 remap_ctl = registers[NV4097_SET_TEXTURE_CONTROL1 + (m_index * 8)];
	u32 remap_override = (remap_ctl >> 16) & 0xFFFF;

	switch (format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN))
	{
	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
	{
		// Floating point textures cannot be remapped on realhw, throws error 261
		remap_ctl &= ~(0xFF);
		remap_ctl |= 0xE4;
		break;
	}
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	{
		// Floating point textures cannot be remapped on realhw, throws error 261
		// High word of remap ctrl remaps ARGB to YXXX
		const u32 lo_word = (remap_override) ? 0x56 : 0x66;
		remap_ctl &= ~(0xFF);
		remap_ctl |= lo_word;
		break;
	}
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
	{
		// These are special formats whose remap encoding is in 16-bit blocks
		// The first channel is encoded as 0x4 (combination of 0 and 1) and the second channel is 0xE (combination of 2 and 3)
		// There are only 2 valid combinations - 0xE4 or 0x4E, any attempts to mess with this will crash the system
		// This means only R and G channels exist for these formats
		// Note that for the X16 format, 0xE refers to the "second" channel of a Y16_X16 format. 0xE is actually the existing data in this case
		// Low bit in remap override (high word) affects whether the G component should match R and B components
		// Components are usually interleaved R-G-R-G unless flag is set, then its R-R-R-G (Virtua Fighter 5)
		// NOTE: The remap vector can also read from B-A-B-A in some cases (Mass Effect 3)
		u32 lo_word = remap_ctl & 0xFF;
		remap_ctl &= 0xFF00;

		switch (lo_word)
		{
		case 0xE4:
			lo_word = (remap_override) ? 0x56 : 0x66;
			break;
		case 0x4E:
			lo_word = (remap_override) ? 0xA9 : 0x99;
			break;
		case 0xEE:
			lo_word = 0xAA;
			break;
		case 0x44:
			lo_word = 0x55;
			break;
		}

		remap_ctl |= lo_word;
		break;
	}
	default:
		break;
	}

	return decode_remap_encoding(remap_ctl);
}
// LOD bias: 4.8 signed fixed-point from the filter register, plus the user-configured bias,
// clamped to the hardware-representable range [-16, 16 - 1/256].
f32 fragment_texture::bias() const
{
	const f32 bias = rsx::decode_fxp<4, 8>((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)]) & 0x1fff);
	return std::clamp<f32>(bias + static_cast<f32>(g_cfg.video.texture_lod_bias.get()), -16.f, 16.f - 1.f / 256);
}
// Filter register bitfields: minify filter (bits 16-18), magnify filter (bits 24-26),
// convolution filter (bits 13-16) and per-channel signedness flags (bits 28-31, ARGB order).
rsx::texture_minify_filter fragment_texture::min_filter() const
{
	return rsx::to_texture_minify_filter((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 16) & 0x7);
}

rsx::texture_magnify_filter fragment_texture::mag_filter() const
{
	return rsx::to_texture_magnify_filter((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 24) & 0x7);
}

u8 fragment_texture::convolution_filter() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 13) & 0xf);
}

// All four signedness bits at once (A=28, R=29, G=30, B=31).
u8 fragment_texture::argb_signed() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 28) & 0xf);
}

bool fragment_texture::a_signed() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 28) & 0x1);
}

bool fragment_texture::r_signed() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 29) & 0x1);
}

bool fragment_texture::g_signed() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 30) & 0x1);
}

bool fragment_texture::b_signed() const
{
	return ((registers[NV4097_SET_TEXTURE_FILTER + (m_index * 8)] >> 31) & 0x1);
}
// Image rect register: width in bits 16-31, height in bits 0-15 (height forced to 1 for 1D).
u16 fragment_texture::width() const
{
	return ((registers[NV4097_SET_TEXTURE_IMAGE_RECT + (m_index * 8)] >> 16) & 0xffff);
}

u16 fragment_texture::height() const
{
	return dimension() != rsx::texture_dimension::dimension1d ? ((registers[NV4097_SET_TEXTURE_IMAGE_RECT + (m_index * 8)]) & 0xffff) : 1;
}

// Raw packed border color register.
u32 fragment_texture::border_color() const
{
	return registers[NV4097_SET_TEXTURE_BORDER_COLOR + (m_index * 8)];
}

// Border color decoded and passed through the texture's channel remap vector.
color4f fragment_texture::remapped_border_color() const
{
	color4f base_color = rsx::decode_border_color(border_color());
	if (remap() == RSX_TEXTURE_REMAP_IDENTITY)
	{
		return base_color;
	}

	return decoded_remap().remap(base_color);
}

// Depth (3D textures only, control3 bits 20+; otherwise 1). Note control3 is indexed per
// texture word, not with the 8-word stride.
u16 fragment_texture::depth() const
{
	return dimension() == rsx::texture_dimension::dimension3d ? (registers[NV4097_SET_TEXTURE_CONTROL3 + m_index] >> 20) : 1;
}

// Row pitch in bytes (control3 bits 0-19).
u32 fragment_texture::pitch() const
{
	return registers[NV4097_SET_TEXTURE_CONTROL3 + m_index] & 0xfffff;
}
u32 vertex_texture::offset() const
{
return registers[NV4097_SET_VERTEX_TEXTURE_OFFSET + (m_index * 8)] & 0x7FFFFFFF;
}
u8 vertex_texture::location() const
{
return (registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (m_index * 8)] & 0x3) - 1;
}
bool vertex_texture::cubemap() const
{
return ((registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (m_index * 8)] >> 2) & 0x1);
}
u8 vertex_texture::border_type() const
{
    // Border bit has no effect on vertex textures, it is always zero
    // NOTE(review): constant 1 matches the "border disabled" encoding used by
    // fragment_texture::border_type — confirm if that accessor changes.
    return 1;
}
rsx::texture_dimension vertex_texture::dimension() const
{
    // FORMAT bits 7..4 encode the texture dimensionality.
    const u32 fmt_word = registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (m_index * 8)];
    return rsx::to_texture_dimension((fmt_word >> 4) & 0xf);
}
rsx::texture_dimension_extended vertex_texture::get_extended_texture_dimension() const
{
    // Map the raw dimension onto the extended enum, disambiguating 2D vs cubemap.
    const auto dim = dimension();
    if (dim == rsx::texture_dimension::dimension1d)
    {
        return rsx::texture_dimension_extended::texture_dimension_1d;
    }
    if (dim == rsx::texture_dimension::dimension3d)
    {
        return rsx::texture_dimension_extended::texture_dimension_3d;
    }
    if (dim == rsx::texture_dimension::dimension2d)
    {
        return cubemap() ? rsx::texture_dimension_extended::texture_dimension_cubemap : rsx::texture_dimension_extended::texture_dimension_2d;
    }
    fmt::throw_exception("Unreachable");
}
u8 vertex_texture::format() const
{
    // FORMAT bits 15..8: texel format code.
    const u32 fmt_word = registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (m_index * 8)];
    return static_cast<u8>((fmt_word >> 8) & 0xff);
}
u16 vertex_texture::mipmap() const
{
    // FORMAT bits 31..16: mipmap level count as programmed by the application.
    const u32 fmt_word = registers[NV4097_SET_VERTEX_TEXTURE_FORMAT + (m_index * 8)];
    return static_cast<u16>(fmt_word >> 16);
}
u16 vertex_texture::get_exact_mipmap_count() const
{
    // Clamp the programmed (non-zero) mipmap count to what the surface size can hold.
    const u32 largest_side = std::max<u32>(width(), height());
    const u16 hw_limit = static_cast<u16>(floor_log2(largest_side) + 1);
    return std::min(ensure(mipmap()), hw_limit);
}
rsx::texture_channel_remap_t vertex_texture::decoded_remap() const
{
    // Vertex samplers have no remap control; always report the identity remap vector.
    return rsx::default_remap_vector;
}
u32 vertex_texture::remap() const
{
    // disabled
    // Channel remapping is not supported for vertex textures; report identity.
    return RSX_TEXTURE_REMAP_IDENTITY;
}
bool vertex_texture::enabled() const
{
return ((registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + (m_index * 8)] >> 31) & 0x1);
}
f32 vertex_texture::min_lod() const
{
    // Unsigned 4.8 fixed-point minimum LOD, CONTROL0 bits 30..19.
    const u32 ctrl = registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + (m_index * 8)];
    return rsx::decode_fxp<4, 8, false>((ctrl >> 19) & 0xfff);
}
f32 vertex_texture::max_lod() const
{
    // Unsigned 4.8 fixed-point maximum LOD, CONTROL0 bits 18..7.
    const u32 ctrl = registers[NV4097_SET_VERTEX_TEXTURE_CONTROL0 + (m_index * 8)];
    return rsx::decode_fxp<4, 8, false>((ctrl >> 7) & 0xfff);
}
f32 vertex_texture::bias() const
{
    // Signed 4.8 fixed-point LOD bias plus the user-configured global bias,
    // clamped to the hardware range [-16, 16 - 1/256].
    const f32 hw_bias = rsx::decode_fxp<4, 8>((registers[NV4097_SET_VERTEX_TEXTURE_FILTER + (m_index * 8)]) & 0x1fff);
    const f32 user_bias = static_cast<f32>(g_cfg.video.texture_lod_bias.get());
    return std::clamp<f32>(hw_bias + user_bias, -16.f, 16.f - 1.f / 256);
}
rsx::texture_minify_filter vertex_texture::min_filter() const
{
    // This implementation always samples vertex textures with nearest filtering;
    // the FILTER register's minify field is ignored here.
    return rsx::texture_minify_filter::nearest;
}
rsx::texture_magnify_filter vertex_texture::mag_filter() const
{
    // This implementation always samples vertex textures with nearest filtering;
    // the FILTER register's magnify field is ignored here.
    return rsx::texture_magnify_filter::nearest;
}
rsx::texture_wrap_mode vertex_texture::wrap_s() const
{
    // ADDRESS register low nibble selects the S-axis wrap mode.
    const u32 addr = registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + (m_index * 8)];
    return rsx::to_texture_wrap_mode(addr & 0xf);
}
rsx::texture_wrap_mode vertex_texture::wrap_t() const
{
    // ADDRESS register bits 11..8 select the T-axis wrap mode.
    const u32 addr = registers[NV4097_SET_VERTEX_TEXTURE_ADDRESS + (m_index * 8)];
    return rsx::to_texture_wrap_mode((addr >> 8) & 0xf);
}
rsx::texture_wrap_mode vertex_texture::wrap_r() const
{
    // The R axis always wraps for vertex textures in this implementation;
    // the ADDRESS register's R field is not consulted.
    return rsx::texture_wrap_mode::wrap;
}
u16 vertex_texture::width() const
{
    // High halfword of IMAGE_RECT holds the width in texels.
    const u32 rect = registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + (m_index * 8)];
    return static_cast<u16>(rect >> 16);
}
u16 vertex_texture::height() const
{
    // Low halfword of IMAGE_RECT; 1D textures always report a height of 1.
    if (dimension() == rsx::texture_dimension::dimension1d)
    {
        return 1;
    }
    return static_cast<u16>(registers[NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT + (m_index * 8)] & 0xffff);
}
u32 vertex_texture::border_color() const
{
    // Raw border color word exactly as written by the application.
    const u32 reg = NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR + (m_index * 8);
    return registers[reg];
}
color4f vertex_texture::remapped_border_color() const
{
    // Vertex textures have no channel remap, so the decoded color is returned as-is.
    const u32 raw = border_color();
    return rsx::decode_border_color(raw);
}
u16 vertex_texture::depth() const
{
    // CONTROL3 bits 31..20; only meaningful for 3D textures, otherwise 1.
    if (dimension() != rsx::texture_dimension::dimension3d)
    {
        return 1;
    }
    return static_cast<u16>(registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3 + (m_index * 8)] >> 20);
}
u32 vertex_texture::pitch() const
{
    // CONTROL3 bits 19..0: row pitch in bytes.
    const u32 ctrl3 = registers[NV4097_SET_VERTEX_TEXTURE_CONTROL3 + (m_index * 8)];
    return ctrl3 & 0xfffff;
}
}
| 14,009
|
C++
|
.cpp
| 380
| 34.081579
| 178
| 0.700465
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,391
|
gcm_enums.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/gcm_enums.cpp
|
#include "gcm_enums.h"
#include "Utilities/StrFmt.h"
#include "Utilities/Thread.h"
using namespace rsx;
template <>
void fmt_class_string<CellGcmLocation>::format(std::string& out, u64 arg)
{
// Pretty-printer for GCM memory location / DMA context selectors.
// The strings are runtime output (logs); they must not be altered.
format_enum(out, arg, [](CellGcmLocation value)
{
switch (value)
{
case CELL_GCM_LOCATION_LOCAL: return "Local";
case CELL_GCM_LOCATION_MAIN: return "Main";
case CELL_GCM_CONTEXT_DMA_MEMORY_FRAME_BUFFER: return "Local-Buffer"; // Local memory for DMA operations
case CELL_GCM_CONTEXT_DMA_MEMORY_HOST_BUFFER: return "Main-Buffer"; // Main memory for DMA operations
case CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_LOCAL: return "Report Local";
case CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_MAIN: return "Report Main";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_0: return "_Notify0";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_1: return "_Notify1";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_2: return "_Notify2";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_3: return "_Notify3";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_4: return "_Notify4";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_5: return "_Notify5";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_6: return "_Notify6";
case CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_7: return "_Notify7";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY0: return "_Get-Notify0";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY1: return "_Get-Notify1";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY2: return "_Get-Notify2";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY3: return "_Get-Notify3";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY4: return "_Get-Notify4";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY5: return "_Get-Notify5";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY6: return "_Get-Notify6";
case CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY7: return "_Get-Notify7";
case CELL_GCM_CONTEXT_DMA_SEMAPHORE_RW: return "SEMA-RW";
case CELL_GCM_CONTEXT_DMA_SEMAPHORE_R: return "SEMA-R";
case CELL_GCM_CONTEXT_DMA_DEVICE_RW: return "DEVICE-RW";
case CELL_GCM_CONTEXT_DMA_DEVICE_R: return "DEVICE-R";
}
// Unlisted values fall through to the generic "unknown" formatter
return unknown;
});
}
template <>
void fmt_class_string<CellGcmTexture>::format(std::string& out, u64 arg)
{
// Pretty-printer for GCM texture formats. The LN (linear) / UN (unnormalized)
// layout flags are stripped before matching the base format, then appended as
// "-LN"/"-UN" suffixes.
switch (static_cast<u32>(arg) & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN))
{
case CELL_GCM_TEXTURE_COMPRESSED_HILO8: out += "COMPRESSED_HILO8"; break;
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: out += "COMPRESSED_HILO_S8"; break;
case CELL_GCM_TEXTURE_B8: out += "B8"; break;
case CELL_GCM_TEXTURE_A1R5G5B5: out += "A1R5G5B5"; break;
case CELL_GCM_TEXTURE_A4R4G4B4: out += "A4R4G4B4"; break;
case CELL_GCM_TEXTURE_R5G6B5: out += "R5G6B5"; break;
case CELL_GCM_TEXTURE_A8R8G8B8: out += "A8R8G8B8"; break;
case CELL_GCM_TEXTURE_COMPRESSED_DXT1: out += "COMPRESSED_DXT1"; break;
case CELL_GCM_TEXTURE_COMPRESSED_DXT23: out += "COMPRESSED_DXT23"; break;
case CELL_GCM_TEXTURE_COMPRESSED_DXT45: out += "COMPRESSED_DXT45"; break;
case CELL_GCM_TEXTURE_G8B8: out += "G8B8"; break;
case CELL_GCM_TEXTURE_R6G5B5: out += "R6G5B5"; break;
case CELL_GCM_TEXTURE_DEPTH24_D8: out += "DEPTH24_D8"; break;
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: out += "DEPTH24_D8_FLOAT"; break;
case CELL_GCM_TEXTURE_DEPTH16: out += "DEPTH16"; break;
case CELL_GCM_TEXTURE_DEPTH16_FLOAT: out += "DEPTH16_FLOAT"; break;
case CELL_GCM_TEXTURE_X16: out += "X16"; break;
case CELL_GCM_TEXTURE_Y16_X16: out += "Y16_X16"; break;
case CELL_GCM_TEXTURE_R5G5B5A1: out += "R5G5B5A1"; break;
case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT: out += "W16_Z16_Y16_X16_FLOAT"; break;
case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT: out += "W32_Z32_Y32_X32_FLOAT"; break;
case CELL_GCM_TEXTURE_X32_FLOAT: out += "X32_FLOAT"; break;
case CELL_GCM_TEXTURE_D1R5G5B5: out += "D1R5G5B5"; break;
case CELL_GCM_TEXTURE_D8R8G8B8: out += "D8R8G8B8"; break;
case CELL_GCM_TEXTURE_Y16_X16_FLOAT: out += "Y16_X16_FLOAT"; break;
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8: out += "COMPRESSED_B8R8_G8R8"; break;
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: out += "COMPRESSED_R8B8_R8G8"; break;
// Unknown base format: print the raw numeric value without flag suffixes
default: fmt::append(out, "%s", arg); return;
}
switch (arg & (CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN))
{
case CELL_GCM_TEXTURE_LN: out += "-LN"; break;
case CELL_GCM_TEXTURE_UN: out += "-UN"; break;
case CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN: out += "-LN-UN"; break;
default: break;
}
}
template <>
void fmt_class_string<comparison_function>::format(std::string& out, u64 arg)
{
    // Pretty-printer for NV4097 comparison functions (alpha/depth/stencil tests).
    const auto to_name = [](comparison_function func)
    {
        switch (func)
        {
        case comparison_function::never: return "Never";
        case comparison_function::less: return "Less";
        case comparison_function::equal: return "Equal";
        case comparison_function::less_or_equal: return "Less_equal";
        case comparison_function::greater: return "Greater";
        case comparison_function::not_equal: return "Not_equal";
        case comparison_function::greater_or_equal: return "Greater_equal";
        case comparison_function::always: return "Always";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<stencil_op>::format(std::string& out, u64 arg)
{
    // Pretty-printer for stencil update operations.
    const auto to_name = [](stencil_op op)
    {
        switch (op)
        {
        case stencil_op::keep: return "Keep";
        case stencil_op::zero: return "Zero";
        case stencil_op::replace: return "Replace";
        case stencil_op::incr: return "Incr";
        case stencil_op::decr: return "Decr";
        case stencil_op::incr_wrap: return "Incr_wrap";
        case stencil_op::decr_wrap: return "Decr_wrap";
        case stencil_op::invert: return "Invert";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<fog_mode>::format(std::string& out, u64 arg)
{
    // Pretty-printer for fog equation modes.
    const auto to_name = [](fog_mode mode)
    {
        switch (mode)
        {
        case fog_mode::exponential: return "exponential";
        case fog_mode::exponential2: return "exponential2";
        case fog_mode::exponential2_abs: return "exponential2(abs)";
        case fog_mode::exponential_abs: return "exponential(abs)";
        case fog_mode::linear: return "linear";
        case fog_mode::linear_abs: return "linear(abs)";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<logic_op>::format(std::string& out, u64 arg)
{
// Pretty-printer for the 16 raster logic operations.
format_enum(out, arg, [](logic_op value)
{
switch (value)
{
case logic_op::logic_clear: return "Clear";
case logic_op::logic_and: return "And";
case logic_op::logic_set: return "Set";
case logic_op::logic_and_reverse: return "And_reverse";
case logic_op::logic_copy: return "Copy";
case logic_op::logic_and_inverted: return "And_inverted";
case logic_op::logic_noop: return "Noop";
case logic_op::logic_xor: return "Xor";
case logic_op::logic_or: return "Or";
case logic_op::logic_nor: return "Nor";
case logic_op::logic_equiv: return "Equiv";
case logic_op::logic_invert: return "Invert";
case logic_op::logic_or_reverse: return "Or_reverse";
case logic_op::logic_copy_inverted: return "Copy_inverted";
case logic_op::logic_or_inverted: return "Or_inverted";
case logic_op::logic_nand: return "Nand";
}
return unknown;
});
}
template <>
void fmt_class_string<front_face>::format(std::string& out, u64 arg)
{
    // Pretty-printer for front-face winding order.
    const auto to_name = [](front_face face)
    {
        switch (face)
        {
        case front_face::ccw: return "counter clock wise";
        case front_face::cw: return "clock wise";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<cull_face>::format(std::string& out, u64 arg)
{
    // Pretty-printer for cull face selection.
    const auto to_name = [](cull_face face)
    {
        switch (face)
        {
        case cull_face::back: return "back";
        case cull_face::front: return "front";
        case cull_face::front_and_back: return "front and back";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<surface_target>::format(std::string& out, u64 arg)
{
    // Pretty-printer for the MRT color surface selection.
    const auto to_name = [](surface_target target)
    {
        switch (target)
        {
        case surface_target::none: return "none";
        case surface_target::surface_a: return "surface A";
        case surface_target::surface_b: return "surface B";
        case surface_target::surfaces_a_b: return "surfaces A and B";
        case surface_target::surfaces_a_b_c: return "surfaces A, B and C";
        case surface_target::surfaces_a_b_c_d: return "surfaces A,B, C and D";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<primitive_type>::format(std::string& out, u64 arg)
{
    // Pretty-printer for draw primitive topology.
    const auto to_name = [](primitive_type prim)
    {
        switch (prim)
        {
        case primitive_type::points: return "Points";
        case primitive_type::lines: return "Lines";
        case primitive_type::line_loop: return "Line_loop";
        case primitive_type::line_strip: return "Line_strip";
        case primitive_type::triangles: return "Triangles";
        case primitive_type::triangle_strip: return "Triangle_strip";
        case primitive_type::triangle_fan: return "Triangle_fan";
        case primitive_type::quads: return "Quads";
        case primitive_type::quad_strip: return "Quad_strip";
        case primitive_type::polygon: return "Polygon";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::transfer_operation>::format(std::string& out, u64 arg)
{
    // Pretty-printer for 2D blit engine transfer operations.
    const auto to_name = [](blit_engine::transfer_operation op)
    {
        switch (op)
        {
        case blit_engine::transfer_operation::blend_and: return "blend and";
        case blit_engine::transfer_operation::blend_premult: return "blend premult";
        case blit_engine::transfer_operation::rop_and: return "rop and";
        case blit_engine::transfer_operation::srccopy: return "srccopy";
        case blit_engine::transfer_operation::srccopy_and: return "srccopy_and";
        case blit_engine::transfer_operation::srccopy_premult: return "srccopy_premult";
        default: return unknown;
        }
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::transfer_source_format>::format(std::string& out, u64 arg)
{
// Pretty-printer for blit engine source pixel formats.
format_enum(out, arg, [](blit_engine::transfer_source_format value)
{
switch (value)
{
case blit_engine::transfer_source_format::a1r5g5b5: return "a1r5g5b5";
case blit_engine::transfer_source_format::a8b8g8r8: return "a8b8g8r8";
case blit_engine::transfer_source_format::a8r8g8b8: return "a8r8g8b8";
case blit_engine::transfer_source_format::ay8: return "ay8";
case blit_engine::transfer_source_format::cr8yb8cb8ya8: return "cr8yb8cb8ya8";
case blit_engine::transfer_source_format::ecr8eyb8ecb8eya8: return "ecr8eyb8ecb8eya8";
case blit_engine::transfer_source_format::eyb8ecr8eya8ecb8: return "eyb8ecr8eya8ecb8";
case blit_engine::transfer_source_format::r5g6b5: return "r5g6b5";
case blit_engine::transfer_source_format::x1r5g5b5: return "x1r5g5b5";
case blit_engine::transfer_source_format::x8b8g8r8: return "x8b8g8r8";
case blit_engine::transfer_source_format::x8r8g8b8: return "x8r8g8b8";
case blit_engine::transfer_source_format::y8: return "y8";
case blit_engine::transfer_source_format::yb8cr8ya8cb8: return "yb8cr8ya8cb8";
default: return unknown;
}
});
}
template <>
void fmt_class_string<blit_engine::context_surface>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blit destination surface kind (linear vs swizzled).
    const auto to_name = [](blit_engine::context_surface surface)
    {
        switch (surface)
        {
        case blit_engine::context_surface::surface2d: return "surface 2d";
        case blit_engine::context_surface::swizzle2d: return "swizzle 2d";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::transfer_destination_format>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blit engine destination pixel formats.
    const auto to_name = [](blit_engine::transfer_destination_format fmt)
    {
        switch (fmt)
        {
        case blit_engine::transfer_destination_format::a8r8g8b8: return "a8r8g8b8";
        case blit_engine::transfer_destination_format::r5g6b5: return "r5g6b5";
        case blit_engine::transfer_destination_format::y32: return "y32";
        default: return unknown;
        }
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<index_array_type>::format(std::string& out, u64 arg)
{
    // Pretty-printer for index buffer element width.
    const auto to_name = [](index_array_type type)
    {
        switch (type)
        {
        case index_array_type::u16: return "u16";
        case index_array_type::u32: return "u32";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<polygon_mode>::format(std::string& out, u64 arg)
{
    // Pretty-printer for polygon rasterization mode.
    const auto to_name = [](polygon_mode mode)
    {
        switch (mode)
        {
        case polygon_mode::fill: return "fill";
        case polygon_mode::line: return "line";
        case polygon_mode::point: return "point";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<surface_color_format>::format(std::string& out, u64 arg)
{
// Pretty-printer for render target color formats ("F_" prefix = floating point).
format_enum(out, arg, [](surface_color_format value)
{
switch (value)
{
case surface_color_format::x1r5g5b5_z1r5g5b5: return "X1R5G5B5_Z1R5G5B5";
case surface_color_format::x1r5g5b5_o1r5g5b5: return "X1R5G5B5_O1R5G5B5";
case surface_color_format::r5g6b5: return "R5G6B5";
case surface_color_format::x8r8g8b8_z8r8g8b8: return "X8R8G8B8_Z8R8G8B8";
case surface_color_format::x8r8g8b8_o8r8g8b8: return "X8R8G8B8_O8R8G8B8";
case surface_color_format::a8r8g8b8: return "A8R8G8B8";
case surface_color_format::b8: return "B8";
case surface_color_format::g8b8: return "G8B8";
case surface_color_format::w16z16y16x16: return "F_W16Z16Y16X16";
case surface_color_format::w32z32y32x32: return "F_W32Z32Y32X32";
case surface_color_format::x32: return "F_X32";
case surface_color_format::x8b8g8r8_z8b8g8r8: return "X8B8G8R8_Z8B8G8R8";
case surface_color_format::x8b8g8r8_o8b8g8r8: return "X8B8G8R8_O8B8G8R8";
case surface_color_format::a8b8g8r8: return "A8B8G8R8";
}
return unknown;
});
}
template <>
void fmt_class_string<surface_antialiasing>::format(std::string& out, u64 arg)
{
    // Pretty-printer for MSAA sample layouts.
    const auto to_name = [](surface_antialiasing mode)
    {
        switch (mode)
        {
        case surface_antialiasing::center_1_sample: return "1 sample centered";
        case surface_antialiasing::diagonal_centered_2_samples: return "2 samples diagonal centered";
        case surface_antialiasing::square_centered_4_samples: return "4 samples square centered";
        case surface_antialiasing::square_rotated_4_samples: return "4 samples diagonal rotated";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blend_equation>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blend equations.
    const auto to_name = [](blend_equation eq)
    {
        switch (eq)
        {
        case blend_equation::add: return "Add";
        case blend_equation::subtract: return "Subtract";
        case blend_equation::reverse_subtract: return "Reverse_subtract";
        case blend_equation::min: return "Min";
        case blend_equation::max: return "Max";
        case blend_equation::add_signed: return "Add_signed";
        case blend_equation::reverse_add_signed: return "Reverse_add_signed";
        case blend_equation::reverse_subtract_signed: return "Reverse_subtract_signed";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blend_factor>::format(std::string& out, u64 arg)
{
// Pretty-printer for blend factors, rendered as the term they contribute
// to the blend equation (e.g. "(1 - src.a)").
format_enum(out, arg, [](blend_factor value)
{
switch (value)
{
case blend_factor::zero: return "0";
case blend_factor::one: return "1";
case blend_factor::src_color: return "src.rgb";
case blend_factor::one_minus_src_color: return "(1 - src.rgb)";
case blend_factor::src_alpha: return "src.a";
case blend_factor::one_minus_src_alpha: return "(1 - src.a)";
case blend_factor::dst_alpha: return "dst.a";
case blend_factor::one_minus_dst_alpha: return "(1 - dst.a)";
case blend_factor::dst_color: return "dst.rgb";
case blend_factor::one_minus_dst_color: return "(1 - dst.rgb)";
case blend_factor::src_alpha_saturate: return "sat(src.a)";
case blend_factor::constant_color: return "const.rgb";
case blend_factor::one_minus_constant_color: return "(1 - const.rgb)";
case blend_factor::constant_alpha: return "const.a";
case blend_factor::one_minus_constant_alpha: return "(1 - const.a)";
}
return unknown;
});
}
template <>
void fmt_class_string<window_origin>::format(std::string& out, u64 arg)
{
    // Pretty-printer for window coordinate origin.
    const auto to_name = [](window_origin origin)
    {
        switch (origin)
        {
        case window_origin::bottom: return "bottom";
        case window_origin::top: return "top";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<window_pixel_center>::format(std::string& out, u64 arg)
{
    // Pretty-printer for pixel center convention.
    const auto to_name = [](window_pixel_center center)
    {
        switch (center)
        {
        case window_pixel_center::half: return "half";
        case window_pixel_center::integer: return "integer";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<user_clip_plane_op>::format(std::string& out, u64 arg)
{
    // Pretty-printer for user clip plane comparison mode.
    const auto to_name = [](user_clip_plane_op op)
    {
        switch (op)
        {
        case user_clip_plane_op::disable: return "disabled";
        case user_clip_plane_op::greater_or_equal: return "greater or equal";
        case user_clip_plane_op::less_than: return "less than";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::context_dma>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blit engine DMA context selectors.
    const auto to_name = [](blit_engine::context_dma dma)
    {
        switch (dma)
        {
        case blit_engine::context_dma::report_location_main: return "report location main";
        case blit_engine::context_dma::to_memory_get_report: return "to memory get report";
        case blit_engine::context_dma::memory_host_buffer: return "memory host buffer";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::transfer_origin>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blit sample origin.
    const auto to_name = [](blit_engine::transfer_origin origin)
    {
        switch (origin)
        {
        case blit_engine::transfer_origin::center: return "center";
        case blit_engine::transfer_origin::corner: return "corner";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<shading_mode>::format(std::string& out, u64 arg)
{
    // Pretty-printer for primitive shading mode.
    const auto to_name = [](shading_mode mode)
    {
        switch (mode)
        {
        case shading_mode::flat: return "flat";
        case shading_mode::smooth: return "smooth";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<surface_depth_format>::format(std::string& out, u64 arg)
{
    // Pretty-printer for depth-stencil surface formats.
    const auto to_name = [](surface_depth_format fmt)
    {
        switch (fmt)
        {
        case surface_depth_format::z16: return "Z16";
        case surface_depth_format::z24s8: return "Z24S8";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<blit_engine::transfer_interpolator>::format(std::string& out, u64 arg)
{
    // Pretty-printer for blit interpolation mode (first/zero order hold).
    const auto to_name = [](blit_engine::transfer_interpolator interp)
    {
        switch (interp)
        {
        case blit_engine::transfer_interpolator::foh: return "foh";
        case blit_engine::transfer_interpolator::zoh: return "zoh";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<texture_dimension>::format(std::string& out, u64 arg)
{
    // Pretty-printer for texture dimensionality.
    const auto to_name = [](texture_dimension dim)
    {
        switch (dim)
        {
        case texture_dimension::dimension1d: return "1D";
        case texture_dimension::dimension2d: return "2D";
        case texture_dimension::dimension3d: return "3D";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
template <>
void fmt_class_string<texture_max_anisotropy>::format(std::string& out, u64 arg)
{
    // Pretty-printer for anisotropic filtering sample counts.
    const auto to_name = [](texture_max_anisotropy aniso)
    {
        switch (aniso)
        {
        case texture_max_anisotropy::x1: return "1";
        case texture_max_anisotropy::x2: return "2";
        case texture_max_anisotropy::x4: return "4";
        case texture_max_anisotropy::x6: return "6";
        case texture_max_anisotropy::x8: return "8";
        case texture_max_anisotropy::x10: return "10";
        case texture_max_anisotropy::x12: return "12";
        case texture_max_anisotropy::x16: return "16";
        }
        return unknown;
    };
    format_enum(out, arg, to_name);
}
namespace rsx
{
// Tag type used to format a bool-as-integer (0/1) as "false"/"true"
// through the fmt_class_string machinery (see specialization below in the original file).
enum class boolean_to_string_t : u8;
}
template <>
void fmt_class_string<boolean_to_string_t>::format(std::string& out, u64 arg)
{
// Formats a promoted bool (unary + yields 1/0) as "true"/"false".
format_enum(out, arg, [](boolean_to_string_t value)
{
switch (value)
{
case boolean_to_string_t{+true}: return "true";
case boolean_to_string_t{+false}: return "false";
default: break; // TODO: This is technically unreachable but need needs to be reachable when value is not 1 or 0
}
return unknown;
});
}
| 19,601
|
C++
|
.cpp
| 567
| 32.144621
| 114
| 0.72629
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,392
|
NullGSRender.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Null/NullGSRender.cpp
|
#include "stdafx.h"
#include "NullGSRender.h"
u64 NullGSRender::get_cycles()
{
    // Query the thread scheduler for the cycles consumed by this render thread.
    auto& render_thread = static_cast<named_thread<NullGSRender>&>(*this);
    return thread_ctrl::get_cycles(render_thread);
}
// Construct the null backend; all state lives in the GSRender base
// (optionally restored from the savestate serializer `ar`).
NullGSRender::NullGSRender(utils::serial* ar) noexcept : GSRender(ar)
{
}
void NullGSRender::end()
{
    // Null backend renders nothing: account for the draw call as a no-op,
    // then run the common end-of-draw bookkeeping in the base class.
    execute_nop_draw();
    rsx::thread::end();
}
| 311
|
C++
|
.cpp
| 14
| 20.785714
| 81
| 0.748299
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,393
|
texture_cache.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache.cpp
|
#include "stdafx.h"
#include "texture_cache_utils.h"
#include "Utilities/address_range.h"
#include "util/fnv_hash.hpp"
namespace rsx
{
constexpr u32 min_lockable_data_size = 4096; // Increasing this value has worse results even on systems with pages > 4k
void buffered_section::init_lockable_range(const address_range& range)
{
    // Expand the watched range to whole pages — memory protection only works
    // at page granularity.
    locked_range = range.to_page_range();
    AUDIT((locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)));
    AUDIT(locked_range.end <= page_end(range.end));
    ensure(locked_range.is_page_range());
}
void buffered_section::reset(const address_range& memory_range)
{
    // Re-initialize this section to track a new CPU range. The section must not
    // currently be locked.
    ensure(memory_range.valid() && locked == false);
    cpu_range = address_range(memory_range);
    confirmed_range.invalidate();
    locked_range.invalidate();
    protection = utils::protection::rw;
    protection_strat = section_protection_strategy::lock;
    locked = false;
    init_lockable_range(cpu_range);
    // Small ranges are cheaper to verify by hashing than by page protection
    // (threshold: min_lockable_data_size).
    if (memory_range.length() < min_lockable_data_size)
    {
        protection_strat = section_protection_strategy::hash;
        mem_hash = 0;
    }
}
void buffered_section::invalidate_range()
{
    // Drop every tracked range; only legal while the section is unlocked.
    ensure(!locked);
    locked_range.invalidate();
    confirmed_range.invalidate();
    cpu_range.invalidate();
}
void buffered_section::protect(utils::protection new_prot, bool force)
{
    // Apply a new protection level to the locked range (or, for hash-tracked
    // sections, refresh the content hash). `force` re-applies protection even
    // when the level is unchanged.
    if (new_prot == protection && !force) return;
    ensure(locked_range.is_page_range());
    AUDIT(!confirmed_range.valid() || confirmed_range.inside(cpu_range));
#ifdef TEXTURE_CACHE_DEBUG
    if (new_prot != protection || force)
    {
        if (locked && !force) // When force=true, it is the responsibility of the caller to remove this section from the checker refcounting
            tex_cache_checker.remove(locked_range, protection);
        if (new_prot != utils::protection::rw)
            tex_cache_checker.add(locked_range, new_prot);
    }
#endif // TEXTURE_CACHE_DEBUG
    if (new_prot == utils::protection::no)
    {
        // Override
        // no-access protection must be enforced with real page flags, not hashing
        protection_strat = section_protection_strategy::lock;
    }
    if (protection_strat == section_protection_strategy::lock)
    {
        rsx::memory_protect(locked_range, new_prot);
    }
    else if (new_prot != utils::protection::rw)
    {
        // Hash strategy: snapshot current contents instead of locking pages
        mem_hash = fast_hash_internal();
    }
    protection = new_prot;
    locked = (protection != utils::protection::rw);
    if (!locked)
    {
        // Unprotect range also invalidates secured range
        confirmed_range.invalidate();
    }
}
void buffered_section::protect(utils::protection prot, const std::pair<u32, u32>& new_confirm)
{
    // new_confirm.first is an offset after cpu_range.start
    // new_confirm.second is the length (after cpu_range.start + new_confirm.first)
#ifdef TEXTURE_CACHE_DEBUG
    // We need to remove the lockable range from page_info as we will be re-protecting with force==true
    if (locked)
        tex_cache_checker.remove(locked_range, protection);
#endif
    // Save previous state to compare for changes
    const auto prev_confirmed_range = confirmed_range;
    if (prot != utils::protection::rw)
    {
        if (confirmed_range.valid())
        {
            // Grow the confirmed region to also cover the new sub-range
            confirmed_range.start = std::min(confirmed_range.start, cpu_range.start + new_confirm.first);
            confirmed_range.end = std::max(confirmed_range.end, cpu_range.start + new_confirm.first + new_confirm.second - 1);
        }
        else
        {
            confirmed_range = address_range::start_length(cpu_range.start + new_confirm.first, new_confirm.second);
            ensure(!locked || locked_range.inside(confirmed_range.to_page_range()));
        }
        ensure(confirmed_range.inside(cpu_range));
        init_lockable_range(confirmed_range);
    }
    // Re-protect, forcing only when the confirmed region actually changed
    protect(prot, confirmed_range != prev_confirmed_range);
}
void buffered_section::unprotect()
{
    // Restore read/write access; only valid while some protection is active.
    AUDIT(protection != utils::protection::rw);
    protect(utils::protection::rw);
}
void buffered_section::discard()
{
    // Abandon protection bookkeeping without touching real page flags
    // (no rsx::memory_protect call) — the caller is discarding the contents.
#ifdef TEXTURE_CACHE_DEBUG
    if (locked)
        tex_cache_checker.remove(locked_range, protection);
#endif
    protection = utils::protection::rw;
    confirmed_range.invalidate();
    locked = false;
}
const address_range& buffered_section::get_bounds(section_bounds bounds) const
{
    // Select which of the tracked ranges the caller wants to inspect.
    switch (bounds)
    {
    case section_bounds::full_range:
        return cpu_range;
    case section_bounds::locked_range:
        return locked_range;
    case section_bounds::confirmed_range:
        // Fall back to the full CPU range when no sub-range was ever confirmed
        return confirmed_range.valid() ? confirmed_range : cpu_range;
    default:
        fmt::throw_exception("Unreachable");
    }
}
u64 buffered_section::fast_hash_internal() const
{
    // FNV-style hash over the confirmed range (or the full CPU range when no
    // sub-range was confirmed), consumed 8 bytes at a time with a 4/2/1-byte tail.
    const auto hash_range = confirmed_range.valid() ? confirmed_range : cpu_range;
    const auto hash_length = hash_range.length();
    const auto cycles = hash_length / 8;
    auto rem = hash_length % 8;
    auto src = get_ptr<const char>(hash_range.start);
    auto data64 = reinterpret_cast<const u64*>(src);
    usz hash = rpcs3::fnv_seed;
    for (unsigned i = 0; i < cycles; ++i)
    {
        hash = rpcs3::hash64(hash, data64[i]);
    }
    if (rem) [[unlikely]] // Data often aligned to some power of 2
    {
        src += hash_length - rem;
        // BUGFIX: rem must be decremented after the 4- and 2-byte reads.
        // Previously the byte loop re-consumed the full remainder, double-hashing
        // tail bytes and reading up to 6 bytes past the end of the range.
        if (rem >= 4)
        {
            hash = rpcs3::hash64(hash, *reinterpret_cast<const u32*>(src));
            src += 4;
            rem -= 4;
        }
        if (rem >= 2)
        {
            hash = rpcs3::hash64(hash, *reinterpret_cast<const u16*>(src));
            src += 2;
            rem -= 2;
        }
        while (rem--)
        {
            hash = rpcs3::hash64(hash, *reinterpret_cast<const u8*>(src));
            src++;
        }
    }
    return hash;
}
bool buffered_section::is_locked(bool actual_page_flags) const
{
    // With actual_page_flags set, report whether real page protection (as
    // opposed to hash tracking) is currently in effect.
    if (actual_page_flags && locked)
    {
        return protection_strat == section_protection_strategy::lock;
    }
    return locked;
}
bool buffered_section::sync() const
{
    // Page-protected (or unlocked) sections are always in sync; hash-tracked
    // sections must be re-verified against the stored snapshot.
    const bool needs_hash_check = locked && (protection_strat != section_protection_strategy::lock);
    return !needs_hash_check || (fast_hash_internal() == mem_hash);
}
}
| 5,650
|
C++
|
.cpp
| 180
| 28.172222
| 135
| 0.706738
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,394
|
surface_store.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/surface_store.cpp
|
#include "stdafx.h"
#include "surface_store.h"
#include "util/asm.hpp"
namespace rsx
{
namespace utility
{
std::vector<u8> get_rtt_indexes(surface_target color_target)
{
// Map the MRT target enum to the list of active color surface indices.
// Deliberately no `default:` case so -Wswitch flags new enumerators.
switch (color_target)
{
case surface_target::none: return{};
case surface_target::surface_a: return{ 0 };
case surface_target::surface_b: return{ 1 };
case surface_target::surfaces_a_b: return{ 0, 1 };
case surface_target::surfaces_a_b_c: return{ 0, 1, 2 };
case surface_target::surfaces_a_b_c_d: return{ 0, 1, 2, 3 };
}
fmt::throw_exception("Wrong color_target");
}
usz get_aligned_pitch(surface_color_format format, u32 width)
{
    // Render target rows are padded to 256-byte boundaries in local memory.
    const auto row_bytes = [&]() -> usz
    {
        switch (format)
        {
        case surface_color_format::b8: return width;
        case surface_color_format::g8b8:
        case surface_color_format::x1r5g5b5_o1r5g5b5:
        case surface_color_format::x1r5g5b5_z1r5g5b5:
        case surface_color_format::r5g6b5: return width * 2;
        case surface_color_format::a8b8g8r8:
        case surface_color_format::x8b8g8r8_o8b8g8r8:
        case surface_color_format::x8b8g8r8_z8b8g8r8:
        case surface_color_format::x8r8g8b8_o8r8g8b8:
        case surface_color_format::x8r8g8b8_z8r8g8b8:
        case surface_color_format::x32:
        case surface_color_format::a8r8g8b8: return width * 4;
        case surface_color_format::w16z16y16x16: return width * 8;
        case surface_color_format::w32z32y32x32: return width * 16;
        }
        fmt::throw_exception("Unknown color surface format");
    };
    return utils::align(row_bytes(), 256);
}
usz get_packed_pitch(surface_color_format format, u32 width)
{
    // Tightly packed pitch: bytes-per-pixel times width, no alignment padding.
    usz bytes_per_pixel = 0;
    switch (format)
    {
    case surface_color_format::b8: bytes_per_pixel = 1; break;
    case surface_color_format::g8b8:
    case surface_color_format::x1r5g5b5_o1r5g5b5:
    case surface_color_format::x1r5g5b5_z1r5g5b5:
    case surface_color_format::r5g6b5: bytes_per_pixel = 2; break;
    case surface_color_format::a8b8g8r8:
    case surface_color_format::x8b8g8r8_o8b8g8r8:
    case surface_color_format::x8b8g8r8_z8b8g8r8:
    case surface_color_format::x8r8g8b8_o8r8g8b8:
    case surface_color_format::x8r8g8b8_z8r8g8b8:
    case surface_color_format::x32:
    case surface_color_format::a8r8g8b8: bytes_per_pixel = 4; break;
    case surface_color_format::w16z16y16x16: bytes_per_pixel = 8; break;
    case surface_color_format::w32z32y32x32: bytes_per_pixel = 16; break;
    }
    if (!bytes_per_pixel)
    {
        fmt::throw_exception("Unknown color surface format");
    }
    return width * bytes_per_pixel;
}
}
}
| 2,377
|
C++
|
.cpp
| 64
| 33.609375
| 81
| 0.731169
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,395
|
BufferUtils.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/BufferUtils.cpp
|
#include "stdafx.h"
#include "BufferUtils.h"
#include "../rsx_methods.h"
#include "../RSXThread.h"
#include "util/to_endian.hpp"
#include "util/sysinfo.hpp"
#include "Utilities/JIT.h"
#include "util/asm.hpp"
#include "util/v128.hpp"
#include "util/simd.hpp"
#if !defined(_MSC_VER)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#if defined(_MSC_VER) || !defined(__SSE2__)
#define SSE4_1_FUNC
#define AVX2_FUNC
#define AVX3_FUNC
#else
#define SSE4_1_FUNC __attribute__((__target__("sse4.1")))
#define AVX2_FUNC __attribute__((__target__("avx2")))
#define AVX3_FUNC __attribute__((__target__("avx512f,avx512bw,avx512dq,avx512cd,avx512vl")))
#endif // _MSC_VER
#if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && defined(__AVX512CD__) && defined(__AVX512BW__)
[[maybe_unused]] constexpr bool s_use_ssse3 = true;
[[maybe_unused]] constexpr bool s_use_sse4_1 = true;
[[maybe_unused]] constexpr bool s_use_avx2 = true;
[[maybe_unused]] constexpr bool s_use_avx3 = true;
#elif defined(__AVX2__)
[[maybe_unused]] constexpr bool s_use_ssse3 = true;
[[maybe_unused]] constexpr bool s_use_sse4_1 = true;
[[maybe_unused]] constexpr bool s_use_avx2 = true;
[[maybe_unused]] constexpr bool s_use_avx3 = false;
#elif defined(__SSE4_1__)
[[maybe_unused]] constexpr bool s_use_ssse3 = true;
[[maybe_unused]] constexpr bool s_use_sse4_1 = true;
[[maybe_unused]] constexpr bool s_use_avx2 = false;
[[maybe_unused]] constexpr bool s_use_avx3 = false;
#elif defined(__SSSE3__)
[[maybe_unused]] constexpr bool s_use_ssse3 = true;
[[maybe_unused]] constexpr bool s_use_sse4_1 = false;
[[maybe_unused]] constexpr bool s_use_avx2 = false;
[[maybe_unused]] constexpr bool s_use_avx3 = false;
#elif defined(ARCH_X64)
[[maybe_unused]] const bool s_use_ssse3 = utils::has_ssse3();
[[maybe_unused]] const bool s_use_sse4_1 = utils::has_sse41();
[[maybe_unused]] const bool s_use_avx2 = utils::has_avx2();
[[maybe_unused]] const bool s_use_avx3 = utils::has_avx512();
#else
[[maybe_unused]] constexpr bool s_use_ssse3 = true; // Non x86
[[maybe_unused]] constexpr bool s_use_sse4_1 = true; // Non x86
[[maybe_unused]] constexpr bool s_use_avx2 = false;
[[maybe_unused]] constexpr bool s_use_avx3 = false;
#endif
const v128 s_bswap_u32_mask = v128::from32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
const v128 s_bswap_u16_mask = v128::from32(0x02030001, 0x06070405, 0x0a0b0809, 0x0e0f0c0d);
namespace utils
{
template <typename T, typename U>
[[nodiscard]] auto bless(const std::span<U>& span)
{
return std::span<T>(bless<T>(span.data()), sizeof(U) * span.size() / sizeof(T));
}
}
namespace
{
// Scalar fallback: byteswaps 'count' u32 words from src into dst.
// When Compare is true, also reports whether any destination word changed.
template <bool Compare>
auto copy_data_swap_u32_naive(u32* dst, const u32* src, u32 count)
{
	u32 changed_bits = 0;

	for (u32 idx = 0; idx < count; idx++)
	{
		const u32 swapped = stx::se_storage<u32>::swap(src[idx]);

		if constexpr (Compare)
		{
			// Accumulate the bitwise difference against the old contents
			changed_bits |= swapped ^ dst[idx];
		}

		dst[idx] = swapped;
	}

	if constexpr (Compare)
	{
		return changed_bits != 0;
	}
}
#if defined(ARCH_X64)
	// Emits a vectorized equivalent of copy_data_swap_u32_naive with asmjit.
	// args: dst=args[0], src=args[1], count=args[2]. When Compare is true the
	// generated function sets AL (bool: any destination word changed).
	template <bool Compare>
	void build_copy_data_swap_u32(asmjit::simd_builder& c, native_args& args)
	{
		using namespace asmjit;

		// Load and broadcast shuffle mask
		if (utils::has_ssse3())
		{
			c.vec_set_const(c.v1, s_bswap_u32_mask);
		}

		// Clear v2 (bitwise inequality accumulator)
		if constexpr (Compare)
		{
			c.vec_set_all_zeros(c.v2);
		}

		c.build_loop(sizeof(u32), x86::eax, args[2].r32(), [&]
		{
			c.zero_if_not_masked().vec_load_unaligned(sizeof(u32), c.v0, c.ptr_scale_for_vec(sizeof(u32), args[1], x86::rax));

			if (utils::has_ssse3())
			{
				// Single-shuffle byteswap using the precomputed mask in v1
				c.vec_shuffle_xi8(c.v0, c.v0, c.v1);
			}
			else
			{
				// Pre-SSSE3 fallback: swap bytes within u16 lanes, then swap
				// the u16 halves of each u32 lane with pshuflw/pshufhw
				c.emit(x86::Inst::kIdMovdqa, c.v1, c.v0);
				c.emit(x86::Inst::kIdPsrlw, c.v0, 8);
				c.emit(x86::Inst::kIdPsllw, c.v1, 8);
				c.emit(x86::Inst::kIdPor, c.v0, c.v1);
				c.emit(x86::Inst::kIdPshuflw, c.v0, c.v0, 0b10110001);
				c.emit(x86::Inst::kIdPshufhw, c.v0, c.v0, 0b10110001);
			}

			if constexpr (Compare)
			{
				if (utils::has_avx512())
				{
					// v2 |= (v0 ^ old_dst) in a single ternary-logic op
					c.keep_if_not_masked().emit(x86::Inst::kIdVpternlogd, c.v2, c.v0, c.ptr_scale_for_vec(sizeof(u32), args[0], x86::rax), 0xf6); // orAxorBC
				}
				else
				{
					// v2 |= (old_dst ^ new_value)
					c.zero_if_not_masked().vec_load_unaligned(sizeof(u32), c.v3, c.ptr_scale_for_vec(sizeof(u32), args[0], x86::rax));
					c.vec_xor(sizeof(u32), c.v3, c.v3, c.v0);
					c.vec_or(sizeof(u32), c.v2, c.v2, c.v3);
				}
			}

			c.keep_if_not_masked().vec_store_unaligned(sizeof(u32), c.v0, c.ptr_scale_for_vec(sizeof(u32), args[0], x86::rax));
		}, [&]
		{
			if constexpr (Compare)
			{
				if (c.vsize == 16 && c.vmask == 0)
				{
					// Fix for AVX2 path
					c.vextracti128(x86::xmm0, x86::ymm2, 1);
					c.vpor(x86::xmm2, x86::xmm2, x86::xmm0);
				}
			}
		});

		if constexpr (Compare)
		{
			// Reduce the accumulator to a boolean result in AL
			if (c.vsize == 32 && c.vmask == 0)
				c.vec_clobbering_test(16, x86::xmm2, x86::xmm2);
			else
				c.vec_clobbering_test(c.vsize, c.v2, c.v2);
			c.setnz(x86::al);
		}

		c.vec_cleanup_ret();
	}
#endif
}
#if defined(ARCH_X64)
DECLARE(copy_data_swap_u32) = build_function_asm<void(*)(u32*, const u32*, u32), asmjit::simd_builder>("copy_data_swap_u32", &build_copy_data_swap_u32<false>);
DECLARE(copy_data_swap_u32_cmp) = build_function_asm<bool(*)(u32*, const u32*, u32), asmjit::simd_builder>("copy_data_swap_u32_cmp", &build_copy_data_swap_u32<true>);
#else
DECLARE(copy_data_swap_u32) = copy_data_swap_u32_naive<false>;
DECLARE(copy_data_swap_u32_cmp) = copy_data_swap_u32_naive<true>;
#endif
namespace
{
// Largest representable value of the (unsigned) index type T; doubles as the
// "invalid"/primitive-restart marker throughout this file.
template <typename T>
constexpr T index_limit()
{
	return static_cast<T>(-1);
}
// Folds 'value' into the running [min, max] interval and passes it through,
// so callers can track index bounds while copying.
template <typename T>
const T& min_max(T& min, T& max, const T& value)
{
	min = (value < min) ? value : min;
	max = (max < value) ? value : max;
	return value;
}
// Uploads an index buffer verbatim (big-endian -> native byteswap only),
// tracking the minimum and maximum index encountered.
struct untouched_impl
{
	// Scalar fallback. Returns (max_index << 32) | min_index.
	// NOTE(review): min_index starts at -1 (all ones), so for count == 0 the
	// reported min is the type limit and max is 0 — confirm callers expect this.
	template <typename T>
	static u64 upload_untouched_naive(const be_t<T>* src, T* dst, u32 count)
	{
		u32 written = 0;
		T max_index = 0;
		T min_index = -1;

		while (count--)
		{
			T index = src[written];
			dst[written++] = min_max(min_index, max_index, index);
		}

		return (u64{max_index} << 32) | u64{min_index};
	}

#if defined(ARCH_X64)
	// Emits the vectorized equivalent with asmjit.
	// args: src=args[0], dst=args[1], count=args[2]; result packed in RAX.
	template <typename T>
	static void build_upload_untouched(asmjit::simd_builder& c, native_args& args)
	{
		using namespace asmjit;

		if (!utils::has_sse41())
		{
			// No vector min/max available: tail-call the scalar version
			c.jmp(&upload_untouched_naive<T>);
			return;
		}

		c.vec_set_const(c.v1, sizeof(T) == 2 ? s_bswap_u16_mask : s_bswap_u32_mask);
		c.vec_set_all_ones(c.v2); // vec min
		c.vec_set_all_zeros(c.v3); // vec max

		c.build_loop(sizeof(T), x86::eax, args[2].r32(), [&]
		{
			c.zero_if_not_masked().vec_load_unaligned(sizeof(T), c.v0, c.ptr_scale_for_vec(sizeof(T), args[0], x86::rax));

			if (utils::has_ssse3())
			{
				// Byteswap via pshufb with the precomputed mask
				c.vec_shuffle_xi8(c.v0, c.v0, c.v1);
			}
			else
			{
				// Pre-SSSE3 byteswap fallback
				c.emit(x86::Inst::kIdMovdqa, c.v1, c.v0);
				c.emit(x86::Inst::kIdPsrlw, c.v0, 8);
				c.emit(x86::Inst::kIdPsllw, c.v1, 8);
				c.emit(x86::Inst::kIdPor, c.v0, c.v1);

				if constexpr (sizeof(T) == 4)
				{
					c.emit(x86::Inst::kIdPshuflw, c.v0, c.v0, 0b10110001);
					c.emit(x86::Inst::kIdPshufhw, c.v0, c.v0, 0b10110001);
				}
			}

			// Track per-lane min/max and store the swapped indices
			c.keep_if_not_masked().vec_umax(sizeof(T), c.v3, c.v3, c.v0);
			c.keep_if_not_masked().vec_umin(sizeof(T), c.v2, c.v2, c.v0);
			c.keep_if_not_masked().vec_store_unaligned(sizeof(T), c.v0, c.ptr_scale_for_vec(sizeof(T), args[1], x86::rax));
		}, [&]
		{
			// Compress horizontally, protect high values
			c.vec_extract_high(sizeof(T), c.v0, c.v3);
			c.vec_umax(sizeof(T), c.v3, c.v3, c.v0);
			c.vec_extract_high(sizeof(T), c.v0, c.v2);
			c.vec_umin(sizeof(T), c.v2, c.v2, c.v0);
		});

		// Pack (max << 32) | min into RAX
		c.vec_extract_gpr(sizeof(T), x86::edx, c.v3);
		c.vec_extract_gpr(sizeof(T), x86::eax, c.v2);
		c.shl(x86::rdx, 32);
		c.or_(x86::rax, x86::rdx);
		c.vec_cleanup_ret();
	}

	static inline auto upload_xi16 = build_function_asm<u64(*)(const be_t<u16>*, u16*, u32), asmjit::simd_builder>("untouched_upload_xi16", &build_upload_untouched<u16>);
	static inline auto upload_xi32 = build_function_asm<u64(*)(const be_t<u32>*, u32*, u32), asmjit::simd_builder>("untouched_upload_xi32", &build_upload_untouched<u32>);
#endif

	// Entry point: dispatches to the JIT (x86-64) or scalar implementation
	// and unpacks the result. Returns (min_index, max_index, element count).
	template <typename T>
	static std::tuple<T, T, u32> upload_untouched(std::span<to_be_t<const T>> src, std::span<T> dst)
	{
		T min_index, max_index;
		u32 count = ::size32(src);
		u64 r;

#if defined(ARCH_X64)
		if constexpr (sizeof(T) == 2)
			r = upload_xi16(src.data(), dst.data(), count);
		else
			r = upload_xi32(src.data(), dst.data(), count);
#else
		r = upload_untouched_naive(src.data(), dst.data(), count);
#endif
		min_index = static_cast<T>(r);
		max_index = static_cast<T>(r >> 32);
		return std::make_tuple(min_index, max_index, count);
	}
};
// Uploads an index buffer while rewriting every occurrence of the
// primitive-restart index to index_limit<T>() (all ones), tracking min/max
// of the remaining (valid) indices.
struct primitive_restart_impl
{
	// Scalar fallback. Restart entries are written as index_limit<T>() and do
	// not affect the min/max. Returns (max_index << 32) | min_index.
	template <typename T>
	static inline u64 upload_untouched_naive(const be_t<T>* src, T* dst, u32 count, T restart_index)
	{
		T min_index = index_limit<T>();
		T max_index = 0;

		for (u32 i = 0; i < count; ++i)
		{
			T index = src[i].value();
			dst[i] = index == restart_index ? index_limit<T>() : min_max(min_index, max_index, index);
		}

		return (u64{max_index} << 32) | u64{min_index};
	}

#ifdef ARCH_X64
	// Emits the vectorized equivalent with asmjit.
	// args: src=args[0], dst=args[1], count=args[2], restart_index=args[3].
	template <typename T>
	static void build_upload_untouched(asmjit::simd_builder& c, native_args& args)
	{
		using namespace asmjit;

		if (!utils::has_sse41())
		{
			// No vector min/max available: tail-call the scalar version
			c.jmp(&upload_untouched_naive<T>);
			return;
		}

		c.vec_set_const(c.v1, sizeof(T) == 2 ? s_bswap_u16_mask : s_bswap_u32_mask);
		c.vec_set_all_ones(c.v2); // vec min
		c.vec_set_all_zeros(c.v3); // vec max
		c.vec_broadcast_gpr(sizeof(T), c.v4, args[3].r32()); // v4 = restart index in every lane

		c.build_loop(sizeof(T), x86::eax, args[2].r32(), [&]
		{
			c.zero_if_not_masked().vec_load_unaligned(sizeof(T), c.v0, c.ptr_scale_for_vec(sizeof(T), args[0], x86::rax));

			if (utils::has_ssse3())
			{
				c.vec_shuffle_xi8(c.v0, c.v0, c.v1);
			}
			else
			{
				// Pre-SSSE3 byteswap fallback
				c.emit(x86::Inst::kIdMovdqa, c.v1, c.v0);
				c.emit(x86::Inst::kIdPsrlw, c.v0, 8);
				c.emit(x86::Inst::kIdPsllw, c.v1, 8);
				c.emit(x86::Inst::kIdPor, c.v0, c.v1);

				if constexpr (sizeof(T) == 4)
				{
					c.emit(x86::Inst::kIdPshuflw, c.v0, c.v0, 0b10110001);
					c.emit(x86::Inst::kIdPshufhw, c.v0, c.v0, 0b10110001);
				}
			}

			// v5 = (index == restart) mask; clear restart lanes to 0 so they
			// cannot raise the max
			c.vec_cmp_eq(sizeof(T), c.v5, c.v4, c.v0);
			c.vec_andn(sizeof(T), c.v5, c.v5, c.v0);
			c.keep_if_not_masked().vec_umax(sizeof(T), c.v3, c.v3, c.v5);
			// Recompute the mask and force restart lanes to all-ones so they
			// cannot lower the min and are stored as index_limit<T>()
			c.vec_cmp_eq(sizeof(T), c.v5, c.v4, c.v0);
			c.vec_or(sizeof(T), c.v0, c.v0, c.v5);
			c.keep_if_not_masked().vec_umin(sizeof(T), c.v2, c.v2, c.v0);
			c.keep_if_not_masked().vec_store_unaligned(sizeof(T), c.v0, c.ptr_scale_for_vec(sizeof(T), args[1], x86::rax));
		}, [&]
		{
			// Compress horizontally, protect high values
			c.vec_extract_high(sizeof(T), c.v0, c.v3);
			c.vec_umax(sizeof(T), c.v3, c.v3, c.v0);
			c.vec_extract_high(sizeof(T), c.v0, c.v2);
			c.vec_umin(sizeof(T), c.v2, c.v2, c.v0);
		});

		// Pack (max << 32) | min into RAX
		c.vec_extract_gpr(sizeof(T), x86::edx, c.v3);
		c.vec_extract_gpr(sizeof(T), x86::eax, c.v2);
		c.shl(x86::rdx, 32);
		c.or_(x86::rax, x86::rdx);
		c.vec_cleanup_ret();
	}

	static inline auto upload_xi16 = build_function_asm<u64(*)(const be_t<u16>*, u16*, u32, u32), asmjit::simd_builder>("restart_untouched_upload_xi16", &build_upload_untouched<u16>);
	static inline auto upload_xi32 = build_function_asm<u64(*)(const be_t<u32>*, u32*, u32, u32), asmjit::simd_builder>("restart_untouched_upload_xi32", &build_upload_untouched<u32>);
#endif

	// Entry point: dispatches to the JIT (x86-64) or scalar implementation
	// and unpacks the result. Returns (min_index, max_index, element count).
	template <typename T>
	static inline std::tuple<T, T, u32> upload_untouched(std::span<to_be_t<const T>> src, std::span<T> dst, T restart_index)
	{
		T min_index, max_index;
		u32 count = ::size32(src);
		u64 r;

#if defined(ARCH_X64)
		if constexpr (sizeof(T) == 2)
			r = upload_xi16(src.data(), dst.data(), count, restart_index);
		else
			r = upload_xi32(src.data(), dst.data(), count, restart_index);
#else
		r = upload_untouched_naive(src.data(), dst.data(), count, restart_index);
#endif
		min_index = static_cast<T>(r);
		max_index = static_cast<T>(r >> 32);
		return std::make_tuple(min_index, max_index, count);
	}
};
// Copies indices to dst while dropping every occurrence of restart_index
// entirely (valid only for disjoint primitives, where removing an index does
// not change the primitive topology). Returns (min, max, written count)
// over the surviving indices.
template <typename T>
NEVER_INLINE std::tuple<T, T, u32> upload_untouched_skip_restart(std::span<to_be_t<const T>> src, std::span<T> dst, T restart_index)
{
	T lowest = index_limit<T>();
	T highest = 0;
	u32 out = 0;

	for (const T index : src)
	{
		if (index == restart_index)
		{
			continue;
		}

		dst[out++] = min_max(lowest, highest, index);
	}

	return std::make_tuple(lowest, highest, out);
}
// Uploads an index buffer, choosing the cheapest strategy:
// - restart disabled (or a u16 buffer whose restart value can never match):
//   plain byteswapping copy
// - disjoint primitives: drop restart indices outright
// - connected primitives: rewrite restart indices to index_limit<T>()
template<typename T>
std::tuple<T, T, u32> upload_untouched(std::span<to_be_t<const T>> src, std::span<T> dst, rsx::primitive_type draw_mode, bool is_primitive_restart_enabled, u32 primitive_restart_index)
{
	if (!is_primitive_restart_enabled)
	{
		return untouched_impl::upload_untouched(src, dst);
	}

	if constexpr (std::is_same_v<T, u16>)
	{
		if (primitive_restart_index > 0xffff)
		{
			// The restart value cannot appear in a 16-bit buffer
			return untouched_impl::upload_untouched(src, dst);
		}
	}

	const T restart = static_cast<T>(primitive_restart_index);

	if (is_primitive_disjointed(draw_mode))
	{
		return upload_untouched_skip_restart(src, dst, restart);
	}

	return primitive_restart_impl::upload_untouched(src, dst, restart);
}
// Expands an indexed triangle fan (or polygon) into an independent triangle
// list: each output triangle is (anchor, previous, current). A restart index
// starts a new fan (new anchor). Returns (min_index, max_index, number of
// indices written to dst).
template<typename T>
std::tuple<T, T, u32> expand_indexed_triangle_fan(std::span<to_be_t<const T>> src, std::span<T> dst, bool is_primitive_restart_enabled, u32 primitive_restart_index)
{
	const T invalid_index = index_limit<T>();

	T min_index = invalid_index;
	T max_index = 0;

	// Each fan of N indices produces at most 3 * (N - 2) output indices
	ensure((dst.size() >= 3 * (src.size() - 2)));

	u32 dst_idx = 0;
	bool needs_anchor = true;

	T anchor = invalid_index;
	T last_index = invalid_index;

	for (const T index : src)
	{
		if (needs_anchor)
		{
			// Skip restart markers until a real index can become the anchor
			if (is_primitive_restart_enabled && index == primitive_restart_index)
				continue;

			anchor = min_max(min_index, max_index, index);
			needs_anchor = false;
			continue;
		}

		if (is_primitive_restart_enabled && index == primitive_restart_index)
		{
			// Restart: the next real index begins a fresh fan
			needs_anchor = true;
			last_index = invalid_index;
			continue;
		}

		if (last_index == invalid_index)
		{
			//Need at least one anchor and one outer index to create a triangle
			last_index = min_max(min_index, max_index, index);
			continue;
		}

		dst[dst_idx++] = anchor;
		dst[dst_idx++] = last_index;
		dst[dst_idx++] = min_max(min_index, max_index, index);
		last_index = index;
	}

	return std::make_tuple(min_index, max_index, dst_idx);
}
// Expands an indexed quad list into a triangle list: every 4 accumulated
// indices emit the triangles (0,1,2) and (2,3,0). A restart index discards
// any partially accumulated quad. Returns (min_index, max_index, number of
// indices written to dst).
template<typename T>
std::tuple<T, T, u32> expand_indexed_quads(std::span<to_be_t<const T>> src, std::span<T> dst, bool is_primitive_restart_enabled, u32 primitive_restart_index)
{
	T lowest = index_limit<T>();
	T highest = 0;

	// 4 input indices become 6 output indices
	ensure((4 * dst.size_bytes() >= 6 * src.size_bytes()));

	u32 out = 0;
	u8 pending = 0;
	T quad[4];

	for (const T index : src)
	{
		if (is_primitive_restart_enabled && index == primitive_restart_index)
		{
			// Restart: drop the partially accumulated quad
			pending = 0;
			continue;
		}

		quad[pending++] = min_max(lowest, highest, index);

		if (pending != 4)
		{
			continue;
		}

		// First triangle
		dst[out++] = quad[0];
		dst[out++] = quad[1];
		dst[out++] = quad[2];
		// Second triangle
		dst[out++] = quad[2];
		dst[out++] = quad[3];
		dst[out++] = quad[0];
		pending = 0;
	}

	return std::make_tuple(lowest, highest, out);
}
}
// Only handle quads and triangle fan now
// Returns true when the RSX primitive type can be submitted to the host GPU
// as-is, without index expansion.
bool is_primitive_native(rsx::primitive_type draw_mode)
{
	switch (draw_mode)
	{
	// These need their index data expanded/emulated first
	case rsx::primitive_type::line_loop:
	case rsx::primitive_type::polygon:
	case rsx::primitive_type::triangle_fan:
	case rsx::primitive_type::quads:
		return false;
	case rsx::primitive_type::points:
	case rsx::primitive_type::lines:
	case rsx::primitive_type::line_strip:
	case rsx::primitive_type::triangles:
	case rsx::primitive_type::triangle_strip:
	case rsx::primitive_type::quad_strip:
		return true;
	}

	fmt::throw_exception("Wrong primitive type");
}
// Returns true when consecutive primitives of this type share no vertices,
// meaning a primitive-restart index can simply be skipped rather than
// rewritten.
bool is_primitive_disjointed(rsx::primitive_type draw_mode)
{
	switch (draw_mode)
	{
	// Strip/fan/loop/polygon primitives reuse vertices between primitives
	case rsx::primitive_type::line_loop:
	case rsx::primitive_type::line_strip:
	case rsx::primitive_type::polygon:
	case rsx::primitive_type::quad_strip:
	case rsx::primitive_type::triangle_fan:
	case rsx::primitive_type::triangle_strip:
		return false;
	default:
		break;
	}

	return true;
}
// Returns the number of indices required after expanding 'draw_mode' from
// 'initial_index_count' input indices (unchanged for native primitives).
u32 get_index_count(rsx::primitive_type draw_mode, u32 initial_index_count)
{
	// Index count
	if (is_primitive_native(draw_mode))
	{
		return initial_index_count;
	}

	if (draw_mode == rsx::primitive_type::line_loop)
	{
		// One extra index to close the loop
		return initial_index_count + 1;
	}

	if (draw_mode == rsx::primitive_type::polygon || draw_mode == rsx::primitive_type::triangle_fan)
	{
		// Each fan triangle becomes an independent triangle
		return (initial_index_count - 2) * 3;
	}

	if (draw_mode == rsx::primitive_type::quads)
	{
		// 4 quad indices expand to 6 triangle indices
		return (6 * initial_index_count) / 4;
	}

	return 0;
}
// Returns the size in bytes of one index of the given type.
u32 get_index_type_size(rsx::index_array_type type)
{
	if (type == rsx::index_array_type::u16)
	{
		return sizeof(u16);
	}

	if (type == rsx::index_array_type::u32)
	{
		return sizeof(u32);
	}

	fmt::throw_exception("Wrong index type");
}
// Synthesizes a 16-bit index buffer for a non-indexed draw of a non-native
// primitive type, writing the expanded indices into 'dst'.
// 'count' is the vertex count of the original draw.
void write_index_array_for_non_indexed_non_native_primitive_to_buffer(char* dst, rsx::primitive_type draw_mode, unsigned count)
{
	auto out = reinterpret_cast<u16*>(dst);

	switch (draw_mode)
	{
	case rsx::primitive_type::line_loop:
		// 0, 1, ..., count-1, then wrap back to vertex 0 to close the loop
		for (unsigned i = 0; i < count; ++i)
		{
			out[i] = static_cast<u16>(i);
		}
		out[count] = 0;
		return;
	case rsx::primitive_type::triangle_fan:
	case rsx::primitive_type::polygon:
		// Anchor vertex 0 with each consecutive pair of outer vertices
		for (unsigned i = 0; i < (count - 2); i++)
		{
			const unsigned base = 3 * i;
			out[base + 0] = 0;
			out[base + 1] = static_cast<u16>(i + 1);
			out[base + 2] = static_cast<u16>(i + 2);
		}
		return;
	case rsx::primitive_type::quads:
		for (unsigned q = 0; q < count / 4; q++)
		{
			const unsigned v = 4 * q;
			const unsigned base = 6 * q;
			// First triangle
			out[base + 0] = static_cast<u16>(v + 0);
			out[base + 1] = static_cast<u16>(v + 1);
			out[base + 2] = static_cast<u16>(v + 2);
			// Second triangle
			out[base + 3] = static_cast<u16>(v + 2);
			out[base + 4] = static_cast<u16>(v + 3);
			out[base + 5] = static_cast<u16>(v + 0);
		}
		return;
	case rsx::primitive_type::quad_strip:
	case rsx::primitive_type::points:
	case rsx::primitive_type::lines:
	case rsx::primitive_type::line_strip:
	case rsx::primitive_type::triangles:
	case rsx::primitive_type::triangle_strip:
		fmt::throw_exception("Native primitive type doesn't require expansion");
	}

	fmt::throw_exception("Tried to load invalid primitive type");
}
namespace
{
	// Shared worker for write_index_array_data_to_buffer: uploads (and, when
	// 'expands(draw_mode)' returns true, expands) the index stream into dst.
	// Returns (min_index, max_index, written index count).
	template<typename T>
	std::tuple<T, T, u32> write_index_array_data_to_buffer_impl(std::span<T> dst,
		std::span<const be_t<T>> src,
		rsx::primitive_type draw_mode, bool restart_index_enabled, u32 restart_index,
		const std::function<bool(rsx::primitive_type)>& expands)
	{
		if (!expands(draw_mode)) [[likely]]
		{
			return upload_untouched<T>(src, dst, draw_mode, restart_index_enabled, restart_index);
		}

		switch (draw_mode)
		{
		case rsx::primitive_type::line_loop:
		{
			// Upload as-is, then append the first index to close the loop.
			// NOTE(review): index_count equals dst.size(), so dst[index_count]
			// writes one element past the span's extent — presumably the caller
			// allocates the +1 slot reported by get_index_count for line_loop;
			// verify against callers.
			const auto &returnvalue = upload_untouched<T>(src, dst, draw_mode, restart_index_enabled, restart_index);
			const auto index_count = dst.size_bytes() / sizeof(T);
			dst[index_count] = src[0];
			return returnvalue;
		}
		case rsx::primitive_type::polygon:
		case rsx::primitive_type::triangle_fan:
		{
			return expand_indexed_triangle_fan<T>(src, dst, restart_index_enabled, restart_index);
		}
		case rsx::primitive_type::quads:
		{
			return expand_indexed_quads<T>(src, dst, restart_index_enabled, restart_index);
		}
		default:
			fmt::throw_exception("Unknown draw mode (0x%x)", static_cast<u8>(draw_mode));
		}
	}
}
// Uploads (and optionally expands) an index array of the given element type
// from src_ptr into dst_ptr. Returns (min_index, max_index, index count).
std::tuple<u32, u32, u32> write_index_array_data_to_buffer(std::span<std::byte> dst_ptr,
	std::span<const std::byte> src_ptr,
	rsx::index_array_type type, rsx::primitive_type draw_mode, bool restart_index_enabled, u32 restart_index,
	const std::function<bool(rsx::primitive_type)>& expands)
{
	if (type == rsx::index_array_type::u16)
	{
		return write_index_array_data_to_buffer_impl<u16>(utils::bless<u16>(dst_ptr), utils::bless<const be_t<u16>>(src_ptr),
			draw_mode, restart_index_enabled, restart_index, expands);
	}

	if (type == rsx::index_array_type::u32)
	{
		return write_index_array_data_to_buffer_impl<u32>(utils::bless<u32>(dst_ptr), utils::bless<const be_t<u32>>(src_ptr),
			draw_mode, restart_index_enabled, restart_index, expands);
	}

	fmt::throw_exception("Unreachable");
}
| 20,698
|
C++
|
.cpp
| 634
| 29.425868
| 184
| 0.661259
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,396
|
TextureUtils.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/TextureUtils.cpp
|
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "TextureUtils.h"
#include "../RSXThread.h"
#include "../rsx_utils.h"
#include "util/asm.hpp"
namespace utils
{
template <typename T, typename U>
[[nodiscard]] auto bless(const std::span<U>& span)
{
return std::span<T>(bless<T>(span.data()), sizeof(U) * span.size() / sizeof(T));
}
template <typename T> requires(std::is_integral_v<T> && std::is_unsigned_v<T>)
bool is_power_of_2(T value)
{
return !(value & (value - 1));
}
}
namespace
{
#ifndef __APPLE__
// RGB655 -> RGB565: red is truncated to 5 bits (its LSB dropped via the
// 0xF81F mask), blue stays in place, and the 5-bit green field moves up into
// the 6-bit green slot, gaining a zero LSB.
u16 convert_rgb655_to_rgb565(const u16 bits)
{
	const u16 red_and_blue = bits & 0xF81F;
	const u16 green = static_cast<u16>((bits & 0x3E0) << 1);
	return red_and_blue | green;
}
#else
// Expands RGB565 to an opaque BGRA8 pixel. The (c * K + 23/33) >> 6 forms
// are rounded 5-bit/6-bit to 8-bit channel expansions.
u32 convert_rgb565_to_bgra8(const u16 bits)
{
	const u8 r5 = ((bits >> 11) & 0x1F);
	const u8 g6 = ((bits >> 5) & 0x3F);
	const u8 b5 = (bits & 0x1F);
	const u8 b8 = ((b5 * 527) + 23) >> 6;
	const u8 g8 = ((g6 * 259) + 33) >> 6;
	const u8 r8 = ((r5 * 527) + 23) >> 6;
	const u8 a8 = 255;
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Expands ARGB4 to BGRA8 by shifting each nibble into the high half of its
// byte. NOTE(review): the low 4 bits are not replicated, so channels max out
// at 0xF0 rather than 0xFF — confirm this approximation is intentional.
u32 convert_argb4_to_bgra8(const u16 bits)
{
	const u8 b8 = (bits & 0xF0);
	const u8 g8 = ((bits >> 4) & 0xF0);
	const u8 r8 = ((bits >> 8) & 0xF0);
	const u8 a8 = ((bits << 4) & 0xF0);
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Expands A1RGB5 to BGRA8.
// NOTE(review): ((bits >> 11) & 0x80) is always 0 for a 16-bit input (the
// shifted value never reaches bit 7), so alpha is forced to 0 here. The
// alpha bit is bit 15; extracting it as 0x80 would need (bits >> 8) & 0x80.
// Verify against the intended format behavior before changing.
u32 convert_a1rgb5_to_bgra8(const u16 bits)
{
	const u8 a1 = ((bits >> 11) & 0x80);
	const u8 r5 = ((bits >> 10) & 0x1F);
	const u8 g5 = ((bits >> 5) & 0x1F);
	const u8 b5 = (bits & 0x1F);
	const u8 b8 = ((b5 * 527) + 23) >> 6;
	const u8 g8 = ((g5 * 527) + 23) >> 6;
	const u8 r8 = ((r5 * 527) + 23) >> 6;
	const u8 a8 = a1;
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Expands RGB5A1 to BGRA8.
// NOTE(review): the channel shifts place alpha at bit 0, but (bits & 0x80)
// reads bit 7 (part of the blue/green fields). Also, a8 at most 0x80 means
// alpha never reaches fully opaque — verify both against the format spec.
u32 convert_rgb5a1_to_bgra8(const u16 bits)
{
	const u8 r5 = ((bits >> 11) & 0x1F);
	const u8 g5 = ((bits >> 6) & 0x1F);
	const u8 b5 = ((bits >> 1) & 0x1F);
	const u8 a1 = (bits & 0x80);
	const u8 b8 = ((b5 * 527) + 23) >> 6;
	const u8 g8 = ((g5 * 527) + 23) >> 6;
	const u8 r8 = ((r5 * 527) + 23) >> 6;
	const u8 a8 = a1;
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Expands RGB655 to BGRA8.
// NOTE(review): alpha is set to 1 (of 255), i.e. nearly transparent —
// presumably a placeholder for "no alpha"; confirm downstream usage.
u32 convert_rgb655_to_bgra8(const u16 bits)
{
	const u8 r6 = ((bits >> 10) & 0x3F);
	const u8 g5 = ((bits >> 5) & 0x1F);
	const u8 b5 = ((bits) & 0x1F);
	const u8 b8 = ((b5 * 527) + 23) >> 6;
	const u8 g8 = ((g5 * 527) + 23) >> 6;
	const u8 r8 = ((r6 * 259) + 33) >> 6;
	const u8 a8 = 1;
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Expands D1RGB5 (depth/ignored top bit) to BGRA8; alpha hardcoded to 1 as
// in convert_rgb655_to_bgra8 above — see the NOTE(review) there.
u32 convert_d1rgb5_to_bgra8(const u16 bits)
{
	const u8 r5 = ((bits >> 10) & 0x1F);
	const u8 g5 = ((bits >> 5) & 0x1F);
	const u8 b5 = (bits & 0x1F);
	const u8 b8 = ((b5 * 527) + 23) >> 6;
	const u8 g8 = ((g5 * 527) + 23) >> 6;
	const u8 r8 = ((r5 * 527) + 23) >> 6;
	const u8 a8 = 1;
	return b8 | (g8 << 8) | (r8 << 16) | (a8 << 24);
}
// Copies one mipmap level of 16-bit texels into a 32-bit destination,
// converting each texel through 'converter'. 'border' texels on each edge of
// the source are skipped ('v_porch' whole rows vertically, 'border' texels
// horizontally).
struct convert_16_block_32
{
	template<typename T>
	static void copy_mipmap_level(std::span<u32> dst, std::span<const T> src, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block, u32 src_pitch_in_block, u32 (*converter)(const u16))
	{
		static_assert(sizeof(T) == 2, "Type size doesn't match.");

		u32 src_offset = 0, dst_offset = 0;
		const u32 v_porch = src_pitch_in_block * border;

		for (int layer = 0; layer < depth; ++layer)
		{
			// Front: skip the leading border rows of this layer
			src_offset += v_porch;

			for (u32 row = 0; row < row_count; ++row)
			{
				for (int col = 0; col < width_in_block; ++col)
				{
					// '+ border' skips the horizontal leading border texels
					dst[dst_offset + col] = converter(src[src_offset + col + border]);
				}

				src_offset += src_pitch_in_block;
				dst_offset += dst_pitch_in_block;
			}

			// Back: skip the trailing border rows of this layer
			src_offset += v_porch;
		}
	}
};
// Same as convert_16_block_32, but for swizzled source data: first
// de-swizzles the (border-padded, power-of-two) source into a linear
// temporary, then runs the linear conversion over it.
struct convert_16_block_32_swizzled
{
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block, u32 (*converter)(const u16))
	{
		// Swizzled layouts are padded to power-of-two dimensions when bordered
		u32 padded_width, padded_height;
		if (border)
		{
			padded_width = rsx::next_pow2(width_in_block + border + border);
			padded_height = rsx::next_pow2(row_count + border + border);
		}
		else
		{
			padded_width = width_in_block;
			padded_height = row_count;
		}

		// NOTE(review): the extra factor of 2 over the padded volume mirrors
		// copy_unmodified_block_swizzled's sizing — presumably headroom for the
		// swizzle conversion; verify.
		u32 size = padded_width * padded_height * depth * 2;
		rsx::simple_array<U> tmp(size);

		rsx::convert_linear_swizzle_3d<U>(src.data(), tmp.data(), padded_width, padded_height, depth);

		std::span<const U> src_span = tmp;
		convert_16_block_32::copy_mipmap_level(dst, src_span, width_in_block, row_count, depth, border, dst_pitch_in_block, padded_width, converter);
	}
};
#endif
// Copies one mipmap level without any per-texel conversion. A "block" is one
// texel block and consists of 'words_per_block' elements of type T/U.
// 'border' blocks on each edge of the source are skipped.
struct copy_unmodified_block
{
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 words_per_block, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block, u32 src_pitch_in_block)
	{
		static_assert(sizeof(T) == sizeof(U), "Type size doesn't match.");

		if (src_pitch_in_block == dst_pitch_in_block && !border)
		{
			// Fast copy: identical layout, single bulk copy clamped to both spans
			const auto data_length = src_pitch_in_block * words_per_block * row_count * depth;
			std::copy_n(src.begin(), std::min<usz>({data_length, src.size(), dst.size()}), dst.begin());
			return;
		}

		// Row-by-row copy, skipping the border (h_porch horizontally, v_porch rows vertically)
		const u32 width_in_words = width_in_block * words_per_block;
		const u32 src_pitch_in_words = src_pitch_in_block * words_per_block;
		const u32 dst_pitch_in_words = dst_pitch_in_block * words_per_block;

		const u32 h_porch = border * words_per_block;
		const u32 v_porch = src_pitch_in_words * border;

		u32 src_offset = h_porch, dst_offset = 0;

		for (int layer = 0; layer < depth; ++layer)
		{
			// Front
			src_offset += v_porch;

			for (int row = 0; row < row_count; ++row)
			{
				// NOTE: src_offset is already shifted along the border at initialization
				std::copy_n(src.begin() + src_offset, width_in_words, dst.begin() + dst_offset);
				src_offset += src_pitch_in_words;
				dst_offset += dst_pitch_in_words;
			}

			// Back
			src_offset += v_porch;
		}
	}
};
struct copy_unmodified_block_swizzled
{
	// NOTE: Pixel channel types are T (out) and const U (in). V is the pixel block type that consumes one whole pixel.
	// e.g 4x16-bit format can use u16, be_t<u16>, u64 as arguments
	// De-swizzles one mipmap level. The fast path de-swizzles straight into
	// dst; otherwise it de-swizzles into a temporary (sized for the padded
	// power-of-two layout) and delegates the bordered copy to
	// copy_unmodified_block.
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 words_per_block, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block)
	{
		if (std::is_same_v<T, U> && dst_pitch_in_block == width_in_block && words_per_block == 1 && !border)
		{
			// Fast path: layouts line up, convert directly into the destination
			rsx::convert_linear_swizzle_3d<T>(src.data(), dst.data(), width_in_block, row_count, depth);
		}
		else
		{
			// Bordered swizzled data is padded to power-of-two dimensions
			u32 padded_width, padded_height;
			if (border)
			{
				padded_width = rsx::next_pow2(width_in_block + border + border);
				padded_height = rsx::next_pow2(row_count + border + border);
			}
			else
			{
				padded_width = width_in_block;
				padded_height = row_count;
			}

			const u32 size_in_block = padded_width * padded_height * depth * 2;
			rsx::simple_array<U> tmp(size_in_block * words_per_block);

			if (words_per_block == 1) [[likely]]
			{
				rsx::convert_linear_swizzle_3d<T>(src.data(), tmp.data(), padded_width, padded_height, depth);
			}
			else
			{
				// Multi-word blocks are de-swizzled as whole blocks of the
				// matching machine word size
				switch (words_per_block * sizeof(T))
				{
				case 4:
					rsx::convert_linear_swizzle_3d<u32>(src.data(), tmp.data(), padded_width, padded_height, depth);
					break;
				case 8:
					rsx::convert_linear_swizzle_3d<u64>(src.data(), tmp.data(), padded_width, padded_height, depth);
					break;
				case 16:
					rsx::convert_linear_swizzle_3d<u128>(src.data(), tmp.data(), padded_width, padded_height, depth);
					break;
				default:
					fmt::throw_exception("Failed to decode swizzled format, words_per_block=%d, src_type_size=%d", words_per_block, sizeof(T));
				}
			}

			std::span<const U> src_span = tmp;
			copy_unmodified_block::copy_mipmap_level(dst, src_span, words_per_block, width_in_block, row_count, depth, border, dst_pitch_in_block, padded_width);
		}
	}
};
// Un-tiles Nvidia VTC-compressed 3D texture data into linear, back-to-back
// 2D slices. Each element T/U is one compressed block (8 bytes for DXT1,
// 16 bytes for DXT5).
struct copy_unmodified_block_vtc
{
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 width_in_block, u16 row_count, u16 depth, u32 dst_pitch_in_block, u32 /*src_pitch_in_block*/)
	{
		static_assert(sizeof(T) == sizeof(U), "Type size doesn't match.");
		u32 plane_size = dst_pitch_in_block * row_count;
		u32 row_element_count = width_in_block * row_count;
		u32 dst_offset = 0;
		u32 src_offset = 0;
		const u16 depth_4 = (depth >> 2) * 4; // multiple of 4

		// Undo Nvidia VTC tiling - place each 2D texture slice back to back in linear memory
		//
		// More info:
		// https://www.khronos.org/registry/OpenGL/extensions/NV/NV_texture_compression_vtc.txt
		//
		// Note that the memory is tiled 4 planes at a time in the depth direction.
		// e.g. d0, d1, d2, d3 is tiled as a group then d4, d5, d6, d7
		//
		// Tile as 4x4x4
		for (int d = 0; d < depth_4; d++)
		{
			// Copy one slice of the 3d texture
			for (u32 i = 0; i < row_element_count; i += 1)
			{
				// Copy one span (8 bytes for DXT1 or 16 bytes for DXT5)
				dst[dst_offset + i] = src[src_offset + i * 4];
			}

			dst_offset += plane_size;

			// Last plane in the group of 4?
			if ((d & 0x3) == 0x3)
			{
				// Move forward to next group of 4 planes
				src_offset += row_element_count * 4 - 3;
			}
			else
			{
				src_offset += 1;
			}
		}

		// End Case - tile as 4x4x3 or 4x4x2 or 4x4x1
		const int vtc_tile_count = depth - depth_4;
		for (int d = 0; d < vtc_tile_count; d++)
		{
			// Copy one slice of the 3d texture
			for (u32 i = 0; i < row_element_count; i += 1)
			{
				// Copy one span (8 bytes for DXT1 or 16 bytes for DXT5)
				dst[dst_offset + i] = src[src_offset + i * vtc_tile_count];
			}

			dst_offset += plane_size;
			src_offset += 1;
		}
	}
};
// Inverse of copy_unmodified_block_vtc: re-tiles linear, back-to-back 2D
// slices into Nvidia VTC layout (groups of 4 depth planes interleaved).
struct copy_linear_block_to_vtc
{
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 width_in_block, u16 row_count, u16 depth, u32 /*dst_pitch_in_block*/, u32 src_pitch_in_block)
	{
		static_assert(sizeof(T) == sizeof(U), "Type size doesn't match.");
		u32 plane_size = src_pitch_in_block * row_count;
		u32 row_element_count = width_in_block * row_count;
		u32 dst_offset = 0;
		u32 src_offset = 0;
		const u16 depth_4 = (depth >> 2) * 4; // multiple of 4

		// Convert incoming linear texture to VTC compressed texture
		// https://www.khronos.org/registry/OpenGL/extensions/NV/NV_texture_compression_vtc.txt
		// Tile as 4x4x4
		for (int d = 0; d < depth_4; d++)
		{
			// Copy one slice of the 3d texture
			for (u32 i = 0; i < row_element_count; i += 1)
			{
				// Copy one span (8 bytes for DXT1 or 16 bytes for DXT5)
				dst[dst_offset + i * 4] = src[src_offset + i];
			}

			src_offset += plane_size;

			// Last plane in the group of 4?
			if ((d & 0x3) == 0x3)
			{
				// Move forward to next group of 4 planes
				dst_offset += row_element_count * 4 - 3;
			}
			else
			{
				dst_offset ++;
			}
		}

		// End Case - tile as 4x4x3 or 4x4x2 or 4x4x1
		const int vtc_tile_count = depth - depth_4;
		for (int d = 0; d < vtc_tile_count; d++)
		{
			// Copy one slice of the 3d texture
			for (u32 i = 0; i < row_element_count; i += 1)
			{
				// Copy one span (8 bytes for DXT1 or 16 bytes for DXT5)
				dst[dst_offset + i * vtc_tile_count] = src[src_offset + i];
			}

			src_offset += row_element_count;
			dst_offset ++;
		}
	}
};
struct copy_decoded_rb_rg_block
{
template <bool SwapWords = false, typename T>
static void copy_mipmap_level(std::span<u32> dst, std::span<const T> src, u16 width_in_block, u16 row_count, u16 depth, u32 dst_pitch_in_block, u32 src_pitch_in_block)
{
static_assert(sizeof(T) == 4, "Type size doesn't match.");
u32 src_offset = 0;
u32 dst_offset = 0;
// Temporaries
u32 red0, red1, blue, green;
for (int row = 0; row < row_count * depth; ++row)
{
for (int col = 0; col < width_in_block; ++col)
{
// Decompress one block to 2 pixels at a time and write output in BGRA format
const auto data = src[src_offset + col];
if constexpr (SwapWords)
{
// BR_GR
blue = (data >> 0) & 0xFF;
red0 = (data >> 8) & 0xFF;
green = (data >> 16) & 0XFF;
red1 = (data >> 24) & 0xFF;
}
else
{
// RB_RG
red0 = (data >> 0) & 0xFF;
blue = (data >> 8) & 0xFF;
red1 = (data >> 16) & 0XFF;
green = (data >> 24) & 0xFF;
}
dst[dst_offset + (col * 2)] = blue | (green << 8) | (red0 << 16) | (0xFF << 24);
dst[dst_offset + (col * 2 + 1)] = blue | (green << 8) | (red1 << 16) | (0xFF << 24);
}
src_offset += src_pitch_in_block;
dst_offset += dst_pitch_in_block;
}
}
};
// Copies one mipmap level of RGB655 texels, converting each to RGB565.
// Mirrors the layout handling of convert_16_block_32 ('border' texels on
// each edge of the source are skipped).
struct copy_rgb655_block
{
	template<typename T>
	static void copy_mipmap_level(std::span<u16> dst, std::span<const T> src, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block, u32 src_pitch_in_block)
	{
		static_assert(sizeof(T) == 2, "Type size doesn't match.");

		u32 src_offset = 0, dst_offset = 0;
		const u32 v_porch = src_pitch_in_block * border;

		for (int layer = 0; layer < depth; ++layer)
		{
			// Front: skip the leading border rows
			src_offset += v_porch;

			for (u32 row = 0; row < row_count; ++row)
			{
				for (int col = 0; col < width_in_block; ++col)
				{
					// '+ border' skips the horizontal leading border texels
					dst[dst_offset + col] = convert_rgb655_to_rgb565(src[src_offset + col + border]);
				}

				src_offset += src_pitch_in_block;
				dst_offset += dst_pitch_in_block;
			}

			// Back: skip the trailing border rows
			src_offset += v_porch;
		}
	}
};
// Swizzled variant of copy_rgb655_block: de-swizzles the (power-of-two
// padded) source into a linear temporary, then delegates to the linear copy.
struct copy_rgb655_block_swizzled
{
	template<typename T, typename U>
	static void copy_mipmap_level(std::span<T> dst, std::span<const U> src, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block)
	{
		// Bordered swizzled data is padded to power-of-two dimensions
		u32 padded_width, padded_height;
		if (border)
		{
			padded_width = rsx::next_pow2(width_in_block + border + border);
			padded_height = rsx::next_pow2(row_count + border + border);
		}
		else
		{
			padded_width = width_in_block;
			padded_height = row_count;
		}

		// NOTE(review): sized 2x the padded volume, like the other swizzled
		// helpers in this file — presumably headroom; verify.
		u32 size = padded_width * padded_height * depth * 2;
		rsx::simple_array<U> tmp(size);

		rsx::convert_linear_swizzle_3d<U>(src.data(), tmp.data(), padded_width, padded_height, depth);

		std::span<const U> src_span = tmp;
		copy_rgb655_block::copy_mipmap_level(dst, src_span, width_in_block, row_count, depth, border, dst_pitch_in_block, padded_width);
	}
};
namespace
{
	/**
	 * Generates the copy instructions required to build the texture GPU-side without actually copying anything.
	 * Returns a set of addresses and data lengths to use. This can be used to generate a GPU task to avoid the CPU doing the heavy lifting.
	 * Destination addresses are emitted as offsets from nullptr; the consumer rebases them onto the real target.
	 */
	std::vector<rsx::memory_transfer_cmd>
	build_transfer_cmds(const void* src, u16 block_size_in_bytes, u16 width_in_block, u16 row_count, u16 depth, u8 border, u32 dst_pitch_in_block, u32 src_pitch_in_block)
	{
		std::vector<rsx::memory_transfer_cmd> result;

		if (src_pitch_in_block == dst_pitch_in_block && !border)
		{
			// Fast copy: source and destination are equally pitched with no border,
			// so a single command covers the entire image
			rsx::memory_transfer_cmd cmd;
			cmd.src = src;
			cmd.dst = nullptr;
			cmd.length = src_pitch_in_block * block_size_in_bytes * row_count * depth;
			return { cmd };
		}

		const u32 width_in_bytes = width_in_block * block_size_in_bytes;
		const u32 src_pitch_in_bytes = src_pitch_in_block * block_size_in_bytes;
		const u32 dst_pitch_in_bytes = dst_pitch_in_block * block_size_in_bytes;

		// Horizontal and vertical border (porch) sizes skipped in the source
		const u32 h_porch = border * block_size_in_bytes;
		const u32 v_porch = src_pitch_in_bytes * border;

		auto src_ = static_cast<const char*>(src) + h_porch;
		auto dst_ = static_cast<const char*>(nullptr);

		for (int layer = 0; layer < depth; ++layer)
		{
			// Front border rows of this slice
			src_ += v_porch;

			for (int row = 0; row < row_count; ++row)
			{
				// One command per row: copy only the payload, skipping borders
				rsx::memory_transfer_cmd cmd{ dst_, src_, width_in_bytes };
				result.push_back(cmd);
				src_ += src_pitch_in_bytes;
				dst_ += dst_pitch_in_bytes;
			}

			// Back border rows of this slice
			src_ += v_porch;
		}

		return result;
	}

	/**
	 * Texture upload template.
	 *
	 * Source textures are stored as follows (for power-of-2 textures):
	 * - For linear textures, every mipmap level shares the row pitch of mipmap 0. This means non-zero mipmaps have padding between rows.
	 * - For swizzled textures, the row pitch is texture width X pixel/block size. There is no padding between rows.
	 * - There is no padding between 2 mipmap levels. This means the next mipmap level starts at offset rowpitch X row count.
	 * - Cubemap images are 128-byte aligned.
	 *
	 * The template iterates over all depth slices (including cubemap faces) and over all mipmaps.
	 * Sometimes the texture provides a pitch even if the texture is swizzled (and then packed); in such a case it is ignored. It is passed via suggested_pitch and is used only if padded_row is false.
	 */
	template <u8 block_edge_in_texel, typename SRC_TYPE>
	std::vector<rsx::subresource_layout> get_subresources_layout_impl(const std::byte *texture_data_pointer, u16 width_in_texel, u16 height_in_texel, u16 depth, u8 layer_count, u16 mipmap_count, u32 suggested_pitch_in_bytes, bool padded_row, bool border)
	{
		/**
		 * Note about size type: RSX texture width is stored in a 16-bit int and pitch is stored in a 20-bit int.
		 */

		// <= 128 so fits in u8
		u8 block_size_in_bytes = sizeof(SRC_TYPE);

		std::vector<rsx::subresource_layout> result;
		usz offset_in_src = 0;

		// Border texel count per edge; swizzled (non-padded) layouts use a 4-texel border
		const u8 border_size = border ? (padded_row ? 1 : 4) : 0;
		u32 src_pitch_in_block;
		u32 full_height_in_block;

		for (unsigned layer = 0; layer < layer_count; layer++)
		{
			u16 miplevel_width_in_texel = width_in_texel, miplevel_height_in_texel = height_in_texel, miplevel_depth = depth;
			for (unsigned mip_level = 0; mip_level < mipmap_count; mip_level++)
			{
				result.push_back({});
				rsx::subresource_layout& current_subresource_layout = result.back();

				current_subresource_layout.width_in_texel = miplevel_width_in_texel;
				current_subresource_layout.height_in_texel = miplevel_height_in_texel;
				current_subresource_layout.level = mip_level;
				current_subresource_layout.layer = layer;
				current_subresource_layout.depth = miplevel_depth;
				current_subresource_layout.border = border_size;

				if constexpr (block_edge_in_texel == 1)
				{
					// Uncompressed: one block per texel
					current_subresource_layout.width_in_block = miplevel_width_in_texel;
					current_subresource_layout.height_in_block = miplevel_height_in_texel;
				}
				else if constexpr (block_edge_in_texel == 4)
				{
					// DXT-style 4x4 blocks: round both dimensions up to whole blocks
					current_subresource_layout.width_in_block = utils::aligned_div(miplevel_width_in_texel, block_edge_in_texel);
					current_subresource_layout.height_in_block = utils::aligned_div(miplevel_height_in_texel, block_edge_in_texel);
				}
				else
				{
					// Only the width is compressed (e.g. interleaved 2-texel blocks)
					current_subresource_layout.width_in_block = utils::aligned_div(miplevel_width_in_texel, block_edge_in_texel);
					current_subresource_layout.height_in_block = miplevel_height_in_texel;
				}

				if (padded_row)
				{
					// Linear layout: pitch is fixed by the caller-provided value
					src_pitch_in_block = suggested_pitch_in_bytes / block_size_in_bytes;
					full_height_in_block = current_subresource_layout.height_in_block + (border_size + border_size);
				}
				else if (!border)
				{
					// Swizzled, no border: tightly packed
					src_pitch_in_block = current_subresource_layout.width_in_block;
					full_height_in_block = current_subresource_layout.height_in_block;
				}
				else
				{
					// Swizzled with border: dimensions are padded out to powers of 2
					src_pitch_in_block = rsx::next_pow2(current_subresource_layout.width_in_block + border_size + border_size);
					full_height_in_block = rsx::next_pow2(current_subresource_layout.height_in_block + border_size + border_size);
				}

				const u32 slice_sz = src_pitch_in_block * block_size_in_bytes * full_height_in_block * miplevel_depth;
				current_subresource_layout.pitch_in_block = src_pitch_in_block;
				current_subresource_layout.data = { texture_data_pointer + offset_in_src, slice_sz };

				offset_in_src += slice_sz;
				// Halve each dimension per mip level, clamping at 1
				miplevel_width_in_texel = std::max(miplevel_width_in_texel / 2, 1);
				miplevel_height_in_texel = std::max(miplevel_height_in_texel / 2, 1);
				miplevel_depth = std::max(miplevel_depth / 2, 1);
			}

			if (!padded_row) // Only swizzled textures obey this restriction
			{
				// Cubemap faces/layers are 128-byte aligned
				offset_in_src = utils::align(offset_in_src, 128);
			}
		}

		return result;
	}
}
// Rounds a row of 'width_in_block' elements of type T up to 'alignment' bytes
// and returns the resulting pitch expressed in elements of T.
template<typename T>
u32 get_row_pitch_in_block(u16 width_in_block, usz alignment)
{
	const usz pitch = width_in_block * sizeof(T);
	if (pitch == alignment)
	{
		// Already exactly one alignment unit wide
		return width_in_block;
	}

	const usz rounded_up = ((pitch + alignment - 1) / alignment) * alignment;
	return static_cast<u32>(rounded_up / sizeof(T));
}
// Same as the templated variant, but with the block size supplied at runtime.
u32 get_row_pitch_in_block(u16 block_size_in_bytes, u16 width_in_block, usz alignment)
{
	const usz pitch = width_in_block * block_size_in_bytes;
	if (pitch == alignment)
	{
		// Already exactly one alignment unit wide
		return width_in_block;
	}

	const usz rounded_up = ((pitch + alignment - 1) / alignment) * alignment;
	return static_cast<u32>(rounded_up / block_size_in_bytes);
}
/**
 * RSX ignores unused texture dimensions, so applications may leave them as 0.
 * Normalize those to 1 here to stay general.
 * Returns (height, depth, layer_count) for the given texture.
 */
template<typename RsxTextureType>
std::tuple<u16, u16, u8> get_height_depth_layer(const RsxTextureType &tex)
{
	const auto dimension = tex.get_extended_texture_dimension();
	switch (dimension)
	{
	case rsx::texture_dimension_extended::texture_dimension_1d:
		return std::make_tuple(1, 1, 1);
	case rsx::texture_dimension_extended::texture_dimension_2d:
		return std::make_tuple(tex.height(), 1, 1);
	case rsx::texture_dimension_extended::texture_dimension_cubemap:
		// A cubemap is 6 two-dimensional layers
		return std::make_tuple(tex.height(), 1, 6);
	case rsx::texture_dimension_extended::texture_dimension_3d:
		return std::make_tuple(tex.height(), tex.depth(), 1);
	}
	fmt::throw_exception("Unsupported texture dimension");
}
}
/**
 * Builds the per-mipmap/per-layer subresource layout list for an RSX texture
 * object by dispatching on its GCM format to the typed implementation above.
 */
template<typename RsxTextureType>
std::vector<rsx::subresource_layout> get_subresources_layout_impl(const RsxTextureType &texture)
{
	u16 w = texture.width();
	u16 h;
	u16 depth;
	u8 layer;

	std::tie(h, depth, layer) = get_height_depth_layer(texture);

	// Strip the linear/normalized flags to get the bare format code
	const auto format = texture.format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);

	auto pitch = texture.pitch();
	const u32 texaddr = rsx::get_address(texture.offset(), texture.location());
	auto pixels = vm::_ptr<const std::byte>(texaddr);

	const bool is_swizzled = !(texture.format() & CELL_GCM_TEXTURE_LN);
	const bool has_border = !texture.border_type();

	if (!is_swizzled)
	{
		// Linear textures must provide a pitch that covers at least one packed row
		if (const auto packed_pitch = rsx::get_format_packed_pitch(format, w, has_border, false); pitch < packed_pitch) [[unlikely]]
		{
			if (pitch)
			{
				// Pitch too small: clamp the effective width to what the pitch can hold
				const u32 real_width_in_block = pitch / rsx::get_format_block_size_in_bytes(format);
				w = std::max<u16>(real_width_in_block * rsx::get_format_block_size_in_texel(format), 1);
			}
			else
			{
				// No pitch at all: degenerate to a single packed row
				h = depth = 1;
				pitch = packed_pitch;
			}
		}
	}

	// Template parameters are <block edge in texels, block storage type>
	switch (format)
	{
	case CELL_GCM_TEXTURE_B8:
		return get_subresources_layout_impl<1, u8>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
	case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
		return get_subresources_layout_impl<2, u32>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT: // Untested
	case CELL_GCM_TEXTURE_D1R5G5B5:
	case CELL_GCM_TEXTURE_A1R5G5B5:
	case CELL_GCM_TEXTURE_A4R4G4B4:
	case CELL_GCM_TEXTURE_R5G5B5A1:
	case CELL_GCM_TEXTURE_R5G6B5:
	case CELL_GCM_TEXTURE_R6G5B5:
	case CELL_GCM_TEXTURE_G8B8:
	case CELL_GCM_TEXTURE_X16:
		return get_subresources_layout_impl<1, u16>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_DEPTH24_D8: // Untested
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: // Untested
	case CELL_GCM_TEXTURE_D8R8G8B8:
	case CELL_GCM_TEXTURE_A8R8G8B8:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_X32_FLOAT:
		return get_subresources_layout_impl<1, u32>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
		return get_subresources_layout_impl<1, u64>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
		return get_subresources_layout_impl<1, u128>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, has_border);
	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
		// DXT formats never carry borders
		return get_subresources_layout_impl<4, u64>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, false);
	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
		return get_subresources_layout_impl<4, u128>(pixels, w, h, depth, layer, texture.get_exact_mipmap_count(), pitch, !is_swizzled, false);
	}
	fmt::throw_exception("Wrong format 0x%x", format);
}
namespace rsx
{
void typeless_xfer::analyse()
{
	// TODO: This method needs to be re-evaluated
	// If both surfaces are typeless with the same GCM format and carry identical
	// non-unity scaling hints, the internal formats very likely match (e.g.
	// RTT->RTT transfer of WZYX16/32), so the typeless path can be disabled.
	if (src_is_typeless && dst_is_typeless && src_gcm_format == dst_gcm_format &&
		fcmp(src_scaling_hint, dst_scaling_hint) && !fcmp(src_scaling_hint, 1.f))
	{
		src_is_typeless = dst_is_typeless = false;
		src_scaling_hint = dst_scaling_hint = 1.f;
	}
}
// Builds the upload layout for a fragment (pixel-stage) texture.
std::vector<rsx::subresource_layout> get_subresources_layout(const rsx::fragment_texture& texture)
{
	return get_subresources_layout_impl(texture);
}
// Builds the upload layout for a vertex-stage texture.
std::vector<rsx::subresource_layout> get_subresources_layout(const rsx::vertex_texture& texture)
{
	return get_subresources_layout_impl(texture);
}
/**
 * Uploads (or schedules the upload of) one texture subresource into dst_buffer.
 *
 * Formats that need software conversion are converted immediately inside the
 * switch below. Simple word-based formats instead set word_size/words_per_block
 * and fall through to the generic copy/byteswap/deswizzle stage after the
 * switch. The returned texture_memory_info describes any work left for the
 * caller (GPU-side swap/deswizzle, or deferred zero-copy transfer commands).
 */
texture_memory_info upload_texture_subresource(rsx::io_buffer& dst_buffer, const rsx::subresource_layout& src_layout, int format, bool is_swizzled, texture_uploader_capabilities& caps)
{
	u16 w = src_layout.width_in_block;
	u16 h = src_layout.height_in_block;
	u16 depth = src_layout.depth;
	u32 pitch = src_layout.pitch_in_block;

	texture_memory_info result{};

	// Ignore when texture width > pitch
	if (w > pitch)
		return result;

	// Check if we can use a fast path
	// word_size == 0 after the switch means the format was fully handled inside it
	int word_size = 0;
	int words_per_block;
	u32 dst_pitch_in_block;

	switch (format)
	{
	case CELL_GCM_TEXTURE_B8:
	{
		word_size = words_per_block = 1;
		dst_pitch_in_block = get_row_pitch_in_block<u8>(w, caps.alignment);
		break;
	}

	case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
	{
		// Decoded output is one u32 per texel, hence the width_in_texel pitch
		copy_decoded_rb_rg_block::copy_mipmap_level<true>(dst_buffer.as_span<u32>(), src_layout.data.as_span<const u32>(), w, h, depth, get_row_pitch_in_block<u32>(src_layout.width_in_texel, caps.alignment), src_layout.pitch_in_block);
		break;
	}

	case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
	{
		copy_decoded_rb_rg_block::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const u32>(), w, h, depth, get_row_pitch_in_block<u32>(src_layout.width_in_texel, caps.alignment), src_layout.pitch_in_block);
		break;
	}

#ifndef __APPLE__
	case CELL_GCM_TEXTURE_R6G5B5:
	{
		// RGB655 has no host equivalent; convert to RGB565 in software
		if (is_swizzled)
			copy_rgb655_block_swizzled::copy_mipmap_level(dst_buffer.as_span<u16>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u16>(w, caps.alignment));
		else
			copy_rgb655_block::copy_mipmap_level(dst_buffer.as_span<u16>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u16>(w, caps.alignment), src_layout.pitch_in_block);
		break;
	}

	case CELL_GCM_TEXTURE_D1R5G5B5:
	case CELL_GCM_TEXTURE_A1R5G5B5:
	case CELL_GCM_TEXTURE_A4R4G4B4:
	case CELL_GCM_TEXTURE_R5G5B5A1:
	case CELL_GCM_TEXTURE_R5G6B5:
#else
	// convert the following formats to B8G8R8A8_UNORM, because they are not supported by Metal
	case CELL_GCM_TEXTURE_R6G5B5:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_rgb655_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_rgb655_to_bgra8);
		break;
	}

	case CELL_GCM_TEXTURE_D1R5G5B5:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_d1rgb5_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_d1rgb5_to_bgra8);
		break;
	}

	case CELL_GCM_TEXTURE_A1R5G5B5:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_a1rgb5_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_a1rgb5_to_bgra8);
		break;
	}

	case CELL_GCM_TEXTURE_A4R4G4B4:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_argb4_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_argb4_to_bgra8);
		break;
	}

	case CELL_GCM_TEXTURE_R5G5B5A1:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_rgb5a1_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_rgb5a1_to_bgra8);
		break;
	}

	case CELL_GCM_TEXTURE_R5G6B5:
	{
		if (is_swizzled)
			convert_16_block_32_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), &convert_rgb565_to_bgra8);
		else
			convert_16_block_32::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u16>>(), w, h, depth, src_layout.border, get_row_pitch_in_block<u32>(w, caps.alignment), src_layout.pitch_in_block, &convert_rgb565_to_bgra8);
		break;
	}
#endif

	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
	// TODO: Test if the HILO compressed formats support swizzling (other compressed_* formats ignore this option)
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT: // Untested
	case CELL_GCM_TEXTURE_G8B8:
	{
		word_size = 2;
		words_per_block = 1;
		dst_pitch_in_block = get_row_pitch_in_block<u16>(w, caps.alignment);
		break;
	}

	case CELL_GCM_TEXTURE_A8R8G8B8:
	case CELL_GCM_TEXTURE_D8R8G8B8:
	case CELL_GCM_TEXTURE_DEPTH24_D8:
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT: // Untested
	{
		word_size = 4;
		words_per_block = 1;
		dst_pitch_in_block = get_row_pitch_in_block<u32>(w, caps.alignment);
		break;
	}

	// NOTE: Textures with WZYX notations refer to arbitrary data and not color swizzles as in common GPU lang
	// WZYX actually maps directly as a RGBA16 format in Cell memory! R=W, not R=X
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
	{
		// Multi-word blocks of 16-bit words; byteswap per word
		const u16 block_size = get_format_block_size_in_bytes(format);
		word_size = 2;
		words_per_block = block_size / 2;
		dst_pitch_in_block = get_row_pitch_in_block(block_size, w, caps.alignment);
		break;
	}

	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
	{
		// Multi-word blocks of 32-bit words; byteswap per word
		const u16 block_size = get_format_block_size_in_bytes(format);
		word_size = 4;
		words_per_block = block_size / 4;
		dst_pitch_in_block = get_row_pitch_in_block(block_size, w, caps.alignment);
		break;
	}

	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
	{
		const bool is_3d = depth > 1;
		const bool is_po2 = utils::is_power_of_2(src_layout.width_in_texel) && utils::is_power_of_2(src_layout.height_in_texel);

		if (is_3d && is_po2 && !caps.supports_vtc_decoding)
		{
			// PS3 uses the Nvidia VTC memory layout for compressed 3D textures.
			// This is only supported using Nvidia OpenGL.
			// Remove the VTC tiling to support ATI and Vulkan.
			copy_unmodified_block_vtc::copy_mipmap_level(dst_buffer.as_span<u64>(), src_layout.data.as_span<const u64>(), w, h, depth, get_row_pitch_in_block<u64>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else if (is_3d && !is_po2 && caps.supports_vtc_decoding)
		{
			// In this case, hardware expects us to feed it a VTC input, but on PS3 we only have a linear one.
			// We need to compress the 2D-planar DXT input into a VTC output
			copy_linear_block_to_vtc::copy_mipmap_level(dst_buffer.as_span<u64>(), src_layout.data.as_span<const u64>(), w, h, depth, get_row_pitch_in_block<u64>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else if (caps.supports_zero_copy)
		{
			// No conversion needed: hand the caller a list of transfer commands instead
			result.require_upload = true;
			result.deferred_cmds = build_transfer_cmds(src_layout.data.data(), 8, w, h, depth, 0, get_row_pitch_in_block<u64>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else
		{
			copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u64>(), src_layout.data.as_span<const u64>(), 1, w, h, depth, 0, get_row_pitch_in_block<u64>(w, caps.alignment), src_layout.pitch_in_block);
		}
		break;
	}

	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
	{
		// Same logic as DXT1 above, but with 16-byte (u128) blocks
		const bool is_3d = depth > 1;
		const bool is_po2 = utils::is_power_of_2(src_layout.width_in_texel) && utils::is_power_of_2(src_layout.height_in_texel);

		if (is_3d && is_po2 && !caps.supports_vtc_decoding)
		{
			// PS3 uses the Nvidia VTC memory layout for compressed 3D textures.
			// This is only supported using Nvidia OpenGL.
			// Remove the VTC tiling to support ATI and Vulkan.
			copy_unmodified_block_vtc::copy_mipmap_level(dst_buffer.as_span<u128>(), src_layout.data.as_span<const u128>(), w, h, depth, get_row_pitch_in_block<u128>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else if (is_3d && !is_po2 && caps.supports_vtc_decoding)
		{
			// In this case, hardware expects us to feed it a VTC input, but on PS3 we only have a linear one.
			// We need to compress the 2D-planar DXT input into a VTC output
			copy_linear_block_to_vtc::copy_mipmap_level(dst_buffer.as_span<u128>(), src_layout.data.as_span<const u128>(), w, h, depth, get_row_pitch_in_block<u128>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else if (caps.supports_zero_copy)
		{
			result.require_upload = true;
			result.deferred_cmds = build_transfer_cmds(src_layout.data.data(), 16, w, h, depth, 0, get_row_pitch_in_block<u128>(w, caps.alignment), src_layout.pitch_in_block);
		}
		else
		{
			copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u128>(), src_layout.data.as_span<const u128>(), 1, w, h, depth, 0, get_row_pitch_in_block<u128>(w, caps.alignment), src_layout.pitch_in_block);
		}
		break;
	}

	default:
		fmt::throw_exception("Wrong format 0x%x", format);
	}

	// Generic path for word-based formats that were not handled inside the switch
	if (word_size)
	{
		if (word_size == 1)
		{
			// Single-byte words need no byteswap
			if (is_swizzled)
			{
				copy_unmodified_block_swizzled::copy_mipmap_level(dst_buffer.as_span<u8>(), src_layout.data.as_span<const u8>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block);
			}
			else if (caps.supports_zero_copy)
			{
				result.require_upload = true;
				result.deferred_cmds = build_transfer_cmds(src_layout.data.data(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
			}
			else
			{
				copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u8>(), src_layout.data.as_span<const u8>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
			}
		}
		else
		{
			result.element_size = word_size;
			result.block_length = words_per_block;

			// Decide what the CPU must still do vs what can be offloaded to the GPU
			bool require_cpu_swizzle = !caps.supports_hw_deswizzle && is_swizzled;
			bool require_cpu_byteswap = !caps.supports_byteswap;

			if (is_swizzled && caps.supports_hw_deswizzle)
			{
				// HW deswizzle only handles 4-byte-aligned blocks
				if (word_size == 4 || (((word_size * words_per_block) & 3) == 0))
				{
					result.require_deswizzle = true;
				}
				else
				{
					require_cpu_swizzle = true;
				}
			}

			if (!require_cpu_byteswap && !require_cpu_swizzle)
			{
				// Copy raw big-endian data; the GPU will swap (and possibly deswizzle)
				result.require_swap = true;

				if (caps.supports_zero_copy)
				{
					result.require_upload = true;
					result.deferred_cmds = build_transfer_cmds(src_layout.data.data(), word_size * words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
				}
				else if (word_size == 2)
				{
					copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u16>(), src_layout.data.as_span<const u16>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
				}
				else if (word_size == 4)
				{
					copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const u32>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
				}
			}
			else
			{
				// CPU path: reading through be_t swaps each word during the copy
				if (word_size == 2)
				{
					if (is_swizzled)
						copy_unmodified_block_swizzled::copy_mipmap_level(dst_buffer.as_span<u16>(), src_layout.data.as_span<const be_t<u16>>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block);
					else
						copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u16>(), src_layout.data.as_span<const be_t<u16>>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
				}
				else if (word_size == 4)
				{
					if (is_swizzled)
						copy_unmodified_block_swizzled::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u32>>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block);
					else
						copy_unmodified_block::copy_mipmap_level(dst_buffer.as_span<u32>(), src_layout.data.as_span<const be_t<u32>>(), words_per_block, w, h, depth, src_layout.border, dst_pitch_in_block, src_layout.pitch_in_block);
				}
			}
		}
	}

	return result;
}
// Returns true if the format stays block-compressed on the host GPU.
// Formats that RSX stores compressed but that are decompressed in software
// before upload (HILO, B8R8/R8B8 interleaved) report false.
bool is_compressed_host_format(u32 texture_format)
{
	switch (texture_format)
	{
	case CELL_GCM_TEXTURE_B8:
	case CELL_GCM_TEXTURE_A1R5G5B5:
	case CELL_GCM_TEXTURE_A4R4G4B4:
	case CELL_GCM_TEXTURE_R5G6B5:
	case CELL_GCM_TEXTURE_A8R8G8B8:
	case CELL_GCM_TEXTURE_G8B8:
	case CELL_GCM_TEXTURE_R6G5B5:
	case CELL_GCM_TEXTURE_DEPTH24_D8:
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_R5G5B5A1:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_D1R5G5B5:
	case CELL_GCM_TEXTURE_D8R8G8B8:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	// The following formats are compressed in RSX/GCM but not on the host device.
	// They are decompressed in sw before uploading
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
	case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
	case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
		return false;

	// True compressed formats on the host device
	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
		return true;
	}

	fmt::throw_exception("Unknown format 0x%x", texture_format);
}
// Returns true if the format is an RGB-style format whose 8-bit components can
// be remapped. Special data formats (XY, HILO, DEPTH) are not RGB formats and
// therefore return false.
bool is_int8_remapped_format(u32 format)
{
	switch (format)
	{
	case CELL_GCM_TEXTURE_DEPTH24_D8:
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
		// NOTE: Special data formats (XY, HILO, DEPTH) are not RGB formats
		return false;
	default:
		return true;
	}
}
/**
 * A texture is stored as an array of blocks, where a block is a pixel for standard textures
 * but is a structure containing several pixels for compressed formats.
 * Returns the size of one such block in bytes for the given GCM format.
 */
u8 get_format_block_size_in_bytes(int format)
{
	switch (format)
	{
	case CELL_GCM_TEXTURE_B8: return 1;
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_G8B8:
	case CELL_GCM_TEXTURE_R6G5B5:
	case CELL_GCM_TEXTURE_R5G6B5:
	case CELL_GCM_TEXTURE_D1R5G5B5:
	case CELL_GCM_TEXTURE_R5G5B5A1:
	case CELL_GCM_TEXTURE_A1R5G5B5:
	case CELL_GCM_TEXTURE_A4R4G4B4:
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: return 2;
	case CELL_GCM_TEXTURE_A8R8G8B8:
	case CELL_GCM_TEXTURE_D8R8G8B8:
	case CELL_GCM_TEXTURE_DEPTH24_D8:
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
	case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: return 4;
	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT: return 8;
	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT: return 16;
	default:
		rsx_log.error("Unimplemented block size in bytes for texture format: 0x%x", format);
		return 1;
	}
}
// Returns the edge length of one storage block measured in texels
// (1 for plain formats, 2 for interleaved B8R8/R8B8, 4 for DXT blocks).
u8 get_format_block_size_in_texel(int format)
{
	switch (format)
	{
	case CELL_GCM_TEXTURE_B8:
	case CELL_GCM_TEXTURE_G8B8:
	case CELL_GCM_TEXTURE_D8R8G8B8:
	case CELL_GCM_TEXTURE_D1R5G5B5:
	case CELL_GCM_TEXTURE_A1R5G5B5:
	case CELL_GCM_TEXTURE_A4R4G4B4:
	case CELL_GCM_TEXTURE_A8R8G8B8:
	case CELL_GCM_TEXTURE_R5G5B5A1:
	case CELL_GCM_TEXTURE_R6G5B5:
	case CELL_GCM_TEXTURE_R5G6B5:
	case CELL_GCM_TEXTURE_DEPTH24_D8:
	case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
	case CELL_GCM_TEXTURE_DEPTH16:
	case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
	case CELL_GCM_TEXTURE_X16:
	case CELL_GCM_TEXTURE_Y16_X16:
	case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT:
	case CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT:
	case CELL_GCM_TEXTURE_X32_FLOAT:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
	case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8: return 1;
	case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
	case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8: return 2;
	case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
	case CELL_GCM_TEXTURE_COMPRESSED_DXT45: return 4;
	default:
		rsx_log.error("Unimplemented block size in texels for texture format: 0x%x", format);
		return 1;
	}
}
// Returns bytes per pixel for a render-target color surface format.
u8 get_format_block_size_in_bytes(rsx::surface_color_format format)
{
	switch (format)
	{
	case rsx::surface_color_format::b8:
		return 1;
	case rsx::surface_color_format::g8b8:
	case rsx::surface_color_format::r5g6b5:
	case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
	case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
		return 2;
	case rsx::surface_color_format::a8b8g8r8:
	case rsx::surface_color_format::a8r8g8b8:
	case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
	case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
	case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
	case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
	case rsx::surface_color_format::x32:
		return 4;
	case rsx::surface_color_format::w16z16y16x16:
		return 8;
	case rsx::surface_color_format::w32z32y32x32:
		return 16;
	default:
		fmt::throw_exception("Invalid color format 0x%x", static_cast<u32>(format));
	}
}
// Returns bytes per element for a depth(-stencil) surface format.
u8 get_format_block_size_in_bytes(rsx::surface_depth_format2 format)
{
	// Only the D24S8 variants are 32-bit; the remaining (D16) variants are 16-bit
	const bool is_z24s8 =
		format == rsx::surface_depth_format2::z24s8_uint ||
		format == rsx::surface_depth_format2::z24s8_float;
	return is_z24s8 ? 4 : 2;
}
// Returns the number of samples per pixel implied by the AA mode.
u8 get_format_sample_count(rsx::surface_antialiasing antialias)
{
	if (antialias == rsx::surface_antialiasing::center_1_sample)
	{
		return 1;
	}

	if (antialias == rsx::surface_antialiasing::diagonal_centered_2_samples)
	{
		return 2;
	}

	if (antialias == rsx::surface_antialiasing::square_centered_4_samples ||
		antialias == rsx::surface_antialiasing::square_rotated_4_samples)
	{
		return 4;
	}

	fmt::throw_exception("Unreachable");
}
// Returns true if the depth surface format carries a stencil component.
bool is_depth_stencil_format(rsx::surface_depth_format2 format)
{
	return format == rsx::surface_depth_format2::z24s8_uint ||
	       format == rsx::surface_depth_format2::z24s8_float;
}
/**
 * Returns the number of texel rows encoded by one pitch-length run of bytes.
 */
u8 get_format_texel_rows_per_line(u32 format)
{
	// DXT blocks are 4x4 texels, so one row of blocks covers 4 texel rows
	const bool is_dxt =
		format == CELL_GCM_TEXTURE_COMPRESSED_DXT1 ||
		format == CELL_GCM_TEXTURE_COMPRESSED_DXT23 ||
		format == CELL_GCM_TEXTURE_COMPRESSED_DXT45;
	return is_dxt ? 4 : 1;
}
// Computes the tightly packed row pitch in bytes for the given format/width,
// accounting for optional border texels and swizzled padding.
u32 get_format_packed_pitch(u32 format, u16 width, bool border, bool swizzled)
{
	const auto texels_per_block = get_format_block_size_in_texel(format);
	const auto bytes_per_block = get_format_block_size_in_bytes(format);

	// Round width up to whole blocks
	u32 width_in_block = (width + texels_per_block - 1) / texels_per_block;

	if (border)
	{
		if (swizzled)
		{
			// Bordered swizzled rows are padded out to the next power of 2
			width_in_block = rsx::next_pow2(width_in_block + 8);
		}
		else
		{
			// Linear rows just gain one border block on each side
			width_in_block += 2;
		}
	}

	return width_in_block * bytes_per_block;
}
// Computes the total byte footprint of a placed texture, summing every mipmap
// level (rows aligned to row_pitch_alignment, levels to mipmap_alignment) and
// multiplying by 6 for cubemaps.
usz get_placed_texture_storage_size(u16 width, u16 height, u32 depth, u8 format, u16 mipmap, bool cubemap, usz row_pitch_alignment, usz mipmap_alignment)
{
	// Strip the linear/normalized flags to get the bare format code
	format &= ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);

	const usz block_edge = get_format_block_size_in_texel(format);
	const usz block_size_in_byte = get_format_block_size_in_bytes(format);

	usz width_in_blocks = (width + block_edge - 1) / block_edge;
	usz height_in_blocks = (height + block_edge - 1) / block_edge;

	usz result = 0;
	for (u16 level = 0; level < mipmap; ++level)
	{
		const usz row_pitch = utils::align(block_size_in_byte * width_in_blocks, row_pitch_alignment);
		result += utils::align(row_pitch * height_in_blocks * depth, mipmap_alignment);

		width_in_blocks = std::max<usz>(width_in_blocks / 2, 1);
		height_in_blocks = std::max<usz>(height_in_blocks / 2, 1);
	}

	// Mipmap, height and width aren't allowed to be zero, so the total must be non-zero
	return (ensure(result) * (cubemap ? 6 : 1));
}
// Convenience overload: derives all layout parameters from a fragment texture descriptor.
usz get_placed_texture_storage_size(const rsx::fragment_texture& texture, usz row_pitch_alignment, usz mipmap_alignment)
{
	return get_placed_texture_storage_size(
		texture.width(), texture.height(), texture.depth(), texture.format(),
		texture.mipmap(), texture.cubemap(),
		row_pitch_alignment, mipmap_alignment);
}
// Convenience overload: derives all layout parameters from a vertex texture descriptor.
usz get_placed_texture_storage_size(const rsx::vertex_texture& texture, usz row_pitch_alignment, usz mipmap_alignment)
{
	return get_placed_texture_storage_size(
		texture.width(), texture.height(), texture.depth(), texture.format(),
		texture.mipmap(), texture.cubemap(),
		row_pitch_alignment, mipmap_alignment);
}
// Computes the total memory footprint in bytes of a texture across all layers and
// mip levels. Linear (LN) textures use the caller-supplied constant pitch; packed
// textures derive a per-mip pitch from the format's block geometry. A zero pitch on
// a linear texture is repaired (with a warning) using the packed pitch as fallback.
static usz get_texture_size(u32 format, u16 width, u16 height, u16 depth, u32 pitch, u16 mipmaps, u16 layers, u8 border)
{
// Strip the layout/normalization flag bits to get the raw gcm format code
const auto gcm_format = format & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);
const bool packed = !(format & CELL_GCM_TEXTURE_LN);
const auto texel_rows_per_line = get_format_texel_rows_per_line(gcm_format);
if (!pitch && !packed)
{
if (width > 1 || height > 1)
{
// If width == 1, the scanning just returns texel 0, so it is a valid setup
rsx_log.warning("Invalid texture pitch setup, width=%d, height=%d, format=0x%x(0x%x)",
width, height, format, gcm_format);
}
pitch = get_format_packed_pitch(gcm_format, width, !!border, packed);
}
u32 size = 0;
if (!packed)
{
// Constant pitch layout, simple scanning
const u32 internal_height = (height + texel_rows_per_line - 1) / texel_rows_per_line; // Convert texels to blocks
for (u32 layer = 0; layer < layers; ++layer)
{
u32 mip_height = internal_height;
for (u32 mipmap = 0; mipmap < mipmaps && mip_height > 0; ++mipmap)
{
// Pitch stays constant for every mip; only the row count shrinks
size += pitch * mip_height * depth;
mip_height = std::max(mip_height / 2u, 1u);
}
}
}
else
{
// Variable pitch per mipmap level
const auto texels_per_block = get_format_block_size_in_texel(gcm_format);
const auto bytes_per_block = get_format_block_size_in_bytes(gcm_format);
const u32 internal_height = (height + texel_rows_per_line - 1) / texel_rows_per_line; // Convert texels to blocks
const u32 internal_width = (width + texels_per_block - 1) / texels_per_block; // Convert texels to blocks
for (u32 layer = 0; layer < layers; ++layer)
{
u32 mip_height = internal_height;
u32 mip_width = internal_width;
for (u32 mipmap = 0; mipmap < mipmaps && mip_height > 0; ++mipmap)
{
// Both dimensions halve per mip, so the pitch shrinks with the width
size += (mip_width * bytes_per_block * mip_height * depth);
mip_height = std::max(mip_height / 2u, 1u);
mip_width = std::max(mip_width / 2u, 1u);
}
}
}
return size;
}
// Total backing-store size in bytes for a fragment texture, including all faces and mips.
usz get_texture_size(const rsx::fragment_texture& texture)
{
	const u16 layer_count = texture.cubemap() ? 6 : 1;
	const u8 border_flag = texture.border_type() ^ 1;

	return get_texture_size(texture.format(), texture.width(), texture.height(), texture.depth(),
		texture.pitch(), texture.get_exact_mipmap_count(), layer_count, border_flag);
}
// Total backing-store size in bytes for a vertex texture, including all faces and mips.
usz get_texture_size(const rsx::vertex_texture& texture)
{
	const u16 layer_count = texture.cubemap() ? 6 : 1;
	const u8 border_flag = texture.border_type() ^ 1;

	return get_texture_size(texture.format(), texture.width(), texture.height(), texture.depth(),
		texture.pitch(), texture.get_exact_mipmap_count(), layer_count, border_flag);
}
// Packs the four channel-source selectors (2 bits each, bits 0-7) and the four
// channel-control selectors (2 bits each, bits 8-15) into one encoding word.
u32 get_remap_encoding(const texture_channel_remap_t& remap)
{
	u32 encoded = 0;
	for (u32 ch = 0; ch < 4; ++ch)
	{
		encoded |= (remap.channel_map[ch] << (ch * 2));
		encoded |= (remap.control_map[ch] << (8 + ch * 2));
	}
	return encoded;
}
// Maps a render-target color format to the closest-matching GCM texture format.
// The bool in the returned pair indicates whether the format needs a channel
// swizzle/remap when sampled. Throws on formats with no known mapping.
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_color_format format)
{
switch (format)
{
case rsx::surface_color_format::r5g6b5:
return{ CELL_GCM_TEXTURE_R5G6B5, false };
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
case rsx::surface_color_format::a8r8g8b8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, true }; //verified
// BGR-ordered variants share the ARGB8 texture format and rely on the remap flag
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
case rsx::surface_color_format::a8b8g8r8:
return{ CELL_GCM_TEXTURE_A8R8G8B8, true };
case rsx::surface_color_format::w16z16y16x16:
return{ CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT, true };
case rsx::surface_color_format::w32z32y32x32:
return{ CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT, true };
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
return{ CELL_GCM_TEXTURE_A1R5G5B5, false };
case rsx::surface_color_format::b8:
return{ CELL_GCM_TEXTURE_B8, false };
case rsx::surface_color_format::g8b8:
return{ CELL_GCM_TEXTURE_G8B8, true };
case rsx::surface_color_format::x32:
return{ CELL_GCM_TEXTURE_X32_FLOAT, true }; //verified
default:
fmt::throw_exception("Unhandled surface format 0x%x", static_cast<u32>(format));
}
}
// Maps a depth-stencil surface format to its GCM depth texture format.
// The bool mirrors the color-format overload's "needs remap" flag; all depth
// formats report true here.
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_depth_format2 format)
{
switch (format)
{
case rsx::surface_depth_format2::z16_uint:
return{ CELL_GCM_TEXTURE_DEPTH16, true };
case rsx::surface_depth_format2::z24s8_uint:
return{ CELL_GCM_TEXTURE_DEPTH24_D8, true };
case rsx::surface_depth_format2::z16_float:
return{ CELL_GCM_TEXTURE_DEPTH16_FLOAT, true };
case rsx::surface_depth_format2::z24s8_float:
return{ CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT, true };
default:
fmt::throw_exception("Unreachable");
}
}
// Classifies a depth surface format into the internal RSX format class used to
// select sampling/transfer behavior. Unknown values fall back to the color class.
rsx::format_class classify_format(rsx::surface_depth_format2 format)
{
switch (format)
{
case rsx::surface_depth_format2::z16_uint:
return RSX_FORMAT_CLASS_DEPTH16_UNORM;
case rsx::surface_depth_format2::z24s8_uint:
return RSX_FORMAT_CLASS_DEPTH24_UNORM_X8_PACK32;
case rsx::surface_depth_format2::z16_float:
return RSX_FORMAT_CLASS_DEPTH16_FLOAT;
case rsx::surface_depth_format2::z24s8_float:
return RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32;
default:
return RSX_FORMAT_CLASS_COLOR;
}
}
// Classifies a GCM texture format code into the internal RSX format class.
// Any format that is not one of the four depth formats is treated as color.
rsx::format_class classify_format(u32 gcm_format)
{
switch (gcm_format)
{
case CELL_GCM_TEXTURE_DEPTH16:
return RSX_FORMAT_CLASS_DEPTH16_UNORM;
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
return RSX_FORMAT_CLASS_DEPTH16_FLOAT;
case CELL_GCM_TEXTURE_DEPTH24_D8:
return RSX_FORMAT_CLASS_DEPTH24_UNORM_X8_PACK32;
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
return RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32;
default:
return RSX_FORMAT_CLASS_COLOR;
}
}
// Maximum representable depth value for the surface format: 2-byte formats
// saturate at 0xFFFF, the 24-bit depth formats at 0xFFFFFF.
u32 get_max_depth_value(rsx::surface_depth_format2 format)
{
	if (get_format_block_size_in_bytes(format) == 2)
	{
		return 0xFFFF;
	}

	return 0xFFFFFF;
}
// Returns true for addressing modes that repeat the texture (wrap/mirror).
// All clamping variants return false; unknown modes are logged and treated as clamping.
bool is_texcoord_wrapping_mode(rsx::texture_wrap_mode mode)
{
	switch (mode)
	{
	// Repeating modes
	case rsx::texture_wrap_mode::wrap:
	case rsx::texture_wrap_mode::mirror:
		return true;
	// Clamping modes
	case rsx::texture_wrap_mode::border:
	case rsx::texture_wrap_mode::clamp:
	case rsx::texture_wrap_mode::clamp_to_edge:
	case rsx::texture_wrap_mode::mirror_once_clamp_to_edge:
	case rsx::texture_wrap_mode::mirror_once_border:
	case rsx::texture_wrap_mode::mirror_once_clamp:
		return false;
	default:
		rsx_log.error("Unknown texture wrap mode: %d", static_cast<int>(mode));
		return false;
	}
}
// True when any coordinate actually used by the given dimensionality addresses
// with the 'border' wrap mode. mirror_once_border is deliberately ignored: no
// modern API implements it, so it is emulated with mirror_once_clamp instead.
bool is_border_clamped_texture(
	rsx::texture_wrap_mode wrap_s,
	rsx::texture_wrap_mode wrap_t,
	rsx::texture_wrap_mode wrap_r,
	rsx::texture_dimension dimension)
{
	const bool s_border = (wrap_s == rsx::texture_wrap_mode::border);
	const bool t_border = (wrap_t == rsx::texture_wrap_mode::border);
	const bool r_border = (wrap_r == rsx::texture_wrap_mode::border);

	switch (dimension)
	{
	case rsx::texture_dimension::dimension1d:
		return s_border;
	case rsx::texture_dimension::dimension2d:
		return s_border || t_border;
	case rsx::texture_dimension::dimension3d:
		return s_border || t_border || r_border;
	default:
		return false;
	}
}
}
| 55,559
|
C++
|
.cpp
| 1,421
| 35.660099
| 251
| 0.691993
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,397
|
rsx_replay.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Capture/rsx_replay.cpp
|
#include "stdafx.h"
#include "rsx_replay.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/lv2/sys_rsx.h"
#include "Emu/Cell/lv2/sys_memory.h"
#include "Emu/RSX/RSXThread.h"
#include "util/asm.hpp"
namespace rsx
{
// Builds a minimal RSX context for replaying a capture: sizes a command buffer from
// the recorded stream, allocates user/FIFO memory via the lv2 syscalls, creates the
// RSX context and maps the user memory into RSX IO space. Returns the context id.
// Throws on any allocation or mapping failure.
be_t<u32> rsx_replay_thread::allocate_context()
{
u32 buffer_size = 4;
// run through replay commands to figure out how big command buffer needs to be
for (const auto& rc : frame->replay_commands)
{
// Argument count is encoded in bits [18..28] of the recorded method header
const u32 count = (rc.rsx_command.first >> 18) & 0x7ff;
// allocate for register plus w/e number of arguments it has
buffer_size += (count * 4) + 4;
}
// User memory + fifo size
buffer_size = utils::align<u32>(buffer_size, 0x100000) + 0x10000000;
// We are not allowed to drain all memory so add a little
g_fxo->init<lv2_memory_container>(buffer_size + 0x1000000);
const u32 contextAddr = vm::alloc(sizeof(rsx_context), vm::main);
if (contextAddr == 0)
fmt::throw_exception("Capture Replay: context alloc failed");
const auto contextInfo = vm::ptr<rsx_context>::make(contextAddr);
// 'fake' initialize usermemory
sys_memory_allocate(*this, buffer_size, SYS_MEMORY_PAGE_SIZE_1M, contextInfo.ptr(&rsx_context::user_addr));
ensure((user_mem_addr = contextInfo->user_addr) != 0);
if (sys_rsx_device_map(*this, contextInfo.ptr(&rsx_context::dev_addr), vm::null, 0x8) != CELL_OK)
fmt::throw_exception("Capture Replay: sys_rsx_device_map failed!");
if (sys_rsx_memory_allocate(*this, contextInfo.ptr(&rsx_context::mem_handle), contextInfo.ptr(&rsx_context::mem_addr), 0x0F900000, 0, 0, 0, 0) != CELL_OK)
fmt::throw_exception("Capture Replay: sys_rsx_memory_allocate failed!");
if (sys_rsx_context_allocate(*this, contextInfo.ptr(&rsx_context::context_id), contextInfo.ptr(&rsx_context::dma_addr), contextInfo.ptr(&rsx_context::driver_info), contextInfo.ptr(&rsx_context::reports_addr), contextInfo->mem_handle, 0) != CELL_OK)
fmt::throw_exception("Capture Replay: sys_rsx_context_allocate failed!");
get_current_renderer()->main_mem_size = buffer_size;
// Map the user buffer at IO address 0 so the FIFO offsets used below stay simple
if (sys_rsx_context_iomap(*this, contextInfo->context_id, 0, user_mem_addr, buffer_size, 0xf000000000000800ull) != CELL_OK)
fmt::throw_exception("Capture Replay: rsx io mapping failed!")
;
return contextInfo->context_id;
}
// Writes the recorded command stream into the FIFO region of user memory (at
// offset 0x10000000) and returns the list of FIFO offsets ("stops") at which
// replay must pause to apply captured memory/display/tile state. The final
// entry is the end-of-stream offset.
std::vector<u32> rsx_replay_thread::alloc_write_fifo(be_t<u32> /*context_id*/) const
{
// copy commands into fifo buffer
// todo: could change rsx_command to just be values to avoid this loop,
auto fifo_addr = vm::ptr<u32>::make(user_mem_addr + 0x10000000);
u32 count = 0;
std::vector<u32> fifo_stops;
u32 currentOffset = 0x10000000;
for (const auto& rc : frame->replay_commands)
{
// A command needs a stop if it carries any captured state to apply beforehand
bool hasState = (!rc.memory_state.empty()) || (rc.display_buffer_state != 0) || (rc.tile_state != 0);
if (hasState)
{
if (count != 0)
{
// todo: support memory state in the middle of incremented command
// This shouldn't ever happen as long as captures stay in 'strict' aka non-multidraw mode
fmt::throw_exception("capture replay: state change not supported between increment commands");
}
fifo_stops.emplace_back(currentOffset);
}
// spit out command
if (count == 0)
{
// New method header: argument count lives in bits [18..28]
count = (rc.rsx_command.first >> 18) & 0x7ff;
*fifo_addr = rc.rsx_command.first;
fifo_addr++;
currentOffset += 4;
}
if (count != 0)
{
// One argument word per recorded command entry
*fifo_addr = rc.rsx_command.second;
fifo_addr++;
count--;
currentOffset += 4;
}
}
fifo_stops.emplace_back(currentOffset);
return fifo_stops;
}
// Applies the captured state attached to one replay command: copies recorded
// memory blocks back into guest memory, then (re)binds display buffers and
// tile/zcull state via sys_rsx_context_attribute. The cs.* hashes cache the
// last applied state so unchanged entries are skipped.
void rsx_replay_thread::apply_frame_state(be_t<u32> context_id, const frame_capture_data::replay_command& replay_cmd)
{
// apply memory needed for command
for (const auto& state : replay_cmd.memory_state)
{
auto it = frame->memory_map.find(state);
if (it == frame->memory_map.end())
fmt::throw_exception("requested memory state for command not found in memory_map");
const auto& memblock = it->second;
// The block descriptor references its payload by data_state hash
auto it_data = frame->memory_data_map.find(it->second.data_state);
if (it_data == frame->memory_data_map.end())
fmt::throw_exception("requested memory data state for command not found in memory_data_map");
const auto& data_block = it_data->second;
std::memcpy(vm::base(get_address(memblock.offset, memblock.location)), data_block.data.data(), data_block.data.size());
}
if (replay_cmd.display_buffer_state != 0 && replay_cmd.display_buffer_state != cs.display_buffer_hash)
{
auto it = frame->display_buffers_map.find(replay_cmd.display_buffer_state);
if (it == frame->display_buffers_map.end())
fmt::throw_exception("requested display buffer for command not found");
const auto& dbstate = it->second;
for (u32 i = 0; i < dbstate.count; ++i)
{
const auto& buf = dbstate.buffers[i];
// Skip buffers identical to what is already bound (hash 0 means nothing applied yet)
if (cs.display_buffer_hash != 0 && memcmp(&cs.buffer_state.buffers[i], &buf, sizeof(rsx::frame_capture_data::buffer_state)) == 0)
continue;
cs.buffer_state.buffers[i] = buf;
sys_rsx_context_attribute(context_id, 0x104, i,
u64{dbstate.buffers[i].width} << 32 | dbstate.buffers[i].height, u64{dbstate.buffers[i].pitch} << 32 | dbstate.buffers[i].offset, 0);
}
cs.display_buffer_hash = replay_cmd.display_buffer_state;
}
if (replay_cmd.tile_state != 0 && replay_cmd.tile_state != cs.tile_hash)
{
auto it = frame->tile_map.find(replay_cmd.tile_state);
if (it == frame->tile_map.end())
fmt::throw_exception("requested tile state command not found");
const auto& tstate = it->second;
for (u32 i = 0; i < limits::tiles_count; ++i)
{
const auto& ti = tstate.tiles[i];
if (cs.tile_hash != 0 && memcmp(&cs.tile_state.tiles[i], &ti, sizeof(rsx::frame_capture_data::tile_info)) == 0)
continue;
cs.tile_state.tiles[i] = ti;
sys_rsx_context_attribute(context_id, 0x300, i, u64{ti.tile} << 32 | ti.limit, u64{ti.pitch} << 32 | ti.format, 0);
}
for (u32 i = 0; i < limits::zculls_count; ++i)
{
const auto& zci = tstate.zculls[i];
if (cs.tile_hash != 0 && memcmp(&cs.tile_state.zculls[i], &zci, sizeof(rsx::frame_capture_data::zcull_info)) == 0)
continue;
cs.tile_state.zculls[i] = zci;
sys_rsx_context_attribute(context_id, 0x301, i, u64{zci.region} << 32 | zci.size, u64{zci.start} << 32 | zci.offset, u64{zci.status0} << 32 | zci.status1);
}
cs.tile_hash = replay_cmd.tile_state;
}
}
// Replay thread entry point. Allocates the RSX context and FIFO once, then loops
// the captured frame: restores the register snapshot, walks the command list,
// pauses at each precomputed FIFO stop to apply captured state, and advances the
// put pointer stop by stop. Honours emulator pause and thread-abort requests.
void rsx_replay_thread::cpu_task()
{
be_t<u32> context_id = allocate_context();
auto fifo_stops = alloc_write_fifo(context_id);
while (thread_ctrl::state() != thread_state::aborting)
{
// Load registers while the RSX is still idle
method_registers = frame->reg_state;
atomic_fence_seq_cst();
// start up fifo buffer by dumping the put ptr to first stop
sys_rsx_context_attribute(context_id, 0x001, 0x10000000, fifo_stops[0], 0, 0);
auto render = get_current_renderer();
auto last_flip = render->int_flip_index;
usz stopIdx = 0;
for (const auto& replay_cmd : frame->replay_commands)
{
while (Emu.IsPaused())
thread_ctrl::wait_for(10'000);
if (thread_ctrl::state() == thread_state::aborting)
break;
// Loop and hunt down our next state change that needs to be done
if (!(!replay_cmd.memory_state.empty() || (replay_cmd.display_buffer_state != 0) || (replay_cmd.tile_state != 0)))
continue;
// wait until rsx idle and at our first 'stop' to apply state
while (thread_ctrl::state() != thread_state::aborting && !render->is_fifo_idle() && (render->ctrl->get != fifo_stops[stopIdx]))
{
if (Emu.IsPaused())
thread_ctrl::wait_for(10'000);
else
std::this_thread::yield();
}
stopIdx++;
apply_frame_state(context_id, replay_cmd);
// move put ptr to next stop
if (stopIdx >= fifo_stops.size())
fmt::throw_exception("Capture Replay: StopIdx greater than size of fifo_stops");
render->ctrl->put = fifo_stops[stopIdx];
}
// dump put to end of stops, which should have actual end
u32 end = fifo_stops.back();
render->ctrl->put = end;
// Drain the FIFO before restarting the frame
while (!render->is_fifo_idle() && thread_ctrl::state() != thread_state::aborting)
{
if (Emu.IsPaused())
thread_ctrl::wait_for(10'000);
else
std::this_thread::yield();
}
// Check if the captured application used syscall instead of a gcm command to flip
if (render->int_flip_index == last_flip)
{
// Capture did not include a display flip, flip manually
render->request_emu_flip(1u);
}
// random pause to not destroy gpu
thread_ctrl::wait_for(10'000);
}
get_current_cpu_thread()->state += (cpu_flag::exit + cpu_flag::wait);
}
}
| 8,633
|
C++
|
.cpp
| 198
| 39.464646
| 250
| 0.678073
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,398
|
rsx_capture.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Capture/rsx_capture.cpp
|
#include "stdafx.h"
#include "rsx_capture.h"
#include "Emu/RSX/Common/BufferUtils.h"
#include "Emu/RSX/Common/TextureUtils.h"
#include "Emu/RSX/Common/surface_store.h"
#include "Emu/RSX/GCM.h"
#include "Emu/RSX/RSXThread.h"
#include "Emu/Memory/vm.h"
#include "xxhash.h"
namespace rsx
{
namespace capture
{
void insert_mem_block_in_map(std::unordered_set<u64>& mem_changes, frame_capture_data::memory_block&& block, frame_capture_data::memory_block_data&& data)
{
if (!data.data.empty())
{
u64 data_hash = XXH64(data.data.data(), data.data.size(), 0);
block.data_state = data_hash;
auto it = frame_capture.memory_data_map.find(data_hash);
if (it != frame_capture.memory_data_map.end())
{
if (it->second.data != data.data)
// screw this
fmt::throw_exception("Memory map hash collision detected...cant capture");
}
else
frame_capture.memory_data_map.insert(std::make_pair(data_hash, std::move(data)));
u64 block_hash = XXH64(&block, sizeof(frame_capture_data::memory_block), 0);
mem_changes.insert(block_hash);
if (frame_capture.memory_map.find(block_hash) == frame_capture.memory_map.end())
frame_capture.memory_map.insert(std::make_pair(block_hash, std::move(block)));
}
}
// Snapshots every piece of guest memory the last recorded draw command needs to
// replay correctly: the fragment shader ucode, fragment and vertex texture data,
// and either the raw vertex arrays (array draws) or the index buffer plus the
// index-bounded vertex range (indexed draws). Finally attaches the current
// display-buffer and tile state. Throws if no command exists to attach to.
void capture_draw_memory(thread* rsx)
{
// the idea here is to copy any memory that is needed to make the calls work
// todo:
// - tile / zcull state changing during other commands
// - track memory that is rendered into and ignore saving it later, this one will be tough
if (frame_capture.replay_commands.empty())
fmt::throw_exception("no replay commands to attach memory state to");
// shove the mem_changes onto the last issued command
std::unordered_set<u64>& mem_changes = frame_capture.replay_commands.back().memory_state;
// capture fragment shader mem
const auto [program_offset, program_location] = method_registers.shader_program_address();
const u32 addr = get_address(program_offset, program_location);
const auto program_info = program_hash_util::fragment_program_utils::analyse_fragment_program(vm::base(addr));
const u32 program_start = program_info.program_start_offset;
const u32 ucode_size = program_info.program_ucode_length;
frame_capture_data::memory_block block;
block.offset = program_offset;
block.location = program_location;
frame_capture_data::memory_block_data block_data;
// Copy the header region plus the ucode itself
block_data.data.resize(ucode_size + program_start);
std::memcpy(block_data.data.data(), vm::base(addr), ucode_size + program_start);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
// vertex shader is passed in registers, so it can be ignored
// save fragment tex mem
for (const auto& tex : method_registers.fragment_textures)
{
if (!tex.enabled())
continue;
const u32 texaddr = get_address(tex.offset(), tex.location());
auto layout = get_subresources_layout(tex);
// todo: dont use this function and just get size somehow
usz texSize = 0;
for (const auto& l : layout)
texSize += l.data.size();
if (!texSize)
continue;
frame_capture_data::memory_block block;
block.offset = tex.offset();
block.location = tex.location();
frame_capture_data::memory_block_data block_data;
block_data.data.resize(texSize);
std::memcpy(block_data.data.data(), vm::base(texaddr), texSize);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
}
// save vertex texture mem
for (const auto& tex : method_registers.vertex_textures)
{
if (!tex.enabled())
continue;
const u32 texaddr = get_address(tex.offset(), tex.location());
auto layout = get_subresources_layout(tex);
// todo: dont use this function and just get size somehow
usz texSize = 0;
for (const auto& l : layout)
texSize += l.data.size();
if (!texSize)
continue;
frame_capture_data::memory_block block;
block.offset = tex.offset();
block.location = tex.location();
frame_capture_data::memory_block_data block_data;
block_data.data.resize(texSize);
std::memcpy(block_data.data.data(), vm::base(texaddr), texSize);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
}
// save vertex buffer memory
if (method_registers.current_draw_clause.command == draw_command::array)
{
const u32 input_mask = method_registers.vertex_attrib_input_mask();
for (u8 index = 0; index < limits::vertex_count; ++index)
{
const bool enabled = !!(input_mask & (1 << index));
if (!enabled)
continue;
const auto& info = method_registers.vertex_arrays_info[index];
if (!info.size())
continue;
// vert buffer
// Bit 31 of the recorded offset selects the memory location
const u32 base_address = get_vertex_offset_from_base(method_registers.vertex_data_base_offset(), info.offset() & 0x7fffffff);
const u32 memory_location = info.offset() >> 31;
const u32 addr = get_address(base_address, memory_location);
const u32 vertSize = get_vertex_type_size_on_host(info.type(), info.size());
const u32 vertStride = info.stride();
method_registers.current_draw_clause.begin();
do
{
// One capture block per draw range of the clause
const auto& range = method_registers.current_draw_clause.get_range();
const u32 vertCount = range.count;
const usz bufferSize = (vertCount - 1) * vertStride + vertSize;
frame_capture_data::memory_block block;
block.offset = base_address + (range.first * vertStride);
block.location = memory_location;
frame_capture_data::memory_block_data block_data;
block_data.data.resize(bufferSize);
std::memcpy(block_data.data.data(), vm::base(addr + (range.first * vertStride)), bufferSize);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
}
while (method_registers.current_draw_clause.next());
}
}
// save index buffer if used
else if (method_registers.current_draw_clause.command == draw_command::indexed)
{
const u32 input_mask = method_registers.vertex_attrib_input_mask();
const u32 base_address = method_registers.index_array_address();
const u32 memory_location = method_registers.index_array_location();
const auto index_type = method_registers.index_type();
const u32 type_size = get_index_type_size(index_type);
// Align the base address down to the index element size
const u32 base_addr = get_address(base_address, memory_location) & (0 - type_size);
// manually parse index buffer and copy vertex buffer
u32 min_index = 0xFFFFFFFF, max_index = 0;
const bool is_primitive_restart_enabled = method_registers.restart_index_enabled();
const u32 primitive_restart_index = method_registers.restart_index();
method_registers.current_draw_clause.begin();
do
{
const auto& range = method_registers.current_draw_clause.get_range();
const u32 idxFirst = range.first;
const u32 idxCount = range.count;
const u32 idxAddr = base_addr + (idxFirst * type_size);
const usz bufferSize = idxCount * type_size;
frame_capture_data::memory_block block;
block.offset = base_address + (idxFirst * type_size);
block.location = memory_location;
frame_capture_data::memory_block_data block_data;
block_data.data.resize(bufferSize);
std::memcpy(block_data.data.data(), vm::base(idxAddr), bufferSize);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
// Track the index range actually referenced, skipping restart indices
switch (index_type)
{
case index_array_type::u16:
{
auto fifo = vm::ptr<u16>::make(idxAddr);
for (u32 i = 0; i < idxCount; ++i)
{
u16 index = fifo[i];
if (is_primitive_restart_enabled && u32{index} == primitive_restart_index)
continue;
index = static_cast<u16>(get_index_from_base(index, method_registers.vertex_data_base_index()));
min_index = std::min<u16>(index, static_cast<u16>(min_index));
max_index = std::max<u16>(index, static_cast<u16>(max_index));
}
break;
}
case index_array_type::u32:
{
auto fifo = vm::ptr<u32>::make(idxAddr);
for (u32 i = 0; i < idxCount; ++i)
{
u32 index = fifo[i];
if (is_primitive_restart_enabled && index == primitive_restart_index)
continue;
index = get_index_from_base(index, method_registers.vertex_data_base_index());
min_index = std::min(index, min_index);
max_index = std::max(index, max_index);
}
break;
}
}
}
while (method_registers.current_draw_clause.next());
// Copy only the vertex range [min_index, max_index] that the indices reference
if (min_index <= max_index)
{
for (u8 index = 0; index < limits::vertex_count; ++index)
{
const bool enabled = !!(input_mask & (1 << index));
if (!enabled)
continue;
const auto& info = method_registers.vertex_arrays_info[index];
if (!info.size())
continue;
// vert buffer
const u32 vertStride = info.stride();
const u32 base_address = get_vertex_offset_from_base(method_registers.vertex_data_base_offset(), (info.offset() & 0x7fffffff));
const u32 memory_location = info.offset() >> 31;
const u32 addr = get_address(base_address, memory_location);
const u32 vertSize = get_vertex_type_size_on_host(info.type(), info.size());
const u32 bufferSize = vertStride * (max_index - min_index + 1) + vertSize;
frame_capture_data::memory_block block;
block.offset = base_address + (min_index * vertStride);
block.location = memory_location;
frame_capture_data::memory_block_data block_data;
block_data.data.resize(bufferSize);
std::memcpy(block_data.data.data(), vm::base(addr + (min_index * vertStride)), bufferSize);
insert_mem_block_in_map(mem_changes, std::move(block), std::move(block_data));
}
}
}
capture_display_tile_state(rsx, frame_capture.replay_commands.back());
}
// i realize these are a slight copy pasta of the rsx_method implementations but its kinda unavoidable currently
// Captures the source pixel memory of a blit-engine image_in transfer so the
// blit can be reproduced on replay. No-ops when the transfer is degenerate
// (zero input or clip dimensions). Also snapshots display/tile state.
void capture_image_in(thread* rsx, frame_capture_data::replay_command& replay_command)
{
//const rsx::blit_engine::transfer_operation operation = method_registers.blit_engine_operation();
const u16 clip_w = std::min(method_registers.blit_engine_output_width(), method_registers.blit_engine_clip_width());
const u16 clip_h = std::min(method_registers.blit_engine_output_height(), method_registers.blit_engine_clip_height());
const u16 in_w = method_registers.blit_engine_input_width();
const u16 in_h = method_registers.blit_engine_input_height();
//const blit_engine::transfer_origin in_origin = method_registers.blit_engine_input_origin();
//const blit_engine::transfer_interpolator in_inter = method_registers.blit_engine_input_inter();
const rsx::blit_engine::transfer_source_format src_color_format = method_registers.blit_engine_src_color_format();
const f32 in_x = std::floor(method_registers.blit_engine_in_x());
const f32 in_y = std::floor(method_registers.blit_engine_in_y());
u16 in_pitch = method_registers.blit_engine_input_pitch();
if (in_w == 0 || in_h == 0 || clip_w == 0 || clip_h == 0)
{
return;
}
const u32 src_offset = method_registers.blit_engine_input_offset();
const u32 src_dma = method_registers.blit_engine_input_location();
const u32 in_bpp = (src_color_format == rsx::blit_engine::transfer_source_format::r5g6b5) ? 2 : 4; // bytes per pixel
// Byte offset of the (in_x, in_y) start position within the source surface
const u32 in_offset = u32(in_x * in_bpp + in_pitch * in_y);
frame_capture_data::memory_block block;
block.offset = src_offset + in_offset;
block.location = src_dma & 0xf;
const auto src_address = rsx::get_address(block.offset, block.location);
u8* pixels_src = vm::_ptr<u8>(src_address);
// Last row only needs the visible width, not the full pitch
const u32 src_size = in_pitch * (in_h - 1) + (in_w * in_bpp);
// Make sure any pending GPU writes to the source region land before copying
rsx->read_barrier(src_address, src_size, true);
frame_capture_data::memory_block_data block_data;
block_data.data.resize(src_size);
std::memcpy(block_data.data.data(), pixels_src, src_size);
insert_mem_block_in_map(replay_command.memory_state, std::move(block), std::move(block_data));
capture_display_tile_state(rsx, replay_command);
}
// Captures the source memory of an NV0039 buffer transfer (line_count rows of
// line_length bytes, in_pitch apart). Rows are packed tightly into the capture
// block even when the source pitch is larger. Also snapshots display/tile state.
void capture_buffer_notify(thread* rsx, frame_capture_data::replay_command& replay_command)
{
s32 in_pitch = method_registers.nv0039_input_pitch();
const u32 line_length = method_registers.nv0039_line_length();
const u32 line_count = method_registers.nv0039_line_count();
//const u8 in_format = method_registers.nv0039_input_format();
u32 src_offset = method_registers.nv0039_input_offset();
u32 src_dma = method_registers.nv0039_input_location();
u32 src_addr = get_address(src_offset, src_dma);
// Flush pending GPU writes covering the whole strided source region
rsx->read_barrier(src_addr, in_pitch * (line_count - 1) + line_length, true);
const u8* src = vm::_ptr<u8>(src_addr);
frame_capture_data::memory_block block;
block.offset = src_offset;
block.location = src_dma;
frame_capture_data::memory_block_data block_data;
block_data.data.resize(in_pitch * (line_count - 1) + line_length);
for (u32 i = 0; i < line_count; ++i)
{
std::memcpy(block_data.data.data() + (line_length * i), src, line_length);
src += in_pitch;
}
insert_mem_block_in_map(replay_command.memory_state, std::move(block), std::move(block_data));
capture_display_tile_state(rsx, replay_command);
}
// Snapshots the current display-buffer configuration and tile/zcull state,
// deduplicates each snapshot by XXH64 hash into the capture's global maps,
// and tags the replay command with the two resulting hashes.
void capture_display_tile_state(thread* rsx, frame_capture_data::replay_command& replay_command)
{
frame_capture_data::display_buffers_state dbstate;
dbstate.count = rsx->display_buffers_count;
// should this only happen on flip?
for (u32 i = 0; i < rsx->display_buffers_count; ++i)
{
const auto& db = rsx->display_buffers[i];
dbstate.buffers[i].height = db.height;
dbstate.buffers[i].width = db.width;
dbstate.buffers[i].offset = db.offset;
dbstate.buffers[i].pitch = db.pitch;
}
const u64 dbnum = XXH64(&dbstate, sizeof(frame_capture_data::display_buffers_state), 0);
if (frame_capture.display_buffers_map.find(dbnum) == frame_capture.display_buffers_map.end())
frame_capture.display_buffers_map.insert(std::make_pair(dbnum, std::move(dbstate)));
// todo: hook tile call sys_rsx call or something
frame_capture_data::tile_state tilestate;
for (u32 i = 0; i < limits::tiles_count; ++i)
{
const auto tile = rsx->tiles[i].pack();
auto& tstate = tilestate.tiles[i];
tstate.tile = tile.tile;
tstate.limit = tile.limit;
// Unbound tiles record zeroed pitch/format so the hash reflects binding state
tstate.pitch = rsx->tiles[i].bound ? u32{tile.pitch} : 0;
tstate.format = rsx->tiles[i].bound ? u32{tile.format} : 0;
}
for (u32 i = 0; i < limits::zculls_count; ++i)
{
const auto zc = rsx->zculls[i].pack();
auto& zcstate = tilestate.zculls[i];
zcstate.region = zc.region;
zcstate.size = zc.size;
zcstate.start = zc.start;
zcstate.offset = zc.offset;
zcstate.status0 = rsx->zculls[i].bound ? u32{zc.status0} : 0;
zcstate.status1 = rsx->zculls[i].bound ? u32{zc.status1} : 0;
}
const u64 tsnum = XXH64(&tilestate, sizeof(frame_capture_data::tile_state), 0);
if (frame_capture.tile_map.find(tsnum) == frame_capture.tile_map.end())
frame_capture.tile_map.insert(std::make_pair(tsnum, std::move(tilestate)));
replay_command.display_buffer_state = dbnum;
replay_command.tile_state = tsnum;
}
}
}
| 15,497
|
C++
|
.cpp
| 330
| 41.821212
| 156
| 0.676778
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
5,399
|
RSXDMAWriter.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Host/RSXDMAWriter.cpp
|
#include "stdafx.h"
#include "RSXDMAWriter.h"
#include "Utilities//Thread.h"
#include <util/asm.hpp>
namespace rsx
{
// Drains pending host-GPU write jobs in FIFO order. A job is retired when no
// handler is registered for its dispatch class, or when the matching handler
// reports it consumed the job. A handler rejecting its job halts processing
// until the next update call.
void RSXDMAWriter::update()
{
	if (m_dispatch_handlers.empty())
	{
		// Nothing can ever service these jobs; discard the backlog
		m_job_queue.clear();
		return;
	}

	while (!m_job_queue.empty())
	{
		const auto job = m_job_queue.front();
		const auto dispatch = m_dispatch_handlers.find(job.dispatch_class);

		if (dispatch != m_dispatch_handlers.end() && !dispatch->second.handler(m_host_context_ptr, &job))
		{
			// Dispatcher found and rejected the job. Stop, we'll try again later.
			break;
		}

		// No handler registered, or the callback consumed the job
		m_job_queue.pop_front();
	}
}
// Registers (or replaces) the handler for the handler's own dispatch class.
void RSXDMAWriter::register_handler(host_dispatch_handler_t handler)
{
m_dispatch_handlers[handler.dispatch_class] = handler;
}
// Removes the handler for the given dispatch class; no-op if none is registered.
void RSXDMAWriter::deregister_handler(int dispatch_class)
{
m_dispatch_handlers.erase(dispatch_class);
}
// Appends a write request to the back of the job queue; serviced on the next update().
void RSXDMAWriter::enqueue(const host_gpu_write_op_t& request)
{
m_job_queue.push_back(request);
}
// Spin-waits until the host context reports all in-flight commands completed.
// Returns immediately when no context is bound; bails out if the owning thread
// is being torn down.
void RSXDMAWriter::drain_label_queue()
{
	if (!m_host_context_ptr)
	{
		return;
	}

	// FIXME: This is a busy wait, consider yield to improve responsiveness on weak devices.
	for (;;)
	{
		if (m_host_context_ptr->in_flight_commands_completed())
		{
			break;
		}

		utils::pause();

		if (thread_ctrl::state() == thread_state::aborting)
		{
			break;
		}
	}
}
}
| 1,453
|
C++
|
.cpp
| 56
| 21.660714
| 97
| 0.672727
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,400
|
overlay_cursor.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_cursor.cpp
|
#include "stdafx.h"
#include "overlay_cursor.h"
#include "overlay_manager.h"
namespace rsx
{
	namespace overlays
	{
		// A crosshair cursor built from one horizontal and one vertical bar.
		cursor_item::cursor_item()
		{
			m_cross_h.set_size(15, 1);
			m_cross_v.set_size(1, 15);
		}

		// Moves the crosshair. Returns true when the position actually changed.
		bool cursor_item::set_position(s16 x, s16 y)
		{
			if (x == m_x && y == m_y)
			{
				return false;
			}

			m_x = x;
			m_y = y;

			// Keep both bars centered on the new position.
			m_cross_h.set_pos(m_x - m_cross_h.w / 2, m_y - m_cross_h.h / 2);
			m_cross_v.set_pos(m_x - m_cross_v.w / 2, m_y - m_cross_v.h / 2);
			return true;
		}

		// Recolors the crosshair. Returns true when the color actually changed.
		bool cursor_item::set_color(color4f color)
		{
			const bool already_set = m_cross_h.back_color == color && m_cross_v.back_color == color;
			if (already_set)
			{
				return false;
			}

			m_cross_h.back_color = color;
			m_cross_h.refresh();

			m_cross_v.back_color = color;
			m_cross_v.refresh();
			return true;
		}

		void cursor_item::set_expiration(u64 expiration_time)
		{
			m_expiration_time = expiration_time;
		}

		// Refreshes and returns the visibility flag based on the given time.
		bool cursor_item::update_visibility(u64 time)
		{
			m_visible = (time <= m_expiration_time);
			return m_visible;
		}

		bool cursor_item::visible() const
		{
			return m_visible;
		}

		compiled_resource cursor_item::get_compiled()
		{
			if (!m_visible)
			{
				return {};
			}

			compiled_resource cr = m_cross_h.get_compiled();
			cr.add(m_cross_v.get_compiled());
			return cr;
		}

		// Re-evaluates every cursor's visibility; hides the whole layer once
		// no cursor remains visible.
		void cursor_manager::update(u64 timestamp_us)
		{
			if (!visible)
			{
				return;
			}

			std::lock_guard lock(m_mutex);

			bool any_visible = false;
			for (auto& [id, cursor] : m_cursors)
			{
				// No short-circuit here: every cursor must be refreshed.
				if (cursor.update_visibility(timestamp_us))
				{
					any_visible = true;
				}
			}

			if (!any_visible)
			{
				visible = false;
			}
		}

		compiled_resource cursor_manager::get_compiled()
		{
			if (!visible)
			{
				return {};
			}

			std::lock_guard lock(m_mutex);

			compiled_resource cr{};
			for (auto& [id, cursor] : m_cursors)
			{
				cr.add(cursor.get_compiled());
			}
			return cr;
		}

		// Creates or refreshes a cursor and extends its lifetime by duration_us.
		void cursor_manager::update_cursor(u32 id, s16 x, s16 y, const color4f& color, u64 duration_us, bool force_update)
		{
			std::lock_guard lock(m_mutex);

			cursor_item& cursor = m_cursors[id];

			// Both setters must run; each reports whether anything changed.
			const bool moved = cursor.set_position(x, y);
			const bool recolored = cursor.set_color(color);

			if (!moved && !recolored && !force_update)
			{
				return;
			}

			const u64 expiration_time = get_system_time() + duration_us;
			cursor.set_expiration(expiration_time);

			if (cursor.update_visibility(expiration_time))
			{
				visible = true;
			}
		}

		// Free-function entry point: routes a cursor update through the
		// display manager, creating the cursor overlay on first use.
		void set_cursor(u32 id, s16 x, s16 y, const color4f& color, u64 duration_us, bool force_update)
		{
			auto manager = g_fxo->try_get<rsx::overlays::display_manager>();
			if (!manager)
			{
				return;
			}

			auto cursor_overlay = manager->get<rsx::overlays::cursor_manager>();
			if (!cursor_overlay)
			{
				cursor_overlay = std::make_shared<rsx::overlays::cursor_manager>();
				cursor_overlay = manager->add(cursor_overlay);
			}
			cursor_overlay->update_cursor(id, x, y, color, duration_us, force_update);
		}
	} // namespace overlays
} // namespace rsx
| 2,973
|
C++
|
.cpp
| 121
| 20.570248
| 116
| 0.639943
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,401
|
overlay_animation.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_animation.cpp
|
#include "stdafx.h"
#include "overlay_animation.h"
#include "overlay_controls.h"
#include "Emu/system_config.h"
namespace rsx
{
	namespace overlays
	{
		// Records the animation window [start, start + duration].
		void animation_base::begin_animation(u64 timestamp_us)
		{
			timestamp_start_us = timestamp_us;
			timestamp_end_us = timestamp_us + get_total_duration_us();
		}

		// Total duration, converted from seconds to microseconds.
		u64 animation_base::get_total_duration_us() const
		{
			return u64(duration_sec * 1'000'000.f);
		}

		u64 animation_base::get_remaining_duration_us(u64 timestamp_us) const
		{
			return timestamp_us >= timestamp_end_us ? 0 : (timestamp_end_us - timestamp_us);
		}

		// Maps elapsed time onto [0, 1] and applies the configured easing curve.
		// NOTE(review): assumes timestamp_end_us > timestamp_start_us while the
		// animation runs (callers invoke begin_animation() first); a zero-length
		// window would divide by zero here - confirm upstream invariants.
		f32 animation_base::get_progress_ratio(u64 timestamp_us) const
		{
			if (!timestamp_start_us)
			{
				return 0.f;
			}

			f32 t = f32(timestamp_us - timestamp_start_us) / (timestamp_end_us - timestamp_start_us);
			switch (type)
			{
			case animation_type::linear:
				break;
			case animation_type::ease_in_quad:
				t = t * t;
				break;
			case animation_type::ease_out_quad:
				t = t * (2.0f - t);
				break;
			case animation_type::ease_in_out_cubic:
				// Cubic ease-in-out. Plain multiplication instead of std::pow:
				// pow() is a general transcendental call and this runs per frame.
				// For t > 0.5: 4*(t-1)^3 + 1, else 4*t^3 (same curve as before).
				if (t > 0.5f)
				{
					const f32 u = t - 1.0f;
					t = 4.0f * u * u * u + 1.0f;
				}
				else
				{
					t = 4.0f * t * t * t;
				}
				break;
			}
			return t;
		}

		// Stops the animation and rewinds it to its start position.
		void animation_translate::reset(u64 start_timestamp_us)
		{
			active = false;
			current = start;
			timestamp_start_us = start_timestamp_us;

			if (timestamp_start_us > 0)
			{
				timestamp_end_us = timestamp_start_us + get_total_duration_us();
			}
		}

		// Offsets every vertex of the compiled resource by the current position.
		void animation_translate::apply(compiled_resource& resource)
		{
			if (!active)
			{
				return;
			}

			const vertex delta = { current.x, current.y, current.z, 0.f };
			for (auto& cmd : resource.draw_commands)
			{
				for (auto& v : cmd.verts)
				{
					v += delta;
				}
			}
		}

		void animation_translate::update(u64 timestamp_us)
		{
			if (!active)
			{
				return;
			}

			if (timestamp_start_us == 0)
			{
				// First update after activation: start from the current position.
				start = current;
				begin_animation(timestamp_us);
				return;
			}

			if (timestamp_us >= timestamp_end_us)
			{
				// Exit condition
				finish();
				return;
			}

			f32 t = get_progress_ratio(timestamp_us);
			current = lerp(start, end, t);
		}

		void animation_translate::finish()
		{
			active = false;
			timestamp_start_us = 0;
			timestamp_end_us = 0;
			current = end; // Snap current to limit in case we went over

			if (on_finish)
			{
				on_finish();
			}
		}

		// Stops the color animation and rewinds it to its start color.
		void animation_color_interpolate::reset(u64 start_timestamp_us)
		{
			active = false;
			current = start;
			timestamp_start_us = start_timestamp_us;

			if (timestamp_start_us > 0)
			{
				timestamp_end_us = timestamp_start_us + get_total_duration_us();
			}
		}

		// Modulates every draw command's color by the current value.
		void animation_color_interpolate::apply(compiled_resource& data)
		{
			if (!active)
			{
				return;
			}

			for (auto& cmd : data.draw_commands)
			{
				cmd.config.color *= current;
			}
		}

		void animation_color_interpolate::update(u64 timestamp_us)
		{
			if (!active)
			{
				return;
			}

			if (timestamp_start_us == 0)
			{
				// First update after activation: start from the current color.
				start = current;
				begin_animation(timestamp_us);
				return;
			}

			if (timestamp_us >= timestamp_end_us)
			{
				finish();
				return;
			}

			f32 t = get_progress_ratio(timestamp_us);
			current = lerp(start, end, t);
		}

		void animation_color_interpolate::finish()
		{
			active = false;
			timestamp_start_us = 0;
			timestamp_end_us = 0;
			current = end; // Snap to the target color in case we overshot

			if (on_finish)
			{
				on_finish();
			}
		}
	}
}
| 3,378
|
C++
|
.cpp
| 154
| 17.954545
| 92
| 0.636221
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,402
|
overlay_compile_notification.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_compile_notification.cpp
|
#include "stdafx.h"
#include "overlays.h"
#include "overlay_message.h"
#include "overlay_loading_icon.hpp"
namespace rsx
{
	namespace overlays
	{
		// Icon instances are cached in statics: constructing one reads the
		// image from the filesystem, which is too expensive to repeat.
		static std::shared_ptr<loading_icon24> s_shader_loading_icon24;
		static std::shared_ptr<loading_icon24> s_ppu_loading_icon24;

		// Shows the transient "compiling shaders" message with a spinner icon.
		void show_shader_compile_notification()
		{
			if (!s_shader_loading_icon24)
			{
				s_shader_loading_icon24 = std::make_shared<loading_icon24>();
			}

			queue_message(
				localized_string_id::RSX_OVERLAYS_COMPILING_SHADERS,
				5'000'000,
				{},
				message_pin_location::bottom_left,
				s_shader_loading_icon24,
				true);
		}

		// Shows the "compiling PPU modules" message. Returns a shared counter
		// (initialized to 1) that is also handed to the message queue -
		// presumably used to track/extend the message lifetime; see queue_message.
		std::shared_ptr<atomic_t<u32>> show_ppu_compile_notification()
		{
			if (!s_ppu_loading_icon24)
			{
				s_ppu_loading_icon24 = std::make_shared<loading_icon24>();
			}

			auto refs = std::make_shared<atomic_t<u32>>(1);

			queue_message(
				localized_string_id::RSX_OVERLAYS_COMPILING_PPU_MODULES,
				20'000'000,
				refs,
				message_pin_location::bottom_left,
				s_ppu_loading_icon24,
				true);

			return refs;
		}
	}
}
| 1,221
|
C++
|
.cpp
| 44
| 24.022727
| 76
| 0.700855
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,403
|
overlay_user_list_dialog.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_user_list_dialog.cpp
|
#include "stdafx.h"
#include "overlay_manager.h"
#include "overlay_user_list_dialog.h"
#include "Emu/vfs_config.h"
#include "Emu/system_utils.hpp"
#include "Emu/System.h"
#include "Utilities/StrUtil.h"
#include "Utilities/Thread.h"
namespace rsx
{
namespace overlays
{
// One row of the user list: avatar image on the left, username + user id
// stacked vertically on the right.
user_list_dialog::user_list_dialog::user_list_entry::user_list_entry(const std::string& username, const std::string& user_id, const std::string& avatar_path)
{
	std::unique_ptr<overlay_element> image = std::make_unique<image_view>();
	image->set_size(160, 110);
	image->set_padding(36, 36, 11, 11); // Square image, 88x88
	if (fs::exists(avatar_path))
	{
		// Keep the decoded image alive for as long as this entry exists.
		icon_data = std::make_unique<image_info>(avatar_path.c_str());
		static_cast<image_view*>(image.get())->set_raw_image(icon_data.get());
	}
	else
	{
		// Fallback
		// TODO: use proper icon
		static_cast<image_view*>(image.get())->set_image_resource(resource_config::standard_image_resource::square);
	}
	// Text column: spacer on top, then username (header), then user id.
	std::unique_ptr<overlay_element> text_stack = std::make_unique<vertical_layout>();
	std::unique_ptr<overlay_element> padding = std::make_unique<spacer>();
	std::unique_ptr<overlay_element> header_text = std::make_unique<label>(username);
	std::unique_ptr<overlay_element> subtext = std::make_unique<label>(user_id);
	padding->set_size(1, 1);
	header_text->set_size(800, 40);
	header_text->set_font("Arial", 16);
	header_text->set_wrap_text(true);
	subtext->set_size(800, 0);
	subtext->set_font("Arial", 14);
	subtext->set_wrap_text(true);
	static_cast<label*>(subtext.get())->auto_resize(true);
	// Make back color transparent for text
	header_text->back_color.a = 0.f;
	subtext->back_color.a = 0.f;
	static_cast<vertical_layout*>(text_stack.get())->pack_padding = 5;
	static_cast<vertical_layout*>(text_stack.get())->add_element(padding);
	static_cast<vertical_layout*>(text_stack.get())->add_element(header_text);
	static_cast<vertical_layout*>(text_stack.get())->add_element(subtext);
	if (text_stack->h > image->h)
	{
		// Text column ended up taller than the avatar; add bottom breathing room.
		std::unique_ptr<overlay_element> padding2 = std::make_unique<spacer>();
		padding2->set_size(1, 5);
		static_cast<vertical_layout*>(text_stack.get())->add_element(padding2);
	}
	// Pack
	this->pack_padding = 15;
	add_element(image);
	add_element(text_stack);
}
// Builds the static layout: dimmed backdrop, title label, and the user list.
user_list_dialog::user_list_dialog()
{
	// Semi-transparent backdrop behind the whole dialog.
	m_dim_background = std::make_unique<overlay_element>();
	m_dim_background->set_size(virtual_width, virtual_height);
	m_dim_background->back_color.a = 0.5f;

	// Title label above the list.
	m_description = std::make_unique<label>();
	m_description->set_font("Arial", 20);
	m_description->set_pos(20, 37);
	m_description->set_text("Select user"); // Fallback. I don't think this will ever be used, so I won't localize it.
	m_description->auto_resize();
	m_description->back_color.a = 0.f;

	// Scrollable list holding the selectable users.
	m_list = std::make_unique<list_view>(virtual_width - 2 * 20, 540);
	m_list->set_pos(20, 85);

	fade_animation.duration_sec = 0.15f;

	return_code = selection_code::canceled;
}
// Per-frame tick; only the fade in/out animation needs updating.
void user_list_dialog::update(u64 timestamp_us)
{
	if (!fade_animation.active)
	{
		return;
	}

	fade_animation.update(timestamp_us);
}
// Handles pad input: cross selects the highlighted user (its id becomes the
// return code), circle cancels, dpad/sticks/shoulder buttons navigate.
void user_list_dialog::on_button_pressed(pad_button button_press, bool is_auto_repeat)
{
	// Ignore input while fading in/out.
	if (fade_animation.active) return;
	bool close_dialog = false;
	switch (button_press)
	{
	case pad_button::cross:
		if (m_list->m_items.empty())
			break;
		if (const usz index = static_cast<usz>(m_list->get_selected_index()); index < m_entry_ids.size())
		{
			// Return the selected user's id to the caller.
			return_code = static_cast<s32>(m_entry_ids[index]);
		}
		else
		{
			return_code = selection_code::error;
		}
		Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
		close_dialog = true;
		break;
	case pad_button::circle:
		Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
		close_dialog = true;
		break;
	case pad_button::dpad_up:
	case pad_button::ls_up:
		m_list->select_previous();
		break;
	case pad_button::dpad_down:
	case pad_button::ls_down:
		m_list->select_next();
		break;
	case pad_button::L1:
		// Shoulder buttons page through the list ten entries at a time.
		m_list->select_previous(10);
		break;
	case pad_button::R1:
		m_list->select_next(10);
		break;
	default:
		rsx_log.trace("[ui] Button %d pressed", static_cast<u8>(button_press));
		break;
	}
	if (close_dialog)
	{
		// Fade out, then close (the on_finish callback fires close()).
		fade_animation.current = color4f(1.f);
		fade_animation.end = color4f(0.f);
		fade_animation.active = true;
		fade_animation.on_finish = [this]
		{
			close(true, true);
		};
	}
	// Play a sound unless this is a fast auto repeat which would induce a nasty noise
	else if (!is_auto_repeat || m_auto_repeat_ms_interval >= m_auto_repeat_ms_interval_default)
	{
		Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cursor.wav");
	}
}
// Composites the dialog: backdrop first, then the list, then the title,
// with the fade animation modulating the final result.
compiled_resource user_list_dialog::get_compiled()
{
	if (!visible)
	{
		return {};
	}

	compiled_resource result{};
	result.add(m_dim_background->get_compiled());
	result.add(m_list->get_compiled());
	result.add(m_description->get_compiled());

	fade_animation.apply(result);

	return result;
}
// Populates the list from the given user ids, shows the dialog, and blocks
// until it is dismissed (or emulation stops). Always returns CELL_OK; the
// actual selection is reported through on_close/return_code.
error_code user_list_dialog::show(const std::string& title, u32 focused, const std::vector<u32>& user_ids, bool enable_overlay, std::function<void(s32 status)> on_close)
{
	visible = false;
	// Stronger dim when shown as an overlay on top of a running game.
	if (enable_overlay)
	{
		m_dim_background->back_color.a = 0.9f;
	}
	else
	{
		m_dim_background->back_color.a = 0.5f;
	}
	std::vector<std::unique_ptr<overlay_element>> entries;
	const std::string home_dir = rpcs3::utils::get_hdd0_dir() + "home/";
	s32 selected_index = 0;
	for (const auto& id : user_ids)
	{
		const std::string user_id = fmt::format("%08d", id);
		// Only list ids that have a username file on the emulated HDD.
		if (const fs::file file{home_dir + user_id + "/localusername"})
		{
			if (id == focused)
			{
				// Remember which row to preselect.
				selected_index = static_cast<s32>(entries.size());
			}
			// Let's assume there are 26 avatar pngs (like in my installation)
			const std::string avatar_path = g_cfg_vfs.get_dev_flash() + fmt::format("vsh/resource/explore/user/%03d.png", id % 26);
			const std::string username = file.to_string();
			std::unique_ptr<overlay_element> entry = std::make_unique<user_list_entry>(username, user_id, avatar_path);
			entries.emplace_back(std::move(entry));
			m_entry_ids.emplace_back(id);
		}
	}
	for (auto& entry : entries)
	{
		m_list->add_entry(entry);
	}
	if (m_list->m_items.empty())
	{
		m_list->set_cancel_only(true);
	}
	else
	{
		// Only select an entry if there are entries available
		m_list->select_entry(selected_index);
	}
	m_description->set_text(title);
	m_description->auto_resize();
	// Fade the dialog in.
	fade_animation.current = color4f(0.f);
	fade_animation.end = color4f(1.f);
	fade_animation.active = true;
	this->on_close = std::move(on_close);
	visible = true;
	// Hand input over to the overlay manager and wait until it signals that
	// the dialog's input session ended (or emulation stops).
	const auto notify = std::make_shared<atomic_t<u32>>(0);
	auto& overlayman = g_fxo->get<display_manager>();
	overlayman.attach_thread_input(
		uid, "User list dialog",
		[notify]() { *notify = true; notify->notify_one(); }
	);
	while (!Emu.IsStopped() && !*notify)
	{
		notify->wait(0, atomic_wait_timeout{1'000'000});
	}
	return CELL_OK;
}
} // namespace overlays
} // namespace RSX
| 7,324
|
C++
|
.cpp
| 217
| 29.447005
| 171
| 0.66261
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,404
|
overlay_message_dialog.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_message_dialog.cpp
|
#include "stdafx.h"
#include "overlay_manager.h"
#include "overlay_message_dialog.h"
#include "Emu/System.h"
#include "Emu/system_config.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/IdManager.h"
#include "Utilities/Thread.h"
#include <thread>
namespace rsx
{
namespace overlays
{
// Builds the static dialog layout: darkened background, centered text area,
// separator bar, up to two progress bars, and the OK/Cancel buttons.
message_dialog::message_dialog(bool allow_custom_background)
	: custom_background_allowed(allow_custom_background)
{
	background.set_size(virtual_width, virtual_height);
	background.back_color.a = 0.85f;
	text_display.set_size(1100, 40);
	text_display.set_pos(90, 364);
	text_display.set_font("Arial", 16);
	text_display.align_text(overlay_element::text_align::center);
	text_display.set_wrap_text(true);
	text_display.back_color.a = 0.f;
	bottom_bar.back_color = color4f(1.f, 1.f, 1.f, 1.f);
	bottom_bar.set_size(1200, 2);
	bottom_bar.set_pos(40, 400);
	for (progress_bar& bar : progress_bars)
	{
		bar.set_size(800, 4);
		bar.back_color = color4f(0.25f, 0.f, 0.f, 0.85f);
	}
	btn_ok.set_text(localized_string_id::RSX_OVERLAYS_MSG_DIALOG_YES);
	btn_ok.set_size(140, 30);
	btn_ok.set_pos(545, 420);
	btn_ok.set_font("Arial", 16);
	btn_cancel.set_text(localized_string_id::RSX_OVERLAYS_MSG_DIALOG_NO);
	btn_cancel.set_size(140, 30);
	btn_cancel.set_pos(685, 420);
	btn_cancel.set_font("Arial", 16);
	// Button glyphs follow the configured enter-button assignment.
	if (g_cfg.sys.enter_button_assignment == enter_button_assign::circle)
	{
		btn_ok.set_image_resource(resource_config::standard_image_resource::circle);
		btn_cancel.set_image_resource(resource_config::standard_image_resource::cross);
	}
	else
	{
		btn_ok.set_image_resource(resource_config::standard_image_resource::cross);
		btn_cancel.set_image_resource(resource_config::standard_image_resource::circle);
	}
	fade_animation.duration_sec = 0.15f;
	update_custom_background();
	return_code = CELL_MSGDIALOG_BUTTON_NONE;
}
// Composites the dialog for the current frame. Consumes any pending text
// updates from the guards, then layers: custom background poster (if any),
// dim background, text, progress bars, and the interactive widgets.
compiled_resource message_dialog::get_compiled()
{
	if (!visible)
	{
		return {};
	}
	// Apply a pending main-text change posted via set_text().
	if (const auto [dirty, text] = text_guard.get_text(); dirty)
	{
		u16 text_w, text_h;
		text_display.set_pos(90, 364);
		text_display.set_text(text);
		text_display.measure_text(text_w, text_h);
		// Shift the text up so taller (wrapped) text stays anchored.
		text_display.translate(0, -(text_h - 16));
	}
	// Apply pending per-progress-bar label changes.
	for (u32 i = 0; i < progress_bars.size(); i++)
	{
		if (const auto [dirty, text] = ::at32(bar_text_guard, i).get_text(); dirty)
		{
			::at32(progress_bars, i).set_text(text);
		}
	}
	compiled_resource result;
	update_custom_background();
	if (background_image && background_image->data)
	{
		result.add(background_poster.get_compiled());
	}
	result.add(background.get_compiled());
	result.add(text_display.get_compiled());
	if (num_progress_bars > 0)
	{
		result.add(::at32(progress_bars, 0).get_compiled());
	}
	if (num_progress_bars > 1)
	{
		result.add(::at32(progress_bars, 1).get_compiled());
	}
	if (interactive)
	{
		// The separator bar is omitted when progress bars occupy that space.
		if (!num_progress_bars)
			result.add(bottom_bar.get_compiled());
		if (!cancel_only)
			result.add(btn_ok.get_compiled());
		if (!ok_only)
			result.add(btn_cancel.get_compiled());
	}
	fade_animation.apply(result);
	return result;
}
// Handles pad input: cross accepts (OK/YES), circle rejects (ESCAPE/NO),
// respecting the ok_only/cancel_only dialog modes. A valid press starts the
// fade-out which closes the dialog on completion.
void message_dialog::on_button_pressed(pad_button button_press, bool /*is_auto_repeat*/)
{
	// Ignore input while fading in/out.
	if (fade_animation.active) return;
	switch (button_press)
	{
	case pad_button::cross:
	{
		if (ok_only)
		{
			return_code = CELL_MSGDIALOG_BUTTON_OK;
		}
		else if (cancel_only)
		{
			// Do not accept for cancel-only dialogs
			return;
		}
		else
		{
			return_code = CELL_MSGDIALOG_BUTTON_YES;
		}
		Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_decide.wav");
		break;
	}
	case pad_button::circle:
	{
		if (ok_only)
		{
			// Ignore cancel operation for Ok-only
			return;
		}
		if (cancel_only)
		{
			return_code = CELL_MSGDIALOG_BUTTON_ESCAPE;
		}
		else
		{
			return_code = CELL_MSGDIALOG_BUTTON_NO;
		}
		Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_cancel.wav");
		break;
	}
	default: return;
	}
	// Fade out, then close (the on_finish callback fires close()).
	fade_animation.current = color4f(1.f);
	fade_animation.end = color4f(0.f);
	fade_animation.active = true;
	fade_animation.on_finish = [this]
	{
		close(true, true);
	};
}
// Closes the dialog, clearing any host taskbar progress we reported first.
void message_dialog::close(bool use_callback, bool stop_pad_interception)
{
	if (num_progress_bars > 0)
	{
		Emu.GetCallbacks().handle_taskbar_progress(0, 1);
	}

	user_interface::close(use_callback, stop_pad_interception);
}
// Per-frame tick; only the fade animation needs updating.
void message_dialog::update(u64 timestamp_us)
{
	if (!fade_animation.active)
	{
		return;
	}

	fade_animation.update(timestamp_us);
}
// Configures and displays the dialog. In blocking mode this runs the input
// (or refresh) loop on the calling thread; otherwise the overlay manager is
// asked to drive input/refresh and we only wait for attachment to complete.
error_code message_dialog::show(bool is_blocking, const std::string& text, const MsgDialogType& type, msg_dialog_source source, std::function<void(s32 status)> on_close)
{
	visible = false;
	m_source = source;
	num_progress_bars = type.progress_bar_count;
	if (num_progress_bars)
	{
		s16 offset = 58;
		::at32(progress_bars, 0).set_pos(240, 412);
		if (num_progress_bars > 1)
		{
			::at32(progress_bars, 1).set_pos(240, 462);
			offset = 98;
		}
		// Push the other stuff down
		bottom_bar.translate(0, offset);
		btn_ok.translate(0, offset);
		btn_cancel.translate(0, offset);
	}
	else
	{
		// No progress bars: fade the dialog in.
		fade_animation.current = color4f(0.f);
		fade_animation.end = color4f(1.f);
		fade_animation.active = true;
	}
	// Play the system sound requested by the dialog type, unless muted.
	if (!type.se_mute_on)
	{
		if (type.se_normal)
			Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_system_ok.wav");
		else
			Emu.GetCallbacks().play_sound(fs::get_config_dir() + "sounds/snd_system_ng.wav");
	}
	set_text(text);
	// Configure buttons/interactivity per dialog button type.
	switch (type.button_type.unshifted())
	{
	case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_NONE:
		interactive = !type.disable_cancel;
		if (interactive)
		{
			btn_cancel.set_pos(585, btn_cancel.y);
			btn_cancel.set_text(localized_string_id::RSX_OVERLAYS_MSG_DIALOG_CANCEL);
			cancel_only = true;
		}
		break;
	case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_OK:
		btn_ok.set_pos(600, btn_ok.y);
		btn_ok.set_text(localized_string_id::RSX_OVERLAYS_MSG_DIALOG_OK);
		interactive = true;
		ok_only = true;
		break;
	case CELL_MSGDIALOG_TYPE_BUTTON_TYPE_YESNO:
		interactive = true;
		break;
	default:
		break;
	}
	this->on_close = std::move(on_close);
	visible = true;
	if (is_blocking)
	{
		if (interactive)
		{
			// Run the pad input loop on this thread until a button closes us.
			if (const auto error = run_input_loop())
			{
				if (error != selection_code::canceled)
				{
					rsx_log.error("Message dialog input loop exited with error code=%d", error);
				}
				return error;
			}
		}
		else
		{
			// Non-interactive: just keep refreshing until told to stop.
			while (!m_stop_input_loop)
			{
				refresh();
				// Only update the screen at about 60fps since updating it everytime slows down the process
				std::this_thread::sleep_for(16ms);
			}
		}
	}
	else
	{
		if (!m_stop_input_loop)
		{
			// Delegate input (and, if non-interactive, refreshing) to the
			// overlay manager's thread; wait only until it signals us.
			const auto notify = std::make_shared<atomic_t<u32>>(0);
			auto& overlayman = g_fxo->get<display_manager>();
			if (interactive)
			{
				overlayman.attach_thread_input(
					uid, "Message dialog",
					[notify]() { *notify = true; notify->notify_one(); }
				);
			}
			else
			{
				overlayman.attach_thread_input(
					uid, "Message dialog",
					[notify]() { *notify = true; notify->notify_one(); },
					nullptr,
					[&]()
					{
						while (!m_stop_input_loop && thread_ctrl::state() != thread_state::aborting)
						{
							refresh();
							// Only update the screen at about 60fps since updating it everytime slows down the process
							std::this_thread::sleep_for(16ms);
							if (!g_fxo->is_init<display_manager>())
							{
								rsx_log.fatal("display_manager was improperly destroyed");
								break;
							}
						}
						return 0;
					}
				);
			}
			while (!Emu.IsStopped() && !*notify)
			{
				notify->wait(false, atomic_wait_timeout{1'000'000});
			}
		}
	}
	return CELL_OK;
}
// Posts a new main text; it is picked up by the next get_compiled() call.
void message_dialog::set_text(std::string text)
{
	text_guard.set_text(std::move(text));
}
// Loads/refreshes the user-supplied background picture (when enabled) and
// applies the configured blur/darkening, preserving the image aspect ratio
// via padding. Falls back to the plain dimmed background otherwise.
void message_dialog::update_custom_background()
{
	if (custom_background_allowed && g_cfg.video.shader_preloading_dialog.use_custom_background)
	{
		// NOTE(review): `std::exchange(x, new) != x` depends on the evaluation
		// order of the `!=` operands (unspecified); dirty detection only works
		// if the left side is evaluated first - confirm intent.
		bool dirty = std::exchange(background_blur_strength, g_cfg.video.shader_preloading_dialog.blur_strength.get()) != background_blur_strength;
		dirty |= std::exchange(background_darkening_strength, g_cfg.video.shader_preloading_dialog.darkening_strength.get()) != background_darkening_strength;
		if (!background_image)
		{
			if (const auto picture_path = Emu.GetBackgroundPicturePath(); fs::exists(picture_path))
			{
				background_image = std::make_unique<image_info>(picture_path.c_str());
				// Only mark dirty if the image actually decoded.
				dirty |= !!background_image->data;
			}
		}
		if (dirty && background_image && background_image->data)
		{
			// Darkening is applied by scaling the poster's foreground color.
			const f32 color = (100 - background_darkening_strength) / 100.f;
			background_poster.fore_color = color4f(color, color, color, 1.);
			background.back_color.a = 0.f;
			background_poster.set_size(virtual_width, virtual_height);
			background_poster.set_raw_image(background_image.get());
			background_poster.set_blur_strength(static_cast<u8>(background_blur_strength));
			ensure(background_image->w > 0);
			ensure(background_image->h > 0);
			ensure(background_poster.h > 0);
			// Set padding in order to keep the aspect ratio
			if ((background_image->w / static_cast<double>(background_image->h)) > (background_poster.w / static_cast<double>(background_poster.h)))
			{
				const int padding = (background_poster.h - static_cast<int>(background_image->h * (background_poster.w / static_cast<double>(background_image->w)))) / 2;
				background_poster.set_padding(0, 0, padding, padding);
			}
			else
			{
				const int padding = (background_poster.w - static_cast<int>(background_image->w * (background_poster.h / static_cast<double>(background_image->h)))) / 2;
				background_poster.set_padding(padding, padding, 0, 0);
			}
		}
	}
	else
	{
		// Custom background disabled: release the image and restore the dim.
		if (background_image)
		{
			background_poster.clear_image();
			background_image.reset();
		}
		background.back_color.a = 0.85f;
	}
}
// Number of progress bars requested by the currently shown dialog type.
u32 message_dialog::progress_bar_count() const
{
	return num_progress_bars;
}
// Selects which bar mirrors to the host taskbar; -1 forwards updates from
// every bar (see progress_bar_increment / progress_bar_set_limit).
void message_dialog::progress_bar_set_taskbar_index(s32 index)
{
	taskbar_index = index;
}
// Posts a new label for the given bar; consumed on the next get_compiled().
error_code message_dialog::progress_bar_set_message(u32 index, std::string msg)
{
	if (index >= num_progress_bars)
	{
		return CELL_MSGDIALOG_ERROR_PARAM;
	}

	::at32(bar_text_guard, index).set_text(std::move(msg));
	return CELL_OK;
}
// Advances the given bar by `value`, mirroring to the host taskbar when
// this bar is the tracked one (or when all bars are tracked via -1).
error_code message_dialog::progress_bar_increment(u32 index, f32 value)
{
	if (index >= num_progress_bars)
	{
		return CELL_MSGDIALOG_ERROR_PARAM;
	}

	::at32(progress_bars, index).inc(value);

	if (index == static_cast<u32>(taskbar_index) || taskbar_index == -1)
	{
		Emu.GetCallbacks().handle_taskbar_progress(1, static_cast<s32>(value));
	}
	return CELL_OK;
}
// Sets the given bar to an absolute value, mirroring to the host taskbar
// when this bar is the tracked one (or when all bars are tracked via -1).
error_code message_dialog::progress_bar_set_value(u32 index, f32 value)
{
	if (index >= num_progress_bars)
	{
		return CELL_MSGDIALOG_ERROR_PARAM;
	}

	::at32(progress_bars, index).set_value(value);

	if (index == static_cast<u32>(taskbar_index) || taskbar_index == -1)
	{
		Emu.GetCallbacks().handle_taskbar_progress(3, static_cast<s32>(value));
	}
	return CELL_OK;
}
// Resets the given bar to zero and clears the host taskbar progress.
error_code message_dialog::progress_bar_reset(u32 index)
{
	if (index >= num_progress_bars)
	{
		return CELL_MSGDIALOG_ERROR_PARAM;
	}

	::at32(progress_bars, index).set_value(0.f);
	Emu.GetCallbacks().handle_taskbar_progress(0, 0);
	return CELL_OK;
}
// Sets the given bar's maximum. The host taskbar limit either tracks the
// chosen bar exactly, or (taskbar_index == -1) accumulates all bar limits.
error_code message_dialog::progress_bar_set_limit(u32 index, u32 limit)
{
	if (index >= num_progress_bars)
	{
		return CELL_MSGDIALOG_ERROR_PARAM;
	}

	::at32(progress_bars, index).set_limit(static_cast<f32>(limit));

	if (index == static_cast<u32>(taskbar_index))
	{
		taskbar_limit = limit;
		Emu.GetCallbacks().handle_taskbar_progress(2, taskbar_limit);
	}
	else if (taskbar_index == -1)
	{
		taskbar_limit += limit;
		Emu.GetCallbacks().handle_taskbar_progress(2, taskbar_limit);
	}
	return CELL_OK;
}
} // namespace overlays
} // namespace rsx
| 12,367
|
C++
|
.cpp
| 404
| 25.693069
| 171
| 0.654746
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
5,405
|
overlay_progress_bar.cpp
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Overlays/overlay_progress_bar.cpp
|
#include "stdafx.h"
#include "overlay_progress_bar.hpp"
namespace rsx
{
	namespace overlays
	{
		progress_bar::progress_bar()
		{
			// The text label sits on a fully transparent background.
			text_view.back_color = {0.f, 0.f, 0.f, 0.f};
		}

		void progress_bar::inc(f32 value)
		{
			set_value(m_value + value);
		}

		void progress_bar::dec(f32 value)
		{
			set_value(m_value - value);
		}

		void progress_bar::set_limit(f32 limit)
		{
			m_limit = limit;
			is_compiled = false;
		}

		// Clamps the value into [0, limit] and resizes the fill indicator.
		void progress_bar::set_value(f32 value)
		{
			m_value = std::clamp(value, 0.f, m_limit);

			const f32 fill_width = (w * m_value) / m_limit;
			indicator.set_size(static_cast<u16>(fill_width), h);
			is_compiled = false;
		}

		// Positions the bar; the text label is stacked directly above it.
		void progress_bar::set_pos(s16 _x, s16 _y)
		{
			u16 text_w, text_h;
			text_view.measure_text(text_w, text_h);
			text_h += 13;

			overlay_element::set_pos(_x, _y + text_h);
			indicator.set_pos(_x, _y + text_h);
			text_view.set_pos(_x, _y);
		}

		void progress_bar::set_size(u16 _w, u16 _h)
		{
			overlay_element::set_size(_w, _h);
			text_view.set_size(_w, text_view.h);

			// Re-apply the value so the indicator matches the new width.
			set_value(m_value);
		}

		void progress_bar::translate(s16 dx, s16 dy)
		{
			set_pos(x + dx, y + dy);
		}

		// Updates the centered label and re-anchors the bar underneath it.
		void progress_bar::set_text(const std::string& str)
		{
			text_view.set_text(str);
			text_view.align_text(text_align::center);

			u16 text_w, text_h;
			text_view.measure_text(text_w, text_h);
			text_view.set_size(w, text_h);

			set_pos(text_view.x, text_view.y);
			is_compiled = false;
		}

		compiled_resource& progress_bar::get_compiled()
		{
			if (is_compiled)
			{
				return compiled_resources;
			}

			// Rebuild: base geometry first, then the label, then the fill.
			auto& compiled = overlay_element::get_compiled();
			compiled.add(text_view.get_compiled());

			indicator.back_color = fore_color;
			indicator.refresh();
			compiled.add(indicator.get_compiled());

			return compiled_resources;
		}
	} // namespace overlays
} // namespace rsx
| 1,832
|
C++
|
.cpp
| 73
| 21.506849
| 60
| 0.646586
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.