Dataset columns:

| column | dtype | lengths / values |
|---|---|---|
| id | int64 | 0 - 755k |
| file_name | string | lengths 3 - 109 |
| file_path | string | lengths 13 - 185 |
| content | string | lengths 31 - 9.38M |
| size | int64 | 31 - 9.38M |
| language | string | 1 class |
| extension | string | 11 classes |
| total_lines | int64 | 1 - 340k |
| avg_line_length | float64 | 2.18 - 149k |
| max_line_length | int64 | 7 - 2.22M |
| alphanum_fraction | float64 | 0 - 1 |
| repo_name | string | lengths 6 - 65 |
| repo_stars | int64 | 100 - 47.3k |
| repo_forks | int64 | 0 - 12k |
| repo_open_issues | int64 | 0 - 3.4k |
| repo_license | string | 9 classes |
| repo_extraction_date | string | 92 classes |
| exact_duplicates_redpajama | bool | 2 classes |
| near_duplicates_redpajama | bool | 2 classes |
| exact_duplicates_githubcode | bool | 2 classes |
| exact_duplicates_stackv2 | bool | 1 class |
| exact_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_githubcode | bool | 2 classes |
| near_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_stackv2 | bool | 1 class |
id: 23,203
file_name: PPCRecompilerImlRegisterAllocator.cpp
file_path: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator.cpp
content:
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "PPCRecompilerImlRanges.h"
void PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 gprRegisterSearched[4], sint32 gprRegisterReplaced[4]);
bool PPCRecompiler_isSuffixInstruction(PPCRecImlInstruction_t* iml);
uint32 recRACurrentIterationIndex = 0;
uint32 PPCRecRA_getNextIterationIndex()
{
recRACurrentIterationIndex++;
return recRACurrentIterationIndex;
}
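// depth-limited DFS along the forward branch edges; returns true if a path leads back to imlSegmentLoopBase
// and increments loopDepth for every segment that lies on such a path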
bool _detectLoop(PPCRecImlSegment_t* currentSegment, sint32 depth, uint32 iterationIndex, PPCRecImlSegment_t* imlSegmentLoopBase)
{
if (currentSegment == imlSegmentLoopBase)
return true;
if (currentSegment->raInfo.lastIterationIndex == iterationIndex)
return currentSegment->raInfo.isPartOfProcessedLoop;
if (depth >= 9)
return false;
currentSegment->raInfo.lastIterationIndex = iterationIndex;
currentSegment->raInfo.isPartOfProcessedLoop = false;
if (currentSegment->nextSegmentIsUncertain)
return false;
if (currentSegment->nextSegmentBranchNotTaken)
{
if (currentSegment->nextSegmentBranchNotTaken->momentaryIndex > currentSegment->momentaryIndex)
{
currentSegment->raInfo.isPartOfProcessedLoop = _detectLoop(currentSegment->nextSegmentBranchNotTaken, depth + 1, iterationIndex, imlSegmentLoopBase);
}
}
if (currentSegment->nextSegmentBranchTaken)
{
if (currentSegment->nextSegmentBranchTaken->momentaryIndex > currentSegment->momentaryIndex)
{
currentSegment->raInfo.isPartOfProcessedLoop = _detectLoop(currentSegment->nextSegmentBranchTaken, depth + 1, iterationIndex, imlSegmentLoopBase);
}
}
if (currentSegment->raInfo.isPartOfProcessedLoop)
currentSegment->loopDepth++;
return currentSegment->raInfo.isPartOfProcessedLoop;
}
void PPCRecRA_detectLoop(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegmentLoopBase)
{
uint32 iterationIndex = PPCRecRA_getNextIterationIndex();
imlSegmentLoopBase->raInfo.lastIterationIndex = iterationIndex;
if (_detectLoop(imlSegmentLoopBase->nextSegmentBranchTaken, 0, iterationIndex, imlSegmentLoopBase))
{
imlSegmentLoopBase->loopDepth++;
}
}
void PPCRecRA_identifyLoop(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
if (imlSegment->nextSegmentIsUncertain)
return;
// check if this segment has a branch that links to itself (tight loop)
if (imlSegment->nextSegmentBranchTaken == imlSegment)
{
// segment loops over itself
imlSegment->loopDepth++;
return;
}
// check if this segment has a branch that goes backwards (potential complex loop)
if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->momentaryIndex < imlSegment->momentaryIndex)
{
PPCRecRA_detectLoop(ppcImlGenContext, imlSegment);
}
}
typedef struct
{
sint32 name;
sint32 virtualRegister;
sint32 physicalRegister;
bool isDirty;
}raRegisterState_t;
const sint32 _raInfo_physicalGPRCount = PPC_X64_GPR_USABLE_REGISTERS;
raRegisterState_t* PPCRecRA_getRegisterState(raRegisterState_t* regState, sint32 virtualRegister)
{
for (sint32 i = 0; i < _raInfo_physicalGPRCount; i++)
{
if (regState[i].virtualRegister == virtualRegister)
{
#ifdef CEMU_DEBUG_ASSERT
if (regState[i].physicalRegister < 0)
assert_dbg();
#endif
return regState + i;
}
}
return nullptr;
}
raRegisterState_t* PPCRecRA_getFreePhysicalRegister(raRegisterState_t* regState)
{
for (sint32 i = 0; i < _raInfo_physicalGPRCount; i++)
{
if (regState[i].physicalRegister < 0)
{
regState[i].physicalRegister = i;
return regState + i;
}
}
return nullptr;
}
typedef struct
{
uint16 registerIndex;
uint16 registerName;
}raLoadStoreInfo_t;
void PPCRecRA_insertGPRLoadInstruction(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, sint32 registerIndex, sint32 registerName)
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, 1);
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + 0);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_R_NAME;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = registerIndex;
imlInstructionItr->op_r_name.name = registerName;
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
void PPCRecRA_insertGPRLoadInstructions(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, raLoadStoreInfo_t* loadList, sint32 loadCount)
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, loadCount);
memset(imlSegment->imlList + (insertIndex + 0), 0x00, sizeof(PPCRecImlInstruction_t)*loadCount);
for (sint32 i = 0; i < loadCount; i++)
{
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + i);
imlInstructionItr->type = PPCREC_IML_TYPE_R_NAME;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = (uint8)loadList[i].registerIndex;
imlInstructionItr->op_r_name.name = (uint32)loadList[i].registerName;
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
}
void PPCRecRA_insertGPRStoreInstruction(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, sint32 registerIndex, sint32 registerName)
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, 1);
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + 0);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = registerIndex;
imlInstructionItr->op_r_name.name = registerName;
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
void PPCRecRA_insertGPRStoreInstructions(PPCRecImlSegment_t* imlSegment, sint32 insertIndex, raLoadStoreInfo_t* storeList, sint32 storeCount)
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, insertIndex, storeCount);
memset(imlSegment->imlList + (insertIndex + 0), 0x00, sizeof(PPCRecImlInstruction_t)*storeCount);
for (sint32 i = 0; i < storeCount; i++)
{
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList + (insertIndex + i);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = (uint8)storeList[i].registerIndex;
imlInstructionItr->op_r_name.name = (uint32)storeList[i].registerName;
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
}
#define SUBRANGE_LIST_SIZE (128)
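// returns the distance (in instructions) from startIndex to the subrange's next recorded use,
// or INT_MAX if it is not used again within this segment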
sint32 PPCRecRA_countInstructionsUntilNextUse(raLivenessSubrange_t* subrange, sint32 startIndex)
{
for (sint32 i = 0; i < subrange->list_locations.size(); i++)
{
if (subrange->list_locations.data()[i].index >= startIndex)
return subrange->list_locations.data()[i].index - startIndex;
}
return INT_MAX;
}
// count how many instructions there are until physRegister is used by any subrange (returns 0 if register is in use at startIndex, and INT_MAX if not used for the remainder of the segment)
sint32 PPCRecRA_countInstructionsUntilNextLocalPhysRegisterUse(PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 physRegister)
{
sint32 minDistance = INT_MAX;
// next
raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
while(subrangeItr)
{
if (subrangeItr->range->physicalRegister != physRegister)
{
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
continue;
}
if (startIndex >= subrangeItr->start.index && startIndex < subrangeItr->end.index)
return 0;
if (subrangeItr->start.index >= startIndex)
{
minDistance = std::min(minDistance, (subrangeItr->start.index - startIndex));
}
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
return minDistance;
}
typedef struct
{
raLivenessSubrange_t* liveRangeList[64];
sint32 liveRangesCount;
}raLiveRangeInfo_t;
// return a bitmask that contains only registers that are not used by any colliding range
uint32 PPCRecRA_getAllowedRegisterMaskForFullRange(raLivenessRange_t* range)
{
uint32 physRegisterMask = (1 << PPC_X64_GPR_USABLE_REGISTERS) - 1;
for (auto& subrange : range->list_subranges)
{
PPCRecImlSegment_t* imlSegment = subrange->imlSegment;
raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
while(subrangeItr)
{
if (subrange == subrangeItr)
{
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
continue;
}
if ((subrange->start.index < subrangeItr->end.index && subrange->end.index > subrangeItr->start.index) ||
(subrange->start.index == RA_INTER_RANGE_START && subrange->start.index == subrangeItr->start.index) ||
(subrange->end.index == RA_INTER_RANGE_END && subrange->end.index == subrangeItr->end.index) )
{
if(subrangeItr->range->physicalRegister >= 0)
physRegisterMask &= ~(1<<(subrangeItr->range->physicalRegister));
}
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
}
return physRegisterMask;
}
bool _livenessRangeStartCompare(raLivenessSubrange_t* lhs, raLivenessSubrange_t* rhs) { return lhs->start.index < rhs->start.index; }
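// sorts the segment's linked list of subranges by ascending start index (via a temporary array, capped at 4096 entries)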
void _sortSegmentAllSubrangesLinkedList(PPCRecImlSegment_t* imlSegment)
{
raLivenessSubrange_t* subrangeList[4096+1];
sint32 count = 0;
// disassemble linked list
raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
while (subrangeItr)
{
if (count >= 4096)
assert_dbg();
subrangeList[count] = subrangeItr;
count++;
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
if (count == 0)
{
imlSegment->raInfo.linkedList_allSubranges = nullptr;
return;
}
// sort
std::sort(subrangeList, subrangeList + count, _livenessRangeStartCompare);
//for (sint32 i1 = 0; i1 < count; i1++)
//{
// for (sint32 i2 = i1+1; i2 < count; i2++)
// {
// if (subrangeList[i1]->start.index > subrangeList[i2]->start.index)
// {
// // swap
// raLivenessSubrange_t* temp = subrangeList[i1];
// subrangeList[i1] = subrangeList[i2];
// subrangeList[i2] = temp;
// }
// }
//}
// reassemble linked list
subrangeList[count] = nullptr;
imlSegment->raInfo.linkedList_allSubranges = subrangeList[0];
subrangeList[0]->link_segmentSubrangesGPR.prev = nullptr;
subrangeList[0]->link_segmentSubrangesGPR.next = subrangeList[1];
for (sint32 i = 1; i < count; i++)
{
subrangeList[i]->link_segmentSubrangesGPR.prev = subrangeList[i - 1];
subrangeList[i]->link_segmentSubrangesGPR.next = subrangeList[i + 1];
}
// validate list
#ifdef CEMU_DEBUG_ASSERT
sint32 count2 = 0;
subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
sint32 currentStartIndex = RA_INTER_RANGE_START;
while (subrangeItr)
{
count2++;
if (subrangeItr->start.index < currentStartIndex)
assert_dbg();
currentStartIndex = subrangeItr->start.index;
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
if (count != count2)
assert_dbg();
#endif
}
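// linear-scan style register assignment over the segment's subranges (sorted by start index);
// returns true if every subrange received a physical register, false if a spill/split strategy
// was applied and allocation must be restarted by the caller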
bool PPCRecRA_assignSegmentRegisters(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
// sort subranges ascending by start index
//std::sort(imlSegment->raInfo.list_subranges.begin(), imlSegment->raInfo.list_subranges.end(), _sortSubrangesByStartIndexDepr);
_sortSegmentAllSubrangesLinkedList(imlSegment);
raLiveRangeInfo_t liveInfo;
liveInfo.liveRangesCount = 0;
//sint32 subrangeIndex = 0;
//for (auto& subrange : imlSegment->raInfo.list_subranges)
raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
while(subrangeItr)
{
sint32 currentIndex = subrangeItr->start.index;
// validate subrange
PPCRecRA_debugValidateSubrange(subrangeItr);
// expire ranges
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f];
if (liverange->end.index <= currentIndex && liverange->end.index != RA_INTER_RANGE_END)
{
#ifdef CEMU_DEBUG_ASSERT
if (liverange->subrangeBranchTaken || liverange->subrangeBranchNotTaken)
assert_dbg(); // infinite subranges should not expire
#endif
// remove entry
liveInfo.liveRangesCount--;
liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount];
f--;
}
}
// check if subrange already has register assigned
if (subrangeItr->range->physicalRegister >= 0)
{
// verify if register is actually available
#ifdef CEMU_DEBUG_ASSERT
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* liverangeItr = liveInfo.liveRangeList[f];
if (liverangeItr->range->physicalRegister == subrangeItr->range->physicalRegister)
{
// this should never happen because we try to preventively avoid register conflicts
assert_dbg();
}
}
#endif
// add to live ranges
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr;
liveInfo.liveRangesCount++;
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
continue;
}
// find free register
uint32 physRegisterMask = (1<<PPC_X64_GPR_USABLE_REGISTERS)-1;
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f];
if (liverange->range->physicalRegister < 0)
assert_dbg();
physRegisterMask &= ~(1<<liverange->range->physicalRegister);
}
// check intersections with other ranges and determine allowed registers
uint32 allowedPhysRegisterMask = 0;
uint32 unusedRegisterMask = physRegisterMask; // mask of registers that are currently not used (does not include range checks)
if (physRegisterMask != 0)
{
allowedPhysRegisterMask = PPCRecRA_getAllowedRegisterMaskForFullRange(subrangeItr->range);
physRegisterMask &= allowedPhysRegisterMask;
}
if (physRegisterMask == 0)
{
struct
{
// estimated costs and chosen candidates for the different spill strategies
// hole cutting into a local range
struct
{
sint32 distance;
raLivenessSubrange_t* largestHoleSubrange;
sint32 cost; // additional cost of choosing this candidate
}localRangeHoleCutting;
// split current range (this is generally only a good choice when the current range is long but rarely used)
struct
{
sint32 cost;
sint32 physRegister;
sint32 distance; // size of hole
}availableRegisterHole;
// explode an inter-segment range (prefer ranges that are not read/written in this segment)
struct
{
raLivenessRange_t* range;
sint32 cost;
sint32 distance; // size of hole
// note: If we explode a range, we still have to check the size of the hole that becomes available, if too small then we need to add cost of splitting local subrange
}explodeRange;
// todo - add more strategies, make cost estimation smarter (for example, in some cases splitting can have reduced or no cost if read/store can be avoided due to data flow)
}spillStrategies;
// can't assign a register
// there might be registers available, we just can't use them due to range conflicts
if (subrangeItr->end.index != RA_INTER_RANGE_END)
{
// range ends in current segment
// Current algo looks like this:
// 1) Get the size of the largest possible hole that we can cut into any of the live local subranges
// 1.1) Check if the hole is large enough to hold the current subrange
// 2) If yes, cut hole and return false (full retry)
// 3) If no, try to reuse free register (need to determine how large the region is we can use)
// 4) If there is no free register or the range is extremely short go back to step 1+2 but additionally split the current subrange at where the hole ends
cemu_assert_debug(currentIndex == subrangeItr->start.index);
sint32 requiredSize = subrangeItr->end.index - subrangeItr->start.index;
// evaluate strategy: Cut hole into local subrange
spillStrategies.localRangeHoleCutting.distance = -1;
spillStrategies.localRangeHoleCutting.largestHoleSubrange = nullptr;
spillStrategies.localRangeHoleCutting.cost = INT_MAX;
if (currentIndex >= 0)
{
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* candidate = liveInfo.liveRangeList[f];
if (candidate->end.index == RA_INTER_RANGE_END)
continue;
sint32 distance = PPCRecRA_countInstructionsUntilNextUse(candidate, currentIndex);
if (distance < 2)
continue; // not even worth the consideration
// calculate split cost of candidate
sint32 cost = PPCRecRARange_estimateAdditionalCostAfterSplit(candidate, currentIndex + distance);
// calculate additional split cost of currentRange if hole is not large enough
if (distance < requiredSize)
{
cost += PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance);
// we also slightly increase cost in relation to the remaining length (in order to make the algorithm prefer larger holes)
cost += (requiredSize - distance) / 10;
}
// compare cost with previous candidates
if (cost < spillStrategies.localRangeHoleCutting.cost)
{
spillStrategies.localRangeHoleCutting.cost = cost;
spillStrategies.localRangeHoleCutting.distance = distance;
spillStrategies.localRangeHoleCutting.largestHoleSubrange = candidate;
}
}
}
// evaluate strategy: Split current range to fit in available holes
spillStrategies.availableRegisterHole.cost = INT_MAX;
spillStrategies.availableRegisterHole.distance = -1;
spillStrategies.availableRegisterHole.physRegister = -1;
if (currentIndex >= 0)
{
if (unusedRegisterMask != 0)
{
for (sint32 t = 0; t < PPC_X64_GPR_USABLE_REGISTERS; t++)
{
if ((unusedRegisterMask&(1 << t)) == 0)
continue;
// get size of potential hole for this register
sint32 distance = PPCRecRA_countInstructionsUntilNextLocalPhysRegisterUse(imlSegment, currentIndex, t);
if (distance < 2)
continue; // not worth consideration
// calculate additional cost due to split
if (distance >= requiredSize)
assert_dbg(); // should not happen or else we would have selected this register
sint32 cost = PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance);
// add small additional cost for the remaining range (prefer larger holes)
cost += (requiredSize - distance) / 10;
if (cost < spillStrategies.availableRegisterHole.cost)
{
spillStrategies.availableRegisterHole.cost = cost;
spillStrategies.availableRegisterHole.distance = distance;
spillStrategies.availableRegisterHole.physRegister = t;
}
}
}
}
// evaluate strategy: Explode inter-segment ranges
spillStrategies.explodeRange.cost = INT_MAX;
spillStrategies.explodeRange.range = nullptr;
spillStrategies.explodeRange.distance = -1;
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* candidate = liveInfo.liveRangeList[f];
if (candidate->end.index != RA_INTER_RANGE_END)
continue;
sint32 distance = PPCRecRA_countInstructionsUntilNextUse(liveInfo.liveRangeList[f], currentIndex);
if( distance < 2)
continue;
sint32 cost;
cost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(candidate->range);
// if the hole is not large enough, add cost of splitting current subrange
if (distance < requiredSize)
{
cost += PPCRecRARange_estimateAdditionalCostAfterSplit(subrangeItr, currentIndex + distance);
// add small additional cost for the remaining range (prefer larger holes)
cost += (requiredSize - distance) / 10;
}
// compare with current best candidate for this strategy
if (cost < spillStrategies.explodeRange.cost)
{
spillStrategies.explodeRange.cost = cost;
spillStrategies.explodeRange.distance = distance;
spillStrategies.explodeRange.range = candidate->range;
}
}
// choose strategy
if (spillStrategies.explodeRange.cost != INT_MAX && spillStrategies.explodeRange.cost <= spillStrategies.localRangeHoleCutting.cost && spillStrategies.explodeRange.cost <= spillStrategies.availableRegisterHole.cost)
{
// explode range
PPCRecRA_explodeRange(ppcImlGenContext, spillStrategies.explodeRange.range);
// split current subrange if necessary
if( requiredSize > spillStrategies.explodeRange.distance)
PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex+spillStrategies.explodeRange.distance, true);
}
else if (spillStrategies.availableRegisterHole.cost != INT_MAX && spillStrategies.availableRegisterHole.cost <= spillStrategies.explodeRange.cost && spillStrategies.availableRegisterHole.cost <= spillStrategies.localRangeHoleCutting.cost)
{
// use available register
PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex + spillStrategies.availableRegisterHole.distance, true);
}
else if (spillStrategies.localRangeHoleCutting.cost != INT_MAX && spillStrategies.localRangeHoleCutting.cost <= spillStrategies.explodeRange.cost && spillStrategies.localRangeHoleCutting.cost <= spillStrategies.availableRegisterHole.cost)
{
// cut hole
PPCRecRA_splitLocalSubrange(ppcImlGenContext, spillStrategies.localRangeHoleCutting.largestHoleSubrange, currentIndex + spillStrategies.localRangeHoleCutting.distance, true);
// split current subrange if necessary
if (requiredSize > spillStrategies.localRangeHoleCutting.distance)
PPCRecRA_splitLocalSubrange(ppcImlGenContext, subrangeItr, currentIndex + spillStrategies.localRangeHoleCutting.distance, true);
}
else if (subrangeItr->start.index == RA_INTER_RANGE_START)
{
// alternative strategy if we have no other choice: explode current range
PPCRecRA_explodeRange(ppcImlGenContext, subrangeItr->range);
}
else
assert_dbg();
return false;
}
else
{
// range exceeds segment border
// simple but bad solution -> explode the entire range (no longer allow it to cross segment boundaries)
// better solutions: 1) Depending on the situation, we can explode other ranges to resolve the conflict. Thus we should explode the range with the lowest extra cost
// 2) Or we explode the range only partially
// explode the range with the least cost
spillStrategies.explodeRange.cost = INT_MAX;
spillStrategies.explodeRange.range = nullptr;
spillStrategies.explodeRange.distance = -1;
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* candidate = liveInfo.liveRangeList[f];
if (candidate->end.index != RA_INTER_RANGE_END)
continue;
// only select candidates that clash with current subrange
if (candidate->range->physicalRegister < 0 && candidate != subrangeItr)
continue;
sint32 cost;
cost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(candidate->range);
// compare with current best candidate for this strategy
if (cost < spillStrategies.explodeRange.cost)
{
spillStrategies.explodeRange.cost = cost;
spillStrategies.explodeRange.distance = INT_MAX;
spillStrategies.explodeRange.range = candidate->range;
}
}
// add current range as a candidate too
sint32 ownCost;
ownCost = PPCRecRARange_estimateAdditionalCostAfterRangeExplode(subrangeItr->range);
if (ownCost < spillStrategies.explodeRange.cost)
{
spillStrategies.explodeRange.cost = ownCost;
spillStrategies.explodeRange.distance = INT_MAX;
spillStrategies.explodeRange.range = subrangeItr->range;
}
if (spillStrategies.explodeRange.cost == INT_MAX)
assert_dbg(); // should not happen
PPCRecRA_explodeRange(ppcImlGenContext, spillStrategies.explodeRange.range);
}
return false;
}
// assign register to range
sint32 registerIndex = -1;
for (sint32 f = 0; f < PPC_X64_GPR_USABLE_REGISTERS; f++)
{
if ((physRegisterMask&(1 << f)) != 0)
{
registerIndex = f;
break;
}
}
subrangeItr->range->physicalRegister = registerIndex;
// add to live ranges
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr;
liveInfo.liveRangesCount++;
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
return true;
}
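// repeatedly runs per-segment assignment, deepest loops first, until a full pass completes without forcing a retry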
void PPCRecRA_assignRegisters(ppcImlGenContext_t* ppcImlGenContext)
{
// start with frequently executed segments first
sint32 maxLoopDepth = 0;
for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++)
{
maxLoopDepth = std::max(maxLoopDepth, ppcImlGenContext->segmentList[i]->loopDepth);
}
while (true)
{
bool done = false;
for (sint32 d = maxLoopDepth; d >= 0; d--)
{
for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[i];
if (imlSegment->loopDepth != d)
continue;
done = PPCRecRA_assignSegmentRegisters(ppcImlGenContext, imlSegment);
if (done == false)
break;
}
if (done == false)
break;
}
if (done)
break;
}
}
typedef struct
{
raLivenessSubrange_t* subrangeList[SUBRANGE_LIST_SIZE];
sint32 subrangeCount;
bool hasUndefinedEndings;
}subrangeEndingInfo_t;
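// depth-limited traversal over connected subranges that collects the subranges where the range's flow ends;
// sets hasUndefinedEndings when the endings cannot be fully determined (depth/list limits or missing links)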
void _findSubrangeWriteEndings(raLivenessSubrange_t* subrange, uint32 iterationIndex, sint32 depth, subrangeEndingInfo_t* info)
{
if (depth >= 30)
{
info->hasUndefinedEndings = true;
return;
}
if (subrange->lastIterationIndex == iterationIndex)
return; // already processed
subrange->lastIterationIndex = iterationIndex;
if (subrange->hasStoreDelayed)
return; // no need to traverse this subrange
PPCRecImlSegment_t* imlSegment = subrange->imlSegment;
if (subrange->end.index != RA_INTER_RANGE_END)
{
// ending segment
if (info->subrangeCount >= SUBRANGE_LIST_SIZE)
{
info->hasUndefinedEndings = true;
return;
}
else
{
info->subrangeList[info->subrangeCount] = subrange;
info->subrangeCount++;
}
return;
}
// traverse next subranges in flow
if (imlSegment->nextSegmentBranchNotTaken)
{
if (subrange->subrangeBranchNotTaken == nullptr)
{
info->hasUndefinedEndings = true;
}
else
{
_findSubrangeWriteEndings(subrange->subrangeBranchNotTaken, iterationIndex, depth + 1, info);
}
}
if (imlSegment->nextSegmentBranchTaken)
{
if (subrange->subrangeBranchTaken == nullptr)
{
info->hasUndefinedEndings = true;
}
else
{
_findSubrangeWriteEndings(subrange->subrangeBranchTaken, iterationIndex, depth + 1, info);
}
}
}
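// for subranges that extend past the segment border and have a pending store, check whether the store
// can be delayed into the ending subranges (if it is already present there or cheaper to perform there)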
void _analyzeRangeDataFlow(raLivenessSubrange_t* subrange)
{
if (subrange->end.index != RA_INTER_RANGE_END)
return;
// analyze data flow across segments (if this segment has writes)
if (subrange->hasStore)
{
subrangeEndingInfo_t writeEndingInfo;
writeEndingInfo.subrangeCount = 0;
writeEndingInfo.hasUndefinedEndings = false;
_findSubrangeWriteEndings(subrange, PPCRecRA_getNextIterationIndex(), 0, &writeEndingInfo);
if (writeEndingInfo.hasUndefinedEndings == false)
{
// get cost of delaying store into endings
sint32 delayStoreCost = 0;
bool alreadyStoredInAllEndings = true;
for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++)
{
raLivenessSubrange_t* subrangeItr = writeEndingInfo.subrangeList[i];
if( subrangeItr->hasStore )
continue; // this ending already stores, no extra cost
alreadyStoredInAllEndings = false;
sint32 storeCost = PPCRecRARange_getReadWriteCost(subrangeItr->imlSegment);
delayStoreCost = std::max(storeCost, delayStoreCost);
}
if (alreadyStoredInAllEndings)
{
subrange->hasStore = false;
subrange->hasStoreDelayed = true;
}
else if (delayStoreCost <= PPCRecRARange_getReadWriteCost(subrange->imlSegment))
{
subrange->hasStore = false;
subrange->hasStoreDelayed = true;
for (sint32 i = 0; i < writeEndingInfo.subrangeCount; i++)
{
raLivenessSubrange_t* subrangeItr = writeEndingInfo.subrangeList[i];
subrangeItr->hasStore = true;
}
}
}
}
}
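// rewrites the segment's IML stream: inserts name<->register load/store instructions at range boundaries
// and replaces virtual GPR indices with the assigned physical registers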
void PPCRecRA_generateSegmentInstructions(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
sint16 virtualReg2PhysReg[PPC_REC_MAX_VIRTUAL_GPR];
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++)
virtualReg2PhysReg[i] = -1;
raLiveRangeInfo_t liveInfo;
liveInfo.liveRangesCount = 0;
sint32 index = 0;
sint32 suffixInstructionCount = (imlSegment->imlListCount > 0 && PPCRecompiler_isSuffixInstruction(imlSegment->imlList + imlSegment->imlListCount - 1)) ? 1 : 0;
// load register ranges that are supplied from previous segments
raLivenessSubrange_t* subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
//for (auto& subrange : imlSegment->raInfo.list_subranges)
while(subrangeItr)
{
if (subrangeItr->start.index == RA_INTER_RANGE_START)
{
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr;
liveInfo.liveRangesCount++;
#ifdef CEMU_DEBUG_ASSERT
// load GPR
if (subrangeItr->_noLoad == false)
{
assert_dbg();
}
// update translation table
if (virtualReg2PhysReg[subrangeItr->range->virtualRegister] != -1)
assert_dbg();
#endif
virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister;
}
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
// process instructions
while(index < imlSegment->imlListCount+1)
{
// expire ranges
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f];
if (liverange->end.index <= index)
{
// update translation table
if (virtualReg2PhysReg[liverange->range->virtualRegister] == -1)
assert_dbg();
virtualReg2PhysReg[liverange->range->virtualRegister] = -1;
// store GPR
if (liverange->hasStore)
{
PPCRecRA_insertGPRStoreInstruction(imlSegment, std::min(index, imlSegment->imlListCount - suffixInstructionCount), liverange->range->physicalRegister, liverange->range->name);
index++;
}
// remove entry
liveInfo.liveRangesCount--;
liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount];
f--;
}
}
// load new ranges
subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
while(subrangeItr)
{
if (subrangeItr->start.index == index)
{
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr;
liveInfo.liveRangesCount++;
// load GPR
if (subrangeItr->_noLoad == false)
{
PPCRecRA_insertGPRLoadInstruction(imlSegment, std::min(index, imlSegment->imlListCount - suffixInstructionCount), subrangeItr->range->physicalRegister, subrangeItr->range->name);
index++;
subrangeItr->start.index--;
}
// update translation table
cemu_assert_debug(virtualReg2PhysReg[subrangeItr->range->virtualRegister] == -1);
virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister;
}
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
// replace registers
if (index < imlSegment->imlListCount)
{
PPCImlOptimizerUsedRegisters_t gprTracking;
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking);
sint32 inputGpr[4];
inputGpr[0] = gprTracking.gpr[0];
inputGpr[1] = gprTracking.gpr[1];
inputGpr[2] = gprTracking.gpr[2];
inputGpr[3] = gprTracking.gpr[3];
sint32 replaceGpr[4];
for (sint32 f = 0; f < 4; f++)
{
sint32 virtualRegister = gprTracking.gpr[f];
if (virtualRegister < 0)
{
replaceGpr[f] = -1;
continue;
}
if (virtualRegister >= PPC_REC_MAX_VIRTUAL_GPR)
assert_dbg();
replaceGpr[f] = virtualReg2PhysReg[virtualRegister];
cemu_assert_debug(replaceGpr[f] >= 0);
}
PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext, imlSegment->imlList + index, inputGpr, replaceGpr);
}
// next iml instruction
index++;
}
// expire infinite subranges (subranges that cross the segment border)
sint32 storeLoadListLength = 0;
raLoadStoreInfo_t loadStoreList[PPC_REC_MAX_VIRTUAL_GPR];
for (sint32 f = 0; f < liveInfo.liveRangesCount; f++)
{
raLivenessSubrange_t* liverange = liveInfo.liveRangeList[f];
if (liverange->end.index == RA_INTER_RANGE_END)
{
// update translation table
cemu_assert_debug(virtualReg2PhysReg[liverange->range->virtualRegister] != -1);
virtualReg2PhysReg[liverange->range->virtualRegister] = -1;
// store GPR
if (liverange->hasStore)
{
loadStoreList[storeLoadListLength].registerIndex = liverange->range->physicalRegister;
loadStoreList[storeLoadListLength].registerName = liverange->range->name;
storeLoadListLength++;
}
// remove entry
liveInfo.liveRangesCount--;
liveInfo.liveRangeList[f] = liveInfo.liveRangeList[liveInfo.liveRangesCount];
f--;
}
else
{
cemu_assert_suspicious();
}
}
if (storeLoadListLength > 0)
{
PPCRecRA_insertGPRStoreInstructions(imlSegment, imlSegment->imlListCount - suffixInstructionCount, loadStoreList, storeLoadListLength);
}
// load subranges for next segments
subrangeItr = imlSegment->raInfo.linkedList_allSubranges;
storeLoadListLength = 0;
while(subrangeItr)
{
if (subrangeItr->start.index == RA_INTER_RANGE_END)
{
liveInfo.liveRangeList[liveInfo.liveRangesCount] = subrangeItr;
liveInfo.liveRangesCount++;
// load GPR
if (subrangeItr->_noLoad == false)
{
loadStoreList[storeLoadListLength].registerIndex = subrangeItr->range->physicalRegister;
loadStoreList[storeLoadListLength].registerName = subrangeItr->range->name;
storeLoadListLength++;
}
// update translation table
cemu_assert_debug(virtualReg2PhysReg[subrangeItr->range->virtualRegister] == -1);
virtualReg2PhysReg[subrangeItr->range->virtualRegister] = subrangeItr->range->physicalRegister;
}
// next
subrangeItr = subrangeItr->link_segmentSubrangesGPR.next;
}
if (storeLoadListLength > 0)
{
PPCRecRA_insertGPRLoadInstructions(imlSegment, imlSegment->imlListCount - suffixInstructionCount, loadStoreList, storeLoadListLength);
}
}
void PPCRecRA_generateMoveInstructions(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
PPCRecRA_generateSegmentInstructions(ppcImlGenContext, imlSegment);
}
}
void PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext);
void PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext);
void PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext_t* ppcImlGenContext);
void PPCRecompilerImm_prepareForRegisterAllocation(ppcImlGenContext_t* ppcImlGenContext)
{
// insert empty segments after every non-taken branch if the linked segment has more than one input
// this gives the register allocator more room to create efficient spill code
sint32 segmentIndex = 0;
while (segmentIndex < ppcImlGenContext->segmentListCount)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[segmentIndex];
if (imlSegment->nextSegmentIsUncertain)
{
segmentIndex++;
continue;
}
if (imlSegment->nextSegmentBranchTaken == nullptr || imlSegment->nextSegmentBranchNotTaken == nullptr)
{
segmentIndex++;
continue;
}
if (imlSegment->nextSegmentBranchNotTaken->list_prevSegments.size() <= 1)
{
segmentIndex++;
continue;
}
if (imlSegment->nextSegmentBranchNotTaken->isEnterable)
{
segmentIndex++;
continue;
}
PPCRecompilerIml_insertSegments(ppcImlGenContext, segmentIndex + 1, 1);
PPCRecImlSegment_t* imlSegmentP0 = ppcImlGenContext->segmentList[segmentIndex + 0];
PPCRecImlSegment_t* imlSegmentP1 = ppcImlGenContext->segmentList[segmentIndex + 1];
PPCRecImlSegment_t* nextSegment = imlSegment->nextSegmentBranchNotTaken;
PPCRecompilerIML_removeLink(imlSegmentP0, nextSegment);
PPCRecompilerIml_setLinkBranchNotTaken(imlSegmentP1, nextSegment);
PPCRecompilerIml_setLinkBranchNotTaken(imlSegmentP0, imlSegmentP1);
segmentIndex++;
}
// detect loops
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
imlSegment->momentaryIndex = s;
}
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
PPCRecRA_identifyLoop(ppcImlGenContext, imlSegment);
}
}
void PPCRecompilerImm_allocateRegisters(ppcImlGenContext_t* ppcImlGenContext)
{
PPCRecompilerImm_prepareForRegisterAllocation(ppcImlGenContext);
ppcImlGenContext->raInfo.list_ranges = std::vector<raLivenessRange_t*>();
// calculate liveness
PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext);
PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext);
PPCRecRA_assignRegisters(ppcImlGenContext);
PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext);
PPCRecRA_generateMoveInstructions(ppcImlGenContext);
PPCRecRA_deleteAllRanges(ppcImlGenContext);
}
size: 37,328 | language: C++ | extension: .cpp | total_lines: 961 | avg_line_length: 35.264308 | max_line_length: 242 | alphanum_fraction: 0.758655
repo_name: cemu-project/Cemu | repo_stars: 7,119 | repo_forks: 558 | repo_open_issues: 254 | repo_license: MPL-2.0
repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
duplicate flags (exact/near, redpajama, githubcode, stackv1, stackv2): all false

id: 23,204
file_name: PPCRecompilerImlAnalyzer.cpp
file_path: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlAnalyzer.cpp
content:
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "util/helpers/fixedSizeList.h"
#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h"
/*
* Analyzes a single segment and returns true if it is a tight finite loop
*/
bool PPCRecompilerImlAnalyzer_isTightFiniteLoop(PPCRecImlSegment_t* imlSegment)
{
bool isTightFiniteLoop = false;
// base criteria, must jump to beginning of same segment
if (imlSegment->nextSegmentBranchTaken != imlSegment)
return false;
// loops using BDNZ are assumed to always be finite
for (sint32 t = 0; t < imlSegment->imlListCount; t++)
{
if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB && imlSegment->imlList[t].crRegister == 8)
{
return true;
}
}
// for non-BDNZ loops, check for common patterns
// risky approach, look for ADD/SUB operations and assume that potential overflow means finite (does not include r_r_s32 ADD/SUB)
// this catches most loops with load-update and store-update instructions, but also those with decrementing counters
FixedSizeList<sint32, 64, true> list_modifiedRegisters;
for (sint32 t = 0; t < imlSegment->imlListCount; t++)
{
if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && (imlSegment->imlList[t].operation == PPCREC_IML_OP_ADD || imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB) )
{
list_modifiedRegisters.addUnique(imlSegment->imlList[t].op_r_immS32.registerIndex);
}
}
if (list_modifiedRegisters.count > 0)
{
// remove all registers from the list that are modified by non-ADD/SUB instructions
// todo: We should also cover the case where ADD+SUB on the same register cancel the effect out
PPCImlOptimizerUsedRegisters_t registersUsed;
for (sint32 t = 0; t < imlSegment->imlListCount; t++)
{
if (imlSegment->imlList[t].type == PPCREC_IML_TYPE_R_S32 && (imlSegment->imlList[t].operation == PPCREC_IML_OP_ADD || imlSegment->imlList[t].operation == PPCREC_IML_OP_SUB))
continue;
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + t, &registersUsed);
if(registersUsed.writtenNamedReg1 < 0)
continue;
list_modifiedRegisters.remove(registersUsed.writtenNamedReg1);
}
if (list_modifiedRegisters.count > 0)
{
return true;
}
}
return false;
}
/*
* Returns true if the imlInstruction can overwrite CR (depending on value of ->crRegister)
*/
bool PPCRecompilerImlAnalyzer_canTypeWriteCR(PPCRecImlInstruction_t* imlInstruction)
{
if (imlInstruction->type == PPCREC_IML_TYPE_R_R)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_R_S32)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R)
return true;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R)
return true;
return false;
}
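// fills crTracking with bitmasks of the CR bits read and written by the given IML instruction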
void PPCRecompilerImlAnalyzer_getCRTracking(PPCRecImlInstruction_t* imlInstruction, PPCRecCRTracking_t* crTracking)
{
crTracking->readCRBits = 0;
crTracking->writtenCRBits = 0;
if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP)
{
if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE)
{
uint32 crBitFlag = 1 << (imlInstruction->op_conditionalJump.crRegisterIndex * 4 + imlInstruction->op_conditionalJump.crBitIndex);
crTracking->readCRBits = (crBitFlag);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32)
{
uint32 crBitFlag = 1 << (imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex);
crTracking->readCRBits = crBitFlag;
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MFCR)
{
crTracking->readCRBits = 0xFFFFFFFF;
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MTCRF)
{
crTracking->writtenCRBits |= ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CR)
{
if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR ||
imlInstruction->operation == PPCREC_IML_OP_CR_SET)
{
uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD);
crTracking->writtenCRBits = crBitFlag;
}
else if (imlInstruction->operation == PPCREC_IML_OP_CR_OR ||
imlInstruction->operation == PPCREC_IML_OP_CR_ORC ||
imlInstruction->operation == PPCREC_IML_OP_CR_AND ||
imlInstruction->operation == PPCREC_IML_OP_CR_ANDC)
{
uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD);
crTracking->writtenCRBits = crBitFlag;
crBitFlag = 1 << (imlInstruction->op_cr.crA);
crTracking->readCRBits = crBitFlag;
crBitFlag = 1 << (imlInstruction->op_cr.crB);
crTracking->readCRBits |= crBitFlag;
}
else
assert_dbg();
}
else if (PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7)
{
crTracking->writtenCRBits |= (0xF << (imlInstruction->crRegister * 4));
}
else if ((imlInstruction->type == PPCREC_IML_TYPE_STORE || imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) && imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER)
{
// overwrites CR0
crTracking->writtenCRBits |= (0xF << 0);
}
}
size: 5,456 | language: C++ | extension: .cpp | total_lines: 134 | avg_line_length: 38.134328 | max_line_length: 187 | alphanum_fraction: 0.743185
repo_name: cemu-project/Cemu | repo_stars: 7,119 | repo_forks: 558 | repo_open_issues: 254 | repo_license: MPL-2.0
repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
duplicate flags (exact/near, redpajama, githubcode, stackv1, stackv2): all false

id: 23,205
file_name: PPCRecompilerX64.cpp
file_path: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64.cpp
content:
#include "Cafe/HW/Espresso/PPCState.h"
#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h"
#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterHelper.h"
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "Cafe/OS/libs/coreinit/coreinit_Time.h"
#include "util/MemMapper/MemMapper.h"
#include "Common/cpu_features.h"
sint32 x64Gen_registerMap[12] = // virtual GPR to x64 register mapping
{
REG_RAX, REG_RDX, REG_RBX, REG_RBP, REG_RSI, REG_RDI, REG_R8, REG_R9, REG_R10, REG_R11, REG_R12, REG_RCX
};
/*
* Remember current instruction output offset for reloc
* The instruction generated after this method has been called will be adjusted
*/
void PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext_t* x64GenContext, uint8 type, void* extraInfo = nullptr)
{
if( x64GenContext->relocateOffsetTableCount >= x64GenContext->relocateOffsetTableSize )
{
x64GenContext->relocateOffsetTableSize = std::max(4, x64GenContext->relocateOffsetTableSize*2);
x64GenContext->relocateOffsetTable = (x64RelocEntry_t*)realloc(x64GenContext->relocateOffsetTable, sizeof(x64RelocEntry_t)*x64GenContext->relocateOffsetTableSize);
}
x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].offset = x64GenContext->codeBufferIndex;
x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].type = type;
x64GenContext->relocateOffsetTable[x64GenContext->relocateOffsetTableCount].extraInfo = extraInfo;
x64GenContext->relocateOffsetTableCount++;
}
/*
* Overwrites the currently cached (in x64 cf) cr* register
* Should be called before each x64 instruction which overwrites the current status flags (with mappedCRRegister set to PPCREC_CR_TEMPORARY unless explicitly set by PPC instruction)
*/
void PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, sint32 mappedCRRegister, sint32 crState)
{
x64GenContext->activeCRRegister = mappedCRRegister;
x64GenContext->activeCRState = crState;
}
/*
* Reset cached cr* register without storing it first
*/
void PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext)
{
x64GenContext->activeCRRegister = PPC_REC_INVALID_REGISTER;
}
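// patches the displacement of a previously emitted jump (short or near, conditional or unconditional) so that it targets destinationOffset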
void PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext_t* x64GenContext, sint32 jumpInstructionOffset, sint32 destinationOffset)
{
uint8* instructionData = x64GenContext->codeBuffer + jumpInstructionOffset;
if (instructionData[0] == 0x0F && (instructionData[1] >= 0x80 && instructionData[1] <= 0x8F))
{
// far conditional jump
*(uint32*)(instructionData + 2) = (destinationOffset - (jumpInstructionOffset + 6));
}
else if (instructionData[0] >= 0x70 && instructionData[0] <= 0x7F)
{
// short conditional jump
sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2)));
cemu_assert_debug(distance >= -128 && distance <= 127);
*(uint8*)(instructionData + 1) = (uint8)distance;
}
else if (instructionData[0] == 0xE9)
{
*(uint32*)(instructionData + 1) = (destinationOffset - (jumpInstructionOffset + 5));
}
else if (instructionData[0] == 0xEB)
{
sint32 distance = (sint32)((destinationOffset - (jumpInstructionOffset + 2)));
cemu_assert_debug(distance >= -128 && distance <= 127);
*(uint8*)(instructionData + 1) = (uint8)distance;
}
else
{
assert_dbg();
}
}
void PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
sint32 crRegister = imlInstruction->crRegister;
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // check for sign instead of _BELOW (CF) which is not set by TEST
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
// todo: Set CR SO if XER SO bit is set
PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, crRegister, PPCREC_CR_STATE_TYPE_LOGICAL);
}
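// invoked from generated code to run the HLE function identified by hleFuncId; returns the interpreter instance execution should continue with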
void* ATTR_MS_ABI PPCRecompiler_virtualHLE(PPCInterpreter_t* hCPU, uint32 hleFuncId)
{
void* prevRSPTemp = hCPU->rspTemp;
if( hleFuncId == 0xFFD0 )
{
hCPU->remainingCycles -= 500; // subtract roughly 500 cycles for each HLE call
hCPU->gpr[3] = 0;
PPCInterpreter_nextInstruction(hCPU);
return hCPU;
}
else
{
auto hleCall = PPCInterpreter_getHLECall(hleFuncId);
cemu_assert(hleCall != nullptr);
hleCall(hCPU);
}
hCPU->rspTemp = prevRSPTemp;
return PPCInterpreter_getCurrentInstance();
}
void ATTR_MS_ABI PPCRecompiler_getTBL(PPCInterpreter_t* hCPU, uint32 gprIndex)
{
uint64 coreTime = coreinit::coreinit_getTimerTick();
hCPU->gpr[gprIndex] = (uint32)(coreTime&0xFFFFFFFF);
}
void ATTR_MS_ABI PPCRecompiler_getTBU(PPCInterpreter_t* hCPU, uint32 gprIndex)
{
uint64 coreTime = coreinit::coreinit_getTimerTick();
hCPU->gpr[gprIndex] = (uint32)((coreTime>>32)&0xFFFFFFFF);
}
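// emits x64 code for IML macro operations (BLR/BLRL, BCTR/BCTRL, BL, B_FAR, LEAVE, DEBUGBREAK, COUNT_CYCLES, HLE, MFTB)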
bool PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL )
{
uint32 currentInstructionAddress = imlInstruction->op_macro.param;
// MOV EDX, [SPR_LR]
x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, spr.LR));
// if BLRL, then update SPR LR
if (imlInstruction->operation == PPCREC_IML_MACRO_BLRL)
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), currentInstructionAddress + 4);
// JMP [offset+RDX*(8/4)+R15]
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA4);
x64Gen_writeU8(x64GenContext, 0x57);
x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL )
{
uint32 currentInstructionAddress = imlInstruction->op_macro.param;
// MOV EDX, [SPR_CTR]
x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, spr.CTR));
// if BCTRL, then update SPR LR
if (imlInstruction->operation == PPCREC_IML_MACRO_BCTRL)
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), currentInstructionAddress + 4);
// JMP [offset+RDX*(8/4)+R15]
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA4);
x64Gen_writeU8(x64GenContext, 0x57);
x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_BL )
{
// MOV DWORD [SPR_LinkRegister], newLR
uint32 newLR = imlInstruction->op_macro.param + 4;
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), newLR);
// remember new instruction pointer in RDX
uint32 newIP = imlInstruction->op_macro.param2;
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, newIP);
// since RDX is constant we can use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary
uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL;
if (lookupOffset >= 0x80000000ULL)
{
// JMP [offset+RDX*(8/4)+R15]
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA4);
x64Gen_writeU8(x64GenContext, 0x57);
x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
}
else
{
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA7);
x64Gen_writeU32(x64GenContext, (uint32)lookupOffset);
}
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_B_FAR )
{
// remember new instruction pointer in RDX
uint32 newIP = imlInstruction->op_macro.param2;
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, newIP);
// Since RDX is constant we can use JMP [R15+const_offset] if jumpTableOffset+RDX*2 does not exceed the 2GB boundary
uint64 lookupOffset = (uint64)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL;
if (lookupOffset >= 0x80000000ULL)
{
// JMP [offset+RDX*(8/4)+R15]
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA4);
x64Gen_writeU8(x64GenContext, 0x57);
x64Gen_writeU32(x64GenContext, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
}
else
{
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA7);
x64Gen_writeU32(x64GenContext, (uint32)lookupOffset);
}
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_LEAVE )
{
uint32 currentInstructionAddress = imlInstruction->op_macro.param;
// remember PC value in REG_EDX
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, currentInstructionAddress);
uint32 newIP = 0; // special value for recompiler exit
uint64 lookupOffset = (uint64)&(((PPCRecompilerInstanceData_t*)NULL)->ppcRecompilerDirectJumpTable) + (uint64)newIP * 2ULL;
// JMP [R15+offset]
x64Gen_writeU8(x64GenContext, 0x41);
x64Gen_writeU8(x64GenContext, 0xFF);
x64Gen_writeU8(x64GenContext, 0xA7);
x64Gen_writeU32(x64GenContext, (uint32)lookupOffset);
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK )
{
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, imlInstruction->op_macro.param2);
x64Gen_int3(x64GenContext);
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES )
{
uint32 cycleCount = imlInstruction->op_macro.param;
x64Gen_sub_mem32reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), cycleCount);
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_HLE )
{
uint32 ppcAddress = imlInstruction->op_macro.param;
uint32 funcId = imlInstruction->op_macro.param2;
//x64Gen_int3(x64GenContext);
// update instruction pointer
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), ppcAddress);
//// save hCPU (RSP)
//x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)&ppcRecompilerX64_hCPUTemp);
//x64Emit_mov_mem64_reg64(x64GenContext, REG_RESV_TEMP, 0, REG_RSP);
// set parameters
x64Gen_mov_reg64_reg64(x64GenContext, REG_RCX, REG_RSP);
x64Gen_mov_reg64_imm64(x64GenContext, REG_RDX, funcId);
// restore stackpointer from executionContext/hCPU->rspTemp
x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp));
//x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_R14, 0);
//x64Gen_int3(x64GenContext);
// reserve space on stack for call parameters
x64Gen_sub_reg64_imm32(x64GenContext, REG_RSP, 8*11); // must be uneven number in order to retain stack 0x10 alignment
x64Gen_mov_reg64_imm64(x64GenContext, REG_RBP, 0);
// call HLE function
x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_virtualHLE);
x64Gen_call_reg64(x64GenContext, REG_RAX);
// restore RSP to hCPU (from RAX, result of PPCRecompiler_virtualHLE)
//x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)&ppcRecompilerX64_hCPUTemp);
//x64Emit_mov_reg64_mem64Reg64(x64GenContext, REG_RSP, REG_RESV_TEMP, 0);
x64Gen_mov_reg64_reg64(x64GenContext, REG_RSP, REG_RAX);
// MOV R15, ppcRecompilerInstanceData
x64Gen_mov_reg64_imm64(x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData);
// MOV R13, memory_base
x64Gen_mov_reg64_imm64(x64GenContext, REG_R13, (uint64)memory_base);
// check if cycles were decreased below zero, if yes -> leave recompiler
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NOT_CARRY, 0);
//x64Gen_int3(x64GenContext);
//x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RDX, ppcAddress);
x64Emit_mov_reg64_mem32(x64GenContext, REG_RDX, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer));
// set EAX to 0 (we assume that ppcRecompilerDirectJumpTable[0] will be a recompiler escape function)
x64Gen_xor_reg32_reg32(x64GenContext, REG_RAX, REG_RAX);
// ADD RAX, R15 (R15 -> pointer to ppcRecompilerInstanceData)
x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_R15);
//// JMP [recompilerCallTable+EAX/4*8]
//x64Gen_int3(x64GenContext);
x64Gen_jmp_memReg64(x64GenContext, REG_RAX, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
// check if instruction pointer was changed
// assign new instruction pointer to EAX
x64Emit_mov_reg64_mem32(x64GenContext, REG_RAX, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer));
// remember instruction pointer in REG_EDX
x64Gen_mov_reg64_reg64(x64GenContext, REG_RDX, REG_RAX);
// EAX *= 2
x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_RAX);
// ADD RAX, R15 (R15 -> pointer to ppcRecompilerInstanceData)
x64Gen_add_reg64_reg64(x64GenContext, REG_RAX, REG_R15);
// JMP [ppcRecompilerDirectJumpTable+RAX/4*8]
x64Gen_jmp_memReg64(x64GenContext, REG_RAX, (uint32)offsetof(PPCRecompilerInstanceData_t, ppcRecompilerDirectJumpTable));
return true;
}
else if( imlInstruction->operation == PPCREC_IML_MACRO_MFTB )
{
uint32 ppcAddress = imlInstruction->op_macro.param;
uint32 sprId = imlInstruction->op_macro.param2&0xFFFF;
uint32 gprIndex = (imlInstruction->op_macro.param2>>16)&0x1F;
// update instruction pointer
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), ppcAddress);
// set parameters
x64Gen_mov_reg64_reg64(x64GenContext, REG_RCX, REG_RSP);
x64Gen_mov_reg64_imm64(x64GenContext, REG_RDX, gprIndex);
// restore stackpointer to original RSP
x64Emit_mov_reg64_mem64(x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp));
// push hCPU on stack
x64Gen_push_reg64(x64GenContext, REG_RCX);
// reserve space on stack for call parameters
x64Gen_sub_reg64_imm32(x64GenContext, REG_RSP, 8*11 + 8);
x64Gen_mov_reg64_imm64(x64GenContext, REG_RBP, 0);
// call HLE function
if( sprId == SPR_TBL )
x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_getTBL);
else if( sprId == SPR_TBU )
x64Gen_mov_reg64_imm64(x64GenContext, REG_RAX, (uint64)PPCRecompiler_getTBU);
else
assert_dbg();
x64Gen_call_reg64(x64GenContext, REG_RAX);
// restore hCPU from stack
x64Gen_add_reg64_imm32(x64GenContext, REG_RSP, 8 * 11 + 8);
x64Gen_pop_reg64(x64GenContext, REG_RSP);
// MOV R15, ppcRecompilerInstanceData
x64Gen_mov_reg64_imm64(x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData);
// MOV R13, memory_base
x64Gen_mov_reg64_imm64(x64GenContext, REG_R13, (uint64)memory_base);
return true;
}
else
{
debug_printf("Unknown recompiler macro operation %d\n", imlInstruction->operation);
assert_dbg();
}
return false;
}
/*
* Load from memory
*/
bool PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed)
{
sint32 realRegisterData = tempToRealRegister(imlInstruction->op_storeLoad.registerData);
sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem);
sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER;
if( indexed )
realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2);
if( false )//imlInstruction->op_storeLoad.flags & PPCREC_IML_OP_FLAG_FASTMEMACCESS )
{
// load u8/u16/u32 via direct memory access + optional sign extend
assert_dbg(); // todo
}
else
{
if( indexed && realRegisterMem == realRegisterMem2 )
{
return false;
}
if( indexed && realRegisterData == realRegisterMem2 )
{
			// for indexed memory access realRegisterData must not be the same register as the second memory register;
			// this is easily fixed by swapping realRegisterMem and realRegisterMem2
sint32 temp = realRegisterMem;
realRegisterMem = realRegisterMem2;
realRegisterMem2 = temp;
}
bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend;
bool switchEndian = imlInstruction->op_storeLoad.flags2.swapEndian;
if( imlInstruction->op_storeLoad.copyWidth == 32 )
{
//if( indexed )
// PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if (indexed)
{
x64Gen_lea_reg64Low32_reg64Low32PlusReg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem, realRegisterMem2);
}
if( g_CPUFeatures.x86.movbe && switchEndian )
{
if (indexed)
{
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32);
//if (indexed && realRegisterMem != realRegisterData)
// x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else
{
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
}
}
else
{
if (indexed)
{
x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32);
//if (realRegisterMem != realRegisterData)
// x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
if (switchEndian)
x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData);
}
else
{
x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if (switchEndian)
x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData);
}
}
}
else if( imlInstruction->op_storeLoad.copyWidth == 16 )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); // todo: We can avoid this if MOVBE is available
if (indexed)
{
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
if( g_CPUFeatures.x86.movbe && switchEndian )
{
x64Gen_movBEZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else
{
x64Gen_movZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
if( switchEndian )
x64Gen_rol_reg64Low16_imm8(x64GenContext, realRegisterData, 8);
}
if( signExtend )
x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData);
else
x64Gen_movZeroExtend_reg64Low32_reg64Low16(x64GenContext, realRegisterData, realRegisterData);
}
else if( imlInstruction->op_storeLoad.copyWidth == 8 )
{
if( indexed )
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// todo: Optimize by using only MOVZX/MOVSX
if( indexed )
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
// todo: Use sign extend move from memory instead of separate sign-extend?
if( signExtend )
x64Gen_movSignExtend_reg64Low32_mem8Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
else
x64Emit_movZX_reg32_mem8(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else if( imlInstruction->op_storeLoad.copyWidth == PPC_REC_LOAD_LWARX_MARKER )
{
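			// lwarx emulation: the effective address and the value that was loaded are remembered in
			// hCPU->reservedMemAddr / hCPU->reservedMemValue. A later stwcx. (see the STWCX marker in the store
			// handler) only succeeds if the word at the reserved address is still unchanged, which is verified
			// there with a LOCK CMPXCHG.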
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->op_storeLoad.immS32 != 0 )
assert_dbg(); // not supported
if( indexed )
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, reservedMemAddr), realRegisterMem); // remember EA for reservation
x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
if( switchEndian )
x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, reservedMemValue), realRegisterData); // remember value for reservation
// LWARX instruction costs extra cycles (this speeds up busy loops)
x64Gen_sub_mem32reg64_imm32(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 20);
}
else if( imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_LSWI_3 )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( switchEndian == false )
assert_dbg();
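			// used for lswi when 3 bytes remain: load the full big-endian word and clear the lowest byte,
			// so only the upper three bytes are kept in the register (mask 0xFFFFFF00 below)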
if( indexed )
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2); // can be replaced with LEA temp, [memReg1+memReg2] (this way we can avoid the SUB instruction after the move)
if( g_CPUFeatures.x86.movbe )
{
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else
{
x64Emit_mov_reg32_mem32(x64GenContext, realRegisterData, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32);
if( indexed && realRegisterMem != realRegisterData )
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_bswap_reg64Lower32bit(x64GenContext, realRegisterData);
}
x64Gen_and_reg64Low32_imm32(x64GenContext, realRegisterData, 0xFFFFFF00);
}
else
return false;
return true;
}
return false;
}
/*
* Write to memory
*/
bool PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed)
{
sint32 realRegisterData = tempToRealRegister(imlInstruction->op_storeLoad.registerData);
sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem);
sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER;
if (indexed)
realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2);
if (false)//imlInstruction->op_storeLoad.flags & PPCREC_IML_OP_FLAG_FASTMEMACCESS )
{
		// store u8/u16/u32 via direct memory access
assert_dbg(); // todo
}
else
{
if (indexed && realRegisterMem == realRegisterMem2)
{
return false;
}
if (indexed && realRegisterData == realRegisterMem2)
{
			// for indexed memory access realRegisterData must not be the same register as the second memory register;
			// this is easily fixed by swapping realRegisterMem and realRegisterMem2
sint32 temp = realRegisterMem;
realRegisterMem = realRegisterMem2;
realRegisterMem2 = temp;
}
bool signExtend = imlInstruction->op_storeLoad.flags2.signExtend;
bool swapEndian = imlInstruction->op_storeLoad.flags2.swapEndian;
if (imlInstruction->op_storeLoad.copyWidth == 32)
{
if (indexed)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
uint32 valueRegister;
if ((swapEndian == false || g_CPUFeatures.x86.movbe) && realRegisterMem != realRegisterData)
{
valueRegister = realRegisterData;
}
else
{
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
valueRegister = REG_RESV_TEMP;
}
if (g_CPUFeatures.x86.movbe == false && swapEndian)
x64Gen_bswap_reg64Lower32bit(x64GenContext, valueRegister);
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
if (g_CPUFeatures.x86.movbe && swapEndian)
x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister);
else
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, valueRegister);
if (indexed)
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else if (imlInstruction->op_storeLoad.copyWidth == 16)
{
if (indexed || swapEndian)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
if (swapEndian)
x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8);
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
if (indexed)
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
// todo: Optimize this, e.g. by using MOVBE
}
else if (imlInstruction->op_storeLoad.copyWidth == 8)
{
if (indexed)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if (indexed && realRegisterMem == realRegisterData)
{
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
realRegisterData = REG_RESV_TEMP;
}
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32, realRegisterData);
if (indexed)
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER)
{
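			// stwcx. emulation sketch: CR0[LT/GT/EQ] are cleared first, then the store is only attempted if the
			// effective address still matches hCPU->reservedMemAddr. In that case LOCK CMPXCHG compares the memory
			// word against the remembered reservedMemValue (loaded into EAX and byteswapped to match memory order)
			// and writes the new value on success; CR0.EQ is then derived from the resulting ZF.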
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if (imlInstruction->op_storeLoad.immS32 != 0)
assert_dbg(); // todo
// reset cr0 LT, GT and EQ
sint32 crRegister = 0;
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_LT), 0);
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_GT), 0);
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ), 0);
			// prepare the store value in REG_RESV_TEMP and calculate the effective address
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
if (swapEndian)
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
// realRegisterMem now holds EA
x64Gen_cmp_reg64Low32_mem32reg64(x64GenContext, realRegisterMem, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemAddr));
sint32 jumpInstructionOffsetJumpToEnd = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NOT_EQUAL, 0);
// EA matches reservation
			// backup EAX (it is an implicit operand of CMPXCHG and will be overwritten)
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX);
// backup REG_RESV_MEMBASE
x64Emit_mov_mem64_reg64(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[2]), REG_RESV_MEMBASE);
// add mem register to REG_RESV_MEMBASE
x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem);
// load reserved value in EAX
x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RESV_HCPU, offsetof(PPCInterpreter_t, reservedMemValue));
// bswap EAX
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_EAX);
//x64Gen_lock_cmpxchg_mem32Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, realRegisterMem, 0, REG_RESV_TEMP);
x64Gen_lock_cmpxchg_mem32Reg64_reg64(x64GenContext, REG_RESV_MEMBASE, 0, REG_RESV_TEMP);
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ));
// reset reservation
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, reservedMemAddr), 0);
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, reservedMemValue), 0);
// restore EAX
x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]));
// restore REG_RESV_MEMBASE
x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_MEMBASE, REG_RESV_HCPU, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[2]));
// copy XER SO to CR0 SO
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.XER), 31);
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RESV_HCPU, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_SO));
// end
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffsetJumpToEnd, x64GenContext->codeBufferIndex);
}
else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STSWI_2)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 16); // store upper 2 bytes ..
x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); // .. as big-endian
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
if (indexed)
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else if (imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STSWI_3)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, realRegisterData);
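			// store the upper three bytes of the value in big-endian order: after each SHR by 8 the next more
			// significant byte sits in the low 8 bits and is written to offset +2, +1 and +0 respectively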
if (indexed)
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8);
x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 2, REG_RESV_TEMP);
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8);
x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 1, REG_RESV_TEMP);
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 8);
x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32 + 0, REG_RESV_TEMP);
if (indexed)
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else
return false;
return true;
}
return false;
}
/*
* Copy byte/word/dword from memory to memory
*/
void PPCRecompilerX64Gen_imlInstruction_mem2mem(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
sint32 realSrcMemReg = tempToRealRegister(imlInstruction->op_mem2mem.src.registerMem);
sint32 realSrcMemImm = imlInstruction->op_mem2mem.src.immS32;
sint32 realDstMemReg = tempToRealRegister(imlInstruction->op_mem2mem.dst.registerMem);
sint32 realDstMemImm = imlInstruction->op_mem2mem.dst.immS32;
// PPCRecompilerX64Gen_crConditionFlags_forget() is not needed here, since MOVs don't affect eflags
if (imlInstruction->op_mem2mem.copyWidth == 32)
{
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realSrcMemReg, realSrcMemImm);
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realDstMemReg, realDstMemImm, REG_RESV_TEMP);
}
else
{
assert_dbg();
}
}
bool PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)
{
// registerResult = registerA
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
if (imlInstruction->crMode == PPCREC_CR_MODE_LOGICAL)
{
// since MOV doesn't set eflags we need another test instruction
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult));
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
else
{
assert_dbg();
}
}
else
{
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
}
else if (imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP)
{
// registerResult = endianSwap32(registerA)
if (imlInstruction->op_r_r.registerA != imlInstruction->op_r_r.registerResult)
assert_dbg();
x64Gen_bswap_reg64Lower32bit(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult));
}
else if( imlInstruction->operation == PPCREC_IML_OP_ADD )
{
// registerResult += registerA
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32 )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_movSignExtend_reg64Low32_reg64Low8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode == PPCREC_CR_MODE_ARITHMETIC )
{
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult));
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation\n");
assert_dbg();
}
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_OR || imlInstruction->operation == PPCREC_IML_OP_AND || imlInstruction->operation == PPCREC_IML_OP_XOR )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->operation == PPCREC_IML_OP_OR )
{
// registerResult |= registerA
x64Gen_or_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
else if( imlInstruction->operation == PPCREC_IML_OP_AND )
{
// registerResult &= registerA
x64Gen_and_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
else
{
// registerResult ^= registerA
x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_NOT )
{
// copy register content if different registers
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA )
{
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
// NOT destination register
x64Gen_not_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult));
// update cr bits
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// NOT instruction does not update flags, so we have to generate an additional TEST instruction
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult));
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_CNTLZW )
{
// count leading zeros
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
if( g_CPUFeatures.x86.lzcnt )
{
x64Gen_lzcnt_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
else
{
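			// fallback without LZCNT, based on BSR (which is undefined for a zero input, hence the explicit check):
			//   if (x == 0) result = 32;
			//   else        result = 31 - bsr(x);   // implemented below as neg(bsr(x)) + 31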
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerA), tempToRealRegister(imlInstruction->op_r_r.registerA));
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0);
x64Gen_bsr_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
x64Gen_neg_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult));
x64Gen_add_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 32-1);
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
x64Gen_mov_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 32);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED )
{
// registerA CMP registerB (arithmetic compare)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->crRegister == PPC_REC_INVALID_REGISTER )
{
return false; // a NO-OP instruction
}
if( imlInstruction->crRegister >= 8 )
{
return false;
}
// update state of cr register
if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED )
PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC);
else
PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC);
// create compare instruction
x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
// set cr bits
sint32 crRegister = imlInstruction->crRegister;
if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED )
{
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_LESS, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
// todo: Also set summary overflow if xer bit is set
}
else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED )
{
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
// todo: Also set summary overflow if xer bit is set
}
else
assert_dbg();
}
else if( imlInstruction->operation == PPCREC_IML_OP_NEG )
{
// copy register content if different registers
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA )
{
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
// NEG destination register
x64Gen_neg_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult));
// update cr bits
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// copy operand to result if different registers
if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA )
{
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
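		// carry handling pattern used throughout this file: xer_ca is stored as a byte in the hCPU struct.
		// BT on bit 0 loads it into EFLAGS.CF, ADC consumes it, and SETC writes the resulting carry back to xer_ca.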
// copy xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// add carry bit
x64Gen_adc_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), 0);
// update xer carry
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// set cr bits
sint32 crRegister = imlInstruction->crRegister;
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // check for sign instead of _BELOW (CF) which is not set by AND/OR
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
// todo: Use different version of PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction)
// todo: Also set summary overflow if xer bit is set
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_ME )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// copy operand to result if different registers
if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA )
{
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerA));
}
// copy xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// add carry bit
x64Gen_adc_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), (uint32)-1);
// update xer carry
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// set cr bits
sint32 crRegister = imlInstruction->crRegister;
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult));
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY )
{
// registerResult = ~registerOperand1 + carry
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r.registerA);
// copy operand to result register
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
// execute NOT on result
x64Gen_not_reg64Low32(x64GenContext, rRegResult);
// copy xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// add carry
x64Gen_adc_reg64Low32_imm32(x64GenContext, rRegResult, 0);
// update carry
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
// update cr if requested
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode == PPCREC_CR_MODE_LOGICAL )
{
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
else
{
assert_dbg();
}
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32 )
{
// registerResult = (uint32)(sint32)(sint16)registerA
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), reg32ToReg16(tempToRealRegister(imlInstruction->op_r_r.registerA)));
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode == PPCREC_CR_MODE_ARITHMETIC )
{
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r.registerResult), tempToRealRegister(imlInstruction->op_r_r.registerResult));
// set cr bits
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation\n");
assert_dbg();
}
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_DCBZ )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
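		// dcbz zeroes an aligned 32-byte cache block: compute the effective address, align it down to 32 bytes
		// (AND with ~0x1F), add the memory base and clear the block with four 8-byte stores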
if( imlInstruction->op_r_r.registerResult != imlInstruction->op_r_r.registerA )
{
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerA));
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerResult));
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, ~0x1F);
x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE);
for(sint32 f=0; f<0x20; f+=8)
x64Gen_mov_mem64Reg64_imm32(x64GenContext, REG_RESV_TEMP, f, 0);
}
else
{
// calculate effective address
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r.registerA));
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, ~0x1F);
x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE);
for(sint32 f=0; f<0x20; f+=8)
x64Gen_mov_mem64Reg64_imm32(x64GenContext, REG_RESV_TEMP, f, 0);
}
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation);
return false;
}
return true;
}
bool PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN )
{
// registerResult = immS32
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_mov_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_ADD )
{
// registerResult += immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_add_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUB )
{
// registerResult -= immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if (imlInstruction->crRegister == PPCREC_CR_REG_TEMP)
{
// do nothing -> SUB is for BDNZ instruction
}
else if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
// update cr register
assert_dbg();
}
x64Gen_sub_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_AND )
{
// registerResult &= immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_and_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
// set cr bits
sint32 crRegister = imlInstruction->crRegister;
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
// todo: Set CR SO if XER SO bit is set
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_OR )
{
// registerResult |= immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_or_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_XOR )
{
// registerResult ^= immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_xor_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE )
{
// registerResult <<<= immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
if( (imlInstruction->op_r_immS32.immS32&0x80) )
assert_dbg(); // should not happen
x64Gen_rol_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), (uint8)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED )
{
// registerResult CMP immS32 (arithmetic compare)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->crRegister == PPC_REC_INVALID_REGISTER )
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): No-Op CMP found\n");
return true; // a NO-OP instruction
}
if( imlInstruction->crRegister >= 8 )
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): Unsupported CMP with crRegister = 8\n");
return false;
}
// update state of cr register
if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED )
PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC);
else
PPCRecompilerX64Gen_crConditionFlags_set(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction->crRegister, PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC);
// create compare instruction
x64Gen_cmp_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_immS32.registerIndex), imlInstruction->op_r_immS32.immS32);
// set cr bits
uint32 crRegister = imlInstruction->crRegister;
if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED )
{
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_LESS, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
}
else if( imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED )
{
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_LT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_GT))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
if( (imlInstruction->crIgnoreMask&(1<<(crRegister*4+PPCREC_CR_BIT_EQ))) == 0 )
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
}
else
assert_dbg();
// todo: Also set summary overflow if xer bit is set?
}
else if( imlInstruction->operation == PPCREC_IML_OP_MFCR )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
uint32 destRegister = tempToRealRegister(imlInstruction->op_r_immS32.registerIndex);
x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, destRegister, destRegister);
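		// mfcr: each of the 32 CR bits is kept as a separate byte in hCPU->cr. BT loads bit 0 of the byte into CF
		// and ADC dest,dest shifts the destination left by one while appending that bit, assembling the CR value
		// MSB-first over 32 iterations.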
for(sint32 f=0; f<32; f++)
{
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+f, 0);
x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, destRegister, destRegister);
}
}
else if (imlInstruction->operation == PPCREC_IML_OP_MTCRF)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
uint32 srcRegister = tempToRealRegister(imlInstruction->op_r_immS32.registerIndex);
uint32 crBitMask = ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32);
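		// mtcrf: crBitMask selects which of the 32 CR bit-bytes are written. For each selected bit the byte is
		// cleared first, then the matching bit of the source value is tested (0x80000000 >> f, i.e. MSB-first)
		// and SETNE stores the result.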
for (sint32 f = 0; f < 32; f++)
{
if(((crBitMask >> f) & 1) == 0)
continue;
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_ESP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8) * (f), 0);
x64Gen_test_reg64Low32_imm32(x64GenContext, srcRegister, 0x80000000>>f);
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_NOT_EQUAL, REG_ESP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8) * (f));
}
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_s32(): Unsupported operation 0x%x\n", imlInstruction->operation);
return false;
}
return true;
}
bool PPCRecompilerX64Gen_imlInstruction_conditional_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)
{
// registerResult = immS32 (conditional)
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
assert_dbg();
}
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)imlInstruction->op_conditional_r_s32.immS32);
uint8 crBitIndex = imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex;
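		// fast path: if the CR field being tested is still represented by the current EFLAGS state (tracked via
		// activeCRRegister/activeCRState), the conditional assignment can be done with a single CMOVcc on the
		// matching condition code; otherwise fall back to reloading the CR bit from memory via BT further below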
if (imlInstruction->op_conditional_r_s32.crRegisterIndex == x64GenContext->activeCRRegister)
{
if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC)
{
if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_CARRY : X86_CONDITION_NOT_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_UNSIGNED_ABOVE : X86_CONDITION_UNSIGNED_BELOW_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
}
else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC)
{
if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_LESS : X86_CONDITION_SIGNED_GREATER_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
}
else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_LOGICAL)
{
if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_LT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGN : X86_CONDITION_NOT_SIGN, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_EQ)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
else if (imlInstruction->op_conditional_r_s32.crBitIndex == CR_BIT_GT)
{
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, imlInstruction->op_conditional_r_s32.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
}
}
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + crBitIndex * sizeof(uint8), 0);
if (imlInstruction->op_conditional_r_s32.bitMustBeSet)
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, X86_CONDITION_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
else
x64Gen_cmovcc_reg64Low32_reg64Low32(x64GenContext, X86_CONDITION_NOT_CARRY, tempToRealRegister(imlInstruction->op_conditional_r_s32.registerIndex), REG_RESV_TEMP);
return true;
}
return false;
}
bool PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
if( imlInstruction->operation == PPCREC_IML_OP_ADD || imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY || imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY )
{
// registerResult = registerOperand1 + registerOperand2
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
bool addCarry = imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY;
if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) )
{
// be careful not to overwrite the operand before we use it
if( rRegResult == rRegOperand1 )
{
if( addCarry )
{
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
else
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
else
{
if( addCarry )
{
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
}
else
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
}
}
else
{
// copy operand1 to destination register before doing addition
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1);
// add operand2
if( addCarry )
{
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
else
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
// update carry
if( imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY || imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_UPDATE_CARRY )
{
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
}
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
sint32 crRegister = imlInstruction->crRegister;
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
return true;
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUB )
{
// registerResult = registerOperand1 - registerOperand2
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
if( rRegOperand1 == rRegOperand2 )
{
// result = operand1 - operand1 -> 0
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
}
else if( rRegResult == rRegOperand1 )
{
// result = result - operand2
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
else if ( rRegResult == rRegOperand2 )
{
// result = operand1 - result
// NEG result
x64Gen_neg_reg64Low32(x64GenContext, rRegResult);
// ADD result, operand1
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
}
else
{
			// copy operand1 to the destination register before doing the subtraction
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1);
// sub operand2
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
sint32 crRegister = imlInstruction->crRegister;
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
return true;
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY )
{
// registerResult = registerOperand1 - registerOperand2 + carry
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
if( rRegOperand1 == rRegOperand2 )
{
			// copy inverted xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_cmc(x64GenContext);
			// result = operand1 - operand1 - borrow -> 0 or -1 depending on carry
x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
}
else if( rRegResult == rRegOperand1 )
{
// copy inverted xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_cmc(x64GenContext);
// result = result - operand2
x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
else if ( rRegResult == rRegOperand2 )
{
// result = operand1 - result
// NOT result
x64Gen_not_reg64Low32(x64GenContext, rRegResult);
// copy xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// ADC result, operand1
x64Gen_adc_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
}
else
{
			// copy operand1 to the destination register before doing the subtraction
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1);
			// copy inverted xer_ca to eflags carry
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
x64Gen_cmc(x64GenContext);
// sub operand2
x64Gen_sbb_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
// update carry flag (todo: is this actually correct in all cases?)
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
// update cr0 if requested
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
assert_dbg();
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED )
{
// registerResult = registerOperand1 * registerOperand2
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
if( (rRegResult == rRegOperand1) || (rRegResult == rRegOperand2) )
{
// be careful not to overwrite the operand before we use it
if( rRegResult == rRegOperand1 )
x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
else
x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
}
else
{
// copy operand1 to destination register before doing multiplication
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand1);
			// multiply by operand2
x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand2);
}
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
// since IMUL instruction leaves relevant flags undefined, we have to use another TEST instruction to get the correct results
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUBFC )
{
// registerResult = registerOperand2(rB) - registerOperand1(rA)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// updates carry flag
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
return false;
}
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperandA = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperandB = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
// update carry flag
// carry flag is detected this way:
		//if ((~a+b) < ~a) {
// return true;
//}
//if ((~a+b+1) < 1) {
// return true;
//}
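		// worked example of the carry rule above:
		//   a=5, b=7 -> ~a+b = 0xFFFFFFFA+7 wraps to 1 (< ~a), so ca=1 (b >= a, no borrow)
		//   a=7, b=5 -> ~a+b = 0xFFFFFFF8+5 = 0xFFFFFFFD (no wrap) and ~a+b+1 != 0, so ca stays 0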
// set carry to zero
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// ((~a+b)<~a) == true -> ca = 1
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperandA);
x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperandB);
x64Gen_not_reg64Low32(x64GenContext, rRegOperandA);
x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperandA);
x64Gen_not_reg64Low32(x64GenContext, rRegOperandA);
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0);
		// set xer_ca (carry) to 1; the conditional jump above lands just after this
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
// OR ((~a+b+1)<1) == true -> ca = 1
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperandA);
// todo: Optimize by reusing result in REG_RESV_TEMP from above and only add 1
x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperandB);
x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1);
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1);
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0);
		// set xer_ca (carry) to 1; the conditional jump above lands just after this
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
// do subtraction
if( rRegOperandB == rRegOperandA )
{
// result = operandA - operandA -> 0
x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
}
else if( rRegResult == rRegOperandB )
{
// result = result - operandA
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandA);
}
else if ( rRegResult == rRegOperandA )
{
// result = operandB - result
// NEG result
x64Gen_neg_reg64Low32(x64GenContext, rRegResult);
// ADD result, operandB
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandB);
}
else
{
			// copy operandB to the destination register before doing the subtraction
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperandB);
			// subtract operandA
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperandA);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SLW || imlInstruction->operation == PPCREC_IML_OP_SRW )
{
		// registerResult = registerOperand1(rA) << or >> registerOperand2(rB); shift amounts of 32..63 yield 0 per PPC SLW/SRW semantics
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == PPCREC_IML_OP_SRW)
{
// use BMI2 SHRX if available
x64Gen_shrx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2);
}
else if (g_CPUFeatures.x86.bmi2 && imlInstruction->operation == PPCREC_IML_OP_SLW)
{
// use BMI2 SHLX if available
x64Gen_shlx_reg64_reg64_reg64(x64GenContext, rRegResult, rRegOperand1, rRegOperand2);
x64Gen_and_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult); // trim result to 32bit
}
else
{
// lazy and slow way to do shift by register without relying on ECX/CL or BMI2
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1);
for (sint32 b = 0; b < 6; b++)
{
x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1 << b));
sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set
if (b == 5)
{
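					// bit 5 of the shift amount selects shifts of 32..63, for which PPC SLW/SRW produce 0, so clear the register outright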
x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
}
else
{
if (imlInstruction->operation == PPCREC_IML_OP_SLW)
x64Gen_shl_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b));
else
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1 << b));
}
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex);
}
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP);
}
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
// todo: Use BMI2 rotate if available
// check if CL/ECX/RCX is available
if( rRegResult != REG_RCX && rRegOperand1 != REG_RCX && rRegOperand2 != REG_RCX )
{
// swap operand 2 with RCX
x64Gen_xchg_reg64_reg64(x64GenContext, REG_RCX, rRegOperand2);
// move operand 1 to temp register
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1);
// rotate
x64Gen_rol_reg64Low32_cl(x64GenContext, REG_RESV_TEMP);
// undo swap operand 2 with RCX
x64Gen_xchg_reg64_reg64(x64GenContext, REG_RCX, rRegOperand2);
// copy to result register
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP);
}
else
{
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1);
// lazy and slow way to do shift by register without relying on ECX/CL
for(sint32 b=0; b<5; b++)
{
x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<<b));
sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set
x64Gen_rol_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b));
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex);
}
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP);
}
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SRAW )
{
// registerResult = (sint32)registerOperand1(rA) >> (sint32)registerOperand2(rB) (up to 63 bits)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
		// CR update is not supported for this operation
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
return false;
}
// todo: Use BMI instructions if available?
		// copy operand1 into the temp register; the shifted value is built there and moved into the result register at the end
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand1);
// reset carry
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// we use the same shift by register approach as in SLW/SRW, but we have to differentiate by signed/unsigned shift since it influences how the carry flag is set
x64Gen_test_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 0x80000000);
sint32 jumpInstructionJumpToSignedShift = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_EQUAL, 0);
//sint32 jumpInstructionJumpToEnd = x64GenContext->codeBufferIndex;
//x64Gen_jmpc(x64GenContext, X86_CONDITION_EQUAL, 0);
// unsigned shift (MSB of input register is not set)
for(sint32 b=0; b<6; b++)
{
x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<<b));
sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set
if( b == 5 )
{
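				// x86 masks a 32-bit shift count to 5 bits, so a shift by 32 must be emulated with two shifts of 16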
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b)/2);
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b)/2);
}
else
{
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b));
}
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex);
}
sint32 jumpInstructionJumpToEnd = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NONE, 0);
// signed shift
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToSignedShift, x64GenContext->codeBufferIndex);
for(sint32 b=0; b<6; b++)
{
// check if we need to shift by (1<<bit)
x64Gen_test_reg64Low32_imm32(x64GenContext, rRegOperand2, (1<<b));
sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if bit not set
// set ca if any non-zero bit is shifted out
x64Gen_test_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (1<<(1<<b))-1);
sint32 jumpInstructionJumpToAfterCa = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 0); // jump if no bit is set
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToAfterCa, x64GenContext->codeBufferIndex);
// arithmetic shift
if( b == 5 )
{
// copy sign bit into all bits
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b)/2);
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b)/2);
}
else
{
x64Gen_sar_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (1<<b));
}
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex);
}
// end
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionJumpToEnd, x64GenContext->codeBufferIndex);
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_RESV_TEMP);
// update CR if requested
// todo
}
else if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_DIVIDE_UNSIGNED )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), REG_EDX);
// mov operand 2 to temp register
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2);
// mov operand1 to EAX
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_EAX, rRegOperand1);
// sign or zero extend EAX to EDX:EAX based on division sign mode
if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED )
x64Gen_cdq(x64GenContext);
else
x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_EDX, REG_EDX);
// make sure we avoid division by zero
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_EQUAL, 3);
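		// if the divisor is zero, the near jump above skips over the following DIV/IDIV (encoded in 3 bytes) to avoid a #DE exception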
// divide
if( imlInstruction->operation == PPCREC_IML_OP_DIVIDE_SIGNED )
x64Gen_idiv_reg64Low32(x64GenContext, REG_RESV_TEMP);
else
x64Gen_div_reg64Low32(x64GenContext, REG_RESV_TEMP);
// result of division is now stored in EAX, move it to result register
if( rRegResult != REG_EAX )
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_EAX);
// restore EAX / EDX
if( rRegResult != REG_RAX )
x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]));
if( rRegResult != REG_RDX )
x64Emit_mov_reg64_mem32(x64GenContext, REG_EDX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]));
// set cr bits if requested
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_ARITHMETIC )
{
assert_dbg();
}
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED || imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_UNSIGNED )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]), REG_EAX);
x64Emit_mov_mem32_reg32(x64GenContext, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]), REG_EDX);
// mov operand 2 to temp register
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2);
// mov operand1 to EAX
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_EAX, rRegOperand1);
		if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED )
		{
			// clear EDX (note: the one-operand IMUL below overwrites EDX anyway, so this has no effect on the result)
			x64Gen_xor_reg64Low32_reg64Low32(x64GenContext, REG_EDX, REG_EDX);
		}
		else
		{
			// sign-extend EAX into EDX (note: the one-operand MUL below overwrites EDX anyway, so this has no effect on the result)
			x64Gen_cdq(x64GenContext);
		}
// multiply
if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_HIGH_SIGNED )
x64Gen_imul_reg64Low32(x64GenContext, REG_RESV_TEMP);
else
x64Gen_mul_reg64Low32(x64GenContext, REG_RESV_TEMP);
		// the 64-bit product is now in EDX:EAX; the high half (EDX) is the result
if( rRegResult != REG_EDX )
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, REG_EDX);
// restore EAX / EDX
if( rRegResult != REG_RAX )
x64Emit_mov_reg64_mem32(x64GenContext, REG_EAX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[0]));
if( rRegResult != REG_RDX )
x64Emit_mov_reg64_mem32(x64GenContext, REG_EDX, REG_RSP, (uint32)offsetof(PPCInterpreter_t, temporaryGPR[1]));
// set cr bits if requested
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegResult);
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_ORC )
{
// registerResult = registerOperand1 | ~registerOperand2
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_r.registerResult);
sint32 rRegOperand1 = tempToRealRegister(imlInstruction->op_r_r_r.registerA);
sint32 rRegOperand2 = tempToRealRegister(imlInstruction->op_r_r_r.registerB);
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand2);
x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP);
if( rRegResult != rRegOperand1 )
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, rRegResult, rRegOperand1);
x64Gen_or_reg64Low32_reg64Low32(x64GenContext, rRegResult, REG_RESV_TEMP);
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
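			// the OR above already set SF/ZF from the result, so no extra TEST is needed before the CR update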
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
return true;
}
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r_r(): Unsupported operation 0x%x\n", imlInstruction->operation);
return false;
}
return true;
}
bool PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
if( imlInstruction->operation == PPCREC_IML_OP_ADD )
{
// registerResult = registerOperand + immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult);
sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA);
uint32 immU32 = (uint32)imlInstruction->op_r_r_s32.immS32;
if( rRegResult != rRegOperand )
{
// copy value to destination register before doing addition
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand);
}
x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immU32);
}
else if( imlInstruction->operation == PPCREC_IML_OP_ADD_UPDATE_CARRY )
{
// registerResult = registerOperand + immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult);
sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA);
uint32 immU32 = (uint32)imlInstruction->op_r_r_s32.immS32;
if( rRegResult != rRegOperand )
{
// copy value to destination register before doing addition
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand);
}
x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immU32);
// update carry flag
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_CARRY, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
// set cr bits if enabled
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
if( imlInstruction->crMode != PPCREC_CR_MODE_LOGICAL )
{
assert_dbg();
}
sint32 crRegister = imlInstruction->crRegister;
//x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT));
//x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
//x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_SUBFC )
{
// registerResult = immS32 - registerOperand
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult);
sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA);
sint32 immS32 = (sint32)imlInstruction->op_r_r_s32.immS32;
if( rRegResult != rRegOperand )
{
			// copy the operand value to the destination register before computing the result
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand);
}
// set carry to zero
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// ((~a+b)<~a) == true -> ca = 1
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand);
x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP);
x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)immS32);
x64Gen_not_reg64Low32(x64GenContext, rRegOperand);
x64Gen_cmp_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, rRegOperand);
x64Gen_not_reg64Low32(x64GenContext, rRegOperand);
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0);
		// carry-out detected -> set xer_ca to 1 (the jump above skips this write and lands right after it)
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
// OR ((~a+b+1)<1) == true -> ca = 1
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, rRegOperand);
// todo: Optimize by reusing result in REG_RESV_TEMP from above and only add 1
x64Gen_not_reg64Low32(x64GenContext, REG_RESV_TEMP);
x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (uint32)immS32);
x64Gen_add_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1);
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1);
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE_EQUAL, 0);
		// carry-out detected -> set xer_ca to 1 (the jump above skips this write and lands right after it)
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 1);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
// do actual computation of value, note: a - b is equivalent to a + ~b + 1
x64Gen_not_reg64Low32(x64GenContext, rRegResult);
x64Gen_add_reg64Low32_imm32(x64GenContext, rRegResult, (uint32)immS32 + 1);
}
else if( imlInstruction->operation == PPCREC_IML_OP_RLWIMI )
{
		// registerResult = (registerResult & ~mask) | ((registerOperand(rS) <<< SH) & mask)
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
uint32 vImm = (uint32)imlInstruction->op_r_r_s32.immS32;
uint32 mb = (vImm>>0)&0xFF;
uint32 me = (vImm>>8)&0xFF;
uint32 sh = (vImm>>16)&0xFF;
uint32 mask = ppc_mask(mb, me);
		// CR update is not supported here
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// copy rS to temporary register
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, tempToRealRegister(imlInstruction->op_r_r_s32.registerA));
// rotate destination register
if( sh )
x64Gen_rol_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (uint8)sh&0x1F);
// AND destination register with inverted mask
x64Gen_and_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), ~mask);
// AND temporary rS register with mask
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, mask);
// OR result with temporary
x64Gen_or_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), REG_RESV_TEMP);
}
else if( imlInstruction->operation == PPCREC_IML_OP_MULTIPLY_SIGNED )
{
// registerResult = registerOperand * immS32
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 rRegResult = tempToRealRegister(imlInstruction->op_r_r_s32.registerResult);
sint32 rRegOperand = tempToRealRegister(imlInstruction->op_r_r_s32.registerA);
sint32 immS32 = (uint32)imlInstruction->op_r_r_s32.immS32;
x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (sint64)immS32); // todo: Optimize
if( rRegResult != rRegOperand )
x64Gen_mov_reg64_reg64(x64GenContext, rRegResult, rRegOperand);
x64Gen_imul_reg64Low32_reg64Low32(x64GenContext, rRegResult, REG_RESV_TEMP);
}
else if( imlInstruction->operation == PPCREC_IML_OP_SRAW )
{
// registerResult = registerOperand>>SH and set xer ca flag
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
uint32 sh = (uint32)imlInstruction->op_r_r_s32.immS32;
// MOV registerResult, registerOperand (if different)
if( imlInstruction->op_r_r_s32.registerA != imlInstruction->op_r_r_s32.registerResult )
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerA));
// todo: Detect if we don't need to update carry
// generic case
		// TEST registerResult, (1<<SH)-1  (the bits that will be shifted out)
		uint32 caTestMask = 0;
		if (sh >= 31)
			caTestMask = 0x7FFFFFFF; // also avoids the undefined behavior of (1 << 31) on a signed int
		else
			caTestMask = (1 << (sh)) - 1;
x64Gen_test_reg64Low32_imm32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), caTestMask);
// SETNE/NZ [ESP+XER_CA]
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_NOT_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, xer_ca));
// SAR registerResult, SH
x64Gen_sar_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), sh);
		// JS <skipInstruction> (keep the carry computed above only if the result is negative)
sint32 jumpInstructionOffset = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGN, 0); // todo: Can use 2-byte form of jump instruction here
// MOV BYTE [ESP+xer_ca], 0
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, xer_ca), 0);
// jump destination
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset, x64GenContext->codeBufferIndex);
// CR update
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
sint32 crRegister = imlInstruction->crRegister;
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerResult));
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGN, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_LT));
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_SIGNED_GREATER, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_GT));
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*(crRegister * 4 + PPCREC_CR_BIT_EQ));
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT ||
imlInstruction->operation == PPCREC_IML_OP_RIGHT_SHIFT )
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// MOV registerResult, registerOperand (if different)
if( imlInstruction->op_r_r_s32.registerA != imlInstruction->op_r_r_s32.registerResult )
x64Gen_mov_reg64_reg64(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerA));
// Shift
if( imlInstruction->operation == PPCREC_IML_OP_LEFT_SHIFT )
x64Gen_shl_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), imlInstruction->op_r_r_s32.immS32);
else
x64Gen_shr_reg64Low32_imm8(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), imlInstruction->op_r_r_s32.immS32);
// CR update
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
			// the eflags left by SHL/SHR are not reliable for the CR update (a shift count of 0 leaves them unchanged), so do an explicit TEST reg,reg here
x64Gen_test_reg64Low32_reg64Low32(x64GenContext, tempToRealRegister(imlInstruction->op_r_r_s32.registerResult), tempToRealRegister(imlInstruction->op_r_r_s32.registerResult));
PPCRecompilerX64Gen_updateCRLogical(PPCRecFunction, ppcImlGenContext, x64GenContext, imlInstruction);
}
}
else
{
debug_printf("PPCRecompilerX64Gen_imlInstruction_r_r_s32(): Unsupported operation 0x%x\n", imlInstruction->operation);
return false;
}
return true;
}
bool PPCRecompilerX64Gen_imlInstruction_conditionalJump(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlSegment_t* imlSegment, PPCRecImlInstruction_t* imlInstruction)
{
if( imlInstruction->op_conditionalJump.condition == PPCREC_JUMP_CONDITION_NONE )
{
// jump always
if (imlInstruction->op_conditionalJump.jumpAccordingToSegment)
{
// jump to segment
if (imlSegment->nextSegmentBranchTaken == nullptr)
assert_dbg();
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_SEGMENT, imlSegment->nextSegmentBranchTaken);
x64Gen_jmp_imm32(x64GenContext, 0);
}
else
{
// deprecated (jump to jumpmark)
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmp_imm32(x64GenContext, 0);
}
}
else
{
if (imlInstruction->op_conditionalJump.jumpAccordingToSegment)
assert_dbg();
// generate jump update marker
if( imlInstruction->op_conditionalJump.crRegisterIndex == PPCREC_CR_TEMPORARY || imlInstruction->op_conditionalJump.crRegisterIndex >= 8 )
{
// temporary cr is used, which means we use the currently active eflags
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
sint32 condition = imlInstruction->op_conditionalJump.condition;
if( condition == PPCREC_JUMP_CONDITION_E )
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
else if( condition == PPCREC_JUMP_CONDITION_NE )
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_EQUAL, 0);
else
assert_dbg();
}
else
{
uint8 crBitIndex = imlInstruction->op_conditionalJump.crRegisterIndex*4 + imlInstruction->op_conditionalJump.crBitIndex;
if (imlInstruction->op_conditionalJump.crRegisterIndex == x64GenContext->activeCRRegister )
{
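				// fast path: eflags still reflect the comparison that produced this CR field, so branch on them directly instead of re-reading the CR bit from memory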
if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_UNSIGNED_ARITHMETIC)
{
if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_CARRY : X86_CONDITION_NOT_CARRY, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_UNSIGNED_ABOVE : X86_CONDITION_UNSIGNED_BELOW_EQUAL, 0);
return true;
}
}
else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_SIGNED_ARITHMETIC)
{
if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGNED_LESS : X86_CONDITION_SIGNED_GREATER_EQUAL, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, 0);
return true;
}
}
else if (x64GenContext->activeCRState == PPCREC_CR_STATE_TYPE_LOGICAL)
{
if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_LT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGN : X86_CONDITION_NOT_SIGN, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_EQ)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_EQUAL : X86_CONDITION_NOT_EQUAL, 0);
return true;
}
else if (imlInstruction->op_conditionalJump.crBitIndex == CR_BIT_GT)
{
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, imlInstruction->op_conditionalJump.bitMustBeSet ? X86_CONDITION_SIGNED_GREATER : X86_CONDITION_SIGNED_LESS_EQUAL, 0);
return true;
}
}
}
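			// generic path: load the CR bit into the carry flag via BT, then branch on carry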
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + crBitIndex * sizeof(uint8), 0);
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
if( imlInstruction->op_conditionalJump.bitMustBeSet )
{
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_CARRY, 0);
}
else
{
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_CARRY, 0);
}
}
}
return true;
}
bool PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
// some tests (all performed on a i7-4790K)
// 1) DEC [mem] + JNS has significantly worse performance than BT + JNC (probably due to additional memory write)
// 2) CMP [mem], 0 + JG has about equal (or slightly worse) performance than BT + JNC
// BT
x64Gen_bt_mem8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, remainingCycles), 31); // check if negative
PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X64_RELOC_LINK_TO_PPC, (void*)(size_t)imlInstruction->op_conditionalJump.jumpmarkAddress);
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_NOT_CARRY, 0);
return true;
}
/*
* PPC condition register operation
*/
bool PPCRecompilerX64Gen_imlInstruction_cr(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
	PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext); // while these instructions do not directly affect eflags, they change CR bits
if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR)
{
// clear cr bit
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD, 0);
return true;
}
else if (imlInstruction->operation == PPCREC_IML_OP_CR_SET)
{
// set cr bit
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD, 1);
return true;
}
else if(imlInstruction->operation == PPCREC_IML_OP_CR_OR || imlInstruction->operation == PPCREC_IML_OP_CR_ORC ||
imlInstruction->operation == PPCREC_IML_OP_CR_AND || imlInstruction->operation == PPCREC_IML_OP_CR_ANDC )
{
x64Emit_movZX_reg64_mem8(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crB);
if (imlInstruction->operation == PPCREC_IML_OP_CR_ORC || imlInstruction->operation == PPCREC_IML_OP_CR_ANDC)
{
return false; // untested
x64Gen_int3(x64GenContext);
x64Gen_xor_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 1); // complement
}
if(imlInstruction->operation == PPCREC_IML_OP_CR_OR || imlInstruction->operation == PPCREC_IML_OP_CR_ORC)
x64Gen_or_reg64Low8_mem8Reg64(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crA);
else
x64Gen_and_reg64Low8_mem8Reg64(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crA);
x64Gen_mov_mem8Reg64_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, cr) + sizeof(uint8)*imlInstruction->op_cr.crD);
return true;
}
else
{
assert_dbg();
}
return false;
}
void PPCRecompilerX64Gen_imlInstruction_ppcEnter(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
imlInstruction->op_ppcEnter.x64Offset = x64GenContext->codeBufferIndex;
// generate code
if( ppcImlGenContext->hasFPUInstruction )
{
// old FPU unavailable code
//PPCRecompilerX86_crConditionFlags_saveBeforeOverwrite(PPCRecFunction, ppcImlGenContext, x64GenContext);
//// skip if FP bit in MSR is set
//// #define MSR_FP (1<<13)
//x64Gen_bt_mem8(x64GenContext, REG_ESP, offsetof(PPCInterpreter_t, msr), 13);
//uint32 jmpCodeOffset = x64GenContext->codeBufferIndex;
//x64Gen_jmpc(x64GenContext, X86_CONDITION_CARRY, 0);
//x64Gen_mov_reg32_imm32(x64GenContext, REG_EAX, imlInstruction->op_ppcEnter.ppcAddress&0x7FFFFFFF);
//PPCRecompilerX64Gen_rememberRelocatableOffset(x64GenContext, X86_RELOC_MAKE_RELATIVE);
//x64Gen_jmp_imm32(x64GenContext, (uint32)PPCRecompiler_recompilerCallEscapeAndCallFPUUnavailable);
//// patch jump
//*(uint32*)(x64GenContext->codeBuffer+jmpCodeOffset+2) = x64GenContext->codeBufferIndex-jmpCodeOffset-6;
}
}
void PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
uint32 name = imlInstruction->op_r_name.name;
if( name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0+32 )
{
x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, gpr)+sizeof(uint32)*(name-PPCREC_NAME_R0));
}
else if( name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0+999 )
{
sint32 sprIndex = (name - PPCREC_NAME_SPR0);
if (sprIndex == SPR_LR)
x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.LR));
else if (sprIndex == SPR_CTR)
x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.CTR));
else if (sprIndex == SPR_XER)
x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr.XER));
else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7)
{
sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0);
x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, memOffset);
}
else
assert_dbg();
//x64Emit_mov_reg64_mem32(x64GenContext, tempToRealRegister(imlInstruction->op_r_name.registerIndex), REG_RSP, offsetof(PPCInterpreter_t, spr)+sizeof(uint32)*(name-PPCREC_NAME_SPR0));
}
else
assert_dbg();
}
void PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
uint32 name = imlInstruction->op_r_name.name;
if( name >= PPCREC_NAME_R0 && name < PPCREC_NAME_R0+32 )
{
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, gpr)+sizeof(uint32)*(name-PPCREC_NAME_R0), tempToRealRegister(imlInstruction->op_r_name.registerIndex));
}
else if( name >= PPCREC_NAME_SPR0 && name < PPCREC_NAME_SPR0+999 )
{
uint32 sprIndex = (name - PPCREC_NAME_SPR0);
if (sprIndex == SPR_LR)
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.LR), tempToRealRegister(imlInstruction->op_r_name.registerIndex));
else if (sprIndex == SPR_CTR)
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.CTR), tempToRealRegister(imlInstruction->op_r_name.registerIndex));
else if (sprIndex == SPR_XER)
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, spr.XER), tempToRealRegister(imlInstruction->op_r_name.registerIndex));
else if (sprIndex >= SPR_UGQR0 && sprIndex <= SPR_UGQR7)
{
sint32 memOffset = offsetof(PPCInterpreter_t, spr.UGQR) + sizeof(PPCInterpreter_t::spr.UGQR[0]) * (sprIndex - SPR_UGQR0);
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, memOffset, tempToRealRegister(imlInstruction->op_r_name.registerIndex));
}
else
assert_dbg();
}
else
assert_dbg();
}
uint8* codeMemoryBlock = nullptr;
sint32 codeMemoryBlockIndex = 0;
sint32 codeMemoryBlockSize = 0;
std::mutex mtx_allocExecutableMemory;
uint8* PPCRecompilerX86_allocateExecutableMemory(sint32 size)
{
std::lock_guard<std::mutex> lck(mtx_allocExecutableMemory);
if( codeMemoryBlockIndex+size > codeMemoryBlockSize )
{
// allocate new block
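		// note: the previous block is intentionally not freed, previously generated code still lives (and executes) in it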
codeMemoryBlockSize = std::max(1024*1024*4, size+1024); // 4MB (or more if the function is larger than 4MB)
codeMemoryBlockIndex = 0;
codeMemoryBlock = (uint8*)MemMapper::AllocateMemory(nullptr, codeMemoryBlockSize, MemMapper::PAGE_PERMISSION::P_RWX);
}
uint8* codeMem = codeMemoryBlock + codeMemoryBlockIndex;
codeMemoryBlockIndex += size;
// pad to 4 byte alignment
while (codeMemoryBlockIndex & 3)
{
codeMemoryBlock[codeMemoryBlockIndex] = 0x90;
codeMemoryBlockIndex++;
}
return codeMem;
}
void PPCRecompiler_dumpIML(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext);
bool PPCRecompiler_generateX64Code(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext)
{
x64GenContext_t x64GenContext = {0};
x64GenContext.codeBufferSize = 1024;
x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize);
x64GenContext.codeBufferIndex = 0;
x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER;
// generate iml instruction code
bool codeGenerationFailed = false;
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
ppcImlGenContext->segmentList[s]->x64Offset = x64GenContext.codeBufferIndex;
for(sint32 i=0; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME )
{
PPCRecompilerX64Gen_imlInstruction_r_name(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R )
{
PPCRecompilerX64Gen_imlInstruction_name_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R )
{
if( PPCRecompilerX64Gen_imlInstruction_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32)
{
if (PPCRecompilerX64Gen_imlInstruction_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false)
{
codeGenerationFailed = true;
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32)
{
if (PPCRecompilerX64Gen_imlInstruction_conditional_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false)
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 )
{
if( PPCRecompilerX64Gen_imlInstruction_r_r_s32(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R )
{
if( PPCRecompilerX64Gen_imlInstruction_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP )
{
if( PPCRecompilerX64Gen_imlInstruction_conditionalJump(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlSegment, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK )
{
PPCRecompilerX64Gen_imlInstruction_conditionalJumpCycleCheck(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO )
{
if( PPCRecompilerX64Gen_imlInstruction_macro(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD )
{
if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED )
{
if( PPCRecompilerX64Gen_imlInstruction_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE )
{
if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED )
{
if( PPCRecompilerX64Gen_imlInstruction_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false )
{
codeGenerationFailed = true;
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM)
{
PPCRecompilerX64Gen_imlInstruction_mem2mem(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CR )
{
if( PPCRecompilerX64Gen_imlInstruction_cr(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK )
{
// no op
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP )
{
// no op
}
else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER )
{
PPCRecompilerX64Gen_imlInstruction_ppcEnter(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME )
{
PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R )
{
PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD )
{
if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED )
{
if( PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE )
{
if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, false) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED )
{
if( PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction, true) == false )
{
codeGenerationFailed = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R )
{
PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R )
{
PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R )
{
PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R )
{
PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction, ppcImlGenContext, &x64GenContext, imlInstruction);
}
else
{
debug_printf("PPCRecompiler_generateX64Code(): Unsupported iml type 0x%x\n", imlInstruction->type);
assert_dbg();
}
}
}
// handle failed code generation
if( codeGenerationFailed )
{
free(x64GenContext.codeBuffer);
if (x64GenContext.relocateOffsetTable)
free(x64GenContext.relocateOffsetTable);
return false;
}
// allocate executable memory
uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex);
size_t baseAddress = (size_t)executableMemory;
// fix relocs
for(sint32 i=0; i<x64GenContext.relocateOffsetTableCount; i++)
{
if( x64GenContext.relocateOffsetTable[i].type == X86_RELOC_MAKE_RELATIVE )
{
assert_dbg(); // deprecated
}
else if(x64GenContext.relocateOffsetTable[i].type == X64_RELOC_LINK_TO_PPC || x64GenContext.relocateOffsetTable[i].type == X64_RELOC_LINK_TO_SEGMENT)
{
// if link to PPC, search for segment that starts with this offset
uint32 ppcOffset = (uint32)(size_t)x64GenContext.relocateOffsetTable[i].extraInfo;
uint32 x64Offset = 0xFFFFFFFF;
if (x64GenContext.relocateOffsetTable[i].type == X64_RELOC_LINK_TO_PPC)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
if (ppcImlGenContext->segmentList[s]->isJumpDestination && ppcImlGenContext->segmentList[s]->jumpDestinationPPCAddress == ppcOffset)
{
x64Offset = ppcImlGenContext->segmentList[s]->x64Offset;
break;
}
}
if (x64Offset == 0xFFFFFFFF)
{
debug_printf("Recompiler could not resolve jump (function at 0x%08x)\n", PPCRecFunction->ppcAddress);
// todo: Cleanup
return false;
}
}
else
{
PPCRecImlSegment_t* destSegment = (PPCRecImlSegment_t*)x64GenContext.relocateOffsetTable[i].extraInfo;
x64Offset = destSegment->x64Offset;
}
uint32 relocBase = x64GenContext.relocateOffsetTable[i].offset;
uint8* relocInstruction = x64GenContext.codeBuffer+relocBase;
if( relocInstruction[0] == 0x0F && (relocInstruction[1] >= 0x80 && relocInstruction[1] <= 0x8F) )
{
// Jcc relativeImm32
sint32 distanceNearJump = (sint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 2));
			if (distanceNearJump >= -128 && distanceNearJump < 127) // fits into a rel8 near jump (conservative bound)
{
// convert to near Jcc
*(uint8*)(relocInstruction + 0) = (uint8)(relocInstruction[1]-0x80 + 0x70);
// patch offset
*(uint8*)(relocInstruction + 1) = (uint8)distanceNearJump;
// replace unused 4 bytes with NOP instruction
relocInstruction[2] = 0x0F;
relocInstruction[3] = 0x1F;
relocInstruction[4] = 0x40;
relocInstruction[5] = 0x00;
}
else
{
// patch offset
*(uint32*)(relocInstruction + 2) = (uint32)((baseAddress + x64Offset) - (baseAddress + relocBase + 6));
}
}
else if( relocInstruction[0] == 0xE9 )
{
// JMP relativeImm32
*(uint32*)(relocInstruction+1) = (uint32)((baseAddress+x64Offset)-(baseAddress+relocBase+5));
}
else
assert_dbg();
}
else
{
assert_dbg();
}
}
// copy code to executable memory
memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex);
free(x64GenContext.codeBuffer);
x64GenContext.codeBuffer = nullptr;
if (x64GenContext.relocateOffsetTable)
free(x64GenContext.relocateOffsetTable);
// set code
PPCRecFunction->x86Code = executableMemory;
PPCRecFunction->x86Size = x64GenContext.codeBufferIndex;
return true;
}
void PPCRecompilerX64Gen_generateEnterRecompilerCode()
{
x64GenContext_t x64GenContext = {0};
x64GenContext.codeBufferSize = 1024;
x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize);
x64GenContext.codeBufferIndex = 0;
x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER;
// start of recompiler entry function
x64Gen_push_reg64(&x64GenContext, REG_RAX);
x64Gen_push_reg64(&x64GenContext, REG_RCX);
x64Gen_push_reg64(&x64GenContext, REG_RDX);
x64Gen_push_reg64(&x64GenContext, REG_RBX);
x64Gen_push_reg64(&x64GenContext, REG_RBP);
x64Gen_push_reg64(&x64GenContext, REG_RDI);
x64Gen_push_reg64(&x64GenContext, REG_RSI);
x64Gen_push_reg64(&x64GenContext, REG_R8);
x64Gen_push_reg64(&x64GenContext, REG_R9);
x64Gen_push_reg64(&x64GenContext, REG_R10);
x64Gen_push_reg64(&x64GenContext, REG_R11);
x64Gen_push_reg64(&x64GenContext, REG_R12);
x64Gen_push_reg64(&x64GenContext, REG_R13);
x64Gen_push_reg64(&x64GenContext, REG_R14);
x64Gen_push_reg64(&x64GenContext, REG_R15);
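	// The CALL +0 below pushes the address of the following ADD instruction; the ADD then adjusts that
	// pushed address to point past the JMP, so when the recompiled code eventually returns, execution
	// resumes at the register-restoring epilogue below.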
// 000000007775EF04 | E8 00 00 00 00 call +0x00
x64Gen_writeU8(&x64GenContext, 0xE8);
x64Gen_writeU8(&x64GenContext, 0x00);
x64Gen_writeU8(&x64GenContext, 0x00);
x64Gen_writeU8(&x64GenContext, 0x00);
x64Gen_writeU8(&x64GenContext, 0x00);
//000000007775EF09 | 48 83 04 24 05 add qword ptr ss:[rsp],5
x64Gen_writeU8(&x64GenContext, 0x48);
x64Gen_writeU8(&x64GenContext, 0x83);
x64Gen_writeU8(&x64GenContext, 0x04);
x64Gen_writeU8(&x64GenContext, 0x24);
uint32 jmpPatchOffset = x64GenContext.codeBufferIndex;
x64Gen_writeU8(&x64GenContext, 0); // skip the distance until after the JMP
x64Emit_mov_mem64_reg64(&x64GenContext, REG_RDX, offsetof(PPCInterpreter_t, rspTemp), REG_RSP);
// MOV RSP, RDX (ppc interpreter instance)
x64Gen_mov_reg64_reg64(&x64GenContext, REG_RSP, REG_RDX);
// MOV R15, ppcRecompilerInstanceData
x64Gen_mov_reg64_imm64(&x64GenContext, REG_R15, (uint64)ppcRecompilerInstanceData);
// MOV R13, memory_base
x64Gen_mov_reg64_imm64(&x64GenContext, REG_R13, (uint64)memory_base);
//JMP recFunc
x64Gen_jmp_reg64(&x64GenContext, REG_RCX); // call argument 1
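	// patch the imm8 of the ADD above: distance from the pushed return address (the ADD instruction) to the code following the JMP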
x64GenContext.codeBuffer[jmpPatchOffset] = (x64GenContext.codeBufferIndex-(jmpPatchOffset-4));
//recompilerExit1:
x64Gen_pop_reg64(&x64GenContext, REG_R15);
x64Gen_pop_reg64(&x64GenContext, REG_R14);
x64Gen_pop_reg64(&x64GenContext, REG_R13);
x64Gen_pop_reg64(&x64GenContext, REG_R12);
x64Gen_pop_reg64(&x64GenContext, REG_R11);
x64Gen_pop_reg64(&x64GenContext, REG_R10);
x64Gen_pop_reg64(&x64GenContext, REG_R9);
x64Gen_pop_reg64(&x64GenContext, REG_R8);
x64Gen_pop_reg64(&x64GenContext, REG_RSI);
x64Gen_pop_reg64(&x64GenContext, REG_RDI);
x64Gen_pop_reg64(&x64GenContext, REG_RBP);
x64Gen_pop_reg64(&x64GenContext, REG_RBX);
x64Gen_pop_reg64(&x64GenContext, REG_RDX);
x64Gen_pop_reg64(&x64GenContext, REG_RCX);
x64Gen_pop_reg64(&x64GenContext, REG_RAX);
// RET
x64Gen_ret(&x64GenContext);
uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex);
// copy code to executable memory
memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex);
free(x64GenContext.codeBuffer);
PPCRecompiler_enterRecompilerCode = (void ATTR_MS_ABI (*)(uint64,uint64))executableMemory;
}
void* PPCRecompilerX64Gen_generateLeaveRecompilerCode()
{
x64GenContext_t x64GenContext = {0};
x64GenContext.codeBufferSize = 128;
x64GenContext.codeBuffer = (uint8*)malloc(x64GenContext.codeBufferSize);
x64GenContext.codeBufferIndex = 0;
x64GenContext.activeCRRegister = PPC_REC_INVALID_REGISTER;
// update instruction pointer
	// the PPC address to continue at (typically LR) is passed in EDX
x64Emit_mov_mem32_reg32(&x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, instructionPointer), REG_EDX);
	// MOV RSP, [hCPU + rspTemp] (restore the host stack pointer saved by the enter code)
x64Emit_mov_reg64_mem64(&x64GenContext, REG_RSP, REG_RESV_HCPU, offsetof(PPCInterpreter_t, rspTemp));
// RET
x64Gen_ret(&x64GenContext);
uint8* executableMemory = PPCRecompilerX86_allocateExecutableMemory(x64GenContext.codeBufferIndex);
// copy code to executable memory
memcpy(executableMemory, x64GenContext.codeBuffer, x64GenContext.codeBufferIndex);
free(x64GenContext.codeBuffer);
return executableMemory;
}
void PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions()
{
PPCRecompilerX64Gen_generateEnterRecompilerCode();
PPCRecompiler_leaveRecompilerCode_unvisited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode();
PPCRecompiler_leaveRecompilerCode_visited = (void ATTR_MS_ABI (*)())PPCRecompilerX64Gen_generateLeaveRecompilerCode();
cemu_assert_debug(PPCRecompiler_leaveRecompilerCode_unvisited != PPCRecompiler_leaveRecompilerCode_visited);
}
// ==== cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRegisterAllocator2.cpp ====
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "PPCRecompilerImlRanges.h"
#include <queue>
bool _isRangeDefined(PPCRecImlSegment_t* imlSegment, sint32 vGPR)
{
return (imlSegment->raDistances.reg[vGPR].usageStart != INT_MAX);
}
void PPCRecRA_calculateSegmentMinMaxRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++)
{
imlSegment->raDistances.reg[i].usageStart = INT_MAX;
imlSegment->raDistances.reg[i].usageEnd = INT_MIN;
}
// scan instructions for usage range
sint32 index = 0;
PPCImlOptimizerUsedRegisters_t gprTracking;
while (index < imlSegment->imlListCount)
{
// end loop at suffix instruction
if (PPCRecompiler_isSuffixInstruction(imlSegment->imlList + index))
break;
// get accessed GPRs
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking);
for (sint32 t = 0; t < 4; t++)
{
sint32 virtualRegister = gprTracking.gpr[t];
if (virtualRegister < 0)
continue;
cemu_assert_debug(virtualRegister < PPC_REC_MAX_VIRTUAL_GPR);
imlSegment->raDistances.reg[virtualRegister].usageStart = std::min(imlSegment->raDistances.reg[virtualRegister].usageStart, index); // index before/at instruction
imlSegment->raDistances.reg[virtualRegister].usageEnd = std::max(imlSegment->raDistances.reg[virtualRegister].usageEnd, index+1); // index after instruction
}
// next instruction
index++;
}
}
void PPCRecRA_calculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext)
{
// for each register calculate min/max index of usage range within each segment
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecRA_calculateSegmentMinMaxRanges(ppcImlGenContext, ppcImlGenContext->segmentList[s]);
}
}
raLivenessSubrange_t* PPCRecRA_convertToMappedRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR, raLivenessRange_t* range)
{
if (imlSegment->raDistances.isProcessed[vGPR])
{
		// a subrange was already created for this segment, return it
return imlSegment->raInfo.linkedList_perVirtualGPR[vGPR];
}
imlSegment->raDistances.isProcessed[vGPR] = true;
if (_isRangeDefined(imlSegment, vGPR) == false)
return nullptr;
// create subrange
cemu_assert_debug(imlSegment->raInfo.linkedList_perVirtualGPR[vGPR] == nullptr);
raLivenessSubrange_t* subrange = PPCRecRA_createSubrange(ppcImlGenContext, range, imlSegment, imlSegment->raDistances.reg[vGPR].usageStart, imlSegment->raDistances.reg[vGPR].usageEnd);
// traverse forward
if (imlSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END)
{
if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START)
{
subrange->subrangeBranchTaken = PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, vGPR, range);
cemu_assert_debug(subrange->subrangeBranchTaken->start.index == RA_INTER_RANGE_START);
}
if (imlSegment->nextSegmentBranchNotTaken && imlSegment->nextSegmentBranchNotTaken->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START)
{
subrange->subrangeBranchNotTaken = PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, vGPR, range);
cemu_assert_debug(subrange->subrangeBranchNotTaken->start.index == RA_INTER_RANGE_START);
}
}
// traverse backward
if (imlSegment->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_START)
{
for (auto& it : imlSegment->list_prevSegments)
{
if (it->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END)
PPCRecRA_convertToMappedRanges(ppcImlGenContext, it, vGPR, range);
}
}
// return subrange
return subrange;
}
void PPCRecRA_createSegmentLivenessRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++)
{
if( _isRangeDefined(imlSegment, i) == false )
continue;
if( imlSegment->raDistances.isProcessed[i])
continue;
raLivenessRange_t* range = PPCRecRA_createRangeBase(ppcImlGenContext, i, ppcImlGenContext->mappedRegister[i]);
PPCRecRA_convertToMappedRanges(ppcImlGenContext, imlSegment, i, range);
}
// create lookup table of ranges
raLivenessSubrange_t* vGPR2Subrange[PPC_REC_MAX_VIRTUAL_GPR];
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++)
{
vGPR2Subrange[i] = imlSegment->raInfo.linkedList_perVirtualGPR[i];
#ifdef CEMU_DEBUG_ASSERT
if (vGPR2Subrange[i] && vGPR2Subrange[i]->link_sameVirtualRegisterGPR.next != nullptr)
assert_dbg();
#endif
}
// parse instructions and convert to locations
sint32 index = 0;
PPCImlOptimizerUsedRegisters_t gprTracking;
while (index < imlSegment->imlListCount)
{
// end loop at suffix instruction
if (PPCRecompiler_isSuffixInstruction(imlSegment->imlList + index))
break;
// get accessed GPRs
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + index, &gprTracking);
// handle accessed GPR
for (sint32 t = 0; t < 4; t++)
{
sint32 virtualRegister = gprTracking.gpr[t];
if (virtualRegister < 0)
continue;
bool isWrite = (t == 3);
// add location
PPCRecRA_updateOrAddSubrangeLocation(vGPR2Subrange[virtualRegister], index, isWrite == false, isWrite);
#ifdef CEMU_DEBUG_ASSERT
if (index < vGPR2Subrange[virtualRegister]->start.index)
assert_dbg();
if (index+1 > vGPR2Subrange[virtualRegister]->end.index)
assert_dbg();
#endif
}
// next instruction
index++;
}
}
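// make the vGPR usage range in this segment reach the segment end; if the register is not used here yet,
// create an empty range located at the inter-segment end point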
void PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR)
{
if (_isRangeDefined(imlSegment, vGPR) == false)
{
imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_END;
imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_END;
return;
}
imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_END;
}
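// make the vGPR usage range in this segment start at the segment beginning and extend the range of every
// predecessor segment to its end so that the ranges connect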
void PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 vGPR)
{
if (_isRangeDefined(imlSegment, vGPR) == false)
{
imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_START;
imlSegment->raDistances.reg[vGPR].usageEnd = RA_INTER_RANGE_START;
}
else
{
imlSegment->raDistances.reg[vGPR].usageStart = RA_INTER_RANGE_START;
}
// propagate backwards
for (auto& it : imlSegment->list_prevSegments)
{
PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, it, vGPR);
}
}
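// connect the vGPR ranges along the given route: extend the first segment to its end, the last segment to its
// beginning, and every segment in between across its full length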
void _PPCRecRA_connectRanges(ppcImlGenContext_t* ppcImlGenContext, sint32 vGPR, PPCRecImlSegment_t** route, sint32 routeDepth)
{
#ifdef CEMU_DEBUG_ASSERT
if (routeDepth < 2)
assert_dbg();
#endif
// extend starting range to end of segment
PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, route[0], vGPR);
// extend all the connecting segments in both directions
for (sint32 i = 1; i < (routeDepth - 1); i++)
{
PPCRecRA_extendRangeToEndOfSegment(ppcImlGenContext, route[i], vGPR);
PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, route[i], vGPR);
}
// extend the final segment towards the beginning
PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, route[routeDepth-1], vGPR);
}
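// recursively scan successor segments (bounded by route depth and the remaining instruction distance) for an
// existing vGPR range; if one is found close enough, connect the ranges along the recorded route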
void _PPCRecRA_checkAndTryExtendRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* currentSegment, sint32 vGPR, sint32 distanceLeft, PPCRecImlSegment_t** route, sint32 routeDepth)
{
if (routeDepth >= 64)
{
cemuLog_logDebug(LogType::Force, "Recompiler RA route maximum depth exceeded for function 0x{:08x}", ppcImlGenContext->functionRef->ppcAddress);
return;
}
route[routeDepth] = currentSegment;
if (currentSegment->raDistances.reg[vGPR].usageStart == INT_MAX)
{
// measure distance to end of segment
distanceLeft -= currentSegment->imlListCount;
if (distanceLeft > 0)
{
if (currentSegment->nextSegmentBranchNotTaken)
_PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchNotTaken, vGPR, distanceLeft, route, routeDepth + 1);
if (currentSegment->nextSegmentBranchTaken)
_PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchTaken, vGPR, distanceLeft, route, routeDepth + 1);
}
return;
}
else
{
// measure distance to range
if (currentSegment->raDistances.reg[vGPR].usageStart == RA_INTER_RANGE_END)
{
if (distanceLeft < currentSegment->imlListCount)
return; // range too far away
}
else if (currentSegment->raDistances.reg[vGPR].usageStart != RA_INTER_RANGE_START && currentSegment->raDistances.reg[vGPR].usageStart > distanceLeft)
return; // out of range
// found close range -> connect ranges
_PPCRecRA_connectRanges(ppcImlGenContext, vGPR, route, routeDepth + 1);
}
}
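// try to merge the vGPR range of this segment with nearby ranges in successor segments, using a scan budget
// of 45 instructions measured from the last use in the current segment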
void PPCRecRA_checkAndTryExtendRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* currentSegment, sint32 vGPR)
{
#ifdef CEMU_DEBUG_ASSERT
if (currentSegment->raDistances.reg[vGPR].usageEnd < 0)
assert_dbg();
#endif
// count instructions to end of initial segment
if (currentSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_START)
assert_dbg();
sint32 instructionsUntilEndOfSeg;
if (currentSegment->raDistances.reg[vGPR].usageEnd == RA_INTER_RANGE_END)
instructionsUntilEndOfSeg = 0;
else
instructionsUntilEndOfSeg = currentSegment->imlListCount - currentSegment->raDistances.reg[vGPR].usageEnd;
#ifdef CEMU_DEBUG_ASSERT
if (instructionsUntilEndOfSeg < 0)
assert_dbg();
#endif
sint32 remainingScanDist = 45 - instructionsUntilEndOfSeg;
if (remainingScanDist <= 0)
return; // can't reach end
	// also don't forget: extending is easier if we allow 'non-symmetric' branches, e.g. a register range that only enters one of the branches
PPCRecImlSegment_t* route[64];
route[0] = currentSegment;
if (currentSegment->nextSegmentBranchNotTaken)
{
_PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchNotTaken, vGPR, remainingScanDist, route, 1);
}
if (currentSegment->nextSegmentBranchTaken)
{
_PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, currentSegment->nextSegmentBranchTaken, vGPR, remainingScanDist, route, 1);
}
}
void PPCRecRA_mergeCloseRangesForSegmentV2(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) // todo: Use dynamic maximum or list of used vGPRs so we can avoid parsing empty entries
{
if(imlSegment->raDistances.reg[i].usageStart == INT_MAX)
continue; // not used
// check and extend if possible
PPCRecRA_checkAndTryExtendRange(ppcImlGenContext, imlSegment, i);
}
#ifdef CEMU_DEBUG_ASSERT
if (imlSegment->list_prevSegments.empty() == false && imlSegment->isEnterable)
assert_dbg();
if ((imlSegment->nextSegmentBranchNotTaken != nullptr || imlSegment->nextSegmentBranchTaken != nullptr) && imlSegment->nextSegmentIsUncertain)
assert_dbg();
#endif
}
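// breadth-first walk over the control flow starting at imlSegment, running the close-range merge pass exactly
// once for every reachable segment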
void PPCRecRA_followFlowAndExtendRanges(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
std::vector<PPCRecImlSegment_t*> list_segments;
list_segments.reserve(1000);
sint32 index = 0;
imlSegment->raRangeExtendProcessed = true;
list_segments.push_back(imlSegment);
while (index < list_segments.size())
{
PPCRecImlSegment_t* currentSegment = list_segments[index];
PPCRecRA_mergeCloseRangesForSegmentV2(ppcImlGenContext, currentSegment);
// follow flow
if (currentSegment->nextSegmentBranchNotTaken && currentSegment->nextSegmentBranchNotTaken->raRangeExtendProcessed == false)
{
currentSegment->nextSegmentBranchNotTaken->raRangeExtendProcessed = true;
list_segments.push_back(currentSegment->nextSegmentBranchNotTaken);
}
if (currentSegment->nextSegmentBranchTaken && currentSegment->nextSegmentBranchTaken->raRangeExtendProcessed == false)
{
currentSegment->nextSegmentBranchTaken->raRangeExtendProcessed = true;
list_segments.push_back(currentSegment->nextSegmentBranchTaken);
}
index++;
}
}
void PPCRecRA_mergeCloseRangesV2(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
if (imlSegment->list_prevSegments.empty())
{
if (imlSegment->raRangeExtendProcessed)
assert_dbg(); // should not happen
PPCRecRA_followFlowAndExtendRanges(ppcImlGenContext, imlSegment);
}
}
}
void PPCRecRA_extendRangesOutOfLoopsV2(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
auto localLoopDepth = imlSegment->loopDepth;
if( localLoopDepth <= 0 )
continue; // not inside a loop
// look for loop exit
bool hasLoopExit = false;
if (imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchTaken->loopDepth < localLoopDepth)
{
hasLoopExit = true;
}
if (imlSegment->nextSegmentBranchNotTaken && imlSegment->nextSegmentBranchNotTaken->loopDepth < localLoopDepth)
{
hasLoopExit = true;
}
if(hasLoopExit == false)
continue;
// extend looping ranges into all exits (this allows the data flow analyzer to move stores out of the loop)
for (sint32 i = 0; i < PPC_REC_MAX_VIRTUAL_GPR; i++) // todo: Use dynamic maximum or list of used vGPRs so we can avoid parsing empty entries
{
if (imlSegment->raDistances.reg[i].usageEnd != RA_INTER_RANGE_END)
continue; // range not set or does not reach end of segment
if(imlSegment->nextSegmentBranchTaken)
PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, i);
if(imlSegment->nextSegmentBranchNotTaken)
PPCRecRA_extendRangeToBeginningOfSegment(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, i);
}
}
}
void PPCRecRA_processFlowAndCalculateLivenessRangesV2(ppcImlGenContext_t* ppcImlGenContext)
{
// merge close ranges
PPCRecRA_mergeCloseRangesV2(ppcImlGenContext);
// extra pass to move register stores out of loops
PPCRecRA_extendRangesOutOfLoopsV2(ppcImlGenContext);
// calculate liveness ranges
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
PPCRecRA_createSegmentLivenessRanges(ppcImlGenContext, imlSegment);
}
}
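// derive per-subrange load/store requirements: _noLoad is set when the first access overwrites the value
// (or when the value flows in from a predecessor segment), hasStore is set when the subrange writes the register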
void PPCRecRA_analyzeSubrangeDataDependencyV2(raLivenessSubrange_t* subrange)
{
bool isRead = false;
bool isWritten = false;
bool isOverwritten = false;
for (auto& location : subrange->list_locations)
{
if (location.isRead)
{
isRead = true;
}
if (location.isWrite)
{
if (isRead == false)
isOverwritten = true;
isWritten = true;
}
}
subrange->_noLoad = isOverwritten;
subrange->hasStore = isWritten;
if (subrange->start.index == RA_INTER_RANGE_START)
subrange->_noLoad = true;
}
void _analyzeRangeDataFlow(raLivenessSubrange_t* subrange);
void PPCRecRA_analyzeRangeDataFlowV2(ppcImlGenContext_t* ppcImlGenContext)
{
	// this function is called after _assignRegisters(), which means that all ranges are already final and won't change anymore
// first do a per-subrange pass
for (auto& range : ppcImlGenContext->raInfo.list_ranges)
{
for (auto& subrange : range->list_subranges)
{
PPCRecRA_analyzeSubrangeDataDependencyV2(subrange);
}
}
// then do a second pass where we scan along subrange flow
for (auto& range : ppcImlGenContext->raInfo.list_ranges)
{
for (auto& subrange : range->list_subranges) // todo - traversing this backwards should be faster and yield better results due to the nature of the algorithm
{
_analyzeRangeDataFlow(subrange);
}
}
}
// File: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlRanges.cpp
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "PPCRecompilerImlRanges.h"
#include "util/helpers/MemoryPool.h"
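// prepend the subrange to the segment's per-virtual-register linked list (all subranges of the same vGPR)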
void PPCRecRARange_addLink_perVirtualGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange)
{
#ifdef CEMU_DEBUG_ASSERT
if ((*root) && (*root)->range->virtualRegister != subrange->range->virtualRegister)
assert_dbg();
#endif
subrange->link_sameVirtualRegisterGPR.next = *root;
if (*root)
(*root)->link_sameVirtualRegisterGPR.prev = subrange;
subrange->link_sameVirtualRegisterGPR.prev = nullptr;
*root = subrange;
}
void PPCRecRARange_addLink_allSubrangesGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange)
{
subrange->link_segmentSubrangesGPR.next = *root;
if (*root)
(*root)->link_segmentSubrangesGPR.prev = subrange;
subrange->link_segmentSubrangesGPR.prev = nullptr;
*root = subrange;
}
void PPCRecRARange_removeLink_perVirtualGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange)
{
raLivenessSubrange_t* tempPrev = subrange->link_sameVirtualRegisterGPR.prev;
if (subrange->link_sameVirtualRegisterGPR.prev)
subrange->link_sameVirtualRegisterGPR.prev->link_sameVirtualRegisterGPR.next = subrange->link_sameVirtualRegisterGPR.next;
else
(*root) = subrange->link_sameVirtualRegisterGPR.next;
if (subrange->link_sameVirtualRegisterGPR.next)
subrange->link_sameVirtualRegisterGPR.next->link_sameVirtualRegisterGPR.prev = tempPrev;
#ifdef CEMU_DEBUG_ASSERT
subrange->link_sameVirtualRegisterGPR.prev = (raLivenessSubrange_t*)1;
subrange->link_sameVirtualRegisterGPR.next = (raLivenessSubrange_t*)1;
#endif
}
void PPCRecRARange_removeLink_allSubrangesGPR(raLivenessSubrange_t** root, raLivenessSubrange_t* subrange)
{
raLivenessSubrange_t* tempPrev = subrange->link_segmentSubrangesGPR.prev;
if (subrange->link_segmentSubrangesGPR.prev)
subrange->link_segmentSubrangesGPR.prev->link_segmentSubrangesGPR.next = subrange->link_segmentSubrangesGPR.next;
else
(*root) = subrange->link_segmentSubrangesGPR.next;
if (subrange->link_segmentSubrangesGPR.next)
subrange->link_segmentSubrangesGPR.next->link_segmentSubrangesGPR.prev = tempPrev;
#ifdef CEMU_DEBUG_ASSERT
subrange->link_segmentSubrangesGPR.prev = (raLivenessSubrange_t*)1;
subrange->link_segmentSubrangesGPR.next = (raLivenessSubrange_t*)1;
#endif
}
MemoryPoolPermanentObjects<raLivenessRange_t> memPool_livenessRange(4096);
MemoryPoolPermanentObjects<raLivenessSubrange_t> memPool_livenessSubrange(4096);
raLivenessRange_t* PPCRecRA_createRangeBase(ppcImlGenContext_t* ppcImlGenContext, uint32 virtualRegister, uint32 name)
{
raLivenessRange_t* livenessRange = memPool_livenessRange.acquireObj();
livenessRange->list_subranges.resize(0);
livenessRange->virtualRegister = virtualRegister;
livenessRange->name = name;
livenessRange->physicalRegister = -1;
ppcImlGenContext->raInfo.list_ranges.push_back(livenessRange);
return livenessRange;
}
raLivenessSubrange_t* PPCRecRA_createSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 endIndex)
{
raLivenessSubrange_t* livenessSubrange = memPool_livenessSubrange.acquireObj();
livenessSubrange->list_locations.resize(0);
livenessSubrange->range = range;
livenessSubrange->imlSegment = imlSegment;
PPCRecompilerIml_setSegmentPoint(&livenessSubrange->start, imlSegment, startIndex);
PPCRecompilerIml_setSegmentPoint(&livenessSubrange->end, imlSegment, endIndex);
// default values
livenessSubrange->hasStore = false;
livenessSubrange->hasStoreDelayed = false;
livenessSubrange->lastIterationIndex = 0;
livenessSubrange->subrangeBranchNotTaken = nullptr;
livenessSubrange->subrangeBranchTaken = nullptr;
livenessSubrange->_noLoad = false;
// add to range
range->list_subranges.push_back(livenessSubrange);
// add to segment
PPCRecRARange_addLink_perVirtualGPR(&(imlSegment->raInfo.linkedList_perVirtualGPR[range->virtualRegister]), livenessSubrange);
PPCRecRARange_addLink_allSubrangesGPR(&imlSegment->raInfo.linkedList_allSubranges, livenessSubrange);
return livenessSubrange;
}
void _unlinkSubrange(raLivenessSubrange_t* subrange)
{
PPCRecImlSegment_t* imlSegment = subrange->imlSegment;
PPCRecRARange_removeLink_perVirtualGPR(&imlSegment->raInfo.linkedList_perVirtualGPR[subrange->range->virtualRegister], subrange);
PPCRecRARange_removeLink_allSubrangesGPR(&imlSegment->raInfo.linkedList_allSubranges, subrange);
}
void PPCRecRA_deleteSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange)
{
_unlinkSubrange(subrange);
subrange->range->list_subranges.erase(std::find(subrange->range->list_subranges.begin(), subrange->range->list_subranges.end(), subrange));
subrange->list_locations.clear();
PPCRecompilerIml_removeSegmentPoint(&subrange->start);
PPCRecompilerIml_removeSegmentPoint(&subrange->end);
memPool_livenessSubrange.releaseObj(subrange);
}
void _PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange)
{
_unlinkSubrange(subrange);
PPCRecompilerIml_removeSegmentPoint(&subrange->start);
PPCRecompilerIml_removeSegmentPoint(&subrange->end);
memPool_livenessSubrange.releaseObj(subrange);
}
void PPCRecRA_deleteRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range)
{
for (auto& subrange : range->list_subranges)
{
_PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext, subrange);
}
ppcImlGenContext->raInfo.list_ranges.erase(std::find(ppcImlGenContext->raInfo.list_ranges.begin(), ppcImlGenContext->raInfo.list_ranges.end(), range));
memPool_livenessRange.releaseObj(range);
}
void PPCRecRA_deleteRangeNoUnlink(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range)
{
for (auto& subrange : range->list_subranges)
{
_PPCRecRA_deleteSubrangeNoUnlinkFromRange(ppcImlGenContext, subrange);
}
memPool_livenessRange.releaseObj(range);
}
void PPCRecRA_deleteAllRanges(ppcImlGenContext_t* ppcImlGenContext)
{
for(auto& range : ppcImlGenContext->raInfo.list_ranges)
{
PPCRecRA_deleteRangeNoUnlink(ppcImlGenContext, range);
}
ppcImlGenContext->raInfo.list_ranges.clear();
}
void PPCRecRA_mergeRanges(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range, raLivenessRange_t* absorbedRange)
{
cemu_assert_debug(range != absorbedRange);
cemu_assert_debug(range->virtualRegister == absorbedRange->virtualRegister);
// move all subranges from absorbedRange to range
for (auto& subrange : absorbedRange->list_subranges)
{
range->list_subranges.push_back(subrange);
subrange->range = range;
}
absorbedRange->list_subranges.clear();
PPCRecRA_deleteRange(ppcImlGenContext, absorbedRange);
}
void PPCRecRA_mergeSubranges(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, raLivenessSubrange_t* absorbedSubrange)
{
#ifdef CEMU_DEBUG_ASSERT
PPCRecRA_debugValidateSubrange(subrange);
PPCRecRA_debugValidateSubrange(absorbedSubrange);
if (subrange->imlSegment != absorbedSubrange->imlSegment)
assert_dbg();
if (subrange->end.index > absorbedSubrange->start.index)
assert_dbg();
if (subrange->subrangeBranchTaken || subrange->subrangeBranchNotTaken)
assert_dbg();
if (subrange == absorbedSubrange)
assert_dbg();
#endif
subrange->subrangeBranchTaken = absorbedSubrange->subrangeBranchTaken;
subrange->subrangeBranchNotTaken = absorbedSubrange->subrangeBranchNotTaken;
// merge usage locations
for (auto& location : absorbedSubrange->list_locations)
{
subrange->list_locations.push_back(location);
}
absorbedSubrange->list_locations.clear();
subrange->end.index = absorbedSubrange->end.index;
PPCRecRA_debugValidateSubrange(subrange);
PPCRecRA_deleteSubrange(ppcImlGenContext, absorbedSubrange);
}
// remove all inter-segment connections from the range and split it into local ranges (also removes empty ranges)
void PPCRecRA_explodeRange(ppcImlGenContext_t* ppcImlGenContext, raLivenessRange_t* range)
{
if (range->list_subranges.size() == 1)
assert_dbg();
for (auto& subrange : range->list_subranges)
{
if (subrange->list_locations.empty())
continue;
raLivenessRange_t* newRange = PPCRecRA_createRangeBase(ppcImlGenContext, range->virtualRegister, range->name);
raLivenessSubrange_t* newSubrange = PPCRecRA_createSubrange(ppcImlGenContext, newRange, subrange->imlSegment, subrange->list_locations.data()[0].index, subrange->list_locations.data()[subrange->list_locations.size() - 1].index + 1);
// copy locations
for (auto& location : subrange->list_locations)
{
newSubrange->list_locations.push_back(location);
}
}
// remove original range
PPCRecRA_deleteRange(ppcImlGenContext, range);
}
#ifdef CEMU_DEBUG_ASSERT
void PPCRecRA_debugValidateSubrange(raLivenessSubrange_t* subrange)
{
// validate subrange
if (subrange->subrangeBranchTaken && subrange->subrangeBranchTaken->imlSegment != subrange->imlSegment->nextSegmentBranchTaken)
assert_dbg();
if (subrange->subrangeBranchNotTaken && subrange->subrangeBranchNotTaken->imlSegment != subrange->imlSegment->nextSegmentBranchNotTaken)
assert_dbg();
}
#else
void PPCRecRA_debugValidateSubrange(raLivenessSubrange_t* subrange) {}
#endif
// split subrange at the given index
// After the split there will be two ranges/subranges:
// head -> subrange is shortened to end at splitIndex
// tail -> a new subrange that reaches from splitIndex to the end of the original subrange
// if head has a physical register assigned it will not carry over to tail
// The return value is the tail subrange
// If trimToHole is true, the end of the head subrange and the start of the tail subrange will be moved to fit the locations
// Ranges that begin at RA_INTER_RANGE_START are allowed and can be split
raLivenessSubrange_t* PPCRecRA_splitLocalSubrange(ppcImlGenContext_t* ppcImlGenContext, raLivenessSubrange_t* subrange, sint32 splitIndex, bool trimToHole)
{
// validation
#ifdef CEMU_DEBUG_ASSERT
if (subrange->end.index == RA_INTER_RANGE_END || subrange->end.index == RA_INTER_RANGE_START)
assert_dbg();
if (subrange->start.index >= splitIndex)
assert_dbg();
if (subrange->end.index <= splitIndex)
assert_dbg();
#endif
// create tail
raLivenessRange_t* tailRange = PPCRecRA_createRangeBase(ppcImlGenContext, subrange->range->virtualRegister, subrange->range->name);
raLivenessSubrange_t* tailSubrange = PPCRecRA_createSubrange(ppcImlGenContext, tailRange, subrange->imlSegment, splitIndex, subrange->end.index);
// copy locations
for (auto& location : subrange->list_locations)
{
if (location.index >= splitIndex)
tailSubrange->list_locations.push_back(location);
}
// remove tail locations from head
for (sint32 i = 0; i < subrange->list_locations.size(); i++)
{
raLivenessLocation_t* location = subrange->list_locations.data() + i;
if (location->index >= splitIndex)
{
subrange->list_locations.resize(i);
break;
}
}
// adjust start/end
if (trimToHole)
{
if (subrange->list_locations.empty())
{
subrange->end.index = subrange->start.index+1;
}
else
{
subrange->end.index = subrange->list_locations.back().index + 1;
}
if (tailSubrange->list_locations.empty())
{
assert_dbg(); // should not happen? (In this case we can just avoid generating a tail at all)
}
else
{
tailSubrange->start.index = tailSubrange->list_locations.front().index;
}
}
return tailSubrange;
}
void PPCRecRA_updateOrAddSubrangeLocation(raLivenessSubrange_t* subrange, sint32 index, bool isRead, bool isWrite)
{
if (subrange->list_locations.empty())
{
subrange->list_locations.emplace_back(index, isRead, isWrite);
return;
}
raLivenessLocation_t* lastLocation = subrange->list_locations.data() + (subrange->list_locations.size() - 1);
cemu_assert_debug(lastLocation->index <= index);
if (lastLocation->index == index)
{
// update
lastLocation->isRead = lastLocation->isRead || isRead;
lastLocation->isWrite = lastLocation->isWrite || isWrite;
return;
}
// add new
subrange->list_locations.emplace_back(index, isRead, isWrite);
}
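// heuristic cost of a single register load or store in this segment, growing quadratically with loop depth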
sint32 PPCRecRARange_getReadWriteCost(PPCRecImlSegment_t* imlSegment)
{
sint32 v = imlSegment->loopDepth + 1;
v *= 5;
return v*v; // 25, 100, 225, 400
}
// calculate cost of entire range
// ignores data flow and does not detect avoidable reads/stores
sint32 PPCRecRARange_estimateCost(raLivenessRange_t* range)
{
sint32 cost = 0;
// todo - this algorithm isn't accurate. If we have 10 parallel branches with a load each then the actual cost is still only that of one branch (plus minimal extra cost for generating more code).
// currently we calculate the cost based on the most expensive entry/exit point
sint32 mostExpensiveRead = 0;
sint32 mostExpensiveWrite = 0;
sint32 readCount = 0;
sint32 writeCount = 0;
for (auto& subrange : range->list_subranges)
{
if (subrange->start.index != RA_INTER_RANGE_START)
{
//cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment);
mostExpensiveRead = std::max(mostExpensiveRead, PPCRecRARange_getReadWriteCost(subrange->imlSegment));
readCount++;
}
if (subrange->end.index != RA_INTER_RANGE_END)
{
//cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment);
mostExpensiveWrite = std::max(mostExpensiveWrite, PPCRecRARange_getReadWriteCost(subrange->imlSegment));
writeCount++;
}
}
cost = mostExpensiveRead + mostExpensiveWrite;
cost = cost + (readCount + writeCount) / 10;
return cost;
}
// calculate cost of range that it would have after calling PPCRecRA_explodeRange() on it
sint32 PPCRecRARange_estimateAdditionalCostAfterRangeExplode(raLivenessRange_t* range)
{
sint32 cost = -PPCRecRARange_estimateCost(range);
for (auto& subrange : range->list_subranges)
{
if (subrange->list_locations.empty())
continue;
cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment) * 2; // we assume a read and a store
}
return cost;
}
sint32 PPCRecRARange_estimateAdditionalCostAfterSplit(raLivenessSubrange_t* subrange, sint32 splitIndex)
{
// validation
#ifdef CEMU_DEBUG_ASSERT
if (subrange->end.index == RA_INTER_RANGE_END)
assert_dbg();
#endif
sint32 cost = 0;
// find split position in location list
if (subrange->list_locations.empty())
{
assert_dbg(); // should not happen?
return 0;
}
if (splitIndex <= subrange->list_locations.front().index)
return 0;
if (splitIndex > subrange->list_locations.back().index)
return 0;
// todo - determine exact cost of split subranges
cost += PPCRecRARange_getReadWriteCost(subrange->imlSegment) * 2; // currently we assume that the additional region will require a read and a store
//for (sint32 f = 0; f < subrange->list_locations.size(); f++)
//{
// raLivenessLocation_t* location = subrange->list_locations.data() + f;
// if (location->index >= splitIndex)
// {
// ...
// return cost;
// }
//}
return cost;
}
// File: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerIntermediate.cpp
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
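// find the segment that is flagged as a jump destination for the given PPC address; returns NULL if none exists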
PPCRecImlSegment_t* PPCRecompiler_getSegmentByPPCJumpAddress(ppcImlGenContext_t* ppcImlGenContext, uint32 ppcOffset)
{
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
if( ppcImlGenContext->segmentList[s]->isJumpDestination && ppcImlGenContext->segmentList[s]->jumpDestinationPPCAddress == ppcOffset )
{
return ppcImlGenContext->segmentList[s];
}
}
debug_printf("PPCRecompiler_getSegmentByPPCJumpAddress(): Unable to find segment (ppcOffset 0x%08x)\n", ppcOffset);
return NULL;
}
void PPCRecompilerIml_setLinkBranchNotTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst)
{
// make sure segments aren't already linked
if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst)
return;
// add as next segment for source
if (imlSegmentSrc->nextSegmentBranchNotTaken != NULL)
assert_dbg();
imlSegmentSrc->nextSegmentBranchNotTaken = imlSegmentDst;
// add as previous segment for destination
imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc);
}
void PPCRecompilerIml_setLinkBranchTaken(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst)
{
// make sure segments aren't already linked
if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst)
return;
// add as next segment for source
if (imlSegmentSrc->nextSegmentBranchTaken != NULL)
assert_dbg();
imlSegmentSrc->nextSegmentBranchTaken = imlSegmentDst;
// add as previous segment for destination
imlSegmentDst->list_prevSegments.push_back(imlSegmentSrc);
}
void PPCRecompilerIML_removeLink(PPCRecImlSegment_t* imlSegmentSrc, PPCRecImlSegment_t* imlSegmentDst)
{
if (imlSegmentSrc->nextSegmentBranchNotTaken == imlSegmentDst)
{
imlSegmentSrc->nextSegmentBranchNotTaken = NULL;
}
else if (imlSegmentSrc->nextSegmentBranchTaken == imlSegmentDst)
{
imlSegmentSrc->nextSegmentBranchTaken = NULL;
}
else
assert_dbg();
bool matchFound = false;
for (sint32 i = 0; i < imlSegmentDst->list_prevSegments.size(); i++)
{
if (imlSegmentDst->list_prevSegments[i] == imlSegmentSrc)
{
imlSegmentDst->list_prevSegments.erase(imlSegmentDst->list_prevSegments.begin()+i);
matchFound = true;
break;
}
}
if (matchFound == false)
assert_dbg();
}
/*
 * Replaces all links to segment imlSegmentOrig with links to segment imlSegmentNew
*/
void PPCRecompilerIML_relinkInputSegment(PPCRecImlSegment_t* imlSegmentOrig, PPCRecImlSegment_t* imlSegmentNew)
{
while (imlSegmentOrig->list_prevSegments.size() != 0)
{
PPCRecImlSegment_t* prevSegment = imlSegmentOrig->list_prevSegments[0];
if (prevSegment->nextSegmentBranchNotTaken == imlSegmentOrig)
{
PPCRecompilerIML_removeLink(prevSegment, imlSegmentOrig);
PPCRecompilerIml_setLinkBranchNotTaken(prevSegment, imlSegmentNew);
}
else if (prevSegment->nextSegmentBranchTaken == imlSegmentOrig)
{
PPCRecompilerIML_removeLink(prevSegment, imlSegmentOrig);
PPCRecompilerIml_setLinkBranchTaken(prevSegment, imlSegmentNew);
}
else
{
assert_dbg();
}
}
}
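// establish branch-taken / branch-not-taken links between segments based on each segment's final instruction;
// segments ending in macros or unresolvable jumps are flagged as having an uncertain successor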
void PPCRecompilerIML_linkSegments(ppcImlGenContext_t* ppcImlGenContext)
{
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
bool isLastSegment = (s+1)>=ppcImlGenContext->segmentListCount;
PPCRecImlSegment_t* nextSegment = isLastSegment?NULL:ppcImlGenContext->segmentList[s+1];
// handle empty segment
if( imlSegment->imlListCount == 0 )
{
if (isLastSegment == false)
PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, ppcImlGenContext->segmentList[s+1]); // continue execution to next segment
else
imlSegment->nextSegmentIsUncertain = true;
continue;
}
// check last instruction of segment
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+(imlSegment->imlListCount-1);
if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK )
{
// find destination segment by ppc jump address
PPCRecImlSegment_t* jumpDestSegment = PPCRecompiler_getSegmentByPPCJumpAddress(ppcImlGenContext, imlInstruction->op_conditionalJump.jumpmarkAddress);
if( jumpDestSegment )
{
if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE)
PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, nextSegment);
PPCRecompilerIml_setLinkBranchTaken(imlSegment, jumpDestSegment);
}
else
{
imlSegment->nextSegmentIsUncertain = true;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO )
{
// currently we assume that the next segment is unknown for all macros
imlSegment->nextSegmentIsUncertain = true;
}
else
{
// all other instruction types do not branch
//imlSegment->nextSegment[0] = nextSegment;
PPCRecompilerIml_setLinkBranchNotTaken(imlSegment, nextSegment);
//imlSegment->nextSegmentIsUncertain = true;
}
}
}
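// for every enterable segment that also has internal predecessors, append a dedicated entry segment that jumps
// to the original, so that enterable segments never have incoming links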
void PPCRecompilerIML_isolateEnterableSegments(ppcImlGenContext_t* ppcImlGenContext)
{
sint32 initialSegmentCount = ppcImlGenContext->segmentListCount;
for (sint32 i = 0; i < ppcImlGenContext->segmentListCount; i++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[i];
if (imlSegment->list_prevSegments.empty() == false && imlSegment->isEnterable)
{
// spawn new segment at end
PPCRecompilerIml_insertSegments(ppcImlGenContext, ppcImlGenContext->segmentListCount, 1);
PPCRecImlSegment_t* entrySegment = ppcImlGenContext->segmentList[ppcImlGenContext->segmentListCount-1];
entrySegment->isEnterable = true;
entrySegment->enterPPCAddress = imlSegment->enterPPCAddress;
// create jump instruction
PPCRecompiler_pushBackIMLInstructions(entrySegment, 0, 1);
PPCRecompilerImlGen_generateNewInstruction_jumpSegment(ppcImlGenContext, entrySegment->imlList + 0);
PPCRecompilerIml_setLinkBranchTaken(entrySegment, imlSegment);
// remove enterable flag from original segment
imlSegment->isEnterable = false;
imlSegment->enterPPCAddress = 0;
}
}
}
PPCRecImlInstruction_t* PPCRecompilerIML_getLastInstruction(PPCRecImlSegment_t* imlSegment)
{
if (imlSegment->imlListCount == 0)
return nullptr;
return imlSegment->imlList + (imlSegment->imlListCount - 1);
}
// File: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlOptimizer.cpp
#include "../Interpreter/PPCInterpreterInternal.h"
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
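// determine which GPR and FPR virtual registers this IML instruction reads and writes; unused slots stay at -1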
void PPCRecompiler_checkRegisterUsage(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, PPCImlOptimizerUsedRegisters_t* registersUsed)
{
registersUsed->readNamedReg1 = -1;
registersUsed->readNamedReg2 = -1;
registersUsed->readNamedReg3 = -1;
registersUsed->writtenNamedReg1 = -1;
registersUsed->readFPR1 = -1;
registersUsed->readFPR2 = -1;
registersUsed->readFPR3 = -1;
registersUsed->readFPR4 = -1;
registersUsed->writtenFPR1 = -1;
if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME )
{
registersUsed->writtenNamedReg1 = imlInstruction->op_r_name.registerIndex;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R )
{
registersUsed->readNamedReg1 = imlInstruction->op_r_name.registerIndex;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R )
{
if (imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED || imlInstruction->operation == PPCREC_IML_OP_DCBZ)
{
// both operands are read only
registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerResult;
registersUsed->readNamedReg2 = imlInstruction->op_r_r.registerA;
}
else if (
imlInstruction->operation == PPCREC_IML_OP_OR ||
imlInstruction->operation == PPCREC_IML_OP_AND ||
imlInstruction->operation == PPCREC_IML_OP_XOR ||
imlInstruction->operation == PPCREC_IML_OP_ADD ||
imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY ||
imlInstruction->operation == PPCREC_IML_OP_ADD_CARRY_ME ||
imlInstruction->operation == PPCREC_IML_OP_SUB_CARRY_UPDATE_CARRY)
{
// result is read and written, operand is read
registersUsed->writtenNamedReg1 = imlInstruction->op_r_r.registerResult;
registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerResult;
registersUsed->readNamedReg2 = imlInstruction->op_r_r.registerA;
}
else if (
imlInstruction->operation == PPCREC_IML_OP_ASSIGN ||
imlInstruction->operation == PPCREC_IML_OP_ENDIAN_SWAP ||
imlInstruction->operation == PPCREC_IML_OP_CNTLZW ||
imlInstruction->operation == PPCREC_IML_OP_NOT ||
imlInstruction->operation == PPCREC_IML_OP_NEG ||
imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S16_TO_S32 ||
imlInstruction->operation == PPCREC_IML_OP_ASSIGN_S8_TO_S32)
{
// result is written, operand is read
registersUsed->writtenNamedReg1 = imlInstruction->op_r_r.registerResult;
registersUsed->readNamedReg1 = imlInstruction->op_r_r.registerA;
}
else
cemu_assert_unimplemented();
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32)
{
if (imlInstruction->operation == PPCREC_IML_OP_COMPARE_SIGNED || imlInstruction->operation == PPCREC_IML_OP_COMPARE_UNSIGNED || imlInstruction->operation == PPCREC_IML_OP_MTCRF)
{
// operand register is read only
registersUsed->readNamedReg1 = imlInstruction->op_r_immS32.registerIndex;
}
else if (imlInstruction->operation == PPCREC_IML_OP_ADD ||
imlInstruction->operation == PPCREC_IML_OP_SUB ||
imlInstruction->operation == PPCREC_IML_OP_AND ||
imlInstruction->operation == PPCREC_IML_OP_OR ||
imlInstruction->operation == PPCREC_IML_OP_XOR ||
imlInstruction->operation == PPCREC_IML_OP_LEFT_ROTATE)
{
// operand register is read and write
registersUsed->readNamedReg1 = imlInstruction->op_r_immS32.registerIndex;
registersUsed->writtenNamedReg1 = imlInstruction->op_r_immS32.registerIndex;
}
else
{
// operand register is write only
// todo - use explicit lists, avoid default cases
registersUsed->writtenNamedReg1 = imlInstruction->op_r_immS32.registerIndex;
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32)
{
if (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)
{
// result is written, but also considered read (in case the condition fails)
registersUsed->readNamedReg1 = imlInstruction->op_conditional_r_s32.registerIndex;
registersUsed->writtenNamedReg1 = imlInstruction->op_conditional_r_s32.registerIndex;
}
else
cemu_assert_unimplemented();
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 )
{
if( imlInstruction->operation == PPCREC_IML_OP_RLWIMI )
{
// result and operand register are both read, result is written
registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_s32.registerResult;
registersUsed->readNamedReg1 = imlInstruction->op_r_r_s32.registerResult;
registersUsed->readNamedReg2 = imlInstruction->op_r_r_s32.registerA;
}
else
{
// result is write only and operand is read only
registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_s32.registerResult;
registersUsed->readNamedReg1 = imlInstruction->op_r_r_s32.registerA;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R )
{
// in all cases result is written and other operands are read only
registersUsed->writtenNamedReg1 = imlInstruction->op_r_r_r.registerResult;
registersUsed->readNamedReg1 = imlInstruction->op_r_r_r.registerA;
registersUsed->readNamedReg2 = imlInstruction->op_r_r_r.registerB;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO )
{
if( imlInstruction->operation == PPCREC_IML_MACRO_BL || imlInstruction->operation == PPCREC_IML_MACRO_B_FAR || imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL || imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL || imlInstruction->operation == PPCREC_IML_MACRO_LEAVE || imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK || imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES || imlInstruction->operation == PPCREC_IML_MACRO_HLE || imlInstruction->operation == PPCREC_IML_MACRO_MFTB )
{
// no effect on registers
}
else
cemu_assert_unimplemented();
}
else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD)
{
registersUsed->writtenNamedReg1 = imlInstruction->op_storeLoad.registerData;
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM)
{
registersUsed->readNamedReg1 = imlInstruction->op_mem2mem.src.registerMem;
registersUsed->readNamedReg2 = imlInstruction->op_mem2mem.dst.registerMem;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED )
{
registersUsed->writtenNamedReg1 = imlInstruction->op_storeLoad.registerData;
if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE )
{
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerData;
if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED )
{
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerData;
if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem;
if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerMem2;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CR )
{
// only affects cr register
}
else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER )
{
// no op
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME )
{
// fpr operation
registersUsed->writtenFPR1 = imlInstruction->op_r_name.registerIndex;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R )
{
// fpr operation
registersUsed->readFPR1 = imlInstruction->op_r_name.registerIndex;
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD )
{
// fpr load operation
registersUsed->writtenFPR1 = imlInstruction->op_storeLoad.registerData;
// address is in gpr register
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
// determine partially written result
switch (imlInstruction->op_storeLoad.mode)
{
case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0:
case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1:
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerGQR;
break;
case PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0:
// PS1 remains the same
registersUsed->readFPR4 = imlInstruction->op_storeLoad.registerData;
break;
case PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S16_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U16_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U8_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S8_PS0:
break;
default:
cemu_assert_unimplemented();
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED )
{
// fpr load operation
registersUsed->writtenFPR1 = imlInstruction->op_storeLoad.registerData;
// address is in gpr registers
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER)
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2;
// determine partially written result
switch (imlInstruction->op_storeLoad.mode)
{
case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0:
case PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1:
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerGQR;
break;
case PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0:
// PS1 remains the same
registersUsed->readFPR4 = imlInstruction->op_storeLoad.registerData;
break;
case PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S16_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U16_PS0:
case PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1:
case PPCREC_FPR_LD_MODE_PSQ_U8_PS0:
break;
default:
cemu_assert_unimplemented();
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE )
{
// fpr store operation
registersUsed->readFPR1 = imlInstruction->op_storeLoad.registerData;
if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
// PSQ generic stores also access GQR
switch (imlInstruction->op_storeLoad.mode)
{
case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0:
case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1:
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerGQR;
break;
default:
break;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED )
{
// fpr store operation
registersUsed->readFPR1 = imlInstruction->op_storeLoad.registerData;
// address is in gpr registers
if( imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg1 = imlInstruction->op_storeLoad.registerMem;
if( imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER )
registersUsed->readNamedReg2 = imlInstruction->op_storeLoad.registerMem2;
// PSQ generic stores also access GQR
switch (imlInstruction->op_storeLoad.mode)
{
case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0:
case PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1:
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
registersUsed->readNamedReg3 = imlInstruction->op_storeLoad.registerGQR;
break;
default:
break;
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R )
{
// fpr operation
if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED ||
imlInstruction->operation == PPCREC_IML_OP_ASSIGN ||
imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FRSQRTE_PAIR )
{
// operand read, result written
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult;
}
else if(
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64 ||
imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ ||
imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT
)
{
// operand read, result read and (partially) written
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand;
registersUsed->readFPR4 = imlInstruction->op_fpr_r_r.registerResult;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult;
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR ||
imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM )
{
// operand read, result read and written
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand;
registersUsed->readFPR2 = imlInstruction->op_fpr_r_r.registerResult;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r.registerResult;
}
else if(imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPO_BOTTOM)
{
// operand read, result read
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r.registerOperand;
registersUsed->readFPR2 = imlInstruction->op_fpr_r_r.registerResult;
}
else
cemu_assert_unimplemented();
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R )
{
// fpr operation
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r_r.registerOperandA;
registersUsed->readFPR2 = imlInstruction->op_fpr_r_r_r.registerOperandB;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r_r.registerResult;
// handle partially written result
switch (imlInstruction->operation)
{
case PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM:
case PPCREC_IML_OP_FPR_ADD_BOTTOM:
case PPCREC_IML_OP_FPR_SUB_BOTTOM:
registersUsed->readFPR4 = imlInstruction->op_fpr_r_r_r.registerResult;
break;
case PPCREC_IML_OP_FPR_SUB_PAIR:
break;
default:
cemu_assert_unimplemented();
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R )
{
// fpr operation
registersUsed->readFPR1 = imlInstruction->op_fpr_r_r_r_r.registerOperandA;
registersUsed->readFPR2 = imlInstruction->op_fpr_r_r_r_r.registerOperandB;
registersUsed->readFPR3 = imlInstruction->op_fpr_r_r_r_r.registerOperandC;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r_r_r_r.registerResult;
// handle partially written result
switch (imlInstruction->operation)
{
case PPCREC_IML_OP_FPR_SELECT_BOTTOM:
registersUsed->readFPR4 = imlInstruction->op_fpr_r_r_r_r.registerResult;
break;
case PPCREC_IML_OP_FPR_SUM0:
case PPCREC_IML_OP_FPR_SUM1:
case PPCREC_IML_OP_FPR_SELECT_PAIR:
break;
default:
cemu_assert_unimplemented();
}
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R )
{
// fpr operation
if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64 ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR )
{
registersUsed->readFPR1 = imlInstruction->op_fpr_r.registerResult;
registersUsed->writtenFPR1 = imlInstruction->op_fpr_r.registerResult;
}
else
cemu_assert_unimplemented();
}
else
{
cemu_assert_unimplemented();
}
}
#define replaceRegister(__x,__r,__n) (((__x)==(__r))?(__n):(__x))
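// return the replacement register if reg matches one of the (up to four) searched registers, otherwise return reg unchanged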
sint32 replaceRegisterMultiple(sint32 reg, sint32 match[4], sint32 replaced[4])
{
for (sint32 i = 0; i < 4; i++)
{
if(match[i] < 0)
continue;
if (reg == match[i])
{
return replaced[i];
}
}
return reg;
}
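// rewrite all GPR operands of the instruction, substituting each searched register with its replacement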
void PPCRecompiler_replaceGPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 gprRegisterSearched[4], sint32 gprRegisterReplaced[4])
{
if (imlInstruction->type == PPCREC_IML_TYPE_R_NAME)
{
imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_NAME_R)
{
imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R)
{
imlInstruction->op_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r.registerResult, gprRegisterSearched, gprRegisterReplaced);
imlInstruction->op_r_r.registerA = replaceRegisterMultiple(imlInstruction->op_r_r.registerA, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32)
{
imlInstruction->op_r_immS32.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_immS32.registerIndex, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32)
{
imlInstruction->op_conditional_r_s32.registerIndex = replaceRegisterMultiple(imlInstruction->op_conditional_r_s32.registerIndex, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32)
{
// in all cases result is written and other operand is read only
imlInstruction->op_r_r_s32.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r_s32.registerResult, gprRegisterSearched, gprRegisterReplaced);
imlInstruction->op_r_r_s32.registerA = replaceRegisterMultiple(imlInstruction->op_r_r_s32.registerA, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R)
{
// in all cases result is written and other operands are read only
imlInstruction->op_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerResult, gprRegisterSearched, gprRegisterReplaced);
imlInstruction->op_r_r_r.registerA = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerA, gprRegisterSearched, gprRegisterReplaced);
imlInstruction->op_r_r_r.registerB = replaceRegisterMultiple(imlInstruction->op_r_r_r.registerB, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_NO_OP)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MACRO)
{
if (imlInstruction->operation == PPCREC_IML_MACRO_BL || imlInstruction->operation == PPCREC_IML_MACRO_B_FAR || imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BLRL || imlInstruction->operation == PPCREC_IML_MACRO_BCTR || imlInstruction->operation == PPCREC_IML_MACRO_BCTRL || imlInstruction->operation == PPCREC_IML_MACRO_LEAVE || imlInstruction->operation == PPCREC_IML_MACRO_DEBUGBREAK || imlInstruction->operation == PPCREC_IML_MACRO_HLE || imlInstruction->operation == PPCREC_IML_MACRO_MFTB || imlInstruction->operation == PPCREC_IML_MACRO_COUNT_CYCLES )
{
// no effect on registers
}
else
{
cemu_assert_unimplemented();
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER)
imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_STORE)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER)
imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CR)
{
// only affects cr register
}
else if (imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER)
{
// no op
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME)
{
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R)
{
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD)
{
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED)
{
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE)
{
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED)
{
if (imlInstruction->op_storeLoad.registerMem != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerMem2 != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerMem2 = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerMem2, gprRegisterSearched, gprRegisterReplaced);
}
if (imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER)
{
imlInstruction->op_storeLoad.registerGQR = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerGQR, gprRegisterSearched, gprRegisterReplaced);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R)
{
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R)
{
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R)
{
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R)
{
}
else
{
cemu_assert_unimplemented();
}
}
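/*
 * Replaces up to four FPR indices within a single IML instruction
 * Every occurrence of fprRegisterSearched[i] in an FPR operand is swapped for fprRegisterReplaced[i]
 */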
void PPCRecompiler_replaceFPRRegisterUsageMultiple(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 fprRegisterSearched[4], sint32 fprRegisterReplaced[4])
{
if (imlInstruction->type == PPCREC_IML_TYPE_R_NAME)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_NAME_R)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_S32)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_R_R)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_NO_OP)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MACRO)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_STORE)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED)
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CR)
{
// only affects cr register
}
else if (imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK)
{
// no effect on registers
}
else if (imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER)
{
// no op
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME)
{
imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R)
{
imlInstruction->op_r_name.registerIndex = replaceRegisterMultiple(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED)
{
imlInstruction->op_storeLoad.registerData = replaceRegisterMultiple(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R)
{
imlInstruction->op_fpr_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r.registerOperand = replaceRegisterMultiple(imlInstruction->op_fpr_r_r.registerOperand, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R)
{
imlInstruction->op_fpr_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r.registerOperandA = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r.registerOperandB = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R)
{
imlInstruction->op_fpr_r_r_r_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandA = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandB = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandC = replaceRegisterMultiple(imlInstruction->op_fpr_r_r_r_r.registerOperandC, fprRegisterSearched, fprRegisterReplaced);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_R)
{
imlInstruction->op_fpr_r.registerResult = replaceRegisterMultiple(imlInstruction->op_fpr_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
}
else
{
cemu_assert_unimplemented();
}
}
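/*
 * Single-register variant: replaces every use of fprRegisterSearched with fprRegisterReplaced in the given IML instruction
 */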
void PPCRecompiler_replaceFPRRegisterUsage(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 fprRegisterSearched, sint32 fprRegisterReplaced)
{
if( imlInstruction->type == PPCREC_IML_TYPE_R_NAME )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NAME_R )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_S32 )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_S32 )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_R_R_R )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CJUMP || imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_NO_OP )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_MACRO )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD )
{
// not affected
}
else if (imlInstruction->type == PPCREC_IML_TYPE_MEM2MEM)
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_LOAD_INDEXED )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED )
{
// not affected
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CR )
{
// only affects cr register
}
else if( imlInstruction->type == PPCREC_IML_TYPE_JUMPMARK )
{
// no effect on registers
}
else if( imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER )
{
// no op
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME )
{
imlInstruction->op_r_name.registerIndex = replaceRegister(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R )
{
imlInstruction->op_r_name.registerIndex = replaceRegister(imlInstruction->op_r_name.registerIndex, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD )
{
imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED )
{
imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE )
{
imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED )
{
imlInstruction->op_storeLoad.registerData = replaceRegister(imlInstruction->op_storeLoad.registerData, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R )
{
imlInstruction->op_fpr_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r.registerOperand = replaceRegister(imlInstruction->op_fpr_r_r.registerOperand, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R )
{
imlInstruction->op_fpr_r_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r.registerOperandA = replaceRegister(imlInstruction->op_fpr_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r.registerOperandB = replaceRegister(imlInstruction->op_fpr_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R_R_R_R )
{
imlInstruction->op_fpr_r_r_r_r.registerResult = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandA = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandA, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandB = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandB, fprRegisterSearched, fprRegisterReplaced);
imlInstruction->op_fpr_r_r_r_r.registerOperandC = replaceRegister(imlInstruction->op_fpr_r_r_r_r.registerOperandC, fprRegisterSearched, fprRegisterReplaced);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_FPR_R )
{
imlInstruction->op_fpr_r.registerResult = replaceRegister(imlInstruction->op_fpr_r.registerResult, fprRegisterSearched, fprRegisterReplaced);
}
else
{
cemu_assert_unimplemented();
}
}
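/*
 * Bookkeeping for temporarily repurposed GPRs while instructions that reference out-of-range registers are rewritten
 * Each entry records the borrowed register index, the previous and new register name and whether the previous name must be stored/reloaded around the replacement
 */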
typedef struct
{
struct
{
sint32 instructionIndex;
sint32 registerPreviousName;
sint32 registerNewName;
sint32 index; // new index
sint32 previousIndex; // previous index (always out of range)
bool nameMustBeMaintained; // must be stored before replacement and loaded after replacement ends
}replacedRegisterEntry[PPC_X64_GPR_USABLE_REGISTERS];
sint32 count;
}replacedRegisterTracker_t;
bool PPCRecompiler_checkIfGPRRegisterIsAccessed(PPCImlOptimizerUsedRegisters_t* registersUsed, sint32 gprRegister)
{
if( registersUsed->readNamedReg1 == gprRegister )
return true;
if( registersUsed->readNamedReg2 == gprRegister )
return true;
if( registersUsed->readNamedReg3 == gprRegister )
return true;
if( registersUsed->writtenNamedReg1 == gprRegister )
return true;
return false;
}
/*
* Returns index of register to replace
* If no register needs to be replaced, -1 is returned
*/
sint32 PPCRecompiler_getNextRegisterToReplace(PPCImlOptimizerUsedRegisters_t* registersUsed)
{
// get index of register to replace
sint32 gprToReplace = -1;
if( registersUsed->readNamedReg1 >= PPC_X64_GPR_USABLE_REGISTERS )
gprToReplace = registersUsed->readNamedReg1;
else if( registersUsed->readNamedReg2 >= PPC_X64_GPR_USABLE_REGISTERS )
gprToReplace = registersUsed->readNamedReg2;
else if( registersUsed->readNamedReg3 >= PPC_X64_GPR_USABLE_REGISTERS )
gprToReplace = registersUsed->readNamedReg3;
else if( registersUsed->writtenNamedReg1 >= PPC_X64_GPR_USABLE_REGISTERS )
gprToReplace = registersUsed->writtenNamedReg1;
// return
return gprToReplace;
}
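/*
 * Searches for a physical GPR that is neither used by the instruction at imlIndexStart nor reserved by an active replacement
 * Outputs the register index, the name currently mapped to it and whether that name is still live (and thus has to be preserved)
 * Returns false if no register is available
 */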
bool PPCRecompiler_findAvailableRegisterDepr(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexStart, replacedRegisterTracker_t* replacedRegisterTracker, sint32* registerIndex, sint32* registerName, bool* isUsed)
{
PPCImlOptimizerUsedRegisters_t registersUsed;
	PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndexStart, &registersUsed);
// mask all registers used by this instruction
uint32 instructionReservedRegisterMask = 0;//(1<<(PPC_X64_GPR_USABLE_REGISTERS+1))-1;
if( registersUsed.readNamedReg1 != -1 )
instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg1));
if( registersUsed.readNamedReg2 != -1 )
instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg2));
if( registersUsed.readNamedReg3 != -1 )
instructionReservedRegisterMask |= (1<<(registersUsed.readNamedReg3));
if( registersUsed.writtenNamedReg1 != -1 )
instructionReservedRegisterMask |= (1<<(registersUsed.writtenNamedReg1));
// mask all registers that are reserved for other replacements
uint32 replacementReservedRegisterMask = 0;
for(sint32 i=0; i<replacedRegisterTracker->count; i++)
{
replacementReservedRegisterMask |= (1<<replacedRegisterTracker->replacedRegisterEntry[i].index);
}
// potential improvement: Scan ahead a few instructions and look for registers that are the least used (or ideally never used)
// pick available register
const uint32 allRegisterMask = (1<<(PPC_X64_GPR_USABLE_REGISTERS+1))-1; // mask with set bit for every register
uint32 reservedRegisterMask = instructionReservedRegisterMask | replacementReservedRegisterMask;
cemu_assert(instructionReservedRegisterMask != allRegisterMask); // no usable register! (Need to store a register from the replacedRegisterTracker)
sint32 usedRegisterIndex = -1;
for(sint32 i=0; i<PPC_X64_GPR_USABLE_REGISTERS; i++)
{
if( (reservedRegisterMask&(1<<i)) == 0 )
{
if( (instructionReservedRegisterMask&(1<<i)) == 0 && ppcImlGenContext->mappedRegister[i] != -1 )
{
// register is reserved by segment -> In use
*isUsed = true;
*registerName = ppcImlGenContext->mappedRegister[i];
}
else
{
*isUsed = false;
*registerName = -1;
}
*registerIndex = i;
return true;
}
}
return false;
}
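/*
 * Returns true if the last instruction of the segment is a suffix instruction (branch/exit macro, PPC_ENTER or conditional jump)
 */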
bool PPCRecompiler_hasSuffixInstruction(PPCRecImlSegment_t* imlSegment)
{
if( imlSegment->imlListCount == 0 )
return false;
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+imlSegment->imlListCount-1;
	if( (imlInstruction->type == PPCREC_IML_TYPE_MACRO && (imlInstruction->operation == PPCREC_IML_MACRO_BLR || imlInstruction->operation == PPCREC_IML_MACRO_BCTR)) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BL) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_B_FAR) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BLRL) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_BCTRL) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_LEAVE) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_HLE) ||
		(imlInstruction->type == PPCREC_IML_TYPE_MACRO && imlInstruction->operation == PPCREC_IML_MACRO_MFTB) ||
		imlInstruction->type == PPCREC_IML_TYPE_PPC_ENTER ||
		imlInstruction->type == PPCREC_IML_TYPE_CJUMP ||
		imlInstruction->type == PPCREC_IML_TYPE_CJUMP_CYCLE_CHECK )
return true;
return false;
}
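/*
 * Ends a register replacement: stores the borrowed register under its new name and, if the previous name is still needed, reloads it afterwards
 * The tracker entry is removed and *imlIndex is advanced past the inserted instructions
 */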
void PPCRecompiler_storeReplacedRegister(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, replacedRegisterTracker_t* replacedRegisterTracker, sint32 registerTrackerIndex, sint32* imlIndex)
{
// store register
sint32 imlIndexEdit = *imlIndex;
PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndexEdit, 1);
// name_unusedRegister = unusedRegister
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndexEdit+0);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_NAME_R;
imlInstructionItr->crRegister = PPC_REC_INVALID_REGISTER;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].index;
imlInstructionItr->op_r_name.name = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].registerNewName;
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
imlIndexEdit++;
// load new register if required
if( replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].nameMustBeMaintained )
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndexEdit, 1);
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndexEdit+0);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_R_NAME;
imlInstructionItr->crRegister = PPC_REC_INVALID_REGISTER;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].index;
imlInstructionItr->op_r_name.name = replacedRegisterTracker->replacedRegisterEntry[registerTrackerIndex].registerPreviousName;//ppcImlGenContext->mappedRegister[replacedRegisterTracker.replacedRegisterEntry[i].index];
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
imlIndexEdit += 1;
}
// move last entry to current one
memcpy(replacedRegisterTracker->replacedRegisterEntry+registerTrackerIndex, replacedRegisterTracker->replacedRegisterEntry+replacedRegisterTracker->count-1, sizeof(replacedRegisterTracker->replacedRegisterEntry[0]));
replacedRegisterTracker->count--;
*imlIndex = imlIndexEdit;
}
bool PPCRecompiler_reduceNumberOfFPRRegisters(ppcImlGenContext_t* ppcImlGenContext)
{
	// only xmm0 to xmm14 may be used, xmm15 is reserved
	// this method reduces the number of FPR registers used so that everything fits into the usable range
	// inefficient algorithm for optimizing away excess registers:
	// whenever an instruction references an out-of-range register, its value is temporarily swapped into an unused in-range register via load/store instructions inserted around the original instruction
	// first we remove all name load and store instructions that involve out-of-range registers
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
sint32 imlIndex = 0;
while( imlIndex < imlSegment->imlListCount )
{
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+imlIndex;
if( imlInstructionItr->type == PPCREC_IML_TYPE_FPR_R_NAME || imlInstructionItr->type == PPCREC_IML_TYPE_FPR_NAME_R )
{
if( imlInstructionItr->op_r_name.registerIndex >= PPC_X64_FPR_USABLE_REGISTERS )
{
// convert to NO-OP instruction
imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP;
imlInstructionItr->associatedPPCAddress = 0;
}
}
imlIndex++;
}
}
// replace registers
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
sint32 imlIndex = 0;
while( imlIndex < imlSegment->imlListCount )
{
PPCImlOptimizerUsedRegisters_t registersUsed;
while( true )
{
				PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndex, &registersUsed);
if( registersUsed.readFPR1 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR2 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR3 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.readFPR4 >= PPC_X64_FPR_USABLE_REGISTERS || registersUsed.writtenFPR1 >= PPC_X64_FPR_USABLE_REGISTERS )
{
// get index of register to replace
sint32 fprToReplace = -1;
if( registersUsed.readFPR1 >= PPC_X64_FPR_USABLE_REGISTERS )
fprToReplace = registersUsed.readFPR1;
else if( registersUsed.readFPR2 >= PPC_X64_FPR_USABLE_REGISTERS )
fprToReplace = registersUsed.readFPR2;
else if (registersUsed.readFPR3 >= PPC_X64_FPR_USABLE_REGISTERS)
fprToReplace = registersUsed.readFPR3;
else if (registersUsed.readFPR4 >= PPC_X64_FPR_USABLE_REGISTERS)
fprToReplace = registersUsed.readFPR4;
else if( registersUsed.writtenFPR1 >= PPC_X64_FPR_USABLE_REGISTERS )
fprToReplace = registersUsed.writtenFPR1;
					// generate mask of usable registers
uint8 useableRegisterMask = 0x7F; // lowest bit is fpr register 0
if( registersUsed.readFPR1 != -1 )
useableRegisterMask &= ~(1<<(registersUsed.readFPR1));
if( registersUsed.readFPR2 != -1 )
useableRegisterMask &= ~(1<<(registersUsed.readFPR2));
if (registersUsed.readFPR3 != -1)
useableRegisterMask &= ~(1 << (registersUsed.readFPR3));
if (registersUsed.readFPR4 != -1)
useableRegisterMask &= ~(1 << (registersUsed.readFPR4));
if( registersUsed.writtenFPR1 != -1 )
useableRegisterMask &= ~(1<<(registersUsed.writtenFPR1));
// get highest unused register index (0-6 range)
sint32 unusedRegisterIndex = -1;
for(sint32 f=0; f<PPC_X64_FPR_USABLE_REGISTERS; f++)
{
if( useableRegisterMask&(1<<f) )
{
unusedRegisterIndex = f;
}
}
if( unusedRegisterIndex == -1 )
assert_dbg();
// determine if the placeholder register is actually used (if not we must not load/store it)
uint32 unusedRegisterName = ppcImlGenContext->mappedFPRRegister[unusedRegisterIndex];
bool replacedRegisterIsUsed = true;
if( unusedRegisterName >= PPCREC_NAME_FPR0 && unusedRegisterName < (PPCREC_NAME_FPR0+32) )
{
replacedRegisterIsUsed = imlSegment->ppcFPRUsed[unusedRegisterName-PPCREC_NAME_FPR0];
}
// replace registers that are out of range
PPCRecompiler_replaceFPRRegisterUsage(ppcImlGenContext, imlSegment->imlList+imlIndex, fprToReplace, unusedRegisterIndex);
// add load/store name after instruction
PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndex+1, 2);
// add load/store before current instruction
PPCRecompiler_pushBackIMLInstructions(imlSegment, imlIndex, 2);
// name_unusedRegister = unusedRegister
PPCRecImlInstruction_t* imlInstructionItr = imlSegment->imlList+(imlIndex+0);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
if( replacedRegisterIsUsed )
{
imlInstructionItr->type = PPCREC_IML_TYPE_FPR_NAME_R;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex;
imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[unusedRegisterIndex];
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
else
imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP;
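					// unusedRegister = name_fprToReplace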
imlInstructionItr = imlSegment->imlList+(imlIndex+1);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_FPR_R_NAME;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex;
imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[fprToReplace];
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
					// name_fprToReplace = unusedRegister
imlInstructionItr = imlSegment->imlList+(imlIndex+3);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionItr->type = PPCREC_IML_TYPE_FPR_NAME_R;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex;
imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[fprToReplace];
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
// unusedRegister = name_unusedRegister
imlInstructionItr = imlSegment->imlList+(imlIndex+4);
memset(imlInstructionItr, 0x00, sizeof(PPCRecImlInstruction_t));
if( replacedRegisterIsUsed )
{
imlInstructionItr->type = PPCREC_IML_TYPE_FPR_R_NAME;
imlInstructionItr->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionItr->op_r_name.registerIndex = unusedRegisterIndex;
imlInstructionItr->op_r_name.name = ppcImlGenContext->mappedFPRRegister[unusedRegisterIndex];
imlInstructionItr->op_r_name.copyWidth = 32;
imlInstructionItr->op_r_name.flags = 0;
}
else
imlInstructionItr->type = PPCREC_IML_TYPE_NO_OP;
}
else
break;
}
imlIndex++;
}
}
return true;
}
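/*
 * State used by PPCRecompiler_manageFPRRegistersForSegment:
 * currentMapping records, per physical FPR, which virtual register is currently loaded and when it was last used
 * ppcRegToMapping maps each virtual FPR back to its physical slot (-1 if not loaded)
 */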
typedef struct
{
bool isActive;
uint32 virtualReg;
sint32 lastUseIndex;
}ppcRecRegisterMapping_t;
typedef struct
{
ppcRecRegisterMapping_t currentMapping[PPC_X64_FPR_USABLE_REGISTERS];
sint32 ppcRegToMapping[64];
sint32 currentUseIndex;
}ppcRecManageRegisters_t;
ppcRecRegisterMapping_t* PPCRecompiler_findAvailableRegisterDepr(ppcRecManageRegisters_t* rCtx, PPCImlOptimizerUsedRegisters_t* instructionUsedRegisters)
{
// find free register
for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++)
{
if (rCtx->currentMapping[i].isActive == false)
{
rCtx->currentMapping[i].isActive = true;
rCtx->currentMapping[i].virtualReg = -1;
rCtx->currentMapping[i].lastUseIndex = rCtx->currentUseIndex;
return rCtx->currentMapping + i;
}
}
// all registers are used
return nullptr;
}
ppcRecRegisterMapping_t* PPCRecompiler_findUnloadableRegister(ppcRecManageRegisters_t* rCtx, PPCImlOptimizerUsedRegisters_t* instructionUsedRegisters, uint32 unloadLockedMask)
{
// find unloadable register (with lowest lastUseIndex)
sint32 unloadIndex = -1;
sint32 unloadIndexLastUse = 0x7FFFFFFF;
for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++)
{
if (rCtx->currentMapping[i].isActive == false)
continue;
if( (unloadLockedMask&(1<<i)) != 0 )
continue;
uint32 virtualReg = rCtx->currentMapping[i].virtualReg;
bool isReserved = false;
for (sint32 f = 0; f < 4; f++)
{
if (virtualReg == (sint32)instructionUsedRegisters->fpr[f])
{
isReserved = true;
break;
}
}
if (isReserved)
continue;
if (rCtx->currentMapping[i].lastUseIndex < unloadIndexLastUse)
{
unloadIndexLastUse = rCtx->currentMapping[i].lastUseIndex;
unloadIndex = i;
}
}
cemu_assert(unloadIndex != -1);
return rCtx->currentMapping + unloadIndex;
}
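/*
 * Simple per-segment FPR allocator: walks the instruction list, loads virtual FPRs into physical registers on demand
 * and spills the least recently used mapping (lowest lastUseIndex) when all physical registers are occupied
 * Any registers still loaded at the end of the segment (or before its suffix instruction) are written back
 */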
bool PPCRecompiler_manageFPRRegistersForSegment(ppcImlGenContext_t* ppcImlGenContext, sint32 segmentIndex)
{
ppcRecManageRegisters_t rCtx = { 0 };
for (sint32 i = 0; i < 64; i++)
rCtx.ppcRegToMapping[i] = -1;
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[segmentIndex];
sint32 idx = 0;
sint32 currentUseIndex = 0;
PPCImlOptimizerUsedRegisters_t registersUsed;
while (idx < imlSegment->imlListCount)
{
if ( PPCRecompiler_isSuffixInstruction(imlSegment->imlList + idx) )
break;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList + idx, &registersUsed);
sint32 fprMatch[4];
sint32 fprReplace[4];
fprMatch[0] = -1;
fprMatch[1] = -1;
fprMatch[2] = -1;
fprMatch[3] = -1;
fprReplace[0] = -1;
fprReplace[1] = -1;
fprReplace[2] = -1;
fprReplace[3] = -1;
// generate a mask of registers that we may not free
sint32 numReplacedOperands = 0;
uint32 unloadLockedMask = 0;
for (sint32 f = 0; f < 5; f++)
{
sint32 virtualFpr;
if (f == 0)
virtualFpr = registersUsed.readFPR1;
else if (f == 1)
virtualFpr = registersUsed.readFPR2;
else if (f == 2)
virtualFpr = registersUsed.readFPR3;
else if (f == 3)
virtualFpr = registersUsed.readFPR4;
else if (f == 4)
virtualFpr = registersUsed.writtenFPR1;
if( virtualFpr < 0 )
continue;
cemu_assert_debug(virtualFpr < 64);
// check if this virtual FPR is already loaded in any real register
ppcRecRegisterMapping_t* regMapping;
if (rCtx.ppcRegToMapping[virtualFpr] == -1)
{
// not loaded
// find available register
while (true)
{
					regMapping = PPCRecompiler_findAvailableRegisterDepr(&rCtx, &registersUsed);
if (regMapping == NULL)
{
// unload least recently used register and try again
						ppcRecRegisterMapping_t* unloadRegMapping = PPCRecompiler_findUnloadableRegister(&rCtx, &registersUsed, unloadLockedMask);
// mark as locked
unloadLockedMask |= (1<<(unloadRegMapping- rCtx.currentMapping));
// create unload instruction
PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, 1);
PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx;
memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_NAME_R;
imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionTemp->op_r_name.registerIndex = (uint8)(unloadRegMapping - rCtx.currentMapping);
imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[unloadRegMapping->virtualReg];
imlInstructionTemp->op_r_name.copyWidth = 32;
imlInstructionTemp->op_r_name.flags = 0;
idx++;
// update mapping
unloadRegMapping->isActive = false;
rCtx.ppcRegToMapping[unloadRegMapping->virtualReg] = -1;
}
else
break;
}
// create load instruction
PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, 1);
PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx;
memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_R_NAME;
imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionTemp->op_r_name.registerIndex = (uint8)(regMapping-rCtx.currentMapping);
imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[virtualFpr];
imlInstructionTemp->op_r_name.copyWidth = 32;
imlInstructionTemp->op_r_name.flags = 0;
idx++;
// update mapping
regMapping->virtualReg = virtualFpr;
rCtx.ppcRegToMapping[virtualFpr] = (sint32)(regMapping - rCtx.currentMapping);
regMapping->lastUseIndex = rCtx.currentUseIndex;
rCtx.currentUseIndex++;
}
else
{
regMapping = rCtx.currentMapping + rCtx.ppcRegToMapping[virtualFpr];
regMapping->lastUseIndex = rCtx.currentUseIndex;
rCtx.currentUseIndex++;
}
// replace FPR
bool entryFound = false;
for (sint32 t = 0; t < numReplacedOperands; t++)
{
if (fprMatch[t] == virtualFpr)
{
cemu_assert_debug(fprReplace[t] == (regMapping - rCtx.currentMapping));
entryFound = true;
break;
}
}
if (entryFound == false)
{
cemu_assert_debug(numReplacedOperands != 4);
fprMatch[numReplacedOperands] = virtualFpr;
fprReplace[numReplacedOperands] = (sint32)(regMapping - rCtx.currentMapping);
numReplacedOperands++;
}
}
if (numReplacedOperands > 0)
{
PPCRecompiler_replaceFPRRegisterUsageMultiple(ppcImlGenContext, imlSegment->imlList + idx, fprMatch, fprReplace);
}
// next
idx++;
}
// count loaded registers
sint32 numLoadedRegisters = 0;
for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++)
{
if (rCtx.currentMapping[i].isActive)
numLoadedRegisters++;
}
// store all loaded registers
if (numLoadedRegisters > 0)
{
PPCRecompiler_pushBackIMLInstructions(imlSegment, idx, numLoadedRegisters);
for (sint32 i = 0; i < PPC_X64_FPR_USABLE_REGISTERS; i++)
{
if (rCtx.currentMapping[i].isActive == false)
continue;
PPCRecImlInstruction_t* imlInstructionTemp = imlSegment->imlList + idx;
memset(imlInstructionTemp, 0x00, sizeof(PPCRecImlInstruction_t));
imlInstructionTemp->type = PPCREC_IML_TYPE_FPR_NAME_R;
imlInstructionTemp->operation = PPCREC_IML_OP_ASSIGN;
imlInstructionTemp->op_r_name.registerIndex = i;
imlInstructionTemp->op_r_name.name = ppcImlGenContext->mappedFPRRegister[rCtx.currentMapping[i].virtualReg];
imlInstructionTemp->op_r_name.copyWidth = 32;
imlInstructionTemp->op_r_name.flags = 0;
idx++;
}
}
return true;
}
bool PPCRecompiler_manageFPRRegisters(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
if (PPCRecompiler_manageFPRRegistersForSegment(ppcImlGenContext, s) == false)
return false;
}
return true;
}
/*
* Returns true if the loaded value is guaranteed to be overwritten
*/
bool PPCRecompiler_trackRedundantNameLoadInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth)
{
sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex;
for(sint32 i=startIndex; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
//nameStoreInstruction->op_r_name.registerIndex
PPCImlOptimizerUsedRegisters_t registersUsed;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed);
if( registersUsed.readNamedReg1 == registerIndex || registersUsed.readNamedReg2 == registerIndex || registersUsed.readNamedReg3 == registerIndex )
return false;
if( registersUsed.writtenNamedReg1 == registerIndex )
return true;
}
// todo: Scan next segment(s)
return false;
}
/*
* Returns true if the loaded value is guaranteed to be overwritten
*/
bool PPCRecompiler_trackRedundantFPRNameLoadInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth)
{
sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex;
for(sint32 i=startIndex; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
PPCImlOptimizerUsedRegisters_t registersUsed;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed);
if( registersUsed.readFPR1 == registerIndex || registersUsed.readFPR2 == registerIndex || registersUsed.readFPR3 == registerIndex || registersUsed.readFPR4 == registerIndex)
return false;
if( registersUsed.writtenFPR1 == registerIndex )
return true;
}
// todo: Scan next segment(s)
return false;
}
/*
* Returns true if the loaded name is never changed
*/
bool PPCRecompiler_trackRedundantNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth)
{
sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex;
for(sint32 i=startIndex; i>=0; i--)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
PPCImlOptimizerUsedRegisters_t registersUsed;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed);
if( registersUsed.writtenNamedReg1 == registerIndex )
{
if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME )
return true;
return false;
}
}
return false;
}
sint32 debugCallCounter1 = 0;
/*
* Returns true if the name is overwritten in the current or any following segments
*/
bool PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth)
{
//sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex;
uint32 name = nameStoreInstruction->op_r_name.name;
for(sint32 i=startIndex; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_R_NAME )
{
// name is loaded before being written
if( imlSegment->imlList[i].op_r_name.name == name )
return false;
}
else if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_NAME_R )
{
// name is written before being loaded
if( imlSegment->imlList[i].op_r_name.name == name )
return true;
}
}
if( scanDepth >= 2 )
return false;
if( imlSegment->nextSegmentIsUncertain )
return false;
if( imlSegment->nextSegmentBranchTaken && PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, nameStoreInstruction, scanDepth+1) == false )
return false;
if( imlSegment->nextSegmentBranchNotTaken && PPCRecompiler_trackOverwrittenNameStoreInstruction(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, nameStoreInstruction, scanDepth+1) == false )
return false;
if( imlSegment->nextSegmentBranchTaken == NULL && imlSegment->nextSegmentBranchNotTaken == NULL )
return false;
return true;
}
/*
* Returns true if the loaded FPR name is never changed
*/
bool PPCRecompiler_trackRedundantFPRNameStoreInstruction(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, PPCRecImlInstruction_t* nameStoreInstruction, sint32 scanDepth)
{
sint16 registerIndex = nameStoreInstruction->op_r_name.registerIndex;
for(sint32 i=startIndex; i>=0; i--)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
PPCImlOptimizerUsedRegisters_t registersUsed;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlSegment->imlList+i, &registersUsed);
if( registersUsed.writtenFPR1 == registerIndex )
{
if( imlSegment->imlList[i].type == PPCREC_IML_TYPE_FPR_R_NAME )
return true;
return false;
}
}
// todo: Scan next segment(s)
return false;
}
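/*
 * Recursive helper for PPCRecompiler_getCROverwriteMask (documented below)
 * Accumulates CR bits that are overwritten (written before being read) across up to 3 levels of successor segments
 */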
uint32 _PPCRecompiler_getCROverwriteMask(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, uint32 currentOverwriteMask, uint32 currentReadMask, uint32 scanDepth)
{
// is any bit overwritten but not read?
uint32 overwriteMask = imlSegment->crBitsWritten&~imlSegment->crBitsInput;
currentOverwriteMask |= overwriteMask;
// next segment
if( imlSegment->nextSegmentIsUncertain == false && scanDepth < 3 )
{
uint32 nextSegmentOverwriteMask = 0;
if( imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchNotTaken )
{
uint32 mask0 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, 0, scanDepth+1);
uint32 mask1 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, scanDepth+1);
nextSegmentOverwriteMask = mask0&mask1;
}
else if( imlSegment->nextSegmentBranchNotTaken)
{
nextSegmentOverwriteMask = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, scanDepth+1);
}
nextSegmentOverwriteMask &= ~imlSegment->crBitsRead;
currentOverwriteMask |= nextSegmentOverwriteMask;
}
else if (imlSegment->nextSegmentIsUncertain)
{
if (ppcImlGenContext->segmentListCount >= 5)
{
return 7; // for more complex functions we assume that CR is not passed on
}
}
return currentOverwriteMask;
}
/*
 * Returns a mask of all CR bits that are overwritten (written but not read) in the segment and all its following segments
* If the write state of a CR bit cannot be determined, it is returned as 0 (not overwritten)
*/
uint32 PPCRecompiler_getCROverwriteMask(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment)
{
if (imlSegment->nextSegmentIsUncertain)
{
return 0;
}
if( imlSegment->nextSegmentBranchTaken && imlSegment->nextSegmentBranchNotTaken )
{
uint32 mask0 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchTaken, 0, 0, 0);
uint32 mask1 = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, 0);
return mask0&mask1; // only return bits that are overwritten in both branches
}
else if( imlSegment->nextSegmentBranchNotTaken )
{
uint32 mask = _PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment->nextSegmentBranchNotTaken, 0, 0, 0);
return mask;
}
else
{
// not implemented
}
return 0;
}
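/*
 * Two passes over all segments:
 * 1) collect which CR bits each segment reads and writes (crBitsInput/crBitsRead/crBitsWritten)
 * 2) for every instruction that can write CR, compute crIgnoreMask: the CR bits that following code is known to overwrite without reading, so the backend can skip updating them
 */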
void PPCRecompiler_removeRedundantCRUpdates(ppcImlGenContext_t* ppcImlGenContext)
{
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
for(sint32 i=0; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
if (imlInstruction->type == PPCREC_IML_TYPE_CJUMP)
{
if (imlInstruction->op_conditionalJump.condition != PPCREC_JUMP_CONDITION_NONE)
{
uint32 crBitFlag = 1 << (imlInstruction->op_conditionalJump.crRegisterIndex * 4 + imlInstruction->op_conditionalJump.crBitIndex);
imlSegment->crBitsInput |= (crBitFlag&~imlSegment->crBitsWritten); // flag bits that have not already been written
imlSegment->crBitsRead |= (crBitFlag);
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_CONDITIONAL_R_S32)
{
uint32 crBitFlag = 1 << (imlInstruction->op_conditional_r_s32.crRegisterIndex * 4 + imlInstruction->op_conditional_r_s32.crBitIndex);
imlSegment->crBitsInput |= (crBitFlag&~imlSegment->crBitsWritten); // flag bits that have not already been written
imlSegment->crBitsRead |= (crBitFlag);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MFCR)
{
imlSegment->crBitsRead |= 0xFFFFFFFF;
}
else if (imlInstruction->type == PPCREC_IML_TYPE_R_S32 && imlInstruction->operation == PPCREC_IML_OP_MTCRF)
{
imlSegment->crBitsWritten |= ppc_MTCRFMaskToCRBitMask((uint32)imlInstruction->op_r_immS32.immS32);
}
else if( imlInstruction->type == PPCREC_IML_TYPE_CR )
{
if (imlInstruction->operation == PPCREC_IML_OP_CR_CLEAR ||
imlInstruction->operation == PPCREC_IML_OP_CR_SET)
{
uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD);
imlSegment->crBitsWritten |= (crBitFlag & ~imlSegment->crBitsWritten);
}
else if (imlInstruction->operation == PPCREC_IML_OP_CR_OR ||
imlInstruction->operation == PPCREC_IML_OP_CR_ORC ||
imlInstruction->operation == PPCREC_IML_OP_CR_AND ||
imlInstruction->operation == PPCREC_IML_OP_CR_ANDC)
{
uint32 crBitFlag = 1 << (imlInstruction->op_cr.crD);
imlSegment->crBitsWritten |= (crBitFlag & ~imlSegment->crBitsWritten);
crBitFlag = 1 << (imlInstruction->op_cr.crA);
imlSegment->crBitsRead |= (crBitFlag & ~imlSegment->crBitsRead);
crBitFlag = 1 << (imlInstruction->op_cr.crB);
imlSegment->crBitsRead |= (crBitFlag & ~imlSegment->crBitsRead);
}
else
cemu_assert_unimplemented();
}
else if( PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7 )
{
imlSegment->crBitsWritten |= (0xF<<(imlInstruction->crRegister*4));
}
else if( (imlInstruction->type == PPCREC_IML_TYPE_STORE || imlInstruction->type == PPCREC_IML_TYPE_STORE_INDEXED) && imlInstruction->op_storeLoad.copyWidth == PPC_REC_STORE_STWCX_MARKER )
{
// overwrites CR0
imlSegment->crBitsWritten |= (0xF<<0);
}
}
}
// flag instructions that write to CR where we can ignore individual CR bits
for(sint32 s=0; s<ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
for(sint32 i=0; i<imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList+i;
if( PPCRecompilerImlAnalyzer_canTypeWriteCR(imlInstruction) && imlInstruction->crRegister >= 0 && imlInstruction->crRegister <= 7 )
{
uint32 crBitFlags = 0xF<<((uint32)imlInstruction->crRegister*4);
uint32 crOverwriteMask = PPCRecompiler_getCROverwriteMask(ppcImlGenContext, imlSegment);
uint32 crIgnoreMask = crOverwriteMask & ~imlSegment->crBitsRead;
imlInstruction->crIgnoreMask = crIgnoreMask;
}
}
}
}
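/*
 * Returns true if the given virtual GPR is written by any instruction in the range [startIndex, endIndex]
 */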
bool PPCRecompiler_checkIfGPRIsModifiedInRange(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 startIndex, sint32 endIndex, sint32 vreg)
{
PPCImlOptimizerUsedRegisters_t registersUsed;
for (sint32 i = startIndex; i <= endIndex; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed);
if (registersUsed.writtenNamedReg1 == vreg)
return true;
}
return false;
}
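/*
 * Scans backwards (through at most one predecessor segment) for a NAME_R store of the given name
 * If the register that held the name is not modified in between, its index is returned for reuse; otherwise -1
 */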
sint32 PPCRecompiler_scanBackwardsForReusableRegister(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* startSegment, sint32 startIndex, sint32 name)
{
// current segment
sint32 currentIndex = startIndex;
PPCRecImlSegment_t* currentSegment = startSegment;
sint32 segmentIterateCount = 0;
sint32 foundRegister = -1;
while (true)
{
// stop scanning if segment is enterable
if (currentSegment->isEnterable)
return -1;
while (currentIndex >= 0)
{
if (currentSegment->imlList[currentIndex].type == PPCREC_IML_TYPE_NAME_R && currentSegment->imlList[currentIndex].op_r_name.name == name)
{
foundRegister = currentSegment->imlList[currentIndex].op_r_name.registerIndex;
break;
}
// previous instruction
currentIndex--;
}
if (foundRegister >= 0)
break;
// continue at previous segment (if there is only one)
if (segmentIterateCount >= 1)
return -1;
if (currentSegment->list_prevSegments.size() != 1)
return -1;
currentSegment = currentSegment->list_prevSegments[0];
currentIndex = currentSegment->imlListCount - 1;
segmentIterateCount++;
}
	// scan again to make sure the register is not modified in between
currentIndex = startIndex;
currentSegment = startSegment;
segmentIterateCount = 0;
PPCImlOptimizerUsedRegisters_t registersUsed;
while (true)
{
while (currentIndex >= 0)
{
// check if register is modified
			PPCRecompiler_checkRegisterUsage(ppcImlGenContext, currentSegment->imlList+currentIndex, &registersUsed);
if (registersUsed.writtenNamedReg1 == foundRegister)
return -1;
// check if end of scan reached
if (currentSegment->imlList[currentIndex].type == PPCREC_IML_TYPE_NAME_R && currentSegment->imlList[currentIndex].op_r_name.name == name)
{
//foundRegister = currentSegment->imlList[currentIndex].op_r_name.registerIndex;
return foundRegister;
}
// previous instruction
currentIndex--;
}
// continue at previous segment (if there is only one)
if (segmentIterateCount >= 1)
return -1;
if (currentSegment->list_prevSegments.size() != 1)
return -1;
currentSegment = currentSegment->list_prevSegments[0];
currentIndex = currentSegment->imlListCount - 1;
segmentIterateCount++;
}
return -1;
}
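/*
 * Forward-scan helper for PPCRecompiler_optimizeDirectFloatCopies (see the pattern description further below)
 * Looks for single-precision stores of the value loaded at imlIndexLoad; matching loads/stores are flagged as notExpanded
 * and an explicit expand instruction is inserted after the last matching store
 */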
void PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexLoad, sint32 fprIndex)
{
PPCRecImlInstruction_t* imlInstructionLoad = imlSegment->imlList + imlIndexLoad;
if (imlInstructionLoad->op_storeLoad.flags2.notExpanded)
return;
PPCImlOptimizerUsedRegisters_t registersUsed;
	sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlListCount); // don't scan too far (costs time, and the chance of being able to merge the load+store drops with distance)
bool foundMatch = false;
sint32 lastStore = -1;
for (sint32 i = imlIndexLoad + 1; i < scanRangeEnd; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
if (PPCRecompiler_isSuffixInstruction(imlInstruction))
{
break;
}
// check if FPR is stored
if ((imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0) ||
(imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0))
{
if (imlInstruction->op_storeLoad.registerData == fprIndex)
{
if (foundMatch == false)
{
// flag the load-single instruction as "don't expand" (leave single value as-is)
imlInstructionLoad->op_storeLoad.flags2.notExpanded = true;
}
// also set the flag for the store instruction
PPCRecImlInstruction_t* imlInstructionStore = imlInstruction;
imlInstructionStore->op_storeLoad.flags2.notExpanded = true;
foundMatch = true;
lastStore = i + 1;
continue;
}
}
// check if FPR is overwritten (we can actually ignore read operations?)
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed);
if (registersUsed.writtenFPR1 == fprIndex)
break;
if (registersUsed.readFPR1 == fprIndex)
break;
if (registersUsed.readFPR2 == fprIndex)
break;
if (registersUsed.readFPR3 == fprIndex)
break;
if (registersUsed.readFPR4 == fprIndex)
break;
}
if (foundMatch)
{
// insert expand instruction after store
PPCRecImlInstruction_t* newExpand = PPCRecompiler_insertInstruction(imlSegment, lastStore);
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, newExpand, PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64, fprIndex);
}
}
/*
* Scans for patterns:
* <Load sp float into register f>
* <Random unrelated instructions>
* <Store sp float from register f>
 * For these patterns the store and load are modified to work with un-extended values (float remains as float, no double conversion)
* The float->double extension is then executed later
* Advantages:
* Keeps denormals and other special float values intact
* Slightly improves performance
*/
void PPCRecompiler_optimizeDirectFloatCopies(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
for (sint32 i = 0; i < imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1)
{
PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, imlSegment, i, imlInstruction->op_storeLoad.registerData);
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED && imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1)
{
PPCRecompiler_optimizeDirectFloatCopiesScanForward(ppcImlGenContext, imlSegment, i, imlInstruction->op_storeLoad.registerData);
}
}
}
}
void PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlSegment_t* imlSegment, sint32 imlIndexLoad, sint32 gprIndex)
{
PPCRecImlInstruction_t* imlInstructionLoad = imlSegment->imlList + imlIndexLoad;
if ( imlInstructionLoad->op_storeLoad.flags2.swapEndian == false )
return;
bool foundMatch = false;
PPCImlOptimizerUsedRegisters_t registersUsed;
	sint32 scanRangeEnd = std::min(imlIndexLoad + 25, imlSegment->imlListCount); // don't scan too far (costs time, and the chance of being able to merge the load+store drops with distance)
sint32 i = imlIndexLoad + 1;
for (; i < scanRangeEnd; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
if (PPCRecompiler_isSuffixInstruction(imlInstruction))
{
break;
}
// check if GPR is stored
if ((imlInstruction->type == PPCREC_IML_TYPE_STORE && imlInstruction->op_storeLoad.copyWidth == 32 ) )
{
if (imlInstruction->op_storeLoad.registerMem == gprIndex)
break;
if (imlInstruction->op_storeLoad.registerData == gprIndex)
{
PPCRecImlInstruction_t* imlInstructionStore = imlInstruction;
if (foundMatch == false)
{
// switch the endian swap flag for the load instruction
imlInstructionLoad->op_storeLoad.flags2.swapEndian = !imlInstructionLoad->op_storeLoad.flags2.swapEndian;
foundMatch = true;
}
// switch the endian swap flag for the store instruction
imlInstructionStore->op_storeLoad.flags2.swapEndian = !imlInstructionStore->op_storeLoad.flags2.swapEndian;
// keep scanning
continue;
}
}
// check if GPR is accessed
		PPCRecompiler_checkRegisterUsage(ppcImlGenContext, imlInstruction, &registersUsed);
if (registersUsed.readNamedReg1 == gprIndex ||
registersUsed.readNamedReg2 == gprIndex ||
registersUsed.readNamedReg3 == gprIndex)
{
break;
}
if (registersUsed.writtenNamedReg1 == gprIndex)
return; // GPR overwritten, we don't need to byte swap anymore
}
if (foundMatch)
{
// insert expand instruction
PPCRecImlInstruction_t* newExpand = PPCRecompiler_insertInstruction(imlSegment, i);
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, newExpand, PPCREC_IML_OP_ENDIAN_SWAP, gprIndex, gprIndex);
}
}
/*
* Scans for patterns:
 * <Load 32bit integer into register r>
 * <Random unrelated instructions>
 * <Store 32bit integer from register r>
 * For these patterns the store and load are modified to work with non-swapped values
* The big_endian->little_endian conversion is then executed later
* Advantages:
* Slightly improves performance
*/
void PPCRecompiler_optimizeDirectIntegerCopies(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
for (sint32 i = 0; i < imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
if (imlInstruction->type == PPCREC_IML_TYPE_LOAD && imlInstruction->op_storeLoad.copyWidth == 32 && imlInstruction->op_storeLoad.flags2.swapEndian )
{
PPCRecompiler_optimizeDirectIntegerCopiesScanForward(ppcImlGenContext, imlSegment, i, imlInstruction->op_storeLoad.registerData);
}
}
}
}
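/*
 * Returns the GQR index (0-7) if the given virtual register is mapped to one of the UGQR SPRs, otherwise -1
 */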
sint32 _getGQRIndexFromRegister(ppcImlGenContext_t* ppcImlGenContext, sint32 registerIndex)
{
if (registerIndex == PPC_REC_INVALID_REGISTER)
return -1;
sint32 namedReg = ppcImlGenContext->mappedRegister[registerIndex];
if (namedReg >= (PPCREC_NAME_SPR0 + SPR_UGQR0) && namedReg <= (PPCREC_NAME_SPR0 + SPR_UGQR7))
{
return namedReg - (PPCREC_NAME_SPR0 + SPR_UGQR0);
}
return -1;
}
bool PPCRecompiler_isUGQRValueKnown(ppcImlGenContext_t* ppcImlGenContext, sint32 gqrIndex, uint32& gqrValue)
{
// UGQR 2 to 7 are initialized by the OS and we assume that games won't ever permanently touch those
// todo - hack - replace with more accurate solution
if (gqrIndex == 2)
gqrValue = 0x00040004;
else if (gqrIndex == 3)
gqrValue = 0x00050005;
else if (gqrIndex == 4)
gqrValue = 0x00060006;
else if (gqrIndex == 5)
gqrValue = 0x00070007;
else
return false;
return true;
}
/*
* If value of GQR can be predicted for a given PSQ load or store instruction then replace it with an optimized version
*/
void PPCRecompiler_optimizePSQLoadAndStore(ppcImlGenContext_t* ppcImlGenContext)
{
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
for (sint32 i = 0; i < imlSegment->imlListCount; i++)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
if (imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD || imlInstruction->type == PPCREC_IML_TYPE_FPR_LOAD_INDEXED)
{
if(imlInstruction->op_storeLoad.mode != PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0 &&
imlInstruction->op_storeLoad.mode != PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 )
continue;
// get GQR value
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
sint32 gqrIndex = _getGQRIndexFromRegister(ppcImlGenContext, imlInstruction->op_storeLoad.registerGQR);
cemu_assert(gqrIndex >= 0);
if (ppcImlGenContext->tracking.modifiesGQR[gqrIndex])
continue;
//uint32 gqrValue = ppcInterpreterCurrentInstance->sprNew.UGQR[gqrIndex];
uint32 gqrValue;
if (!PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, gqrValue))
continue;
uint32 formatType = (gqrValue >> 16) & 7;
uint32 scale = (gqrValue >> 24) & 0x3F;
if (scale != 0)
continue; // only generic handler supports scale
if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0)
{
if (formatType == 0)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0;
else if (formatType == 4)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U8_PS0;
else if (formatType == 5)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U16_PS0;
else if (formatType == 6)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S8_PS0;
else if (formatType == 7)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S16_PS0;
}
else if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1)
{
if (formatType == 0)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1;
else if (formatType == 4)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1;
else if (formatType == 5)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1;
else if (formatType == 6)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1;
else if (formatType == 7)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1;
}
}
else if (imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE || imlInstruction->type == PPCREC_IML_TYPE_FPR_STORE_INDEXED)
{
if(imlInstruction->op_storeLoad.mode != PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0 &&
imlInstruction->op_storeLoad.mode != PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1)
continue;
// get GQR value
cemu_assert_debug(imlInstruction->op_storeLoad.registerGQR != PPC_REC_INVALID_REGISTER);
sint32 gqrIndex = _getGQRIndexFromRegister(ppcImlGenContext, imlInstruction->op_storeLoad.registerGQR);
cemu_assert(gqrIndex >= 0);
if (ppcImlGenContext->tracking.modifiesGQR[gqrIndex])
continue;
uint32 gqrValue;
if(!PPCRecompiler_isUGQRValueKnown(ppcImlGenContext, gqrIndex, gqrValue))
continue;
uint32 formatType = (gqrValue >> 16) & 7;
uint32 scale = (gqrValue >> 24) & 0x3F;
if (scale != 0)
continue; // only generic handler supports scale
if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0)
{
if (formatType == 0)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0;
else if (formatType == 4)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U8_PS0;
else if (formatType == 5)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U16_PS0;
else if (formatType == 6)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S8_PS0;
else if (formatType == 7)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S16_PS0;
}
else if (imlInstruction->op_storeLoad.mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1)
{
if (formatType == 0)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1;
else if (formatType == 4)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1;
else if (formatType == 5)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1;
else if (formatType == 6)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1;
else if (formatType == 7)
imlInstruction->op_storeLoad.mode = PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1;
}
}
}
}
}
/*
* Returns true if registerWrite overwrites any of the registers read by registerRead
*/
bool PPCRecompilerAnalyzer_checkForGPROverwrite(PPCImlOptimizerUsedRegisters_t* registerRead, PPCImlOptimizerUsedRegisters_t* registerWrite)
{
if (registerWrite->writtenNamedReg1 < 0)
return false;
if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg1)
return true;
if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg2)
return true;
if (registerWrite->writtenNamedReg1 == registerRead->readNamedReg3)
return true;
return false;
}
void _reorderConditionModifyInstructions(PPCRecImlSegment_t* imlSegment)
{
PPCRecImlInstruction_t* lastInstruction = PPCRecompilerIML_getLastInstruction(imlSegment);
// last instruction a conditional branch?
if (lastInstruction == nullptr || lastInstruction->type != PPCREC_IML_TYPE_CJUMP)
return;
if (lastInstruction->op_conditionalJump.crRegisterIndex >= 8)
return;
// get CR bitmask of bit required for conditional jump
PPCRecCRTracking_t crTracking;
PPCRecompilerImlAnalyzer_getCRTracking(lastInstruction, &crTracking);
uint32 requiredCRBits = crTracking.readCRBits;
// scan backwards until we find the instruction that sets the CR
sint32 crSetterInstructionIndex = -1;
sint32 unsafeInstructionIndex = -1;
for (sint32 i = imlSegment->imlListCount-2; i >= 0; i--)
{
PPCRecImlInstruction_t* imlInstruction = imlSegment->imlList + i;
PPCRecompilerImlAnalyzer_getCRTracking(imlInstruction, &crTracking);
if (crTracking.readCRBits != 0)
			return; // don't handle complex cases for now
if (crTracking.writtenCRBits != 0)
{
if ((crTracking.writtenCRBits&requiredCRBits) != 0)
{
crSetterInstructionIndex = i;
break;
}
else
{
				return; // other CR bits overwritten (don't handle complex cases)
}
}
// is safe? (no risk of overwriting x64 eflags)
if ((imlInstruction->type == PPCREC_IML_TYPE_NAME_R || imlInstruction->type == PPCREC_IML_TYPE_R_NAME || imlInstruction->type == PPCREC_IML_TYPE_NO_OP) ||
(imlInstruction->type == PPCREC_IML_TYPE_FPR_NAME_R || imlInstruction->type == PPCREC_IML_TYPE_FPR_R_NAME) ||
(imlInstruction->type == PPCREC_IML_TYPE_R_S32 && (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)) ||
(imlInstruction->type == PPCREC_IML_TYPE_R_R && (imlInstruction->operation == PPCREC_IML_OP_ASSIGN)) )
continue;
// not safe
//hasUnsafeInstructions = true;
if (unsafeInstructionIndex == -1)
unsafeInstructionIndex = i;
}
if (crSetterInstructionIndex < 0)
return;
if (unsafeInstructionIndex < 0)
return; // no danger of overwriting eflags, don't reorder
// check if we can move the CR setter instruction to after unsafeInstructionIndex
PPCRecCRTracking_t crTrackingSetter = crTracking;
PPCImlOptimizerUsedRegisters_t regTrackingCRSetter;
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList+crSetterInstructionIndex, ®TrackingCRSetter);
if (regTrackingCRSetter.writtenFPR1 >= 0 || regTrackingCRSetter.readFPR1 >= 0 || regTrackingCRSetter.readFPR2 >= 0 || regTrackingCRSetter.readFPR3 >= 0 || regTrackingCRSetter.readFPR4 >= 0)
return; // we don't handle FPR dependency yet so just ignore FPR instructions
PPCImlOptimizerUsedRegisters_t registerTracking;
if (regTrackingCRSetter.writtenNamedReg1 >= 0)
{
// CR setter does write GPR
for (sint32 i = crSetterInstructionIndex + 1; i <= unsafeInstructionIndex; i++)
{
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + i, ®isterTracking);
// reads register written by CR setter?
if (PPCRecompilerAnalyzer_checkForGPROverwrite(®isterTracking, ®TrackingCRSetter))
{
				return; // can't move CR setter because of dependency
}
// writes register read by CR setter?
if (PPCRecompilerAnalyzer_checkForGPROverwrite(®TrackingCRSetter, ®isterTracking))
{
				return; // can't move CR setter because of dependency
}
// overwrites register written by CR setter?
if (regTrackingCRSetter.writtenNamedReg1 == registerTracking.writtenNamedReg1)
return;
}
}
else
{
// CR setter does not write GPR
for (sint32 i = crSetterInstructionIndex + 1; i <= unsafeInstructionIndex; i++)
{
PPCRecompiler_checkRegisterUsage(NULL, imlSegment->imlList + i, ®isterTracking);
// writes register read by CR setter?
if (PPCRecompilerAnalyzer_checkForGPROverwrite(®TrackingCRSetter, ®isterTracking))
{
				return; // can't move CR setter because of dependency
}
}
}
// move CR setter instruction
#ifdef CEMU_DEBUG_ASSERT
if ((unsafeInstructionIndex + 1) <= crSetterInstructionIndex)
assert_dbg();
#endif
PPCRecImlInstruction_t* newCRSetterInstruction = PPCRecompiler_insertInstruction(imlSegment, unsafeInstructionIndex+1);
memcpy(newCRSetterInstruction, imlSegment->imlList + crSetterInstructionIndex, sizeof(PPCRecImlInstruction_t));
PPCRecompilerImlGen_generateNewInstruction_noOp(NULL, imlSegment->imlList + crSetterInstructionIndex);
}
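// Implementation note: the CR setter is not shuffled within the instruction array; instead a copy
// is inserted right after the last unsafe instruction and the original (which sits before the
// insertion point and therefore keeps its index) is turned into a no-op.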
/*
* Move instructions which update the condition flags closer to the instruction that consumes them
* On x64 this improves performance since we often can avoid storing CR in memory
*/
void PPCRecompiler_reorderConditionModifyInstructions(ppcImlGenContext_t* ppcImlGenContext)
{
// check if this segment has a conditional branch
for (sint32 s = 0; s < ppcImlGenContext->segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext->segmentList[s];
_reorderConditionModifyInstructions(imlSegment);
}
}
// ==== cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64GenFPU.cpp ====
// (repo: cemu-project/Cemu, license: MPL-2.0)
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
void x64Gen_genSSEVEXPrefix2(x64GenContext_t* x64GenContext, sint32 xmmRegister1, sint32 xmmRegister2, bool use64BitMode)
{
if( xmmRegister1 < 8 && xmmRegister2 < 8 && use64BitMode == false )
return;
uint8 v = 0x40;
if( xmmRegister1 >= 8 )
v |= 0x01;
if( xmmRegister2 >= 8 )
v |= 0x04;
if( use64BitMode )
v |= 0x08;
x64Gen_writeU8(x64GenContext, v);
}
void x64Gen_genSSEVEXPrefix1(x64GenContext_t* x64GenContext, sint32 xmmRegister, bool use64BitMode)
{
if( xmmRegister < 8 && use64BitMode == false )
return;
uint8 v = 0x40;
if( use64BitMode )
v |= 0x01;
if( xmmRegister >= 8 )
v |= 0x04;
x64Gen_writeU8(x64GenContext, v);
}
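// Note: despite the "VEX" naming, the byte emitted by the two helpers above is a legacy x86-64 REX
// prefix (base value 0x40). In the two-register variant, bit 0 is REX.B (extends the ModRM r/m
// register), bit 2 is REX.R (extends the ModRM reg field) and bit 3 is REX.W (64-bit operand size).
// In the single-register variant, the callers pass true for the flag when the memory base register
// needs extension (e.g. R15), which sets REX.B, while bit 2 again extends the XMM register index.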
void x64Gen_movaps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSource)
{
// SSE
// copy xmm register
// MOVAPS <xmm>, <xmm>
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSource, xmmRegisterDest, false); // tested
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x28); // alternative encoding: 0x29, source and destination register are exchanged
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSource&7));
}
void x64Gen_movupd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
// move two doubles from memory into xmm register
// MOVUPD <xmm>, [<reg>+<imm>]
if( memRegister == REG_ESP )
{
// todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range
// 66 0F 10 84 E4 23 01 00 00
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegister, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x10);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8);
x64Gen_writeU8(x64GenContext, 0xE4);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else if( memRegister == REG_NONE )
{
assert_dbg();
//x64Gen_writeU8(x64GenContext, 0x66);
//x64Gen_writeU8(x64GenContext, 0x0F);
//x64Gen_writeU8(x64GenContext, 0x10);
//x64Gen_writeU8(x64GenContext, 0x05+(xmmRegister&7)*8);
//x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_movupd_memReg128_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
	// move two doubles from xmm register into memory
// MOVUPD [<reg>+<imm>], <xmm>
if( memRegister == REG_ESP )
{
// todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegister, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x11);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8);
x64Gen_writeU8(x64GenContext, 0xE4);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else if( memRegister == REG_NONE )
{
assert_dbg();
//x64Gen_writeU8(x64GenContext, 0x66);
//x64Gen_writeU8(x64GenContext, 0x0F);
//x64Gen_writeU8(x64GenContext, 0x11);
//x64Gen_writeU8(x64GenContext, 0x05+(xmmRegister&7)*8);
//x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_movddup_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE3
// move one double from memory into lower and upper half of a xmm register
if( memRegister == REG_RSP )
{
// MOVDDUP <xmm>, [<reg>+<imm>]
// todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range
x64Gen_writeU8(x64GenContext, 0xF2);
if( xmmRegister >= 8 )
x64Gen_writeU8(x64GenContext, 0x44);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x12);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8);
x64Gen_writeU8(x64GenContext, 0xE4);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else if( memRegister == REG_R15 )
{
// MOVDDUP <xmm>, [<reg>+<imm>]
// todo: Short form of instruction if memImmU32 is 0 or in -128 to 127 range
// F2 41 0F 12 87 - 44 33 22 11
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegister, true);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x12);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegister&7)*8);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else if( memRegister == REG_NONE )
{
// MOVDDUP <xmm>, [<imm>]
// 36 F2 0F 12 05 - 00 00 00 00
assert_dbg();
//x64Gen_writeU8(x64GenContext, 0x36);
//x64Gen_writeU8(x64GenContext, 0xF2);
//x64Gen_writeU8(x64GenContext, 0x0F);
//x64Gen_writeU8(x64GenContext, 0x12);
//x64Gen_writeU8(x64GenContext, 0x05+(xmmRegister&7)*8);
//x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_movddup_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE3
// move low double from xmm register into lower and upper half of a different xmm register
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x12);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_movhlps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE1
	// move high double of source xmm register into lower half of destination xmm register, upper half is left unchanged
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x12);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_movsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// move lower double from xmm register into lower half of a different xmm register, leave other half untouched
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x10); // alternative encoding: 0x11, src and dest exchanged
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_movsd_memReg64_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
// move lower 64bits (double) of xmm register to memory location
if( memRegister == REG_NONE )
{
// MOVSD [<imm>], <xmm>
// F2 0F 11 05 - 45 23 01 00
assert_dbg();
//x64Gen_writeU8(x64GenContext, 0xF2);
//x64Gen_genSSEVEXPrefix(x64GenContext, xmmRegister, 0, false);
//x64Gen_writeU8(x64GenContext, 0x0F);
//x64Gen_writeU8(x64GenContext, 0x11);
//x64Gen_writeU8(x64GenContext, 0x05+xmmRegister*8);
//x64Gen_writeU32(x64GenContext, memImmU32);
}
else if( memRegister == REG_RSP )
{
// MOVSD [RSP+<imm>], <xmm>
// F2 0F 11 84 24 - 33 22 11 00
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, 0, xmmRegister, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x11);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8);
x64Gen_writeU8(x64GenContext, 0x24);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_movlpd_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE3
	// move one double from memory into lower half of a xmm register, leave upper half unchanged
if( memRegister == REG_NONE )
{
// MOVLPD <xmm>, [<imm>]
//x64Gen_writeU8(x64GenContext, 0x66);
//x64Gen_writeU8(x64GenContext, 0x0F);
//x64Gen_writeU8(x64GenContext, 0x12);
//x64Gen_writeU8(x64GenContext, 0x05+(xmmRegister&7)*8);
//x64Gen_writeU32(x64GenContext, memImmU32);
assert_dbg();
}
else if( memRegister == REG_RSP )
{
// MOVLPD <xmm>, [<reg64>+<imm>]
// 66 0F 12 84 24 - 33 22 11 00
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, 0, xmmRegister, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x12);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegister&7)*8);
x64Gen_writeU8(x64GenContext, 0x24);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x14);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x15);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc, uint8 imm8)
{
// SSE2
// shuffled copy source to destination
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0xC6);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
x64Gen_writeU8(x64GenContext, imm8);
}
void x64Gen_addsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// add bottom double of two xmm registers, leave upper quadword unchanged
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false); // untested
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x58);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_addpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// add both doubles of two xmm registers
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x58);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_subsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// subtract bottom double of two xmm registers, leave upper quadword unchanged
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5C);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_subpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// subtract both doubles of two xmm registers
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false); // untested
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5C);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_mulsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// multiply bottom double of two xmm registers, leave upper quadword unchanged
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x59);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_mulpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// multiply both doubles of two xmm registers
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false); // untested
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x59);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_mulpd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
if (memRegister == REG_NONE)
{
assert_dbg();
}
else if (memRegister == REG_R14)
{
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_writeU8(x64GenContext, (xmmRegister < 8) ? 0x41 : 0x45);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x59);
x64Gen_writeU8(x64GenContext, 0x86 + (xmmRegister & 7) * 8);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_divsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// divide bottom double of two xmm registers, leave upper quadword unchanged
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5E);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_divpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// divide bottom and top double of two xmm registers
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5E);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_comisd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// compare bottom doubles
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false); // untested
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2F);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 memoryReg, sint32 memImmS32)
{
// SSE2
// compare bottom double with double from memory location
if( memoryReg == REG_R15 )
{
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2F);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegisterDest&7)*8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// compare bottom doubles
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2E);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_comiss_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 memoryReg, sint32 memImmS32)
{
// SSE2
// compare bottom float with float from memory location
if (memoryReg == REG_R15)
{
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2F);
x64Gen_writeU8(x64GenContext, 0x87 + (xmmRegisterDest & 7) * 8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_orps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, uint32 memReg, uint32 memImmS32)
{
// SSE2
	// or xmm register with 128 bit value from memory
if( memReg == REG_R15 )
{
x64Gen_genSSEVEXPrefix2(x64GenContext, memReg, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x56);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegisterDest&7)*8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, uint32 memReg, uint32 memImmS32)
{
// SSE2
// xor xmm register with 128 bit value from memory
if( memReg == REG_R15 )
{
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); // todo: should be x64Gen_genSSEVEXPrefix2() with memReg?
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x57);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegisterDest&7)*8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_andpd_xmmReg_memReg128(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
if (memRegister == REG_NONE)
{
assert_dbg();
}
else if (memRegister == REG_R14)
{
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_writeU8(x64GenContext, (xmmRegister < 8) ? 0x41 : 0x45);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x54);
x64Gen_writeU8(x64GenContext, 0x86 + (xmmRegister & 7) * 8);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_andps_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, uint32 memReg, uint32 memImmS32)
{
// SSE2
// and xmm register with 128 bit value from memory
if( memReg == REG_R15 )
{
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true); // todo: should be x64Gen_genSSEVEXPrefix2() with memReg?
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x54);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegisterDest&7)*8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_andps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// and xmm register with xmm register
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x54);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_pcmpeqd_xmmReg_mem128Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, uint32 memReg, uint32 memImmS32)
{
// SSE2
// doubleword integer compare
if( memReg == REG_R15 )
{
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, true);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x76);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegisterDest&7)*8);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
assert_dbg();
}
void x64Gen_cvttpd2dq_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// convert two doubles into two 32-bit integers in bottom part of xmm register, reset upper 64 bits of destination register
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0xE6);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDest, sint32 xmmRegisterSrc)
{
// SSE2
// convert double to truncated integer in general purpose register
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, registerDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2C);
x64Gen_writeU8(x64GenContext, 0xC0+(registerDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// converts bottom 64bit double to bottom 32bit single
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5A);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// converts two 64bit doubles to two 32bit singles in bottom half of register
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5A);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvtps2pd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// converts two 32bit singles to two 64bit doubles
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5A);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// converts bottom 32bit single to bottom 64bit double
x64Gen_writeU8(x64GenContext, 0xF3);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x5A);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvtpi2pd_xmmReg_mem64Reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 memReg, sint32 memImmS32)
{
// SSE2
// converts two signed 32bit integers to two doubles
if( memReg == REG_RSP )
{
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix1(x64GenContext, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2A);
x64Gen_writeU8(x64GenContext, 0x84+(xmmRegisterDest&7)*8);
x64Gen_writeU8(x64GenContext, 0x24);
x64Gen_writeU32(x64GenContext, (uint32)memImmS32);
}
else
{
assert_dbg();
}
}
void x64Gen_cvtsd2si_reg64Low_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDest, sint32 xmmRegisterSrc)
{
// SSE2
	// converts bottom 64bit double to 32bit signed integer in general purpose register, rounds according to the current floating-point rounding mode
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, registerDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2D);
x64Gen_writeU8(x64GenContext, 0xC0+(registerDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDest, sint32 xmmRegisterSrc)
{
// SSE2
// converts bottom 64bit double to 32bit signed integer in general purpose register, always truncate
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, registerDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x2C);
x64Gen_writeU8(x64GenContext, 0xC0+(registerDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_sqrtsd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// calculates square root of bottom double
x64Gen_writeU8(x64GenContext, 0xF2);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x51);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_sqrtpd_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// calculates square root of bottom and top double
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x51);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_rcpss_xmmReg_xmmReg(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 xmmRegisterSrc)
{
// SSE2
// approximates reciprocal of bottom 32bit single
x64Gen_writeU8(x64GenContext, 0xF3);
x64Gen_genSSEVEXPrefix2(x64GenContext, xmmRegisterSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x53);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(xmmRegisterSrc&7));
}
void x64Gen_mulss_xmmReg_memReg64(x64GenContext_t* x64GenContext, sint32 xmmRegister, sint32 memRegister, uint32 memImmU32)
{
// SSE2
if( memRegister == REG_NONE )
{
assert_dbg();
}
else if( memRegister == 15 )
{
x64Gen_writeU8(x64GenContext, 0xF3);
x64Gen_writeU8(x64GenContext, (xmmRegister<8)?0x41:0x45);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x59);
x64Gen_writeU8(x64GenContext, 0x87+(xmmRegister&7)*8);
x64Gen_writeU32(x64GenContext, memImmU32);
}
else
{
assert_dbg();
}
}
void x64Gen_movd_xmmReg_reg64Low32(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 registerSrc)
{
// SSE2
// copy low 32bit of general purpose register into xmm register
// MOVD <xmm>, <reg32>
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, registerSrc, xmmRegisterDest, false);
x64Gen_writeU8(x64GenContext, 0x0F);
	x64Gen_writeU8(x64GenContext, 0x6E); // alternative encoding: 0x7E, source and destination register are exchanged
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(registerSrc&7));
}
void x64Gen_movd_reg64Low32_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDest, sint32 xmmRegisterSrc)
{
// SSE2
	// copy low 32bit of xmm register into general purpose register
// MOVD <reg32>, <xmm>
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, registerDest, xmmRegisterSrc, false);
x64Gen_writeU8(x64GenContext, 0x0F);
	x64Gen_writeU8(x64GenContext, 0x7E); // alternative encoding: 0x6E, source and destination register are exchanged
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterSrc&7)*8+(registerDest&7));
}
void x64Gen_movq_xmmReg_reg64(x64GenContext_t* x64GenContext, sint32 xmmRegisterDest, sint32 registerSrc)
{
// SSE2
// copy general purpose register into xmm register
	// MOVQ <xmm>, <reg64>
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, registerSrc, xmmRegisterDest, true);
x64Gen_writeU8(x64GenContext, 0x0F);
	x64Gen_writeU8(x64GenContext, 0x6E); // alternative encoding: 0x7E, source and destination register are exchanged
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterDest&7)*8+(registerSrc&7));
}
void x64Gen_movq_reg64_xmmReg(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 xmmRegisterSrc)
{
// SSE2
	// copy xmm register into general purpose register
	// MOVQ <reg64>, <xmm>
x64Gen_writeU8(x64GenContext, 0x66);
x64Gen_genSSEVEXPrefix2(x64GenContext, registerDst, xmmRegisterSrc, true);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x7E);
x64Gen_writeU8(x64GenContext, 0xC0+(xmmRegisterSrc&7)*8+(registerDst&7));
}
// ==== cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64FPU.cpp ====
// (repo: cemu-project/Cemu, license: MPL-2.0)
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "asm/x64util.h"
#include "Common/cpu_features.h"
void PPCRecompilerX64Gen_imlInstruction_fpr_r_name(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
uint32 name = imlInstruction->op_r_name.name;
if( name >= PPCREC_NAME_FPR0 && name < (PPCREC_NAME_FPR0+32) )
{
x64Gen_movupd_xmmReg_memReg128(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, fpr)+sizeof(FPR_t)*(name-PPCREC_NAME_FPR0));
}
	else if( name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0+8) )
{
x64Gen_movupd_xmmReg_memReg128(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, temporaryFPR)+sizeof(FPR_t)*(name-PPCREC_NAME_TEMPORARY_FPR0));
}
else
{
cemu_assert_debug(false);
}
}
void PPCRecompilerX64Gen_imlInstruction_fpr_name_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
uint32 name = imlInstruction->op_r_name.name;
if( name >= PPCREC_NAME_FPR0 && name < (PPCREC_NAME_FPR0+32) )
{
x64Gen_movupd_memReg128_xmmReg(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, fpr)+sizeof(FPR_t)*(name-PPCREC_NAME_FPR0));
}
else if( name >= PPCREC_NAME_TEMPORARY_FPR0 && name < (PPCREC_NAME_TEMPORARY_FPR0+8) )
{
x64Gen_movupd_memReg128_xmmReg(x64GenContext, tempToRealFPRRegister(imlInstruction->op_r_name.registerIndex), REG_ESP, offsetof(PPCInterpreter_t, temporaryFPR)+sizeof(FPR_t)*(name-PPCREC_NAME_TEMPORARY_FPR0));
}
else
{
cemu_assert_debug(false);
}
}
void PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, sint32 registerXMM, bool isLoad, bool scalePS1, sint32 registerGQR)
{
// load GQR
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR);
// extract scale field and multiply by 16 to get array offset
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, (isLoad?16:0)+8-4);
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, (0x3F<<4));
// multiply xmm by scale
x64Gen_add_reg64_reg64(x64GenContext, REG_RESV_TEMP, REG_RESV_RECDATA);
if (isLoad)
{
if(scalePS1)
x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_ld_scale_ps0_ps1));
else
x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_ld_scale_ps0_1));
}
else
{
if (scalePS1)
x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_st_scale_ps0_ps1));
else
x64Gen_mulpd_xmmReg_memReg128(x64GenContext, registerXMM, REG_RESV_TEMP, offsetof(PPCRecompilerInstanceData_t, _psq_st_scale_ps0_1));
}
}
// generate code for PSQ load for a particular type
// if registerGQR is -1 then a scale of 1.0 is assumed (no scale)
void PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR = -1)
{
if (mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1)
{
if (indexed)
{
assert_dbg();
}
// optimized code for ps float load
x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memImmS32);
x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP);
x64Gen_rol_reg64_imm8(x64GenContext, REG_RESV_TEMP, 32); // swap upper and lower DWORD
x64Gen_movq_xmmReg_reg64(x64GenContext, registerXMM, REG_RESV_TEMP);
x64Gen_cvtps2pd_xmmReg_xmmReg(x64GenContext, registerXMM, registerXMM);
// note: floats are not scaled
}
else if (mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0)
{
if (indexed)
{
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, memRegEx);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, memReg);
if (g_CPUFeatures.x86.movbe)
{
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, memImmS32);
}
else
{
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, memImmS32);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
}
}
else
{
if (g_CPUFeatures.x86.movbe)
{
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, memReg, memImmS32);
}
else
{
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, memReg, memImmS32);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
}
}
if (g_CPUFeatures.x86.avx)
{
x64Gen_movd_xmmReg_reg64Low32(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP);
}
else
{
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR), REG_RESV_TEMP);
x64Gen_movddup_xmmReg_memReg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP);
// load constant 1.0 into lower half and upper half of temp register
x64Gen_movddup_xmmReg_memReg64(x64GenContext, registerXMM, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble1_1));
// overwrite lower half with single from memory
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, registerXMM, REG_RESV_FPR_TEMP);
// note: floats are not scaled
}
else
{
sint32 readSize;
bool isSigned = false;
if (mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0 ||
mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1)
{
readSize = 16;
isSigned = true;
}
else if (mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0 ||
mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1)
{
readSize = 16;
isSigned = false;
}
else if (mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0 ||
mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1)
{
readSize = 8;
isSigned = true;
}
else if (mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0 ||
mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1)
{
readSize = 8;
isSigned = false;
}
else
assert_dbg();
bool loadPS1 = (mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 ||
mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 ||
mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 ||
mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1);
for (sint32 wordIndex = 0; wordIndex < 2; wordIndex++)
{
if (indexed)
{
assert_dbg();
}
// read from memory
if (wordIndex == 1 && loadPS1 == false)
{
// store constant 1
x64Gen_mov_mem32Reg64_imm32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR) + sizeof(uint32) * 1, 1);
}
else
{
uint32 memOffset = memImmS32 + wordIndex * (readSize / 8);
if (readSize == 16)
{
// half word
x64Gen_movZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memOffset);
x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8); // endian swap
if (isSigned)
x64Gen_movSignExtend_reg64Low32_reg64Low16(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
else
x64Gen_movZeroExtend_reg64Low32_reg64Low16(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
}
else if (readSize == 8)
{
// byte
x64Emit_mov_reg64b_mem8(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memOffset);
if (isSigned)
x64Gen_movSignExtend_reg64Low32_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
else
x64Gen_movZeroExtend_reg64Low32_reg64Low8(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
}
// store
x64Emit_mov_mem32_reg32(x64GenContext, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR) + sizeof(uint32) * wordIndex, REG_RESV_TEMP);
}
}
// convert the two integers to doubles
x64Gen_cvtpi2pd_xmmReg_mem64Reg64(x64GenContext, registerXMM, REG_RESV_HCPU, offsetof(PPCInterpreter_t, temporaryGPR));
// scale
if (registerGQR >= 0)
PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext, x64GenContext, registerXMM, true, loadPS1, registerGQR);
}
}
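// Note on the integer path above: each quantized element is read, byte-swapped where needed,
// sign- or zero-extended and parked in PPCInterpreter_t::temporaryGPR[0..1]; CVTPI2PD then
// converts both 32-bit integers to doubles in one instruction. When only PS0 is loaded, the
// second slot is filled with the integer constant 1 so that PS1 ends up as 1.0 after the
// conversion, matching paired-single load semantics.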
void PPCRecompilerX64Gen_imlInstr_psq_load_generic(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR)
{
bool loadPS1 = (mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1);
// load GQR
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR);
// extract load type field
x64Gen_shr_reg64Low32_imm8(x64GenContext, REG_RESV_TEMP, 16);
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7);
// jump cases
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 4); // type 4 -> u8
sint32 jumpOffset_caseU8 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 5); // type 5 -> u16
sint32 jumpOffset_caseU16 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
	x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 6); // type 6 -> s8
sint32 jumpOffset_caseS8 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
	x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); // type 7 -> s16
sint32 jumpOffset_caseS16 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
// default case -> float
// generate cases
uint32 jumpOffset_endOfFloat;
uint32 jumpOffset_endOfU8;
uint32 jumpOffset_endOfU16;
uint32 jumpOffset_endOfS8;
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
jumpOffset_endOfFloat = x64GenContext->codeBufferIndex;
x64Gen_jmp_imm32(x64GenContext, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU16, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_U16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
jumpOffset_endOfU8 = x64GenContext->codeBufferIndex;
x64Gen_jmp_imm32(x64GenContext, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS16, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_S16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
jumpOffset_endOfU16 = x64GenContext->codeBufferIndex;
x64Gen_jmp_imm32(x64GenContext, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU8, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_U8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
jumpOffset_endOfS8 = x64GenContext->codeBufferIndex;
x64Gen_jmp_imm32(x64GenContext, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS8, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, loadPS1 ? PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_S8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfFloat, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU8, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU16, x64GenContext->codeBufferIndex);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfS8, x64GenContext->codeBufferIndex);
}
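// The code emitted by the generic load above behaves roughly like the following sketch, with the
// type taken from GQR bits 16-18 (the load type field):
//   switch ((gqr >> 16) & 7) {
//     case 4:  load as u8;  break;
//     case 5:  load as u16; break;
//     case 6:  load as s8;  break;
//     case 7:  load as s16; break;
//     default: load as float; break;
//   }
// Each case body is produced by PPCRecompilerX64Gen_imlInstr_psq_load() and the forward jumps are
// patched to the common end via PPCRecompilerX64Gen_redirectRelativeJump().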
// load from memory
bool PPCRecompilerX64Gen_imlInstruction_fpr_load(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 realRegisterXMM = tempToRealFPRRegister(imlInstruction->op_storeLoad.registerData);
sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem);
sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER;
if( indexed )
realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2);
uint8 mode = imlInstruction->op_storeLoad.mode;
if( mode == PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1 )
{
// load byte swapped single into temporary FPR
if( indexed )
{
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem);
if( g_CPUFeatures.x86.movbe )
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32);
else
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32);
}
else
{
if( g_CPUFeatures.x86.movbe )
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32);
else
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_RESV_MEMBASE, realRegisterMem, imlInstruction->op_storeLoad.immS32);
}
if( g_CPUFeatures.x86.movbe == false )
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
if( g_CPUFeatures.x86.avx )
{
x64Gen_movd_xmmReg_reg64Low32(x64GenContext, realRegisterXMM, REG_RESV_TEMP);
}
else
{
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR), REG_RESV_TEMP);
x64Gen_movddup_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
if (imlInstruction->op_storeLoad.flags2.notExpanded)
{
// leave value as single
}
else
{
x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, realRegisterXMM);
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, realRegisterXMM, realRegisterXMM);
}
}
else if( mode == PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0 )
{
if( g_CPUFeatures.x86.avx )
{
if( indexed )
{
// calculate offset
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2);
// load value
x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0);
x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP);
x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP);
}
else
{
x64Emit_mov_reg64_mem64(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0);
x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP);
x64Gen_movq_xmmReg_reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_TEMP);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, realRegisterXMM, REG_RESV_FPR_TEMP);
}
}
else
{
if( indexed )
{
// calculate offset
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2);
// load double low part to temporaryFPR
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+0);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+4, REG_RESV_TEMP);
// calculate offset again
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, realRegisterMem2);
// load double high part to temporaryFPR
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, REG_RESV_TEMP, imlInstruction->op_storeLoad.immS32+4);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP);
// load double from temporaryFPR
x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
else
{
// load double low part to temporaryFPR
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+4, REG_RESV_TEMP);
// load double high part to temporaryFPR
x64Emit_mov_reg32_mem32(x64GenContext, REG_RESV_TEMP, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+4);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Emit_mov_mem32_reg64(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0, REG_RESV_TEMP);
// load double from temporaryFPR
x64Gen_movlpd_xmmReg_memReg64(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
}
}
	else if (mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0_PS1 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_FLOAT_PS0 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_S16_PS0_PS1 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_U16_PS0_PS1 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_S8_PS0_PS1 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0 ||
		mode == PPCREC_FPR_LD_MODE_PSQ_U8_PS0_PS1 )
{
PPCRecompilerX64Gen_imlInstr_psq_load(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed);
}
else if (mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 ||
mode == PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0)
{
PPCRecompilerX64Gen_imlInstr_psq_load_generic(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed, tempToRealRegister(imlInstruction->op_storeLoad.registerGQR));
}
else
{
return false;
}
return true;
}
void PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR = -1)
{
bool storePS1 = (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1);
bool isFloat = mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0 || mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1;
if (registerGQR >= 0)
{
// move to temporary xmm and update registerXMM
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM);
registerXMM = REG_RESV_FPR_TEMP;
// apply scale
if(isFloat == false)
PPCRecompilerX64Gen_imlInstr_gqr_generateScaleCode(ppcImlGenContext, x64GenContext, registerXMM, false, storePS1, registerGQR);
}
if (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0)
{
x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM);
if (g_CPUFeatures.x86.avx)
{
x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP);
}
else
{
x64Gen_movsd_memReg64_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
if (g_CPUFeatures.x86.movbe == false)
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
if (indexed)
{
cemu_assert_debug(memReg != memRegEx);
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, memReg, memRegEx);
}
if (g_CPUFeatures.x86.movbe)
x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, memReg, memImmS32, REG_RESV_TEMP);
else
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, memReg, memImmS32, REG_RESV_TEMP);
if (indexed)
{
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, memReg, memRegEx);
}
return;
}
else if (mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1)
{
if (indexed)
assert_dbg(); // todo
x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM);
x64Gen_movq_reg64_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP);
x64Gen_rol_reg64_imm8(x64GenContext, REG_RESV_TEMP, 32); // swap upper and lower DWORD
x64Gen_bswap_reg64(x64GenContext, REG_RESV_TEMP);
x64Gen_mov_mem64Reg64PlusReg64_reg64(x64GenContext, REG_RESV_TEMP, REG_R13, memReg, memImmS32);
return;
}
// store as integer
// get limit from mode
sint32 clampMin, clampMax;
sint32 bitWriteSize;
if (mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 )
{
clampMin = -128;
clampMax = 127;
bitWriteSize = 8;
}
else if (mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 )
{
clampMin = 0;
clampMax = 255;
bitWriteSize = 8;
}
else if (mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 )
{
clampMin = 0;
clampMax = 0xFFFF;
bitWriteSize = 16;
}
else if (mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 )
{
clampMin = -32768;
clampMax = 32767;
bitWriteSize = 16;
}
else
{
cemu_assert(false);
}
for (sint32 valueIndex = 0; valueIndex < (storePS1?2:1); valueIndex++)
{
// todo - multiply by GQR scale
if (valueIndex == 0)
{
// convert low half (PS0) to integer
x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext, REG_RESV_TEMP, registerXMM);
}
else
{
// load top half (PS1) into bottom half of temporary register
x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, registerXMM);
// convert low half to integer
x64Gen_cvttsd2si_reg64Low_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP);
}
		// max(i, clampMin)
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMin);
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGNED_GREATER_EQUAL, 0);
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMin);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
// min(i, clampMax)
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMax);
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_SIGNED_LESS_EQUAL, 0);
x64Gen_mov_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, clampMax);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
// endian swap
if( bitWriteSize == 16)
x64Gen_rol_reg64Low16_imm8(x64GenContext, REG_RESV_TEMP, 8);
// write to memory
if (indexed)
assert_dbg(); // unsupported
sint32 memOffset = memImmS32 + valueIndex * (bitWriteSize/8);
if (bitWriteSize == 8)
x64Gen_movTruncate_mem8Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, memReg, memOffset, REG_RESV_TEMP);
else if (bitWriteSize == 16)
x64Gen_movTruncate_mem16Reg64PlusReg64_reg64(x64GenContext, REG_RESV_MEMBASE, memReg, memOffset, REG_RESV_TEMP);
}
}
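// Note on the integer store path above: when a GQR register is given, the value is first copied to
// a temporary XMM register and scaled. PS0 (and PS1, moved down via MOVHLPS) is then converted with
// CVTTSD2SI, saturated to the target format's [clampMin, clampMax] range using compare-and-skip
// sequences, byte-swapped for 16-bit formats, and written back with a truncating 8/16-bit store.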
void PPCRecompilerX64Gen_imlInstr_psq_store_generic(ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, uint8 mode, sint32 registerXMM, sint32 memReg, sint32 memRegEx, sint32 memImmS32, bool indexed, sint32 registerGQR)
{
bool storePS1 = (mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1);
// load GQR
x64Gen_mov_reg64_reg64(x64GenContext, REG_RESV_TEMP, registerGQR);
// extract store type field
x64Gen_and_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7);
// jump cases
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 4); // type 4 -> u8
sint32 jumpOffset_caseU8 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 5); // type 5 -> u16
sint32 jumpOffset_caseU16 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
	x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 6); // type 6 -> s8
sint32 jumpOffset_caseS8 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
	x64Gen_cmp_reg64Low32_imm32(x64GenContext, REG_RESV_TEMP, 7); // type 7 -> s16
sint32 jumpOffset_caseS16 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_far(x64GenContext, X86_CONDITION_EQUAL, 0);
// default case -> float
// generate cases
	uint32 jumpOffset_endOfFloat;
	uint32 jumpOffset_endOfU16;
	uint32 jumpOffset_endOfS16;
	uint32 jumpOffset_endOfU8;
	PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
	jumpOffset_endOfFloat = x64GenContext->codeBufferIndex;
	x64Gen_jmp_imm32(x64GenContext, 0);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU16, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_U16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
	jumpOffset_endOfU16 = x64GenContext->codeBufferIndex;
	x64Gen_jmp_imm32(x64GenContext, 0);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS16, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_S16_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
	jumpOffset_endOfS16 = x64GenContext->codeBufferIndex;
	x64Gen_jmp_imm32(x64GenContext, 0);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseU8, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_U8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
	jumpOffset_endOfU8 = x64GenContext->codeBufferIndex;
	x64Gen_jmp_imm32(x64GenContext, 0);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_caseS8, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_S8_PS0, registerXMM, memReg, memRegEx, memImmS32, indexed, registerGQR);
	// all non-final cases jump here after their store
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfFloat, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU16, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfS16, x64GenContext->codeBufferIndex);
	PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpOffset_endOfU8, x64GenContext->codeBufferIndex);
}
// store to memory
bool PPCRecompilerX64Gen_imlInstruction_fpr_store(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction, bool indexed)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
sint32 realRegisterXMM = tempToRealFPRRegister(imlInstruction->op_storeLoad.registerData);
sint32 realRegisterMem = tempToRealRegister(imlInstruction->op_storeLoad.registerMem);
sint32 realRegisterMem2 = PPC_REC_INVALID_REGISTER;
if( indexed )
realRegisterMem2 = tempToRealRegister(imlInstruction->op_storeLoad.registerMem2);
uint8 mode = imlInstruction->op_storeLoad.mode;
if( mode == PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0 )
{
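		// store PS0 as a 32bit single: if the value was loaded without expansion its raw single bits are reused, otherwise the double is converted down first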
if (imlInstruction->op_storeLoad.flags2.notExpanded)
{
// value is already in single format
if (g_CPUFeatures.x86.avx)
{
x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM);
}
else
{
x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
}
else
{
x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, realRegisterXMM);
if (g_CPUFeatures.x86.avx)
{
x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, REG_RESV_FPR_TEMP);
}
else
{
x64Gen_movsd_memReg64_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
}
if( g_CPUFeatures.x86.movbe == false )
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
if( indexed )
{
if( realRegisterMem == realRegisterMem2 )
assert_dbg();
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
if( g_CPUFeatures.x86.movbe )
x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
else
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
if( indexed )
{
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
}
else if( mode == PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0 )
{
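		// store the raw 64bit double as two byte-swapped 32bit halves via the temporaryFPR spill slot (low dword to EA+4, high dword to EA+0 -> big-endian)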
if( indexed )
{
if( realRegisterMem == realRegisterMem2 )
assert_dbg();
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
// store double low part
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+0);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+4, REG_RESV_TEMP);
// store double high part
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR)+4);
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32+0, REG_RESV_TEMP);
if( indexed )
{
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
}
else if( mode == PPCREC_FPR_ST_MODE_UI32_FROM_PS0 )
{
if( g_CPUFeatures.x86.avx )
{
x64Gen_movd_reg64Low32_xmmReg(x64GenContext, REG_RESV_TEMP, realRegisterXMM);
}
else
{
x64Gen_movsd_memReg64_xmmReg(x64GenContext, realRegisterXMM, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
x64Emit_mov_reg64_mem32(x64GenContext, REG_RESV_TEMP, REG_RSP, offsetof(PPCInterpreter_t, temporaryFPR));
}
x64Gen_bswap_reg64Lower32bit(x64GenContext, REG_RESV_TEMP);
if( indexed )
{
if( realRegisterMem == realRegisterMem2 )
assert_dbg();
x64Gen_add_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
x64Gen_sub_reg64Low32_reg64Low32(x64GenContext, realRegisterMem, realRegisterMem2);
}
else
{
x64Gen_movTruncate_mem32Reg64PlusReg64_reg64(x64GenContext, REG_R13, realRegisterMem, imlInstruction->op_storeLoad.immS32, REG_RESV_TEMP);
}
}
else if(mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_FLOAT_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S8_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U8_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_S16_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0 ||
mode == PPCREC_FPR_ST_MODE_PSQ_U16_PS0_PS1 )
{
cemu_assert_debug(imlInstruction->op_storeLoad.flags2.notExpanded == false);
PPCRecompilerX64Gen_imlInstr_psq_store(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed);
}
else if (mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 ||
mode == PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0)
{
PPCRecompilerX64Gen_imlInstr_psq_store_generic(ppcImlGenContext, x64GenContext, mode, realRegisterXMM, realRegisterMem, realRegisterMem2, imlInstruction->op_storeLoad.immS32, indexed, tempToRealRegister(imlInstruction->op_storeLoad.registerGQR));
}
else
{
if( indexed )
assert_dbg(); // todo
debug_printf("PPCRecompilerX64Gen_imlInstruction_fpr_store(): Unsupported mode %d\n", mode);
return false;
}
return true;
}
void _swapPS0PS1(x64GenContext_t* x64GenContext, sint32 xmmReg)
{
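	// shufpd with imm8=1 exchanges the lower and upper double of the register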
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, xmmReg, xmmReg, 1);
}
// FPR op FPR
void PPCRecompilerX64Gen_imlInstruction_fpr_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
// VPUNPCKHQDQ
if (imlInstruction->op_fpr_r_r.registerResult == imlInstruction->op_fpr_r_r.registerOperand)
{
// unpack top to bottom and top
x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
//else if ( g_CPUFeatures.x86.avx )
//{
// // unpack top to bottom and top with non-destructive destination
// // update: On Ivy Bridge this causes weird stalls?
// x64Gen_avx_VUNPCKHPD_xmm_xmm_xmm(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, imlInstruction->op_fpr_r_r.registerOperand);
//}
else
{
// move top to bottom
x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
// duplicate bottom
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerResult);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand )
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, 2);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// use unpckhpd here?
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand, 3);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_PAIR )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_mulpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_BOTTOM )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_divsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_DIVIDE_PAIR)
{
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
assert_dbg();
}
x64Gen_divpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_PAIR )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_addpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_ASSIGN )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
x64Gen_cvttsd2si_xmmReg_xmmReg(x64GenContext, REG_RESV_TEMP, imlInstruction->op_fpr_r_r.registerOperand);
x64Gen_mov_reg64Low32_reg64Low32(x64GenContext, REG_RESV_TEMP, REG_RESV_TEMP);
// move to FPR register
x64Gen_movq_xmmReg_reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_TEMP);
}
else if(imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP ||
imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPO_BOTTOM )
{
if( imlInstruction->crRegister == PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
if (imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_BOTTOM)
x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_FCMPU_TOP)
{
// temporarily switch top/bottom of both operands and compare
if (imlInstruction->op_fpr_r_r.registerResult == imlInstruction->op_fpr_r_r.registerOperand)
{
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
}
else
{
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerOperand);
x64Gen_ucomisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerResult);
_swapPS0PS1(x64GenContext, imlInstruction->op_fpr_r_r.registerOperand);
}
}
else
x64Gen_comisd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
// todo: handle FPSCR updates
// update cr
sint32 crRegister = imlInstruction->crRegister;
// if the parity bit is set (NaN) we need to manually set CR LT, GT and EQ to 0 (comisd/ucomisd sets the respective flags to 1 in case of NaN)
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_PARITY, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_SO)); // unordered
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_PARITY, 0);
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT)); // same as X64_CONDITION_CARRY
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_UNSIGNED_ABOVE, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT));
x64Gen_setcc_mem8(x64GenContext, X86_CONDITION_EQUAL, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ));
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_LT), 0);
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_GT), 0);
x64Gen_mov_mem8Reg64_imm8(x64GenContext, REG_RSP, offsetof(PPCInterpreter_t, cr)+sizeof(uint8)*(crRegister*4+PPCREC_CR_BIT_EQ), 0);
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP )
{
if( imlInstruction->crRegister != PPC_REC_INVALID_REGISTER )
{
assert_dbg();
}
// move register to XMM15
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand);
// call assembly routine to calculate accurate FRES result in XMM15
x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_fres);
x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP);
// copy result to bottom and top half of result register
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP);
}
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT)
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// move register to XMM15
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand);
// call assembly routine to calculate accurate FRSQRTE result in XMM15
x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_frsqrte);
x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP);
// copy result to bottom of result register
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_PAIR )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// copy register
if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand )
{
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
// toggle sign bits
x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskPair));
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_PAIR )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// copy register
if( imlInstruction->op_fpr_r_r.registerResult != imlInstruction->op_fpr_r_r.registerOperand )
{
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, imlInstruction->op_fpr_r_r.registerOperand);
}
// set sign bit to 0
x64Gen_andps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_andAbsMaskPair));
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR || imlInstruction->operation == PPCREC_IML_OP_FPR_FRSQRTE_PAIR)
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// calculate bottom half of result
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand);
if(imlInstruction->operation == PPCREC_IML_OP_FPR_FRES_PAIR)
x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_fres);
else
x64Gen_mov_reg64_imm64(x64GenContext, REG_RESV_TEMP, (uint64)recompiler_frsqrte);
x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); // calculate fres result in xmm15
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP);
// calculate top half of result
// todo - this top to bottom copy can be optimized?
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r.registerOperand, 3);
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP, 1); // swap top and bottom
x64Gen_call_reg64(x64GenContext, REG_RESV_TEMP); // calculate fres result in xmm15
x64Gen_unpcklpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r.registerResult, REG_RESV_FPR_TEMP); // copy bottom to top
}
else
{
assert_dbg();
}
}
/*
* FPR = op (fprA, fprB)
*/
void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if (imlInstruction->operation == PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM)
{
if (imlInstruction->crRegister != PPC_REC_INVALID_REGISTER)
{
assert_dbg();
}
if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA)
{
x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
else if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB)
{
x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
}
else
{
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_mulsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
}
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_ADD_BOTTOM)
{
// registerResult(fp0) = registerOperandA(fp0) + registerOperandB(fp0)
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// todo: Use AVX 3-operand VADDSD if available
if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA)
{
x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
else if (imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB)
{
x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
}
else
{
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_addsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
}
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_PAIR)
{
// registerResult = registerOperandA - registerOperandB
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA )
{
x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
else if (g_CPUFeatures.x86.avx)
{
x64Gen_avx_VSUBPD_xmm_xmm_xmm(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
else if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB )
{
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_subpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandB);
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, REG_RESV_FPR_TEMP);
}
else
{
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_subpd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUB_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandA )
{
x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
else if( imlInstruction->op_fpr_r_r_r.registerResult == imlInstruction->op_fpr_r_r_r.registerOperandB )
{
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_subsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r.registerOperandB);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, REG_RESV_FPR_TEMP);
}
else
{
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandA);
x64Gen_subsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r.registerOperandB);
}
}
else
assert_dbg();
}
/*
* FPR = op (fprA, fprB, fprC)
*/
void PPCRecompilerX64Gen_imlInstruction_fpr_r_r_r_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUM0 )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// todo: Investigate if there are other optimizations possible if the operand registers overlap
// generic case
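		// ps_sum0: result.ps0 = a.ps0 + b.ps1, result.ps1 = c.ps1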
// 1) move frA bottom to frTemp bottom and top
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandA);
		// 2) add frB (both halves, lower half is overwritten in the next step)
x64Gen_addpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandB);
// 3) Interleave top of frTemp and frC
x64Gen_unpckhpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandC);
// todo: We can optimize the REG_RESV_FPR_TEMP -> resultReg copy operation away when the result register does not overlap with any of the operand registers
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, REG_RESV_FPR_TEMP);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SUM1 )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// todo: Investigate if there are other optimizations possible if the operand registers overlap
// 1) move frA bottom to frTemp bottom and top
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandA);
		// 2) add frB (both halves, lower half is overwritten in the next step)
x64Gen_addpd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandB);
// 3) Copy bottom from frC
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandC);
//// 4) Swap bottom and top half
//x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_FPR_TEMP, 1);
// todo: We can optimize the REG_RESV_FPR_TEMP -> resultReg copy operation away when the result register does not overlap with any of the operand registers
x64Gen_movaps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, REG_RESV_FPR_TEMP);
//float s0 = (float)hCPU->fpr[frC].fp0;
//float s1 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1);
//hCPU->fpr[frD].fp0 = s0;
//hCPU->fpr[frD].fp1 = s1;
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
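		// fsel-style select: result.ps0 = (a.ps0 >= 0.0) ? c.ps0 : b.ps0 (comisd sets CF for a < 0.0 or NaN, which picks b)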
x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerOperandA, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0));
sint32 jumpInstructionOffset1 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0);
// select C
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC);
sint32 jumpInstructionOffset2 = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0);
// select B
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1, x64GenContext->codeBufferIndex);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB);
// end
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2, x64GenContext->codeBufferIndex);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_SELECT_PAIR )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
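		// same fsel-style selection applied to both halves; the top half of operand A is compared via a copy in the temp register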
// select bottom
x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerOperandA, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0));
sint32 jumpInstructionOffset1_bottom = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0);
// select C bottom
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC);
sint32 jumpInstructionOffset2_bottom = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0);
// select B bottom
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1_bottom, x64GenContext->codeBufferIndex);
x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB);
// end
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2_bottom, x64GenContext->codeBufferIndex);
// select top
x64Gen_movhlps_xmmReg_xmmReg(x64GenContext, REG_RESV_FPR_TEMP, imlInstruction->op_fpr_r_r_r_r.registerOperandA); // copy top to bottom (todo: May cause stall?)
x64Gen_comisd_xmmReg_mem64Reg64(x64GenContext, REG_RESV_FPR_TEMP, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_constDouble0_0));
sint32 jumpInstructionOffset1_top = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_UNSIGNED_BELOW, 0);
// select C top
//x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC);
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandC, 2);
sint32 jumpInstructionOffset2_top = x64GenContext->codeBufferIndex;
x64Gen_jmpc_near(x64GenContext, X86_CONDITION_NONE, 0);
// select B top
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset1_top, x64GenContext->codeBufferIndex);
//x64Gen_movsd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB);
x64Gen_shufpd_xmmReg_xmmReg_imm8(x64GenContext, imlInstruction->op_fpr_r_r_r_r.registerResult, imlInstruction->op_fpr_r_r_r_r.registerOperandB, 2);
// end
PPCRecompilerX64Gen_redirectRelativeJump(x64GenContext, jumpInstructionOffset2_top, x64GenContext->codeBufferIndex);
}
else
assert_dbg();
}
/*
* Single FPR operation
*/
void PPCRecompilerX64Gen_imlInstruction_fpr_r(PPCRecFunction_t* PPCRecFunction, ppcImlGenContext_t* ppcImlGenContext, x64GenContext_t* x64GenContext, PPCRecImlInstruction_t* imlInstruction)
{
PPCRecompilerX64Gen_crConditionFlags_forget(PPCRecFunction, ppcImlGenContext, x64GenContext);
if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATE_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// toggle sign bit
x64Gen_xorps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom));
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ABS_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// mask out sign bit
x64Gen_andps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_andAbsMaskBottom));
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// set sign bit
x64Gen_orps_xmmReg_mem128Reg64(x64GenContext, imlInstruction->op_fpr_r.registerResult, REG_RESV_RECDATA, offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom));
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// convert to 32bit single
x64Gen_cvtsd2ss_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
// convert back to 64bit double
x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
}
else if( imlInstruction->operation == PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR )
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// convert to 32bit singles
x64Gen_cvtpd2ps_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
// convert back to 64bit doubles
x64Gen_cvtps2pd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
}
else if (imlInstruction->operation == PPCREC_IML_OP_FPR_EXPAND_BOTTOM32_TO_BOTTOM64_AND_TOP64)
{
cemu_assert_debug(imlInstruction->crRegister == PPC_REC_INVALID_REGISTER);
// convert bottom to 64bit double
x64Gen_cvtss2sd_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
// copy to top half
x64Gen_movddup_xmmReg_xmmReg(x64GenContext, imlInstruction->op_fpr_r.registerResult, imlInstruction->op_fpr_r.registerResult);
}
else
{
cemu_assert_unimplemented();
}
}
| 62,311 | C++ | .cpp | 1,203 | 48.77473 | 248 | 0.773991 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,212 | PPCRecompilerX64BMI.cpp | cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerX64BMI.cpp |
#include "PPCRecompiler.h"
#include "PPCRecompilerX64.h"
void _x64Gen_writeMODRMDeprecated(x64GenContext_t* x64GenContext, sint32 dataRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32);
void x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32)
{
// MOVBE <dstReg64> (low dword), DWORD [<reg64> + <reg64> + <imm64>]
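	// optional REX prefix: bit R extends dstRegister, bit B extends memRegisterA64, bit X extends memRegisterB64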
if( dstRegister >= 8 && memRegisterA64 >= 8 && memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x47);
else if( memRegisterA64 >= 8 && memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x43);
	else if( dstRegister >= 8 && memRegisterB64 >= 8 )
		x64Gen_writeU8(x64GenContext, 0x46);
else if( dstRegister >= 8 && memRegisterA64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x45);
else if( dstRegister >= 8 )
x64Gen_writeU8(x64GenContext, 0x44);
else if( memRegisterA64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x41);
else if( memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x42);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x38);
x64Gen_writeU8(x64GenContext, 0xF0);
_x64Gen_writeMODRMDeprecated(x64GenContext, dstRegister, memRegisterA64, memRegisterB64, memImmS32);
}
void x64Gen_movBEZeroExtend_reg64Low16_mem16Reg64PlusReg64(x64GenContext_t* x64GenContext, sint32 dstRegister, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32)
{
// MOVBE <dstReg64> (low word), WORD [<reg64> + <reg64> + <imm64>]
// note: Unlike the 32bit version this instruction does not set the upper 32bits of the 64bit register to 0
x64Gen_writeU8(x64GenContext, 0x66); // 16bit prefix
x64Gen_movBEZeroExtend_reg64_mem32Reg64PlusReg64(x64GenContext, dstRegister, memRegisterA64, memRegisterB64, memImmS32);
}
void x64Gen_movBETruncate_mem32Reg64PlusReg64_reg64(x64GenContext_t* x64GenContext, sint32 memRegisterA64, sint32 memRegisterB64, sint32 memImmS32, sint32 srcRegister)
{
// MOVBE DWORD [<reg64> + <reg64> + <imm64>], <srcReg64> (low dword)
if( srcRegister >= 8 && memRegisterA64 >= 8 && memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x47);
else if( memRegisterA64 >= 8 && memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x43);
	else if( srcRegister >= 8 && memRegisterB64 >= 8 )
		x64Gen_writeU8(x64GenContext, 0x46);
else if( srcRegister >= 8 && memRegisterA64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x45);
else if( srcRegister >= 8 )
x64Gen_writeU8(x64GenContext, 0x44);
else if( memRegisterA64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x41);
else if( memRegisterB64 >= 8 )
x64Gen_writeU8(x64GenContext, 0x42);
x64Gen_writeU8(x64GenContext, 0x0F);
x64Gen_writeU8(x64GenContext, 0x38);
x64Gen_writeU8(x64GenContext, 0xF1);
_x64Gen_writeMODRMDeprecated(x64GenContext, srcRegister, memRegisterA64, memRegisterB64, memImmS32);
}
void x64Gen_shrx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB)
{
// SHRX reg64, reg64, reg64
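	// 3-byte VEX prefix: C4, then inverted R/X/B bits + map select 0F38, then W=1, inverted vvvv (the shift amount register) and pp=F2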
x64Gen_writeU8(x64GenContext, 0xC4);
x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 0x20 : 0));
x64Gen_writeU8(x64GenContext, 0xFB - registerB * 8);
x64Gen_writeU8(x64GenContext, 0xF7);
x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7));
}
void x64Gen_shlx_reg64_reg64_reg64(x64GenContext_t* x64GenContext, sint32 registerDst, sint32 registerA, sint32 registerB)
{
// SHLX reg64, reg64, reg64
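	// same VEX layout as SHRX above, but byte3 starts from 0xF9 (pp=66), selecting the SHLX form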
x64Gen_writeU8(x64GenContext, 0xC4);
x64Gen_writeU8(x64GenContext, 0xE2 - ((registerDst >= 8) ? 0x80 : 0) - ((registerA >= 8) ? 0x20 : 0));
x64Gen_writeU8(x64GenContext, 0xF9 - registerB * 8);
x64Gen_writeU8(x64GenContext, 0xF7);
x64Gen_writeU8(x64GenContext, 0xC0 + (registerDst & 7) * 8 + (registerA & 7));
}
| 3,796 | C++ | .cpp | 72 | 50.680556 | 174 | 0.762712 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,213 | PPCRecompilerImlGenFPU.cpp | cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompilerImlGenFPU.cpp |
#include "../Interpreter/PPCInterpreterInternal.h"
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "Cafe/GameProfile/GameProfile.h"
void PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER)
{
// load from memory
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_LOAD;
imlInstruction->crRegister = PPC_REC_INVALID_REGISTER;
imlInstruction->operation = 0;
imlInstruction->op_storeLoad.registerData = registerDestination;
imlInstruction->op_storeLoad.registerMem = registerMemory;
imlInstruction->op_storeLoad.registerGQR = registerGQR;
imlInstruction->op_storeLoad.immS32 = immS32;
imlInstruction->op_storeLoad.mode = mode;
imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerDestination, uint8 registerMemory1, uint8 registerMemory2, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER)
{
// load from memory
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_LOAD_INDEXED;
imlInstruction->crRegister = PPC_REC_INVALID_REGISTER;
imlInstruction->operation = 0;
imlInstruction->op_storeLoad.registerData = registerDestination;
imlInstruction->op_storeLoad.registerMem = registerMemory1;
imlInstruction->op_storeLoad.registerMem2 = registerMemory2;
imlInstruction->op_storeLoad.registerGQR = registerGQR;
imlInstruction->op_storeLoad.immS32 = 0;
imlInstruction->op_storeLoad.mode = mode;
imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext_t* ppcImlGenContext, uint8 registerSource, uint8 registerMemory, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = PPC_REC_INVALID_REGISTER)
{
// store to memory
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_STORE;
imlInstruction->crRegister = PPC_REC_INVALID_REGISTER;
imlInstruction->operation = 0;
imlInstruction->op_storeLoad.registerData = registerSource;
imlInstruction->op_storeLoad.registerMem = registerMemory;
imlInstruction->op_storeLoad.registerGQR = registerGQR;
imlInstruction->op_storeLoad.immS32 = immS32;
imlInstruction->op_storeLoad.mode = mode;
imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext_t* ppcImlGenContext, uint8 registerSource, uint8 registerMemory1, uint8 registerMemory2, sint32 immS32, uint32 mode, bool switchEndian, uint8 registerGQR = 0)
{
// store to memory
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_STORE_INDEXED;
imlInstruction->crRegister = PPC_REC_INVALID_REGISTER;
imlInstruction->operation = 0;
imlInstruction->op_storeLoad.registerData = registerSource;
imlInstruction->op_storeLoad.registerMem = registerMemory1;
imlInstruction->op_storeLoad.registerMem2 = registerMemory2;
imlInstruction->op_storeLoad.registerGQR = registerGQR;
imlInstruction->op_storeLoad.immS32 = immS32;
imlInstruction->op_storeLoad.mode = mode;
imlInstruction->op_storeLoad.flags2.swapEndian = switchEndian;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperand, sint32 crRegister=PPC_REC_INVALID_REGISTER)
{
// fpr OP fpr
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R;
imlInstruction->operation = operation;
imlInstruction->op_fpr_r_r.registerResult = registerResult;
imlInstruction->op_fpr_r_r.registerOperand = registerOperand;
imlInstruction->crRegister = crRegister;
imlInstruction->op_fpr_r_r.flags = 0;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperand1, uint8 registerOperand2, sint32 crRegister=PPC_REC_INVALID_REGISTER)
{
// fpr = OP (fpr,fpr)
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R_R;
imlInstruction->operation = operation;
imlInstruction->op_fpr_r_r_r.registerResult = registerResult;
imlInstruction->op_fpr_r_r_r.registerOperandA = registerOperand1;
imlInstruction->op_fpr_r_r_r.registerOperandB = registerOperand2;
imlInstruction->crRegister = crRegister;
imlInstruction->op_fpr_r_r_r.flags = 0;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext_t* ppcImlGenContext, sint32 operation, uint8 registerResult, uint8 registerOperandA, uint8 registerOperandB, uint8 registerOperandC, sint32 crRegister=PPC_REC_INVALID_REGISTER)
{
// fpr = OP (fpr,fpr,fpr)
PPCRecImlInstruction_t* imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_R_R_R_R;
imlInstruction->operation = operation;
imlInstruction->op_fpr_r_r_r_r.registerResult = registerResult;
imlInstruction->op_fpr_r_r_r_r.registerOperandA = registerOperandA;
imlInstruction->op_fpr_r_r_r_r.registerOperandB = registerOperandB;
imlInstruction->op_fpr_r_r_r_r.registerOperandC = registerOperandC;
imlInstruction->crRegister = crRegister;
imlInstruction->op_fpr_r_r_r_r.flags = 0;
}
void PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext_t* ppcImlGenContext, PPCRecImlInstruction_t* imlInstruction, sint32 operation, uint8 registerResult, sint32 crRegister)
{
// OP (fpr)
if(imlInstruction == NULL)
imlInstruction = PPCRecompilerImlGen_generateNewEmptyInstruction(ppcImlGenContext);
imlInstruction->type = PPCREC_IML_TYPE_FPR_R;
imlInstruction->operation = operation;
imlInstruction->op_fpr_r.registerResult = registerResult;
imlInstruction->crRegister = crRegister;
}
/*
* Rounds the bottom double to single precision (if single precision accuracy is emulated)
*/
void PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext_t* ppcImlGenContext, uint32 fprRegister, bool flushDenormals=false)
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL, PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprRegister);
if( flushDenormals )
assert_dbg();
}
/*
* Rounds pair of doubles to single precision (if single precision accuracy is emulated)
*/
void PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext_t* ppcImlGenContext, uint32 fprRegister, bool flushDenormals=false)
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL, PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_PAIR, fprRegister);
if( flushDenormals )
assert_dbg();
}
bool PPCRecompilerImlGen_LFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
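	// with LSQE set the loaded single is replicated into PS0 and PS1, otherwise only PS0 is written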
if( ppcImlGenContext->LSQE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true);
}
return true;
}
bool PPCRecompilerImlGen_LFSU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( ppcImlGenContext->LSQE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true);
}
return true;
}
bool PPCRecompilerImlGen_LFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD, rB;
PPC_OPC_TEMPL_X(opcode, frD, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( ppcImlGenContext->LSQE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true);
}
return true;
}
bool PPCRecompilerImlGen_LFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD, rB;
PPC_OPC_TEMPL_X(opcode, frD, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// add rB to rA (if rA != 0)
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( ppcImlGenContext->LSQE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0_PS1, true);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_SINGLE_INTO_PS0, true);
}
return true;
}
bool PPCRecompilerImlGen_LFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
if( rA == 0 )
{
assert_dbg();
}
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true);
return true;
}
bool PPCRecompilerImlGen_LFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
if( rA == 0 )
{
assert_dbg();
}
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// emit load iml
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true);
return true;
}
bool PPCRecompilerImlGen_LFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD, rB;
PPC_OPC_TEMPL_X(opcode, frD, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true);
return true;
}
bool PPCRecompilerImlGen_LFDUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD, rB;
PPC_OPC_TEMPL_X(opcode, frD, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// add rB to rA (if rA != 0)
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_LD_MODE_DOUBLE_INTO_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFSU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFSX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frS, rB;
PPC_OPC_TEMPL_X(opcode, frS, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS);
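	// both branches below emit the same PS0 single store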
if( ppcImlGenContext->LSQE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true);
}
return true;
}
bool PPCRecompilerImlGen_STFSUX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frS, rB;
PPC_OPC_TEMPL_X(opcode, frS, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS);
// calculate EA in rA
PPCRecompilerImlGen_generateNewInstruction_r_r(ppcImlGenContext, NULL, PPCREC_IML_OP_ADD, gprRegister1, gprRegister2);
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_ST_MODE_SINGLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// get fpr register index
	uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // the store reads frD, so the register must be loaded
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFDU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frD;
uint32 imm;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, imm);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
	uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD); // the store reads frD, so the register must be loaded
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFDX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frS, rB;
PPC_OPC_TEMPL_X(opcode, frS, rA, rB);
if( rA == 0 )
{
debugBreakpoint();
return false;
}
// get memory gpr registers
uint32 gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
uint32 gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS);
	PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_DOUBLE_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_STFIWX(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 rA, frS, rB;
PPC_OPC_TEMPL_X(opcode, frS, rA, rB);
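	// stfiwx: stores the low 32 bits of frS (typically the integer result of a preceding fctiw/fctiwz) to (rA|0) + rB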
// get memory gpr registers
uint32 gprRegister1;
uint32 gprRegister2;
if( rA != 0 )
{
gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rA, false);
gprRegister2 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
}
else
{
// rA is not used
gprRegister1 = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0+rB, false);
gprRegister2 = 0;
}
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frS);
if( rA != 0 )
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r_indexed(ppcImlGenContext, fprRegister, gprRegister1, gprRegister2, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true);
else
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister1, 0, PPCREC_FPR_ST_MODE_UI32_FROM_PS0, true);
return true;
}
bool PPCRecompilerImlGen_FADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
PPC_ASSERT(frC==0);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
PPC_ASSERT(frC==0);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
	// subtract bottom double of frB from bottom double of frA, result goes into frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FMUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB_unused, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB_unused, frC);
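	// fmul: frD.ps0 = frA.ps0 * frC.ps0 (frB is not used by this A-form instruction)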
if( frD == frC )
{
		// swap frA and frC
sint32 temp = frA;
frA = frC;
frC = temp;
}
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
	// multiply bottom double of frD with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC);
return true;
}
bool PPCRecompilerImlGen_FDIV(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC_unused;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC_unused);
PPC_ASSERT(frB==0);
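	// fdiv: frD.ps0 = frA.ps0 / frB.ps0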
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( frB == frD && frA != frB )
{
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
// move frA to temporary register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA);
// divide bottom double of temporary register by bottom double of frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterTemp, fprRegisterB);
// move result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
return true;
}
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); // copy ps0
// divide bottom double of frD by bottom double of frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FMADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
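	// fmadd: frD.ps0 = frA.ps0 * frC.ps0 + frB.ps0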
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// if frB is already in frD we need a temporary register to store the product of frA*frC
if( frB == frD )
{
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
// move frA to temporary register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA);
// multiply bottom double of temporary register with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterC);
// add result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterTemp);
return true;
}
	// if frC == frD, swap frA and frC so that below we can assume frC != frD
if( fprRegisterD == fprRegisterC )
{
// swap frA and frC
sint32 temp = fprRegisterA;
fprRegisterA = fprRegisterC;
fprRegisterC = temp;
}
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
// multiply bottom double of frD with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
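	// fmsub: frD.ps0 = frA.ps0 * frC.ps0 - frB.ps0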
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// if frB is already in frD we need a temporary register to store the product of frA*frC
if( frB == frD )
{
// not implemented
return false;
}
	// if frC == frD, swap frA and frC so that below we can assume frC != frD
if( fprRegisterD == fprRegisterC )
{
// swap frA and frC
sint32 temp = fprRegisterA;
fprRegisterA = fprRegisterC;
fprRegisterC = temp;
}
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
// multiply bottom double of frD with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FNMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
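	// fnmsub: frD.ps0 = -(frA.ps0 * frC.ps0 - frB.ps0)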
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// if frB is already in frD we need a temporary register to store the product of frA*frC
if( frB == frD )
{
// hCPU->fpr[frD].fpr = -(hCPU->fpr[frA].fpr * hCPU->fpr[frC].fpr - hCPU->fpr[frD].fpr);
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
//// negate frB/frD
//PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD, true);
// move frA to temporary register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterTemp, fprRegisterA);
// multiply bottom double of temporary register with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterC);
// sub frB from temporary register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB);
// negate result
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterTemp);
// move result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
return true;
}
	// if frC == frD, swap frA and frC so that below we can assume frC != frD
if( fprRegisterD == fprRegisterC )
{
// swap frA and frC
sint32 temp = fprRegisterA;
fprRegisterA = fprRegisterC;
fprRegisterC = temp;
}
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
		PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA); // copy ps0 only
// multiply bottom double of frD with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterB);
// negate result
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FMULS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB_unused, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB_unused, frC);
if( frD == frC )
{
// swap frA and frC
sint32 temp = frA;
frA = frC;
frC = temp;
}
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
	// multiply bottom double of frD with bottom double of frC
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterD, fprRegisterC);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// if paired single mode, copy frD ps0 to ps1
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_FDIVS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC_unused;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC_unused);
PPC_ASSERT(frB==0);
/*hCPU->fpr[frD].fpr = (float)(hCPU->fpr[frA].fpr / hCPU->fpr[frB].fpr);
if( hCPU->PSE )
hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0;*/
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( frB == frD && frA != frB )
{
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
// move frA to temporary register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterTemp, fprRegisterA);
// divide bottom double of temporary register by bottom double of frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterTemp, fprRegisterB);
// move result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// if paired single mode, copy frD ps0 to ps1
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
	// divide bottom double of frD by bottom double of frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_BOTTOM, fprRegisterD, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// if paired single mode, copy frD ps0 to ps1
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_FADDS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
if( frD == frB )
{
// swap frA and frB
sint32 temp = frA;
frA = frB;
frB = temp;
}
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frA to frD (if different register)
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_ASSIGN, fprRegisterD, fprRegisterA); // always copy ps0 and ps1
// add bottom double of frD and bottom double of frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterD, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// if paired single mode, copy frD ps0 to ps1
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_FSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
int frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
PPC_ASSERT(frB==0);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// subtract bottom
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// if paired single mode, copy frD ps0 to ps1
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_FMADDS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
//FPRD(RD) = FPRD(RA) * FPRD(RC) + FPRD(RB);
//hCPU->fpr[frD].fpr = hCPU->fpr[frA].fpr * hCPU->fpr[frC].fpr + hCPU->fpr[frB].fpr;
//if( hCPU->PSE )
// hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
uint32 fprRegisterTemp;
// if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register
if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC )
fprRegisterTemp = fprRegisterD;
else
fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_BOTTOM, fprRegisterTemp, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp);
// set result
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp);
}
else if( fprRegisterD != fprRegisterTemp )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
}
return true;
}
bool PPCRecompilerImlGen_FMSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
//hCPU->fpr[frD].fp0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0);
//if( hCPU->PSE )
// hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
uint32 fprRegisterTemp;
// if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register
if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC )
fprRegisterTemp = fprRegisterD;
else
fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp);
// set result
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp);
}
else if( fprRegisterD != fprRegisterTemp )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
}
return true;
}
bool PPCRecompilerImlGen_FNMSUBS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
//[FP1(RD) = ]FP0(RD) = -(FP0(RA) * FP0(RC) - FP0(RB));
//hCPU->fpr[frD].fp0 = (float)-(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0);
//if( PPC_PSE )
// hCPU->fpr[frD].fp1 = hCPU->fpr[frD].fp0;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
uint32 fprRegisterTemp;
// if none of the operand registers overlap with the result register then we can avoid the usage of a temporary register
if( fprRegisterD != fprRegisterA && fprRegisterD != fprRegisterB && fprRegisterD != fprRegisterC )
fprRegisterTemp = fprRegisterD;
else
fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_BOTTOM, fprRegisterTemp, fprRegisterA, fprRegisterC);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_BOTTOM, fprRegisterTemp, fprRegisterB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterTemp);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterTemp);
// set result
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterTemp);
}
else if( fprRegisterD != fprRegisterTemp )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterTemp);
}
return true;
}
bool PPCRecompilerImlGen_FCMPO(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 crfD, frA, frB;
PPC_OPC_TEMPL_X(opcode, crfD, frA, frB);
crfD >>= 2;
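	// fcmpo: ordered compare of frA.ps0 with frB.ps0, result is written to CR field crfD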
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPO_BOTTOM, fprRegisterA, fprRegisterB, crfD);
return true;
}
bool PPCRecompilerImlGen_FCMPU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 crfD, frA, frB;
PPC_OPC_TEMPL_X(opcode, crfD, frA, frB);
crfD >>= 2;
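	// fcmpu: unordered compare of frA.ps0 with frB.ps0, result is written to CR field crfD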
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_BOTTOM, fprRegisterA, fprRegisterB, crfD);
return true;
}
bool PPCRecompilerImlGen_FMR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, rA, frB;
PPC_OPC_TEMPL_X(opcode, frD, rA, frB);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_FABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
PPC_ASSERT(frA==0);
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frB to frD (if different register)
if( fprRegisterD != fprRegisterB )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
// abs frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ABS_BOTTOM, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FNABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
PPC_ASSERT(frA==0);
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frB to frD (if different register)
if( fprRegisterD != fprRegisterB )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
	// negative abs: frD = -abs(frD)
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATIVE_ABS_BOTTOM, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FRES(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
PPC_ASSERT(frA==0);
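	// fres: approximate reciprocal of frB.ps0, result is written to both ps0 and ps1 of frD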
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_FRES_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FRSP(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
PPC_ASSERT(frA==0);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( fprRegisterD != fprRegisterB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
}
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ROUND_TO_SINGLE_PRECISION_BOTTOM, fprRegisterD);
if( ppcImlGenContext->PSE )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_FNEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
PPC_ASSERT(frA==0);
if( opcode&PPC_OPC_RC )
{
return false;
}
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// move frB to frD (if different register)
if( fprRegisterD != fprRegisterB )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
// negate frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_NEGATE_BOTTOM, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FSEL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
if( opcode&PPC_OPC_RC )
{
return false;
}
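	// fsel: frD.ps0 = (frA.ps0 >= 0.0) ? frC.ps0 : frB.ps0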
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SELECT_BOTTOM, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC);
return true;
}
bool PPCRecompilerImlGen_FRSQRTE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
PPC_OPC_TEMPL_A(opcode, frD, frA, frB, frC);
// hCPU->fpr[frD].fpr = 1.0 / sqrt(hCPU->fpr[frB].fpr);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_RECIPROCAL_SQRT, fprRegisterD, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundBottomFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_FCTIWZ(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
PPC_OPC_TEMPL_X(opcode, frD, frA, frB);
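	// fctiwz: convert frB.ps0 to a 32-bit signed integer using round-towards-zero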
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_BOTTOM_FCTIWZ, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_PSQ_L(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
int rA, frD;
uint32 immUnused;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused);
sint32 gqrIndex = ((opcode >> 12) & 7);
uint32 imm = opcode & 0xFFF;
if (imm & 0x800)
imm |= ~0xFFF;
bool readPS1 = (opcode & 0x8000) == false;
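	// psq_l: quantized load controlled by GQR[gqrIndex]; the 12-bit displacement is sign-extended above
	// and the W bit (0x8000) selects whether only ps0 (W=1) or both ps0 and ps1 (W=0) are loaded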
// get gqr register
uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD);
// psq load
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, imm, readPS1 ? PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0, true, gqrRegister);
return true;
}
bool PPCRecompilerImlGen_PSQ_LU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
int rA, frD;
uint32 immUnused;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused);
if (rA == 0)
return false;
sint32 gqrIndex = ((opcode >> 12) & 7);
uint32 imm = opcode & 0xFFF;
if (imm & 0x800)
imm |= ~0xFFF;
bool readPS1 = (opcode & 0x8000) == false;
// get gqr register
uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
uint32 fprRegister = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD);
// paired load
PPCRecompilerImlGen_generateNewInstruction_fpr_r_memory(ppcImlGenContext, fprRegister, gprRegister, 0, readPS1 ? PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_LD_MODE_PSQ_GENERIC_PS0, true, gqrRegister);
return true;
}
bool PPCRecompilerImlGen_PSQ_ST(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
int rA, frD;
uint32 immUnused;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused);
uint32 imm = opcode & 0xFFF;
if (imm & 0x800)
imm |= ~0xFFF;
sint32 gqrIndex = ((opcode >> 12) & 7);
bool storePS1 = (opcode & 0x8000) == false;
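	// psq_st: quantized store controlled by GQR[gqrIndex]; the W bit (0x8000) selects whether only ps0 (W=1) or both ps0 and ps1 (W=0) are stored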
// get gqr register
uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false);
// get fpr register index
	uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); // the store reads frD, so the register must be loaded
// paired store
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, imm, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0, true, gqrRegister);
return true;
}
bool PPCRecompilerImlGen_PSQ_STU(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
int rA, frD;
uint32 immUnused;
PPC_OPC_TEMPL_D_SImm(opcode, frD, rA, immUnused);
if (rA == 0)
return false;
uint32 imm = opcode & 0xFFF;
if (imm & 0x800)
imm |= ~0xFFF;
sint32 gqrIndex = ((opcode >> 12) & 7);
bool storePS1 = (opcode & 0x8000) == false;
// get gqr register
uint32 gqrRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_SPR0 + SPR_UGQR0 + gqrIndex, false);
// get memory gpr register index
uint32 gprRegister = PPCRecompilerImlGen_loadRegister(ppcImlGenContext, PPCREC_NAME_R0 + rA, false);
// add imm to memory register
PPCRecompilerImlGen_generateNewInstruction_r_s32(ppcImlGenContext, PPCREC_IML_OP_ADD, gprRegister, (sint32)imm, 0, false, false, PPC_REC_INVALID_REGISTER, 0);
// get fpr register index
	uint32 fprRegister = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD); // the store reads frD, so the register must be loaded
// paired store
PPCRecompilerImlGen_generateNewInstruction_fpr_memory_r(ppcImlGenContext, fprRegister, gprRegister, 0, storePS1 ? PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0_PS1 : PPCREC_FPR_ST_MODE_PSQ_GENERIC_PS0, true, gqrRegister);
return true;
}
bool PPCRecompilerImlGen_PS_MULS0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frC;
frC = (opcode>>6)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
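	// ps_muls0: frD.ps0 = frA.ps0 * frC.ps0, frD.ps1 = frA.ps1 * frC.ps0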
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// we need a temporary register to store frC.fp0 in low and high half
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC);
	// if frD == frA we can multiply frD immediately and save a copy instruction
if( frD == frA )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_MULS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frC;
frC = (opcode>>6)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
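	// ps_muls1: frD.ps0 = frA.ps0 * frC.ps1, frD.ps1 = frA.ps1 * frC.ps1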
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
	// we need a temporary register to store frC.fp1 in low and high half
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC);
	// if frD == frA we can multiply frD immediately and save a copy instruction
if( frD == frA )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_MADDS0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp0);
//float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp1);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// we need a temporary register to store frC.fp0 in low and high half
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC);
	// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_MADDS1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp0);
//float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp1);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// we need a temporary register to store frC.fp1 in bottom and top half
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterTemp, fprRegisterC);
	// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_ADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 + hCPU->fpr[frB].fp1;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( frD == frA )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
else if( frD == frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterA);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterA);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_SUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 - hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 - hCPU->fpr[frB].fp1;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterA, fprRegisterB);
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_MUL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frC;
frC = (opcode >> 6) & 0x1F;
frA = (opcode >> 16) & 0x1F;
frD = (opcode >> 21) & 0x1F;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD);
// we need a temporary register
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0 + 0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
	// if frD == frA we can multiply frD immediately and save a copy instruction
if (frD == frA)
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_DIV(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode >> 11) & 0x1F;
frA = (opcode >> 16) & 0x1F;
frD = (opcode >> 21) & 0x1F;
//hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0 / hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frA].fp1 / hCPU->fpr[frB].fp1;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frD);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
	// if frD == frA we can divide frD immediately and save a copy instruction
if (frD == frA)
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we need a temporary register
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0 + 0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterA);
// we divide temporary by frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_DIVIDE_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_MADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)(hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 + hCPU->fpr[frB].fp0);
//float s1 = (float)(hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 + hCPU->fpr[frB].fp1);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
	// we need a temporary register to hold a copy of frC (both ps0 and ps1)
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_NMADD(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
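	// ps_nmadd: frD.psX = -(frA.psX * frC.psX + frB.psX) for both elements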
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
	// we need a temporary register to hold a copy of frC (both ps0 and ps1)
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
	// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// add frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ADD_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// negate
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterD);
// adjust accuracy
//PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
// Splatoon requires that we emulate flush-to-denormals for this instruction
//PPCRecompilerImlGen_generateNewInstruction_fpr_r(ppcImlGenContext, NULL,PPCREC_IML_OP_FPR_ROUND_FLDN_TO_SINGLE_PRECISION_PAIR, fprRegisterD, false);
return true;
}
bool PPCRecompilerImlGen_PS_MSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = (hCPU->fpr[frA].fp0 * hCPU->fpr[frC].fp0 - hCPU->fpr[frB].fp0);
//hCPU->fpr[frD].fp1 = (hCPU->fpr[frA].fp1 * hCPU->fpr[frC].fp1 - hCPU->fpr[frB].fp1);
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
	// we need a temporary register to hold a copy of frC (both ps0 and ps1)
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
	// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_NMSUB(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// we need a temporary register to hold a copy of frC
uint32 fprRegisterTemp = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_TEMPORARY_FPR0+0);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterTemp, fprRegisterC);
// todo-optimize: This instruction can be optimized so that it doesn't always use a temporary register
// if frD == frA and frD != frB we can multiply frD immediately and save a copy instruction
if( frD == frA && frD != frB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterD, fprRegisterTemp);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterD, fprRegisterB);
}
else
{
// we multiply temporary by frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_MULTIPLY_PAIR, fprRegisterTemp, fprRegisterA);
// sub frB
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUB_PAIR, fprRegisterTemp, fprRegisterB);
// copy result to frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterTemp);
}
// negate result
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterD);
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_SUM0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1);
//float s1 = (float)hCPU->fpr[frC].fp1;
//hCPU->fpr[frD].fp0 = s0;
//hCPU->fpr[frD].fp1 = s1;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUM0, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC);
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_SUM1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)hCPU->fpr[frC].fp0;
//float s1 = (float)(hCPU->fpr[frA].fp0 + hCPU->fpr[frB].fp1);
//hCPU->fpr[frD].fp0 = s0;
//hCPU->fpr[frD].fp1 = s1;
// load registers
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SUM1, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC);
// adjust accuracy
PPRecompilerImmGen_optionalRoundPairFPRToSinglePrecision(ppcImlGenContext, fprRegisterD);
return true;
}
bool PPCRecompilerImlGen_PS_NEG(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frB;
frB = (opcode>>11)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = -hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = -hCPU->fpr[frB].fp1;
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_NEGATE_PAIR, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_PS_ABS(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frB;
frB = (opcode>>11)&0x1F;
frD = (opcode>>21)&0x1F;
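//hCPU->fpr[frD].fp0 = fabs(hCPU->fpr[frB].fp0);
//hCPU->fpr[frD].fp1 = fabs(hCPU->fpr[frB].fp1);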
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_ABS_PAIR, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_PS_RES(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frB;
frB = (opcode>>11)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = (float)(1.0f / (float)hCPU->fpr[frB].fp0);
//hCPU->fpr[frD].fp1 = (float)(1.0f / (float)hCPU->fpr[frB].fp1);
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FRES_PAIR, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_PS_RSQRTE(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frB;
frB = (opcode>>11)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = (float)(1.0f / (float)sqrt(hCPU->fpr[frB].fp0));
//hCPU->fpr[frD].fp1 = (float)(1.0f / (float)sqrt(hCPU->fpr[frB].fp1));
// load registers
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FRSQRTE_PAIR, fprRegisterD, fprRegisterB);
return true;
}
bool PPCRecompilerImlGen_PS_MR(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frB;
frB = (opcode>>11)&0x1F;
frD = (opcode>>21)&0x1F;
//hCPU->fpr[frD].fp0 = hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp1;
// load registers
if( frB != frD )
{
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_PAIR, fprRegisterD, fprRegisterB);
}
return true;
}
bool PPCRecompilerImlGen_PS_SEL(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB, frC;
frC = (opcode>>6)&0x1F;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
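//hCPU->fpr[frD].fp0 = (hCPU->fpr[frA].fp0 >= 0.0) ? hCPU->fpr[frC].fp0 : hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp1 = (hCPU->fpr[frA].fp1 >= 0.0) ? hCPU->fpr[frC].fp1 : hCPU->fpr[frB].fp1;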
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterC = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frC);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_SELECT_PAIR, fprRegisterD, fprRegisterA, fprRegisterB, fprRegisterC);
return true;
}
bool PPCRecompilerImlGen_PS_MERGE00(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
//float s0 = (float)hCPU->fpr[frA].fp0;
//float s1 = (float)hCPU->fpr[frB].fp0;
//hCPU->fpr[frD].fp0 = s0;
//hCPU->fpr[frD].fp1 = s1;
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
// unpcklpd
if( frA == frB )
{
// simply duplicate bottom into bottom and top of destination register
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA);
}
else
{
// copy bottom of frB to top first
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_TOP, fprRegisterD, fprRegisterB);
// copy bottom of frA
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA);
}
return true;
}
bool PPCRecompilerImlGen_PS_MERGE01(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
// hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp0;
// hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp1;
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( fprRegisterD != fprRegisterB )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterB);
if( fprRegisterD != fprRegisterA )
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterA);
return true;
}
bool PPCRecompilerImlGen_PS_MERGE10(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
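//hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp1;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp0;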
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( frA == frB )
{
// swap bottom and top
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterA);
}
else if( frA == frD )
{
// copy frB bottom to frD bottom
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
// swap lower and upper half of frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD);
}
else if( frB == frD )
{
// copy upper half of frA to upper half of frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterA);
// swap lower and upper half of frD
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD);
}
else
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_TO_BOTTOM, fprRegisterD, fprRegisterB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_BOTTOM_AND_TOP_SWAPPED, fprRegisterD, fprRegisterD);
}
return true;
}
bool PPCRecompilerImlGen_PS_MERGE11(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 frD, frA, frB;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
frD = (opcode>>21)&0x1F;
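//hCPU->fpr[frD].fp0 = hCPU->fpr[frA].fp1;
//hCPU->fpr[frD].fp1 = hCPU->fpr[frB].fp1;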
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
uint32 fprRegisterD = PPCRecompilerImlGen_loadOverwriteFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frD);
if( fprRegisterA == fprRegisterB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA);
}
else if( fprRegisterD != fprRegisterB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM_AND_TOP, fprRegisterD, fprRegisterA);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_TOP, fprRegisterD, fprRegisterB);
}
else if( fprRegisterD == fprRegisterB )
{
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_COPY_TOP_TO_BOTTOM, fprRegisterD, fprRegisterA);
}
else
{
debugBreakpoint();
return false;
}
return true;
}
bool PPCRecompilerImlGen_PS_CMPO0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 crfD, frA, frB;
uint32 c=0;
frB = (opcode>>11)&0x1F;
frA = (opcode>>16)&0x1F;
crfD = (opcode>>23)&0x7;
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0+frB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPO_BOTTOM, fprRegisterA, fprRegisterB, crfD);
return true;
}
bool PPCRecompilerImlGen_PS_CMPU0(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 crfD, frA, frB;
frB = (opcode >> 11) & 0x1F;
frA = (opcode >> 16) & 0x1F;
crfD = (opcode >> 23) & 0x7;
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_BOTTOM, fprRegisterA, fprRegisterB, crfD);
return true;
}
bool PPCRecompilerImlGen_PS_CMPU1(ppcImlGenContext_t* ppcImlGenContext, uint32 opcode)
{
sint32 crfD, frA, frB;
frB = (opcode >> 11) & 0x1F;
frA = (opcode >> 16) & 0x1F;
crfD = (opcode >> 23) & 0x7;
uint32 fprRegisterA = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frA);
uint32 fprRegisterB = PPCRecompilerImlGen_loadFPRRegister(ppcImlGenContext, PPCREC_NAME_FPR0 + frB);
PPCRecompilerImlGen_generateNewInstruction_fpr_r_r(ppcImlGenContext, PPCREC_IML_OP_FPR_FCMPU_TOP, fprRegisterA, fprRegisterB, crfD);
return true;
}
// File: cemu-project_Cemu/src/Cafe/HW/Espresso/Recompiler/PPCRecompiler.cpp
#include "Cafe/HW/Espresso/Interpreter/PPCInterpreterInternal.h"
#include "PPCFunctionBoundaryTracker.h"
#include "PPCRecompiler.h"
#include "PPCRecompilerIml.h"
#include "PPCRecompilerX64.h"
#include "Cafe/OS/RPL/rpl.h"
#include "util/containers/RangeStore.h"
#include "Cafe/OS/libs/coreinit/coreinit_CodeGen.h"
#include "config/ActiveSettings.h"
#include "config/LaunchSettings.h"
#include "Common/ExceptionHandler/ExceptionHandler.h"
#include "Common/cpu_features.h"
#include "util/helpers/fspinlock.h"
#include "util/helpers/helpers.h"
#include "util/MemMapper/MemMapper.h"
struct PPCInvalidationRange
{
MPTR startAddress;
uint32 size;
PPCInvalidationRange(MPTR _startAddress, uint32 _size) : startAddress(_startAddress), size(_size) {};
};
struct
{
FSpinlock recompilerSpinlock;
std::queue<MPTR> targetQueue;
std::vector<PPCInvalidationRange> invalidationRanges;
}PPCRecompilerState;
RangeStore<PPCRecFunction_t*, uint32, 7703, 0x2000> rangeStore_ppcRanges;
void ATTR_MS_ABI (*PPCRecompiler_enterRecompilerCode)(uint64 codeMem, uint64 ppcInterpreterInstance);
void ATTR_MS_ABI (*PPCRecompiler_leaveRecompilerCode_visited)();
void ATTR_MS_ABI (*PPCRecompiler_leaveRecompilerCode_unvisited)();
PPCRecompilerInstanceData_t* ppcRecompilerInstanceData;
bool ppcRecompilerEnabled = false;
// this function never blocks and can fail if the recompiler lock cannot be acquired immediately
void PPCRecompiler_visitAddressNoBlock(uint32 enterAddress)
{
// quick read-only check without lock
if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] != PPCRecompiler_leaveRecompilerCode_unvisited)
return;
// try to acquire lock
if (!PPCRecompilerState.recompilerSpinlock.try_lock())
return;
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
if (funcPtr != PPCRecompiler_leaveRecompilerCode_unvisited)
{
// was visited since previous check
PPCRecompilerState.recompilerSpinlock.unlock();
return;
}
// add to recompilation queue and flag as visited
PPCRecompilerState.targetQueue.emplace(enterAddress);
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4] = PPCRecompiler_leaveRecompilerCode_visited;
PPCRecompilerState.recompilerSpinlock.unlock();
}
void PPCRecompiler_recompileIfUnvisited(uint32 enterAddress)
{
if (ppcRecompilerEnabled == false)
return;
PPCRecompiler_visitAddressNoBlock(enterAddress);
}
void PPCRecompiler_enter(PPCInterpreter_t* hCPU, PPCREC_JUMP_ENTRY funcPtr)
{
#if BOOST_OS_WINDOWS
uint32 prevState = _controlfp(0, 0);
_controlfp(_RC_NEAR, _MCW_RC);
PPCRecompiler_enterRecompilerCode((uint64)funcPtr, (uint64)hCPU);
_controlfp(prevState, _MCW_RC);
// debug recompiler exit - useful to find frequently executed functions which couldn't be recompiled
#ifdef CEMU_DEBUG_ASSERT
if (hCPU->remainingCycles > 0 && GetAsyncKeyState(VK_F4))
{
auto t = std::chrono::high_resolution_clock::now();
auto dur = std::chrono::duration_cast<std::chrono::microseconds>(t.time_since_epoch()).count();
cemuLog_log(LogType::Force, "Recompiler exit: 0x{:08x} LR: 0x{:08x} Timestamp {}.{:04}", hCPU->instructionPointer, hCPU->spr.LR, dur / 1000LL, (dur % 1000LL));
}
#endif
#else
PPCRecompiler_enterRecompilerCode((uint64)funcPtr, (uint64)hCPU);
#endif
// after leaving recompiler prematurely attempt to recompile the code at the new location
if (hCPU->remainingCycles > 0)
{
PPCRecompiler_visitAddressNoBlock(hCPU->instructionPointer);
}
}
void PPCRecompiler_attemptEnterWithoutRecompile(PPCInterpreter_t* hCPU, uint32 enterAddress)
{
cemu_assert_debug(hCPU->instructionPointer == enterAddress);
if (ppcRecompilerEnabled == false)
return;
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
if (funcPtr != PPCRecompiler_leaveRecompilerCode_unvisited && funcPtr != PPCRecompiler_leaveRecompilerCode_visited)
{
cemu_assert_debug(ppcRecompilerInstanceData != nullptr);
PPCRecompiler_enter(hCPU, funcPtr);
}
}
void PPCRecompiler_attemptEnter(PPCInterpreter_t* hCPU, uint32 enterAddress)
{
cemu_assert_debug(hCPU->instructionPointer == enterAddress);
if (ppcRecompilerEnabled == false)
return;
if (hCPU->remainingCycles <= 0)
return;
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
if (funcPtr == PPCRecompiler_leaveRecompilerCode_unvisited)
{
PPCRecompiler_visitAddressNoBlock(enterAddress);
}
else if (funcPtr != PPCRecompiler_leaveRecompilerCode_visited)
{
// enter
cemu_assert_debug(ppcRecompilerInstanceData != nullptr);
PPCRecompiler_enter(hCPU, funcPtr);
}
}
PPCRecFunction_t* PPCRecompiler_recompileFunction(PPCFunctionBoundaryTracker::PPCRange_t range, std::set<uint32>& entryAddresses, std::vector<std::pair<MPTR, uint32>>& entryPointsOut)
{
if (range.startAddress >= PPC_REC_CODE_AREA_END)
{
cemuLog_log(LogType::Force, "Attempting to recompile function outside of allowed code area");
return nullptr;
}
uint32 codeGenRangeStart;
uint32 codeGenRangeSize = 0;
coreinit::OSGetCodegenVirtAddrRangeInternal(codeGenRangeStart, codeGenRangeSize);
if (codeGenRangeSize != 0)
{
if (range.startAddress >= codeGenRangeStart && range.startAddress < (codeGenRangeStart + codeGenRangeSize))
{
if (coreinit::codeGenShouldAvoid())
{
return nullptr;
}
}
}
PPCRecFunction_t* ppcRecFunc = new PPCRecFunction_t();
ppcRecFunc->ppcAddress = range.startAddress;
ppcRecFunc->ppcSize = range.length;
// generate intermediate code
ppcImlGenContext_t ppcImlGenContext = { 0 };
bool compiledSuccessfully = PPCRecompiler_generateIntermediateCode(ppcImlGenContext, ppcRecFunc, entryAddresses);
if (compiledSuccessfully == false)
{
// todo: Free everything
PPCRecompiler_freeContext(&ppcImlGenContext);
delete ppcRecFunc;
return NULL;
}
// emit x64 code
bool x64GenerationSuccess = PPCRecompiler_generateX64Code(ppcRecFunc, &ppcImlGenContext);
if (x64GenerationSuccess == false)
{
PPCRecompiler_freeContext(&ppcImlGenContext);
return nullptr;
}
// collect list of PPC-->x64 entry points
entryPointsOut.clear();
for (sint32 s = 0; s < ppcImlGenContext.segmentListCount; s++)
{
PPCRecImlSegment_t* imlSegment = ppcImlGenContext.segmentList[s];
if (imlSegment->isEnterable == false)
continue;
uint32 ppcEnterOffset = imlSegment->enterPPCAddress;
uint32 x64Offset = imlSegment->x64Offset;
entryPointsOut.emplace_back(ppcEnterOffset, x64Offset);
}
PPCRecompiler_freeContext(&ppcImlGenContext);
return ppcRecFunc;
}
bool PPCRecompiler_makeRecompiledFunctionActive(uint32 initialEntryPoint, PPCFunctionBoundaryTracker::PPCRange_t& range, PPCRecFunction_t* ppcRecFunc, std::vector<std::pair<MPTR, uint32>>& entryPoints)
{
// update jump table
PPCRecompilerState.recompilerSpinlock.lock();
// check if the initial entrypoint is still flagged for recompilation
// it's possible that the range has been invalidated during the time it took to translate the function
if (ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[initialEntryPoint / 4] != PPCRecompiler_leaveRecompilerCode_visited)
{
PPCRecompilerState.recompilerSpinlock.unlock();
return false;
}
// check if the current range got invalidated in the time it took to recompile it
bool isInvalidated = false;
for (auto& invRange : PPCRecompilerState.invalidationRanges)
{
MPTR rStartAddr = invRange.startAddress;
MPTR rEndAddr = rStartAddr + invRange.size;
for (auto& recFuncRange : ppcRecFunc->list_ranges)
{
if (recFuncRange.ppcAddress < (rEndAddr) && (recFuncRange.ppcAddress + recFuncRange.ppcSize) >= rStartAddr)
{
isInvalidated = true;
break;
}
}
}
PPCRecompilerState.invalidationRanges.clear();
if (isInvalidated)
{
PPCRecompilerState.recompilerSpinlock.unlock();
return false;
}
// update jump table
for (auto& itr : entryPoints)
{
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[itr.first / 4] = (PPCREC_JUMP_ENTRY)((uint8*)ppcRecFunc->x86Code + itr.second);
}
// due to inlining, some entrypoints can get optimized away
// therefore we reset all addresses that are still marked as visited (but not recompiled)
// we don't remove the points from the queue, but any address that's not marked as visited won't get recompiled
// if they are reachable, the interpreter will queue them again
for (uint32 v = range.startAddress; v <= (range.startAddress + range.length); v += 4)
{
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[v / 4];
if (funcPtr == PPCRecompiler_leaveRecompilerCode_visited)
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[v / 4] = PPCRecompiler_leaveRecompilerCode_unvisited;
}
// register ranges
for (auto& r : ppcRecFunc->list_ranges)
{
r.storedRange = rangeStore_ppcRanges.storeRange(ppcRecFunc, r.ppcAddress, r.ppcAddress + r.ppcSize);
}
PPCRecompilerState.recompilerSpinlock.unlock();
return true;
}
void PPCRecompiler_recompileAtAddress(uint32 address)
{
cemu_assert_debug(ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[address / 4] == PPCRecompiler_leaveRecompilerCode_visited);
// get size
PPCFunctionBoundaryTracker funcBoundaries;
funcBoundaries.trackStartPoint(address);
// get range that encompasses address
PPCFunctionBoundaryTracker::PPCRange_t range;
if (funcBoundaries.getRangeForAddress(address, range) == false)
{
cemu_assert_debug(false);
}
// todo - use info from previously compiled ranges to determine full size of this function (and merge all the entryAddresses)
// collect all currently known entry points for this range
PPCRecompilerState.recompilerSpinlock.lock();
std::set<uint32> entryAddresses;
entryAddresses.emplace(address);
PPCRecompilerState.recompilerSpinlock.unlock();
std::vector<std::pair<MPTR, uint32>> functionEntryPoints;
auto func = PPCRecompiler_recompileFunction(range, entryAddresses, functionEntryPoints);
if (!func)
{
return; // recompilation failed
}
bool r = PPCRecompiler_makeRecompiledFunctionActive(address, range, func, functionEntryPoints);
}
std::thread s_threadRecompiler;
std::atomic_bool s_recompilerThreadStopSignal{false};
void PPCRecompiler_thread()
{
SetThreadName("PPCRecompiler");
while (true)
{
if(s_recompilerThreadStopSignal)
return;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
// asynchronous recompilation:
// 1) take address from queue
// 2) check if address is still marked as visited
// 3) if yes -> calculate size, gather all entry points, recompile and update jump table
while (true)
{
PPCRecompilerState.recompilerSpinlock.lock();
if (PPCRecompilerState.targetQueue.empty())
{
PPCRecompilerState.recompilerSpinlock.unlock();
break;
}
auto enterAddress = PPCRecompilerState.targetQueue.front();
PPCRecompilerState.targetQueue.pop();
auto funcPtr = ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[enterAddress / 4];
if (funcPtr != PPCRecompiler_leaveRecompilerCode_visited)
{
// only recompile functions if marked as visited
PPCRecompilerState.recompilerSpinlock.unlock();
continue;
}
PPCRecompilerState.recompilerSpinlock.unlock();
PPCRecompiler_recompileAtAddress(enterAddress);
if(s_recompilerThreadStopSignal)
return;
}
}
}
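// The recompiler lookup tables (ppcRecompilerFuncTable / ppcRecompilerDirectJumpTable) span the
// entire PPC code area but are only backed with memory on demand: the code address space is split
// into blocks of PPC_REC_ALLOC_BLOCK_SIZE and each block's table slice is allocated the first time
// a range inside it is registered via PPCRecompiler_allocateRange().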
#define PPC_REC_ALLOC_BLOCK_SIZE (4*1024*1024) // 4MB
constexpr uint32 PPCRecompiler_GetNumAddressSpaceBlocks()
{
return (MEMORY_CODEAREA_ADDR + MEMORY_CODEAREA_SIZE + PPC_REC_ALLOC_BLOCK_SIZE - 1) / PPC_REC_ALLOC_BLOCK_SIZE;
}
std::bitset<PPCRecompiler_GetNumAddressSpaceBlocks()> ppcRecompiler_reservedBlockMask;
void PPCRecompiler_reserveLookupTableBlock(uint32 offset)
{
uint32 blockIndex = offset / PPC_REC_ALLOC_BLOCK_SIZE;
offset = blockIndex * PPC_REC_ALLOC_BLOCK_SIZE;
if (ppcRecompiler_reservedBlockMask[blockIndex])
return;
ppcRecompiler_reservedBlockMask[blockIndex] = true;
void* p1 = MemMapper::AllocateMemory(&(ppcRecompilerInstanceData->ppcRecompilerFuncTable[offset/4]), (PPC_REC_ALLOC_BLOCK_SIZE/4)*sizeof(void*), MemMapper::PAGE_PERMISSION::P_RW, true);
void* p3 = MemMapper::AllocateMemory(&(ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[offset/4]), (PPC_REC_ALLOC_BLOCK_SIZE/4)*sizeof(void*), MemMapper::PAGE_PERMISSION::P_RW, true);
if( !p1 || !p3 )
{
cemuLog_log(LogType::Force, "Failed to allocate memory for recompiler (0x{:08x})", offset);
cemu_assert(false);
return;
}
for(uint32 i=0; i<PPC_REC_ALLOC_BLOCK_SIZE/4; i++)
{
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[offset/4+i] = PPCRecompiler_leaveRecompilerCode_unvisited;
}
}
void PPCRecompiler_allocateRange(uint32 startAddress, uint32 size)
{
if (ppcRecompilerInstanceData == nullptr)
return;
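// round the range outwards to PPC_REC_ALLOC_BLOCK_SIZE granularity and clamp it to the end of the code area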
uint32 endAddress = (startAddress + size + PPC_REC_ALLOC_BLOCK_SIZE - 1) & ~(PPC_REC_ALLOC_BLOCK_SIZE-1);
startAddress = (startAddress) & ~(PPC_REC_ALLOC_BLOCK_SIZE-1);
startAddress = std::min(startAddress, (uint32)MEMORY_CODEAREA_ADDR + MEMORY_CODEAREA_SIZE);
endAddress = std::min(endAddress, (uint32)MEMORY_CODEAREA_ADDR + MEMORY_CODEAREA_SIZE);
for (uint32 i = startAddress; i < endAddress; i += PPC_REC_ALLOC_BLOCK_SIZE)
{
PPCRecompiler_reserveLookupTableBlock(i);
}
}
struct ppcRecompilerFuncRange_t
{
MPTR ppcStart;
uint32 ppcSize;
void* x86Start;
size_t x86Size;
};
bool PPCRecompiler_findFuncRanges(uint32 addr, ppcRecompilerFuncRange_t* rangesOut, size_t* countInOut)
{
PPCRecompilerState.recompilerSpinlock.lock();
size_t countIn = *countInOut;
size_t countOut = 0;
rangeStore_ppcRanges.findRanges(addr, addr + 4, [rangesOut, countIn, &countOut](uint32 start, uint32 end, PPCRecFunction_t* func)
{
if (countOut < countIn)
{
rangesOut[countOut].ppcStart = start;
rangesOut[countOut].ppcSize = (end-start);
rangesOut[countOut].x86Start = func->x86Code;
rangesOut[countOut].x86Size = func->x86Size;
}
countOut++;
}
);
PPCRecompilerState.recompilerSpinlock.unlock();
*countInOut = countOut;
if (countOut > countIn)
return false;
return true;
}
extern "C" DLLEXPORT uintptr_t * PPCRecompiler_getJumpTableBase()
{
if (ppcRecompilerInstanceData == nullptr)
return nullptr;
return (uintptr_t*)ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable;
}
void PPCRecompiler_invalidateTableRange(uint32 offset, uint32 size)
{
if (ppcRecompilerInstanceData == nullptr)
return;
for (uint32 i = 0; i < size / 4; i++)
{
ppcRecompilerInstanceData->ppcRecompilerFuncTable[offset / 4 + i] = nullptr;
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[offset / 4 + i] = PPCRecompiler_leaveRecompilerCode_unvisited;
}
}
void PPCRecompiler_deleteFunction(PPCRecFunction_t* func)
{
// assumes PPCRecompilerState.recompilerSpinlock is already held
cemu_assert_debug(PPCRecompilerState.recompilerSpinlock.is_locked());
for (auto& r : func->list_ranges)
{
PPCRecompiler_invalidateTableRange(r.ppcAddress, r.ppcSize);
if(r.storedRange)
rangeStore_ppcRanges.deleteRange(r.storedRange);
r.storedRange = nullptr;
}
// todo - free x86 code
}
void PPCRecompiler_invalidateRange(uint32 startAddr, uint32 endAddr)
{
if (ppcRecompilerEnabled == false)
return;
if (startAddr >= PPC_REC_CODE_AREA_SIZE)
return;
cemu_assert_debug(endAddr >= startAddr);
PPCRecompilerState.recompilerSpinlock.lock();
uint32 rStart;
uint32 rEnd;
PPCRecFunction_t* rFunc;
// mark range as unvisited
for (uint64 currentAddr = (uint64)startAddr&~3; currentAddr < (uint64)(endAddr&~3); currentAddr += 4)
ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[currentAddr / 4] = PPCRecompiler_leaveRecompilerCode_unvisited;
// add entry to invalidation queue
PPCRecompilerState.invalidationRanges.emplace_back(startAddr, endAddr-startAddr);
while (rangeStore_ppcRanges.findFirstRange(startAddr, endAddr, rStart, rEnd, rFunc) )
{
PPCRecompiler_deleteFunction(rFunc);
}
PPCRecompilerState.recompilerSpinlock.unlock();
}
#if defined(ARCH_X86_64)
void PPCRecompiler_initPlatform()
{
// mxcsr
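// 0x1F80 is the default MXCSR value (round-to-nearest, all FP exceptions masked);
// bit 15 (0x8000) is the SSE flush-to-zero (FTZ) flag, giving one constant with FTZ enabled and one with it disabled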
ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOn = 0x1F80 | 0x8000;
ppcRecompilerInstanceData->_x64XMM_mxCsr_ftzOff = 0x1F80;
}
#else
void PPCRecompiler_initPlatform()
{
}
#endif
void PPCRecompiler_init()
{
if (ActiveSettings::GetCPUMode() == CPUMode::SinglecoreInterpreter)
{
ppcRecompilerEnabled = false;
return;
}
if (LaunchSettings::ForceInterpreter())
{
cemuLog_log(LogType::Force, "Recompiler disabled. Command line --force-interpreter was passed");
return;
}
if (ppcRecompilerInstanceData)
{
MemMapper::FreeReservation(ppcRecompilerInstanceData, sizeof(PPCRecompilerInstanceData_t));
ppcRecompilerInstanceData = nullptr;
}
debug_printf("Allocating %dMB for recompiler instance data...\n", (sint32)(sizeof(PPCRecompilerInstanceData_t) / 1024 / 1024));
ppcRecompilerInstanceData = (PPCRecompilerInstanceData_t*)MemMapper::ReserveMemory(nullptr, sizeof(PPCRecompilerInstanceData_t), MemMapper::PAGE_PERMISSION::P_RW);
MemMapper::AllocateMemory(&(ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom), sizeof(PPCRecompilerInstanceData_t) - offsetof(PPCRecompilerInstanceData_t, _x64XMM_xorNegateMaskBottom), MemMapper::PAGE_PERMISSION::P_RW, true);
PPCRecompilerX64Gen_generateRecompilerInterfaceFunctions();
PPCRecompiler_allocateRange(0, 0x1000); // the first entry is used for fallback to interpreter
PPCRecompiler_allocateRange(mmuRange_TRAMPOLINE_AREA.getBase(), mmuRange_TRAMPOLINE_AREA.getSize());
PPCRecompiler_allocateRange(mmuRange_CODECAVE.getBase(), mmuRange_CODECAVE.getSize());
// init x64 recompiler instance data
ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom[0] = 1ULL << 63ULL;
ppcRecompilerInstanceData->_x64XMM_xorNegateMaskBottom[1] = 0ULL;
ppcRecompilerInstanceData->_x64XMM_xorNegateMaskPair[0] = 1ULL << 63ULL;
ppcRecompilerInstanceData->_x64XMM_xorNegateMaskPair[1] = 1ULL << 63ULL;
ppcRecompilerInstanceData->_x64XMM_xorNOTMask[0] = 0xFFFFFFFFFFFFFFFFULL;
ppcRecompilerInstanceData->_x64XMM_xorNOTMask[1] = 0xFFFFFFFFFFFFFFFFULL;
ppcRecompilerInstanceData->_x64XMM_andAbsMaskBottom[0] = ~(1ULL << 63ULL);
ppcRecompilerInstanceData->_x64XMM_andAbsMaskBottom[1] = ~0ULL;
ppcRecompilerInstanceData->_x64XMM_andAbsMaskPair[0] = ~(1ULL << 63ULL);
ppcRecompilerInstanceData->_x64XMM_andAbsMaskPair[1] = ~(1ULL << 63ULL);
ppcRecompilerInstanceData->_x64XMM_andFloatAbsMaskBottom[0] = ~(1 << 31);
ppcRecompilerInstanceData->_x64XMM_andFloatAbsMaskBottom[1] = 0xFFFFFFFF;
ppcRecompilerInstanceData->_x64XMM_andFloatAbsMaskBottom[2] = 0xFFFFFFFF;
ppcRecompilerInstanceData->_x64XMM_andFloatAbsMaskBottom[3] = 0xFFFFFFFF;
ppcRecompilerInstanceData->_x64XMM_singleWordMask[0] = 0xFFFFFFFFULL;
ppcRecompilerInstanceData->_x64XMM_singleWordMask[1] = 0ULL;
ppcRecompilerInstanceData->_x64XMM_constDouble1_1[0] = 1.0;
ppcRecompilerInstanceData->_x64XMM_constDouble1_1[1] = 1.0;
ppcRecompilerInstanceData->_x64XMM_constDouble0_0[0] = 0.0;
ppcRecompilerInstanceData->_x64XMM_constDouble0_0[1] = 0.0;
ppcRecompilerInstanceData->_x64XMM_constFloat0_0[0] = 0.0f;
ppcRecompilerInstanceData->_x64XMM_constFloat0_0[1] = 0.0f;
ppcRecompilerInstanceData->_x64XMM_constFloat1_1[0] = 1.0f;
ppcRecompilerInstanceData->_x64XMM_constFloat1_1[1] = 1.0f;
*(uint32*)&ppcRecompilerInstanceData->_x64XMM_constFloatMin[0] = 0x00800000;
*(uint32*)&ppcRecompilerInstanceData->_x64XMM_constFloatMin[1] = 0x00800000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMask1[0] = 0x7F800000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMask1[1] = 0x7F800000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMask1[2] = 0x7F800000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMask1[3] = 0x7F800000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[0] = ~0x80000000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[1] = ~0x80000000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[2] = ~0x80000000;
ppcRecompilerInstanceData->_x64XMM_flushDenormalMaskResetSignBits[3] = ~0x80000000;
// setup GQR scale tables
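// The tables are indexed by the 6-bit GQR scale field taken as an unsigned value: entries 0-31
// cover non-negative scales, entries 32-63 cover negative scales (field value - 64). Loads are
// multiplied by 2^-scale, stores by the reciprocal 2^scale. The *_ps0_1 tables keep the second
// element at 1.0 for single-element (ps0-only) quantized accesses.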
for (uint32 i = 0; i < 32; i++)
{
float a = 1.0f / (float)(1u << i);
float b = 0;
if (i == 0)
b = 4294967296.0f;
else
b = (float)(1u << (32u - i));
float ar = (float)(1u << i);
float br = 0;
if (i == 0)
br = 1.0f / 4294967296.0f;
else
br = 1.0f / (float)(1u << (32u - i));
ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[i * 2 + 0] = a;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[i * 2 + 1] = 1.0f;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[(i + 32) * 2 + 0] = b;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_1[(i + 32) * 2 + 1] = 1.0f;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[i * 2 + 0] = a;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[i * 2 + 1] = a;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[(i + 32) * 2 + 0] = b;
ppcRecompilerInstanceData->_psq_ld_scale_ps0_ps1[(i + 32) * 2 + 1] = b;
ppcRecompilerInstanceData->_psq_st_scale_ps0_1[i * 2 + 0] = ar;
ppcRecompilerInstanceData->_psq_st_scale_ps0_1[i * 2 + 1] = 1.0f;
ppcRecompilerInstanceData->_psq_st_scale_ps0_1[(i + 32) * 2 + 0] = br;
ppcRecompilerInstanceData->_psq_st_scale_ps0_1[(i + 32) * 2 + 1] = 1.0f;
ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[i * 2 + 0] = ar;
ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[i * 2 + 1] = ar;
ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[(i + 32) * 2 + 0] = br;
ppcRecompilerInstanceData->_psq_st_scale_ps0_ps1[(i + 32) * 2 + 1] = br;
}
PPCRecompiler_initPlatform();
cemuLog_log(LogType::Force, "Recompiler initialized");
ppcRecompilerEnabled = true;
// launch recompilation thread
s_recompilerThreadStopSignal = false;
s_threadRecompiler = std::thread(PPCRecompiler_thread);
}
void PPCRecompiler_Shutdown()
{
// shut down recompiler thread
s_recompilerThreadStopSignal = true;
if(s_threadRecompiler.joinable())
s_threadRecompiler.join();
// clean up queues
while(!PPCRecompilerState.targetQueue.empty())
PPCRecompilerState.targetQueue.pop();
PPCRecompilerState.invalidationRanges.clear();
// clean range store
rangeStore_ppcRanges.clear();
// clean up memory
uint32 numBlocks = PPCRecompiler_GetNumAddressSpaceBlocks();
for(uint32 i=0; i<numBlocks; i++)
{
if(!ppcRecompiler_reservedBlockMask[i])
continue;
// deallocate
uint64 offset = i * PPC_REC_ALLOC_BLOCK_SIZE;
MemMapper::FreeMemory(&(ppcRecompilerInstanceData->ppcRecompilerFuncTable[offset/4]), (PPC_REC_ALLOC_BLOCK_SIZE/4)*sizeof(void*), true);
MemMapper::FreeMemory(&(ppcRecompilerInstanceData->ppcRecompilerDirectJumpTable[offset/4]), (PPC_REC_ALLOC_BLOCK_SIZE/4)*sizeof(void*), true);
// mark as unmapped
ppcRecompiler_reservedBlockMask[i] = false;
}
}
// File: cemu-project_Cemu/src/Cafe/HW/VI/VI.cpp
#include "Cafe/HW/MMU/MMU.h"
namespace HW_VI
{
RunAtCemuBoot _initVI([]()
{
//MMU::RegisterMMIO_R16(MMU::MMIOInterface::INTERFACE_0C000000, 0x1e0002, VI_UKN1E0002_R16);
});
}
// File: cemu-project_Cemu/src/Cafe/HW/AI/AI.cpp
#include "Cafe/HW/MMU/MMU.h"
namespace HW_AI
{
void AI_STATUS_W16(uint32 addr, uint16 value)
{
}
RunAtCemuBoot _init([]()
{
//using MMIOFuncWrite16 = void (*)(uint32 addr, uint16 value);
//using MMIOFuncWrite32 = void (*)(uint32 addr, uint32 value);
//void RegisterMMIO_W16(MMIOFuncWrite16 ptr);
});
}
// File: cemu-project_Cemu/src/Cafe/HW/MMU/MMU.cpp
#include "Cafe/HW/MMU/MMU.h"
#include "Cafe/GraphicPack/GraphicPack2.h"
#include "util/MemMapper/MemMapper.h"
#include <wx/msgdlg.h>
#include "config/ActiveSettings.h"
uint8* memory_base = NULL; // base address of the reserved 4GB space
uint8* memory_elfCodeArena = NULL;
void checkMemAlloc(void* result)
{
if (result == nullptr)
assert_dbg();
}
void memory_initPhysicalLayout()
{
assert_dbg();
// todo - rewrite this using new MemMapper and MMU tables
//memory_base = (uint8*)VirtualAlloc(NULL, 0x100000000ULL, MEM_RESERVE, PAGE_READWRITE);
//VirtualFree(memory_base, 0, MEM_RELEASE);
//// todo - figure out all the ranges and allocate them properly
//// allocate memory for the kernel
////checkMemAlloc(VirtualAlloc(memory_base + 0x08000000, 1024*1024*2, MEM_COMMIT, PAGE_READWRITE));
//// allocate memory for bootrom
//checkMemAlloc(VirtualAlloc(memory_base + 0x00000000, 1024*16, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE));
//// allocate memory at 0x016FFFFC (is this some sort of register interface or maybe just temporary storage?)
//checkMemAlloc(VirtualAlloc(memory_base + 0x016FF000, 0x1000, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//// temporary storage for bootrom copy
//checkMemAlloc(VirtualAlloc(memory_base + 0x016c0000, 0x4000 + 0x4000, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//// 0x016c0000
//// L2
//checkMemAlloc(VirtualAlloc(memory_base + 0xE0000000, 1024 * 16, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//// kernel memory
//// currently it is unknown if this is it's own physical memory region or if this is mapped somehow
//// considering the ancast is never copied here and no memory mapping is setup it seems like a hardwired mirror to 0x08000000?
////checkMemAlloc(VirtualAlloc(memory_base + 0xFFE00000, 0x180000, MEM_COMMIT, PAGE_READWRITE));
//HANDLE hKernelMem = CreateFileMappingA(
// INVALID_HANDLE_VALUE, // use paging file
// NULL, // default security
// PAGE_READWRITE, // read/write access
// 0, // maximum object size (high-order DWORD)
// 1024 * 1024 * 2, // maximum object size (low-order DWORD)
// "kernelMem08000000"); // name of mapping object
//
//checkMemAlloc(MapViewOfFileEx(hKernelMem, FILE_MAP_ALL_ACCESS, 0, 0, 1024 * 1024 * 2, memory_base + 0x08000000));
//checkMemAlloc(MapViewOfFileEx(hKernelMem, FILE_MAP_ALL_ACCESS, 0, 0, 1024 * 1024 * 2, memory_base + 0xFFE00000));
//// IOSU->PPC bootParamBlock
//checkMemAlloc(VirtualAlloc(memory_base + 0x01FFF000, 0x1000, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//// used as dynamic kernel memory?
//checkMemAlloc(VirtualAlloc(memory_base + 0x1C000000, 0x01000000, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//// mapped by kernel to FF200000 (loader.elf?)
//checkMemAlloc(VirtualAlloc(memory_base + 0x1B800000, 0x00800000, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
}
std::vector<struct MMURange*> g_mmuRanges;
std::vector<MMURange*> memory_getMMURanges()
{
return g_mmuRanges;
}
MMURange* memory_getMMURangeByAddress(MPTR address)
{
for (auto& itr : g_mmuRanges)
{
if (address >= itr->getBase() && address < itr->getEnd())
return itr;
}
return nullptr;
}
MMURange::MMURange(const uint32 baseAddress, const uint32 size, MMU_MEM_AREA_ID areaId, const std::string_view name, MFLAG flags) : baseAddress(baseAddress), size(size), initSize(size), areaId(areaId), name(name), flags(flags)
{
g_mmuRanges.emplace_back(this);
}
void MMURange::mapMem()
{
cemu_assert_debug(!m_isMapped);
if (MemMapper::AllocateMemory(memory_base + baseAddress, size, MemMapper::PAGE_PERMISSION::P_RW, true) == nullptr)
{
std::string errorMsg = fmt::format("Unable to allocate {} memory", name);
wxMessageBox(errorMsg.c_str(), "Error", wxOK | wxCENTRE | wxICON_ERROR);
#if BOOST_OS_WINDOWS
ExitProcess(-1);
#else
exit(-1);
#endif
}
m_isMapped = true;
}
void MMURange::unmapMem()
{
MemMapper::FreeMemory(memory_base + baseAddress, size, true);
m_isMapped = false;
}
MMURange mmuRange_LOW0 { 0x00010000, 0x000F0000, MMU_MEM_AREA_ID::CODE_LOW0, "CODE_LOW0" }; // code cave (Cemuhook)
MMURange mmuRange_TRAMPOLINE_AREA { 0x00E00000, 0x00200000, MMU_MEM_AREA_ID::CODE_TRAMPOLINE, "TRAMPOLINE_AREA" }; // code area for trampolines and imports
MMURange mmuRange_CODECAVE { 0x01800000, 0x00400000, MMU_MEM_AREA_ID::CODE_CAVE, "CODECAVE" }; // code cave area (4MiB)
MMURange mmuRange_TEXT_AREA { 0x02000000, 0x0C000000, MMU_MEM_AREA_ID::CODE_MAIN, "TEXT_AREA" }; // module text sections go here (0x02000000 to 0x10000000, 224MiB)
MMURange mmuRange_CEMU_AREA { 0x0E000000, 0x02000000, MMU_MEM_AREA_ID::CEMU_PRIVATE, "CEMU_AREA", MMURange::MFLAG::FLAG_MAP_EARLY }; // Cemu-only, 32MiB. Should be allocated early for SysAllocator
MMURange mmuRange_MEM2 { 0x10000000, 0x40000000, MMU_MEM_AREA_ID::MEM2_DATA, "MEM2" }; // main memory area (1GB)
MMURange mmuRange_OVERLAY_AREA { 0xA0000000, 0x1C000000, MMU_MEM_AREA_ID::OVERLAY, "OVERLAY_AREA", MMURange::MFLAG::FLAG_OPTIONAL }; // has to be requested, 448MiB
MMURange mmuRange_FGBUCKET { 0xE0000000, 0x04000000, MMU_MEM_AREA_ID::FGBUCKET, "FGBUCKET" }; // foreground bucket (64MiB)
MMURange mmuRange_TILINGAPERTURE { 0xE8000000, 0x02000000, MMU_MEM_AREA_ID::TILING_APERATURE, "TILINGAPERTURE" }; // tiling aperture
MMURange mmuRange_MEM1 { 0xF4000000, 0x02000000, MMU_MEM_AREA_ID::MEM1, "MEM1" }; // 32MiB
MMURange mmuRange_RPLLOADER { 0xF6000000, 0x02000000, MMU_MEM_AREA_ID::RPLLOADER, "RPLLOADER_AREA" }; // shared with RPLLoader
MMURange mmuRange_SHARED_AREA { 0xF8000000, 0x02000000, MMU_MEM_AREA_ID::SHAREDDATA, "SHARED_AREA", MMURange::MFLAG::FLAG_MAP_EARLY }; // 32MiB, Cemuhook accesses this memory region at boot
MMURange mmuRange_CORE0_LC { 0xFFC00000, 0x00005000, MMU_MEM_AREA_ID::CPU_LC0, "CORE0_LC" }; // locked L2 cache of core 0
MMURange mmuRange_CORE1_LC { 0xFFC40000, 0x00005000, MMU_MEM_AREA_ID::CPU_LC1, "CORE1_LC" }; // locked L2 cache of core 1
MMURange mmuRange_CORE2_LC { 0xFFC80000, 0x00005000, MMU_MEM_AREA_ID::CPU_LC2, "CORE2_LC" }; // locked L2 cache of core 2
MMURange mmuRange_HIGHMEM { 0xFFFFF000, 0x00001000, MMU_MEM_AREA_ID::CPU_PER_CORE, "PER-CORE" }; // per-core memory? Used by coreinit and the PPC kernel to store core-context-specific data (like the current thread ptr). We don't use it, but Project Zero has a bug where it writes a byte at 0xfffffffe, thus this memory range needs to be writable
void memory_init()
{
// reserve a continuous range of 4GB
if(!memory_base)
memory_base = (uint8*)MemMapper::ReserveMemory(nullptr, (size_t)0x100000000, MemMapper::PAGE_PERMISSION::P_RW);
if( !memory_base )
{
debug_printf("memory_init(): Unable to reserve 4GB of memory\n");
debugBreakpoint();
wxMessageBox("Unable to reserve 4GB of memory\n", "Error", wxOK | wxCENTRE | wxICON_ERROR);
exit(-1);
}
for (auto& itr : g_mmuRanges)
{
if (itr->isMappedEarly())
itr->mapMem();
}
}
void memory_mapForCurrentTitle()
{
for (auto& itr : g_mmuRanges)
if(!itr->isMapped())
itr->resetConfig();
// expand ranges
auto gfxPackMappings = GraphicPack2::GetActiveRAMMappings();
for (auto& mapping : gfxPackMappings)
{
MMURange* mmuRange = nullptr;
for (auto& itr : g_mmuRanges)
{
if (itr->getBase() == mapping.first)
{
mmuRange = itr;
break;
}
}
if (!mmuRange)
{
cemuLog_log(LogType::Force, fmt::format("Graphic pack error: Unable to apply modified RAM mapping {:08x}-{:08x}. Start address must match one of the existing MMU ranges:", mapping.first, mapping.second));
for (auto& itr : g_mmuRanges)
{
if(itr->isMapped())
continue;
cemuLog_log(LogType::Force, fmt::format("{:08x}-{:08x} ({:})", itr->getBase(), itr->getEnd(), itr->getName()));
}
continue;
}
// make sure the new range isn't overlapping with anything
bool isOverlapping = false;
for (auto& itr : g_mmuRanges)
{
if(itr == mmuRange)
continue;
if (mapping.first < itr->getEnd() && mapping.second > itr->getBase())
{
cemuLog_log(LogType::Force, fmt::format("Graphic pack error: Unable to apply modified memory range {:08x}-{:08x} since it is overlapping with {:08x}-{:08x} ({:})", mapping.first, mapping.second, itr->getBase(), itr->getEnd(), itr->getName()));
isOverlapping = true;
}
}
if(isOverlapping)
continue;
mmuRange->setEnd(mapping.second);
}
for (auto& itr : g_mmuRanges)
{
if (!itr->isOptional() && !itr->isMappedEarly())
itr->mapMem();
}
}
void memory_unmapForCurrentTitle()
{
for (auto& itr : g_mmuRanges)
{
if (itr->isMapped() && !itr->isMappedEarly())
itr->unmapMem();
}
}
void memory_logModifiedMemoryRanges()
{
auto gfxPackMappings = GraphicPack2::GetActiveRAMMappings();
for (auto& mapping : gfxPackMappings)
{
MMURange* mmuRange = nullptr;
for (auto& itr : g_mmuRanges)
{
if (itr->getBase() == mapping.first)
{
mmuRange = itr;
break;
}
}
if (!mmuRange)
continue;
sint32 extraMem = (sint32)mapping.second - (sint32)(mmuRange->getBase() + mmuRange->getInitSize());
extraMem = (extraMem + 1023) / 1024;
std::string memAmountStr;
if (extraMem >= 8 * 1024 * 1024)
memAmountStr = fmt::format("{:+}MiB", (extraMem + 1023) / 1024);
else
memAmountStr = fmt::format("{:+}KiB", extraMem);
cemuLog_log(LogType::Force, fmt::format("Graphic pack: Using modified RAM mapping {:08x}-{:08x} ({})", mapping.first, mapping.second, memAmountStr));
}
}
void memory_enableOverlayArena()
{
if (mmuRange_OVERLAY_AREA.isMapped())
return;
mmuRange_OVERLAY_AREA.mapMem();
}
void memory_enableHBLELFCodeArea()
{
if (memory_elfCodeArena != NULL)
return;
memory_elfCodeArena = (uint8*)MemMapper::AllocateMemory(memory_base + 0x00800000, 0x00800000, MemMapper::PAGE_PERMISSION::P_RW, true);
if (memory_elfCodeArena == NULL)
{
debug_printf("memory_enableHBLELFCodeArea(): Unable to allocate memory for ELF arena\n");
debugBreakpoint();
}
}
bool memory_isAddressRangeAccessible(MPTR virtualAddress, uint32 size)
{
for (auto& itr : g_mmuRanges)
{
if(!itr->isMapped())
continue;
if (virtualAddress >= itr->getBase() && virtualAddress < itr->getEnd())
{
uint32 remainingSize = itr->getEnd() - virtualAddress;
return size <= remainingSize && itr->isMapped();
}
}
return false;
}
uint32 memory_virtualToPhysical(uint32 virtualOffset)
{
// currently we map virtual to physical space 1:1
return virtualOffset;
}
uint32 memory_physicalToVirtual(uint32 physicalOffset)
{
// currently we map virtual to physical space 1:1
return physicalOffset;
}
uint8* memory_getPointerFromPhysicalOffset(uint32 physicalOffset)
{
return memory_base + physicalOffset;
}
uint32 memory_getVirtualOffsetFromPointer(void* ptr)
{
if( !ptr )
return MPTR_NULL;
return (uint32)((uint8*)ptr - (uint8*)memory_base);
}
uint8* memory_getPointerFromVirtualOffset(uint32 virtualOffset)
{
return memory_base + virtualOffset;
}
uint8* memory_getPointerFromVirtualOffsetAllowNull(uint32 virtualOffset)
{
if( virtualOffset == MPTR_NULL )
return nullptr;
return memory_getPointerFromVirtualOffset(virtualOffset);
}
// write access
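// The emulated address space is big-endian, so all multi-byte accessors byte-swap on access.
// memory_writeDouble stores the value as two byte-swapped 32-bit halves with the high half at
// the lower address, which produces the correct big-endian 64-bit layout.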
void memory_writeDouble(uint32 address, double vf)
{
uint64 v = *(uint64*)&vf;
uint32 v1 = v&0xFFFFFFFF;
uint32 v2 = v>>32;
uint8* ptr = memory_getPointerFromVirtualOffset(address);
*(uint32*)(ptr+4) = CPU_swapEndianU32(v1);
*(uint32*)(ptr+0) = CPU_swapEndianU32(v2);
}
void memory_writeFloat(uint32 address, float vf)
{
uint32 v = *(uint32*)&vf;
*(uint32*)(memory_getPointerFromVirtualOffset(address)) = CPU_swapEndianU32(v);
}
void memory_writeU32(uint32 address, uint32 v)
{
*(uint32*)(memory_getPointerFromVirtualOffset(address)) = CPU_swapEndianU32(v);
}
void memory_writeU64(uint32 address, uint64 v)
{
*(uint64*)(memory_getPointerFromVirtualOffset(address)) = CPU_swapEndianU64(v);
}
void memory_writeU16(uint32 address, uint16 v)
{
*(uint16*)(memory_getPointerFromVirtualOffset(address)) = CPU_swapEndianU16(v);
}
void memory_writeU8(uint32 address, uint8 v)
{
*(uint8*)(memory_getPointerFromVirtualOffset(address)) = v;
}
// read access
double memory_readDouble(uint32 address)
{
uint32 v[2];
v[1] = *(uint32*)(memory_getPointerFromVirtualOffset(address));
v[0] = *(uint32*)(memory_getPointerFromVirtualOffset(address)+4);
v[0] = CPU_swapEndianU32(v[0]);
v[1] = CPU_swapEndianU32(v[1]);
return *(double*)v;
}
float memory_readFloat(uint32 address)
{
uint32 v = *(uint32*)(memory_getPointerFromVirtualOffset(address));
v = CPU_swapEndianU32(v);
return *(float*)&v;
}
uint64 memory_readU64(uint32 address)
{
uint64 v = *(uint64*)(memory_getPointerFromVirtualOffset(address));
return CPU_swapEndianU64(v);
}
uint32 memory_readU32(uint32 address)
{
uint32 v = *(uint32*)(memory_getPointerFromVirtualOffset(address));
return CPU_swapEndianU32(v);
}
uint16 memory_readU16(uint32 address)
{
uint16 v = *(uint16*)(memory_getPointerFromVirtualOffset(address));
return CPU_swapEndianU16(v);
}
uint8 memory_readU8(uint32 address)
{
return *(uint8*)(memory_getPointerFromVirtualOffset(address));
}
extern "C" DLLEXPORT void* memory_getBase()
{
return memory_base;
}
void memory_writeDumpFile(uint32 startAddr, uint32 size, const fs::path& path)
{
fs::path filePath = path;
filePath /= fmt::format("{:08x}.bin", startAddr);
FileStream* fs = FileStream::createFile2(filePath);
if (fs)
{
fs->writeData(memory_base + startAddr, size);
delete fs;
}
}
void memory_createDump()
{
const uint32 pageSize = MemMapper::GetPageSize();
fs::path path = ActiveSettings::GetUserDataPath("dump/ramDump{:}", (uint32)time(nullptr));
fs::create_directories(path);
for (auto& itr : g_mmuRanges)
{
if(!itr->isMapped())
continue;
memory_writeDumpFile(itr->getBase(), itr->getSize(), path);
}
}
namespace MMU
{
// MMIO access handler
// located in address region 0x0C000000 - 0x0E000000
// there seem to be multiple subregions + special meanings for some address bits maybe?
// Try to figure this out. We know these regions (in Wii U mode):
// 0x0C000000 (the old GC register interface?)
// 0x0D000000 (new Wii U stuff?)
std::unordered_map<PAddr, MMIOFuncWrite32>* g_mmioHandlerW32{};
std::unordered_map<PAddr, MMIOFuncWrite16>* g_mmioHandlerW16{};
std::unordered_map<PAddr, MMIOFuncRead32>* g_mmioHandlerR32{};
std::unordered_map<PAddr, MMIOFuncRead16>* g_mmioHandlerR16{};
void _initHandlers()
{
if (g_mmioHandlerW32)
return;
g_mmioHandlerW32 = new std::unordered_map<PAddr, MMIOFuncWrite32>();
g_mmioHandlerW16 = new std::unordered_map<PAddr, MMIOFuncWrite16>();
g_mmioHandlerR32 = new std::unordered_map<PAddr, MMIOFuncRead32>();
g_mmioHandlerR16 = new std::unordered_map<PAddr, MMIOFuncRead16>();
}
PAddr _MakeMMIOAddress(MMIOInterface interfaceLocation, uint32 relativeAddress)
{
PAddr addr = 0;
if (interfaceLocation == MMIOInterface::INTERFACE_0C000000)
addr = 0x0C000000;
else if (interfaceLocation == MMIOInterface::INTERFACE_0D000000)
addr = 0x0D000000;
else
assert_dbg();
return addr + relativeAddress;
}
void RegisterMMIO_W32(MMIOInterface interfaceLocation, uint32 relativeAddress, MMIOFuncWrite32 ptr)
{
_initHandlers();
g_mmioHandlerW32->emplace(_MakeMMIOAddress(interfaceLocation, relativeAddress), ptr);
}
void RegisterMMIO_W16(MMIOInterface interfaceLocation, uint32 relativeAddress, MMIOFuncWrite16 ptr)
{
_initHandlers();
g_mmioHandlerW16->emplace(_MakeMMIOAddress(interfaceLocation, relativeAddress), ptr);
}
void RegisterMMIO_R32(MMIOInterface interfaceLocation, uint32 relativeAddress, MMIOFuncRead32 ptr)
{
_initHandlers();
PAddr addr = _MakeMMIOAddress(interfaceLocation, relativeAddress);
g_mmioHandlerR32->emplace(addr, ptr);
}
void RegisterMMIO_R16(MMIOInterface interfaceLocation, uint32 relativeAddress, MMIOFuncRead16 ptr)
{
_initHandlers();
g_mmioHandlerR16->emplace(_MakeMMIOAddress(interfaceLocation, relativeAddress), ptr);
}
void WriteMMIO_32(PAddr address, uint32 value)
{
cemu_assert_debug((address & 0x3) == 0);
auto itr = g_mmioHandlerW32->find(address);
if (itr == g_mmioHandlerW32->end())
{
//cemuLog_logDebug(LogType::Force, "[MMU] MMIO write u32 0x{:08x} from unhandled address 0x{:08x}", value, address);
return;
}
return itr->second(address, value);
}
void WriteMMIO_16(PAddr address, uint16 value)
{
cemu_assert_debug((address & 0x1) == 0);
auto itr = g_mmioHandlerW16->find(address);
if (itr == g_mmioHandlerW16->end())
{
//cemuLog_logDebug(LogType::Force, "[MMU] MMIO write u16 0x{:04x} from unhandled address 0x{:08x}", (uint32)value, address);
return;
}
return itr->second(address, value);
}
// todo - instead of passing the physical address to Read/WriteMMIO we should pass an interface id and a relative address? This would allow remapping the hardware address (though we can just unregister + register at different addresses)
uint16 ReadMMIO_32(PAddr address)
{
cemu_assert_debug((address & 0x3) == 0);
auto itr = g_mmioHandlerR32->find(address);
if(itr == g_mmioHandlerR32->end())
{
//cemuLog_logDebug(LogType::Force, "[MMU] MMIO read u32 from unhandled address 0x{:08x}", address);
return 0;
}
return itr->second(address);
}
uint16 ReadMMIO_16(PAddr address)
{
cemu_assert_debug((address & 0x1) == 0);
auto itr = g_mmioHandlerR16->find(address);
if (itr == g_mmioHandlerR16->end())
{
//cemuLog_logDebug(LogType::Force, "[MMU] MMIO read u16 from unhandled address 0x{:08x}", address);
return 0;
}
return itr->second(address);
}
}
// File: cemu-project_Cemu/src/Cafe/HW/Latte/Transcompiler/LatteTCGenIR.cpp
#include "Cafe/HW/Latte/Transcompiler/LatteTC.h"
#include "Cafe/HW/Latte/ISA/LatteInstructions.h"
#include "util/Zir/Core/ZpIRBuilder.h"
void LatteTCGenIR::CF_CALL_FS_emitFetchAttribute(LatteParsedFetchShaderAttribute_t& attribute, Latte::GPRType dstGPR)
{
auto irBuilder = m_irGenContext.irBuilder;
// extract each channel
for (sint32 t = 0; t < 4; t++)
{
uint32 gprElementIndex = (uint32)dstGPR * 4 + t;
LatteConst::VertexFetchDstSel ds = (LatteConst::VertexFetchDstSel)attribute.ds[t];
switch (ds)
{
case LatteConst::VertexFetchDstSel::X:
case LatteConst::VertexFetchDstSel::Y:
case LatteConst::VertexFetchDstSel::Z:
case LatteConst::VertexFetchDstSel::W:
{
uint8 channelIndex = (uint8)((uint32)ds - (uint32)LatteConst::VertexFetchDstSel::X);
ZpIR::IRReg resultHolder = m_irGenContext.irBuilder->createReg(ZpIR::DataType::U32);
ZpIR::LocationSymbolName importSource = ZpIR::ShaderSubset::ShaderImportLocation().SetVertexAttribute(attribute.semanticId, channelIndex);
m_irGenContext.irBuilder->emit_IMPORT(importSource, resultHolder);
// swap endianness
if (attribute.endianSwap == LatteConst::VertexFetchEndianMode::SWAP_U32)
{
// todo - this may be more complex depending on type
ZpIR::IRReg elementResult;
irBuilder->emit_RR(ZpIR::IR::OpCode::SWAP_ENDIAN, irBuilder->createReg(elementResult, ZpIR::DataType::U32), resultHolder);
resultHolder = elementResult;
}
bool isSigned = attribute.isSigned;
// transform
LatteConst::VertexFetchFormat fmt = (LatteConst::VertexFetchFormat)attribute.format;
LatteClauseInstruction_VTX::NUM_FORMAT_ALL nfa = (LatteClauseInstruction_VTX::NUM_FORMAT_ALL)attribute.nfa;
if (fmt == LatteConst::VertexFetchFormat::VTX_FMT_32_32_32_FLOAT ||
fmt == LatteConst::VertexFetchFormat::VTX_FMT_32_32_FLOAT)
{
				uint32 numComp = 0;
if (fmt == LatteConst::VertexFetchFormat::VTX_FMT_32_32_32_FLOAT)
numComp = 3;
else if (fmt == LatteConst::VertexFetchFormat::VTX_FMT_32_32_FLOAT)
numComp = 2;
else
{
cemu_assert_debug(false);
}
cemu_assert_debug(attribute.endianSwap == LatteConst::VertexFetchEndianMode::SWAP_U32);
cemu_assert_debug(nfa == LatteClauseInstruction_VTX::NUM_FORMAT_ALL::NUM_FORMAT_SCALED);
cemu_assert_debug(channelIndex < numComp);
ZpIR::IRReg elementResult;
irBuilder->emit_RR(ZpIR::IR::OpCode::BITCAST, irBuilder->createReg(elementResult, ZpIR::DataType::F32), resultHolder);
resultHolder = elementResult;
}
else if (fmt == LatteConst::VertexFetchFormat::VTX_FMT_8_8_8_8)
{
				uint32 numComp = 0;
switch (fmt)
{
case LatteConst::VertexFetchFormat::VTX_FMT_8_8_8_8:
numComp = 4;
break;
case LatteConst::VertexFetchFormat::VTX_FMT_8_8_8:
numComp = 3;
break;
case LatteConst::VertexFetchFormat::VTX_FMT_8_8:
numComp = 2;
break;
case LatteConst::VertexFetchFormat::VTX_FMT_8:
numComp = 1;
break;
}
cemu_assert_debug(attribute.endianSwap == LatteConst::VertexFetchEndianMode::SWAP_NONE);
cemu_assert_debug(channelIndex < numComp);
if (nfa == LatteClauseInstruction_VTX::NUM_FORMAT_ALL::NUM_FORMAT_NORM)
{
					// normalized (convert to float, then scale into [0,1])
if (isSigned)
{
assert_dbg();
						// we could fake the sign extension by subtracting 128? Would be faster than the AND + conditional OR
}
else
{
resultHolder = irBuilder->emit_RR(ZpIR::IR::OpCode::CONVERT_INT_TO_FLOAT, ZpIR::DataType::F32, resultHolder);
resultHolder = irBuilder->emit_RRR(ZpIR::IR::OpCode::DIV, ZpIR::DataType::F32, resultHolder, irBuilder->createConstF32(255.0f));
}
}
else
{
assert_dbg();
}
}
else
{
assert_dbg();
}
// todo - we need a sign-extend instruction for this which should take arbitrary bit count
this->m_irGenContext.activeVars.set(dstGPR, t, resultHolder); // set GPR.channel to the result
break;
}
case LatteConst::VertexFetchDstSel::CONST_0F:
{
// todo - this could also be an integer zero. Use attribute format / other channel info to determine if this type is integer/float
ZpIR::IRReg resultHolder;
irBuilder->emit_RR(ZpIR::IR::OpCode::MOV, irBuilder->createReg(resultHolder, ZpIR::DataType::F32), irBuilder->createConstF32(0.0f));
this->m_irGenContext.activeVars.set(dstGPR, t, resultHolder);
break;
}
case LatteConst::VertexFetchDstSel::CONST_1F:
{
ZpIR::IRReg resultHolder;
irBuilder->emit_RR(ZpIR::IR::OpCode::MOV, irBuilder->createReg(resultHolder, ZpIR::DataType::F32), irBuilder->createConstF32(1.0f));
this->m_irGenContext.activeVars.set(dstGPR, t, resultHolder);
break;
}
default:
assert_dbg();
}
}
}
void LatteTCGenIR::processCF_CALL_FS(const LatteCFInstruction_DEFAULT& cfInstruction)
{
auto fetchShader = m_vertexShaderCtx.parsedFetchShader;
auto semanticTable = m_vertexShaderCtx.vtxSemanticTable;
// generate IR to decode vertex attributes
cemu_assert_debug(fetchShader->bufferGroupsInvalid.size() == 0); // todo
for(auto& bufferGroup : fetchShader->bufferGroups)
{
for (sint32 i = 0; i < bufferGroup.attribCount; i++)
{
auto& attribute = bufferGroup.attrib[i];
uint32 dstGPR = 0;
// get register index based on vtx semantic table
uint32 attributeShaderLoc = 0xFFFFFFFF;
for (sint32 f = 0; f < 32; f++)
{
if (semanticTable[f] == attribute.semanticId)
{
attributeShaderLoc = f;
break;
}
}
if (attributeShaderLoc == 0xFFFFFFFF)
continue; // attribute is not mapped to VS input
dstGPR = attributeShaderLoc + 1; // R0 is skipped
// emit IR code for attribute import (decode into GPR)
CF_CALL_FS_emitFetchAttribute(attribute, dstGPR);
}
}
}
// get IRReg for a Latte GPR (single channel). typeHint is used when the register has to be imported
// (see getTypedIRRegFromGPRElement() below for the variant that bitcasts on type mismatch)
ZpIR::IRReg LatteTCGenIR::getIRRegFromGPRElement(uint32 gprIndex, uint32 channel, ZpIR::DataType typeHint)
{
// get IR register for <GPR>.<channel> from currently active context
ZpIR::IRReg r;
if (m_irGenContext.activeVars.get(gprIndex, channel, r))
return r;
// if GPR.channel is not known
// in the entry basic block we can assume a value of zero because there is nowhere to import from
if (m_irGenContext.isEntryBasicBlock)
{
if (typeHint == ZpIR::DataType::F32)
return m_irGenContext.irBuilder->createConstF32(0.0f);
else if (typeHint == ZpIR::DataType::U32)
return m_irGenContext.irBuilder->createConstU32(0);
else if (typeHint == ZpIR::DataType::S32)
return m_irGenContext.irBuilder->createConstS32(0);
cemu_assert_debug(false);
}
// otherwise create import and resolve later during register allocation
r = m_irGenContext.irBuilder->createReg(typeHint);
m_irGenContext.irBuilder->addImport(r, gprIndex*4 + channel);
return r;
}
// similar to getIRRegFromGPRElement() but will bitcast the type if it mismatches
ZpIR::IRReg LatteTCGenIR::getTypedIRRegFromGPRElement(uint32 gprIndex, uint32 channel, ZpIR::DataType type)
{
auto irReg = getIRRegFromGPRElement(gprIndex, channel, type);
if (m_irGenContext.irBuilder->getRegType(irReg) == type)
return irReg;
// type does not match, bitcast into new reg
auto newReg = m_irGenContext.irBuilder->createReg(type);
m_irGenContext.irBuilder->emit_RR(ZpIR::IR::OpCode::BITCAST, newReg, irReg);
	// remember converted register since it's likely that it is accessed with the same type again
// todo - ideally, we would keep track of all the types. But it has to be efficient
m_irGenContext.activeVars.set(gprIndex, channel, newReg);
return newReg;
}
// try to determine the type of the constant from the raw u32 value
ZpIR::DataType _guessTypeFromConstantValue(uint32 bits)
{
if (bits == 0x3F800000) // float 1.0
return ZpIR::DataType::F32;
return ZpIR::DataType::S32;
}
// maybe pass a type hint parameter
ZpIR::IRReg LatteTCGenIR::loadALUOperand(LatteALUSrcSel srcSel, uint8 srcChan, bool isNeg, bool isAbs, bool isRel, uint8 indexMode, const uint32* literalData, ZpIR::DataType typeHint, bool convertOnTypeMismatch)
{
if (srcSel.isGPR())
{
//LatteTCGenIR::GPRElement gprElement = srcSel.getGPR() * 4 + srcChan;
if (isRel)
assert_dbg();
ZpIR::IRReg reg;
if(convertOnTypeMismatch)
reg = getTypedIRRegFromGPRElement(srcSel.getGPR(), srcChan, typeHint);
else
reg = getIRRegFromGPRElement(srcSel.getGPR(), srcChan, typeHint);
// if additional transformations are applied then we create a temporary IRReg here
// todo - is caching&recycling the transformed registers worth it?
if (isAbs || isNeg)
{
// create new var and apply transformation
assert_dbg();
}
return reg;
}
else if (srcSel.isAnyConst())
{
if (srcSel.isConst_0F())
{
return m_irGenContext.irBuilder->createConstF32(0.0f); // todo - could also be integer type constant? Try to find a way to predict the type correctly
}
else
assert_dbg();
}
else if (srcSel.isLiteral())
{
// literal constant
// we guess the type
return m_irGenContext.irBuilder->createTypedConst(literalData[srcChan], _guessTypeFromConstantValue(literalData[srcChan]));
}
else if (srcSel.isCFile())
{
// constant registers / uniform registers
uint32 cfileIndex = srcSel.getCFile();
auto newReg = m_irGenContext.irBuilder->createReg(typeHint);
ZpIR::LocationSymbolName importSource = ZpIR::ShaderSubset::ShaderImportLocation().SetUniformRegister(cfileIndex*4 + srcChan);
m_irGenContext.irBuilder->emit_IMPORT(importSource, newReg);
return newReg;
}
else
assert_dbg();
return 0;
}
void LatteTCGenIR::emitALUGroup(const LatteClauseInstruction_ALU* aluUnit[5], const uint32* literalData)
{
//struct
//{
// uint32 gprElementIndex;
// ZpIR::IRReg irReg;
// bool isSet;
//}groupOutput[5] = {};
ZpIR::BasicBlockBuilder* irBuilder = m_irGenContext.irBuilder;
// used by MOV instruction which can be used with any 32bit type (float, int, uint)
auto getMOVSourceType = [&](const LatteClauseInstruction_ALU_OP2* instrOP2) -> ZpIR::DataType
{
auto sel = instrOP2->getSrc0Sel();
if (sel.isGPR())
{
ZpIR::IRReg r;
if (!m_irGenContext.activeVars.get(sel.getGPR(), instrOP2->getSrc0Chan(), r))
{
// import, do we have an alternative way to guess the type?
				// for now let's assume float because it will be correct more often than not
// getting the type wrong means a temporary register and two bit cast instructions will be spawned
return ZpIR::DataType::F32;
}
return m_irGenContext.irBuilder->getRegType(r);
}
else if (sel.isLiteral())
{
return _guessTypeFromConstantValue(literalData[instrOP2->getSrc0Chan()]);
}
else
assert_dbg();
return ZpIR::DataType::S32;
};
auto getOp0Reg = [&](const LatteClauseInstruction_ALU_OP2* instrOP2, ZpIR::DataType type) -> ZpIR::IRReg
{
// todo - pass type hint, so internally correct type is used if register needs to be created
ZpIR::IRReg r = loadALUOperand(instrOP2->getSrc0Sel(), instrOP2->getSrc0Chan(), instrOP2->isSrc0Neg(), false, instrOP2->isSrc0Rel(), instrOP2->getIndexMode(), literalData, type, true);
// make sure type matches with 'type' (loadALUOperand should convert)
cemu_assert_debug(irBuilder->getRegType(r) == type);
return r;
};
auto getOp1Reg = [&](const LatteClauseInstruction_ALU_OP2* instrOP2, ZpIR::DataType type) -> ZpIR::IRReg
{
// todo - pass type hint, so internally correct type is used if register needs to be created
ZpIR::IRReg r = loadALUOperand(instrOP2->getSrc1Sel(), instrOP2->getSrc1Chan(), instrOP2->isSrc1Neg(), false, instrOP2->isSrc1Rel(), instrOP2->getIndexMode(), literalData, type, true);
// make sure type matches with 'type' (loadALUOperand should convert)
cemu_assert_debug(irBuilder->getRegType(r) == type);
return r;
};
auto getResultReg = [&](uint8 aluUnit, const LatteClauseInstruction_ALU_OP2* instrOP2, ZpIR::DataType type) -> ZpIR::IRReg
{
// create output register
ZpIR::IRReg r = m_irGenContext.irBuilder->createReg(type);
cemu_assert_debug(instrOP2->getDestClamp() == 0); // todo
cemu_assert_debug(instrOP2->getDestRel() == 0); // todo
cemu_assert_debug(instrOP2->getOMod() == 0); // todo
if (instrOP2->getWriteMask())
{
// output to GPR
m_irGenContext.activeVars.setAfterGroup(aluUnit, instrOP2->getDestGpr(), instrOP2->getDestElem(), r);
}
else
{
// output only to PV/PS
assert_dbg();
}
// output to PV/PS
// todo
// check for disabled destination GPR?
// also assign PV/PS
// todo
//ZpIR::IRReg r;
//if (m_irGenContext.activeVars.get(gprIndex, channel, r))
// return r;
//// if GPR not present then create import for it
//r = m_irGenContext.irBuilder->createReg(typeHint);
//m_irGenContext.irBuilder->addImport(r, 0x12345678);
return r;
};
for (sint32 aluUnitIndex = 0; aluUnitIndex < 5; aluUnitIndex++)
{
const LatteClauseInstruction_ALU* instr = aluUnit[aluUnitIndex];
if (instr == nullptr)
continue;
if (instr->isOP3())
{
assert_dbg();
}
else
{
auto opcode2 = instr->getOP2Code();
auto instrOP2 = instr->getOP2Instruction();
// prepare operands, load them into IRVars if they aren't already
uint8 indexMode = instrOP2->getIndexMode();
switch (opcode2)
{
case LatteClauseInstruction_ALU::OPCODE_OP2::MUL:
case LatteClauseInstruction_ALU::OPCODE_OP2::MUL_IEEE:
{
// how to implement this with least amount of copy paste and still having very good performance?
// maybe use lambdas? Or functions?
irBuilder->emit_RRR(ZpIR::IR::OpCode::MUL, getResultReg(aluUnitIndex, instrOP2, ZpIR::DataType::F32), getOp0Reg(instrOP2, ZpIR::DataType::F32), getOp1Reg(instrOP2, ZpIR::DataType::F32));
break;
}
case LatteClauseInstruction_ALU::OPCODE_OP2::MOV:
{
// MOV is type-agnostic, but some flags might make it apply float operations
ZpIR::DataType guessedType = getMOVSourceType(instrOP2);
irBuilder->emit_RR(ZpIR::IR::OpCode::MOV, getResultReg(aluUnitIndex, instrOP2, guessedType), getOp0Reg(instrOP2, guessedType));
break;
}
case LatteClauseInstruction_ALU::OPCODE_OP2::DOT4:
{
// reduction opcode
// must be mirrored to .xyzw units
cemu_assert_debug(aluUnitIndex == 0);
cemu_assert_debug(aluUnit[0]->getOP2Code() == aluUnit[1]->getOP2Code());
cemu_assert_debug(aluUnit[1]->getOP2Code() == aluUnit[2]->getOP2Code());
cemu_assert_debug(aluUnit[2]->getOP2Code() == aluUnit[3]->getOP2Code());
auto unit_x = instrOP2;
auto unit_y = aluUnit[1]->getOP2Instruction();
auto unit_z = aluUnit[2]->getOP2Instruction();
auto unit_w = aluUnit[3]->getOP2Instruction();
cemu_assert_debug(unit_x->getDestClamp() == false);
cemu_assert_debug(unit_x->getOMod() == 0);
cemu_assert_debug(unit_x->getDestRel() == false);
ZpIR::IRReg productX = irBuilder->emit_RRR(ZpIR::IR::OpCode::MUL, ZpIR::DataType::F32, getOp0Reg(unit_x, ZpIR::DataType::F32), getOp1Reg(unit_x, ZpIR::DataType::F32));
ZpIR::IRReg productY = irBuilder->emit_RRR(ZpIR::IR::OpCode::MUL, ZpIR::DataType::F32, getOp0Reg(unit_y, ZpIR::DataType::F32), getOp1Reg(unit_y, ZpIR::DataType::F32));
ZpIR::IRReg productZ = irBuilder->emit_RRR(ZpIR::IR::OpCode::MUL, ZpIR::DataType::F32, getOp0Reg(unit_z, ZpIR::DataType::F32), getOp1Reg(unit_z, ZpIR::DataType::F32));
ZpIR::IRReg productW = irBuilder->emit_RRR(ZpIR::IR::OpCode::MUL, ZpIR::DataType::F32, getOp0Reg(unit_w, ZpIR::DataType::F32), getOp1Reg(unit_w, ZpIR::DataType::F32));
ZpIR::IRReg sum = irBuilder->emit_RRR(ZpIR::IR::OpCode::ADD, ZpIR::DataType::F32, productX, productY);
sum = irBuilder->emit_RRR(ZpIR::IR::OpCode::ADD, ZpIR::DataType::F32, sum, productZ);
sum = irBuilder->emit_RRR(ZpIR::IR::OpCode::ADD, ZpIR::DataType::F32, sum, productW);
// assign result
if (unit_x->getWriteMask())
m_irGenContext.activeVars.setAfterGroup(0, unit_x->getDestGpr(), unit_x->getDestElem(), sum);
if (unit_y->getWriteMask())
m_irGenContext.activeVars.setAfterGroup(1, unit_y->getDestGpr(), unit_y->getDestElem(), sum);
if (unit_z->getWriteMask())
m_irGenContext.activeVars.setAfterGroup(2, unit_z->getDestGpr(), unit_z->getDestElem(), sum);
if (unit_w->getWriteMask())
m_irGenContext.activeVars.setAfterGroup(3, unit_w->getDestGpr(), unit_w->getDestElem(), sum);
// also set result in PV.x
m_irGenContext.activeVars.setAfterGroupPVPS(0, sum);
// todo - do we need to update the other units?
aluUnitIndex += 3;
				continue;
}
default:
assert_dbg();
}
// handle dest clamp
if (instrOP2->getDestClamp())
{
assert_dbg();
}
//uint32 src0Sel = (aluWord0 >> 0) & 0x1FF; // source selection
//uint32 src1Sel = (aluWord0 >> 13) & 0x1FF;
//uint32 src0Rel = (aluWord0 >> 9) & 0x1; // relative addressing mode
//uint32 src1Rel = (aluWord0 >> 22) & 0x1;
//uint32 src0Chan = (aluWord0 >> 10) & 0x3; // component selection x/y/z/w
//uint32 src1Chan = (aluWord0 >> 23) & 0x3;
//uint32 src0Neg = (aluWord0 >> 12) & 0x1; // negate input
//uint32 src1Neg = (aluWord0 >> 25) & 0x1;
//uint32 indexMode = (aluWord0 >> 26) & 7;
//uint32 predSel = (aluWord0 >> 29) & 3;
//uint32 src0Abs = (aluWord1 >> 0) & 1;
//uint32 src1Abs = (aluWord1 >> 1) & 1;
//uint32 updateExecuteMask = (aluWord1 >> 2) & 1;
//uint32 updatePredicate = (aluWord1 >> 3) & 1;
//uint32 writeMask = (aluWord1 >> 4) & 1;
//uint32 omod = (aluWord1 >> 5) & 3;
//uint32 destGpr = (aluWord1 >> 21) & 0x7F;
//uint32 destRel = (aluWord1 >> 28) & 1;
//uint32 destElem = (aluWord1 >> 29) & 3;
//uint32 destClamp = (aluWord1 >> 31) & 1;
}
}
// update IR vars with outputs from group
m_irGenContext.activeVars.applyDelayedAfterGroup();
}
void LatteTCGenIR::processCF_ALU(const LatteCFInstruction_ALU& cfInstruction)
{
uint32 aluAddr = cfInstruction.getField_ADDR();
uint32 aluCount = cfInstruction.getField_COUNT();
const uint32* clauseCode = m_ctx.programData + aluAddr * 2;
uint32 clauseLength = aluCount;
const LatteClauseInstruction_ALU* aluCode = (const LatteClauseInstruction_ALU*)clauseCode;
const LatteClauseInstruction_ALU* aluUnit[5] = {};
const LatteClauseInstruction_ALU* instr = aluCode;
const LatteClauseInstruction_ALU* instrLast = aluCode + clauseLength;
// process instructions in groups
uint8 literalMask = 0;
while (instr < instrLast)
{
if (instr->isOP3())
{
assert_dbg();
}
else
{
LatteClauseInstruction_ALU::OPCODE_OP2 opcode2 = instr->getOP2Code();
const LatteClauseInstruction_ALU_OP2* op = instr->getOP2Instruction();
uint32 unitIndex = 0;
if (op->isTranscedentalUnit())
unitIndex = 4;
else
{
unitIndex = op->getDestElem();
if (aluUnit[unitIndex]) // unit already occupied, use transcendental unit instead
unitIndex = 4;
}
cemu_assert_debug(!aluUnit[unitIndex]); // unit already used
aluUnit[unitIndex] = op;
// check for literal access
if (op->getSrc0Sel().isLiteral())
literalMask |= (op->getSrc0Chan() >= 2 ? 2 : 1);
if (op->getSrc1Sel().isLiteral())
literalMask |= (op->getSrc1Chan() >= 2 ? 2 : 1);
}
if (instr->isLastInGroup())
{
// emit code for group
// extract literal constants
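			// (literal values live in the instruction slots directly following the last instruction of the group;
			// each 64-bit slot holds two 32-bit literals, so bit 0 of literalMask covers channels 0/1 and bit 1
			// covers channels 2/3 - which is why instr is advanced by one or two extra slots below)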
const uint32* literalData = nullptr;
if (literalMask)
{
literalData = (const uint32*)(instr + 1);
if (literalMask & 2)
instr += 2;
else
instr += 1;
if ((instr + 1) > instrLast)
assert_dbg(); // out of bounds
}
// generate code for group
emitALUGroup(aluUnit, literalData);
// reset group
std::fill(aluUnit, aluUnit + 5, nullptr);
literalMask = 0;
}
instr++;
}
if (aluUnit[0] || aluUnit[1] || aluUnit[2] || aluUnit[3] || aluUnit[4])
assert_dbg();
}
void LatteTCGenIR::processCF_EXPORT(const LatteCFInstruction_EXPORT_IMPORT& cfInstruction)
{
auto exportType = cfInstruction.getField_TYPE();
cemu_assert_debug(cfInstruction.getField_BURST_COUNT() == 1); // todo
uint32 arrayBase = cfInstruction.getField_ARRAY_BASE();
cemu_assert_debug(cfInstruction.isEncodingBUF() == false); // todo
LatteCFInstruction_EXPORT_IMPORT::COMPSEL sel[4];
sel[0] = cfInstruction.getSwizField_SEL_X();
sel[1] = cfInstruction.getSwizField_SEL_Y();
sel[2] = cfInstruction.getSwizField_SEL_Z();
sel[3] = cfInstruction.getSwizField_SEL_W();
uint32 sourceGPR = cfInstruction.getField_RW_GPR();
ZpIR::DataType typeHint;
if (exportType == LatteCFInstruction_EXPORT_IMPORT::EXPORT_TYPE::POSITION)
typeHint = ZpIR::DataType::F32;
else if (exportType == LatteCFInstruction_EXPORT_IMPORT::EXPORT_TYPE::PARAMETER)
{
// todo - determine correct type for parameter
typeHint = ZpIR::DataType::F32;
}
else
assert_dbg();
// get xyzw registers
ZpIR::IRReg regArray[4];
size_t regExportCount = 0; // number of exported registers/channels, number of valid regArray entries
for (size_t i = 0; i < 4; i++)
{
switch (sel[i])
{
case LatteCFInstruction_EXPORT_IMPORT::COMPSEL::X:
case LatteCFInstruction_EXPORT_IMPORT::COMPSEL::Y:
case LatteCFInstruction_EXPORT_IMPORT::COMPSEL::Z:
case LatteCFInstruction_EXPORT_IMPORT::COMPSEL::W:
{
uint32 channelIndex = (uint32)sel[i];
regArray[regExportCount] = getTypedIRRegFromGPRElement(sourceGPR, channelIndex, typeHint);
regExportCount++;
break;
}
default:
{
assert_dbg();
break;
}
}
//ZpIR::IRReg r;
//if (m_irGenContext.activeVars.get(gprIndex, channel, r))
// return r;
}
//ZpIR::LocationSymbolName exportSymbolName;
ZpIR::ShaderSubset::ShaderExportLocation loc;
if (exportType == LatteCFInstruction_EXPORT_IMPORT::EXPORT_TYPE::POSITION)
{
loc.SetPosition();
}
else if (exportType == LatteCFInstruction_EXPORT_IMPORT::EXPORT_TYPE::PARAMETER)
{
loc.SetOutputAttribute(arrayBase);
//exportSymbolName = 0x20000 + arrayBase;
}
else
{
// todo
assert_dbg();
}
cemu_assert_debug(regExportCount == 4); // todo - encode channel mask (e.g. xyz, xw, w, etc.) into export symbol name
m_irGenContext.irBuilder->emit_EXPORT(loc, std::span(regArray, regArray + regExportCount));
}
| 22,066
|
C++
|
.cpp
| 560
| 35.830357
| 211
| 0.717888
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,219
|
LatteTC.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Transcompiler/LatteTC.cpp
|
#include "Cafe/HW/Latte/Transcompiler/LatteTC.h"
#include "Cafe/HW/Latte/ISA/LatteInstructions.h"
#include "util/Zir/Core/IR.h"
#include "util/Zir/Core/ZpIRBuilder.h"
#include "util/Zir/Core/ZpIRDebug.h"
class CFBlockNode
{
public:
CFBlockNode(uint32 cfAddress, const LatteCFInstruction& cfInstruction) : cfAddress(cfAddress), cfNext(cfAddress + sizeof(LatteCFInstruction))
{
m_cfInstructions.emplace_back(cfInstruction);
irBasicBlock = new ZpIR::ZpIRBasicBlock();
};
void addInstruction(const LatteCFInstruction& cfInstruction)
{
m_cfInstructions.emplace_back(cfInstruction);
}
// next CF address after this block of CF instructions (assuming no branches)
void setNextAddress(uint32 addr)
{
cfNext = addr;
}
uint32 cfAddress; // offset of the first cf instruction
uint32 cfNext{ 0 }; // offset of the next cf instruction if no branch happens
ZpIR::ZpIRBasicBlock* irBasicBlock{};
std::vector<LatteCFInstruction> m_cfInstructions;
private:
};
// emit IR code for all clauses in a DAG node
void LatteTCGenIR::genIRForNode(CFBlockNode& node)
{
m_irGenContext.reset();
m_irGenContext.irBuilder = new ZpIR::BasicBlockBuilder(node.irBasicBlock);
m_irGenContext.isEntryBasicBlock = (&node == m_ctx.mainFunctionDAG.GetEntryNode());
// for vertex shaders add initialization code to main()
if (&node == m_ctx.mainFunctionDAG.GetEntryNode() && m_ctx.shaderType == SHADER_TYPE::VERTEX)
{
for (uint32 channel = 0; channel < 4; channel++)
{
ZpIR::IRReg irReg = m_irGenContext.irBuilder->createReg(ZpIR::DataType::U32);
m_irGenContext.irBuilder->emit_RR(ZpIR::IR::OpCode::MOV, irReg, m_irGenContext.irBuilder->createConstU32(0));
this->m_irGenContext.activeVars.set(0, channel, irReg);
}
// todo - correctly init R0 based on currently set context register state
}
for (auto& itr : node.m_cfInstructions)
{
const auto opcode = itr.getField_Opcode();
if (const auto cfInstr = itr.getParserIfOpcodeMatch<LatteCFInstruction_DEFAULT>())
{
if(opcode == LatteCFInstruction::OPCODE::INST_CALL_FS)
processCF_CALL_FS(*cfInstr);
else
assert_dbg();
}
else if (const auto cfInstr = itr.getParserIfOpcodeMatch<LatteCFInstruction_ALU>())
{
if (opcode == LatteCFInstruction::OPCODE::INST_ALU)
processCF_ALU(*cfInstr);
else
assert_dbg();
}
else if (const auto cfInstr = itr.getParserIfOpcodeMatch<LatteCFInstruction_EXPORT_IMPORT>())
{
if (opcode == LatteCFInstruction::OPCODE::INST_EXPORT ||
opcode == LatteCFInstruction::OPCODE::INST_EXPORT_DONE)
processCF_EXPORT(*cfInstr);
else
assert_dbg();
}
else
{
debug_printf("Missing implementation for CF opcode 0x%02x\n", itr.getField_Opcode());
assert_dbg(); // todo
}
}
}
// parse CF program and create unlinked DAG
void LatteTCGenIR::parseCF_createNodes(NodeDAG& nodeDAG)
{
const LatteCFInstruction* cfCode = (const LatteCFInstruction*)m_ctx.programData;
const size_t cfMaxCount = m_ctx.programSize / 8;
// quick prepass to gather a list of jump destinations used by the next pass
// todo
// linear pass where we turn uninterrupted sequences of CF instructions (no branch to or from) into CFBlockNode
// algorithm description:
// 1) Create CFBlockNode from first CF instruction. Make it the currently active node
// 2) For each remaining (1 .. n) CF instruction of program
// 2.1) If CF instruction can be merged into active node (no branch destination, no conditionals or other control flow branches) then add it to the currently active node
// 2.2) Otherwise finalize active node, add it to node list. Then create new CFBlockNode node from CF instruction and make it active node
// 3) Finalize active node and add to node list
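	// illustrative example: a minimal vertex shader CF program "CALL_FS, ALU, EXPORT_DONE" collapses into a
	// single CFBlockNode - CALL_FS forms the initial node and the following ALU/EXPORT instructions are always
	// mergeable in the current implementation, with END_OF_PROGRAM on the final export terminating the pass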
cemu_assert_debug(cfMaxCount != 0); // zero not allowed
CFBlockNode* activeNode = new CFBlockNode(0, cfCode[0]); // first instruction becomes the initial node
size_t cfIndex = 1;
//m_nodes.emplace_back(activeNode);
while (cfIndex < cfMaxCount)
{
const LatteCFInstruction* baseInstr = cfCode + cfIndex;
cfIndex++;
		bool canMerge = false;
bool isALU = false;
if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_DEFAULT>())
{
cemu_assert_debug(cfInstr->getField_WHOLE_QUAD_MODE() == 0);
cemu_assert_debug(cfInstr->getField_CALL_COUNT() == 0); // todo
cemu_assert_debug(cfInstr->getField_POP_COUNT() == 0); // todo
auto cond = cfInstr->getField_COND();
assert_dbg();
//cfInstr->getField_COND() == LatteCFInstruction::CF_COND::CF_COND_ACTIVE;
}
else if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_ALU>())
{
			// always merge ALU clauses since they don't have their own condition modes?
// todo - except if they are a jump target
canMerge = true;
isALU = true;
}
else if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_EXPORT_IMPORT>())
{
// no extra conditions, always merge
canMerge = true;
}
else
{
debug_printf("Missing implementation for CF opcode 0x%02x\n", baseInstr->getField_Opcode());
assert_dbg(); // todo
}
if (canMerge)
{
activeNode->addInstruction(*baseInstr);
}
else
{
activeNode->setNextAddress((uint32)cfIndex - 1);
nodeDAG.m_nodes.emplace_back(activeNode);
// start new active node
activeNode = new CFBlockNode((uint32)cfIndex - 1, *baseInstr);
}
if (!isALU && baseInstr->getField_END_OF_PROGRAM())
break;
}
// finalize last node
cemu_assert_debug(!activeNode->m_cfInstructions.empty());
nodeDAG.m_nodes.emplace_back(activeNode);
}
void LatteTCGenIR::parseCFToDAG()
{
// parse CF and create preliminary node DAG
parseCF_createNodes(m_ctx.mainFunctionDAG);
// link up the nodes
cemu_assert_debug(m_ctx.mainFunctionDAG.m_nodes.size() == 1);
// assign to ir object
for (auto& itr : m_ctx.mainFunctionDAG.m_nodes)
m_ctx.irObject->m_basicBlocks.emplace_back(itr->irBasicBlock);
m_ctx.irObject->m_entryBlocks.emplace_back(m_ctx.mainFunctionDAG.m_nodes[0]->irBasicBlock);
}
void LatteTCGenIR::emitIR()
{
cemu_assert_debug(m_ctx.mainFunctionDAG.m_nodes.size() == 1);
for (auto& itr : m_ctx.mainFunctionDAG.m_nodes)
{
genIRForNode(*itr);
}
}
void LatteTCGenIR::cleanup()
{
// clean up
//for (auto itr : m_ctx.list_irNodesCtx)
// delete itr;
//m_ctx.list_irNodesCtx.clear();
}
void LatteTCGenIR::setVertexShaderContext(const LatteFetchShader* parsedFetchShader, const uint32* vtxSemanticTable)
{
m_vertexShaderCtx.parsedFetchShader = parsedFetchShader;
m_vertexShaderCtx.vtxSemanticTable = vtxSemanticTable;
}
ZpIR::ZpIRFunction* LatteTCGenIR::transcompileLatteToIR(const void* programData, uint32 programSize, SHADER_TYPE shaderType)
{
//return nullptr;
ZpIR::ZpIRFunction* irObject = new ZpIR::ZpIRFunction();
// init context
m_ctx = {};
m_ctx.programData = (const uint32*)programData;
m_ctx.programSize = programSize;
m_ctx.irObject = irObject;
m_ctx.shaderType = shaderType;
// parse control flow instructions and convert it to list of CFBlockNode
// each node is a single IR basic block, consisting of one or multiple CF instructions
parseCFToDAG();
// process clauses and emit IR nodes
emitIR();
// cleanup
cleanup();
return irObject;
}
| 7,186
|
C++
|
.cpp
| 195
| 34.194872
| 170
| 0.750144
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,220
|
LatteDecompilerAnalyzer.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerAnalyzer.cpp
|
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInternal.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
/*
* Return index of used color attachment based on shader pixel export index (0-7)
*/
sint32 LatteDecompiler_getColorOutputIndexFromExportIndex(LatteDecompilerShaderContext* shaderContext, sint32 exportIndex)
{
sint32 colorOutputIndex = -1;
sint32 outputCounter = 0;
uint32 cbShaderMask = shaderContext->contextRegisters[mmCB_SHADER_MASK];
uint32 cbShaderControl = shaderContext->contextRegisters[mmCB_SHADER_CONTROL];
for(sint32 m=0; m<8; m++)
{
uint32 outputMask = (cbShaderMask>>(m*4))&0xF;
if( outputMask == 0 )
continue;
cemu_assert_debug(outputMask == 0xF); // mask is unsupported
if( outputCounter == exportIndex )
{
colorOutputIndex = m;
break;
}
outputCounter++;
}
cemu_assert_debug(colorOutputIndex != -1); // real outputs and outputs defined via mask do not match up
return colorOutputIndex;
}
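// example of the mapping above (for illustration, not from the original source): if CB_SHADER_MASK only
// enables render targets 1 and 3 (nibbles 0xF at positions 1 and 3), then pixel export index 0 resolves to
// color output 1 and export index 1 resolves to color output 3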
void _remapUniformAccess(LatteDecompilerShaderContext* shaderContext, bool isRegisterUniform, uint32 kcacheBankId, uint32 uniformIndex)
{
auto& list_uniformMapping = shaderContext->shader->list_remappedUniformEntries;
for(uint32 i=0; i<list_uniformMapping.size(); i++)
{
LatteDecompilerRemappedUniformEntry_t* ufMapping = list_uniformMapping.data()+i;
if( isRegisterUniform )
{
if( ufMapping->isRegister == true && ufMapping->index == uniformIndex )
{
return;
}
}
else
{
if( ufMapping->isRegister == false && ufMapping->kcacheBankId == kcacheBankId && ufMapping->index == uniformIndex )
{
return;
}
}
}
// add new mapping
LatteDecompilerRemappedUniformEntry_t newMapping = {0};
if( isRegisterUniform )
{
newMapping.isRegister = true;
newMapping.index = uniformIndex;
newMapping.mappedIndex = (uint32)list_uniformMapping.size();
}
else
{
newMapping.isRegister = false;
newMapping.kcacheBankId = kcacheBankId;
newMapping.index = uniformIndex;
newMapping.mappedIndex = (uint32)list_uniformMapping.size();
}
list_uniformMapping.emplace_back(newMapping);
}
/*
 * Returns true if the instruction takes integer operands or returns an integer value
*/
bool _isIntegerInstruction(const LatteDecompilerALUInstruction& aluInstruction)
{
if (aluInstruction.isOP3 == false)
{
// OP2
switch (aluInstruction.opcode)
{
case ALU_OP2_INST_ADD:
case ALU_OP2_INST_MUL:
case ALU_OP2_INST_MUL_IEEE:
case ALU_OP2_INST_MAX:
case ALU_OP2_INST_MIN:
case ALU_OP2_INST_FLOOR:
case ALU_OP2_INST_FRACT:
case ALU_OP2_INST_TRUNC:
case ALU_OP2_INST_MOV:
case ALU_OP2_INST_NOP:
case ALU_OP2_INST_DOT4:
case ALU_OP2_INST_DOT4_IEEE:
case ALU_OP2_INST_CUBE:
case ALU_OP2_INST_EXP_IEEE:
case ALU_OP2_INST_LOG_CLAMPED:
case ALU_OP2_INST_LOG_IEEE:
case ALU_OP2_INST_SQRT_IEEE:
case ALU_OP2_INST_SIN:
case ALU_OP2_INST_COS:
case ALU_OP2_INST_RNDNE:
case ALU_OP2_INST_MAX_DX10:
case ALU_OP2_INST_MIN_DX10:
case ALU_OP2_INST_SETGT:
case ALU_OP2_INST_SETGE:
case ALU_OP2_INST_SETNE:
case ALU_OP2_INST_SETE:
case ALU_OP2_INST_PRED_SETE:
case ALU_OP2_INST_PRED_SETGT:
case ALU_OP2_INST_PRED_SETGE:
case ALU_OP2_INST_PRED_SETNE:
case ALU_OP2_INST_KILLE:
case ALU_OP2_INST_KILLGT:
case ALU_OP2_INST_KILLGE:
case ALU_OP2_INST_RECIP_FF:
case ALU_OP2_INST_RECIP_IEEE:
case ALU_OP2_INST_RECIPSQRT_CLAMPED:
case ALU_OP2_INST_RECIPSQRT_FF:
case ALU_OP2_INST_RECIPSQRT_IEEE:
return false;
case ALU_OP2_INST_FLT_TO_INT:
case ALU_OP2_INST_INT_TO_FLOAT:
case ALU_OP2_INST_UINT_TO_FLOAT:
case ALU_OP2_INST_ASHR_INT:
case ALU_OP2_INST_LSHR_INT:
case ALU_OP2_INST_LSHL_INT:
case ALU_OP2_INST_MULLO_INT:
case ALU_OP2_INST_MULLO_UINT:
case ALU_OP2_INST_FLT_TO_UINT:
case ALU_OP2_INST_AND_INT:
case ALU_OP2_INST_OR_INT:
case ALU_OP2_INST_XOR_INT:
case ALU_OP2_INST_NOT_INT:
case ALU_OP2_INST_ADD_INT:
case ALU_OP2_INST_SUB_INT:
case ALU_OP2_INST_MAX_INT:
case ALU_OP2_INST_MIN_INT:
case ALU_OP2_INST_SETE_INT:
case ALU_OP2_INST_SETGT_INT:
case ALU_OP2_INST_SETGE_INT:
case ALU_OP2_INST_SETNE_INT:
case ALU_OP2_INST_SETGT_UINT:
case ALU_OP2_INST_SETGE_UINT:
case ALU_OP2_INST_PRED_SETE_INT:
case ALU_OP2_INST_PRED_SETGT_INT:
case ALU_OP2_INST_PRED_SETGE_INT:
case ALU_OP2_INST_PRED_SETNE_INT:
case ALU_OP2_INST_KILLE_INT:
case ALU_OP2_INST_KILLGT_INT:
case ALU_OP2_INST_KILLNE_INT:
case ALU_OP2_INST_MOVA_FLOOR:
case ALU_OP2_INST_MOVA_INT:
return true;
// these return an integer result but are usually used only for conditionals
case ALU_OP2_INST_SETE_DX10:
case ALU_OP2_INST_SETGT_DX10:
case ALU_OP2_INST_SETGE_DX10:
case ALU_OP2_INST_SETNE_DX10:
return true;
default:
#ifdef CEMU_DEBUG_ASSERT
debug_printf("_isIntegerInstruction(): OP3=%s opcode=%02x\n", aluInstruction.isOP3 ? "true" : "false", aluInstruction.opcode);
cemu_assert_debug(false);
#endif
break;
}
}
else
{
// OP3
switch (aluInstruction.opcode)
{
case ALU_OP3_INST_MULADD:
case ALU_OP3_INST_MULADD_D2:
case ALU_OP3_INST_MULADD_M2:
case ALU_OP3_INST_MULADD_M4:
case ALU_OP3_INST_MULADD_IEEE:
case ALU_OP3_INST_CMOVE:
case ALU_OP3_INST_CMOVGT:
case ALU_OP3_INST_CMOVGE:
return false;
case ALU_OP3_INST_CNDE_INT:
case ALU_OP3_INST_CNDGT_INT:
case ALU_OP3_INST_CMOVGE_INT:
return true;
default:
#ifdef CEMU_DEBUG_ASSERT
debug_printf("_isIntegerInstruction(): OP3=%s opcode=%02x\n", aluInstruction.isOP3?"true":"false", aluInstruction.opcode);
#endif
break;
}
}
return false;
}
/*
* Analyze ALU CF instruction and all instructions within the ALU clause
*/
void LatteDecompiler_analyzeALUClause(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
// check if this shader has any clause that potentially modifies the pixel execution state
if( cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE || cfInstruction->type == GPU7_CF_INST_ALU_POP_AFTER || cfInstruction->type == GPU7_CF_INST_ALU_POP2_AFTER || cfInstruction->type == GPU7_CF_INST_ALU_BREAK || cfInstruction->type == GPU7_CF_INST_ALU_ELSE_AFTER )
{
shaderContext->analyzer.modifiesPixelActiveState = true;
}
// analyze ALU instructions
for(auto& aluInstruction : cfInstruction->instructionsALU)
{
// ignore NOP instruction
if( !aluInstruction.isOP3 && aluInstruction.opcode == ALU_OP2_INST_NOP )
continue;
// check for CUBE instruction
if( !aluInstruction.isOP3 && aluInstruction.opcode == ALU_OP2_INST_CUBE )
{
shaderContext->analyzer.hasRedcCUBE = true;
}
// check for integer instruction
if (_isIntegerInstruction(aluInstruction))
shaderContext->analyzer.usesIntegerValues = true;
// process all available operands (inputs)
for(sint32 f=0; f<3; f++)
{
// check input for uniform access
if( aluInstruction.sourceOperand[f].sel == 0xFFFFFFFF )
continue; // source operand not set/used
// about uniform register and buffer access tracking:
// for absolute indices we can determine a maximum size that is accessed
// relative accesses are tricky because the upper bound of accessed indices is unknown
// worst case we have to load the full file (256 * 16 byte entries) or for buffers an arbitrary upper bound (64KB in our case)
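		// e.g. an absolute access like C[5] only widens the tracked maximum index to 5, while a relative
		// access like C[AR+5] forces the conservative upper bound mentioned above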
if( GPU7_ALU_SRC_IS_CFILE(aluInstruction.sourceOperand[f].sel) )
{
if (aluInstruction.sourceOperand[f].rel)
{
shaderContext->analyzer.uniformRegisterAccessTracker.TrackAccess(GPU7_ALU_SRC_GET_CFILE_INDEX(aluInstruction.sourceOperand[f].sel), true);
}
else
{
_remapUniformAccess(shaderContext, true, 0, GPU7_ALU_SRC_GET_CFILE_INDEX(aluInstruction.sourceOperand[f].sel));
shaderContext->analyzer.uniformRegisterAccessTracker.TrackAccess(GPU7_ALU_SRC_GET_CFILE_INDEX(aluInstruction.sourceOperand[f].sel), false);
}
}
else if( GPU7_ALU_SRC_IS_CBANK0(aluInstruction.sourceOperand[f].sel) )
{
// uniform bank 0 (uniform buffer with index cfInstruction->cBank0Index)
uint32 uniformBufferIndex = cfInstruction->cBank0Index;
cemu_assert(uniformBufferIndex < LATTE_NUM_MAX_UNIFORM_BUFFERS);
uint32 offset = GPU7_ALU_SRC_GET_CBANK0_INDEX(aluInstruction.sourceOperand[f].sel)+cfInstruction->cBank0AddrBase;
_remapUniformAccess(shaderContext, false, uniformBufferIndex, offset);
shaderContext->analyzer.uniformBufferAccessTracker[uniformBufferIndex].TrackAccess(offset, aluInstruction.sourceOperand[f].rel);
}
else if( GPU7_ALU_SRC_IS_CBANK1(aluInstruction.sourceOperand[f].sel) )
{
// uniform bank 1 (uniform buffer with index cfInstruction->cBank1Index)
uint32 uniformBufferIndex = cfInstruction->cBank1Index;
cemu_assert(uniformBufferIndex < LATTE_NUM_MAX_UNIFORM_BUFFERS);
uint32 offset = GPU7_ALU_SRC_GET_CBANK1_INDEX(aluInstruction.sourceOperand[f].sel)+cfInstruction->cBank1AddrBase;
_remapUniformAccess(shaderContext, false, uniformBufferIndex, offset);
shaderContext->analyzer.uniformBufferAccessTracker[uniformBufferIndex].TrackAccess(offset, aluInstruction.sourceOperand[f].rel);
}
else if( GPU7_ALU_SRC_IS_GPR(aluInstruction.sourceOperand[f].sel) )
{
sint32 gprIndex = GPU7_ALU_SRC_GET_GPR_INDEX(aluInstruction.sourceOperand[f].sel);
shaderContext->analyzer.gprUseMask[gprIndex/8] |= (1<<(gprIndex%8));
if( aluInstruction.sourceOperand[f].rel != 0 )
{
// if indexed register access is used, all possibly referenced registers are stored to a separate array at the beginning of the group
shaderContext->analyzer.usesRelativeGPRRead = true;
continue;
}
}
}
if( aluInstruction.destRel != 0 )
shaderContext->analyzer.usesRelativeGPRWrite = true;
shaderContext->analyzer.gprUseMask[aluInstruction.destGpr/8] |= (1<<(aluInstruction.destGpr%8));
}
}
// analyze TEX CF instruction and all instructions within the TEX clause
void LatteDecompiler_analyzeTEXClause(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
LatteDecompilerShader* shader = shaderContext->shader;
for(auto& texInstruction : cfInstruction->instructionsTEX)
{
if( texInstruction.opcode == GPU7_TEX_INST_SAMPLE ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_L ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_LB ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_LZ ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_L ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_LZ ||
texInstruction.opcode == GPU7_TEX_INST_FETCH4 ||
texInstruction.opcode == GPU7_TEX_INST_SAMPLE_G ||
texInstruction.opcode == GPU7_TEX_INST_LD )
{
if (texInstruction.textureFetch.textureIndex < 0 || texInstruction.textureFetch.textureIndex >= LATTE_NUM_MAX_TEX_UNITS)
{
cemuLog_logDebug(LogType::Force, "Shader {:16x} has out of bounds texture access (texture {})", shaderContext->shader->baseHash, (sint32)texInstruction.textureFetch.textureIndex);
continue;
}
if( texInstruction.textureFetch.samplerIndex < 0 || texInstruction.textureFetch.samplerIndex >= 0x12 )
cemu_assert_debug(false);
if(shaderContext->output->textureUnitMask[texInstruction.textureFetch.textureIndex] && shader->textureUnitSamplerAssignment[texInstruction.textureFetch.textureIndex] != texInstruction.textureFetch.samplerIndex && shader->textureUnitSamplerAssignment[texInstruction.textureFetch.textureIndex] != LATTE_DECOMPILER_SAMPLER_NONE )
{
cemu_assert_debug(false);
}
shaderContext->output->textureUnitMask[texInstruction.textureFetch.textureIndex] = true;
shader->textureUnitSamplerAssignment[texInstruction.textureFetch.textureIndex] = texInstruction.textureFetch.samplerIndex;
if( texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_L || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_LZ)
shader->textureUsesDepthCompare[texInstruction.textureFetch.textureIndex] = true;
bool useTexelCoords = false;
if (texInstruction.opcode == GPU7_TEX_INST_SAMPLE && (texInstruction.textureFetch.unnormalized[0] && texInstruction.textureFetch.unnormalized[1] && texInstruction.textureFetch.unnormalized[2] && texInstruction.textureFetch.unnormalized[3]))
useTexelCoords = true;
else if (texInstruction.opcode == GPU7_TEX_INST_LD)
useTexelCoords = true;
if (useTexelCoords)
{
shaderContext->analyzer.texUnitUsesTexelCoordinates.set(texInstruction.textureFetch.textureIndex);
}
}
else if( texInstruction.opcode == GPU7_TEX_INST_GET_COMP_TEX_LOD || texInstruction.opcode == GPU7_TEX_INST_GET_TEXTURE_RESINFO )
{
if( texInstruction.textureFetch.textureIndex < 0 || texInstruction.textureFetch.textureIndex >= LATTE_NUM_MAX_TEX_UNITS )
debugBreakpoint();
if( texInstruction.textureFetch.samplerIndex != 0 )
debugBreakpoint(); // sampler is ignored and should be 0
shaderContext->output->textureUnitMask[texInstruction.textureFetch.textureIndex] = true;
}
else if( texInstruction.opcode == GPU7_TEX_INST_SET_CUBEMAP_INDEX )
{
// no analysis required
}
else if (texInstruction.opcode == GPU7_TEX_INST_GET_GRADIENTS_H || texInstruction.opcode == GPU7_TEX_INST_GET_GRADIENTS_V)
{
// no analysis required
}
else if (texInstruction.opcode == GPU7_TEX_INST_SET_GRADIENTS_H || texInstruction.opcode == GPU7_TEX_INST_SET_GRADIENTS_V)
{
shaderContext->analyzer.hasGradientLookup = true;
}
else if( texInstruction.opcode == GPU7_TEX_INST_VFETCH )
{
// VFETCH is used to access uniform buffers dynamically
if( texInstruction.textureFetch.textureIndex >= 0x80 && texInstruction.textureFetch.textureIndex <= 0x8F )
{
uint32 uniformBufferIndex = texInstruction.textureFetch.textureIndex - 0x80;
shaderContext->analyzer.uniformBufferAccessTracker[uniformBufferIndex].TrackAccess(0, true);
}
else if( texInstruction.textureFetch.textureIndex == 0x9F && shader->shaderType == LatteConst::ShaderType::Geometry )
{
// instruction to read geometry shader input from ringbuffer
}
else
debugBreakpoint();
}
else if (texInstruction.opcode == GPU7_TEX_INST_MEM)
{
// SSBO access
shaderContext->analyzer.hasSSBORead = true;
}
else
debugBreakpoint();
// mark read and written registers as used
if(texInstruction.dstGpr < LATTE_NUM_GPR)
shaderContext->analyzer.gprUseMask[texInstruction.dstGpr/8] |= (1<<(texInstruction.dstGpr%8));
if(texInstruction.srcGpr < LATTE_NUM_GPR)
shaderContext->analyzer.gprUseMask[texInstruction.srcGpr/8] |= (1<<(texInstruction.srcGpr%8));
}
}
/*
* Analyze export CF instruction
*/
void LatteDecompiler_analyzeExport(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
LatteDecompilerShader* shader = shaderContext->shader;
if( shader->shaderType == LatteConst::ShaderType::Pixel )
{
if( cfInstruction->exportType == 0 && cfInstruction->exportArrayBase < 8 )
{
// remember color outputs that are written
for(uint32 i=0; i<(cfInstruction->exportBurstCount+1); i++)
{
sint32 colorOutputIndex = LatteDecompiler_getColorOutputIndexFromExportIndex(shaderContext, cfInstruction->exportArrayBase+i);
shader->pixelColorOutputMask |= (1<<colorOutputIndex);
}
}
else if( cfInstruction->exportType == 0 && cfInstruction->exportArrayBase == 61 )
{
// writes pixel depth
}
else
debugBreakpoint();
}
else if (shader->shaderType == LatteConst::ShaderType::Vertex)
{
if (cfInstruction->exportType == 2 && cfInstruction->exportArrayBase < 32)
{
shaderContext->shader->outputParameterMask |= (1<<cfInstruction->exportArrayBase);
}
else if (cfInstruction->exportType == 1 && cfInstruction->exportArrayBase == GPU7_DECOMPILER_CF_EXPORT_POINT_SIZE)
{
shaderContext->analyzer.writesPointSize = true;
}
}
// mark input GPRs as used
for(uint32 i=0; i<(cfInstruction->exportBurstCount+1); i++)
{
shaderContext->analyzer.gprUseMask[(cfInstruction->exportSourceGPR+i)/8] |= (1<<((cfInstruction->exportSourceGPR+i)%8));
}
}
void LatteDecompiler_analyzeSubroutine(LatteDecompilerShaderContext* shaderContext, uint32 cfAddr)
{
// analyze CF and clauses up to RET statement
// todo - find cfInstruction index from cfAddr
cemu_assert_debug(false);
for(auto& cfInstruction : shaderContext->cfInstructions)
{
if (cfInstruction.type == GPU7_CF_INST_ALU || cfInstruction.type == GPU7_CF_INST_ALU_PUSH_BEFORE || cfInstruction.type == GPU7_CF_INST_ALU_POP_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_POP2_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_BREAK || cfInstruction.type == GPU7_CF_INST_ALU_ELSE_AFTER)
{
LatteDecompiler_analyzeALUClause(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_TEX)
{
LatteDecompiler_analyzeTEXClause(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_EXPORT || cfInstruction.type == GPU7_CF_INST_EXPORT_DONE)
{
LatteDecompiler_analyzeExport(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_ELSE || cfInstruction.type == GPU7_CF_INST_POP)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_START_DX10 || cfInstruction.type == GPU7_CF_INST_LOOP_END ||
cfInstruction.type == GPU7_CF_INST_LOOP_START_NO_AL)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_BREAK)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
}
else if (cfInstruction.type == GPU7_CF_INST_EMIT_VERTEX)
{
// nothing to analyze
}
else if (cfInstruction.type == GPU7_CF_INST_CALL)
{
cemu_assert_debug(false); // CALLs inside subroutines are still todo
}
else
{
cemu_assert_unimplemented();
}
}
}
namespace LatteDecompiler
{
void _initTextureBindingPointsGL(LatteDecompilerShaderContext* decompilerContext)
{
// for OpenGL we use the relative texture unit index
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
if (!decompilerContext->output->textureUnitMask[i])
continue;
sint32 textureBindingPoint;
if (decompilerContext->shaderType == LatteConst::ShaderType::Vertex)
textureBindingPoint = i + LATTE_CEMU_VS_TEX_UNIT_BASE;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Geometry)
textureBindingPoint = i + LATTE_CEMU_GS_TEX_UNIT_BASE;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel)
textureBindingPoint = i + LATTE_CEMU_PS_TEX_UNIT_BASE;
decompilerContext->output->resourceMappingGL.textureUnitToBindingPoint[i] = textureBindingPoint;
}
}
void _initTextureBindingPointsVK(LatteDecompilerShaderContext* decompilerContext)
{
// for Vulkan we use consecutive indices
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
if (!decompilerContext->output->textureUnitMask[i])
continue;
decompilerContext->output->resourceMappingVK.textureUnitToBindingPoint[i] = decompilerContext->currentBindingPointVK;
decompilerContext->currentBindingPointVK++;
}
}
void _initHasUniformVarBlock(LatteDecompilerShaderContext* decompilerContext)
{
decompilerContext->hasUniformVarBlock = false;
if (decompilerContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_REMAPPED)
decompilerContext->hasUniformVarBlock = true;
else if (decompilerContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CFILE)
decompilerContext->hasUniformVarBlock = true;
bool hasAnyViewportScaleDisabled =
!decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_X_SCALE_ENA() ||
!decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Y_SCALE_ENA() ||
!decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Z_SCALE_ENA();
// we currently only support all on/off. Individual component scaling is not supported
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_X_SCALE_ENA() == !hasAnyViewportScaleDisabled);
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Y_SCALE_ENA() == !hasAnyViewportScaleDisabled);
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Z_SCALE_ENA() == !hasAnyViewportScaleDisabled);
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_X_OFFSET_ENA() == !hasAnyViewportScaleDisabled);
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Y_OFFSET_ENA() == !hasAnyViewportScaleDisabled);
cemu_assert_debug(decompilerContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Z_OFFSET_ENA() == !hasAnyViewportScaleDisabled);
if (decompilerContext->shaderType == LatteConst::ShaderType::Vertex && hasAnyViewportScaleDisabled)
decompilerContext->hasUniformVarBlock = true; // uf_windowSpaceToClipSpaceTransform
bool alphaTestEnable = decompilerContext->contextRegistersNew->SX_ALPHA_TEST_CONTROL.get_ALPHA_TEST_ENABLE();
if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel && alphaTestEnable != 0)
decompilerContext->hasUniformVarBlock = true; // uf_alphaTestRef
if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel)
decompilerContext->hasUniformVarBlock = true; // uf_fragCoordScale
if (decompilerContext->shaderType == LatteConst::ShaderType::Vertex && decompilerContext->analyzer.outputPointSize && decompilerContext->analyzer.writesPointSize == false)
decompilerContext->hasUniformVarBlock = true; // uf_pointSize
if (decompilerContext->shaderType == LatteConst::ShaderType::Geometry && decompilerContext->analyzer.outputPointSize && decompilerContext->analyzer.writesPointSize == false)
decompilerContext->hasUniformVarBlock = true; // uf_pointSize
	if (decompilerContext->analyzer.useSSBOForStreamout &&
		((decompilerContext->shaderType == LatteConst::ShaderType::Vertex && !decompilerContext->options->usesGeometryShader) ||
		(decompilerContext->shaderType == LatteConst::ShaderType::Geometry)))
{
decompilerContext->hasUniformVarBlock = true; // uf_verticesPerInstance and uf_streamoutBufferBase*
}
}
void _initUniformBindingPoints(LatteDecompilerShaderContext* decompilerContext)
{
// check if uniform vars block has at least one variable
_initHasUniformVarBlock(decompilerContext);
if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel)
{
for (sint32 t = 0; t < LATTE_NUM_MAX_TEX_UNITS; t++)
{
if (decompilerContext->analyzer.texUnitUsesTexelCoordinates.test(t) == false)
continue;
decompilerContext->hasUniformVarBlock = true; // uf_tex%dScale
}
}
// assign binding point to uniform var block
	decompilerContext->output->resourceMappingGL.uniformVarsBufferBindingPoint = -1; // OpenGL currently doesn't use a uniform block
if (decompilerContext->hasUniformVarBlock)
{
decompilerContext->output->resourceMappingVK.uniformVarsBufferBindingPoint = decompilerContext->currentBindingPointVK;
decompilerContext->currentBindingPointVK++;
}
else
decompilerContext->output->resourceMappingVK.uniformVarsBufferBindingPoint = -1;
// assign binding points to uniform buffers
if (decompilerContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK)
{
// for Vulkan we use consecutive indices
for (uint32 i = 0; i < LATTE_NUM_MAX_UNIFORM_BUFFERS; i++)
{
if (!decompilerContext->analyzer.uniformBufferAccessTracker[i].HasAccess())
continue;
sint32 uniformBindingPoint = i;
if (decompilerContext->shaderType == LatteConst::ShaderType::Geometry)
uniformBindingPoint += 64;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Vertex)
uniformBindingPoint += 0;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel)
uniformBindingPoint += 32;
decompilerContext->output->resourceMappingVK.uniformBuffersBindingPoint[i] = decompilerContext->currentBindingPointVK;
decompilerContext->currentBindingPointVK++;
}
// for OpenGL we use the relative buffer index
for (uint32 i = 0; i < LATTE_NUM_MAX_UNIFORM_BUFFERS; i++)
{
if (!decompilerContext->analyzer.uniformBufferAccessTracker[i].HasAccess())
continue;
sint32 uniformBindingPoint = i;
if (decompilerContext->shaderType == LatteConst::ShaderType::Geometry)
uniformBindingPoint += 64;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Vertex)
uniformBindingPoint += 0;
else if (decompilerContext->shaderType == LatteConst::ShaderType::Pixel)
uniformBindingPoint += 32;
decompilerContext->output->resourceMappingGL.uniformBuffersBindingPoint[i] = uniformBindingPoint;
}
}
// shader storage buffer for alternative transform feedback path
if (decompilerContext->analyzer.useSSBOForStreamout)
{
decompilerContext->output->resourceMappingVK.tfStorageBindingPoint = decompilerContext->currentBindingPointVK;
decompilerContext->currentBindingPointVK++;
}
}
void _initAttributeBindingPoints(LatteDecompilerShaderContext* decompilerContext)
{
if (decompilerContext->shaderType != LatteConst::ShaderType::Vertex)
return;
// create input attribute binding mapping
// OpenGL and Vulkan use consecutive indices starting at 0
sint8 bindingIndex = 0;
for (sint32 i = 0; i < LATTE_NUM_MAX_ATTRIBUTE_LOCATIONS; i++)
{
if (decompilerContext->analyzer.inputAttributSemanticMask[i])
{
decompilerContext->output->resourceMappingGL.attributeMapping[i] = bindingIndex;
decompilerContext->output->resourceMappingVK.attributeMapping[i] = bindingIndex;
bindingIndex++;
}
}
}
}
/*
* Analyze the shader program
* This will help to determine:
* 1) Uniform usage
* 2) Texture usage
* 3) Data types
* 4) CF stack and execution flow
*/
void LatteDecompiler_analyze(LatteDecompilerShaderContext* shaderContext, LatteDecompilerShader* shader)
{
// analyze render state
shaderContext->analyzer.isPointsPrimitive = shaderContext->contextRegistersNew->VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE() == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::POINTS;
shaderContext->analyzer.hasStreamoutEnable = shaderContext->contextRegisters[mmVGT_STRMOUT_EN] != 0; // set if the shader is used for transform feedback operations
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex && !shaderContext->options->usesGeometryShader)
shaderContext->analyzer.outputPointSize = shaderContext->analyzer.isPointsPrimitive;
else if (shaderContext->shaderType == LatteConst::ShaderType::Geometry)
{
uint32 gsOutPrimType = shaderContext->contextRegisters[mmVGT_GS_OUT_PRIM_TYPE];
if (gsOutPrimType == 0) // points
shaderContext->analyzer.outputPointSize = true;
}
// analyze input attributes for vertex/geometry shader
if (shader->shaderType == LatteConst::ShaderType::Vertex || shader->shaderType == LatteConst::ShaderType::Geometry)
{
if(shaderContext->fetchShader)
{
LatteFetchShader* parsedFetchShader = shaderContext->fetchShader;
for(auto& bufferGroup : parsedFetchShader->bufferGroups)
{
for (sint32 i = 0; i < bufferGroup.attribCount; i++)
{
uint8 semanticId = bufferGroup.attrib[i].semanticId;
if (semanticId == 0xFF)
{
// unused attribute? Found in Hot Wheels: World's best driver
continue;
}
cemu_assert_debug(semanticId < 0x80);
shaderContext->analyzer.inputAttributSemanticMask[semanticId] = true;
}
}
}
}
// list of subroutines (call destinations)
std::vector<uint32> list_subroutineAddrs;
// analyze CF and clauses
for(auto& cfInstruction : shaderContext->cfInstructions)
{
if (cfInstruction.type == GPU7_CF_INST_ALU || cfInstruction.type == GPU7_CF_INST_ALU_PUSH_BEFORE || cfInstruction.type == GPU7_CF_INST_ALU_POP_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_POP2_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_BREAK || cfInstruction.type == GPU7_CF_INST_ALU_ELSE_AFTER)
{
LatteDecompiler_analyzeALUClause(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_TEX)
{
LatteDecompiler_analyzeTEXClause(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_EXPORT || cfInstruction.type == GPU7_CF_INST_EXPORT_DONE)
{
LatteDecompiler_analyzeExport(shaderContext, &cfInstruction);
}
else if (cfInstruction.type == GPU7_CF_INST_ELSE || cfInstruction.type == GPU7_CF_INST_POP)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_START_DX10 || cfInstruction.type == GPU7_CF_INST_LOOP_END ||
cfInstruction.type == GPU7_CF_INST_LOOP_START_NO_AL)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
shaderContext->analyzer.hasLoops = true;
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_BREAK)
{
shaderContext->analyzer.modifiesPixelActiveState = true;
shaderContext->analyzer.hasLoops = true;
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cfInstruction.type == GPU7_CF_INST_MEM_STREAM1_WRITE)
{
uint32 streamoutBufferIndex;
if (cfInstruction.type == GPU7_CF_INST_MEM_STREAM0_WRITE)
streamoutBufferIndex = 0;
else if (cfInstruction.type == GPU7_CF_INST_MEM_STREAM1_WRITE)
streamoutBufferIndex = 1;
else
cemu_assert_debug(false);
shaderContext->analyzer.hasStreamoutWrite = true;
cemu_assert(streamoutBufferIndex < shaderContext->output->streamoutBufferWriteMask.size());
shaderContext->output->streamoutBufferWriteMask[streamoutBufferIndex] = true;
uint32 vectorWriteSize = 0;
for (sint32 f = 0; f < 4; f++)
{
if ((cfInstruction.memWriteCompMask & (1 << f)) != 0)
vectorWriteSize = (f + 1) * 4;
shaderContext->output->streamoutBufferStride[f] = shaderContext->contextRegisters[mmVGT_STRMOUT_VTX_STRIDE_0 + f * 4] << 2;
}
cemu_assert_debug((cfInstruction.exportArrayBase * 4 + vectorWriteSize) <= shaderContext->output->streamoutBufferStride[streamoutBufferIndex]);
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_RING_WRITE)
{
// track number of parameters that are output (simplified by just tracking the offset of the last one)
if (cfInstruction.memWriteElemSize != 3)
debugBreakpoint();
if (cfInstruction.exportBurstCount != 0 && cfInstruction.memWriteElemSize != 3)
{
debugBreakpoint();
}
uint32 dwordWriteCount = (cfInstruction.exportBurstCount + 1) * 4;
uint32 numRingParameter = (cfInstruction.exportArrayBase + dwordWriteCount) / 4;
shader->ringParameterCount = std::max(shader->ringParameterCount, numRingParameter);
// mark input GPRs as used
for (uint32 i = 0; i < (cfInstruction.exportBurstCount + 1); i++)
{
shaderContext->analyzer.gprUseMask[(cfInstruction.exportSourceGPR + i) / 8] |= (1 << ((cfInstruction.exportSourceGPR + i) % 8));
}
}
else if (cfInstruction.type == GPU7_CF_INST_EMIT_VERTEX)
{
shaderContext->analyzer.numEmitVertex++;
}
else if (cfInstruction.type == GPU7_CF_INST_CALL)
{
// CALL instruction does not need analyzing
// and subroutines are analyzed separately
}
else
cemu_assert_unimplemented();
}
// analyze subroutines
for (auto subroutineAddr : list_subroutineAddrs)
{
LatteDecompiler_analyzeSubroutine(shaderContext, subroutineAddr);
}
// decide which uniform mode to use
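	// the modes below mean:
	//  FULL_CBANK  - at least one uniform buffer is indexed with a relative (dynamic) offset, so whole buffers must stay addressable
	//  FULL_CFILE  - the uniform register file is indexed dynamically, so the full register file is needed
	//  REMAPPED    - only statically addressed uniforms are accessed; they can be packed into a compact remapped array
	//  NONE        - the shader reads no uniforms at all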
bool hasAnyDynamicBufferAccess = false;
bool hasAnyBufferAccess = false;
for(auto& it : shaderContext->analyzer.uniformBufferAccessTracker)
{
if( it.HasRelativeAccess() )
hasAnyDynamicBufferAccess = true;
if( it.HasAccess() )
hasAnyBufferAccess = true;
}
if (hasAnyDynamicBufferAccess)
{
shader->uniformMode = LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK;
}
else if(shaderContext->analyzer.uniformRegisterAccessTracker.HasRelativeAccess() )
{
shader->uniformMode = LATTE_DECOMPILER_UNIFORM_MODE_FULL_CFILE;
}
else if(hasAnyBufferAccess || shaderContext->analyzer.uniformRegisterAccessTracker.HasAccess() )
{
shader->uniformMode = LATTE_DECOMPILER_UNIFORM_MODE_REMAPPED;
}
else
{
shader->uniformMode = LATTE_DECOMPILER_UNIFORM_MODE_NONE;
}
// generate compact list of uniform buffers (for faster access)
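	// each entry records the buffer index and its size in bytes; DetermineSize() returns the
	// number of accessed vec4 elements, hence the multiplication by 16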
cemu_assert_debug(shader->list_quickBufferList.empty());
for (uint32 i = 0; i < LATTE_NUM_MAX_UNIFORM_BUFFERS; i++)
{
if( !shaderContext->analyzer.uniformBufferAccessTracker[i].HasAccess() )
continue;
LatteDecompilerShader::QuickBufferEntry entry;
entry.index = i;
entry.size = shaderContext->analyzer.uniformBufferAccessTracker[i].DetermineSize(shaderContext->shaderBaseHash, LATTE_GLSL_DYNAMIC_UNIFORM_BLOCK_SIZE) * 16;
shader->list_quickBufferList.push_back(entry);
}
// get dimension of each used texture
_LatteRegisterSetTextureUnit* texRegs = nullptr;
if( shader->shaderType == LatteConst::ShaderType::Vertex )
texRegs = shaderContext->contextRegistersNew->SQ_TEX_START_VS;
else if( shader->shaderType == LatteConst::ShaderType::Pixel )
texRegs = shaderContext->contextRegistersNew->SQ_TEX_START_PS;
else if( shader->shaderType == LatteConst::ShaderType::Geometry )
texRegs = shaderContext->contextRegistersNew->SQ_TEX_START_GS;
for(sint32 i=0; i<LATTE_NUM_MAX_TEX_UNITS; i++)
{
if (!shaderContext->output->textureUnitMask[i])
{
// texture unit not used
shader->textureUnitDim[i] = (Latte::E_DIM)0xFF;
continue;
}
auto& texUnit = texRegs[i];
auto dim = texUnit.word0.get_DIM();
shader->textureUnitDim[i] = dim;
if(dim == Latte::E_DIM::DIM_CUBEMAP)
shaderContext->analyzer.hasCubeMapTexture = true;
shader->textureIsIntegerFormat[i] = texUnit.word4.get_NUM_FORM_ALL() == Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N::E_NUM_FORMAT_ALL::NUM_FORMAT_INT;
}
// generate list of used texture units
shader->textureUnitListCount = 0;
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
if (shaderContext->output->textureUnitMask[i])
{
shader->textureUnitList[shader->textureUnitListCount] = i;
shader->textureUnitListCount++;
}
}
// for geometry shaders check the copy shader for stream writes
if (shader->shaderType == LatteConst::ShaderType::Geometry && shaderContext->parsedGSCopyShader->list_streamWrites.empty() == false)
{
shaderContext->analyzer.hasStreamoutWrite = true;
if (shaderContext->contextRegisters[mmVGT_STRMOUT_EN] != 0)
shaderContext->analyzer.hasStreamoutEnable = true;
for (auto& it : shaderContext->parsedGSCopyShader->list_streamWrites)
{
shaderContext->output->streamoutBufferWriteMask[it.bufferIndex] = true;
uint32 vectorWriteSize = 0;
for (sint32 f = 0; f < 4; f++)
{
if ((it.memWriteCompMask&(1 << f)) != 0)
vectorWriteSize = (f + 1) * 4;
}
shaderContext->output->streamoutBufferStride[it.bufferIndex] = std::max(shaderContext->output->streamoutBufferStride[it.bufferIndex], it.exportArrayBase * 4 + vectorWriteSize);
}
}
// analyze input attributes again (if shader has relative GPR read)
if(shaderContext->analyzer.usesRelativeGPRRead && (shader->shaderType == LatteConst::ShaderType::Vertex || shader->shaderType == LatteConst::ShaderType::Geometry) )
{
if(shaderContext->fetchShader)
{
LatteFetchShader* parsedFetchShader = shaderContext->fetchShader;
for(auto& bufferGroup : parsedFetchShader->bufferGroups)
{
for (sint32 i = 0; i < bufferGroup.attribCount; i++)
{
uint32 registerIndex;
// get register index based on vtx semantic table
uint32 attributeShaderLoc = 0xFFFFFFFF;
for (sint32 f = 0; f < 32; f++)
{
if (shaderContext->contextRegisters[mmSQ_VTX_SEMANTIC_0 + f] == bufferGroup.attrib[i].semanticId)
{
attributeShaderLoc = f;
break;
}
}
if (attributeShaderLoc == 0xFFFFFFFF)
continue; // attribute is not mapped to VS input
registerIndex = attributeShaderLoc + 1;
shaderContext->analyzer.gprUseMask[registerIndex / 8] |= (1 << (registerIndex % 8));
}
}
}
}
else if (shaderContext->analyzer.usesRelativeGPRRead && shader->shaderType == LatteConst::ShaderType::Pixel)
{
// mark pixel shader inputs as used if there is any relative GPR access
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
for (sint32 i = 0; i < psInputTable->count; i++)
{
shaderContext->analyzer.gprUseMask[i / 8] |= (1 << (i % 8));
}
}
// analyze CF stack
sint32 cfCurrentStackDepth = 0;
sint32 cfCurrentMaxStackDepth = 0;
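	// activeStackDepth stores the stack depth at which each CF instruction executes:
	// ALU_PUSH_BEFORE pushes before its clause runs (the post-push depth is recorded), while
	// the POP variants record the pre-pop depth and decrement afterwards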
for(auto& cfInstruction : shaderContext->cfInstructions)
{
if (cfInstruction.type == GPU7_CF_INST_ALU)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_ALU_PUSH_BEFORE )
{
cfCurrentStackDepth++;
cfCurrentMaxStackDepth = std::max(cfCurrentMaxStackDepth, cfCurrentStackDepth);
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_ALU_POP_AFTER)
{
cfInstruction.activeStackDepth = cfCurrentStackDepth;
cfCurrentStackDepth--;
}
else if (cfInstruction.type == GPU7_CF_INST_ALU_POP2_AFTER)
{
cfInstruction.activeStackDepth = cfCurrentStackDepth;
cfCurrentStackDepth -= 2;
}
else if (cfInstruction.type == GPU7_CF_INST_ALU_BREAK )
{
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_ALU_ELSE_AFTER)
{
if (cfInstruction.popCount != 0)
debugBreakpoint();
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_ELSE )
{
//if (cfInstruction.popCount != 0)
// debugBreakpoint(); -> Only relevant when ELSE jump is taken
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_POP)
{
cfInstruction.activeStackDepth = cfCurrentStackDepth;
cfCurrentStackDepth -= cfInstruction.popCount;
if (cfCurrentStackDepth < 0)
debugBreakpoint();
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_START_DX10 || cfInstruction.type == GPU7_CF_INST_LOOP_END ||
cfInstruction.type == GPU7_CF_INST_LOOP_START_NO_AL)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_BREAK)
{
// since we assume that the break is not taken (for all pixels), we also don't need to worry about the stack depth adjustment
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_TEX)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_EXPORT || cfInstruction.type == GPU7_CF_INST_EXPORT_DONE)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cfInstruction.type == GPU7_CF_INST_MEM_STREAM1_WRITE)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_RING_WRITE)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_EMIT_VERTEX)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else if (cfInstruction.type == GPU7_CF_INST_CALL)
{
// no effect on stack depth
cfInstruction.activeStackDepth = cfCurrentStackDepth;
}
else
{
cemu_assert_debug(false);
}
}
shaderContext->analyzer.activeStackMaxDepth = cfCurrentMaxStackDepth;
if (cfCurrentStackDepth != 0)
{
debug_printf("cfCurrentStackDepth is not zero after all CF instructions. depth is %d\n", cfCurrentStackDepth);
cemu_assert_debug(false);
}
if(list_subroutineAddrs.empty() == false)
cemuLog_logDebug(LogType::Force, "Todo - analyze shader subroutine CF stack");
// TF mode
if (shaderContext->options->useTFViaSSBO && shaderContext->output->streamoutBufferWriteMask.any())
{
shaderContext->analyzer.useSSBOForStreamout = true;
}
// assign binding points
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex)
shaderContext->output->resourceMappingVK.setIndex = 0;
else if (shaderContext->shaderType == LatteConst::ShaderType::Pixel)
shaderContext->output->resourceMappingVK.setIndex = 1;
else if (shaderContext->shaderType == LatteConst::ShaderType::Geometry)
shaderContext->output->resourceMappingVK.setIndex = 2;
LatteDecompiler::_initTextureBindingPointsGL(shaderContext);
LatteDecompiler::_initTextureBindingPointsVK(shaderContext);
LatteDecompiler::_initUniformBindingPoints(shaderContext);
LatteDecompiler::_initAttributeBindingPoints(shaderContext);
}
| 41,043 | C++ | .cpp | 980 | 38.560204 | 329 | 0.757544 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,221 | LatteDecompilerRegisterDataTypeTracker.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerRegisterDataTypeTracker.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInternal.h"
void LatteDecompiler_analyzeDataTypes(LatteDecompilerShaderContext* shaderContext)
{
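	// pick a single base representation for all GPRs: shaders that use integer ALU ops keep
	// registers as signed int (float values are presumably bitcast where needed by the emitter's
	// type-conversion helpers), otherwise everything stays float; relative GPR writes force an
	// indexable register array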
// determine default type
if (shaderContext->analyzer.usesIntegerValues)
{
shaderContext->typeTracker.defaultDataType = LATTE_DECOMPILER_DTYPE_SIGNED_INT;
shaderContext->typeTracker.genIntReg = true;
}
else
{
shaderContext->typeTracker.defaultDataType = LATTE_DECOMPILER_DTYPE_FLOAT;
shaderContext->typeTracker.genFloatReg = true;
}
// determine register representation
if (shaderContext->analyzer.usesRelativeGPRWrite)
{
shaderContext->typeTracker.useArrayGPRs = true;
}
}
| 893 | C++ | .cpp | 24 | 35.333333 | 82 | 0.834292 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,222 | LatteDecompilerEmitGLSLAttrDecoder.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerEmitGLSLAttrDecoder.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "util/helpers/StringBuf.h"
#define _CRLF "\r\n"
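// The _read*Attribute* helpers below copy the raw per-vertex input (attrDataSem<N>) into the
// generic attrDecoder register as unsigned integers. The big-endian variants additionally
// byte-swap each component via shift/mask sequences; unused components are zeroed.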
void _readLittleEndianAttributeU32x4(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = attrDataSem{};" _CRLF, attributeInputIndex);
}
void _readLittleEndianAttributeU32x3(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = uvec4(attrDataSem{}.xyz,0);" _CRLF, attributeInputIndex);
}
void _readLittleEndianAttributeU32x2(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = uvec4(attrDataSem{}.xy,0,0);" _CRLF, attributeInputIndex);
}
void _readLittleEndianAttributeU32x1(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = uvec4(attrDataSem{}.x,0,0,0);" _CRLF, attributeInputIndex);
}
void _readLittleEndianAttributeU16x2(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = uvec4(attrDataSem{}.xy,0,0);" _CRLF, attributeInputIndex);
}
void _readLittleEndianAttributeU16x4(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = attrDataSem{};" _CRLF, attributeInputIndex);
}
void _readBigEndianAttributeU32x4(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder = attrDataSem{};" _CRLF, attributeInputIndex);
src->add("attrDecoder = (attrDecoder>>24)|((attrDecoder>>8)&0xFF00)|((attrDecoder<<8)&0xFF0000)|((attrDecoder<<24));" _CRLF);
}
void _readBigEndianAttributeU32x3(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.xyz = attrDataSem{}.xyz;" _CRLF, attributeInputIndex);
src->add("attrDecoder.xyz = (attrDecoder.xyz>>24)|((attrDecoder.xyz>>8)&0xFF00)|((attrDecoder.xyz<<8)&0xFF0000)|((attrDecoder.xyz<<24));" _CRLF);
src->add("attrDecoder.w = 0;" _CRLF);
}
void _readBigEndianAttributeU32x2(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.xy = attrDataSem{}.xy;" _CRLF, attributeInputIndex);
src->add("attrDecoder.xy = (attrDecoder.xy>>24)|((attrDecoder.xy>>8)&0xFF00)|((attrDecoder.xy<<8)&0xFF0000)|((attrDecoder.xy<<24));" _CRLF);
src->add("attrDecoder.z = 0;" _CRLF);
src->add("attrDecoder.w = 0;" _CRLF);
}
void _readBigEndianAttributeU32x1(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.x = attrDataSem{}.x;" _CRLF, attributeInputIndex);
src->add("attrDecoder.x = (attrDecoder.x>>24)|((attrDecoder.x>>8)&0xFF00)|((attrDecoder.x<<8)&0xFF0000)|((attrDecoder.x<<24));" _CRLF);
src->add("attrDecoder.y = 0;" _CRLF);
src->add("attrDecoder.z = 0;" _CRLF);
src->add("attrDecoder.w = 0;" _CRLF);
}
void _readBigEndianAttributeU16x1(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.xy = attrDataSem{}.xy;" _CRLF, attributeInputIndex);
src->add("attrDecoder.x = ((attrDecoder.x>>8)&0xFF)|((attrDecoder.x<<8)&0xFF00);" _CRLF);
src->add("attrDecoder.y = 0;" _CRLF);
src->add("attrDecoder.z = 0;" _CRLF);
src->add("attrDecoder.w = 0;" _CRLF);
}
void _readBigEndianAttributeU16x2(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.xy = attrDataSem{}.xy;" _CRLF, attributeInputIndex);
src->add("attrDecoder.xy = ((attrDecoder.xy>>8)&0xFF)|((attrDecoder.xy<<8)&0xFF00);" _CRLF);
src->add("attrDecoder.z = 0;" _CRLF);
src->add("attrDecoder.w = 0;" _CRLF);
}
void _readBigEndianAttributeU16x4(LatteDecompilerShader* shaderContext, StringBuf* src, uint32 attributeInputIndex)
{
src->addFmt("attrDecoder.xyzw = attrDataSem{}.xyzw;" _CRLF, attributeInputIndex);
src->add("attrDecoder = ((attrDecoder>>8)&0xFF)|((attrDecoder<<8)&0xFF00);" _CRLF);
}
void LatteDecompiler_emitAttributeDecodeGLSL(LatteDecompilerShader* shaderContext, StringBuf* src, LatteParsedFetchShaderAttribute_t* attrib)
{
if (attrib->attributeBufferIndex >= Latte::GPU_LIMITS::NUM_VERTEX_BUFFERS)
{
src->add("attrDecoder = ivec4(0);" _CRLF);
return;
}
uint32 attributeInputIndex = attrib->semanticId;
if( attrib->endianSwap == LatteConst::VertexFetchEndianMode::SWAP_U32 )
{
if( attrib->format == FMT_32_32_32_32_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU32x4(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_32_32_32_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU32x3(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_32_32_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU32x2(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_32_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_2_10_10_10 && attrib->nfa == 0 )
{
_readBigEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
// Bayonetta 2 uses this format to store normals
src->add("attrDecoder.xyzw = uvec4((attrDecoder.x>>0)&0x3FF,(attrDecoder.x>>10)&0x3FF,(attrDecoder.x>>20)&0x3FF,(attrDecoder.x>>30)&0x3);" _CRLF);
if (attrib->isSigned != 0)
{
src->add("if( (attrDecoder.x&0x200) != 0 ) attrDecoder.x |= 0xFFFFFC00;" _CRLF);
src->add("if( (attrDecoder.y&0x200) != 0 ) attrDecoder.y |= 0xFFFFFC00;" _CRLF);
src->add("if( (attrDecoder.z&0x200) != 0 ) attrDecoder.z |= 0xFFFFFC00;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/511.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/511.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/511.0,-1.0));" _CRLF);
}
else
{
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/1023.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/1023.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/1023.0,-1.0));" _CRLF);
}
src->add("attrDecoder.w = floatBitsToUint(float(attrDecoder.w));" _CRLF); // unsure?
}
else if( attrib->format == FMT_32_32_32_32 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU32x4(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_32_32_32 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU32x3(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_32_32 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU32x2(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_32 && attrib->nfa == 1 && attrib->isSigned == 0)
{
_readBigEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_32 && attrib->nfa == 1 && attrib->isSigned == 1)
{
// we can just read the signed s32 as a u32 since no sign-extension is necessary
_readBigEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 0 && attrib->isSigned == 0 )
{
// seen in Minecraft Wii U Edition
src->addFmt("attrDecoder.xyzw = floatBitsToUint(vec4(attrDataSem{}.wzyx)/255.0);" _CRLF, attributeInputIndex);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 0 && attrib->isSigned != 0 )
{
// seen in Minecraft Wii U Edition
src->addFmt("attrDecoder.xyzw = attrDataSem{}.wzyx;" _CRLF, attributeInputIndex);
src->add("if( (attrDecoder.x&0x80) != 0 ) attrDecoder.x |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.y&0x80) != 0 ) attrDecoder.y |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.z&0x80) != 0 ) attrDecoder.z |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.w&0x80) != 0 ) attrDecoder.w |= 0xFFFFFF00;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(max(float(int(attrDecoder.w))/127.0,-1.0));" _CRLF);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
// seen in Minecraft Wii U Edition
src->addFmt("attrDecoder.xyzw = attrDataSem{}.wzyx;" _CRLF, attributeInputIndex);
}
else if (attrib->format == FMT_8_8_8_8 && attrib->nfa == 2 && attrib->isSigned == 0)
{
// seen in Ben 10 Omniverse
src->addFmt("attrDecoder.xyzw = floatBitsToUint(vec4(attrDataSem{}.wzyx));" _CRLF, attributeInputIndex);
}
else
{
cemuLog_log(LogType::Force, "_emitAttributeDecodeGLSL(): Unsupported fmt {:02x} nfa {} signed {} endian {}\n", attrib->format, attrib->nfa, attrib->isSigned, attrib->endianSwap);
cemu_assert_unimplemented();
}
}
else if( attrib->endianSwap == LatteConst::VertexFetchEndianMode::SWAP_NONE )
{
if( attrib->format == FMT_32_32_32_32_FLOAT && attrib->nfa == 2 )
{
_readLittleEndianAttributeU32x4(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_32_32_32_FLOAT && attrib->nfa == 2)
{
_readLittleEndianAttributeU32x3(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_32_32_FLOAT && attrib->nfa == 2)
{
// seen in Cities of Gold
_readLittleEndianAttributeU32x2(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_32 && attrib->nfa == 1 && attrib->isSigned == 0)
{
// seen in Nano Assault Neo
_readLittleEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_2_10_10_10 && attrib->nfa == 0 && attrib->isSigned == 0)
{
// seen in Fast Racing Neo
_readLittleEndianAttributeU32x1(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xyzw = uvec4((attrDecoder.x>>0)&0x3FF,(attrDecoder.x>>10)&0x3FF,(attrDecoder.x>>20)&0x3FF,(attrDecoder.x>>30)&0x3);" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/1023.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/1023.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/1023.0,-1.0));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(float(attrDecoder.w));" _CRLF); // todo - is this correct?
}
else if (attrib->format == FMT_16_16_16_16 && attrib->nfa == 0 && attrib->isSigned != 0)
{
// seen in CoD ghosts
_readLittleEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.z&0x8000) != 0 ) attrDecoder.z |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.w&0x8000) != 0 ) attrDecoder.w |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(max(float(int(attrDecoder.w))/32767.0,-1.0));" _CRLF);
}
else if( attrib->format == FMT_16_16_16_16 && attrib->nfa == 2 && attrib->isSigned == 1 )
{
// seen in Rabbids Land
_readLittleEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.z&0x8000) != 0 ) attrDecoder.z |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.w&0x8000) != 0 ) attrDecoder.w |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.xyzw = floatBitsToUint(vec4(ivec4(attrDecoder)));" _CRLF);
}
else if (attrib->format == FMT_16_16_16_16_FLOAT && attrib->nfa == 2)
{
// seen in Giana Sisters: Twisted Dreams
_readLittleEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
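			// the four 16-bit half floats arrive as separate uint components; pairs are recombined
			// into 32-bit words so unpackHalf2x16 can convert them to float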
src->add("attrDecoder.xyzw = floatBitsToInt(vec4(unpackHalf2x16(attrDecoder.x|(attrDecoder.y<<16)),unpackHalf2x16(attrDecoder.z|(attrDecoder.w<<16))));" _CRLF);
}
else if (attrib->format == FMT_16_16 && attrib->nfa == 0 && attrib->isSigned != 0)
{
// seen in Nano Assault Neo
_readLittleEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/32767.0,-1.0));" _CRLF);
}
else if (attrib->format == FMT_16_16_FLOAT && attrib->nfa == 2)
{
// seen in Giana Sisters: Twisted Dreams
_readLittleEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xy = floatBitsToUint(unpackHalf2x16(attrDecoder.x|(attrDecoder.y<<16)));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 0 && attrib->isSigned == 0 )
{
src->addFmt("attrDecoder.xyzw = floatBitsToUint(vec4(attrDataSem{}.xyzw)/255.0);" _CRLF, attributeInputIndex);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 0 && attrib->isSigned != 0 )
{
src->addFmt("attrDecoder.xyzw = attrDataSem{}.xyzw;" _CRLF, attributeInputIndex);
src->add("if( (attrDecoder.x&0x80) != 0 ) attrDecoder.x |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.y&0x80) != 0 ) attrDecoder.y |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.z&0x80) != 0 ) attrDecoder.z |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.w&0x80) != 0 ) attrDecoder.w |= 0xFFFFFF00;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(max(float(int(attrDecoder.w))/127.0,-1.0));" _CRLF);
}
else if (attrib->format == FMT_8_8_8_8 && attrib->nfa == 1 && attrib->isSigned == 0)
{
src->addFmt("attrDecoder.xyzw = attrDataSem{}.xyzw;" _CRLF, attributeInputIndex);
}
else if (attrib->format == FMT_8_8_8_8 && attrib->nfa == 1 && attrib->isSigned != 0)
{
// seen in Sonic Lost World
src->addFmt("attrDecoder.xyzw = attrDataSem{}.xyzw;" _CRLF, attributeInputIndex);
src->add("if( (attrDecoder.x&0x80) != 0 ) attrDecoder.x |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.y&0x80) != 0 ) attrDecoder.y |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.z&0x80) != 0 ) attrDecoder.z |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.w&0x80) != 0 ) attrDecoder.w |= 0xFFFFFF00;" _CRLF);
}
else if( attrib->format == FMT_8_8_8_8 && attrib->nfa == 2 && attrib->isSigned == 0 )
{
// seen in One Piece
src->addFmt("attrDecoder.xyzw = floatBitsToInt(vec4(attrDataSem{}.xyzw));" _CRLF, attributeInputIndex);
}
else if (attrib->format == FMT_8_8 && attrib->nfa == 0 && attrib->isSigned == 0)
{
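			// assumption: on AMD OpenGL drivers a 2-byte attribute that starts at a half-word
			// offset appears to be delivered in the .zw components instead of .xy, hence the
			// alternate swizzle in the workaround path below (inferred from the workaround, not
			// verified driver behaviour)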
if( (attrib->offset&3) == 2 && LatteGPUState.glVendor == GLVENDOR_AMD && g_renderer->GetType() == RendererAPI::OpenGL )
{
// AMD workaround
src->addFmt("attrDecoder.xy = floatBitsToUint(vec2(attrDataSem{}.zw)/255.0);" _CRLF, attributeInputIndex);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else
{
src->addFmt("attrDecoder.xy = floatBitsToUint(vec2(attrDataSem{}.xy)/255.0);" _CRLF, attributeInputIndex);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
}
else if (attrib->format == FMT_8_8 && attrib->nfa == 2 && attrib->isSigned == 0)
{
// seen in BotW
if ((attrib->offset & 3) == 2 && LatteGPUState.glVendor == GLVENDOR_AMD && g_renderer->GetType() == RendererAPI::OpenGL)
{
// AMD workaround
src->addFmt("attrDecoder.xy = floatBitsToUint(vec2(attrDataSem{}.zw));" _CRLF, attributeInputIndex);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else
{
src->addFmt("attrDecoder.xy = floatBitsToUint(vec2(attrDataSem{}.xy));" _CRLF, attributeInputIndex);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
}
else if (attrib->format == FMT_8_8 && attrib->nfa == 0 && attrib->isSigned != 0)
{
if ((attrib->offset & 3) == 2 && LatteGPUState.glVendor == GLVENDOR_AMD && g_renderer->GetType() == RendererAPI::OpenGL)
{
// AMD workaround
src->addFmt("attrDecoder.xy = attrDataSem{}.zw;" _CRLF, attributeInputIndex);
src->add("if( (attrDecoder.x&0x80) != 0 ) attrDecoder.x |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.y&0x80) != 0 ) attrDecoder.y |= 0xFFFFFF00;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else
{
src->addFmt("attrDecoder.xy = attrDataSem{}.xy;" _CRLF, attributeInputIndex);
src->add("if( (attrDecoder.x&0x80) != 0 ) attrDecoder.x |= 0xFFFFFF00;" _CRLF);
src->add("if( (attrDecoder.y&0x80) != 0 ) attrDecoder.y |= 0xFFFFFF00;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/127.0,-1.0));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
}
else if (attrib->format == FMT_8_8 && attrib->nfa == 1 && attrib->isSigned == 0)
{
if ((attrib->offset & 3) == 2 && LatteGPUState.glVendor == GLVENDOR_AMD && g_renderer->GetType() == RendererAPI::OpenGL)
{
// AMD workaround
src->addFmt("attrDecoder.xyzw = uvec4(attrDataSem{}.zw,0,0);" _CRLF, attributeInputIndex);
}
else
{
src->addFmt("attrDecoder.xyzw = uvec4(attrDataSem{}.xy,0,0);" _CRLF, attributeInputIndex);
}
}
else if( attrib->format == FMT_8 && attrib->nfa == 0 && attrib->isSigned == 0 )
{
// seen in Pikmin 3
src->addFmt("attrDecoder.x = floatBitsToUint(float(attrDataSem{}.x)/255.0);" _CRLF, attributeInputIndex);
src->add("attrDecoder.yzw = uvec3(0);" _CRLF);
}
else if( attrib->format == FMT_8 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
src->addFmt("attrDecoder.xyzw = uvec4(attrDataSem{}.x,0,0,0);" _CRLF, attributeInputIndex);
}
else
{
cemuLog_log(LogType::Force, "_emitAttributeDecodeGLSL(): Unsupported fmt {:02x} nfa {} signed {} endian {}\n", attrib->format, attrib->nfa, attrib->isSigned, attrib->endianSwap);
cemu_assert_debug(false);
}
}
else if( attrib->endianSwap == LatteConst::VertexFetchEndianMode::SWAP_U16 )
{
if( attrib->format == FMT_16_16_16_16_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xyzw = floatBitsToInt(vec4(unpackHalf2x16(attrDecoder.x|(attrDecoder.y<<16)),unpackHalf2x16(attrDecoder.z|(attrDecoder.w<<16))));" _CRLF);
}
else if (attrib->format == FMT_16_16_16_16 && attrib->nfa == 0 && attrib->isSigned != 0)
{
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.z&0x8000) != 0 ) attrDecoder.z |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.w&0x8000) != 0 ) attrDecoder.w |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(max(float(int(attrDecoder.z))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(max(float(int(attrDecoder.w))/32767.0,-1.0));" _CRLF);
}
else if (attrib->format == FMT_16_16_16_16 && attrib->nfa == 0 && attrib->isSigned == 0)
{
// seen in BotW
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.x = floatBitsToUint(float(int(attrDecoder.x))/65535.0);" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(float(int(attrDecoder.y))/65535.0);" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(float(int(attrDecoder.z))/65535.0);" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(float(int(attrDecoder.w))/65535.0);" _CRLF);
}
else if( attrib->format == FMT_16_16_16_16 && attrib->nfa == 2 && attrib->isSigned != 0 )
{
// seen in Minecraft Wii U Edition
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.z&0x8000) != 0 ) attrDecoder.z |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.w&0x8000) != 0 ) attrDecoder.w |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(float(int(attrDecoder.x)));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(float(int(attrDecoder.y)));" _CRLF);
src->add("attrDecoder.z = floatBitsToUint(float(int(attrDecoder.z)));" _CRLF);
src->add("attrDecoder.w = floatBitsToUint(float(int(attrDecoder.w)));" _CRLF);
}
else if( attrib->format == FMT_16_16_16_16 && attrib->nfa == 1 && attrib->isSigned != 0 )
{
// seen in Minecraft Wii U Edition
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.z&0x8000) != 0 ) attrDecoder.z |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.w&0x8000) != 0 ) attrDecoder.w |= 0xFFFF0000;" _CRLF);
}
else if( attrib->format == FMT_16_16_16_16 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU16x4(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_16_16_FLOAT && attrib->nfa == 2 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xy = floatBitsToUint(unpackHalf2x16(attrDecoder.x|(attrDecoder.y<<16)));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 0 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xy = floatBitsToUint(vec2(float(attrDecoder.x), float(attrDecoder.y))/65535.0);" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 0 && attrib->isSigned != 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.x = floatBitsToUint(max(float(int(attrDecoder.x))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.y = floatBitsToUint(max(float(int(attrDecoder.y))/32767.0,-1.0));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 1 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 1 && attrib->isSigned != 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 2 && attrib->isSigned == 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.xy = floatBitsToUint(vec2(float(attrDecoder.x), float(attrDecoder.y)));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if( attrib->format == FMT_16_16 && attrib->nfa == 2 && attrib->isSigned != 0 )
{
_readBigEndianAttributeU16x2(shaderContext, src, attributeInputIndex);
src->add("if( (attrDecoder.x&0x8000) != 0 ) attrDecoder.x |= 0xFFFF0000;" _CRLF);
src->add("if( (attrDecoder.y&0x8000) != 0 ) attrDecoder.y |= 0xFFFF0000;" _CRLF);
src->add("attrDecoder.xy = floatBitsToUint(vec2(float(int(attrDecoder.x)), float(int(attrDecoder.y))));" _CRLF);
src->add("attrDecoder.zw = uvec2(0);" _CRLF);
}
else if (attrib->format == FMT_16 && attrib->nfa == 1 && attrib->isSigned == 0)
{
_readBigEndianAttributeU16x1(shaderContext, src, attributeInputIndex);
}
else if (attrib->format == FMT_16 && attrib->nfa == 0 && attrib->isSigned == 0)
{
// seen in CoD ghosts
_readBigEndianAttributeU16x1(shaderContext, src, attributeInputIndex);
src->add("attrDecoder.x = floatBitsToUint(float(int(attrDecoder.x))/65535.0);" _CRLF);
}
else
{
cemuLog_logDebug(LogType::Force, "_emitAttributeDecodeGLSL(): Unsupported fmt {:02x} nfa {} signed {} endian {}", attrib->format, attrib->nfa, attrib->isSigned, attrib->endianSwap);
}
}
else
{
cemu_assert_debug(false);
}
}
| 26,293 | C++ | .cpp | 491 | 50.211813 | 184 | 0.685103 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,223 | LatteDecompilerEmitGLSL.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerEmitGLSL.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h" // todo - remove dependency
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInternal.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "config/ActiveSettings.h"
#include "util/helpers/StringBuf.h"
#include <bitset>
#include <boost/container/small_vector.hpp>
#define _CRLF "\r\n"
void LatteDecompiler_emitAttributeDecodeGLSL(LatteDecompilerShader* shaderContext, StringBuf* src, LatteParsedFetchShaderAttribute_t* attrib);
/*
* Variable names:
* R0-R127 temp
* Most variables are multi-typed and the respective type is appended to the name
* Type suffixes are: f (float), i (32bit int), ui (unsigned 32bit int)
* Examples: R13ui.x, tempf.z
*/
// local prototypes
void _emitTypeConversionPrefix(LatteDecompilerShaderContext* shaderContext, sint32 sourceType, sint32 destinationType);
void _emitTypeConversionSuffix(LatteDecompilerShaderContext* shaderContext, sint32 sourceType, sint32 destinationType);
void LatteDecompiler_emitClauseCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, bool isSubroutine);
const char* _getShaderUniformBlockInterfaceName(LatteConst::ShaderType mode)
{
switch (mode)
{
case LatteConst::ShaderType::Vertex:
return "uniformBlockVS";
case LatteConst::ShaderType::Pixel:
return "uniformBlockPS";
case LatteConst::ShaderType::Geometry:
return "uniformBlockGS";
default:
break;
}
cemu_assert_unimplemented();
return nullptr;
}
const char* _getShaderUniformBlockVariableName(LatteConst::ShaderType mode)
{
switch (mode)
{
case LatteConst::ShaderType::Vertex:
return "uf_blockVS";
case LatteConst::ShaderType::Pixel:
return "uf_blockPS";
case LatteConst::ShaderType::Geometry:
return "uf_blockGS";
default:
break;
}
cemu_assert_unimplemented();
return nullptr;
}
const char* _getTextureUnitVariablePrefixName(LatteConst::ShaderType mode)
{
switch (mode)
{
case LatteConst::ShaderType::Vertex:
return "textureUnitVS";
case LatteConst::ShaderType::Pixel:
return "textureUnitPS";
case LatteConst::ShaderType::Geometry:
return "textureUnitGS";
}
cemu_assert_unimplemented();
return nullptr;
}
const char* _getElementStrByIndex(uint32 channel)
{
switch (channel)
{
case 0:
return "x";
case 1:
return "y";
case 2:
return "z";
case 3:
return "w";
}
return "UNDEFINED";
}
char _tempGenString[64][256];
uint32 _tempGenStringIndex = 0;
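// small ring buffer of scratch strings so that several _get*VarName() results can be used
// inside a single format call without overwriting one another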
char* _getTempString()
{
char* str = _tempGenString[_tempGenStringIndex];
_tempGenStringIndex = (_tempGenStringIndex+1)%64;
return str;
}
static char* _getActiveMaskVarName(LatteDecompilerShaderContext* shaderContext, sint32 index)
{
char* varName = _getTempString();
if (shaderContext->isSubroutine)
sprintf(varName, "activeMaskStackSub%04x[%d]", shaderContext->subroutineInfo->cfAddr, index);
else
sprintf(varName, "activeMaskStack[%d]", index);
return varName;
}
static char* _getActiveMaskCVarName(LatteDecompilerShaderContext* shaderContext, sint32 index)
{
char* varName = _getTempString();
if (shaderContext->isSubroutine)
sprintf(varName, "activeMaskStackCSub%04x[%d]", shaderContext->subroutineInfo->cfAddr, index);
else
sprintf(varName, "activeMaskStackC[%d]", index);
return varName;
}
static char* _getRegisterVarName(LatteDecompilerShaderContext* shaderContext, uint32 index, sint32 destRelIndexMode=-1)
{
auto type = shaderContext->typeTracker.defaultDataType;
char* tempStr = _getTempString();
if (shaderContext->typeTracker.useArrayGPRs == false)
{
if (type == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
sprintf(tempStr, "R%di", index);
else if (type == LATTE_DECOMPILER_DTYPE_FLOAT)
sprintf(tempStr, "R%df", index);
}
else
{
char destRelOffset[32];
if (destRelIndexMode >= 0)
{
if (destRelIndexMode == GPU7_INDEX_AR_X)
strcpy(destRelOffset, "ARi.x");
else if (destRelIndexMode == GPU7_INDEX_AR_Y)
strcpy(destRelOffset, "ARi.y");
else if (destRelIndexMode == GPU7_INDEX_AR_Z)
strcpy(destRelOffset, "ARi.z");
else if (destRelIndexMode == GPU7_INDEX_AR_W)
strcpy(destRelOffset, "ARi.w");
else
debugBreakpoint();
if (type == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
{
sprintf(tempStr, "Ri[%d+%s]", index, destRelOffset);
}
else if (type == LATTE_DECOMPILER_DTYPE_FLOAT)
{
sprintf(tempStr, "Rf[%d+%s]", index, destRelOffset);
}
}
else
{
if (type == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
{
sprintf(tempStr, "Ri[%d]", index);
}
else if (type == LATTE_DECOMPILER_DTYPE_FLOAT)
{
sprintf(tempStr, "Rf[%d]", index);
}
}
}
return tempStr;
}
static void _appendRegisterTypeSuffix(StringBuf* src, sint32 dataType)
{
if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add("i");
else if (dataType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT)
src->add("ui");
else if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->add("f");
else
cemu_assert_unimplemented();
}
// appends x/y/z/w
static void _appendChannel(StringBuf* src, sint32 channelIndex)
{
cemu_assert_debug(channelIndex >= 0 && channelIndex <= 3);
switch (channelIndex)
{
case 0:
src->add("x");
return;
case 1:
src->add("y");
return;
case 2:
src->add("z");
return;
case 3:
src->add("w");
return;
}
}
// appends .x/.y/.z/.w
static void _appendChannelAccess(StringBuf* src, sint32 channelIndex)
{
cemu_assert_debug(channelIndex >= 0 && channelIndex <= 3);
switch (channelIndex)
{
case 0:
src->add(".x");
return;
case 1:
src->add(".y");
return;
case 2:
src->add(".z");
return;
case 3:
src->add(".w");
return;
}
}
static void _appendPVPS(LatteDecompilerShaderContext* shaderContext, StringBuf* src, uint32 groupIndex, uint8 aluUnit)
{
cemu_assert_debug(aluUnit < 5);
if (aluUnit == 4)
{
src->addFmt("PS{}", (groupIndex & 1));
_appendRegisterTypeSuffix(src, shaderContext->typeTracker.defaultDataType);
return;
}
src->addFmt("PV{}", (groupIndex & 1));
_appendRegisterTypeSuffix(src, shaderContext->typeTracker.defaultDataType);
_appendChannel(src, aluUnit);
}
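// Formats a float as a GLSL literal. The '#' format flag keeps the decimal point and a
// trailing '0' is appended when needed (e.g. "1." becomes "1.0") so the constant is always
// parsed as a float literal by the GLSL compiler.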
std::string _FormatFloatAsGLSLConstant(float f)
{
char floatAsStr[64];
size_t floatAsStrLen = fmt::format_to_n(floatAsStr, 64, "{:#}", f).size;
size_t floatAsStrLenOrg = floatAsStrLen;
if(floatAsStrLen > 0 && floatAsStr[floatAsStrLen-1] == '.')
{
floatAsStr[floatAsStrLen] = '0';
floatAsStrLen++;
}
cemu_assert(floatAsStrLen < 50); // constant suspiciously long?
floatAsStr[floatAsStrLen] = '\0';
cemu_assert_debug(floatAsStrLen >= 3); // shortest possible form is "0.0"
return floatAsStr;
}
// tracks PV/PS and register backups
struct ALUClauseTemporariesState
{
struct PVPSAlias
{
enum class LOCATION_TYPE : uint8
{
LOCATION_NONE,
LOCATION_GPR,
LOCATION_PVPS,
};
LOCATION_TYPE location{ LOCATION_TYPE::LOCATION_NONE };
uint8 index; // GPR index or temporary index
uint8 aluUnit; // x,y,z,w (or 5 for PS)
void SetLocationGPR(uint8 gprIndex, uint8 channel)
{
cemu_assert_debug(channel < 4);
this->location = LOCATION_TYPE::LOCATION_GPR;
this->index = gprIndex;
this->aluUnit = channel;
}
void SetLocationPSPVTemporary(uint8 aluUnit, uint32 groupIndex)
{
cemu_assert_debug(aluUnit < 5);
this->location = LOCATION_TYPE::LOCATION_PVPS;
this->index = groupIndex & 1;
this->aluUnit = aluUnit;
}
};
struct GPRTemporary
{
GPRTemporary(uint8 gprIndex, uint8 channel, uint8 backupVarIndex) : gprIndex(gprIndex), channel(channel), backupVarIndex(backupVarIndex) {}
uint8 gprIndex;
uint8 channel;
uint8 backupVarIndex;
};
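	// called once per ALU instruction group; PV.x/y/z/w and PS expose the results of the
	// previous group, so each unit's alias is pointed either at the GPR the instruction wrote
	// (writeMask != 0) or at the dedicated PV/PS temporary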
void TrackGroupOutputPVPS(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstr, size_t numInstr)
{
// unset current
for (auto& it : m_pvps)
it.location = PVPSAlias::LOCATION_TYPE::LOCATION_NONE;
for (size_t i = 0; i < numInstr; i++)
{
LatteDecompilerALUInstruction& inst = aluInstr[i];
if (!inst.isOP3 && inst.opcode == ALU_OP2_INST_NOP)
continue; // skip NOP instruction
if (inst.writeMask == 0)
{
// map to temporary
m_pvps[inst.aluUnit].SetLocationPSPVTemporary(inst.aluUnit, aluInstr->instructionGroupIndex);
}
else
{
// map to GPR
if(inst.destRel == 0) // is PV/PS set for indexed writes?
m_pvps[inst.aluUnit].SetLocationGPR(inst.destGpr, inst.destElem);
}
}
}
bool HasPVPS(uint8 aluUnitIndex) const
{
cemu_assert_debug(aluUnitIndex < 5);
return m_pvps[aluUnitIndex].location != PVPSAlias::LOCATION_TYPE::LOCATION_NONE;
}
void EmitPVPSAccess(LatteDecompilerShaderContext* shaderContext, uint8 aluUnitIndex, uint32 currentGroupIndex) const
{
switch (m_pvps[aluUnitIndex].location)
{
case PVPSAlias::LOCATION_TYPE::LOCATION_GPR:
{
sint32 temporaryIndex = GetTemporaryForGPR(m_pvps[aluUnitIndex].index, m_pvps[aluUnitIndex].aluUnit);
if (temporaryIndex < 0)
{
shaderContext->shaderSource->add(_getRegisterVarName(shaderContext, m_pvps[aluUnitIndex].index, -1));
_appendChannelAccess(shaderContext->shaderSource, m_pvps[aluUnitIndex].aluUnit);
}
else
{
// use temporary instead of GPR
shaderContext->shaderSource->addFmt("backupReg{}", temporaryIndex);
_appendRegisterTypeSuffix(shaderContext->shaderSource, shaderContext->typeTracker.defaultDataType);
}
break;
}
case PVPSAlias::LOCATION_TYPE::LOCATION_PVPS:
_appendPVPS(shaderContext, shaderContext->shaderSource, currentGroupIndex-1, m_pvps[aluUnitIndex].aluUnit);
break;
default:
cemuLog_log(LogType::Force, "Shader {:016x} accesses PV/PS without writing to it", shaderContext->shaderBaseHash);
cemu_assert_suspicious();
break;
}
}
/*
* Check for GPR channels which are modified before they are read within the same group
* These registers need to be copied to a temporary
*/
void CreateGPRTemporaries(LatteDecompilerShaderContext* shaderContext, std::span<LatteDecompilerALUInstruction> aluInstructions)
{
uint8 registerChannelWriteMask[(LATTE_NUM_GPR * 4 + 7) / 8] = { 0 };
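		// one bit per GPR channel, bit index = gprIndex * 4 + channel, packed into bytes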
m_gprTemporaries.clear();
for (auto& aluInstruction : aluInstructions)
{
// ignore NOP instructions
if (aluInstruction.isOP3 == false && aluInstruction.opcode == ALU_OP2_INST_NOP)
continue;
cemu_assert_debug(aluInstruction.destElem <= 3);
// check if any previously written register is read
for (sint32 f = 0; f < 3; f++)
{
uint32 readGPRIndex;
uint32 readGPRChannel;
if (GPU7_ALU_SRC_IS_GPR(aluInstruction.sourceOperand[f].sel))
{
readGPRIndex = GPU7_ALU_SRC_GET_GPR_INDEX(aluInstruction.sourceOperand[f].sel);
cemu_assert_debug(aluInstruction.sourceOperand[f].chan <= 3);
readGPRChannel = aluInstruction.sourceOperand[f].chan;
}
else if (GPU7_ALU_SRC_IS_PV(aluInstruction.sourceOperand[f].sel) || GPU7_ALU_SRC_IS_PS(aluInstruction.sourceOperand[f].sel))
{
uint8 aluUnitIndex = 0;
if (GPU7_ALU_SRC_IS_PV(aluInstruction.sourceOperand[f].sel))
aluUnitIndex = aluInstruction.sourceOperand[f].chan;
else
aluUnitIndex = 4;
// if aliased to a GPR, then consider it a GPR read
if(m_pvps[aluUnitIndex].location != PVPSAlias::LOCATION_TYPE::LOCATION_GPR)
continue;
readGPRIndex = m_pvps[aluUnitIndex].index;
readGPRChannel = m_pvps[aluUnitIndex].aluUnit;
}
else
continue;
// track GPR read
if ((registerChannelWriteMask[(readGPRIndex * 4 + aluInstruction.sourceOperand[f].chan) / 8] & (1 << ((readGPRIndex * 4 + aluInstruction.sourceOperand[f].chan) % 8))) != 0)
{
// register is overwritten by previous instruction, a temporary variable is required
if (GetTemporaryForGPR(readGPRIndex, readGPRChannel) < 0)
m_gprTemporaries.emplace_back(readGPRIndex, readGPRChannel, m_gprTemporaries.size());
}
}
// track write
if (aluInstruction.writeMask != 0)
registerChannelWriteMask[(aluInstruction.destGpr * 4 + aluInstruction.destElem) / 8] |= (1 << ((aluInstruction.destGpr * 4 + aluInstruction.destElem) % 8));
}
// output code to move GPRs into temporaries
StringBuf* src = shaderContext->shaderSource;
for (auto& it : m_gprTemporaries)
{
src->addFmt("backupReg{}", it.backupVarIndex);
_appendRegisterTypeSuffix(src, shaderContext->typeTracker.defaultDataType);
src->add(" = ");
src->add(_getRegisterVarName(shaderContext, it.gprIndex));
_appendChannelAccess(src, it.channel);
src->add(";" _CRLF);
}
}
// returns -1 if none present
sint32 GetTemporaryForGPR(uint8 gprIndex, uint8 channel) const
{
for (auto& it : m_gprTemporaries)
{
if (it.gprIndex == gprIndex && it.channel == channel)
return (sint32)it.backupVarIndex;
}
return -1;
}
private:
PVPSAlias m_pvps[5]{};
boost::container::small_vector<GPRTemporary, 4> m_gprTemporaries;
};
sint32 _getVertexShaderOutParamSemanticId(uint32* contextRegisters, sint32 index) // deprecated - move to LatteShaderPSInputTable
{
uint32 vsSemanticId = (contextRegisters[mmSPI_VS_OUT_ID_0 + (index / 4)] >> (8 * (index % 4))) & 0xFF;
// check if export exists since exports are generated based on PS inputs
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
for (sint32 i = 0; i < psInputTable->count; i++)
{
if(psInputTable->import[i].semanticId == vsSemanticId)
return vsSemanticId;
}
return 0xFF;
}
sint32 _getInputRegisterDataType(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex)
{
return shaderContext->typeTracker.defaultDataType;
}
sint32 _getALUInstructionOutputDataType(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction)
{
return shaderContext->typeTracker.defaultDataType;
}
// returns true if the ALU instruction is a OP2 reduction instruction
bool _isReductionInstruction(LatteDecompilerALUInstruction* aluInstruction)
{
return aluInstruction->isOP3 == false && (aluInstruction->opcode == ALU_OP2_INST_DOT4 || aluInstruction->opcode == ALU_OP2_INST_DOT4_IEEE || aluInstruction->opcode == ALU_OP2_INST_CUBE);
}
/*
* Writes the name of the output variable and channel
* E.g. R5f.x or tempf.x if writeMask is 0
*/
void _emitInstructionOutputVariableName(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction)
{
auto src = shaderContext->shaderSource;
sint32 outputDataType = _getALUInstructionOutputDataType(shaderContext, aluInstruction);
if( aluInstruction->writeMask == 0 )
{
// does not output to GPR
if( !_isReductionInstruction(aluInstruction) )
{
// output to PV/PS
_appendPVPS(shaderContext, src, aluInstruction->instructionGroupIndex, aluInstruction->aluUnit);
return;
}
else
{
// output to temp
src->add("temp");
_appendRegisterTypeSuffix(src, outputDataType);
}
_appendChannelAccess(src, aluInstruction->aluUnit);
}
else
{
// output to GPR. Aliasing to PV/PS happens at the end of the group
src->add(_getRegisterVarName(shaderContext, aluInstruction->destGpr, aluInstruction->destRel==0?-1:aluInstruction->indexMode));
_appendChannelAccess(src, aluInstruction->destElem);
}
}
void _emitInstructionPVPSOutputVariableName(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction)
{
_appendPVPS(shaderContext, shaderContext->shaderSource, aluInstruction->instructionGroupIndex, aluInstruction->aluUnit);
}
void _emitRegisterAccessCode(LatteDecompilerShaderContext* shaderContext, sint32 gprIndex, sint32 channel0, sint32 channel1, sint32 channel2, sint32 channel3, sint32 dataType = -1)
{
StringBuf* src = shaderContext->shaderSource;
sint32 registerElementDataType = shaderContext->typeTracker.defaultDataType;
cemu_assert_debug(gprIndex >= 0 && gprIndex <= 127);
if (dataType >= 0)
{
_emitTypeConversionPrefix(shaderContext, registerElementDataType, dataType);
}
if (shaderContext->typeTracker.useArrayGPRs)
src->add("R");
else
src->addFmt("R{}", gprIndex);
_appendRegisterTypeSuffix(src, registerElementDataType);
if (shaderContext->typeTracker.useArrayGPRs)
src->addFmt("[{}]", gprIndex);
src->add(".");
sint32 channelArray[4];
channelArray[0] = channel0;
channelArray[1] = channel1;
channelArray[2] = channel2;
channelArray[3] = channel3;
for (sint32 i = 0; i < 4; i++)
{
if (channelArray[i] >= 0 && channelArray[i] <= 3)
src->add(_getElementStrByIndex(channelArray[i]));
else if (channelArray[i] == -1)
{
// channel not used
}
else
{
cemu_assert_unimplemented();
}
}
if (dataType >= 0)
_emitTypeConversionSuffix(shaderContext, registerElementDataType, dataType);
}
// optimized variant of _emitRegisterAccessCode for raw one channel reads
void _emitRegisterChannelAccessCode(LatteDecompilerShaderContext* shaderContext, sint32 gprIndex, sint32 channel, sint32 dataType)
{
cemu_assert_debug(gprIndex >= 0 && gprIndex <= 127);
cemu_assert_debug(channel >= 0 && channel < 4);
StringBuf* src = shaderContext->shaderSource;
sint32 registerElementDataType = shaderContext->typeTracker.defaultDataType;
_emitTypeConversionPrefix(shaderContext, registerElementDataType, dataType);
if (shaderContext->typeTracker.useArrayGPRs)
src->add("R");
else
src->addFmt("R{}", gprIndex);
_appendRegisterTypeSuffix(src, registerElementDataType);
if (shaderContext->typeTracker.useArrayGPRs)
src->addFmt("[{}]", gprIndex);
src->add(".");
src->add(_getElementStrByIndex(channel));
_emitTypeConversionSuffix(shaderContext, registerElementDataType, dataType);
}
void _emitALURegisterInputAccessCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex)
{
StringBuf* src = shaderContext->shaderSource;
sint32 currentRegisterElementType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
cemu_assert_debug(GPU7_ALU_SRC_IS_GPR(aluInstruction->sourceOperand[operandIndex].sel));
sint32 gprIndex = GPU7_ALU_SRC_GET_GPR_INDEX(aluInstruction->sourceOperand[operandIndex].sel);
sint32 temporaryIndex = shaderContext->aluPVPSState->GetTemporaryForGPR(gprIndex, aluInstruction->sourceOperand[operandIndex].chan);
if(temporaryIndex >= 0)
{
// access via backup variable
src->addFmt("backupReg{}", temporaryIndex);
_appendRegisterTypeSuffix(src, currentRegisterElementType);
}
else
{
// access via register variable
_emitRegisterAccessCode(shaderContext, gprIndex, aluInstruction->sourceOperand[operandIndex].chan, -1, -1, -1);
}
}
void _emitPVPSAccessCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex, uint8 aluUnitIndex)
{
cemu_assert_debug(aluInstruction->instructionGroupIndex > 0); // PV/PS is uninitialized for group 0
// PV/PS vars are currently always using the default type (shaderContext->typeTracker.defaultDataType)
shaderContext->aluPVPSState->EmitPVPSAccess(shaderContext, aluUnitIndex, aluInstruction->instructionGroupIndex);
}
/*
* Emits the expression used for calculating the index for uniform access
* For static access, this is a number
* For dynamic access, this is AR.* + base
*/
void _emitUniformAccessIndexCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex)
{
StringBuf* src = shaderContext->shaderSource;
bool isUniformRegister = GPU7_ALU_SRC_IS_CFILE(aluInstruction->sourceOperand[operandIndex].sel);
sint32 uniformOffset = 0; // index into array, for relative accesses this is the base offset
if( isUniformRegister )
{
uniformOffset = GPU7_ALU_SRC_GET_CFILE_INDEX(aluInstruction->sourceOperand[operandIndex].sel);
}
else
{
if( GPU7_ALU_SRC_IS_CBANK0(aluInstruction->sourceOperand[operandIndex].sel) )
{
uniformOffset = GPU7_ALU_SRC_GET_CBANK0_INDEX(aluInstruction->sourceOperand[operandIndex].sel) + aluInstruction->cfInstruction->cBank0AddrBase;
}
else
{
uniformOffset = GPU7_ALU_SRC_GET_CBANK1_INDEX(aluInstruction->sourceOperand[operandIndex].sel) + aluInstruction->cfInstruction->cBank1AddrBase;
}
}
if( aluInstruction->sourceOperand[operandIndex].rel != 0 )
{
if (aluInstruction->indexMode == GPU7_INDEX_AR_X)
src->addFmt("ARi.x+{}", uniformOffset);
else if (aluInstruction->indexMode == GPU7_INDEX_AR_Y)
src->addFmt("ARi.y+{}", uniformOffset);
else if (aluInstruction->indexMode == GPU7_INDEX_AR_Z)
src->addFmt("ARi.z+{}", uniformOffset);
else if (aluInstruction->indexMode == GPU7_INDEX_AR_W)
src->addFmt("ARi.w+{}", uniformOffset);
else
cemu_assert_unimplemented();
}
else
{
src->addFmt("{}", uniformOffset);
}
}
void _emitUniformAccessCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex, sint32 requiredType)
{
StringBuf* src = shaderContext->shaderSource;
if(shaderContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_REMAPPED )
{
// uniform registers or buffers are accessed statically with predictable offsets
// find entry in remapped uniform
if( aluInstruction->sourceOperand[operandIndex].rel != 0 )
debugBreakpoint();
bool isUniformRegister = GPU7_ALU_SRC_IS_CFILE(aluInstruction->sourceOperand[operandIndex].sel);
sint32 uniformOffset = 0; // index into array
sint32 uniformBufferIndex = 0;
if( isUniformRegister )
{
uniformOffset = GPU7_ALU_SRC_GET_CFILE_INDEX(aluInstruction->sourceOperand[operandIndex].sel);
uniformBufferIndex = 0;
}
else
{
if( GPU7_ALU_SRC_IS_CBANK0(aluInstruction->sourceOperand[operandIndex].sel) )
{
uniformOffset = GPU7_ALU_SRC_GET_CBANK0_INDEX(aluInstruction->sourceOperand[operandIndex].sel) + aluInstruction->cfInstruction->cBank0AddrBase;
uniformBufferIndex = aluInstruction->cfInstruction->cBank0Index;
}
else
{
uniformOffset = GPU7_ALU_SRC_GET_CBANK1_INDEX(aluInstruction->sourceOperand[operandIndex].sel) + aluInstruction->cfInstruction->cBank1AddrBase;
uniformBufferIndex = aluInstruction->cfInstruction->cBank1Index;
}
}
LatteDecompilerRemappedUniformEntry_t* remappedUniformEntry = NULL;
for(size_t i=0; i< shaderContext->shader->list_remappedUniformEntries.size(); i++)
{
LatteDecompilerRemappedUniformEntry_t* remappedUniformEntryItr = shaderContext->shader->list_remappedUniformEntries.data() + i;
if( remappedUniformEntryItr->isRegister && isUniformRegister )
{
if( remappedUniformEntryItr->index == uniformOffset )
{
remappedUniformEntry = remappedUniformEntryItr;
break;
}
}
else
{
if( remappedUniformEntryItr->kcacheBankId == uniformBufferIndex && remappedUniformEntryItr->index == uniformOffset )
{
remappedUniformEntry = remappedUniformEntryItr;
break;
}
}
}
cemu_assert_debug(remappedUniformEntry);
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
if(shaderContext->shader->shaderType == LatteConst::ShaderType::Vertex )
src->addFmt("uf_remappedVS[{}]", remappedUniformEntry->mappedIndex);
else if(shaderContext->shader->shaderType == LatteConst::ShaderType::Pixel )
src->addFmt("uf_remappedPS[{}]", remappedUniformEntry->mappedIndex);
else if(shaderContext->shader->shaderType == LatteConst::ShaderType::Geometry )
src->addFmt("uf_remappedGS[{}]", remappedUniformEntry->mappedIndex);
else
debugBreakpoint();
_appendChannelAccess(src, aluInstruction->sourceOperand[operandIndex].chan);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
}
else if( shaderContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CFILE )
{
// uniform registers are accessed with unpredictable (dynamic) offset
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
if(shaderContext->shader->shaderType == LatteConst::ShaderType::Vertex )
src->add("uf_uniformRegisterVS[");
else if (shaderContext->shader->shaderType == LatteConst::ShaderType::Pixel)
src->add("uf_uniformRegisterPS[");
else if(shaderContext->shader->shaderType == LatteConst::ShaderType::Geometry )
src->add("uf_uniformRegisterGS[");
else
debugBreakpoint();
_emitUniformAccessIndexCode(shaderContext, aluInstruction, operandIndex);
src->add("]");
_appendChannelAccess(src, aluInstruction->sourceOperand[operandIndex].chan);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
}
else if( shaderContext->shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK )
{
// uniform buffers are available as a whole
bool isUniformRegister = GPU7_ALU_SRC_IS_CFILE(aluInstruction->sourceOperand[operandIndex].sel);
if( isUniformRegister )
debugBreakpoint();
sint32 uniformBufferIndex = 0;
if( GPU7_ALU_SRC_IS_CBANK0(aluInstruction->sourceOperand[operandIndex].sel) )
{
uniformBufferIndex = aluInstruction->cfInstruction->cBank0Index;
}
else
{
uniformBufferIndex = aluInstruction->cfInstruction->cBank1Index;
}
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
src->addFmt("{}{}[", _getShaderUniformBlockVariableName(shaderContext->shader->shaderType), uniformBufferIndex);
_emitUniformAccessIndexCode(shaderContext, aluInstruction, operandIndex);
src->addFmt("]");
_appendChannelAccess(src, aluInstruction->sourceOperand[operandIndex].chan);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
}
else
debugBreakpoint();
}
// Generates (slow) code to read an indexed GPR
void _emitCodeToReadRelativeGPR(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex, sint32 requiredType)
{
StringBuf* src = shaderContext->shaderSource;
uint32 gprBaseIndex = GPU7_ALU_SRC_GET_GPR_INDEX(aluInstruction->sourceOperand[operandIndex].sel);
cemu_assert_debug(aluInstruction->sourceOperand[operandIndex].rel != 0);
if( shaderContext->typeTracker.useArrayGPRs )
{
_emitTypeConversionPrefix(shaderContext, shaderContext->typeTracker.defaultDataType, requiredType);
src->add(_getRegisterVarName(shaderContext, gprBaseIndex, aluInstruction->indexMode));
_appendChannelAccess(src, aluInstruction->sourceOperand[operandIndex].chan);
_emitTypeConversionSuffix(shaderContext, shaderContext->typeTracker.defaultDataType, requiredType);
return;
}
char indexAccessCode[64];
if (aluInstruction->indexMode == GPU7_INDEX_AR_X)
sprintf(indexAccessCode, "ARi.x");
else if (aluInstruction->indexMode == GPU7_INDEX_AR_Y)
sprintf(indexAccessCode, "ARi.y");
else if (aluInstruction->indexMode == GPU7_INDEX_AR_Z)
sprintf(indexAccessCode, "ARi.z");
else if (aluInstruction->indexMode == GPU7_INDEX_AR_W)
sprintf(indexAccessCode, "ARi.w");
else
cemu_assert_unimplemented();
if( LATTE_DECOMPILER_DTYPE_SIGNED_INT != requiredType )
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
// generated code looks like this:
// result = ((lookupIndex==0)?GPR5:(lookupIndex==1)?GPR6:(lookupIndex==2)?GPR7:...:(lookupIndex==122)?GPR127:0)
src->add("(");
for(sint32 i=gprBaseIndex; i<LATTE_NUM_GPR; i++)
{
// only emit access code for registers which are potentially written
if((shaderContext->analyzer.gprUseMask[i / 8] & (1 << (i % 8))) == 0 )
continue;
src->addFmt("({}=={})?", indexAccessCode, i-gprBaseIndex);
// code to access gpr
src->add(_getRegisterVarName(shaderContext, i));
_appendChannelAccess(src, aluInstruction->sourceOperand[operandIndex].chan);
src->add(":");
}
src->add("0)");
if( LATTE_DECOMPILER_DTYPE_SIGNED_INT != requiredType )
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, requiredType);
}
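// Emits the code for a single ALU source operand: GPRs (direct or relative), PV/PS, uniforms,
// literals and inline constants. Applies the neg/abs modifiers and any type conversion required
// to yield requiredType.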
void _emitOperandInputCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, sint32 operandIndex, sint32 requiredType)
{
StringBuf* src = shaderContext->shaderSource;
if( operandIndex < 0 || operandIndex >= 3 )
debugBreakpoint();
sint32 requiredTypeOut = requiredType;
if( requiredType != LATTE_DECOMPILER_DTYPE_FLOAT && (aluInstruction->sourceOperand[operandIndex].abs != 0 || aluInstruction->sourceOperand[operandIndex].neg != 0) )
{
// we need to apply float operations on the input but it's not read as a float
// force internal required type to float and then cast it back to whatever type is actually required
requiredType = LATTE_DECOMPILER_DTYPE_FLOAT;
}
if( requiredTypeOut != requiredType )
_emitTypeConversionPrefix(shaderContext, requiredType, requiredTypeOut);
if( aluInstruction->sourceOperand[operandIndex].neg != 0 )
src->add("-(");
if( aluInstruction->sourceOperand[operandIndex].abs != 0 )
src->add("abs(");
if( GPU7_ALU_SRC_IS_GPR(aluInstruction->sourceOperand[operandIndex].sel) )
{
if( aluInstruction->sourceOperand[operandIndex].rel != 0 )
{
_emitCodeToReadRelativeGPR(shaderContext, aluInstruction, operandIndex, requiredType);
}
else
{
uint32 gprIndex = GPU7_ALU_SRC_GET_GPR_INDEX(aluInstruction->sourceOperand[operandIndex].sel);
if( requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
// signed int 32bit
sint32 currentRegisterElementType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
// write code for register input
_emitTypeConversionPrefix(shaderContext, currentRegisterElementType, requiredType);
_emitALURegisterInputAccessCode(shaderContext, aluInstruction, operandIndex);
_emitTypeConversionSuffix(shaderContext, currentRegisterElementType, requiredType);
}
else if( requiredType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT )
{
// unsigned int 32bit
sint32 currentRegisterElementType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
// need to convert from int to uint
src->add("uint(");
}
else if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT )
{
// no extra work necessary
}
else
debugBreakpoint();
// write code for register input
_emitALURegisterInputAccessCode(shaderContext, aluInstruction, operandIndex);
if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
src->add(")");
}
}
else if( requiredType == LATTE_DECOMPILER_DTYPE_FLOAT )
{
// float 32bit
sint32 currentRegisterElementType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
// need to convert (not cast) from int bits to float
src->add("intBitsToFloat(");
}
else if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_FLOAT )
{
// no extra work necessary
}
else
debugBreakpoint();
// write code for register input
_emitALURegisterInputAccessCode(shaderContext, aluInstruction, operandIndex);
if( currentRegisterElementType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
src->add(")");
}
}
else
debugBreakpoint();
}
}
else if( GPU7_ALU_SRC_IS_CONST_0F(aluInstruction->sourceOperand[operandIndex].sel) )
{
if(requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT || requiredType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT)
src->add("0");
else if( requiredType == LATTE_DECOMPILER_DTYPE_FLOAT )
src->add("0.0");
}
else if( GPU7_ALU_SRC_IS_CONST_1F(aluInstruction->sourceOperand[operandIndex].sel) )
{
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
src->add("1.0");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
}
else if( GPU7_ALU_SRC_IS_CONST_0_5F(aluInstruction->sourceOperand[operandIndex].sel) )
{
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
src->add("0.5");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, requiredType);
}
else if( GPU7_ALU_SRC_IS_CONST_1I(aluInstruction->sourceOperand[operandIndex].sel) )
{
if (requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add("int(1)");
else if (requiredType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT)
src->add("uint(1)");
else
cemu_assert_suspicious();
}
else if( GPU7_ALU_SRC_IS_CONST_M1I(aluInstruction->sourceOperand[operandIndex].sel) )
{
if( requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
src->add("int(-1)");
else
cemu_assert_suspicious();
}
else if( GPU7_ALU_SRC_IS_LITERAL(aluInstruction->sourceOperand[operandIndex].sel) )
{
if( requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
src->addFmt("0x{:x}", aluInstruction->literalData.w[aluInstruction->sourceOperand[operandIndex].chan]);
else if( requiredType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT )
src->addFmt("uint(0x{:x})", aluInstruction->literalData.w[aluInstruction->sourceOperand[operandIndex].chan]);
else if (requiredType == LATTE_DECOMPILER_DTYPE_FLOAT)
{
uint32 constVal = aluInstruction->literalData.w[aluInstruction->sourceOperand[operandIndex].chan];
sint32 exponent = (constVal >> 23) & 0xFF;
exponent -= 127;
if ((constVal & 0xFF) == 0 && exponent >= -10 && exponent <= 10)
{
src->add(_FormatFloatAsGLSLConstant(*(float*)&constVal));
}
else
src->addFmt("intBitsToFloat(0x{:08x})", constVal);
}
}
else if( GPU7_ALU_SRC_IS_CFILE(aluInstruction->sourceOperand[operandIndex].sel) )
{
_emitUniformAccessCode(shaderContext, aluInstruction, operandIndex, requiredType);
}
else if( GPU7_ALU_SRC_IS_CBANK0(aluInstruction->sourceOperand[operandIndex].sel) ||
GPU7_ALU_SRC_IS_CBANK1(aluInstruction->sourceOperand[operandIndex].sel) )
{
_emitUniformAccessCode(shaderContext, aluInstruction, operandIndex, requiredType);
}
else if( GPU7_ALU_SRC_IS_PV(aluInstruction->sourceOperand[operandIndex].sel) )
{
sint32 currentPVDataType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
_emitTypeConversionPrefix(shaderContext, currentPVDataType, requiredType);
_emitPVPSAccessCode(shaderContext, aluInstruction, operandIndex, aluInstruction->sourceOperand[operandIndex].chan);
_emitTypeConversionSuffix(shaderContext, currentPVDataType, requiredType);
}
else if( GPU7_ALU_SRC_IS_PS(aluInstruction->sourceOperand[operandIndex].sel) )
{
sint32 currentPSDataType = _getInputRegisterDataType(shaderContext, aluInstruction, operandIndex);
_emitTypeConversionPrefix(shaderContext, currentPSDataType, requiredType);
_emitPVPSAccessCode(shaderContext, aluInstruction, operandIndex, 4);
_emitTypeConversionSuffix(shaderContext, currentPSDataType, requiredType);
}
else
{
cemuLog_log(LogType::Force, "Unsupported shader ALU operand sel {:#x}\n", aluInstruction->sourceOperand[operandIndex].sel);
debugBreakpoint();
}
if( aluInstruction->sourceOperand[operandIndex].abs != 0 )
src->add(")");
if( aluInstruction->sourceOperand[operandIndex].neg != 0 )
src->add(")");
if( requiredTypeOut != requiredType )
_emitTypeConversionSuffix(shaderContext, requiredType, requiredTypeOut);
}
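// The conversion prefix/suffix pair wraps an expression in a cast or bit-cast, e.g.
// "floatBitsToInt(" ... ")" when a float expression must be consumed as a signed integer.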
void _emitTypeConversionPrefix(LatteDecompilerShaderContext* shaderContext, sint32 sourceType, sint32 destinationType)
{
if( sourceType == destinationType )
return;
StringBuf* src = shaderContext->shaderSource;
if (sourceType == LATTE_DECOMPILER_DTYPE_FLOAT && destinationType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add("floatBitsToInt(");
else if (sourceType == LATTE_DECOMPILER_DTYPE_FLOAT && destinationType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT)
src->add("floatBitsToUint(");
else if( sourceType == LATTE_DECOMPILER_DTYPE_SIGNED_INT && destinationType == LATTE_DECOMPILER_DTYPE_FLOAT )
src->add("intBitsToFloat(");
else if( sourceType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT && destinationType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
src->add("int(");
else if( sourceType == LATTE_DECOMPILER_DTYPE_SIGNED_INT && destinationType == LATTE_DECOMPILER_DTYPE_UNSIGNED_INT )
src->add("uint(");
else
cemu_assert_debug(false);
}
void _emitTypeConversionSuffix(LatteDecompilerShaderContext* shaderContext, sint32 sourceType, sint32 destinationType)
{
if( sourceType == destinationType )
return;
StringBuf* src = shaderContext->shaderSource;
src->add(")");
}
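// Emits a simple binary ALU operation of the form <output> = <convert>(<op0> <operandStr> <op1>);
// with both inputs read as TDataType and the result converted to the instruction's output type.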
template<int TDataType>
void _emitALUOperationBinary(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluInstruction, const char* operandStr)
{
StringBuf* src = shaderContext->shaderSource;
sint32 outputType = _getALUInstructionOutputDataType(shaderContext, aluInstruction);
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, TDataType, outputType);
_emitOperandInputCode(shaderContext, aluInstruction, 0, TDataType);
src->add((char*)operandStr);
_emitOperandInputCode(shaderContext, aluInstruction, 1, TDataType);
_emitTypeConversionSuffix(shaderContext, TDataType, outputType);
src->add(";" _CRLF);
}
static bool _isSameGPROperand(LatteDecompilerALUInstruction* aluInstruction, sint32 opIndexA, sint32 opIndexB)
{
if (aluInstruction->sourceOperand[opIndexA].sel != aluInstruction->sourceOperand[opIndexB].sel)
return false;
if (!GPU7_ALU_SRC_IS_GPR(aluInstruction->sourceOperand[opIndexA].sel))
return false;
if (aluInstruction->sourceOperand[opIndexA].chan != aluInstruction->sourceOperand[opIndexB].chan)
return false;
if (aluInstruction->sourceOperand[opIndexA].abs != aluInstruction->sourceOperand[opIndexB].abs)
return false;
if (aluInstruction->sourceOperand[opIndexA].neg != aluInstruction->sourceOperand[opIndexB].neg)
return false;
if (aluInstruction->sourceOperand[opIndexA].rel != aluInstruction->sourceOperand[opIndexB].rel)
return false;
return true;
}
static bool _operandHasModifiers(LatteDecompilerALUInstruction* aluInstruction, sint32 opIndex)
{
return aluInstruction->sourceOperand[opIndex].abs != 0 || aluInstruction->sourceOperand[opIndex].neg != 0;
}
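// Emits code for a single OP2-encoded ALU instruction. Each supported opcode maps to an equivalent
// GLSL expression; unsupported opcodes emit a marker string and flag the shader as having an error.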
void _emitALUOP2InstructionCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, LatteDecompilerALUInstruction* aluInstruction)
{
StringBuf* src = shaderContext->shaderSource;
sint32 outputType = _getALUInstructionOutputDataType(shaderContext, aluInstruction); // data type of output
if( aluInstruction->opcode == ALU_OP2_INST_MOV )
{
bool requiresFloatMove = false;
requiresFloatMove = aluInstruction->sourceOperand[0].abs != 0 || aluInstruction->sourceOperand[0].neg != 0;
if( requiresFloatMove )
{
// abs/neg operations are applied to source operand, do float based move
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitOperandInputCode(shaderContext, aluInstruction, 0, outputType);
src->add(";" _CRLF);
}
}
else if( aluInstruction->opcode == ALU_OP2_INST_MOVA_FLOOR )
{
cemu_assert_debug(aluInstruction->writeMask == 0);
cemu_assert_debug(aluInstruction->omod == 0);
src->add("tempResultf = ");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(";" _CRLF);
src->add("tempResultf = floor(tempResultf);" _CRLF);
src->add("tempResultf = clamp(tempResultf, -256.0, 255.0);" _CRLF);
// set AR
if( aluInstruction->destElem == 0 )
src->add("ARi.x = int(tempResultf);" _CRLF);
else if( aluInstruction->destElem == 1 )
src->add("ARi.y = int(tempResultf);" _CRLF);
else if( aluInstruction->destElem == 2 )
src->add("ARi.z = int(tempResultf);" _CRLF);
else
src->add("ARi.w = int(tempResultf);" _CRLF);
// set output
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
if( outputType != LATTE_DECOMPILER_DTYPE_SIGNED_INT )
debugBreakpoint(); // todo
src->add("floatBitsToInt(tempResultf)");
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_MOVA_INT )
{
cemu_assert_debug(aluInstruction->writeMask == 0);
cemu_assert_debug(aluInstruction->omod == 0);
src->add("tempResulti = ");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(";" _CRLF);
src->add("tempResulti = clamp(tempResulti, -256, 255);" _CRLF);
// set AR
if( aluInstruction->destElem == 0 )
src->add("ARi.x = tempResulti;" _CRLF);
else if( aluInstruction->destElem == 1 )
src->add("ARi.y = tempResulti;" _CRLF);
else if( aluInstruction->destElem == 2 )
src->add("ARi.z = tempResulti;" _CRLF);
else
src->add("ARi.w = tempResulti;" _CRLF);
// set output
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
if( outputType != LATTE_DECOMPILER_DTYPE_SIGNED_INT )
debugBreakpoint(); // todo
src->add("tempResulti");
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_ADD )
{
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_FLOAT>(shaderContext, aluInstruction, " + ");
}
else if( aluInstruction->opcode == ALU_OP2_INST_MUL )
{
// 0*anything is always 0
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
// if any operand is a non-zero literal or constant we can use standard multiplication
bool useDefaultMul = false;
if (GPU7_ALU_SRC_IS_CONST_0F(aluInstruction->sourceOperand[0].sel) || GPU7_ALU_SRC_IS_CONST_0F(aluInstruction->sourceOperand[1].sel))
{
// result is always zero
src->add("0.0");
}
else
{
// multiply
if (GPU7_ALU_SRC_IS_LITERAL(aluInstruction->sourceOperand[0].sel) || GPU7_ALU_SRC_IS_LITERAL(aluInstruction->sourceOperand[1].sel) ||
GPU7_ALU_SRC_IS_ANY_CONST(aluInstruction->sourceOperand[0].sel) || GPU7_ALU_SRC_IS_ANY_CONST(aluInstruction->sourceOperand[1].sel))
{
useDefaultMul = true;
}
if (shaderContext->options->strictMul && useDefaultMul == false)
{
src->add("mul_nonIEEE(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(", ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
}
else
{
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(" * ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
}
}
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_MUL_IEEE )
{
// 0*anything according to IEEE rules
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_FLOAT>(shaderContext, aluInstruction, " * ");
}
else if (aluInstruction->opcode == ALU_OP2_INST_RECIP_IEEE)
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("1.0");
src->add(" / ");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if (aluInstruction->opcode == ALU_OP2_INST_RECIP_FF)
{
// untested (BotW bombs)
src->add("tempResultf = 1.0 / (");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(");" _CRLF);
// INF becomes 0.0
src->add("if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) == 0 ) tempResultf = 0.0;" _CRLF);
// -INF becomes -0.0
src->add("else if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) != 0 ) tempResultf = -0.0;" _CRLF);
// assign result to output
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("tempResultf");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_RECIPSQRT_IEEE ||
aluInstruction->opcode == ALU_OP2_INST_RECIPSQRT_CLAMPED ||
aluInstruction->opcode == ALU_OP2_INST_RECIPSQRT_FF )
{
// todo: This should be correct but testing is needed
src->add("tempResultf = 1.0 / sqrt(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(");" _CRLF);
if (aluInstruction->opcode == ALU_OP2_INST_RECIPSQRT_CLAMPED)
{
// note: if( -INF < 0.0 ) does not resolve to true
src->add("if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) != 0 ) tempResultf = -3.40282347E+38F;" _CRLF);
src->add("else if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) == 0 ) tempResultf = 3.40282347E+38F;" _CRLF);
}
else if (aluInstruction->opcode == ALU_OP2_INST_RECIPSQRT_FF)
{
// untested (BotW bombs)
src->add("if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) != 0 ) tempResultf = -0.0;" _CRLF);
src->add("else if( isinf(tempResultf) == true && (floatBitsToInt(tempResultf)&0x80000000) == 0 ) tempResultf = 0.0;" _CRLF);
}
// assign result to output
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("tempResultf");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_MAX ||
aluInstruction->opcode == ALU_OP2_INST_MIN ||
aluInstruction->opcode == ALU_OP2_INST_MAX_DX10 ||
aluInstruction->opcode == ALU_OP2_INST_MIN_DX10 )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
if( aluInstruction->opcode == ALU_OP2_INST_MAX )
src->add("max");
else if( aluInstruction->opcode == ALU_OP2_INST_MIN )
src->add("min");
else if (aluInstruction->opcode == ALU_OP2_INST_MAX_DX10)
src->add("max");
else if (aluInstruction->opcode == ALU_OP2_INST_MIN_DX10)
src->add("min");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(", ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_FLOOR ||
aluInstruction->opcode == ALU_OP2_INST_FRACT ||
aluInstruction->opcode == ALU_OP2_INST_TRUNC )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
if( aluInstruction->opcode == ALU_OP2_INST_FLOOR )
src->add("floor");
else if( aluInstruction->opcode == ALU_OP2_INST_FRACT )
src->add("fract");
else
src->add("trunc");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_LOG_CLAMPED ||
aluInstruction->opcode == ALU_OP2_INST_LOG_IEEE )
{
src->add("tempResultf = max(0.0, ");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(");" _CRLF);
src->add("tempResultf = log2(tempResultf);" _CRLF);
if( aluInstruction->opcode == ALU_OP2_INST_LOG_CLAMPED )
{
src->add("if( isinf(tempResultf) == true ) tempResultf = -3.40282347E+38F;" _CRLF);
}
// assign result to output
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("tempResultf");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_RNDNE )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("roundEven");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_EXP_IEEE )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("exp2");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SQRT_IEEE )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("sqrt");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SIN ||
aluInstruction->opcode == ALU_OP2_INST_COS )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
if( aluInstruction->opcode == ALU_OP2_INST_SIN )
src->add("sin");
else
src->add("cos");
src->add("((");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")/0.1591549367)");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_FLT_TO_INT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("int");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_FLT_TO_UINT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_UNSIGNED_INT, outputType);
src->add("uint");
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_UNSIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_INT_TO_FLOAT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("float(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_UINT_TO_FLOAT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("float(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_UNSIGNED_INT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if (aluInstruction->opcode == ALU_OP2_INST_AND_INT)
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " & ");
else if (aluInstruction->opcode == ALU_OP2_INST_OR_INT)
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " | ");
else if (aluInstruction->opcode == ALU_OP2_INST_XOR_INT)
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " ^ ");
else if( aluInstruction->opcode == ALU_OP2_INST_NOT_INT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("~(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_ADD_INT )
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " + ");
else if( aluInstruction->opcode == ALU_OP2_INST_MAX_INT || aluInstruction->opcode == ALU_OP2_INST_MIN_INT )
{
// not verified
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
if( aluInstruction->opcode == ALU_OP2_INST_MAX_INT )
src->add(" = max(");
else
src->add(" = min(");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(", ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(");" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SUB_INT )
{
// note: The AMD doc says src1 is on the left side but tests indicate otherwise. It's src0 - src1.
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " - ");
}
else if (aluInstruction->opcode == ALU_OP2_INST_MULLO_INT)
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " * ");
else if (aluInstruction->opcode == ALU_OP2_INST_MULLO_UINT)
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_UNSIGNED_INT>(shaderContext, aluInstruction, " * ");
else if( aluInstruction->opcode == ALU_OP2_INST_LSHL_INT )
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " << ");
else if( aluInstruction->opcode == ALU_OP2_INST_LSHR_INT )
_emitALUOperationBinary<LATTE_DECOMPILER_DTYPE_SIGNED_INT>(shaderContext, aluInstruction, " >> ");
else if( aluInstruction->opcode == ALU_OP2_INST_ASHR_INT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(" >> ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SETGT ||
aluInstruction->opcode == ALU_OP2_INST_SETGE ||
aluInstruction->opcode == ALU_OP2_INST_SETNE ||
aluInstruction->opcode == ALU_OP2_INST_SETE )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
if( aluInstruction->opcode == ALU_OP2_INST_SETGT )
src->add(" > ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGE )
src->add(" >= ");
else if (aluInstruction->opcode == ALU_OP2_INST_SETNE)
src->add(" != ");
else if (aluInstruction->opcode == ALU_OP2_INST_SETE)
src->add(" == ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")?1.0:0.0");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SETGT_DX10 ||
aluInstruction->opcode == ALU_OP2_INST_SETE_DX10 ||
aluInstruction->opcode == ALU_OP2_INST_SETNE_DX10 ||
aluInstruction->opcode == ALU_OP2_INST_SETGE_DX10 )
{
if( aluInstruction->omod != 0 )
debugBreakpoint();
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("((");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
if( aluInstruction->opcode == ALU_OP2_INST_SETE_DX10 )
src->add(" == ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETNE_DX10 )
src->add(" != ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGT_DX10 )
src->add(" > ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGE_DX10 )
src->add(" >= ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")?-1:0)");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";");
src->add(_CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SETE_INT ||
aluInstruction->opcode == ALU_OP2_INST_SETNE_INT ||
aluInstruction->opcode == ALU_OP2_INST_SETGT_INT ||
aluInstruction->opcode == ALU_OP2_INST_SETGE_INT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
if( aluInstruction->opcode == ALU_OP2_INST_SETE_INT )
src->add(" == ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETNE_INT )
src->add(" != ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGT_INT )
src->add(" > ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGE_INT )
src->add(" >= ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")?-1:0");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_SETGE_UINT ||
aluInstruction->opcode == ALU_OP2_INST_SETGT_UINT )
{
// todo: Unsure if the result is unsigned or signed
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_UNSIGNED_INT);
if( aluInstruction->opcode == ALU_OP2_INST_SETGE_UINT )
src->add(" >= ");
else if( aluInstruction->opcode == ALU_OP2_INST_SETGT_UINT )
src->add(" > ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_UNSIGNED_INT);
src->add(")?int(0xFFFFFFFF):int(0x0)");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_PRED_SETGT ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETGE ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETE ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETNE ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETNE_INT ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETE_INT ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETGE_INT ||
aluInstruction->opcode == ALU_OP2_INST_PRED_SETGT_INT )
{
cemu_assert_debug(aluInstruction->writeMask == 0);
bool isIntPred = (aluInstruction->opcode == ALU_OP2_INST_PRED_SETNE_INT) || (aluInstruction->opcode == ALU_OP2_INST_PRED_SETE_INT) || (aluInstruction->opcode == ALU_OP2_INST_PRED_SETGE_INT) || (aluInstruction->opcode == ALU_OP2_INST_PRED_SETGT_INT);
src->add("predResult");
src->add(" = (");
_emitOperandInputCode(shaderContext, aluInstruction, 0, isIntPred?LATTE_DECOMPILER_DTYPE_SIGNED_INT:LATTE_DECOMPILER_DTYPE_FLOAT);
if (aluInstruction->opcode == ALU_OP2_INST_PRED_SETE || aluInstruction->opcode == ALU_OP2_INST_PRED_SETE_INT)
src->add(" == ");
else if (aluInstruction->opcode == ALU_OP2_INST_PRED_SETGT || aluInstruction->opcode == ALU_OP2_INST_PRED_SETGT_INT)
src->add(" > ");
else if (aluInstruction->opcode == ALU_OP2_INST_PRED_SETGE || aluInstruction->opcode == ALU_OP2_INST_PRED_SETGE_INT)
src->add(" >= ");
else if (aluInstruction->opcode == ALU_OP2_INST_PRED_SETNE || aluInstruction->opcode == ALU_OP2_INST_PRED_SETNE_INT)
src->add(" != ");
else
cemu_assert_debug(false);
_emitOperandInputCode(shaderContext, aluInstruction, 1, isIntPred?LATTE_DECOMPILER_DTYPE_SIGNED_INT:LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(");" _CRLF);
// handle result of predicate instruction based on current ALU clause type
if( cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE )
{
src->addFmt("{} = predResult;" _CRLF, _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth));
src->addFmt("{} = predResult == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth));
}
else if( cfInstruction->type == GPU7_CF_INST_ALU_BREAK )
{
// leave current loop
src->add("if( predResult == false ) break;" _CRLF);
}
else
cemu_assert_debug(false);
}
else if( aluInstruction->opcode == ALU_OP2_INST_KILLE_INT ||
aluInstruction->opcode == ALU_OP2_INST_KILLNE_INT ||
aluInstruction->opcode == ALU_OP2_INST_KILLGT_INT)
{
src->add("if( ");
src->add(" (");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
if( aluInstruction->opcode == ALU_OP2_INST_KILLE_INT )
src->add(" == ");
else if (aluInstruction->opcode == ALU_OP2_INST_KILLNE_INT)
src->add(" != ");
else if (aluInstruction->opcode == ALU_OP2_INST_KILLGT_INT)
src->add(" > ");
else
debugBreakpoint();
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")");
src->add(") discard;");
src->add(_CRLF);
}
else if( aluInstruction->opcode == ALU_OP2_INST_KILLGT ||
aluInstruction->opcode == ALU_OP2_INST_KILLGE ||
aluInstruction->opcode == ALU_OP2_INST_KILLE )
{
src->add("if( ");
src->add(" (");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
if( aluInstruction->opcode == ALU_OP2_INST_KILLGT )
src->add(" > ");
else if( aluInstruction->opcode == ALU_OP2_INST_KILLGE )
src->add(" >= ");
else if( aluInstruction->opcode == ALU_OP2_INST_KILLE )
src->add(" == ");
else
debugBreakpoint();
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
src->add(") discard;");
src->add(_CRLF);
}
else
{
src->add("Unsupported instruction;" _CRLF);
debug_printf("Unsupported ALU op2 instruction 0x%x\n", aluInstruction->opcode);
shaderContext->shader->hasError = true;
}
}
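// Emits code for a single OP3-encoded ALU instruction (MULADD variants and conditional move/select forms).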
void _emitALUOP3InstructionCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, LatteDecompilerALUInstruction* aluInstruction)
{
StringBuf* src = shaderContext->shaderSource;
cemu_assert_debug(aluInstruction->destRel == 0); // todo
sint32 outputType = _getALUInstructionOutputDataType(shaderContext, aluInstruction);
/* check for common no-op or mov-like instructions */
if (aluInstruction->opcode == ALU_OP3_INST_CMOVGE ||
aluInstruction->opcode == ALU_OP3_INST_CMOVE ||
aluInstruction->opcode == ALU_OP3_INST_CMOVGT ||
aluInstruction->opcode == ALU_OP3_INST_CNDE_INT ||
aluInstruction->opcode == ALU_OP3_INST_CNDGT_INT ||
aluInstruction->opcode == ALU_OP3_INST_CMOVGE_INT)
{
if (_isSameGPROperand(aluInstruction, 1, 2) && !_operandHasModifiers(aluInstruction, 1))
{
// the condition is irrelevant as both operands are the same
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, outputType);
src->add(";" _CRLF);
return;
}
}
/* generic handlers */
if( aluInstruction->opcode == ALU_OP3_INST_MULADD ||
aluInstruction->opcode == ALU_OP3_INST_MULADD_D2 ||
aluInstruction->opcode == ALU_OP3_INST_MULADD_M2 ||
aluInstruction->opcode == ALU_OP3_INST_MULADD_M4 ||
aluInstruction->opcode == ALU_OP3_INST_MULADD_IEEE )
{
		// todo: Is the difference between MULADD and MULADD_IEEE that the former applies the 0*anything=0 rule, similar to MUL vs. MUL_IEEE?
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
		if (aluInstruction->opcode != ALU_OP3_INST_MULADD) // avoid unnecessary parentheses to improve code readability slightly
src->add("(");
bool useDefaultMul = false;
if (GPU7_ALU_SRC_IS_LITERAL(aluInstruction->sourceOperand[0].sel) || GPU7_ALU_SRC_IS_LITERAL(aluInstruction->sourceOperand[1].sel) ||
GPU7_ALU_SRC_IS_ANY_CONST(aluInstruction->sourceOperand[0].sel) || GPU7_ALU_SRC_IS_ANY_CONST(aluInstruction->sourceOperand[1].sel))
{
useDefaultMul = true;
}
if (aluInstruction->opcode == ALU_OP3_INST_MULADD_IEEE)
useDefaultMul = true;
if (shaderContext->options->strictMul && useDefaultMul == false)
{
src->add("mul_nonIEEE(");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
}
else
{
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(" * ");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
}
src->add(" + ");
_emitOperandInputCode(shaderContext, aluInstruction, 2, LATTE_DECOMPILER_DTYPE_FLOAT);
if(aluInstruction->opcode != ALU_OP3_INST_MULADD)
src->add(")");
if( aluInstruction->opcode == ALU_OP3_INST_MULADD_D2 )
src->add("/2.0");
else if( aluInstruction->opcode == ALU_OP3_INST_MULADD_M2 )
src->add("*2.0");
else if( aluInstruction->opcode == ALU_OP3_INST_MULADD_M4 )
src->add("*4.0");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if(aluInstruction->opcode == ALU_OP3_INST_CNDE_INT || aluInstruction->opcode == ALU_OP3_INST_CNDGT_INT || aluInstruction->opcode == ALU_OP3_INST_CMOVGE_INT)
{
bool requiresFloatResult = (aluInstruction->sourceOperand[1].neg != 0) || (aluInstruction->sourceOperand[2].neg != 0);
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, requiresFloatResult?LATTE_DECOMPILER_DTYPE_FLOAT:LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("((");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
if (aluInstruction->opcode == ALU_OP3_INST_CNDE_INT)
src->add(" == ");
else if (aluInstruction->opcode == ALU_OP3_INST_CNDGT_INT)
src->add(" > ");
else if (aluInstruction->opcode == ALU_OP3_INST_CMOVGE_INT)
src->add(" >= ");
src->add("0)?(");
_emitOperandInputCode(shaderContext, aluInstruction, 1, requiresFloatResult?LATTE_DECOMPILER_DTYPE_FLOAT:LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add("):(");
_emitOperandInputCode(shaderContext, aluInstruction, 2, requiresFloatResult?LATTE_DECOMPILER_DTYPE_FLOAT:LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add("))");
_emitTypeConversionSuffix(shaderContext, requiresFloatResult?LATTE_DECOMPILER_DTYPE_FLOAT:LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else if( aluInstruction->opcode == ALU_OP3_INST_CMOVGE ||
aluInstruction->opcode == ALU_OP3_INST_CMOVE ||
aluInstruction->opcode == ALU_OP3_INST_CMOVGT )
{
_emitInstructionOutputVariableName(shaderContext, aluInstruction);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("((");
_emitOperandInputCode(shaderContext, aluInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
if (aluInstruction->opcode == ALU_OP3_INST_CMOVE)
src->add(" == ");
else if (aluInstruction->opcode == ALU_OP3_INST_CMOVGE)
src->add(" >= ");
else if (aluInstruction->opcode == ALU_OP3_INST_CMOVGT)
src->add(" > ");
src->add("0.0)?(");
_emitOperandInputCode(shaderContext, aluInstruction, 1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add("):(");
_emitOperandInputCode(shaderContext, aluInstruction, 2, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add("))");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else
{
src->add("Unsupported instruction;" _CRLF);
debug_printf("Unsupported ALU op3 instruction 0x%x\n", aluInstruction->opcode);
shaderContext->shader->hasError = true;
}
}
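// Emits code for a reduction operation (DOT4/DOT4_IEEE or CUBE). Reduction instructions occupy all four
// ALU units of a group, so aluRedcInstruction[0..3] hold the x/y/z/w lanes of the same group.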
void _emitALUReductionInstructionCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction* aluRedcInstruction[4])
{
StringBuf* src = shaderContext->shaderSource;
if( aluRedcInstruction[0]->isOP3 == false && (aluRedcInstruction[0]->opcode == ALU_OP2_INST_DOT4 || aluRedcInstruction[0]->opcode == ALU_OP2_INST_DOT4_IEEE) )
{
// todo: Figure out and implement the difference between normal DOT4 and DOT4_IEEE
sint32 outputType = _getALUInstructionOutputDataType(shaderContext, aluRedcInstruction[0]);
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[0]);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
// dot(vec4(op0),vec4(op1))
src->add("dot(vec4(");
_emitOperandInputCode(shaderContext, aluRedcInstruction[0], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[1], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[2], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[3], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add("),vec4(");
_emitOperandInputCode(shaderContext, aluRedcInstruction[0], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[1], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[2], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[3], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add("))");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
}
else if( aluRedcInstruction[0]->isOP3 == false && (aluRedcInstruction[0]->opcode == ALU_OP2_INST_CUBE) )
{
/*
* How the CUBE instruction works (guessed mostly, based on DirectX/OpenGL spec):
Input: vec4, 3d direction vector (can be unnormalized) + w component (which can be ignored, since it only scales the vector but does not affect the direction)
First we figure out the major axis (closest axis-aligned vector). There are six possible vectors:
+rx 0
-rx 1
+ry 2
-ry 3
+rz 4
-rz 5
The major axis vector is calculated by looking at the largest (absolute) 3d vector component and then setting the other components to 0.0
The value that remains in the axis vector is referred to as 'MajorAxis' by the AMD documentation.
The S,T coordinates are taken from the other two components.
Example: -0.5,0.2,0.4 -> -rx -> -0.5,0.0,0.0 MajorAxis: -0.5, S: 0.2 T: 0.4
The CUBE reduction instruction requires a specific mapping for the input vector:
src0 = Rn.zzxy
src1 = Rn.yxzz
It's probably related to the way the instruction works internally?
If we look at the individual components per ALU unit:
z y -> Compare y/z
z x -> Compare x/z
x z -> Compare x/z
y z -> Compare y/z
*/
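		// The emitted code calls a redcCUBE() helper (assumed to be defined in the shader's emitted header code)
		// roughly as: redcCUBE(vec4(op0 lanes), vec4(op1 lanes), cubeMapSTM, cubeMapFaceId);
		// and then copies S/T/MajorAxis/FaceId from cubeMapSTM/cubeMapFaceId into the four destination elements.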
sint32 outputType;
src->add("redcCUBE(");
src->add("vec4(");
_emitOperandInputCode(shaderContext, aluRedcInstruction[0], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[1], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[2], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[3], 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add("),");
src->add("vec4(");
_emitOperandInputCode(shaderContext, aluRedcInstruction[0], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[1], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[2], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluRedcInstruction[3], 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add("),");
src->add("cubeMapSTM,cubeMapFaceId);" _CRLF);
// dst.X (S)
outputType = _getALUInstructionOutputDataType(shaderContext, aluRedcInstruction[0]);
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[0]);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("cubeMapSTM.x");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
// dst.Y (T)
outputType = _getALUInstructionOutputDataType(shaderContext, aluRedcInstruction[1]);
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[1]);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("cubeMapSTM.y");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
// dst.Z (MajorAxis)
outputType = _getALUInstructionOutputDataType(shaderContext, aluRedcInstruction[2]);
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[2]);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add("cubeMapSTM.z");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, outputType);
src->add(";" _CRLF);
// dst.W (FaceId)
outputType = _getALUInstructionOutputDataType(shaderContext, aluRedcInstruction[3]);
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[3]);
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add("cubeMapFaceId");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, outputType);
src->add(";" _CRLF);
}
else
cemu_assert_unimplemented();
}
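// Determines the extent of the ALU group starting at startIndex and creates the backup temporaries
// used by _emitALURegisterInputAccessCode above (emitted as backupReg<n>) for GPR elements that the
// group overwrites while they are still needed as inputs.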
void _emitALUClauseRegisterBackupCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, sint32 startIndex)
{
sint32 instructionGroupIndex = cfInstruction->instructionsALU[startIndex].instructionGroupIndex;
size_t groupSize = 1;
while ((startIndex + groupSize) < cfInstruction->instructionsALU.size())
{
if (instructionGroupIndex != cfInstruction->instructionsALU[startIndex + groupSize].instructionGroupIndex)
break;
groupSize++;
}
shaderContext->aluPVPSState->CreateGPRTemporaries(shaderContext, { cfInstruction->instructionsALU.data() + startIndex, groupSize });
}
/*
bool _isPVUsedInNextGroup(LatteDecompilerCFInstruction* cfInstruction, sint32 startIndex, sint32 pvUnit)
{
sint32 currentGroupIndex = cfInstruction->instructionsALU[startIndex].instructionGroupIndex;
for (sint32 i = startIndex + 1; i < (sint32)cfInstruction->instructionsALU.size(); i++)
{
LatteDecompilerALUInstruction& aluInstructionItr = cfInstruction->instructionsALU[i];
if(aluInstructionItr.instructionGroupIndex == currentGroupIndex )
continue;
if ((sint32)aluInstructionItr.instructionGroupIndex > currentGroupIndex + 1)
return false;
// check OP code type
if (aluInstructionItr.isOP3)
{
// op0
if (GPU7_ALU_SRC_IS_PV(aluInstructionItr.sourceOperand[0].sel))
{
uint32 chan = aluInstructionItr.sourceOperand[0].chan;
if (pvUnit == chan)
return true;
}
// op1
if (GPU7_ALU_SRC_IS_PV(aluInstructionItr.sourceOperand[1].sel))
{
uint32 chan = aluInstructionItr.sourceOperand[1].chan;
if (pvUnit == chan)
return true;
}
// op2
if (GPU7_ALU_SRC_IS_PV(aluInstructionItr.sourceOperand[2].sel))
{
uint32 chan = aluInstructionItr.sourceOperand[2].chan;
if (pvUnit == chan)
return true;
}
}
else
{
// op0
if (GPU7_ALU_SRC_IS_PV(aluInstructionItr.sourceOperand[0].sel))
{
uint32 chan = aluInstructionItr.sourceOperand[0].chan;
if (pvUnit == chan)
return true;
}
// op1
if (GPU7_ALU_SRC_IS_PV(aluInstructionItr.sourceOperand[1].sel))
{
uint32 chan = aluInstructionItr.sourceOperand[1].chan;
if (pvUnit == chan)
return true;
}
// todo: Not all operations use both operands
}
}
return false;
}
*/
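// Emits a vec3/ivec3 constructor built from three operands, each converted to the requested data type.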
void _emitVec3(LatteDecompilerShaderContext* shaderContext, uint32 dataType, LatteDecompilerALUInstruction* aluInst0, sint32 opIdx0, LatteDecompilerALUInstruction* aluInst1, sint32 opIdx1, LatteDecompilerALUInstruction* aluInst2, sint32 opIdx2)
{
StringBuf* src = shaderContext->shaderSource;
if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
{
src->add("vec3(");
_emitOperandInputCode(shaderContext, aluInst0, opIdx0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluInst1, opIdx1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitOperandInputCode(shaderContext, aluInst2, opIdx2, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
}
else if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
{
src->add("ivec3(");
_emitOperandInputCode(shaderContext, aluInst0, opIdx0, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(",");
_emitOperandInputCode(shaderContext, aluInst1, opIdx1, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(",");
_emitOperandInputCode(shaderContext, aluInst2, opIdx2, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")");
}
else
cemu_assert_unimplemented();
}
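// Emits the destination part of a grouped GPR assignment, e.g. "<gprVar>.xyz = " for a three-element write
// (element letters taken from the individual instructions' destination elements).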
void _emitGPRVectorAssignment(LatteDecompilerShaderContext* shaderContext, LatteDecompilerALUInstruction** aluInstructions, sint32 count)
{
StringBuf* src = shaderContext->shaderSource;
// output var name (GPR)
src->add(_getRegisterVarName(shaderContext, aluInstructions[0]->destGpr, -1));
src->add(".");
for (sint32 f = 0; f < count; f++)
{
src->add(_getElementStrByIndex(aluInstructions[f]->destElem));
}
src->add(" = ");
}
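// Emits the code for an entire ALU clause: walks the instruction list, tracks PV/PS state per group,
// backs up group inputs, dispatches to the OP2/OP3/reduction handlers and finally applies the
// output modifier (omod) and clamp to each result.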
void _emitALUClauseCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
ALUClauseTemporariesState pvpsState;
shaderContext->aluPVPSState = &pvpsState;
StringBuf* src = shaderContext->shaderSource;
LatteDecompilerALUInstruction* aluRedcInstruction[4];
size_t groupStartIndex = 0;
for(size_t i=0; i<cfInstruction->instructionsALU.size(); i++)
{
LatteDecompilerALUInstruction& aluInstruction = cfInstruction->instructionsALU[i];
if( aluInstruction.indexInGroup == 0 )
{
src->addFmt("// {}" _CRLF, aluInstruction.instructionGroupIndex);
// apply PV/PS updates for previous group
if (i > 0)
{
pvpsState.TrackGroupOutputPVPS(shaderContext, cfInstruction->instructionsALU.data() + groupStartIndex, i - groupStartIndex);
}
groupStartIndex = i;
			// back up GPR elements that this group overwrites while later instructions in the same group still read them
_emitALUClauseRegisterBackupCode(shaderContext, cfInstruction, i);
}
// detect reduction instructions and use a special handler
bool isReductionOperation = _isReductionInstruction(&aluInstruction);
if( isReductionOperation )
{
cemu_assert_debug((i + 4) <= cfInstruction->instructionsALU.size());
aluRedcInstruction[0] = &aluInstruction;
aluRedcInstruction[1] = &cfInstruction->instructionsALU[i + 1];
aluRedcInstruction[2] = &cfInstruction->instructionsALU[i + 2];
aluRedcInstruction[3] = &cfInstruction->instructionsALU[i + 3];
if( aluRedcInstruction[0]->isOP3 != aluRedcInstruction[1]->isOP3 || aluRedcInstruction[1]->isOP3 != aluRedcInstruction[2]->isOP3 || aluRedcInstruction[2]->isOP3 != aluRedcInstruction[3]->isOP3 )
debugBreakpoint();
if( aluRedcInstruction[0]->opcode != aluRedcInstruction[1]->opcode || aluRedcInstruction[1]->opcode != aluRedcInstruction[2]->opcode || aluRedcInstruction[2]->opcode != aluRedcInstruction[3]->opcode )
debugBreakpoint();
if( aluRedcInstruction[0]->omod != aluRedcInstruction[1]->omod || aluRedcInstruction[1]->omod != aluRedcInstruction[2]->omod || aluRedcInstruction[2]->omod != aluRedcInstruction[3]->omod )
debugBreakpoint();
if( aluRedcInstruction[0]->destClamp != aluRedcInstruction[1]->destClamp || aluRedcInstruction[1]->destClamp != aluRedcInstruction[2]->destClamp || aluRedcInstruction[2]->destClamp != aluRedcInstruction[3]->destClamp )
debugBreakpoint();
_emitALUReductionInstructionCode(shaderContext, aluRedcInstruction);
i += 3; // skip the instructions that are part of the reduction operation
}
else /* not a reduction operation */
{
if( aluInstruction.isOP3 )
{
// op3
_emitALUOP3InstructionCode(shaderContext, cfInstruction, &aluInstruction);
}
else
{
// op2
if( aluInstruction.opcode == ALU_OP2_INST_NOP )
continue; // skip NOP instruction
_emitALUOP2InstructionCode(shaderContext, cfInstruction, &aluInstruction);
}
}
// handle omod
sint32 outputDataType = _getALUInstructionOutputDataType(shaderContext, &aluInstruction);
if( aluInstruction.omod != ALU_OMOD_NONE )
{
if( outputDataType == LATTE_DECOMPILER_DTYPE_FLOAT )
{
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
if( aluInstruction.omod == ALU_OMOD_MUL2 )
src->add(" *= 2.0;" _CRLF);
else if( aluInstruction.omod == ALU_OMOD_MUL4 )
src->add(" *= 4.0;" _CRLF);
else if( aluInstruction.omod == ALU_OMOD_DIV2 )
src->add(" /= 2.0;" _CRLF);
}
else if( outputDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(" = ");
src->add("floatBitsToInt(intBitsToFloat(");
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(")");
if( aluInstruction.omod == ALU_OMOD_MUL2 )
src->add(" * 2.0");
else if( aluInstruction.omod == ALU_OMOD_MUL4 )
src->add(" * 4.0");
else if( aluInstruction.omod == ALU_OMOD_DIV2 )
src->add(" / 2.0");
src->add(");" _CRLF);
}
else
{
cemu_assert_unimplemented();
}
}
// handle clamp
if( aluInstruction.destClamp != 0 )
{
if( outputDataType == LATTE_DECOMPILER_DTYPE_FLOAT )
{
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(" = clamp(");
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(", 0.0, 1.0);" _CRLF);
}
else if( outputDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(" = clampFI32(");
_emitInstructionOutputVariableName(shaderContext, &aluInstruction);
src->add(");" _CRLF);
}
else
{
cemu_assert_unimplemented();
}
}
// handle result broadcasting for reduction instructions
if( isReductionOperation )
{
// reduction operations set all four PV components (todo: needs further research. According to AMD docs DOT4 only sets PV.x? Update: unlike DOT4, CUBE appears to set all PV elements according to their GPR outputs)
if( aluRedcInstruction[0]->opcode == ALU_OP2_INST_CUBE )
{
// CUBE
for (sint32 f = 0; f < 4; f++)
{
if (aluRedcInstruction[f]->writeMask != 0)
continue;
_emitInstructionPVPSOutputVariableName(shaderContext, aluRedcInstruction[f]);
src->add(" = ");
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[0]);
src->add(";" _CRLF);
}
}
else
{
// DOT4, DOT4_IEEE, etc.
// reduction operation result is only set for output in redc[0], we also need to update redc[1] to redc[3]
for(sint32 f=0; f<4; f++)
{
if( aluRedcInstruction[f]->writeMask == 0 )
_emitInstructionPVPSOutputVariableName(shaderContext, aluRedcInstruction[f]);
else
{
if (f == 0)
continue;
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[f]);
}
src->add(" = ");
_emitInstructionOutputVariableName(shaderContext, aluRedcInstruction[0]);
src->add(";" _CRLF);
}
}
}
}
shaderContext->aluPVPSState = nullptr;
}
/*
* Emits code to access one component (xyzw) of the texture coordinate input vector
*/
void _emitTEXSampleCoordInputComponent(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction, sint32 componentIndex, sint32 interpretSrcAsType)
{
cemu_assert(componentIndex >= 0 && componentIndex < 4);
cemu_assert_debug(interpretSrcAsType == LATTE_DECOMPILER_DTYPE_SIGNED_INT || interpretSrcAsType == LATTE_DECOMPILER_DTYPE_FLOAT);
StringBuf* src = shaderContext->shaderSource;
sint32 elementSel = texInstruction->textureFetch.srcSel[componentIndex];
if (elementSel < 4)
{
_emitRegisterChannelAccessCode(shaderContext, texInstruction->srcGpr, elementSel, interpretSrcAsType);
return;
}
const char* resultElemTable[4] = {"x","y","z","w"};
if(interpretSrcAsType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
{
if( elementSel == 4 )
src->add("floatBitsToInt(0.0)");
else if( elementSel == 5 )
src->add("floatBitsToInt(1.0)");
}
else if(interpretSrcAsType == LATTE_DECOMPILER_DTYPE_FLOAT )
{
if( elementSel == 4 )
src->add("0.0");
else if( elementSel == 5 )
src->add("1.0");
}
}
const char* _texGprAccessElemTable[8] = {"x","y","z","w","_","_","_","_"};
char* _getTexGPRAccess(LatteDecompilerShaderContext* shaderContext, sint32 gprIndex, uint32 dataType, sint8 selX, sint8 selY, sint8 selZ, sint8 selW, char* tempBuffer)
{
// intBitsToFloat(R{}i.w)
*tempBuffer = '\0';
// note: elemCount is currently unused; which components get emitted is decided by the >= 0 checks below
uint8 elemCount = (selX > 0 ? 1 : 0) + (selY > 0 ? 1 : 0) + (selZ > 0 ? 1 : 0) + (selW > 0 ? 1 : 0);
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
{
if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
; // no conversion
else if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
strcat(tempBuffer, "intBitsToFloat(");
else
cemu_assert_unimplemented();
strcat(tempBuffer, _getRegisterVarName(shaderContext, gprIndex));
// _texGprAccessElemTable
strcat(tempBuffer, ".");
if (selX >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selX]);
if (selY >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selY]);
if (selZ >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selZ]);
if (selW >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selW]);
if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
; // no conversion
else if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
strcat(tempBuffer, ")");
else
cemu_assert_unimplemented();
}
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
{
if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
cemu_assert_unimplemented();
else if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
; // no conversion
else
cemu_assert_unimplemented();
strcat(tempBuffer, _getRegisterVarName(shaderContext, gprIndex));
// _texGprAccessElemTable
strcat(tempBuffer, ".");
if (selX >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selX]);
if (selY >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selY]);
if (selZ >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selZ]);
if (selW >= 0)
strcat(tempBuffer, _texGprAccessElemTable[selW]);
if (dataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
cemu_assert_unimplemented();
else if (dataType == LATTE_DECOMPILER_DTYPE_FLOAT)
; // no conversion
else
cemu_assert_unimplemented();
}
else
cemu_assert_unimplemented();
return tempBuffer;
}
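// emits the GLSL sampling call for a TEX fetch/sample instruction
// selects texture()/textureLod()/texelFetch()/textureGather()/textureGrad() based on opcode and texture dimension,
// and handles texel vs. normalized coordinates, offsets, LOD/bias, shadow compare and the destination swizzle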
void _emitTEXSampleTextureCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
if (texInstruction->textureFetch.textureIndex < 0 || texInstruction->textureFetch.textureIndex >= LATTE_NUM_MAX_TEX_UNITS)
{
// skip out of bounds texture unit access
return;
}
auto texDim = shaderContext->shader->textureUnitDim[texInstruction->textureFetch.textureIndex];
char tempBuffer0[32];
char tempBuffer1[32];
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
const char* resultElemTable[4] = {"x","y","z","w"};
sint32 numWrittenElements = 0;
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
// texture sampler opcode
uint32 texOpcode = texInstruction->opcode;
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex)
{
// vertex shader forces LOD to zero, but certain sampler types don't support textureLod(...) API
if (texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ)
texOpcode = GPU7_TEX_INST_SAMPLE_C;
}
// check if offset is used
bool hasOffset = false;
if( texInstruction->textureFetch.offsetX != 0 || texInstruction->textureFetch.offsetY != 0 || texInstruction->textureFetch.offsetZ != 0 )
hasOffset = true;
// emit sample code
if (shaderContext->shader->textureIsIntegerFormat[texInstruction->textureFetch.textureIndex])
{
// integer samplers
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT) // uint to int
{
if(numWrittenElements == 1)
src->add(" = int(");
else
shaderContext->shaderSource->addFmt(" = ivec{}(", numWrittenElements);
}
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->add(" = uintBitsToFloat(");
}
else
{
// float samplers
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add(" = floatBitsToInt(");
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->add(" = (");
}
bool unnormalizationHandled = false;
bool useTexelCoordinates = false;
// handle illegal combinations
if (texOpcode == GPU7_TEX_INST_FETCH4 && (texDim == Latte::E_DIM::DIM_1D || texDim == Latte::E_DIM::DIM_1D_ARRAY))
{
// fetch4 is not allowed on 1D textures
// seen in YWW during boss fight of Level 1-4
// todo - investigate what this returns on actual HW
if (numWrittenElements == 1)
shaderContext->shaderSource->add("0.0");
else
shaderContext->shaderSource->addFmt("vec{}(0.0)", numWrittenElements);
shaderContext->shaderSource->add(");" _CRLF);
return;
}
if (texOpcode == GPU7_TEX_INST_SAMPLE && (texInstruction->textureFetch.unnormalized[0] && texInstruction->textureFetch.unnormalized[1] && texInstruction->textureFetch.unnormalized[2] && texInstruction->textureFetch.unnormalized[3]) )
{
// texture is likely a RECT
if (hasOffset)
cemu_assert_unimplemented();
src->add("texelFetch(");
unnormalizationHandled = true;
useTexelCoordinates = true;
}
else if( texOpcode == GPU7_TEX_INST_FETCH4 )
{
if( hasOffset )
cemu_assert_unimplemented();
src->add("textureGather(");
}
else if( texOpcode == GPU7_TEX_INST_LD )
{
if( hasOffset )
cemu_assert_unimplemented();
src->add("texelFetch(");
unnormalizationHandled = true;
useTexelCoordinates = true;
}
else if( texOpcode == GPU7_TEX_INST_SAMPLE_L )
{
// sample with LOD value set in gpr.w (replaces computed LOD value)
if( hasOffset )
src->add("textureLodOffset(");
else
src->add("textureLod(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_LZ)
{
// sample with LOD set to 0.0 (replaces computed LOD value)
if (hasOffset)
src->add("textureLodOffset(");
else
src->add("textureLod(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_LB)
{
// sample with LOD biased
// note: AMD doc says LOD bias is calculated from instruction LOD_BIAS field. But it appears that LOD bias is taken from input register. Might actually be both?
if (hasOffset)
src->add("textureOffset(");
else
src->add("texture(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE)
{
if (hasOffset)
src->add("textureOffset(");
else
src->add("texture(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_C_L)
{
// sample with LOD value set in gpr.w (replaces computed LOD value)
if (hasOffset)
src->add("textureLodOffset(");
else
src->add("textureLod(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ)
{
// sample with LOD set to 0.0 (replaces computed LOD value)
if (hasOffset)
src->add("textureLodOffset(");
else
src->add("textureLod(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_C)
{
if (hasOffset)
src->add("textureOffset(");
else
src->add("texture(");
}
else if (texOpcode == GPU7_TEX_INST_SAMPLE_G)
{
if (hasOffset)
cemu_assert_unimplemented();
src->add("textureGrad(");
}
else
{
if( hasOffset )
cemu_assert_unimplemented();
cemu_assert_unimplemented();
src->add("texture(");
}
src->addFmt("{}{}, ", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
// for textureGather() add shift (todo: depends on rounding mode set in sampler registers?)
if (texOpcode == GPU7_TEX_INST_FETCH4)
{
if (texDim == Latte::E_DIM::DIM_2D)
{
//src->addFmt2("(vec2(-0.1) / vec2(textureSize({}{},0).xy)) + ", gpu7Decompiler_getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureIndex);
// vec2(-0.00001) is minimum to break Nvidia
// vec2(0.0001) is minimum to fix shadows on Intel, also fixes it on AMD (Windows and Linux)
// todo - emulating coordinate rounding mode correctly is tricky
// GX2 supports two modes: Truncate or rounding according to DX9 rules
// Vulkan uses truncate mode when point sampling (min and mag is both nearest) otherwise it uses rounding
// adding a small fixed bias is enough to avoid vendor-specific cases where small inaccuracies cause the number to get rounded down due to truncation
src->addFmt("vec2(0.0001) + ");
}
}
const sint32 texCoordDataType = (texOpcode == GPU7_TEX_INST_LD) ? LATTE_DECOMPILER_DTYPE_SIGNED_INT : LATTE_DECOMPILER_DTYPE_FLOAT;
if(useTexelCoordinates)
{
// handle integer coordinates for texelFetch
if (texDim == Latte::E_DIM::DIM_2D || texDim == Latte::E_DIM::DIM_2D_MSAA)
{
src->add("ivec2(");
src->add("vec2(");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 0, texCoordDataType);
src->addFmt(", ");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 1, texCoordDataType);
src->addFmt(")*uf_tex{}Scale", texInstruction->textureFetch.textureIndex); // close vec2 and scale
src->add("), 0"); // close ivec2 and lod param
// todo - lod
}
else if (texDim == Latte::E_DIM::DIM_1D)
{
// VC DS games forget to initialize textures and use texel fetch on an uninitialized texture (a dim of 0 maps to 1D)
src->add("int(");
src->add("float(");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 0, (texOpcode == GPU7_TEX_INST_LD) ? LATTE_DECOMPILER_DTYPE_SIGNED_INT : LATTE_DECOMPILER_DTYPE_FLOAT);
src->addFmt(")*uf_tex{}Scale.x", texInstruction->textureFetch.textureIndex);
src->add("), 0");
// todo - lod
}
else
cemu_assert_debug(false);
}
else /* useTexelCoordinates == false */
{
// float coordinates
if ( (texOpcode == GPU7_TEX_INST_SAMPLE_C || texOpcode == GPU7_TEX_INST_SAMPLE_C_L || texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ) )
{
// shadow sampler
if (texDim == Latte::E_DIM::DIM_2D_ARRAY)
{
// 3 coords + compare value (as vec4)
src->add("vec4(");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 2, LATTE_DECOMPILER_DTYPE_FLOAT);
src->addFmt(",{})", _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[3], -1, -1, -1, tempBuffer0));
}
else if (texDim == Latte::E_DIM::DIM_CUBEMAP)
{
// 2 coords + faceId
if (texInstruction->textureFetch.srcSel[0] >= 4 || texInstruction->textureFetch.srcSel[1] >= 4)
{
debugBreakpoint();
}
src->add("vec4(");
src->addFmt("redcCUBEReverse({},", _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[0], texInstruction->textureFetch.srcSel[1], -1, -1, tempBuffer0));
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 2, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->addFmt(")");
src->addFmt(",cubeMapArrayIndex{})", texInstruction->textureFetch.textureIndex); // cubemap index
}
else if (texDim == Latte::E_DIM::DIM_1D)
{
// 1 coord + 1 unused coord (per GLSL spec) + compare value
if (texInstruction->textureFetch.srcSel[0] >= 4)
{
debugBreakpoint();
}
src->addFmt("vec3({},0.0,{})", _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[0], -1, -1, -1, tempBuffer0), _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[3], -1, -1, -1, tempBuffer1));
}
else
{
// 2 coords + compare value (as vec3)
if (texInstruction->textureFetch.srcSel[0] >= 4 && texInstruction->textureFetch.srcSel[1] >= 4)
{
debugBreakpoint();
}
src->addFmt("vec3({}, {})", _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[0], texInstruction->textureFetch.srcSel[1], -1, -1, tempBuffer0), _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[3], -1, -1, -1, tempBuffer1));
}
}
else if( texDim == Latte::E_DIM::DIM_3D || texDim == Latte::E_DIM::DIM_2D_ARRAY )
{
// 3 coords
src->add("vec3(");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 2, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
}
else if( texDim == Latte::E_DIM::DIM_CUBEMAP )
{
// 2 coords + faceId
cemu_assert_debug(texInstruction->textureFetch.srcSel[0] < 4);
cemu_assert_debug(texInstruction->textureFetch.srcSel[1] < 4);
src->add("vec4(");
src->addFmt("redcCUBEReverse({},", _getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[0], texInstruction->textureFetch.srcSel[1], -1, -1, tempBuffer0));
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 2, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(")");
src->addFmt(",cubeMapArrayIndex{})", texInstruction->textureFetch.textureIndex); // cubemap index
}
else if( texDim == Latte::E_DIM::DIM_1D )
{
// 1 coord
src->add(_getTexGPRAccess(shaderContext, texInstruction->srcGpr, LATTE_DECOMPILER_DTYPE_FLOAT, texInstruction->textureFetch.srcSel[0], -1, -1, -1, tempBuffer0));
}
else
{
// 2 coords
src->add("vec2(");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 0, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(",");
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
// avoid truncate to effectively round downwards on texel edges
if (ActiveSettings::ForceSamplerRoundToPrecision())
src->addFmt("+ vec2(1.0)/vec2(textureSize({}{}, 0))/512.0", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
}
// lod or lod bias parameter
if( texOpcode == GPU7_TEX_INST_SAMPLE_L || texOpcode == GPU7_TEX_INST_SAMPLE_LB || texOpcode == GPU7_TEX_INST_SAMPLE_C_L)
{
src->add(",");
if(texOpcode == GPU7_TEX_INST_SAMPLE_LB)
src->add(_FormatFloatAsGLSLConstant((float)texInstruction->textureFetch.lodBias / 16.0f));
else
_emitTEXSampleCoordInputComponent(shaderContext, texInstruction, 3, LATTE_DECOMPILER_DTYPE_FLOAT);
}
else if( texOpcode == GPU7_TEX_INST_SAMPLE_LZ || texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ )
{
src->add(",0.0");
}
}
// gradient parameters
if (texOpcode == GPU7_TEX_INST_SAMPLE_G)
{
if (texDim == Latte::E_DIM::DIM_2D ||
texDim == Latte::E_DIM::DIM_1D )
{
src->add(",gradH.xy,gradV.xy");
}
else
{
cemu_assert_unimplemented();
}
}
// offset
if( texOpcode == GPU7_TEX_INST_SAMPLE_L || texOpcode == GPU7_TEX_INST_SAMPLE_LZ || texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ || texOpcode == GPU7_TEX_INST_SAMPLE || texOpcode == GPU7_TEX_INST_SAMPLE_C )
{
if( hasOffset )
{
uint8 offsetComponentCount = 0;
if( texDim == Latte::E_DIM::DIM_1D )
offsetComponentCount = 1;
else if( texDim == Latte::E_DIM::DIM_2D )
offsetComponentCount = 2;
else if( texDim == Latte::E_DIM::DIM_3D )
offsetComponentCount = 3;
else if( texDim == Latte::E_DIM::DIM_2D_ARRAY )
offsetComponentCount = 2;
else
cemu_assert_unimplemented();
if( (texInstruction->textureFetch.offsetX&1) )
cemu_assert_unimplemented();
if( (texInstruction->textureFetch.offsetY&1) )
cemu_assert_unimplemented();
if ((texInstruction->textureFetch.offsetZ & 1))
cemu_assert_unimplemented();
if( offsetComponentCount == 1 )
src->addFmt(",{}", texInstruction->textureFetch.offsetX/2);
else if( offsetComponentCount == 2 )
src->addFmt(",ivec2({},{})", texInstruction->textureFetch.offsetX/2, texInstruction->textureFetch.offsetY/2, texInstruction->textureFetch.offsetZ/2);
else if( offsetComponentCount == 3 )
src->addFmt(",ivec3({},{},{})", texInstruction->textureFetch.offsetX/2, texInstruction->textureFetch.offsetY/2, texInstruction->textureFetch.offsetZ/2);
}
}
// close the sampler call and emit the destination swizzle
// shadow samplers (SAMPLE_C / SAMPLE_C_LZ) return a scalar, so the .x result is broadcast into every written channel
if( texOpcode == GPU7_TEX_INST_SAMPLE_C || texOpcode == GPU7_TEX_INST_SAMPLE_C_LZ )
{
src->add(")");
if (numWrittenElements > 1)
{
// result is copied into multiple channels
src->add(".");
for (sint32 f = 0; f < numWrittenElements; f++)
{
cemu_assert_debug(texInstruction->dstSel[f] == 0); // only x component is defined
src->add("x");
}
}
}
else
{
src->add(").");
for (sint32 f = 0; f < 4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
uint8 elemIndex = texInstruction->dstSel[f];
if (texOpcode == GPU7_TEX_INST_FETCH4)
{
// GLSL's textureGather() and GPU7's FETCH4 instruction have a different order of elements
// xyzw: top-left, top-right, bottom-right, bottom-left
// textureGather xyzw
// fetch4 yzxw
// translate index from fetch4 to textureGather order
static uint8 fetchToGather[4] =
{
2, // x -> z
0, // y -> x
1, // z -> y
3, // w -> w
};
elemIndex = fetchToGather[elemIndex];
}
src->add(resultElemTable[elemIndex]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
cemu_assert_unimplemented();
}
}
}
src->add(");");
// debug
#ifdef CEMU_DEBUG_ASSERT
if(texInstruction->opcode == GPU7_TEX_INST_LD )
src->add(" // TEX_INST_LD");
else if(texInstruction->opcode == GPU7_TEX_INST_SAMPLE )
src->add(" // TEX_INST_SAMPLE");
else if(texInstruction->opcode == GPU7_TEX_INST_SAMPLE_L )
src->add(" // TEX_INST_SAMPLE_L");
else if(texInstruction->opcode == GPU7_TEX_INST_SAMPLE_LZ )
src->add(" // TEX_INST_SAMPLE_LZ");
else if(texInstruction->opcode == GPU7_TEX_INST_SAMPLE_C )
src->add(" // TEX_INST_SAMPLE_C");
else if(texInstruction->opcode == GPU7_TEX_INST_SAMPLE_G )
src->add(" // TEX_INST_SAMPLE_G");
else
src->addFmt(" // 0x{:02x}", texInstruction->opcode);
if (texInstruction->opcode != texOpcode)
src->addFmt(" (applied as 0x{:02x})", texOpcode);
src->addFmt(" OffsetXYZ {:02x} {:02x} {:02x}", (uint8)texInstruction->textureFetch.offsetX&0xFF, (uint8)texInstruction->textureFetch.offsetY&0xFF, (uint8)texInstruction->textureFetch.offsetZ&0xFF);
#endif
src->add("" _CRLF);
}
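// implements GET_TEXTURE_RESINFO by emitting a textureSize() query, padded to an ivec4 and swizzled into the destination GPR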
void _emitTEXGetTextureResInfoCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->addFmt("R{}", texInstruction->dstGpr);
src->add("i");
src->add(".");
const char* resultElemTable[4] = {"x","y","z","w"};
sint32 numWrittenElements = 0;
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
cemu_assert_unimplemented();
}
}
// todo - mip index parameter?
auto texDim = shaderContext->shader->textureUnitDim[texInstruction->textureFetch.textureIndex];
if (texDim == Latte::E_DIM::DIM_1D)
src->addFmt(" = ivec4(textureSize({}{}, 0),1,1,1).", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
else if (texDim == Latte::E_DIM::DIM_1D_ARRAY)
src->addFmt(" = ivec4(textureSize({}{}, 0),1,1).", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
else if (texDim == Latte::E_DIM::DIM_2D || texDim == Latte::E_DIM::DIM_2D_MSAA)
src->addFmt(" = ivec4(textureSize({}{}, 0),1,1).", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
else if (texDim == Latte::E_DIM::DIM_2D_ARRAY)
src->addFmt(" = ivec4(textureSize({}{}, 0),1).", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
else
{
cemu_assert_debug(false);
src->addFmt(" = ivec4(textureSize({}{}, 0),1,1).", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex);
}
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[texInstruction->dstSel[f]]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
src->add(";" _CRLF);
}
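// implements GET_COMP_TEX_LOD via textureQueryLod(), padding the result to a vec4 before writing it to the destination GPR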
void _emitTEXGetCompTexLodCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
const char* resultElemTable[4] = {"x","y","z","w"};
sint32 numWrittenElements = 0;
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, shaderContext->typeTracker.defaultDataType);
if( shaderContext->shader->textureUnitDim[texInstruction->textureFetch.textureIndex] == Latte::E_DIM::DIM_CUBEMAP )
{
// 3 coordinates
if(shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("vec4(textureQueryLod({}{}, {}.{}{}{}),0.0,0.0)", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex, _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]], resultElemTable[texInstruction->textureFetch.srcSel[1]], resultElemTable[texInstruction->textureFetch.srcSel[2]]);
else
src->addFmt("vec4(textureQueryLod({}{}, intBitsToFloat({}.{}{}{})),0.0,0.0)", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex, _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]], resultElemTable[texInstruction->textureFetch.srcSel[1]], resultElemTable[texInstruction->textureFetch.srcSel[2]]);
}
else
{
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("vec4(textureQueryLod({}{}, {}.{}{}),0.0,0.0)", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex, _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]], resultElemTable[texInstruction->textureFetch.srcSel[1]]);
else
src->addFmt("vec4(textureQueryLod({}{}, intBitsToFloat({}.{}{})),0.0,0.0)", _getTextureUnitVariablePrefixName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex, _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]], resultElemTable[texInstruction->textureFetch.srcSel[1]]);
debugBreakpoint();
}
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, shaderContext->typeTracker.defaultDataType);
src->add(".");
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[texInstruction->dstSel[f]]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
src->add(";" _CRLF);
}
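// implements SET_CUBEMAP_INDEX by copying the selected source component into the per-texture cubeMapArrayIndex variable used by cubemap sampling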
void _emitTEXSetCubemapIndexCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->addFmt("cubeMapArrayIndex{}", texInstruction->textureFetch.textureIndex);
const char* resultElemTable[4] = {"x","y","z","w"};
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt(" = intBitsToFloat(R{}i.{});" _CRLF, texInstruction->srcGpr, resultElemTable[texInstruction->textureFetch.srcSel[0]]);
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt(" = R{}f.{};" _CRLF, texInstruction->srcGpr, resultElemTable[texInstruction->textureFetch.srcSel[0]]);
else
cemu_assert_unimplemented();
}
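// implements GET_GRADIENTS_H/V by applying dFdx()/dFdy() to the selected source components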
void _emitTEXGetGradientsHV(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
sint32 componentCount = 0;
for (sint32 i = 0; i < 4; i++)
{
if(texInstruction->dstSel[i] == 7)
continue;
componentCount++;
}
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
const char* resultElemTable[4] = { "x","y","z","w" };
sint32 numWrittenElements = 0;
for (sint32 f = 0; f < 4; f++)
{
if (texInstruction->dstSel[f] < 4)
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if (texInstruction->dstSel[f] == 7)
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
const char* funcName;
if (texInstruction->opcode == GPU7_TEX_INST_GET_GRADIENTS_H)
funcName = "dFdx";
else
funcName = "dFdy";
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, shaderContext->typeTracker.defaultDataType);
src->addFmt("{}(", funcName);
_emitRegisterAccessCode(shaderContext, texInstruction->srcGpr, (componentCount >= 1) ? texInstruction->textureFetch.srcSel[0] : -1, (componentCount >= 2) ? texInstruction->textureFetch.srcSel[1] : -1, (componentCount >= 3) ? texInstruction->textureFetch.srcSel[2] : -1, (componentCount >= 4)?texInstruction->textureFetch.srcSel[3]:-1, LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_FLOAT, shaderContext->typeTracker.defaultDataType);
src->add(";" _CRLF);
}
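// implements SET_GRADIENTS_H/V by storing the source vector in the gradH/gradV variables later consumed by SAMPLE_G (textureGrad)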
void _emitTEXSetGradientsHV(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
if (texInstruction->opcode == GPU7_TEX_INST_SET_GRADIENTS_H)
src->add("gradH = ");
else
src->add("gradV = ");
_emitRegisterAccessCode(shaderContext, texInstruction->srcGpr, texInstruction->textureFetch.srcSel[0], texInstruction->textureFetch.srcSel[1], texInstruction->textureFetch.srcSel[2], texInstruction->textureFetch.srcSel[3], LATTE_DECOMPILER_DTYPE_FLOAT);
src->add(";" _CRLF);
}
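// geometry shader variant of VFETCH: reads a vertex shader output passed through the v2g struct instead of performing a real fetch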
void _emitGSReadInputVFetchCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
const char* resultElemTable[4] = {"x","y","z","w"};
sint32 numWrittenElements = 0;
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
cemu_assert_unimplemented();
}
}
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, shaderContext->typeTracker.defaultDataType);
src->add("(v2g[");
if (texInstruction->textureFetch.srcSel[0] >= 4)
cemu_assert_unimplemented();
if (texInstruction->textureFetch.srcSel[1] >= 4)
cemu_assert_unimplemented();
// todo: Index type
src->add("0");
src->addFmt("].passV2GParameter{}.", texInstruction->textureFetch.offset/16);
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[texInstruction->dstSel[f]]);
numWrittenElements++;
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
cemu_assert_unimplemented();
}
}
src->add(")");
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, shaderContext->typeTracker.defaultDataType);
src->add(";" _CRLF);
}
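// emits the destination swizzle for a dstSel mask and returns the number of written components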
sint32 _writeDestMaskXYZW(LatteDecompilerShaderContext* shaderContext, sint8* dstSel)
{
StringBuf* src = shaderContext->shaderSource;
const char* resultElemTable[4] = { "x","y","z","w" };
sint32 numWrittenElements = 0;
for (sint32 f = 0; f < 4; f++)
{
if (dstSel[f] < 4)
{
src->add(resultElemTable[f]);
numWrittenElements++;
}
else if (dstSel[f] == 7)
{
// masked and not written
}
else
{
cemu_assert_unimplemented();
}
}
return numWrittenElements;
}
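// emits a VFETCH instruction; buffer indices 0x80+ appear to map to uniform register arrays (see _getShaderUniformBlockVariableName),
// while index 0x9F in geometry shaders is the GS input ringbuffer handled by _emitGSReadInputVFetchCode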
void _emitTEXVFetchCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
// handle special case where geometry shader reads input attributes from vertex shader via ringbuffer
StringBuf* src = shaderContext->shaderSource;
if( texInstruction->textureFetch.textureIndex == 0x9F && shaderContext->shaderType == LatteConst::ShaderType::Geometry )
{
_emitGSReadInputVFetchCode(shaderContext, texInstruction);
return;
}
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
_writeDestMaskXYZW(shaderContext, texInstruction->dstSel);
const char* resultElemTable[4] = {"x","y","z","w"};
src->add(" = ");
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add("floatBitsToInt(");
else
src->add("(");
src->addFmt("{}{}[", _getShaderUniformBlockVariableName(shaderContext->shader->shaderType), texInstruction->textureFetch.textureIndex - 0x80);
if( shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
src->addFmt("{}.{}", _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]]);
else
src->addFmt("floatBitsToInt({}.{})", _getRegisterVarName(shaderContext, texInstruction->srcGpr), resultElemTable[texInstruction->textureFetch.srcSel[0]]);
src->add("].");
for(sint32 f=0; f<4; f++)
{
if( texInstruction->dstSel[f] < 4 )
{
src->add(resultElemTable[texInstruction->dstSel[f]]);
}
else if( texInstruction->dstSel[f] == 7 )
{
// masked and not written
}
else
{
debugBreakpoint();
}
}
src->add(");" _CRLF);
}
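// emits a MEM read instruction; the actual memory access is not implemented yet, so zero placeholders are written for the supported float formats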
void _emitTEXReadMemCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerTEXInstruction* texInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->add(_getRegisterVarName(shaderContext, texInstruction->dstGpr));
src->add(".");
sint32 count = _writeDestMaskXYZW(shaderContext, texInstruction->dstSel);
src->add(" = ");
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->add("floatBitsToInt(");
else
src->add("(");
sint32 readCount = 0; // initialized so an unsupported format (asserted below) cannot leave it indeterminate
if (texInstruction->memRead.format == FMT_32_FLOAT)
{
readCount = 1;
// todo
src->add("0.0");
}
else if (texInstruction->memRead.format == FMT_32_32_FLOAT)
{
readCount = 2;
// todo
src->add("vec2(0.0,0.0)");
}
else if (texInstruction->memRead.format == FMT_32_32_32_FLOAT)
{
readCount = 3;
// todo
src->add("vec3(0.0,0.0,0.0)");
}
else
{
cemu_assert_unimplemented();
}
if (count < readCount)
{
if (count == 1)
src->add(".x");
else if (count == 2)
src->add(".xy");
else if (count == 3)
src->add(".xyz");
}
src->add(");" _CRLF);
}
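// dispatches every instruction of a TEX clause to its specific emitter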
void _emitTEXClauseCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
cemu_assert_debug(cfInstruction->instructionsALU.empty());
for(auto& texInstruction : cfInstruction->instructionsTEX)
{
if( texInstruction.opcode == GPU7_TEX_INST_SAMPLE || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_L || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_LB || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_LZ || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_L || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_C_LZ || texInstruction.opcode == GPU7_TEX_INST_FETCH4 || texInstruction.opcode == GPU7_TEX_INST_SAMPLE_G || texInstruction.opcode == GPU7_TEX_INST_LD )
_emitTEXSampleTextureCode(shaderContext, &texInstruction);
else if( texInstruction.opcode == GPU7_TEX_INST_GET_TEXTURE_RESINFO )
_emitTEXGetTextureResInfoCode(shaderContext, &texInstruction);
else if( texInstruction.opcode == GPU7_TEX_INST_GET_COMP_TEX_LOD )
_emitTEXGetCompTexLodCode(shaderContext, &texInstruction);
else if( texInstruction.opcode == GPU7_TEX_INST_SET_CUBEMAP_INDEX )
_emitTEXSetCubemapIndexCode(shaderContext, &texInstruction);
else if (texInstruction.opcode == GPU7_TEX_INST_GET_GRADIENTS_H ||
texInstruction.opcode == GPU7_TEX_INST_GET_GRADIENTS_V)
_emitTEXGetGradientsHV(shaderContext, &texInstruction);
else if (texInstruction.opcode == GPU7_TEX_INST_SET_GRADIENTS_H ||
texInstruction.opcode == GPU7_TEX_INST_SET_GRADIENTS_V)
_emitTEXSetGradientsHV(shaderContext, &texInstruction);
else if (texInstruction.opcode == GPU7_TEX_INST_VFETCH)
_emitTEXVFetchCode(shaderContext, &texInstruction);
else if (texInstruction.opcode == GPU7_TEX_INST_MEM)
_emitTEXReadMemCode(shaderContext, &texInstruction);
else
cemu_assert_unimplemented();
}
}
// generate the code for reading the source input GPR (or constants) for exports
void _emitExportGPRReadCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, sint32 requiredType, uint32 burstIndex)
{
StringBuf* src = shaderContext->shaderSource;
uint32 numOutputs = 4;
if( cfInstruction->type == GPU7_CF_INST_MEM_RING_WRITE )
{
numOutputs = (cfInstruction->memWriteCompMask&1)?1:0;
numOutputs += (cfInstruction->memWriteCompMask&2)?1:0;
numOutputs += (cfInstruction->memWriteCompMask&4)?1:0;
numOutputs += (cfInstruction->memWriteCompMask&8)?1:0;
}
if (requiredType == LATTE_DECOMPILER_DTYPE_FLOAT)
{
if(numOutputs == 1)
src->add("float(");
else
src->addFmt("vec{}(", numOutputs);
}
else if (requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
{
if (numOutputs == 1)
src->add("int(");
else
src->addFmt("ivec{}(", numOutputs);
}
else
cemu_assert_unimplemented();
sint32 actualOutputs = 0;
for(sint32 i=0; i<4; i++)
{
// todo: Use type of register element based on information from type tracker (currently we assume it's always a signed integer)
uint32 exportSel = 0;
if( cfInstruction->type == GPU7_CF_INST_MEM_RING_WRITE )
{
exportSel = i;
if( (cfInstruction->memWriteCompMask&(1<<i)) == 0 )
continue; // don't output this component
}
else
{
exportSel = cfInstruction->exportComponentSel[i];
}
if( actualOutputs > 0 )
src->add(", ");
actualOutputs++;
if( exportSel < 4 )
{
_emitRegisterAccessCode(shaderContext, cfInstruction->exportSourceGPR+burstIndex, exportSel, -1, -1, -1, requiredType);
}
else if (exportSel == 4)
{
// constant zero
src->add("0");
}
else if (exportSel == 5)
{
// constant one
src->add("1.0");
}
else if( exportSel == 7 )
{
// element masked (which means 0 is exported?)
src->add("0");
}
else
{
cemu_assert_debug(false);
src->add("0");
}
}
if( requiredType == LATTE_DECOMPILER_DTYPE_FLOAT )
src->add(")");
else if( requiredType == LATTE_DECOMPILER_DTYPE_SIGNED_INT )
src->add(")");
else
cemu_assert_unimplemented();
}
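// emits export instructions: position/point size/parameters for vertex shaders, color outputs (including alpha test) and depth for pixel shaders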
void _emitExportCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
StringBuf* src = shaderContext->shaderSource;
src->add("// export" _CRLF);
if(shaderContext->shaderType == LatteConst::ShaderType::Vertex )
{
if( cfInstruction->exportBurstCount != 0 )
debugBreakpoint();
if (cfInstruction->exportType == 1 && cfInstruction->exportArrayBase == GPU7_DECOMPILER_CF_EXPORT_BASE_POSITION)
{
// export position
// GX2 special state 0 disables rasterizer viewport offset and scaling (probably, exact mechanism is not known). Handle this here
bool hasAnyViewportScaleDisabled =
!shaderContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_X_SCALE_ENA() ||
!shaderContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Y_SCALE_ENA() ||
!shaderContext->contextRegistersNew->PA_CL_VTE_CNTL.get_VPORT_Z_SCALE_ENA();
if (hasAnyViewportScaleDisabled)
{
src->add("vec4 finalPos = ");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, 0);
src->add(";" _CRLF);
src->add("finalPos.xy = finalPos.xy * uf_windowSpaceToClipSpaceTransform - vec2(1.0,1.0);");
src->add("SET_POSITION(finalPos);");
}
else
{
src->add("SET_POSITION(");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, 0);
src->add(");" _CRLF);
}
}
else if (cfInstruction->exportType == 1 && cfInstruction->exportArrayBase == GPU7_DECOMPILER_CF_EXPORT_POINT_SIZE )
{
// export gl_PointSize
if (shaderContext->analyzer.outputPointSize)
{
cemu_assert_debug(shaderContext->analyzer.writesPointSize);
src->add("gl_PointSize = (");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, 0);
src->add(").x");
src->add(";" _CRLF);
}
}
else if( cfInstruction->exportType == 2 && cfInstruction->exportArrayBase < 32 )
{
// export parameter
sint32 paramIndex = cfInstruction->exportArrayBase;
uint32 vsSemanticId = _getVertexShaderOutParamSemanticId(shaderContext->contextRegisters, paramIndex);
if (vsSemanticId != 0xFF)
{
src->addFmt("passParameterSem{} = ", vsSemanticId);
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, 0);
src->add(";" _CRLF);
}
else
{
src->add("// skipped export to semanticId 255" _CRLF);
}
}
else
cemu_assert_unimplemented();
}
else if(shaderContext->shaderType == LatteConst::ShaderType::Pixel )
{
if( cfInstruction->exportType == 0 && cfInstruction->exportArrayBase < 8 )
{
for(uint32 i=0; i<(cfInstruction->exportBurstCount+1); i++)
{
sint32 pixelColorOutputIndex = LatteDecompiler_getColorOutputIndexFromExportIndex(shaderContext, cfInstruction->exportArrayBase+i);
// if color output is for target 0, then also handle alpha test
bool alphaTestEnable = shaderContext->contextRegistersNew->SX_ALPHA_TEST_CONTROL.get_ALPHA_TEST_ENABLE();
auto alphaTestFunc = shaderContext->contextRegistersNew->SX_ALPHA_TEST_CONTROL.get_ALPHA_FUNC();
if( pixelColorOutputIndex == 0 && alphaTestEnable && alphaTestFunc == Latte::E_COMPAREFUNC::NEVER )
{
// never pass alpha test
src->add("discard;" _CRLF);
}
else if( pixelColorOutputIndex == 0 && alphaTestEnable && alphaTestFunc != Latte::E_COMPAREFUNC::ALWAYS)
{
src->add("if( ((");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, i);
src->add(").a ");
switch( alphaTestFunc )
{
case Latte::E_COMPAREFUNC::LESS:
src->add("<");
break;
case Latte::E_COMPAREFUNC::EQUAL:
src->add("==");
break;
case Latte::E_COMPAREFUNC::LEQUAL:
src->add("<=");
break;
case Latte::E_COMPAREFUNC::GREATER:
src->add(">");
break;
case Latte::E_COMPAREFUNC::NOTEQUAL:
src->add("!=");
break;
case Latte::E_COMPAREFUNC::GEQUAL:
src->add(">=");
break;
}
src->add(" uf_alphaTestRef");
src->add(") == false) discard;" _CRLF);
}
// pixel color output
src->addFmt("passPixelColor{} = ", pixelColorOutputIndex);
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, i);
src->add(";" _CRLF);
if( cfInstruction->exportArrayBase+i >= 8 )
cemu_assert_unimplemented();
}
}
else if( cfInstruction->exportType == 0 && cfInstruction->exportArrayBase == 61 )
{
// pixel depth or gl_FragStencilRefARB
if( cfInstruction->exportBurstCount > 0 )
cemu_assert_unimplemented();
if (cfInstruction->exportComponentSel[0] == 7)
{
cemu_assert_unimplemented(); // gl_FragDepth ?
}
if (cfInstruction->exportComponentSel[1] != 7)
{
cemu_assert_unimplemented(); // exporting to gl_FragStencilRefARB
}
if (cfInstruction->exportComponentSel[2] != 7)
{
cemu_assert_unimplemented(); // ukn
}
if (cfInstruction->exportComponentSel[3] != 7)
{
cemu_assert_unimplemented(); // ukn
}
src->add("gl_FragDepth = ");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, 0);
src->add(".x");
src->add(";" _CRLF);
}
else
cemu_assert_unimplemented();
}
}
void _emitXYZWByMask(StringBuf* src, uint32 mask)
{
if( (mask&(1<<0)) != 0 )
src->add("x");
if( (mask&(1<<1)) != 0 )
src->add("y");
if( (mask&(1<<2)) != 0 )
src->add("z");
if( (mask&(1<<3)) != 0 )
src->add("w");
}
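// emits MEM_RING_WRITE: vertex shaders pass parameters to the geometry stage via v2g, geometry shaders either
// emit streamout writes (when streamout is active) or resolve the ring offset through the parsed copy shader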
void _emitCFRingWriteCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
StringBuf* src = shaderContext->shaderSource;
// calculate parameter output (based on ring buffer output offset relative to GS unit)
uint32 bytesPerVertex = shaderContext->contextRegisters[mmSQ_GS_VERT_ITEMSIZE] * 4;
bytesPerVertex = std::max(bytesPerVertex, (uint32)1); // avoid division by zero
uint32 parameterOffset = ((cfInstruction->exportArrayBase * 4) % bytesPerVertex);
// for geometry shaders with streamout, MEM_RING_WRITE is used to pass the data to the copy shader, which then uses STREAM*_WRITE
if (shaderContext->shaderType == LatteConst::ShaderType::Geometry && shaderContext->analyzer.hasStreamoutEnable)
{
// if streamout is enabled, we generate transform feedback output code instead of the normal gs output
for (uint32 burstIndex = 0; burstIndex < (cfInstruction->exportBurstCount + 1); burstIndex++)
{
parameterOffset = ((cfInstruction->exportArrayBase * 4 + burstIndex*0x10) % bytesPerVertex);
// find matching stream write in copy shader
LatteGSCopyShaderStreamWrite_t* streamWrite = nullptr;
for (auto& it : shaderContext->parsedGSCopyShader->list_streamWrites)
{
if (it.offset == parameterOffset)
{
streamWrite = &it;
break;
}
}
if (streamWrite == nullptr)
{
cemu_assert_suspicious();
return;
}
for (sint32 i = 0; i < 4; i++)
{
if ((cfInstruction->memWriteCompMask&(1 << i)) == 0)
continue;
if (shaderContext->options->useTFViaSSBO)
{
uint32 u32Offset = streamWrite->exportArrayBase + i;
src->addFmt("sb_buffer[sbBase{} + {}]", streamWrite->bufferIndex, u32Offset);
}
else
{
src->addFmt("sb{}[{}]", streamWrite->bufferIndex, streamWrite->exportArrayBase + i);
}
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, shaderContext->typeTracker.defaultDataType, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->addFmt("{}.", _getRegisterVarName(shaderContext, cfInstruction->exportSourceGPR+burstIndex));
if (i == 0)
src->add("x");
else if (i == 1)
src->add("y");
else if (i == 2)
src->add("z");
else if (i == 3)
src->add("w");
_emitTypeConversionSuffix(shaderContext, shaderContext->typeTracker.defaultDataType, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(";" _CRLF);
}
}
return;
}
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex)
{
if (cfInstruction->memWriteElemSize != 3)
cemu_assert_unimplemented();
if ((cfInstruction->exportArrayBase & 3) != 0)
cemu_assert_unimplemented();
for (sint32 burstIndex = 0; burstIndex < (sint32)(cfInstruction->exportBurstCount + 1); burstIndex++)
{
src->addFmt("v2g.passV2GParameter{}.", (cfInstruction->exportArrayBase) / 4 + burstIndex);
_emitXYZWByMask(src, cfInstruction->memWriteCompMask);
src->addFmt(" = ");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_SIGNED_INT, burstIndex);
src->add(";" _CRLF);
}
}
else if (shaderContext->shaderType == LatteConst::ShaderType::Geometry)
{
cemu_assert_debug(cfInstruction->memWriteElemSize == 3);
//if (cfInstruction->memWriteElemSize != 3)
// debugBreakpoint();
cemu_assert_debug((cfInstruction->exportArrayBase & 3) == 0);
for (uint32 burstIndex = 0; burstIndex < (cfInstruction->exportBurstCount + 1); burstIndex++)
{
uint32 parameterExportType = 0;
uint32 parameterExportBase = 0;
if (LatteGSCopyShaderParser_getExportTypeByOffset(shaderContext->parsedGSCopyShader, parameterOffset + burstIndex * (cfInstruction->memWriteElemSize+1)*4, &parameterExportType, &parameterExportBase) == false)
{
cemu_assert_debug(false);
shaderContext->hasError = true;
return;
}
if (parameterExportType == 1 && parameterExportBase == GPU7_DECOMPILER_CF_EXPORT_BASE_POSITION)
{
src->add("{" _CRLF);
src->addFmt("vec4 pos = vec4(0.0,0.0,0.0,1.0);" _CRLF);
src->addFmt("pos.");
_emitXYZWByMask(src, cfInstruction->memWriteCompMask);
src->addFmt(" = ");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, burstIndex);
src->add(";" _CRLF);
src->add("SET_POSITION(pos);" _CRLF);
src->add("}" _CRLF);
}
else if (parameterExportType == 2 && parameterExportBase < 16)
{
src->addFmt("passG2PParameter{}.", parameterExportBase);
_emitXYZWByMask(src, cfInstruction->memWriteCompMask);
src->addFmt(" = ");
_emitExportGPRReadCode(shaderContext, cfInstruction, LATTE_DECOMPILER_DTYPE_FLOAT, burstIndex);
src->add(";" _CRLF);
}
else
cemu_assert_debug(false);
}
}
else
debugBreakpoint(); // todo
}
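// emits MEM_STREAM0/1_WRITE as transform feedback output, either via the streamout SSBO or the sb* output arrays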
void _emitStreamWriteCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
StringBuf* src = shaderContext->shaderSource;
if (shaderContext->analyzer.hasStreamoutEnable == false)
{
#ifdef CEMU_DEBUG_ASSERT
src->add("// omitted streamout write" _CRLF);
#endif
return;
}
uint32 streamoutBufferIndex = 0; // defensively initialized; only STREAM0/STREAM1 writes are expected here
if (cfInstruction->type == GPU7_CF_INST_MEM_STREAM0_WRITE)
streamoutBufferIndex = 0;
else if (cfInstruction->type == GPU7_CF_INST_MEM_STREAM1_WRITE)
streamoutBufferIndex = 1;
else
cemu_assert_unimplemented();
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex)
{
uint32 arraySize = cfInstruction->memWriteArraySize + 1;
for (sint32 i = 0; i < (sint32)arraySize; i++)
{
if ((cfInstruction->memWriteCompMask&(1 << i)) == 0)
continue;
if (shaderContext->options->useTFViaSSBO)
{
uint32 u32Offset = cfInstruction->exportArrayBase + i;
src->addFmt("sb_buffer[sbBase{} + {}]", streamoutBufferIndex, u32Offset);
}
else
{
src->addFmt("sb{}[{}]", streamoutBufferIndex, cfInstruction->exportArrayBase + i);
}
src->add(" = ");
_emitTypeConversionPrefix(shaderContext, shaderContext->typeTracker.defaultDataType, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(_getRegisterVarName(shaderContext, cfInstruction->exportSourceGPR));
_appendChannelAccess(src, i);
_emitTypeConversionSuffix(shaderContext, shaderContext->typeTracker.defaultDataType, LATTE_DECOMPILER_DTYPE_SIGNED_INT);
src->add(";" _CRLF);
}
}
else
cemu_assert_debug(false);
}
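// emits a CF CALL by inlining the referenced subroutine at the call site (nested calls are not supported)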
void _emitCFCall(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction)
{
StringBuf* src = shaderContext->shaderSource;
uint32 subroutineAddr = cfInstruction->addr;
LatteDecompilerSubroutineInfo* subroutineInfo = nullptr;
// find subroutine
for (auto& subroutineItr : shaderContext->list_subroutines)
{
if (subroutineItr.cfAddr == subroutineAddr)
{
subroutineInfo = &subroutineItr;
break;
}
}
if (subroutineInfo == nullptr)
{
cemu_assert_debug(false);
return;
}
// inline function
if (shaderContext->isSubroutine)
{
cemu_assert_debug(false); // inlining with cascaded function calls not supported
return;
}
// init CF stack variables
src->addFmt("activeMaskStackSub{:04x}[0] = true;" _CRLF, subroutineInfo->cfAddr);
src->addFmt("activeMaskStackCSub{:04x}[0] = true;" _CRLF, subroutineInfo->cfAddr);
src->addFmt("activeMaskStackCSub{:04x}[1] = true;" _CRLF, subroutineInfo->cfAddr);
shaderContext->isSubroutine = true;
shaderContext->subroutineInfo = subroutineInfo;
for(auto& cfInstruction : subroutineInfo->instructions)
LatteDecompiler_emitClauseCode(shaderContext, &cfInstruction, true);
shaderContext->isSubroutine = false;
shaderContext->subroutineInfo = nullptr;
}
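// top-level CF instruction dispatcher
// wraps ALU/TEX clauses in pixel active-mask checks and handles flow control (ELSE/POP/LOOP), exports, ring/stream writes, EMIT_VERTEX and CALL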
void LatteDecompiler_emitClauseCode(LatteDecompilerShaderContext* shaderContext, LatteDecompilerCFInstruction* cfInstruction, bool isSubroutine)
{
StringBuf* src = shaderContext->shaderSource;
if( cfInstruction->type == GPU7_CF_INST_ALU || cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE || cfInstruction->type == GPU7_CF_INST_ALU_POP_AFTER || cfInstruction->type == GPU7_CF_INST_ALU_POP2_AFTER || cfInstruction->type == GPU7_CF_INST_ALU_BREAK || cfInstruction->type == GPU7_CF_INST_ALU_ELSE_AFTER )
{
// emit ALU code
if (shaderContext->analyzer.modifiesPixelActiveState)
{
if(cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE)
src->addFmt("if( {} == true ) {{" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1 - 1));
else
src->addFmt("if( {} == true ) {{" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1));
}
if (cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE)
{
src->addFmt("{} = {};" _CRLF, _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth-1));
src->addFmt("{} = {};" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth));
}
_emitALUClauseCode(shaderContext, cfInstruction);
if( shaderContext->analyzer.modifiesPixelActiveState )
src->add("}" _CRLF);
cemu_assert_debug(!(shaderContext->analyzer.modifiesPixelActiveState == false && cfInstruction->type != GPU7_CF_INST_ALU));
// handle ELSE case of PUSH_BEFORE
if( cfInstruction->type == GPU7_CF_INST_ALU_PUSH_BEFORE )
{
src->add("else {" _CRLF);
src->addFmt("{} = false;" _CRLF, _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth));
src->addFmt("{} = false;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1));
src->add("}" _CRLF);
}
// post clause handler
if( cfInstruction->type == GPU7_CF_INST_ALU_POP_AFTER )
{
src->addFmt("{} = {} == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1 - 1), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth - 1), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth - 1));
}
else if( cfInstruction->type == GPU7_CF_INST_ALU_POP2_AFTER )
{
src->addFmt("{} = {} == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1 - 2), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth - 2), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth - 2));
}
else if( cfInstruction->type == GPU7_CF_INST_ALU_ELSE_AFTER )
{
// no condition test
// pop stack
if( cfInstruction->popCount != 0 )
debugBreakpoint();
// else operation
src->addFmt("{} = {} == false;" _CRLF, _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth));
src->addFmt("{} = {} == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth));
}
}
else if( cfInstruction->type == GPU7_CF_INST_TEX )
{
// emit TEX code
if (shaderContext->analyzer.modifiesPixelActiveState)
{
src->addFmt("if( {} == true ) {{" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth+1));
}
_emitTEXClauseCode(shaderContext, cfInstruction);
if (shaderContext->analyzer.modifiesPixelActiveState)
{
src->add("}" _CRLF);
}
}
else if( cfInstruction->type == GPU7_CF_INST_EXPORT || cfInstruction->type == GPU7_CF_INST_EXPORT_DONE )
{
// emit export code
_emitExportCode(shaderContext, cfInstruction);
}
else if( cfInstruction->type == GPU7_CF_INST_ELSE )
{
// todo: Condition test, popCount?
src->addFmt("{} = {} == false;" _CRLF, _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth));
src->addFmt("{} = {} == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth));
}
else if( cfInstruction->type == GPU7_CF_INST_POP )
{
src->addFmt("{} = {} == true && {} == true;" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1 - cfInstruction->popCount), _getActiveMaskVarName(shaderContext, cfInstruction->activeStackDepth - cfInstruction->popCount), _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth - cfInstruction->popCount));
}
else if( cfInstruction->type == GPU7_CF_INST_LOOP_START_DX10 ||
cfInstruction->type == GPU7_CF_INST_LOOP_START_NO_AL)
{
// start of loop
// if pixel is disabled, then skip loop
if (ActiveSettings::ShaderPreventInfiniteLoopsEnabled())
{
// with iteration limit to prevent infinite loops
src->addFmt("int loopCounter{} = 0;" _CRLF, (sint32)cfInstruction->cfAddr);
src->addFmt("while( {} == true && loopCounter{} < 500 )" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1), (sint32)cfInstruction->cfAddr);
src->add("{" _CRLF);
src->addFmt("loopCounter{}++;" _CRLF, (sint32)cfInstruction->cfAddr);
}
else
{
src->addFmt("while( {} == true )" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1));
src->add("{" _CRLF);
}
}
else if( cfInstruction->type == GPU7_CF_INST_LOOP_END )
{
// this might not always work
if( cfInstruction->popCount != 0 )
debugBreakpoint();
src->add("}" _CRLF);
}
else if( cfInstruction->type == GPU7_CF_INST_LOOP_BREAK )
{
if( cfInstruction->popCount != 0 )
debugBreakpoint();
if (shaderContext->analyzer.modifiesPixelActiveState)
{
src->addFmt("if( {} == true ) {{" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1));
}
// note: active stack level is set to the same level as the loop begin. popCount is ignored
src->add("break;" _CRLF);
if (shaderContext->analyzer.modifiesPixelActiveState)
src->add("}" _CRLF);
}
else if( cfInstruction->type == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cfInstruction->type == GPU7_CF_INST_MEM_STREAM1_WRITE )
{
_emitStreamWriteCode(shaderContext, cfInstruction);
}
else if( cfInstruction->type == GPU7_CF_INST_MEM_RING_WRITE )
{
_emitCFRingWriteCode(shaderContext, cfInstruction);
}
else if( cfInstruction->type == GPU7_CF_INST_EMIT_VERTEX )
{
if( shaderContext->analyzer.modifiesPixelActiveState )
src->addFmt("if( {} == true ) {{" _CRLF, _getActiveMaskCVarName(shaderContext, cfInstruction->activeStackDepth + 1));
// write point size
if (shaderContext->analyzer.outputPointSize && shaderContext->analyzer.writesPointSize == false)
src->add("gl_PointSize = uf_pointSize;" _CRLF);
// emit vertex
src->add("EmitVertex();" _CRLF);
// increment transform feedback pointer
if (shaderContext->analyzer.useSSBOForStreamout)
{
for (sint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if (!shaderContext->output->streamoutBufferWriteMask[i])
continue;
cemu_assert_debug((shaderContext->output->streamoutBufferStride[i] & 3) == 0);
src->addFmt("sbBase{} += {};" _CRLF, i, shaderContext->output->streamoutBufferStride[i] / 4);
}
}
if( shaderContext->analyzer.modifiesPixelActiveState )
src->add("}" _CRLF);
}
else if (cfInstruction->type == GPU7_CF_INST_CALL)
{
_emitCFCall(shaderContext, cfInstruction);
}
else if (cfInstruction->type == GPU7_CF_INST_RETURN)
{
// todo (handle properly)
}
else
{
cemu_assert_debug(false);
}
}
void LatteDecompiler_emitGLSLHelperFunctions(LatteDecompilerShaderContext* shaderContext, StringBuf* fCStr_shaderSource)
{
if( shaderContext->analyzer.hasRedcCUBE )
{
fCStr_shaderSource->add("void redcCUBE(vec4 src0, vec4 src1, out vec3 stm, out int faceId)\r\n"
"{\r\n"
"// stm -> x .. s, y .. t, z .. MajorAxis*2.0\r\n"
"vec3 inputCoord = normalize(vec3(src1.y, src1.x, src0.x));\r\n"
"float rx = inputCoord.x;\r\n"
"float ry = inputCoord.y;\r\n"
"float rz = inputCoord.z;\r\n"
"if( abs(rx) > abs(ry) && abs(rx) > abs(rz) )\r\n"
"{\r\n"
"stm.z = rx*2.0;\r\n"
"stm.xy = vec2(ry,rz); \r\n"
"if( rx >= 0.0 )\r\n"
"{\r\n"
"faceId = 0;\r\n"
"}\r\n"
"else\r\n"
"{\r\n"
"faceId = 1;\r\n"
"}\r\n"
"}\r\n"
"else if( abs(ry) > abs(rx) && abs(ry) > abs(rz) )\r\n"
"{\r\n"
"stm.z = ry*2.0;\r\n"
"stm.xy = vec2(rx,rz); \r\n"
"if( ry >= 0.0 )\r\n"
"{\r\n"
"faceId = 2;\r\n"
"}\r\n"
"else\r\n"
"{\r\n"
"faceId = 3;\r\n"
"}\r\n"
"}\r\n"
"else //if( abs(rz) > abs(ry) && abs(rz) > abs(rx) )\r\n"
"{\r\n"
"stm.z = rz*2.0;\r\n"
"stm.xy = vec2(rx,ry); \r\n"
"if( rz >= 0.0 )\r\n"
"{\r\n"
"faceId = 4;\r\n"
"}\r\n"
"else\r\n"
"{\r\n"
"faceId = 5;\r\n"
"}\r\n"
"}\r\n"
"}\r\n");
}
if( shaderContext->analyzer.hasCubeMapTexture )
{
fCStr_shaderSource->add("vec3 redcCUBEReverse(vec2 st, int faceId)\r\n"
"{\r\n"
"st.yx = st.xy;\r\n"
"vec3 v;\r\n"
"float majorAxis = 1.0;\r\n"
"if( faceId == 0 )\r\n"
"{\r\n"
"v.yz = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.x = 1.0;\r\n"
"}\r\n"
"else if( faceId == 1 )\r\n"
"{\r\n"
"v.yz = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.x = -1.0;\r\n"
"}\r\n"
"else if( faceId == 2 )\r\n"
"{\r\n"
"v.xz = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.y = 1.0;\r\n"
"}\r\n"
"else if( faceId == 3 )\r\n"
"{\r\n"
"v.xz = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.y = -1.0;\r\n"
"}\r\n"
"else if( faceId == 4 )\r\n"
"{\r\n"
"v.xy = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.z = 1.0;\r\n"
"}\r\n"
"else\r\n"
"{\r\n"
"v.xy = (st-vec2(1.5))*(majorAxis*2.0);\r\n"
"v.z = -1.0;\r\n"
"}\r\n"
"return v;\r\n"
"}\r\n");
}
// clamp
fCStr_shaderSource->add(""
"int clampFI32(int v)\r\n"
"{\r\n"
"if( v == 0x7FFFFFFF )\r\n"
" return floatBitsToInt(1.0);\r\n"
"else if( v == 0xFFFFFFFF )\r\n"
" return floatBitsToInt(0.0);\r\n"
"return floatBitsToInt(clamp(intBitsToFloat(v), 0.0, 1.0));\r\n"
"}\r\n");
// mul non-ieee way (0*NaN/INF => 0.0)
if (shaderContext->options->strictMul)
{
// things we tried:
//fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){ return mix(a*b,0.0,a==0.0||b==0.0); }" STR_LINEBREAK);
//fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){ return mix(vec2(a*b,0.0),vec2(0.0,0.0),(equal(vec2(a),vec2(0.0,0.0))||equal(vec2(b),vec2(0.0,0.0)))).x; }" STR_LINEBREAK);
//fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){ if( a == 0.0 || b == 0.0 ) return 0.0; return a*b; }" STR_LINEBREAK);
//fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){float r = a*b;r = intBitsToFloat(floatBitsToInt(r)&(((floatBitsToInt(a) != 0) && (floatBitsToInt(b) != 0))?0xFFFFFFFF:0));return r;}" STR_LINEBREAK); works
// for "min" it used to be: float mul_nonIEEE(float a, float b){ return min(a*b,min(abs(a)*3.40282347E+38F,abs(b)*3.40282347E+38F)); }
if( LatteGPUState.glVendor == GLVENDOR_NVIDIA && !ActiveSettings::DumpShadersEnabled())
fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){return mix(0.0, a*b, (a != 0.0) && (b != 0.0));}" _CRLF); // compiles faster on Nvidia and also results in lower RAM usage (OpenGL)
else
fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){ if( a == 0.0 || b == 0.0 ) return 0.0; return a*b; }" _CRLF);
// DXKV-like: fCStr_shaderSource->add("float mul_nonIEEE(float a, float b){ return (b==0.0 ? 0.0 : a) * (a==0.0 ? 0.0 : b); }" _CRLF);
}
}
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerEmitGLSLHeader.hpp"
void LatteDecompiler_emitAttributeImport(LatteDecompilerShaderContext* shaderContext, LatteParsedFetchShaderAttribute_t& attrib)
{
auto src = shaderContext->shaderSource;
static const char* dsMappingTableFloat[6] = { "int(attrDecoder.x)", "int(attrDecoder.y)", "int(attrDecoder.z)", "int(attrDecoder.w)", /*"floatBitsToInt(0.0)"*/ "0", /*"floatBitsToInt(1.0)"*/ "0x3f800000" };
static const char* dsMappingTableInt[6] = { "int(attrDecoder.x)", "int(attrDecoder.y)", "int(attrDecoder.z)", "int(attrDecoder.w)", "0", "1" };
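// ds (destination select) values 0-3 pick the corresponding attrDecoder component, 4 yields the
// constant 0 and 5 yields the constant 1 (0x3f800000 when interpreted as float bits).
// e.g. a three-component attribute with ds = {0,1,2,5} builds its vec4 from attrDecoder.xyz plus a constant 1 in w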
// get register index based on vtx semantic table
uint32 attributeShaderLoc = 0xFFFFFFFF;
for (sint32 f = 0; f < 32; f++)
{
if (shaderContext->contextRegisters[mmSQ_VTX_SEMANTIC_0 + f] == attrib.semanticId)
{
attributeShaderLoc = f;
break;
}
}
if (attributeShaderLoc == 0xFFFFFFFF)
return; // attribute is not mapped to VS input
uint32 registerIndex = attributeShaderLoc + 1; // R0 is skipped
// is register used?
if ((shaderContext->analyzer.gprUseMask[registerIndex / 8] & (1 << (registerIndex % 8))) == 0)
{
src->addFmt("// skipped unused attribute for r{}" _CRLF, registerIndex);
return;
}
LatteDecompiler_emitAttributeDecodeGLSL(shaderContext->shader, src, &attrib);
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt("{} = ivec4(", _getRegisterVarName(shaderContext, registerIndex));
else
src->addFmt("{} = vec4(", _getRegisterVarName(shaderContext, registerIndex));
for (sint32 f = 0; f < 4; f++)
{
uint8 ds = attrib.ds[f];
if (f > 0)
src->add(", ");
_emitTypeConversionPrefix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, shaderContext->typeTracker.defaultDataType);
if (ds >= 6)
{
cemu_assert_unimplemented();
ds = 4; // read as 0.0
}
if (attrib.nfa != 1)
{
src->add(dsMappingTableFloat[ds]);
}
else
{
src->add(dsMappingTableInt[ds]);
}
_emitTypeConversionSuffix(shaderContext, LATTE_DECOMPILER_DTYPE_SIGNED_INT, shaderContext->typeTracker.defaultDataType);
}
src->add(");" _CRLF);
}
void LatteDecompiler_emitGLSLShader(LatteDecompilerShaderContext* shaderContext, LatteDecompilerShader* shader)
{
StringBuf* src = new StringBuf(1024*1024*12); // reserve 12MB for generated source (we resize-to-fit at the end)
shaderContext->shaderSource = src;
// GLSL shader header
src->add("#version 430" _CRLF); // 430 is required for shader storage (Vulkan alternative TF path)
src->add("#extension GL_ARB_texture_gather : enable" _CRLF);
src->add("#extension GL_ARB_separate_shader_objects : enable" _CRLF);
if (shaderContext->analyzer.hasStreamoutWrite || shaderContext->options->usesGeometryShader )
src->add("#extension GL_ARB_enhanced_layouts : enable" _CRLF);
// debug info
src->addFmt("// shader {:016x}" _CRLF, shaderContext->shaderBaseHash);
#ifdef CEMU_DEBUG_ASSERT
src->addFmt("// usesIntegerValues: {}" _CRLF, shaderContext->analyzer.usesIntegerValues?"true":"false");
src->addFmt(_CRLF);
#endif
// header part (definitions for inputs and outputs)
LatteDecompiler::emitHeader(shaderContext);
// helper functions
LatteDecompiler_emitGLSLHelperFunctions(shaderContext, src);
// start of main
src->add("void main()" _CRLF);
src->add("{" _CRLF);
// variable definition
if (shaderContext->typeTracker.useArrayGPRs == false)
{
// each register is a separate variable
for (sint32 i = 0; i < 128; i++)
{
if (shaderContext->analyzer.usesRelativeGPRRead || (shaderContext->analyzer.gprUseMask[i / 8] & (1 << (i & 7))) != 0)
{
if (shaderContext->typeTracker.genIntReg)
src->addFmt("ivec4 R{}i = ivec4(0);" _CRLF, i);
else if (shaderContext->typeTracker.genFloatReg)
src->addFmt("vec4 R{}f = vec4(0.0);" _CRLF, i);
}
}
}
else
{
// registers are represented using a single large array
if (shaderContext->typeTracker.genIntReg)
src->addFmt("ivec4 Ri[128];" _CRLF);
else if (shaderContext->typeTracker.genFloatReg)
src->addFmt("vec4 Rf[128];" _CRLF);
for (sint32 i = 0; i < 128; i++)
{
if (shaderContext->typeTracker.genIntReg)
src->addFmt("Ri[{}] = ivec4(0);" _CRLF, i);
else if (shaderContext->typeTracker.genFloatReg)
src->addFmt("Rf[{}] = vec4(0.0);" _CRLF, i);
}
}
if( shader->shaderType == LatteConst::ShaderType::Vertex )
src->addFmt("uvec4 attrDecoder;" _CRLF);
if (shaderContext->typeTracker.genIntReg)
src->addFmt("int backupReg0i, backupReg1i, backupReg2i, backupReg3i, backupReg4i;" _CRLF);
if (shaderContext->typeTracker.genFloatReg)
src->addFmt("float backupReg0f, backupReg1f, backupReg2f, backupReg3f, backupReg4f;" _CRLF);
if (shaderContext->typeTracker.genIntReg)
{
src->addFmt("int PV0ix = 0, PV0iy = 0, PV0iz = 0, PV0iw = 0, PV1ix = 0, PV1iy = 0, PV1iz = 0, PV1iw = 0;" _CRLF);
src->addFmt("int PS0i = 0, PS1i = 0;" _CRLF);
src->addFmt("ivec4 tempi = ivec4(0);" _CRLF);
}
if (shaderContext->typeTracker.genFloatReg)
{
src->addFmt("float PV0fx = 0.0, PV0fy = 0.0, PV0fz = 0.0, PV0fw = 0.0, PV1fx = 0.0, PV1fy = 0.0, PV1fz = 0.0, PV1fw = 0.0;" _CRLF);
src->addFmt("float PS0f = 0.0, PS1f = 0.0;" _CRLF);
src->addFmt("vec4 tempf = vec4(0.0);" _CRLF);
}
if (shaderContext->analyzer.hasGradientLookup)
{
src->add("vec4 gradH;" _CRLF);
src->add("vec4 gradV;" _CRLF);
}
src->add("float tempResultf;" _CRLF);
src->add("int tempResulti;" _CRLF);
src->add("ivec4 ARi = ivec4(0);" _CRLF);
src->add("bool predResult = true;" _CRLF);
if(shaderContext->analyzer.modifiesPixelActiveState )
{
src->addFmt("bool activeMaskStack[{}];" _CRLF, shaderContext->analyzer.activeStackMaxDepth+1);
src->addFmt("bool activeMaskStackC[{}];" _CRLF, shaderContext->analyzer.activeStackMaxDepth+2);
for (sint32 i = 0; i < shaderContext->analyzer.activeStackMaxDepth; i++)
{
src->addFmt("activeMaskStack[{}] = false;" _CRLF, i);
}
for (sint32 i = 0; i < shaderContext->analyzer.activeStackMaxDepth+1; i++)
{
src->addFmt("activeMaskStackC[{}] = false;" _CRLF, i);
}
src->addFmt("activeMaskStack[0] = true;" _CRLF);
src->addFmt("activeMaskStackC[0] = true;" _CRLF);
src->addFmt("activeMaskStackC[1] = true;" _CRLF);
// generate vars for each subroutine
for (auto& subroutineInfo : shaderContext->list_subroutines)
{
sint32 subroutineMaxStackDepth = 0;
src->addFmt("bool activeMaskStackSub{:04x}[{}];" _CRLF, subroutineInfo.cfAddr, subroutineMaxStackDepth + 1);
src->addFmt("bool activeMaskStackCSub{:04x}[{}];" _CRLF, subroutineInfo.cfAddr, subroutineMaxStackDepth + 2);
}
}
// helper variables for cube maps (todo: Only emit when used)
if (shaderContext->analyzer.hasRedcCUBE)
{
src->add("vec3 cubeMapSTM;" _CRLF);
src->add("int cubeMapFaceId;" _CRLF);
}
for(sint32 i=0; i<LATTE_NUM_MAX_TEX_UNITS; i++)
{
if(!shaderContext->output->textureUnitMask[i])
continue;
if( shader->textureUnitDim[i] != Latte::E_DIM::DIM_CUBEMAP )
continue;
src->addFmt("float cubeMapArrayIndex{} = 0.0;" _CRLF, i);
}
// init base offset for streamout buffer writes
if (shaderContext->analyzer.useSSBOForStreamout && (shader->shaderType == LatteConst::ShaderType::Vertex || shader->shaderType == LatteConst::ShaderType::Geometry))
{
for (sint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if(!shaderContext->output->streamoutBufferWriteMask[i])
continue;
cemu_assert_debug((shaderContext->output->streamoutBufferStride[i]&3) == 0);
if (shader->shaderType == LatteConst::ShaderType::Vertex) // vertex shader
src->addFmt("int sbBase{} = uf_streamoutBufferBase{}/4 + (gl_VertexID + uf_verticesPerInstance * gl_InstanceID)*{};" _CRLF, i, i, shaderContext->output->streamoutBufferStride[i] / 4);
else // geometry shader
{
uint32 gsOutPrimType = shaderContext->contextRegisters[mmVGT_GS_OUT_PRIM_TYPE];
uint32 bytesPerVertex = shaderContext->contextRegisters[mmSQ_GS_VERT_ITEMSIZE] * 4;
uint32 maxVerticesInGS = ((shaderContext->contextRegisters[mmSQ_GSVS_RING_ITEMSIZE] & 0x7FFF) * 4) / bytesPerVertex;
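// the ring item size is given in dwords; converted to bytes and divided by the per-vertex byte size
// it presumably yields the maximum number of vertices the GS can emit, which is then used to derive a per-primitive base offset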
cemu_assert_debug(gsOutPrimType == 0); // currently we only properly handle GS output primitive points
src->addFmt("int sbBase{} = uf_streamoutBufferBase{}/4 + (gl_PrimitiveIDIn * {})*{};" _CRLF, i, i, maxVerticesInGS, shaderContext->output->streamoutBufferStride[i] / 4);
}
}
}
// code to load inputs from previous stage
if( shader->shaderType == LatteConst::ShaderType::Vertex )
{
if( (shaderContext->analyzer.gprUseMask[0/8]&(1<<(0%8))) != 0 )
{
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt("{} = ivec4(gl_VertexID, 0, 0, gl_InstanceID);" _CRLF, _getRegisterVarName(shaderContext, 0));
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{} = floatBitsToInt(ivec4(gl_VertexID, 0, 0, gl_InstanceID));" _CRLF, _getRegisterVarName(shaderContext, 0));
else
cemu_assert_unimplemented();
}
LatteFetchShader* parsedFetchShader = shaderContext->fetchShader;
for(auto& bufferGroup : parsedFetchShader->bufferGroups)
{
for(sint32 i=0; i<bufferGroup.attribCount; i++)
LatteDecompiler_emitAttributeImport(shaderContext, bufferGroup.attrib[i]);
}
for (auto& bufferGroup : parsedFetchShader->bufferGroupsInvalid)
{
// these attributes point to non-existent buffers
// todo - figure out how the hardware actually handles this, currently we assume the input values are zero
for (sint32 i = 0; i < bufferGroup.attribCount; i++)
LatteDecompiler_emitAttributeImport(shaderContext, bufferGroup.attrib[i]);
}
}
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
{
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
uint32 psControl0 = shaderContext->contextRegisters[mmSPI_PS_IN_CONTROL_0];
uint32 psControl1 = shaderContext->contextRegisters[mmSPI_PS_IN_CONTROL_1];
uint32 spiInterpControl = shaderContext->contextRegisters[mmSPI_INTERP_CONTROL_0];
uint8 spriteEnable = (spiInterpControl >> 1) & 1;
cemu_assert_debug(spriteEnable == 0);
uint8 frontFace_enabled = (psControl1 >> 8) & 1;
uint8 frontFace_chan = (psControl1 >> 9) & 3;
uint8 frontFace_allBits = (psControl1 >> 11) & 1;
uint8 frontFace_regIndex = (psControl1 >> 12) & 0x1F;
// handle param_gen
if (psInputTable->paramGen != 0)
{
cemu_assert_debug((psInputTable->paramGen) == 1); // handle the other bits (the same set of coordinates with different perspective/projection settings?)
uint32 paramGenGPRIndex = psInputTable->paramGenGPR;
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{} = gl_PointCoord.xyxy;" _CRLF, _getRegisterVarName(shaderContext, paramGenGPRIndex));
else
src->addFmt("{} = floatBitsToInt(gl_PointCoord.xyxy);" _CRLF, _getRegisterVarName(shaderContext, paramGenGPRIndex));
}
for (sint32 i = 0; i < psInputTable->count; i++)
{
uint32 psControl0 = shaderContext->contextRegisters[mmSPI_PS_IN_CONTROL_0];
uint32 spi0_paramGen = (psControl0 >> 15) & 0xF;
sint32 gprIndex = i;// +spi0_paramGen + paramRegOffset;
if ((shaderContext->analyzer.gprUseMask[gprIndex / 8] & (1 << (gprIndex % 8))) == 0 && shaderContext->analyzer.usesRelativeGPRRead == false)
continue;
uint32 psInputSemanticId = psInputTable->import[i].semanticId;
if (psInputSemanticId == LATTE_ANALYZER_IMPORT_INDEX_SPIPOSITION)
{
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{} = GET_FRAGCOORD();" _CRLF, _getRegisterVarName(shaderContext, gprIndex));
else
src->addFmt("{} = floatBitsToInt(GET_FRAGCOORD());" _CRLF, _getRegisterVarName(shaderContext, gprIndex));
continue;
}
if (shaderContext->options->usesGeometryShader)
{
// import from geometry shader
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt("{} = floatBitsToInt(passG2PParameter{});" _CRLF, _getRegisterVarName(shaderContext, gprIndex), psInputSemanticId & 0x7F);
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{} = passG2PParameter{};" _CRLF, _getRegisterVarName(shaderContext, gprIndex), psInputSemanticId & 0x7F);
else
cemu_assert_unimplemented();
}
else
{
// import from vertex shader
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt("{} = floatBitsToInt(passParameterSem{});" _CRLF, _getRegisterVarName(shaderContext, gprIndex), psInputSemanticId);
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{} = passParameterSem{};" _CRLF, _getRegisterVarName(shaderContext, gprIndex), psInputSemanticId);
else
cemu_assert_unimplemented();
}
}
// front facing attribute
if (frontFace_enabled)
{
if ((shaderContext->analyzer.gprUseMask[0 / 8] & (1 << (0 % 8))) != 0)
{
if (frontFace_allBits)
cemu_assert_debug(false);
if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_SIGNED_INT)
src->addFmt("{}.{} = floatBitsToInt(gl_FrontFacing?1.0:0.0);" _CRLF, _getRegisterVarName(shaderContext, frontFace_regIndex), _getElementStrByIndex(frontFace_chan));
else if (shaderContext->typeTracker.defaultDataType == LATTE_DECOMPILER_DTYPE_FLOAT)
src->addFmt("{}.{} = gl_FrontFacing?1.0:0.0;" _CRLF, _getRegisterVarName(shaderContext, frontFace_regIndex), _getElementStrByIndex(frontFace_chan));
else
cemu_assert_debug(false);
}
}
}
for(auto& cfInstruction : shaderContext->cfInstructions)
LatteDecompiler_emitClauseCode(shaderContext, &cfInstruction, false);
if( shader->shaderType == LatteConst::ShaderType::Geometry )
src->add("EndPrimitive();" _CRLF);
// if the shader is expected to output a point size but never writes it itself, the vertex shader emits the renderstate point size at the end
if (shaderContext->analyzer.outputPointSize && shaderContext->analyzer.writesPointSize == false)
{
if (shader->shaderType == LatteConst::ShaderType::Vertex && shaderContext->options->usesGeometryShader == false)
src->add("gl_PointSize = uf_pointSize;" _CRLF);
}
// end of shader main
src->add("}" _CRLF);
src->shrink_to_fit();
shader->strBuf_shaderSource = src;
}
// File: LatteDecompiler.cpp (cemu-project_Cemu/src/Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.cpp)
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInternal.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "util/helpers/helpers.h"
// parse instruction and if valid append it to instructionList
bool LatteDecompiler_ParseCFInstruction(LatteDecompilerShaderContext* shaderContext, uint32 cfIndex, uint32 cfWord0, uint32 cfWord1, bool* endOfProgram, std::vector<LatteDecompilerCFInstruction>& instructionList)
{
LatteDecompilerShader* shaderObj = shaderContext->shader;
uint32 cf_inst23_7 = (cfWord1 >> 23) & 0x7F;
if (cf_inst23_7 < 0x40) // starting at 0x40 the bits overlap with the ALU instruction encoding
{
*endOfProgram = ((cfWord1 >> 21) & 1) != 0;
uint32 addr = cfWord0 & 0xFFFFFFFF;
uint32 count = (cfWord1 >> 10) & 7;
if (((cfWord1 >> 19) & 1) != 0)
count |= 0x8;
count++;
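// the count field is stored minus one and bit 19 of cfWord1 acts as an additional high bit,
// so a clause can contain 1 to 16 instructions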
if (cf_inst23_7 == GPU7_CF_INST_CALL_FS)
{
// nop
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_NOP)
{
// nop
if (((cfWord1 >> 0) & 7) != 0)
debugBreakpoint(); // pop count is not zero
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_EXPORT || cf_inst23_7 == GPU7_CF_INST_EXPORT_DONE)
{
// export
uint32 edType = (cfWord0 >> 13) & 0x3;
uint32 edIndexGpr = (cfWord0 >> 23) & 0x7F;
uint32 edRWRel = (cfWord0 >> 22) & 1;
if (edRWRel != 0 || edIndexGpr != 0)
debugBreakpoint();
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
// set export component selection
cfInstruction.exportComponentSel[0] = (cfWord1 >> 0) & 0x7;
cfInstruction.exportComponentSel[1] = (cfWord1 >> 3) & 0x7;
cfInstruction.exportComponentSel[2] = (cfWord1 >> 6) & 0x7;
cfInstruction.exportComponentSel[3] = (cfWord1 >> 9) & 0x7;
// set export array base, index and burstcount
cfInstruction.exportArrayBase = (cfWord0 >> 0) & 0x1FFF;
cfInstruction.exportBurstCount = (cfWord1 >> 17) & 0xF;
// set export source GPR and type
cfInstruction.exportSourceGPR = (cfWord0 >> 15) & 0x7F;
cfInstruction.exportType = edType;
//cfInstruction->memWriteElemSize = (cfWord0>>29)&3; // unused
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_TEX)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
// set TEX clause related values
cfInstruction.addr = addr; // index of first instruction in 64bit words
cfInstruction.count = count; // number of instructions (each instruction is 128bit)
// todo: CF_CONST and COND field and maybe other fields?
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_ELSE ||
cf_inst23_7 == GPU7_CF_INST_POP)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond and popCount
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
cfInstruction.popCount = (cfWord1 >> 0) & 7;
// set TEX clause related values
cfInstruction.addr = addr; // index of first instruction in 64bit words
cfInstruction.count = count; // number of instructions (each instruction is 128bit)
// todo: CF_CONST
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_JUMP)
{
// ignored (we use ALU/IF/ELSE/PUSH/POP clauses to determine code flow)
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_LOOP_START_DX10 || cf_inst23_7 == GPU7_CF_INST_LOOP_END ||
cf_inst23_7 == GPU7_CF_INST_LOOP_START_NO_AL)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond and popCount
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
cfInstruction.popCount = (cfWord1 >> 0) & 7;
// set TEX clause related values
cfInstruction.addr = addr; // index of first instruction in 64bit words
cfInstruction.count = count; // number of instructions (each instruction is 128bit)
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_LOOP_BREAK)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond and popCount
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
cfInstruction.popCount = (cfWord1 >> 0) & 7;
// set clause related values
cfInstruction.addr = addr; // index of first instruction in 64bit words
cfInstruction.count = count; // number of instructions (each instruction is 128bit)
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cf_inst23_7 == GPU7_CF_INST_MEM_STREAM1_WRITE)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// todo: Correctly read all the STREAM0_WRITE specific fields
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set export array base
cfInstruction.exportArrayBase = (cfWord0 >> 0) & 0x1FFF;
cfInstruction.memWriteArraySize = (cfWord1 >> 0) & 0xFFF;
cfInstruction.memWriteCompMask = (cfWord1 >> 12) & 0xF;
// set export source GPR and type
cfInstruction.exportSourceGPR = (cfWord0 >> 15) & 0x7F;
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_MEM_RING_WRITE)
{
// this CF instruction is only available when the geometry shader stage is active
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set export array base
cfInstruction.exportArrayBase = (cfWord0 >> 0) & 0x1FFF;
cfInstruction.memWriteArraySize = (cfWord1 >> 0) & 0xFFF;
cfInstruction.memWriteCompMask = (cfWord1 >> 12) & 0xF;
cfInstruction.memWriteElemSize = ((cfWord0 >> 30) & 0x3);
cfInstruction.exportBurstCount = (cfWord1 >> 17) & 0xF;
// set export source GPR and type
cfInstruction.exportSourceGPR = (cfWord0 >> 15) & 0x7F;
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_EMIT_VERTEX)
{
// this CF instruction is only available when the geometry shader stage is active
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_CALL)
{
// CALL subroutine
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
uint32 callCount = (cfWord1 >> 13) & 0x3F;
cfInstruction.addr = addr; // index of call destination in 64bit words
cfInstruction.count = callCount; // store callCount in count
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// remember subroutine
bool subroutineIsKnown = false;
for (auto& it : shaderContext->list_subroutines)
{
if (it.cfAddr == addr)
{
subroutineIsKnown = true;
break;
}
}
if (subroutineIsKnown == false)
{
LatteDecompilerSubroutineInfo subroutineInfo = { 0 };
subroutineInfo.cfAddr = addr;
shaderContext->list_subroutines.push_back(subroutineInfo);
}
return true;
}
else if (cf_inst23_7 == GPU7_CF_INST_RETURN)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst23_7;
cfInstruction.cfAddr = cfIndex;
// set cond and popCount
cfInstruction.cfCond = (cfWord1 >> 8) & 3;
cfInstruction.popCount = (cfWord1 >> 0) & 7;
// todo - other fields?
return true;
}
else
{
debug_printf("Unknown 23_7 clause 0x%x found\n", cf_inst23_7);
shaderObj->hasError = true;
return false;
}
}
else
{
// ALU instruction
uint32 cf_inst26_4 = ((cfWord1 >> 26) & 0xF) | GPU7_CF_INST_ALU_MASK;
if (cf_inst26_4 == GPU7_CF_INST_ALU || cf_inst26_4 == GPU7_CF_INST_ALU_PUSH_BEFORE || cf_inst26_4 == GPU7_CF_INST_ALU_POP_AFTER || cf_inst26_4 == GPU7_CF_INST_ALU_POP2_AFTER || cf_inst26_4 == GPU7_CF_INST_ALU_BREAK || cf_inst26_4 == GPU7_CF_INST_ALU_ELSE_AFTER)
{
LatteDecompilerCFInstruction& cfInstruction = instructionList.emplace_back();
// set type and address
cfInstruction.type = cf_inst26_4;
cfInstruction.cfAddr = cfIndex;
// CF_ALU_* has no cond field
cfInstruction.cfCond = 0;
// set ALU clause related values
cfInstruction.addr = (cfWord0 >> 0) & 0x3FFFFF; // index of first instruction in 64bit words
cfInstruction.count = ((cfWord1 >> 18) & 0x7F) + 1; // number of instructions (each instruction is 64bit)
// set constant file/bank values
cfInstruction.cBank0Index = (cfWord0 >> 22) & 0xF;
cfInstruction.cBank1Index = (cfWord0 >> 26) & 0xF;
cfInstruction.cBank0AddrBase = ((cfWord1 >> 2) & 0xFF) * 16;
cfInstruction.cBank1AddrBase = ((cfWord1 >> 10) & 0xFF) * 16;
return true;
}
else
{
debug_printf("Unknown 26_4 clause 0x%x found\n", cf_inst26_4);
shaderObj->hasError = true;
return false;
}
}
cemu_assert_unimplemented(); // should not reach
return false;
}
void LatteDecompiler_ParseCFSubroutine(LatteDecompilerShaderContext* shaderContext, uint8* programData, uint32 programSize, LatteDecompilerSubroutineInfo* subroutineInfo)
{
LatteDecompilerShader* shaderObj = shaderContext->shader;
// parse control flow instructions
for (uint32 i = subroutineInfo->cfAddr; i < programSize / 8; i++)
{
uint32 cfWord0 = *(uint32*)(programData + i * 8 + 0);
uint32 cfWord1 = *(uint32*)(programData + i * 8 + 4);
bool isEndOfProgram = false;
if( !LatteDecompiler_ParseCFInstruction(shaderContext, i, cfWord0, cfWord1, &isEndOfProgram, subroutineInfo->instructions) )
continue;
cemu_assert_debug(!isEndOfProgram); // should never be encountered in a subroutine?
if (shaderObj->hasError)
return;
auto& cfInstruction = subroutineInfo->instructions.back();
if (cfInstruction.type == GPU7_CF_INST_RETURN)
return; // todo - should check if this return statement is conditional
}
cemu_assert_debug(false); // should not reach (subroutines have to end with RETURN)
}
void LatteDecompiler_ParseCF(LatteDecompilerShaderContext* shaderContext, uint8* programData, uint32 programSize)
{
LatteDecompilerShader* shaderObj = shaderContext->shader;
// parse control flow instructions for main entry point
bool endOfProgram = false;
for (uint32 i = 0; i < programSize / 8; i++)
{
uint32 cfWord0 = *(uint32*)(programData + i * 8 + 0);
uint32 cfWord1 = *(uint32*)(programData + i * 8 + 4);
LatteDecompiler_ParseCFInstruction(shaderContext, i, cfWord0, cfWord1, &endOfProgram, shaderContext->cfInstructions);
if (endOfProgram)
break;
}
// parse CF instructions for subroutines
for (auto& subroutineInfo : shaderContext->list_subroutines)
{
LatteDecompiler_ParseCFSubroutine(shaderContext, programData, programSize, &subroutineInfo);
}
}
// returns true if the given op2/op3 ALU instruction is always executed on the transcendental unit
bool LatteDecompiler_IsALUTransInstruction(bool isOP3, uint32 opcode)
{
if( isOP3 == true )
return false; // OP3 has no transcendental instructions?
if( opcode == ALU_OP2_INST_COS ||
opcode == ALU_OP2_INST_SIN ||
opcode == ALU_OP2_INST_RECIP_FF ||
opcode == ALU_OP2_INST_RECIP_IEEE ||
opcode == ALU_OP2_INST_RECIPSQRT_IEEE ||
opcode == ALU_OP2_INST_RECIPSQRT_CLAMPED ||
opcode == ALU_OP2_INST_RECIPSQRT_FF ||
opcode == ALU_OP2_INST_MULLO_INT ||
opcode == ALU_OP2_INST_MULLO_UINT ||
opcode == ALU_OP2_INST_FLT_TO_INT ||
opcode == ALU_OP2_INST_FLT_TO_UINT ||
opcode == ALU_OP2_INST_INT_TO_FLOAT ||
opcode == ALU_OP2_INST_UINT_TO_FLOAT ||
opcode == ALU_OP2_INST_LOG_CLAMPED ||
opcode == ALU_OP2_INST_LOG_IEEE ||
opcode == ALU_OP2_INST_EXP_IEEE ||
opcode == ALU_OP2_INST_UINT_TO_FLOAT ||
opcode == ALU_OP2_INST_SQRT_IEEE
)
{
// transcendental
return true;
}
else if( opcode == ALU_OP2_INST_MOV ||
opcode == ALU_OP2_INST_ADD ||
opcode == ALU_OP2_INST_NOP ||
opcode == ALU_OP2_INST_MUL ||
opcode == ALU_OP2_INST_DOT4 ||
opcode == ALU_OP2_INST_DOT4_IEEE ||
opcode == ALU_OP2_INST_MAX || // Not sure if MIN/MAX are non-transcendental?
opcode == ALU_OP2_INST_MIN ||
opcode == ALU_OP2_INST_AND_INT ||
opcode == ALU_OP2_INST_OR_INT ||
opcode == ALU_OP2_INST_XOR_INT ||
opcode == ALU_OP2_INST_NOT_INT ||
opcode == ALU_OP2_INST_ADD_INT ||
opcode == ALU_OP2_INST_SUB_INT ||
opcode == ALU_OP2_INST_SETGT ||
opcode == ALU_OP2_INST_SETGE ||
opcode == ALU_OP2_INST_SETNE ||
opcode == ALU_OP2_INST_SETE ||
opcode == ALU_OP2_INST_SETE_INT ||
opcode == ALU_OP2_INST_SETNE_INT ||
opcode == ALU_OP2_INST_SETGT_INT ||
opcode == ALU_OP2_INST_SETGE_INT ||
opcode == ALU_OP2_INST_SETGE_UINT ||
opcode == ALU_OP2_INST_SETGT_UINT ||
opcode == ALU_OP2_INST_MAX_DX10 ||
opcode == ALU_OP2_INST_MIN_DX10 ||
opcode == ALU_OP2_INST_PRED_SETE ||
opcode == ALU_OP2_INST_PRED_SETNE ||
opcode == ALU_OP2_INST_PRED_SETGE ||
opcode == ALU_OP2_INST_PRED_SETGT ||
opcode == ALU_OP2_INST_PRED_SETE_INT ||
opcode == ALU_OP2_INST_PRED_SETNE_INT ||
opcode == ALU_OP2_INST_PRED_SETGT_INT ||
opcode == ALU_OP2_INST_PRED_SETGE_INT ||
opcode == ALU_OP2_INST_KILLE_INT ||
opcode == ALU_OP2_INST_KILLGT_INT ||
opcode == ALU_OP2_INST_KILLNE_INT ||
opcode == ALU_OP2_INST_KILLGT ||
opcode == ALU_OP2_INST_KILLGE ||
opcode == ALU_OP2_INST_KILLE ||
opcode == ALU_OP2_INST_MUL_IEEE ||
opcode == ALU_OP2_INST_FLOOR ||
opcode == ALU_OP2_INST_FRACT ||
opcode == ALU_OP2_INST_TRUNC ||
opcode == ALU_OP2_INST_LSHL_INT ||
opcode == ALU_OP2_INST_ASHR_INT ||
opcode == ALU_OP2_INST_LSHR_INT ||
opcode == ALU_OP2_INST_MAX_INT ||
opcode == ALU_OP2_INST_MIN_INT ||
opcode == ALU_OP2_INST_MOVA_FLOOR ||
opcode == ALU_OP2_INST_MOVA_INT ||
opcode == ALU_OP2_INST_SETE_DX10 ||
opcode == ALU_OP2_INST_SETNE_DX10 ||
opcode == ALU_OP2_INST_SETGT_DX10 ||
opcode == ALU_OP2_INST_SETGE_DX10 ||
opcode == ALU_OP2_INST_RNDNE ||
opcode == ALU_OP2_INST_CUBE // reduction instruction
)
{
// not transcendental
return false;
}
else
{
debug_printf("_isALUTransInstruction(): Unknown instruction 0x%x (%s)\n", opcode, isOP3?"op3":"op2");
}
// ALU.Trans instructions:
// [x] FLT_TO_INT
// [x] FLT_TO_UINT
// [x] INT_TO_FLT
// MULHI_INT
// MULHI_UINT
// [x] MULLO_INT
// [x] MULLO_UINT
// RECIP_INT
// RECIP_UINT
// [x] UINT_TO_FLT
// [x] COS
// [x] EXP_IEEE
// [x] LOG_CLAMPED
// [x] LOG_IEEE
// MUL_LIT
// MUL_LIT_D2
// MUL_LIT_M2
// MUL_LIT_M4
// RECIP_CLAMPED
// [x] RECIP_FF
// [x] RECIP_IEEE
// [x] RECIPSQRT_CLAMPED
// [x] RECIPSQRT_FF
// [x] RECIPSQRT_IEEE
// [x] SIN
// [x] SQRT_IEEE
return false;
}
void LatteDecompiler_ParseALUClause(LatteDecompilerShader* shaderContext, LatteDecompilerCFInstruction* cfInstruction, uint8* programData, uint32 programSize)
{
sint32 instructionGroupIndex = 0;
sint32 indexInGroup = 0; // index of instruction within instruction group
uint32 elementsWrittenMask = 0; // used to determine ALU/Trans unit for instructions
uint8 literalMask = 0; // mask of used literals for current instruction group
sint32 parserIndex = 0;
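// ALU clauses are a sequence of 64-bit instruction words. Instructions form groups (co-issued on the
// x/y/z/w/t units) which end when the last-in-group bit of word0 is set; any literal constants
// referenced by a group follow it as one or two extra 64-bit words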
while( parserIndex < cfInstruction->count )
{
uint32 aluWord0 = *(uint32*)(programData+(cfInstruction->addr+parserIndex)*8+0);
uint32 aluWord1 = *(uint32*)(programData+(cfInstruction->addr+parserIndex)*8+4);
parserIndex++;
bool isLastInGroup = (aluWord0&0x80000000) != 0;
uint32 alu_inst13_5 = (aluWord1>>13)&0x1F;
// parameters from ALU word 0 (shared for ALU OP2 and OP3)
uint32 src0Sel = (aluWord0>>0)&0x1FF; // source selection
uint32 src1Sel = (aluWord0>>13)&0x1FF;
uint32 src0Rel = (aluWord0>>9)&0x1; // relative addressing mode
uint32 src1Rel = (aluWord0>>22)&0x1;
uint32 src0Chan = (aluWord0>>10)&0x3; // component selection x/y/z/w
uint32 src1Chan = (aluWord0>>23)&0x3;
uint32 src0Neg = (aluWord0>>12)&0x1; // negate input
uint32 src1Neg = (aluWord0>>25)&0x1;
uint32 indexMode = (aluWord0>>26)&7;
uint32 predSel = (aluWord0>>29)&3;
if( predSel != 0 )
debugBreakpoint();
if( alu_inst13_5 >= 0x8 )
{
// op3
// parameters from ALU word 1
uint32 src2Sel = (aluWord1>>0)&0x1FF; // source selection
uint32 src2Rel = (aluWord1>>9)&0x1; // relative addressing mode
uint32 src2Chan = (aluWord1>>10)&0x3; // component selection x/y/z/w
uint32 src2Neg = (aluWord1>>12)&0x1; // negate input
uint32 destGpr = (aluWord1>>21)&0x7F;
uint32 destRel = (aluWord1>>28)&1;
uint32 destElem = (aluWord1>>29)&3;
uint32 destClamp = (aluWord1>>31)&1;
LatteDecompilerALUInstruction aluInstruction;
aluInstruction.cfInstruction = cfInstruction;
aluInstruction.isOP3 = true;
aluInstruction.opcode = alu_inst13_5;
aluInstruction.instructionGroupIndex = instructionGroupIndex;
aluInstruction.indexMode = indexMode;
aluInstruction.destGpr = destGpr;
aluInstruction.destRel = destRel;
aluInstruction.destElem = destElem;
aluInstruction.destClamp = destClamp;
aluInstruction.writeMask = 1;
aluInstruction.omod = 0; // op3 has no omod
aluInstruction.sourceOperand[0].sel = src0Sel;
aluInstruction.sourceOperand[0].rel = src0Rel;
aluInstruction.sourceOperand[0].abs = 0;
aluInstruction.sourceOperand[0].neg = src0Neg;
aluInstruction.sourceOperand[0].chan = src0Chan;
aluInstruction.sourceOperand[1].sel = src1Sel;
aluInstruction.sourceOperand[1].rel = src1Rel;
aluInstruction.sourceOperand[1].abs = 0;
aluInstruction.sourceOperand[1].neg = src1Neg;
aluInstruction.sourceOperand[1].chan = src1Chan;
aluInstruction.sourceOperand[2].sel = src2Sel;
aluInstruction.sourceOperand[2].rel = src2Rel;
aluInstruction.sourceOperand[2].abs = 0;
aluInstruction.sourceOperand[2].neg = src2Neg;
aluInstruction.sourceOperand[2].chan = src2Chan;
// check for literal access
if( GPU7_ALU_SRC_IS_LITERAL(src0Sel) )
literalMask |= (1<<src0Chan);
if( GPU7_ALU_SRC_IS_LITERAL(src1Sel) )
literalMask |= (1<<src1Chan);
if( GPU7_ALU_SRC_IS_LITERAL(src2Sel) )
literalMask |= (1<<src2Chan);
// determine used ALU unit (x,y,z,w,t)
uint32 aluUnit = destElem;
if( aluUnit < 4 && (elementsWrittenMask & (1<<aluUnit)) != 0 )
{
aluUnit = 4; // ALU unit already used, this instruction uses the transcendental unit
}
elementsWrittenMask |= (1<<aluUnit);
aluInstruction.aluUnit = aluUnit;
aluInstruction.indexInGroup = indexInGroup;
aluInstruction.isLastInstructionOfGroup = isLastInGroup;
// add instruction to list of sub-instructions
cfInstruction->instructionsALU.emplace_back(aluInstruction);
}
else
{
uint32 alu_inst7_11 = (aluWord1>>7)&0x7FF;
uint32 src0Abs = (aluWord1>>0)&1;
uint32 src1Abs = (aluWord1>>1)&1;
uint32 updateExecuteMask = (aluWord1>>2)&1;
uint32 updatePredicate = (aluWord1>>3)&1;
uint32 writeMask = (aluWord1>>4)&1;
uint32 omod = (aluWord1>>5)&3;
uint32 destGpr = (aluWord1>>21)&0x7F;
uint32 destRel = (aluWord1>>28)&1;
uint32 destElem = (aluWord1>>29)&3;
uint32 destClamp = (aluWord1>>31)&1;
LatteDecompilerALUInstruction aluInstruction;
aluInstruction.cfInstruction = cfInstruction;
aluInstruction.isOP3 = false;
aluInstruction.opcode = alu_inst7_11;
aluInstruction.instructionGroupIndex = instructionGroupIndex;
aluInstruction.indexMode = indexMode;
aluInstruction.destGpr = destGpr;
aluInstruction.destRel = destRel;
aluInstruction.destElem = destElem;
aluInstruction.destClamp = destClamp;
aluInstruction.writeMask = writeMask;
aluInstruction.updateExecuteMask = updateExecuteMask;
aluInstruction.updatePredicate = updatePredicate;
aluInstruction.omod = omod;
aluInstruction.sourceOperand[0].sel = src0Sel;
aluInstruction.sourceOperand[0].rel = src0Rel;
aluInstruction.sourceOperand[0].abs = src0Abs;
aluInstruction.sourceOperand[0].neg = src0Neg;
aluInstruction.sourceOperand[0].chan = src0Chan;
aluInstruction.sourceOperand[1].sel = src1Sel;
aluInstruction.sourceOperand[1].rel = src1Rel;
aluInstruction.sourceOperand[1].abs = src1Abs;
aluInstruction.sourceOperand[1].neg = src1Neg;
aluInstruction.sourceOperand[1].chan = src1Chan;
aluInstruction.sourceOperand[2].sel = 0xFFFFFFFF;
// check for literal access
if( GPU7_ALU_SRC_IS_LITERAL(src0Sel) )
literalMask |= (1<<src0Chan);
if( GPU7_ALU_SRC_IS_LITERAL(src1Sel) )
literalMask |= (1<<src1Chan);
// determine ALU unit (x,y,z,w,t)
uint32 aluUnit = destElem;
// some instructions always use the transcendental unit
bool isTranscendentalOperation = LatteDecompiler_IsALUTransInstruction(false, alu_inst7_11);
if( isTranscendentalOperation )
aluUnit = 4;
if( aluUnit < 4 && (elementsWrittenMask & (1<<aluUnit)) != 0 )
{
aluUnit = 4; // ALU unit already used, this instruction uses the transcendental unit
}
elementsWrittenMask |= (1<<aluUnit);
aluInstruction.aluUnit = aluUnit;
aluInstruction.indexInGroup = indexInGroup;
aluInstruction.isLastInstructionOfGroup = isLastInGroup;
// add instruction to list of sub-instructions
cfInstruction->instructionsALU.emplace_back(aluInstruction);
}
indexInGroup++;
if( isLastInGroup )
{
// load literal data
if( literalMask )
{
bool useLiteralDataXY = false;
bool useLiteralDataZW = false;
if( (literalMask&(1|2)) )
{
useLiteralDataXY = true;
}
if( (literalMask&(4|8)) )
{
useLiteralDataXY = true;
useLiteralDataZW = true;
}
uint32 literalWords[4] = {0};
literalWords[0] = *(uint32*)(programData+(cfInstruction->addr+parserIndex)*8+0);
literalWords[1] = *(uint32*)(programData+(cfInstruction->addr+parserIndex)*8+4);
if( useLiteralDataZW )
{
literalWords[2] = *(uint32*)(programData+(cfInstruction->addr+parserIndex+1)*8+0);
literalWords[3] = *(uint32*)(programData+(cfInstruction->addr+parserIndex+1)*8+4);
}
if( useLiteralDataZW )
parserIndex += 2;
else
parserIndex += 1;
// set literal data for all instructions of the current instruction group
for(auto& aluInstructionItr : reverse_itr(cfInstruction->instructionsALU) )
{
if( aluInstructionItr.instructionGroupIndex != instructionGroupIndex )
break;
aluInstructionItr.literalData.w[0] = literalWords[0];
aluInstructionItr.literalData.w[1] = literalWords[1];
aluInstructionItr.literalData.w[2] = literalWords[2];
aluInstructionItr.literalData.w[3] = literalWords[3];
}
}
// reset instruction group related tracking variables
literalMask = 0;
elementsWrittenMask = 0;
indexInGroup = 0;
// start next group
instructionGroupIndex++;
}
}
}
/*
* Parse TEX clause
*/
void LatteDecompiler_ParseTEXClause(LatteDecompilerShader* shaderContext, LatteDecompilerCFInstruction* cfInstruction, uint8* programData, uint32 programSize)
{
for(sint32 i=0; i<cfInstruction->count; i++)
{
// each instruction is 128bit
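// cfInstruction->addr is given in 64-bit units, so *2 converts it to a 32-bit word index;
// each 128-bit TEX instruction then occupies 4 consecutive words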
uint32 instructionAddr = cfInstruction->addr*2+i*4;
uint32 word0 = *(uint32*)(programData+instructionAddr*4+0);
uint32 word1 = *(uint32*)(programData+instructionAddr*4+4);
uint32 word2 = *(uint32*)(programData+instructionAddr*4+8);
uint32 word3 = *(uint32*)(programData+instructionAddr*4+12);
uint32 inst0_4 = (word0>>0)&0x1F;
if (inst0_4 == GPU7_TEX_INST_SAMPLE || inst0_4 == GPU7_TEX_INST_SAMPLE_L || inst0_4 == GPU7_TEX_INST_SAMPLE_LZ || inst0_4 == GPU7_TEX_INST_SAMPLE_LB || inst0_4 == GPU7_TEX_INST_SAMPLE_C || inst0_4 == GPU7_TEX_INST_SAMPLE_C_L || inst0_4 == GPU7_TEX_INST_SAMPLE_C_LZ || inst0_4 == GPU7_TEX_INST_FETCH4 || inst0_4 == GPU7_TEX_INST_SAMPLE_G || inst0_4 == GPU7_TEX_INST_LD
|| inst0_4 == GPU7_TEX_INST_GET_TEXTURE_RESINFO || inst0_4 == GPU7_TEX_INST_GET_COMP_TEX_LOD)
{
uint32 fetchType = (word0 >> 5) & 3;
uint32 bufferId = (word0 >> 8) & 0xFF;
uint32 samplerId = (word2 >> 15) & 0x1F;
uint32 srcGpr = (word0 >> 16) & 0x7F;
uint32 srcRel = (word0 >> 23) & 1;
if (srcRel != 0)
debugBreakpoint();
uint32 destGpr = (word1 >> 0) & 0x7F;
uint32 destRel = (word1 >> 7) & 1;
if (destRel != 0)
debugBreakpoint();
uint32 dstSelX = (word1 >> 9) & 0x7;
uint32 dstSelY = (word1 >> 12) & 0x7;
uint32 dstSelZ = (word1 >> 15) & 0x7;
uint32 dstSelW = (word1 >> 18) & 0x7;
uint32 coordTypeX = (word1 >> 28) & 1;
uint32 coordTypeY = (word1 >> 29) & 1;
uint32 coordTypeZ = (word1 >> 30) & 1;
uint32 coordTypeW = (word1 >> 31) & 1;
uint32 srcSelX = (word2 >> 20) & 0x7;
uint32 srcSelY = (word2 >> 23) & 0x7;
uint32 srcSelZ = (word2 >> 26) & 0x7;
uint32 srcSelW = (word2 >> 29) & 0x7;
uint32 offsetX = (word2 >> 0) & 0x1F;
uint32 offsetY = (word2 >> 5) & 0x1F;
uint32 offsetZ = (word2 >> 10) & 0x1F;
sint8 lodBias = (word2 >> 21) & 0x7F;
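// the LOD bias is a 7-bit signed field; bit 6 is the sign bit, so it is manually sign-extended to a full sint8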
if ((lodBias&0x40) != 0)
lodBias |= 0x80;
// bufferID -> Texture index
// samplerId -> Sampler index
sint32 textureIndex = bufferId - 0x00;
// create new tex instruction
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
texInstruction.textureFetch.textureIndex = textureIndex;
texInstruction.textureFetch.samplerIndex = samplerId;
texInstruction.dstSel[0] = dstSelX;
texInstruction.dstSel[1] = dstSelY;
texInstruction.dstSel[2] = dstSelZ;
texInstruction.dstSel[3] = dstSelW;
texInstruction.textureFetch.srcSel[0] = srcSelX;
texInstruction.textureFetch.srcSel[1] = srcSelY;
texInstruction.textureFetch.srcSel[2] = srcSelZ;
texInstruction.textureFetch.srcSel[3] = srcSelW;
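// texel offsets are 5-bit signed values; bit 4 is the sign bit, hence the manual sign extension via 0xE0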
texInstruction.textureFetch.offsetX = (sint8)((offsetX & 0x10) ? (offsetX | 0xE0) : (offsetX));
texInstruction.textureFetch.offsetY = (sint8)((offsetY & 0x10) ? (offsetY | 0xE0) : (offsetY));
texInstruction.textureFetch.offsetZ = (sint8)((offsetZ & 0x10) ? (offsetZ | 0xE0) : (offsetZ));
texInstruction.dstGpr = destGpr;
texInstruction.srcGpr = srcGpr;
texInstruction.textureFetch.unnormalized[0] = coordTypeX == 0;
texInstruction.textureFetch.unnormalized[1] = coordTypeY == 0;
texInstruction.textureFetch.unnormalized[2] = coordTypeZ == 0;
texInstruction.textureFetch.unnormalized[3] = coordTypeW == 0;
texInstruction.textureFetch.lodBias = (sint8)lodBias;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else if( inst0_4 == GPU7_TEX_INST_SET_CUBEMAP_INDEX )
{
// todo: check if the encoding of fields matches with that of GPU7_TEX_INST_SAMPLE* (it should, according to AMD doc)
uint32 fetchType = (word0>>5)&3;
uint32 bufferId = (word0>>8)&0xFF;
uint32 samplerId = (word2>>15)&0x1F;
uint32 srcGpr = (word0>>16)&0x7F;
uint32 srcRel = (word0>>23)&1;
if( srcRel != 0 )
debugBreakpoint();
uint32 destGpr = (word1>>0)&0x7F;
uint32 destRel = (word1>>7)&1;
if( destRel != 0 )
debugBreakpoint();
uint32 dstSelX = (word1>>9)&0x7;
uint32 dstSelY = (word1>>12)&0x7;
uint32 dstSelZ = (word1>>15)&0x7;
uint32 dstSelW = (word1>>18)&0x7;
uint32 srcSelX = (word2>>20)&0x7;
uint32 srcSelY = (word2>>23)&0x7;
uint32 srcSelZ = (word2>>26)&0x7;
uint32 srcSelW = (word2>>29)&0x7;
sint32 textureIndex = bufferId-0x00;
// create new tex instruction
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
texInstruction.textureFetch.textureIndex = textureIndex;
texInstruction.textureFetch.samplerIndex = samplerId;
texInstruction.dstSel[0] = dstSelX;
texInstruction.dstSel[1] = dstSelY;
texInstruction.dstSel[2] = dstSelZ;
texInstruction.dstSel[3] = dstSelW;
texInstruction.textureFetch.srcSel[0] = srcSelX;
texInstruction.textureFetch.srcSel[1] = srcSelY;
texInstruction.textureFetch.srcSel[2] = srcSelZ;
texInstruction.textureFetch.srcSel[3] = srcSelW;
texInstruction.dstGpr = destGpr;
texInstruction.srcGpr = srcGpr;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else if (inst0_4 == GPU7_TEX_INST_GET_GRADIENTS_H || inst0_4 == GPU7_TEX_INST_GET_GRADIENTS_V)
{
uint32 fetchType = (word0 >> 5) & 3;
uint32 bufferId = (word0 >> 8) & 0xFF;
uint32 samplerId = (word2 >> 15) & 0x1F;
uint32 srcGpr = (word0 >> 16) & 0x7F;
uint32 srcRel = (word0 >> 23) & 1;
if (srcRel != 0)
debugBreakpoint();
uint32 destGpr = (word1 >> 0) & 0x7F;
uint32 destRel = (word1 >> 7) & 1;
if (destRel != 0)
debugBreakpoint();
uint32 dstSelX = (word1 >> 9) & 0x7;
uint32 dstSelY = (word1 >> 12) & 0x7;
uint32 dstSelZ = (word1 >> 15) & 0x7;
uint32 dstSelW = (word1 >> 18) & 0x7;
uint32 coordTypeX = (word1 >> 28) & 1;
uint32 coordTypeY = (word1 >> 29) & 1;
uint32 coordTypeZ = (word1 >> 30) & 1;
uint32 coordTypeW = (word1 >> 31) & 1;
cemu_assert_debug(coordTypeX != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeY != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeZ != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeW != GPU7_TEX_UNNORMALIZED);
uint32 srcSelX = (word2 >> 20) & 0x7;
uint32 srcSelY = (word2 >> 23) & 0x7;
uint32 srcSelZ = (word2 >> 26) & 0x7;
uint32 srcSelW = (word2 >> 29) & 0x7;
uint32 offsetX = (word2 >> 0) & 0x1F;
uint32 offsetY = (word2 >> 5) & 0x1F;
uint32 offsetZ = (word2 >> 10) & 0x1F;
cemu_assert_debug(offsetX == 0);
cemu_assert_debug(offsetY == 0);
cemu_assert_debug(offsetZ == 0);
// create new tex instruction
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
texInstruction.dstSel[0] = dstSelX;
texInstruction.dstSel[1] = dstSelY;
texInstruction.dstSel[2] = dstSelZ;
texInstruction.dstSel[3] = dstSelW;
texInstruction.textureFetch.srcSel[0] = srcSelX;
texInstruction.textureFetch.srcSel[1] = srcSelY;
texInstruction.textureFetch.srcSel[2] = srcSelZ;
texInstruction.textureFetch.srcSel[3] = srcSelW;
texInstruction.dstGpr = destGpr;
texInstruction.srcGpr = srcGpr;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else if (inst0_4 == GPU7_TEX_INST_SET_GRADIENTS_H || inst0_4 == GPU7_TEX_INST_SET_GRADIENTS_V)
{
uint32 bufferId = (word0 >> 8) & 0xFF;
uint32 samplerId = (word2 >> 15) & 0x1F;
uint32 srcGpr = (word0 >> 16) & 0x7F;
uint32 srcRel = (word0 >> 23) & 1;
if (srcRel != 0)
debugBreakpoint();
uint32 coordTypeX = (word1 >> 28) & 1;
uint32 coordTypeY = (word1 >> 29) & 1;
uint32 coordTypeZ = (word1 >> 30) & 1;
uint32 coordTypeW = (word1 >> 31) & 1;
cemu_assert_debug(coordTypeX != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeY != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeZ != GPU7_TEX_UNNORMALIZED);
cemu_assert_debug(coordTypeW != GPU7_TEX_UNNORMALIZED);
uint32 srcSelX = (word2 >> 20) & 0x7;
uint32 srcSelY = (word2 >> 23) & 0x7;
uint32 srcSelZ = (word2 >> 26) & 0x7;
uint32 srcSelW = (word2 >> 29) & 0x7;
sint32 textureIndex = bufferId - 0x00;
// create new tex instruction
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
texInstruction.textureFetch.textureIndex = textureIndex;
texInstruction.textureFetch.samplerIndex = samplerId;
texInstruction.textureFetch.srcSel[0] = srcSelX;
texInstruction.textureFetch.srcSel[1] = srcSelY;
texInstruction.textureFetch.srcSel[2] = srcSelZ;
texInstruction.textureFetch.srcSel[3] = srcSelW;
texInstruction.srcGpr = srcGpr;
texInstruction.dstGpr = 0xFFFFFFFF;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else if( inst0_4 == GPU7_TEX_INST_VFETCH )
{
// this uses the VTX_WORD* encoding
uint32 fetchType = (word0>>5)&3;
uint32 bufferId = (word0>>8)&0xFF;
uint32 offset = (word2>>0)&0xFFFF;
uint32 endianSwap = (word2>>16)&0x3;
uint32 constNoStride = (word2>>18)&0x1;
uint32 srcGpr = (word0>>16)&0x7F;
uint32 srcRel = (word0>>23)&1;
if( srcRel != 0 )
debugBreakpoint();
uint32 destGpr = (word1>>0)&0x7F;
uint32 destRel = (word1>>7)&1;
if( destRel != 0 )
debugBreakpoint();
uint32 dstSelX = (word1>>9)&0x7;
uint32 dstSelY = (word1>>12)&0x7;
uint32 dstSelZ = (word1>>15)&0x7;
uint32 dstSelW = (word1>>18)&0x7;
uint32 srcSelX = (word0>>24)&0x3;
uint32 srcSelY = 0;
uint32 srcSelZ = 0;
uint32 srcSelW = 0;
// create new tex instruction
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
texInstruction.textureFetch.textureIndex = bufferId;
texInstruction.textureFetch.samplerIndex = 0;
texInstruction.textureFetch.offset = offset;
texInstruction.dstSel[0] = dstSelX;
texInstruction.dstSel[1] = dstSelY;
texInstruction.dstSel[2] = dstSelZ;
texInstruction.dstSel[3] = dstSelW;
texInstruction.textureFetch.srcSel[0] = srcSelX;
texInstruction.textureFetch.srcSel[1] = srcSelY;
texInstruction.textureFetch.srcSel[2] = srcSelZ;
texInstruction.textureFetch.srcSel[3] = srcSelW;
texInstruction.dstGpr = destGpr;
texInstruction.srcGpr = srcGpr;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else if (inst0_4 == GPU7_TEX_INST_MEM)
{
// memory access
// MEM_RD_WORD0
uint32 elementSize = (word0 >> 5) & 3;
uint32 memOp = (word0 >> 8) & 7;
uint8 indexed = (word0 >> 12) & 1;
uint32 srcGPR = (word0 >> 16) & 0x7F;
uint8 srcREL = (word0 >> 23) & 1;
uint8 srcSelX = (word0 >> 24) & 3;
// MEM_RD_WORD1
uint32 dstGPR = (word1 >> 0) & 0x7F;
uint8 dstREL = (word1 >> 7) & 1;
uint8 dstSelX = (word1 >> 9) & 7;
uint8 dstSelY = (word1 >> 12) & 7;
uint8 dstSelZ = (word1 >> 15) & 7;
uint8 dstSelW = (word1 >> 18) & 7;
uint8 dataFormat = (word1 >> 22) & 0x3F;
uint8 nfa = (word1 >> 28) & 3;
uint8 isSigned = (word1 >> 30) & 1;
uint8 srfMode = (word1 >> 31) & 1;
// MEM_RD_WORD2
uint32 arrayBase = (word2 & 0x1FFF);
uint8 endianSwap = (word2 >> 16) & 3;
uint32 arraySize = (word2 >> 20) & 0xFFF;
if (memOp == 2)
{
// read from scatter buffer (SSBO)
LatteDecompilerTEXInstruction texInstruction;
texInstruction.cfInstruction = cfInstruction;
texInstruction.opcode = inst0_4;
cemu_assert_debug(srcREL == 0 || dstREL == 0); // unsupported relative access
texInstruction.memRead.arrayBase = arrayBase;
texInstruction.srcGpr = srcGPR;
texInstruction.dstGpr = dstGPR;
texInstruction.memRead.srcSelX = srcSelX;
texInstruction.dstSel[0] = dstSelX;
texInstruction.dstSel[1] = dstSelY;
texInstruction.dstSel[2] = dstSelZ;
texInstruction.dstSel[3] = dstSelW;
texInstruction.memRead.format = dataFormat;
texInstruction.memRead.nfa = nfa;
texInstruction.memRead.isSigned = isSigned;
cfInstruction->instructionsTEX.emplace_back(texInstruction);
}
else
{
cemu_assert_unimplemented();
}
}
else
{
cemuLog_logDebug(LogType::Force, "Unsupported tex instruction {}", inst0_4);
shaderContext->hasError = true;
break;
}
}
cemu_assert_debug(cfInstruction->instructionsALU.empty()); // clause may only contain texture instructions
}
// iterate all CF instructions and parse clause sub-instructions (if present)
void LatteDecompiler_ParseClauses(LatteDecompilerShaderContext* decompilerContext, uint8* programData, uint32 programSize, std::vector<LatteDecompilerCFInstruction> &list_instructions)
{
LatteDecompilerShader* shader = decompilerContext->shader;
for (auto& cfInstruction : list_instructions)
{
if (cfInstruction.type == GPU7_CF_INST_ALU || cfInstruction.type == GPU7_CF_INST_ALU_PUSH_BEFORE || cfInstruction.type == GPU7_CF_INST_ALU_POP_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_POP2_AFTER || cfInstruction.type == GPU7_CF_INST_ALU_BREAK || cfInstruction.type == GPU7_CF_INST_ALU_ELSE_AFTER)
{
LatteDecompiler_ParseALUClause(shader, &cfInstruction, programData, programSize);
}
else if (cfInstruction.type == GPU7_CF_INST_TEX)
{
LatteDecompiler_ParseTEXClause(shader, &cfInstruction, programData, programSize);
}
else if (cfInstruction.type == GPU7_CF_INST_EXPORT || cfInstruction.type == GPU7_CF_INST_EXPORT_DONE)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_ELSE || cfInstruction.type == GPU7_CF_INST_POP)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_START_DX10 || cfInstruction.type == GPU7_CF_INST_LOOP_END ||
cfInstruction.type == GPU7_CF_INST_LOOP_START_NO_AL)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_LOOP_BREAK)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cfInstruction.type == GPU7_CF_INST_MEM_STREAM1_WRITE)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_MEM_RING_WRITE)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_CALL)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_RETURN)
{
// no sub-instructions
}
else if (cfInstruction.type == GPU7_CF_INST_EMIT_VERTEX)
{
// no sub-instructions
}
else
{
debug_printf("_parseClauses(): Unsupported clause 0x%x\n", cfInstruction.type);
cemu_assert_unimplemented();
}
}
}
// iterate all CF instructions and parse sub-instructions
void LatteDecompiler_ParseClauses(LatteDecompilerShaderContext* shaderContext, uint8* programData, uint32 programSize)
{
LatteDecompilerShader* shader = shaderContext->shader;
LatteDecompiler_ParseClauses(shaderContext, programData, programSize, shaderContext->cfInstructions);
// parse subroutines
for (auto& subroutineInfo : shaderContext->list_subroutines)
{
LatteDecompiler_ParseClauses(shaderContext, programData, programSize, subroutineInfo.instructions);
}
}
void _LatteDecompiler_GenerateDataForFastAccess(LatteDecompilerShader* shader)
{
if (shader->hasError)
return;
for (size_t i = 0; i < shader->list_remappedUniformEntries.size(); i++)
{
LatteDecompilerRemappedUniformEntry_t* entry = shader->list_remappedUniformEntries.data() + i;
if (entry->isRegister)
{
LatteFastAccessRemappedUniformEntry_register_t entryReg;
entryReg.indexOffset = entry->index * 16;
entryReg.mappedIndexOffset = entry->mappedIndex * 16;
shader->list_remappedUniformEntries_register.push_back(entryReg);
}
else
{
LatteFastAccessRemappedUniformEntry_buffer_t entryBuf;
uint32 kcacheBankIdOffset = entry->kcacheBankId* (7 * 4);
entryBuf.indexOffset = entry->index * 16;
entryBuf.mappedIndexOffset = entry->mappedIndex * 16;
// find or create buffer group
auto bufferGroup = std::find_if(shader->list_remappedUniformEntries_bufferGroups.begin(), shader->list_remappedUniformEntries_bufferGroups.end(), [kcacheBankIdOffset](const LatteDecompilerShader::_RemappedUniformBufferGroup& v) { return v.kcacheBankIdOffset == kcacheBankIdOffset; });
if (bufferGroup != shader->list_remappedUniformEntries_bufferGroups.end())
{
(*bufferGroup).entries.emplace_back(entryBuf);
}
else
{
shader->list_remappedUniformEntries_bufferGroups.emplace_back(kcacheBankIdOffset).entries.emplace_back(entryBuf);
}
}
}
}
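// full decompilation pipeline: parse the CF program, parse clauses, run the analysis passes,
// emit GLSL and finally release temporary data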
void _LatteDecompiler_Process(LatteDecompilerShaderContext* shaderContext, uint8* programData, uint32 programSize)
{
// parse control flow instructions
if (shaderContext->shader->hasError == false)
LatteDecompiler_ParseCF(shaderContext, programData, programSize);
// parse individual clauses
if (shaderContext->shader->hasError == false)
LatteDecompiler_ParseClauses(shaderContext, programData, programSize);
// analyze
if (shaderContext->shader->hasError == false)
LatteDecompiler_analyze(shaderContext, shaderContext->shader);
if (shaderContext->shader->hasError == false)
LatteDecompiler_analyzeDataTypes(shaderContext);
// emit code
if (shaderContext->shader->hasError == false)
LatteDecompiler_emitGLSLShader(shaderContext, shaderContext->shader);
LatteDecompiler_cleanup(shaderContext);
// fast access
_LatteDecompiler_GenerateDataForFastAccess(shaderContext->shader);
}
void LatteDecompiler_InitContext(LatteDecompilerShaderContext& dCtx, const LatteDecompilerOptions& options, LatteDecompilerOutput_t* output, LatteConst::ShaderType shaderType, uint64 shaderBaseHash, uint32* contextRegisters)
{
dCtx.output = output;
dCtx.shaderType = shaderType;
dCtx.options = &options;
dCtx.shaderBaseHash = shaderBaseHash;
dCtx.contextRegisters = contextRegisters;
dCtx.contextRegistersNew = (LatteContextRegister*)contextRegisters;
output->shaderType = shaderType;
}
void LatteDecompiler_DecompileVertexShader(uint64 shaderBaseHash, uint32* contextRegisters, uint8* programData, uint32 programSize, struct LatteFetchShader* fetchShader, LatteDecompilerOptions& options, LatteDecompilerOutput_t* output)
{
cemu_assert_debug(fetchShader);
cemu_assert_debug((programSize & 3) == 0);
performanceMonitor.gpuTime_shaderCreate.beginMeasuring();
// prepare decompiler context
LatteDecompilerShaderContext shaderContext = { 0 };
LatteDecompiler_InitContext(shaderContext, options, output, LatteConst::ShaderType::Vertex, shaderBaseHash, contextRegisters);
shaderContext.fetchShader = fetchShader;
// prepare shader (deprecated)
LatteDecompilerShader* shader = new LatteDecompilerShader(LatteConst::ShaderType::Vertex);
shader->compatibleFetchShader = shaderContext.fetchShader;
output->shaderType = LatteConst::ShaderType::Vertex;
shaderContext.shader = shader;
output->shader = shader;
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
shader->textureUnitSamplerAssignment[i] = LATTE_DECOMPILER_SAMPLER_NONE;
shader->textureUsesDepthCompare[i] = false;
}
// parse & compile
_LatteDecompiler_Process(&shaderContext, programData, programSize);
performanceMonitor.gpuTime_shaderCreate.endMeasuring();
}
void LatteDecompiler_DecompileGeometryShader(uint64 shaderBaseHash, uint32* contextRegisters, uint8* programData, uint32 programSize, uint8* gsCopyProgramData, uint32 gsCopyProgramSize, uint32 vsRingParameterCount, LatteDecompilerOptions& options, LatteDecompilerOutput_t* output)
{
cemu_assert_debug((programSize & 3) == 0);
performanceMonitor.gpuTime_shaderCreate.beginMeasuring();
// prepare decompiler context
LatteDecompilerShaderContext shaderContext = { 0 };
LatteDecompiler_InitContext(shaderContext, options, output, LatteConst::ShaderType::Geometry, shaderBaseHash, contextRegisters);
// prepare shader
LatteDecompilerShader* shader = new LatteDecompilerShader(LatteConst::ShaderType::Geometry);
shader->ringParameterCountFromPrevStage = vsRingParameterCount;
output->shaderType = LatteConst::ShaderType::Geometry;
shaderContext.shader = shader;
output->shader = shader;
if (gsCopyProgramData == NULL)
{
shader->hasError = true;
}
else
{
shaderContext.parsedGSCopyShader = LatteGSCopyShaderParser_parse(gsCopyProgramData, gsCopyProgramSize);
}
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
shader->textureUnitSamplerAssignment[i] = LATTE_DECOMPILER_SAMPLER_NONE;
shader->textureUsesDepthCompare[i] = false;
}
// parse & compile
_LatteDecompiler_Process(&shaderContext, programData, programSize);
performanceMonitor.gpuTime_shaderCreate.endMeasuring();
}
void LatteDecompiler_DecompilePixelShader(uint64 shaderBaseHash, uint32* contextRegisters, uint8* programData, uint32 programSize, LatteDecompilerOptions& options, LatteDecompilerOutput_t* output)
{
cemu_assert_debug((programSize & 3) == 0);
performanceMonitor.gpuTime_shaderCreate.beginMeasuring();
// prepare decompiler context
LatteDecompilerShaderContext shaderContext = { 0 };
LatteDecompiler_InitContext(shaderContext, options, output, LatteConst::ShaderType::Pixel, shaderBaseHash, contextRegisters);
shaderContext.contextRegisters = contextRegisters;
// prepare shader
LatteDecompilerShader* shader = new LatteDecompilerShader(LatteConst::ShaderType::Pixel);
output->shaderType = LatteConst::ShaderType::Pixel;
shaderContext.shader = shader;
output->shader = shader;
for (sint32 i = 0; i < LATTE_NUM_MAX_TEX_UNITS; i++)
{
shader->textureUnitSamplerAssignment[i] = LATTE_DECOMPILER_SAMPLER_NONE;
shader->textureUsesDepthCompare[i] = false;
}
// parse & compile
_LatteDecompiler_Process(&shaderContext, programData, programSize);
performanceMonitor.gpuTime_shaderCreate.endMeasuring();
}
void LatteDecompiler_cleanup(LatteDecompilerShaderContext* shaderContext)
{
shaderContext->cfInstructions.clear();
}
// ==== File: cemu-project_Cemu/src/Cafe/HW/Latte/LatteAddrLib/LatteAddrLib.cpp ====
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/LatteAddrLib/LatteAddrLib.h"
#include "Cafe/OS/libs/gx2/GX2_Surface.h"
#include <bit>
/*
Info:
- Extra samples for AA are stored in their own micro-tiles
Macro-Tiling:
- Contains one micro-tile for every combination of bank/channel select
- Since there are 4 bank and 2 pipe bits this means 4*2 = 8 micro tiles (or 8*4 for thick?). But the arrangement varies per tilemode (aspect ratio)
Allowed layouts: 1x8, 2x4, 4x2
- Address format: .... aaaaabbc aaaaaaaa (a = offset, b = bank, c = channel)
- Channel/Bank bits are determined by:
channel0 = x[3] ^ y[3]
bank0 = x[3] ^ y[5]
bank1 = x[4] ^ y[4]
*/
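// Illustrative sketch of the channel/bank derivation described above; the actual implementations
// are _ComputePipeFromCoordWoRotation() and _ComputeBankFromCoordWoRotation() in LatteAddrLib_Coord.cpp:
//   uint32 channel = ((x >> 3) ^ (y >> 3)) & 1;            // channel0 = x[3] ^ y[3]
//   uint32 bank    = (((x >> 3) ^ (y >> 5)) & 1)           // bank0 = x[3] ^ y[5]
//                  | ((((x >> 4) ^ (y >> 4)) & 1) << 1);   // bank1 = x[4] ^ y[4]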
using namespace Latte;
namespace LatteAddrLib
{
enum class COMPUTE_SURFACE_RESULT
{
RESULT_OK = 0,
UNKNOWN_FORMAT = 3,
BAD_SIZE_FIELD = 6,
};
const uint32 m_configFlags = (1 << 29);
uint32 GetSliceComputingFlags()
{
return (m_configFlags >> 26) & 3;
}
uint32 GetFillSizeFieldsFlags()
{
return (m_configFlags >> 25) & 1;
}
bool GetFlagUseTileIndex()
{
return ((m_configFlags >> 24) & 1) != 0;
}
bool GetFlagNoCubeMipSlicesPad()
{
return ((m_configFlags >> 28) & 1) != 0;
}
bool GetFlagNo1DTiledMSAA()
{
return ((m_configFlags >> 29) & 1) != 0;
}
bool IsPow2(uint32 dim)
{
return (dim & (dim - 1)) == 0;
}
uint32 PowTwoAlign(uint32 x, uint32 align)
{
return (x + align - 1) & ~(align - 1);
}
uint32 NextPow2(uint32 dim)
{
return std::bit_ceil<uint32>(dim);
}
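	// returns the storage bits per element for a hw format; for block-compressed (BCn) formats the value
	// is bits per 4x4 block and expandX/expandY receive the block footprint, elemMode classifies how
	// dimensions and bpp have to be adjusted later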
uint32 GetBitsPerPixel(E_HWSURFFMT format, uint32* pElemMode, uint32* pExpandX, uint32* pExpandY)
{
uint32 bpp;
uint32 elemMode = 3;
switch (format)
{
case E_HWSURFFMT::INVALID_FORMAT:
bpp = 0;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_8:
case E_HWSURFFMT::HWFMT_4_4:
case E_HWSURFFMT::HWFMT_3_3_2:
bpp = 8;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_16:
case E_HWSURFFMT::HWFMT_16_FLOAT:
case E_HWSURFFMT::HWFMT_8_8:
case E_HWSURFFMT::HWFMT_5_6_5:
case E_HWSURFFMT::HWFMT_6_5_5:
case E_HWSURFFMT::HWFMT_1_5_5_5:
case E_HWSURFFMT::HWFMT_4_4_4_4:
bpp = 16;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_5_5_5_1:
bpp = 16;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_32:
case E_HWSURFFMT::HWFMT_32_FLOAT:
case E_HWSURFFMT::HWFMT_16_16:
case E_HWSURFFMT::HWFMT_16_16_FLOAT:
case E_HWSURFFMT::HWFMT_24_8:
case E_HWSURFFMT::HWFMT_24_8_FLOAT:
case E_HWSURFFMT::HWFMT_10_11_11:
case E_HWSURFFMT::HWFMT_11_11_10:
case E_HWSURFFMT::HWFMT_2_10_10_10:
case E_HWSURFFMT::HWFMT_8_8_8_8:
case E_HWSURFFMT::HWFMT_8_24:
case E_HWSURFFMT::HWFMT_8_24_FLOAT:
case E_HWSURFFMT::HWFMT_10_11_11_FLOAT:
case E_HWSURFFMT::HWFMT_11_11_10_FLOAT:
case E_HWSURFFMT::HWFMT_10_10_10_2:
bpp = 32;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_32_32:
case E_HWSURFFMT::HWFMT_32_32_FLOAT:
case E_HWSURFFMT::HWFMT_16_16_16_16:
case E_HWSURFFMT::HWFMT_16_16_16_16_FLOAT:
case E_HWSURFFMT::HWFMT_X24_8_32_FLOAT:
bpp = 64;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_32_32_32_32:
case E_HWSURFFMT::HWFMT_32_32_32_32_FLOAT:
bpp = 128;
*pExpandX = 1;
*pExpandY = 1;
break;
case E_HWSURFFMT::HWFMT_BC1:
elemMode = 9;
bpp = 64;
*pExpandX = 4;
*pExpandY = 4;
break;
case E_HWSURFFMT::HWFMT_BC2:
elemMode = 10;
bpp = 128;
*pExpandX = 4;
*pExpandY = 4;
break;
case E_HWSURFFMT::HWFMT_BC3:
elemMode = 11;
bpp = 128;
*pExpandX = 4;
*pExpandY = 4;
break;
case E_HWSURFFMT::HWFMT_BC4:
elemMode = 12;
bpp = 64;
*pExpandX = 4;
*pExpandY = 4;
break;
case E_HWSURFFMT::HWFMT_BC5:
case E_HWSURFFMT::U_HWFMT_BC6:
case E_HWSURFFMT::U_HWFMT_BC7:
elemMode = 13;
bpp = 128;
*pExpandX = 4;
*pExpandY = 4;
break;
default:
cemu_assert_suspicious();
bpp = 0;
*pExpandX = 1;
*pExpandY = 1;
break;
}
*pElemMode = elemMode;
return bpp;
}
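	// convert bpp/width/height from pixel units into element units according to elemMode
	// (e.g. divide the dimensions by the 4x4 block footprint for BCn formats)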
void AdjustSurfaceInfo(uint32 elemMode, uint32 expandX, uint32 expandY, uint32* pBpp, uint32* pWidth, uint32* pHeight)
{
bool isBCFormat = false;
if (pBpp)
{
uint32 bpp = *pBpp;
uint32 packedBits;
switch (elemMode)
{
case 4:
packedBits = bpp / expandX / expandY;
break;
case 5:
case 6:
packedBits = expandY * expandX * bpp;
break;
case 7:
case 8:
packedBits = *pBpp;
break;
case 9:
case 12:
packedBits = 64;
isBCFormat = true;
break;
case 10:
case 11:
case 13:
packedBits = 128;
isBCFormat = true;
break;
case 0:
case 1:
case 2:
case 3:
packedBits = *pBpp;
break;
default:
packedBits = *pBpp;
break;
}
*pBpp = packedBits;
}
if (pWidth)
{
if (pHeight)
{
uint32 width = *pWidth;
uint32 height = *pHeight;
if (expandX > 1 || expandY > 1)
{
uint32 widthAligned;
uint32 heightAligned;
if (elemMode == 4)
{
widthAligned = expandX * width;
heightAligned = expandY * height;
}
else if (isBCFormat)
{
widthAligned = width / expandX;
heightAligned = height / expandY;
}
else
{
widthAligned = (width + expandX - 1) / expandX;
heightAligned = (height + expandY - 1) / expandY;
}
*pWidth = std::max<uint32>(widthAligned, 1);
*pHeight = std::max<uint32>(heightAligned, 1);
}
}
}
}
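	// derive the dimensions of the requested mip level from the base level, applying BCn 4-pixel
	// alignment and power-of-two rounding where required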
void ComputeMipLevelDimensions(uint32* pWidth, uint32* pHeight, uint32* pNumSlices, AddrSurfaceFlags flags, Latte::E_HWSURFFMT format, uint32 mipLevel)
{
bool isBCn = (uint32)format >= (uint32)Latte::E_HWSURFFMT::HWFMT_BC1 && (uint32)format <= (uint32)Latte::E_HWSURFFMT::U_HWFMT_BC7;
if (isBCn && (mipLevel == 0 || flags.inputIsBase))
{
*pWidth = PowTwoAlign(*pWidth, 4);
*pHeight = PowTwoAlign(*pHeight, 4);
}
if (isBCn)
{
if (mipLevel != 0)
{
uint32 width = *pWidth;
uint32 height = *pHeight;
uint32 slices = *pNumSlices;
if (flags.inputIsBase)
{
if (!flags.dimCube)
slices >>= mipLevel;
width = std::max<uint32>(width >> mipLevel, 1);
height = std::max<uint32>(height >> mipLevel, 1);
slices = std::max<uint32>(slices, 1);
}
*pWidth = NextPow2(width);
*pHeight = NextPow2(height);
*pNumSlices = slices;
}
}
else if (mipLevel && flags.inputIsBase)
{
uint32 width = *pWidth;
uint32 height = *pHeight;
uint32 slices = *pNumSlices;
width >>= mipLevel;
height >>= mipLevel;
if (!flags.dimCube) // dim 3D
slices >>= mipLevel;
width = std::max<uint32>(1, width);
height = std::max<uint32>(1, height);
slices = std::max<uint32>(1, slices);
if (format != E_HWSURFFMT::U_HWFMT_32_32_32 && format != E_HWSURFFMT::U_HWFMT_32_32_32_FLOAT)
{
width = NextPow2(width);
height = NextPow2(height);
slices = NextPow2(slices);
}
*pWidth = width;
*pHeight = height;
*pNumSlices = slices;
}
}
E_HWTILEMODE ConvertTileModeToNonBankSwappedMode(E_HWTILEMODE tileMode)
{
switch (tileMode)
{
case E_HWTILEMODE::TM_2B_TILED_THIN1:
return E_HWTILEMODE::TM_2D_TILED_THIN1;
case E_HWTILEMODE::TM_2B_TILED_THIN2:
return E_HWTILEMODE::TM_2D_TILED_THIN2;
case E_HWTILEMODE::TM_2B_TILED_THIN4:
return E_HWTILEMODE::TM_2D_TILED_THIN4;
case E_HWTILEMODE::TM_2B_TILED_THICK:
return E_HWTILEMODE::TM_2D_TILED_THICK;
case E_HWTILEMODE::TM_3B_TILED_THIN1:
return E_HWTILEMODE::TM_3D_TILED_THIN1;
case E_HWTILEMODE::TM_3B_TILED_THICK:
return E_HWTILEMODE::TM_3D_TILED_THICK;
default:
break;
}
return tileMode;
}
uint32 _CalculateSurfaceTileSlices(E_HWTILEMODE tileMode, uint32 bpp, uint32 numSamples)
{
uint32 bytePerSample = ((bpp << 6) + 7) >> 3;
uint32 tileSlices = 1;
if (TM_GetThickness(tileMode) > 1)
numSamples = 4;
if (bytePerSample)
{
uint32 samplePerTile = m_splitSize / bytePerSample;
if (samplePerTile)
{
tileSlices = numSamples / samplePerTile;
if (!(numSamples / samplePerTile))
tileSlices = 1;
}
}
return tileSlices;
}
uint32 ComputeSurfaceRotationFromTileMode(E_HWTILEMODE tileMode)
{
switch (tileMode)
{
case E_HWTILEMODE::TM_2D_TILED_THIN1:
case E_HWTILEMODE::TM_2D_TILED_THIN2:
case E_HWTILEMODE::TM_2D_TILED_THIN4:
case E_HWTILEMODE::TM_2D_TILED_THICK:
case E_HWTILEMODE::TM_2B_TILED_THIN1:
case E_HWTILEMODE::TM_2B_TILED_THIN2:
case E_HWTILEMODE::TM_2B_TILED_THIN4:
case E_HWTILEMODE::TM_2B_TILED_THICK:
return m_pipes * ((m_banks >> 1) - 1);
case E_HWTILEMODE::TM_3D_TILED_THIN1:
case E_HWTILEMODE::TM_3D_TILED_THICK:
case E_HWTILEMODE::TM_3B_TILED_THIN1:
case E_HWTILEMODE::TM_3B_TILED_THICK:
if (m_pipes >= 4)
return (m_pipes >> 1) - 1;
return 1;
default:
break;
}
return 0;
}
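	// select the effective tile mode for a mip level; thick/bank-swapped/3D modes are demoted to simpler
	// modes when sample count, depth usage, split size or the mip dimensions no longer support them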
E_HWTILEMODE _ComputeSurfaceMipLevelTileMode(E_HWTILEMODE baseTileMode, uint32 bpp, uint32 level, uint32 width, uint32 height, uint32 numSlices, uint32 numSamples, bool isDepth, bool noRecursive)
{
E_HWTILEMODE result;
E_HWTILEMODE expTileMode = baseTileMode;
uint32 tileSlices = _CalculateSurfaceTileSlices(baseTileMode, bpp, numSamples);
switch (baseTileMode)
{
case E_HWTILEMODE::TM_1D_TILED_THIN1:
if (numSamples > 1 && GetFlagNo1DTiledMSAA())
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_1D_TILED_THICK:
if (numSamples > 1 || isDepth)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
if (numSamples == 2 || numSamples == 4)
expTileMode = E_HWTILEMODE::TM_2D_TILED_THICK;
break;
case E_HWTILEMODE::TM_2D_TILED_THIN2:
if (2 * m_pipeInterleaveBytes > m_splitSize)
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2D_TILED_THIN4:
if (4 * m_pipeInterleaveBytes > m_splitSize)
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN2;
break;
case E_HWTILEMODE::TM_2D_TILED_THICK:
if (numSamples > 1 || tileSlices > 1 || isDepth)
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2B_TILED_THIN2:
if (2 * m_pipeInterleaveBytes > m_splitSize)
expTileMode = E_HWTILEMODE::TM_2B_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2B_TILED_THIN4:
if (4 * m_pipeInterleaveBytes > m_splitSize)
expTileMode = E_HWTILEMODE::TM_2B_TILED_THIN2;
break;
case E_HWTILEMODE::TM_2B_TILED_THICK:
if (numSamples > 1 || tileSlices > 1 || isDepth)
expTileMode = E_HWTILEMODE::TM_2B_TILED_THIN1;
break;
case E_HWTILEMODE::TM_3D_TILED_THICK:
if (numSamples > 1 || tileSlices > 1 || isDepth)
expTileMode = E_HWTILEMODE::TM_3D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_3B_TILED_THICK:
if (numSamples > 1 || tileSlices > 1 || isDepth)
expTileMode = E_HWTILEMODE::TM_3B_TILED_THIN1;
break;
default:
expTileMode = baseTileMode;
break;
}
uint32 rotation = ComputeSurfaceRotationFromTileMode(expTileMode);
if (!(rotation % m_pipes))
{
switch (expTileMode)
{
case E_HWTILEMODE::TM_3D_TILED_THIN1:
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_3D_TILED_THICK:
expTileMode = E_HWTILEMODE::TM_2D_TILED_THICK;
break;
case E_HWTILEMODE::TM_3B_TILED_THIN1:
expTileMode = E_HWTILEMODE::TM_2B_TILED_THIN1;
break;
case E_HWTILEMODE::TM_3B_TILED_THICK:
expTileMode = E_HWTILEMODE::TM_2B_TILED_THICK;
break;
default:
break;
}
}
if (noRecursive)
{
result = expTileMode;
}
else
{
if (bpp == 96 || bpp == 48 || bpp == 24)
bpp /= 3u;
uint32 widthAligned = NextPow2(width);
uint32 heightAligned = NextPow2(height);
uint32 numSlicesAligned = NextPow2(numSlices);
if (level)
{
expTileMode = ConvertTileModeToNonBankSwappedMode(expTileMode);
uint32 thickness = TM_GetThickness(expTileMode);
uint32 microTileBytes = (numSamples * bpp * (thickness << 6) + 7) >> 3;
uint32 widthAlignFactor;
if (microTileBytes >= m_pipeInterleaveBytes)
widthAlignFactor = 1;
else
widthAlignFactor = m_pipeInterleaveBytes / microTileBytes;
uint32 macroTileWidth = 8 * m_banks;
uint32 macroTileHeight = 8 * m_pipes;
switch (expTileMode)
{
case E_HWTILEMODE::TM_2D_TILED_THIN1:
case E_HWTILEMODE::TM_3D_TILED_THIN1:
if (widthAligned < widthAlignFactor * macroTileWidth || heightAligned < macroTileHeight)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2D_TILED_THIN2:
macroTileWidth >>= 1;
macroTileHeight *= 2;
if (widthAligned < widthAlignFactor * macroTileWidth || heightAligned < macroTileHeight)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2D_TILED_THIN4:
macroTileWidth >>= 2;
macroTileHeight *= 4;
if (widthAligned < widthAlignFactor * macroTileWidth || heightAligned < macroTileHeight)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
break;
case E_HWTILEMODE::TM_2D_TILED_THICK:
case E_HWTILEMODE::TM_3D_TILED_THICK:
if (widthAligned < widthAlignFactor * macroTileWidth || heightAligned < macroTileHeight)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THICK;
break;
default:
break;
}
if (expTileMode == E_HWTILEMODE::TM_1D_TILED_THICK)
{
if (numSlicesAligned < 4)
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
}
else if (expTileMode == E_HWTILEMODE::TM_2D_TILED_THICK)
{
if (numSlicesAligned < 4)
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
}
else if (expTileMode == E_HWTILEMODE::TM_3D_TILED_THICK && numSlicesAligned < 4)
{
expTileMode = E_HWTILEMODE::TM_3D_TILED_THIN1;
}
result = _ComputeSurfaceMipLevelTileMode(expTileMode, bpp, level, widthAligned, heightAligned, numSlicesAligned, numSamples, isDepth, true);
}
else
{
result = expTileMode;
}
}
return result;
}
uint32 _ComputeMacroTileAspectRatio(E_HWTILEMODE tileMode)
{
switch (tileMode)
{
case E_HWTILEMODE::TM_2B_TILED_THIN1:
case E_HWTILEMODE::TM_3D_TILED_THIN1:
case E_HWTILEMODE::TM_3B_TILED_THIN1:
return 1;
case E_HWTILEMODE::TM_2D_TILED_THIN2:
case E_HWTILEMODE::TM_2B_TILED_THIN2:
return 2;
case E_HWTILEMODE::TM_2D_TILED_THIN4:
case E_HWTILEMODE::TM_2B_TILED_THIN4:
return 4;
default:
break;
}
return 1;
}
void _AdjustPitchAlignment(AddrSurfaceFlags flags, uint32& pitchAlign)
{
if (flags.display)
pitchAlign = PowTwoAlign(pitchAlign, 32);
}
void _ComputeSurfaceAlignmentsMacroTiled(E_HWTILEMODE tileMode, uint32 bpp, AddrSurfaceFlags flags, uint32 numSamples, uint32* pBaseAlign, uint32* pPitchAlign, uint32* pHeightAlign, uint32* pMacroWidth, uint32* pMacroHeight)
{
uint32 aspectRatio = _ComputeMacroTileAspectRatio(tileMode);
uint32 thickness = TM_GetThickness(tileMode);
if (bpp == 96 || bpp == 48 || bpp == 24)
bpp /= 3;
if (bpp == 3)
bpp = 1;
uint32 macroTileWidth = (8 * m_banks) / aspectRatio;
uint32 macroTileHeight = aspectRatio * 8 * m_pipes;
uint32 pitchAlign = std::max(macroTileWidth, macroTileWidth * (m_pipeInterleaveBytes / bpp / (8 * thickness) / numSamples));
		_AdjustPitchAlignment(flags, pitchAlign);
uint32 heightAlign = macroTileHeight;
uint32 macroTileBytes = numSamples * ((bpp * macroTileHeight * macroTileWidth + 7) >> 3);
if (m_chipFamily == 1 && numSamples == 1)
macroTileBytes *= 2;
uint32 baseAlign;
if (thickness == 1)
baseAlign = std::max(macroTileBytes, (numSamples * heightAlign * bpp * pitchAlign + 7) >> 3);
else
baseAlign = std::max(m_pipeInterleaveBytes, (4 * heightAlign * bpp * pitchAlign + 7) >> 3);
uint32 microTileBytes = (thickness * numSamples * (bpp << 6) + 7) >> 3;
uint32 splits;
if (microTileBytes < m_splitSize)
splits = 1;
else
splits = microTileBytes / m_splitSize;
baseAlign /= splits;
*pBaseAlign = baseAlign;
*pPitchAlign = pitchAlign;
*pHeightAlign = heightAlign;
*pMacroWidth = macroTileWidth;
*pMacroHeight = macroTileHeight;
}
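	// width (in pixels) of the span after which the bank assignment is swapped;
	// returns 0 for tile modes that are not bank-swapped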
uint32 ComputeSurfaceBankSwappedWidth(E_HWTILEMODE tileMode, uint32 bpp, uint32 numSamples, uint32 pitch)
{
uint32 bankSwapWidth = 0;
uint32 slicesPerTile = 1;
uint32 bytesPerSample = 8 * bpp;
uint32 samplesPerTile = m_splitSize / bytesPerSample;
if (m_splitSize / bytesPerSample)
{
slicesPerTile = numSamples / samplesPerTile;
if (!(numSamples / samplesPerTile))
slicesPerTile = 1;
}
if (TM_IsThickAndMacroTiled(tileMode) == 1)
numSamples = 4;
if (TM_IsBankSwapped(tileMode))
{
uint32 bytesPerTileSlice = numSamples * bytesPerSample / slicesPerTile;
uint32 aspectRatioFactor = _ComputeMacroTileAspectRatio(tileMode);
uint32 swapTiles = std::max<uint32>((m_swapSize >> 1) / bpp, 1);
uint32 swapWidth = swapTiles * 8 * m_banks;
uint32 heightBytes = numSamples * aspectRatioFactor * m_pipes * bpp / slicesPerTile;
uint32 swapMax = m_pipes * m_banks * m_rowSize / heightBytes;
uint32 swapMin = m_pipeInterleaveBytes * 8 * m_banks / bytesPerTileSlice;
uint32 swapVal;
if (swapMax >= swapWidth)
swapVal = std::max(swapWidth, swapMin);
else
swapVal = swapMax;
for (bankSwapWidth = swapVal; bankSwapWidth >= 2 * pitch; bankSwapWidth >>= 1);
}
return bankSwapWidth;
}
void PadDimensions(E_HWTILEMODE tileMode, uint32 padDims, int isCube, int cubeAsArray, uint32* pPitch, uint32 pitchAlign, uint32* pHeight, uint32 heightAlign, uint32* pSlices, uint32 sliceAlign)
{
uint32 thickness = TM_GetThickness(tileMode);
if (!padDims)
padDims = 3;
if (IsPow2(pitchAlign))
{
*pPitch = PowTwoAlign(*pPitch, pitchAlign);
}
else
{
*pPitch = pitchAlign + *pPitch - 1;
*pPitch /= pitchAlign;
*pPitch *= pitchAlign;
}
if (padDims > 1)
*pHeight = PowTwoAlign(*pHeight, heightAlign);
if (padDims > 2 || thickness > 1)
{
if (isCube && (!GetFlagNoCubeMipSlicesPad() || cubeAsArray))
*pSlices = NextPow2(*pSlices);
if (thickness > 1)
*pSlices = PowTwoAlign(*pSlices, sliceAlign);
}
}
void _ComputeSurfaceAlignmentsMicroTiled(E_HWTILEMODE tileMode, uint32 bpp, AddrSurfaceFlags flags, uint32 numSamples, uint32& baseAlignOut, uint32& pitchAlignOut, uint32& heightAlignOut)
{
if (bpp == 96 || bpp == 48 || bpp == 24)
bpp /= 3u;
uint32 thickness = TM_GetThickness(tileMode);
baseAlignOut = m_pipeInterleaveBytes;
pitchAlignOut = std::max(8u, m_pipeInterleaveBytes / bpp / numSamples / thickness);
heightAlignOut = 8;
_AdjustPitchAlignment(flags, pitchAlignOut);
}
void _ComputeSurfaceAlignmentsLinear(E_HWTILEMODE tileMode, uint32 bpp, AddrSurfaceFlags flags, uint32* pBaseAlign, uint32* pPitchAlign, uint32* pHeightAlign)
{
cemu_assert_debug(tileMode == E_HWTILEMODE::TM_LINEAR_GENERAL || tileMode == E_HWTILEMODE::TM_LINEAR_ALIGNED);
if (tileMode == E_HWTILEMODE::TM_LINEAR_ALIGNED)
{
uint32 pixelsPerPipeInterleave = 8 * m_pipeInterleaveBytes / bpp;
*pBaseAlign = m_pipeInterleaveBytes;
*pPitchAlign = std::max<uint32>(64, pixelsPerPipeInterleave);
*pHeightAlign = 1;
}
else if (tileMode == E_HWTILEMODE::TM_LINEAR_GENERAL)
{
*pBaseAlign = 1;
*pPitchAlign = 1;
*pHeightAlign = 1;
}
_AdjustPitchAlignment(flags, *pPitchAlign);
}
void _ComputeSurfaceInfoLinear(E_HWTILEMODE tileMode, uint32 bpp, uint32 numSamples, uint32 pitch, uint32 height, uint32 numSlices, uint32 mipLevel, uint32 padDims, AddrSurfaceFlags flags, AddrSurfaceInfo_OUT* pOut)
{
uint32 heightAlign;
uint32 pitchAlign;
uint32 baseAlign;
uint32 expPitch = pitch;
uint32 expHeight = height;
uint32 expNumSlices = numSlices;
_ComputeSurfaceAlignmentsLinear(tileMode, bpp, flags, &baseAlign, &pitchAlign, &heightAlign);
if (flags.linearWA && mipLevel == 0)
{
expPitch /= 3u;
expPitch = NextPow2(expPitch);
}
if (mipLevel)
{
expPitch = NextPow2(expPitch);
expHeight = NextPow2(expHeight);
if (flags.dimCube)
{
expNumSlices = numSlices;
if (numSlices <= 1)
padDims = 2;
else
padDims = 0;
}
else
{
expNumSlices = NextPow2(numSlices);
}
}
uint32 microTileThickness = TM_GetThickness(tileMode);
PadDimensions(tileMode, padDims, flags.dimCube, flags.cubeAsArray, &expPitch, pitchAlign, &expHeight, heightAlign, &expNumSlices, microTileThickness);
if (flags.linearWA && mipLevel == 0)
expPitch *= 3;
uint32 slices = expNumSlices * numSamples / microTileThickness;
pOut->pitch = expPitch;
pOut->height = expHeight;
pOut->depth = expNumSlices;
pOut->surfSize = (((uint64)expHeight * expPitch * slices * bpp * numSamples + 7) / 8);
pOut->baseAlign = baseAlign;
pOut->pitchAlign = pitchAlign;
pOut->heightAlign = heightAlign;
pOut->depthAlign = microTileThickness;
}
void _ComputeSurfaceInfoMicroTiled(E_HWTILEMODE tileMode, uint32 bpp, uint32 numSamples, uint32 pitch, uint32 height, uint32 numSlices, uint32 mipLevel, uint32 padDims, AddrSurfaceFlags flags, AddrSurfaceInfo_OUT* pOut)
{
E_HWTILEMODE expTileMode = tileMode;
uint32 expPitch = pitch;
uint32 expHeight = height;
uint32 expNumSlices = numSlices;
uint32 microTileThickness = TM_GetThickness(tileMode);
if (mipLevel)
{
expPitch = NextPow2(pitch);
expHeight = NextPow2(height);
if (flags.dimCube)
{
expNumSlices = numSlices;
if (numSlices <= 1)
padDims = 2;
else
padDims = 0;
}
else
{
expNumSlices = NextPow2(numSlices);
}
if (expTileMode == E_HWTILEMODE::TM_1D_TILED_THICK && expNumSlices < 4)
{
expTileMode = E_HWTILEMODE::TM_1D_TILED_THIN1;
microTileThickness = 1;
}
}
uint32 heightAlign;
uint32 pitchAlign;
uint32 baseAlign;
_ComputeSurfaceAlignmentsMicroTiled(expTileMode, bpp, flags, numSamples, /* outputs: */ baseAlign, pitchAlign, heightAlign);
PadDimensions(expTileMode, padDims, flags.dimCube, flags.cubeAsArray, &expPitch, pitchAlign, &expHeight, heightAlign, &expNumSlices, microTileThickness);
pOut->pitch = expPitch;
pOut->height = expHeight;
pOut->depth = expNumSlices;
pOut->surfSize = (((uint64)expHeight * expPitch * expNumSlices * bpp * numSamples + 7) / 8);
pOut->hwTileMode = expTileMode;
pOut->baseAlign = baseAlign;
pOut->pitchAlign = pitchAlign;
pOut->heightAlign = heightAlign;
pOut->depthAlign = microTileThickness;
}
void _ComputeSurfaceInfoMacroTiled(E_HWTILEMODE tileMode, E_HWTILEMODE baseTileMode, uint32 bpp, uint32 numSamples, uint32 pitch, uint32 height, uint32 numSlices, uint32 mipLevel, uint32 padDims, AddrSurfaceFlags flags, AddrSurfaceInfo_OUT* pOut)
{
uint32 macroWidth, macroHeight;
uint32 baseAlign, heightAlign, pitchAlign;
uint32 expPitch = pitch;
uint32 expHeight = height;
uint32 expNumSlices = numSlices;
E_HWTILEMODE expTileMode = tileMode;
uint32 microTileThickness = TM_GetThickness(tileMode);
if (mipLevel)
{
expPitch = NextPow2(pitch);
expHeight = NextPow2(height);
expNumSlices = NextPow2(numSlices);
if (flags.dimCube)
{
// cubemap
expNumSlices = numSlices;
padDims = numSlices <= 1 ? 2 : 0;
}
if (expTileMode == E_HWTILEMODE::TM_2D_TILED_THICK && expNumSlices < 4)
{
expTileMode = E_HWTILEMODE::TM_2D_TILED_THIN1;
microTileThickness = 1;
}
}
uint32 pitchAlignFactor = std::max<uint32>((m_pipeInterleaveBytes >> 3) / bpp, 1);
if (tileMode != baseTileMode && mipLevel != 0 && TM_IsThickAndMacroTiled(baseTileMode) && !TM_IsThickAndMacroTiled(tileMode))
{
			_ComputeSurfaceAlignmentsMacroTiled(baseTileMode, bpp, flags, numSamples, &baseAlign, &pitchAlign, &heightAlign, &macroWidth, &macroHeight);
if (expPitch < pitchAlign * pitchAlignFactor || expHeight < heightAlign)
{
_ComputeSurfaceInfoMicroTiled(E_HWTILEMODE::TM_1D_TILED_THIN1, bpp, numSamples, pitch, height, numSlices, mipLevel, padDims, flags, pOut);
return;
}
}
		_ComputeSurfaceAlignmentsMacroTiled(tileMode, bpp, flags, numSamples, &baseAlign, &pitchAlign, &heightAlign, &macroWidth, &macroHeight);
uint32 bankSwappedWidth = ComputeSurfaceBankSwappedWidth(tileMode, bpp, numSamples, pitch);
if (bankSwappedWidth > pitchAlign)
pitchAlign = bankSwappedWidth;
PadDimensions(tileMode, padDims, flags.dimCube, flags.cubeAsArray, &expPitch, pitchAlign, &expHeight, heightAlign, &expNumSlices, microTileThickness);
pOut->pitch = expPitch;
pOut->height = expHeight;
pOut->depth = expNumSlices;
pOut->surfSize = (((uint64)expHeight * expPitch * expNumSlices * bpp * numSamples + 7) / 8);
pOut->hwTileMode = expTileMode;
pOut->baseAlign = baseAlign;
pOut->pitchAlign = pitchAlign;
pOut->heightAlign = heightAlign;
pOut->depthAlign = microTileThickness;
}
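	// dispatch the surface size/alignment calculation to the linear, micro-tiled or macro-tiled path
	// based on the (possibly mip-adjusted) tile mode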
COMPUTE_SURFACE_RESULT ComputeSurfaceInfoEx(const AddrSurfaceInfo_IN* pIn, AddrSurfaceInfo_OUT* pOut)
{
Latte::E_HWTILEMODE tileMode = pIn->tileMode;
Latte::E_HWTILEMODE baseTileMode = tileMode;
uint32 bpp = pIn->bpp;
uint32 numSamples = std::max<uint32>(pIn->numSamples, 1);
uint32 pitch = pIn->width;
uint32 height = pIn->height;
uint32 numSlices = pIn->numSlices;
uint32 mipLevel = pIn->mipLevel;
AddrSurfaceFlags flags = pIn->flags;
uint32 padDims = 0;
if (flags.dimCube && mipLevel == 0)
padDims = 2;
if (flags.fmask)
tileMode = ConvertTileModeToNonBankSwappedMode(tileMode);
else
tileMode = _ComputeSurfaceMipLevelTileMode(tileMode, bpp, mipLevel, pitch, height, numSlices, numSamples, flags.depth, false);
switch (tileMode)
{
case E_HWTILEMODE::TM_LINEAR_GENERAL:
case E_HWTILEMODE::TM_LINEAR_ALIGNED:
_ComputeSurfaceInfoLinear(tileMode, bpp, numSamples, pitch, height, numSlices, mipLevel, padDims, flags, pOut);
pOut->hwTileMode = tileMode;
break;
case E_HWTILEMODE::TM_1D_TILED_THIN1:
case E_HWTILEMODE::TM_1D_TILED_THICK:
_ComputeSurfaceInfoMicroTiled(tileMode, bpp, numSamples, pitch, height, numSlices, mipLevel, padDims, flags, pOut);
break;
case E_HWTILEMODE::TM_2D_TILED_THIN1:
case E_HWTILEMODE::TM_2D_TILED_THIN2:
case E_HWTILEMODE::TM_2D_TILED_THIN4:
case E_HWTILEMODE::TM_2D_TILED_THICK:
case E_HWTILEMODE::TM_2B_TILED_THIN1:
case E_HWTILEMODE::TM_2B_TILED_THIN2:
case E_HWTILEMODE::TM_2B_TILED_THIN4:
case E_HWTILEMODE::TM_2B_TILED_THICK:
case E_HWTILEMODE::TM_3D_TILED_THIN1:
case E_HWTILEMODE::TM_3D_TILED_THICK:
case E_HWTILEMODE::TM_3B_TILED_THIN1:
case E_HWTILEMODE::TM_3B_TILED_THICK:
_ComputeSurfaceInfoMacroTiled(tileMode, baseTileMode, bpp, numSamples, pitch, height, numSlices, mipLevel, padDims, flags, pOut);
break;
default:
return COMPUTE_SURFACE_RESULT::UNKNOWN_FORMAT;
}
return COMPUTE_SURFACE_RESULT::RESULT_OK;
}
	void RestoreSurfaceInfo(uint32 elemMode, uint32 expandX, uint32 expandY, uint32* pBpp, uint32* pWidth, uint32* pHeight)
{
if (pBpp)
{
uint32 bpp = *pBpp;
uint32 originalBits;
switch (elemMode)
{
case 4:
originalBits = expandY * expandX * bpp;
break;
case 5:
case 6:
originalBits = bpp / expandX / expandY;
break;
case 7:
case 8:
originalBits = *pBpp;
break;
case 9:
case 12:
originalBits = 64;
break;
case 10:
case 11:
case 13:
originalBits = 128;
break;
case 0:
case 1:
case 2:
case 3:
originalBits = *pBpp;
break;
default:
originalBits = *pBpp;
break;
}
*pBpp = originalBits;
}
if (pWidth && pHeight)
{
uint32 width = *pWidth;
uint32 height = *pHeight;
if (expandX > 1 || expandY > 1)
{
if (elemMode == 4)
{
width /= expandX;
height /= expandY;
}
else
{
width *= expandX;
height *= expandY;
}
}
*pWidth = std::max<uint32>(width, 1);
*pHeight = std::max<uint32>(height, 1);
}
}
COMPUTE_SURFACE_RESULT ComputeSurfaceInfo(AddrSurfaceInfo_IN* pIn, AddrSurfaceInfo_OUT* pOut)
{
if (GetFillSizeFieldsFlags() == 1 && (pIn->size != sizeof(AddrSurfaceInfo_IN) || pOut->size != sizeof(AddrSurfaceInfo_OUT)))
return COMPUTE_SURFACE_RESULT::BAD_SIZE_FIELD;
cemu_assert_debug(pIn->bpp <= 128);
ComputeMipLevelDimensions(&pIn->width, &pIn->height, &pIn->numSlices, pIn->flags, pIn->format, pIn->mipLevel);
uint32 width = pIn->width;
uint32 height = pIn->height;
uint32 bpp = pIn->bpp;
uint32 elemMode;
uint32 expandX = 1;
uint32 expandY = 1;
cemu_assert_debug(pIn->tileIndex == 0 && pIn->pTileInfo == nullptr);
pOut->pixelBits = pIn->bpp;
if (pIn->format != E_HWSURFFMT::INVALID_FORMAT)
{
bpp = GetBitsPerPixel(pIn->format, &elemMode, &expandX, &expandY);
if (pIn->tileMode == E_HWTILEMODE::TM_LINEAR_ALIGNED && elemMode == 4 && expandX == 3)
pIn->flags.linearWA = true;
AdjustSurfaceInfo(elemMode, expandX, expandY, &bpp, &width, &height);
pIn->width = width;
pIn->height = height;
pIn->bpp = bpp;
}
else if (pIn->bpp != 0)
{
pIn->width = std::max<uint32>(pIn->width, 1);
pIn->height = std::max<uint32>(pIn->height, 1);
}
else
return COMPUTE_SURFACE_RESULT::UNKNOWN_FORMAT;
COMPUTE_SURFACE_RESULT r = ComputeSurfaceInfoEx(pIn, pOut);
if (r != COMPUTE_SURFACE_RESULT::RESULT_OK)
return r;
pOut->bpp = pIn->bpp;
pOut->pixelPitch = pOut->pitch;
pOut->pixelHeight = pOut->height;
if (pIn->format != E_HWSURFFMT::INVALID_FORMAT && (!pIn->flags.linearWA || pIn->mipLevel == 0))
{
RestoreSurfaceInfo(elemMode, expandX, expandY, &bpp, &pOut->pixelPitch, &pOut->pixelHeight);
}
uint32 sliceFlags = GetSliceComputingFlags();
if (sliceFlags)
{
if (sliceFlags == 1)
pOut->sliceSize = (pOut->height * pOut->pitch * pOut->bpp * pIn->numSamples + 7) / 8;
}
else if (pIn->flags.dim3D)
{
pOut->sliceSize = (uint32)(pOut->surfSize);
}
else
{
if(pOut->surfSize == 0 && pOut->depth == 0)
pOut->sliceSize = 0; // edge case for (1D)_ARRAY textures with res 0/0/0
else
pOut->sliceSize = (uint32)(pOut->surfSize / pOut->depth);
if (pIn->slice == pIn->numSlices - 1 && pIn->numSlices > 1)
pOut->sliceSize += pOut->sliceSize * (pOut->depth - pIn->numSlices);
}
pOut->pitchTileMax = (pOut->pitch >> 3) - 1;
pOut->heightTileMax = (pOut->height >> 3) - 1;
pOut->sliceTileMax = ((pOut->height * pOut->pitch >> 6) - 1);
return COMPUTE_SURFACE_RESULT::RESULT_OK;
}
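	// GX2-level entry point: translates GX2 surface parameters into an AddrSurfaceInfo_IN request,
	// with a separate fast path for TM_LINEAR_SPECIAL surfaces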
void GX2CalculateSurfaceInfo(Latte::E_GX2SURFFMT surfaceFormat, uint32 surfaceWidth, uint32 surfaceHeight, uint32 surfaceDepth, E_DIM surfaceDim, E_GX2TILEMODE surfaceTileMode, uint32 surfaceAA, uint32 level, AddrSurfaceInfo_OUT* pSurfOut, bool optimizeForDepthBuffer, bool optimizeForScanBuffer)
{
AddrSurfaceInfo_IN surfInfoIn = { 0 };
Latte::E_HWSURFFMT hwFormat = Latte::GetHWFormat(surfaceFormat);
memset(pSurfOut, 0, sizeof(AddrSurfaceInfo_OUT));
pSurfOut->size = sizeof(AddrSurfaceInfo_OUT);
if (surfaceTileMode == E_GX2TILEMODE::TM_LINEAR_SPECIAL)
{
uint32 numSamples = 1 << surfaceAA;
uint32 blockSize = IsCompressedFormat(hwFormat) ? 4 : 1;
uint32 width = ((surfaceWidth >> level) + blockSize - 1) & ~(blockSize - 1);
pSurfOut->bpp = Latte::GetFormatBits(hwFormat);
pSurfOut->pitch = width / blockSize;
pSurfOut->pixelBits = pSurfOut->bpp;
pSurfOut->baseAlign = 1;
pSurfOut->pitchAlign = 1;
pSurfOut->heightAlign = 1;
pSurfOut->depthAlign = 1;
switch (surfaceDim)
{
case E_DIM::DIM_1D:
pSurfOut->height = 1;
pSurfOut->depth = 1;
break;
case E_DIM::DIM_2D:
pSurfOut->height = std::max<uint32>(surfaceHeight >> level, 1);
pSurfOut->depth = 1;
break;
case E_DIM::DIM_3D:
pSurfOut->height = std::max<uint32>(surfaceHeight >> level, 1);
pSurfOut->depth = std::max<uint32>(surfaceDepth >> level, 1);
break;
case E_DIM::DIM_CUBEMAP:
pSurfOut->height = std::max<uint32>(surfaceHeight >> level, 1);
pSurfOut->depth = std::max<uint32>(surfaceDepth, 6);
break;
case E_DIM::DIM_1D_ARRAY:
pSurfOut->height = 1;
pSurfOut->depth = surfaceDepth;
break;
case E_DIM::DIM_2D_ARRAY:
pSurfOut->height = std::max<uint32>(surfaceHeight >> level, 1);
pSurfOut->depth = surfaceDepth;
break;
default:
break;
}
pSurfOut->height = (~(blockSize - 1) & (pSurfOut->height + blockSize - 1)) / (uint64)blockSize;
pSurfOut->pixelPitch = ~(blockSize - 1) & ((surfaceWidth >> level) + blockSize - 1);
pSurfOut->pixelPitch = std::max<uint32>(pSurfOut->pixelPitch, blockSize);
pSurfOut->pixelHeight = ~(blockSize - 1) & ((surfaceHeight >> level) + blockSize - 1);
pSurfOut->pixelHeight = std::max<uint32>(pSurfOut->pixelHeight, blockSize);
pSurfOut->pitch = std::max<uint32>(pSurfOut->pitch, 1);
pSurfOut->height = std::max<uint32>(pSurfOut->height, 1);
pSurfOut->surfSize = pSurfOut->bpp * numSamples * pSurfOut->depth * pSurfOut->height * pSurfOut->pitch >> 3;
if (surfaceDim == E_DIM::DIM_3D)
pSurfOut->sliceSize = (uint32)pSurfOut->surfSize;
else
{
if(pSurfOut->surfSize == 0 && pSurfOut->depth == 0)
pSurfOut->sliceSize = 0;
else
pSurfOut->sliceSize = (uint32)(pSurfOut->surfSize / pSurfOut->depth);
}
pSurfOut->pitchTileMax = (pSurfOut->pitch >> 3) - 1;
pSurfOut->heightTileMax = (pSurfOut->height >> 3) - 1;
pSurfOut->sliceTileMax = (pSurfOut->height * pSurfOut->pitch >> 6) - 1;
}
else
{
memset(&surfInfoIn, 0, sizeof(AddrSurfaceInfo_IN));
surfInfoIn.size = sizeof(AddrSurfaceInfo_IN);
if (!IsValidHWTileMode((E_HWTILEMODE)surfaceTileMode))
{
// cemuLog_log(LogType::Force, "Unexpected TileMode {} in AddrLib", (uint32)surfaceTileMode);
surfaceTileMode = (E_GX2TILEMODE)((uint32)surfaceTileMode & 0xF);
}
surfInfoIn.tileMode = MakeHWTileMode(surfaceTileMode);
surfInfoIn.format = hwFormat;
surfInfoIn.bpp = Latte::GetFormatBits(hwFormat);
surfInfoIn.numSamples = 1 << surfaceAA;
surfInfoIn.numFrags = surfInfoIn.numSamples;
surfInfoIn.width = std::max<uint32>(surfaceWidth >> level, 1);
switch (surfaceDim)
{
case E_DIM::DIM_1D:
surfInfoIn.height = 1;
surfInfoIn.numSlices = 1;
break;
case E_DIM::DIM_2D:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = 1;
break;
case E_DIM::DIM_3D:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = std::max<uint32>(surfaceDepth >> level, 1);
surfInfoIn.flags.dim3D = true;
break;
case E_DIM::DIM_CUBEMAP:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = std::max<uint32>(surfaceDepth, 6);
surfInfoIn.flags.dimCube = true;
break;
case E_DIM::DIM_1D_ARRAY:
surfInfoIn.height = 1;
surfInfoIn.numSlices = surfaceDepth;
break;
case E_DIM::DIM_2D_ARRAY:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = surfaceDepth;
break;
case E_DIM::DIM_2D_MSAA:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = 1;
break;
case E_DIM::DIM_2D_ARRAY_MSAA:
surfInfoIn.height = std::max<uint32>(surfaceHeight >> level, 1);
surfInfoIn.numSlices = surfaceDepth;
break;
default:
break;
}
surfInfoIn.slice = 0;
surfInfoIn.mipLevel = level;
surfInfoIn.flags.inputIsBase = (level == 0);
surfInfoIn.flags.depth = optimizeForDepthBuffer;
surfInfoIn.flags.display = optimizeForScanBuffer;
ComputeSurfaceInfo(&surfInfoIn, pSurfOut);
}
}
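	// walk mip levels 1..mipIndex, adding alignment padding and the size of each preceding mip,
	// to obtain the byte offset of mipIndex relative to the start of the mip chain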
uint32 CalculateMipOffset(Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, E_DIM dim, E_HWTILEMODE tileMode, uint32 swizzle, uint32 surfaceAA, sint32 mipIndex)
{
cemu_assert_debug(IsValidHWTileMode(tileMode));
AddrSurfaceInfo_OUT surfaceInfo;
uint32 currentMipOffset = 0;
E_HWTILEMODE lastTileMode = tileMode;
uint32 prevSize = 0;
for (sint32 level = 1; level <= mipIndex; level++)
{
GX2CalculateSurfaceInfo(format, width, height, depth, dim, MakeGX2TileMode(tileMode), surfaceAA, level, &surfaceInfo);
if (level)
{
uint32 pad = 0;
if (TM_IsMacroTiled(lastTileMode) && !TM_IsMacroTiled(surfaceInfo.hwTileMode))
{
if (level > 1)
pad = swizzle & 0xFFFF;
}
pad += (surfaceInfo.baseAlign - (currentMipOffset % surfaceInfo.baseAlign)) % surfaceInfo.baseAlign;
currentMipOffset = currentMipOffset + pad + prevSize;
}
else
{
currentMipOffset = prevSize;
}
lastTileMode = surfaceInfo.hwTileMode;
prevSize = (uint32)surfaceInfo.surfSize;
}
return currentMipOffset;
}
// Calculate aligned address and size of a given slice and mip level
// For thick-tiled surfaces this returns the area of the whole thick tile (4 slices per thick tile) and the relative slice index within the tile is returned in subSliceIndex
void CalculateMipAndSliceAddr(uint32 physAddr, uint32 physMipAddr, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, Latte::E_DIM dim, Latte::E_HWTILEMODE tileMode, uint32 swizzle, uint32 surfaceAA, sint32 mipIndex, sint32 sliceIndex, uint32* outputSliceOffset, uint32* outputSliceSize, sint32* subSliceIndex)
{
cemu_assert_debug((uint32)tileMode < 16); // only hardware tilemodes allowed
AddrSurfaceInfo_OUT surfaceInfo;
uint32 currentMipOffset = 0;
Latte::E_HWTILEMODE lastTileMode = tileMode;
uint32 prevSize = 0;
for (sint32 level = 1; level <= mipIndex; level++)
{
GX2CalculateSurfaceInfo(format, width, height, depth, dim, MakeGX2TileMode(tileMode), surfaceAA, level, &surfaceInfo);
// extract swizzle from mip-pointer if macro tiled
if (level == 1 && TM_IsMacroTiled(surfaceInfo.hwTileMode))
{
swizzle = physMipAddr & 0x700;
physMipAddr &= ~0x700;
}
cemu_assert_debug(IsValidHWTileMode(surfaceInfo.hwTileMode));
if (level)
{
uint32 pad = 0;
if (TM_IsMacroTiled(lastTileMode) && !TM_IsMacroTiled(surfaceInfo.hwTileMode))
{
if (level > 1)
pad = swizzle & 0xFFFF;
}
pad += (surfaceInfo.baseAlign - (currentMipOffset % surfaceInfo.baseAlign)) % surfaceInfo.baseAlign;
currentMipOffset = currentMipOffset + pad + prevSize;
}
else
{
currentMipOffset = prevSize;
}
lastTileMode = surfaceInfo.hwTileMode;
prevSize = (uint32)surfaceInfo.surfSize;
}
// calculate slice offset
if( mipIndex == 0 ) // make sure surfaceInfo is initialized
GX2CalculateSurfaceInfo(format, width, height, depth, dim, MakeGX2TileMode(tileMode), surfaceAA, 0, &surfaceInfo);
uint32 sliceOffset = 0;
uint32 sliceSize = 0;
// surfaceInfo.sliceSize isn't always correct (especially when depth is misaligned with 4 for THICK tile modes?) so we calculate it manually
// this formula only works because both pitch and height are aligned to micro/macro blocks by GX2CalculateSurfaceInfo, normally we would have to use the tile dimensions to calculate the size
uint32 correctedSliceSize = surfaceInfo.pitch*surfaceInfo.height*surfaceInfo.bpp / 8;
if (TM_IsThick(surfaceInfo.hwTileMode))
{
// 4 slices are interleaved
sliceOffset = (sliceIndex&~3) * correctedSliceSize;
sliceSize = correctedSliceSize * 4;
*subSliceIndex = sliceIndex & 3;
}
else
{
sliceOffset = sliceIndex * correctedSliceSize;
sliceSize = correctedSliceSize;
*subSliceIndex = 0;
}
if (mipIndex)
{
sliceOffset += physMipAddr;
}
else
{
sliceOffset += physAddr;
}
*outputSliceOffset = currentMipOffset + sliceOffset;
*outputSliceSize = sliceSize;
}
};
// ==== File: cemu-project_Cemu/src/Cafe/HW/Latte/LatteAddrLib/LatteAddrLib_Coord.cpp ====
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/LatteAddrLib/LatteAddrLib.h"
#include "Cafe/OS/libs/gx2/GX2_Surface.h"
using namespace Latte;
namespace LatteAddrLib
{
#if BOOST_OS_LINUX || BOOST_OS_MACOS
unsigned char _BitScanReverse(uint32* _Index, uint32 _Mask)
{
if (!_Mask)
return 0;
		*_Index = 31 - __builtin_clz(_Mask);
return 1;
}
#endif
static const uint32 bankSwapOrder[] = { 0, 1, 3, 2 };
uint32 _GetMicroTileType(bool isDepth)
{
return isDepth ? 1 : 0;
}
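	// compute the bit-interleaved index of a pixel inside an 8x8 (8x8x4 for thick modes) micro tile;
	// the bit pattern depends on bpp and on whether the surface is a depth target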
uint32 _ComputePixelIndexWithinMicroTile(uint32 x, uint32 y, uint32 z, uint32 bpp, E_HWTILEMODE tileMode, uint32 microTileType)
{
cemu_assert_debug(microTileType == 0 || microTileType == 1);
uint32 pixelBit0, pixelBit1, pixelBit2, pixelBit3, pixelBit4, pixelBit5, pixelBit6, pixelBit7, pixelBit8;
pixelBit6 = 0;
pixelBit7 = 0;
pixelBit8 = 0;
uint32 thickness = LatteAddrLib::TM_GetThickness(tileMode);
if (microTileType)
{
pixelBit0 = x & 1;
pixelBit1 = y & 1;
pixelBit2 = (x & 2) >> 1;
pixelBit3 = (y & 2) >> 1;
pixelBit4 = (x & 4) >> 2;
pixelBit5 = (y & 4) >> 2;
}
else
{
switch (bpp)
{
case 8:
pixelBit0 = x & 1;
pixelBit1 = (x & 2) >> 1;
pixelBit2 = (x & 4) >> 2;
pixelBit3 = (y & 2) >> 1;
pixelBit4 = y & 1;
pixelBit5 = (y & 4) >> 2;
break;
case 0x10:
pixelBit0 = x & 1;
pixelBit1 = (x & 2) >> 1;
pixelBit2 = (x & 4) >> 2;
pixelBit3 = y & 1;
pixelBit4 = (y & 2) >> 1;
pixelBit5 = (y & 4) >> 2;
break;
case 0x20:
case 0x60:
pixelBit0 = x & 1;
pixelBit1 = (x & 2) >> 1;
pixelBit2 = y & 1;
pixelBit3 = (x & 4) >> 2;
pixelBit4 = (y & 2) >> 1;
pixelBit5 = (y & 4) >> 2;
break;
case 0x40:
pixelBit0 = x & 1;
pixelBit1 = y & 1;
pixelBit2 = (x & 2) >> 1;
pixelBit3 = (x & 4) >> 2;
pixelBit4 = (y & 2) >> 1;
pixelBit5 = (y & 4) >> 2;
break;
case 0x80:
pixelBit0 = y & 1;
pixelBit1 = x & 1;
pixelBit2 = (x & 2) >> 1;
pixelBit3 = (x & 4) >> 2;
pixelBit4 = (y & 2) >> 1;
pixelBit5 = (y & 4) >> 2;
break;
default:
pixelBit0 = x & 1;
pixelBit1 = (x & 2) >> 1;
pixelBit2 = y & 1;
pixelBit3 = (x & 4) >> 2;
pixelBit4 = (y & 2) >> 1;
pixelBit5 = (y & 4) >> 2;
break;
}
}
if (thickness > 1)
{
pixelBit6 = z & 1;
pixelBit7 = (z & 2) >> 1;
}
return (pixelBit8 << 8) | (pixelBit7 << 7) | (pixelBit6 << 6) | (pixelBit5 << 5) | (pixelBit4 << 4) | (pixelBit3 << 3) | (pixelBit2 << 2) | (pixelBit1 << 1) | pixelBit0;
}
uint32 _ComputePipeFromCoordWoRotation(uint32 x, uint32 y)
{
// hardcoded to assume 2 pipes
uint32 pipe;
pipe = ((y >> 3) ^ (x >> 3)) & 1;
return pipe;
}
uint32 _ComputeBankFromCoordWoRotation(uint32 x, uint32 y)
{
uint32 bank;
if (m_banks == 4)
{
uint32 bankNew = (y >> 4) & 3;
bankNew = ((bankNew >> 1) | (bankNew << 1)); // swap lowest two bits
bankNew ^= (x >> 3);
bankNew &= 3;
bank = bankNew;
}
else if (m_banks == 8)
{
cemu_assert_unimplemented();
bank = 0;
}
else
{
bank = 0;
}
return bank;
}
uint32 ComputeSurfaceAddrFromCoordLinear(uint32 x, uint32 y, uint32 slice, uint32 sample, uint32 bpp, uint32 pitch, uint32 height, uint32 numSlices)
{
uint32 pixelIndex = x + pitch * y + (slice + numSlices * sample) * height * pitch;
return (pixelIndex * bpp) / 8;
}
uint32 ComputeSurfaceAddrFromCoordMicroTiled(uint32 x, uint32 y, uint32 slice, uint32 bpp, uint32 pitch, uint32 height, Latte::E_HWTILEMODE tileMode, bool isDepth)
{
uint32 microTileThickness = (tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THICK) ? 4 : 1;
uint32 microTilesPerRow = pitch >> 3;
uint32 microTileIndexX = x >> 3;
uint32 microTileIndexY = y >> 3;
uint32 microTileBytes = microTileThickness * (((bpp << 6) + 7) >> 3); // each tile is 8x8 or 8x8x4
uint32 microTileOffset = microTileBytes * (uint64)((x >> 3) + (pitch >> 3) * (y >> 3));
uint32 sliceBytes = (height * (uint64)pitch * microTileThickness * bpp + 7) / 8;
uint32 sliceOffset = sliceBytes * (slice / microTileThickness);
uint32 pixelIndex = _ComputePixelIndexWithinMicroTile(x, y, slice, bpp, tileMode, _GetMicroTileType(isDepth));
uint32 pixelOffset = bpp * pixelIndex;
pixelOffset >>= 3;
return pixelOffset + microTileOffset + sliceOffset;
}
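	// full macro-tiled address calculation: combines the pixel index within the micro tile, sample
	// splitting, pipe/bank selection (including swizzle and bank swapping) and the macro tile offset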
uint32 ComputeSurfaceAddrFromCoordMacroTiled(uint32 x, uint32 y, uint32 slice, uint32 sample, uint32 bpp, uint32 pitch, uint32 height, uint32 numSamples, Latte::E_HWTILEMODE tileMode, bool isDepth, uint32 pipeSwizzle, uint32 bankSwizzle)
{
uint32 microTileThickness = LatteAddrLib::TM_GetThickness((E_HWTILEMODE)tileMode);
uint32 microTileBits = numSamples * bpp * (microTileThickness * (8 * 8));
uint32 microTileBytes = microTileBits >> 3;
uint32 pixelIndex = _ComputePixelIndexWithinMicroTile(x, y, slice, bpp, tileMode, _GetMicroTileType(isDepth));
uint32 sampleOffset, pixelOffset;
if (isDepth)
{
sampleOffset = bpp * sample;
pixelOffset = numSamples * bpp * pixelIndex;
}
else
{
sampleOffset = sample * (microTileBits / numSamples);
pixelOffset = bpp * pixelIndex;
}
uint32 elemOffset = pixelOffset + sampleOffset;
uint32 bytesPerSample = microTileBytes / numSamples;
uint32 sampleSlice, numSampleSplits;
if (numSamples <= 1 || microTileBytes <= m_splitSize)
{
numSampleSplits = 1;
sampleSlice = 0;
}
else
{
uint32 samplesPerSlice = m_splitSize / bytesPerSample;
numSampleSplits = numSamples / samplesPerSlice;
numSamples = samplesPerSlice;
sampleSlice = elemOffset / (microTileBits / numSampleSplits);
elemOffset %= microTileBits / numSampleSplits;
}
elemOffset >>= 3;
uint32 pipe = _ComputePipeFromCoordWoRotation(x, y);
uint32 bank = _ComputeBankFromCoordWoRotation(x, y);
uint32 bankPipe = pipe + m_pipes * bank;
uint32 rotation = ComputeSurfaceRotationFromTileMode(tileMode);
uint32 swizzle = pipeSwizzle + m_pipes * bankSwizzle;
uint32 sliceIn = slice;
if (TM_IsThickAndMacroTiled(tileMode))
sliceIn >>= 2;
bankPipe ^= m_pipes * sampleSlice * ((m_banks >> 1) + 1) ^ (swizzle + sliceIn * rotation);
bankPipe %= m_pipes * m_banks;
pipe = bankPipe % m_pipes;
bank = bankPipe / m_pipes;
uint64 sliceBytes = (((uint64)height * pitch * microTileThickness * bpp * numSamples + 7) / 8);
uint64 sliceOffset = sliceBytes * ((sampleSlice + numSampleSplits * slice) / microTileThickness);
uint32 macroTilePitch = 8 * m_banks;
uint32 macroTileHeight = 8 * m_pipes;
switch (tileMode)
{
case Latte::E_HWTILEMODE::TM_2D_TILED_THIN2:
case Latte::E_HWTILEMODE::TM_2B_TILED_THIN2:
macroTilePitch >>= 1;
macroTileHeight <<= 1;
break;
case Latte::E_HWTILEMODE::TM_2D_TILED_THIN4:
case Latte::E_HWTILEMODE::TM_2B_TILED_THIN4:
macroTilePitch >>= 2;
macroTileHeight <<= 2;
break;
default:
break;
}
uint32 macroTilesPerRow = pitch / macroTilePitch;
uint32 macroTileBytes = (numSamples * microTileThickness * bpp * macroTileHeight * macroTilePitch + 7) >> 3;
uint32 macroTileIndexX = x / macroTilePitch;
uint32 macroTileIndexY = y / macroTileHeight;
uint32 macroTileOffset = (x / macroTilePitch + pitch / macroTilePitch * (y / macroTileHeight)) * macroTileBytes;
if (TM_IsBankSwapped(tileMode))
{
uint32 bankSwapWidth = ComputeSurfaceBankSwappedWidth(tileMode, bpp, numSamples, pitch);
uint32 swapIndex = macroTilePitch * macroTileIndexX / bankSwapWidth;
uint32 bankMask = m_banks - 1;
bank ^= bankSwapOrder[swapIndex & bankMask];
}
uint32 pipeOffset = (pipe << m_pipeInterleaveBytesBitcount);
uint32 bankOffset = (bank << (m_pipesBitcount + m_pipeInterleaveBytesBitcount));
uint32 numSwizzleBits = (m_banksBitcount + m_pipesBitcount);
uint32 macroSliceOffset = (uint32)((macroTileOffset + sliceOffset) >> numSwizzleBits);
macroSliceOffset += elemOffset;
uint32 macroSliceOffsetHigh = macroSliceOffset & ~((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 macroSliceOffsetLow = macroSliceOffset & ((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 finalMacroTileOffset = (macroSliceOffsetHigh << numSwizzleBits) | macroSliceOffsetLow;
return finalMacroTileOffset | pipeOffset | bankOffset;
}
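	// precompute all per-surface constants (including an 8x8x8 pixel index lookup table) so the cached
	// address functions below only need cheap per-pixel math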
void SetupCachedSurfaceAddrInfo(CachedSurfaceAddrInfo* info, uint32 slice, uint32 sample, uint32 bpp, uint32 pitch, uint32 height, uint32 depth, uint32 numSamples, E_HWTILEMODE tileMode, int isDepth, uint32 pipeSwizzle, uint32 bankSwizzle)
{
info->slice = slice;
info->sample = sample;
info->bpp = bpp;
info->pitch = pitch;
info->height = height;
info->depth = depth;
info->numSamples = numSamples;
info->tileMode = tileMode;
info->isDepth = isDepth;
info->pipeSwizzle = pipeSwizzle;
info->bankSwizzle = bankSwizzle;
// calculate static info
info->microTileThickness = LatteAddrLib::TM_GetThickness((E_HWTILEMODE)tileMode);
info->microTileBits = info->numSamples * info->bpp * (info->microTileThickness * (8 * 8));
info->microTileBytes = info->microTileBits >> 3;
info->microTileType = (info->isDepth != 0) ? 1 : 0;
cemu_assert_debug(sample == 0); // non-zero not supported
info->rotation = ComputeSurfaceRotationFromTileMode((E_HWTILEMODE)tileMode);
// macro tile
info->macroTilePitch = 8 * m_banks;
info->macroTileHeight = 8 * m_pipes;
switch (info->tileMode)
{
case E_HWTILEMODE::TM_2D_TILED_THIN2:
case E_HWTILEMODE::TM_2B_TILED_THIN2:
info->macroTilePitch >>= 1;
info->macroTileHeight <<= 1;
break;
case E_HWTILEMODE::TM_2D_TILED_THIN4:
case E_HWTILEMODE::TM_2B_TILED_THIN4:
info->macroTilePitch >>= 2;
info->macroTileHeight <<= 2;
break;
default:
break;
}
_BitScanReverse((DWORD*)&info->macroTilePitchBits, info->macroTilePitch);
_BitScanReverse((DWORD*)&info->macroTileHeightBits, info->macroTileHeight);
info->macroTilesPerRow = info->pitch / info->macroTilePitch;
info->macroTileBytes = (info->numSamples * info->microTileThickness * info->bpp * info->macroTileHeight * info->macroTilePitch + 7) >> 3;
// slice
info->sliceBytes = (info->height * (uint64)info->pitch * info->microTileThickness * info->bpp * info->numSamples + 7) / 8;
info->sliceIn = info->slice;
if (TM_IsThickAndMacroTiled(tileMode))
info->sliceIn >>= 2;
// bank swap
if (TM_IsBankSwapped(tileMode))
info->bankSwapWidth = ComputeSurfaceBankSwappedWidth(tileMode, info->bpp, info->numSamples, info->pitch);
// pixel offset multiplier
if (info->isDepth)
{
info->pixelOffsetMul = info->numSamples * info->bpp;
}
else
{
info->pixelOffsetMul = info->bpp;
}
info->bytesPerPixel = info->pixelOffsetMul >> 3;
// table for micro tile offset calculation (we could pre-generate these)
for (sint32 z = 0; z < 8; z++)
{
for (sint32 y = 0; y < 8; y++)
{
for (sint32 x = 0; x < 8; x++)
{
uint16 v = _ComputePixelIndexWithinMicroTile(x, y, z, info->bpp, info->tileMode, info->microTileType);
info->microTilePixelIndexTable[x + y * 8 + z * 8 * 8] = v;
}
}
}
// other constant values
uint32 swizzle = info->pipeSwizzle + m_pipes * info->bankSwizzle;
info->c0 = (swizzle + info->sliceIn * info->rotation);
}
uint32 ComputeSurfaceAddrFromCoordMacroTiledCached(uint32 x, uint32 y, CachedSurfaceAddrInfo* info)
{
uint32 numSamples = info->numSamples;
uint32 pixelIndex = (uint32)info->microTilePixelIndexTable[(x & 7) + ((y & 7) << 3) + ((info->slice & 7) << 6)];
uint32 pixelOffset = pixelIndex * info->pixelOffsetMul;
uint32 bytesPerSample = info->microTileBytes / numSamples;
uint32 sampleSlice, numSampleSplits, samplesPerSlice;
if (numSamples <= 1 || info->microTileBytes <= m_splitSize)
{
samplesPerSlice = numSamples;
numSampleSplits = 1;
sampleSlice = 0;
}
else
{
samplesPerSlice = m_splitSize / bytesPerSample;
numSampleSplits = numSamples / samplesPerSlice;
numSamples = samplesPerSlice;
sampleSlice = pixelOffset / (info->microTileBits / numSampleSplits);
pixelOffset %= info->microTileBits / numSampleSplits;
}
pixelOffset >>= 3;
uint32 pipe = _ComputePipeFromCoordWoRotation(x, y);
uint32 bank = _ComputeBankFromCoordWoRotation(x, y);
uint32 bankPipe = pipe + m_pipes * bank;
bankPipe ^= m_pipes * sampleSlice * ((m_banks >> 1) + 1) ^ info->c0;
bankPipe %= m_pipes * m_banks;
pipe = bankPipe % m_pipes;
bank = bankPipe / m_pipes;
uint32 sliceOffset = info->sliceBytes * ((sampleSlice + numSampleSplits * info->slice) / info->microTileThickness);
uint32 macroTileIndexX = x >> info->macroTilePitchBits;
uint32 macroTileIndexY = y >> info->macroTileHeightBits;
uint32 macroTileOffset = (macroTileIndexX + (info->pitch >> info->macroTilePitchBits) * macroTileIndexY) * info->macroTileBytes;
if (TM_IsBankSwapped(info->tileMode))
{
uint32 swapIndex = info->macroTilePitch * macroTileIndexX / info->bankSwapWidth;
bank ^= bankSwapOrder[swapIndex & (m_banks - 1)];
}
uint32 pipeOffset = (pipe << m_pipeInterleaveBytesBitcount);
uint32 bankOffset = (bank << (m_pipesBitcount + m_pipeInterleaveBytesBitcount));
uint32 numSwizzleBits = (m_banksBitcount + m_pipesBitcount);
uint32 macroSliceOffset = (uint32)((macroTileOffset + sliceOffset) >> numSwizzleBits);
macroSliceOffset += pixelOffset;
uint32 macroSliceOffsetHigh = macroSliceOffset & ~((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 macroSliceOffsetLow = macroSliceOffset & ((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 finalMacroTileOffset = (macroSliceOffsetHigh << numSwizzleBits) | macroSliceOffsetLow;
return finalMacroTileOffset | pipeOffset | bankOffset;
}
	/*
	 * Optimized routine with the following assumptions:
	 * tileMode is 4 (TM_2D_TILED_THIN1)
	 * numSamples is 1
	 */
uint32 ComputeSurfaceAddrFromCoordMacroTiledCached_tm04_sample1(uint32 x, uint32 y, CachedSurfaceAddrInfo* info)
{
uint32 pixelIndex = (uint32)info->microTilePixelIndexTable[(x & 7) + ((y & 7) << 3) + ((info->slice & 7) << 6)];
uint32 pixelOffset = pixelIndex * info->pixelOffsetMul;
pixelOffset >>= 3; // bits to bytes
uint32 pipe = _ComputePipeFromCoordWoRotation(x, y); // pipe = ((y >> 3) ^ (x >> 3)) & 1;
uint32 bank = _ComputeBankFromCoordWoRotation(x, y); // based on (x>>3)&3 and (y>>4)&3
pipe ^= (info->c0 >> 0) & 1;
bank ^= (info->c0 >> 1) & 3;
uint32 sliceOffset = info->sliceBytes * (info->slice / info->microTileThickness);
uint32 macroTileIndexX = x >> info->macroTilePitchBits;
uint32 macroTileIndexY = y >> info->macroTileHeightBits;
uint32 macroTileOffset = (macroTileIndexX + (info->pitch >> info->macroTilePitchBits) * macroTileIndexY) * info->macroTileBytes;
uint32 pipeOffset = (pipe << m_pipeInterleaveBytesBitcount);
uint32 bankOffset = (bank << (m_pipesBitcount + m_pipeInterleaveBytesBitcount));
uint32 numSwizzleBits = (m_banksBitcount + m_pipesBitcount);
uint32 macroSliceOffset = (uint32)((macroTileOffset + sliceOffset) >> numSwizzleBits);
macroSliceOffset += pixelOffset;
uint32 macroSliceOffsetHigh = macroSliceOffset & ~((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 macroSliceOffsetLow = macroSliceOffset & ((1 << m_pipeInterleaveBytesBitcount) - 1);
uint32 finalMacroTileOffset = (macroSliceOffsetHigh << numSwizzleBits) | macroSliceOffsetLow;
return finalMacroTileOffset | pipeOffset | bankOffset;
}
};
// ==== File: cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/RendererShader.cpp ====
#include "Cafe/HW/Latte/Renderer/RendererShader.h"
#include "Cafe/GameProfile/GameProfile.h"
// generate a Cemu version and setting dependent id
uint32 RendererShader::GeneratePrecompiledCacheId()
{
uint32 v = 0;
const char* s = EMULATOR_VERSION_SUFFIX;
while (*s)
{
v = std::rotl<uint32>(v, 7);
v += (uint32)(*s);
s++;
}
v += (EMULATOR_VERSION_MAJOR * 1000000u);
v += (EMULATOR_VERSION_MINOR * 10000u);
v += (EMULATOR_VERSION_PATCH * 100u);
// settings that can influence shaders
v += (uint32)g_current_game_profile->GetAccurateShaderMul() * 133;
return v;
}
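// worked example with hypothetical numbers (not taken from an actual build): for version
// 2.1.3 with an empty suffix and GetAccurateShaderMul() evaluating to 1, the id would be
// 2*1000000 + 1*10000 + 3*100 + 1*133 = 2010433; any change to the version or to a
// shader-affecting setting therefore yields a different precompiled cache id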
void RendererShader::GenerateShaderPrecompiledCacheFilename(RendererShader::ShaderType type, uint64 baseHash, uint64 auxHash, uint64& h1, uint64& h2)
{
h1 = baseHash;
h2 = auxHash;
if (type == RendererShader::ShaderType::kVertex)
h2 += 0xA16374cull;
else if (type == RendererShader::ShaderType::kFragment)
h2 += 0x8752deull;
else if (type == RendererShader::ShaderType::kGeometry)
h2 += 0x65a035ull;
}
| 994
|
C++
|
.cpp
| 31
| 30.032258
| 149
| 0.739312
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,230
|
RendererOuputShader.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/RendererOuputShader.cpp
|
#include "Cafe/HW/Latte/Renderer/RendererOuputShader.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
const std::string RendererOutputShader::s_copy_shader_source =
R"(#version 420
#ifdef VULKAN
layout(location = 0) in vec2 passUV;
layout(binding = 0) uniform sampler2D textureSrc;
layout(location = 0) out vec4 colorOut0;
#else
in vec2 passUV;
layout(binding=0) uniform sampler2D textureSrc;
layout(location = 0) out vec4 colorOut0;
#endif
void main()
{
colorOut0 = vec4(texture(textureSrc, passUV).rgb,1.0);
}
)";
const std::string RendererOutputShader::s_bicubic_shader_source =
R"(
#version 420
#ifdef VULKAN
layout(location = 0) in vec2 passUV;
layout(binding = 0) uniform sampler2D textureSrc;
layout(binding = 1) uniform vec2 textureSrcResolution;
layout(location = 0) out vec4 colorOut0;
#else
in vec2 passUV;
layout(binding=0) uniform sampler2D textureSrc;
uniform vec2 textureSrcResolution;
layout(location = 0) out vec4 colorOut0;
#endif
vec4 cubic(float x)
{
float x2 = x * x;
float x3 = x2 * x;
vec4 w;
w.x = -x3 + 3 * x2 - 3 * x + 1;
w.y = 3 * x3 - 6 * x2 + 4;
w.z = -3 * x3 + 3 * x2 + 3 * x + 1;
w.w = x3;
return w / 6.0;
}
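// note: these are the uniform cubic B-spline basis weights; for any x the four components
// sum to 6 (the x^3, x^2 and x terms cancel), so the division by 6 normalizes them to sum to 1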
vec4 bcFilter(vec2 texcoord, vec2 texscale)
{
float fx = fract(texcoord.x);
float fy = fract(texcoord.y);
texcoord.x -= fx;
texcoord.y -= fy;
vec4 xcubic = cubic(fx);
vec4 ycubic = cubic(fy);
vec4 c = vec4(texcoord.x - 0.5, texcoord.x + 1.5, texcoord.y - 0.5, texcoord.y + 1.5);
vec4 s = vec4(xcubic.x + xcubic.y, xcubic.z + xcubic.w, ycubic.x + ycubic.y, ycubic.z + ycubic.w);
vec4 offset = c + vec4(xcubic.y, xcubic.w, ycubic.y, ycubic.w) / s;
vec4 sample0 = texture(textureSrc, vec2(offset.x, offset.z) * texscale);
vec4 sample1 = texture(textureSrc, vec2(offset.y, offset.z) * texscale);
vec4 sample2 = texture(textureSrc, vec2(offset.x, offset.w) * texscale);
vec4 sample3 = texture(textureSrc, vec2(offset.y, offset.w) * texscale);
float sx = s.x / (s.x + s.y);
float sy = s.z / (s.z + s.w);
return mix(
mix(sample3, sample2, sx),
mix(sample1, sample0, sx), sy);
}
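// note: this is the common 4-tap bicubic trick - each texture() call samples at a
// weight-derived offset so that, assuming the sampler uses bilinear filtering, a single
// fetch already blends a 2x2 texel block; four such fetches cover the full 4x4 bicubic
// footprint instead of sixteen individual fetches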
void main(){
colorOut0 = vec4(bcFilter(passUV*textureSrcResolution, vec2(1.0,1.0)/textureSrcResolution).rgb,1.0);
}
)";
const std::string RendererOutputShader::s_hermite_shader_source =
R"(#version 420
in vec4 gl_FragCoord;
in vec2 passUV;
layout(binding=0) uniform sampler2D textureSrc;
uniform vec2 textureSrcResolution;
uniform vec2 outputResolution;
layout(location = 0) out vec4 colorOut0;
// https://www.shadertoy.com/view/MllSzX
vec3 CubicHermite (vec3 A, vec3 B, vec3 C, vec3 D, float t)
{
float t2 = t*t;
float t3 = t*t*t;
vec3 a = -A/2.0 + (3.0*B)/2.0 - (3.0*C)/2.0 + D/2.0;
vec3 b = A - (5.0*B)/2.0 + 2.0*C - D / 2.0;
vec3 c = -A/2.0 + C/2.0;
vec3 d = B;
return a*t3 + b*t2 + c*t + d;
}
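// note: this evaluates to B at t=0 and to C at t=1, with A and D acting only as
// neighbouring control points that shape the tangents (a Catmull-Rom style interpolation
// between the two middle samples)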
vec3 BicubicHermiteTexture(vec2 uv, vec4 texelSize)
{
vec2 pixel = uv*texelSize.zw + 0.5;
vec2 frac = fract(pixel);
pixel = floor(pixel) / texelSize.zw - vec2(texelSize.xy/2.0);
vec4 doubleSize = texelSize*texelSize;
vec3 C00 = texture(textureSrc, pixel + vec2(-texelSize.x ,-texelSize.y)).rgb;
vec3 C10 = texture(textureSrc, pixel + vec2( 0.0 ,-texelSize.y)).rgb;
vec3 C20 = texture(textureSrc, pixel + vec2( texelSize.x ,-texelSize.y)).rgb;
vec3 C30 = texture(textureSrc, pixel + vec2( doubleSize.x,-texelSize.y)).rgb;
vec3 C01 = texture(textureSrc, pixel + vec2(-texelSize.x , 0.0)).rgb;
vec3 C11 = texture(textureSrc, pixel + vec2( 0.0 , 0.0)).rgb;
vec3 C21 = texture(textureSrc, pixel + vec2( texelSize.x , 0.0)).rgb;
vec3 C31 = texture(textureSrc, pixel + vec2( doubleSize.x, 0.0)).rgb;
vec3 C02 = texture(textureSrc, pixel + vec2(-texelSize.x , texelSize.y)).rgb;
vec3 C12 = texture(textureSrc, pixel + vec2( 0.0 , texelSize.y)).rgb;
vec3 C22 = texture(textureSrc, pixel + vec2( texelSize.x , texelSize.y)).rgb;
vec3 C32 = texture(textureSrc, pixel + vec2( doubleSize.x, texelSize.y)).rgb;
vec3 C03 = texture(textureSrc, pixel + vec2(-texelSize.x , doubleSize.y)).rgb;
vec3 C13 = texture(textureSrc, pixel + vec2( 0.0 , doubleSize.y)).rgb;
vec3 C23 = texture(textureSrc, pixel + vec2( texelSize.x , doubleSize.y)).rgb;
vec3 C33 = texture(textureSrc, pixel + vec2( doubleSize.x, doubleSize.y)).rgb;
vec3 CP0X = CubicHermite(C00, C10, C20, C30, frac.x);
vec3 CP1X = CubicHermite(C01, C11, C21, C31, frac.x);
vec3 CP2X = CubicHermite(C02, C12, C22, C32, frac.x);
vec3 CP3X = CubicHermite(C03, C13, C23, C33, frac.x);
return CubicHermite(CP0X, CP1X, CP2X, CP3X, frac.y);
}
void main(){
vec4 texelSize = vec4( 1.0 / outputResolution.xy, outputResolution.xy);
colorOut0 = vec4(BicubicHermiteTexture(passUV, texelSize), 1.0);
}
)";
RendererOutputShader::RendererOutputShader(const std::string& vertex_source, const std::string& fragment_source)
{
m_vertex_shader = g_renderer->shader_create(RendererShader::ShaderType::kVertex, 0, 0, vertex_source, false, false);
m_fragment_shader = g_renderer->shader_create(RendererShader::ShaderType::kFragment, 0, 0, fragment_source, false, false);
m_vertex_shader->PreponeCompilation(true);
m_fragment_shader->PreponeCompilation(true);
if (!m_vertex_shader->WaitForCompiled())
throw std::exception();
if(!m_fragment_shader->WaitForCompiled())
throw std::exception();
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
m_attributes[0].m_loc_texture_src_resolution = m_vertex_shader->GetUniformLocation("textureSrcResolution");
m_attributes[0].m_loc_input_resolution = m_vertex_shader->GetUniformLocation("inputResolution");
m_attributes[0].m_loc_output_resolution = m_vertex_shader->GetUniformLocation("outputResolution");
m_attributes[1].m_loc_texture_src_resolution = m_fragment_shader->GetUniformLocation("textureSrcResolution");
m_attributes[1].m_loc_input_resolution = m_fragment_shader->GetUniformLocation("inputResolution");
m_attributes[1].m_loc_output_resolution = m_fragment_shader->GetUniformLocation("outputResolution");
}
else
{
cemuLog_logDebug(LogType::Force, "RendererOutputShader() - todo for Vulkan");
m_attributes[0].m_loc_texture_src_resolution = -1;
m_attributes[0].m_loc_input_resolution = -1;
m_attributes[0].m_loc_output_resolution = -1;
m_attributes[1].m_loc_texture_src_resolution = -1;
m_attributes[1].m_loc_input_resolution = -1;
m_attributes[1].m_loc_output_resolution = -1;
}
}
void RendererOutputShader::SetUniformParameters(const LatteTextureView& texture_view, const Vector2i& input_res, const Vector2i& output_res) const
{
float res[2];
// vertex shader
if (m_attributes[0].m_loc_texture_src_resolution != -1)
{
res[0] = (float)texture_view.baseTexture->width;
res[1] = (float)texture_view.baseTexture->height;
m_vertex_shader->SetUniform2fv(m_attributes[0].m_loc_texture_src_resolution, res, 1);
}
if (m_attributes[0].m_loc_input_resolution != -1)
{
res[0] = (float)input_res.x;
res[1] = (float)input_res.y;
m_vertex_shader->SetUniform2fv(m_attributes[0].m_loc_input_resolution, res, 1);
}
if (m_attributes[0].m_loc_output_resolution != -1)
{
res[0] = (float)output_res.x;
res[1] = (float)output_res.y;
m_vertex_shader->SetUniform2fv(m_attributes[0].m_loc_output_resolution, res, 1);
}
// fragment shader
if (m_attributes[1].m_loc_texture_src_resolution != -1)
{
res[0] = (float)texture_view.baseTexture->width;
res[1] = (float)texture_view.baseTexture->height;
m_fragment_shader->SetUniform2fv(m_attributes[1].m_loc_texture_src_resolution, res, 1);
}
if (m_attributes[1].m_loc_input_resolution != -1)
{
res[0] = (float)input_res.x;
res[1] = (float)input_res.y;
m_fragment_shader->SetUniform2fv(m_attributes[1].m_loc_input_resolution, res, 1);
}
if (m_attributes[1].m_loc_output_resolution != -1)
{
res[0] = (float)output_res.x;
res[1] = (float)output_res.y;
m_fragment_shader->SetUniform2fv(m_attributes[1].m_loc_output_resolution, res, 1);
}
}
RendererOutputShader* RendererOutputShader::s_copy_shader;
RendererOutputShader* RendererOutputShader::s_copy_shader_ud;
RendererOutputShader* RendererOutputShader::s_bicubic_shader;
RendererOutputShader* RendererOutputShader::s_bicubic_shader_ud;
RendererOutputShader* RendererOutputShader::s_hermit_shader;
RendererOutputShader* RendererOutputShader::s_hermit_shader_ud;
std::string RendererOutputShader::GetOpenGlVertexSource(bool render_upside_down)
{
// vertex shader
std::ostringstream vertex_source;
vertex_source <<
R"(#version 400
out vec2 passUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main(){
vec2 vPos;
vec2 vUV;
int vID = gl_VertexID;
)";
if (render_upside_down)
{
vertex_source <<
R"( if( vID == 0 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,0.0); }
else if( vID == 1 ) { vPos = vec2(-1.0,1.0); vUV = vec2(0.0,0.0); }
else if( vID == 2 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,1.0); }
else if( vID == 3 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,1.0); }
else if( vID == 4 ) { vPos = vec2(1.0,-1.0); vUV = vec2(1.0,1.0); }
else if( vID == 5 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,0.0); }
)";
}
else
{
vertex_source <<
R"( if( vID == 0 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,1.0); }
else if( vID == 1 ) { vPos = vec2(-1.0,1.0); vUV = vec2(0.0,1.0); }
else if( vID == 2 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,0.0); }
else if( vID == 3 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,0.0); }
else if( vID == 4 ) { vPos = vec2(1.0,-1.0); vUV = vec2(1.0,0.0); }
else if( vID == 5 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,1.0); }
)";
}
vertex_source <<
R"( passUV = vUV;
gl_Position = vec4(vPos, 0.0, 1.0);
}
)";
return vertex_source.str();
}
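// Since the generated vertex shader derives both position and UV from gl_VertexID, a
// fullscreen pass needs no vertex buffer or attribute setup. A minimal sketch of how such
// a shader could be driven (illustrative only, 'outputProgram' is a hypothetical handle):
// glUseProgram(outputProgram);
// glDrawArrays(GL_TRIANGLES, 0, 6); // 6 vertex IDs -> two triangles covering the screen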
std::string RendererOutputShader::GetVulkanVertexSource(bool render_upside_down)
{
// vertex shader
std::ostringstream vertex_source;
vertex_source <<
R"(#version 450
layout(location = 0) out vec2 passUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main(){
vec2 vPos;
vec2 vUV;
int vID = gl_VertexIndex;
)";
if (render_upside_down)
{
vertex_source <<
R"( if( vID == 0 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,0.0); }
else if( vID == 1 ) { vPos = vec2(-1.0,1.0); vUV = vec2(0.0,0.0); }
else if( vID == 2 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,1.0); }
else if( vID == 3 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,1.0); }
else if( vID == 4 ) { vPos = vec2(1.0,-1.0); vUV = vec2(1.0,1.0); }
else if( vID == 5 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,0.0); }
)";
}
else
{
vertex_source <<
R"( if( vID == 0 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,1.0); }
else if( vID == 1 ) { vPos = vec2(-1.0,1.0); vUV = vec2(0.0,1.0); }
else if( vID == 2 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,0.0); }
else if( vID == 3 ) { vPos = vec2(-1.0,-1.0); vUV = vec2(0.0,0.0); }
else if( vID == 4 ) { vPos = vec2(1.0,-1.0); vUV = vec2(1.0,0.0); }
else if( vID == 5 ) { vPos = vec2(1.0,1.0); vUV = vec2(1.0,1.0); }
)";
}
vertex_source <<
R"( passUV = vUV;
gl_Position = vec4(vPos, 0.0, 1.0);
}
)";
return vertex_source.str();
}
void RendererOutputShader::InitializeStatic()
{
std::string vertex_source, vertex_source_ud;
// vertex shader
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
vertex_source = GetOpenGlVertexSource(false);
vertex_source_ud = GetOpenGlVertexSource(true);
s_copy_shader = new RendererOutputShader(vertex_source, s_copy_shader_source);
s_copy_shader_ud = new RendererOutputShader(vertex_source_ud, s_copy_shader_source);
s_bicubic_shader = new RendererOutputShader(vertex_source, s_bicubic_shader_source);
s_bicubic_shader_ud = new RendererOutputShader(vertex_source_ud, s_bicubic_shader_source);
s_hermit_shader = new RendererOutputShader(vertex_source, s_hermite_shader_source);
s_hermit_shader_ud = new RendererOutputShader(vertex_source_ud, s_hermite_shader_source);
}
else
{
vertex_source = GetVulkanVertexSource(false);
vertex_source_ud = GetVulkanVertexSource(true);
s_copy_shader = new RendererOutputShader(vertex_source, s_copy_shader_source);
s_copy_shader_ud = new RendererOutputShader(vertex_source_ud, s_copy_shader_source);
/* s_bicubic_shader = new RendererOutputShader(vertex_source, s_bicubic_shader_source); TODO
s_bicubic_shader_ud = new RendererOutputShader(vertex_source_ud, s_bicubic_shader_source);
s_hermit_shader = new RendererOutputShader(vertex_source, s_hermite_shader_source);
s_hermit_shader_ud = new RendererOutputShader(vertex_source_ud, s_hermite_shader_source);*/
}
}
| 12,548
|
C++
|
.cpp
| 315
| 37.215873
| 146
| 0.688236
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,231
|
Renderer.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Renderer.cpp
|
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "gui/guiWrapper.h"
#include "config/CemuConfig.h"
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include <imgui.h>
#include "imgui/imgui_extension.h"
#include <png.h>
#include "config/ActiveSettings.h"
#include <wx/image.h>
#include <wx/dataobj.h>
#include <wx/clipbrd.h>
#include <wx/log.h>
std::unique_ptr<Renderer> g_renderer;
bool Renderer::GetVRAMInfo(int& usageInMB, int& totalInMB) const
{
usageInMB = totalInMB = -1;
#if BOOST_OS_WINDOWS
if (m_dxgi_wrapper)
{
DXGI_QUERY_VIDEO_MEMORY_INFO info{};
if (m_dxgi_wrapper->QueryVideoMemoryInfo(info))
{
totalInMB = (info.Budget / 1000) / 1000;
usageInMB = (info.CurrentUsage / 1000) / 1000;
return true;
}
}
#endif
return false;
}
void Renderer::Initialize()
{
// imgui
imguiFontAtlas = new ImFontAtlas();
imguiFontAtlas->AddFontDefault();
auto setupContext = [](ImGuiContext* context){
ImGui::SetCurrentContext(context);
ImGuiIO& io = ImGui::GetIO();
io.WantSaveIniSettings = false;
io.IniFilename = nullptr;
};
imguiTVContext = ImGui::CreateContext(imguiFontAtlas);
imguiPadContext = ImGui::CreateContext(imguiFontAtlas);
setupContext(imguiTVContext);
setupContext(imguiPadContext);
}
void Renderer::Shutdown()
{
// imgui
ImGui::DestroyContext(imguiTVContext);
ImGui::DestroyContext(imguiPadContext);
ImGui_ClearFonts();
delete imguiFontAtlas;
}
bool Renderer::ImguiBegin(bool mainWindow)
{
sint32 w = 0, h = 0;
if(mainWindow)
gui_getWindowPhysSize(w, h);
else if(gui_isPadWindowOpen())
gui_getPadWindowPhysSize(w, h);
else
return false;
if (w == 0 || h == 0)
return false;
// select the right context
ImGui::SetCurrentContext(mainWindow ? imguiTVContext : imguiPadContext);
const Vector2f window_size{ (float)w,(float)h };
auto& io = ImGui::GetIO();
io.DisplaySize = { window_size.x, window_size.y }; // should only be updated in the renderer and only when needed
ImGui_PrecacheFonts();
return true;
}
uint8 Renderer::SRGBComponentToRGB(uint8 ci)
{
const float cs = (float)ci / 255.0f;
float cl;
if (cs <= 0.04045)
cl = cs / 12.92f;
else
cl = std::pow((cs + 0.055f) / 1.055f, 2.4f);
cl = std::min(cl, 1.0f);
return (uint8)(cl * 255.0f);
}
uint8 Renderer::RGBComponentToSRGB(uint8 cli)
{
const float cl = (float)cli / 255.0f;
float cs;
if (cl < 0.0031308)
cs = 12.92f * cl;
else
cs = 1.055f * std::pow(cl, 0.41666f) - 0.055f;
cs = std::max(std::min(cs, 1.0f), 0.0f);
return (uint8)(cs * 255.0f);
}
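// note: the two helpers above implement the standard piecewise sRGB transfer functions
// (linear segment below the 0.04045 / 0.0031308 cutoffs, a ~2.4 power curve above) and are
// intended to be approximate inverses of each other, up to 8-bit quantization error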
static std::optional<fs::path> GenerateScreenshotFilename(bool isDRC)
{
fs::path screendir = ActiveSettings::GetUserDataPath("screenshots");
// build screenshot name with format Screenshot_YYYY-MM-DD_HH-MM-SS[_GamePad].png
// if the file already exists add a suffix counter (_2.png, _3.png etc)
std::time_t time_t = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
std::tm* tm = std::localtime(&time_t);
std::string screenshotFileName = fmt::format("Screenshot_{:04}-{:02}-{:02}_{:02}-{:02}-{:02}", tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
if (isDRC)
screenshotFileName.append("_GamePad");
fs::path screenshotPath;
for(sint32 i=0; i<999; i++)
{
screenshotPath = screendir;
if (i == 0)
screenshotPath.append(fmt::format("{}.png", screenshotFileName));
else
screenshotPath.append(fmt::format("{}_{}.png", screenshotFileName, i + 1));
std::error_code ec;
bool exists = fs::exists(screenshotPath, ec);
if (!ec && !exists)
return screenshotPath;
}
return std::nullopt;
}
std::mutex s_clipboardMutex;
static bool SaveScreenshotToClipboard(const wxImage &image)
{
bool success = false;
s_clipboardMutex.lock();
if (wxTheClipboard->Open())
{
wxTheClipboard->SetData(new wxImageDataObject(image));
wxTheClipboard->Close();
success = true;
}
s_clipboardMutex.unlock();
return success;
}
static bool SaveScreenshotToFile(const wxImage &image, bool mainWindow)
{
auto path = GenerateScreenshotFilename(!mainWindow);
if (!path) return false;
std::error_code ec;
fs::create_directories(path->parent_path(), ec);
if (ec) return false;
// suspend wxWidgets logging for the lifetime of this object, to prevent a message box if wxImage::SaveFile fails
wxLogNull _logNo;
return image.SaveFile(path->wstring());
}
static void ScreenshotThread(std::vector<uint8> data, bool save_screenshot, int width, int height, bool mainWindow)
{
#if BOOST_OS_WINDOWS
// on Windows wxWidgets uses the OLE API for the clipboard
// to make this work we need to call OleInitialize() on the same thread
OleInitialize(nullptr);
#endif
wxImage image(width, height, data.data(), true);
if (mainWindow)
{
if(SaveScreenshotToClipboard(image))
{
if (!save_screenshot)
LatteOverlay_pushNotification("Screenshot saved to clipboard", 2500);
}
else
{
LatteOverlay_pushNotification("Failed to open clipboard", 2500);
}
}
if (save_screenshot)
{
if (SaveScreenshotToFile(image, mainWindow))
{
if (mainWindow)
LatteOverlay_pushNotification("Screenshot saved", 2500);
}
else
{
LatteOverlay_pushNotification("Failed to save screenshot to file", 2500);
}
}
}
void Renderer::SaveScreenshot(const std::vector<uint8>& rgb_data, int width, int height, bool mainWindow) const
{
const bool save_screenshot = GetConfig().save_screenshot;
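// note: rgb_data is passed to the thread by value, so the detached thread owns its own
// copy of the pixel data and does not rely on the caller keeping the buffer alive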
std::thread(ScreenshotThread, rgb_data, save_screenshot, width, height, mainWindow).detach();
}
| 5,478
|
C++
|
.cpp
| 183
| 27.601093
| 178
| 0.732
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,232
|
LatteTextureViewVk.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/LatteTextureViewVk.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureViewVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
uint32 LatteTextureVk_AdjustTextureCompSel(Latte::E_GX2SURFFMT format, uint32 compSel)
{
switch (format)
{
case Latte::E_GX2SURFFMT::R8_UNORM: // R8 is replicated on all channels (while OpenGL would return 1.0 for BGA instead)
case Latte::E_GX2SURFFMT::R8_SNORM: // probably the same as _UNORM, but needs testing
if (compSel >= 1 && compSel <= 3)
compSel = 0;
break;
case Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM: // order of components is reversed (RGBA -> ABGR)
if (compSel >= 0 && compSel <= 3)
compSel = 3 - compSel;
break;
case Latte::E_GX2SURFFMT::BC4_UNORM:
case Latte::E_GX2SURFFMT::BC4_SNORM:
if (compSel >= 1 && compSel <= 3)
compSel = 0;
break;
case Latte::E_GX2SURFFMT::BC5_UNORM:
case Latte::E_GX2SURFFMT::BC5_SNORM:
// RG maps to RG
// B maps to ?
// A maps to G (guessed)
if (compSel == 3)
compSel = 1; // read Alpha as Green
break;
case Latte::E_GX2SURFFMT::A2_B10_G10_R10_UNORM:
// reverse components (Wii U: ABGR, OpenGL: RGBA)
// used in Resident Evil Revelations
if (compSel >= 0 && compSel <= 3)
compSel = 3 - compSel;
break;
case Latte::E_GX2SURFFMT::X24_G8_UINT:
// map everything to alpha?
if (compSel >= 0 && compSel <= 3)
compSel = 3;
break;
case Latte::E_GX2SURFFMT::R4_G4_UNORM:
// red and green swapped
if (compSel == 0)
compSel = 1;
else if (compSel == 1)
compSel = 0;
break;
default:
break;
}
return compSel;
}
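// note: compSel follows the GX2 component-select encoding also used by the swizzle table
// in CreateView() below - 0..3 select R/G/B/A while 4 and 5 select the constants 0 and 1 -
// so the adjustments above only ever remap selectors within the 0..3 range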
LatteTextureViewVk::LatteTextureViewVk(VkDevice device, LatteTextureVk* texture, Latte::E_DIM dim, Latte::E_GX2SURFFMT format, sint32 firstMip, sint32 mipCount, sint32 firstSlice, sint32 sliceCount)
: LatteTextureView(texture, firstMip, mipCount, firstSlice, sliceCount, dim, format), m_device(device)
{
if(texture->overwriteInfo.hasFormatOverwrite)
{
cemu_assert_debug(format == texture->format); // if format overwrite is used, the texture is no longer taking part in aliasing and the format of any view has to match
m_format = texture->GetFormat();
}
else if (dim != texture->dim || format != texture->format)
{
VulkanRenderer::FormatInfoVK texFormatInfo;
VulkanRenderer::GetInstance()->GetTextureFormatInfoVK(format, texture->isDepth, dim, 0, 0, &texFormatInfo);
m_format = texFormatInfo.vkImageFormat;
}
else
m_format = texture->GetFormat();
m_uniqueId = VulkanRenderer::GetInstance()->GenUniqueId();
}
LatteTextureViewVk::~LatteTextureViewVk()
{
while (!list_descriptorSets.empty())
delete list_descriptorSets[0];
if (m_smallCacheView0)
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(m_smallCacheView0);
if (m_smallCacheView1)
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(m_smallCacheView1);
if (m_fallbackCache)
{
for (auto& itr : *m_fallbackCache)
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(itr.second);
delete m_fallbackCache;
m_fallbackCache = nullptr;
}
}
VKRObjectTextureView* LatteTextureViewVk::CreateView(uint32 gpuSamplerSwizzle)
{
uint32 compSelR = (gpuSamplerSwizzle >> 16) & 0x7;
uint32 compSelG = (gpuSamplerSwizzle >> 19) & 0x7;
uint32 compSelB = (gpuSamplerSwizzle >> 22) & 0x7;
uint32 compSelA = (gpuSamplerSwizzle >> 25) & 0x7;
compSelR = LatteTextureVk_AdjustTextureCompSel(format, compSelR);
compSelG = LatteTextureVk_AdjustTextureCompSel(format, compSelG);
compSelB = LatteTextureVk_AdjustTextureCompSel(format, compSelB);
compSelA = LatteTextureVk_AdjustTextureCompSel(format, compSelA);
VkImageViewCreateInfo viewInfo{};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = GetBaseImage()->GetImageObj()->m_image;
viewInfo.viewType = GetImageViewTypeFromGX2Dim(dim);
viewInfo.format = m_format;
viewInfo.subresourceRange.aspectMask = GetBaseImage()->GetImageAspect();
if (viewInfo.subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // make sure stencil is never set, we only support sampling depth for now
viewInfo.subresourceRange.baseMipLevel = firstMip;
viewInfo.subresourceRange.levelCount = this->numMip;
if (viewInfo.viewType == VK_IMAGE_VIEW_TYPE_3D && baseTexture->Is3DTexture())
{
cemu_assert_debug(firstMip == 0);
cemu_assert_debug(this->numSlice == baseTexture->depth);
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
}
else
{
viewInfo.subresourceRange.baseArrayLayer = firstSlice;
viewInfo.subresourceRange.layerCount = this->numSlice;
}
static const VkComponentSwizzle swizzle[] =
{
VK_COMPONENT_SWIZZLE_R,
VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_A,
VK_COMPONENT_SWIZZLE_ZERO,
VK_COMPONENT_SWIZZLE_ONE,
VK_COMPONENT_SWIZZLE_ZERO,
VK_COMPONENT_SWIZZLE_ZERO
};
viewInfo.components.r = swizzle[compSelR];
viewInfo.components.g = swizzle[compSelG];
viewInfo.components.b = swizzle[compSelB];
viewInfo.components.a = swizzle[compSelA];
VkImageView view;
if (vkCreateImageView(m_device, &viewInfo, nullptr, &view) != VK_SUCCESS)
throw std::runtime_error("failed to create texture image view!");
return new VKRObjectTextureView(GetBaseImage()->GetImageObj(), view);
}
VKRObjectTextureView* LatteTextureViewVk::GetViewRGBA()
{
return GetSamplerView(0x06880000); // RGBA swizzle
}
VKRObjectTextureView* LatteTextureViewVk::GetSamplerView(uint32 gpuSamplerSwizzle)
{
gpuSamplerSwizzle &= 0x0FFF0000;
// look up view in cache
// intentionally using unrolled code here instead of a loop for a small performance gain
if (m_smallCacheSwizzle0 == gpuSamplerSwizzle)
return m_smallCacheView0;
if (m_smallCacheSwizzle1 == gpuSamplerSwizzle)
return m_smallCacheView1;
auto fallbackCache = m_fallbackCache;
if (m_fallbackCache)
{
const auto it = fallbackCache->find(gpuSamplerSwizzle);
if (it != fallbackCache->cend())
return it->second;
}
// not cached, create new view and store in cache
auto viewObj = CreateView(gpuSamplerSwizzle);
if (m_smallCacheSwizzle0 == CACHE_EMPTY_ENTRY)
{
m_smallCacheSwizzle0 = gpuSamplerSwizzle;
m_smallCacheView0 = viewObj;
}
else if (m_smallCacheSwizzle1 == CACHE_EMPTY_ENTRY)
{
m_smallCacheSwizzle1 = gpuSamplerSwizzle;
m_smallCacheView1 = viewObj;
}
else
{
if (!m_fallbackCache)
m_fallbackCache = new std::unordered_map<uint32, VKRObjectTextureView*>();
m_fallbackCache->insert_or_assign(gpuSamplerSwizzle, viewObj);
}
return viewObj;
}
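// design note: most views are only ever sampled with one or two distinct swizzle patterns,
// so the two inline slots keep the hot path off the heap-allocated fallback map, which is
// created lazily only for the rare views that need more entries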
VkSampler LatteTextureViewVk::GetDefaultTextureSampler(bool useLinearTexFilter)
{
VkSampler& sampler = GetViewRGBA()->m_textureDefaultSampler[useLinearTexFilter ? 1 : 0];
if (sampler != VK_NULL_HANDLE)
return sampler;
VkSamplerCreateInfo samplerInfo{};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
if (useLinearTexFilter)
{
samplerInfo.magFilter = VK_FILTER_LINEAR;
samplerInfo.minFilter = VK_FILTER_LINEAR;
}
else
{
samplerInfo.magFilter = VK_FILTER_NEAREST;
samplerInfo.minFilter = VK_FILTER_NEAREST;
}
if (vkCreateSampler(m_device, &samplerInfo, nullptr, &sampler) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create default sampler");
throw std::runtime_error("failed to create texture sampler!");
}
return sampler;
}
VkImageViewType LatteTextureViewVk::GetImageViewTypeFromGX2Dim(Latte::E_DIM dim)
{
switch (dim)
{
case Latte::E_DIM::DIM_1D:
return VK_IMAGE_VIEW_TYPE_1D;
case Latte::E_DIM::DIM_2D:
case Latte::E_DIM::DIM_2D_MSAA:
return VK_IMAGE_VIEW_TYPE_2D;
case Latte::E_DIM::DIM_2D_ARRAY:
return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case Latte::E_DIM::DIM_3D:
return VK_IMAGE_VIEW_TYPE_3D;
case Latte::E_DIM::DIM_CUBEMAP:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
default:
cemu_assert_unimplemented();
}
return VK_IMAGE_VIEW_TYPE_2D;
}
| 7,993
|
C++
|
.cpp
| 224
| 33.290179
| 198
| 0.763324
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,233
|
VKRPipelineInfo.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VKRPipelineInfo.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "imgui/imgui_impl_vulkan.h"
#include "imgui/imgui_extension.h"
#include "config/CemuConfig.h"
PipelineInfo::PipelineInfo(uint64 minimalStateHash, uint64 pipelineHash, LatteFetchShader* fetchShader, LatteDecompilerShader* vertexShader, LatteDecompilerShader* pixelShader, LatteDecompilerShader* geometryShader)
{
this->minimalStateHash = minimalStateHash;
this->stateHash = pipelineHash;
this->fetchShader = fetchShader;
this->vertexShader = vertexShader;
this->geometryShader = geometryShader;
this->pixelShader = pixelShader;
this->vertexShaderVk = vertexShader ? (RendererShaderVk*)vertexShader->shader : nullptr;
this->geometryShaderVk = geometryShader ? (RendererShaderVk*)geometryShader->shader : nullptr;
this->pixelShaderVk = pixelShader ? (RendererShaderVk*)pixelShader->shader : nullptr;
// init VKRObjPipeline
m_vkrObjPipeline = new VKRObjectPipeline();
m_vkrObjPipeline->pipeline = VK_NULL_HANDLE;
// track dependency with shaders
if (vertexShaderVk)
vertexShaderVk->TrackDependency(this);
if (geometryShaderVk)
geometryShaderVk->TrackDependency(this);
if (pixelShaderVk)
pixelShaderVk->TrackDependency(this);
// "Accurate barriers" is usually enabled globally but since the CPU cost is substantial we allow users to disable it (debug -> 'Accurate barriers' option)
// We always force accurate barriers for known problematic shaders
if (pixelShader)
{
if (pixelShader->baseHash == 0x6f6f6e7b9aae57af && pixelShader->auxHash == 0x00078787f9249249) // BotW lava
neverSkipAccurateBarrier = true;
if (pixelShader->baseHash == 0x4c0bd596e3aef4a6 && pixelShader->auxHash == 0x003c3c3fc9269249) // BotW foam layer for water on the bottom of waterfalls
neverSkipAccurateBarrier = true;
}
}
PipelineInfo::~PipelineInfo()
{
if (rectEmulationGS)
{
delete rectEmulationGS;
rectEmulationGS = nullptr;
}
// delete descriptor sets
while (!pixel_ds_cache.empty())
{
VkDescriptorSetInfo* dsInfo = pixel_ds_cache.begin()->second;
delete dsInfo;
}
while (!geometry_ds_cache.empty())
{
VkDescriptorSetInfo* dsInfo = geometry_ds_cache.begin()->second;
delete dsInfo;
}
while (!vertex_ds_cache.empty())
{
VkDescriptorSetInfo* dsInfo = vertex_ds_cache.begin()->second;
delete dsInfo;
}
// disassociate from shaders
if (vertexShaderVk)
vertexShaderVk->RemoveDependency(this);
if (geometryShaderVk)
geometryShaderVk->RemoveDependency(this);
if (pixelShaderVk)
pixelShaderVk->RemoveDependency(this);
// queue pipeline for destruction
if (m_vkrObjPipeline)
{
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(m_vkrObjPipeline);
m_vkrObjPipeline = nullptr;
}
// remove from cache
VulkanRenderer::GetInstance()->unregisterGraphicsPipeline(this);
}
| 3,097
|
C++
|
.cpp
| 79
| 36.936709
| 215
| 0.795606
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,234
|
VsyncDriver.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VsyncDriver.cpp
|
#include "gui/MainWindow.h"
#if BOOST_OS_WINDOWS
#include <Windows.h>
typedef LONG NTSTATUS;
typedef UINT32 D3DKMT_HANDLE;
typedef UINT D3DDDI_VIDEO_PRESENT_SOURCE_ID;
typedef struct _D3DKMT_OPENADAPTERFROMHDC
{
HDC hDc;
D3DKMT_HANDLE hAdapter;
LUID AdapterLuid;
D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
} D3DKMT_OPENADAPTERFROMHDC;
typedef struct _D3DKMT_WAITFORVERTICALBLANKEVENT {
D3DKMT_HANDLE hAdapter;
D3DKMT_HANDLE hDevice;
D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId;
} D3DKMT_WAITFORVERTICALBLANKEVENT;
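// The typedefs above are minimal local redeclarations of the D3DKMT structures normally
// provided by the Windows driver-kit headers (their layout is assumed to match the WDK
// definitions); declaring them here lets the two entry points be resolved from gdi32.dll
// at runtime via GetProcAddress without adding a WDK header dependency.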
class DeviceVsyncHandler
{
public:
DeviceVsyncHandler(void(*cbVSync)()) : m_vsyncDriverVSyncCb(cbVSync)
{
m_shutdownThread = false;
if (!pfnD3DKMTOpenAdapterFromHdc)
{
HMODULE hModuleGDI = LoadLibraryA("gdi32.dll");
*(void**)&pfnD3DKMTOpenAdapterFromHdc = GetProcAddress(hModuleGDI, "D3DKMTOpenAdapterFromHdc");
*(void**)&pfnD3DKMTWaitForVerticalBlankEvent = GetProcAddress(hModuleGDI, "D3DKMTWaitForVerticalBlankEvent");
}
m_thd = std::thread(&DeviceVsyncHandler::vsyncThread, this);
}
~DeviceVsyncHandler()
{
m_shutdownThread = true;
cemu_assert_debug(m_thd.joinable());
m_thd.join();
}
void notifyWindowPosChanged()
{
m_checkMonitorChange = true;
}
private:
bool HasMonitorChanged()
{
HWND hWnd = (HWND)g_mainFrame->GetRenderCanvasHWND();
if (hWnd == 0)
return true;
HMONITOR hMonitor = MonitorFromWindow(hWnd, MONITOR_DEFAULTTONEAREST);
MONITORINFOEXW monitorInfo{};
monitorInfo.cbSize = sizeof(monitorInfo);
if (GetMonitorInfoW(hMonitor, &monitorInfo) == 0)
return true;
if (wcscmp(monitorInfo.szDevice, m_activeMonitorDevice) == 0)
return false;
return true;
}
HRESULT GetAdapterHandleFromHwnd(D3DKMT_HANDLE* phAdapter, UINT* pOutput)
{
if (!g_mainFrame)
return E_FAIL;
HWND hWnd = (HWND)g_mainFrame->GetRenderCanvasHWND();
if (hWnd == 0)
return E_FAIL;
wcsncpy(m_activeMonitorDevice, L"", 32); // reset remembered monitor device
m_checkMonitorChange = false;
D3DKMT_OPENADAPTERFROMHDC OpenAdapterData;
*phAdapter = NULL;
*pOutput = 0;
HMONITOR hMonitor = MonitorFromWindow(hWnd, MONITOR_DEFAULTTONEAREST);
MONITORINFOEXW monitorInfo{};
monitorInfo.cbSize = sizeof(monitorInfo);
if (GetMonitorInfoW(hMonitor, &monitorInfo) == 0)
return E_FAIL;
HDC hdc = CreateDCW(NULL, monitorInfo.szDevice, NULL, NULL);
if (hdc == NULL) {
return E_FAIL;
}
OpenAdapterData.hDc = hdc;
if (pfnD3DKMTOpenAdapterFromHdc(&OpenAdapterData) == 0)
{
DeleteDC(hdc);
*phAdapter = OpenAdapterData.hAdapter;
*pOutput = OpenAdapterData.VidPnSourceId;
// remember monitor device
wcsncpy(m_activeMonitorDevice, monitorInfo.szDevice, 32);
return S_OK;
}
DeleteDC(hdc);
return E_FAIL;
}
void vsyncThread()
{
D3DKMT_HANDLE hAdapter;
UINT hOutput;
GetAdapterHandleFromHwnd(&hAdapter, &hOutput);
int failCount = 0;
while (!m_shutdownThread)
{
D3DKMT_WAITFORVERTICALBLANKEVENT arg;
arg.hDevice = 0;
arg.hAdapter = hAdapter;
arg.VidPnSourceId = hOutput;
NTSTATUS r = pfnD3DKMTWaitForVerticalBlankEvent(&arg);
if (r != 0)
{
//cemuLog_log(LogType::Force, "Wait for VerticalBlank failed");
Sleep(1000 / 60);
failCount++;
if (failCount >= 10)
{
while (GetAdapterHandleFromHwnd(&hAdapter, &hOutput) != S_OK)
{
Sleep(1000 / 60);
if (m_shutdownThread)
return;
}
failCount = 0;
}
}
else
signalVsync();
if (m_checkMonitorChange)
{
m_checkMonitorChange = false;
if (HasMonitorChanged())
{
while (GetAdapterHandleFromHwnd(&hAdapter, &hOutput) != S_OK)
{
Sleep(1000 / 60);
if (m_shutdownThread)
return;
}
}
}
}
}
void signalVsync()
{
if(m_vsyncDriverVSyncCb)
m_vsyncDriverVSyncCb();
}
void setCallback(void(*cbVSync)())
{
m_vsyncDriverVSyncCb = cbVSync;
}
private:
NTSTATUS(__stdcall* pfnD3DKMTOpenAdapterFromHdc)(D3DKMT_OPENADAPTERFROMHDC* Arg1) = nullptr;
NTSTATUS(__stdcall* pfnD3DKMTWaitForVerticalBlankEvent)(const D3DKMT_WAITFORVERTICALBLANKEVENT* Arg1) = nullptr;
std::thread m_thd;
bool m_shutdownThread;
bool m_checkMonitorChange{};
WCHAR m_activeMonitorDevice[32];
void (*m_vsyncDriverVSyncCb)() = nullptr;
};
DeviceVsyncHandler* s_vsyncDriver = nullptr;
std::mutex s_driverAccess;
void VsyncDriver_startThread(void(*cbVSync)())
{
std::unique_lock<std::mutex> ul(s_driverAccess);
if (!s_vsyncDriver)
s_vsyncDriver = new DeviceVsyncHandler(cbVSync);
}
void VsyncDriver_notifyWindowPosChanged()
{
std::unique_lock<std::mutex> ul(s_driverAccess);
if (s_vsyncDriver)
s_vsyncDriver->notifyWindowPosChanged();
}
#else
void VsyncDriver_startThread(void(*cbVSync)())
{
cemu_assert_unimplemented();
}
void VsyncDriver_notifyWindowPosChanged()
{
}
#endif
| 4,918
|
C++
|
.cpp
| 178
| 24.460674
| 113
| 0.727602
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,235
|
VulkanAPI.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#define VKFUNC_DEFINE
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include <numeric> // for std::iota
#if BOOST_OS_LINUX || BOOST_OS_MACOS
#include <dlfcn.h>
#endif
#define VULKAN_API_CPU_BENCHMARK 0 // if 1, Cemu will log the CPU time spent per Vulkan API function
bool g_vulkan_available = false;
#if VULKAN_API_CPU_BENCHMARK != 0
uint64 s_vulkanBenchmarkLastResultsTime = 0;
struct VulkanBenchmarkFuncInfo
{
std::string funcName;
uint64 cycles;
uint32 numCalls;
};
std::vector<VulkanBenchmarkFuncInfo*> s_vulkanBenchmarkFuncs;
template<typename TRet, typename... Args>
auto VkWrapperFuncGenTest(TRet (*func)(Args...), const char* name)
{
static VulkanBenchmarkFuncInfo _FuncInfo;
static auto _FuncPtrCopy = func;
TRet (*newFunc)(Args...);
if constexpr(std::is_void_v<TRet>)
{
newFunc = +[](Args... args) { uint64 t = __rdtsc(); _mm_mfence(); _FuncPtrCopy(args...); _mm_mfence(); _FuncInfo.cycles += (__rdtsc() - t); _FuncInfo.numCalls++; };
}
else
newFunc = +[](Args... args) -> TRet { uint64 t = __rdtsc(); _mm_mfence(); TRet r = _FuncPtrCopy(args...); _mm_mfence(); _FuncInfo.cycles += (__rdtsc() - t); _FuncInfo.numCalls++; return r; };
if(func && func != newFunc)
_FuncPtrCopy = func;
if(_FuncInfo.funcName.empty())
{
_FuncInfo = {.funcName = name, .cycles = 0, .numCalls = 0};
s_vulkanBenchmarkFuncs.emplace_back(&_FuncInfo);
}
return newFunc;
};
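// note: each distinct function signature gets its own template instantiation and therefore
// its own _FuncInfo/_FuncPtrCopy statics (Vulkan entry points that happen to share an
// identical signature would also share a counter); the _mm_mfence() pairs keep the rdtsc
// reads from being reordered around the wrapped call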
#endif
// called whenever a SwapBuffers occurs for the TV output
void VulkanBenchmarkPrintResults()
{
#if VULKAN_API_CPU_BENCHMARK != 0
// note: This could be done by hooking vk present functions
uint64 currentCycle = __rdtsc();
uint64 elapsedCycles = currentCycle - s_vulkanBenchmarkLastResultsTime;
s_vulkanBenchmarkLastResultsTime = currentCycle;
double elapsedCyclesDbl = (double)elapsedCycles;
cemuLog_log(LogType::Force, "--- Vulkan API CPU benchmark ---");
cemuLog_log(LogType::Force, "Elapsed cycles this frame: {:} | Current cycle {:} | NumFunc {:}", elapsedCycles, currentCycle, s_vulkanBenchmarkFuncs.size());
std::vector<sint32> sortedIndices(s_vulkanBenchmarkFuncs.size());
std::iota(sortedIndices.begin(), sortedIndices.end(), 0);
std::sort(sortedIndices.begin(), sortedIndices.end(),
[](int32_t a, int32_t b) {
return s_vulkanBenchmarkFuncs[a]->cycles > s_vulkanBenchmarkFuncs[b]->cycles;
});
for (sint32 idx : sortedIndices)
{
auto& func = s_vulkanBenchmarkFuncs[idx];
if(func->cycles == 0)
return;
cemuLog_log(LogType::Force, "{}: {} cycles ({:.4}%) {} calls", func->funcName.c_str(), func->cycles, ((double)func->cycles / elapsedCyclesDbl) * 100.0, func->numCalls);
func->cycles = 0;
func->numCalls = 0;
}
#endif
}
#if BOOST_OS_WINDOWS
bool InitializeGlobalVulkan()
{
const auto hmodule = LoadLibraryA("vulkan-1.dll");
if(g_vulkan_available)
return true;
if (hmodule == nullptr)
{
cemuLog_log(LogType::Force, "Vulkan loader not available. Outdated graphics driver or Vulkan runtime not installed?");
return false;
}
#define VKFUNC_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
if(!vkEnumerateInstanceVersion)
{
cemuLog_log(LogType::Force, "vkEnumerateInstanceVersion not available. Outdated graphics driver or Vulkan runtime?");
FreeLibrary(hmodule);
return false;
}
g_vulkan_available = true;
return true;
}
bool InitializeInstanceVulkan(VkInstance instance)
{
const auto hmodule = GetModuleHandleA("vulkan-1.dll");
if (hmodule == nullptr)
return false;
#define VKFUNC_INSTANCE_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
return true;
}
bool InitializeDeviceVulkan(VkDevice device)
{
const auto hmodule = GetModuleHandleA("vulkan-1.dll");
if (hmodule == nullptr)
return false;
#define VKFUNC_DEVICE_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#if VULKAN_API_CPU_BENCHMARK != 0
#define VKFUNC_DEFINE_CUSTOM(__func) __func = VkWrapperFuncGenTest(__func, #__func)
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#endif
return true;
}
#else
void* dlopen_vulkan_loader()
{
#if BOOST_OS_LINUX
void* vulkan_so = dlopen("libvulkan.so", RTLD_NOW);
if(!vulkan_so)
vulkan_so = dlopen("libvulkan.so.1", RTLD_NOW);
#elif BOOST_OS_MACOS
void* vulkan_so = dlopen("libMoltenVK.dylib", RTLD_NOW);
#endif
return vulkan_so;
}
bool InitializeGlobalVulkan()
{
void* vulkan_so = dlopen_vulkan_loader();
if(g_vulkan_available)
return true;
if (!vulkan_so)
{
cemuLog_log(LogType::Force, "Vulkan loader not available.");
return false;
}
#define VKFUNC_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
if(!vkEnumerateInstanceVersion)
{
cemuLog_log(LogType::Force, "vkEnumerateInstanceVersion not available. Outdated graphics driver or Vulkan runtime?");
return false;
}
g_vulkan_available = true;
return true;
}
bool InitializeInstanceVulkan(VkInstance instance)
{
void* vulkan_so = dlopen_vulkan_loader();
if (!vulkan_so)
return false;
#define VKFUNC_INSTANCE_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
return true;
}
bool InitializeDeviceVulkan(VkDevice device)
{
void* vulkan_so = dlopen_vulkan_loader();
if (!vulkan_so)
return false;
#define VKFUNC_DEVICE_INIT
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#if VULKAN_API_CPU_BENCHMARK != 0
#define VKFUNC_DEFINE_CUSTOM(__func) __func = VkWrapperFuncGenTest(__func, #__func)
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#endif
return true;
}
#endif
| 5,475
|
C++
|
.cpp
| 167
| 30.622754
| 193
| 0.741311
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,236
|
LatteTextureVk.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureViewVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
LatteTextureVk::LatteTextureVk(class VulkanRenderer* vkRenderer, Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels, uint32 swizzle,
Latte::E_HWTILEMODE tileMode, bool isDepth)
: LatteTexture(dim, physAddress, physMipAddress, format, width, height, depth, pitch, mipLevels, swizzle, tileMode, isDepth), m_vkr(vkRenderer)
{
vkObjTex = new VKRObjectTexture();
VkImageCreateInfo imageInfo{};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
sint32 effectiveBaseWidth = width;
sint32 effectiveBaseHeight = height;
sint32 effectiveBaseDepth = depth;
if (overwriteInfo.hasResolutionOverwrite)
{
effectiveBaseWidth = overwriteInfo.width;
effectiveBaseHeight = overwriteInfo.height;
effectiveBaseDepth = overwriteInfo.depth;
}
effectiveBaseDepth = std::max(1, effectiveBaseDepth);
imageInfo.extent.width = effectiveBaseWidth;
imageInfo.extent.height = effectiveBaseHeight;
imageInfo.mipLevels = mipLevels;
imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
if (dim == Latte::E_DIM::DIM_3D)
{
imageInfo.extent.depth = effectiveBaseDepth;
imageInfo.arrayLayers = 1;
imageInfo.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
}
else
{
imageInfo.extent.depth = 1;
imageInfo.arrayLayers = effectiveBaseDepth;
if (dim != Latte::E_DIM::DIM_1D && (effectiveBaseDepth % 6) == 0 && effectiveBaseWidth == effectiveBaseHeight)
imageInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
}
VulkanRenderer::FormatInfoVK texFormatInfo;
vkRenderer->GetTextureFormatInfoVK(format, isDepth, dim, effectiveBaseWidth, effectiveBaseHeight, &texFormatInfo);
cemu_assert_debug(hasStencil == ((texFormatInfo.vkImageAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0));
imageInfo.format = texFormatInfo.vkImageFormat;
vkObjTex->m_imageAspect = texFormatInfo.vkImageAspect;
if (isDepth == false && texFormatInfo.isCompressed)
{
imageInfo.flags |= VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT;
}
if (isDepth == false)
imageInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
if (isDepth)
{
imageInfo.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
else
{
if(Latte::IsCompressedFormat(format) == false && texFormatInfo.vkImageFormat != VK_FORMAT_R4G4_UNORM_PACK8) // Vulkan's R4G4 cant be used as a color attachment
imageInfo.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
}
if (dim == Latte::E_DIM::DIM_2D)
imageInfo.imageType = VK_IMAGE_TYPE_2D;
else if (dim == Latte::E_DIM::DIM_1D)
imageInfo.imageType = VK_IMAGE_TYPE_1D;
else if (dim == Latte::E_DIM::DIM_3D)
imageInfo.imageType = VK_IMAGE_TYPE_3D;
else if (dim == Latte::E_DIM::DIM_2D_ARRAY)
imageInfo.imageType = VK_IMAGE_TYPE_2D;
else if (dim == Latte::E_DIM::DIM_CUBEMAP)
imageInfo.imageType = VK_IMAGE_TYPE_2D;
else if (dim == Latte::E_DIM::DIM_2D_MSAA)
imageInfo.imageType = VK_IMAGE_TYPE_2D;
else
{
cemu_assert_unimplemented();
}
if (vkCreateImage(m_vkr->GetLogicalDevice(), &imageInfo, nullptr, &vkObjTex->m_image) != VK_SUCCESS)
m_vkr->UnrecoverableError("Failed to create texture image");
if (m_vkr->IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT)
{
VkDebugUtilsObjectNameInfoEXT objName{};
objName.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
objName.objectType = VK_OBJECT_TYPE_IMAGE;
objName.pNext = nullptr;
objName.objectHandle = (uint64_t)vkObjTex->m_image;
auto objNameStr = fmt::format("tex_{:08x}_fmt{:04x}", physAddress, (uint32)format);
objName.pObjectName = objNameStr.c_str();
vkSetDebugUtilsObjectNameEXT(m_vkr->GetLogicalDevice(), &objName);
}
vkObjTex->m_flags = imageInfo.flags;
vkObjTex->m_format = imageInfo.format;
// init layout array
m_layoutsMips = std::max(mipLevels, 1u); // todo - use effective mip count
m_layoutsDepth = std::max(depth, 1u);
if (Is3DTexture())
m_layouts.resize(m_layoutsMips, VK_IMAGE_LAYOUT_UNDEFINED); // one per mip
else
m_layouts.resize(m_layoutsMips * m_layoutsDepth, VK_IMAGE_LAYOUT_UNDEFINED); // one per layer per mip
}
LatteTextureVk::~LatteTextureVk()
{
cemu_assert_debug(views.empty());
m_vkr->surfaceCopy_notifyTextureRelease(this);
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(vkObjTex);
vkObjTex = nullptr;
}
LatteTextureView* LatteTextureVk::CreateView(Latte::E_DIM dim, Latte::E_GX2SURFFMT format, sint32 firstMip, sint32 mipCount, sint32 firstSlice, sint32 sliceCount)
{
cemu_assert_debug(mipCount > 0);
cemu_assert_debug(sliceCount > 0);
cemu_assert_debug((firstMip + mipCount) <= this->mipLevels);
cemu_assert_debug((firstSlice + sliceCount) <= this->depth);
return new LatteTextureViewVk(m_vkr->GetLogicalDevice(), this, dim, format, firstMip, mipCount, firstSlice, sliceCount);
}
void LatteTextureVk::AllocateOnHost()
{
auto allocationInfo = VulkanRenderer::GetInstance()->GetMemoryManager()->imageMemoryAllocate(GetImageObj()->m_image);
vkObjTex->m_allocation = allocationInfo;
}
| 5,368
|
C++
|
.cpp
| 119
| 42.764706
| 240
| 0.769849
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,237
|
VulkanRendererCore.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRendererCore.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LatteIndices.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "imgui/imgui_impl_vulkan.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "util/helpers/helpers.h"
extern bool hasValidFramebufferAttached;
// includes only states that may change during minimal drawcalls
uint64 VulkanRenderer::draw_calculateMinimalGraphicsPipelineHash(const LatteFetchShader* fetchShader, const LatteContextRegister& lcr)
{
uint64 stateHash = 0;
// fetch shader
for (auto& group : fetchShader->bufferGroups)
{
uint32 bufferStride = group.getCurrentBufferStride(lcr.GetRawView());
stateHash = std::rotl<uint64>(stateHash, 7);
stateHash += bufferStride * 3;
}
stateHash += fetchShader->getVkPipelineHashFragment();
stateHash = std::rotl<uint64>(stateHash, 7);
stateHash += lcr.GetRawView()[mmVGT_PRIMITIVE_TYPE];
stateHash = std::rotl<uint64>(stateHash, 7);
stateHash += lcr.GetRawView()[mmVGT_STRMOUT_EN];
stateHash = std::rotl<uint64>(stateHash, 7);
if(lcr.PA_CL_CLIP_CNTL.get_DX_RASTERIZATION_KILL())
stateHash += 0x333333;
return stateHash;
}
uint64 VulkanRenderer::draw_calculateGraphicsPipelineHash(const LatteFetchShader* fetchShader, const LatteDecompilerShader* vertexShader, const LatteDecompilerShader* geometryShader, const LatteDecompilerShader* pixelShader, const VKRObjectRenderPass* renderPassObj, const LatteContextRegister& lcr)
{
// note: vertexShader references a fetchShader (vertexShader->fetchShader) but it's not necessarily the one that is currently active
// this is because we try to separate dynamic state (mainly attribute offsets) from the actual attribute data layout and mapping (types and slots)
// on Vulkan this causes issues because we bake the attribute offsets, which may not match vertexShader->compatibleFetchShader, into the pipeline
// To avoid issues, always use the active fetch shader, not the one associated with the vertexShader object
// note 2:
// there is a secondary issue where we don't store all fetch shaders in the pipeline cache (only a single fetch shader is tied to each stored vertex shader)
// but we can probably trust drivers to not require pipeline recompilation if only the offsets differ
// An alternative would be to use VK_EXT_vertex_input_dynamic_state but it comes with minor overhead
// Regardless, the extension is not well supported as of writing this (July 2021, only 10% of GPUs support it on Windows. Nvidia only)
cemu_assert_debug(vertexShader->compatibleFetchShader->key == fetchShader->key); // fetch shaders must be layout compatible, but may have different offsets
uint64 stateHash;
stateHash = draw_calculateMinimalGraphicsPipelineHash(fetchShader, lcr);
stateHash = (stateHash >> 8) + (stateHash * 0x370531ull) % 0x7F980D3BF9B4639Dull;
uint32* ctxRegister = lcr.GetRawView();
if (vertexShader)
stateHash += vertexShader->baseHash;
stateHash = std::rotl<uint64>(stateHash, 13);
if (geometryShader)
stateHash += geometryShader->baseHash;
stateHash = std::rotl<uint64>(stateHash, 13);
if (pixelShader)
stateHash += pixelShader->baseHash + pixelShader->auxHash;
stateHash = std::rotl<uint64>(stateHash, 13);
uint32 polygonCtrl = lcr.PA_SU_SC_MODE_CNTL.getRawValue();
stateHash += polygonCtrl;
stateHash = std::rotl<uint64>(stateHash, 7);
stateHash += ctxRegister[Latte::REGADDR::PA_CL_CLIP_CNTL];
stateHash = std::rotl<uint64>(stateHash, 7);
const auto colorControlReg = ctxRegister[Latte::REGADDR::CB_COLOR_CONTROL];
stateHash += colorControlReg;
stateHash += ctxRegister[Latte::REGADDR::CB_TARGET_MASK];
const uint32 blendEnableMask = (colorControlReg >> 8) & 0xFF;
if (blendEnableMask)
{
for (auto i = 0; i < 8; ++i)
{
if (((blendEnableMask & (1 << i))) == 0)
continue;
stateHash = std::rotl<uint64>(stateHash, 7);
stateHash += ctxRegister[Latte::REGADDR::CB_BLEND0_CONTROL + i];
}
}
stateHash += renderPassObj->m_hashForPipeline;
uint32 depthControl = ctxRegister[Latte::REGADDR::DB_DEPTH_CONTROL];
bool stencilTestEnable = depthControl & 1;
if (stencilTestEnable)
{
stateHash += ctxRegister[mmDB_STENCILREFMASK];
stateHash = std::rotl<uint64>(stateHash, 17);
if(depthControl & (1<<7)) // back stencil enable
{
stateHash += ctxRegister[mmDB_STENCILREFMASK_BF];
stateHash = std::rotl<uint64>(stateHash, 13);
}
}
else
{
// zero out stencil related bits (8-31)
depthControl &= 0xFF;
}
stateHash = std::rotl<uint64>(stateHash, 17);
stateHash += depthControl;
// polygon offset
if (polygonCtrl & (1 << 11))
{
// front offset enabled
stateHash += 0x1111;
}
return stateHash;
}
void VulkanRenderer::draw_debugPipelineHashState()
{
cemu_assert_debug(false);
}
PipelineInfo* VulkanRenderer::draw_getCachedPipeline()
{
// todo - optimize m_pipeline_info_cache away and store directly in vk vertex shader
const auto fetchShader = LatteSHRC_GetActiveFetchShader();
const auto vertexShader = LatteSHRC_GetActiveVertexShader();
const auto it = m_pipeline_info_cache.find(vertexShader->baseHash);
if (it == m_pipeline_info_cache.cend())
return nullptr;
const auto geometryShader = LatteSHRC_GetActiveGeometryShader();
const auto pixelShader = LatteSHRC_GetActivePixelShader();
auto cachedFboVk = (CachedFBOVk*)m_state.activeFBO;
const uint64 stateHash = draw_calculateGraphicsPipelineHash(fetchShader, vertexShader, geometryShader, pixelShader, cachedFboVk->GetRenderPassObj(), LatteGPUState.contextNew);
const auto innerit = it->second.find(stateHash);
if (innerit == it->second.cend())
return nullptr;
return innerit->second;
}
void VulkanRenderer::unregisterGraphicsPipeline(PipelineInfo* pipelineInfo)
{
bool removedFromCache = false;
for (auto& topMapItr : m_pipeline_info_cache)
{
auto& subMap = topMapItr.second;
for (auto it = subMap.cbegin(); it != subMap.cend();)
{
if (it->second == pipelineInfo)
{
subMap.erase(it);
removedFromCache = true;
break;
}
++it;
}
if (removedFromCache)
break;
}
}
bool g_compilePipelineThreadInit{false};
std::mutex g_compilePipelineMutex;
std::condition_variable g_compilePipelineCondVar;
std::queue<PipelineCompiler*> g_compilePipelineRequests;
void compilePipeline_thread(sint32 threadIndex)
{
SetThreadName("compilePl");
#ifdef _WIN32
// one thread runs at normal priority while the others run at lower priority
if(threadIndex != 0)
SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_BELOW_NORMAL);
#endif
while (true)
{
std::unique_lock lock(g_compilePipelineMutex);
while (g_compilePipelineRequests.empty())
g_compilePipelineCondVar.wait(lock);
PipelineCompiler* request = g_compilePipelineRequests.front();
g_compilePipelineRequests.pop();
lock.unlock();
request->Compile(true, false, true);
delete request;
}
}
void compilePipelineThread_init()
{
uint32 numCompileThreads;
uint32 cpuCoreCount = GetPhysicalCoreCount();
if (cpuCoreCount <= 2)
numCompileThreads = 1;
else
numCompileThreads = 2 + (cpuCoreCount - 3); // 2 plus one additionally for every extra core above 3
numCompileThreads = std::min(numCompileThreads, 8u); // cap at 8
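// worked example of the heuristic above: 2 cores -> 1 thread, 4 cores -> 3 threads,
// 8 cores -> 7 threads, 12 cores -> 11, capped to 8 threads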
for (uint32_t i = 0; i < numCompileThreads; i++)
{
std::thread compileThread(compilePipeline_thread, i);
compileThread.detach();
}
}
void compilePipelineThread_queue(PipelineCompiler* v)
{
std::unique_lock lock(g_compilePipelineMutex);
g_compilePipelineRequests.push(std::move(v));
lock.unlock();
g_compilePipelineCondVar.notify_one();
}
// make a guess if a pipeline is not essential
// non-essential means that skipping these drawcalls shouldn't lead to permanently corrupted graphics
bool VulkanRenderer::IsAsyncPipelineAllowed(uint32 numIndices)
{
// frame debuggers don't handle async well (as of 2020)
if (IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT)
return false;
CachedFBOVk* currentFBO = m_state.activeFBO;
auto fboExtend = currentFBO->GetExtend();
if (fboExtend.width == 1600 && fboExtend.height == 1600)
return false; // Splatoon ink mechanics use 1600x1600 R8 and R8G8 framebuffers, this resolution is rare enough that we can just blacklist it globally
if (currentFBO->hasDepthBuffer())
return true; // aggressive filter but seems to work well so far
// small index count (3,4,5,6) is often associated with full-viewport quads (which are considered essential due to often being used to generate persistent textures)
if (numIndices <= 6)
{
return false;
}
return true;
}
// create graphics pipeline for current state
PipelineInfo* VulkanRenderer::draw_createGraphicsPipeline(uint32 indexCount)
{
if (!g_compilePipelineThreadInit)
{
compilePipelineThread_init();
g_compilePipelineThreadInit = true;
}
const auto fetchShader = LatteSHRC_GetActiveFetchShader();
const auto vertexShader = LatteSHRC_GetActiveVertexShader();
const auto geometryShader = LatteSHRC_GetActiveGeometryShader();
const auto pixelShader = LatteSHRC_GetActivePixelShader();
auto cachedFboVk = (CachedFBOVk*)m_state.activeFBO;
uint64 minimalStateHash = draw_calculateMinimalGraphicsPipelineHash(fetchShader, LatteGPUState.contextNew);
uint64 pipelineHash = draw_calculateGraphicsPipelineHash(fetchShader, vertexShader, geometryShader, pixelShader, cachedFboVk->GetRenderPassObj(), LatteGPUState.contextNew);
// create PipelineInfo
auto vkFBO = (CachedFBOVk*)(VulkanRenderer::GetInstance()->m_state.activeFBO);
PipelineInfo* pipelineInfo = new PipelineInfo(minimalStateHash, pipelineHash, fetchShader, vertexShader, pixelShader, geometryShader);
// register pipeline
uint64 vsBaseHash = vertexShader->baseHash;
auto it = m_pipeline_info_cache.emplace(vsBaseHash, robin_hood::unordered_flat_map<uint64, PipelineInfo*>());
auto& cache_map = it.first->second;
cache_map.emplace(pipelineHash, pipelineInfo);
// init pipeline compiler
PipelineCompiler* pipelineCompiler = new PipelineCompiler();
pipelineCompiler->InitFromCurrentGPUState(pipelineInfo, LatteGPUState.contextNew, vkFBO->GetRenderPassObj());
pipelineCompiler->TrackAsCached(vsBaseHash, pipelineHash);
// use heuristics based on parameter patterns to determine if the current drawcall is essential (non-skippable)
bool allowAsyncCompile = false;
if (GetConfig().async_compile)
allowAsyncCompile = IsAsyncPipelineAllowed(indexCount);
if (allowAsyncCompile)
{
// even when async is allowed, attempt synchronous creation first (which will immediately fail if the pipeline is not cached)
if (pipelineCompiler->Compile(false, true, true) == false)
{
// shaders or pipeline not cached -> asynchronous compilation
compilePipelineThread_queue(pipelineCompiler);
}
else
{
delete pipelineCompiler;
}
}
else
{
// synchronous compilation
pipelineCompiler->Compile(true, true, true);
delete pipelineCompiler;
}
return pipelineInfo;
}
PipelineInfo* VulkanRenderer::draw_getOrCreateGraphicsPipeline(uint32 indexCount)
{
auto cache_object = draw_getCachedPipeline();
if (cache_object != nullptr)
{
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug(cache_object->vertexShader == LatteSHRC_GetActiveVertexShader());
cemu_assert_debug(cache_object->geometryShader == LatteSHRC_GetActiveGeometryShader());
cemu_assert_debug(cache_object->pixelShader == LatteSHRC_GetActivePixelShader());
if (cache_object->fetchShader->key != LatteSHRC_GetActiveFetchShader()->key ||
cache_object->fetchShader->vkPipelineHashFragment != LatteSHRC_GetActiveFetchShader()->vkPipelineHashFragment)
{
debug_printf("Incompatible fetch shader %p %p\n", cache_object->fetchShader, LatteSHRC_GetActiveFetchShader());
assert_dbg();
}
uint64 calcMinimalHash = draw_calculateMinimalGraphicsPipelineHash(LatteSHRC_GetActiveFetchShader(), LatteGPUState.contextNew);
auto currentPrimitiveMode = LatteGPUState.contextNew.VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE();
cemu_assert_debug(cache_object->primitiveMode == currentPrimitiveMode);
cemu_assert_debug(cache_object->minimalStateHash == calcMinimalHash);
#endif
return cache_object;
}
//draw_debugPipelineHashState();
return draw_createGraphicsPipeline(indexCount);
}
void* VulkanRenderer::indexData_reserveIndexMemory(uint32 size, uint32& offset, uint32& bufferIndex)
{
auto& indexAllocator = this->memoryManager->getIndexAllocator();
auto resv = indexAllocator.AllocateBufferMemory(size, 32);
offset = resv.bufferOffset;
bufferIndex = resv.bufferIndex;
return resv.memPtr;
}
void VulkanRenderer::indexData_uploadIndexMemory(uint32 offset, uint32 size)
{
// does nothing since the index buffer memory is coherent
}
float s_vkUniformData[512 * 4];
void VulkanRenderer::uniformData_updateUniformVars(uint32 shaderStageIndex, LatteDecompilerShader* shader)
{
auto GET_UNIFORM_DATA_PTR = [](size_t index) { return s_vkUniformData + (index / 4); };
sint32 shaderAluConst;
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
shaderAluConst = 0x400;
break;
case LatteConst::ShaderType::Pixel:
shaderAluConst = 0;
break;
case LatteConst::ShaderType::Geometry:
shaderAluConst = 0; // geometry shader has no ALU const
break;
default:
UNREACHABLE;
}
if (shader->resourceMapping.uniformVarsBufferBindingPoint >= 0)
{
if (shader->uniform.list_ufTexRescale.empty() == false)
{
for (auto& entry : shader->uniform.list_ufTexRescale)
{
float* xyScale = LatteTexture_getEffectiveTextureScale(shader->shaderType, entry.texUnit);
memcpy(entry.currentValue, xyScale, sizeof(float) * 2);
memcpy(GET_UNIFORM_DATA_PTR(entry.uniformLocation), xyScale, sizeof(float) * 2);
}
}
if (shader->uniform.loc_alphaTestRef >= 0)
{
*GET_UNIFORM_DATA_PTR(shader->uniform.loc_alphaTestRef) = LatteGPUState.contextNew.SX_ALPHA_REF.get_ALPHA_TEST_REF();
}
if (shader->uniform.loc_pointSize >= 0)
{
const auto& pointSizeReg = LatteGPUState.contextNew.PA_SU_POINT_SIZE;
float pointWidth = (float)pointSizeReg.get_WIDTH() / 8.0f;
if (pointWidth == 0.0f)
pointWidth = 1.0f / 8.0f; // minimum size
*GET_UNIFORM_DATA_PTR(shader->uniform.loc_pointSize) = pointWidth;
}
if (shader->uniform.loc_remapped >= 0)
{
LatteBufferCache_LoadRemappedUniforms(shader, GET_UNIFORM_DATA_PTR(shader->uniform.loc_remapped));
}
if (shader->uniform.loc_uniformRegister >= 0)
{
uint32* uniformRegData = (uint32*)(LatteGPUState.contextRegister + mmSQ_ALU_CONSTANT0_0 + shaderAluConst);
memcpy(GET_UNIFORM_DATA_PTR(shader->uniform.loc_uniformRegister), uniformRegData, shader->uniform.count_uniformRegister * 16);
}
if (shader->uniform.loc_windowSpaceToClipSpaceTransform >= 0)
{
sint32 viewportWidth;
sint32 viewportHeight;
LatteRenderTarget_GetCurrentVirtualViewportSize(&viewportWidth, &viewportHeight); // always call after _updateViewport()
float* v = GET_UNIFORM_DATA_PTR(shader->uniform.loc_windowSpaceToClipSpaceTransform);
v[0] = 2.0f / (float)viewportWidth;
v[1] = 2.0f / (float)viewportHeight;
}
if (shader->uniform.loc_fragCoordScale >= 0)
{
LatteMRT::GetCurrentFragCoordScale(GET_UNIFORM_DATA_PTR(shader->uniform.loc_fragCoordScale));
}
if (shader->uniform.loc_verticesPerInstance >= 0)
{
*(int*)GET_UNIFORM_DATA_PTR(shader->uniform.loc_verticesPerInstance) = m_streamoutState.verticesPerInstance;
for (sint32 b = 0; b < LATTE_NUM_STREAMOUT_BUFFER; b++)
{
if (shader->uniform.loc_streamoutBufferBase[b] >= 0)
{
*(uint32*)GET_UNIFORM_DATA_PTR(shader->uniform.loc_streamoutBufferBase[b]) = m_streamoutState.buffer[b].ringBufferOffset;
}
}
}
// upload
const uint32 bufferAlignmentM1 = std::max(m_featureControl.limits.minUniformBufferOffsetAlignment, m_featureControl.limits.nonCoherentAtomSize) - 1;
const uint32 uniformSize = (shader->uniform.uniformRangeSize + bufferAlignmentM1) & ~bufferAlignmentM1;
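// Worked example (values assumed for illustration): with minUniformBufferOffsetAlignment = 0x100 and
// nonCoherentAtomSize = 0x40, bufferAlignmentM1 is 0xFF, so a uniformRangeSize of 0x104 bytes is rounded
// up to uniformSize = 0x200. Rounding to the larger of the two limits keeps the dynamic offset valid for
// vkCmdBindDescriptorSets and the flush range valid for vkFlushMappedMemoryRanges.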
auto waitWhileCondition = [&](std::function<bool()> condition) {
while (condition())
{
if (m_commandBufferSyncIndex == m_commandBufferIndex)
{
if (m_cmdBufferUniformRingbufIndices[m_commandBufferIndex] != m_uniformVarBufferReadIndex)
{
draw_endRenderPass();
SubmitCommandBuffer();
}
else
{
// submitting work would not change readIndex, so there's no way for conditions based on it to change
cemuLog_log(LogType::Force, "draw call overflowed and corrupted uniform ringbuffer. expect visual corruption");
cemu_assert_suspicious();
break;
}
}
WaitForNextFinishedCommandBuffer();
}
};
// wrap around if it doesn't fit consecutively
if (m_uniformVarBufferWriteIndex + uniformSize > UNIFORMVAR_RINGBUFFER_SIZE)
{
waitWhileCondition([&]() {
return m_uniformVarBufferReadIndex > m_uniformVarBufferWriteIndex || m_uniformVarBufferReadIndex == 0;
});
m_uniformVarBufferWriteIndex = 0;
}
auto ringBufRemaining = [&]() {
ssize_t ringBufferUsedBytes = (ssize_t)m_uniformVarBufferWriteIndex - m_uniformVarBufferReadIndex;
if (ringBufferUsedBytes < 0)
ringBufferUsedBytes += UNIFORMVAR_RINGBUFFER_SIZE;
return UNIFORMVAR_RINGBUFFER_SIZE - 1 - ringBufferUsedBytes;
};
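// Note: ringBufRemaining() reports one byte less than the true free space (UNIFORMVAR_RINGBUFFER_SIZE - 1 - used).
// Keeping one byte permanently unused disambiguates the "empty" case (writeIndex == readIndex) from the
// "completely full" case, so the wait condition below cannot stall on a full buffer that looks empty.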
waitWhileCondition([&]() {
return ringBufRemaining() < uniformSize;
});
const uint32 uniformOffset = m_uniformVarBufferWriteIndex;
memcpy(m_uniformVarBufferPtr + uniformOffset, s_vkUniformData, shader->uniform.uniformRangeSize);
m_uniformVarBufferWriteIndex += uniformSize;
// update dynamic offset
dynamicOffsetInfo.uniformVarBufferOffset[shaderStageIndex] = uniformOffset;
// flush if not coherent
if (!m_uniformVarBufferMemoryIsCoherent)
{
VkMappedMemoryRange flushedRange{};
flushedRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
flushedRange.memory = m_uniformVarBufferMemory;
flushedRange.offset = uniformOffset;
flushedRange.size = uniformSize;
vkFlushMappedMemoryRanges(m_logicalDevice, 1, &flushedRange);
}
}
}
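// Vulkan consumes pDynamicOffsets in set order and, within a set, in binding order. The order written here
// (the uniform-var buffer first, followed by the per-shader uniform buffers) therefore has to match the
// binding order used when the descriptor set layout for this shader stage was created.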
void VulkanRenderer::draw_prepareDynamicOffsetsForDescriptorSet(uint32 shaderStageIndex, uint32* dynamicOffsets,
sint32& numDynOffsets,
const PipelineInfo* pipeline_info)
{
numDynOffsets = 0;
if (pipeline_info->dynamicOffsetInfo.hasUniformVar[shaderStageIndex])
{
dynamicOffsets[0] = dynamicOffsetInfo.uniformVarBufferOffset[shaderStageIndex];
numDynOffsets++;
}
if (pipeline_info->dynamicOffsetInfo.hasUniformBuffers[shaderStageIndex])
{
for (auto& itr : pipeline_info->dynamicOffsetInfo.list_uniformBuffers[shaderStageIndex])
{
dynamicOffsets[numDynOffsets] = dynamicOffsetInfo.shaderUB[shaderStageIndex].uniformBufferOffset[itr];
numDynOffsets++;
}
}
}
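// Summary: the descriptor-set state hash combines, per texture binding, the three sampler control words,
// the texture view's unique id and the swizzle-related bits of resource word 4 (word4 & 0x0FFF0000).
// Two drawcalls that hash identically can therefore reuse a cached descriptor set for the same pipeline.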
uint64 VulkanRenderer::GetDescriptorSetStateHash(LatteDecompilerShader* shader)
{
uint64 hash = 0;
const sint32 textureCount = shader->resourceMapping.getTextureCount();
for (int i = 0; i < textureCount; ++i)
{
const auto relative_textureUnit = shader->resourceMapping.getTextureUnitFromBindingPoint(i);
auto hostTextureUnit = relative_textureUnit;
auto textureDim = shader->textureUnitDim[relative_textureUnit];
auto texUnitRegIndex = hostTextureUnit * 7;
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
hostTextureUnit += LATTE_CEMU_VS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_VS;
break;
case LatteConst::ShaderType::Pixel:
hostTextureUnit += LATTE_CEMU_PS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_PS;
break;
case LatteConst::ShaderType::Geometry:
hostTextureUnit += LATTE_CEMU_GS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_GS;
break;
default:
UNREACHABLE;
}
auto texture = m_state.boundTexture[hostTextureUnit];
if (!texture)
continue;
const uint32 word4 = LatteGPUState.contextRegister[texUnitRegIndex + 4];
uint32 samplerIndex = shader->textureUnitSamplerAssignment[relative_textureUnit];
if (samplerIndex != LATTE_DECOMPILER_SAMPLER_NONE)
{
samplerIndex += LatteDecompiler_getTextureSamplerBaseIndex(shader->shaderType);
hash += LatteGPUState.contextRegister[Latte::REGADDR::SQ_TEX_SAMPLER_WORD0_0 + samplerIndex * 3 + 0];
hash = std::rotl<uint64>(hash, 7);
hash += LatteGPUState.contextRegister[Latte::REGADDR::SQ_TEX_SAMPLER_WORD0_0 + samplerIndex * 3 + 1];
hash = std::rotl<uint64>(hash, 7);
hash += LatteGPUState.contextRegister[Latte::REGADDR::SQ_TEX_SAMPLER_WORD0_0 + samplerIndex * 3 + 2];
hash = std::rotl<uint64>(hash, 7);
}
hash = std::rotl<uint64>(hash, 7);
// hash view id + swizzle mask
hash += (uint64)texture->GetUniqueId();
hash = std::rotr<uint64>(hash, 21);
hash += (uint64)(word4 & 0x0FFF0000);
}
return hash;
}
VkDescriptorSetInfo* VulkanRenderer::draw_getOrCreateDescriptorSet(PipelineInfo* pipeline_info, LatteDecompilerShader* shader)
{
const uint64 stateHash = GetDescriptorSetStateHash(shader);
VkDescriptorSetLayout descriptor_set_layout;
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
{
const auto it = pipeline_info->vertex_ds_cache.find(stateHash);
if (it != pipeline_info->vertex_ds_cache.cend())
return it->second;
descriptor_set_layout = pipeline_info->m_vkrObjPipeline->vertexDSL;
break;
}
case LatteConst::ShaderType::Pixel:
{
const auto it = pipeline_info->pixel_ds_cache.find(stateHash);
if (it != pipeline_info->pixel_ds_cache.cend())
return it->second;
descriptor_set_layout = pipeline_info->m_vkrObjPipeline->pixelDSL;
break;
}
case LatteConst::ShaderType::Geometry:
{
const auto it = pipeline_info->geometry_ds_cache.find(stateHash);
if (it != pipeline_info->geometry_ds_cache.cend())
return it->second;
descriptor_set_layout = pipeline_info->m_vkrObjPipeline->geometryDSL;
break;
}
default:
UNREACHABLE;
}
// create new descriptor set
VkDescriptorSetInfo* dsInfo = new VkDescriptorSetInfo();
dsInfo->stateHash = stateHash;
dsInfo->shaderType = shader->shaderType;
dsInfo->pipeline_info = pipeline_info;
dsInfo->m_vkObjDescriptorSet = new VKRObjectDescriptorSet();
auto vkObjDS = dsInfo->m_vkObjDescriptorSet;
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = m_descriptorPool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &descriptor_set_layout;
VkDescriptorSet result;
if (vkAllocateDescriptorSets(m_logicalDevice, &allocInfo, &result) != VK_SUCCESS)
{
UnrecoverableError(fmt::format("Failed to allocate descriptor sets. Currently allocated: Descriptors={} TextureSamplers={} DynUniformBuffers={} StorageBuffers={}",
performanceMonitor.vk.numDescriptorSets.get(),
performanceMonitor.vk.numDescriptorSamplerTextures.get(),
performanceMonitor.vk.numDescriptorDynUniformBuffers.get(),
performanceMonitor.vk.numDescriptorStorageBuffers.get()
).c_str());
}
vkObjDS->descriptorSet = result;
sint32 textureCount = shader->resourceMapping.getTextureCount();
std::vector<VkWriteDescriptorSet> descriptorWrites;
std::vector<VkDescriptorImageInfo> textureArray;
for (int i = 0; i < textureCount; ++i)
{
VkDescriptorImageInfo info{};
const auto relative_textureUnit = shader->resourceMapping.getTextureUnitFromBindingPoint(i);
auto hostTextureUnit = relative_textureUnit;
auto textureDim = shader->textureUnitDim[relative_textureUnit];
auto texUnitRegIndex = hostTextureUnit * 7;
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
hostTextureUnit += LATTE_CEMU_VS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_VS;
break;
case LatteConst::ShaderType::Pixel:
hostTextureUnit += LATTE_CEMU_PS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_PS;
break;
case LatteConst::ShaderType::Geometry:
hostTextureUnit += LATTE_CEMU_GS_TEX_UNIT_BASE;
texUnitRegIndex += Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_GS;
break;
default:
UNREACHABLE;
}
auto textureView = m_state.boundTexture[hostTextureUnit];
if (!textureView)
{
if (textureDim == Latte::E_DIM::DIM_1D)
{
info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
info.imageView = nullTexture1D.view;
info.sampler = nullTexture1D.sampler;
textureArray.emplace_back(info);
}
else
{
info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
info.imageView = nullTexture2D.view;
info.sampler = nullTexture2D.sampler;
textureArray.emplace_back(info);
}
continue;
}
// safeguard to avoid using mismatching texture dimensions
// if we properly support aux hash for vs/gs this should never trigger
if (textureDim == Latte::E_DIM::DIM_1D && (textureView->dim != Latte::E_DIM::DIM_1D))
{
// should be 1D
info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
info.imageView = nullTexture1D.view;
info.sampler = nullTexture1D.sampler;
textureArray.emplace_back(info);
// cemuLog_log(LogType::Force, "Vulkan-Info: Shader 0x{:016x} uses 1D texture but bound texture has mismatching type (dim: 0x{:02x})", shader->baseHash, textureView->gx2Dim);
continue;
}
else if (textureDim == Latte::E_DIM::DIM_2D && (textureView->dim != Latte::E_DIM::DIM_2D && textureView->dim != Latte::E_DIM::DIM_2D_MSAA))
{
// should be 2D
// is GPU7 fine with 2D access to a 2D_ARRAY texture?
info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
info.imageView = nullTexture2D.view;
info.sampler = nullTexture2D.sampler;
textureArray.emplace_back(info);
// cemuLog_log(LogType::Force, "Vulkan-Info: Shader 0x{:016x} uses 2D texture but bound texture has mismatching type (dim: 0x{:02x})", shader->baseHash, textureView->gx2Dim);
continue;
}
info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
VkSamplerCustomBorderColorCreateInfoEXT samplerCustomBorderColor{};
VkSampler sampler;
VkSamplerCreateInfo samplerInfo{};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
LatteTexture* baseTexture = textureView->baseTexture;
// read texture resource register word 4 (swizzle / sampler-view selection bits)
uint32 word4 = LatteGPUState.contextRegister[texUnitRegIndex + 4];
auto imageViewObj = textureView->GetSamplerView(word4);
info.imageView = imageViewObj->m_textureImageView;
vkObjDS->addRef(imageViewObj);
// track relation between view and descriptor set
vectorAppendUnique(dsInfo->list_referencedViews, textureView);
textureView->AddDescriptorSetReference(dsInfo);
if (!baseTexture->IsCompressedFormat())
vectorAppendUnique(dsInfo->list_fboCandidates, (LatteTextureVk*)baseTexture);
uint32 stageSamplerIndex = shader->textureUnitSamplerAssignment[relative_textureUnit];
if (stageSamplerIndex != LATTE_DECOMPILER_SAMPLER_NONE)
{
uint32 samplerIndex = stageSamplerIndex + LatteDecompiler_getTextureSamplerBaseIndex(shader->shaderType);
const _LatteRegisterSetSampler* samplerWords = LatteGPUState.contextNew.SQ_TEX_SAMPLER + samplerIndex;
// lod
uint32 iMinLOD = samplerWords->WORD1.get_MIN_LOD();
uint32 iMaxLOD = samplerWords->WORD1.get_MAX_LOD();
sint32 iLodBias = samplerWords->WORD1.get_LOD_BIAS();
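// The /64.0f conversions below imply that MIN_LOD, MAX_LOD and LOD_BIAS are fixed-point values with
// 6 fractional bits, i.e. a register value of 0x40 corresponds to one mip level.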
// apply relative lod bias from graphic pack
if (baseTexture->overwriteInfo.hasRelativeLodBias)
iLodBias += baseTexture->overwriteInfo.relativeLodBias;
// apply absolute lod bias from graphic pack
if (baseTexture->overwriteInfo.hasLodBias)
iLodBias = baseTexture->overwriteInfo.lodBias;
auto filterMip = samplerWords->WORD0.get_MIP_FILTER();
if (filterMip == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER::NONE)
{
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
samplerInfo.minLod = 0;
samplerInfo.maxLod = 0.25f;
}
else if (filterMip == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER::POINT)
{
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
samplerInfo.minLod = (float)iMinLOD / 64.0f;
samplerInfo.maxLod = (float)iMaxLOD / 64.0f;
}
else if (filterMip == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER::LINEAR)
{
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.minLod = (float)iMinLOD / 64.0f;
samplerInfo.maxLod = (float)iMaxLOD / 64.0f;
}
else
{
// fallback for invalid constants
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.minLod = (float)iMinLOD / 64.0f;
samplerInfo.maxLod = (float)iMaxLOD / 64.0f;
}
auto filterMin = samplerWords->WORD0.get_XY_MIN_FILTER();
cemu_assert_debug(filterMin != Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::BICUBIC); // todo
samplerInfo.minFilter = (filterMin == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::POINT || filterMin == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::ANISO_POINT) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;
auto filterMag = samplerWords->WORD0.get_XY_MAG_FILTER();
samplerInfo.magFilter = (filterMag == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::POINT || filterMin == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::ANISO_POINT) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;
auto filterZ = samplerWords->WORD0.get_Z_FILTER();
// todo: z-filter for texture array samplers is customizable for GPU7 but OpenGL/Vulkan doesn't expose this functionality?
static const VkSamplerAddressMode s_vkClampTable[] = {
VK_SAMPLER_ADDRESS_MODE_REPEAT, // WRAP
VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, // MIRROR
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // CLAMP_LAST_TEXEL
VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, // MIRROR_ONCE_LAST_TEXEL
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // unsupported HALF_BORDER
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // unsupported MIRROR_ONCE_HALF_BORDER
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // CLAMP_BORDER
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER // MIRROR_ONCE_BORDER
};
auto clampX = samplerWords->WORD0.get_CLAMP_X();
auto clampY = samplerWords->WORD0.get_CLAMP_Y();
auto clampZ = samplerWords->WORD0.get_CLAMP_Z();
samplerInfo.addressModeU = s_vkClampTable[(size_t)clampX];
samplerInfo.addressModeV = s_vkClampTable[(size_t)clampY];
samplerInfo.addressModeW = s_vkClampTable[(size_t)clampZ];
auto maxAniso = samplerWords->WORD0.get_MAX_ANISO_RATIO();
if (baseTexture->overwriteInfo.anisotropicLevel >= 0)
maxAniso = baseTexture->overwriteInfo.anisotropicLevel;
if (maxAniso > 0)
{
samplerInfo.anisotropyEnable = VK_TRUE;
samplerInfo.maxAnisotropy = (float)(1 << maxAniso);
}
else
{
samplerInfo.anisotropyEnable = VK_FALSE;
samplerInfo.maxAnisotropy = 1.0f;
}
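// MAX_ANISO_RATIO is treated as a power-of-two exponent here (0 -> 1x ... 4 -> 16x), hence
// maxAnisotropy = 1 << maxAniso; graphic packs can override the exponent via overwriteInfo.anisotropicLevel.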
samplerInfo.mipLodBias = (float)iLodBias / 64.0f;
// depth compare
uint8 depthCompareMode = shader->textureUsesDepthCompare[relative_textureUnit] ? 1 : 0;
static const VkCompareOp s_vkCompareOps[]
{
VK_COMPARE_OP_NEVER,
VK_COMPARE_OP_LESS,
VK_COMPARE_OP_EQUAL,
VK_COMPARE_OP_LESS_OR_EQUAL,
VK_COMPARE_OP_GREATER,
VK_COMPARE_OP_NOT_EQUAL,
VK_COMPARE_OP_GREATER_OR_EQUAL,
VK_COMPARE_OP_ALWAYS,
};
if (depthCompareMode == 1)
{
samplerInfo.compareEnable = VK_TRUE;
samplerInfo.compareOp = s_vkCompareOps[(size_t)samplerWords->WORD0.get_DEPTH_COMPARE_FUNCTION()];
}
else
{
samplerInfo.compareEnable = VK_FALSE;
}
// border
auto borderType = samplerWords->WORD0.get_BORDER_COLOR_TYPE();
if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::TRANSPARENT_BLACK)
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
else if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::OPAQUE_BLACK)
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
else if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::OPAQUE_WHITE)
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
else
{
if (this->m_featureControl.deviceExtensions.custom_border_color_without_format)
{
samplerCustomBorderColor.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT;
samplerCustomBorderColor.format = VK_FORMAT_UNDEFINED;
_LatteRegisterSetSamplerBorderColor* borderColorReg;
if (shader->shaderType == LatteConst::ShaderType::Vertex)
borderColorReg = LatteGPUState.contextNew.TD_VS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
borderColorReg = LatteGPUState.contextNew.TD_PS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
else // geometry
borderColorReg = LatteGPUState.contextNew.TD_GS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
samplerCustomBorderColor.customBorderColor.float32[0] = borderColorReg->red.get_channelValue();
samplerCustomBorderColor.customBorderColor.float32[1] = borderColorReg->green.get_channelValue();
samplerCustomBorderColor.customBorderColor.float32[2] = borderColorReg->blue.get_channelValue();
samplerCustomBorderColor.customBorderColor.float32[3] = borderColorReg->alpha.get_channelValue();
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_CUSTOM_EXT;
samplerInfo.pNext = &samplerCustomBorderColor;
}
else
{
// default to transparent black if custom border color is not supported
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
}
}
}
if (vkCreateSampler(m_logicalDevice, &samplerInfo, nullptr, &sampler) != VK_SUCCESS)
UnrecoverableError("Failed to create texture sampler");
info.sampler = sampler;
textureArray.emplace_back(info);
}
if (textureCount > 0)
{
for (sint32 i = 0; i < textureCount; i++)
{
VkWriteDescriptorSet write_descriptor{};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = result;
write_descriptor.dstBinding = shader->resourceMapping.getTextureBaseBindingPoint() + i;
write_descriptor.dstArrayElement = 0;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
write_descriptor.descriptorCount = 1;
write_descriptor.pImageInfo = textureArray.data() + i;
descriptorWrites.emplace_back(write_descriptor);
performanceMonitor.vk.numDescriptorSamplerTextures.increment();
dsInfo->statsNumSamplerTextures++;
}
}
// descriptor for uniform vars (as buffer)
VkDescriptorBufferInfo uniformVarsBufferInfo{};
if (shader->resourceMapping.uniformVarsBufferBindingPoint >= 0)
{
uniformVarsBufferInfo.buffer = m_uniformVarBuffer;
uniformVarsBufferInfo.offset = 0; // fixed offset is always zero since we only use dynamic offsets
uniformVarsBufferInfo.range = shader->uniform.uniformRangeSize;
VkWriteDescriptorSet write_descriptor{};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = result;
write_descriptor.dstBinding = shader->resourceMapping.uniformVarsBufferBindingPoint;
write_descriptor.dstArrayElement = 0;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
write_descriptor.descriptorCount = 1;
write_descriptor.pBufferInfo = &uniformVarsBufferInfo;
descriptorWrites.emplace_back(write_descriptor);
performanceMonitor.vk.numDescriptorDynUniformBuffers.increment();
dsInfo->statsNumDynUniformBuffers++;
}
// descriptor for uniform buffers
VkDescriptorBufferInfo uniformBufferInfo{};
uniformBufferInfo.buffer = m_useHostMemoryForCache ? m_importedMem : m_bufferCache;
uniformBufferInfo.offset = 0; // fixed offset is always zero since we only use dynamic offsets
if (m_vendor == GfxVendor::AMD)
{
// on AMD we enable robust buffer access and map the remaining range of the buffer
uniformBufferInfo.range = VK_WHOLE_SIZE;
}
else
{
// on other vendors (which may not allow large range values) we disable robust buffer access and use a fixed size
// update: starting with their Vulkan 1.2 drivers Nvidia now also prevents out-of-bounds access. Unlike on AMD, we can't use VK_WHOLE_SIZE due to 64KB size limit of uniforms
// as a workaround we set the size to the allowed maximum. A proper solution would be to use SSBOs for large uniforms / uniforms with unknown size?
uniformBufferInfo.range = 1024 * 16 * 4; // 64KiB, the largest commonly guaranteed uniform range; required by Xenoblade Chronicles X (XCX)
}
for (sint32 i = 0; i < LATTE_NUM_MAX_UNIFORM_BUFFERS; i++)
{
if (shader->resourceMapping.uniformBuffersBindingPoint[i] >= 0)
{
VkWriteDescriptorSet write_descriptor{};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = result;
write_descriptor.dstBinding = shader->resourceMapping.uniformBuffersBindingPoint[i];
write_descriptor.dstArrayElement = 0;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
write_descriptor.descriptorCount = 1;
write_descriptor.pBufferInfo = &uniformBufferInfo;
descriptorWrites.emplace_back(write_descriptor);
performanceMonitor.vk.numDescriptorDynUniformBuffers.increment();
dsInfo->statsNumDynUniformBuffers++;
}
}
VkDescriptorBufferInfo tfStorageBufferInfo{};
if (shader->resourceMapping.tfStorageBindingPoint >= 0)
{
tfStorageBufferInfo.buffer = m_xfbRingBuffer;
tfStorageBufferInfo.offset = 0; // offset is calculated in shader
tfStorageBufferInfo.range = VK_WHOLE_SIZE;
VkWriteDescriptorSet write_descriptor{};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = result;
write_descriptor.dstBinding = shader->resourceMapping.tfStorageBindingPoint;
write_descriptor.dstArrayElement = 0;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
write_descriptor.descriptorCount = 1;
write_descriptor.pBufferInfo = &tfStorageBufferInfo;
descriptorWrites.emplace_back(write_descriptor);
performanceMonitor.vk.numDescriptorStorageBuffers.increment();
dsInfo->statsNumStorageBuffers++;
}
if (!descriptorWrites.empty())
vkUpdateDescriptorSets(m_logicalDevice, (uint32)descriptorWrites.size(), descriptorWrites.data(), 0, nullptr);
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
{
pipeline_info->vertex_ds_cache[stateHash] = dsInfo;
break;
}
case LatteConst::ShaderType::Pixel:
{
pipeline_info->pixel_ds_cache[stateHash] = dsInfo;
break;
}
case LatteConst::ShaderType::Geometry:
{
pipeline_info->geometry_ds_cache[stateHash] = dsInfo;
break;
}
default:
UNREACHABLE;
}
return dsInfo;
}
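// Hazard tracking between draws works on a per-texture "flush index": each texture remembers the flush index
// of its last read (sampled) and last write (render target) use. If a texture written in the current flush
// window is now sampled (or, in sync_RenderPassLoadTextures, written again or written after being read),
// a full memory barrier is recorded and the flush index is advanced, which implicitly clears the hazard state.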
void VulkanRenderer::sync_inputTexturesChanged()
{
bool writeFlushRequired = false;
if (m_state.activeVertexDS)
{
for (auto& tex : m_state.activeVertexDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
if (m_state.activeGeometryDS)
{
for (auto& tex : m_state.activeGeometryDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
if (m_state.activePixelDS)
{
for (auto& tex : m_state.activePixelDS->list_fboCandidates)
{
tex->m_vkFlushIndex_read = m_state.currentFlushIndex;
if (tex->m_vkFlushIndex_write == m_state.currentFlushIndex)
writeFlushRequired = true;
}
}
// barrier here
if (writeFlushRequired)
{
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memoryBarrier.srcAccessMask = 0;
memoryBarrier.dstAccessMask = 0;
VkPipelineStageFlags srcStage = 0;
VkPipelineStageFlags dstStage = 0;
// src
srcStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
// dst
dstStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
dstStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStage, dstStage, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
m_state.currentFlushIndex++;
}
}
void VulkanRenderer::sync_RenderPassLoadTextures(CachedFBOVk* fboVk)
{
bool readFlushRequired = false;
// always called after sync_inputTexturesChanged()
for (auto& tex : fboVk->GetTextures())
{
LatteTextureVk* texVk = (LatteTextureVk*)tex;
// write-before-write
if (texVk->m_vkFlushIndex_write == m_state.currentFlushIndex)
readFlushRequired = true;
texVk->m_vkFlushIndex_write = m_state.currentFlushIndex;
// read-before-write: the texture was sampled earlier in this flush window and is now written as an attachment
if (texVk->m_vkFlushIndex_read == m_state.currentFlushIndex)
readFlushRequired = true;
}
// barrier here
if (readFlushRequired)
{
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memoryBarrier.srcAccessMask = 0;
memoryBarrier.dstAccessMask = 0;
VkPipelineStageFlags srcStage = 0;
VkPipelineStageFlags dstStage = 0;
// src
srcStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
memoryBarrier.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
// dst
dstStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
dstStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStage, dstStage, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
performanceMonitor.vk.numDrawBarriersPerFrame.increment();
m_state.currentFlushIndex++;
}
}
void VulkanRenderer::sync_RenderPassStoreTextures(CachedFBOVk* fboVk)
{
uint32 flushIndex = m_state.currentFlushIndex;
for (auto& tex : fboVk->GetTextures())
{
LatteTextureVk* texVk = (LatteTextureVk*)tex;
texVk->m_vkFlushIndex_write = flushIndex;
}
}
void VulkanRenderer::draw_prepareDescriptorSets(PipelineInfo* pipeline_info, VkDescriptorSetInfo*& vertexDS, VkDescriptorSetInfo*& pixelDS, VkDescriptorSetInfo*& geometryDS)
{
const auto vertexShader = LatteSHRC_GetActiveVertexShader();
const auto geometryShader = LatteSHRC_GetActiveGeometryShader();
const auto pixelShader = LatteSHRC_GetActivePixelShader();
if (vertexShader)
{
auto descriptorSetInfo = draw_getOrCreateDescriptorSet(pipeline_info, vertexShader);
descriptorSetInfo->m_vkObjDescriptorSet->flagForCurrentCommandBuffer();
vertexDS = descriptorSetInfo;
}
if (pixelShader)
{
auto descriptorSetInfo = draw_getOrCreateDescriptorSet(pipeline_info, pixelShader);
descriptorSetInfo->m_vkObjDescriptorSet->flagForCurrentCommandBuffer();
pixelDS = descriptorSetInfo;
}
if (geometryShader)
{
auto descriptorSetInfo = draw_getOrCreateDescriptorSet(pipeline_info, geometryShader);
descriptorSetInfo->m_vkObjDescriptorSet->flagForCurrentCommandBuffer();
geometryDS = descriptorSetInfo;
}
}
void VulkanRenderer::draw_updateVkBlendConstants()
{
uint32* blendColorConstant = LatteGPUState.contextRegister + Latte::REGADDR::CB_BLEND_RED;
vkCmdSetBlendConstants(m_state.currentCommandBuffer, (const float*)blendColorConstant);
}
void VulkanRenderer::draw_updateDepthBias(bool forceUpdate)
{
uint32 frontScaleU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_SCALE.getRawValue();
uint32 frontOffsetU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_OFFSET.getRawValue();
uint32 offsetClampU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_CLAMP.getRawValue();
if (forceUpdate == false &&
m_state.prevPolygonFrontScaleU32 == frontScaleU32 &&
m_state.prevPolygonFrontOffsetU32 == frontOffsetU32 &&
m_state.prevPolygonFrontClampU32 == offsetClampU32)
return;
m_state.prevPolygonFrontScaleU32 = frontScaleU32;
m_state.prevPolygonFrontOffsetU32 = frontOffsetU32;
m_state.prevPolygonFrontClampU32 = offsetClampU32;
float frontScale = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_SCALE.get_SCALE();
float frontOffset = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_OFFSET.get_OFFSET();
float offsetClamp = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_CLAMP.get_CLAMP();
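// GPU7 appears to specify the polygon offset slope scale in 1/16 sub-pixel units (presumably matching GX2/GL
// semantics), so divide by 16 to get the slope factor expected by vkCmdSetDepthBias.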
frontScale /= 16.0f;
vkCmdSetDepthBias(m_state.currentCommandBuffer, frontOffset, offsetClamp, frontScale);
}
bool s_syncOnNextDraw = false;
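// Decides whether the currently open render pass can be reused for the next draw. Reuse is skipped when the
// bound descriptor sets sample from one of the FBO's own attachments (render feedback loop) and accurate
// barriers are requested; in that case the pass is ended and restarted so the barriers recorded by the
// sync_*Textures() helpers take effect.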
void VulkanRenderer::draw_setRenderPass()
{
CachedFBOVk* fboVk = m_state.activeFBO;
// update self-dependency flag
if (m_state.descriptorSetsChanged || m_state.activeRenderpassFBO != fboVk)
{
m_state.hasRenderSelfDependency = fboVk->CheckForCollision(m_state.activeVertexDS, m_state.activeGeometryDS, m_state.activePixelDS);
}
auto vkObjRenderPass = fboVk->GetRenderPassObj();
auto vkObjFramebuffer = fboVk->GetFramebufferObj();
bool overridePassReuse = m_state.hasRenderSelfDependency && (GetConfig().vk_accurate_barriers || m_state.activePipelineInfo->neverSkipAccurateBarrier);
if (!overridePassReuse && m_state.activeRenderpassFBO == fboVk)
{
if (m_state.descriptorSetsChanged)
sync_inputTexturesChanged();
return;
}
draw_endRenderPass();
if (m_state.descriptorSetsChanged)
sync_inputTexturesChanged();
// assume that FBO changed, update self-dependency state
m_state.hasRenderSelfDependency = fboVk->CheckForCollision(m_state.activeVertexDS, m_state.activeGeometryDS, m_state.activePixelDS);
sync_RenderPassLoadTextures(fboVk);
if (m_featureControl.deviceExtensions.dynamic_rendering)
{
vkCmdBeginRenderingKHR(m_state.currentCommandBuffer, fboVk->GetRenderingInfo());
}
else
{
VkRenderPass renderPass = vkObjRenderPass->m_renderPass;
VkFramebuffer framebuffer = vkObjFramebuffer->m_frameBuffer;
VkExtent2D extend = fboVk->GetExtend();
VkRenderPassBeginInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = framebuffer;
renderPassInfo.renderArea.offset = { 0, 0 };
renderPassInfo.renderArea.extent = extend;
renderPassInfo.clearValueCount = 0;
vkCmdBeginRenderPass(m_state.currentCommandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
}
m_state.activeRenderpassFBO = fboVk;
vkObjRenderPass->flagForCurrentCommandBuffer();
vkObjFramebuffer->flagForCurrentCommandBuffer();
performanceMonitor.vk.numBeginRenderpassPerFrame.increment();
}
void VulkanRenderer::draw_endRenderPass()
{
if (!m_state.activeRenderpassFBO)
return;
if (m_featureControl.deviceExtensions.dynamic_rendering)
vkCmdEndRenderingKHR(m_state.currentCommandBuffer);
else
vkCmdEndRenderPass(m_state.currentCommandBuffer);
sync_RenderPassStoreTextures(m_state.activeRenderpassFBO);
m_state.activeRenderpassFBO = nullptr;
}
void LatteDraw_handleSpecialState8_clearAsDepth();
// transfer depth buffer data to color buffer
void VulkanRenderer::draw_handleSpecialState5()
{
LatteMRT::UpdateCurrentFBO();
LatteRenderTarget_updateViewport();
LatteTextureView* colorBuffer = LatteMRT::GetColorAttachment(0);
LatteTextureView* depthBuffer = LatteMRT::GetDepthAttachment();
sint32 vpWidth, vpHeight;
LatteMRT::GetVirtualViewportDimensions(vpWidth, vpHeight);
surfaceCopy_copySurfaceWithFormatConversion(
depthBuffer->baseTexture, depthBuffer->firstMip, depthBuffer->firstSlice,
colorBuffer->baseTexture, colorBuffer->firstMip, colorBuffer->firstSlice,
vpWidth, vpHeight);
}
void VulkanRenderer::draw_beginSequence()
{
m_state.drawSequenceSkip = false;
bool streamoutEnable = LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] != 0;
// update shader state
LatteSHRC_UpdateActiveShaders();
if (LatteGPUState.activeShaderHasError)
{
cemuLog_logDebugOnce(LogType::Force, "Skipping drawcalls due to shader error");
m_state.drawSequenceSkip = true;
cemu_assert_debug(false);
return;
}
// update render target and texture state
LatteGPUState.requiresTextureBarrier = false;
while (true)
{
LatteGPUState.repeatTextureInitialization = false;
if (!LatteMRT::UpdateCurrentFBO())
{
debug_printf("Rendertarget invalid\n");
m_state.drawSequenceSkip = true;
return; // no render target
}
if (!hasValidFramebufferAttached && !streamoutEnable)
{
debug_printf("Drawcall with no color buffer or depth buffer attached\n");
m_state.drawSequenceSkip = true;
return; // no render target
}
LatteTexture_updateTextures();
if (!LatteGPUState.repeatTextureInitialization)
break;
}
// apply render target
LatteMRT::ApplyCurrentState();
// viewport and scissor box
LatteRenderTarget_updateViewport();
LatteRenderTarget_updateScissorBox();
// check for conditions which would turn the drawcalls into no-ops
bool rasterizerEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_RASTERIZATION_KILL() == false;
// GX2SetSpecialState(0, true) enables DX_RASTERIZATION_KILL, but still expects depth writes to happen? -> Research which stages are disabled by DX_RASTERIZATION_KILL exactly
// for now we use a workaround:
if (!LatteGPUState.contextNew.PA_CL_VTE_CNTL.get_VPORT_X_OFFSET_ENA())
rasterizerEnable = true;
if (rasterizerEnable == false && streamoutEnable == false)
m_state.drawSequenceSkip = true;
}
void VulkanRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 instanceCount, uint32 count, MPTR indexDataMPTR, Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE indexType, bool isFirst)
{
if (m_state.drawSequenceSkip)
{
LatteGPUState.drawCallCounter++;
return;
}
// fast clear color as depth
if (LatteGPUState.contextNew.GetSpecialStateValues()[8] != 0)
{
LatteDraw_handleSpecialState8_clearAsDepth();
LatteGPUState.drawCallCounter++;
return;
}
else if (LatteGPUState.contextNew.GetSpecialStateValues()[5] != 0)
{
draw_handleSpecialState5();
LatteGPUState.drawCallCounter++;
return;
}
// prepare streamout
m_streamoutState.verticesPerInstance = count;
LatteStreamout_PrepareDrawcall(count, instanceCount);
// update uniform vars
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
if (vertexShader)
uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, vertexShader);
if (pixelShader)
uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, pixelShader);
if (geometryShader)
uniformData_updateUniformVars(VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY, geometryShader);
// store where the read pointer should go after command buffer execution
m_cmdBufferUniformRingbufIndices[m_commandBufferIndex] = m_uniformVarBufferWriteIndex;
// process index data
const LattePrimitiveMode primitiveMode = static_cast<LattePrimitiveMode>(LatteGPUState.contextRegister[mmVGT_PRIMITIVE_TYPE]);
Renderer::INDEX_TYPE hostIndexType;
uint32 hostIndexCount;
uint32 indexMin = 0;
uint32 indexMax = 0;
uint32 indexBufferOffset = 0;
uint32 indexBufferIndex = 0;
LatteIndices_decode(memory_getPointerFromVirtualOffset(indexDataMPTR), indexType, count, primitiveMode, indexMin, indexMax, hostIndexType, hostIndexCount, indexBufferOffset, indexBufferIndex);
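// LatteIndices_decode() decodes the guest index data into a host index buffer allocation (translating
// unsupported index/primitive formats where necessary) and also returns the min/max index, which is used
// below to limit how much vertex data LatteBufferCache_Sync() has to upload.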
// update index binding
bool isPrevIndexData = false;
if (hostIndexType != INDEX_TYPE::NONE)
{
if (m_state.activeIndexBufferOffset != indexBufferOffset || m_state.activeIndexBufferIndex != indexBufferIndex || m_state.activeIndexType != hostIndexType)
{
m_state.activeIndexType = hostIndexType;
m_state.activeIndexBufferOffset = indexBufferOffset;
m_state.activeIndexBufferIndex = indexBufferIndex;
VkIndexType vkType;
if (hostIndexType == INDEX_TYPE::U16)
vkType = VK_INDEX_TYPE_UINT16;
else if (hostIndexType == INDEX_TYPE::U32)
vkType = VK_INDEX_TYPE_UINT32;
else
cemu_assert(false);
vkCmdBindIndexBuffer(m_state.currentCommandBuffer, memoryManager->getIndexAllocator().GetBufferByIndex(indexBufferIndex), indexBufferOffset, vkType);
}
else
isPrevIndexData = true;
}
if (m_useHostMemoryForCache)
{
// direct memory access (Wii U memory space imported as a Vulkan buffer), update buffer bindings
draw_updateVertexBuffersDirectAccess();
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
if (vertexShader)
draw_updateUniformBuffersDirectAccess(vertexShader, mmSQ_VTX_UNIFORM_BLOCK_START, LatteConst::ShaderType::Vertex);
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
if (geometryShader)
draw_updateUniformBuffersDirectAccess(geometryShader, mmSQ_GS_UNIFORM_BLOCK_START, LatteConst::ShaderType::Geometry);
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
if (pixelShader)
draw_updateUniformBuffersDirectAccess(pixelShader, mmSQ_PS_UNIFORM_BLOCK_START, LatteConst::ShaderType::Pixel);
}
else
{
// synchronize vertex and uniform cache and update buffer bindings
LatteBufferCache_Sync(indexMin + baseVertex, indexMax + baseVertex, baseInstance, instanceCount);
}
PipelineInfo* pipeline_info;
if (!isFirst)
{
if (m_state.activePipelineInfo->minimalStateHash != draw_calculateMinimalGraphicsPipelineHash(vertexShader->compatibleFetchShader, LatteGPUState.contextNew))
{
// pipeline changed
pipeline_info = draw_getOrCreateGraphicsPipeline(count);
m_state.activePipelineInfo = pipeline_info;
}
else
{
pipeline_info = m_state.activePipelineInfo;
#ifdef CEMU_DEBUG_ASSERT
auto pipeline_info2 = draw_getOrCreateGraphicsPipeline(count);
if (pipeline_info != pipeline_info2)
{
cemu_assert_debug(false);
}
#endif
}
}
else
{
pipeline_info = draw_getOrCreateGraphicsPipeline(count);
m_state.activePipelineInfo = pipeline_info;
}
auto vkObjPipeline = pipeline_info->m_vkrObjPipeline;
if (vkObjPipeline->pipeline == VK_NULL_HANDLE)
{
// invalid/uninitialized pipeline
m_state.activeVertexDS = nullptr;
return;
}
VkDescriptorSetInfo *vertexDS = nullptr, *pixelDS = nullptr, *geometryDS = nullptr;
if (!isFirst && m_state.activeVertexDS)
{
vertexDS = m_state.activeVertexDS;
pixelDS = m_state.activePixelDS;
geometryDS = m_state.activeGeometryDS;
m_state.descriptorSetsChanged = false;
}
else
{
draw_prepareDescriptorSets(pipeline_info, vertexDS, pixelDS, geometryDS);
m_state.activeVertexDS = vertexDS;
m_state.activePixelDS = pixelDS;
m_state.activeGeometryDS = geometryDS;
m_state.descriptorSetsChanged = true;
}
draw_setRenderPass();
if (m_state.currentPipeline != vkObjPipeline->pipeline)
{
vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vkObjPipeline->pipeline);
vkObjPipeline->flagForCurrentCommandBuffer();
m_state.currentPipeline = vkObjPipeline->pipeline;
// depth bias
if (pipeline_info->usesDepthBias)
draw_updateDepthBias(true);
}
else
{
if (pipeline_info->usesDepthBias)
draw_updateDepthBias(false);
}
// update blend constants
if (pipeline_info->usesBlendConstants)
draw_updateVkBlendConstants();
// update descriptor sets
uint32_t dynamicOffsets[17 * 2];
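// 17 * 2: worst case of one dynamic uniform-var buffer plus up to 16 dynamic uniform buffers per stage
// (LATTE_NUM_MAX_UNIFORM_BUFFERS is assumed to be 16 here), enough for the vertex+pixel pair that is
// bound in a single vkCmdBindDescriptorSets call.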
if (vertexDS && pixelDS)
{
// update vertex and pixel descriptor set in a single call to vkCmdBindDescriptorSets
sint32 numDynOffsetsVS;
sint32 numDynOffsetsPS;
draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, dynamicOffsets, numDynOffsetsVS,
pipeline_info);
draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, dynamicOffsets + numDynOffsetsVS, numDynOffsetsPS,
pipeline_info);
VkDescriptorSet dsArray[2];
dsArray[0] = vertexDS->m_vkObjDescriptorSet->descriptorSet;
dsArray[1] = pixelDS->m_vkObjDescriptorSet->descriptorSet;
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
vkObjPipeline->pipeline_layout, 0, 2, dsArray, numDynOffsetsVS + numDynOffsetsPS,
dynamicOffsets);
}
else if (vertexDS)
{
sint32 numDynOffsets;
draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX, dynamicOffsets, numDynOffsets,
pipeline_info);
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
vkObjPipeline->pipeline_layout, 0, 1, &vertexDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets,
dynamicOffsets);
}
else if (pixelDS)
{
sint32 numDynOffsets;
draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT, dynamicOffsets, numDynOffsets,
pipeline_info);
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
vkObjPipeline->pipeline_layout, 1, 1, &pixelDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets,
dynamicOffsets);
}
if (geometryDS)
{
sint32 numDynOffsets;
draw_prepareDynamicOffsetsForDescriptorSet(VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY, dynamicOffsets, numDynOffsets,
pipeline_info);
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
vkObjPipeline->pipeline_layout, 2, 1, &geometryDS->m_vkObjDescriptorSet->descriptorSet, numDynOffsets,
dynamicOffsets);
}
// draw
if (hostIndexType != INDEX_TYPE::NONE)
vkCmdDrawIndexed(m_state.currentCommandBuffer, hostIndexCount, instanceCount, 0, baseVertex, baseInstance);
else
vkCmdDraw(m_state.currentCommandBuffer, count, instanceCount, baseVertex, baseInstance);
LatteStreamout_FinishDrawcall(m_useHostMemoryForCache);
LatteGPUState.drawCallCounter++;
}
// used in place of vertex/uniform caching when direct memory access is possible
void VulkanRenderer::draw_updateVertexBuffersDirectAccess()
{
LatteFetchShader* parsedFetchShader = LatteSHRC_GetActiveFetchShader();
if (!parsedFetchShader)
return;
for (auto& bufferGroup : parsedFetchShader->bufferGroups)
{
uint32 bufferIndex = bufferGroup.attributeBufferIndex;
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
MPTR bufferAddress = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 0];
uint32 bufferSize = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 1] + 1;
uint32 bufferStride = (LatteGPUState.contextRegister[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
if (bufferAddress == MPTR_NULL) [[unlikely]]
{
bufferAddress = 0x10000000;
}
if (m_state.currentVertexBinding[bufferIndex].offset == bufferAddress)
continue;
cemu_assert_debug(bufferAddress < 0x50000000);
VkBuffer attrBuffer = m_importedMem;
VkDeviceSize attrOffset = bufferAddress - m_importedMemBaseAddress;
vkCmdBindVertexBuffers(m_state.currentCommandBuffer, bufferIndex, 1, &attrBuffer, &attrOffset);
}
}
void VulkanRenderer::draw_updateUniformBuffersDirectAccess(LatteDecompilerShader* shader, const uint32 uniformBufferRegOffset, LatteConst::ShaderType shaderType)
{
if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK)
{
for(const auto& buf : shader->list_quickBufferList)
{
sint32 i = buf.index;
MPTR physicalAddr = LatteGPUState.contextRegister[uniformBufferRegOffset + i * 7 + 0];
uint32 uniformSize = LatteGPUState.contextRegister[uniformBufferRegOffset + i * 7 + 1] + 1;
if (physicalAddr == MPTR_NULL) [[unlikely]]
{
cemu_assert_unimplemented();
continue;
}
uniformSize = std::min<uint32>(uniformSize, buf.size);
cemu_assert_debug(physicalAddr < 0x50000000);
uint32 bufferIndex = i;
cemu_assert_debug(bufferIndex < 16);
switch (shaderType)
{
case LatteConst::ShaderType::Vertex:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
break;
case LatteConst::ShaderType::Geometry:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
break;
case LatteConst::ShaderType::Pixel:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].uniformBufferOffset[bufferIndex] = physicalAddr - m_importedMemBaseAddress;
break;
default:
UNREACHABLE;
}
}
}
}
void VulkanRenderer::draw_endSequence()
{
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
// post-drawcall logic
if (pixelShader)
LatteRenderTarget_trackUpdates();
bool hasReadback = LatteTextureReadback_Update();
m_recordedDrawcalls++;
if (m_recordedDrawcalls >= m_submitThreshold || hasReadback)
{
SubmitCommandBuffer();
}
}
void VulkanRenderer::debug_genericBarrier()
{
draw_endRenderPass();
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
memoryBarrier.srcAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
VK_ACCESS_INDEX_READ_BIT |
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
VK_ACCESS_UNIFORM_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_SHADER_WRITE_BIT |
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT |
VK_ACCESS_HOST_READ_BIT |
VK_ACCESS_HOST_WRITE_BIT |
VK_ACCESS_MEMORY_READ_BIT |
VK_ACCESS_MEMORY_WRITE_BIT;
memoryBarrier.dstAccessMask = VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
VK_ACCESS_INDEX_READ_BIT |
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
VK_ACCESS_UNIFORM_READ_BIT |
VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
VK_ACCESS_SHADER_READ_BIT |
VK_ACCESS_SHADER_WRITE_BIT |
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_TRANSFER_READ_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT |
VK_ACCESS_HOST_READ_BIT |
VK_ACCESS_HOST_WRITE_BIT |
VK_ACCESS_MEMORY_READ_BIT |
VK_ACCESS_MEMORY_WRITE_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
}
// ============================================================================
// File: cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.cpp
// ============================================================================
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanTextureReadback.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/CocoaSurface.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/CafeSystem.h"
#include "util/helpers/helpers.h"
#include "util/helpers/StringHelpers.h"
#include "config/ActiveSettings.h"
#include "config/CemuConfig.h"
#include "gui/guiWrapper.h"
#include "imgui/imgui_extension.h"
#include "imgui/imgui_impl_vulkan.h"
#include "Cafe/TitleList/GameInfo.h"
#include "Cafe/HW/Latte/Core/LatteTiming.h" // vsync control
#include <glslang/Public/ShaderLang.h>
#include <wx/msgdlg.h>
#include <wx/intl.h> // for localization
#ifndef VK_API_VERSION_MAJOR
#define VK_API_VERSION_MAJOR(version) (((uint32_t)(version) >> 22) & 0x7FU)
#define VK_API_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3FFU)
#endif
extern std::atomic_int g_compiling_pipelines;
const std::vector<const char*> kOptionalDeviceExtensions =
{
VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME,
VK_NV_FILL_RECTANGLE_EXTENSION_NAME,
VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME,
VK_EXT_FILTER_CUBIC_EXTENSION_NAME, // not supported by any device yet
VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME,
VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME,
VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME,
VK_KHR_PRESENT_WAIT_EXTENSION_NAME,
VK_KHR_PRESENT_ID_EXTENSION_NAME
};
const std::vector<const char*> kRequiredDeviceExtensions =
{
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME
}; // Intel doesn't support VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME
VKAPI_ATTR VkBool32 VKAPI_CALL DebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData)
{
#ifdef CEMU_DEBUG_ASSERT
if (strstr(pCallbackData->pMessage, "consumes input location"))
return VK_FALSE; // false means we don't care
if (strstr(pCallbackData->pMessage, "blend"))
return VK_FALSE; // ignore blend-related validation messages
// note: Check if previously used location in VK_EXT_debug_report callback is the same as messageIdNumber under the new extension
// validation errors which are difficult to fix
if (pCallbackData->messageIdNumber == 0x6c3b517c || pCallbackData->messageIdNumber == 0xffffffffa6b17cdf || pCallbackData->messageIdNumber == 0xffffffffc406fcb7)
return VK_FALSE; // its illegal to render to and sample from same texture
if (pCallbackData->messageIdNumber == 0x6e633069)
return VK_FALSE; // framebuffer attachments should have identity swizzle
if (pCallbackData->messageIdNumber == 0xffffffffb408bc0b)
return VK_FALSE; // too many samplers
if (pCallbackData->messageIdNumber == 0x6bbb14)
return VK_FALSE; // SPIR-V inconsistency
if (strstr(pCallbackData->pMessage, "Number of currently valid sampler objects is not less than the maximum allowed"))
return VK_FALSE;
assert_dbg();
#endif
cemuLog_log(LogType::Force, (char*)pCallbackData->pMessage);
return VK_FALSE;
}
std::vector<VulkanRenderer::DeviceInfo> VulkanRenderer::GetDevices()
{
if(!vkEnumerateInstanceVersion)
{
cemuLog_log(LogType::Force, "Vulkan cant list devices because Vulkan loader failed");
return {};
}
uint32 apiVersion = VK_API_VERSION_1_1;
if (vkEnumerateInstanceVersion(&apiVersion) != VK_SUCCESS)
{
if (VK_API_VERSION_MAJOR(apiVersion) < 1 || VK_API_VERSION_MINOR(apiVersion) < 2)
apiVersion = VK_API_VERSION_1_1;
}
std::vector<DeviceInfo> result;
std::vector<const char*> requiredExtensions;
requiredExtensions.clear();
requiredExtensions.emplace_back(VK_KHR_SURFACE_EXTENSION_NAME);
#if BOOST_OS_WINDOWS
requiredExtensions.emplace_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#elif BOOST_OS_LINUX
auto backend = gui_getWindowInfo().window_main.backend;
if(backend == WindowHandleInfo::Backend::X11)
requiredExtensions.emplace_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
#ifdef HAS_WAYLAND
else if (backend == WindowHandleInfo::Backend::WAYLAND)
requiredExtensions.emplace_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
#endif
#elif BOOST_OS_MACOS
requiredExtensions.emplace_back(VK_EXT_METAL_SURFACE_EXTENSION_NAME);
#endif
VkApplicationInfo app_info{};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = EMULATOR_NAME;
app_info.applicationVersion = VK_MAKE_VERSION(EMULATOR_VERSION_MAJOR, EMULATOR_VERSION_MINOR, EMULATOR_VERSION_PATCH);
app_info.pEngineName = EMULATOR_NAME;
app_info.engineVersion = app_info.applicationVersion;
app_info.apiVersion = apiVersion;
VkInstanceCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
create_info.pApplicationInfo = &app_info;
create_info.ppEnabledExtensionNames = requiredExtensions.data();
create_info.enabledExtensionCount = requiredExtensions.size();
create_info.ppEnabledLayerNames = nullptr;
create_info.enabledLayerCount = 0;
VkInstance instance = nullptr;
try
{
VkResult err;
if ((err = vkCreateInstance(&create_info, nullptr, &instance)) != VK_SUCCESS)
throw std::runtime_error(fmt::format("Unable to create a Vulkan instance: {}", err));
if (!InitializeInstanceVulkan(instance))
throw std::runtime_error("can't initialize instanced vulkan functions");
uint32_t device_count = 0;
vkEnumeratePhysicalDevices(instance, &device_count, nullptr);
if (device_count == 0)
throw std::runtime_error("Failed to find a GPU with Vulkan support.");
// create tmp surface to create a logical device
auto surface = CreateFramebufferSurface(instance, gui_getWindowInfo().window_main);
std::vector<VkPhysicalDevice> devices(device_count);
vkEnumeratePhysicalDevices(instance, &device_count, devices.data());
for (const auto& device : devices)
{
if (IsDeviceSuitable(surface, device))
{
VkPhysicalDeviceIDProperties physDeviceIDProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES };
VkPhysicalDeviceProperties2 physDeviceProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 };
physDeviceProps.pNext = &physDeviceIDProps;
vkGetPhysicalDeviceProperties2(device, &physDeviceProps);
result.emplace_back(physDeviceProps.properties.deviceName, physDeviceIDProps.deviceUUID);
}
}
vkDestroySurfaceKHR(instance, surface, nullptr);
}
catch (...)
{
}
if (instance)
vkDestroyInstance(instance, nullptr);
return result;
}
void VulkanRenderer::DetermineVendor()
{
VkPhysicalDeviceProperties2 properties{};
VkPhysicalDeviceDriverProperties driverProperties{ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES };
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
if (m_featureControl.deviceExtensions.driver_properties)
properties.pNext = &driverProperties;
vkGetPhysicalDeviceProperties2(m_physicalDevice, &properties);
switch (properties.properties.vendorID)
{
case 0x10DE:
m_vendor = GfxVendor::Nvidia;
break;
case 0x8086: // iGPU
m_vendor = GfxVendor::Intel;
break;
case 0x1002:
m_vendor = GfxVendor::AMD;
break;
case 0x106B:
m_vendor = GfxVendor::Apple;
break;
}
VkDriverId driverId = driverProperties.driverID;
if(driverId == VK_DRIVER_ID_MESA_RADV || driverId == VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA)
m_vendor = GfxVendor::Mesa;
cemuLog_log(LogType::Force, "Using GPU: {}", properties.properties.deviceName);
if (m_featureControl.deviceExtensions.driver_properties)
{
cemuLog_log(LogType::Force, "Driver version: {}", driverProperties.driverInfo);
if(m_vendor == GfxVendor::Nvidia)
{
// multithreaded pipelines on nvidia (requires 515 or higher)
m_featureControl.disableMultithreadedCompilation = (StringHelpers::ToInt(std::string(driverProperties.driverInfo)) < 515);
}
}
else
{
cemuLog_log(LogType::Force, "Driver version (as stored in device info): {:08}", properties.properties.driverVersion);
if(m_vendor == GfxVendor::Nvidia)
{
// if the driver does not support the extension,
// it is assumed the driver is under version 515
m_featureControl.disableMultithreadedCompilation = true;
}
}
}
void VulkanRenderer::GetDeviceFeatures()
{
/* Get Vulkan features via GetPhysicalDeviceFeatures2 */
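// each extension feature struct below is linked into a single pNext chain;
// one vkGetPhysicalDeviceFeatures2 call then fills in the support flags for all of them at once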
void* prevStruct = nullptr;
VkPhysicalDeviceCustomBorderColorFeaturesEXT bcf{};
bcf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT;
prevStruct = &bcf;
VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT pcc{};
pcc.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT;
pcc.pNext = prevStruct;
prevStruct = &pcc;
VkPhysicalDevicePresentIdFeaturesKHR pidf{};
pidf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR;
pidf.pNext = prevStruct;
prevStruct = &pidf;
VkPhysicalDevicePresentWaitFeaturesKHR pwf{};
pwf.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR;
pwf.pNext = prevStruct;
prevStruct = &pwf;
VkPhysicalDeviceFeatures2 physicalDeviceFeatures2{};
physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
physicalDeviceFeatures2.pNext = prevStruct;
vkGetPhysicalDeviceFeatures2(m_physicalDevice, &physicalDeviceFeatures2);
cemuLog_log(LogType::Force, "Vulkan: present_wait extension: {}", (pwf.presentWait && pidf.presentId) ? "supported" : "unsupported");
/* Get Vulkan device properties and limits */
VkPhysicalDeviceFloatControlsPropertiesKHR pfcp{};
prevStruct = nullptr;
if (m_featureControl.deviceExtensions.shader_float_controls)
{
pfcp.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR;
pfcp.pNext = prevStruct;
prevStruct = &pfcp;
}
VkPhysicalDeviceProperties2 prop2{};
prop2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
prop2.pNext = prevStruct;
vkGetPhysicalDeviceProperties2(m_physicalDevice, &prop2);
/* Determine which subfeatures we can use */
m_featureControl.deviceExtensions.pipeline_creation_cache_control = pcc.pipelineCreationCacheControl;
m_featureControl.deviceExtensions.custom_border_color_without_format = m_featureControl.deviceExtensions.custom_border_color && bcf.customBorderColorWithoutFormat;
m_featureControl.shaderFloatControls.shaderRoundingModeRTEFloat32 = m_featureControl.deviceExtensions.shader_float_controls && pfcp.shaderRoundingModeRTEFloat32;
if(!m_featureControl.shaderFloatControls.shaderRoundingModeRTEFloat32)
cemuLog_log(LogType::Force, "Shader round mode control not available on this device or driver. Some rendering issues might occur.");
if (!m_featureControl.deviceExtensions.pipeline_creation_cache_control)
{
cemuLog_log(LogType::Force, "VK_EXT_pipeline_creation_cache_control not supported. Cannot use asynchronous shader and pipeline compilation");
// if async shader compilation is enabled show warning message
if (GetConfig().async_compile)
LatteOverlay_pushNotification(_("Async shader compile is enabled but not supported by the graphics driver\nCemu will use synchronous compilation which can cause additional stutter").utf8_string(), 10000);
}
if (!m_featureControl.deviceExtensions.custom_border_color_without_format)
{
if (m_featureControl.deviceExtensions.custom_border_color)
{
cemuLog_log(LogType::Force, "VK_EXT_custom_border_color is present but only with limited support. Cannot emulate arbitrary border color");
}
else
{
cemuLog_log(LogType::Force, "VK_EXT_custom_border_color not supported. Cannot emulate arbitrary border color");
}
}
// get limits
m_featureControl.limits.minUniformBufferOffsetAlignment = std::max(prop2.properties.limits.minUniformBufferOffsetAlignment, (VkDeviceSize)4);
m_featureControl.limits.nonCoherentAtomSize = std::max(prop2.properties.limits.nonCoherentAtomSize, (VkDeviceSize)4);
cemuLog_log(LogType::Force, fmt::format("VulkanLimits: UBAlignment {0} nonCoherentAtomSize {1}", prop2.properties.limits.minUniformBufferOffsetAlignment, prop2.properties.limits.nonCoherentAtomSize));
}
VulkanRenderer::VulkanRenderer()
{
glslang::InitializeProcess();
cemuLog_log(LogType::Force, "------- Init Vulkan graphics backend -------");
const bool useValidationLayer = cemuLog_isLoggingEnabled(LogType::VulkanValidation);
if (useValidationLayer)
cemuLog_log(LogType::Force, "Validation layer is enabled");
VkResult err;
// build list of layers
m_layerNames.clear();
if (useValidationLayer)
m_layerNames.emplace_back("VK_LAYER_KHRONOS_validation");
// check available instance extensions
std::vector<const char*> enabledInstanceExtensions = CheckInstanceExtensionSupport(m_featureControl);
uint32 apiVersion = VK_API_VERSION_1_1;
if (vkEnumerateInstanceVersion(&apiVersion) == VK_SUCCESS)
{
if (VK_API_VERSION_MAJOR(apiVersion) < 1 || VK_API_VERSION_MINOR(apiVersion) < 2)
apiVersion = VK_API_VERSION_1_1;
}
else
apiVersion = VK_API_VERSION_1_1; // loader did not report a version, stick with 1.1
cemuLog_log(LogType::Force, fmt::format("Vulkan instance version: {}.{}", VK_API_VERSION_MAJOR(apiVersion), VK_API_VERSION_MINOR(apiVersion)));
VkApplicationInfo app_info{};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = EMULATOR_NAME;
app_info.applicationVersion = VK_MAKE_VERSION(EMULATOR_VERSION_MAJOR, EMULATOR_VERSION_MINOR, EMULATOR_VERSION_PATCH);
app_info.pEngineName = EMULATOR_NAME;
app_info.engineVersion = app_info.applicationVersion;
app_info.apiVersion = apiVersion;
VkInstanceCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
create_info.pApplicationInfo = &app_info;
create_info.ppEnabledExtensionNames = enabledInstanceExtensions.data();
create_info.enabledExtensionCount = enabledInstanceExtensions.size();
create_info.ppEnabledLayerNames = m_layerNames.data();
create_info.enabledLayerCount = m_layerNames.size();
err = vkCreateInstance(&create_info, nullptr, &m_instance);
if (err == VK_ERROR_LAYER_NOT_PRESENT) {
cemuLog_log(LogType::Force, "Failed to enable vulkan validation (VK_LAYER_KHRONOS_validation)");
create_info.enabledLayerCount = 0;
err = vkCreateInstance(&create_info, nullptr, &m_instance);
}
if (err != VK_SUCCESS)
throw std::runtime_error(fmt::format("Unable to create a Vulkan instance: {}", err));
if (!InitializeInstanceVulkan(m_instance))
throw std::runtime_error("Unable to load instanced Vulkan functions");
uint32_t device_count = 0;
vkEnumeratePhysicalDevices(m_instance, &device_count, nullptr);
if (device_count == 0)
throw std::runtime_error("Failed to find a GPU with Vulkan support.");
// create tmp surface to create a logical device
auto surface = CreateFramebufferSurface(m_instance, gui_getWindowInfo().window_main);
auto& config = GetConfig();
decltype(config.graphic_device_uuid) zero{};
const bool has_device_set = config.graphic_device_uuid != zero;
VkPhysicalDevice fallbackDevice = VK_NULL_HANDLE;
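// prefer the GPU whose device UUID matches the one stored in the config (if any);
// the first suitable device is remembered as a fallback in case the configured GPU is missing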
std::vector<VkPhysicalDevice> devices(device_count);
vkEnumeratePhysicalDevices(m_instance, &device_count, devices.data());
for (const auto& device : devices)
{
if (IsDeviceSuitable(surface, device))
{
if (fallbackDevice == VK_NULL_HANDLE)
fallbackDevice = device;
if (has_device_set)
{
VkPhysicalDeviceIDProperties physDeviceIDProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES };
VkPhysicalDeviceProperties2 physDeviceProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 };
physDeviceProps.pNext = &physDeviceIDProps;
vkGetPhysicalDeviceProperties2(device, &physDeviceProps);
if (memcmp(config.graphic_device_uuid.data(), physDeviceIDProps.deviceUUID, VK_UUID_SIZE) != 0)
continue;
}
m_physicalDevice = device;
break;
}
}
if (m_physicalDevice == VK_NULL_HANDLE && fallbackDevice != VK_NULL_HANDLE)
{
cemuLog_log(LogType::Force, "The selected GPU could not be found or is not suitable. Falling back to first available device instead");
m_physicalDevice = fallbackDevice;
config.graphic_device_uuid = {}; // resetting device selection
}
else if (m_physicalDevice == VK_NULL_HANDLE)
{
cemuLog_log(LogType::Force, "No physical GPU could be found with the required extensions and swap chain support.");
throw std::runtime_error("No physical GPU could be found with the required extensions and swap chain support.");
}
CheckDeviceExtensionSupport(m_physicalDevice, m_featureControl); // todo - merge this with GetDeviceFeatures and separate from IsDeviceSuitable?
if (m_featureControl.debugMarkersSupported)
cemuLog_log(LogType::Force, "Debug: Frame debugger attached, will use vkDebugMarkerSetObjectNameEXT");
DetermineVendor();
GetDeviceFeatures();
// init memory manager
memoryManager = new VKRMemoryManager(this);
try
{
VkPhysicalDeviceIDProperties physDeviceIDProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES };
VkPhysicalDeviceProperties2 physDeviceProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 };
physDeviceProps.pNext = &physDeviceIDProps;
vkGetPhysicalDeviceProperties2(m_physicalDevice, &physDeviceProps);
#if BOOST_OS_WINDOWS
m_dxgi_wrapper = std::make_unique<DXGIWrapper>(physDeviceIDProps.deviceLUID);
#endif
}
catch (const std::exception& ex)
{
cemuLog_log(LogType::Force, "can't create dxgi wrapper: {}", ex.what());
}
// create logical device
m_indices = FindQueueFamilies(surface, m_physicalDevice);
std::set<int> uniqueQueueFamilies = { m_indices.graphicsFamily, m_indices.presentFamily };
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos = CreateQueueCreateInfos(uniqueQueueFamilies);
VkPhysicalDeviceFeatures deviceFeatures = {};
deviceFeatures.independentBlend = VK_TRUE;
deviceFeatures.samplerAnisotropy = VK_TRUE;
deviceFeatures.imageCubeArray = VK_TRUE;
#if !BOOST_OS_MACOS
deviceFeatures.geometryShader = VK_TRUE;
deviceFeatures.logicOp = VK_TRUE;
#endif
deviceFeatures.occlusionQueryPrecise = VK_TRUE;
deviceFeatures.depthClamp = VK_TRUE;
deviceFeatures.depthBiasClamp = VK_TRUE;
if (m_vendor == GfxVendor::AMD)
{
deviceFeatures.robustBufferAccess = VK_TRUE;
cemuLog_log(LogType::Force, "Enable robust buffer access");
}
if (m_featureControl.mode.useTFEmulationViaSSBO)
{
deviceFeatures.vertexPipelineStoresAndAtomics = true;
}
void* deviceExtensionFeatures = nullptr;
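// optional extension feature structs are chained together here and later passed
// through VkDeviceCreateInfo::pNext so the corresponding features get enabled on the logical device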
// enable VK_EXT_pipeline_creation_cache_control
VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT cacheControlFeature{};
if (m_featureControl.deviceExtensions.pipeline_creation_cache_control)
{
cacheControlFeature.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT;
cacheControlFeature.pNext = deviceExtensionFeatures;
deviceExtensionFeatures = &cacheControlFeature;
cacheControlFeature.pipelineCreationCacheControl = VK_TRUE;
}
// enable VK_EXT_custom_border_color
VkPhysicalDeviceCustomBorderColorFeaturesEXT customBorderColorFeature{};
if (m_featureControl.deviceExtensions.custom_border_color_without_format)
{
customBorderColorFeature.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT;
customBorderColorFeature.pNext = deviceExtensionFeatures;
deviceExtensionFeatures = &customBorderColorFeature;
customBorderColorFeature.customBorderColors = VK_TRUE;
customBorderColorFeature.customBorderColorWithoutFormat = VK_TRUE;
}
// enable VK_KHR_present_id
VkPhysicalDevicePresentIdFeaturesKHR presentIdFeature{};
if(m_featureControl.deviceExtensions.present_wait)
{
presentIdFeature.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR;
presentIdFeature.pNext = deviceExtensionFeatures;
deviceExtensionFeatures = &presentIdFeature;
presentIdFeature.presentId = VK_TRUE;
}
// enable VK_KHR_present_wait
VkPhysicalDevicePresentWaitFeaturesKHR presentWaitFeature{};
if(m_featureControl.deviceExtensions.present_wait)
{
presentWaitFeature.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR;
presentWaitFeature.pNext = deviceExtensionFeatures;
deviceExtensionFeatures = &presentWaitFeature;
presentWaitFeature.presentWait = VK_TRUE;
}
std::vector<const char*> used_extensions;
VkDeviceCreateInfo createInfo = CreateDeviceCreateInfo(queueCreateInfos, deviceFeatures, deviceExtensionFeatures, used_extensions);
VkResult result = vkCreateDevice(m_physicalDevice, &createInfo, nullptr, &m_logicalDevice);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Vulkan: Unable to create a logical device. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to create a logical device: {}", result));
}
InitializeDeviceVulkan(m_logicalDevice);
vkGetDeviceQueue(m_logicalDevice, m_indices.graphicsFamily, 0, &m_graphicsQueue);
vkGetDeviceQueue(m_logicalDevice, m_indices.presentFamily, 0, &m_presentQueue);
vkDestroySurfaceKHR(m_instance, surface, nullptr);
if (useValidationLayer && m_featureControl.instanceExtensions.debug_utils)
{
PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT = reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(vkGetInstanceProcAddr(m_instance, "vkCreateDebugUtilsMessengerEXT"));
VkDebugUtilsMessengerCreateInfoEXT debugCallback{};
debugCallback.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
debugCallback.pNext = nullptr;
debugCallback.flags = 0;
debugCallback.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
debugCallback.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
debugCallback.pfnUserCallback = &DebugUtilsCallback;
vkCreateDebugUtilsMessengerEXT(m_instance, &debugCallback, nullptr, &m_debugCallback);
}
if (m_featureControl.instanceExtensions.debug_utils)
cemuLog_log(LogType::Force, "Using available debug function: vkCreateDebugUtilsMessengerEXT()");
// set initial viewport and scissor box size
m_state.currentViewport.width = 4;
m_state.currentViewport.height = 4;
m_state.currentScissorRect.extent.width = 4;
m_state.currentScissorRect.extent.height = 4;
QueryMemoryInfo();
QueryAvailableFormats();
CreateCommandPool();
CreateCommandBuffers();
CreateDescriptorPool();
swapchain_createDescriptorSetLayout();
// extension info
// cemuLog_log(LogType::Force, "VK_KHR_dynamic_rendering: {}", m_featureControl.deviceExtensions.dynamic_rendering?"supported":"not supported");
void* bufferPtr;
// init ringbuffer for uniform vars
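// memory type fallback order: host-cached first, then unified (device-local + host-cached),
// then plain host-coherent; as a last resort a host-visible but non-coherent allocation is used,
// which presumably requires explicit flushes elsewhere (hence the log message below)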
m_uniformVarBufferMemoryIsCoherent = false;
if (memoryManager->CreateBuffer2(UNIFORMVAR_RINGBUFFER_SIZE, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, m_uniformVarBuffer, m_uniformVarBufferMemory))
m_uniformVarBufferMemoryIsCoherent = true;
else if (memoryManager->CreateBuffer2(UNIFORMVAR_RINGBUFFER_SIZE, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, m_uniformVarBuffer, m_uniformVarBufferMemory))
m_uniformVarBufferMemoryIsCoherent = true; // unified memory
else if (memoryManager->CreateBuffer2(UNIFORMVAR_RINGBUFFER_SIZE, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, m_uniformVarBuffer, m_uniformVarBufferMemory))
m_uniformVarBufferMemoryIsCoherent = true;
else
{
memoryManager->CreateBuffer2(UNIFORMVAR_RINGBUFFER_SIZE, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, m_uniformVarBuffer, m_uniformVarBufferMemory);
}
if (!m_uniformVarBufferMemoryIsCoherent)
cemuLog_log(LogType::Force, "[Vulkan-Info] Using non-coherent memory for uniform data");
bufferPtr = nullptr;
vkMapMemory(m_logicalDevice, m_uniformVarBufferMemory, 0, VK_WHOLE_SIZE, 0, &bufferPtr);
m_uniformVarBufferPtr = (uint8*)bufferPtr;
// texture readback buffer
memoryManager->CreateBuffer(TEXTURE_READBACK_SIZE, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, m_textureReadbackBuffer, m_textureReadbackBufferMemory);
bufferPtr = nullptr;
vkMapMemory(m_logicalDevice, m_textureReadbackBufferMemory, 0, VK_WHOLE_SIZE, 0, &bufferPtr);
m_textureReadbackBufferPtr = (uint8*)bufferPtr;
// transform feedback ringbuffer
memoryManager->CreateBuffer(LatteStreamout_GetRingBufferSize(), VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | (m_featureControl.mode.useTFEmulationViaSSBO ? VK_BUFFER_USAGE_STORAGE_BUFFER_BIT : 0), 0, m_xfbRingBuffer, m_xfbRingBufferMemory);
// occlusion query result buffer
memoryManager->CreateBuffer(OCCLUSION_QUERY_POOL_SIZE * sizeof(uint64), VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, m_occlusionQueries.bufferQueryResults, m_occlusionQueries.memoryQueryResults);
bufferPtr = nullptr;
vkMapMemory(m_logicalDevice, m_occlusionQueries.memoryQueryResults, 0, VK_WHOLE_SIZE, 0, &bufferPtr);
m_occlusionQueries.ptrQueryResults = (uint64*)bufferPtr;
for (sint32 i = 0; i < OCCLUSION_QUERY_POOL_SIZE; i++)
m_occlusionQueries.list_availableQueryIndices.emplace_back(i);
// start compilation threads
RendererShaderVk::Init();
}
VulkanRenderer::~VulkanRenderer()
{
SubmitCommandBuffer();
WaitDeviceIdle();
WaitCommandBufferFinished(GetCurrentCommandBufferId());
// make sure compilation threads have been shut down
RendererShaderVk::Shutdown();
// shut down pipeline save thread
m_destructionRequested = true;
m_pipeline_cache_semaphore.notify();
m_pipeline_cache_save_thread.join();
// shut down imgui
ImGui_ImplVulkan_Shutdown();
// delete null objects
DeleteNullObjects();
// delete buffers
memoryManager->DeleteBuffer(m_uniformVarBuffer, m_uniformVarBufferMemory);
memoryManager->DeleteBuffer(m_textureReadbackBuffer, m_textureReadbackBufferMemory);
memoryManager->DeleteBuffer(m_xfbRingBuffer, m_xfbRingBufferMemory);
memoryManager->DeleteBuffer(m_occlusionQueries.bufferQueryResults, m_occlusionQueries.memoryQueryResults);
memoryManager->DeleteBuffer(m_bufferCache, m_bufferCacheMemory);
// texture memory
// todo
// upload buffers
// todo
m_padSwapchainInfo = nullptr;
m_mainSwapchainInfo = nullptr;
// clean up resources used for surface copy
surfaceCopy_cleanup();
// clean up default shaders
delete defaultShaders.copySurface_vs;
defaultShaders.copySurface_vs = nullptr;
delete defaultShaders.copySurface_psColor2Depth;
defaultShaders.copySurface_psColor2Depth = nullptr;
delete defaultShaders.copySurface_psDepth2Color;
defaultShaders.copySurface_psDepth2Color = nullptr;
// destroy misc
for (auto& it : m_cmd_buffer_fences)
{
vkDestroyFence(m_logicalDevice, it, nullptr);
it = VK_NULL_HANDLE;
}
if (m_pipelineLayout != VK_NULL_HANDLE)
vkDestroyPipelineLayout(m_logicalDevice, m_pipelineLayout, nullptr);
if (m_commandPool != VK_NULL_HANDLE)
vkDestroyCommandPool(m_logicalDevice, m_commandPool, nullptr);
// destroy debug callback
if (m_debugCallback)
{
PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT = reinterpret_cast<PFN_vkDestroyDebugUtilsMessengerEXT>(vkGetInstanceProcAddr(m_instance, "vkDestroyDebugUtilsMessengerEXT"));
vkDestroyDebugUtilsMessengerEXT(m_instance, m_debugCallback, nullptr);
}
// destroy instance, devices
if (m_instance != VK_NULL_HANDLE)
{
if (m_logicalDevice != VK_NULL_HANDLE)
{
vkDestroyDevice(m_logicalDevice, nullptr);
}
vkDestroyInstance(m_instance, nullptr);
}
// destroy memory manager
delete memoryManager;
// crashes?
//glslang::FinalizeProcess();
}
VulkanRenderer* VulkanRenderer::GetInstance()
{
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug(g_renderer && dynamic_cast<VulkanRenderer*>(g_renderer.get()));
// wrapped in #ifdef because the dynamic_cast is not optimized away even when cemu_assert_debug discards its result
#endif
return (VulkanRenderer*)g_renderer.get();
}
void VulkanRenderer::InitializeSurface(const Vector2i& size, bool mainWindow)
{
if (mainWindow)
{
m_mainSwapchainInfo = std::make_unique<SwapchainInfoVk>(mainWindow, size);
m_mainSwapchainInfo->Create();
// acquire first command buffer
InitFirstCommandBuffer();
}
else
{
m_padSwapchainInfo = std::make_unique<SwapchainInfoVk>(mainWindow, size);
// todo: figure out a way to exclusively create swapchain on main LatteThread
m_padSwapchainInfo->Create();
}
}
const std::unique_ptr<SwapchainInfoVk>& VulkanRenderer::GetChainInfoPtr(bool mainWindow) const
{
return mainWindow ? m_mainSwapchainInfo : m_padSwapchainInfo;
}
SwapchainInfoVk& VulkanRenderer::GetChainInfo(bool mainWindow) const
{
return *GetChainInfoPtr(mainWindow);
}
void VulkanRenderer::StopUsingPadAndWait()
{
m_destroyPadSwapchainNextAcquire.test_and_set();
m_destroyPadSwapchainNextAcquire.wait(true);
}
bool VulkanRenderer::IsPadWindowActive()
{
return IsSwapchainInfoValid(false);
}
void VulkanRenderer::HandleScreenshotRequest(LatteTextureView* texView, bool padView)
{
const bool hasScreenshotRequest = gui_hasScreenshotRequest();
if (!hasScreenshotRequest && m_screenshot_state == ScreenshotState::None)
return;
if (IsSwapchainInfoValid(false))
{
// we already took a pad view screenshot and want a main window screenshot
if (m_screenshot_state == ScreenshotState::Main && padView)
return;
if (m_screenshot_state == ScreenshotState::Pad && !padView)
return;
// remember which screenshot is left to take
if (m_screenshot_state == ScreenshotState::None)
m_screenshot_state = padView ? ScreenshotState::Main : ScreenshotState::Pad;
else
m_screenshot_state = ScreenshotState::None;
}
else
m_screenshot_state = ScreenshotState::None;
auto texViewVk = (LatteTextureViewVk*)texView;
auto baseImageTex = texViewVk->GetBaseImage();
baseImageTex->GetImageObj()->flagForCurrentCommandBuffer();
auto baseImageTexVkImage = baseImageTex->GetImageObj()->m_image;
//auto baseImageObj = baseImage->GetTextureImageView();
auto dumpImage = baseImageTex->GetImageObj()->m_image;
//dumpImage->flagForCurrentCommandBuffer();
int width, height;
baseImageTex->GetEffectiveSize(width, height, 0);
VkImage image = nullptr;
VkDeviceMemory imageMemory = nullptr;
auto format = baseImageTex->GetFormat();
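// if the texture is not already in a plain 8-bit RGB(A) format, blit it into a temporary
// RGBA8 image first so its contents can be copied into a host-visible buffer as raw pixels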
if (format != VK_FORMAT_R8G8B8A8_UNORM && format != VK_FORMAT_R8G8B8A8_SRGB && format != VK_FORMAT_R8G8B8_UNORM && format != VK_FORMAT_R8G8B8_SNORM)
{
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, format, &formatProps);
bool supportsBlit = (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT) != 0;
const bool dstUsesSRGB = (!padView && LatteGPUState.tvBufferUsesSRGB) || (padView && LatteGPUState.drcBufferUsesSRGB);
const auto blitFormat = dstUsesSRGB ? VK_FORMAT_R8G8B8A8_SRGB : VK_FORMAT_R8G8B8A8_UNORM;
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, blitFormat, &formatProps);
supportsBlit &= (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_DST_BIT) != 0;
// convert texture using blitting
if (supportsBlit)
{
VkImageCreateInfo imageInfo{};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.format = blitFormat;
imageInfo.extent = { (uint32)width, (uint32)height, 1 };
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.arrayLayers = 1;
imageInfo.mipLevels = 1;
imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
if (vkCreateImage(m_logicalDevice, &imageInfo, nullptr, &image) != VK_SUCCESS)
return;
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(m_logicalDevice, image, &memRequirements);
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex = memoryManager->FindMemoryType(memRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
if (vkAllocateMemory(m_logicalDevice, &allocInfo, nullptr, &imageMemory) != VK_SUCCESS)
{
vkDestroyImage(m_logicalDevice, image, nullptr);
return;
}
vkBindImageMemory(m_logicalDevice, image, imageMemory, 0);
// prepare dest image
{
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
vkCmdPipelineBarrier(getCurrentCommandBuffer(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
// prepare src image for blitting
{
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = baseImageTexVkImage;
barrier.subresourceRange.aspectMask = baseImageTex->GetImageAspect();
barrier.subresourceRange.baseMipLevel = texViewVk->firstMip;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = texViewVk->firstSlice;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(getCurrentCommandBuffer(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
VkOffset3D blitSize{ width, height, 1 };
VkImageBlit imageBlitRegion{};
imageBlitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlitRegion.srcSubresource.layerCount = 1;
imageBlitRegion.srcOffsets[1] = blitSize;
imageBlitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imageBlitRegion.dstSubresource.layerCount = 1;
imageBlitRegion.dstOffsets[1] = blitSize;
// Issue the blit command
vkCmdBlitImage(getCurrentCommandBuffer(), baseImageTexVkImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageBlitRegion, VK_FILTER_NEAREST);
// dest image to general layout
{
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
vkCmdPipelineBarrier(getCurrentCommandBuffer(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
// transition image back
{
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = baseImageTexVkImage;
barrier.subresourceRange.aspectMask = baseImageTex->GetImageAspect();
barrier.subresourceRange.baseMipLevel = texViewVk->firstMip;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = texViewVk->firstSlice;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
vkCmdPipelineBarrier(getCurrentCommandBuffer(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
format = VK_FORMAT_R8G8B8A8_UNORM;
dumpImage = image;
}
}
uint32 size;
switch (format)
{
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SRGB:
size = 4 * width * height;
break;
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_R8G8B8_SRGB:
size = 3 * width * height;
break;
default:
size = 0;
}
if (size == 0)
{
cemu_assert_debug(false);
return;
}
VkBufferImageCopy region{};
region.bufferOffset = 0;
region.bufferRowLength = width;
region.bufferImageHeight = height;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageSubresource.mipLevel = 0;
region.imageOffset = { 0,0,0 };
region.imageExtent = { (uint32)width,(uint32)height,1 };
void* bufferPtr = nullptr;
VkBuffer buffer = nullptr;
VkDeviceMemory bufferMemory = nullptr;
memoryManager->CreateBuffer(size, VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT, buffer, bufferMemory);
vkMapMemory(m_logicalDevice, bufferMemory, 0, VK_WHOLE_SIZE, 0, &bufferPtr);
{
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = dumpImage;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(getCurrentCommandBuffer(), VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
vkCmdCopyImageToBuffer(getCurrentCommandBuffer(), dumpImage, VK_IMAGE_LAYOUT_GENERAL, buffer, 1, &region);
SubmitCommandBuffer();
WaitCommandBufferFinished(GetCurrentCommandBufferId());
bool formatValid = true;
std::vector<uint8> rgb_data;
rgb_data.reserve(3 * width * height);
switch (format)
{
case VK_FORMAT_R8G8B8A8_UNORM:
for (auto ptr = (uint8*)bufferPtr; ptr < (uint8*)bufferPtr + size; ptr += 4)
{
rgb_data.emplace_back(*ptr);
rgb_data.emplace_back(*(ptr + 1));
rgb_data.emplace_back(*(ptr + 2));
}
break;
case VK_FORMAT_R8G8B8A8_SRGB:
for (auto ptr = (uint8*)bufferPtr; ptr < (uint8*)bufferPtr + size; ptr += 4)
{
rgb_data.emplace_back(SRGBComponentToRGB(*ptr));
rgb_data.emplace_back(SRGBComponentToRGB(*(ptr + 1)));
rgb_data.emplace_back(SRGBComponentToRGB(*(ptr + 2)));
}
break;
case VK_FORMAT_R8G8B8_UNORM:
// rgb_data only has reserved capacity here, so append instead of writing past begin()
std::copy((uint8*)bufferPtr, (uint8*)bufferPtr + size, std::back_inserter(rgb_data));
break;
case VK_FORMAT_R8G8B8_SRGB:
std::transform((uint8*)bufferPtr, (uint8*)bufferPtr + size, std::back_inserter(rgb_data), SRGBComponentToRGB);
break;
default:
formatValid = false;
cemu_assert_debug(false);
}
vkUnmapMemory(m_logicalDevice, bufferMemory);
vkFreeMemory(m_logicalDevice, bufferMemory, nullptr);
vkDestroyBuffer(m_logicalDevice, buffer, nullptr);
if (image)
vkDestroyImage(m_logicalDevice, image, nullptr);
if (imageMemory)
vkFreeMemory(m_logicalDevice, imageMemory, nullptr);
if (formatValid)
SaveScreenshot(rgb_data, width, height, !padView);
}
static const float kQueuePriority = 1.0f;
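// kept at file scope so the pointer stored in VkDeviceQueueCreateInfo::pQueuePriorities
// remains valid after CreateQueueCreateInfos() returns, up to the vkCreateDevice call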
std::vector<VkDeviceQueueCreateInfo> VulkanRenderer::CreateQueueCreateInfos(const std::set<sint32>& uniqueQueueFamilies) const
{
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
for (int queueFamily : uniqueQueueFamilies)
{
VkDeviceQueueCreateInfo queueCreateInfo{};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &kQueuePriority;
queueCreateInfos.emplace_back(queueCreateInfo);
}
return queueCreateInfos;
}
VkDeviceCreateInfo VulkanRenderer::CreateDeviceCreateInfo(const std::vector<VkDeviceQueueCreateInfo>& queueCreateInfos, const VkPhysicalDeviceFeatures& deviceFeatures, const void* deviceExtensionStructs, std::vector<const char*>& used_extensions) const
{
used_extensions = kRequiredDeviceExtensions;
if (m_featureControl.deviceExtensions.tooling_info)
used_extensions.emplace_back(VK_EXT_TOOLING_INFO_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.depth_range_unrestricted)
used_extensions.emplace_back(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.nv_fill_rectangle)
used_extensions.emplace_back(VK_NV_FILL_RECTANGLE_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.pipeline_feedback)
used_extensions.emplace_back(VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.cubic_filter)
used_extensions.emplace_back(VK_EXT_FILTER_CUBIC_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.custom_border_color)
used_extensions.emplace_back(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.driver_properties)
used_extensions.emplace_back(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.external_memory_host)
used_extensions.emplace_back(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.synchronization2)
used_extensions.emplace_back(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.dynamic_rendering)
used_extensions.emplace_back(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.shader_float_controls)
used_extensions.emplace_back(VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.present_wait)
used_extensions.emplace_back(VK_KHR_PRESENT_ID_EXTENSION_NAME);
if (m_featureControl.deviceExtensions.present_wait)
used_extensions.emplace_back(VK_KHR_PRESENT_WAIT_EXTENSION_NAME);
VkDeviceCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
createInfo.queueCreateInfoCount = (uint32_t)queueCreateInfos.size();
createInfo.pEnabledFeatures = &deviceFeatures;
createInfo.enabledExtensionCount = used_extensions.size();
createInfo.ppEnabledExtensionNames = used_extensions.data();
createInfo.pNext = deviceExtensionStructs;
if (!m_layerNames.empty())
{
createInfo.enabledLayerCount = m_layerNames.size();
createInfo.ppEnabledLayerNames = m_layerNames.data();
}
return createInfo;
}
RendererShader* VulkanRenderer::shader_create(RendererShader::ShaderType type, uint64 baseHash, uint64 auxHash, const std::string& source, bool isGameShader, bool isGfxPackShader)
{
return new RendererShaderVk(type, baseHash, auxHash, isGameShader, isGfxPackShader, source);
}
VulkanRenderer::QueueFamilyIndices VulkanRenderer::FindQueueFamilies(VkSurfaceKHR surface, VkPhysicalDevice device)
{
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, queueFamilies.data());
QueueFamilyIndices indices;
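// look for a family with graphics support and one that can present to the given surface;
// they may, but do not have to, be the same family index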
for (int i = 0; i < (int)queueFamilies.size(); ++i)
{
const auto& queueFamily = queueFamilies[i];
if (queueFamily.queueCount > 0 && queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT)
indices.graphicsFamily = i;
VkBool32 presentSupport = false;
const VkResult result = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (result != VK_SUCCESS)
throw std::runtime_error(fmt::format("Error while attempting to check if a surface supports presentation: {}", result));
if (queueFamily.queueCount > 0 && presentSupport)
indices.presentFamily = i;
if (indices.IsComplete())
break;
}
return indices;
}
bool VulkanRenderer::CheckDeviceExtensionSupport(const VkPhysicalDevice device, FeatureControl& info)
{
std::vector<VkExtensionProperties> availableDeviceExtensions;
auto isExtensionAvailable = [&availableDeviceExtensions](const char* extensionName) -> bool
{
return std::find_if(availableDeviceExtensions.begin(), availableDeviceExtensions.end(),
[&extensionName](const VkExtensionProperties& prop) -> bool
{
return strcmp(prop.extensionName, extensionName) == 0;
}) != availableDeviceExtensions.cend();
};
uint32_t extensionCount;
VkResult result = vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, nullptr);
if (result != VK_SUCCESS)
throw std::runtime_error(fmt::format("Cannot retrieve count of properties for a physical device: {}", result));
availableDeviceExtensions.resize(extensionCount);
result = vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, availableDeviceExtensions.data());
if (result != VK_SUCCESS)
throw std::runtime_error(fmt::format("Cannot retrieve properties for a physical device: {}", result));
std::set<std::string> requiredExtensions(kRequiredDeviceExtensions.begin(), kRequiredDeviceExtensions.end());
for (const auto& extension : availableDeviceExtensions)
{
requiredExtensions.erase(extension.extensionName);
}
info.deviceExtensions.tooling_info = isExtensionAvailable(VK_EXT_TOOLING_INFO_EXTENSION_NAME);
info.deviceExtensions.transform_feedback = isExtensionAvailable(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
info.deviceExtensions.depth_range_unrestricted = isExtensionAvailable(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME);
info.deviceExtensions.nv_fill_rectangle = isExtensionAvailable(VK_NV_FILL_RECTANGLE_EXTENSION_NAME);
info.deviceExtensions.pipeline_feedback = isExtensionAvailable(VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME);
info.deviceExtensions.cubic_filter = isExtensionAvailable(VK_EXT_FILTER_CUBIC_EXTENSION_NAME);
info.deviceExtensions.custom_border_color = isExtensionAvailable(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
info.deviceExtensions.driver_properties = isExtensionAvailable(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME);
info.deviceExtensions.external_memory_host = isExtensionAvailable(VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME);
info.deviceExtensions.synchronization2 = isExtensionAvailable(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
info.deviceExtensions.shader_float_controls = isExtensionAvailable(VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME);
info.deviceExtensions.dynamic_rendering = false; // isExtensionAvailable(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
// dynamic rendering doesn't provide any benefits for us right now. Driver implementations are very unoptimized as of Feb 2022
info.deviceExtensions.present_wait = isExtensionAvailable(VK_KHR_PRESENT_WAIT_EXTENSION_NAME) && isExtensionAvailable(VK_KHR_PRESENT_ID_EXTENSION_NAME);
// check for framedebuggers
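// VK_EXT_tooling_info reports attached tools (such as RenderDoc); a tool advertising the
// DEBUG_MARKERS purpose means debug marker names set by the renderer will be picked up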
info.debugMarkersSupported = false;
if (info.deviceExtensions.tooling_info && vkGetPhysicalDeviceToolPropertiesEXT)
{
uint32_t toolCount = 0;
if (vkGetPhysicalDeviceToolPropertiesEXT(device, &toolCount, nullptr) == VK_SUCCESS)
{
std::vector<VkPhysicalDeviceToolPropertiesEXT> toolProperties(toolCount);
if (toolCount > 0 && vkGetPhysicalDeviceToolPropertiesEXT(device, &toolCount, toolProperties.data()) == VK_SUCCESS)
{
for (auto& itr : toolProperties)
{
if ((itr.purposes & VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT) != 0)
info.debugMarkersSupported = true;
}
}
}
}
return requiredExtensions.empty();
}
std::vector<const char*> VulkanRenderer::CheckInstanceExtensionSupport(FeatureControl& info)
{
std::vector<VkExtensionProperties> availableInstanceExtensions;
std::vector<const char*> enabledInstanceExtensions;
VkResult err;
auto isExtensionAvailable = [&availableInstanceExtensions](const char* extensionName) -> bool
{
return std::find_if(availableInstanceExtensions.begin(), availableInstanceExtensions.end(),
[&extensionName](const VkExtensionProperties& prop) -> bool
{
return strcmp(prop.extensionName, extensionName) == 0;
}) != availableInstanceExtensions.cend();
};
// get list of available instance extensions
uint32_t count;
if ((err = vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr)) != VK_SUCCESS)
throw std::runtime_error(fmt::format("Failed to retrieve the instance extension properties : {}", err));
availableInstanceExtensions.resize(count);
if ((err = vkEnumerateInstanceExtensionProperties(nullptr, &count, availableInstanceExtensions.data())) != VK_SUCCESS)
throw std::runtime_error(fmt::format("Failed to retrieve the instance extension properties: {}", err));
// build list of required extensions
std::vector<const char*> requiredInstanceExtensions;
requiredInstanceExtensions.emplace_back(VK_KHR_SURFACE_EXTENSION_NAME);
#if BOOST_OS_WINDOWS
requiredInstanceExtensions.emplace_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#elif BOOST_OS_LINUX
auto backend = gui_getWindowInfo().window_main.backend;
if(backend == WindowHandleInfo::Backend::X11)
requiredInstanceExtensions.emplace_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
#ifdef HAS_WAYLAND
else if (backend == WindowHandleInfo::Backend::WAYLAND)
requiredInstanceExtensions.emplace_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
#endif
#elif BOOST_OS_MACOS
requiredInstanceExtensions.emplace_back(VK_EXT_METAL_SURFACE_EXTENSION_NAME);
#endif
if (cemuLog_isLoggingEnabled(LogType::VulkanValidation))
requiredInstanceExtensions.emplace_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
// make sure all required extensions are supported
for (const auto& extension : availableInstanceExtensions)
{
for (auto it = requiredInstanceExtensions.begin(); it < requiredInstanceExtensions.end(); ++it)
{
if (strcmp(*it, extension.extensionName) == 0)
{
enabledInstanceExtensions.emplace_back(*it);
requiredInstanceExtensions.erase(it);
break;
}
}
}
if (!requiredInstanceExtensions.empty())
{
cemuLog_log(LogType::Force, "The following required Vulkan instance extensions are not supported:");
std::stringstream ss;
for (const auto& extension : requiredInstanceExtensions)
{
ss << extension << '\n';
cemuLog_log(LogType::Force, "{}", extension);
}
cemuLog_waitForFlush();
throw std::runtime_error(ss.str());
}
// check for optional extensions
info.instanceExtensions.debug_utils = isExtensionAvailable(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
if (info.instanceExtensions.debug_utils)
enabledInstanceExtensions.emplace_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
return enabledInstanceExtensions;
}
bool VulkanRenderer::IsDeviceSuitable(VkSurfaceKHR surface, const VkPhysicalDevice& device)
{
if (!FindQueueFamilies(surface, device).IsComplete())
return false;
// check API version (using Vulkan 1.0 way of querying properties)
VkPhysicalDeviceProperties properties{};
vkGetPhysicalDeviceProperties(device, &properties);
uint32 vkVersionMajor = VK_API_VERSION_MAJOR(properties.apiVersion);
uint32 vkVersionMinor = VK_API_VERSION_MINOR(properties.apiVersion);
if (vkVersionMajor < 1 || (vkVersionMajor == 1 && vkVersionMinor < 1))
return false; // minimum required version is Vulkan 1.1
FeatureControl info;
if (!CheckDeviceExtensionSupport(device, info))
return false;
const auto swapchainSupport = SwapchainInfoVk::QuerySwapchainSupport(surface, device);
return !swapchainSupport.formats.empty() && !swapchainSupport.presentModes.empty();
}
#if BOOST_OS_WINDOWS
VkSurfaceKHR VulkanRenderer::CreateWinSurface(VkInstance instance, HWND hwindow)
{
VkWin32SurfaceCreateInfoKHR sci{};
sci.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
sci.hwnd = hwindow;
sci.hinstance = GetModuleHandle(nullptr);
VkSurfaceKHR result;
VkResult err;
if ((err = vkCreateWin32SurfaceKHR(instance, &sci, nullptr, &result)) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Cannot create a Win32 Vulkan surface: {}", (sint32)err);
throw std::runtime_error(fmt::format("Cannot create a Win32 Vulkan surface: {}", err));
}
return result;
}
#endif
#if BOOST_OS_LINUX
VkSurfaceKHR VulkanRenderer::CreateXlibSurface(VkInstance instance, Display* dpy, Window window)
{
VkXlibSurfaceCreateInfoKHR sci{};
sci.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
sci.flags = 0;
sci.dpy = dpy;
sci.window = window;
VkSurfaceKHR result;
VkResult err;
if ((err = vkCreateXlibSurfaceKHR(instance, &sci, nullptr, &result)) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Cannot create a X11 Vulkan surface: {}", (sint32)err);
throw std::runtime_error(fmt::format("Cannot create a X11 Vulkan surface: {}", err));
}
return result;
}
VkSurfaceKHR VulkanRenderer::CreateXcbSurface(VkInstance instance, xcb_connection_t* connection, xcb_window_t window)
{
VkXcbSurfaceCreateInfoKHR sci{};
sci.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
sci.flags = 0;
sci.connection = connection;
sci.window = window;
VkSurfaceKHR result;
VkResult err;
if ((err = vkCreateXcbSurfaceKHR(instance, &sci, nullptr, &result)) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Cannot create a XCB Vulkan surface: {}", (sint32)err);
throw std::runtime_error(fmt::format("Cannot create a XCB Vulkan surface: {}", err));
}
return result;
}
#ifdef HAS_WAYLAND
VkSurfaceKHR VulkanRenderer::CreateWaylandSurface(VkInstance instance, wl_display* display, wl_surface* surface)
{
VkWaylandSurfaceCreateInfoKHR sci{};
sci.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
sci.flags = 0;
sci.display = display;
sci.surface = surface;
VkSurfaceKHR result;
VkResult err;
if ((err = vkCreateWaylandSurfaceKHR(instance, &sci, nullptr, &result)) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Cannot create a Wayland Vulkan surface: {}", (sint32)err);
throw std::runtime_error(fmt::format("Cannot create a Wayland Vulkan surface: {}", err));
}
return result;
}
#endif // HAS_WAYLAND
#endif // BOOST_OS_LINUX
VkSurfaceKHR VulkanRenderer::CreateFramebufferSurface(VkInstance instance, struct WindowHandleInfo& windowInfo)
{
#if BOOST_OS_WINDOWS
return CreateWinSurface(instance, windowInfo.hwnd);
#elif BOOST_OS_LINUX
if(windowInfo.backend == WindowHandleInfo::Backend::X11)
return CreateXlibSurface(instance, windowInfo.xlib_display, windowInfo.xlib_window);
#ifdef HAS_WAYLAND
if(windowInfo.backend == WindowHandleInfo::Backend::WAYLAND)
return CreateWaylandSurface(instance, windowInfo.display, windowInfo.surface);
#endif
return {};
#elif BOOST_OS_MACOS
return CreateCocoaSurface(instance, windowInfo.handle);
#endif
}
void VulkanRenderer::CreateCommandPool()
{
VkCommandPoolCreateInfo poolInfo{};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = m_indices.graphicsFamily;
poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
VkResult result = vkCreateCommandPool(m_logicalDevice, &poolInfo, nullptr, &m_commandPool);
if (result != VK_SUCCESS)
throw std::runtime_error(fmt::format("Failed to create command pool: {}", result));
}
void VulkanRenderer::CreateCommandBuffers()
{
auto it = m_cmd_buffer_fences.begin();
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; // the first fence starts signaled since no work has been submitted for it yet
vkCreateFence(m_logicalDevice, &fenceInfo, nullptr, &*it);
++it;
fenceInfo.flags = 0;
for (; it != m_cmd_buffer_fences.end(); ++it)
{
vkCreateFence(m_logicalDevice, &fenceInfo, nullptr, &*it);
}
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = m_commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)m_commandBuffers.size();
const VkResult result = vkAllocateCommandBuffers(m_logicalDevice, &allocInfo, m_commandBuffers.data());
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to allocate command buffers: {}", result);
throw std::runtime_error(fmt::format("Failed to allocate command buffers: {}", result));
}
for (auto& semItr : m_commandBufferSemaphores)
{
VkSemaphoreCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
if (vkCreateSemaphore(m_logicalDevice, &info, nullptr, &semItr) != VK_SUCCESS)
UnrecoverableError("Failed to create semaphore for command buffer");
}
}
bool VulkanRenderer::IsSwapchainInfoValid(bool mainWindow) const
{
auto& chainInfo = GetChainInfoPtr(mainWindow);
return chainInfo && chainInfo->IsValid();
}
void VulkanRenderer::CreateNullTexture(NullTexture& nullTex, VkImageType imageType)
{
// these are used when the game requests NULL ptr textures
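// a tiny RGBA8 image (4x1 for 1D, 4x4 for 2D) cleared to zero, plus a matching view and sampler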
VkImageCreateInfo imageInfo{};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
if (imageType == VK_IMAGE_TYPE_1D)
{
imageInfo.extent.width = 4;
imageInfo.extent.height = 1;
}
else if (imageType == VK_IMAGE_TYPE_2D)
{
imageInfo.extent.width = 4;
imageInfo.extent.height = 4; // 4x4 for the 2D null texture (1D uses 4x1 above)
}
else
{
cemu_assert(false);
}
imageInfo.mipLevels = 1;
imageInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.extent.depth = 1;
imageInfo.arrayLayers = 1;
imageInfo.imageType = imageType;
imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
if (vkCreateImage(m_logicalDevice, &imageInfo, nullptr, &nullTex.image) != VK_SUCCESS)
UnrecoverableError("Failed to create nullTex image");
nullTex.allocation = memoryManager->imageMemoryAllocate(nullTex.image);
VkClearColorValue clrColor{};
ClearColorImageRaw(nullTex.image, 0, 0, clrColor, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL);
// texture view
VkImageViewCreateInfo viewInfo{};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = nullTex.image;
if (imageType == VK_IMAGE_TYPE_1D)
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_1D;
else if (imageType == VK_IMAGE_TYPE_2D)
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
else
{
cemu_assert(false);
}
viewInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
viewInfo.subresourceRange.levelCount = 1;
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
if (vkCreateImageView(m_logicalDevice, &viewInfo, nullptr, &nullTex.view) != VK_SUCCESS)
UnrecoverableError("Failed to create nullTex image view");
// sampler
VkSamplerCreateInfo samplerInfo{};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerInfo.magFilter = VK_FILTER_LINEAR;
samplerInfo.minFilter = VK_FILTER_LINEAR;
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.mipLodBias = 0.0f;
samplerInfo.compareOp = VK_COMPARE_OP_NEVER;
samplerInfo.minLod = 0.0f;
samplerInfo.maxLod = 0.0f;
samplerInfo.maxAnisotropy = 1.0;
samplerInfo.anisotropyEnable = VK_FALSE;
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
vkCreateSampler(m_logicalDevice, &samplerInfo, nullptr, &nullTex.sampler);
}
void VulkanRenderer::CreateNullObjects()
{
CreateNullTexture(nullTexture1D, VK_IMAGE_TYPE_1D);
CreateNullTexture(nullTexture2D, VK_IMAGE_TYPE_2D);
}
void VulkanRenderer::DeleteNullTexture(NullTexture& nullTex)
{
vkDestroySampler(m_logicalDevice, nullTex.sampler, nullptr);
nullTex.sampler = VK_NULL_HANDLE;
vkDestroyImageView(m_logicalDevice, nullTex.view, nullptr);
nullTex.view = VK_NULL_HANDLE;
vkDestroyImage(m_logicalDevice, nullTex.image, nullptr);
nullTex.image = VK_NULL_HANDLE;
memoryManager->imageMemoryFree(nullTex.allocation);
nullTex.allocation = nullptr;
}
void VulkanRenderer::DeleteNullObjects()
{
DeleteNullTexture(nullTexture1D);
DeleteNullTexture(nullTexture2D);
}
void VulkanRenderer::ImguiInit()
{
if (m_imguiRenderPass == VK_NULL_HANDLE)
{
// TODO: renderpass swapchain format may change between srgb and rgb -> need reinit
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = m_mainSwapchainInfo->m_surfaceFormat.format;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
const auto result = vkCreateRenderPass(m_logicalDevice, &renderPassInfo, nullptr, &m_imguiRenderPass);
if (result != VK_SUCCESS)
throw VkException(result, "can't create imgui renderpass");
}
ImGui_ImplVulkan_InitInfo info{};
info.Instance = m_instance;
info.PhysicalDevice = m_physicalDevice;
info.Device = m_logicalDevice;
info.QueueFamily = m_indices.presentFamily;
info.Queue = m_presentQueue;
info.PipelineCache = m_pipeline_cache;
info.DescriptorPool = m_descriptorPool;
info.MinImageCount = m_mainSwapchainInfo->m_swapchainImages.size();
info.ImageCount = info.MinImageCount;
ImGui_ImplVulkan_Init(&info, m_imguiRenderPass);
}
void VulkanRenderer::Initialize()
{
Renderer::Initialize();
CreatePipelineCache();
ImguiInit();
CreateNullObjects();
}
void VulkanRenderer::Shutdown()
{
Renderer::Shutdown();
SubmitCommandBuffer();
WaitDeviceIdle();
if (m_imguiRenderPass != VK_NULL_HANDLE)
{
vkDestroyRenderPass(m_logicalDevice, m_imguiRenderPass, nullptr);
m_imguiRenderPass = VK_NULL_HANDLE;
}
RendererShaderVk::Shutdown();
}
void VulkanRenderer::UnrecoverableError(const char* errMsg) const
{
cemuLog_log(LogType::Force, "Unrecoverable error in Vulkan renderer");
cemuLog_log(LogType::Force, "Msg: {}", errMsg);
throw std::runtime_error(errMsg);
}
struct VulkanRequestedFormat_t
{
VkFormat fmt;
const char* name;
bool isDepth;
bool mustSupportAttachment;
bool mustSupportBlending;
};
#define reqColorFormat(__name, __reqAttachment, __reqBlend) {__name, ""#__name, false, __reqAttachment, __reqBlend}
#define reqDepthFormat(__name) {__name, ""#__name, true, true, false}
VulkanRequestedFormat_t requestedFormatList[] =
{
reqDepthFormat(VK_FORMAT_D32_SFLOAT_S8_UINT),
reqDepthFormat(VK_FORMAT_D24_UNORM_S8_UINT),
reqDepthFormat(VK_FORMAT_D32_SFLOAT),
reqDepthFormat(VK_FORMAT_D16_UNORM),
reqColorFormat(VK_FORMAT_R32G32B32A32_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R32G32B32A32_UINT, true, false),
reqColorFormat(VK_FORMAT_R16G16B16A16_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R16G16B16A16_UINT, true, false),
reqColorFormat(VK_FORMAT_R16G16B16A16_UNORM, true, true),
reqColorFormat(VK_FORMAT_R16G16B16A16_SNORM, true, true),
reqColorFormat(VK_FORMAT_R8G8B8A8_UNORM, true, true),
reqColorFormat(VK_FORMAT_R8G8B8A8_SNORM, true, true),
reqColorFormat(VK_FORMAT_R8G8B8A8_SRGB, true, true),
reqColorFormat(VK_FORMAT_R8G8B8A8_UINT, true, false),
reqColorFormat(VK_FORMAT_R8G8B8A8_SINT, true, false),
reqColorFormat(VK_FORMAT_R4G4B4A4_UNORM_PACK16, true, true),
reqColorFormat(VK_FORMAT_R32G32_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R32G32_UINT, true, false),
reqColorFormat(VK_FORMAT_R16G16_UNORM, true, true),
reqColorFormat(VK_FORMAT_R16G16_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R8G8_UNORM, true, true),
reqColorFormat(VK_FORMAT_R8G8_SNORM, true, true),
reqColorFormat(VK_FORMAT_R4G4_UNORM_PACK8, true, true),
reqColorFormat(VK_FORMAT_R32_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R32_UINT, true, false),
reqColorFormat(VK_FORMAT_R16_SFLOAT, true, true),
reqColorFormat(VK_FORMAT_R16_UNORM, true, true),
reqColorFormat(VK_FORMAT_R16_SNORM, true, true),
reqColorFormat(VK_FORMAT_R8_UNORM, true, true),
reqColorFormat(VK_FORMAT_R8_SNORM, true, true),
reqColorFormat(VK_FORMAT_R5G6B5_UNORM_PACK16, true, true),
reqColorFormat(VK_FORMAT_R5G5B5A1_UNORM_PACK16, true, true),
reqColorFormat(VK_FORMAT_B10G11R11_UFLOAT_PACK32, true, true),
reqColorFormat(VK_FORMAT_R16G16B16A16_SNORM, true, true),
reqColorFormat(VK_FORMAT_BC1_RGBA_SRGB_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC1_RGBA_UNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC2_UNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC2_SRGB_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC3_UNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC3_SRGB_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC4_UNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC4_SNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC5_UNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_BC5_SNORM_BLOCK, false, false),
reqColorFormat(VK_FORMAT_A2B10G10R10_UNORM_PACK32, true, true),
reqColorFormat(VK_FORMAT_R32_SFLOAT, true, true)
};
void VulkanRenderer::QueryMemoryInfo()
{
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(m_physicalDevice, &memProperties);
cemuLog_log(LogType::Force, "Vulkan device memory info:");
for (uint32 i = 0; i < memProperties.memoryHeapCount; i++)
{
cemuLog_log(LogType::Force, "Heap {} - Size {}MB Flags 0x{:08x}", i, (sint32)(memProperties.memoryHeaps[i].size / 1024ll / 1024ll), (uint32)memProperties.memoryHeaps[i].flags);
}
for (uint32 i = 0; i < memProperties.memoryTypeCount; i++)
{
cemuLog_log(LogType::Force, "Memory {} - HeapIndex {} Flags 0x{:08x}", i, (sint32)memProperties.memoryTypes[i].heapIndex, (uint32)memProperties.memoryTypes[i].propertyFlags);
}
}
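// probe optional texture formats, cache which fallbacks are required and log formats the device does not support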
void VulkanRenderer::QueryAvailableFormats()
{
// D24S8
VkFormatProperties fmtProp{};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, VK_FORMAT_D24_UNORM_S8_UINT, &fmtProp);
if (fmtProp.optimalTilingFeatures != 0) // todo - more restrictive check
{
m_supportedFormatInfo.fmt_d24_unorm_s8_uint = true;
}
// R4G4
fmtProp = {};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, VK_FORMAT_R4G4_UNORM_PACK8, &fmtProp);
if (fmtProp.optimalTilingFeatures != 0)
{
m_supportedFormatInfo.fmt_r4g4_unorm_pack = true;
}
// R5G6B5
fmtProp = {};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, VK_FORMAT_R5G6B5_UNORM_PACK16, &fmtProp);
if (fmtProp.optimalTilingFeatures != 0)
{
m_supportedFormatInfo.fmt_r5g6b5_unorm_pack = true;
}
// R4G4B4A4
fmtProp = {};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, VK_FORMAT_R4G4B4A4_UNORM_PACK16, &fmtProp);
if (fmtProp.optimalTilingFeatures != 0)
{
m_supportedFormatInfo.fmt_r4g4b4a4_unorm_pack = true;
}
// A1R5G5B5
fmtProp = {};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, VK_FORMAT_A1R5G5B5_UNORM_PACK16, &fmtProp);
if (fmtProp.optimalTilingFeatures != 0)
{
m_supportedFormatInfo.fmt_a1r5g5b5_unorm_pack = true;
}
// print info about unsupported formats to log
for (auto& it : requestedFormatList)
{
fmtProp = {};
vkGetPhysicalDeviceFormatProperties(m_physicalDevice, it.fmt, &fmtProp);
VkFormatFeatureFlags requestedBits = 0;
if (it.mustSupportAttachment)
{
if (it.isDepth)
requestedBits |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
else
requestedBits |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
if (!it.isDepth && it.mustSupportBlending)
requestedBits |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT;
}
requestedBits |= VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
requestedBits |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
if (fmtProp.optimalTilingFeatures == 0)
{
cemuLog_log(LogType::Force, "{} not supported", it.name);
}
else if ((fmtProp.optimalTilingFeatures & requestedBits) != requestedBits)
{
//std::string missingStr;
//missingStr.assign(fmt::format("{} missing features:", it.name));
//if (!(fmtProp.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) && !it.isDepth && it.mustSupportAttachment)
// missingStr.append(" COLOR_ATTACHMENT");
//if (!(fmtProp.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) && !it.isDepth && it.mustSupportBlending)
// missingStr.append(" COLOR_ATTACHMENT_BLEND");
//if (!(fmtProp.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) && it.isDepth && it.mustSupportAttachment)
// missingStr.append(" DEPTH_ATTACHMENT");
//if (!(fmtProp.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT))
// missingStr.append(" TRANSFER_DST");
//if (!(fmtProp.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
// missingStr.append(" SAMPLED_IMAGE");
//cemuLog_log(LogType::Force, "{}", missingStr.c_str());
}
}
}
bool VulkanRenderer::ImguiBegin(bool mainWindow)
{
if (!Renderer::ImguiBegin(mainWindow))
return false;
auto& chainInfo = GetChainInfo(mainWindow);
if (!AcquireNextSwapchainImage(mainWindow))
return false;
draw_endRenderPass();
m_state.currentPipeline = VK_NULL_HANDLE;
ImGui_ImplVulkan_CreateFontsTexture(m_state.currentCommandBuffer);
ImGui_ImplVulkan_NewFrame(m_state.currentCommandBuffer, chainInfo.m_swapchainFramebuffers[chainInfo.swapchainImageIndex], chainInfo.getExtent());
ImGui_UpdateWindowInformation(mainWindow);
ImGui::NewFrame();
return true;
}
void VulkanRenderer::ImguiEnd()
{
ImGui::Render();
ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), m_state.currentCommandBuffer);
vkCmdEndRenderPass(m_state.currentCommandBuffer);
}
std::vector<LatteTextureVk*> g_imgui_textures; // TODO manage better
ImTextureID VulkanRenderer::GenerateTexture(const std::vector<uint8>& data, const Vector2i& size)
{
try
{
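// expand tightly packed RGB888 input to RGBA8888, forcing alpha to 0xFF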
std::vector<uint8> tmp(size.x * size.y * 4);
for (size_t i = 0; i < data.size() / 3; ++i)
{
tmp[(i * 4) + 0] = data[(i * 3) + 0];
tmp[(i * 4) + 1] = data[(i * 3) + 1];
tmp[(i * 4) + 2] = data[(i * 3) + 2];
tmp[(i * 4) + 3] = 0xFF;
}
return (ImTextureID)ImGui_ImplVulkan_GenerateTexture(m_state.currentCommandBuffer, tmp, size);
}
catch (const std::exception& ex)
{
cemuLog_log(LogType::Force, "can't generate imgui texture: {}", ex.what());
return nullptr;
}
}
void VulkanRenderer::DeleteTexture(ImTextureID id)
{
WaitDeviceIdle();
ImGui_ImplVulkan_DeleteTexture(id);
}
void VulkanRenderer::DeleteFontTextures()
{
ImGui_ImplVulkan_DestroyFontsTexture();
}
bool VulkanRenderer::BeginFrame(bool mainWindow)
{
if (!AcquireNextSwapchainImage(mainWindow))
return false;
auto& chainInfo = GetChainInfo(mainWindow);
VkClearColorValue clearColor{ 0, 0, 0, 0 };
ClearColorImageRaw(chainInfo.m_swapchainImages[chainInfo.swapchainImageIndex], 0, 0, clearColor, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
// mark current swapchain image as well defined
chainInfo.hasDefinedSwapchainImage = true;
return true;
}
void VulkanRenderer::DrawEmptyFrame(bool mainWindow)
{
if (!BeginFrame(mainWindow))
return;
SwapBuffers(mainWindow, !mainWindow);
}
void VulkanRenderer::InitFirstCommandBuffer()
{
cemu_assert_debug(m_state.currentCommandBuffer == nullptr);
// m_commandBufferIndex always points to the currently used command buffer, so we set it to 0
m_commandBufferIndex = 0;
m_commandBufferSyncIndex = 0;
m_state.currentCommandBuffer = m_commandBuffers[m_commandBufferIndex];
vkResetFences(m_logicalDevice, 1, &m_cmd_buffer_fences[m_commandBufferIndex]);
VkCommandBufferBeginInfo beginInfo{};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
vkBeginCommandBuffer(m_state.currentCommandBuffer, &beginInfo);
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &m_state.currentViewport);
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &m_state.currentScissorRect);
m_state.resetCommandBufferState();
}
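// poll the fences of previously submitted command buffers in submission order and retire every buffer the GPU has finished executing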
void VulkanRenderer::ProcessFinishedCommandBuffers()
{
bool finishedCmdBuffers = false;
while (m_commandBufferSyncIndex != m_commandBufferIndex)
{
VkResult fenceStatus = vkGetFenceStatus(m_logicalDevice, m_cmd_buffer_fences[m_commandBufferSyncIndex]);
if (fenceStatus == VK_SUCCESS)
{
ProcessDestructionQueue();
m_uniformVarBufferReadIndex = m_cmdBufferUniformRingbufIndices[m_commandBufferSyncIndex];
m_commandBufferSyncIndex = (m_commandBufferSyncIndex + 1) % m_commandBuffers.size();
memoryManager->cleanupBuffers(m_countCommandBufferFinished);
m_countCommandBufferFinished++;
finishedCmdBuffers = true;
continue;
}
else if (fenceStatus == VK_NOT_READY)
{
// not signaled
break;
}
cemuLog_log(LogType::Force, "vkGetFenceStatus returned unexpected error {}", (sint32)fenceStatus);
cemu_assert_debug(false);
}
if (finishedCmdBuffers)
{
LatteTextureReadback_UpdateFinishedTransfers(false);
}
}
void VulkanRenderer::WaitForNextFinishedCommandBuffer()
{
cemu_assert_debug(m_commandBufferSyncIndex != m_commandBufferIndex);
// wait on least recently submitted command buffer
VkResult result = vkWaitForFences(m_logicalDevice, 1, &m_cmd_buffer_fences[m_commandBufferSyncIndex], true, UINT64_MAX);
if (result == VK_TIMEOUT)
{
cemuLog_log(LogType::Force, "vkWaitForFences: Returned VK_TIMEOUT on infinite fence");
}
else if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "vkWaitForFences: Returned unhandled error {}", (sint32)result);
}
// process
ProcessFinishedCommandBuffers();
}
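// ends and submits the current command buffer (chained to the previous submit via semaphore), then begins recording into the next buffer of the ring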
void VulkanRenderer::SubmitCommandBuffer(VkSemaphore signalSemaphore, VkSemaphore waitSemaphore)
{
draw_endRenderPass();
occlusionQuery_notifyEndCommandBuffer();
vkEndCommandBuffer(m_state.currentCommandBuffer);
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &m_state.currentCommandBuffer;
// signal current command buffer semaphore
VkSemaphore signalSemArray[2];
if (signalSemaphore != VK_NULL_HANDLE)
{
submitInfo.signalSemaphoreCount = 2;
signalSemArray[0] = m_commandBufferSemaphores[m_commandBufferIndex]; // signal current
signalSemArray[1] = signalSemaphore; // signal the caller-provided semaphore
submitInfo.pSignalSemaphores = signalSemArray;
}
else
{
submitInfo.signalSemaphoreCount = 1;
submitInfo.pSignalSemaphores = &m_commandBufferSemaphores[m_commandBufferIndex]; // signal current
}
// wait for previous command buffer semaphore
VkSemaphore prevSem = GetLastSubmittedCmdBufferSemaphore();
const VkPipelineStageFlags semWaitStageMask[2] = { VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT };
VkSemaphore waitSemArray[2];
submitInfo.waitSemaphoreCount = 0;
if (m_numSubmittedCmdBuffers > 0)
waitSemArray[submitInfo.waitSemaphoreCount++] = prevSem; // wait on semaphore from previous submit
if (waitSemaphore != VK_NULL_HANDLE)
waitSemArray[submitInfo.waitSemaphoreCount++] = waitSemaphore;
submitInfo.pWaitDstStageMask = semWaitStageMask;
submitInfo.pWaitSemaphores = waitSemArray;
const VkResult result = vkQueueSubmit(m_graphicsQueue, 1, &submitInfo, m_cmd_buffer_fences[m_commandBufferIndex]);
if (result != VK_SUCCESS)
UnrecoverableError(fmt::format("failed to submit command buffer. Error {}", result).c_str());
m_numSubmittedCmdBuffers++;
// check if any previously submitted command buffers have finished execution
ProcessFinishedCommandBuffers();
// acquire next command buffer
auto nextCmdBufferIndex = (m_commandBufferIndex + 1) % m_commandBuffers.size();
if (nextCmdBufferIndex == m_commandBufferSyncIndex)
{
// force wait for the next command buffer
cemuLog_logDebug(LogType::Force, "Vulkan: Waiting for available command buffer...");
WaitForNextFinishedCommandBuffer();
}
m_cmdBufferUniformRingbufIndices[nextCmdBufferIndex] = m_cmdBufferUniformRingbufIndices[m_commandBufferIndex];
m_commandBufferIndex = nextCmdBufferIndex;
m_state.currentCommandBuffer = m_commandBuffers[m_commandBufferIndex];
vkResetFences(m_logicalDevice, 1, &m_cmd_buffer_fences[m_commandBufferIndex]);
vkResetCommandBuffer(m_state.currentCommandBuffer, 0);
VkCommandBufferBeginInfo beginInfo{};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
vkBeginCommandBuffer(m_state.currentCommandBuffer, &beginInfo);
// make sure some states are set for this command buffer
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &m_state.currentViewport);
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &m_state.currentScissorRect);
// DEBUG
//debug_genericBarrier();
// reset states which are bound to a command buffer
m_state.resetCommandBufferState();
occlusionQuery_notifyBeginCommandBuffer();
m_recordedDrawcalls = 0;
m_submitThreshold = 300;
m_submitOnIdle = false;
}
// submit within next 10 drawcalls
void VulkanRenderer::RequestSubmitSoon()
{
m_submitThreshold = std::min(m_submitThreshold, m_recordedDrawcalls + 10);
}
// command buffer will be submitted when GPU has no more commands to process or when threshold is reached
void VulkanRenderer::RequestSubmitOnIdle()
{
m_submitOnIdle = true;
}
uint64 VulkanRenderer::GetCurrentCommandBufferId() const
{
return m_numSubmittedCmdBuffers;
}
bool VulkanRenderer::HasCommandBufferFinished(uint64 commandBufferId) const
{
return m_countCommandBufferFinished > commandBufferId;
}
void VulkanRenderer::WaitCommandBufferFinished(uint64 commandBufferId)
{
if (commandBufferId == m_numSubmittedCmdBuffers)
SubmitCommandBuffer();
while (HasCommandBufferFinished(commandBufferId) == false)
WaitForNextFinishedCommandBuffer();
}
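// background thread which periodically serializes the Vulkan pipeline cache to disk whenever new pipelines have been added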
void VulkanRenderer::PipelineCacheSaveThread(size_t cache_size)
{
SetThreadName("vkDriverPlCache");
const auto dir = ActiveSettings::GetCachePath("shaderCache/driver/vk");
if (!fs::exists(dir))
{
try
{
fs::create_directories(dir);
}
catch (const std::exception& ex)
{
cemuLog_log(LogType::Force, "can't create vulkan pipeline cache directory \"{}\": {}", _pathToUtf8(dir), ex.what());
return;
}
}
const auto filename = dir / fmt::format("{:016x}.bin", CafeSystem::GetForegroundTitleId());
while (true)
{
if (m_destructionRequested)
return;
m_pipeline_cache_semaphore.wait();
if (m_destructionRequested)
return;
for (sint32 i = 0; i < 15 * 4; i++)
{
if (m_destructionRequested)
return;
std::this_thread::sleep_for(std::chrono::milliseconds(250));
}
// always prioritize the compiler threads over this thread
// avoid calling stalling lock() since it will block other threads from entering even when the lock is currently held in shared mode
while (!m_pipeline_cache_save_mutex.try_lock())
std::this_thread::sleep_for(std::chrono::milliseconds(250));
size_t size = 0;
VkResult res = vkGetPipelineCacheData(m_logicalDevice, m_pipeline_cache, &size, nullptr);
if (res == VK_SUCCESS && size > 0 && size != cache_size)
{
std::vector<uint8_t> cacheData(size);
res = vkGetPipelineCacheData(m_logicalDevice, m_pipeline_cache, &size, cacheData.data());
m_pipeline_cache_semaphore.reset();
m_pipeline_cache_save_mutex.unlock();
if (res == VK_SUCCESS)
{
auto file = std::ofstream(filename, std::ios::out | std::ios::binary);
if (file.is_open())
{
file.write((char*)cacheData.data(), cacheData.size());
file.close();
cache_size = size;
cemuLog_logDebug(LogType::Force, "pipeline cache saved");
}
else
{
cemuLog_log(LogType::Force, "can't write pipeline cache to disk");
}
}
else
{
cemuLog_log(LogType::Force, "can't retrieve pipeline cache data: 0x{:x}", res);
}
}
else
{
m_pipeline_cache_semaphore.reset();
m_pipeline_cache_save_mutex.unlock();
}
}
}
void VulkanRenderer::CreatePipelineCache()
{
std::vector<uint8_t> cacheData;
const auto dir = ActiveSettings::GetCachePath("shaderCache/driver/vk");
if (fs::exists(dir))
{
const auto filename = dir / fmt::format("{:016x}.bin", CafeSystem::GetForegroundTitleId());
auto file = std::ifstream(filename, std::ios::in | std::ios::binary | std::ios::ate);
if (file.is_open())
{
const size_t fileSize = file.tellg();
file.seekg(0, std::ifstream::beg);
cacheData.resize(fileSize);
file.read((char*)cacheData.data(), cacheData.size());
file.close();
}
}
VkPipelineCacheCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
createInfo.initialDataSize = cacheData.size();
createInfo.pInitialData = cacheData.data();
VkResult result = vkCreatePipelineCache(m_logicalDevice, &createInfo, nullptr, &m_pipeline_cache);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to open Vulkan pipeline cache: {}", result);
// unable to load the existing cache, start with an empty cache instead
createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
createInfo.initialDataSize = 0;
createInfo.pInitialData = nullptr;
result = vkCreatePipelineCache(m_logicalDevice, &createInfo, nullptr, &m_pipeline_cache);
if (result != VK_SUCCESS)
UnrecoverableError(fmt::format("Failed to create new Vulkan pipeline cache: {}", result).c_str());
}
size_t cache_size = 0;
vkGetPipelineCacheData(m_logicalDevice, m_pipeline_cache, &cache_size, nullptr);
m_pipeline_cache_save_thread = std::thread(&VulkanRenderer::PipelineCacheSaveThread, this, cache_size);
}
void VulkanRenderer::swapchain_createDescriptorSetLayout()
{
VkDescriptorSetLayoutBinding samplerLayoutBinding = {};
samplerLayoutBinding.binding = 0;
samplerLayoutBinding.descriptorCount = 1;
samplerLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
samplerLayoutBinding.pImmutableSamplers = nullptr;
samplerLayoutBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
VkDescriptorSetLayoutBinding bindings[] = { samplerLayoutBinding };
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = std::size(bindings);
layoutInfo.pBindings = bindings;
if (vkCreateDescriptorSetLayout(m_logicalDevice, &layoutInfo, nullptr, &m_swapchainDescriptorSetLayout) != VK_SUCCESS)
UnrecoverableError("failed to create descriptor set layout for swapchain");
}
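// translate a GX2 surface format into a Vulkan image format plus the texture decoder used for uploads; falls back to wider formats when the native one is unsupported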
void VulkanRenderer::GetTextureFormatInfoVK(Latte::E_GX2SURFFMT format, bool isDepth, Latte::E_DIM dim, sint32 width, sint32 height, FormatInfoVK* formatInfoOut)
{
formatInfoOut->texelCountX = width;
formatInfoOut->texelCountY = height;
formatInfoOut->isCompressed = false;
if (isDepth)
{
switch (format)
{
case Latte::E_GX2SURFFMT::D24_S8_UNORM:
if (m_supportedFormatInfo.fmt_d24_unorm_s8_uint == false)
{
formatInfoOut->vkImageFormat = VK_FORMAT_D32_SFLOAT_S8_UINT;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
formatInfoOut->decoder = TextureDecoder_NullData64::getInstance();
}
else
{
formatInfoOut->vkImageFormat = VK_FORMAT_D24_UNORM_S8_UINT;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
formatInfoOut->decoder = TextureDecoder_D24_S8::getInstance();
}
break;
case Latte::E_GX2SURFFMT::D24_S8_FLOAT:
// alternative format
formatInfoOut->vkImageFormat = VK_FORMAT_D32_SFLOAT_S8_UINT;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
formatInfoOut->decoder = TextureDecoder_NullData64::getInstance();
break;
case Latte::E_GX2SURFFMT::D32_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_D32_SFLOAT;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT;
formatInfoOut->decoder = TextureDecoder_R32_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::D16_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_D16_UNORM;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT;
formatInfoOut->decoder = TextureDecoder_R16_UNORM::getInstance();
break;
case Latte::E_GX2SURFFMT::D32_S8_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_D32_SFLOAT_S8_UINT;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
formatInfoOut->decoder = TextureDecoder_D32_S8_UINT_X24::getInstance();
break;
default:
cemuLog_log(LogType::Force, "Unsupported depth texture format {:04x}", (uint32)format);
// default to placeholder format
formatInfoOut->vkImageFormat = VK_FORMAT_D16_UNORM;
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_DEPTH_BIT;
formatInfoOut->decoder = nullptr;
break;
}
}
else
{
formatInfoOut->vkImageAspect = VK_IMAGE_ASPECT_COLOR_BIT;
if(format == (Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT | Latte::E_GX2SURFFMT::FMT_BIT_SRGB)) // Seen in Sonic Transformed level Starry Speedway. SRGB should just be ignored for native float formats?
format = Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT;
switch (format)
{
// RGBA formats
case Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32G32B32A32_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R32_G32_B32_A32_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R32_G32_B32_A32_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32G32B32A32_UINT;
formatInfoOut->decoder = TextureDecoder_R32_G32_B32_A32_UINT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R16_G16_B16_A16_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_B16_A16_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_UINT;
formatInfoOut->decoder = TextureDecoder_R16_G16_B16_A16_UINT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_UNORM;
formatInfoOut->decoder = TextureDecoder_R16_G16_B16_A16::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_B16_A16_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_SNORM;
formatInfoOut->decoder = TextureDecoder_R16_G16_B16_A16::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_R8_G8_B8_A8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_B8_A8_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_SNORM;
formatInfoOut->decoder = TextureDecoder_R8_G8_B8_A8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_SRGB;
formatInfoOut->decoder = TextureDecoder_R8_G8_B8_A8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_B8_A8_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UINT;
formatInfoOut->decoder = TextureDecoder_R8_G8_B8_A8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_B8_A8_SINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_SINT;
formatInfoOut->decoder = TextureDecoder_R8_G8_B8_A8::getInstance();
break;
// RG formats
case Latte::E_GX2SURFFMT::R32_G32_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32G32_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R32_G32_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R32_G32_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32G32_UINT;
formatInfoOut->decoder = TextureDecoder_R32_G32_UINT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16_UNORM;
formatInfoOut->decoder = TextureDecoder_R16_G16::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_G16_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R16_G16_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8_UNORM;
formatInfoOut->decoder = TextureDecoder_R8_G8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_G8_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8_SNORM;
formatInfoOut->decoder = TextureDecoder_R8_G8::getInstance();
break;
case Latte::E_GX2SURFFMT::R4_G4_UNORM:
if (m_supportedFormatInfo.fmt_r4g4_unorm_pack == false)
{
if (m_supportedFormatInfo.fmt_r4g4b4a4_unorm_pack == false) {
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_R4G4_UNORM_To_RGBA8::getInstance();
}
else {
formatInfoOut->vkImageFormat = VK_FORMAT_R4G4B4A4_UNORM_PACK16;
formatInfoOut->decoder = TextureDecoder_R4_G4_UNORM_To_RGBA4_vk::getInstance();
}
}
else
{
formatInfoOut->vkImageFormat = VK_FORMAT_R4G4_UNORM_PACK8;
formatInfoOut->decoder = TextureDecoder_R4_G4::getInstance();
}
break;
// R formats
case Latte::E_GX2SURFFMT::R32_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R32_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R32_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R32_UINT;
formatInfoOut->decoder = TextureDecoder_R32_UINT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_R16_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R16_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16_UNORM;
formatInfoOut->decoder = TextureDecoder_R16_UNORM::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16_SNORM;
formatInfoOut->decoder = TextureDecoder_R16_SNORM::getInstance();
break;
case Latte::E_GX2SURFFMT::R16_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R16_UINT;
formatInfoOut->decoder = TextureDecoder_R16_UINT::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8_UNORM;
formatInfoOut->decoder = TextureDecoder_R8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R8_SNORM;
formatInfoOut->decoder = TextureDecoder_R8::getInstance();
break;
case Latte::E_GX2SURFFMT::R8_UINT:
formatInfoOut->vkImageFormat = VK_FORMAT_R8_UINT;
formatInfoOut->decoder = TextureDecoder_R8_UINT::getInstance();
break;
// special formats
case Latte::E_GX2SURFFMT::R5_G6_B5_UNORM:
if (m_supportedFormatInfo.fmt_r5g6b5_unorm_pack == false) {
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_R5G6B5_UNORM_To_RGBA8::getInstance();
}
else {
// Vulkan has R in MSB, GPU7 has it in LSB
formatInfoOut->vkImageFormat = VK_FORMAT_R5G6B5_UNORM_PACK16;
formatInfoOut->decoder = TextureDecoder_R5_G6_B5_swappedRB::getInstance();
}
break;
case Latte::E_GX2SURFFMT::R5_G5_B5_A1_UNORM:
if (m_supportedFormatInfo.fmt_a1r5g5b5_unorm_pack == false) {
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_R5_G5_B5_A1_UNORM_swappedRB_To_RGBA8::getInstance();
}
else {
// used in Super Mario 3D World for the hidden Luigi sprites
// since the channel order is reversed in Vulkan compared to GX2 the format we effectively need is A1B5G5R5, so we use A1R5G5B5 with a decoder that swaps R and B
formatInfoOut->vkImageFormat = VK_FORMAT_A1R5G5B5_UNORM_PACK16;
formatInfoOut->decoder = TextureDecoder_R5_G5_B5_A1_UNORM_swappedRB::getInstance();
}
break;
case Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM:
if (m_supportedFormatInfo.fmt_a1r5g5b5_unorm_pack == false) {
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_A1_B5_G5_R5_UNORM_vulkan_To_RGBA8::getInstance();
}
else {
// used by VC64 (e.g. Ocarina of Time)
formatInfoOut->vkImageFormat = VK_FORMAT_A1R5G5B5_UNORM_PACK16; // A 15 R 10..14, G 5..9 B 0..4
formatInfoOut->decoder = TextureDecoder_A1_B5_G5_R5_UNORM_vulkan::getInstance();
}
break;
case Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT:
formatInfoOut->vkImageFormat = VK_FORMAT_B10G11R11_UFLOAT_PACK32; // verify if order of channels is still the same as GX2
formatInfoOut->decoder = TextureDecoder_R11_G11_B10_FLOAT::getInstance();
break;
case Latte::E_GX2SURFFMT::R4_G4_B4_A4_UNORM:
if (m_supportedFormatInfo.fmt_r4g4b4a4_unorm_pack == false) {
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UNORM;
formatInfoOut->decoder = TextureDecoder_R4G4B4A4_UNORM_To_RGBA8::getInstance();
}
else {
formatInfoOut->vkImageFormat = VK_FORMAT_R4G4B4A4_UNORM_PACK16;
formatInfoOut->decoder = TextureDecoder_R4_G4_B4_A4_UNORM::getInstance();
}
break;
// special formats - R10G10B10_A2
case Latte::E_GX2SURFFMT::R10_G10_B10_A2_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_A2B10G10R10_UNORM_PACK32; // todo - verify
formatInfoOut->decoder = TextureDecoder_R10_G10_B10_A2_UNORM::getInstance();
break;
case Latte::E_GX2SURFFMT::R10_G10_B10_A2_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_SNORM; // Vulkan has VK_FORMAT_A2R10G10B10_SNORM_PACK32 but it doesn't work?
formatInfoOut->decoder = TextureDecoder_R10_G10_B10_A2_SNORM_To_RGBA16::getInstance();
break;
case Latte::E_GX2SURFFMT::R10_G10_B10_A2_SRGB:
//formatInfoOut->vkImageFormat = VK_FORMAT_R16G16B16A16_SNORM; // Vulkan has no uncompressed SRGB format with more than 8 bits per channel
//formatInfoOut->decoder = TextureDecoder_R10_G10_B10_A2_SNORM_To_RGBA16::getInstance();
//break;
formatInfoOut->vkImageFormat = VK_FORMAT_A2B10G10R10_UNORM_PACK32; // todo - verify
formatInfoOut->decoder = TextureDecoder_R10_G10_B10_A2_UNORM::getInstance();
break;
// compressed formats
case Latte::E_GX2SURFFMT::BC1_SRGB:
formatInfoOut->vkImageFormat = VK_FORMAT_BC1_RGBA_SRGB_BLOCK; // todo - verify
formatInfoOut->decoder = TextureDecoder_BC1::getInstance();
break;
case Latte::E_GX2SURFFMT::BC1_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC1_RGBA_UNORM_BLOCK; // todo - verify
formatInfoOut->decoder = TextureDecoder_BC1::getInstance();
break;
case Latte::E_GX2SURFFMT::BC2_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC2_UNORM_BLOCK; // todo - verify
formatInfoOut->decoder = TextureDecoder_BC2::getInstance();
break;
case Latte::E_GX2SURFFMT::BC2_SRGB:
formatInfoOut->vkImageFormat = VK_FORMAT_BC2_SRGB_BLOCK; // todo - verify
formatInfoOut->decoder = TextureDecoder_BC2::getInstance();
break;
case Latte::E_GX2SURFFMT::BC3_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC3_UNORM_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC3::getInstance();
break;
case Latte::E_GX2SURFFMT::BC3_SRGB:
formatInfoOut->vkImageFormat = VK_FORMAT_BC3_SRGB_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC3::getInstance();
break;
case Latte::E_GX2SURFFMT::BC4_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC4_UNORM_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC4::getInstance();
break;
case Latte::E_GX2SURFFMT::BC4_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC4_SNORM_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC4::getInstance();
break;
case Latte::E_GX2SURFFMT::BC5_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC5_UNORM_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC5::getInstance();
break;
case Latte::E_GX2SURFFMT::BC5_SNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_BC5_SNORM_BLOCK;
formatInfoOut->decoder = TextureDecoder_BC5::getInstance();
break;
case Latte::E_GX2SURFFMT::R24_X8_UNORM:
formatInfoOut->vkImageFormat = VK_FORMAT_R32_SFLOAT;
formatInfoOut->decoder = TextureDecoder_R24_X8::getInstance();
break;
case Latte::E_GX2SURFFMT::X24_G8_UINT:
// used by Color Splash and Resident Evil
formatInfoOut->vkImageFormat = VK_FORMAT_R8G8B8A8_UINT; // todo - should we use ABGR format?
formatInfoOut->decoder = TextureDecoder_X24_G8_UINT::getInstance(); // todo - verify
break;
case Latte::E_GX2SURFFMT::R32_X8_FLOAT:
// seen in Disney Infinity 3.0
formatInfoOut->vkImageFormat = VK_FORMAT_R32_SFLOAT;
formatInfoOut->decoder = TextureDecoder_NullData64::getInstance();
break;
default:
cemuLog_log(LogType::Force, "Unsupported color texture format {:04x}", (uint32)format);
cemu_assert_debug(false);
}
}
}
VkPipelineShaderStageCreateInfo VulkanRenderer::CreatePipelineShaderStageCreateInfo(VkShaderStageFlagBits stage, VkShaderModule& module, const char* entryName) const
{
VkPipelineShaderStageCreateInfo shaderStageInfo{};
shaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStageInfo.stage = stage;
shaderStageInfo.module = module;
shaderStageInfo.pName = entryName;
return shaderStageInfo;
}
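// pipeline used to blit the emulated framebuffer onto the swapchain; cached per shader/SRGB/padView combination since the remaining state is static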
VkPipeline VulkanRenderer::backbufferBlit_createGraphicsPipeline(VkDescriptorSetLayout descriptorLayout, bool padView, RendererOutputShader* shader)
{
auto& chainInfo = GetChainInfo(!padView);
RendererShaderVk* vertexRendererShader = static_cast<RendererShaderVk*>(shader->GetVertexShader());
RendererShaderVk* fragmentRendererShader = static_cast<RendererShaderVk*>(shader->GetFragmentShader());
uint64 hash = 0;
hash += (uint64)vertexRendererShader;
hash += (uint64)fragmentRendererShader;
hash += (uint64)(chainInfo.m_usesSRGB);
hash += ((uint64)padView) << 1;
static std::unordered_map<uint64, VkPipeline> s_pipeline_cache;
const auto it = s_pipeline_cache.find(hash);
if (it != s_pipeline_cache.cend())
return it->second;
std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
if (vertexRendererShader)
shaderStages.emplace_back(CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_VERTEX_BIT, vertexRendererShader->GetShaderModule(), "main"));
if (fragmentRendererShader)
shaderStages.emplace_back(CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_FRAGMENT_BIT, fragmentRendererShader->GetShaderModule(), "main"));
VkPipelineVertexInputStateCreateInfo vertexInputInfo{};
vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertexInputInfo.vertexBindingDescriptionCount = 0;
vertexInputInfo.vertexAttributeDescriptionCount = 0;
VkPipelineInputAssemblyStateCreateInfo inputAssembly{};
inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
VkPipelineViewportStateCreateInfo viewportState{};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.scissorCount = 1;
VkDynamicState dynamicStates[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkPipelineDynamicStateCreateInfo dynamicState = {};
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.dynamicStateCount = std::size(dynamicStates);
dynamicState.pDynamicStates = dynamicStates;
VkPipelineRasterizationStateCreateInfo rasterizer{};
rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer.frontFace = VK_FRONT_FACE_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
VkPipelineMultisampleStateCreateInfo multisampling{};
multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
VkPipelineColorBlendAttachmentState colorBlendAttachment{};
colorBlendAttachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
colorBlendAttachment.blendEnable = VK_FALSE;
VkPipelineColorBlendStateCreateInfo colorBlending{};
colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY;
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f;
colorBlending.blendConstants[1] = 0.0f;
colorBlending.blendConstants[2] = 0.0f;
colorBlending.blendConstants[3] = 0.0f;
VkPipelineLayoutCreateInfo pipelineLayoutInfo{};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1;
pipelineLayoutInfo.pSetLayouts = &descriptorLayout;
VkResult result = vkCreatePipelineLayout(m_logicalDevice, &pipelineLayoutInfo, nullptr, &m_pipelineLayout);
if (result != VK_SUCCESS)
throw std::runtime_error(fmt::format("Failed to create pipeline layout: {}", result));
VkGraphicsPipelineCreateInfo pipelineInfo = {};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = shaderStages.size();
pipelineInfo.pStages = shaderStages.data();
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pDynamicState = &dynamicState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.layout = m_pipelineLayout;
pipelineInfo.renderPass = chainInfo.m_swapchainRenderPass;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = VK_NULL_HANDLE;
VkPipeline pipeline = nullptr;
std::shared_lock lock(m_pipeline_cache_save_mutex);
result = vkCreateGraphicsPipelines(m_logicalDevice, m_pipeline_cache, 1, &pipelineInfo, nullptr, &pipeline);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create graphics pipeline. Error {}", result);
throw std::runtime_error(fmt::format("Failed to create graphics pipeline: {}", result));
}
s_pipeline_cache[hash] = pipeline;
m_pipeline_cache_semaphore.notify();
return pipeline;
}
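// acquires the next swapchain image if none is reserved yet; recreates the swapchain first if its properties changed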
bool VulkanRenderer::AcquireNextSwapchainImage(bool mainWindow)
{
if(!IsSwapchainInfoValid(mainWindow))
return false;
if(!mainWindow && m_destroyPadSwapchainNextAcquire.test())
{
RecreateSwapchain(mainWindow, true);
m_destroyPadSwapchainNextAcquire.clear();
m_destroyPadSwapchainNextAcquire.notify_all();
return false;
}
auto& chainInfo = GetChainInfo(mainWindow);
if (chainInfo.swapchainImageIndex != -1)
return true; // image already reserved
if (!UpdateSwapchainProperties(mainWindow))
return false;
bool result = chainInfo.AcquireImage();
if (!result)
return false;
SubmitCommandBuffer(VK_NULL_HANDLE, chainInfo.ConsumeAcquireSemaphore());
return true;
}
void VulkanRenderer::RecreateSwapchain(bool mainWindow, bool skipCreate)
{
SubmitCommandBuffer();
WaitDeviceIdle();
auto& chainInfo = GetChainInfo(mainWindow);
Vector2i size;
if (mainWindow)
{
ImGui_ImplVulkan_Shutdown();
gui_getWindowPhysSize(size.x, size.y);
}
else
{
gui_getPadWindowPhysSize(size.x, size.y);
}
chainInfo.swapchainImageIndex = -1;
chainInfo.Cleanup();
chainInfo.m_desiredExtent = size;
if(!skipCreate)
{
chainInfo.Create();
}
if (mainWindow)
ImguiInit();
}
bool VulkanRenderer::UpdateSwapchainProperties(bool mainWindow)
{
auto& chainInfo = GetChainInfo(mainWindow);
bool stateChanged = chainInfo.m_shouldRecreate;
const auto configValue = (VSync)GetConfig().vsync.GetValue();
if(chainInfo.m_vsyncState != configValue)
stateChanged = true;
const bool latteBufferUsesSRGB = mainWindow ? LatteGPUState.tvBufferUsesSRGB : LatteGPUState.drcBufferUsesSRGB;
if (chainInfo.m_usesSRGB != latteBufferUsesSRGB)
stateChanged = true;
int width, height;
if (mainWindow)
gui_getWindowPhysSize(width, height);
else
gui_getPadWindowPhysSize(width, height);
auto extent = chainInfo.getExtent();
if (width != extent.width || height != extent.height)
stateChanged = true;
if(stateChanged)
{
try
{
RecreateSwapchain(mainWindow);
}
catch (std::exception&)
{
cemu_assert_debug(false);
return false;
}
}
chainInfo.m_shouldRecreate = false;
chainInfo.m_vsyncState = configValue;
chainInfo.m_usesSRGB = latteBufferUsesSRGB;
return true;
}
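// presents the currently acquired swapchain image; uses VK_KHR_present_wait (if available) to bound the number of queued frames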
void VulkanRenderer::SwapBuffer(bool mainWindow)
{
if(!AcquireNextSwapchainImage(mainWindow))
return;
auto& chainInfo = GetChainInfo(mainWindow);
if (!chainInfo.hasDefinedSwapchainImage)
{
// set the swapchain image to a defined state
VkClearColorValue clearColor{ 0, 0, 0, 0 };
ClearColorImageRaw(chainInfo.m_swapchainImages[chainInfo.swapchainImageIndex], 0, 0, clearColor, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
}
const size_t currentFrameCmdBufferID = GetCurrentCommandBufferId();
VkSemaphore presentSemaphore = chainInfo.m_presentSemaphores[chainInfo.swapchainImageIndex];
SubmitCommandBuffer(presentSemaphore); // submit all command and signal semaphore
cemu_assert_debug(m_numSubmittedCmdBuffers > 0);
// wait for the previous frame to finish rendering
WaitCommandBufferFinished(m_commandBufferIDOfPrevFrame);
m_commandBufferIDOfPrevFrame = currentFrameCmdBufferID;
chainInfo.WaitAvailableFence();
VkPresentIdKHR presentId = {};
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = &chainInfo.m_swapchain;
presentInfo.pImageIndices = &chainInfo.swapchainImageIndex;
// wait on command buffer semaphore
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = &presentSemaphore;
// if present_wait is available and enabled, add frame markers to present requests
// and limit the number of queued present operations
if (m_featureControl.deviceExtensions.present_wait && chainInfo.m_maxQueued > 0)
{
presentId.sType = VK_STRUCTURE_TYPE_PRESENT_ID_KHR;
presentId.swapchainCount = 1;
presentId.pPresentIds = &chainInfo.m_presentId;
presentInfo.pNext = &presentId;
if(chainInfo.m_queueDepth >= chainInfo.m_maxQueued)
{
uint64 waitFrameId = chainInfo.m_presentId - chainInfo.m_queueDepth;
vkWaitForPresentKHR(m_logicalDevice, chainInfo.m_swapchain, waitFrameId, 40'000'000);
chainInfo.m_queueDepth--;
}
}
VkResult result = vkQueuePresentKHR(m_presentQueue, &presentInfo);
if (result < 0 && result != VK_ERROR_OUT_OF_DATE_KHR)
{
throw std::runtime_error(fmt::format("Failed to present image: {}", result));
}
if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR)
chainInfo.m_shouldRecreate = true;
if(result >= 0)
{
chainInfo.m_queueDepth++;
chainInfo.m_presentId++;
}
chainInfo.hasDefinedSwapchainImage = false;
chainInfo.swapchainImageIndex = -1;
}
void VulkanRenderer::Flush(bool waitIdle)
{
if (m_recordedDrawcalls > 0 || m_submitOnIdle)
SubmitCommandBuffer();
if (waitIdle)
WaitCommandBufferFinished(GetCurrentCommandBufferId());
}
void VulkanRenderer::NotifyLatteCommandProcessorIdle()
{
if (m_submitOnIdle)
SubmitCommandBuffer();
}
void VulkanBenchmarkPrintResults();
void VulkanRenderer::SwapBuffers(bool swapTV, bool swapDRC)
{
SubmitCommandBuffer();
if (swapTV && IsSwapchainInfoValid(true))
SwapBuffer(true);
if (swapDRC && IsSwapchainInfoValid(false))
SwapBuffer(false);
if(swapTV)
VulkanBenchmarkPrintResults();
}
void VulkanRenderer::ClearColorbuffer(bool padView)
{
if (!IsSwapchainInfoValid(!padView))
return;
auto& chainInfo = GetChainInfo(!padView);
if (chainInfo.swapchainImageIndex == -1)
return;
VkClearColorValue clearColor{ 0, 0, 0, 0 };
ClearColorImageRaw(chainInfo.m_swapchainImages[chainInfo.swapchainImageIndex], 0, 0, clearColor, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL);
}
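// clear a raw VkImage; transitions the subresource to TRANSFER_DST for the clear and back to the requested output layout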
void VulkanRenderer::ClearColorImageRaw(VkImage image, uint32 sliceIndex, uint32 mipIndex, const VkClearColorValue& color, VkImageLayout inputLayout, VkImageLayout outputLayout)
{
draw_endRenderPass();
VkImageSubresourceRange subresourceRange{};
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseMipLevel = mipIndex;
subresourceRange.levelCount = 1;
subresourceRange.baseArrayLayer = sliceIndex;
subresourceRange.layerCount = 1;
barrier_image<SYNC_OP::ANY_TRANSFER | SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE, SYNC_OP::ANY_TRANSFER>(image, subresourceRange, inputLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearColorImage(m_state.currentCommandBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &color, 1, &subresourceRange);
barrier_image<ANY_TRANSFER, SYNC_OP::ANY_TRANSFER | SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE>(image, subresourceRange, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, outputLayout);
}
void VulkanRenderer::ClearColorImage(LatteTextureVk* vkTexture, uint32 sliceIndex, uint32 mipIndex, const VkClearColorValue& color, VkImageLayout outputLayout)
{
if(vkTexture->isDepth)
{
cemu_assert_suspicious();
return;
}
if (vkTexture->IsCompressedFormat())
{
// vkCmdClearColorImage cannot be called on compressed formats
// for now we ignore affected clears but still transition the image to the correct layout
auto imageObj = vkTexture->GetImageObj();
imageObj->flagForCurrentCommandBuffer();
VkImageSubresourceLayers subresourceRange{};
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.mipLevel = mipIndex;
subresourceRange.baseArrayLayer = sliceIndex;
subresourceRange.layerCount = 1;
barrier_image<ANY_TRANSFER | IMAGE_READ, ANY_TRANSFER | IMAGE_READ | IMAGE_WRITE>(vkTexture, subresourceRange, outputLayout);
if(color.float32[0] == 0.0f && color.float32[1] == 0.0f && color.float32[2] == 0.0f && color.float32[3] == 0.0f)
{
static bool dbgMsgPrinted = false;
if(!dbgMsgPrinted)
{
cemuLog_logDebug(LogType::Force, "Unsupported compressed texture clear to zero");
dbgMsgPrinted = true;
}
}
return;
}
VkImageSubresourceRange subresourceRange;
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseMipLevel = mipIndex;
subresourceRange.levelCount = 1;
subresourceRange.baseArrayLayer = sliceIndex;
subresourceRange.layerCount = 1;
auto imageObj = vkTexture->GetImageObj();
imageObj->flagForCurrentCommandBuffer();
VkImageLayout inputLayout = vkTexture->GetImageLayout(subresourceRange);
ClearColorImageRaw(imageObj->m_image, sliceIndex, mipIndex, color, inputLayout, outputLayout);
vkTexture->SetImageLayout(subresourceRange, outputLayout);
}
void VulkanRenderer::DrawBackbufferQuad(LatteTextureView* texView, RendererOutputShader* shader, bool useLinearTexFilter, sint32 imageX, sint32 imageY, sint32 imageWidth, sint32 imageHeight, bool padView, bool clearBackground)
{
if(!AcquireNextSwapchainImage(!padView))
return;
auto& chainInfo = GetChainInfo(!padView);
LatteTextureViewVk* texViewVk = (LatteTextureViewVk*)texView;
draw_endRenderPass();
if (clearBackground)
ClearColorbuffer(padView);
// barrier for input texture
VkMemoryBarrier memoryBarrier{};
memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
VkPipelineStageFlags srcStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
VkPipelineStageFlags dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
memoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
memoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(m_state.currentCommandBuffer, srcStage, dstStage, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
auto pipeline = backbufferBlit_createGraphicsPipeline(m_swapchainDescriptorSetLayout, padView, shader);
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = chainInfo.m_swapchainRenderPass;
renderPassInfo.framebuffer = chainInfo.m_swapchainFramebuffers[chainInfo.swapchainImageIndex];
renderPassInfo.renderArea.offset = { 0, 0 };
renderPassInfo.renderArea.extent = chainInfo.getExtent();
renderPassInfo.clearValueCount = 0;
VkViewport viewport{};
viewport.x = imageX;
viewport.y = imageY;
viewport.width = imageWidth;
viewport.height = imageHeight;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &viewport);
VkRect2D scissor{};
scissor.extent = chainInfo.getExtent();
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &scissor);
auto descriptSet = backbufferBlit_createDescriptorSet(m_swapchainDescriptorSetLayout, texViewVk, useLinearTexFilter);
vkCmdBeginRenderPass(m_state.currentCommandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
m_state.currentPipeline = pipeline;
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipelineLayout, 0, 1, &descriptSet, 0, nullptr);
vkCmdDraw(m_state.currentCommandBuffer, 6, 1, 0, 0);
vkCmdEndRenderPass(m_state.currentCommandBuffer);
// restore viewport
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &m_state.currentViewport);
// mark current swapchain image as well defined
chainInfo.hasDefinedSwapchainImage = true;
}
void VulkanRenderer::CreateDescriptorPool()
{
std::array<VkDescriptorPoolSize, 4> poolSizes = {};
poolSizes[0].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[0].descriptorCount = 1024 * 128;
poolSizes[1].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[1].descriptorCount = 1024 * 1;
poolSizes[2].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
poolSizes[2].descriptorCount = 1024 * 128;
poolSizes[3].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
poolSizes[3].descriptorCount = 1024 * 4;
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = poolSizes.size();
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.maxSets = 1024 * 256;
poolInfo.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
if (vkCreateDescriptorPool(m_logicalDevice, &poolInfo, nullptr, &m_descriptorPool) != VK_SUCCESS)
UnrecoverableError("Failed to create descriptor pool!");
}
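// descriptor set holding the source texture and sampler for the backbuffer blit; cached per texture view and filter mode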
VkDescriptorSet VulkanRenderer::backbufferBlit_createDescriptorSet(VkDescriptorSetLayout descriptor_set_layout, LatteTextureViewVk* texViewVk, bool useLinearTexFilter)
{
uint64 hash = 0;
hash += (uint64)texViewVk->GetViewRGBA();
hash += (uint64)texViewVk->GetDefaultTextureSampler(useLinearTexFilter);
static std::unordered_map<uint64, VkDescriptorSet> s_set_cache;
const auto it = s_set_cache.find(hash);
if (it != s_set_cache.cend())
return it->second;
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = m_descriptorPool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &descriptor_set_layout;
VkDescriptorSet result;
if (vkAllocateDescriptorSets(m_logicalDevice, &allocInfo, &result) != VK_SUCCESS)
UnrecoverableError("Failed to allocate descriptor sets for backbuffer blit");
performanceMonitor.vk.numDescriptorSets.increment();
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
imageInfo.imageView = texViewVk->GetViewRGBA()->m_textureImageView;
imageInfo.sampler = texViewVk->GetDefaultTextureSampler(useLinearTexFilter);
VkWriteDescriptorSet descriptorWrites = {};
descriptorWrites.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites.dstSet = result;
descriptorWrites.dstBinding = 0;
descriptorWrites.dstArrayElement = 0;
descriptorWrites.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites.descriptorCount = 1;
descriptorWrites.pImageInfo = &imageInfo;
vkUpdateDescriptorSets(m_logicalDevice, 1, &descriptorWrites, 0, nullptr);
performanceMonitor.vk.numDescriptorSamplerTextures.increment();
s_set_cache[hash] = result;
return result;
}
void VulkanRenderer::renderTarget_setViewport(float x, float y, float width, float height, float nearZ, float farZ, bool halfZ)
{
// the Vulkan renderer handles halfZ in the vertex shader
float vpNewX = x;
float vpNewY = y + height;
float vpNewWidth = width;
float vpNewHeight = -height;
if (m_state.currentViewport.x == vpNewX && m_state.currentViewport.y == vpNewY && m_state.currentViewport.width == vpNewWidth && m_state.currentViewport.height == vpNewHeight && m_state.currentViewport.minDepth == nearZ && m_state.currentViewport.maxDepth == farZ)
return; // viewport did not change
m_state.currentViewport.x = vpNewX;
m_state.currentViewport.y = vpNewY;
m_state.currentViewport.width = vpNewWidth;
m_state.currentViewport.height = vpNewHeight;
m_state.currentViewport.minDepth = nearZ;
m_state.currentViewport.maxDepth = farZ;
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &m_state.currentViewport);
}
void VulkanRenderer::renderTarget_setScissor(sint32 scissorX, sint32 scissorY, sint32 scissorWidth, sint32 scissorHeight)
{
m_state.currentScissorRect.offset.x = scissorX;
m_state.currentScissorRect.offset.y = scissorY;
m_state.currentScissorRect.extent.width = scissorWidth;
m_state.currentScissorRect.extent.height = scissorHeight;
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &m_state.currentScissorRect);
}
LatteCachedFBO* VulkanRenderer::rendertarget_createCachedFBO(uint64 key)
{
return new CachedFBOVk(key, m_logicalDevice);
}
void VulkanRenderer::rendertarget_deleteCachedFBO(LatteCachedFBO* cfbo)
{
if (cfbo == m_state.activeFBO)
m_state.activeFBO = nullptr;
}
void VulkanRenderer::rendertarget_bindFramebufferObject(LatteCachedFBO* cfbo)
{
m_state.activeFBO = (CachedFBOVk*)cfbo;
}
void* VulkanRenderer::texture_acquireTextureUploadBuffer(uint32 size)
{
return memoryManager->TextureUploadBufferAcquire(size);
}
void VulkanRenderer::texture_releaseTextureUploadBuffer(uint8* mem)
{
memoryManager->TextureUploadBufferRelease(mem);
}
TextureDecoder* VulkanRenderer::texture_chooseDecodedFormat(Latte::E_GX2SURFFMT format, bool isDepth, Latte::E_DIM dim, uint32 width, uint32 height)
{
FormatInfoVK texFormatInfo{};
GetTextureFormatInfoVK(format, isDepth, dim, width, height, &texFormatInfo);
return texFormatInfo.decoder;
}
void VulkanRenderer::ReleaseDestructibleObject(VKRDestructibleObject* destructibleObject)
{
// destroy immediately if possible
if (destructibleObject->canDestroy())
{
delete destructibleObject;
return;
}
// otherwise put on queue
m_spinlockDestructionQueue.lock();
m_destructionQueue.emplace_back(destructibleObject);
m_spinlockDestructionQueue.unlock();
}
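// walk the deferred-destruction queue and delete every object that is no longer referenced by an in-flight command buffer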
void VulkanRenderer::ProcessDestructionQueue()
{
m_spinlockDestructionQueue.lock();
for (auto it = m_destructionQueue.begin(); it != m_destructionQueue.end();)
{
if ((*it)->canDestroy())
{
delete (*it);
it = m_destructionQueue.erase(it);
continue;
}
++it;
}
m_spinlockDestructionQueue.unlock();
}
VkDescriptorSetInfo::~VkDescriptorSetInfo()
{
for (auto& it : list_referencedViews)
it->RemoveDescriptorSetReference(this);
// unregister
switch (shaderType)
{
case LatteConst::ShaderType::Vertex:
{
auto r = pipeline_info->vertex_ds_cache.erase(stateHash);
cemu_assert_debug(r == 1);
break;
}
case LatteConst::ShaderType::Pixel:
{
auto r = pipeline_info->pixel_ds_cache.erase(stateHash);
cemu_assert_debug(r == 1);
break;
}
case LatteConst::ShaderType::Geometry:
{
auto r = pipeline_info->geometry_ds_cache.erase(stateHash);
cemu_assert_debug(r == 1);
break;
}
default:
UNREACHABLE;
}
// update global stats
performanceMonitor.vk.numDescriptorSamplerTextures.decrement(statsNumSamplerTextures);
performanceMonitor.vk.numDescriptorDynUniformBuffers.decrement(statsNumDynUniformBuffers);
performanceMonitor.vk.numDescriptorStorageBuffers.decrement(statsNumStorageBuffers);
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(m_vkObjDescriptorSet);
m_vkObjDescriptorSet = nullptr;
}
void VulkanRenderer::texture_clearSlice(LatteTexture* hostTexture, sint32 sliceIndex, sint32 mipIndex)
{
draw_endRenderPass();
auto vkTexture = (LatteTextureVk*)hostTexture;
if (vkTexture->isDepth)
texture_clearDepthSlice(hostTexture, sliceIndex, mipIndex, true, vkTexture->hasStencil, 0.0f, 0);
else
{
cemu_assert_debug(vkTexture->dim != Latte::E_DIM::DIM_3D);
ClearColorImage(vkTexture, sliceIndex, mipIndex, { 0,0,0,0 }, VK_IMAGE_LAYOUT_GENERAL);
}
}
void VulkanRenderer::texture_clearColorSlice(LatteTexture* hostTexture, sint32 sliceIndex, sint32 mipIndex, float r, float g, float b, float a)
{
auto vkTexture = (LatteTextureVk*)hostTexture;
if(vkTexture->dim == Latte::E_DIM::DIM_3D)
{
cemu_assert_unimplemented();
}
ClearColorImage(vkTexture, sliceIndex, mipIndex, {r, g, b, a}, VK_IMAGE_LAYOUT_GENERAL);
}
void VulkanRenderer::texture_clearDepthSlice(LatteTexture* hostTexture, uint32 sliceIndex, sint32 mipIndex, bool clearDepth, bool clearStencil, float depthValue, uint32 stencilValue)
{
draw_endRenderPass(); // vkCmdClearDepthStencilImage must not be inside renderpass
auto vkTexture = (LatteTextureVk*)hostTexture;
VkImageAspectFlags imageAspect = vkTexture->GetImageAspect();
VkImageAspectFlags aspectMask = 0;
if (clearDepth && (imageAspect & VK_IMAGE_ASPECT_DEPTH_BIT) != 0)
aspectMask |= VK_IMAGE_ASPECT_DEPTH_BIT;
if (clearStencil && (imageAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
auto imageObj = vkTexture->GetImageObj();
imageObj->flagForCurrentCommandBuffer();
VkImageSubresourceLayers subresourceRange{};
subresourceRange.aspectMask = vkTexture->GetImageAspect();
subresourceRange.mipLevel = mipIndex;
subresourceRange.baseArrayLayer = sliceIndex;
subresourceRange.layerCount = 1;
barrier_image<ANY_TRANSFER | IMAGE_READ | IMAGE_WRITE, ANY_TRANSFER>(vkTexture, subresourceRange, VK_IMAGE_LAYOUT_GENERAL);
VkClearDepthStencilValue depthStencilValue{};
depthStencilValue.depth = depthValue;
depthStencilValue.stencil = stencilValue;
VkImageSubresourceRange range{};
range.baseMipLevel = mipIndex;
range.levelCount = 1;
range.baseArrayLayer = sliceIndex;
range.layerCount = 1;
range.aspectMask = aspectMask;
vkCmdClearDepthStencilImage(m_state.currentCommandBuffer, imageObj->m_image, VK_IMAGE_LAYOUT_GENERAL, &depthStencilValue, 1, &range);
barrier_image<ANY_TRANSFER, ANY_TRANSFER | IMAGE_READ | IMAGE_WRITE>(vkTexture, subresourceRange, VK_IMAGE_LAYOUT_GENERAL);
}
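// upload a single slice/mip: copy the decoded pixel data into the staging ring buffer and record a buffer-to-image copy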
void VulkanRenderer::texture_loadSlice(LatteTexture* hostTexture, sint32 width, sint32 height, sint32 depth, void* pixelData, sint32 sliceIndex, sint32 mipIndex, uint32 compressedImageSize)
{
auto vkTexture = (LatteTextureVk*)hostTexture;
auto vkImageObj = vkTexture->GetImageObj();
vkImageObj->flagForCurrentCommandBuffer();
draw_endRenderPass();
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(m_logicalDevice, vkImageObj->m_image, &memRequirements);
uint32 uploadSize = compressedImageSize; // memRequirements.size;
uint32 uploadAlignment = memRequirements.alignment;
VKRSynchronizedRingAllocator& vkMemAllocator = memoryManager->getStagingAllocator();
auto uploadResv = vkMemAllocator.AllocateBufferMemory(uploadSize, uploadAlignment);
memcpy(uploadResv.memPtr, pixelData, compressedImageSize);
vkMemAllocator.FlushReservation(uploadResv);
FormatInfoVK texFormatInfo;
GetTextureFormatInfoVK(hostTexture->format, hostTexture->isDepth, hostTexture->dim, 0, 0, &texFormatInfo);
bool is3DTexture = hostTexture->Is3DTexture();
VkImageSubresourceLayers barrierSubresourceRange{};
barrierSubresourceRange.aspectMask = texFormatInfo.vkImageAspect;
barrierSubresourceRange.mipLevel = mipIndex;
barrierSubresourceRange.baseArrayLayer = is3DTexture ? 0 : sliceIndex;
barrierSubresourceRange.layerCount = 1;
barrier_image<ANY_TRANSFER | IMAGE_READ | IMAGE_WRITE | HOST_WRITE, ANY_TRANSFER>(vkTexture, barrierSubresourceRange, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkBufferImageCopy imageRegion[2]{};
sint32 imageRegionCount = 0;
if (texFormatInfo.vkImageAspect == VK_IMAGE_ASPECT_COLOR_BIT)
{
imageRegion[0].bufferOffset = uploadResv.bufferOffset;
imageRegion[0].imageExtent.width = width;
imageRegion[0].imageExtent.height = height;
imageRegion[0].imageExtent.depth = 1;
imageRegion[0].imageOffset.z = is3DTexture ? sliceIndex : 0;
imageRegion[0].imageSubresource.mipLevel = mipIndex;
imageRegion[0].imageSubresource.aspectMask = texFormatInfo.vkImageAspect;
imageRegion[0].imageSubresource.baseArrayLayer = is3DTexture ? 0 : sliceIndex;
imageRegion[0].imageSubresource.layerCount = 1;
imageRegionCount = 1;
}
else if (texFormatInfo.vkImageAspect == VK_IMAGE_ASPECT_DEPTH_BIT)
{
if (is3DTexture)
cemu_assert_debug(false);
// depth only copy
imageRegion[0].bufferOffset = uploadResv.bufferOffset;
imageRegion[0].imageExtent.width = width;
imageRegion[0].imageExtent.height = height;
imageRegion[0].imageExtent.depth = 1;
imageRegion[0].imageSubresource.mipLevel = mipIndex;
imageRegion[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
imageRegion[0].imageSubresource.baseArrayLayer = sliceIndex;
imageRegion[0].imageSubresource.layerCount = 1;
imageRegionCount = 1;
}
else if (texFormatInfo.vkImageAspect == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))
{
if (is3DTexture)
cemu_assert_debug(false);
// depth copy
imageRegion[0].bufferOffset = uploadResv.bufferOffset;
imageRegion[0].imageExtent.width = width;
imageRegion[0].imageExtent.height = height;
imageRegion[0].imageExtent.depth = 1;
imageRegion[0].imageSubresource.mipLevel = mipIndex;
imageRegion[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
imageRegion[0].imageSubresource.baseArrayLayer = sliceIndex;
imageRegion[0].imageSubresource.layerCount = 1;
// stencil copy
imageRegion[1].bufferOffset = uploadResv.bufferOffset;
imageRegion[1].imageExtent.width = width;
imageRegion[1].imageExtent.height = height;
imageRegion[1].imageExtent.depth = 1;
imageRegion[1].imageSubresource.mipLevel = mipIndex;
imageRegion[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
imageRegion[1].imageSubresource.baseArrayLayer = sliceIndex;
imageRegion[1].imageSubresource.layerCount = 1;
imageRegionCount = 2;
}
else
cemu_assert_debug(false);
vkCmdCopyBufferToImage(m_state.currentCommandBuffer, uploadResv.vkBuffer, vkImageObj->m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, imageRegionCount, imageRegion);
barrier_image<ANY_TRANSFER, ANY_TRANSFER | IMAGE_READ | IMAGE_WRITE>(vkTexture, barrierSubresourceRange, VK_IMAGE_LAYOUT_GENERAL);
}
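// Note (editorial, not from the original file): each VkBufferImageCopy may address only a single
// image aspect, which is why the combined depth+stencil path above records two regions (depth
// first, then stencil) for the same staging reservation instead of one region with both bits set.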
LatteTexture* VulkanRenderer::texture_createTextureEx(Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels,
uint32 swizzle, Latte::E_HWTILEMODE tileMode, bool isDepth)
{
return new LatteTextureVk(this, dim, physAddress, physMipAddress, format, width, height, depth, pitch, mipLevels, swizzle, tileMode, isDepth);
}
void VulkanRenderer::texture_setLatteTexture(LatteTextureView* textureView, uint32 textureUnit)
{
m_state.boundTexture[textureUnit] = static_cast<LatteTextureViewVk*>(textureView);
}
void VulkanRenderer::texture_copyImageSubData(LatteTexture* src, sint32 srcMip, sint32 effectiveSrcX, sint32 effectiveSrcY, sint32 srcSlice, LatteTexture* dst, sint32 dstMip, sint32 effectiveDstX, sint32 effectiveDstY, sint32 dstSlice, sint32 effectiveCopyWidth, sint32 effectiveCopyHeight, sint32 srcDepth)
{
LatteTextureVk* srcVk = static_cast<LatteTextureVk*>(src);
LatteTextureVk* dstVk = static_cast<LatteTextureVk*>(dst);
draw_endRenderPass(); // vkCmdCopyImage must be called outside of a renderpass
VKRObjectTexture* srcVkObj = srcVk->GetImageObj();
VKRObjectTexture* dstVkObj = dstVk->GetImageObj();
srcVkObj->flagForCurrentCommandBuffer();
dstVkObj->flagForCurrentCommandBuffer();
VkImageCopy region{};
region.srcOffset.x = effectiveSrcX;
region.srcOffset.y = effectiveSrcY;
region.dstOffset.x = effectiveDstX;
region.dstOffset.y = effectiveDstY;
region.extent.width = effectiveCopyWidth;
region.extent.height = effectiveCopyHeight;
region.extent.depth = 1;
if (src->Is3DTexture())
{
region.srcOffset.z = srcSlice;
region.extent.depth = srcDepth;
region.srcSubresource.baseArrayLayer = 0;
region.srcSubresource.layerCount = 1;
}
else
{
region.srcOffset.z = 0;
region.extent.depth = 1;
region.srcSubresource.baseArrayLayer = srcSlice;
region.srcSubresource.layerCount = srcDepth;
}
if (dst->Is3DTexture())
{
region.dstOffset.z = dstSlice;
region.dstSubresource.baseArrayLayer = 0;
region.dstSubresource.layerCount = 1;
}
else
{
region.dstOffset.z = 0;
region.dstSubresource.baseArrayLayer = dstSlice;
region.dstSubresource.layerCount = srcDepth;
}
region.srcSubresource.mipLevel = srcMip;
region.srcSubresource.aspectMask = srcVk->GetImageAspect();
region.dstSubresource.mipLevel = dstMip;
region.dstSubresource.aspectMask = dstVk->GetImageAspect();
bool srcIsCompressed = Latte::IsCompressedFormat(srcVk->format);
bool dstIsCompressed = Latte::IsCompressedFormat(dstVk->format);
if (!srcIsCompressed && dstIsCompressed)
{
// handle the special case where the destination is compressed and not a multiple of the texel size (4)
sint32 mipWidth = std::max(dst->width >> dstMip, 1);
sint32 mipHeight = std::max(dst->height >> dstMip, 1);
if (mipWidth < 4 || mipHeight < 4)
{
cemuLog_logDebug(LogType::Force, "vkCmdCopyImage - blocked copy for unsupported uncompressed->compressed copy with dst smaller than 4x4");
return;
}
}
// make sure all write operations to the src image have finished
barrier_image<SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER, SYNC_OP::ANY_TRANSFER>(srcVk, region.srcSubresource, VK_IMAGE_LAYOUT_GENERAL);
// make sure all read and write operations to the dst image have finished
barrier_image<SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER, SYNC_OP::ANY_TRANSFER>(dstVk, region.dstSubresource, VK_IMAGE_LAYOUT_GENERAL);
vkCmdCopyImage(m_state.currentCommandBuffer, srcVkObj->m_image, VK_IMAGE_LAYOUT_GENERAL, dstVkObj->m_image, VK_IMAGE_LAYOUT_GENERAL, 1, ®ion);
// make sure the transfer is finished before the image is read or written
barrier_image<SYNC_OP::ANY_TRANSFER, SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER>(dstVk, region.dstSubresource, VK_IMAGE_LAYOUT_GENERAL);
}
LatteTextureReadbackInfo* VulkanRenderer::texture_createReadback(LatteTextureView* textureView)
{
auto* result = new LatteTextureReadbackInfoVk(m_logicalDevice, textureView);
LatteTextureVk* vkTex = (LatteTextureVk*)textureView->baseTexture;
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(m_logicalDevice, vkTex->GetImageObj()->m_image, &memRequirements);
const uint32 linearImageSize = result->GetImageSize();
const uint32 uploadSize = (linearImageSize == 0) ? memRequirements.size : linearImageSize;
const uint32 uploadAlignment = 256; // todo - use Vk optimalBufferCopyOffsetAlignment
m_textureReadbackBufferWriteIndex = (m_textureReadbackBufferWriteIndex + uploadAlignment - 1) & ~(uploadAlignment - 1);
if ((m_textureReadbackBufferWriteIndex + uploadSize + 256) > TEXTURE_READBACK_SIZE)
{
m_textureReadbackBufferWriteIndex = 0;
}
const uint32 uploadBufferOffset = m_textureReadbackBufferWriteIndex;
m_textureReadbackBufferWriteIndex += uploadSize;
result->SetBuffer(m_textureReadbackBuffer, m_textureReadbackBufferPtr, uploadBufferOffset);
return result;
}
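// Illustrative sketch (not part of the original file) of the ring layout used above for readback
// reservations: align the write cursor up to the copy alignment, wrap back to offset 0 when the
// reservation (plus the small safety margin used above) would run past the fixed-size buffer.
static uint32 ringReserve(uint32& writeIndex, uint32 size, uint32 alignment, uint32 bufferSize)
{
	writeIndex = (writeIndex + alignment - 1) & ~(alignment - 1); // align up, alignment must be a power of two
	if (writeIndex + size > bufferSize)
		writeIndex = 0; // wrap around; assumes readbacks near the start of the buffer were consumed already
	uint32 offset = writeIndex;
	writeIndex += size;
	return offset;
}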
uint32 s_vkCurrentUniqueId = 0;
uint64 VulkanRenderer::GenUniqueId()
{
s_vkCurrentUniqueId++;
return s_vkCurrentUniqueId;
}
void VulkanRenderer::streamout_setupXfbBuffer(uint32 bufferIndex, sint32 ringBufferOffset, uint32 rangeAddr, uint32 rangeSize)
{
VkDeviceSize tfBufferOffset = ringBufferOffset;
m_streamoutState.buffer[bufferIndex].enabled = true;
m_streamoutState.buffer[bufferIndex].ringBufferOffset = ringBufferOffset;
}
void VulkanRenderer::streamout_begin()
{
if (m_featureControl.mode.useTFEmulationViaSSBO)
return;
if (m_state.hasActiveXfb == false)
m_state.hasActiveXfb = true;
}
void VulkanRenderer::streamout_applyTransformFeedbackState()
{
if (m_featureControl.mode.useTFEmulationViaSSBO)
return;
if (m_state.hasActiveXfb)
{
// set buffers
for (sint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if (m_streamoutState.buffer[i].enabled)
{
VkBuffer tfBuffer = m_xfbRingBuffer;
VkDeviceSize tfBufferOffset = m_streamoutState.buffer[i].ringBufferOffset;
VkDeviceSize tfBufferSize = VK_WHOLE_SIZE;
vkCmdBindTransformFeedbackBuffersEXT(m_state.currentCommandBuffer, i, 1, &tfBuffer, &tfBufferOffset, &tfBufferSize);
}
}
// begin transform feedback
vkCmdBeginTransformFeedbackEXT(m_state.currentCommandBuffer, 0, 0, nullptr, nullptr);
}
}
void VulkanRenderer::streamout_rendererFinishDrawcall()
{
if (m_state.hasActiveXfb)
{
vkCmdEndTransformFeedbackEXT(m_state.currentCommandBuffer, 0, 0, nullptr, nullptr);
m_streamoutState.buffer[0].enabled = false;
m_streamoutState.buffer[1].enabled = false;
m_streamoutState.buffer[2].enabled = false;
m_streamoutState.buffer[3].enabled = false;
m_state.hasActiveXfb = false;
}
}
void VulkanRenderer::buffer_bindVertexBuffer(uint32 bufferIndex, uint32 offset, uint32 size)
{
cemu_assert_debug(!m_useHostMemoryForCache);
if (m_state.currentVertexBinding[bufferIndex].offset == offset)
return;
cemu_assert_debug(bufferIndex < LATTE_MAX_VERTEX_BUFFERS);
m_state.currentVertexBinding[bufferIndex].offset = offset;
VkBuffer attrBuffer = m_bufferCache;
VkDeviceSize attrOffset = offset;
vkCmdBindVertexBuffers(m_state.currentCommandBuffer, bufferIndex, 1, &attrBuffer, &attrOffset);
}
void VulkanRenderer::buffer_bindVertexStrideWorkaroundBuffer(VkBuffer fixedBuffer, uint32 offset, uint32 bufferIndex, uint32 size)
{
cemu_assert_debug(bufferIndex < LATTE_MAX_VERTEX_BUFFERS);
m_state.currentVertexBinding[bufferIndex].offset = 0xFFFFFFFF;
VkBuffer attrBuffer = fixedBuffer;
VkDeviceSize attrOffset = offset;
vkCmdBindVertexBuffers(m_state.currentCommandBuffer, bufferIndex, 1, &attrBuffer, &attrOffset);
}
std::pair<VkBuffer, uint32> VulkanRenderer::buffer_genStrideWorkaroundVertexBuffer(MPTR buffer, uint32 size, uint32 oldStride)
{
cemu_assert_debug(oldStride % 4 != 0);
std::span<uint8> old_buffer{memory_getPointerFromPhysicalOffset(buffer), size};
//new stride is the nearest multiple of 4
uint32 newStride = oldStride + (4-(oldStride % 4));
uint32 newSize = size / oldStride * newStride;
auto new_buffer_alloc = memoryManager->getMetalStrideWorkaroundAllocator().AllocateBufferMemory(newSize, 128);
std::span<uint8> new_buffer{new_buffer_alloc.memPtr, new_buffer_alloc.size};
for(size_t elem = 0; elem < size / oldStride; elem++)
{
memcpy(&new_buffer[elem * newStride], &old_buffer[elem * oldStride], oldStride);
}
return {new_buffer_alloc.vkBuffer, new_buffer_alloc.bufferOffset};
}
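// Illustrative sketch (not from the original source) of the stride rounding performed above:
// an unaligned stride is padded up to the next multiple of 4, e.g. 6 -> 8, 9 -> 12, 13 -> 16.
static uint32 roundStrideToNextMultipleOf4(uint32 oldStride)
{
	cemu_assert_debug(oldStride % 4 != 0); // mirrors the precondition of the function above
	return oldStride + (4 - (oldStride % 4));
}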
void VulkanRenderer::buffer_bindUniformBuffer(LatteConst::ShaderType shaderType, uint32 bufferIndex, uint32 offset, uint32 size)
{
cemu_assert_debug(!m_useHostMemoryForCache);
cemu_assert_debug(bufferIndex < 16);
switch (shaderType)
{
case LatteConst::ShaderType::Vertex:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX].uniformBufferOffset[bufferIndex] = offset;
break;
case LatteConst::ShaderType::Geometry:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY].uniformBufferOffset[bufferIndex] = offset;
break;
case LatteConst::ShaderType::Pixel:
dynamicOffsetInfo.shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT].uniformBufferOffset[bufferIndex] = offset;
break;
default:
cemu_assert_debug(false);
}
}
void VulkanRenderer::bufferCache_init(const sint32 bufferSize)
{
m_importedMemBaseAddress = 0x10000000;
size_t hostAllocationSize = 0x40000000ull;
// todo - get size of allocation
bool configUseHostMemory = false; // todo - replace this with a config option
m_useHostMemoryForCache = false;
if (m_featureControl.deviceExtensions.external_memory_host && configUseHostMemory)
{
m_useHostMemoryForCache = memoryManager->CreateBufferFromHostMemory(memory_getPointerFromVirtualOffset(m_importedMemBaseAddress), hostAllocationSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, m_importedMem, m_importedMemMemory);
if (!m_useHostMemoryForCache)
{
cemuLog_log(LogType::Force, "Unable to import host memory to Vulkan buffer. Use default cache system instead");
}
}
if(!m_useHostMemoryForCache)
memoryManager->CreateBuffer(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, m_bufferCache, m_bufferCacheMemory);
}
void VulkanRenderer::bufferCache_upload(uint8* buffer, sint32 size, uint32 bufferOffset)
{
draw_endRenderPass();
VKRSynchronizedRingAllocator& vkMemAllocator = memoryManager->getStagingAllocator();
auto uploadResv = vkMemAllocator.AllocateBufferMemory(size, 256);
memcpy(uploadResv.memPtr, buffer, size);
vkMemAllocator.FlushReservation(uploadResv);
barrier_bufferRange<ANY_TRANSFER | HOST_WRITE, ANY_TRANSFER,
BUFFER_SHADER_READ, TRANSFER_WRITE>(
uploadResv.vkBuffer, uploadResv.bufferOffset, uploadResv.size, // make sure any in-flight transfers are completed
m_bufferCache, bufferOffset, size); // make sure all reads are completed before we overwrite the data
VkBufferCopy region;
region.srcOffset = uploadResv.bufferOffset;
region.dstOffset = bufferOffset;
region.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, uploadResv.vkBuffer, m_bufferCache, 1, ®ion);
barrier_sequentializeTransfer();
}
void VulkanRenderer::bufferCache_copy(uint32 srcOffset, uint32 dstOffset, uint32 size)
{
cemu_assert_debug(!m_useHostMemoryForCache);
draw_endRenderPass();
barrier_sequentializeTransfer();
bool isOverlapping = (srcOffset + size) > dstOffset && (srcOffset) < (dstOffset + size);
cemu_assert_debug(!isOverlapping);
VkBufferCopy bufferCopy{};
bufferCopy.srcOffset = srcOffset;
bufferCopy.dstOffset = dstOffset;
bufferCopy.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, m_bufferCache, m_bufferCache, 1, &bufferCopy);
barrier_sequentializeTransfer();
}
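// Illustrative restatement (not part of the original file) of the overlap check above. Because
// source and destination are the same VkBuffer, vkCmdCopyBuffer requires the copied ranges
// [srcOffset, srcOffset+size) and [dstOffset, dstOffset+size) to be disjoint.
static bool copyRangesOverlap(uint32 srcOffset, uint32 dstOffset, uint32 size)
{
	return (srcOffset + size) > dstOffset && srcOffset < (dstOffset + size);
}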
void VulkanRenderer::bufferCache_copyStreamoutToMainBuffer(uint32 srcOffset, uint32 dstOffset, uint32 size)
{
draw_endRenderPass();
VkBuffer dstBuffer;
if (m_useHostMemoryForCache)
{
// in host memory mode, dstOffset is physical address instead of cache address
dstBuffer = m_importedMem;
dstOffset -= m_importedMemBaseAddress;
}
else
dstBuffer = m_bufferCache;
barrier_bufferRange<BUFFER_SHADER_WRITE, TRANSFER_READ,
ANY_TRANSFER | BUFFER_SHADER_READ, TRANSFER_WRITE>(
m_xfbRingBuffer, srcOffset, size, // wait for all writes to finish
dstBuffer, dstOffset, size); // wait for all reads to finish
barrier_sequentializeTransfer();
VkBufferCopy bufferCopy{};
bufferCopy.srcOffset = srcOffset;
bufferCopy.dstOffset = dstOffset;
bufferCopy.size = size;
vkCmdCopyBuffer(m_state.currentCommandBuffer, m_xfbRingBuffer, dstBuffer, 1, &bufferCopy);
barrier_sequentializeTransfer();
}
void VulkanRenderer::AppendOverlayDebugInfo()
{
ImGui::Text("--- Vulkan info ---");
ImGui::Text("GfxPipelines %u", performanceMonitor.vk.numGraphicPipelines.get());
ImGui::Text("DescriptorSets %u", performanceMonitor.vk.numDescriptorSets.get());
ImGui::Text("DS ImgSamplers %u", performanceMonitor.vk.numDescriptorSamplerTextures.get());
ImGui::Text("DS DynUniform %u", performanceMonitor.vk.numDescriptorDynUniformBuffers.get());
ImGui::Text("DS StorageBuf %u", performanceMonitor.vk.numDescriptorStorageBuffers.get());
ImGui::Text("Images %u", performanceMonitor.vk.numImages.get());
ImGui::Text("ImageView %u", performanceMonitor.vk.numImageViews.get());
ImGui::Text("RenderPass %u", performanceMonitor.vk.numRenderPass.get());
ImGui::Text("Framebuffer %u", performanceMonitor.vk.numFramebuffer.get());
m_spinlockDestructionQueue.lock();
ImGui::Text("DestructionQ %u", (unsigned int)m_destructionQueue.size());
m_spinlockDestructionQueue.unlock();
ImGui::Text("BeginRP/f %u", performanceMonitor.vk.numBeginRenderpassPerFrame.get());
ImGui::Text("Barriers/f %u", performanceMonitor.vk.numDrawBarriersPerFrame.get());
ImGui::Text("--- Cache info ---");
uint32 bufferCacheHeapSize = 0;
uint32 bufferCacheAllocationSize = 0;
uint32 bufferCacheNumAllocations = 0;
LatteBufferCache_getStats(bufferCacheHeapSize, bufferCacheAllocationSize, bufferCacheNumAllocations);
ImGui::Text("Buffer");
ImGui::SameLine(60.0f);
ImGui::Text("%06uKB / %06uKB Allocs: %u", (uint32)(bufferCacheAllocationSize + 1023) / 1024, ((uint32)bufferCacheHeapSize + 1023) / 1024, (uint32)bufferCacheNumAllocations);
uint32 numBuffers;
size_t totalSize, freeSize;
memoryManager->getStagingAllocator().GetStats(numBuffers, totalSize, freeSize);
ImGui::Text("Staging");
ImGui::SameLine(60.0f);
ImGui::Text("%06uKB / %06uKB Buffers: %u", ((uint32)(totalSize - freeSize) + 1023) / 1024, ((uint32)totalSize + 1023) / 1024, (uint32)numBuffers);
memoryManager->getIndexAllocator().GetStats(numBuffers, totalSize, freeSize);
ImGui::Text("Index");
ImGui::SameLine(60.0f);
ImGui::Text("%06uKB / %06uKB Buffers: %u", ((uint32)(totalSize - freeSize) + 1023) / 1024, ((uint32)totalSize + 1023) / 1024, (uint32)numBuffers);
ImGui::Text("--- Tex heaps ---");
memoryManager->appendOverlayHeapDebugInfo();
}
void VKRDestructibleObject::flagForCurrentCommandBuffer()
{
m_lastCmdBufferId = VulkanRenderer::GetInstance()->GetCurrentCommandBufferId();
}
bool VKRDestructibleObject::canDestroy()
{
if (refCount > 0)
return false;
return VulkanRenderer::GetInstance()->HasCommandBufferFinished(m_lastCmdBufferId);
}
VKRObjectTexture::VKRObjectTexture()
{
performanceMonitor.vk.numImages.increment();
}
VKRObjectTexture::~VKRObjectTexture()
{
auto vkr = VulkanRenderer::GetInstance();
if (m_allocation)
{
vkr->GetMemoryManager()->imageMemoryFree(m_allocation);
m_allocation = nullptr;
}
if (m_image)
vkDestroyImage(vkr->GetLogicalDevice(), m_image, nullptr);
performanceMonitor.vk.numImages.decrement();
}
VKRObjectTextureView::VKRObjectTextureView(VKRObjectTexture* tex, VkImageView view)
{
m_textureImageView = view;
this->addRef(tex);
performanceMonitor.vk.numImageViews.increment();
}
VKRObjectTextureView::~VKRObjectTextureView()
{
auto logicalDevice = VulkanRenderer::GetInstance()->GetLogicalDevice();
if (m_textureDefaultSampler[0] != VK_NULL_HANDLE)
vkDestroySampler(logicalDevice, m_textureDefaultSampler[0], nullptr);
if (m_textureDefaultSampler[1] != VK_NULL_HANDLE)
vkDestroySampler(logicalDevice, m_textureDefaultSampler[1], nullptr);
vkDestroyImageView(logicalDevice, m_textureImageView, nullptr);
performanceMonitor.vk.numImageViews.decrement();
}
VKRObjectRenderPass::VKRObjectRenderPass(AttachmentInfo_t& attachmentInfo, sint32 colorAttachmentCount)
{
// generate helper hash for pipeline state
uint64 stateHash = 0;
for (int i = 0; i < Latte::GPU_LIMITS::NUM_COLOR_ATTACHMENTS; ++i)
{
if (attachmentInfo.colorAttachment[i].isPresent || attachmentInfo.colorAttachment[i].viewObj)
{
stateHash += attachmentInfo.colorAttachment[i].format + i * 31;
stateHash = std::rotl<uint64>(stateHash, 7);
}
}
if (attachmentInfo.depthAttachment.isPresent || attachmentInfo.depthAttachment.viewObj)
{
stateHash += attachmentInfo.depthAttachment.format;
stateHash = std::rotl<uint64>(stateHash, 7);
}
m_hashForPipeline = stateHash;
// setup Vulkan renderpass
std::vector<VkAttachmentDescription> attachments_descriptions;
std::array<VkAttachmentReference, Latte::GPU_LIMITS::NUM_COLOR_ATTACHMENTS> color_attachments_references{};
cemu_assert(colorAttachmentCount <= color_attachments_references.size());
sint32 numColorAttachments = 0;
for (int i = 0; i < 8; ++i)
{
if (attachmentInfo.colorAttachment[i].viewObj == nullptr && attachmentInfo.colorAttachment[i].isPresent == false)
{
color_attachments_references[i].attachment = VK_ATTACHMENT_UNUSED;
m_colorAttachmentFormat[i] = VK_FORMAT_UNDEFINED;
continue;
}
m_colorAttachmentFormat[i] = attachmentInfo.colorAttachment[i].format;
color_attachments_references[i].attachment = (uint32)attachments_descriptions.size();
color_attachments_references[i].layout = VK_IMAGE_LAYOUT_GENERAL;
VkAttachmentDescription entry{};
entry.format = attachmentInfo.colorAttachment[i].format;
entry.samples = VK_SAMPLE_COUNT_1_BIT;
entry.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
entry.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
entry.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
entry.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
entry.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
entry.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
attachments_descriptions.emplace_back(entry);
numColorAttachments = i + 1;
}
VkAttachmentReference depth_stencil_attachments_references{};
bool hasDepthStencilAttachment = false;
if (attachmentInfo.depthAttachment.viewObj == nullptr && attachmentInfo.depthAttachment.isPresent == false)
{
depth_stencil_attachments_references.attachment = VK_ATTACHMENT_UNUSED;
m_depthAttachmentFormat = VK_FORMAT_UNDEFINED;
}
else
{
hasDepthStencilAttachment = true;
depth_stencil_attachments_references.attachment = (uint32)attachments_descriptions.size();
depth_stencil_attachments_references.layout = VK_IMAGE_LAYOUT_GENERAL;
m_depthAttachmentFormat = attachmentInfo.depthAttachment.format;
VkAttachmentDescription entry{};
entry.format = attachmentInfo.depthAttachment.format;
entry.samples = VK_SAMPLE_COUNT_1_BIT;
entry.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
entry.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
if (attachmentInfo.depthAttachment.hasStencil)
{
entry.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
entry.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
}
else
{
entry.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
entry.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
}
entry.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
entry.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
attachments_descriptions.emplace_back(entry);
}
// todo - use numColorAttachments instead of .size() or colorAttachmentCount (needs adjusting in many places)
VkSubpassDescription subpass{};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = colorAttachmentCount;
subpass.pColorAttachments = color_attachments_references.data();
subpass.inputAttachmentCount = 0;
subpass.pInputAttachments = nullptr;
subpass.pDepthStencilAttachment = &depth_stencil_attachments_references;
VkRenderPassCreateInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = (uint32)attachments_descriptions.size();
renderPassInfo.pAttachments = attachments_descriptions.data();
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.pDependencies = nullptr;
renderPassInfo.dependencyCount = 0;
// before Cemu 1.25.5 we used zero here, which means implicit synchronization. For 1.25.5 it was changed to 2 (using the subpass dependencies above)
// Reverted this again to zero for Cemu 1.25.5b as the performance cost is just too high. Manual synchronization is preferred
if (vkCreateRenderPass(VulkanRenderer::GetInstance()->GetLogicalDevice(), &renderPassInfo, nullptr, &m_renderPass) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Vulkan-Error: Failed to create render pass");
throw std::runtime_error("failed to create render pass!");
}
// track references
for (int i = 0; i < 8; ++i)
{
if (attachmentInfo.colorAttachment[i].viewObj)
addRef(attachmentInfo.colorAttachment[i].viewObj);
}
if (attachmentInfo.depthAttachment.viewObj)
addRef(attachmentInfo.depthAttachment.viewObj);
performanceMonitor.vk.numRenderPass.increment();
}
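// Illustrative sketch (not part of the original file) of the helper-hash scheme used in the
// constructor above: every present attachment format is added into a running hash which is then
// rotated by 7 bits, so identical formats in different attachment slots still hash differently.
static uint64 attachmentStateHash(const VkFormat* colorFormats, size_t count, VkFormat depthFormat)
{
	uint64 h = 0;
	for (size_t i = 0; i < count; i++)
	{
		if (colorFormats[i] == VK_FORMAT_UNDEFINED)
			continue;
		h += (uint64)colorFormats[i] + i * 31;
		h = std::rotl<uint64>(h, 7);
	}
	if (depthFormat != VK_FORMAT_UNDEFINED)
	{
		h += (uint64)depthFormat;
		h = std::rotl<uint64>(h, 7);
	}
	return h;
}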
VKRObjectRenderPass::~VKRObjectRenderPass()
{
if (m_renderPass != VK_NULL_HANDLE)
vkDestroyRenderPass(VulkanRenderer::GetInstance()->GetLogicalDevice(), m_renderPass, nullptr);
performanceMonitor.vk.numRenderPass.decrement();
}
VKRObjectFramebuffer::VKRObjectFramebuffer(VKRObjectRenderPass* renderPass, std::span<VKRObjectTextureView*> attachments, Vector2i size)
{
// convert VKRObjectTextureView* array to vkImageView array
std::array<VkImageView, 16> attachmentViews;
cemu_assert(attachments.size() < attachmentViews.size());
for (size_t i = 0; i < attachments.size(); i++)
attachmentViews[i] = attachments[i]->m_textureImageView;
VkFramebufferCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
createInfo.pAttachments = attachmentViews.data();
createInfo.attachmentCount = attachments.size();
createInfo.renderPass = renderPass->m_renderPass;
createInfo.layers = 1;
createInfo.width = size.x;
createInfo.height = size.y;
if (vkCreateFramebuffer(VulkanRenderer::GetInstance()->GetLogicalDevice(), &createInfo, nullptr, &m_frameBuffer) != VK_SUCCESS)
throw std::runtime_error("failed to create framebuffer!");
// track refs
this->addRef(renderPass);
for (auto& itr : attachments)
this->addRef(itr);
performanceMonitor.vk.numFramebuffer.increment();
}
VKRObjectFramebuffer::~VKRObjectFramebuffer()
{
if (m_frameBuffer != VK_NULL_HANDLE)
vkDestroyFramebuffer(VulkanRenderer::GetInstance()->GetLogicalDevice(), m_frameBuffer, nullptr);
performanceMonitor.vk.numFramebuffer.decrement();
}
VKRObjectPipeline::VKRObjectPipeline()
{
// todo
}
void VKRObjectPipeline::setPipeline(VkPipeline newPipeline)
{
cemu_assert_debug(pipeline == VK_NULL_HANDLE);
pipeline = newPipeline;
if(newPipeline != VK_NULL_HANDLE)
performanceMonitor.vk.numGraphicPipelines.increment();
}
VKRObjectPipeline::~VKRObjectPipeline()
{
auto vkr = VulkanRenderer::GetInstance();
if (pipeline != VK_NULL_HANDLE)
{
vkDestroyPipeline(vkr->GetLogicalDevice(), pipeline, nullptr);
performanceMonitor.vk.numGraphicPipelines.decrement();
}
if (vertexDSL != VK_NULL_HANDLE)
vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), vertexDSL, nullptr);
if (pixelDSL != VK_NULL_HANDLE)
vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), pixelDSL, nullptr);
if (geometryDSL != VK_NULL_HANDLE)
vkDestroyDescriptorSetLayout(vkr->GetLogicalDevice(), geometryDSL, nullptr);
if (pipeline_layout != VK_NULL_HANDLE)
vkDestroyPipelineLayout(vkr->GetLogicalDevice(), pipeline_layout, nullptr);
}
VKRObjectDescriptorSet::VKRObjectDescriptorSet()
{
performanceMonitor.vk.numDescriptorSets.increment();
}
VKRObjectDescriptorSet::~VKRObjectDescriptorSet()
{
auto vkr = VulkanRenderer::GetInstance();
vkFreeDescriptorSets(vkr->GetLogicalDevice(), vkr->GetDescriptorPool(), 1, &descriptorSet);
performanceMonitor.vk.numDescriptorSets.decrement();
}
| 153,511 | C++ | .cpp | 3,410 | 42.365103 | 333 | 0.78781 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,239 | VulkanPipelineStableCache.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.cpp |
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteCachedFBO.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "config/ActiveSettings.h"
#include "util/helpers/Serializer.h"
#include "Cafe/HW/Latte/Common/RegisterSerializer.h"
#include "Cemu/FileCache/FileCache.h"
#include "Cafe/HW/Latte/Core/LatteShaderCache.h"
#include "util/helpers/helpers.h"
#include <openssl/sha.h>
struct
{
uint32 pipelineLoadIndex;
uint32 pipelineMaxFileIndex;
std::atomic_uint32_t pipelinesQueued;
std::atomic_uint32_t pipelinesLoaded;
}g_vkCacheState;
VulkanPipelineStableCache g_vkPipelineStableCacheInstance;
VulkanPipelineStableCache& VulkanPipelineStableCache::GetInstance()
{
return g_vkPipelineStableCacheInstance;
}
uint32 VulkanPipelineStableCache::BeginLoading(uint64 cacheTitleId)
{
std::error_code ec;
fs::create_directories(ActiveSettings::GetCachePath("shaderCache/transferable"), ec);
const auto pathCacheFile = ActiveSettings::GetCachePath("shaderCache/transferable/{:016x}_vkpipeline.bin", cacheTitleId);
// init cache loader state
g_vkCacheState.pipelineLoadIndex = 0;
g_vkCacheState.pipelineMaxFileIndex = 0;
g_vkCacheState.pipelinesLoaded = 0;
g_vkCacheState.pipelinesQueued = 0;
// start async compilation threads
m_compilationCount.store(0);
m_compilationQueue.clear();
// get core count
uint32 cpuCoreCount = GetPhysicalCoreCount();
m_numCompilationThreads = std::clamp(cpuCoreCount, 1u, 8u);
if (VulkanRenderer::GetInstance()->GetDisableMultithreadedCompilation())
m_numCompilationThreads = 1;
for (uint32 i = 0; i < m_numCompilationThreads; i++)
{
std::thread compileThread(&VulkanPipelineStableCache::CompilerThread, this);
compileThread.detach();
}
// open cache file or create it
cemu_assert_debug(s_cache == nullptr);
s_cache = FileCache::Open(pathCacheFile, true, LatteShaderCache_getPipelineCacheExtraVersion(cacheTitleId));
if (!s_cache)
{
cemuLog_log(LogType::Force, "Failed to open or create Vulkan pipeline cache file: {}", _pathToUtf8(pathCacheFile));
return 0;
}
else
{
s_cache->UseCompression(false);
g_vkCacheState.pipelineMaxFileIndex = s_cache->GetMaximumFileIndex();
}
return s_cache->GetFileCount();
}
bool VulkanPipelineStableCache::UpdateLoading(uint32& pipelinesLoadedTotal, uint32& pipelinesMissingShaders)
{
pipelinesLoadedTotal = g_vkCacheState.pipelinesLoaded;
pipelinesMissingShaders = 0;
while (g_vkCacheState.pipelineLoadIndex <= g_vkCacheState.pipelineMaxFileIndex)
{
if (m_compilationQueue.size() >= 50)
{
std::this_thread::sleep_for(std::chrono::milliseconds(10));
return true; // queue up to 50 entries at a time
}
uint64 fileNameA, fileNameB;
std::vector<uint8> fileData;
if (s_cache->GetFileByIndex(g_vkCacheState.pipelineLoadIndex, &fileNameA, &fileNameB, fileData))
{
// queue for async compilation
g_vkCacheState.pipelinesQueued++;
m_compilationQueue.push(std::move(fileData));
g_vkCacheState.pipelineLoadIndex++;
return true;
}
g_vkCacheState.pipelineLoadIndex++;
}
if (g_vkCacheState.pipelinesLoaded != g_vkCacheState.pipelinesQueued)
{
std::this_thread::sleep_for(std::chrono::milliseconds(10));
return true; // pipelines still compiling
}
return false; // done
}
void VulkanPipelineStableCache::EndLoading()
{
// shut down compilation threads
uint32 threadCount = m_numCompilationThreads;
m_numCompilationThreads = 0; // signal thread shutdown
for (uint32 i = 0; i < threadCount; i++)
{
m_compilationQueue.push({}); // push empty workload for every thread. Threads then will shutdown after checking for m_numCompilationThreads == 0
}
// keep cache file open for writing of new pipelines
}
void VulkanPipelineStableCache::Close()
{
if(s_cache)
{
delete s_cache;
s_cache = nullptr;
}
}
struct CachedPipeline
{
struct ShaderHash
{
uint64 baseHash;
uint64 auxHash;
bool isPresent{};
void set(uint64 baseHash, uint64 auxHash)
{
this->baseHash = baseHash;
this->auxHash = auxHash;
this->isPresent = true;
}
};
ShaderHash vsHash; // includes fetch shader
ShaderHash gsHash;
ShaderHash psHash;
Latte::GPUCompactedRegisterState gpuState;
};
VkFormat __getColorBufferVkFormat(const uint32 index, const LatteContextRegister& lcr)
{
Latte::E_GX2SURFFMT colorBufferFormat = LatteMRT::GetColorBufferFormat(index, lcr);
VulkanRenderer::FormatInfoVK texFormatInfo;
VulkanRenderer::GetInstance()->GetTextureFormatInfoVK(colorBufferFormat, false, Latte::E_DIM::DIM_2D, 1280, 720, &texFormatInfo);
return texFormatInfo.vkImageFormat;
}
void __getDepthBufferVkFormat(const LatteContextRegister& lcr, VkFormat& dbFormat, bool& hasStencil)
{
Latte::E_GX2SURFFMT format = LatteMRT::GetDepthBufferFormat(lcr);
VulkanRenderer::FormatInfoVK texFormatInfo;
VulkanRenderer::GetInstance()->GetTextureFormatInfoVK(format, true, Latte::E_DIM::DIM_2D, 1280, 720, &texFormatInfo);
dbFormat = texFormatInfo.vkImageFormat;
hasStencil = (texFormatInfo.vkImageAspect & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
}
// create placeholder renderpass for cached pipeline
VKRObjectRenderPass* __CreateTemporaryRenderPass(const LatteDecompilerShader* pixelShader, const LatteContextRegister& lcr)
{
VKRObjectRenderPass::AttachmentInfo_t attachmentInfo;
uint8 cbMask = LatteMRT::GetActiveColorBufferMask(pixelShader, lcr);
bool dbMask = LatteMRT::GetActiveDepthBufferMask(lcr);
for (int i = 0; i < 8; ++i)
{
if ((cbMask & (1 << i)) == 0)
{
attachmentInfo.colorAttachment[i].viewObj = nullptr;
continue;
}
// setup color attachment
attachmentInfo.colorAttachment[i].viewObj = nullptr;
attachmentInfo.colorAttachment[i].isPresent = true;
attachmentInfo.colorAttachment[i].format = __getColorBufferVkFormat(i, lcr);
}
// setup depth attachment
if (dbMask)
{
attachmentInfo.depthAttachment.viewObj = nullptr;
attachmentInfo.depthAttachment.isPresent = true;
VkFormat dbFormat;
bool hasStencil;
__getDepthBufferVkFormat(lcr, dbFormat, hasStencil);
attachmentInfo.depthAttachment.format = dbFormat;
attachmentInfo.depthAttachment.hasStencil = hasStencil;
}
else
{
// no depth attachment
attachmentInfo.depthAttachment.viewObj = nullptr;
attachmentInfo.depthAttachment.isPresent = false;
}
return new VKRObjectRenderPass(attachmentInfo);
}
void VulkanPipelineStableCache::LoadPipelineFromCache(std::span<uint8> fileData)
{
static FSpinlock s_spinlockSharedInternal;
// deserialize file
LatteContextRegister* lcr = new LatteContextRegister();
s_spinlockSharedInternal.lock();
CachedPipeline* cachedPipeline = new CachedPipeline();
s_spinlockSharedInternal.unlock();
MemStreamReader streamReader(fileData.data(), fileData.size());
if (!DeserializePipeline(streamReader, *cachedPipeline))
{
// failed to deserialize
s_spinlockSharedInternal.lock();
delete lcr;
delete cachedPipeline;
s_spinlockSharedInternal.unlock();
return;
}
// restore register view from compacted state
Latte::LoadGPURegisterState(*lcr, cachedPipeline->gpuState);
LatteDecompilerShader* vertexShader = nullptr;
LatteDecompilerShader* geometryShader = nullptr;
LatteDecompilerShader* pixelShader = nullptr;
// find vertex shader
if (cachedPipeline->vsHash.isPresent)
{
vertexShader = LatteSHRC_FindVertexShader(cachedPipeline->vsHash.baseHash, cachedPipeline->vsHash.auxHash);
if (!vertexShader)
{
cemuLog_logDebug(LogType::Force, "Vertex shader not found in cache");
return;
}
}
// find geometry shader
if (cachedPipeline->gsHash.isPresent)
{
geometryShader = LatteSHRC_FindGeometryShader(cachedPipeline->gsHash.baseHash, cachedPipeline->gsHash.auxHash);
if (!geometryShader)
{
cemuLog_logDebug(LogType::Force, "Geometry shader not found in cache");
return;
}
}
// find pixel shader
if (cachedPipeline->psHash.isPresent)
{
pixelShader = LatteSHRC_FindPixelShader(cachedPipeline->psHash.baseHash, cachedPipeline->psHash.auxHash);
if (!pixelShader)
{
cemuLog_logDebug(LogType::Force, "Pixel shader not found in cache");
return;
}
}
// create temporary renderpass
if (!pixelShader)
{
cemu_assert_debug(false);
return;
}
auto renderPass = __CreateTemporaryRenderPass(pixelShader, *lcr);
// create pipeline info
m_pipelineIsCachedLock.lock();
PipelineInfo* pipelineInfo = new PipelineInfo(0, 0, vertexShader->compatibleFetchShader, vertexShader, pixelShader, geometryShader);
m_pipelineIsCachedLock.unlock();
// compile
{
PipelineCompiler pp;
if (!pp.InitFromCurrentGPUState(pipelineInfo, *lcr, renderPass))
{
s_spinlockSharedInternal.lock();
delete lcr;
delete cachedPipeline;
s_spinlockSharedInternal.unlock();
return;
}
pp.Compile(true, true, false);
// destroy pp early
}
// on success, calculate pipeline hash and flag as present in cache
uint64 pipelineBaseHash = vertexShader->baseHash;
uint64 pipelineStateHash = VulkanRenderer::draw_calculateGraphicsPipelineHash(vertexShader->compatibleFetchShader, vertexShader, geometryShader, pixelShader, renderPass, *lcr);
m_pipelineIsCachedLock.lock();
m_pipelineIsCached.emplace(pipelineBaseHash, pipelineStateHash);
m_pipelineIsCachedLock.unlock();
// clean up
s_spinlockSharedInternal.lock();
delete pipelineInfo;
delete lcr;
delete cachedPipeline;
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(renderPass);
s_spinlockSharedInternal.unlock();
}
bool VulkanPipelineStableCache::HasPipelineCached(uint64 baseHash, uint64 pipelineStateHash)
{
PipelineHash ph(baseHash, pipelineStateHash);
return m_pipelineIsCached.find(ph) != m_pipelineIsCached.end();
}
ConcurrentQueue<CachedPipeline*> g_pipelineCachingQueue;
void VulkanPipelineStableCache::AddCurrentStateToCache(uint64 baseHash, uint64 pipelineStateHash)
{
m_pipelineIsCached.emplace(baseHash, pipelineStateHash);
if (!m_pipelineCacheStoreThread)
{
m_pipelineCacheStoreThread = new std::thread(&VulkanPipelineStableCache::WorkerThread, this);
m_pipelineCacheStoreThread->detach();
}
// fill job structure with cached GPU state
// for each cached pipeline we store:
// - Active shaders (referenced by hash)
// - An almost-complete register state of the GPU (minus some ALU uniform constants which aren't relevant)
CachedPipeline* job = new CachedPipeline();
auto vs = LatteSHRC_GetActiveVertexShader();
auto gs = LatteSHRC_GetActiveGeometryShader();
auto ps = LatteSHRC_GetActivePixelShader();
if (vs)
job->vsHash.set(vs->baseHash, vs->auxHash);
if (gs)
job->gsHash.set(gs->baseHash, gs->auxHash);
if (ps)
job->psHash.set(ps->baseHash, ps->auxHash);
Latte::StoreGPURegisterState(LatteGPUState.contextNew, job->gpuState);
// queue job
g_pipelineCachingQueue.push(job);
}
bool VulkanPipelineStableCache::SerializePipeline(MemStreamWriter& memWriter, CachedPipeline& cachedPipeline)
{
memWriter.writeBE<uint8>(0x01); // version
uint8 presentMask = 0;
if (cachedPipeline.vsHash.isPresent)
presentMask |= 1;
if (cachedPipeline.gsHash.isPresent)
presentMask |= 2;
if (cachedPipeline.psHash.isPresent)
presentMask |= 4;
memWriter.writeBE<uint8>(presentMask);
if (cachedPipeline.vsHash.isPresent)
{
memWriter.writeBE<uint64>(cachedPipeline.vsHash.baseHash);
memWriter.writeBE<uint64>(cachedPipeline.vsHash.auxHash);
}
if (cachedPipeline.gsHash.isPresent)
{
memWriter.writeBE<uint64>(cachedPipeline.gsHash.baseHash);
memWriter.writeBE<uint64>(cachedPipeline.gsHash.auxHash);
}
if (cachedPipeline.psHash.isPresent)
{
memWriter.writeBE<uint64>(cachedPipeline.psHash.baseHash);
memWriter.writeBE<uint64>(cachedPipeline.psHash.auxHash);
}
Latte::SerializeRegisterState(cachedPipeline.gpuState, memWriter);
return true;
}
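// On-disk layout produced by SerializePipeline above (fields written here via writeBE, i.e. big-endian):
//   u8  version      - currently 0x01
//   u8  presentMask  - bit0 = vertex, bit1 = geometry, bit2 = pixel shader present
//   u64 baseHash, u64 auxHash - repeated once per present shader, in VS/GS/PS order
//   followed by the compacted GPU register state (Latte::SerializeRegisterState)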
bool VulkanPipelineStableCache::DeserializePipeline(MemStreamReader& memReader, CachedPipeline& cachedPipeline)
{
// version
if (memReader.readBE<uint8>() != 1)
{
cemuLog_log(LogType::Force, "Cached Vulkan pipeline corrupted or has unknown version");
return false;
}
// shader hashes
uint8 presentMask = memReader.readBE<uint8>();
if (presentMask & 1)
{
uint64 baseHash = memReader.readBE<uint64>();
uint64 auxHash = memReader.readBE<uint64>();
cachedPipeline.vsHash.set(baseHash, auxHash);
}
if (presentMask & 2)
{
uint64 baseHash = memReader.readBE<uint64>();
uint64 auxHash = memReader.readBE<uint64>();
cachedPipeline.gsHash.set(baseHash, auxHash);
}
if (presentMask & 4)
{
uint64 baseHash = memReader.readBE<uint64>();
uint64 auxHash = memReader.readBE<uint64>();
cachedPipeline.psHash.set(baseHash, auxHash);
}
// deserialize GPU state
if (!Latte::DeserializeRegisterState(cachedPipeline.gpuState, memReader))
{
return false;
}
cemu_assert_debug(!memReader.hasError());
return true;
}
int VulkanPipelineStableCache::CompilerThread()
{
SetThreadName("plCacheCompiler");
while (m_numCompilationThreads != 0)
{
std::vector<uint8> pipelineData = m_compilationQueue.pop();
if(pipelineData.empty())
continue;
LoadPipelineFromCache(pipelineData);
++g_vkCacheState.pipelinesLoaded;
}
return 0;
}
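// Editorial note (not in the original file): EndLoading() sets m_numCompilationThreads to 0 and
// pushes one empty workload per worker; the empty vector makes the blocking pop() above return,
// the workload is skipped via the empty() check, and the while condition then observes the
// cleared thread count, so every compiler thread exits.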
void VulkanPipelineStableCache::WorkerThread()
{
SetThreadName("plCacheWriter");
while (true)
{
CachedPipeline* job;
g_pipelineCachingQueue.pop(job);
if (!s_cache)
{
delete job;
continue;
}
// serialize
MemStreamWriter memWriter(1024 * 4);
SerializePipeline(memWriter, *job);
auto blob = memWriter.getResult();
// file name is derived from data hash
uint8 hash[SHA256_DIGEST_LENGTH];
SHA256(blob.data(), blob.size(), hash);
uint64 nameA = *(uint64be*)(hash + 0);
uint64 nameB = *(uint64be*)(hash + 8);
s_cache->AddFileAsync({ nameA, nameB }, blob.data(), blob.size());
delete job;
}
}
| 13,894 | C++ | .cpp | 411 | 31.413625 | 177 | 0.785034 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,240 | VKRMemoryManager.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VKRMemoryManager.cpp |
#include "Cafe/HW/Latte/Renderer/Vulkan/VKRMemoryManager.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include <imgui.h>
/* VKRSynchronizedMemoryBuffer */
void VKRSynchronizedRingAllocator::addUploadBufferSyncPoint(AllocatorBuffer_t& buffer, uint32 offset)
{
auto cmdBufferId = m_vkr->GetCurrentCommandBufferId();
if (cmdBufferId == buffer.lastSyncpointCmdBufferId)
return;
buffer.lastSyncpointCmdBufferId = cmdBufferId;
buffer.queue_syncPoints.emplace(cmdBufferId, offset);
}
void VKRSynchronizedRingAllocator::allocateAdditionalUploadBuffer(uint32 sizeRequiredForAlloc)
{
// calculate buffer size, should be a multiple of bufferAllocSize that is at least as large as sizeRequiredForAlloc
uint32 bufferAllocSize = m_minimumBufferAllocSize;
while (bufferAllocSize < sizeRequiredForAlloc)
bufferAllocSize += m_minimumBufferAllocSize;
AllocatorBuffer_t newBuffer{};
newBuffer.writeIndex = 0;
newBuffer.basePtr = nullptr;
if (m_bufferType == BUFFER_TYPE::STAGING)
m_vkrMemMgr->CreateBuffer(bufferAllocSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, newBuffer.vk_buffer, newBuffer.vk_mem);
else if (m_bufferType == BUFFER_TYPE::INDEX)
m_vkrMemMgr->CreateBuffer(bufferAllocSize, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, newBuffer.vk_buffer, newBuffer.vk_mem);
else if (m_bufferType == BUFFER_TYPE::STRIDE)
m_vkrMemMgr->CreateBuffer(bufferAllocSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, newBuffer.vk_buffer, newBuffer.vk_mem);
else
cemu_assert_debug(false);
void* bufferPtr = nullptr;
vkMapMemory(m_vkr->GetLogicalDevice(), newBuffer.vk_mem, 0, VK_WHOLE_SIZE, 0, &bufferPtr);
newBuffer.basePtr = (uint8*)bufferPtr;
newBuffer.size = bufferAllocSize;
newBuffer.index = (uint32)m_buffers.size();
m_buffers.push_back(newBuffer);
}
VKRSynchronizedRingAllocator::AllocatorReservation_t VKRSynchronizedRingAllocator::AllocateBufferMemory(uint32 size, uint32 alignment)
{
if (alignment < 128)
alignment = 128;
size = (size + 127) & ~127;
for (auto& itr : m_buffers)
{
// align pointer
uint32 alignmentPadding = (alignment - (itr.writeIndex % alignment)) % alignment;
uint32 distanceToSyncPoint;
if (!itr.queue_syncPoints.empty())
{
if(itr.queue_syncPoints.front().offset < itr.writeIndex)
distanceToSyncPoint = 0xFFFFFFFF;
else
distanceToSyncPoint = itr.queue_syncPoints.front().offset - itr.writeIndex;
}
else
distanceToSyncPoint = 0xFFFFFFFF;
uint32 spaceNeeded = alignmentPadding + size;
if (spaceNeeded > distanceToSyncPoint)
continue; // not enough space in current buffer
if ((itr.writeIndex + spaceNeeded) > itr.size)
{
// wrap-around
spaceNeeded = size;
alignmentPadding = 0;
// check if there is enough space in current buffer after wrap-around
if (!itr.queue_syncPoints.empty())
{
distanceToSyncPoint = itr.queue_syncPoints.front().offset - 0;
if (spaceNeeded > distanceToSyncPoint)
continue;
}
else if (spaceNeeded > itr.size)
continue;
itr.writeIndex = 0;
}
addUploadBufferSyncPoint(itr, itr.writeIndex);
itr.writeIndex += alignmentPadding;
uint32 offset = itr.writeIndex;
itr.writeIndex += size;
itr.cleanupCounter = 0;
VKRSynchronizedRingAllocator::AllocatorReservation_t res;
res.vkBuffer = itr.vk_buffer;
res.vkMem = itr.vk_mem;
res.memPtr = itr.basePtr + offset;
res.bufferOffset = offset;
res.size = size;
res.bufferIndex = itr.index;
return res;
}
// allocate new buffer
allocateAdditionalUploadBuffer(size);
return AllocateBufferMemory(size, alignment);
}
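// Typical use of the ring allocator above, mirroring bufferCache_upload()/texture_loadSlice() in
// VulkanRenderer.cpp (sketch, not part of the original file):
//   auto resv = allocator.AllocateBufferMemory(size, 256); // buffer handle + offset + mapped pointer
//   memcpy(resv.memPtr, srcData, size);                    // write through the persistent mapping
//   allocator.FlushReservation(resv);                      // staging memory is not host-coherent
//   // ...then record a vkCmdCopyBuffer / vkCmdCopyBufferToImage from resv.vkBuffer + resv.bufferOffset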
void VKRSynchronizedRingAllocator::FlushReservation(AllocatorReservation_t& uploadReservation)
{
cemu_assert_debug(m_bufferType == BUFFER_TYPE::STAGING); // only the staging buffer isn't coherent
// todo - use nonCoherentAtomSize for flush size (instead of hardcoded constant)
VkMappedMemoryRange flushedRange{};
flushedRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
flushedRange.memory = uploadReservation.vkMem;
flushedRange.offset = uploadReservation.bufferOffset;
flushedRange.size = uploadReservation.size;
vkFlushMappedMemoryRanges(m_vkr->GetLogicalDevice(), 1, &flushedRange);
}
void VKRSynchronizedRingAllocator::CleanupBuffer(uint64 latestFinishedCommandBufferId)
{
if (latestFinishedCommandBufferId > 1)
latestFinishedCommandBufferId -= 1;
for (auto& itr : m_buffers)
{
while (!itr.queue_syncPoints.empty() && latestFinishedCommandBufferId > itr.queue_syncPoints.front().commandBufferId)
{
itr.queue_syncPoints.pop();
}
if (itr.queue_syncPoints.empty())
itr.cleanupCounter++;
}
// check if last buffer is available for deletion
if (m_buffers.size() >= 2)
{
auto& lastBuffer = m_buffers.back();
if (lastBuffer.cleanupCounter >= 1000)
{
// release buffer
vkUnmapMemory(m_vkr->GetLogicalDevice(), lastBuffer.vk_mem);
m_vkrMemMgr->DeleteBuffer(lastBuffer.vk_buffer, lastBuffer.vk_mem);
m_buffers.pop_back();
}
}
}
VkBuffer VKRSynchronizedRingAllocator::GetBufferByIndex(uint32 index) const
{
return m_buffers[index].vk_buffer;
}
void VKRSynchronizedRingAllocator::GetStats(uint32& numBuffers, size_t& totalBufferSize, size_t& freeBufferSize) const
{
numBuffers = (uint32)m_buffers.size();
totalBufferSize = 0;
freeBufferSize = 0;
for (auto& itr : m_buffers)
{
totalBufferSize += itr.size;
// calculate free space in buffer
uint32 distanceToSyncPoint;
if (!itr.queue_syncPoints.empty())
{
if (itr.queue_syncPoints.front().offset < itr.writeIndex)
distanceToSyncPoint = (itr.size - itr.writeIndex) + itr.queue_syncPoints.front().offset; // size with wrap-around
else
distanceToSyncPoint = itr.queue_syncPoints.front().offset - itr.writeIndex;
}
else
distanceToSyncPoint = itr.size;
freeBufferSize += distanceToSyncPoint;
}
}
/* VkTextureChunkedHeap */
uint32 VkTextureChunkedHeap::allocateNewChunk(uint32 chunkIndex, uint32 minimumAllocationSize)
{
cemu_assert_debug(m_list_chunkInfo.size() == chunkIndex);
m_list_chunkInfo.resize(m_list_chunkInfo.size() + 1);
// pad minimumAllocationSize to 32KB alignment
minimumAllocationSize = (minimumAllocationSize + (32*1024-1)) & ~(32 * 1024 - 1);
uint32 allocationSize = 1024 * 1024 * 128;
if (chunkIndex == 0)
{
// make the first allocation smaller, this decreases wasted memory when there are textures that require specific flags (and thus separate heaps)
allocationSize = 1024 * 1024 * 16;
}
if (allocationSize < minimumAllocationSize)
allocationSize = minimumAllocationSize;
// get available memory types/heaps
std::vector<uint32> deviceLocalMemoryTypeIndices = m_vkrMemoryManager->FindMemoryTypes(m_typeFilter, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
std::vector<uint32> hostLocalMemoryTypeIndices = m_vkrMemoryManager->FindMemoryTypes(m_typeFilter, 0);
// remove device local memory types from host local vector
auto pred = [&deviceLocalMemoryTypeIndices](const uint32& v) ->bool
{
return std::find(deviceLocalMemoryTypeIndices.begin(), deviceLocalMemoryTypeIndices.end(), v) != deviceLocalMemoryTypeIndices.end();
};
hostLocalMemoryTypeIndices.erase(std::remove_if(hostLocalMemoryTypeIndices.begin(), hostLocalMemoryTypeIndices.end(), pred), hostLocalMemoryTypeIndices.end());
// allocate chunk memory
for (sint32 t = 0; t < 3; t++)
{
// attempt to allocate from device local memory first
for (auto memType : deviceLocalMemoryTypeIndices)
{
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = allocationSize;
allocInfo.memoryTypeIndex = memType;
VkDeviceMemory imageMemory;
VkResult r = vkAllocateMemory(m_device, &allocInfo, nullptr, &imageMemory);
if (r != VK_SUCCESS)
continue;
m_list_chunkInfo[chunkIndex].mem = imageMemory;
return allocationSize;
}
// attempt to allocate from host-local memory
for (auto memType : hostLocalMemoryTypeIndices)
{
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = allocationSize;
allocInfo.memoryTypeIndex = memType;
VkDeviceMemory imageMemory;
VkResult r = vkAllocateMemory(m_device, &allocInfo, nullptr, &imageMemory);
if (r != VK_SUCCESS)
continue;
m_list_chunkInfo[chunkIndex].mem = imageMemory;
return allocationSize;
}
// retry with smaller size if possible
allocationSize /= 2;
if (allocationSize < minimumAllocationSize)
break;
cemuLog_log(LogType::Force, "Failed to allocate texture memory chunk with size {}MB. Trying again with smaller allocation size", allocationSize / 1024 / 1024);
}
cemuLog_log(LogType::Force, "Unable to allocate image memory chunk ({} heaps)", deviceLocalMemoryTypeIndices.size());
throw std::runtime_error("failed to allocate image memory!");
return 0;
}
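// Illustrative sketch (not part of the original file) of the initial chunk-size policy above: the
// first chunk is kept small (16MB) to limit waste when a format needs its own heap, later chunks
// are 128MB, and the result is never below the 32KB-aligned minimum. The halving retry on
// allocation failure is handled separately inside allocateNewChunk().
static uint32 pickInitialChunkSize(uint32 chunkIndex, uint32 minimumAllocationSize)
{
	minimumAllocationSize = (minimumAllocationSize + (32 * 1024 - 1)) & ~(32 * 1024 - 1);
	uint32 allocationSize = (chunkIndex == 0) ? (16u * 1024 * 1024) : (128u * 1024 * 1024);
	if (allocationSize < minimumAllocationSize)
		allocationSize = minimumAllocationSize;
	return allocationSize;
}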
uint32_t VKRMemoryManager::FindMemoryType(uint32_t typeFilter, VkMemoryPropertyFlags properties) const
{
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(m_vkr->GetPhysicalDevice(), &memProperties);
for (uint32 i = 0; i < memProperties.memoryTypeCount; i++)
{
if ((typeFilter & (1 << i)) != 0 && (memProperties.memoryTypes[i].propertyFlags & properties) == properties)
return i;
}
m_vkr->UnrecoverableError(fmt::format("failed to find suitable memory type ({0:#08x} {1:#08x})", typeFilter, properties).c_str());
return 0;
}
bool VKRMemoryManager::FindMemoryType2(uint32 typeFilter, VkMemoryPropertyFlags properties, uint32& memoryIndex) const
{
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(m_vkr->GetPhysicalDevice(), &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++)
{
if (typeFilter & (1 << i) && memProperties.memoryTypes[i].propertyFlags == properties)
{
memoryIndex = i;
return true;
}
}
return false;
}
std::vector<uint32> VKRMemoryManager::FindMemoryTypes(uint32_t typeFilter, VkMemoryPropertyFlags properties) const
{
std::vector<uint32> memoryTypes;
memoryTypes.clear();
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(m_vkr->GetPhysicalDevice(), &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++)
{
if (typeFilter & (1 << i) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties)
memoryTypes.emplace_back(i);
}
if (memoryTypes.empty())
m_vkr->UnrecoverableError(fmt::format("Failed to find suitable memory type ({0:#08x} {1:#08x})", typeFilter, properties).c_str());
return memoryTypes;
}
size_t VKRMemoryManager::GetTotalMemoryForBufferType(VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, size_t minimumBufferSize)
{
VkDevice logicalDevice = m_vkr->GetLogicalDevice();
// create temporary buffer object to get memory type
VkBuffer temporaryBuffer;
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.usage = usage;
bufferInfo.size = minimumBufferSize; // the buffer size can theoretically influence the memory type, is there a better way to handle this?
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(logicalDevice, &bufferInfo, nullptr, &temporaryBuffer) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Vulkan: GetTotalMemoryForBufferType() failed to create temporary buffer");
return 0;
}
// get memory requirements for buffer
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(logicalDevice, temporaryBuffer, &memRequirements);
uint32 typeFilter = memRequirements.memoryTypeBits;
// destroy temporary buffer
vkDestroyBuffer(logicalDevice, temporaryBuffer, nullptr);
// get list of all suitable heaps
std::unordered_set<uint32> list_heapIndices;
VkPhysicalDeviceMemoryProperties memProperties{};
vkGetPhysicalDeviceMemoryProperties(m_vkr->GetPhysicalDevice(), &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++)
{
if (typeFilter & (1 << i) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties)
list_heapIndices.emplace(memProperties.memoryTypes[i].heapIndex);
}
// sum up size of heaps
size_t total = 0;
for (auto heapIndex : list_heapIndices)
{
if (heapIndex >= memProperties.memoryHeapCount)
continue;
total += memProperties.memoryHeaps[heapIndex].size;
}
return total;
}
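// Example use of the query above (sketch; the call site and probe size are hypothetical):
//   size_t deviceLocalForVertexData = memoryManager->GetTotalMemoryForBufferType(
//       VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 16 * 1024 * 1024);
// The probe size only matters insofar as it can influence which memory types the driver reports
// for the temporary buffer, as noted in the comment inside the function.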
void VKRMemoryManager::CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VkBuffer& buffer, VkDeviceMemory& bufferMemory) const
{
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.usage = usage;
bufferInfo.size = size;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(m_vkr->GetLogicalDevice(), &bufferInfo, nullptr, &buffer) != VK_SUCCESS)
m_vkr->UnrecoverableError("Failed to create buffer");
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(m_vkr->GetLogicalDevice(), buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex = FindMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(m_vkr->GetLogicalDevice(), &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS)
m_vkr->UnrecoverableError("Failed to allocate buffer memory");
if (vkBindBufferMemory(m_vkr->GetLogicalDevice(), buffer, bufferMemory, 0) != VK_SUCCESS)
m_vkr->UnrecoverableError("Failed to bind buffer memory");
}
bool VKRMemoryManager::CreateBuffer2(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VkBuffer& buffer, VkDeviceMemory& bufferMemory) const
{
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.usage = usage;
bufferInfo.size = size;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(m_vkr->GetLogicalDevice(), &bufferInfo, nullptr, &buffer) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create buffer (CreateBuffer2)");
return false;
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(m_vkr->GetLogicalDevice(), buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
if (!FindMemoryType2(memRequirements.memoryTypeBits, properties, allocInfo.memoryTypeIndex))
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
return false;
}
if (vkAllocateMemory(m_vkr->GetLogicalDevice(), &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS)
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
return false;
}
if (vkBindBufferMemory(m_vkr->GetLogicalDevice(), buffer, bufferMemory, 0) != VK_SUCCESS)
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
cemuLog_log(LogType::Force, "Failed to bind buffer (CreateBuffer2)");
return false;
}
return true;
}
bool VKRMemoryManager::CreateBufferFromHostMemory(void* hostPointer, VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, VkBuffer& buffer, VkDeviceMemory& bufferMemory) const
{
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.usage = usage;
bufferInfo.size = size;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkExternalMemoryBufferCreateInfo emb{};
emb.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
emb.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
bufferInfo.pNext = &emb;
if (vkCreateBuffer(m_vkr->GetLogicalDevice(), &bufferInfo, nullptr, &buffer) != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create buffer (CreateBuffer2)");
return false;
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(m_vkr->GetLogicalDevice(), buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
VkImportMemoryHostPointerInfoEXT importHostMem{};
importHostMem.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT;
importHostMem.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
importHostMem.pHostPointer = hostPointer;
	// VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT or
	// VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT
	// what's the difference?
allocInfo.pNext = &importHostMem;
if (!FindMemoryType2(memRequirements.memoryTypeBits, properties, allocInfo.memoryTypeIndex))
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
return false;
}
if (vkAllocateMemory(m_vkr->GetLogicalDevice(), &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS)
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
return false;
}
if (vkBindBufferMemory(m_vkr->GetLogicalDevice(), buffer, bufferMemory, 0) != VK_SUCCESS)
{
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
cemuLog_log(LogType::Force, "Failed to bind buffer (CreateBufferFromHostMemory)");
return false;
}
return true;
}
void VKRMemoryManager::DeleteBuffer(VkBuffer& buffer, VkDeviceMemory& deviceMem) const
{
if (buffer != VK_NULL_HANDLE)
vkDestroyBuffer(m_vkr->GetLogicalDevice(), buffer, nullptr);
if (deviceMem != VK_NULL_HANDLE)
vkFreeMemory(m_vkr->GetLogicalDevice(), deviceMem, nullptr);
buffer = VK_NULL_HANDLE;
deviceMem = VK_NULL_HANDLE;
}
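// Allocates backing memory for an image from a chunked texture heap keyed by the image's memory type bits.
// If the heap cannot satisfy the request, deletable textures are evicted in batches and the allocation is retried.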
VkImageMemAllocation* VKRMemoryManager::imageMemoryAllocate(VkImage image)
{
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(m_vkr->GetLogicalDevice(), image, &memRequirements);
uint32 typeFilter = memRequirements.memoryTypeBits;
// get or create heap for this type filter
VkTextureChunkedHeap* texHeap;
auto it = map_textureHeap.find(typeFilter);
if (it == map_textureHeap.end())
{
texHeap = new VkTextureChunkedHeap(this, typeFilter, m_vkr->GetLogicalDevice());
map_textureHeap.emplace(typeFilter, texHeap);
}
else
texHeap = it->second;
// alloc mem from heap
uint32 allocationSize = (uint32)memRequirements.size;
CHAddr mem = texHeap->allocMem(allocationSize, (uint32)memRequirements.alignment);
if (!mem.isValid())
{
// allocation failed, try to make space by deleting textures
// todo - improve this algorithm
std::vector<LatteTexture*> deleteableTextures = LatteTC_GetDeleteableTextures();
// delete up to 20 textures from the deletable textures list, then retry allocation
while (!deleteableTextures.empty())
{
size_t numDelete = deleteableTextures.size();
if (numDelete > 20)
numDelete = 20;
for (size_t i = 0; i < numDelete; i++)
LatteTexture_Delete(deleteableTextures[i]);
deleteableTextures.erase(deleteableTextures.begin(), deleteableTextures.begin() + numDelete);
mem = texHeap->allocMem(allocationSize, (uint32)memRequirements.alignment);
if (mem.isValid())
break;
}
if (!mem.isValid())
{
m_vkr->UnrecoverableError("Ran out of VRAM for textures");
}
}
vkBindImageMemory(m_vkr->GetLogicalDevice(), image, texHeap->getChunkMem(mem.chunkIndex), mem.offset);
return new VkImageMemAllocation(typeFilter, mem, allocationSize);
}
void VKRMemoryManager::imageMemoryFree(VkImageMemAllocation* imageMemAllocation)
{
auto heapItr = map_textureHeap.find(imageMemAllocation->typeFilter);
if (heapItr == map_textureHeap.end())
{
cemuLog_log(LogType::Force, "Internal texture heap error");
return;
}
heapItr->second->freeMem(imageMemAllocation->mem);
delete imageMemAllocation;
}
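// Writes per-heap usage statistics (allocated/total MB per memory type filter) to the ImGui debug overlay.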
void VKRMemoryManager::appendOverlayHeapDebugInfo()
{
for (auto& itr : map_textureHeap)
{
uint32 heapSize;
uint32 allocatedBytes;
itr.second->getStatistics(heapSize, allocatedBytes);
uint32 heapSizeMB = (heapSize / 1024 / 1024);
uint32 allocatedBytesMB = (allocatedBytes / 1024 / 1024);
ImGui::Text("%s", fmt::format("{0:#08x} Size: {1}MB/{2}MB", itr.first, allocatedBytesMB, heapSizeMB).c_str());
}
}
// ---- cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/SwapchainInfoVk.cpp ----
#include "SwapchainInfoVk.h"
#include "config/CemuConfig.h"
#include "gui/guiWrapper.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteTiming.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
SwapchainInfoVk::SwapchainInfoVk(bool mainWindow, Vector2i size) : mainWindow(mainWindow), m_desiredExtent(size)
{
auto& windowHandleInfo = mainWindow ? gui_getWindowInfo().canvas_main : gui_getWindowInfo().canvas_pad;
auto renderer = VulkanRenderer::GetInstance();
m_instance = renderer->GetVkInstance();
m_logicalDevice = renderer->GetLogicalDevice();
m_physicalDevice = renderer->GetPhysicalDevice();
m_surface = renderer->CreateFramebufferSurface(m_instance, windowHandleInfo);
}
SwapchainInfoVk::~SwapchainInfoVk()
{
Cleanup();
if(m_surface != VK_NULL_HANDLE)
vkDestroySurfaceKHR(m_instance, m_surface, nullptr);
}
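// Creates the swapchain and its dependent objects: the present render pass, per-image views and framebuffers,
// acquire/present semaphores and the image-available fence used to pace image acquisition.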
void SwapchainInfoVk::Create()
{
const auto details = QuerySwapchainSupport(m_surface, m_physicalDevice);
m_surfaceFormat = ChooseSurfaceFormat(details.formats);
m_actualExtent = ChooseSwapExtent(details.capabilities);
// use at least two swapchain images. fewer than that causes problems on some drivers
uint32_t image_count = std::max(2u, details.capabilities.minImageCount);
if(details.capabilities.maxImageCount > 0)
image_count = std::min(image_count, details.capabilities.maxImageCount);
if(image_count < 2)
cemuLog_log(LogType::Force, "Vulkan: Swapchain image count less than 2 may cause problems");
VkSwapchainCreateInfoKHR create_info = CreateSwapchainCreateInfo(m_surface, details, m_surfaceFormat, image_count, m_actualExtent);
create_info.oldSwapchain = nullptr;
create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkResult result = vkCreateSwapchainKHR(m_logicalDevice, &create_info, nullptr, &m_swapchain);
if (result != VK_SUCCESS)
UnrecoverableError("Error attempting to create a swapchain");
result = vkGetSwapchainImagesKHR(m_logicalDevice, m_swapchain, &image_count, nullptr);
if (result != VK_SUCCESS)
UnrecoverableError("Error attempting to retrieve the count of swapchain images");
m_swapchainImages.resize(image_count);
result = vkGetSwapchainImagesKHR(m_logicalDevice, m_swapchain, &image_count, m_swapchainImages.data());
if (result != VK_SUCCESS)
UnrecoverableError("Error attempting to retrieve swapchain images");
// create default renderpass
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = m_surfaceFormat.format;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
result = vkCreateRenderPass(m_logicalDevice, &renderPassInfo, nullptr, &m_swapchainRenderPass);
if (result != VK_SUCCESS)
UnrecoverableError("Failed to create renderpass for swapchain");
// create swapchain image views
m_swapchainImageViews.resize(m_swapchainImages.size());
for (sint32 i = 0; i < m_swapchainImages.size(); i++)
{
VkImageViewCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
createInfo.image = m_swapchainImages[i];
createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
createInfo.format = m_surfaceFormat.format;
createInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
createInfo.subresourceRange.baseMipLevel = 0;
createInfo.subresourceRange.levelCount = 1;
createInfo.subresourceRange.baseArrayLayer = 0;
createInfo.subresourceRange.layerCount = 1;
result = vkCreateImageView(m_logicalDevice, &createInfo, nullptr, &m_swapchainImageViews[i]);
if (result != VK_SUCCESS)
UnrecoverableError("Failed to create imageviews for swapchain");
}
// create swapchain framebuffers
m_swapchainFramebuffers.resize(m_swapchainImages.size());
for (size_t i = 0; i < m_swapchainImages.size(); i++)
{
VkImageView attachments[1];
attachments[0] = m_swapchainImageViews[i];
// create framebuffer
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = m_swapchainRenderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = m_actualExtent.width;
framebufferInfo.height = m_actualExtent.height;
framebufferInfo.layers = 1;
result = vkCreateFramebuffer(m_logicalDevice, &framebufferInfo, nullptr, &m_swapchainFramebuffers[i]);
if (result != VK_SUCCESS)
UnrecoverableError("Failed to create framebuffer for swapchain");
}
m_presentSemaphores.resize(m_swapchainImages.size());
// create present semaphores
VkSemaphoreCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
for (auto& semaphore : m_presentSemaphores){
if (vkCreateSemaphore(m_logicalDevice, &info, nullptr, &semaphore) != VK_SUCCESS)
UnrecoverableError("Failed to create semaphore for swapchain present");
}
m_acquireSemaphores.resize(m_swapchainImages.size());
// create acquire semaphores
info = {};
info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
for (auto& semaphore : m_acquireSemaphores){
if (vkCreateSemaphore(m_logicalDevice, &info, nullptr, &semaphore) != VK_SUCCESS)
UnrecoverableError("Failed to create semaphore for swapchain acquire");
}
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
result = vkCreateFence(m_logicalDevice, &fenceInfo, nullptr, &m_imageAvailableFence);
if (result != VK_SUCCESS)
UnrecoverableError("Failed to create fence for swapchain");
m_acquireIndex = 0;
hasDefinedSwapchainImage = false;
m_queueDepth = 0;
}
void SwapchainInfoVk::Cleanup()
{
m_swapchainImages.clear();
for (auto& sem: m_acquireSemaphores)
vkDestroySemaphore(m_logicalDevice, sem, nullptr);
m_acquireSemaphores.clear();
for (auto& sem: m_presentSemaphores)
vkDestroySemaphore(m_logicalDevice, sem, nullptr);
m_presentSemaphores.clear();
if (m_swapchainRenderPass)
{
vkDestroyRenderPass(m_logicalDevice, m_swapchainRenderPass, nullptr);
m_swapchainRenderPass = nullptr;
}
for (auto& imageView : m_swapchainImageViews)
vkDestroyImageView(m_logicalDevice, imageView, nullptr);
m_swapchainImageViews.clear();
for (auto& framebuffer : m_swapchainFramebuffers)
vkDestroyFramebuffer(m_logicalDevice, framebuffer, nullptr);
m_swapchainFramebuffers.clear();
if (m_imageAvailableFence)
{
WaitAvailableFence();
vkDestroyFence(m_logicalDevice, m_imageAvailableFence, nullptr);
m_imageAvailableFence = nullptr;
}
if (m_swapchain)
{
vkDestroySwapchainKHR(m_logicalDevice, m_swapchain, nullptr);
m_swapchain = VK_NULL_HANDLE;
}
}
bool SwapchainInfoVk::IsValid() const
{
return m_swapchain && !m_acquireSemaphores.empty();
}
void SwapchainInfoVk::WaitAvailableFence()
{
if(m_awaitableFence != VK_NULL_HANDLE)
vkWaitForFences(m_logicalDevice, 1, &m_awaitableFence, VK_TRUE, UINT64_MAX);
m_awaitableFence = VK_NULL_HANDLE;
}
void SwapchainInfoVk::ResetAvailableFence() const
{
vkResetFences(m_logicalDevice, 1, &m_imageAvailableFence);
}
VkSemaphore SwapchainInfoVk::ConsumeAcquireSemaphore()
{
VkSemaphore ret = m_currentSemaphore;
m_currentSemaphore = VK_NULL_HANDLE;
return ret;
}
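// Acquires the next swapchain image using a round-robin acquire semaphore plus the image-available fence.
// OUT_OF_DATE/SUBOPTIMAL results only flag the swapchain for recreation; a timeout returns false without an image.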
bool SwapchainInfoVk::AcquireImage()
{
ResetAvailableFence();
VkSemaphore acquireSemaphore = m_acquireSemaphores[m_acquireIndex];
VkResult result = vkAcquireNextImageKHR(m_logicalDevice, m_swapchain, 1'000'000'000, acquireSemaphore, m_imageAvailableFence, &swapchainImageIndex);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR)
m_shouldRecreate = true;
if (result == VK_TIMEOUT)
{
swapchainImageIndex = -1;
return false;
}
if (result < 0)
{
swapchainImageIndex = -1;
if (result != VK_ERROR_OUT_OF_DATE_KHR)
throw std::runtime_error(fmt::format("Failed to acquire next image: {}", result));
return false;
}
m_currentSemaphore = acquireSemaphore;
m_awaitableFence = m_imageAvailableFence;
m_acquireIndex = (m_acquireIndex + 1) % m_swapchainImages.size();
return true;
}
void SwapchainInfoVk::UnrecoverableError(const char* errMsg)
{
cemuLog_log(LogType::Force, "Unrecoverable error in Vulkan swapchain");
cemuLog_log(LogType::Force, "Msg: {}", errMsg);
throw std::runtime_error(errMsg);
}
SwapchainInfoVk::SwapchainSupportDetails SwapchainInfoVk::QuerySwapchainSupport(VkSurfaceKHR surface, const VkPhysicalDevice& device)
{
SwapchainSupportDetails details;
VkResult result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface, &details.capabilities);
if (result != VK_SUCCESS)
{
if (result != VK_ERROR_SURFACE_LOST_KHR)
cemuLog_log(LogType::Force, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to retrieve physical device surface capabilities: {}", result));
}
uint32_t formatCount = 0;
result = vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount, nullptr);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "vkGetPhysicalDeviceSurfaceFormatsKHR failed. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to retrieve the number of formats for a surface on a physical device: {}", result));
}
if (formatCount != 0)
{
details.formats.resize(formatCount);
result = vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount, details.formats.data());
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "vkGetPhysicalDeviceSurfaceFormatsKHR failed. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to retrieve the formats for a surface on a physical device: {}", result));
}
}
uint32_t presentModeCount = 0;
result = vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &presentModeCount, nullptr);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "vkGetPhysicalDeviceSurfacePresentModesKHR failed. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to retrieve the count of present modes for a surface on a physical device: {}", result));
}
if (presentModeCount != 0)
{
details.presentModes.resize(presentModeCount);
result = vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &presentModeCount, details.presentModes.data());
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "vkGetPhysicalDeviceSurfacePresentModesKHR failed. Error {}", (sint32)result);
throw std::runtime_error(fmt::format("Unable to retrieve the present modes for a surface on a physical device: {}", result));
}
}
return details;
}
VkSurfaceFormatKHR SwapchainInfoVk::ChooseSurfaceFormat(const std::vector<VkSurfaceFormatKHR>& formats) const
{
if (formats.size() == 1 && formats[0].format == VK_FORMAT_UNDEFINED)
return{ VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR };
for (const auto& format : formats)
{
bool useSRGB = mainWindow ? LatteGPUState.tvBufferUsesSRGB : LatteGPUState.drcBufferUsesSRGB;
if (useSRGB)
{
if (format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
return format;
}
else
{
if (format.format == VK_FORMAT_B8G8R8A8_UNORM && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
return format;
}
}
return formats[0];
}
VkExtent2D SwapchainInfoVk::ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) const
{
if (capabilities.currentExtent.width != std::numeric_limits<uint32>::max())
return capabilities.currentExtent;
VkExtent2D actualExtent = { (uint32)m_desiredExtent.x, (uint32)m_desiredExtent.y };
actualExtent.width = std::max(capabilities.minImageExtent.width, std::min(capabilities.maxImageExtent.width, actualExtent.width));
actualExtent.height = std::max(capabilities.minImageExtent.height, std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
VkPresentModeKHR SwapchainInfoVk::ChoosePresentMode(const std::vector<VkPresentModeKHR>& modes)
{
m_maxQueued = 0;
const auto vsyncState = (VSync)GetConfig().vsync.GetValue();
if (vsyncState == VSync::MAILBOX)
{
if (std::find(modes.cbegin(), modes.cend(), VK_PRESENT_MODE_MAILBOX_KHR) != modes.cend())
return VK_PRESENT_MODE_MAILBOX_KHR;
cemuLog_log(LogType::Force, "Vulkan: Can't find mailbox present mode");
}
else if (vsyncState == VSync::Immediate)
{
if (std::find(modes.cbegin(), modes.cend(), VK_PRESENT_MODE_IMMEDIATE_KHR) != modes.cend())
return VK_PRESENT_MODE_IMMEDIATE_KHR;
cemuLog_log(LogType::Force, "Vulkan: Can't find immediate present mode");
}
else if (vsyncState == VSync::SYNC_AND_LIMIT)
{
LatteTiming_EnableHostDrivenVSync();
		// use immediate mode if available, otherwise fall back to FIFO
//if (std::find(modes.cbegin(), modes.cend(), VK_PRESENT_MODE_IMMEDIATE_KHR) != modes.cend())
// return VK_PRESENT_MODE_IMMEDIATE_KHR;
//else
// cemuLog_log(LogType::Force, "Vulkan: Present mode 'immediate' not available. Vsync might not behave as intended");
return VK_PRESENT_MODE_FIFO_KHR;
}
m_maxQueued = 1;
return VK_PRESENT_MODE_FIFO_KHR;
}
VkSwapchainCreateInfoKHR SwapchainInfoVk::CreateSwapchainCreateInfo(VkSurfaceKHR surface, const SwapchainSupportDetails& swapchainSupport, const VkSurfaceFormatKHR& surfaceFormat, uint32 imageCount, const VkExtent2D& extent)
{
VkSwapchainCreateInfoKHR createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.surface = surface;
createInfo.minImageCount = imageCount;
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageExtent = extent;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
const VulkanRenderer::QueueFamilyIndices indices = VulkanRenderer::GetInstance()->FindQueueFamilies(surface, m_physicalDevice);
m_swapchainQueueFamilyIndices = { (uint32)indices.graphicsFamily, (uint32)indices.presentFamily };
if (indices.graphicsFamily != indices.presentFamily)
{
createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
createInfo.queueFamilyIndexCount = m_swapchainQueueFamilyIndices.size();
createInfo.pQueueFamilyIndices = m_swapchainQueueFamilyIndices.data();
}
else
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.preTransform = swapchainSupport.capabilities.currentTransform;
createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
createInfo.presentMode = ChoosePresentMode(swapchainSupport.presentModes);
createInfo.clipped = VK_TRUE;
cemuLog_logDebug(LogType::Force, "vulkan presentation mode: {}", createInfo.presentMode);
return createInfo;
}
// ---- cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanQuery.cpp ----
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
class LatteQueryObjectVk : public LatteQueryObject
{
friend class VulkanRenderer;
LatteQueryObjectVk(VulkanRenderer* rendererVk) : m_rendererVk(rendererVk)
{
};
bool getResult(uint64& numSamplesPassed) override;
void begin() override;
void end() override;
void beginFragment();
void endFragment();
void handleFinishedFragments();
uint32 acquireQueryIndex();
void releaseQueryIndex(uint32 queryIndex);
private:
struct queryFragment
{
uint32 queryIndex;
uint64 m_finishCommandBuffer;
bool isFinished;
};
VulkanRenderer* m_rendererVk;
//sint32 m_queryIndex;
std::vector<queryFragment> list_queryFragments;
bool m_vkQueryEnded{};
bool m_hasActiveQuery{};
bool m_hasActiveFragment{};
uint64 m_finishCommandBuffer;
uint64 m_acccumulatedSum;
};
bool LatteQueryObjectVk::getResult(uint64& numSamplesPassed)
{
if (!m_vkQueryEnded)
return false;
if (!m_rendererVk->HasCommandBufferFinished(m_finishCommandBuffer))
return false;
handleFinishedFragments();
cemu_assert_debug(list_queryFragments.empty());
numSamplesPassed = m_acccumulatedSum;
//numSamplesPassed = m_rendererVk->m_occlusionQueries.ptrQueryResults[m_queryIndex];
return true;
}
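// A query "fragment" is the portion of an occlusion query recorded into a single command buffer.
// Because vkCmdBeginQuery/vkCmdEndQuery cannot span command buffer boundaries, an active query is split into
// fragments whose individual results are accumulated in handleFinishedFragments().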
void LatteQueryObjectVk::beginFragment()
{
m_rendererVk->draw_endRenderPass();
handleFinishedFragments();
uint32 newQueryIndex = acquireQueryIndex();
queryFragment qf{};
qf.queryIndex = newQueryIndex;
qf.isFinished = false;
qf.m_finishCommandBuffer = 0;
list_queryFragments.emplace_back(qf);
vkCmdResetQueryPool(m_rendererVk->m_state.currentCommandBuffer, m_rendererVk->m_occlusionQueries.queryPool, newQueryIndex, 1);
vkCmdBeginQuery(m_rendererVk->m_state.currentCommandBuffer, m_rendererVk->m_occlusionQueries.queryPool, newQueryIndex, VK_QUERY_CONTROL_PRECISE_BIT);
// todo - we already synchronize with command buffers, should we also set wait bits?
m_hasActiveFragment = true;
}
void LatteQueryObjectVk::begin()
{
m_vkQueryEnded = false;
m_hasActiveQuery = true;
beginFragment();
}
void LatteQueryObjectVk::endFragment()
{
m_rendererVk->draw_endRenderPass();
cemu_assert_debug(m_hasActiveFragment);
uint32 queryIndex = list_queryFragments.back().queryIndex;
vkCmdEndQuery(m_rendererVk->m_state.currentCommandBuffer, m_rendererVk->m_occlusionQueries.queryPool, queryIndex);
vkCmdCopyQueryPoolResults(m_rendererVk->m_state.currentCommandBuffer, m_rendererVk->m_occlusionQueries.queryPool, queryIndex, 1, m_rendererVk->m_occlusionQueries.bufferQueryResults, queryIndex * sizeof(uint64), 8, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
list_queryFragments.back().m_finishCommandBuffer = m_rendererVk->GetCurrentCommandBufferId();
list_queryFragments.back().isFinished = true;
m_hasActiveFragment = false;
}
void LatteQueryObjectVk::handleFinishedFragments()
{
// remove finished fragments and add to m_acccumulatedSum
while (!list_queryFragments.empty())
{
auto& it = list_queryFragments.front();
if (!it.isFinished)
break;
if (!m_rendererVk->HasCommandBufferFinished(it.m_finishCommandBuffer))
break;
m_acccumulatedSum += m_rendererVk->m_occlusionQueries.ptrQueryResults[it.queryIndex];
releaseQueryIndex(it.queryIndex);
list_queryFragments.erase(list_queryFragments.begin());
}
}
uint32 LatteQueryObjectVk::acquireQueryIndex()
{
if (m_rendererVk->m_occlusionQueries.list_availableQueryIndices.empty())
{
cemuLog_log(LogType::Force, "Vulkan-Error: Exhausted query pool");
assert_dbg();
}
uint32 queryIndex = m_rendererVk->m_occlusionQueries.list_availableQueryIndices.back();
m_rendererVk->m_occlusionQueries.list_availableQueryIndices.pop_back();
return queryIndex;
}
void LatteQueryObjectVk::releaseQueryIndex(uint32 queryIndex)
{
m_rendererVk->m_occlusionQueries.list_availableQueryIndices.emplace_back(queryIndex);
}
void LatteQueryObjectVk::end()
{
cemu_assert_debug(!list_queryFragments.empty());
if(m_hasActiveFragment)
endFragment();
m_vkQueryEnded = true;
m_hasActiveQuery = false;
m_finishCommandBuffer = m_rendererVk->GetCurrentCommandBufferId();
m_rendererVk->m_occlusionQueries.m_lastCommandBuffer = m_finishCommandBuffer;
m_rendererVk->RequestSubmitSoon(); // make sure the current command buffer gets submitted soon
m_rendererVk->RequestSubmitOnIdle();
}
LatteQueryObject* VulkanRenderer::occlusionQuery_create()
{
// create query pool if it doesn't already exist
if(m_occlusionQueries.queryPool == VK_NULL_HANDLE)
{
VkQueryPoolCreateInfo queryPoolCreateInfo{};
queryPoolCreateInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
queryPoolCreateInfo.flags = 0;
queryPoolCreateInfo.queryType = VK_QUERY_TYPE_OCCLUSION;
queryPoolCreateInfo.queryCount = OCCLUSION_QUERY_POOL_SIZE;
queryPoolCreateInfo.pipelineStatistics = 0;
auto r = vkCreateQueryPool(m_logicalDevice, &queryPoolCreateInfo, nullptr, &m_occlusionQueries.queryPool);
if (r != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Vulkan-Error: Failed to create query pool with error {}", (sint32)r);
return nullptr;
}
}
LatteQueryObjectVk* queryObjVk = nullptr;
if (m_occlusionQueries.list_cachedQueries.empty())
{
queryObjVk = new LatteQueryObjectVk(this);
}
else
{
queryObjVk = m_occlusionQueries.list_cachedQueries.front();
		m_occlusionQueries.list_cachedQueries.erase(m_occlusionQueries.list_cachedQueries.begin());
}
queryObjVk->queryEnded = false;
queryObjVk->queryEventStart = 0;
queryObjVk->queryEventEnd = 0;
queryObjVk->m_vkQueryEnded = false;
queryObjVk->m_acccumulatedSum = 0;
cemu_assert_debug(queryObjVk->list_queryFragments.empty()); // query fragment list should always be cleared in _destroy()
m_occlusionQueries.list_currentlyActiveQueries.emplace_back(queryObjVk);
return queryObjVk;
}
void VulkanRenderer::occlusionQuery_destroy(LatteQueryObject* queryObj)
{
LatteQueryObjectVk* queryObjVk = static_cast<LatteQueryObjectVk*>(queryObj);
m_occlusionQueries.list_currentlyActiveQueries.erase(std::remove(m_occlusionQueries.list_currentlyActiveQueries.begin(), m_occlusionQueries.list_currentlyActiveQueries.end(), queryObj), m_occlusionQueries.list_currentlyActiveQueries.end());
m_occlusionQueries.list_cachedQueries.emplace_back(queryObjVk);
for (auto& it : queryObjVk->list_queryFragments)
queryObjVk->releaseQueryIndex(it.queryIndex);
queryObjVk->list_queryFragments.clear();
}
void VulkanRenderer::occlusionQuery_flush()
{
WaitCommandBufferFinished(m_occlusionQueries.m_lastCommandBuffer);
}
void VulkanRenderer::occlusionQuery_updateState()
{
// check for finished command buffers here since query states are tied to buffers
ProcessFinishedCommandBuffers();
}
void VulkanRenderer::occlusionQuery_notifyEndCommandBuffer()
{
for (auto& it : m_occlusionQueries.list_currentlyActiveQueries)
if(it->m_hasActiveQuery)
it->endFragment();
}
void VulkanRenderer::occlusionQuery_notifyBeginCommandBuffer()
{
for (auto& it : m_occlusionQueries.list_currentlyActiveQueries)
if (it->m_hasActiveQuery)
it->beginFragment();
}
// ---- cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.cpp ----
#include "Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "config/ActiveSettings.h"
#include "config/CemuConfig.h"
#include "util/helpers/ConcurrentQueue.h"
#include "Cemu/FileCache/FileCache.h"
#include <glslang/Public/ShaderLang.h>
#include <glslang/SPIRV/GlslangToSpv.h>
#include <util/helpers/helpers.h>
bool s_isLoadingShadersVk{ false };
class FileCache* s_spirvCache{nullptr};
extern std::atomic_int g_compiled_shaders_total;
extern std::atomic_int g_compiled_shaders_async;
consteval TBuiltInResource GetDefaultBuiltInResource()
{
TBuiltInResource defaultResource = {};
defaultResource.maxLights = 32;
defaultResource.maxClipPlanes = 6;
defaultResource.maxTextureUnits = 32;
defaultResource.maxTextureCoords = 32;
defaultResource.maxVertexAttribs = 64;
defaultResource.maxVertexUniformComponents = 4096;
defaultResource.maxVaryingFloats = 64;
defaultResource.maxVertexTextureImageUnits = 32;
defaultResource.maxCombinedTextureImageUnits = 80;
defaultResource.maxTextureImageUnits = 32;
defaultResource.maxFragmentUniformComponents = 4096;
defaultResource.maxDrawBuffers = 32;
defaultResource.maxVertexUniformVectors = 128;
defaultResource.maxVaryingVectors = 8;
defaultResource.maxFragmentUniformVectors = 16;
defaultResource.maxVertexOutputVectors = 16;
defaultResource.maxFragmentInputVectors = 15;
defaultResource.minProgramTexelOffset = -8;
defaultResource.maxProgramTexelOffset = 7;
defaultResource.maxClipDistances = 8;
defaultResource.maxComputeWorkGroupCountX = 65535;
defaultResource.maxComputeWorkGroupCountY = 65535;
defaultResource.maxComputeWorkGroupCountZ = 65535;
defaultResource.maxComputeWorkGroupSizeX = 1024;
defaultResource.maxComputeWorkGroupSizeY = 1024;
defaultResource.maxComputeWorkGroupSizeZ = 64;
defaultResource.maxComputeUniformComponents = 1024;
defaultResource.maxComputeTextureImageUnits = 16;
defaultResource.maxComputeImageUniforms = 8;
defaultResource.maxComputeAtomicCounters = 8;
defaultResource.maxComputeAtomicCounterBuffers = 1;
defaultResource.maxVaryingComponents = 60;
defaultResource.maxVertexOutputComponents = 64;
defaultResource.maxGeometryInputComponents = 64;
defaultResource.maxGeometryOutputComponents = 128;
defaultResource.maxFragmentInputComponents = 128;
defaultResource.maxImageUnits = 8;
defaultResource.maxCombinedImageUnitsAndFragmentOutputs = 8;
defaultResource.maxCombinedShaderOutputResources = 8;
defaultResource.maxImageSamples = 0;
defaultResource.maxVertexImageUniforms = 0;
defaultResource.maxTessControlImageUniforms = 0;
defaultResource.maxTessEvaluationImageUniforms = 0;
defaultResource.maxGeometryImageUniforms = 0;
defaultResource.maxFragmentImageUniforms = 8;
defaultResource.maxCombinedImageUniforms = 8;
defaultResource.maxGeometryTextureImageUnits = 16;
defaultResource.maxGeometryOutputVertices = 256;
defaultResource.maxGeometryTotalOutputComponents = 1024;
defaultResource.maxGeometryUniformComponents = 1024;
defaultResource.maxGeometryVaryingComponents = 64;
defaultResource.maxTessControlInputComponents = 128;
defaultResource.maxTessControlOutputComponents = 128;
defaultResource.maxTessControlTextureImageUnits = 16;
defaultResource.maxTessControlUniformComponents = 1024;
defaultResource.maxTessControlTotalOutputComponents = 4096;
defaultResource.maxTessEvaluationInputComponents = 128;
defaultResource.maxTessEvaluationOutputComponents = 128;
defaultResource.maxTessEvaluationTextureImageUnits = 16;
defaultResource.maxTessEvaluationUniformComponents = 1024;
defaultResource.maxTessPatchComponents = 120;
defaultResource.maxPatchVertices = 32;
defaultResource.maxTessGenLevel = 64;
defaultResource.maxViewports = 16;
defaultResource.maxVertexAtomicCounters = 0;
defaultResource.maxTessControlAtomicCounters = 0;
defaultResource.maxTessEvaluationAtomicCounters = 0;
defaultResource.maxGeometryAtomicCounters = 0;
defaultResource.maxFragmentAtomicCounters = 8;
defaultResource.maxCombinedAtomicCounters = 8;
defaultResource.maxAtomicCounterBindings = 1;
defaultResource.maxVertexAtomicCounterBuffers = 0;
defaultResource.maxTessControlAtomicCounterBuffers = 0;
defaultResource.maxTessEvaluationAtomicCounterBuffers = 0;
defaultResource.maxGeometryAtomicCounterBuffers = 0;
defaultResource.maxFragmentAtomicCounterBuffers = 1;
defaultResource.maxCombinedAtomicCounterBuffers = 1;
defaultResource.maxAtomicCounterBufferSize = 16384;
defaultResource.maxTransformFeedbackBuffers = 4;
defaultResource.maxTransformFeedbackInterleavedComponents = 64;
defaultResource.maxCullDistances = 8;
defaultResource.maxCombinedClipAndCullDistances = 8;
defaultResource.maxSamples = 4;
defaultResource.maxMeshOutputVerticesNV = 256;
defaultResource.maxMeshOutputPrimitivesNV = 512;
defaultResource.maxMeshWorkGroupSizeX_NV = 32;
defaultResource.maxMeshWorkGroupSizeY_NV = 1;
defaultResource.maxMeshWorkGroupSizeZ_NV = 1;
defaultResource.maxTaskWorkGroupSizeX_NV = 32;
defaultResource.maxTaskWorkGroupSizeY_NV = 1;
defaultResource.maxTaskWorkGroupSizeZ_NV = 1;
defaultResource.maxMeshViewCountNV = 4;
defaultResource.limits = {};
defaultResource.limits.nonInductiveForLoops = true;
defaultResource.limits.whileLoops = true;
defaultResource.limits.doWhileLoops = true;
defaultResource.limits.generalUniformIndexing = true;
defaultResource.limits.generalAttributeMatrixVectorIndexing = true;
defaultResource.limits.generalVaryingIndexing = true;
defaultResource.limits.generalSamplerIndexing = true;
defaultResource.limits.generalVariableIndexing = true;
defaultResource.limits.generalConstantMatrixVectorIndexing = true;
return defaultResource;
};
class _ShaderVkThreadPool
{
public:
void StartThreads()
{
if (m_threadsActive.exchange(true))
return;
// create thread pool
const uint32 threadCount = 2;
for (uint32 i = 0; i < threadCount; ++i)
s_threads.emplace_back(&_ShaderVkThreadPool::CompilerThreadFunc, this);
}
void StopThreads()
{
if (!m_threadsActive.exchange(false))
return;
for (uint32 i = 0; i < s_threads.size(); ++i)
s_compilationQueueCount.increment();
for (auto& it : s_threads)
it.join();
s_threads.clear();
}
~_ShaderVkThreadPool()
{
StopThreads();
}
void CompilerThreadFunc()
{
SetThreadName("vkShaderComp");
while (m_threadsActive.load(std::memory_order::relaxed))
{
s_compilationQueueCount.decrementWithWait();
s_compilationQueueMutex.lock();
if (s_compilationQueue.empty())
{
// queue empty again, shaders compiled synchronously via PreponeCompilation()
s_compilationQueueMutex.unlock();
continue;
}
RendererShaderVk* job = s_compilationQueue.front();
s_compilationQueue.pop_front();
// set compilation state
cemu_assert_debug(job->m_compilationState.getValue() == RendererShaderVk::COMPILATION_STATE::QUEUED);
job->m_compilationState.setValue(RendererShaderVk::COMPILATION_STATE::COMPILING);
s_compilationQueueMutex.unlock();
// compile
job->CompileInternal(false);
++g_compiled_shaders_async;
// mark as compiled
cemu_assert_debug(job->m_compilationState.getValue() == RendererShaderVk::COMPILATION_STATE::COMPILING);
job->m_compilationState.setValue(RendererShaderVk::COMPILATION_STATE::DONE);
}
}
bool HasThreadsRunning() const { return m_threadsActive; }
public:
std::vector<std::thread> s_threads;
std::deque<RendererShaderVk*> s_compilationQueue;
CounterSemaphore s_compilationQueueCount;
std::mutex s_compilationQueueMutex;
private:
std::atomic<bool> m_threadsActive;
}ShaderVkThreadPool;
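// Constructing a RendererShaderVk enqueues it on the shared compiler thread pool; compilation runs asynchronously
// unless PreponeCompilation() pulls it out of the queue first.
// Typical lifecycle (sketch): RendererShaderVk::Init() -> new RendererShaderVk(...) -> WaitForCompiled() or
// PreponeCompilation() -> use GetShaderModule().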
RendererShaderVk::RendererShaderVk(ShaderType type, uint64 baseHash, uint64 auxHash, bool isGameShader, bool isGfxPackShader, const std::string& glslCode)
: RendererShader(type, baseHash, auxHash, isGameShader, isGfxPackShader), m_glslCode(glslCode)
{
// start async compilation
ShaderVkThreadPool.s_compilationQueueMutex.lock();
m_compilationState.setValue(COMPILATION_STATE::QUEUED);
ShaderVkThreadPool.s_compilationQueue.push_back(this);
ShaderVkThreadPool.s_compilationQueueCount.increment();
ShaderVkThreadPool.s_compilationQueueMutex.unlock();
cemu_assert_debug(ShaderVkThreadPool.HasThreadsRunning()); // make sure .StartThreads() was called
}
RendererShaderVk::~RendererShaderVk()
{
while (!list_pipelineInfo.empty())
delete list_pipelineInfo[0];
}
void RendererShaderVk::Init()
{
ShaderVkThreadPool.StartThreads();
}
void RendererShaderVk::Shutdown()
{
ShaderVkThreadPool.StopThreads();
}
sint32 RendererShaderVk::GetUniformLocation(const char* name)
{
cemu_assert_suspicious();
return 0;
}
void RendererShaderVk::SetUniform2fv(sint32 location, void* data, sint32 count)
{
cemu_assert_suspicious();
}
void RendererShaderVk::SetUniform4iv(sint32 location, void* data, sint32 count)
{
cemu_assert_suspicious();
}
void RendererShaderVk::CreateVkShaderModule(std::span<uint32> spirvBuffer)
{
VkShaderModuleCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = spirvBuffer.size_bytes();
createInfo.pCode = spirvBuffer.data();
VulkanRenderer* vkr = (VulkanRenderer*)g_renderer.get();
VkDevice m_device = vkr->GetLogicalDevice();
VkResult result = vkCreateShaderModule(m_device, &createInfo, nullptr, &m_shader_module);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Vulkan: Shader error");
throw std::runtime_error(fmt::format("Failed to create shader module: {}", result));
}
// set debug name
if (vkr->IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT)
{
VkDebugUtilsObjectNameInfoEXT objName{};
objName.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
objName.objectType = VK_OBJECT_TYPE_SHADER_MODULE;
objName.pNext = nullptr;
objName.objectHandle = (uint64_t)m_shader_module;
auto objNameStr = fmt::format("shader_{:016x}_{:016x}", m_baseHash, m_auxHash);
objName.pObjectName = objNameStr.c_str();
vkSetDebugUtilsObjectNameEXT(vkr->GetLogicalDevice(), &objName);
}
}
void RendererShaderVk::FinishCompilation()
{
m_glslCode.clear();
m_glslCode.shrink_to_fit();
}
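// Translates the GLSL source to SPIR-V via glslang and creates the VkShaderModule.
// While the shader cache is loading, a precompiled SPIR-V cache is consulted first to skip the translation step.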
void RendererShaderVk::CompileInternal(bool isRenderThread)
{
// try to retrieve SPIR-V module from cache
if (s_isLoadingShadersVk && (m_isGameShader && !m_isGfxPackShader) && s_spirvCache)
{
cemu_assert_debug(m_baseHash != 0);
uint64 h1, h2;
GenerateShaderPrecompiledCacheFilename(m_type, m_baseHash, m_auxHash, h1, h2);
std::vector<uint8> cacheFileData;
if (s_spirvCache->GetFile({ h1, h2 }, cacheFileData))
{
// generate shader from cached SPIR-V buffer
CreateVkShaderModule(std::span<uint32>((uint32*)cacheFileData.data(), cacheFileData.size() / sizeof(uint32)));
FinishCompilation();
return;
}
}
EShLanguage state;
switch (GetType())
{
case ShaderType::kVertex:
state = EShLangVertex;
break;
case ShaderType::kFragment:
state = EShLangFragment;
break;
case ShaderType::kGeometry:
state = EShLangGeometry;
break;
default:
cemu_assert_debug(false);
}
glslang::TShader Shader(state);
const char* cstr = m_glslCode.c_str();
Shader.setStrings(&cstr, 1);
Shader.setEnvInput(glslang::EShSourceGlsl, state, glslang::EShClientVulkan, 100);
Shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetClientVersion::EShTargetVulkan_1_1);
Shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetLanguageVersion::EShTargetSpv_1_3);
TBuiltInResource Resources = GetDefaultBuiltInResource();
std::string PreprocessedGLSL;
VulkanRenderer* vkr = (VulkanRenderer*)g_renderer.get();
EShMessages messagesPreprocess;
if (vkr->IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT)
messagesPreprocess = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules | EShMsgDebugInfo);
else
messagesPreprocess = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules);
glslang::TShader::ForbidIncluder Includer;
if (!Shader.preprocess(&Resources, 450, ENoProfile, false, false, messagesPreprocess, &PreprocessedGLSL, Includer))
{
cemuLog_log(LogType::Force, fmt::format("GLSL Preprocessing Failed For {:016x}_{:016x}: \"{}\"", m_baseHash, m_auxHash, Shader.getInfoLog()));
FinishCompilation();
return;
}
EShMessages messagesParseLink;
if (vkr->IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT)
messagesParseLink = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules | EShMsgDebugInfo);
else
messagesParseLink = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules);
const char* PreprocessedCStr = PreprocessedGLSL.c_str();
Shader.setStrings(&PreprocessedCStr, 1);
if (!Shader.parse(&Resources, 100, false, messagesParseLink))
{
cemuLog_log(LogType::Force, fmt::format("GLSL parsing failed for {:016x}_{:016x}: \"{}\"", m_baseHash, m_auxHash, Shader.getInfoLog()));
cemuLog_logDebug(LogType::Force, "GLSL source:\n{}", m_glslCode);
cemu_assert_debug(false);
FinishCompilation();
return;
}
glslang::TProgram Program;
Program.addShader(&Shader);
if (!Program.link(messagesParseLink))
{
cemuLog_log(LogType::Force, fmt::format("GLSL linking failed for {:016x}_{:016x}: \"{}\"", m_baseHash, m_auxHash, Program.getInfoLog()));
cemu_assert_debug(false);
FinishCompilation();
return;
}
if (!Program.mapIO())
{
cemuLog_log(LogType::Force, fmt::format("GLSL linking failed for {:016x}_{:016x}: \"{}\"", m_baseHash, m_auxHash, Program.getInfoLog()));
FinishCompilation();
return;
}
// temp storage for SPIR-V after translation
std::vector<uint32> spirvBuffer;
spv::SpvBuildLogger logger;
glslang::SpvOptions spvOptions;
spvOptions.disableOptimizer = false;
spvOptions.generateDebugInfo = (vkr->IsDebugUtilsEnabled() && vkSetDebugUtilsObjectNameEXT);
spvOptions.validate = false;
spvOptions.optimizeSize = true;
//auto beginTime = benchmarkTimer_start();
GlslangToSpv(*Program.getIntermediate(state), spirvBuffer, &logger, &spvOptions);
//double timeDur = benchmarkTimer_stop(beginTime);
//forceLogRemoveMe_printf("Shader GLSL-to-SPIRV compilation took %lfms Size %08x", timeDur, spirvBuffer.size()*4);
if (s_spirvCache && m_isGameShader && m_isGfxPackShader == false)
{
uint64 h1, h2;
GenerateShaderPrecompiledCacheFilename(m_type, m_baseHash, m_auxHash, h1, h2);
s_spirvCache->AddFile({ h1, h2 }, (const uint8*)spirvBuffer.data(), spirvBuffer.size() * sizeof(uint32));
}
CreateVkShaderModule(spirvBuffer);
// count compiled shader
if (!s_isLoadingShadersVk)
{
if( m_isGameShader )
++g_compiled_shaders_total;
}
FinishCompilation();
}
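// Called when a shader result is needed right away: if the shader is still queued it is compiled synchronously on
// the calling thread, otherwise we wait for the worker thread that already picked it up to finish.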
void RendererShaderVk::PreponeCompilation(bool isRenderThread)
{
ShaderVkThreadPool.s_compilationQueueMutex.lock();
bool isStillQueued = m_compilationState.hasState(COMPILATION_STATE::QUEUED);
if (isStillQueued)
{
// remove from queue
ShaderVkThreadPool.s_compilationQueue.erase(std::remove(ShaderVkThreadPool.s_compilationQueue.begin(), ShaderVkThreadPool.s_compilationQueue.end(), this), ShaderVkThreadPool.s_compilationQueue.end());
m_compilationState.setValue(COMPILATION_STATE::COMPILING);
}
ShaderVkThreadPool.s_compilationQueueMutex.unlock();
if (!isStillQueued)
{
m_compilationState.waitUntilValue(COMPILATION_STATE::DONE);
--g_compiled_shaders_async; // compilation caused a stall so we don't consider this one async
return;
}
else
{
// compile synchronously
CompileInternal(isRenderThread);
m_compilationState.setValue(COMPILATION_STATE::DONE);
}
}
bool RendererShaderVk::IsCompiled()
{
return m_compilationState.hasState(COMPILATION_STATE::DONE);
};
bool RendererShaderVk::WaitForCompiled()
{
m_compilationState.waitUntilValue(COMPILATION_STATE::DONE);
return true;
}
void RendererShaderVk::ShaderCacheLoading_begin(uint64 cacheTitleId)
{
if (s_spirvCache)
{
delete s_spirvCache;
s_spirvCache = nullptr;
}
uint32 spirvCacheMagic = GeneratePrecompiledCacheId();
const std::string cacheFilename = fmt::format("{:016x}_spirv.bin", cacheTitleId);
const fs::path cachePath = ActiveSettings::GetCachePath("shaderCache/precompiled/{}", cacheFilename);
s_spirvCache = FileCache::Open(cachePath, true, spirvCacheMagic);
if (s_spirvCache == nullptr)
cemuLog_log(LogType::Force, "Unable to open SPIR-V cache {}", cacheFilename);
s_isLoadingShadersVk = true;
}
void RendererShaderVk::ShaderCacheLoading_end()
{
	// keep s_spirvCache open since we will write to it while the game is running
s_isLoadingShadersVk = false;
}
void RendererShaderVk::ShaderCacheLoading_Close()
{
delete s_spirvCache;
s_spirvCache = nullptr;
}
// ---- cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.cpp ----
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineCompiler.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "config/ActiveSettings.h"
#include "util/helpers/Serializer.h"
#include "Cafe/HW/Latte/Common/RegisterSerializer.h"
std::mutex s_nvidiaWorkaround;
/* rects emulation */
void rectsEmulationGS_outputSingleVertex(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, sint32 vIdx, const LatteContextRegister& latteRegister)
{
auto parameterMask = vertexShader->outputParameterMask;
for (uint32 i = 0; i < 32; i++)
{
if ((parameterMask & (1 << i)) == 0)
continue;
sint32 vsSemanticId = psInputTable->getVertexShaderOutParamSemanticId(latteRegister.GetRawView(), i);
if (vsSemanticId < 0)
continue;
// make sure PS has matching input
if (!psInputTable->hasPSImportForSemanticId(vsSemanticId))
continue;
gsSrc.append(fmt::format("passParameterSem{}Out = passParameterSem{}In[{}];\r\n", vsSemanticId, vsSemanticId, vIdx));
}
gsSrc.append(fmt::format("gl_Position = gl_in[{}].gl_Position;\r\n", vIdx));
gsSrc.append("EmitVertex();\r\n");
}
void rectsEmulationGS_outputGeneratedVertex(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, const char* variant, const LatteContextRegister& latteRegister)
{
auto parameterMask = vertexShader->outputParameterMask;
for (uint32 i = 0; i < 32; i++)
{
if ((parameterMask & (1 << i)) == 0)
continue;
sint32 vsSemanticId = psInputTable->getVertexShaderOutParamSemanticId(latteRegister.GetRawView(), i);
if (vsSemanticId < 0)
continue;
// make sure PS has matching input
if (!psInputTable->hasPSImportForSemanticId(vsSemanticId))
continue;
gsSrc.append(fmt::format("passParameterSem{}Out = gen4thVertex{}(passParameterSem{}In[0], passParameterSem{}In[1], passParameterSem{}In[2]);\r\n", vsSemanticId, variant, vsSemanticId, vsSemanticId, vsSemanticId));
}
gsSrc.append(fmt::format("gl_Position = gen4thVertex{}(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_in[2].gl_Position);\r\n", variant));
gsSrc.append("EmitVertex();\r\n");
}
void rectsEmulationGS_outputVerticesCode(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, sint32 p0, sint32 p1, sint32 p2, sint32 p3, const char* variant, const LatteContextRegister& latteRegister)
{
sint32 pList[4] = { p0, p1, p2, p3 };
for (sint32 i = 0; i < 4; i++)
{
if (pList[i] == 3)
rectsEmulationGS_outputGeneratedVertex(gsSrc, vertexShader, psInputTable, variant, latteRegister);
else
rectsEmulationGS_outputSingleVertex(gsSrc, vertexShader, psInputTable, pList[i], latteRegister);
}
}
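// Generates a pass-through geometry shader that emulates GPU7 RECT primitives: the incoming triangle is expanded
// into a quad by deriving the missing 4th vertex, with the variant chosen by which edge forms the diagonal.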
RendererShaderVk* rectsEmulationGS_generate(LatteDecompilerShader* vertexShader, const LatteContextRegister& latteRegister)
{
std::string gsSrc;
gsSrc.append("#version 450\r\n");
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
// layout
gsSrc.append("layout(triangles) in;\r\n");
gsSrc.append("layout(triangle_strip) out;\r\n");
gsSrc.append("layout(max_vertices = 4) out;\r\n");
// inputs & outputs
auto parameterMask = vertexShader->outputParameterMask;
for (sint32 f = 0; f < 2; f++)
{
for (uint32 i = 0; i < 32; i++)
{
if ((parameterMask & (1 << i)) == 0)
continue;
sint32 vsSemanticId = psInputTable->getVertexShaderOutParamSemanticId(latteRegister.GetRawView(), i);
if (vsSemanticId < 0)
continue;
auto psImport = psInputTable->getPSImportBySemanticId(vsSemanticId);
if (psImport == nullptr)
continue;
gsSrc.append(fmt::format("layout(location = {}) ", psInputTable->getPSImportLocationBySemanticId(vsSemanticId)));
if (psImport->isFlat)
gsSrc.append("flat ");
if (psImport->isNoPerspective)
gsSrc.append("noperspective ");
if (f == 0)
gsSrc.append("in");
else
gsSrc.append("out");
if (f == 0)
gsSrc.append(fmt::format(" vec4 passParameterSem{}In[];\r\n", vsSemanticId));
else
gsSrc.append(fmt::format(" vec4 passParameterSem{}Out;\r\n", vsSemanticId));
}
}
// gen function
gsSrc.append("vec4 gen4thVertexA(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return b - (c - a);\r\n");
gsSrc.append("}\r\n");
gsSrc.append("vec4 gen4thVertexB(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return c - (b - a);\r\n");
gsSrc.append("}\r\n");
gsSrc.append("vec4 gen4thVertexC(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return c + (b - a);\r\n");
gsSrc.append("}\r\n");
// main
gsSrc.append("void main()\r\n");
gsSrc.append("{\r\n");
// there are two possible winding orders that need different triangle generation:
// 0 1
// 2 3
// and
// 0 1
// 3 2
// all others are just symmetries of these cases
// we can determine the case by comparing the distance 0<->1 and 0<->2
gsSrc.append("float dist0_1 = length(gl_in[1].gl_Position.xy - gl_in[0].gl_Position.xy);\r\n");
gsSrc.append("float dist0_2 = length(gl_in[2].gl_Position.xy - gl_in[0].gl_Position.xy);\r\n");
gsSrc.append("float dist1_2 = length(gl_in[2].gl_Position.xy - gl_in[1].gl_Position.xy);\r\n");
// emit vertices
gsSrc.append("if(dist0_1 > dist0_2 && dist0_1 > dist1_2)\r\n");
gsSrc.append("{\r\n");
// p0 to p1 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 2, 1, 0, 3, "A", latteRegister);
gsSrc.append("} else if ( dist0_2 > dist0_1 && dist0_2 > dist1_2 ) {\r\n");
// p0 to p2 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 1, 2, 0, 3, "B", latteRegister);
gsSrc.append("} else {\r\n");
// p1 to p2 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 0, 1, 2, 3, "C", latteRegister);
gsSrc.append("}\r\n");
gsSrc.append("}\r\n");
auto vkShader = new RendererShaderVk(RendererShader::ShaderType::kGeometry, 0, 0, false, false, gsSrc);
vkShader->PreponeCompilation(true);
return vkShader;
}
/* pipeline compiler and cache helper */
extern std::atomic_int g_compiling_pipelines;
extern std::atomic_int g_compiling_pipelines_async;
extern std::atomic_uint64_t g_compiling_pipelines_syncTimeSum;
PipelineCompiler::PipelineCompiler() {};
PipelineCompiler::~PipelineCompiler()
{
if (m_vkrObjPipeline)
m_vkrObjPipeline->decRef();
if (m_renderPassObj)
m_renderPassObj->decRef();
};
VkFormat PipelineCompiler::GetVertexFormat(uint8 format)
{
switch (format)
{
case FMT_32_32_32_32_FLOAT:
return VK_FORMAT_R32G32B32A32_UINT;
case FMT_32_32_32_FLOAT:
return VK_FORMAT_R32G32B32_UINT;
case FMT_32_32_FLOAT:
return VK_FORMAT_R32G32_UINT;
case FMT_32_FLOAT:
return VK_FORMAT_R32_UINT;
case FMT_8_8_8_8:
return VK_FORMAT_R8G8B8A8_UINT;
case FMT_8_8_8:
return VK_FORMAT_R8G8B8_UINT;
case FMT_8_8:
return VK_FORMAT_R8G8_UINT;
case FMT_8:
return VK_FORMAT_R8_UINT;
case FMT_32_32_32_32:
return VK_FORMAT_R32G32B32A32_UINT;
case FMT_32_32_32:
return VK_FORMAT_R32G32B32_UINT;
case FMT_32_32:
return VK_FORMAT_R32G32_UINT;
case FMT_32:
return VK_FORMAT_R32_UINT;
case FMT_16_16_16_16:
return VK_FORMAT_R16G16B16A16_UINT; // verified to match OpenGL
case FMT_16_16_16:
return VK_FORMAT_R16G16B16_UINT;
case FMT_16_16:
return VK_FORMAT_R16G16_UINT;
case FMT_16:
return VK_FORMAT_R16_UINT;
case FMT_16_16_16_16_FLOAT:
return VK_FORMAT_R16G16B16A16_UINT; // verified to match OpenGL
case FMT_16_16_16_FLOAT:
return VK_FORMAT_R16G16B16_UINT;
case FMT_16_16_FLOAT:
return VK_FORMAT_R16G16_UINT;
case FMT_16_FLOAT:
return VK_FORMAT_R16_UINT;
case FMT_2_10_10_10:
return VK_FORMAT_R32_UINT; // verified to match OpenGL
default:
cemuLog_log(LogType::Force, "Unsupported vertex format: {:02x}", format);
assert_dbg();
return VK_FORMAT_UNDEFINED;
}
}
static VkBlendOp GetVkBlendOp(Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC combineFunc)
{
switch (combineFunc)
{
case Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC::DST_PLUS_SRC:
return VK_BLEND_OP_ADD;
case Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC::SRC_MINUS_DST:
return VK_BLEND_OP_SUBTRACT;
case Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC::MIN_DST_SRC:
return VK_BLEND_OP_MIN;
case Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC::MAX_DST_SRC:
return VK_BLEND_OP_MAX;
case Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC::DST_MINUS_SRC:
return VK_BLEND_OP_REVERSE_SUBTRACT;
default:
cemu_assert_suspicious();
return VK_BLEND_OP_ADD;
}
}
static VkBlendFactor GetVkBlendFactor(Latte::LATTE_CB_BLENDN_CONTROL::E_BLENDFACTOR factor)
{
const VkBlendFactor factors[] =
{
/* 0x00 */ VK_BLEND_FACTOR_ZERO,
/* 0x01 */ VK_BLEND_FACTOR_ONE,
/* 0x02 */ VK_BLEND_FACTOR_SRC_COLOR,
/* 0x03 */ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR,
/* 0x04 */ VK_BLEND_FACTOR_SRC_ALPHA,
/* 0x05 */ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
/* 0x06 */ VK_BLEND_FACTOR_DST_ALPHA,
/* 0x07 */ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA,
/* 0x08 */ VK_BLEND_FACTOR_DST_COLOR,
/* 0x09 */ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR,
/* 0x0A */ VK_BLEND_FACTOR_SRC_ALPHA_SATURATE,
/* 0x0B */ VK_BLEND_FACTOR_MAX_ENUM, // todo
/* 0x0C */ VK_BLEND_FACTOR_MAX_ENUM, // todo
/* 0x0D */ VK_BLEND_FACTOR_CONSTANT_COLOR,
/* 0x0E */ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR,
/* 0x0F */ VK_BLEND_FACTOR_SRC1_COLOR,
/* 0x10 */ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR,
/* 0x11 */ VK_BLEND_FACTOR_SRC1_ALPHA,
/* 0x12 */ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
/* 0x13 */ VK_BLEND_FACTOR_CONSTANT_ALPHA,
/* 0x14 */ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA
};
cemu_assert_debug((uint32)factor < std::size(factors));
return factors[(uint32)factor];
}
bool PipelineCompiler::ConsumesBlendConstants(VkBlendFactor blendFactor)
{
if (blendFactor == VK_BLEND_FACTOR_CONSTANT_COLOR ||
blendFactor == VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR ||
blendFactor == VK_BLEND_FACTOR_CONSTANT_ALPHA ||
blendFactor == VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)
return true;
return false;
}
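// Builds the descriptor set layout for a single shader stage from its resource mapping: combined image samplers
// for textures, dynamic uniform buffers (uniform vars block + uniform buffers) and an optional storage buffer
// used for transform feedback.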
void PipelineCompiler::CreateDescriptorSetLayout(VulkanRenderer* vkRenderer, LatteDecompilerShader* shader, VkDescriptorSetLayout& layout, PipelineInfo* vkrPipelineInfo)
{
// create vertex shader descriptor set
std::vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;
VkShaderStageFlags stageFlags = 0;
uint32 stageIndex = 0;
if (shader->shaderType == LatteConst::ShaderType::Vertex)
{
stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
stageIndex = VulkanRendererConst::SHADER_STAGE_INDEX_VERTEX;
}
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
{
stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
stageIndex = VulkanRendererConst::SHADER_STAGE_INDEX_FRAGMENT;
}
else if (shader->shaderType == LatteConst::ShaderType::Geometry)
{
stageFlags = VK_SHADER_STAGE_GEOMETRY_BIT;
stageIndex = VulkanRendererConst::SHADER_STAGE_INDEX_GEOMETRY;
}
// attributes
// -> not part of descriptor
// textures
sint32 textureBindingBase = shader->resourceMapping.getTextureBaseBindingPoint();
if (textureBindingBase >= 0)
{
sint32 textureCount = shader->resourceMapping.getTextureCount();
for (sint32 i = 0; i < textureCount; i++)
{
VkDescriptorSetLayoutBinding entry{};
entry.binding = (uint32)textureBindingBase + i;
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
entry.pImmutableSamplers = nullptr;
entry.stageFlags = stageFlags;
descriptorSetLayoutBindings.emplace_back(entry);
}
}
// uniform buffers
if (shader->resourceMapping.uniformVarsBufferBindingPoint >= 0)
{
VkDescriptorSetLayoutBinding entry{};
entry.binding = shader->resourceMapping.uniformVarsBufferBindingPoint;
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
entry.pImmutableSamplers = nullptr;
entry.stageFlags = stageFlags;
descriptorSetLayoutBindings.emplace_back(entry);
}
for (sint32 i = 0; i < LATTE_NUM_MAX_UNIFORM_BUFFERS; i++)
{
if (shader->resourceMapping.uniformBuffersBindingPoint[i] >= 0)
{
VkDescriptorSetLayoutBinding entry{};
entry.binding = shader->resourceMapping.uniformBuffersBindingPoint[i];
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
entry.pImmutableSamplers = nullptr;
entry.stageFlags = stageFlags;
descriptorSetLayoutBindings.emplace_back(entry);
vkrPipelineInfo->dynamicOffsetInfo.list_uniformBuffers[stageIndex].emplace_back((uint8)i);
}
}
// storage buffer for TF
if (shader->resourceMapping.tfStorageBindingPoint >= 0)
{
VkDescriptorSetLayoutBinding entry{};
entry.binding = shader->resourceMapping.tfStorageBindingPoint;
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
entry.pImmutableSamplers = nullptr;
entry.stageFlags = stageFlags;
descriptorSetLayoutBindings.emplace_back(entry);
}
if (shader->resourceMapping.uniformVarsBufferBindingPoint >= 0)
vkrPipelineInfo->dynamicOffsetInfo.hasUniformVar[stageIndex] = true;
if (shader->resourceMapping.hasUniformBuffers())
vkrPipelineInfo->dynamicOffsetInfo.hasUniformBuffers[stageIndex] = true;
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = descriptorSetLayoutBindings.size();
layoutInfo.pBindings = descriptorSetLayoutBindings.data();
if (vkCreateDescriptorSetLayout(vkRenderer->m_logicalDevice, &layoutInfo, nullptr, &layout) != VK_SUCCESS)
vkRenderer->UnrecoverableError(fmt::format("Failed to create descriptor set layout for shader {0:#x}", shader->baseHash).c_str());
}
bool PipelineCompiler::InitShaderStages(VulkanRenderer* vkRenderer, RendererShaderVk* vkVertexShader, RendererShaderVk* vkPixelShader, RendererShaderVk* vkGeometryShader)
{
// prepare shader stages
cemu_assert_debug(vkVertexShader == nullptr || vkVertexShader->IsCompiled());
cemu_assert_debug(vkPixelShader == nullptr || vkPixelShader->IsCompiled());
cemu_assert_debug(vkGeometryShader == nullptr || vkGeometryShader->IsCompiled());
if ((vkVertexShader && vkVertexShader->GetShaderModule() == VK_NULL_HANDLE) ||
(vkGeometryShader && vkGeometryShader->GetShaderModule() == VK_NULL_HANDLE) ||
(vkPixelShader && vkPixelShader->GetShaderModule() == VK_NULL_HANDLE))
{
cemuLog_log(LogType::Force, "Vulkan-Info: Pipeline creation failed due to invalid shader(s)");
return false;
}
if (vkVertexShader)
shaderStages.emplace_back(vkRenderer->CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_VERTEX_BIT, vkVertexShader->GetShaderModule(), "main"));
if (vkGeometryShader)
shaderStages.emplace_back(vkRenderer->CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_GEOMETRY_BIT, vkGeometryShader->GetShaderModule(), "main"));
else if (m_rectEmulationGS)
shaderStages.emplace_back(vkRenderer->CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_GEOMETRY_BIT, m_rectEmulationGS->GetShaderModule(), "main"));
if (vkPixelShader)
shaderStages.emplace_back(vkRenderer->CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_FRAGMENT_BIT, vkPixelShader->GetShaderModule(), "main"));
return true;
}
void PipelineCompiler::InitVertexInputState(const LatteContextRegister& latteRegister, LatteDecompilerShader* vertexShader, LatteFetchShader* fetchShader)
{
vertexInputAttributeDescription.reserve(16);
vertexInputBindingDescription.reserve(fetchShader->bufferGroups.size());
for (auto& bufferGroup : fetchShader->bufferGroups)
{
std::optional<LatteConst::VertexFetchType2> fetchType;
for (sint32 j = 0; j < bufferGroup.attribCount; ++j)
{
auto& attr = bufferGroup.attrib[j];
uint32 semanticId = vertexShader->resourceMapping.attributeMapping[attr.semanticId];
if (semanticId == (uint32)-1)
continue; // attribute not used?
VkVertexInputAttributeDescription entry{};
entry.location = semanticId;
entry.offset = attr.offset;
entry.binding = attr.attributeBufferIndex;
entry.format = GetVertexFormat(attr.format);
vertexInputAttributeDescription.emplace_back(entry);
if (fetchType.has_value())
cemu_assert_debug(fetchType == attr.fetchType);
else
fetchType = attr.fetchType;
if (attr.fetchType == LatteConst::INSTANCE_DATA)
{
cemu_assert_debug(attr.aluDivisor == 1); // other divisor not yet supported
// use VK_EXT_vertex_attribute_divisor
}
}
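// each attribute buffer occupies 7 consecutive registers starting at mmSQ_VTX_ATTRIBUTE_BLOCK_START; the stride is stored in bits [26:11] of the third register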
uint32 bufferIndex = bufferGroup.attributeBufferIndex;
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
uint32 bufferStride = (latteRegister.GetRawView()[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
VkVertexInputBindingDescription entry{};
#if BOOST_OS_MACOS
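// round the stride up to a multiple of 4; Metal (via MoltenVK) does not accept unaligned vertex buffer strides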
if (bufferStride % 4 != 0) {
bufferStride = bufferStride + (4-(bufferStride % 4));
}
#endif
entry.stride = bufferStride;
if (!fetchType.has_value() || fetchType == LatteConst::VertexFetchType2::VERTEX_DATA)
entry.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
else if (fetchType == LatteConst::VertexFetchType2::INSTANCE_DATA)
entry.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE;
else
{
cemu_assert(false);
}
entry.binding = bufferIndex;
vertexInputBindingDescription.emplace_back(entry);
}
vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertexInputInfo.vertexBindingDescriptionCount = vertexInputBindingDescription.size();
vertexInputInfo.pVertexBindingDescriptions = vertexInputBindingDescription.data();
vertexInputInfo.vertexAttributeDescriptionCount = vertexInputAttributeDescription.size();
vertexInputInfo.pVertexAttributeDescriptions = vertexInputAttributeDescription.data();
}
void PipelineCompiler::InitInputAssemblyState(const Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE primitiveMode)
{
inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.primitiveRestartEnable = VK_TRUE;
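// primitive restart is enabled by default; the list-type topologies below turn it off again since restart indices only apply to strip/fan topologies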
switch (primitiveMode)
{
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::POINTS:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
inputAssembly.primitiveRestartEnable = false;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINES:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
inputAssembly.primitiveRestartEnable = false;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINE_STRIP:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINE_LOOP:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP; // line loops are emulated as line strips with an extra connecting strip at the end
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINE_STRIP_ADJACENT: // Tropical Freeze level 3-6
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLES:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
inputAssembly.primitiveRestartEnable = false;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLE_FAN:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLE_STRIP:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUADS:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // quads are emulated as 2 triangles
inputAssembly.primitiveRestartEnable = false;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUAD_STRIP:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // quad strips are emulated as (count-2)/2 triangles
inputAssembly.primitiveRestartEnable = false;
break;
case Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS:
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // rects are emulated as 2 triangles
inputAssembly.primitiveRestartEnable = false;
break;
default:
cemuLog_logDebug(LogType::Force, "Vulkan-Unsupported: Graphics pipeline with primitive mode {} created", primitiveMode);
cemu_assert_debug(false);
}
}
void PipelineCompiler::InitViewportState()
{
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.scissorCount = 1;
}
void PipelineCompiler::InitRasterizerState(const LatteContextRegister& latteRegister, VulkanRenderer* vkRenderer, bool isPrimitiveRect, bool& usesDepthBias)
{
// polygon control
const auto& polygonControlReg = latteRegister.PA_SU_SC_MODE_CNTL;
const auto frontFace = polygonControlReg.get_FRONT_FACE();
uint32 cullFront = polygonControlReg.get_CULL_FRONT();
uint32 cullBack = polygonControlReg.get_CULL_BACK();
uint32 polyOffsetFrontEnable = polygonControlReg.get_OFFSET_FRONT_ENABLED();
cemu_assert_debug(LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_NEAR_DISABLE() == LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_FAR_DISABLE()); // the hardware can disable near and far clipping individually, but Vulkan's depth clip state only toggles both at once
bool zClipEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_FAR_DISABLE() == false;
// z-clipping
rasterizerExt.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT;
rasterizerExt.depthClipEnable = zClipEnable;
rasterizerExt.flags = 0;
rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.pNext = &rasterizerExt;
rasterizer.rasterizerDiscardEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_RASTERIZATION_KILL();
// GX2SetSpecialState(0, true) workaround
if (!LatteGPUState.contextNew.PA_CL_VTE_CNTL.get_VPORT_X_OFFSET_ENA())
rasterizer.rasterizerDiscardEnable = false;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
if (vkRenderer->m_featureControl.deviceExtensions.nv_fill_rectangle && isPrimitiveRect)
rasterizer.polygonMode = VK_POLYGON_MODE_FILL_RECTANGLE_NV;
rasterizer.depthClampEnable = VK_TRUE; // depth clamping is always enabled
rasterizer.lineWidth = 1.0f; // TODO -> mmPA_SU_LINE_CNTL
usesDepthBias = polyOffsetFrontEnable;
if (polyOffsetFrontEnable)
{
rasterizer.depthBiasEnable = VK_TRUE;
// initialize to zero, set dynamically via vkCmdSetDepthBias
rasterizer.depthBiasConstantFactor = 0.0f;
rasterizer.depthBiasSlopeFactor = 0.0f;
rasterizer.depthBiasClamp = 0.0f;
}
else
rasterizer.depthBiasEnable = VK_FALSE;
// todo - how does culling behave with rects?
// right now we just assume that their winding is always CW
if (isPrimitiveRect)
{
if (frontFace == Latte::LATTE_PA_SU_SC_MODE_CNTL::E_FRONTFACE::CW)
cullFront = cullBack;
else
cullBack = cullFront;
}
if (cullFront && cullBack)
rasterizer.cullMode = VK_CULL_MODE_FRONT_AND_BACK;
else if (cullFront)
rasterizer.cullMode = VK_CULL_MODE_FRONT_BIT;
else if (cullBack)
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
else
rasterizer.cullMode = VK_CULL_MODE_NONE;
if (frontFace == Latte::LATTE_PA_SU_SC_MODE_CNTL::E_FRONTFACE::CCW)
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
else
rasterizer.frontFace = VK_FRONT_FACE_CLOCKWISE;
// multisampling
multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
}
bool _IsVkIntegerFormat(VkFormat fmt)
{
return
// 8bit integer formats
fmt == VK_FORMAT_R8_UINT || fmt == VK_FORMAT_R8_SINT ||
fmt == VK_FORMAT_R8G8_UINT || fmt == VK_FORMAT_R8G8_SINT ||
fmt == VK_FORMAT_R8G8B8_UINT || fmt == VK_FORMAT_R8G8B8_SINT ||
fmt == VK_FORMAT_R8G8B8A8_UINT || fmt == VK_FORMAT_R8G8B8A8_SINT ||
fmt == VK_FORMAT_B8G8R8A8_UINT || fmt == VK_FORMAT_B8G8R8A8_SINT ||
// 16bit integer formats
fmt == VK_FORMAT_R16_UINT || fmt == VK_FORMAT_R16_SINT ||
fmt == VK_FORMAT_R16G16_UINT || fmt == VK_FORMAT_R16G16_SINT ||
fmt == VK_FORMAT_R16G16B16_UINT || fmt == VK_FORMAT_R16G16B16_SINT ||
fmt == VK_FORMAT_R16G16B16A16_UINT || fmt == VK_FORMAT_R16G16B16A16_SINT ||
// 32bit integer formats
fmt == VK_FORMAT_R32_UINT || fmt == VK_FORMAT_R32_SINT ||
fmt == VK_FORMAT_R32G32_UINT || fmt == VK_FORMAT_R32G32_SINT ||
fmt == VK_FORMAT_R32G32B32_UINT || fmt == VK_FORMAT_R32G32B32_SINT ||
fmt == VK_FORMAT_R32G32B32A32_UINT || fmt == VK_FORMAT_R32G32B32A32_SINT;
}
void PipelineCompiler::InitBlendState(const LatteContextRegister& latteRegister, PipelineInfo* pipelineInfo, bool& usesBlendConstants, VKRObjectRenderPass* renderPassObj)
{
const Latte::LATTE_CB_COLOR_CONTROL& colorControlReg = latteRegister.CB_COLOR_CONTROL;
uint32 blendEnableMask = colorControlReg.get_BLEND_MASK();
uint32 renderTargetMask = latteRegister.CB_TARGET_MASK.get_MASK();
usesBlendConstants = false;
for (size_t i = 0; i < colorBlendAttachments.size(); i++)
{
auto& entry = colorBlendAttachments[i];
if (((blendEnableMask & (1 << i))) != 0)
entry.blendEnable = VK_TRUE;
else
entry.blendEnable = VK_FALSE;
if (entry.blendEnable != VK_FALSE && _IsVkIntegerFormat(renderPassObj->GetColorFormat(i)))
{
// force-disable blending for integer formats
entry.blendEnable = VK_FALSE;
}
const auto& blendControlReg = latteRegister.CB_BLENDN_CONTROL[i];
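// CB_TARGET_MASK packs a 4-bit RGBA write mask per render target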
entry.colorWriteMask = (renderTargetMask >> (i * 4)) & 0xF;
entry.colorBlendOp = GetVkBlendOp(blendControlReg.get_COLOR_COMB_FCN());
entry.srcColorBlendFactor = GetVkBlendFactor(blendControlReg.get_COLOR_SRCBLEND());
entry.dstColorBlendFactor = GetVkBlendFactor(blendControlReg.get_COLOR_DSTBLEND());
if (blendControlReg.get_SEPARATE_ALPHA_BLEND())
{
entry.alphaBlendOp = GetVkBlendOp(blendControlReg.get_ALPHA_COMB_FCN());
entry.srcAlphaBlendFactor = GetVkBlendFactor(blendControlReg.get_ALPHA_SRCBLEND());
entry.dstAlphaBlendFactor = GetVkBlendFactor(blendControlReg.get_ALPHA_DSTBLEND());
}
else
{
entry.alphaBlendOp = entry.colorBlendOp;
entry.srcAlphaBlendFactor = entry.srcColorBlendFactor;
entry.dstAlphaBlendFactor = entry.dstColorBlendFactor;
}
usesBlendConstants |= ConsumesBlendConstants(entry.srcColorBlendFactor);
usesBlendConstants |= ConsumesBlendConstants(entry.dstColorBlendFactor);
usesBlendConstants |= ConsumesBlendConstants(entry.srcAlphaBlendFactor);
usesBlendConstants |= ConsumesBlendConstants(entry.dstAlphaBlendFactor);
}
// setup VkPipelineColorBlendStateCreateInfo
colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
const auto logicOp = colorControlReg.get_ROP();
if (logicOp == Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::COPY)
{
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY;
}
else
{
colorBlending.logicOpEnable = VK_TRUE;
switch (logicOp)
{
case Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::SET:
colorBlending.logicOp = VK_LOGIC_OP_SET;
break;
case Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::CLEAR:
colorBlending.logicOp = VK_LOGIC_OP_CLEAR;
break;
case Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::OR:
colorBlending.logicOp = VK_LOGIC_OP_OR;
break;
default:
colorBlending.logicOp = VK_LOGIC_OP_COPY;
cemu_assert_unimplemented();
}
}
colorBlending.attachmentCount = colorBlendAttachments.size();
colorBlending.pAttachments = colorBlendAttachments.data();
// we use VK_DYNAMIC_STATE_BLEND_CONSTANTS, the blend constants here don't matter
colorBlending.blendConstants[0] = 0;
colorBlending.blendConstants[1] = 0;
colorBlending.blendConstants[2] = 0;
colorBlending.blendConstants[3] = 0;
}
void PipelineCompiler::InitDescriptorSetLayouts(VulkanRenderer* vkRenderer, PipelineInfo* vkrPipelineInfo, LatteDecompilerShader* vertexShader, LatteDecompilerShader* pixelShader, LatteDecompilerShader* geometryShader)
{
auto vkObjPipeline = vkrPipelineInfo->m_vkrObjPipeline;
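// descriptor set indices are fixed per stage: 0 = vertex, 1 = pixel, 2 = geometry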
if (vertexShader)
{
cemu_assert_debug(descriptorSetLayoutCount == 0);
CreateDescriptorSetLayout(vkRenderer, vertexShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo);
vkObjPipeline->vertexDSL = descriptorSetLayout[descriptorSetLayoutCount];
descriptorSetLayoutCount++;
}
if (pixelShader)
{
cemu_assert_debug(descriptorSetLayoutCount == 1);
CreateDescriptorSetLayout(vkRenderer, pixelShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo);
vkObjPipeline->pixelDSL = descriptorSetLayout[descriptorSetLayoutCount];
descriptorSetLayoutCount++;
}
else if (geometryShader)
{
// if no pixel shader is present, create empty placeholder descriptor set layout (geometry shader set must be at index 2)
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = 0;
layoutInfo.pBindings = nullptr;
if (vkCreateDescriptorSetLayout(vkRenderer->m_logicalDevice, &layoutInfo, nullptr, &descriptorSetLayout[descriptorSetLayoutCount]) != VK_SUCCESS)
vkRenderer->UnrecoverableError(fmt::format("Failed to create placeholder descriptor set layout for shader {0:#x}", geometryShader->baseHash).c_str());
descriptorSetLayoutCount++;
}
if (geometryShader)
{
cemu_assert_debug(descriptorSetLayoutCount == 2);
CreateDescriptorSetLayout(vkRenderer, geometryShader, descriptorSetLayout[descriptorSetLayoutCount], vkrPipelineInfo);
vkObjPipeline->geometryDSL = descriptorSetLayout[descriptorSetLayoutCount];
descriptorSetLayoutCount++;
}
}
void PipelineCompiler::InitDepthStencilState()
{
// get depth control parameters
bool depthEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_ENABLE();
auto depthFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_FUNC();
bool depthWriteEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_WRITE_ENABLE();
// setup VkPipelineDepthStencilStateCreateInfo
depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depthStencilState.depthTestEnable = depthEnable ? VK_TRUE : VK_FALSE;
depthStencilState.depthWriteEnable = depthWriteEnable ? VK_TRUE : VK_FALSE;
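// the 3-bit Latte Z_FUNC value indexes directly into this VkCompareOp table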
static const VkCompareOp vkDepthCompareTable[8] =
{
VK_COMPARE_OP_NEVER,
VK_COMPARE_OP_LESS,
VK_COMPARE_OP_EQUAL,
VK_COMPARE_OP_LESS_OR_EQUAL,
VK_COMPARE_OP_GREATER,
VK_COMPARE_OP_NOT_EQUAL,
VK_COMPARE_OP_GREATER_OR_EQUAL,
VK_COMPARE_OP_ALWAYS
};
depthStencilState.depthCompareOp = vkDepthCompareTable[(size_t)depthFunc];
depthStencilState.depthBoundsTestEnable = false; // todo
depthStencilState.minDepthBounds = 0.0f;
depthStencilState.maxDepthBounds = 1.0f;
// get stencil control parameters
bool stencilEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ENABLE();
bool backStencilEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_BACK_STENCIL_ENABLE();
auto frontStencilFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FUNC_F();
auto frontStencilZPass = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZPASS_F();
auto frontStencilZFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZFAIL_F();
auto frontStencilFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FAIL_F();
auto backStencilFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FUNC_B();
auto backStencilZPass = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZPASS_B();
auto backStencilZFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZFAIL_B();
auto backStencilFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FAIL_B();
// get stencil control parameters
uint32 stencilCompareMaskFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILMASK_F();
uint32 stencilWriteMaskFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILWRITEMASK_F();
uint32 stencilRefFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILREF_F();
uint32 stencilCompareMaskBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILMASK_B();
uint32 stencilWriteMaskBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILWRITEMASK_B();
uint32 stencilRefBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILREF_B();
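// Latte stencil operations map 1:1 onto VkStencilOp via this table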
static const VkStencilOp stencilOpTable[8] = {
VK_STENCIL_OP_KEEP,
VK_STENCIL_OP_ZERO,
VK_STENCIL_OP_REPLACE,
VK_STENCIL_OP_INCREMENT_AND_CLAMP,
VK_STENCIL_OP_DECREMENT_AND_CLAMP,
VK_STENCIL_OP_INVERT,
VK_STENCIL_OP_INCREMENT_AND_WRAP,
VK_STENCIL_OP_DECREMENT_AND_WRAP
};
depthStencilState.stencilTestEnable = stencilEnable ? VK_TRUE : VK_FALSE;
depthStencilState.front.reference = stencilRefFront;
depthStencilState.front.compareMask = stencilCompareMaskFront;
depthStencilState.front.writeMask = stencilWriteMaskFront;
depthStencilState.front.compareOp = vkDepthCompareTable[(size_t)frontStencilFunc];
depthStencilState.front.depthFailOp = stencilOpTable[(size_t)frontStencilZFail];
depthStencilState.front.failOp = stencilOpTable[(size_t)frontStencilFail];
depthStencilState.front.passOp = stencilOpTable[(size_t)frontStencilZPass];
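// if no separate back-face stencil state is enabled, reuse the front-face state for both sides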
if (backStencilEnable)
{
depthStencilState.back.reference = stencilRefBack;
depthStencilState.back.compareMask = stencilCompareMaskBack;
depthStencilState.back.writeMask = stencilWriteMaskBack;
depthStencilState.back.compareOp = vkDepthCompareTable[(size_t)backStencilFunc];
depthStencilState.back.depthFailOp = stencilOpTable[(size_t)backStencilZFail];
depthStencilState.back.failOp = stencilOpTable[(size_t)backStencilFail];
depthStencilState.back.passOp = stencilOpTable[(size_t)backStencilZPass];
}
else
{
depthStencilState.back.reference = stencilRefFront;
depthStencilState.back.compareMask = stencilCompareMaskFront;
depthStencilState.back.writeMask = stencilWriteMaskFront;
depthStencilState.back.compareOp = vkDepthCompareTable[(size_t)frontStencilFunc];
depthStencilState.back.depthFailOp = stencilOpTable[(size_t)frontStencilZFail];
depthStencilState.back.failOp = stencilOpTable[(size_t)frontStencilFail];
depthStencilState.back.passOp = stencilOpTable[(size_t)frontStencilZPass];
}
}
void PipelineCompiler::InitDynamicState(PipelineInfo* pipelineInfo, bool usesBlendConstants, bool usesDepthBias)
{
if (usesBlendConstants)
{
dynamicStates.emplace_back(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
pipelineInfo->usesBlendConstants = true;
}
if (usesDepthBias)
{
dynamicStates.emplace_back(VK_DYNAMIC_STATE_DEPTH_BIAS);
pipelineInfo->usesDepthBias = true;
}
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.dynamicStateCount = dynamicStates.size();
dynamicState.pDynamicStates = dynamicStates.data();
}
bool PipelineCompiler::InitFromCurrentGPUState(PipelineInfo* pipelineInfo, const LatteContextRegister& latteRegister, VKRObjectRenderPass* renderPassObj)
{
VulkanRenderer* vkRenderer = VulkanRenderer::GetInstance();
// ##########################################################################################################################################
bool isPrimitiveRect = false;
const auto primitiveMode = latteRegister.VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE();
isPrimitiveRect = (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS);
m_fetchShader = pipelineInfo->fetchShader;
m_vkVertexShader = pipelineInfo->vertexShaderVk;
m_vkPixelShader = pipelineInfo->pixelShaderVk;
m_vkGeometryShader = pipelineInfo->geometryShaderVk;
m_vkrObjPipeline = pipelineInfo->m_vkrObjPipeline;
m_renderPassObj = renderPassObj;
// if required generate RECT emulation geometry shader
if (!vkRenderer->m_featureControl.deviceExtensions.nv_fill_rectangle && isPrimitiveRect)
{
cemu_assert(m_vkGeometryShader == nullptr); // todo - handle cases where the game already provides a GS
m_rectEmulationGS = rectsEmulationGS_generate(pipelineInfo->vertexShader, latteRegister);
pipelineInfo->rectEmulationGS = m_rectEmulationGS;
}
// ##########################################################################################################################################
pipelineInfo->primitiveMode = primitiveMode;
InitVertexInputState(latteRegister, pipelineInfo->vertexShader, pipelineInfo->fetchShader);
InitInputAssemblyState(primitiveMode);
InitViewportState();
bool usesDepthBias = false;
InitRasterizerState(latteRegister, vkRenderer, isPrimitiveRect, usesDepthBias);
bool usesBlendConstants = false;
InitBlendState(latteRegister, pipelineInfo, usesBlendConstants, renderPassObj);
InitDescriptorSetLayouts(vkRenderer, pipelineInfo, pipelineInfo->vertexShader, pipelineInfo->pixelShader, pipelineInfo->geometryShader);
// ##########################################################################################################################################
VkPipelineLayoutCreateInfo pipelineLayoutInfo{};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = descriptorSetLayoutCount;
pipelineLayoutInfo.pSetLayouts = descriptorSetLayout;
pipelineLayoutInfo.pPushConstantRanges = nullptr;
pipelineLayoutInfo.pushConstantRangeCount = 0;
VkResult result = vkCreatePipelineLayout(vkRenderer->m_logicalDevice, &pipelineLayoutInfo, nullptr, &m_pipeline_layout);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create pipeline layout: {}", result);
s_nvidiaWorkaround.unlock();
return false;
}
// ###################################################
InitDepthStencilState();
// ##########################################################################################################################################
InitDynamicState(pipelineInfo, usesBlendConstants, usesDepthBias);
// ##########################################################################################################################################
pipelineInfo->m_vkrObjPipeline->pipeline_layout = m_pipeline_layout;
// increment ref counter for vkrObjPipeline and renderpass object to make sure they dont get released while we are using them
m_vkrObjPipeline->incRef();
renderPassObj->incRef();
return true;
}
bool PipelineCompiler::Compile(bool forceCompile, bool isRenderThread, bool showInOverlay)
{
VulkanRenderer* vkRenderer = VulkanRenderer::GetInstance();
if (!vkRenderer->m_featureControl.deviceExtensions.pipeline_creation_cache_control)
forceCompile = true; // if VK_EXT_pipeline_creation_cache_control is not supported we always force synchronous compilation
if (!forceCompile)
{
// fail early if some shader stages are not compiled
if (m_vkVertexShader && m_vkVertexShader->IsCompiled() == false)
return false;
if (m_vkPixelShader && m_vkPixelShader->IsCompiled() == false)
return false;
if (m_vkGeometryShader && m_vkGeometryShader->IsCompiled() == false)
return false;
}
else
{
// if some shader stages are not compiled yet, compile them now
if (m_vkVertexShader && m_vkVertexShader->IsCompiled() == false)
m_vkVertexShader->PreponeCompilation(isRenderThread);
if (m_vkPixelShader && m_vkPixelShader->IsCompiled() == false)
m_vkPixelShader->PreponeCompilation(isRenderThread);
if (m_vkGeometryShader && m_vkGeometryShader->IsCompiled() == false)
m_vkGeometryShader->PreponeCompilation(isRenderThread);
}
if (shaderStages.empty())
{
if (!InitShaderStages(vkRenderer, m_vkVertexShader, m_vkPixelShader, m_vkGeometryShader))
return true; // invalid shaders, cannot compile
}
VkGraphicsPipelineCreateInfo pipelineInfo{};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = shaderStages.size();
pipelineInfo.pStages = shaderStages.data();
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pDynamicState = &dynamicState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.layout = m_pipeline_layout;
pipelineInfo.renderPass = m_renderPassObj->m_renderPass;
pipelineInfo.pDepthStencilState = &depthStencilState;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = nullptr;
pipelineInfo.flags = 0;
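// for asynchronous compilation ask the driver to fail fast (instead of blocking) when the pipeline is not already in the cache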
if (!forceCompile)
pipelineInfo.flags |= VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
VkPipelineCreationFeedbackCreateInfoEXT creationFeedbackInfo;
VkPipelineCreationFeedbackEXT creationFeedback;
std::vector<VkPipelineCreationFeedbackEXT> creationStageFeedback(0);
if (vkRenderer->m_featureControl.deviceExtensions.pipeline_feedback)
{
creationFeedback = {};
creationFeedback.flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
		creationStageFeedback.resize(pipelineInfo.stageCount); // size (not just reserve) the vector so the per-stage entries written below and handed to the driver are valid elements
		for (uint32_t i = 0; i < pipelineInfo.stageCount; ++i)
			creationStageFeedback[i] = { VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT, 0 };
creationFeedbackInfo = {};
creationFeedbackInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT;
creationFeedbackInfo.pPipelineCreationFeedback = &creationFeedback;
creationFeedbackInfo.pPipelineStageCreationFeedbacks = creationStageFeedback.data();
creationFeedbackInfo.pipelineStageCreationFeedbackCount = pipelineInfo.stageCount;
pipelineInfo.pNext = &creationFeedbackInfo;
}
VkPipeline pipeline = VK_NULL_HANDLE;
VkResult result;
uint8 retryCount = 0;
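// retry pipeline creation a few times if the driver reports an out-of-device-memory condition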
while (retryCount < 3)
{
std::shared_lock lock(vkRenderer->m_pipeline_cache_save_mutex);
result = vkCreateGraphicsPipelines(vkRenderer->m_logicalDevice, vkRenderer->m_pipeline_cache, 1, &pipelineInfo, nullptr, &pipeline);
lock.unlock();
if (result != VK_ERROR_OUT_OF_DEVICE_MEMORY)
break;
retryCount++;
}
if (result == VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT)
{
return false;
}
else if (result == VK_SUCCESS)
{
m_vkrObjPipeline->setPipeline(pipeline);
}
else
{
cemuLog_log(LogType::Force, "Failed to create graphics pipeline. Error {}", (sint32)result);
cemu_assert_debug(false);
return true; // true indicates that caller should no longer attempt to compile this pipeline again
}
vkRenderer->m_pipeline_cache_semaphore.notify();
if (vkRenderer->m_featureControl.deviceExtensions.pipeline_feedback)
{
if (HAS_FLAG(creationFeedback.flags, VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT))
{
bool hasCacheHit = HAS_FLAG(creationFeedback.flags, VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT);
if (!hasCacheHit)
{
if (showInOverlay)
{
if (isRenderThread)
g_compiling_pipelines_syncTimeSum += creationFeedback.duration;
else
g_compiling_pipelines_async++;
g_compiling_pipelines++;
}
}
}
}
return true;
}
void PipelineCompiler::TrackAsCached(uint64 baseHash, uint64 pipelineStateHash)
{
auto& pipelineCache = VulkanPipelineStableCache::GetInstance();
if (pipelineCache.HasPipelineCached(baseHash, pipelineStateHash))
return;
pipelineCache.AddCurrentStateToCache(baseHash, pipelineStateHash);
}
// File: cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/VulkanSurfaceCopy.cpp
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
struct CopyShaderPushConstantData_t
{
float vertexOffsets[4 * 2];
sint32 srcTexelOffset[2];
};
struct CopySurfacePipelineInfo
{
template<typename T>
struct TexSliceMipMapping
{
TexSliceMipMapping(LatteTextureVk* texture) : m_texture(texture) {};
~TexSliceMipMapping()
{
//delete vkObjPipeline;
//delete vkObjRenderPass;
for (auto itr : m_array)
{
if (itr != nullptr)
delete itr;
}
}
T* create(sint32 sliceIndex, sint32 mipIndex)
{
sint32 idx = m_texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
if (idx >= m_array.size())
m_array.resize(idx + 1);
T* v = new T();
m_array[idx] = v;
return v;
}
T* get(sint32 sliceIndex, sint32 mipIndex) const
{
sint32 idx = m_texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
if (idx >= m_array.size())
return nullptr;
return m_array[idx];
}
TexSliceMipMapping(const TexSliceMipMapping&) = delete;
TexSliceMipMapping& operator=(const TexSliceMipMapping&) = delete;
TexSliceMipMapping(TexSliceMipMapping&& rhs)
{
m_texture = rhs.m_texture;
m_array = std::move(rhs.m_array);
}
TexSliceMipMapping& operator=(TexSliceMipMapping&& rhs)
{
m_texture = rhs.m_texture;
m_array = std::move(rhs.m_array);
return *this;
}
LatteTextureVk* m_texture;
std::vector<T*> m_array;
};
struct FramebufferValue
{
VKRObjectFramebuffer* vkObjFramebuffer;
VKRObjectTextureView* vkObjImageView;
};
struct DescriptorValue
{
VKRObjectDescriptorSet* vkObjDescriptorSet;
VKRObjectTextureView* vkObjImageView;
//VKRObjectSampler* vkObjSampler;
};
CopySurfacePipelineInfo() = default;
CopySurfacePipelineInfo(VkDevice device) : m_device(device) {}
CopySurfacePipelineInfo(const CopySurfacePipelineInfo& info) = delete;
VkDevice m_device = nullptr;
VKRObjectPipeline* vkObjPipeline{};
VKRObjectRenderPass* vkObjRenderPass{};
// map of framebuffers used with this pipeline
std::unordered_map<LatteTextureVk*, TexSliceMipMapping<FramebufferValue>> map_framebuffers;
// map of descriptor sets used with this pipeline
std::unordered_map<LatteTextureVk*, TexSliceMipMapping<DescriptorValue>> map_descriptors;
};
struct VkCopySurfaceState_t
{
LatteTextureVk* sourceTexture;
sint32 srcMip;
sint32 srcSlice;
LatteTextureVk* destinationTexture;
sint32 dstMip;
sint32 dstSlice;
sint32 width;
sint32 height;
};
extern std::atomic_int g_compiling_pipelines;
uint64 VulkanRenderer::copySurface_getPipelineStateHash(VkCopySurfaceState_t& state)
{
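	// the copy pipeline only depends on the destination format and on whether source/destination are depth textures, so only those fields go into the hash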
uint64 h = 0;
h += (uintptr_t)state.destinationTexture->GetFormat();
h = std::rotr<uint64>(h, 7);
h += state.sourceTexture->isDepth ? 0x1111ull : 0;
h = std::rotr<uint64>(h, 7);
h += state.destinationTexture->isDepth ? 0x1112ull : 0;
h = std::rotr<uint64>(h, 7);
return h;
}
CopySurfacePipelineInfo* VulkanRenderer::copySurface_getCachedPipeline(VkCopySurfaceState_t& state)
{
const uint64 stateHash = copySurface_getPipelineStateHash(state);
const auto it = m_copySurfacePipelineCache.find(stateHash);
if (it == m_copySurfacePipelineCache.cend())
return nullptr;
return it->second;
}
RendererShaderVk* _vkGenSurfaceCopyShader_vs()
{
const char* vsShaderSrc =
"#version 450\r\n"
"layout(location = 0) out ivec2 passSrcTexelOffset;\r\n"
"layout(push_constant) uniform pushConstants {\r\n"
"vec2 vertexOffsets[4];\r\n"
"ivec2 srcTexelOffset;\r\n"
"}uf_pushConstants;\r\n"
"\r\n"
"void main(){\r\n"
//"ivec2 tUV;\r\n"
"vec2 tPOS;\r\n"
"switch(gl_VertexIndex)"
"{\r\n"
// AMD driver has issues with indexed push constant access, therefore use this workaround
"case 0: tPOS = uf_pushConstants.vertexOffsets[0].xy; break;\r\n"
"case 1: tPOS = uf_pushConstants.vertexOffsets[1].xy; break;\r\n"
"case 2: tPOS = uf_pushConstants.vertexOffsets[3].xy; break;\r\n"
"case 3: tPOS = uf_pushConstants.vertexOffsets[0].xy; break;\r\n"
"case 4: tPOS = uf_pushConstants.vertexOffsets[2].xy; break;\r\n"
"case 5: tPOS = uf_pushConstants.vertexOffsets[3].xy; break;\r\n"
"}"
"passSrcTexelOffset = uf_pushConstants.srcTexelOffset;\r\n"
"gl_Position = vec4(tPOS, 0, 1.0);\r\n"
"}\r\n";
std::string shaderStr(vsShaderSrc);
auto vkShader = new RendererShaderVk(RendererShader::ShaderType::kVertex, 0, 0, false, false, shaderStr);
vkShader->PreponeCompilation(true);
return vkShader;
}
RendererShaderVk* _vkGenSurfaceCopyShader_ps_colorToDepth()
{
const char* psShaderSrc = ""
"#version 450\r\n"
"layout(location = 0) in flat ivec2 passSrcTexelOffset;\r\n"
"layout(binding = 0) uniform sampler2D textureSrc;\r\n"
"in vec4 gl_FragCoord;\r\n"
"\r\n"
"void main(){\r\n"
"gl_FragDepth = texelFetch(textureSrc, passSrcTexelOffset + ivec2(gl_FragCoord.xy), 0).r;\r\n"
"}\r\n";
std::string shaderStr(psShaderSrc);
auto vkShader = new RendererShaderVk(RendererShader::ShaderType::kFragment, 0, 0, false, false, shaderStr);
vkShader->PreponeCompilation(true);
return vkShader;
}
RendererShaderVk* _vkGenSurfaceCopyShader_ps_depthToColor()
{
const char* psShaderSrc = ""
"#version 450\r\n"
"layout(location = 0) in flat ivec2 passSrcTexelOffset;\r\n"
"layout(binding = 0) uniform sampler2D textureSrc;\r\n"
"layout(location = 0) out vec4 colorOut0;\r\n"
"in vec4 gl_FragCoord;\r\n"
"\r\n"
"void main(){\r\n"
"colorOut0.r = texelFetch(textureSrc, passSrcTexelOffset + ivec2(gl_FragCoord.xy), 0).r;\r\n"
"}\r\n";
std::string shaderStr(psShaderSrc);
auto vkShader = new RendererShaderVk(RendererShader::ShaderType::kFragment, 0, 0, false, false, shaderStr);
vkShader->PreponeCompilation(true);
return vkShader;
}
VKRObjectRenderPass* VulkanRenderer::copySurface_createRenderpass(VkCopySurfaceState_t& state)
{
VKRObjectRenderPass::AttachmentInfo_t attachmentInfo{};
if (state.destinationTexture->isDepth)
{
attachmentInfo.depthAttachment.viewObj = ((LatteTextureViewVk*)state.destinationTexture->baseView)->GetViewRGBA();
attachmentInfo.depthAttachment.format = state.destinationTexture->GetFormat();
attachmentInfo.depthAttachment.hasStencil = state.destinationTexture->hasStencil;
}
else
{
attachmentInfo.colorAttachment[0].viewObj = ((LatteTextureViewVk*)state.destinationTexture->baseView)->GetViewRGBA();
attachmentInfo.colorAttachment[0].format = state.destinationTexture->GetFormat();
}
VKRObjectRenderPass* vkObjRenderPass = new VKRObjectRenderPass(attachmentInfo, 1);
return vkObjRenderPass;
}
CopySurfacePipelineInfo* VulkanRenderer::copySurface_getOrCreateGraphicsPipeline(VkCopySurfaceState_t& state)
{
auto cache_object = copySurface_getCachedPipeline(state);
if (cache_object != nullptr)
return cache_object;
if (defaultShaders.copySurface_vs == nullptr)
{
// on first call generate shaders
defaultShaders.copySurface_vs = _vkGenSurfaceCopyShader_vs();
defaultShaders.copySurface_psColor2Depth = _vkGenSurfaceCopyShader_ps_colorToDepth();
defaultShaders.copySurface_psDepth2Color = _vkGenSurfaceCopyShader_ps_depthToColor();
}
RendererShaderVk* vertexShader = defaultShaders.copySurface_vs;
RendererShaderVk* pixelShader = nullptr;
if (state.sourceTexture->isDepth && !state.destinationTexture->isDepth)
pixelShader = defaultShaders.copySurface_psDepth2Color;
else if (!state.sourceTexture->isDepth && state.destinationTexture->isDepth)
pixelShader = defaultShaders.copySurface_psColor2Depth;
else
{
cemu_assert(false);
}
std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
shaderStages.emplace_back(CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_VERTEX_BIT, vertexShader->GetShaderModule(), "main"));
shaderStages.emplace_back(CreatePipelineShaderStageCreateInfo(VK_SHADER_STAGE_FRAGMENT_BIT, pixelShader->GetShaderModule(), "main"));
// ##########################################################################################################################################
const uint64 stateHash = copySurface_getPipelineStateHash(state);
CopySurfacePipelineInfo* copyPipeline = new CopySurfacePipelineInfo();
m_copySurfacePipelineCache.try_emplace(stateHash, copyPipeline);
VKRObjectPipeline* vkObjPipeline = new VKRObjectPipeline();
// ##########################################################################################################################################
VkPipelineVertexInputStateCreateInfo vertexInputInfo{};
vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertexInputInfo.vertexBindingDescriptionCount = 0;
vertexInputInfo.pVertexBindingDescriptions = nullptr;
vertexInputInfo.vertexAttributeDescriptionCount = 0;
vertexInputInfo.pVertexAttributeDescriptions = nullptr;
// ##########################################################################################################################################
VkPipelineInputAssemblyStateCreateInfo inputAssembly{};
inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.primitiveRestartEnable = VK_FALSE;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
// ##########################################################################################################################################
VkPipelineViewportStateCreateInfo viewportState{};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.scissorCount = 1;
// ##########################################################################################################################################
VkPipelineRasterizationStateCreateInfo rasterizer{};
rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_NONE;
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
// ##########################################################################################################################################
VkPipelineMultisampleStateCreateInfo multisampling{};
multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
// ##########################################################################################################################################
VkPipelineColorBlendStateCreateInfo colorBlending{};
VkPipelineColorBlendAttachmentState blendAttachment{};
if (!state.destinationTexture->isDepth)
{
blendAttachment.blendEnable = VK_FALSE;
blendAttachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT;
colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &blendAttachment;
colorBlending.logicOpEnable = VK_FALSE;
}
// ##########################################################################################################################################
std::vector<VkDescriptorSetLayoutBinding> descriptorSetLayoutBindings;
VkDescriptorSetLayoutBinding entry{};
entry.binding = 0;
entry.descriptorCount = 1;
entry.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
entry.pImmutableSamplers = nullptr;
entry.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
descriptorSetLayoutBindings.emplace_back(entry);
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = (uint32_t)descriptorSetLayoutBindings.size();
layoutInfo.pBindings = descriptorSetLayoutBindings.data();
if (vkCreateDescriptorSetLayout(m_logicalDevice, &layoutInfo, nullptr, &vkObjPipeline->pixelDSL) != VK_SUCCESS)
UnrecoverableError(fmt::format("Failed to create descriptor set layout for surface copy shader").c_str());
// ##########################################################################################################################################
VkPushConstantRange pushConstantRange{};
pushConstantRange.offset = 0;
pushConstantRange.size = sizeof(CopyShaderPushConstantData_t);
pushConstantRange.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkPipelineLayoutCreateInfo pipelineLayoutInfo{};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1;
pipelineLayoutInfo.pSetLayouts = &vkObjPipeline->pixelDSL;
pipelineLayoutInfo.pPushConstantRanges = &pushConstantRange;
pipelineLayoutInfo.pushConstantRangeCount = 1;
VkResult result = vkCreatePipelineLayout(m_logicalDevice, &pipelineLayoutInfo, nullptr, &vkObjPipeline->pipeline_layout);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create pipeline layout: {}", result);
vkObjPipeline->pipeline = VK_NULL_HANDLE;
return copyPipeline;
}
// ###################################################
bool writeDepth = state.destinationTexture->isDepth;
VkPipelineDepthStencilStateCreateInfo depthStencilState{};
depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depthStencilState.depthTestEnable = writeDepth ? VK_TRUE : VK_FALSE;
depthStencilState.depthWriteEnable = writeDepth ? VK_TRUE : VK_FALSE;
depthStencilState.depthCompareOp = VK_COMPARE_OP_ALWAYS;
depthStencilState.depthBoundsTestEnable = false;
depthStencilState.minDepthBounds = 0.0f;
depthStencilState.maxDepthBounds = 1.0f;
depthStencilState.stencilTestEnable = VK_FALSE;
// ##########################################################################################################################################
std::vector<VkDynamicState> dynamicStates = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkPipelineDynamicStateCreateInfo dynamicState = {};
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.dynamicStateCount = (uint32_t)dynamicStates.size();
dynamicState.pDynamicStates = dynamicStates.data();
// ##########################################################################################################################################
copyPipeline->vkObjRenderPass = copySurface_createRenderpass(state);
vkObjPipeline->addRef(copyPipeline->vkObjRenderPass);
// ###########################################################
VkGraphicsPipelineCreateInfo pipelineInfo{};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = (uint32_t)shaderStages.size();
pipelineInfo.pStages = shaderStages.data();
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pDynamicState = &dynamicState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pColorBlendState = state.destinationTexture->isDepth?nullptr:&colorBlending;
pipelineInfo.layout = vkObjPipeline->pipeline_layout;
pipelineInfo.renderPass = copyPipeline->vkObjRenderPass->m_renderPass;
pipelineInfo.pDepthStencilState = &depthStencilState;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = nullptr;
pipelineInfo.flags = 0;
copyPipeline->vkObjPipeline = vkObjPipeline;
result = vkCreateGraphicsPipelines(m_logicalDevice, m_pipeline_cache, 1, &pipelineInfo, nullptr, &copyPipeline->vkObjPipeline->pipeline);
if (result != VK_SUCCESS)
{
cemuLog_log(LogType::Force, "Failed to create graphics pipeline for surface copy. Error {} Info:", (sint32)result);
cemu_assert_debug(false);
copyPipeline->vkObjPipeline->pipeline = VK_NULL_HANDLE;
}
//performanceMonitor.vk.numGraphicPipelines.increment();
//m_pipeline_cache_semaphore.notify();
return copyPipeline;
}
VKRObjectTextureView* VulkanRenderer::surfaceCopy_createImageView(LatteTextureVk* textureVk, uint32 sliceIndex, uint32 mipIndex)
{
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.image = textureVk->GetImageObj()->m_image;
viewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewCreateInfo.format = textureVk->GetFormat();
viewCreateInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
viewCreateInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
viewCreateInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
viewCreateInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
if (textureVk->isDepth)
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
else
viewCreateInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewCreateInfo.subresourceRange.baseMipLevel = mipIndex;
viewCreateInfo.subresourceRange.levelCount = 1;
viewCreateInfo.subresourceRange.baseArrayLayer = sliceIndex;
viewCreateInfo.subresourceRange.layerCount = 1;
VkImageView imageView;
if (vkCreateImageView(m_logicalDevice, &viewCreateInfo, nullptr, &imageView) != VK_SUCCESS)
UnrecoverableError("Failed to create framebuffer image view for copy surface operation");
return new VKRObjectTextureView(textureVk->GetImageObj(), imageView);
}
VKRObjectFramebuffer* VulkanRenderer::surfaceCopy_getOrCreateFramebuffer(VkCopySurfaceState_t& state, CopySurfacePipelineInfo* pipelineInfo)
{
auto itr = pipelineInfo->map_framebuffers.find(state.destinationTexture);
if (itr != pipelineInfo->map_framebuffers.end())
{
auto p = itr->second.get(state.dstSlice, state.dstMip);
if (p != nullptr)
return p->vkObjFramebuffer;
}
// create view
VKRObjectTextureView* vkObjTextureView = surfaceCopy_createImageView(state.destinationTexture, state.dstSlice, state.dstMip);
// create new framebuffer
sint32 effectiveWidth, effectiveHeight;
state.destinationTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, state.dstMip);
std::array<VKRObjectTextureView*, 1> fbAttachments;
fbAttachments[0] = vkObjTextureView;
VKRObjectFramebuffer* vkObjFramebuffer = new VKRObjectFramebuffer(pipelineInfo->vkObjRenderPass, fbAttachments, Vector2i(effectiveWidth, effectiveHeight));
// register
auto insertResult = pipelineInfo->map_framebuffers.try_emplace(state.destinationTexture, state.destinationTexture);
CopySurfacePipelineInfo::FramebufferValue* framebufferVal = insertResult.first->second.create(state.dstSlice, state.dstMip);
framebufferVal->vkObjFramebuffer = vkObjFramebuffer;
framebufferVal->vkObjImageView = vkObjTextureView;
return vkObjFramebuffer;
}
VKRObjectDescriptorSet* VulkanRenderer::surfaceCopy_getOrCreateDescriptorSet(VkCopySurfaceState_t& state, CopySurfacePipelineInfo* pipelineInfo)
{
auto itr = pipelineInfo->map_descriptors.find(state.sourceTexture);
if (itr != pipelineInfo->map_descriptors.end())
{
auto p = itr->second.get(state.srcSlice, state.srcMip);
if (p != nullptr)
return p->vkObjDescriptorSet;
}
VKRObjectDescriptorSet* vkObjDescriptorSet = new VKRObjectDescriptorSet();
// allocate new descriptor set
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = m_descriptorPool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &(pipelineInfo->vkObjPipeline->pixelDSL);
if (vkAllocateDescriptorSets(m_logicalDevice, &allocInfo, &vkObjDescriptorSet->descriptorSet) != VK_SUCCESS)
{
UnrecoverableError("failed to allocate descriptor set for surface copy operation");
}
// create view
VKRObjectTextureView* vkObjImageView = surfaceCopy_createImageView(state.sourceTexture, state.srcSlice, state.srcMip);
vkObjDescriptorSet->addRef(vkObjImageView);
// create sampler
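// nearest/clamp sampler; the copy shaders fetch texels via texelFetch so the filter state has no effect, but the combined image sampler binding still requires a sampler object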
VkSamplerCreateInfo samplerInfo{};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
samplerInfo.minLod = 0;
samplerInfo.maxLod = 0;
samplerInfo.minFilter = VK_FILTER_NEAREST;
samplerInfo.magFilter = VK_FILTER_NEAREST;
samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
samplerInfo.anisotropyEnable = VK_FALSE;
samplerInfo.maxAnisotropy = 1.0f;
samplerInfo.mipLodBias = 0;
samplerInfo.compareEnable = VK_FALSE;
samplerInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
if (vkCreateSampler(m_logicalDevice, &samplerInfo, nullptr, &vkObjImageView->m_textureDefaultSampler[0]) != VK_SUCCESS)
UnrecoverableError("Failed to create texture sampler for surface copy operation");
// create descriptor image info
VkDescriptorImageInfo descriptorImageInfo{};
descriptorImageInfo.sampler = vkObjImageView->m_textureDefaultSampler[0];
descriptorImageInfo.imageView = vkObjImageView->m_textureImageView;
descriptorImageInfo.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
VkWriteDescriptorSet write_descriptor{};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = vkObjDescriptorSet->descriptorSet;
write_descriptor.dstBinding = 0;
write_descriptor.dstArrayElement = 0;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
write_descriptor.descriptorCount = 1;
write_descriptor.pImageInfo = &descriptorImageInfo;
vkUpdateDescriptorSets(m_logicalDevice, 1, &write_descriptor, 0, nullptr);
// register
auto insertResult = pipelineInfo->map_descriptors.try_emplace(state.sourceTexture, state.sourceTexture);
CopySurfacePipelineInfo::DescriptorValue* descriptorValue = insertResult.first->second.create(state.srcSlice, state.srcMip);
descriptorValue->vkObjDescriptorSet = vkObjDescriptorSet;
descriptorValue->vkObjImageView = vkObjImageView;
return vkObjDescriptorSet;
}
void VulkanRenderer::surfaceCopy_viaDrawcall(LatteTextureVk* srcTextureVk, sint32 texSrcMip, sint32 texSrcSlice, LatteTextureVk* dstTextureVk, sint32 texDstMip, sint32 texDstSlice, sint32 effectiveCopyWidth, sint32 effectiveCopyHeight)
{
draw_endRenderPass();
//debug_printf("surfaceCopy_viaDrawcall Src %04d %04d Dst %04d %04d CopySize %04d %04d\n", srcTextureVk->width, srcTextureVk->height, dstTextureVk->width, dstTextureVk->height, effectiveCopyWidth, effectiveCopyHeight);
VkImageSubresourceLayers srcImageSubresource;
srcImageSubresource.aspectMask = srcTextureVk->GetImageAspect();
srcImageSubresource.baseArrayLayer = texSrcSlice;
srcImageSubresource.mipLevel = texSrcMip;
srcImageSubresource.layerCount = 1;
VkImageSubresourceLayers dstImageSubresource;
dstImageSubresource.aspectMask = dstTextureVk->GetImageAspect();
dstImageSubresource.baseArrayLayer = texDstSlice;
dstImageSubresource.mipLevel = texDstMip;
dstImageSubresource.layerCount = 1;
VkCopySurfaceState_t copySurfaceState;
copySurfaceState.sourceTexture = srcTextureVk;
copySurfaceState.srcSlice = texSrcSlice;
copySurfaceState.srcMip = texSrcMip;
copySurfaceState.destinationTexture = dstTextureVk;
copySurfaceState.dstSlice = texDstSlice;
copySurfaceState.dstMip = texDstMip;
copySurfaceState.width = effectiveCopyWidth;
copySurfaceState.height = effectiveCopyHeight;
CopySurfacePipelineInfo* copySurfacePipelineInfo = copySurface_getOrCreateGraphicsPipeline(copySurfaceState);
// get framebuffer
VKRObjectFramebuffer* vkObjFramebuffer = surfaceCopy_getOrCreateFramebuffer(copySurfaceState, copySurfacePipelineInfo);
vkObjFramebuffer->flagForCurrentCommandBuffer();
// get descriptor set
VKRObjectDescriptorSet* vkObjDescriptorSet = surfaceCopy_getOrCreateDescriptorSet(copySurfaceState, copySurfacePipelineInfo);
sint32 dstEffectiveWidth, dstEffectiveHeight;
dstTextureVk->GetEffectiveSize(dstEffectiveWidth, dstEffectiveHeight, texDstMip);
sint32 srcEffectiveWidth, srcEffectiveHeight;
srcTextureVk->GetEffectiveSize(srcEffectiveWidth, srcEffectiveHeight, texSrcMip);
CopyShaderPushConstantData_t pushConstantData;
float srcCopyWidth = (float)1.0f;
float srcCopyHeight = (float)1.0f;
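// corner offsets of a fullscreen quad; the vertex shader expands these four corners into two triangles (6 vertices)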
// q0 vertex
pushConstantData.vertexOffsets[0] = -1.0f;
pushConstantData.vertexOffsets[1] = 1.0f;
// q1
pushConstantData.vertexOffsets[2] = 1.0f;
pushConstantData.vertexOffsets[3] = 1.0f;
// q2
pushConstantData.vertexOffsets[4] = -1.0f;
pushConstantData.vertexOffsets[5] = -1.0f;
// q3
pushConstantData.vertexOffsets[6] = 1.0f;
pushConstantData.vertexOffsets[7] = -1.0f;
pushConstantData.srcTexelOffset[0] = 0;
pushConstantData.srcTexelOffset[1] = 0;
vkCmdPushConstants(m_state.currentCommandBuffer, copySurfacePipelineInfo->vkObjPipeline->pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(pushConstantData), &pushConstantData);
// draw
VkRenderPassBeginInfo renderPassInfo{};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = copySurfacePipelineInfo->vkObjRenderPass->m_renderPass;
renderPassInfo.framebuffer = vkObjFramebuffer->m_frameBuffer;
renderPassInfo.renderArea.offset = { 0, 0 };
renderPassInfo.renderArea.extent = { (uint32_t)effectiveCopyWidth, (uint32_t)effectiveCopyHeight };
renderPassInfo.clearValueCount = 0;
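// flip the Y axis by using a negative viewport height; y is set to the copy height so the flipped viewport still covers [0, height]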
VkViewport viewport{};
viewport.x = 0;
viewport.y = (float)effectiveCopyHeight;
viewport.width = (float)effectiveCopyWidth;
viewport.height = (float)-effectiveCopyHeight;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor;
scissor.offset.x = 0;
scissor.offset.y = 0;
scissor.extent.width = effectiveCopyWidth;
scissor.extent.height = effectiveCopyHeight;
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &viewport);
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &scissor);
cemu_assert_debug(srcTextureVk->GetImageObj()->m_image != dstTextureVk->GetImageObj()->m_image);
barrier_image<SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER, SYNC_OP::IMAGE_READ>(srcTextureVk, srcImageSubresource, VK_IMAGE_LAYOUT_GENERAL); // wait for any modifying operations on source image to complete
barrier_image<SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER, SYNC_OP::IMAGE_WRITE>(dstTextureVk, dstImageSubresource, VK_IMAGE_LAYOUT_GENERAL); // wait for any operations on destination image to complete
vkCmdBeginRenderPass(m_state.currentCommandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, copySurfacePipelineInfo->vkObjPipeline->pipeline);
copySurfacePipelineInfo->vkObjPipeline->flagForCurrentCommandBuffer();
m_state.currentPipeline = copySurfacePipelineInfo->vkObjPipeline->pipeline;
vkCmdBindDescriptorSets(m_state.currentCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
copySurfacePipelineInfo->vkObjPipeline->pipeline_layout, 0, 1, &vkObjDescriptorSet->descriptorSet, 0, nullptr);
vkObjDescriptorSet->flagForCurrentCommandBuffer();
vkCmdDraw(m_state.currentCommandBuffer, 6, 1, 0, 0);
vkCmdEndRenderPass(m_state.currentCommandBuffer);
barrier_image<SYNC_OP::IMAGE_READ, SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER>(srcTextureVk, srcImageSubresource, VK_IMAGE_LAYOUT_GENERAL); // wait for drawcall to complete before any other operations on the source image
barrier_image<SYNC_OP::IMAGE_WRITE, SYNC_OP::IMAGE_READ | SYNC_OP::IMAGE_WRITE | SYNC_OP::ANY_TRANSFER>(dstTextureVk, dstImageSubresource, VK_IMAGE_LAYOUT_GENERAL); // wait for drawcall to complete before any other operations on the destination image
// restore viewport and scissor box
vkCmdSetViewport(m_state.currentCommandBuffer, 0, 1, &m_state.currentViewport);
vkCmdSetScissor(m_state.currentCommandBuffer, 0, 1, &m_state.currentScissorRect);
LatteTexture_TrackTextureGPUWrite(dstTextureVk, texDstSlice, texDstMip, LatteTexture_getNextUpdateEventCounter());
}
struct vkComponentDesc_t
{
enum class TYPE : uint8
{
NONE,
UNORM,
SNORM,
FLOAT
};
uint8 bits;
TYPE type;
vkComponentDesc_t(uint8 b, TYPE t) : bits(b), type(t) {};
friend bool operator==(const vkComponentDesc_t& lhs, const vkComponentDesc_t& rhs)
{
return lhs.bits == rhs.bits && lhs.type == rhs.type;
}
};
bool vkIsDepthFormat(VkFormat imageFormat)
{
switch (imageFormat)
{
case VK_FORMAT_D32_SFLOAT_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D16_UNORM:
return true;
default:
break;
}
return false;
}
vkComponentDesc_t vkGetFormatDepthBits(VkFormat imageFormat)
{
switch (imageFormat)
{
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return vkComponentDesc_t(32, vkComponentDesc_t::TYPE::FLOAT);
case VK_FORMAT_D24_UNORM_S8_UINT:
return vkComponentDesc_t(24, vkComponentDesc_t::TYPE::UNORM);
case VK_FORMAT_D32_SFLOAT:
return vkComponentDesc_t(32, vkComponentDesc_t::TYPE::FLOAT);
case VK_FORMAT_D16_UNORM:
return vkComponentDesc_t(16, vkComponentDesc_t::TYPE::UNORM);
default:
break;
}
return vkComponentDesc_t(0, vkComponentDesc_t::TYPE::NONE);
}
bool vkIsBitCompatibleColorDepthFormat(VkFormat format1, VkFormat format2)
{
cemu_assert_debug(vkIsDepthFormat(format1) != vkIsDepthFormat(format2));
VkFormat depthFormat, colorFormat;
if (vkIsDepthFormat(format1))
{
depthFormat = format1;
colorFormat = format2;
}
else
{
depthFormat = format2;
colorFormat = format1;
}
switch (depthFormat)
{
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return colorFormat == VK_FORMAT_R32_SFLOAT;
case VK_FORMAT_D24_UNORM_S8_UINT:
return false; // there is no 24-bit color format
case VK_FORMAT_D32_SFLOAT:
return colorFormat == VK_FORMAT_R32_SFLOAT;
case VK_FORMAT_D16_UNORM:
return colorFormat == VK_FORMAT_R16_UNORM;
default:
break;
}
return false;
}
void VulkanRenderer::surfaceCopy_copySurfaceWithFormatConversion(LatteTexture* sourceTexture, sint32 srcMip, sint32 srcSlice, LatteTexture* destinationTexture, sint32 dstMip, sint32 dstSlice, sint32 width, sint32 height)
{
// scale copy size to effective size
sint32 effectiveCopyWidth = width;
sint32 effectiveCopyHeight = height;
LatteTexture_scaleToEffectiveSize(sourceTexture, &effectiveCopyWidth, &effectiveCopyHeight, 0);
sint32 sourceEffectiveWidth, sourceEffectiveHeight;
sourceTexture->GetEffectiveSize(sourceEffectiveWidth, sourceEffectiveHeight, srcMip);
sint32 texSrcMip = srcMip;
sint32 texSrcSlice = srcSlice;
sint32 texDstMip = dstMip;
sint32 texDstSlice = dstSlice;
LatteTextureVk* srcTextureVk = (LatteTextureVk*)sourceTexture;
LatteTextureVk* dstTextureVk = (LatteTextureVk*)destinationTexture;
// check if texture rescale ratios match
// todo - if not, we have to use drawcall based copying
if (!LatteTexture_doesEffectiveRescaleRatioMatch(srcTextureVk, texSrcMip, dstTextureVk, texDstMip))
{
cemuLog_logDebug(LogType::Force, "surfaceCopy_copySurfaceViaDrawcall(): Mismatching dimensions");
return;
}
// check if bpp size matches
if (srcTextureVk->GetBPP() != dstTextureVk->GetBPP())
{
cemuLog_logDebug(LogType::Force, "surfaceCopy_copySurfaceViaDrawcall(): Mismatching BPP");
return;
}
surfaceCopy_viaDrawcall(srcTextureVk, texSrcMip, texSrcSlice, dstTextureVk, texDstMip, texDstSlice, effectiveCopyWidth, effectiveCopyHeight);
}
// called whenever a texture is destroyed
// it is guaranteed that the texture is not in use and all associated resources (descriptor sets, framebuffers) can be destroyed safely
void VulkanRenderer::surfaceCopy_notifyTextureRelease(LatteTextureVk* hostTexture)
{
for (auto& itr : m_copySurfacePipelineCache)
{
auto& pipelineInfo = itr.second;
auto itrDescriptors = pipelineInfo->map_descriptors.find(hostTexture);
if (itrDescriptors != pipelineInfo->map_descriptors.end())
{
for (auto p : itrDescriptors->second.m_array)
{
if (p)
{
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(p->vkObjDescriptorSet);
p->vkObjDescriptorSet = nullptr;
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(p->vkObjImageView);
p->vkObjImageView = nullptr;
}
}
pipelineInfo->map_descriptors.erase(itrDescriptors);
}
auto itrFramebuffers = pipelineInfo->map_framebuffers.find(hostTexture);
if (itrFramebuffers != pipelineInfo->map_framebuffers.end())
{
for (auto p : itrFramebuffers->second.m_array)
{
if (p)
{
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(p->vkObjFramebuffer);
p->vkObjFramebuffer = nullptr;
VulkanRenderer::GetInstance()->ReleaseDestructibleObject(p->vkObjImageView);
p->vkObjImageView = nullptr;
}
}
pipelineInfo->map_framebuffers.erase(itrFramebuffers);
}
}
}
void VulkanRenderer::surfaceCopy_cleanup()
{
// todo - release m_copySurfacePipelineCache etc
}
| 32,555
|
C++
|
.cpp
| 695
| 44.346763
| 251
| 0.762483
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,246
|
CachedFBOVk.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/CachedFBOVk.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanAPI.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/CachedFBOVk.h"
void CachedFBOVk::CreateRenderPass()
{
VKRObjectRenderPass::AttachmentInfo_t attachmentInfo;
for (int i = 0; i < 8; ++i)
{
auto& buffer = colorBuffer[i];
auto textureViewVk = (LatteTextureViewVk*)buffer.texture;
if (!textureViewVk)
{
attachmentInfo.colorAttachment[i].viewObj = nullptr;
continue;
}
// setup color attachment
auto viewObj = textureViewVk->GetViewRGBA();
attachmentInfo.colorAttachment[i].viewObj = viewObj;
attachmentInfo.colorAttachment[i].format = textureViewVk->GetFormat();
}
// setup depth attachment
if (depthBuffer.texture)
{
LatteTextureViewVk* depthTexVk = static_cast<LatteTextureViewVk*>(depthBuffer.texture);
auto depthBufferViewObj = depthTexVk->GetViewRGBA();
attachmentInfo.depthAttachment.viewObj = depthBufferViewObj;
attachmentInfo.depthAttachment.format = depthTexVk->GetFormat();
attachmentInfo.depthAttachment.hasStencil = depthTexVk->baseTexture->hasStencil;
}
else
{
// no depth attachment
attachmentInfo.depthAttachment.viewObj = nullptr;
}
m_vkrObjRenderPass = new VKRObjectRenderPass(attachmentInfo);
}
CachedFBOVk::~CachedFBOVk()
{
while (!m_usedByPipelines.empty())
delete m_usedByPipelines[0];
auto vkr = VulkanRenderer::GetInstance();
vkr->ReleaseDestructibleObject(m_vkrObjFramebuffer);
m_vkrObjFramebuffer = nullptr;
vkr->ReleaseDestructibleObject(m_vkrObjRenderPass);
m_vkrObjRenderPass = nullptr;
}
VKRObjectTextureView* CachedFBOVk::GetColorBufferImageView(uint32 index)
{
cemu_assert(index < 8);
auto& cb = colorBuffer[index];
auto textureViewVk = (LatteTextureViewVk*)cb.texture;
if (!textureViewVk)
return nullptr;
auto viewDim = textureViewVk->dim;
if (viewDim == Latte::E_DIM::DIM_3D)
viewDim = Latte::E_DIM::DIM_2D; // bind 3D texture slices as 2D images
return textureViewVk->GetViewRGBA();
}
VKRObjectTextureView* CachedFBOVk::GetDepthStencilBufferImageView(bool& hasStencil)
{
hasStencil = false;
auto textureViewVk = (LatteTextureViewVk*)depthBuffer.texture;
if (!textureViewVk)
return nullptr;
cemu_assert_debug(textureViewVk->numMip == 1);
hasStencil = textureViewVk->baseTexture->hasStencil;
return textureViewVk->GetViewRGBA();
}
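// gathers the attached color views (in slot order) followed by the optional depth/stencil view and creates the framebuffer object for the render pass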
void CachedFBOVk::CreateFramebuffer()
{
std::array<VKRObjectTextureView*, 9> imageViews{};
int imageViewIndex = 0;
for (uint32 i = 0; i < 8; ++i)
{
VKRObjectTextureView* cbView = GetColorBufferImageView(i);
if (!cbView)
continue;
imageViews[imageViewIndex++] = cbView;
}
bool hasStencil = false;
VKRObjectTextureView* depthStencilView = GetDepthStencilBufferImageView(hasStencil);
if (depthStencilView)
imageViews[imageViewIndex++] = depthStencilView;
cemu_assert_debug(imageViewIndex < 9);
m_vkrObjFramebuffer = new VKRObjectFramebuffer(m_vkrObjRenderPass, std::span<VKRObjectTextureView*>(imageViews.data(), imageViewIndex), m_size);
m_extend = { (uint32)m_size.x, (uint32)m_size.y };
}
void CachedFBOVk::InitDynamicRenderingData()
{
// init struct for KHR_dynamic_rendering
for (int i = 0; i < 8; ++i)
{
m_vkColorAttachments[i].sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR;
m_vkColorAttachments[i].pNext = nullptr;
m_vkColorAttachments[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
m_vkColorAttachments[i].resolveMode = VK_RESOLVE_MODE_NONE;
m_vkColorAttachments[i].resolveImageView = VK_NULL_HANDLE;
m_vkColorAttachments[i].resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
m_vkColorAttachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
m_vkColorAttachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
// ignore clearValue
VKRObjectTextureView* cbView = GetColorBufferImageView(i);
auto& buffer = colorBuffer[i];
if (!cbView)
{
m_vkColorAttachments[i].imageView = VK_NULL_HANDLE;
continue;
}
else
m_vkColorAttachments[i].imageView = cbView->m_textureImageView;
}
m_vkRenderingInfo.pColorAttachments = m_vkColorAttachments;
m_vkRenderingInfo.colorAttachmentCount = 8;
// trim the color attachment list if tail entries are not set
while (m_vkRenderingInfo.colorAttachmentCount > 0)
{
if (m_vkColorAttachments[m_vkRenderingInfo.colorAttachmentCount - 1].imageView)
break;
m_vkRenderingInfo.colorAttachmentCount--;
}
// initially set both stencil and depth attachment to an empty default
m_vkDepthAttachment.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR;
m_vkDepthAttachment.pNext = nullptr;
m_vkDepthAttachment.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
m_vkDepthAttachment.resolveMode = VK_RESOLVE_MODE_NONE;
m_vkDepthAttachment.resolveImageView = VK_NULL_HANDLE;
m_vkDepthAttachment.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
m_vkDepthAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
m_vkDepthAttachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
m_vkStencilAttachment.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR;
m_vkStencilAttachment.pNext = nullptr;
m_vkStencilAttachment.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
m_vkStencilAttachment.resolveMode = VK_RESOLVE_MODE_NONE;
m_vkStencilAttachment.resolveImageView = VK_NULL_HANDLE;
m_vkStencilAttachment.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
m_vkStencilAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
m_vkStencilAttachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
m_vkRenderingInfo.pDepthAttachment = nullptr;
m_vkRenderingInfo.pStencilAttachment = nullptr;
bool hasStencil = false;
VKRObjectTextureView* depthStencilView = GetDepthStencilBufferImageView(hasStencil);
// setup depth and stencil attachment
if (depthStencilView)
{
m_vkDepthAttachment.imageView = depthStencilView->m_textureImageView;
m_vkDepthAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
m_vkDepthAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
m_vkRenderingInfo.pDepthAttachment = &m_vkDepthAttachment;
if (hasStencil)
{
m_vkStencilAttachment.imageView = depthStencilView->m_textureImageView;
m_vkStencilAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
m_vkStencilAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
m_vkRenderingInfo.pStencilAttachment = &m_vkStencilAttachment;
}
}
m_vkRenderingInfo.sType = VK_STRUCTURE_TYPE_RENDERING_INFO_KHR;
m_vkRenderingInfo.pNext = nullptr;
m_vkRenderingInfo.flags = 0;
m_vkRenderingInfo.renderArea.offset = { 0, 0 };
m_vkRenderingInfo.renderArea.extent = m_extend;
m_vkRenderingInfo.viewMask = 0; // multiview disabled
m_vkRenderingInfo.layerCount = 1;
}
uint32 s_currentCollisionCheckIndex = 1;
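// returns true if any texture rendered to by this FBO is also bound as a shader input in one of the given descriptor sets (feedback loop);
// a global generation counter is used to mark the referenced textures so per-texture flags don't need to be cleared afterwards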
bool CachedFBOVk::CheckForCollision(VkDescriptorSetInfo* vsDS, VkDescriptorSetInfo* gsDS, VkDescriptorSetInfo* psDS) const
{
s_currentCollisionCheckIndex++;
const uint32 curColIndex = s_currentCollisionCheckIndex;
for (auto& itr : m_referencedTextures)
{
LatteTextureVk* vkTex = (LatteTextureVk*)itr;
vkTex->m_collisionCheckIndex = curColIndex;
}
if (vsDS)
{
for (auto& itr : vsDS->list_fboCandidates)
{
if (itr->m_collisionCheckIndex == curColIndex)
return true;
}
}
if (gsDS)
{
for (auto& itr : gsDS->list_fboCandidates)
{
if (itr->m_collisionCheckIndex == curColIndex)
return true;
}
}
if (psDS)
{
for (auto& itr : psDS->list_fboCandidates)
{
if (itr->m_collisionCheckIndex == curColIndex)
return true;
}
}
return false;
}
| 7,393
|
C++
|
.cpp
| 200
| 34.47
| 145
| 0.787299
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,247
|
TextureReadbackVk.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/Vulkan/TextureReadbackVk.cpp
|
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanTextureReadback.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/LatteTextureVk.h"
LatteTextureReadbackInfoVk::LatteTextureReadbackInfoVk(VkDevice device, LatteTextureView* textureView)
: LatteTextureReadbackInfo(textureView), m_device(device)
{
m_image_size = GetImageSize(textureView);
}
LatteTextureReadbackInfoVk::~LatteTextureReadbackInfoVk()
{
}
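// returns the byte size of the linear buffer needed to read back the base level of the given view (width * height * bytes per texel)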
uint32 LatteTextureReadbackInfoVk::GetImageSize(LatteTextureView* textureView)
{
const auto* baseTexture = (LatteTextureVk*)textureView->baseTexture;
// handle format
const auto textureFormat = baseTexture->GetFormat();
if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_R8G8B8A8_UNORM);
return baseTexture->width * baseTexture->height * 4;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R8_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_R8_UNORM);
return baseTexture->width * baseTexture->height * 1;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
{
cemu_assert(textureFormat == VK_FORMAT_R8G8B8A8_SRGB);
return baseTexture->width * baseTexture->height * 4;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT)
{
cemu_assert(textureFormat == VK_FORMAT_R32G32B32A32_SFLOAT);
return baseTexture->width * baseTexture->height * 16;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R32_FLOAT)
{
cemu_assert(textureFormat == VK_FORMAT_R32_SFLOAT || textureFormat == VK_FORMAT_D32_SFLOAT);
		// both the color (R32_SFLOAT) and depth (D32_SFLOAT) variants are 4 bytes per texel
		return baseTexture->width * baseTexture->height * 4;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_R16_UNORM);
if (baseTexture->isDepth)
{
cemu_assert_debug(false);
return baseTexture->width * baseTexture->height * 2;
}
else
{
return baseTexture->width * baseTexture->height * 2;
}
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT)
{
cemu_assert(textureFormat == VK_FORMAT_R16G16B16A16_SFLOAT);
return baseTexture->width * baseTexture->height * 8;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_R8G8_UNORM);
return baseTexture->width * baseTexture->height * 2;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_R16G16B16A16_UNORM);
return baseTexture->width * baseTexture->height * 8;
}
else if (textureView->format == Latte::E_GX2SURFFMT::D24_S8_UNORM)
{
cemu_assert(textureFormat == VK_FORMAT_D24_UNORM_S8_UINT);
// todo - if driver does not support VK_FORMAT_D24_UNORM_S8_UINT this is represented as VK_FORMAT_D32_SFLOAT_S8_UINT which is 8 bytes
return baseTexture->width * baseTexture->height * 4;
}
else
{
cemuLog_log(LogType::Force, "Unsupported texture readback format {:04x}", (uint32)textureView->format);
cemu_assert_debug(false);
return 0;
}
}
void LatteTextureReadbackInfoVk::StartTransfer()
{
cemu_assert(m_textureView);
auto* baseTexture = (LatteTextureVk*)m_textureView->baseTexture;
baseTexture->GetImageObj()->flagForCurrentCommandBuffer();
cemu_assert_debug(m_textureView->firstSlice == 0);
cemu_assert_debug(m_textureView->firstMip == 0);
cemu_assert_debug(m_textureView->baseTexture->dim != Latte::E_DIM::DIM_3D);
VkBufferImageCopy region{};
region.bufferOffset = m_buffer_offset;
region.bufferRowLength = baseTexture->width;
region.bufferImageHeight = baseTexture->height;
region.imageSubresource.aspectMask = baseTexture->GetImageAspect();
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageSubresource.mipLevel = 0;
region.imageOffset = {0,0,0};
region.imageExtent = {(uint32)baseTexture->width,(uint32)baseTexture->height,1};
const auto renderer = VulkanRenderer::GetInstance();
renderer->draw_endRenderPass();
renderer->barrier_image<VulkanRenderer::ANY_TRANSFER | VulkanRenderer::IMAGE_WRITE, VulkanRenderer::TRANSFER_READ>(baseTexture, region.imageSubresource, VK_IMAGE_LAYOUT_GENERAL);
renderer->barrier_sequentializeTransfer();
	vkCmdCopyImageToBuffer(renderer->getCurrentCommandBuffer(), baseTexture->GetImageObj()->m_image, VK_IMAGE_LAYOUT_GENERAL, m_buffer, 1, &region);
renderer->barrier_sequentializeTransfer();
renderer->barrier_image<VulkanRenderer::TRANSFER_READ, VulkanRenderer::ANY_TRANSFER | VulkanRenderer::IMAGE_WRITE>(baseTexture, region.imageSubresource, VK_IMAGE_LAYOUT_GENERAL); // make sure transfer is finished before image is modified
renderer->barrier_bufferRange<VulkanRenderer::TRANSFER_WRITE, VulkanRenderer::HOST_READ>(m_buffer, m_buffer_offset, m_image_size); // make sure transfer is finished before result is read
m_associatedCommandBufferId = renderer->GetCurrentCommandBufferId();
m_textureView = nullptr;
// to decrease latency of readbacks make sure that the current command buffer is submitted soon
renderer->RequestSubmitSoon();
renderer->RequestSubmitOnIdle();
}
bool LatteTextureReadbackInfoVk::IsFinished()
{
const auto renderer = VulkanRenderer::GetInstance();
return renderer->HasCommandBufferFinished(m_associatedCommandBufferId);
}
void LatteTextureReadbackInfoVk::ForceFinish()
{
const auto renderer = VulkanRenderer::GetInstance();
renderer->WaitCommandBufferFinished(m_associatedCommandBufferId);
}
| 5,546
|
C++
|
.cpp
| 127
| 41.338583
| 238
| 0.777325
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,248
|
OpenGLQuery.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLQuery.cpp
|
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Common/GLInclude/GLInclude.h"
class LatteQueryObjectGL : public LatteQueryObject
{
friend class OpenGLRenderer;
bool getResult(uint64& numSamplesPassed) override;
void begin() override;
void end() override;
private:
GLuint m_queryId{};
GLenum m_glTarget{};
};
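// returns a query object from the reuse cache if available, otherwise creates a new GL query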
LatteQueryObject* OpenGLRenderer::occlusionQuery_create()
{
if (!list_queryCacheOcclusion.empty())
{
LatteQueryObjectGL* queryObject = list_queryCacheOcclusion.front();
list_queryCacheOcclusion.erase(list_queryCacheOcclusion.begin() + 0);
queryObject->m_glTarget = GL_SAMPLES_PASSED;
queryObject->queryEnded = false;
queryObject->queryEventStart = 0;
queryObject->queryEventEnd = 0;
return queryObject;
}
// no query object available in cache, create new query
LatteQueryObjectGL* queryObject = new LatteQueryObjectGL();
glGenQueries(1, &queryObject->m_queryId);
queryObject->m_glTarget = GL_SAMPLES_PASSED;
queryObject->queryEnded = false;
queryObject->index = 0;
queryObject->queryEventStart = 0;
queryObject->queryEventEnd = 0;
catchOpenGLError();
return queryObject;
}
void OpenGLRenderer::occlusionQuery_destroy(LatteQueryObject* queryObj)
{
list_queryCacheOcclusion.emplace_back(static_cast<LatteQueryObjectGL*>(queryObj));
}
void OpenGLRenderer::occlusionQuery_flush()
{
glFlush();
}
bool LatteQueryObjectGL::getResult(uint64& numSamplesPassed)
{
GLint resultAvailable = 0;
catchOpenGLError();
glGetQueryObjectiv(this->m_queryId, GL_QUERY_RESULT_AVAILABLE, &resultAvailable);
if (resultAvailable == 0)
return false;
catchOpenGLError();
GLint64 queryResult = 0;
glGetQueryObjecti64v(this->m_queryId, GL_QUERY_RESULT, &queryResult);
numSamplesPassed = queryResult;
return true;
}
void LatteQueryObjectGL::begin()
{
catchOpenGLError();
glBeginQueryIndexed(this->m_glTarget, this->index, this->m_queryId);
catchOpenGLError();
}
void LatteQueryObjectGL::end()
{
glEndQueryIndexed(this->m_glTarget, this->index);
catchOpenGLError();
}
| 2,023
|
C++
|
.cpp
| 67
| 28.313433
| 83
| 0.798255
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,249
|
TextureReadbackGL.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/TextureReadbackGL.cpp
|
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLTextureReadback.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
LatteTextureReadbackInfoGL::LatteTextureReadbackInfoGL(LatteTextureView* textureView)
: LatteTextureReadbackInfo(textureView)
{
LatteTexture* baseTexture = textureView->baseTexture;
// handle format
if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM)
{
m_image_size = baseTexture->width*baseTexture->height * 4;
m_texFormatGL = GL_RGBA;
m_texDataTypeGL = GL_UNSIGNED_BYTE;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
{
m_image_size = baseTexture->width*baseTexture->height * 4;
m_texFormatGL = GL_RGBA;
m_texDataTypeGL = GL_UNSIGNED_BYTE;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT)
{
m_image_size = baseTexture->width*baseTexture->height * 16;
m_texFormatGL = GL_RGBA;
m_texDataTypeGL = GL_FLOAT;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R32_FLOAT)
{
if (baseTexture->isDepth)
{
m_image_size = baseTexture->width*baseTexture->height * 4;
m_texFormatGL = GL_DEPTH_COMPONENT;
m_texDataTypeGL = GL_FLOAT;
}
else
{
m_image_size = baseTexture->width*baseTexture->height * 4;
m_texFormatGL = GL_RED;
m_texDataTypeGL = GL_FLOAT;
}
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_UNORM)
{
if (baseTexture->isDepth)
{
m_image_size = baseTexture->width*baseTexture->height * 2;
m_texFormatGL = GL_DEPTH_COMPONENT;
m_texDataTypeGL = GL_UNSIGNED_SHORT;
cemu_assert_unimplemented();
}
else
{
m_image_size = baseTexture->width*baseTexture->height * 2;
m_texFormatGL = GL_RED;
m_texDataTypeGL = GL_UNSIGNED_SHORT;
}
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT)
{
m_image_size = baseTexture->width*baseTexture->height * 8;
m_texFormatGL = GL_RGBA;
m_texDataTypeGL = GL_HALF_FLOAT;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R8_G8_UNORM)
{
m_image_size = baseTexture->width*baseTexture->height * 2;
m_texFormatGL = GL_RG;
m_texDataTypeGL = GL_UNSIGNED_BYTE;
}
else if (textureView->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
{
m_image_size = baseTexture->width*baseTexture->height * 8;
m_texFormatGL = GL_RGBA;
m_texDataTypeGL = GL_UNSIGNED_SHORT;
}
else
{
cemuLog_logDebug(LogType::Force, "Unsupported texture readback format {:04x}", (uint32)textureView->format);
return;
}
}
LatteTextureReadbackInfoGL::~LatteTextureReadbackInfoGL()
{
if(imageCopyFinSync != 0)
glDeleteSync(imageCopyFinSync);
if(texImageBufferGL)
glDeleteBuffers(1, &texImageBufferGL);
}
void LatteTextureReadbackInfoGL::StartTransfer()
{
cemu_assert(m_textureView);
((OpenGLRenderer*)g_renderer.get())->texture_bindAndActivate(m_textureView, 0);
// create unsynchronized buffer
glGenBuffers(1, &texImageBufferGL);
glBindBuffer(GL_PIXEL_PACK_BUFFER, texImageBufferGL);
glBufferData(GL_PIXEL_PACK_BUFFER, m_image_size, NULL, GL_DYNAMIC_READ);
// request texture read into buffer
glGetTexImage(((LatteTextureViewGL*)m_textureView)->glTexTarget, 0, m_texFormatGL, m_texDataTypeGL, NULL);
glFlush();
// create fence sync (so we can check if the image copy operation finished)
imageCopyFinSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
m_textureView = nullptr;
}
bool LatteTextureReadbackInfoGL::IsFinished()
{
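	// non-blocking status check; with a timeout of zero glClientWaitSync returns immediately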
GLenum status = glClientWaitSync(imageCopyFinSync, 0, 0);
if (status == GL_TIMEOUT_EXPIRED)
return false;
	else if (status == GL_ALREADY_SIGNALED || status == GL_CONDITION_SATISFIED)
return true;
else
throw std::runtime_error("_updateFinishedTransfers(): Error during readback sync check\n");
}
uint8* LatteTextureReadbackInfoGL::GetData()
{
glBindBuffer(GL_PIXEL_PACK_BUFFER, texImageBufferGL);
return (uint8*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
}
void LatteTextureReadbackInfoGL::ReleaseData()
{
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
}
| 4,074
|
C++
|
.cpp
| 123
| 30.658537
| 110
| 0.751966
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,250
|
RendererShaderGL.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.cpp
|
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cemu/FileCache/FileCache.h"
#include "config/ActiveSettings.h"
#include "config/LaunchSettings.h"
extern std::atomic_int g_compiled_shaders_total;
extern std::atomic_int g_compiled_shaders_async;
bool s_isLoadingShaders{false};
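// tries to restore the linked program from the precompiled binary cache (4 byte binary format followed by the driver blob); returns false if unavailable or rejected by the driver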
bool RendererShaderGL::loadBinary()
{
if (!s_programBinaryCache)
return false;
if (m_isGameShader == false || m_isGfxPackShader)
return false; // only non-custom
if (!glProgramBinary)
return false; // OpenGL program binaries not supported
cemu_assert_debug(m_baseHash != 0);
uint64 h1, h2;
GenerateShaderPrecompiledCacheFilename(m_type, m_baseHash, m_auxHash, h1, h2);
std::vector<uint8> cacheFileData;
if (!s_programBinaryCache->GetFile({h1, h2 }, cacheFileData))
return false;
if (cacheFileData.size() <= sizeof(uint32))
return false;
uint32 shaderBinFormat = *(uint32*)(cacheFileData.data());
m_program = glCreateProgram();
glProgramBinary(m_program, shaderBinFormat, cacheFileData.data()+4, cacheFileData.size()-4);
int status = -1;
glGetProgramiv(m_program, GL_LINK_STATUS, &status);
if (status != GL_TRUE)
{
glDeleteProgram(m_program);
m_program = 0;
return false;
}
m_isCompiled = true;
return true;
}
void RendererShaderGL::storeBinary()
{
if (!s_programBinaryCache)
return;
if (!glGetProgramBinary)
return;
if (m_program == 0)
return;
if (!m_isGameShader || m_isGfxPackShader)
return;
GLint binaryLength = 0;
glGetProgramiv(m_program, GL_PROGRAM_BINARY_LENGTH, &binaryLength);
if (binaryLength > 0)
{
uint64 h1, h2;
GenerateShaderPrecompiledCacheFilename(m_type, m_baseHash, m_auxHash, h1, h2);
// build stored shader data (4 byte format + binary data)
std::vector<uint8> storedBinary(binaryLength+sizeof(uint32), 0);
GLenum binaryFormat = 0;
glGetProgramBinary(m_program, binaryLength, NULL, &binaryFormat, storedBinary.data()+sizeof(uint32));
*(uint32*)(storedBinary.data() + 0) = binaryFormat;
// store
s_programBinaryCache->AddFileAsync({h1, h2 }, storedBinary.data(), storedBinary.size());
}
}
RendererShaderGL::RendererShaderGL(ShaderType type, uint64 baseHash, uint64 auxHash, bool isGameShader, bool isGfxPackShader, const std::string& glslSource)
: RendererShader(type, baseHash, auxHash, isGameShader, isGfxPackShader), m_glslSource(glslSource)
{
GLenum glShaderType;
switch (type)
{
case ShaderType::kVertex:
glShaderType = GL_VERTEX_SHADER;
break;
case ShaderType::kFragment:
glShaderType = GL_FRAGMENT_SHADER;
break;
case ShaderType::kGeometry:
glShaderType = GL_GEOMETRY_SHADER;
break;
default:
cemu_assert_debug(false);
}
if (s_isLoadingShaders)
{
if (loadBinary())
{
m_glslSource.clear();
m_glslSource.shrink_to_fit();
return;
}
}
m_shader_object = glCreateShader(glShaderType);
const char *c_str = m_glslSource.c_str();
const GLint size = (GLint)m_glslSource.size();
glShaderSource(m_shader_object, 1, &c_str, &size);
glCompileShader(m_shader_object);
GLint log_length;
glGetShaderiv(m_shader_object, GL_INFO_LOG_LENGTH, &log_length);
if (log_length > 0)
{
char log[2048]{};
GLsizei log_size;
glGetShaderInfoLog(m_shader_object, std::min<uint32>(log_length, sizeof(log) - 1), &log_size, log);
cemuLog_log(LogType::Force, "Error/Warning in shader:");
cemuLog_log(LogType::Force, log);
}
// set debug name
if (LaunchSettings::NSightModeEnabled())
{
auto objNameStr = fmt::format("shader_{:016x}_{:016x}", m_baseHash, m_auxHash);
glObjectLabel(GL_SHADER, m_shader_object, objNameStr.size(), objNameStr.c_str());
}
m_program = glCreateProgram();
glProgramParameteri(m_program, GL_PROGRAM_SEPARABLE, GL_TRUE);
glProgramParameteri(m_program, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
glAttachShader(m_program, m_shader_object);
m_shader_attached = true;
glLinkProgram(m_program);
storeBinary();
// count shader compilation
if (!s_isLoadingShaders)
++g_compiled_shaders_total;
// we can throw away the GLSL code to conserve RAM
m_glslSource.clear();
m_glslSource.shrink_to_fit();
}
RendererShaderGL::~RendererShaderGL()
{
if (m_shader_object != 0 && m_shader_attached)
glDetachShader(m_program, m_shader_object);
if (m_shader_object != 0)
glDeleteShader(m_shader_object);
if (m_program != 0)
glDeleteProgram(m_program);
}
void RendererShaderGL::PreponeCompilation(bool isRenderThread)
{
// the logic for initiating compilation is currently in the constructor
// here we only guarantee that it is finished before we return
if (m_isCompiled)
return;
WaitForCompiled();
}
bool RendererShaderGL::IsCompiled()
{
cemu_assert_debug(false);
return true;
}
bool RendererShaderGL::WaitForCompiled()
{
char infoLog[8 * 1024];
if (m_isCompiled)
return true;
// check if compilation was successful
GLint compileStatus = GL_FALSE;
glGetShaderiv(m_shader_object, GL_COMPILE_STATUS, &compileStatus);
if (compileStatus == 0)
{
uint32 infoLogLength, tempLength;
glGetShaderiv(m_shader_object, GL_INFO_LOG_LENGTH, (GLint *)&infoLogLength);
if (infoLogLength != 0)
{
tempLength = sizeof(infoLog) - 1;
glGetShaderInfoLog(m_shader_object, std::min(infoLogLength, tempLength), (GLsizei*)&tempLength, (GLcharARB*)infoLog);
infoLog[tempLength] = '\0';
cemuLog_log(LogType::Force, "Compile error in shader. Log:");
cemuLog_log(LogType::Force, infoLog);
}
if (m_shader_object != 0)
glDeleteShader(m_shader_object);
m_isCompiled = true;
return false;
}
// get shader binary
GLint linkStatus = GL_FALSE;
glGetProgramiv(m_program, GL_LINK_STATUS, &linkStatus);
if (linkStatus == 0)
{
uint32 infoLogLength, tempLength;
glGetProgramiv(m_program, GL_INFO_LOG_LENGTH, (GLint *)&infoLogLength);
if (infoLogLength != 0)
{
tempLength = sizeof(infoLog) - 1;
glGetProgramInfoLog(m_program, std::min(infoLogLength, tempLength), (GLsizei*)&tempLength, (GLcharARB*)infoLog);
infoLog[tempLength] = '\0';
cemuLog_log(LogType::Force, "Link error in shader. Log:");
cemuLog_log(LogType::Force, infoLog);
}
m_isCompiled = true;
return false;
}
/*glDetachShader(m_program, m_shader_object);
m_shader_attached = false;*/
m_isCompiled = true;
return true;
}
sint32 RendererShaderGL::GetUniformLocation(const char* name)
{
return glGetUniformLocation(m_program, name);
}
void RendererShaderGL::SetUniform2fv(sint32 location, void* data, sint32 count)
{
glProgramUniform2fv(m_program, location, count, (const GLfloat*)data);
}
void RendererShaderGL::SetUniform4iv(sint32 location, void* data, sint32 count)
{
glProgramUniform4iv(m_program, location, count, (const GLint*)data);
}
void RendererShaderGL::ShaderCacheLoading_begin(uint64 cacheTitleId)
{
cemu_assert_debug(!s_programBinaryCache); // should not be set, ShaderCacheLoading_Close() not called?
// determine if cache is enabled
bool usePrecompiled = false;
switch (ActiveSettings::GetPrecompiledShadersOption())
{
case PrecompiledShaderOption::Auto:
if (g_renderer->GetVendor() == GfxVendor::Nvidia)
usePrecompiled = false;
else
usePrecompiled = true;
break;
case PrecompiledShaderOption::Enable:
usePrecompiled = true;
break;
case PrecompiledShaderOption::Disable:
usePrecompiled = false;
break;
default:
UNREACHABLE;
}
cemuLog_log(LogType::Force, "Using precompiled shaders: {}", usePrecompiled ? "true" : "false");
if (usePrecompiled)
{
const uint32 cacheMagic = GeneratePrecompiledCacheId();
const std::string cacheFilename = fmt::format("{:016x}_gl.bin", cacheTitleId);
s_programBinaryCache = FileCache::Open(ActiveSettings::GetCachePath("shaderCache/precompiled/{}", cacheFilename), true, cacheMagic);
if (s_programBinaryCache == nullptr)
cemuLog_log(LogType::Force, "Unable to open OpenGL precompiled cache {}", cacheFilename);
}
s_isLoadingShaders = true;
}
void RendererShaderGL::ShaderCacheLoading_end()
{
s_isLoadingShaders = false;
}
void RendererShaderGL::ShaderCacheLoading_Close()
{
if(s_programBinaryCache)
{
delete s_programBinaryCache;
s_programBinaryCache = nullptr;
}
g_compiled_shaders_total = 0;
g_compiled_shaders_async = 0;
}
FileCache* RendererShaderGL::s_programBinaryCache{};
| 8,267
|
C++
|
.cpp
| 255
| 29.886275
| 156
| 0.7521
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,251
|
OpenGLSurfaceCopy.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLSurfaceCopy.cpp
|
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/CachedFBOGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteDefaultShaders.h"
void LatteDraw_resetAttributePointerCache();
void _setDepthCompareMode(LatteTextureViewGL* textureView, uint8 depthCompareMode)
{
if (depthCompareMode != textureView->samplerState.depthCompareMode)
{
if (depthCompareMode != 0)
glTexParameteri(textureView->glTexTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
else
glTexParameteri(textureView->glTexTarget, GL_TEXTURE_COMPARE_MODE, GL_NONE);
textureView->samplerState.depthCompareMode = depthCompareMode;
}
}
void OpenGLRenderer::surfaceCopy_copySurfaceWithFormatConversion(LatteTexture* sourceTexture, sint32 srcMip, sint32 srcSlice, LatteTexture* destinationTexture, sint32 dstMip, sint32 dstSlice, sint32 width, sint32 height)
{
// scale copy size to effective size
sint32 effectiveCopyWidth = width;
sint32 effectiveCopyHeight = height;
LatteTexture_scaleToEffectiveSize(sourceTexture, &effectiveCopyWidth, &effectiveCopyHeight, 0);
sint32 sourceEffectiveWidth, sourceEffectiveHeight;
sourceTexture->GetEffectiveSize(sourceEffectiveWidth, sourceEffectiveHeight, srcMip);
// reset everything
renderstate_resetColorControl();
renderstate_resetDepthControl();
attributeStream_unbindVertexBuffer();
SetArrayElementBuffer(0);
LatteDraw_resetAttributePointerCache();
SetAttributeArrayState(0, true, -1);
SetAttributeArrayState(1, true, -1);
for (uint32 i = 2; i < GPU_GL_MAX_NUM_ATTRIBUTE; i++)
SetAttributeArrayState(i, false, -1);
catchOpenGLError();
// set viewport
g_renderer->renderTarget_setViewport(0, 0, (float)effectiveCopyWidth, (float)effectiveCopyHeight, 0.0f, 1.0f);
catchOpenGLError();
// get a view of the copied slice/mip in the source and destination texture
LatteTextureView* sourceView = sourceTexture->GetOrCreateView(srcMip, 1, srcSlice, 1);
LatteTextureView* destinationView = destinationTexture->GetOrCreateView(dstMip, 1, dstSlice, 1);
texture_bindAndActivate(sourceView, 0);
catchOpenGLError();
// setup texture attributes
_setDepthCompareMode((LatteTextureViewGL*)sourceView, 0);
catchOpenGLError();
// bind framebuffer
if (destinationTexture->isDepth)
LatteMRT::BindDepthBufferOnly(destinationView);
else
LatteMRT::BindColorBufferOnly(destinationView);
catchOpenGLError();
// enable depth writes if the destination is a depth texture
if (destinationTexture->isDepth)
renderstate_setAlwaysWriteDepth();
// bind format specific copy shader
LatteDefaultShader_t* copyShader = LatteDefaultShader_getPixelCopyShader_depthToColor();
if (destinationTexture->isDepth)
copyShader = LatteDefaultShader_getPixelCopyShader_colorToDepth();
glUseProgram(copyShader->glProgamId);
catchOpenGLError();
// setup uniforms
glUniform1i(copyShader->copyShaderUniforms.uniformLoc_textureSrc, 0);
catchOpenGLError();
float vertexOffsets[4 * 4];
float srcCopyWidth = (float)width / (float)sourceTexture->width;
float srcCopyHeight = (float)height / (float)sourceTexture->height;
// q0 vertex
vertexOffsets[0] = -1.0f;
vertexOffsets[1] = 1.0f;
// q0 uv
vertexOffsets[2] = 0.0f;
vertexOffsets[3] = 0.0f;
// q1
vertexOffsets[4] = 1.0f;
vertexOffsets[5] = 1.0f;
// q1 uv
vertexOffsets[6] = srcCopyWidth;
vertexOffsets[7] = 0.0f;
// q2
vertexOffsets[8] = -1.0f;
vertexOffsets[9] = -1.0f;
// q2 uv
vertexOffsets[10] = 0.0f;
vertexOffsets[11] = srcCopyHeight;
// q3
vertexOffsets[12] = 1.0f;
vertexOffsets[13] = -1.0f;
// q3 uv
vertexOffsets[14] = srcCopyWidth;
vertexOffsets[15] = srcCopyHeight;
glUniform4fv(copyShader->copyShaderUniforms.uniformLoc_vertexOffsets, 4, vertexOffsets);
catchOpenGLError();
// draw
uint16 indexData[6] = { 0,1,3,0,2,3 };
glDrawRangeElements(GL_TRIANGLES, 0, 5, 6, GL_UNSIGNED_SHORT, indexData);
catchOpenGLError();
LatteGPUState.repeatTextureInitialization = true;
glUseProgram(0);
}
| 4,269
|
C++
|
.cpp
| 105
| 38.6
| 220
| 0.797545
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,252
|
LatteTextureViewGL.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.cpp
|
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "config/LaunchSettings.h"
LatteTextureViewGL::LatteTextureViewGL(LatteTextureGL* texture, Latte::E_DIM dim, Latte::E_GX2SURFFMT format, sint32 firstMip, sint32 mipCount, sint32 firstSlice, sint32 sliceCount, bool registerView, bool forceCreateNewTexId)
: LatteTextureView(texture, firstMip, mipCount, firstSlice, sliceCount, dim, format, registerView)
{
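	// a separate GL texture view object is only needed when the view differs from the base texture (dim, format, mip/slice range); otherwise the base texture's GL id is reused directly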
if (dim != texture->dim || format != texture->format ||
firstSlice != 0 || firstMip != 0 || mipCount != texture->mipLevels || sliceCount != texture->depth ||
forceCreateNewTexId)
{
LatteTextureGL::GenerateEmptyTextureFromGX2Dim(dim, glTexId, glTexTarget, false);
this->glInternalFormat = 0;
InitAliasView();
}
else
{
glTexId = texture->glId_texture;
glTexTarget = texture->glTexTarget;
glInternalFormat = texture->glInternalFormat;
}
// mark all sampler properties as undefined
samplerState.maxAniso = 0xFF;
samplerState.filterMin = 0xFFFFFFFF;
samplerState.filterMag = 0xFFFFFFFF;
samplerState.maxMipLevels = 0xFF;
samplerState.borderType = 0xFF;
samplerState.borderColor[0] = 9999.0f;
samplerState.borderColor[1] = 9999.0f;
samplerState.borderColor[2] = 9999.0f;
samplerState.borderColor[3] = 9999.0f;
samplerState.clampS = 0xFF;
samplerState.clampT = 0xFF;
samplerState.clampR = 0xFF;
samplerState.minLod = 0xFFFF;
samplerState.maxLod = 0xFFFF;
samplerState.lodBias = 0x7FFF;
samplerState.depthCompareMode = 0xFF;
samplerState.depthCompareFunc = 0xFF;
swizzleR = 0xFF;
swizzleG = 0xFF;
swizzleB = 0xFF;
swizzleA = 0xFF;
}
LatteTextureViewGL::~LatteTextureViewGL()
{
delete m_alternativeView;
((OpenGLRenderer*)g_renderer.get())->texture_notifyDelete(this);
glDeleteTextures(1, &glTexId);
}
void LatteTextureViewGL::InitAliasView()
{
const auto texture = (LatteTextureGL*)baseTexture;
// compute internal format
if(texture->overwriteInfo.hasFormatOverwrite)
{
cemu_assert_debug(format == texture->format);
glInternalFormat = texture->glInternalFormat; // for format overwrite no aliasing is allowed and thus we always inherit the internal format of the base texture
}
else if (baseTexture->isDepth)
{
// depth is handled differently
cemu_assert(format == texture->format); // is depth alias with different format intended?
glInternalFormat = texture->glInternalFormat;
}
else
{
LatteTextureGL::FormatInfoGL glFormatInfo;
LatteTextureGL::GetOpenGLFormatInfo(baseTexture->isDepth, format, dim, &glFormatInfo);
glInternalFormat = glFormatInfo.glInternalFormat;
}
catchOpenGLError();
if (firstMip >= texture->maxPossibleMipLevels)
{
cemuLog_logDebug(LogType::Force, "InitAliasView(): Out of bounds mip level requested");
glTextureView(glTexId, glTexTarget, texture->glId_texture, glInternalFormat, texture->maxPossibleMipLevels - 1, numMip, firstSlice, this->numSlice);
}
else
glTextureView(glTexId, glTexTarget, texture->glId_texture, glInternalFormat, firstMip, numMip, firstSlice, numSlice);
catchOpenGLError();
if (glTextureParameteri)
{
glTextureParameteri(glTexId, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTextureParameteri(glTexId, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTextureParameteri(glTexId, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTextureParameteri(glTexId, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTextureParameteri(glTexId, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTextureParameteri(glTexId, GL_TEXTURE_COMPARE_MODE, GL_NONE);
}
else
{
// todo - fallback for when DSA isn't supported
}
// set debug name
bool useGLDebugNames = false;
#ifdef CEMU_DEBUG_ASSERT
useGLDebugNames = true;
#endif
if (LaunchSettings::NSightModeEnabled())
useGLDebugNames = true;
if (useGLDebugNames)
{
char textureDebugLabel[512];
sprintf(textureDebugLabel, "%08x_f%04x_p%04x_viewFMT%04x%s_org%d", baseTexture->physAddress, (uint32)baseTexture->format, baseTexture->pitch, (uint32)this->format, baseTexture->isDepth?"_d":"", texture->glId_texture);
glObjectLabel(GL_TEXTURE, glTexId, -1, textureDebugLabel);
}
}
LatteTextureViewGL* LatteTextureViewGL::GetAlternativeView()
{
if (!m_alternativeView)
m_alternativeView = new LatteTextureViewGL((LatteTextureGL*)baseTexture, dim, format, firstMip, numMip, firstSlice, numSlice, false, true);
return m_alternativeView;
}
| 4,470
|
C++
|
.cpp
| 114
| 37.026316
| 226
| 0.785419
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,253
|
LatteTextureGL.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.cpp
|
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "config/LaunchSettings.h"
LatteTextureGL::LatteTextureGL(Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels, uint32 swizzle,
Latte::E_HWTILEMODE tileMode, bool isDepth)
: LatteTexture(dim, physAddress, physMipAddress, format, width, height, depth, pitch, mipLevels, swizzle, tileMode, isDepth)
{
GenerateEmptyTextureFromGX2Dim(dim, this->glId_texture, this->glTexTarget, true);
// set format info
FormatInfoGL glFormatInfo;
GetOpenGLFormatInfo(isDepth, overwriteInfo.hasFormatOverwrite ? (Latte::E_GX2SURFFMT)overwriteInfo.format : format, dim, &glFormatInfo);
this->glInternalFormat = glFormatInfo.glInternalFormat;
this->isAlternativeFormat = glFormatInfo.isUsingAlternativeFormat;
// set debug name
bool useGLDebugNames = false;
#ifdef CEMU_DEBUG_ASSERT
useGLDebugNames = true;
#endif
if (LaunchSettings::NSightModeEnabled())
useGLDebugNames = true;
if (useGLDebugNames)
{
char textureDebugLabel[512];
sprintf(textureDebugLabel, "%08x_f%04x%s_p%04x_%dx%d", physAddress, (uint32)format, this->isDepth ? "_d" : "", pitch, width, height);
glObjectLabel(GL_TEXTURE, this->glId_texture, -1, textureDebugLabel);
}
}
LatteTextureGL::~LatteTextureGL()
{
glDeleteTextures(1, &glId_texture);
catchOpenGLError();
}
void LatteTextureGL::GenerateEmptyTextureFromGX2Dim(Latte::E_DIM dim, GLuint& texId, GLint& texTarget, bool createForTargetType)
{
if (dim == Latte::E_DIM::DIM_2D)
texTarget = GL_TEXTURE_2D;
else if (dim == Latte::E_DIM::DIM_1D)
texTarget = GL_TEXTURE_1D;
else if (dim == Latte::E_DIM::DIM_3D)
texTarget = GL_TEXTURE_3D;
else if (dim == Latte::E_DIM::DIM_2D_ARRAY)
texTarget = GL_TEXTURE_2D_ARRAY;
else if (dim == Latte::E_DIM::DIM_CUBEMAP)
texTarget = GL_TEXTURE_CUBE_MAP_ARRAY;
else if (dim == Latte::E_DIM::DIM_2D_MSAA)
texTarget = GL_TEXTURE_2D; // todo, GL_TEXTURE_2D_MULTISAMPLE ?
else
{
cemu_assert_unimplemented();
}
if(createForTargetType)
texId = glCreateTextureWrapper(texTarget); // initializes the texture to texTarget (equivalent to calling glGenTextures + glBindTexture)
else
glGenTextures(1, &texId);
}
LatteTextureView* LatteTextureGL::CreateView(Latte::E_DIM dim, Latte::E_GX2SURFFMT format, sint32 firstMip, sint32 mipCount, sint32 firstSlice, sint32 sliceCount)
{
return new LatteTextureViewGL(this, dim, format, firstMip, mipCount, firstSlice, sliceCount);
}
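// maps a GX2 surface format to the closest OpenGL internal/supplied format; when no bit-exact match exists an alternative format is chosen and marked as such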
void LatteTextureGL::GetOpenGLFormatInfo(bool isDepth, Latte::E_GX2SURFFMT format, Latte::E_DIM dim, FormatInfoGL* formatInfoOut)
{
formatInfoOut->isUsingAlternativeFormat = false;
if (isDepth)
{
if (format == Latte::E_GX2SURFFMT::D24_S8_UNORM)
{
formatInfoOut->setFormat(GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8);
return;
}
else if (format == Latte::E_GX2SURFFMT::D24_S8_FLOAT)
{
formatInfoOut->setFormat(GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV);
formatInfoOut->markAsAlternativeFormat();
return;
}
else if (format == Latte::E_GX2SURFFMT::D32_S8_FLOAT)
{
formatInfoOut->setFormat(GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV);
return;
}
else if (format == Latte::E_GX2SURFFMT::D32_FLOAT)
{
formatInfoOut->setFormat(GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::D16_UNORM)
{
formatInfoOut->setFormat(GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT);
return;
}
// unsupported depth format
cemuLog_log(LogType::Force, "OpenGL: Unsupported texture depth format 0x{:04x}", (uint32)format);
// use placeholder format
formatInfoOut->setFormat(GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT);
formatInfoOut->markAsAlternativeFormat();
return;
}
bool glIsCompressed = false;
bool isUsingAlternativeFormat = false; // set to true if there is no bit-perfect matching OpenGL format
sint32 glInternalFormat;
sint32 glSuppliedFormat;
sint32 glSuppliedFormatType;
// get format information
if (format == Latte::E_GX2SURFFMT::R4_G4_UNORM)
{
formatInfoOut->setFormat(GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4);
formatInfoOut->markAsAlternativeFormat();
return;
}
else if (format == Latte::E_GX2SURFFMT::R4_G4_B4_A4_UNORM)
{
formatInfoOut->setFormat(GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4);
return;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT)
{
formatInfoOut->setFormat(GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_FLOAT)
{
formatInfoOut->setFormat(GL_RG16F, GL_RG, GL_HALF_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R16_SNORM)
{
formatInfoOut->setFormat(GL_R16_SNORM, GL_RED, GL_SHORT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R16_FLOAT)
{
formatInfoOut->setFormat(GL_R16F, GL_RED, GL_HALF_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::BC1_UNORM ||
format == Latte::E_GX2SURFFMT::BC1_SRGB)
{
if (format == Latte::E_GX2SURFFMT::BC1_SRGB)
formatInfoOut->setCompressed(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, -1, -1);
else
formatInfoOut->setCompressed(GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, -1, -1);
return;
}
else if (format == Latte::E_GX2SURFFMT::BC2_UNORM || format == Latte::E_GX2SURFFMT::BC2_SRGB)
{
// todo - use OpenGL BC2 format if available
formatInfoOut->setFormat(GL_RGBA16F, GL_RGBA, GL_FLOAT);
formatInfoOut->markAsAlternativeFormat();
return;
}
else if (format == Latte::E_GX2SURFFMT::BC3_UNORM || format == Latte::E_GX2SURFFMT::BC3_SRGB)
{
if (format == Latte::E_GX2SURFFMT::BC3_SRGB)
formatInfoOut->setCompressed(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, -1, -1);
else
formatInfoOut->setCompressed(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, -1, -1);
return;
}
else if (format == Latte::E_GX2SURFFMT::BC4_UNORM || format == Latte::E_GX2SURFFMT::BC4_SNORM)
{
bool allowCompressed = true;
if (dim != Latte::E_DIM::DIM_2D && dim != Latte::E_DIM::DIM_2D_ARRAY)
allowCompressed = false; // RGTC1 does not support non-2D textures
if (allowCompressed)
{
if (format == Latte::E_GX2SURFFMT::BC4_UNORM)
formatInfoOut->setCompressed(GL_COMPRESSED_RED_RGTC1, -1, -1);
else
formatInfoOut->setCompressed(GL_COMPRESSED_SIGNED_RED_RGTC1, -1, -1);
return;
}
else
{
formatInfoOut->setFormat(GL_RG16F, GL_RG, GL_FLOAT);
formatInfoOut->markAsAlternativeFormat();
return;
}
}
else if (format == Latte::E_GX2SURFFMT::BC5_UNORM || format == Latte::E_GX2SURFFMT::BC5_SNORM)
{
if (format == Latte::E_GX2SURFFMT::BC5_SNORM)
formatInfoOut->setCompressed(GL_COMPRESSED_SIGNED_RG_RGTC2, -1, -1);
else
formatInfoOut->setCompressed(GL_COMPRESSED_RG_RGTC2, -1, -1);
return;
}
else if (format == Latte::E_GX2SURFFMT::R32_FLOAT)
{
formatInfoOut->setFormat(GL_R32F, GL_RED, GL_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R32_G32_FLOAT)
{
formatInfoOut->setFormat(GL_RG32F, GL_RG, GL_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R32_G32_UINT)
{
formatInfoOut->setFormat(GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R32_UINT)
{
formatInfoOut->setFormat(GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R16_UINT)
{
// used by VC DS (New Super Mario Bros)
formatInfoOut->setFormat(GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R8_UINT)
{
// used by VC DS (New Super Mario Bros)
formatInfoOut->setFormat(GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE);
return;
}
else if (format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT)
{
formatInfoOut->setFormat(GL_RGBA32F, GL_RGBA, GL_FLOAT);
return;
}
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM || format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
{
if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
glInternalFormat = GL_SRGB8_ALPHA8;
else
glInternalFormat = GL_RGBA8;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SNORM)
{
glInternalFormat = GL_RGBA8_SNORM;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_UNORM)
{
glInternalFormat = GL_R8;
// supplied format
glSuppliedFormat = GL_RED;
glSuppliedFormatType = GL_UNSIGNED_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_SNORM)
{
glInternalFormat = GL_R8_SNORM;
// supplied format
glSuppliedFormat = GL_RED;
glSuppliedFormatType = GL_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_G8_UNORM)
{
glInternalFormat = GL_RG8;
// supplied format
glSuppliedFormat = GL_RG;
glSuppliedFormatType = GL_UNSIGNED_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_G8_SNORM)
{
glInternalFormat = GL_RG8_SNORM;
// supplied format
glSuppliedFormat = GL_RG;
glSuppliedFormatType = GL_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R16_UNORM)
{
glInternalFormat = GL_R16;
// supplied format
glSuppliedFormat = GL_RED;
glSuppliedFormatType = GL_UNSIGNED_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
{
glInternalFormat = GL_RGBA16;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_SNORM)
{
glInternalFormat = GL_RGBA16_SNORM;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_UNORM)
{
glInternalFormat = GL_RG16;
// supplied format
glSuppliedFormat = GL_RG;
glSuppliedFormatType = GL_UNSIGNED_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R5_G6_B5_UNORM)
{
glInternalFormat = GL_RGB565;
// supplied format
glSuppliedFormat = GL_RGB;
glSuppliedFormatType = GL_UNSIGNED_SHORT_5_6_5_REV;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R5_G5_B5_A1_UNORM)
{
glInternalFormat = GL_RGB5_A1;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_SHORT_5_5_5_1;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM)
{
glInternalFormat = GL_RGB5_A1;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_SHORT_5_5_5_1;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_UNORM)
{
glInternalFormat = GL_RGB10_A2;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_INT_2_10_10_10_REV;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SRGB) // used by Super Mario Maker
{
glInternalFormat = GL_RGB10_A2; // todo - how to handle SRGB for this format?
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_INT_2_10_10_10_REV;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::A2_B10_G10_R10_UNORM)
{
glInternalFormat = GL_RGB10_A2;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_UNSIGNED_INT_10_10_10_2;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SNORM)
{
glInternalFormat = GL_RGBA16_SNORM; // OpenGL has no signed version of GL_RGB10_A2
isUsingAlternativeFormat = true;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT)
{
glInternalFormat = GL_R11F_G11F_B10F;
// supplied format
glSuppliedFormat = GL_RGB;
glSuppliedFormatType = GL_UNSIGNED_INT_10F_11F_11F_REV;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_UINT)
{
glInternalFormat = GL_RGBA32UI;
// supplied format
glSuppliedFormat = GL_RGBA_INTEGER;
glSuppliedFormatType = GL_UNSIGNED_INT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UINT)
{
glInternalFormat = GL_RGBA16UI;
// supplied format
glSuppliedFormat = GL_RGBA_INTEGER;
glSuppliedFormatType = GL_UNSIGNED_SHORT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UINT)
{
glInternalFormat = GL_RGBA8UI;
// supplied format
glSuppliedFormat = GL_RGBA_INTEGER;
glSuppliedFormatType = GL_UNSIGNED_BYTE;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R24_X8_UNORM)
{
// OpenGL has no color version of GL_DEPTH24_STENCIL8, therefore we use a 32-bit floating-point format instead
glInternalFormat = GL_R32F;
isUsingAlternativeFormat = true;
// supplied format
glSuppliedFormat = GL_RED;
glSuppliedFormatType = GL_FLOAT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::X24_G8_UINT)
{
// OpenGL has no X24_G8 format, we use RGBA8UI instead and manually swizzle the channels
// this format is used in Resident Evil Revelations when scanning with the Genesis. It's also used in Cars 3: Driven to Win?
glInternalFormat = GL_RGBA8UI;
isUsingAlternativeFormat = true;
// supplied format
glSuppliedFormat = GL_RGBA;
glSuppliedFormatType = GL_FLOAT;
glIsCompressed = false;
}
else if (format == Latte::E_GX2SURFFMT::R32_X8_FLOAT)
{
// only available as depth format in OpenGL
// used by Cars 3: Driven to Win
// find a way to emulate this using a color format
glInternalFormat = GL_DEPTH32F_STENCIL8;
isUsingAlternativeFormat = false;
// supplied format
glSuppliedFormat = GL_DEPTH_STENCIL;
glSuppliedFormatType = GL_FLOAT_32_UNSIGNED_INT_24_8_REV;
glIsCompressed = false;
cemu_assert_debug(false);
}
else
{
cemuLog_log(LogType::Force, "OpenGL: Unsupported texture format 0x{:04x}", (uint32)format);
cemu_assert_unimplemented();
}
formatInfoOut->glInternalFormat = glInternalFormat;
formatInfoOut->glSuppliedFormat = glSuppliedFormat;
formatInfoOut->glSuppliedFormatType = glSuppliedFormatType;
formatInfoOut->glIsCompressed = glIsCompressed;
formatInfoOut->isUsingAlternativeFormat = isUsingAlternativeFormat;
}
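// allocates immutable GL storage for the texture using its effective (possibly overridden) dimensions and clamped mip count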
void LatteTextureGL::AllocateOnHost()
{
auto hostTexture = this;
cemu_assert_debug(hostTexture->isDataDefined == false);
sint32 effectiveBaseWidth = hostTexture->width;
sint32 effectiveBaseHeight = hostTexture->height;
sint32 effectiveBaseDepth = hostTexture->depth;
if (hostTexture->overwriteInfo.hasResolutionOverwrite)
{
effectiveBaseWidth = hostTexture->overwriteInfo.width;
effectiveBaseHeight = hostTexture->overwriteInfo.height;
effectiveBaseDepth = hostTexture->overwriteInfo.depth;
}
// calculate mip count
sint32 mipLevels = std::min(hostTexture->mipLevels, hostTexture->maxPossibleMipLevels);
mipLevels = std::max(mipLevels, 1);
// create immutable storage
if (hostTexture->dim == Latte::E_DIM::DIM_2D || hostTexture->dim == Latte::E_DIM::DIM_2D_MSAA)
{
cemu_assert_debug(effectiveBaseDepth == 1);
glTextureStorage2DWrapper(GL_TEXTURE_2D, hostTexture->glId_texture, mipLevels, hostTexture->glInternalFormat, effectiveBaseWidth, effectiveBaseHeight);
}
else if (hostTexture->dim == Latte::E_DIM::DIM_1D)
{
cemu_assert_debug(effectiveBaseHeight == 1);
cemu_assert_debug(effectiveBaseDepth == 1);
glTextureStorage1DWrapper(GL_TEXTURE_1D, hostTexture->glId_texture, mipLevels, hostTexture->glInternalFormat, effectiveBaseWidth);
}
else if (hostTexture->dim == Latte::E_DIM::DIM_2D_ARRAY || hostTexture->dim == Latte::E_DIM::DIM_2D_ARRAY_MSAA)
{
glTextureStorage3DWrapper(GL_TEXTURE_2D_ARRAY, hostTexture->glId_texture, mipLevels, hostTexture->glInternalFormat, effectiveBaseWidth, effectiveBaseHeight, std::max(1, effectiveBaseDepth));
}
else if (hostTexture->dim == Latte::E_DIM::DIM_3D)
{
glTextureStorage3DWrapper(GL_TEXTURE_3D, hostTexture->glId_texture, mipLevels, hostTexture->glInternalFormat, effectiveBaseWidth, effectiveBaseHeight, std::max(1, effectiveBaseDepth));
}
else if (hostTexture->dim == Latte::E_DIM::DIM_CUBEMAP)
{
glTextureStorage3DWrapper(GL_TEXTURE_CUBE_MAP_ARRAY, hostTexture->glId_texture, mipLevels, hostTexture->glInternalFormat, effectiveBaseWidth, effectiveBaseHeight, effectiveBaseDepth);
}
else
{
cemu_assert_unimplemented();
}
}
| 16,811
|
C++
|
.cpp
| 487
| 31.958932
| 206
| 0.745755
|
cemu-project/Cemu
| 7,119
| 558
| 254
|
MPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
23,254
|
OpenGLRenderer.cpp
|
cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.cpp
|
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "gui/guiWrapper.h"
#include "Cafe/HW/Latte/Core/LatteRingBuffer.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/CachedFBOGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLTextureReadback.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "imgui/imgui_impl_opengl3.h"
#include "imgui/imgui_extension.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "gui/canvas/OpenGLCanvas.h"
#define STRINGIFY2(X) #X
#define STRINGIFY(X) STRINGIFY2(X)
namespace CemuGL
{
#define GLFUNC(__type, __name) __type __name;
#define EGLFUNC(__type, __name) __type __name;
#include "Common/GLInclude/glFunctions.h"
#undef GLFUNC
#undef EGLFUNC
}
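// The GLFUNC/EGLFUNC X-macro above turns every entry of glFunctions.h into a plain function-pointer
// definition inside the CemuGL namespace; the same header is included again in LoadOpenGLImports()
// below with a different GLFUNC definition that fills those pointers at runtime.
// Illustrative sketch only (the entry name is an assumption, glFunctions.h may differ): a line such as
//   GLFUNC(PFNGLCREATEBUFFERSPROC, glCreateBuffers)
// expands here to
//   PFNGLCREATEBUFFERSPROC glCreateBuffers;
// and inside the Windows version of LoadOpenGLImports() to
//   glCreateBuffers = (PFNGLCREATEBUFFERSPROC)_GetOpenGLFunction(hLib, "glCreateBuffers");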
#include "config/ActiveSettings.h"
#include "config/LaunchSettings.h"
static const int TEXBUFFER_SIZE = 1024 * 1024 * 32; // 32MB
struct
{
// options
bool useTextureUploadBuffer;
// texture upload
GLuint uploadBuffer;
sint32 uploadIndex;
void* uploadBufferPtr;
LatteRingBuffer_t* uploadRingBuffer;
// current texture work buffer (subrange of uploadRingBuffer)
uint8* texWorkBuffer;
sint32 texWorkBufferSize;
// texture upload buffer (when not using persistent buffer)
std::vector<uint8> texUploadBuffer;
// FBO for fast clearing (on Nvidia or if glClearTexSubImage is not supported)
GLuint clearFBO;
}glRendererState;
static const GLenum glDepthFuncTable[] =
{
GL_NEVER,
GL_LESS,
GL_EQUAL,
GL_LEQUAL,
GL_GREATER,
GL_NOTEQUAL,
GL_GEQUAL,
GL_ALWAYS
};
static const GLenum glAlphaTestFunc[] =
{
GL_NEVER,
GL_LESS,
GL_EQUAL,
GL_LEQUAL,
GL_GREATER,
GL_NOTEQUAL,
GL_GEQUAL,
GL_ALWAYS
};
OpenGLRenderer::OpenGLRenderer()
{
glRendererState.useTextureUploadBuffer = false;
if (glRendererState.useTextureUploadBuffer)
{
glCreateBuffers(1, &glRendererState.uploadBuffer);
glNamedBufferStorage(glRendererState.uploadBuffer, TEXBUFFER_SIZE, nullptr, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
void* buffer = glMapNamedBufferRange(glRendererState.uploadBuffer, 0, TEXBUFFER_SIZE, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
if (buffer == nullptr)
{
cemuLog_log(LogType::Force, "Failed to allocate GL texture upload buffer. Using traditional API instead");
cemu_assert_debug(false);
glRendererState.useTextureUploadBuffer = false; // actually fall back to the traditional upload path
}
glRendererState.uploadBufferPtr = buffer;
glRendererState.uploadRingBuffer = LatteRingBuffer_create((uint8*)buffer, TEXBUFFER_SIZE);
glRendererState.uploadIndex = 0;
}
#if BOOST_OS_WINDOWS
try
{
m_dxgi_wrapper = std::make_unique<DXGIWrapper>();
}
catch (const std::exception& ex)
{
cemuLog_log(LogType::Force, "Unable to create dxgi wrapper: {} (VRAM overlay stat won't be available)", ex.what());
}
#endif
}
OpenGLRenderer::~OpenGLRenderer()
{
if(m_pipeline != 0)
glDeleteProgramPipelines(1, &m_pipeline);
}
OpenGLRenderer* OpenGLRenderer::GetInstance()
{
cemu_assert_debug(g_renderer && dynamic_cast<OpenGLRenderer*>(g_renderer.get()));
return (OpenGLRenderer*)g_renderer.get();
}
bool OpenGLRenderer::ImguiBegin(bool mainWindow)
{
if (!mainWindow)
{
GLCanvas_MakeCurrent(true);
m_isPadViewContext = true;
}
if(!Renderer::ImguiBegin(mainWindow))
return false;
renderstate_resetColorControl();
renderstate_resetDepthControl();
renderstate_resetStencilMask();
if (glClipControl)
glClipControl(GL_LOWER_LEFT, GL_NEGATIVE_ONE_TO_ONE);
ImGui_ImplOpenGL3_NewFrame();
ImGui_UpdateWindowInformation(mainWindow);
ImGui::NewFrame();
return true;
}
void OpenGLRenderer::ImguiEnd()
{
ImGui::Render();
ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());
if (m_isPadViewContext)
{
GLCanvas_MakeCurrent(false);
m_isPadViewContext = false;
}
if (glClipControl)
glClipControl(GL_UPPER_LEFT, GL_NEGATIVE_ONE_TO_ONE);
}
ImTextureID OpenGLRenderer::GenerateTexture(const std::vector<uint8>& data, const Vector2i& size)
{
GLuint textureId;
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glActiveTexture(GL_TEXTURE0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_SRGB, size.x, size.y, 0, GL_RGB, GL_UNSIGNED_BYTE, data.data());
return (ImTextureID)(uintptr_t)textureId;
}
void OpenGLRenderer::DeleteTexture(ImTextureID id)
{
if (id)
{
GLuint textureId = (GLuint)(uintptr_t)id;
glDeleteTextures(1, &textureId);
}
}
void OpenGLRenderer::DeleteFontTextures()
{
ImGui_ImplOpenGL3_DestroyFontsTexture();
}
typedef void(*GL_IMPORT)();
#if BOOST_OS_WINDOWS
GL_IMPORT _GetOpenGLFunction(HMODULE hLib, const char* name)
{
GL_IMPORT r = (GL_IMPORT)wglGetProcAddress(name);
if (r == nullptr)
r = (GL_IMPORT)GetProcAddress(hLib, name);
return r;
}
void LoadOpenGLImports()
{
HMODULE hLib = LoadLibraryA("opengl32.dll");
#define GLFUNC(__type, __name) __name = (__type)_GetOpenGLFunction(hLib, STRINGIFY(__name));
#include "Common/GLInclude/glFunctions.h"
#undef GLFUNC
}
#elif BOOST_OS_LINUX
GL_IMPORT _GetOpenGLFunction(void* hLib, PFNGLXGETPROCADDRESSPROC func, const char* name)
{
GL_IMPORT r = (GL_IMPORT)func((const GLubyte*)name);
return r;
}
#include <dlfcn.h>
// #define RTLD_NOW 0x00002
// #define RTLD_GLOBAL 0x00100
void LoadOpenGLImports()
{
PFNGLXGETPROCADDRESSPROC _glXGetProcAddress = nullptr;
void* libGL = dlopen("libGL.so.1", RTLD_NOW | RTLD_GLOBAL);
_glXGetProcAddress = (PFNGLXGETPROCADDRESSPROC)dlsym(libGL, "glXGetProcAddressARB");
if(!_glXGetProcAddress)
{
libGL = dlopen("libGL.so", RTLD_NOW | RTLD_GLOBAL);
_glXGetProcAddress = (PFNGLXGETPROCADDRESSPROC)dlsym(libGL, "glXGetProcAddressARB");
}
void* libEGL = dlopen("libEGL.so.1", RTLD_NOW | RTLD_GLOBAL);
if(!libEGL)
{
libGL = dlopen("libEGL.so", RTLD_NOW | RTLD_GLOBAL);
}
#define GLFUNC(__type, __name) __name = (__type)_GetOpenGLFunction(libGL, _glXGetProcAddress, STRINGIFY(__name));
#define EGLFUNC(__type, __name) __name = (__type)dlsym(libEGL, STRINGIFY(__name));
#include "Common/GLInclude/glFunctions.h"
#undef GLFUNC
#undef EGLFUNC
}
#if BOOST_OS_LINUX
// dummy function for all code that is statically linked with cemu and attempts to use eglSwapInterval
// used to suppress wxWidgets calls to eglSwapInterval
extern "C"
EGLAPI EGLBoolean EGLAPIENTRY eglSwapInterval(EGLDisplay dpy, EGLint interval)
{
return EGL_TRUE;
}
#endif
#elif BOOST_OS_MACOS
void LoadOpenGLImports()
{
cemu_assert_unimplemented();
}
#endif
void OpenGLRenderer::Initialize()
{
Renderer::Initialize();
auto lock = cemuLog_acquire();
cemuLog_log(LogType::Force, "------- Init OpenGL graphics backend -------");
GLCanvas_MakeCurrent(false);
LoadOpenGLImports();
GetVendorInformation();
#if BOOST_OS_WINDOWS
if (wglSwapIntervalEXT)
wglSwapIntervalEXT(0); // disable V-Sync by default
#endif
if (glMaxShaderCompilerThreadsARB)
glMaxShaderCompilerThreadsARB(0xFFFFFFFF);
cemuLog_log(LogType::Force, "OpenGL extensions:");
cemuLog_log(LogType::Force, "ARB_clip_control: {}", glClipControl ? "available" : "not supported");
cemuLog_log(LogType::Force, "ARB_get_program_binary: {}", (glGetProgramBinary != NULL && glProgramBinary != NULL) ? "available" : "not supported");
cemuLog_log(LogType::Force, "ARB_clear_texture: {}", (glClearTexImage != NULL) ? "available" : "not supported");
cemuLog_log(LogType::Force, "ARB_copy_image: {}", (glCopyImageSubData != NULL) ? "available" : "not supported");
cemuLog_log(LogType::Force, "NV_depth_buffer_float: {}", (glDepthRangedNV != NULL) ? "available" : "not supported");
// enable framebuffer SRGB support
glEnable(GL_FRAMEBUFFER_SRGB);
if (this->m_vendor != GfxVendor::AMD)
{
// don't enable this for AMD because GL_PROGRAM_POINT_SIZE breaks shader binaries for some reason
// it also seems like AMD ignores GL_POINT_SPRITE and gl_PointCoord is always 0.0/0.0
// point size is always defined via shader
glEnable(GL_PROGRAM_POINT_SIZE);
// since we are using the compatibility profile, point sprites are disabled by default (e.g. gl_PointCoord won't work)
glEnable(GL_POINT_SPRITE);
}
// check if clip control is available
if (glClipControl)
{
glClipControl(GL_UPPER_LEFT, GL_NEGATIVE_ONE_TO_ONE);
}
else
{
cemuLog_log(LogType::Force, "ARB_CLIP_CONTROL not supported by graphics driver. This will lead to crashes or graphical artifacts.");
}
// set pixel unpack alignment to 1 byte
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// on NVIDIA rendering to SNORM textures is clamped to [0.0,1.0] range for non-core profiles
// we can still disable the clamping using an older function (note that AMD and Intel don't need this, which is technically incorrect according to the compatibility profile spec)
if (m_vendor == GfxVendor::Nvidia)
glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE);
glEnable(GL_PRIMITIVE_RESTART);
glPrimitiveRestartIndex(0xFFFFFFFF);
glGenProgramPipelines(1, &m_pipeline);
glBindProgramPipeline(m_pipeline);
lock.unlock();
// create framebuffer for fast clearing (avoid glClearTexSubImage on Nvidia)
if (glCreateFramebuffers)
glCreateFramebuffers(1, &glRendererState.clearFBO);
else
{
glGenFramebuffers(1, &glRendererState.clearFBO);
// bind to initialize
glBindFramebuffer(GL_FRAMEBUFFER_EXT, glRendererState.clearFBO);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0);
}
draw_init();
catchOpenGLError();
glGenBuffers(1, &glStreamoutCacheRingBuffer);
glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, glStreamoutCacheRingBuffer);
glBufferData(GL_TRANSFORM_FEEDBACK_BUFFER, LatteStreamout_GetRingBufferSize(), NULL, GL_DYNAMIC_DRAW);
glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, 0);
catchOpenGLError();
// imgui
ImGui_ImplOpenGL3_Init("#version 130");
}
bool OpenGLRenderer::IsPadWindowActive()
{
return GLCanvas_HasPadViewOpen();
}
void OpenGLRenderer::Flush(bool waitIdle)
{
glFlush();
if (waitIdle)
glFinish();
}
void OpenGLRenderer::NotifyLatteCommandProcessorIdle()
{
glFlush();
}
void OpenGLRenderer::GetVendorInformation()
{
// example vendor strings:
// ATI Technologies Inc.
// NVIDIA Corporation
// Intel
char* glVendorString = (char*)glGetString(GL_VENDOR);
char* glRendererString = (char*)glGetString(GL_RENDERER);
char* glVersionString = (char*)glGetString(GL_VERSION);
cemuLog_log(LogType::Force, "GL_VENDOR: {}", glVendorString ? glVendorString : "unknown");
cemuLog_log(LogType::Force, "GL_RENDERER: {}", glRendererString ? glRendererString : "unknown");
cemuLog_log(LogType::Force, "GL_VERSION: {}", glVersionString ? glVersionString : "unknown");
if(glVersionString && boost::icontains(glVersionString, "Mesa"))
{
m_vendor = GfxVendor::Mesa;
return;
}
if (glVendorString)
{
if ((toupper(glVendorString[0]) == 'A' && toupper(glVendorString[1]) == 'T' && toupper(glVendorString[2]) == 'I') ||
(toupper(glVendorString[0]) == 'A' && toupper(glVendorString[1]) == 'M' && toupper(glVendorString[2]) == 'D'))
{
m_vendor = GfxVendor::AMD;
return;
}
else if (memcmp(glVendorString, "NVIDIA", 6) == 0)
{
m_vendor = GfxVendor::Nvidia;
return;
}
else if (memcmp(glVendorString, "Intel", 5) == 0)
{
m_vendor = GfxVendor::Intel;
return;
}
}
m_vendor = GfxVendor::Generic;
}
void _glDebugCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, const void *userParam)
{
if (LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "Buffer"))
return;
if (LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "performance warning"))
return;
if (LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "Dithering is enabled"))
return;
if (LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "Blending is enabled, but is not supported for integer framebuffers"))
return;
if (LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "does not have a defined base level"))
return;
if(LatteGPUState.glVendor == GLVENDOR_NVIDIA && strstr(message, "has depth comparisons disabled, with a texture object"))
return;
cemuLog_log(LogType::Force, "GLDEBUG: {}", message);
cemu_assert_debug(false);
}
void OpenGLRenderer::EnableDebugMode()
{
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
glDebugMessageCallback(_glDebugCallback, NULL);
glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, NULL, true);
}
void OpenGLRenderer::SwapBuffers(bool swapTV, bool swapDRC)
{
GLCanvas_SwapBuffers(swapTV, swapDRC);
if (swapTV)
cleanupAfterFrame();
}
bool OpenGLRenderer::BeginFrame(bool mainWindow)
{
if (!mainWindow && !IsPadWindowActive())
return false;
GLCanvas_MakeCurrent(!mainWindow);
ClearColorbuffer(!mainWindow);
return true;
}
void OpenGLRenderer::DrawEmptyFrame(bool mainWindow)
{
if (!BeginFrame(mainWindow))
return;
SwapBuffers(mainWindow, !mainWindow);
}
void OpenGLRenderer::ClearColorbuffer(bool padView)
{
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
void OpenGLRenderer::HandleScreenshotRequest(LatteTextureView* texView, bool padView)
{
const bool hasScreenshotRequest = gui_hasScreenshotRequest();
if(!hasScreenshotRequest && m_screenshot_state == ScreenshotState::None)
return;
if (IsPadWindowActive())
{
// we already took a pad view screenshot and still want a main window screenshot
if (m_screenshot_state == ScreenshotState::Main && padView)
return;
if (m_screenshot_state == ScreenshotState::Pad && !padView)
return;
// remember which screenshot is left to take
if (m_screenshot_state == ScreenshotState::None)
m_screenshot_state = padView ? ScreenshotState::Main : ScreenshotState::Pad;
else
m_screenshot_state = ScreenshotState::None;
}
else
m_screenshot_state = ScreenshotState::None;
int screenshotWidth, screenshotHeight;
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
texture_bindAndActivate(texView, 0);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &screenshotWidth);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &screenshotHeight);
glPixelStorei(GL_PACK_ALIGNMENT, 1); // set alignment to 1
const sint32 pixelDataSize = screenshotWidth * screenshotHeight * 3;
std::vector<uint8> rgb_data(pixelDataSize);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_UNSIGNED_BYTE, rgb_data.data());
texture_bindAndActivate(nullptr, 0);
const bool srcUsesSRGB = HAS_FLAG(texView->format, Latte::E_GX2SURFFMT::FMT_BIT_SRGB);
const bool dstUsesSRGB = (!padView && LatteGPUState.tvBufferUsesSRGB) || (padView && LatteGPUState.drcBufferUsesSRGB);
if((srcUsesSRGB && !dstUsesSRGB) || (!srcUsesSRGB && dstUsesSRGB))
{
for (sint32 iy = 0; iy < screenshotHeight; ++iy)
{
for (sint32 ix = 0; ix < screenshotWidth; ++ix)
{
uint8* pData = rgb_data.data() + (ix + iy * screenshotWidth) * 3;
if (srcUsesSRGB && !dstUsesSRGB)
{
// SRGB -> RGB
pData[0] = SRGBComponentToRGB(pData[0]);
pData[1] = SRGBComponentToRGB(pData[1]);
pData[2] = SRGBComponentToRGB(pData[2]);
}
else if (!srcUsesSRGB && dstUsesSRGB)
{
// RGB -> SRGB
pData[0] = RGBComponentToSRGB(pData[0]);
pData[1] = RGBComponentToSRGB(pData[1]);
pData[2] = RGBComponentToSRGB(pData[2]);
}
}
}
}
SaveScreenshot(rgb_data, screenshotWidth, screenshotHeight, !padView);
}
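// SRGBComponentToRGB/RGBComponentToSRGB used above are defined elsewhere in the codebase; as a rough
// reference, here is a minimal sketch of the standard sRGB transfer functions they are assumed to
// implement (sketch only - the real helpers may use a lookup table or a cheaper approximation;
// assumes powf from <cmath> is available):
[[maybe_unused]] static uint8 SRGBComponentToRGB_sketch(uint8 cs8)
{
	float cs = cs8 / 255.0f;
	// sRGB EOTF: linear-light value from an encoded component
	float cl = (cs <= 0.04045f) ? (cs / 12.92f) : powf((cs + 0.055f) / 1.055f, 2.4f);
	cl = (cl < 0.0f) ? 0.0f : (cl > 1.0f ? 1.0f : cl);
	return (uint8)(cl * 255.0f + 0.5f);
}
[[maybe_unused]] static uint8 RGBComponentToSRGB_sketch(uint8 cl8)
{
	float cl = cl8 / 255.0f;
	// inverse EOTF: encoded component from a linear-light value
	float cs = (cl < 0.0031308f) ? (cl * 12.92f) : (1.055f * powf(cl, 1.0f / 2.4f) - 0.055f);
	cs = (cs < 0.0f) ? 0.0f : (cs > 1.0f ? 1.0f : cs);
	return (uint8)(cs * 255.0f + 0.5f);
}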
void OpenGLRenderer::DrawBackbufferQuad(LatteTextureView* texView, RendererOutputShader* shader, bool useLinearTexFilter, sint32 imageX, sint32 imageY, sint32 imageWidth, sint32 imageHeight, bool padView, bool clearBackground)
{
if (padView && !IsPadWindowActive())
return;
catchOpenGLError();
GLCanvas_MakeCurrent(padView);
renderstate_resetColorControl();
renderstate_resetDepthControl();
attributeStream_reset();
// bind back buffer
rendertarget_bindFramebufferObject(nullptr);
if (clearBackground)
{
int windowWidth, windowHeight;
if (padView)
gui_getPadWindowPhysSize(windowWidth, windowHeight);
else
gui_getWindowPhysSize(windowWidth, windowHeight);
g_renderer->renderTarget_setViewport(0, 0, windowWidth, windowHeight, 0.0f, 1.0f);
g_renderer->ClearColorbuffer(padView);
}
sint32 effectiveWidth, effectiveHeight;
texView->baseTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, 0);
shader_unbind(RendererShader::ShaderType::kGeometry);
shader_bind(shader->GetVertexShader());
shader_bind(shader->GetFragmentShader());
shader->SetUniformParameters(*texView, { effectiveWidth, effectiveHeight }, { imageWidth, imageHeight });
// set viewport
glViewportIndexedf(0, imageX, imageY, imageWidth, imageHeight);
LatteTextureViewGL* texViewGL = (LatteTextureViewGL*)texView;
texture_bindAndActivate(texView, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, useLinearTexFilter ? GL_LINEAR : GL_NEAREST);
texViewGL->samplerState.filterMag = 0xFFFFFFFF;
if ((!padView && !LatteGPUState.tvBufferUsesSRGB) || (padView && !LatteGPUState.drcBufferUsesSRGB))
glDisable(GL_FRAMEBUFFER_SRGB);
uint16 indexData[6] = { 0,1,2,3,4,5 };
glDrawRangeElements(GL_TRIANGLES, 0, 5, 6, GL_UNSIGNED_SHORT, indexData);
if ((!padView && !LatteGPUState.tvBufferUsesSRGB) || (padView && !LatteGPUState.drcBufferUsesSRGB))
glEnable(GL_FRAMEBUFFER_SRGB);
// unbind texture
texture_bindAndActivate(nullptr, 0);
catchOpenGLError();
// restore viewport
glViewportIndexedf(0, prevViewportX, prevViewportY, prevViewportWidth, prevViewportHeight);
// switch back to TV context
if (padView)
GLCanvas_MakeCurrent(false);
}
void OpenGLRenderer::renderTarget_setViewport(float x, float y, float width, float height, float nearZ, float farZ, bool halfZ /*= false*/)
{
if (prevNearZ != nearZ || prevFarZ != farZ || _prevHalfZ != halfZ)
{
if (*(uint32*)&farZ == 0x3f7fffff)
*(uint32*)&farZ = 0x3f800000;
if (glDepthRangedNV)
glDepthRangedNV(nearZ, farZ);
else
glDepthRange(nearZ, farZ);
prevNearZ = nearZ;
prevFarZ = farZ;
}
bool invertY = false;
if (height < 0.0)
{
invertY = true;
y += height;
height = -height;
}
if (glClipControl && (_prevInvertY != invertY || _prevHalfZ != halfZ))
{
GLenum clipDepth = halfZ ? GL_ZERO_TO_ONE : GL_NEGATIVE_ONE_TO_ONE;
if (invertY)
glClipControl(GL_LOWER_LEFT, clipDepth); // OpenGL style
else
glClipControl(GL_UPPER_LEFT, clipDepth); // DX style (default for GX2)
_prevInvertY = invertY;
_prevHalfZ = halfZ;
}
if (prevViewportX == x && prevViewportY == y && prevViewportWidth == width && prevViewportHeight == height)
return; // viewport did not change
glViewportIndexedf(0, x, y, width, height);
prevViewportX = x;
prevViewportY = y;
prevViewportWidth = width;
prevViewportHeight = height;
}
void OpenGLRenderer::renderTarget_setScissor(sint32 scissorX, sint32 scissorY, sint32 scissorWidth, sint32 scissorHeight)
{
if (prevScissorEnable != true)
{
// enable scissor box
glEnable(GL_SCISSOR_TEST);
prevScissorEnable = true;
}
glScissor(scissorX, scissorY, scissorWidth, scissorHeight);
}
LatteCachedFBO* OpenGLRenderer::rendertarget_createCachedFBO(uint64 key)
{
return new CachedFBOGL(key);
}
void OpenGLRenderer::rendertarget_deleteCachedFBO(LatteCachedFBO* cfbo)
{
auto cfboGL = (CachedFBOGL*)cfbo;
if (prevBoundFBO == cfboGL->glId_fbo)
{
glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0);
prevBoundFBO = 0;
}
glDeleteFramebuffers(1, &cfboGL->glId_fbo);
}
// set active FBO
void OpenGLRenderer::rendertarget_bindFramebufferObject(LatteCachedFBO* cfbo)
{
GLuint fboid;
if (cfbo)
{
const auto cfboGL = (CachedFBOGL*)cfbo;
fboid = cfboGL->glId_fbo;
}
else
fboid = 0;
if (prevBoundFBO != fboid)
{
glBindFramebuffer(GL_FRAMEBUFFER_EXT, fboid);
prevBoundFBO = fboid;
}
}
void OpenGLRenderer::renderstate_setChannelTargetMask(uint32 renderTargetMask)
{
if (renderTargetMask != prevTargetColorMask)
{
for (sint32 i = 0; i < 8; i++)
{
uint32 targetRGBAMask = ((renderTargetMask >> (i * 4)) & 0xF);
if (targetRGBAMask != ((prevTargetColorMask >> (i * 4)) & 0xF))
{
// update color mask
glColorMaski(i, (targetRGBAMask & 1) ? GL_TRUE : GL_FALSE, (targetRGBAMask & 2) ? GL_TRUE : GL_FALSE, (targetRGBAMask & 4) ? GL_TRUE : GL_FALSE, (targetRGBAMask & 8) ? GL_TRUE : GL_FALSE);
}
}
prevTargetColorMask = renderTargetMask;
}
}
void OpenGLRenderer::renderstate_setAlwaysWriteDepth()
{
if (prevDepthEnable == 0)
{
glEnable(GL_DEPTH_TEST);
prevDepthEnable = 1;
}
glDepthFunc(GL_ALWAYS);
prevDepthFunc = Latte::LATTE_DB_DEPTH_CONTROL::E_ZFUNC::ALWAYS;
}
static const GLuint table_glBlendSrcDst[] =
{
/* 0x00 */ GL_ZERO,
/* 0x01 */ GL_ONE,
/* 0x02 */ GL_SRC_COLOR,
/* 0x03 */ GL_ONE_MINUS_SRC_COLOR,
/* 0x04 */ GL_SRC_ALPHA,
/* 0x05 */ GL_ONE_MINUS_SRC_ALPHA,
/* 0x06 */ GL_DST_ALPHA,
/* 0x07 */ GL_ONE_MINUS_DST_ALPHA,
/* 0x08 */ GL_DST_COLOR,
/* 0x09 */ GL_ONE_MINUS_DST_COLOR,
/* 0x0A */ GL_SRC_ALPHA_SATURATE,
/* 0x0B */ 0xFFFFFFFF,
/* 0x0C */ 0xFFFFFFFF,
/* 0x0D */ GL_CONSTANT_COLOR,
/* 0x0E */ GL_ONE_MINUS_CONSTANT_COLOR,
/* 0x0F */ GL_SRC1_COLOR,
/* 0x10 */ GL_ONE_MINUS_SRC1_COLOR,
/* 0x11 */ GL_SRC1_ALPHA,
/* 0x12 */ GL_ONE_MINUS_SRC1_ALPHA,
/* 0x13 */ GL_CONSTANT_ALPHA,
/* 0x14 */ GL_ONE_MINUS_CONSTANT_ALPHA
};
static GLuint GetGLBlendFactor(Latte::LATTE_CB_BLENDN_CONTROL::E_BLENDFACTOR blendFactor)
{
uint32 blendFactorU = (uint32)blendFactor;
if (blendFactorU >= 0xF && blendFactorU <= 0x12)
{
debug_printf("Unsupported dual-source blending used\n");
cemu_assert_debug(false); // dual-source blending
return GL_ZERO;
}
if (blendFactorU >= (sizeof(table_glBlendSrcDst) / sizeof(table_glBlendSrcDst[0])))
{
debug_printf("GetGLBlendFactor: Constant 0x%x out of range\n", blendFactorU);
return GL_ZERO;
}
if (table_glBlendSrcDst[blendFactorU] == 0xFFFFFFFF)
{
debug_printf("GetGLBlendFactor: Constant 0x%x is invalid\n", blendFactorU);
cemu_assert_debug(false);
return GL_ZERO;
}
return table_glBlendSrcDst[blendFactorU];
}
static const GLuint table_glBlendCombine[] =
{
GL_FUNC_ADD,
GL_FUNC_SUBTRACT,
GL_MIN,
GL_MAX,
GL_FUNC_REVERSE_SUBTRACT
};
GLuint GetGLBlendCombineFunc(Latte::LATTE_CB_BLENDN_CONTROL::E_COMBINEFUNC combineFunc)
{
uint32 combineFuncU = (uint32)combineFunc;
if (combineFuncU >= (sizeof(table_glBlendCombine) / sizeof(table_glBlendCombine[0])))
{
cemu_assert_suspicious();
return GL_FUNC_ADD;
}
return table_glBlendCombine[combineFuncU];
}
void* OpenGLRenderer::texture_acquireTextureUploadBuffer(uint32 size)
{
if (glRendererState.useTextureUploadBuffer)
{
glRendererState.texWorkBuffer = LatteRingBuffer_allocate(glRendererState.uploadRingBuffer, size, 1024);
glRendererState.texWorkBufferSize = size;
return glRendererState.texWorkBuffer;
}
// static memory buffer
if (glRendererState.texUploadBuffer.size() < size)
{
glRendererState.texUploadBuffer.resize(size);
}
return glRendererState.texUploadBuffer.data();
}
void OpenGLRenderer::texture_releaseTextureUploadBuffer(uint8* mem)
{
// do nothing
}
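// Usage sketch for the texture upload path implemented by the two functions above and
// texture_loadSlice() below (caller shown is an assumption; the actual call sites live in the texture loader):
//   void* buf = texture_acquireTextureUploadBuffer(imageSize);   // ring-buffer slice or CPU-side vector
//   ... decode/untile the texture data into buf ...
//   texture_loadSlice(tex, width, height, depth, buf, sliceIndex, mipIndex, imageSize);
//   texture_releaseTextureUploadBuffer((uint8*)buf);
// When useTextureUploadBuffer is enabled, buf points into the persistently mapped upload buffer and
// texture_loadSlice() routes through OpenGLRenderer_texture_loadSlice_viaBuffers(), which flushes the
// mapped range and passes a buffer offset instead of a client pointer while GL_PIXEL_UNPACK_BUFFER is bound.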
TextureDecoder* OpenGLRenderer::texture_chooseDecodedFormat(Latte::E_GX2SURFFMT format, bool isDepth, Latte::E_DIM dim, uint32 width, uint32 height)
{
TextureDecoder* texDecoder = nullptr;
if (isDepth)
{
if (format == Latte::E_GX2SURFFMT::R32_FLOAT)
{
return TextureDecoder_R32_FLOAT::getInstance();
}
if (format == Latte::E_GX2SURFFMT::D24_S8_UNORM)
{
return TextureDecoder_D24_S8::getInstance();
}
else if (format == Latte::E_GX2SURFFMT::D24_S8_FLOAT)
{
return TextureDecoder_NullData64::getInstance();
}
else if (format == Latte::E_GX2SURFFMT::D32_S8_FLOAT)
{
return TextureDecoder_D32_S8_UINT_X24::getInstance();
}
else if (format == Latte::E_GX2SURFFMT::R16_UNORM)
{
return TextureDecoder_R16_FLOAT::getInstance();
}
return nullptr;
}
if (format == Latte::E_GX2SURFFMT::R4_G4_UNORM)
texDecoder = TextureDecoder_R4_G4_UNORM_To_RGBA4::getInstance();
else if (format == Latte::E_GX2SURFFMT::R4_G4_B4_A4_UNORM)
texDecoder = TextureDecoder_R4_G4_B4_A4_UNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT)
texDecoder = TextureDecoder_R16_G16_B16_A16_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_FLOAT)
texDecoder = TextureDecoder_R16_G16_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_SNORM)
texDecoder = TextureDecoder_R16_SNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_FLOAT)
texDecoder = TextureDecoder_R16_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_FLOAT)
texDecoder = TextureDecoder_R32_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC1_UNORM)
texDecoder = TextureDecoder_BC1::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC1_SRGB)
texDecoder = TextureDecoder_BC1::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC2_UNORM)
texDecoder = TextureDecoder_BC2_UNORM_uncompress::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC2_SRGB)
texDecoder = TextureDecoder_BC2_SRGB_uncompress::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC3_UNORM)
texDecoder = TextureDecoder_BC3::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC3_SRGB)
texDecoder = TextureDecoder_BC3::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC4_UNORM)
{
if (dim != Latte::E_DIM::DIM_2D && dim != Latte::E_DIM::DIM_2D_ARRAY)
texDecoder = TextureDecoder_BC4_UNORM_uncompress::getInstance();
else
texDecoder = TextureDecoder_BC4::getInstance();
}
else if (format == Latte::E_GX2SURFFMT::BC4_SNORM)
{
if (dim != Latte::E_DIM::DIM_2D && dim != Latte::E_DIM::DIM_2D_ARRAY)
texDecoder = TextureDecoder_BC4::getInstance();
else
texDecoder = TextureDecoder_BC4_UNORM_uncompress::getInstance();
}
else if (format == Latte::E_GX2SURFFMT::BC5_UNORM)
texDecoder = TextureDecoder_BC5::getInstance();
else if (format == Latte::E_GX2SURFFMT::BC5_SNORM)
texDecoder = TextureDecoder_BC5::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM)
texDecoder = TextureDecoder_R8_G8_B8_A8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SNORM)
texDecoder = TextureDecoder_R8_G8_B8_A8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
texDecoder = TextureDecoder_R8_G8_B8_A8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_UNORM)
texDecoder = TextureDecoder_R8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_SNORM)
texDecoder = TextureDecoder_R8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_UNORM)
texDecoder = TextureDecoder_R8_G8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_SNORM)
texDecoder = TextureDecoder_R8_G8::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_UNORM)
texDecoder = TextureDecoder_R16_UNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
texDecoder = TextureDecoder_R16_G16_B16_A16::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_SNORM)
texDecoder = TextureDecoder_R16_G16_B16_A16::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_UNORM)
texDecoder = TextureDecoder_R16_G16::getInstance();
else if (format == Latte::E_GX2SURFFMT::R5_G6_B5_UNORM)
texDecoder = TextureDecoder_R5_G6_B5::getInstance();
else if (format == Latte::E_GX2SURFFMT::R5_G5_B5_A1_UNORM)
texDecoder = TextureDecoder_R5_G5_B5_A1_UNORM_swappedOpenGL::getInstance();
else if (format == Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM)
texDecoder = TextureDecoder_A1_B5_G5_R5_UNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_G32_FLOAT)
texDecoder = TextureDecoder_R32_G32_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_G32_UINT)
texDecoder = TextureDecoder_R32_G32_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_UINT)
texDecoder = TextureDecoder_R32_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_UINT)
texDecoder = TextureDecoder_R16_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_UINT)
texDecoder = TextureDecoder_R8_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT)
texDecoder = TextureDecoder_R32_G32_B32_A32_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_UNORM)
texDecoder = TextureDecoder_R10_G10_B10_A2_UNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::A2_B10_G10_R10_UNORM)
texDecoder = TextureDecoder_A2_B10_G10_R10_UNORM_To_RGBA16::getInstance();
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SNORM)
texDecoder = TextureDecoder_R10_G10_B10_A2_SNORM_To_RGBA16::getInstance();
else if (format == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SRGB)
texDecoder = TextureDecoder_R10_G10_B10_A2_UNORM::getInstance();
else if (format == Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT)
texDecoder = TextureDecoder_R11_G11_B10_FLOAT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_UINT)
texDecoder = TextureDecoder_R32_G32_B32_A32_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UINT)
texDecoder = TextureDecoder_R16_G16_B16_A16_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UINT)
texDecoder = TextureDecoder_R8_G8_B8_A8_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::R24_X8_UNORM)
texDecoder = TextureDecoder_R24_X8::getInstance();
else if (format == Latte::E_GX2SURFFMT::X24_G8_UINT)
texDecoder = TextureDecoder_X24_G8_UINT::getInstance();
else if (format == Latte::E_GX2SURFFMT::D32_S8_FLOAT)
texDecoder = TextureDecoder_D32_S8_UINT_X24::getInstance();
else
cemu_assert_debug(false);
cemu_assert_debug(!isDepth);
return texDecoder;
}
// use standard API to upload texture data
void OpenGLRenderer_texture_loadSlice_normal(LatteTexture* hostTextureGeneric, sint32 width, sint32 height, sint32 depth, void* pixelData, sint32 sliceIndex, sint32 mipIndex, uint32 imageSize)
{
auto hostTexture = (LatteTextureGL*)hostTextureGeneric;
sint32 effectiveWidth = width;
sint32 effectiveHeight = height;
sint32 effectiveDepth = depth;
cemu_assert_debug(hostTexture->overwriteInfo.hasResolutionOverwrite == false); // not supported in _loadSlice
cemu_assert_debug(hostTexture->overwriteInfo.hasFormatOverwrite == false); // not supported in _loadSlice
// get format info
LatteTextureGL::FormatInfoGL glFormatInfo;
LatteTextureGL::GetOpenGLFormatInfo(hostTexture->isDepth, hostTexture->overwriteInfo.hasFormatOverwrite ? (Latte::E_GX2SURFFMT)hostTexture->overwriteInfo.format : hostTexture->format, hostTexture->dim, &glFormatInfo);
// upload slice
catchOpenGLError();
if (mipIndex >= hostTexture->maxPossibleMipLevels)
{
cemuLog_logDebug(LogType::Force, "2D texture mip level allocated out of range");
return;
}
if (hostTexture->dim == Latte::E_DIM::DIM_2D || hostTexture->dim == Latte::E_DIM::DIM_2D_MSAA)
{
if (glFormatInfo.glIsCompressed)
glCompressedTextureSubImage2DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, 0, effectiveWidth, effectiveHeight, glFormatInfo.glInternalFormat, imageSize, pixelData);
else
glTextureSubImage2DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, 0, effectiveWidth, effectiveHeight, glFormatInfo.glSuppliedFormat, glFormatInfo.glSuppliedFormatType, pixelData);
}
else if (hostTexture->dim == Latte::E_DIM::DIM_1D)
{
if (glFormatInfo.glIsCompressed)
glCompressedTextureSubImage1DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, width, glFormatInfo.glInternalFormat, imageSize, pixelData);
else
glTextureSubImage1DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, width, glFormatInfo.glSuppliedFormat, glFormatInfo.glSuppliedFormatType, pixelData);
}
else if (hostTexture->dim == Latte::E_DIM::DIM_2D_ARRAY || hostTexture->dim == Latte::E_DIM::DIM_2D_ARRAY_MSAA ||
hostTexture->dim == Latte::E_DIM::DIM_3D ||
hostTexture->dim == Latte::E_DIM::DIM_CUBEMAP)
{
if (glFormatInfo.glIsCompressed)
glCompressedTextureSubImage3DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, 0, sliceIndex, effectiveWidth, effectiveHeight, 1, glFormatInfo.glInternalFormat, imageSize, pixelData);
else
glTextureSubImage3DWrapper(hostTexture->glTexTarget, hostTexture->glId_texture, mipIndex, 0, 0, sliceIndex, effectiveWidth, effectiveHeight, 1, glFormatInfo.glSuppliedFormat, glFormatInfo.glSuppliedFormatType, pixelData);
}
catchOpenGLError();
}
// use persistent buffers to upload data
void OpenGLRenderer_texture_loadSlice_viaBuffers(LatteTexture* hostTexture, sint32 width, sint32 height, sint32 depth, void* pixelData, sint32 sliceIndex, sint32 mipIndex, uint32 imageSize)
{
// bind buffer
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, glRendererState.uploadBuffer);
catchOpenGLError();
// upload data to buffer
cemu_assert(imageSize <= TEXBUFFER_SIZE);
cemu_assert_debug(glRendererState.texWorkBufferSize == imageSize);
glFlushMappedBufferRange(GL_PIXEL_UNPACK_BUFFER, (GLintptr)(glRendererState.texWorkBuffer - (uint8*)glRendererState.uploadBufferPtr), imageSize);
catchOpenGLError();
OpenGLRenderer_texture_loadSlice_normal(hostTexture, width, height, depth, (void*)(glRendererState.texWorkBuffer - (uint8*)glRendererState.uploadBufferPtr), sliceIndex, mipIndex, imageSize);
// unbind buffer and sync
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
catchOpenGLError();
}
void OpenGLRenderer::texture_loadSlice(LatteTexture* hostTexture, sint32 width, sint32 height, sint32 depth, void* pixelData, sint32 sliceIndex, sint32 mipIndex, uint32 imageSize)
{
if (glRendererState.useTextureUploadBuffer)
OpenGLRenderer_texture_loadSlice_viaBuffers(hostTexture, width, height, depth, pixelData, sliceIndex, mipIndex, imageSize);
else
OpenGLRenderer_texture_loadSlice_normal(hostTexture, width, height, depth, pixelData, sliceIndex, mipIndex, imageSize);
}
void OpenGLRenderer::texture_clearColorSlice(LatteTexture* hostTexture, sint32 sliceIndex, sint32 mipIndex, float r, float g, float b, float a)
{
LatteTextureGL* texGL = (LatteTextureGL*)hostTexture;
cemu_assert_debug(!texGL->isDepth);
sint32 eWidth, eHeight;
hostTexture->GetEffectiveSize(eWidth, eHeight, mipIndex);
renderstate_resetColorControl();
renderTarget_setViewport(0, 0, eWidth, eHeight, 0.0f, 1.0f);
LatteMRT::BindColorBufferOnly(hostTexture->GetOrCreateView(mipIndex, 1, sliceIndex, 1));
glClearColor(r, g, b, a);
glClear(GL_COLOR_BUFFER_BIT);
}
void OpenGLRenderer::texture_clearDepthSlice(LatteTexture* hostTexture, uint32 sliceIndex, sint32 mipIndex, bool clearDepth, bool clearStencil, float depthValue, uint32 stencilValue)
{
LatteTextureGL* texGL = (LatteTextureGL*)hostTexture;
cemu_assert_debug(texGL->isDepth);
sint32 eWidth, eHeight;
hostTexture->GetEffectiveSize(eWidth, eHeight, mipIndex);
renderstate_resetColorControl();
renderstate_resetDepthControl();
renderTarget_setViewport(0, 0, eWidth, eHeight, 0.0f, 1.0f);
LatteMRT::BindDepthBufferOnly(hostTexture->GetOrCreateView(mipIndex, 1, sliceIndex, 1));
if (!hostTexture->hasStencil)
clearStencil = false;
if (clearDepth)
glClearDepth(depthValue);
if (clearStencil)
{
renderstate_resetStencilMask();
glClearStencil((GLint)stencilValue);
}
glClear((clearDepth ? GL_DEPTH_BUFFER_BIT : 0) | (clearStencil ? GL_STENCIL_BUFFER_BIT : 0));
catchOpenGLError();
}
void OpenGLRenderer::texture_clearSlice(LatteTexture* hostTextureGeneric, sint32 sliceIndex, sint32 mipIndex)
{
auto hostTexture = (LatteTextureGL*)hostTextureGeneric;
// get OpenGL format info
LatteTextureGL::FormatInfoGL formatInfoGL;
LatteTextureGL::GetOpenGLFormatInfo(hostTexture->isDepth, hostTexture->format, hostTexture->dim, &formatInfoGL);
// get effective size of mip
sint32 effectiveWidth, effectiveHeight;
hostTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, mipIndex);
// on Nvidia glClearTexImage and glClearTexSubImage have bad performance (clearing a 4K texture takes up to 50ms)
// clearing with glTextureSubImage2D from a CPU RAM buffer is only slightly slower
// clearing with glTextureSubImage2D from an OpenGL buffer is 10-20% faster than glClearTexImage
// clearing with an FBO and glClear is orders of magnitude faster than the other methods
// (these are results from 2018, may be different now)
if (this->m_vendor == GfxVendor::Nvidia || glClearTexSubImage == nullptr)
{
if (formatInfoGL.glIsCompressed)
{
cemuLog_logDebug(LogType::Force, "Unsupported clear for compressed texture");
return; // todo - create integer texture view to clear compressed textures
}
if (hostTextureGeneric->isDepth)
{
cemuLog_logDebug(LogType::Force, "Unsupported clear for depth texture");
return; // todo - use depth clear
}
glBindFramebuffer(GL_FRAMEBUFFER_EXT, glRendererState.clearFBO);
// set attachment
if (hostTexture->dim == Latte::E_DIM::DIM_2D)
glFramebufferTexture2D(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, hostTexture->glId_texture, mipIndex);
else
glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, hostTexture->glId_texture, mipIndex, sliceIndex);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, prevBoundFBO);
return;
}
if (glClearTexSubImage == nullptr)
return;
glClearTexSubImage(hostTexture->glId_texture, mipIndex, 0, 0, sliceIndex, effectiveWidth, effectiveHeight, 1, formatInfoGL.glSuppliedFormat, formatInfoGL.glSuppliedFormatType, NULL);
}
LatteTexture* OpenGLRenderer::texture_createTextureEx(Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels,
uint32 swizzle, Latte::E_HWTILEMODE tileMode, bool isDepth)
{
return new LatteTextureGL(dim, physAddress, physMipAddress, format, width, height, depth, pitch, mipLevels, swizzle, tileMode, isDepth);
}
void OpenGLRenderer::texture_setActiveTextureUnit(sint32 index)
{
if (activeTextureUnit != index)
{
glActiveTexture(GL_TEXTURE0 + index);
activeTextureUnit = index;
}
}
void OpenGLRenderer::texture_bindAndActivate(LatteTextureView* textureView, uint32 textureUnit)
{
const auto textureViewGL = (LatteTextureViewGL*)textureView;
// don't call glBindTexture if the texture is already bound
if (m_latteBoundTextures[textureUnit] == textureViewGL)
{
texture_setActiveTextureUnit(textureUnit);
return; // already bound
}
// bind
m_latteBoundTextures[textureUnit] = textureViewGL;
texture_setActiveTextureUnit(textureUnit);
if (textureViewGL)
{
glBindTexture(textureViewGL->glTexTarget, textureViewGL->glTexId);
}
}
void OpenGLRenderer::texture_notifyDelete(LatteTextureView* textureView)
{
for (uint32 i = 0; i < Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE * 3; i++)
{
if (m_latteBoundTextures[i] == textureView)
m_latteBoundTextures[i] = nullptr;
}
}
// set Latte texture; on the OpenGL renderer this behaves like _bindAndActivate(), but doesn't call _setActiveTextureUnit() if the texture is already bound
void OpenGLRenderer::texture_setLatteTexture(LatteTextureView* textureView1, uint32 textureUnit)
{
auto textureView = ((LatteTextureViewGL*)textureView1);
cemu_assert_debug(textureUnit < Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE * 3);
if (m_latteBoundTextures[textureUnit] == textureView)
return;
if (textureView == nullptr)
return;
// bind
if (glBindTextureUnit)
{
glBindTextureUnit(textureUnit, textureView->glTexId);
m_latteBoundTextures[textureUnit] = textureView;
activeTextureUnit = -1;
}
else
{
texture_setActiveTextureUnit(textureUnit);
glBindTexture(textureView->glTexTarget, textureView->glTexId);
m_latteBoundTextures[textureUnit] = textureView;
}
}
void OpenGLRenderer::texture_copyImageSubData(LatteTexture* src, sint32 srcMip, sint32 effectiveSrcX, sint32 effectiveSrcY, sint32 srcSlice, LatteTexture* dst, sint32 dstMip, sint32 effectiveDstX, sint32 effectiveDstY,
sint32 dstSlice, sint32 effectiveCopyWidth, sint32 effectiveCopyHeight, sint32 srcDepth)
{
auto srcGL = (LatteTextureGL*)src;
auto dstGL = (LatteTextureGL*)dst;
if ((srcGL->isAlternativeFormat || dstGL->isAlternativeFormat) && (srcGL->glInternalFormat != dstGL->glInternalFormat))
{
if (srcGL->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UINT && dstGL->format == Latte::E_GX2SURFFMT::BC4_UNORM)
{
cemu_assert_debug(dstGL->dim != Latte::E_DIM::DIM_2D);
// special case where BC4 format is replaced with R16F for array/3d-textures (since OpenGL's BC4 compression only supports 2D textures)
texture_syncSliceSpecialBC4(srcGL, srcSlice, srcMip, dstGL, dstSlice, dstMip);
return;
}
else
{
cemuLog_logDebug(LogType::Force, "_syncSlice() called with unhandled alternative format");
return;
}
}
if (srcGL->format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_UINT && dstGL->format == Latte::E_GX2SURFFMT::BC3_UNORM)
{
if ((dstGL->width >> dstMip) < 4 || (dstGL->height >> dstMip) < 4)
{
texture_syncSliceSpecialIntegerToBC3(srcGL, srcSlice, srcMip, dstGL, dstSlice, dstMip);
return;
}
}
catchOpenGLError();
glCopyImageSubData(srcGL->glId_texture, srcGL->glTexTarget, srcMip, effectiveSrcX, effectiveSrcY, srcSlice, dstGL->glId_texture, dstGL->glTexTarget, dstMip, effectiveDstX, effectiveDstY, dstSlice, effectiveCopyWidth, effectiveCopyHeight, srcDepth);
catchOpenGLError();
}
LatteTextureReadbackInfo* OpenGLRenderer::texture_createReadback(LatteTextureView* textureView)
{
return new LatteTextureReadbackInfoGL(textureView);
}
void LatteDraw_resetAttributePointerCache();
void OpenGLRenderer::attributeStream_reset()
{
// reset attribute state
attributeStream_unbindVertexBuffer();
// setup vertices
SetArrayElementBuffer(0);
LatteDraw_resetAttributePointerCache();
SetAttributeArrayState(0, true, -1);
SetAttributeArrayState(1, true, -1);
for (uint32 i = 0; i < GPU_GL_MAX_NUM_ATTRIBUTE; i++)
SetAttributeArrayState(i, false, -1);
}
void OpenGLRenderer::bufferCache_init(const sint32 bufferSize)
{
glGenBuffers(1, &glAttributeCacheAB);
glBindBuffer(GL_ARRAY_BUFFER, glAttributeCacheAB);
glBufferData(GL_ARRAY_BUFFER, bufferSize, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void OpenGLRenderer::attributeStream_bindVertexCacheBuffer()
{
if (_boundArrayBuffer == glAttributeCacheAB)
return;
_boundArrayBuffer = glAttributeCacheAB;
glBindBuffer(GL_ARRAY_BUFFER, glAttributeCacheAB);
}
void OpenGLRenderer::attributeStream_unbindVertexBuffer()
{
if (_boundArrayBuffer == 0)
return;
_boundArrayBuffer = 0;
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
RendererShader* OpenGLRenderer::shader_create(RendererShader::ShaderType type, uint64 baseHash, uint64 auxHash, const std::string& source, bool isGameShader, bool isGfxPackShader)
{
return new RendererShaderGL(type, baseHash, auxHash, isGameShader, isGfxPackShader, source);
}
void OpenGLRenderer::shader_bind(RendererShader* shader)
{
auto shaderGL = (RendererShaderGL*)shader;
GLbitfield shaderBit;
const auto program = shaderGL->GetProgram();
switch(shader->GetType())
{
case RendererShader::ShaderType::kVertex:
if (program == prevVertexShaderProgram)
return;
shaderBit = GL_VERTEX_SHADER_BIT;
prevVertexShaderProgram = program;
break;
case RendererShader::ShaderType::kFragment:
if (program == prevPixelShaderProgram)
return;
shaderBit = GL_FRAGMENT_SHADER_BIT;
prevPixelShaderProgram = program;
break;
case RendererShader::ShaderType::kGeometry:
if (program == prevGeometryShaderProgram)
return;
shaderBit = GL_GEOMETRY_SHADER_BIT;
prevGeometryShaderProgram = program;
break;
default:
UNREACHABLE;
}
catchOpenGLError();
glUseProgramStages(m_pipeline, shaderBit, program);
catchOpenGLError();
}
void OpenGLRenderer::shader_unbind(RendererShader::ShaderType shaderType)
{
switch (shaderType) {
case RendererShader::ShaderType::kVertex:
glUseProgramStages(m_pipeline, GL_VERTEX_SHADER_BIT, 0);
prevVertexShaderProgram = -1;
break;
case RendererShader::ShaderType::kFragment:
glUseProgramStages(m_pipeline, GL_FRAGMENT_SHADER_BIT, 0);
prevPixelShaderProgram = -1;
break;
case RendererShader::ShaderType::kGeometry:
glUseProgramStages(m_pipeline, GL_GEOMETRY_SHADER_BIT, 0);
prevGeometryShaderProgram = -1;
break;
default:
UNREACHABLE;
}
}
void decodeBC4Block_UNORM(uint8* blockStorage, float* rOutput);
void OpenGLRenderer::texture_syncSliceSpecialBC4(LatteTexture* srcTexture, sint32 srcSliceIndex, sint32 srcMipIndex, LatteTexture* dstTexture, sint32 dstSliceIndex, sint32 dstMipIndex)
{
auto srcTextureGL = (LatteTextureGL*)srcTexture;
auto dstTextureGL = (LatteTextureGL*)dstTexture;
sint32 sourceTexWidth = std::max(srcTexture->width >> srcMipIndex, 1);
sint32 sourceTexHeight = std::max(srcTexture->height >> srcMipIndex, 1);
sint32 destTexWidth = std::max(dstTexture->width >> dstMipIndex, 1);
sint32 destTexHeight = std::max(dstTexture->height >> dstMipIndex, 1);
sint32 compressedCopyWidth = std::min(sourceTexWidth, std::max(1, destTexWidth / 4));
sint32 compressedCopyHeight = std::min(sourceTexHeight, std::max(1, destTexHeight / 4));
uint8* texelData = (uint8*)malloc(compressedCopyWidth*compressedCopyHeight * 8);
float* pixelRGBA16fData = (float*)malloc(destTexWidth*destTexHeight * sizeof(float) * 2);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
if (glGetTextureSubImage)
glGetTextureSubImage(srcTextureGL->glId_texture, 0, 0, 0, srcSliceIndex, compressedCopyWidth, compressedCopyHeight, 1, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, compressedCopyWidth * compressedCopyHeight * 8, texelData);
for (sint32 bx = 0; bx < compressedCopyWidth; bx++)
{
for (sint32 by = 0; by < compressedCopyHeight; by++)
{
float rBlock[4 * 4];
decodeBC4Block_UNORM(texelData + (bx + by * compressedCopyWidth) * 8, rBlock);
for (sint32 sy = 0; sy < std::min(4, destTexHeight - by * 4); sy++)
{
for (sint32 sx = 0; sx < std::min(4, destTexWidth - bx * 4); sx++)
{
sint32 pixelIndex = (bx * 4 + sx) + (by * 4 + sy)*destTexWidth;
pixelRGBA16fData[pixelIndex * 2] = rBlock[sx + sy * 4];
pixelRGBA16fData[pixelIndex * 2 + 1] = rBlock[sx + sy * 4];
}
}
}
}
// upload mip
if (glGetTextureSubImage && glTextureSubImage3D)
glTextureSubImage3D(dstTextureGL->glId_texture, dstMipIndex, 0, 0, dstSliceIndex, destTexWidth, destTexHeight, 1, GL_RG, GL_FLOAT, pixelRGBA16fData);
free(pixelRGBA16fData);
free(texelData);
catchOpenGLError();
}
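// decodeBC4Block_UNORM() used above is implemented elsewhere in the codebase; for reference, a minimal
// sketch of decoding one 8-byte BC4/RGTC1 UNORM block into the 16 row-major red values that
// texture_syncSliceSpecialBC4() consumes (illustrative only, not Cemu's actual implementation):
[[maybe_unused]] static void decodeBC4BlockUNORM_sketch(const uint8* block, float* rOutput /* 4x4 texels, row-major */)
{
	const float r0 = block[0] / 255.0f;
	const float r1 = block[1] / 255.0f;
	// build the 8-entry palette; the interpolation mode depends on the order of the two reference values
	float palette[8] = { r0, r1 };
	if (block[0] > block[1])
	{
		for (int i = 0; i < 6; i++)
			palette[2 + i] = ((6 - i) * r0 + (i + 1) * r1) / 7.0f;
	}
	else
	{
		for (int i = 0; i < 4; i++)
			palette[2 + i] = ((4 - i) * r0 + (i + 1) * r1) / 5.0f;
		palette[6] = 0.0f;
		palette[7] = 1.0f;
	}
	// the remaining 6 bytes hold 16 packed 3-bit palette indices (texel order: left-to-right, top-to-bottom)
	uint64 indexBits = 0;
	for (int i = 0; i < 6; i++)
		indexBits |= (uint64)block[2 + i] << (i * 8);
	for (int t = 0; t < 16; t++)
		rOutput[t] = palette[(indexBits >> (t * 3)) & 7];
}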
void OpenGLRenderer::texture_syncSliceSpecialIntegerToBC3(LatteTexture* srcTexture, sint32 srcSliceIndex, sint32 srcMipIndex, LatteTexture* dstTexture, sint32 dstSliceIndex, sint32 dstMipIndex)
{
auto srcTextureGL = (LatteTextureGL*)srcTexture;
auto dstTextureGL = (LatteTextureGL*)dstTexture;
sint32 sourceTexWidth = std::max(srcTexture->width >> srcMipIndex, 1);
sint32 sourceTexHeight = std::max(srcTexture->height >> srcMipIndex, 1);
sint32 destTexWidth = std::max(dstTexture->width >> dstMipIndex, 1);
sint32 destTexHeight = std::max(dstTexture->height >> dstMipIndex, 1);
sint32 compressedCopyWidth = std::min(sourceTexWidth, std::max(1, destTexWidth / 4));
sint32 compressedCopyHeight = std::min(sourceTexHeight, std::max(1, destTexHeight / 4));
uint8* texelData = (uint8*)malloc(compressedCopyWidth*compressedCopyHeight * 16);
catchOpenGLError();
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
catchOpenGLError();
if (glGetTextureSubImage)
glGetTextureSubImage(srcTextureGL->glId_texture, 0, 0, 0, srcSliceIndex, compressedCopyWidth, compressedCopyHeight, 1, GL_RGBA_INTEGER, GL_UNSIGNED_INT, compressedCopyWidth * compressedCopyHeight * 16, texelData);
//float* pixelRGBA16fData = (float*)malloc(destTexWidth*destTexHeight * sizeof(float) * 2);
//for (sint32 bx = 0; bx < compressedCopyWidth; bx++)
//{
// for (sint32 by = 0; by < compressedCopyHeight; by++)
// {
// float rBlock[4 * 4];
// decodeBC4Block_UNORM(texelData + (bx + by * compressedCopyWidth) * 8, rBlock);
// for (sint32 sy = 0; sy < min(4, destTexHeight - by * 4); sy++)
// {
// for (sint32 sx = 0; sx < min(4, destTexWidth - bx * 4); sx++)
// {
// sint32 pixelIndex = (bx * 4 + sx) + (by * 4 + sy)*destTexWidth;
// pixelRGBA16fData[pixelIndex * 2] = rBlock[sx + sy * 4];
// pixelRGBA16fData[pixelIndex * 2 + 1] = rBlock[sx + sy * 4];
// }
// }
// }
//}
// upload mip
catchOpenGLError();
if (glGetTextureSubImage && glCompressedTextureSubImage3D)
glCompressedTextureSubImage3D(dstTextureGL->glId_texture, dstMipIndex, 0, 0, dstSliceIndex, destTexWidth, destTexHeight, 1, dstTextureGL->glInternalFormat, compressedCopyWidth * compressedCopyHeight * 16, texelData);
free(texelData);
catchOpenGLError();
}
void OpenGLRenderer::renderstate_updateBlendingAndColorControl()
{
catchOpenGLError();
const auto& colorControlReg = LatteGPUState.contextNew.CB_COLOR_CONTROL;
const auto specialOp = colorControlReg.get_SPECIAL_OP();
const uint32 blendEnableMask = colorControlReg.get_BLEND_MASK();
const auto logicOp = colorControlReg.get_ROP();
cemu_assert_debug(!colorControlReg.get_MULTIWRITE_ENABLE()); // not supported
uint32 renderTargetMask = LatteGPUState.contextNew.CB_TARGET_MASK.get_MASK();
if (specialOp == Latte::LATTE_CB_COLOR_CONTROL::E_SPECIALOP::DISABLE)
{
renderTargetMask = 0;
}
OpenGLRenderer::renderstate_setChannelTargetMask(renderTargetMask);
catchOpenGLError();
// handle depth and stencil control
// get depth control parameters
bool depthEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_ENABLE();
auto depthFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_FUNC();
bool depthWriteEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_WRITE_ENABLE();
// get stencil control parameters
bool stencilEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ENABLE();
bool backStencilEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_BACK_STENCIL_ENABLE();
auto frontStencilFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FUNC_F();
auto frontStencilZPass = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZPASS_F();
auto frontStencilZFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZFAIL_F();
auto frontStencilFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FAIL_F();
auto backStencilFunc = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FUNC_B();
auto backStencilZPass = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZPASS_B();
auto backStencilZFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ZFAIL_B();
auto backStencilFail = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_FAIL_B();
// get stencil control parameters
uint32 stencilCompareMaskFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILMASK_F();
uint32 stencilWriteMaskFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILWRITEMASK_F();
uint32 stencilRefFront = LatteGPUState.contextNew.DB_STENCILREFMASK.get_STENCILREF_F();
uint32 stencilCompareMaskBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILMASK_B();
uint32 stencilWriteMaskBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILWRITEMASK_B();
uint32 stencilRefBack = LatteGPUState.contextNew.DB_STENCILREFMASK_BF.get_STENCILREF_B();
const static GLenum stencilActionGX2ToGL[] =
{
GL_KEEP,
GL_ZERO,
GL_REPLACE,
GL_INCR,
GL_DECR,
GL_INVERT,
GL_INCR_WRAP,
GL_DECR_WRAP
};
if (prevStencilEnable != stencilEnable)
{
if (stencilEnable)
glEnable(GL_STENCIL_TEST);
else
glDisable(GL_STENCIL_TEST);
prevStencilEnable = stencilEnable;
}
// update stencil parameters only if stencil is enabled
if (stencilEnable)
{
if (!backStencilEnable)
{
// if back face stencil is disabled then use front parameters
backStencilFunc = frontStencilFunc;
backStencilZPass = frontStencilZPass;
backStencilZFail = frontStencilZFail;
backStencilFail = frontStencilFail;
stencilRefBack = stencilRefFront;
stencilCompareMaskBack = stencilCompareMaskFront;
stencilWriteMaskBack = stencilWriteMaskFront;
}
// update stencil configuration for front side
if (prevFrontStencilFail != frontStencilFail || prevFrontStencilZFail != frontStencilZFail || prevFrontStencilZPass != frontStencilZPass)
{
glStencilOpSeparate(GL_FRONT, stencilActionGX2ToGL[(size_t)frontStencilFail], stencilActionGX2ToGL[(size_t)frontStencilZFail], stencilActionGX2ToGL[(size_t)frontStencilZPass]);
prevFrontStencilFail = frontStencilFail;
prevFrontStencilZFail = frontStencilZFail;
prevFrontStencilZPass = frontStencilZPass;
}
if (prevFrontStencilFunc != frontStencilFunc || prevStencilRefFront != stencilRefFront || prevStencilCompareMaskFront != stencilCompareMaskFront)
{
glStencilFuncSeparate(GL_FRONT, glDepthFuncTable[(size_t)frontStencilFunc], stencilRefFront, stencilCompareMaskFront);
prevFrontStencilFunc = frontStencilFunc;
prevStencilRefFront = stencilRefFront;
prevStencilCompareMaskFront = stencilCompareMaskFront;
}
if (prevStencilWriteMaskFront != stencilWriteMaskFront)
{
glStencilMaskSeparate(GL_FRONT, stencilWriteMaskFront);
prevStencilWriteMaskFront = stencilWriteMaskFront;
}
// update stencil configuration for back side
if (prevBackStencilFail != backStencilFail || prevBackStencilZFail != backStencilZFail || prevBackStencilZPass != backStencilZPass)
{
glStencilOpSeparate(GL_BACK, stencilActionGX2ToGL[(size_t)backStencilFail], stencilActionGX2ToGL[(size_t)backStencilZFail], stencilActionGX2ToGL[(size_t)backStencilZPass]);
prevBackStencilFail = backStencilFail;
prevBackStencilZFail = backStencilZFail;
prevBackStencilZPass = backStencilZPass;
}
if (prevBackStencilFunc != backStencilFunc || prevStencilRefBack != stencilRefBack || prevStencilCompareMaskBack != stencilCompareMaskBack)
{
glStencilFuncSeparate(GL_BACK, glDepthFuncTable[(size_t)backStencilFunc], stencilRefBack, stencilCompareMaskBack);
prevBackStencilFunc = backStencilFunc;
prevStencilRefBack = stencilRefBack;
prevStencilCompareMaskBack = stencilCompareMaskBack;
}
if (prevStencilWriteMaskBack != stencilWriteMaskBack)
{
glStencilMaskSeparate(GL_BACK, stencilWriteMaskBack);
prevStencilWriteMaskBack = stencilWriteMaskBack;
}
}
if (depthEnable != prevDepthEnable)
{
if (depthEnable)
glEnable(GL_DEPTH_TEST);
else
glDisable(GL_DEPTH_TEST);
prevDepthEnable = depthEnable;
}
if (depthWriteEnable != prevDepthWriteEnable)
{
if (depthWriteEnable)
glDepthMask(GL_TRUE);
else
glDepthMask(GL_FALSE);
prevDepthWriteEnable = depthWriteEnable;
}
if (depthFunc != prevDepthFunc)
{
glDepthFunc(glDepthFuncTable[(size_t)depthFunc]);
prevDepthFunc = depthFunc;
}
catchOpenGLError();
uint32 blendChangeMask = blendEnableMask ^ prevBlendMask;
if (blendChangeMask)
{
for (uint32 i = 0; i < 8; i++)
{
if ((blendChangeMask & (1 << i)) != 0)
{
// bit changed -> blend mode was toggled
if ((blendEnableMask & (1 << i)) != 0)
glEnablei(GL_BLEND, i);
else
glDisablei(GL_BLEND, i);
}
}
prevBlendMask = blendEnableMask;
}
catchOpenGLError();
uint32* blendColorConstant = LatteGPUState.contextRegister + Latte::REGADDR::CB_BLEND_RED;
if (blendColorConstant[0] != prevBlendColorConstant[0] ||
blendColorConstant[1] != prevBlendColorConstant[1] ||
blendColorConstant[2] != prevBlendColorConstant[2] ||
blendColorConstant[3] != prevBlendColorConstant[3])
{
glBlendColor(*(float*)(blendColorConstant + 0), *(float*)(blendColorConstant + 1), *(float*)(blendColorConstant + 2), *(float*)(blendColorConstant + 3));
prevBlendColorConstant[0] = blendColorConstant[0];
prevBlendColorConstant[1] = blendColorConstant[1];
prevBlendColorConstant[2] = blendColorConstant[2];
prevBlendColorConstant[3] = blendColorConstant[3];
}
for (uint32 i = 0; i < 8; i++)
{
const auto& blendControlReg = LatteGPUState.contextNew.CB_BLENDN_CONTROL[i];
if (blendControlReg.getRawValue() != prevBlendControlReg[i])
{
if (blendControlReg.get_SEPARATE_ALPHA_BLEND())
{
glBlendFuncSeparatei(i, GetGLBlendFactor(blendControlReg.get_COLOR_SRCBLEND()), GetGLBlendFactor(blendControlReg.get_COLOR_DSTBLEND()), GetGLBlendFactor(blendControlReg.get_ALPHA_SRCBLEND()), GetGLBlendFactor(blendControlReg.get_ALPHA_DSTBLEND()));
glBlendEquationSeparatei(i, GetGLBlendCombineFunc(blendControlReg.get_COLOR_COMB_FCN()), GetGLBlendCombineFunc(blendControlReg.get_ALPHA_COMB_FCN()));
}
else
{
auto colorSrc = GetGLBlendFactor(blendControlReg.get_COLOR_SRCBLEND());
auto colorDst = GetGLBlendFactor(blendControlReg.get_COLOR_DSTBLEND());
glBlendFuncSeparatei(i, colorSrc, colorDst, colorSrc, colorDst);
auto combineFunc = GetGLBlendCombineFunc(blendControlReg.get_COLOR_COMB_FCN());
glBlendEquationSeparatei(i, combineFunc, combineFunc);
}
prevBlendControlReg[i] = blendControlReg.getRawValue();
}
}
// set logic op
uint32 logicOpGL = GL_COPY;
if (logicOp == Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::COPY)
logicOpGL = GL_COPY;
else if (logicOp == Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::SET)
logicOpGL = GL_SET;
else if (logicOp == Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::CLEAR)
logicOpGL = GL_CLEAR;
else if (logicOp == Latte::LATTE_CB_COLOR_CONTROL::E_LOGICOP::OR)
logicOpGL = GL_OR;
else
cemu_assert_unimplemented();
if (prevLogicOp != logicOpGL)
{
if (logicOpGL != GL_COPY)
glEnable(GL_COLOR_LOGIC_OP);
else
glDisable(GL_COLOR_LOGIC_OP);
glLogicOp(logicOpGL);
prevLogicOp = logicOpGL;
}
// polygon control
const auto& polygonControlReg = LatteGPUState.contextNew.PA_SU_SC_MODE_CNTL;
const auto frontFace = polygonControlReg.get_FRONT_FACE();
uint32 cullFront = polygonControlReg.get_CULL_FRONT();
uint32 cullBack = polygonControlReg.get_CULL_BACK();
// todo - polygon modes
uint32 lineAndPointOffsetEnabled = polygonControlReg.get_OFFSET_PARA_ENABLED();
uint32 polyOffsetFrontEnabled = polygonControlReg.get_OFFSET_FRONT_ENABLED();
uint32 polyOffsetBackEnabled = polygonControlReg.get_OFFSET_BACK_ENABLED();
uint32 cullEnable = (cullFront || cullBack) ? 1 : 0;
if (prevCullEnable != cullEnable)
{
if (cullEnable)
glEnable(GL_CULL_FACE);
else
glDisable(GL_CULL_FACE);
prevCullEnable = cullEnable;
}
if (prevCullFrontFace != frontFace)
{
if (frontFace == Latte::LATTE_PA_SU_SC_MODE_CNTL::E_FRONTFACE::CCW)
glFrontFace(GL_CCW);
else if (frontFace == Latte::LATTE_PA_SU_SC_MODE_CNTL::E_FRONTFACE::CW)
glFrontFace(GL_CW);
else
cemu_assert_unimplemented();
prevCullFrontFace = frontFace;
}
if (prevCullFront != cullFront || prevCullBack != cullBack)
{
if (cullFront && cullBack)
glCullFace(GL_FRONT_AND_BACK);
else if (cullFront == 0 && cullBack)
glCullFace(GL_BACK);
else if (cullFront && cullBack == 0)
glCullFace(GL_FRONT);
else
; // front and back disabled, do nothing here since we force disable culling via glDisable(GL_CULL_FACE) above
prevCullFront = cullFront;
prevCullBack = cullBack;
}
if (polyOffsetFrontEnabled != prevPolygonOffsetFrontEnabled)
{
if (polyOffsetFrontEnabled)
glEnable(GL_POLYGON_OFFSET_FILL);
else
glDisable(GL_POLYGON_OFFSET_FILL);
prevPolygonOffsetFrontEnabled = polyOffsetFrontEnabled;
}
if (polyOffsetFrontEnabled)
{
// if polygon offset is enabled check if offset/scale register changed
if (LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_OFFSET.getRawValue() != prevPolygonFrontOffsetU32 || LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_SCALE.getRawValue() != prevPolygonFrontScaleU32 || LatteGPUState.contextNew.PA_SU_POLY_OFFSET_CLAMP.getRawValue() != prevPolygonClampU32)
{
float frontScale = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_SCALE.get_SCALE();
float frontOffset = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_OFFSET.get_OFFSET();
float offsetClamp = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_CLAMP.get_CLAMP();
frontScale /= 16.0f;
//if( glPolygonOffsetClampEXT )
// glPolygonOffsetClampEXT(frontOffset, frontScale, offsetClamp);
//else
glPolygonOffset(frontScale, frontOffset);
prevPolygonFrontOffsetU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_OFFSET.getRawValue();
prevPolygonFrontScaleU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_FRONT_SCALE.getRawValue();
prevPolygonClampU32 = LatteGPUState.contextNew.PA_SU_POLY_OFFSET_CLAMP.getRawValue();
}
}
// clip control
catchOpenGLError();
cemu_assert_debug(LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_NEAR_DISABLE() == LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_FAR_DISABLE()); // near/far clip disable flags must match, toggling them individually is not supported
uint32 zClipEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_ZCLIP_NEAR_DISABLE() == false;
// todo: Mass Effect 3 uses precompiled display lists which seem to write values to PA_CL_CLIP_CNTL which aren't available via the traditional GX2 API
if (prevZClipEnable != zClipEnable)
{
if (zClipEnable)
{
// disable depth clamping and enable clipping
glDisable(GL_DEPTH_CLAMP);
}
else
{
// enable depth clamping and disable clipping
glEnable(GL_DEPTH_CLAMP);
}
prevZClipEnable = zClipEnable;
}
catchOpenGLError();
// point size
const auto& pointSizeReg = LatteGPUState.contextNew.PA_SU_POINT_SIZE;
if (pointSizeReg.getRawValue() != prevPointSizeReg)
{
float pointWidth = (float)pointSizeReg.get_WIDTH() / 8.0f;
float pointHeight = (float)pointSizeReg.get_HEIGHT() / 8.0f;
if (pointWidth == 0.0f)
glPointSize(1.0f / 8.0f); // minimum size
else
glPointSize(pointWidth);
prevPointSizeReg = pointSizeReg.getRawValue();
catchOpenGLError();
}
// primitive restart index
uint32 primitiveRestartIndex = LatteGPUState.contextNew.VGT_MULTI_PRIM_IB_RESET_INDX.get_RESTART_INDEX();
if (prevPrimitiveRestartIndex != primitiveRestartIndex)
{
glPrimitiveRestartIndex(primitiveRestartIndex);
prevPrimitiveRestartIndex = primitiveRestartIndex;
}
}
void OpenGLRenderer::renderstate_resetColorControl()
{
renderstate_setChannelTargetMask(0xF);
// disable blending
uint32 blendEnableMask = 0;
for (uint32 i = 0; i < 8; i++)
{
if (((blendEnableMask^prevBlendMask)&(1 << i)) != 0)
{
// bit changed -> blend mode was toggled
if ((blendEnableMask&(1 << i)) != 0)
glEnablei(GL_BLEND, i);
else
glDisablei(GL_BLEND, i);
}
}
prevBlendMask = blendEnableMask;
// "forget" blend states
for (uint32 i = 0; i < 8; i++)
{
prevBlendControlReg[i] = 0xFFFFFFFF;
}
// disable alpha test
if (prevAlphaTestEnable != 0)
{
glDisable(GL_ALPHA_TEST);
prevAlphaTestEnable = 0;
}
if (prevLogicOp != GL_COPY)
{
glDisable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_COPY);
prevLogicOp = GL_COPY;
}
// disable culling
uint32 cullEnable = 0;
if (prevCullEnable != cullEnable)
{
glDisable(GL_CULL_FACE);
prevCullEnable = 0;
}
// disable polygon offset
if (prevPolygonOffsetFrontEnabled != 0)
{
glDisable(GL_POLYGON_OFFSET_FILL);
prevPolygonOffsetFrontEnabled = 0;
}
// disable scissor box
if (prevScissorEnable != false)
{
glDisable(GL_SCISSOR_TEST);
prevScissorEnable = false;
}
}
void OpenGLRenderer::renderstate_resetDepthControl()
{
if (prevDepthEnable)
{
glDisable(GL_DEPTH_TEST);
prevDepthEnable = false;
}
if (!prevDepthWriteEnable)
{
glDepthMask(GL_TRUE);
prevDepthWriteEnable = true;
}
if (prevStencilEnable)
{
glDisable(GL_STENCIL_TEST);
prevStencilEnable = false;
}
//if (prevZClipEnable == 0)
//{
// glDisable(GL_DEPTH_CLAMP);
// prevZClipEnable = 1;
//}
glDisable(GL_DEPTH_CLAMP);
prevZClipEnable = 1;
if (prevPrimitiveRestartIndex != 0xFFFFFFFF)
{
glPrimitiveRestartIndex(0xFFFFFFFF);
prevPrimitiveRestartIndex = 0xFFFFFFFF;
}
}
void OpenGLRenderer::renderstate_resetStencilMask()
{
uint32 stencilWriteMaskFront = 0xFFFFFFFF; // enable front mask
uint32 stencilWriteMaskBack = 0xFFFFFFFF; // enable back mask
if (prevStencilWriteMaskFront != stencilWriteMaskFront)
{
glStencilMaskSeparate(GL_FRONT, stencilWriteMaskFront);
prevStencilWriteMaskFront = stencilWriteMaskFront;
}
if (prevStencilWriteMaskBack != stencilWriteMaskBack)
{
glStencilMaskSeparate(GL_BACK, stencilWriteMaskBack);
prevStencilWriteMaskBack = stencilWriteMaskBack;
}
}
void OpenGLRenderer::cleanupAfterFrame()
{
ReleaseBufferCacheEntries();
}
void OpenGLRenderer::ReleaseBufferCacheEntries()
{
for (auto& itr : m_destructionQueues.bufferCacheEntries)
itr.free();
m_destructionQueues.bufferCacheEntries.clear();
}
| 64,985 | C++ | .cpp | 1,662 | 36.638387 | 294 | 0.765717 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,255 | OpenGLRendererStreamout.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLRendererStreamout.cpp |
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/OS/libs/gx2/GX2.h" // todo - remove dependency
void OpenGLRenderer::streamout_setupXfbBuffer(uint32 bufferIndex, sint32 ringBufferOffset, uint32 rangeAddr, uint32 rangeSize)
{
catchOpenGLError();
glBindBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER, bufferIndex, glStreamoutCacheRingBuffer, ringBufferOffset, rangeSize);
catchOpenGLError();
}
void OpenGLRenderer::streamout_begin()
{
// get primitive mode
GLenum glTransformFeedbackPrimitiveMode;
auto primitiveMode = LatteGPUState.contextNew.VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE();
if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::POINTS)
glTransformFeedbackPrimitiveMode = GL_POINTS;
else if(primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLES)
glTransformFeedbackPrimitiveMode = GL_POINTS;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUADS)
glTransformFeedbackPrimitiveMode = GL_POINTS;
else
{
debug_printf("Unsupported streamout primitive mode 0x%02x\n", primitiveMode);
cemu_assert_debug(false);
}
cemu_assert_debug(m_isXfbActive == false);
glEnable(GL_RASTERIZER_DISCARD_EXT);
glBeginTransformFeedback(glTransformFeedbackPrimitiveMode);
catchOpenGLError();
m_isXfbActive = true;
}
void OpenGLRenderer::bufferCache_copyStreamoutToMainBuffer(uint32 srcOffset, uint32 dstOffset, uint32 size)
{
if (glCopyNamedBufferSubData)
glCopyNamedBufferSubData(glStreamoutCacheRingBuffer, glAttributeCacheAB, srcOffset, dstOffset, size);
else
cemuLog_log(LogType::Force, "glCopyNamedBufferSubData() not supported");
}
void OpenGLRenderer::streamout_rendererFinishDrawcall()
{
if (m_isXfbActive)
{
glEndTransformFeedback();
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, 0);
glDisable(GL_RASTERIZER_DISCARD_EXT);
m_isXfbActive = false;
}
}
| 2,045 | C++ | .cpp | 50 | 38.86 | 126 | 0.816306 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,256 | OpenGLRendererUniformData.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLRendererUniformData.cpp |
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
GLint _gl_remappedUniformData[4 * 256];
void OpenGLRenderer::uniformData_update()
{
// update uniforms
LatteDecompilerShader* shaderArray[3];
shaderArray[0] = LatteSHRC_GetActiveVertexShader();
shaderArray[1] = LatteSHRC_GetActivePixelShader();
shaderArray[2] = LatteSHRC_GetActiveGeometryShader();
uint32 shaderBlockUniformRegisterOffset[3];
shaderBlockUniformRegisterOffset[0] = mmSQ_VTX_UNIFORM_BLOCK_START;
shaderBlockUniformRegisterOffset[1] = mmSQ_PS_UNIFORM_BLOCK_START;
shaderBlockUniformRegisterOffset[2] = mmSQ_GS_UNIFORM_BLOCK_START;
uint32 shaderALUConstOffset[3];
shaderALUConstOffset[0] = 0x400;
shaderALUConstOffset[1] = 0;
shaderALUConstOffset[2] = 0xFFFFFFFF; // GS has no uniform registers
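// Each shader uses one of several uniform data models:
// - REMAPPED: only the uniforms actually referenced by the shader are gathered into a compact array and
//   uploaded with SetUniform4iv; a small hash over the array is kept so unchanged data is not re-uploaded
// - FULL_CFILE: the raw ALU constant registers are uploaded as one large uniform array
// - FULL_CBANK: uniform buffers are used instead (handled elsewhere, see _syncGPUUniformBuffers)
// - NONE: the shader uses no uniforms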
for (sint32 s = 0; s < 3; s++)
{
// update block uniforms
LatteDecompilerShader* shader = shaderArray[s];
if (!shader)
continue;
auto hostShader = shader->shader;
if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_REMAPPED)
{
auto& list_uniformMapping = shader->list_remappedUniformEntries;
cemu_assert_debug(list_uniformMapping.size() <= 256);
sint32 remappedArraySize = (sint32)list_uniformMapping.size();
LatteBufferCache_LoadRemappedUniforms(shader, (float*)(_gl_remappedUniformData));
// update values only when the hash changed
if (remappedArraySize > 0)
{
uint64 uniformDataHash[2] = { 0 };
uint64* remappedUniformData64 = (uint64*)_gl_remappedUniformData;
for (sint32 f = 0; f < remappedArraySize; f++)
{
uniformDataHash[0] ^= remappedUniformData64[0];
uniformDataHash[0] = std::rotl<uint64>(uniformDataHash[0], 11);
uniformDataHash[1] ^= remappedUniformData64[1];
uniformDataHash[1] = std::rotl<uint64>(uniformDataHash[1], 11);
remappedUniformData64 += 2;
}
if (shader->uniformDataHash64[0] != uniformDataHash[0] || shader->uniformDataHash64[1] != uniformDataHash[1])
{
shader->uniformDataHash64[0] = uniformDataHash[0];
shader->uniformDataHash64[1] = uniformDataHash[1];
hostShader->SetUniform4iv(shader->uniform.loc_remapped, _gl_remappedUniformData, remappedArraySize);
}
}
}
else if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CFILE)
{
if (shaderALUConstOffset[s] == 0xFFFFFFFF)
assert_dbg();
GLint* uniformRegData = (GLint*)(LatteGPUState.contextRegister + mmSQ_ALU_CONSTANT0_0 + shaderALUConstOffset[s]);
hostShader->SetUniform4iv(shader->uniform.loc_uniformRegister, uniformRegData, shader->uniform.count_uniformRegister);
}
else if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK)
{
// handled by _syncGPUUniformBuffers()
}
else if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_NONE)
{
// no uniforms used
}
}
}
| 2,858 | C++ | .cpp | 69 | 37.927536 | 121 | 0.748473 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,257 | OpenGLRendererCore.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Renderer/OpenGL/OpenGLRendererCore.cpp |
#include "Common/GLInclude/GLInclude.h"
#include "Cafe/HW/Latte/Core/LatteRingBuffer.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteSoftware.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/CachedFBOGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "config/ActiveSettings.h"
using _INDEX_TYPE = Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE;
GLenum sGLActiveDrawMode = 0;
extern bool hasValidFramebufferAttached;
#define INDEX_CACHE_ENTRIES (8)
typedef struct
{
MPTR prevIndexDataMPTR;
sint32 prevIndexType;
sint32 prevCount;
// index data
uint8* indexData;
uint8* indexData2;
uint32 indexBufferOffset;
sint32 indexDataSize; // current size
sint32 indexDataLimit; // maximum size
// info
uint32 maxIndex;
uint32 minIndex;
}indexDataCacheEntry_t;
struct
{
indexDataCacheEntry_t indexCacheEntry[INDEX_CACHE_ENTRIES];
sint32 nextCacheEntryIndex;
// info about currently used index data
uint32 maxIndex;
uint32 minIndex;
uint8* indexData;
// buffer
GLuint glIndexCacheBuffer;
VirtualBufferHeap_t* indexBufferVirtualHeap;
uint8* mappedIndexBuffer;
LatteRingBuffer_t* indexRingBuffer;
uint8* tempIndexStorage;
// misc
bool initialized;
GLuint glActiveElementArrayBuffer;
}indexState = { 0 };
struct
{
uint8* vboOutput;
uint32 vboStride;
uint8 dataFormat;
uint8 nfa;
bool isSigned;
}activeAttributePointer[LATTE_VS_ATTRIBUTE_LIMIT] = { 0 };
void LatteDraw_resetAttributePointerCache()
{
for (sint32 i = 0; i < LATTE_VS_ATTRIBUTE_LIMIT; i++)
{
activeAttributePointer[i].vboOutput = (uint8*)-1;
activeAttributePointer[i].vboStride = (uint32)-1;
}
}
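// Attribute data is always passed to the host shader as raw unsigned integer components via
// glVertexAttribIPointer; the decompiled vertex shader performs the actual format conversion.
// This function therefore only has to pick a matching component count and integer width per GPU7 format.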
void _setAttributeBufferPointerRaw(uint32 attributeShaderLoc, uint8* buffer, uint32 bufferSize, uint32 stride, LatteParsedFetchShaderAttribute_t* attrib, uint8* vboOutput, uint32 vboStride)
{
uint32 dataFormat = attrib->format;
bool isSigned = attrib->isSigned != 0;
uint8 nfa = attrib->nfa;
// don't call glVertexAttribIPointer if parameters have not changed
if (activeAttributePointer[attributeShaderLoc].vboOutput == vboOutput && activeAttributePointer[attributeShaderLoc].vboStride == vboStride && activeAttributePointer[attributeShaderLoc].dataFormat == dataFormat && activeAttributePointer[attributeShaderLoc].nfa == nfa && activeAttributePointer[attributeShaderLoc].isSigned == isSigned)
{
return;
}
activeAttributePointer[attributeShaderLoc].vboOutput = vboOutput;
activeAttributePointer[attributeShaderLoc].vboStride = vboStride;
activeAttributePointer[attributeShaderLoc].dataFormat = dataFormat;
activeAttributePointer[attributeShaderLoc].nfa = nfa;
activeAttributePointer[attributeShaderLoc].isSigned = isSigned;
// setup attribute pointer
if (dataFormat == FMT_32_32_32_32_FLOAT || dataFormat == FMT_32_32_32_32)
{
glVertexAttribIPointer(attributeShaderLoc, 4, GL_UNSIGNED_INT, vboStride, vboOutput);
}
else if (dataFormat == FMT_32_32_32_FLOAT || dataFormat == FMT_32_32_32)
{
glVertexAttribIPointer(attributeShaderLoc, 3, GL_UNSIGNED_INT, vboStride, vboOutput);
}
else if (dataFormat == FMT_32_32_FLOAT || dataFormat == FMT_32_32)
{
glVertexAttribIPointer(attributeShaderLoc, 2, GL_UNSIGNED_INT, vboStride, vboOutput);
}
else if (dataFormat == FMT_32_FLOAT || dataFormat == FMT_32)
{
glVertexAttribIPointer(attributeShaderLoc, 1, GL_UNSIGNED_INT, vboStride, vboOutput);
}
else if (dataFormat == FMT_8_8_8_8)
{
glVertexAttribIPointer(attributeShaderLoc, 4, GL_UNSIGNED_BYTE, vboStride, vboOutput);
}
else if (dataFormat == FMT_8_8)
{
// workaround for AMD (alignment must be 4 for 2xbyte)
if (((uint32)(size_t)vboOutput & 0x3) == 2 && LatteGPUState.glVendor == GLVENDOR_AMD)
{
glVertexAttribIPointer(attributeShaderLoc, 4, GL_UNSIGNED_BYTE, vboStride, vboOutput - 2);
}
else
{
glVertexAttribIPointer(attributeShaderLoc, 2, GL_UNSIGNED_BYTE, vboStride, vboOutput);
}
}
else if (dataFormat == FMT_8)
{
glVertexAttribIPointer(attributeShaderLoc, 1, GL_UNSIGNED_BYTE, vboStride, vboOutput);
}
else if (dataFormat == FMT_16_16_16_16_FLOAT || dataFormat == FMT_16_16_16_16)
{
glVertexAttribIPointer(attributeShaderLoc, 4, GL_UNSIGNED_SHORT, vboStride, vboOutput);
}
else if (dataFormat == FMT_16_16_FLOAT || dataFormat == FMT_16_16)
{
glVertexAttribIPointer(attributeShaderLoc, 2, GL_UNSIGNED_SHORT, vboStride, vboOutput);
}
else if (dataFormat == FMT_16_FLOAT || dataFormat == FMT_16)
{
glVertexAttribIPointer(attributeShaderLoc, 1, GL_UNSIGNED_SHORT, vboStride, vboOutput);
}
else if (dataFormat == FMT_2_10_10_10)
{
glVertexAttribIPointer(attributeShaderLoc, 1, GL_UNSIGNED_INT, vboStride, vboOutput);
}
else
{
debug_printf("_setAttributeBufferPointerRaw(): Unsupported format %d\n", dataFormat);
cemu_assert_unimplemented();
}
}
bool glAttributeArrayIsEnabled[GPU_GL_MAX_NUM_ATTRIBUTE] = { 0 };
sint32 glAttributeArrayAluDivisor[GPU_GL_MAX_NUM_ATTRIBUTE] = { 0 };
void OpenGLRenderer::SetAttributeArrayState(uint32 index, bool isEnabled, sint32 aluDivisor)
{
cemu_assert_debug(index < GPU_GL_MAX_NUM_ATTRIBUTE);
catchOpenGLError();
if (glAttributeArrayIsEnabled[index] != isEnabled)
{
if (isEnabled)
{
// enable
glEnableVertexAttribArray(index);
glAttributeArrayIsEnabled[index] = true;
}
else
{
// disable
glDisableVertexAttribArray(index);
glAttributeArrayIsEnabled[index] = false;
}
catchOpenGLError();
}
// set divisor state
if (glAttributeArrayAluDivisor[index] != aluDivisor)
{
if (aluDivisor <= 0)
glVertexAttribDivisor(index, 0);
else
glVertexAttribDivisor(index, aluDivisor);
glAttributeArrayAluDivisor[index] = aluDivisor;
catchOpenGLError();
}
}
// Sets the currently active element array buffer and binds it
void OpenGLRenderer::SetArrayElementBuffer(GLuint arrayElementBuffer)
{
if (arrayElementBuffer == indexState.glActiveElementArrayBuffer)
return;
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, arrayElementBuffer);
indexState.glActiveElementArrayBuffer = arrayElementBuffer;
}
typedef struct
{
MPTR physAddr;
sint32 count;
uint32 primitiveRestartIndex;
uint32 primitiveMode;
}indexDataCacheKey_t;
typedef struct _indexDataCacheEntry_t
{
indexDataCacheKey_t key;
_indexDataCacheEntry_t* nextInBucket; // points to next element in same bucket
uint32 physSize;
uint32 hash;
_INDEX_TYPE indexType;
//sint32 indexType;
uint32 minIndex;
uint32 maxIndex;
uint32 lastAccessFrameCount;
VirtualBufferHeapEntry_t* heapEntry;
_indexDataCacheEntry_t* nextInMostRecentUsage; // points to element which was used more recently
_indexDataCacheEntry_t* prevInMostRecentUsage; // points to element which was used less recently
}indexDataCacheEntry2_t;
#define INDEX_DATA_CACHE_BUCKETS (1783)
indexDataCacheEntry2_t* indexDataCacheBucket[INDEX_DATA_CACHE_BUCKETS] = { 0 };
indexDataCacheEntry2_t* indexDataCacheFirst = nullptr; // points to least recently used item
indexDataCacheEntry2_t* indexDataCacheLast = nullptr; // points to most recently used item
sint32 indexDataCacheEntryCount = 0;
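// The index data cache keeps decoded index buffers inside a single GL element array buffer.
// Entries are looked up through a small hash table keyed on (physAddr, count, primitive mode, restart index)
// and are additionally linked into a most-recently-used list so stale entries can be evicted LRU-style.
// Storage inside the GL buffer is managed by a virtual buffer heap.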
void _appendToUsageLinkedList(indexDataCacheEntry2_t* entry)
{
if (indexDataCacheLast == nullptr)
{
indexDataCacheLast = entry;
indexDataCacheFirst = entry;
entry->nextInMostRecentUsage = nullptr;
entry->prevInMostRecentUsage = nullptr;
}
else
{
indexDataCacheLast->nextInMostRecentUsage = entry;
entry->prevInMostRecentUsage = indexDataCacheLast;
entry->nextInMostRecentUsage = nullptr;
indexDataCacheLast = entry;
}
}
void _removeFromUsageLinkedList(indexDataCacheEntry2_t* entry)
{
if (entry->prevInMostRecentUsage)
{
entry->prevInMostRecentUsage->nextInMostRecentUsage = entry->nextInMostRecentUsage;
}
else
indexDataCacheFirst = entry->nextInMostRecentUsage;
if (entry->nextInMostRecentUsage)
{
entry->nextInMostRecentUsage->prevInMostRecentUsage = entry->prevInMostRecentUsage;
}
else
indexDataCacheLast = entry->prevInMostRecentUsage;
entry->prevInMostRecentUsage = nullptr;
entry->nextInMostRecentUsage = nullptr;
}
void _removeFromBucket(indexDataCacheEntry2_t* entry)
{
uint32 indexDataBucketIdx = (uint32)((entry->key.physAddr + entry->key.count) ^ (entry->key.physAddr >> 16)) % INDEX_DATA_CACHE_BUCKETS;
if (indexDataCacheBucket[indexDataBucketIdx] == entry)
{
indexDataCacheBucket[indexDataBucketIdx] = entry->nextInBucket;
entry->nextInBucket = nullptr;
return;
}
indexDataCacheEntry2_t* cacheEntryItr = indexDataCacheBucket[indexDataBucketIdx];
while (cacheEntryItr)
{
if (cacheEntryItr->nextInBucket == entry)
{
cacheEntryItr->nextInBucket = entry->nextInBucket;
entry->nextInBucket = nullptr;
return;
}
// next
cacheEntryItr = cacheEntryItr->nextInBucket;
}
}
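// Converts the big-endian GPU7 index data to native endianness, tracks the min/max index
// (ignoring the primitive restart index) and uploads the result into the cache entry's
// region of the element array buffer.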
void _decodeAndUploadIndexData(indexDataCacheEntry2_t* cacheEntry)
{
uint32 count = cacheEntry->key.count;
uint32 primitiveRestartIndex = cacheEntry->key.primitiveRestartIndex;
if (cacheEntry->indexType == _INDEX_TYPE::U16_BE)
{
// 16bit indices
uint16* indexInputU16 = (uint16*)memory_getPointerFromPhysicalOffset(cacheEntry->key.physAddr);
uint16* indexOutputU16 = (uint16*)indexState.tempIndexStorage;
cemu_assert_debug(count != 0);
uint16 indexMinU16 = 0xFFFF;
uint16 indexMaxU16 = 0;
if (primitiveRestartIndex < 0x10000)
{
// with primitive restart index
uint16 primitiveRestartIndexU16 = (uint16)primitiveRestartIndex;
for (uint32 i = 0; i < count; i++)
{
uint16 idxU16 = _swapEndianU16(*indexInputU16);
indexInputU16++;
if (primitiveRestartIndexU16 != idxU16)
{
indexMinU16 = std::min(indexMinU16, idxU16);
indexMaxU16 = std::max(indexMaxU16, idxU16);
}
*indexOutputU16 = idxU16;
indexOutputU16++;
}
}
else
{
// without primitive restart index
for (uint32 i = 0; i < count; i++)
{
uint16 idxU16 = _swapEndianU16(*indexInputU16);
indexInputU16++;
indexMinU16 = std::min(indexMinU16, idxU16);
indexMaxU16 = std::max(indexMaxU16, idxU16);
*indexOutputU16 = idxU16;
indexOutputU16++;
}
}
cacheEntry->minIndex = indexMinU16;
cacheEntry->maxIndex = indexMaxU16;
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, cacheEntry->heapEntry->startOffset, count * sizeof(uint16), indexState.tempIndexStorage);
performanceMonitor.cycle[performanceMonitor.cycleIndex].indexDataUploaded += (count * sizeof(uint16));
}
else if(cacheEntry->indexType == _INDEX_TYPE::U32_BE)
{
// 32bit indices
uint32* indexInputU32 = (uint32*)memory_getPointerFromPhysicalOffset(cacheEntry->key.physAddr);
uint32* indexOutputU32 = (uint32*)indexState.tempIndexStorage;
cemu_assert_debug(count != 0);
uint32 indexMinU32 = _swapEndianU32(*indexInputU32);
uint32 indexMaxU32 = _swapEndianU32(*indexInputU32);
for (uint32 i = 0; i < count; i++)
{
uint32 idxU32 = _swapEndianU32(*indexInputU32);
indexInputU32++;
if (idxU32 != primitiveRestartIndex)
{
indexMinU32 = std::min(indexMinU32, idxU32);
indexMaxU32 = std::max(indexMaxU32, idxU32);
}
*indexOutputU32 = idxU32;
indexOutputU32++;
}
cacheEntry->minIndex = indexMinU32;
cacheEntry->maxIndex = indexMaxU32;
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, cacheEntry->heapEntry->startOffset, count * sizeof(uint32), indexState.tempIndexStorage);
performanceMonitor.cycle[performanceMonitor.cycleIndex].indexDataUploaded += (count * sizeof(uint32));
}
else
{
cemu_assert_debug(false);
}
}
void LatteDraw_cleanupAfterFrame()
{
// drop everything from cache that is older than 30 frames
uint32 frameCounter = LatteGPUState.frameCounter;
while (indexDataCacheFirst)
{
indexDataCacheEntry2_t* entry = indexDataCacheFirst;
if ((frameCounter - entry->lastAccessFrameCount) < 30)
break;
// remove entry
virtualBufferHeap_free(indexState.indexBufferVirtualHeap, entry->heapEntry);
_removeFromUsageLinkedList(entry);
_removeFromBucket(entry);
free(entry);
}
}
void LatteDrawGL_removeLeastRecentlyUsedIndexCacheEntries(sint32 count)
{
while (indexDataCacheFirst && count > 0)
{
indexDataCacheEntry2_t* entry = indexDataCacheFirst;
// remove entry
virtualBufferHeap_free(indexState.indexBufferVirtualHeap, entry->heapEntry);
_removeFromUsageLinkedList(entry);
_removeFromBucket(entry);
free(entry);
count--;
}
}
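// Cheap change-detection hash over the index data. For buffers of at least 16 bytes only the first and
// last 16 bytes are sampled, so this is a fast heuristic rather than a full content hash.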
uint32 LatteDrawGL_calculateIndexDataHash(uint8* data, uint32 size)
{
uint32 h = 0;
if (size < 16)
{
// hash the bytes individually
while (size != 0)
{
h += (uint32)*data;
data++;
size--;
}
return h;
}
// first 16 bytes
h += *(uint32*)(data + 0);
h += *(uint32*)(data + 4);
h += *(uint32*)(data + 8);
h += *(uint32*)(data + 12);
// last 16 bytes
data = data + ((size - 16)&~3);
h += *(uint32*)(data + 0);
h += *(uint32*)(data + 4);
h += *(uint32*)(data + 8);
h += *(uint32*)(data + 12);
return h;
}
// index handling with cache
// todo - Outdated cache implementation. Update OpenGL renderer to use the generic implementation that is also used by the Vulkan renderer
void LatteDrawGL_prepareIndicesWithGPUCache(MPTR indexDataMPTR, _INDEX_TYPE indexType, sint32 count, sint32 primitiveMode)
{
if (indexType == _INDEX_TYPE::AUTO)
{
indexState.minIndex = 0;
indexState.maxIndex = count - 1;
// since no indices are used we don't need to unbind the element array buffer
return; // automatic indices
}
OpenGLRenderer::SetArrayElementBuffer(indexState.glIndexCacheBuffer);
uint32 indexDataBucketIdx = (uint32)((indexDataMPTR + count) ^ (indexDataMPTR >> 16)) % INDEX_DATA_CACHE_BUCKETS;
// find matching entry
uint32 primitiveRestartIndex = LatteGPUState.contextNew.VGT_MULTI_PRIM_IB_RESET_INDX.get_RESTART_INDEX();
indexDataCacheEntry2_t* cacheEntryItr = indexDataCacheBucket[indexDataBucketIdx];
indexDataCacheKey_t compareKey;
compareKey.physAddr = indexDataMPTR;
compareKey.count = count;
compareKey.primitiveMode = primitiveMode;
compareKey.primitiveRestartIndex = primitiveRestartIndex;
while (cacheEntryItr)
{
if (memcmp(&(cacheEntryItr->key), &compareKey, sizeof(compareKey)) != 0)
{
// next
cacheEntryItr = cacheEntryItr->nextInBucket;
continue;
}
// entry found
indexState.minIndex = cacheEntryItr->minIndex;
indexState.maxIndex = cacheEntryItr->maxIndex;
indexState.indexData = (uint8*)(size_t)cacheEntryItr->heapEntry->startOffset;
cacheEntryItr->lastAccessFrameCount = LatteGPUState.frameCounter;
// check if the data changed
uint32 h = LatteDrawGL_calculateIndexDataHash(memory_getPointerFromPhysicalOffset(indexDataMPTR), cacheEntryItr->physSize);
if (cacheEntryItr->hash != h)
{
cemuLog_logDebug(LogType::Force, "IndexData hash changed");
_decodeAndUploadIndexData(cacheEntryItr);
cacheEntryItr->hash = h;
}
// move entry to the front
_removeFromUsageLinkedList(cacheEntryItr);
_appendToUsageLinkedList(cacheEntryItr);
return;
}
// calculate size of index data in cache
sint32 cacheIndexDataSize = 0;
if (indexType == _INDEX_TYPE::U16_BE || indexType == _INDEX_TYPE::U16_LE)
cacheIndexDataSize = count * sizeof(uint16);
else
cacheIndexDataSize = count * sizeof(uint32);
// no matching entry, create new one
VirtualBufferHeapEntry_t* heapEntry = virtualBufferHeap_allocate(indexState.indexBufferVirtualHeap, cacheIndexDataSize);
if (heapEntry == nullptr)
{
while (true)
{
LatteDrawGL_removeLeastRecentlyUsedIndexCacheEntries(10);
heapEntry = virtualBufferHeap_allocate(indexState.indexBufferVirtualHeap, cacheIndexDataSize);
if (heapEntry != nullptr)
break;
if (indexDataCacheFirst == nullptr)
{
cemuLog_log(LogType::Force, "Unable to allocate entry in index cache");
assert_dbg();
}
}
}
indexDataCacheEntry2_t* cacheEntry = (indexDataCacheEntry2_t*)malloc(sizeof(indexDataCacheEntry2_t));
memset(cacheEntry, 0, sizeof(indexDataCacheEntry2_t));
cacheEntry->key.physAddr = indexDataMPTR;
cacheEntry->physSize = (indexType == _INDEX_TYPE::U16_BE || indexType == _INDEX_TYPE::U16_LE) ? (count * sizeof(uint16)) : (count * sizeof(uint32));
cacheEntry->hash = LatteDrawGL_calculateIndexDataHash(memory_getPointerFromPhysicalOffset(indexDataMPTR), cacheEntry->physSize);
cacheEntry->key.count = count;
cacheEntry->key.primitiveRestartIndex = primitiveRestartIndex;
cacheEntry->indexType = indexType;
cacheEntry->key.primitiveMode = primitiveMode;
cacheEntry->heapEntry = heapEntry;
cacheEntry->lastAccessFrameCount = LatteGPUState.frameCounter;
// append entry in bucket list
cacheEntry->nextInBucket = indexDataCacheBucket[indexDataBucketIdx];
indexDataCacheBucket[indexDataBucketIdx] = cacheEntry;
// append as most recently used entry
_appendToUsageLinkedList(cacheEntry);
// decode and upload the data
_decodeAndUploadIndexData(cacheEntry);
indexDataCacheEntryCount++;
indexState.minIndex = cacheEntry->minIndex;
indexState.maxIndex = cacheEntry->maxIndex;
indexState.indexData = (uint8*)(size_t)cacheEntry->heapEntry->startOffset;
}
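// Special state 8 ("clear as depth"): look up every texture view that aliases the depth buffer's memory
// and clear it directly. The clear color is read (somewhat hackily) from the ALU constant registers,
// which presumably approximates the color the replaced drawcall would have written.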
void LatteDraw_handleSpecialState8_clearAsDepth()
{
if (LatteGPUState.contextNew.GetSpecialStateValues()[0] == 0)
cemuLog_logDebug(LogType::Force, "Special state 8 requires special state 0 but it is not set?");
// get depth buffer information
uint32 regDepthBuffer = LatteGPUState.contextRegister[mmDB_HTILE_DATA_BASE];
uint32 regDepthSize = LatteGPUState.contextRegister[mmDB_DEPTH_SIZE];
uint32 regDepthBufferInfo = LatteGPUState.contextRegister[mmDB_DEPTH_INFO];
// get format and tileMode from info reg
uint32 depthBufferTileMode = (regDepthBufferInfo >> 15) & 0xF;
MPTR depthBufferPhysMem = regDepthBuffer << 8;
uint32 depthBufferPitch = (((regDepthSize >> 0) & 0x3FF) + 1);
uint32 depthBufferHeight = ((((regDepthSize >> 10) & 0xFFFFF) + 1) / depthBufferPitch);
depthBufferPitch <<= 3;
depthBufferHeight <<= 3;
uint32 depthBufferWidth = depthBufferPitch;
sint32 sliceIndex = 0; // todo
sint32 mipIndex = 0;
// clear all color buffers that match the format of the depth buffer
sint32 searchIndex = 0;
bool targetFound = false;
while (true)
{
LatteTextureView* view = LatteTC_LookupTextureByData(depthBufferPhysMem, depthBufferWidth, depthBufferHeight, depthBufferPitch, 0, 1, sliceIndex, 1, &searchIndex);
if (!view)
{
// should we clear in RAM instead?
break;
}
sint32 effectiveClearWidth = view->baseTexture->width;
sint32 effectiveClearHeight = view->baseTexture->height;
LatteTexture_scaleToEffectiveSize(view->baseTexture, &effectiveClearWidth, &effectiveClearHeight, 0);
// hacky way to get clear color
float* regClearColor = (float*)(LatteGPUState.contextRegister + 0xC000 + 0); // REG_BASE_ALU_CONST
uint8 clearColor[4] = { 0 };
clearColor[0] = (uint8)(regClearColor[0] * 255.0f);
clearColor[1] = (uint8)(regClearColor[1] * 255.0f);
clearColor[2] = (uint8)(regClearColor[2] * 255.0f);
clearColor[3] = (uint8)(regClearColor[3] * 255.0f);
// todo - use fragment shader software emulation (evoke for one pixel) to determine clear color
// todo - dont clear entire slice, use effectiveClearWidth, effectiveClearHeight
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
//cemu_assert_debug(false); // implement g_renderer->texture_clearColorSlice properly for OpenGL renderer
if (glClearTexSubImage)
glClearTexSubImage(((LatteTextureViewGL*)view)->glTexId, mipIndex, 0, 0, 0, effectiveClearWidth, effectiveClearHeight, 1, GL_RGBA, GL_UNSIGNED_BYTE, clearColor);
}
else
{
if (view->baseTexture->isDepth)
g_renderer->texture_clearDepthSlice(view->baseTexture, sliceIndex + view->firstSlice, mipIndex + view->firstMip, true, view->baseTexture->hasStencil, 0.0f, 0);
else
g_renderer->texture_clearColorSlice(view->baseTexture, sliceIndex + view->firstSlice, mipIndex + view->firstMip, clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
}
}
}
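// Issues the actual GL drawcall. Depending on index type, instancing and base vertex/instance this
// dispatches to glDrawElementsInstancedBaseVertexBaseInstance, glDrawRangeElements(BaseVertex) or
// glDrawArrays(Instanced).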
void LatteDrawGL_doDraw(_INDEX_TYPE indexType, uint32 baseVertex, uint32 baseInstance, uint32 instanceCount, uint32 count)
{
if (indexType == _INDEX_TYPE::U16_BE)
{
// 16bit index, big endian
if (instanceCount > 1 || baseInstance != 0)
{
glDrawElementsInstancedBaseVertexBaseInstance(sGLActiveDrawMode, count, GL_UNSIGNED_SHORT, indexState.indexData, instanceCount, baseVertex, baseInstance);
}
else
{
if (baseVertex != 0)
glDrawRangeElementsBaseVertex(sGLActiveDrawMode, indexState.minIndex, indexState.maxIndex, count, GL_UNSIGNED_SHORT, indexState.indexData, baseVertex);
else
glDrawRangeElements(sGLActiveDrawMode, indexState.minIndex, indexState.maxIndex, count, GL_UNSIGNED_SHORT, indexState.indexData);
}
}
else if (indexType == _INDEX_TYPE::U32_BE)
{
// 32bit index, big endian
if (instanceCount > 1 || baseInstance != 0)
{
//debug_printf("Render instanced\n");
glDrawElementsInstancedBaseVertexBaseInstance(sGLActiveDrawMode, count, GL_UNSIGNED_INT, indexState.indexData, instanceCount, baseVertex, baseInstance);
}
else
{
glDrawRangeElementsBaseVertex(sGLActiveDrawMode, indexState.minIndex, indexState.maxIndex, count, GL_UNSIGNED_INT, indexState.indexData, baseVertex);
}
}
else if (indexType == _INDEX_TYPE::AUTO)
{
// render without index (automatic index generation)
cemu_assert_debug(baseInstance == 0);
if (instanceCount > 1)
glDrawArraysInstanced(sGLActiveDrawMode, baseVertex, count, instanceCount);
else
{
glDrawArrays(sGLActiveDrawMode, baseVertex, count);
}
}
else
{
cemu_assert_debug(false);
}
}
uint32 _glVertexBufferOffset[32] = { 0 };
void OpenGLRenderer::buffer_bindVertexBuffer(uint32 bufferIndex, uint32 offset, uint32 size)
{
_glVertexBufferOffset[bufferIndex] = offset;
}
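// Uniform buffer binding points are partitioned per shader stage: vertex shader bindings start at 0,
// pixel shader bindings at 32 and geometry shader bindings at 64.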
void OpenGLRenderer::buffer_bindUniformBuffer(LatteConst::ShaderType shaderType, uint32 bufferIndex, uint32 offset, uint32 size)
{
switch (shaderType)
{
case LatteConst::ShaderType::Vertex:
bufferIndex += 0;
break;
case LatteConst::ShaderType::Pixel:
bufferIndex += 32;
break;
case LatteConst::ShaderType::Geometry:
bufferIndex += 64;
break;
}
if (offset == 0 && size == 0)
{
// when binding NULL we just bind some arbitrary undefined data so the OpenGL driver is happy since a size of 0 is not allowed (should we bind a buffer filled with zeroes instead?)
glBindBufferRange(GL_UNIFORM_BUFFER, bufferIndex, glAttributeCacheAB, 0, 32);
return;
}
glBindBufferRange(GL_UNIFORM_BUFFER, bufferIndex, glAttributeCacheAB, offset, size);
}
void LatteDraw_resetAttributePointerCache();
void _resetAttributes(LatteParsedFetchShaderBufferGroup_t* attribGroup, bool* attributeArrayUsed)
{
for (sint32 i = 0; i < attribGroup->attribCount; i++)
{
LatteParsedFetchShaderAttribute_t* attrib = attribGroup->attrib + i;
sint32 attributeShaderLocation = attrib->semanticId; // we now bind to the semanticId instead
attributeArrayUsed[attributeShaderLocation] = false;
}
}
void OpenGLRenderer::_setupVertexAttributes()
{
LatteFetchShader* fetchShader = LatteSHRC_GetActiveFetchShader();
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
catchOpenGLError();
// bind buffer
attributeStream_bindVertexCacheBuffer();
catchOpenGLError();
LatteFetchShader* parsedFetchShader = LatteSHRC_GetActiveFetchShader();
bool attributeArrayUsed[32] = { 0 }; // used to keep track of enabled vertex attributes for this shader
sint32 attributeDataIndex = 0;
uint32 vboDataOffset = 0;
bool tfBufferIsBound = false;
sint32 maxReallocAttemptLimit = 1;
for(auto& bufferGroup : parsedFetchShader->bufferGroups)
{
uint32 bufferIndex = bufferGroup.attributeBufferIndex;
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
MPTR bufferAddress = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 0];
uint32 bufferSize = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 1] + 1;
uint32 bufferStride = (LatteGPUState.contextRegister[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
if (bufferAddress == MPTR_NULL)
{
_resetAttributes(&bufferGroup, attributeArrayUsed);
continue;
}
vboDataOffset = _glVertexBufferOffset[bufferIndex];
for (sint32 i = 0; i < bufferGroup.attribCount; i++)
{
LatteParsedFetchShaderAttribute_t* attrib = bufferGroup.attrib + i;
sint32 attributeShaderLocation = vertexShader->resourceMapping.getAttribHostShaderIndex(attrib->semanticId); // we now bind to the semanticId instead
if (attributeShaderLocation == -1)
continue; // attribute not used
if (attributeShaderLocation >= GPU_GL_MAX_NUM_ATTRIBUTE)
continue;
if (attributeArrayUsed[attributeShaderLocation] == true)
{
debug_printf("Fetch shader attribute is bound multiple times\n");
}
// get buffer
uint32 bufferIndex = attrib->attributeBufferIndex;
cemu_assert_debug(bufferIndex < 0x10);
cemu_assert_debug(attrib->fetchType == LatteConst::VERTEX_DATA || attrib->fetchType == LatteConst::INSTANCE_DATA); // unsupported fetch type
SetAttributeArrayState(attributeShaderLocation, true, (bufferStride == 0) ? 99999999 : attrib->aluDivisor);
uint8* bufferInput = memory_getPointerFromPhysicalOffset(bufferAddress) + attrib->offset;
uint32 bufferSizeInput = bufferSize - attrib->offset;
uint8* vboGLPtr;
vboGLPtr = (uint8*)(size_t)(vboDataOffset + attrib->offset);
_setAttributeBufferPointerRaw(attributeShaderLocation, NULL, 0, bufferStride, attrib, vboGLPtr, bufferStride);
attributeArrayUsed[attributeShaderLocation] = true;
attributeDataIndex++;
catchOpenGLError();
}
}
for (uint32 i = 0; i < GPU_GL_MAX_NUM_ATTRIBUTE; i++)
{
if (attributeArrayUsed[i] == false && glAttributeArrayIsEnabled[i] == true)
SetAttributeArrayState(i, false, -1);
}
}
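// GPU7 RECT primitives have no OpenGL equivalent. They are emulated by drawing regular triangles and
// expanding each one into a 4-vertex triangle strip in a generated geometry shader, which derives the
// missing fourth corner from the three input vertices (see rectsEmulationGS_generateShaderGL below).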
void rectsEmulationGS_outputSingleVertex(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, sint32 vIdx);
void rectsEmulationGS_outputGeneratedVertex(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, const char* variant);
void rectsEmulationGS_outputVerticesCode(std::string& gsSrc, LatteDecompilerShader* vertexShader, LatteShaderPSInputTable* psInputTable, sint32 p0, sint32 p1, sint32 p2, sint32 p3, const char* variant, const LatteContextRegister& latteRegister);
std::map<uint64, RendererShaderGL*> g_mapGLRectEmulationGS;
RendererShaderGL* rectsEmulationGS_generateShaderGL(LatteDecompilerShader* vertexShader)
{
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
std::string gsSrc;
gsSrc.append("#version 450\r\n");
// layout
gsSrc.append("layout(triangles) in;\r\n");
gsSrc.append("layout(triangle_strip) out;\r\n");
gsSrc.append("layout(max_vertices = 4) out;\r\n");
// gl_PerVertex input
gsSrc.append("in gl_PerVertex {\r\n");
gsSrc.append("vec4 gl_Position;\r\n");
gsSrc.append("} gl_in[];\r\n");
// gl_PerVertex output
gsSrc.append("out gl_PerVertex {\r\n");
gsSrc.append("vec4 gl_Position;\r\n");
gsSrc.append("};\r\n");
// inputs & outputs
auto parameterMask = vertexShader->outputParameterMask;
for (sint32 f = 0; f < 2; f++)
{
for (uint32 i = 0; i < 32; i++)
{
if ((parameterMask & (1 << i)) == 0)
continue;
sint32 vsSemanticId = psInputTable->getVertexShaderOutParamSemanticId(LatteGPUState.contextRegister, i);
if (vsSemanticId < 0)
continue;
auto psImport = psInputTable->getPSImportBySemanticId(vsSemanticId);
if (psImport == nullptr)
continue;
gsSrc.append(fmt::format("layout(location = {}) ", psInputTable->getPSImportLocationBySemanticId(vsSemanticId)));
if (psImport->isFlat)
gsSrc.append("flat ");
if (psImport->isNoPerspective)
gsSrc.append("noperspective ");
if (f == 0)
gsSrc.append("in");
else
gsSrc.append("out");
if (f == 0)
gsSrc.append(fmt::format(" vec4 passParameterSem{}In[];\r\n", vsSemanticId));
else
gsSrc.append(fmt::format(" vec4 passParameterSem{}Out;\r\n", vsSemanticId));
}
}
// gen function
gsSrc.append("vec4 gen4thVertexA(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return b - (c - a);\r\n");
gsSrc.append("}\r\n");
gsSrc.append("vec4 gen4thVertexB(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return c - (b - a);\r\n");
gsSrc.append("}\r\n");
gsSrc.append("vec4 gen4thVertexC(vec4 a, vec4 b, vec4 c)\r\n");
gsSrc.append("{\r\n");
gsSrc.append("return c + (b - a);\r\n");
gsSrc.append("}\r\n");
// main
gsSrc.append("void main()\r\n");
gsSrc.append("{\r\n");
// there are two possible winding orders that need different triangle generation:
// 0 1
// 2 3
// and
// 0 1
// 3 2
// all others are just symmetries of these cases
// we can determine the case by comparing the distance 0<->1 and 0<->2
gsSrc.append("float dist0_1 = length(gl_in[1].gl_Position.xy - gl_in[0].gl_Position.xy);\r\n");
gsSrc.append("float dist0_2 = length(gl_in[2].gl_Position.xy - gl_in[0].gl_Position.xy);\r\n");
gsSrc.append("float dist1_2 = length(gl_in[2].gl_Position.xy - gl_in[1].gl_Position.xy);\r\n");
// emit vertices
gsSrc.append("if(dist0_1 > dist0_2 && dist0_1 > dist1_2)\r\n");
gsSrc.append("{\r\n");
// p0 to p1 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 2, 1, 0, 3, "A", LatteGPUState.contextNew);
gsSrc.append("} else if ( dist0_2 > dist0_1 && dist0_2 > dist1_2 ) {\r\n");
// p0 to p2 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 1, 2, 0, 3, "B", LatteGPUState.contextNew);
gsSrc.append("} else {\r\n");
// p1 to p2 is diagonal
rectsEmulationGS_outputVerticesCode(gsSrc, vertexShader, psInputTable, 0, 1, 2, 3, "C", LatteGPUState.contextNew);
gsSrc.append("}\r\n");
gsSrc.append("}\r\n");
auto glShader = new RendererShaderGL(RendererShader::ShaderType::kGeometry, 0, 0, false, false, gsSrc);
glShader->PreponeCompilation(true);
return glShader;
}
RendererShaderGL* rectsEmulationGS_getShaderGL(LatteDecompilerShader* vertexShader)
{
LatteShaderPSInputTable* psInputTable = LatteSHRC_GetPSInputTable();
uint64 h = vertexShader->baseHash + psInputTable->key;
auto itr = g_mapGLRectEmulationGS.find(h);
if (itr != g_mapGLRectEmulationGS.end())
return (*itr).second;
auto glShader = rectsEmulationGS_generateShaderGL(vertexShader);
g_mapGLRectEmulationGS.emplace(h, glShader);
return glShader;
}
uint32 sPrevTextureReadbackDrawcallUpdate = 0;
template<bool TIsMinimal, bool THasProfiling>
void OpenGLRenderer::draw_genericDrawHandler(uint32 baseVertex, uint32 baseInstance, uint32 instanceCount, uint32 count, MPTR indexDataMPTR, Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE indexType)
{
ReleaseBufferCacheEntries();
catchOpenGLError();
void* indexData = indexDataMPTR != MPTR_NULL ? memory_getPointerFromPhysicalOffset(indexDataMPTR) : NULL;
auto primitiveMode = LatteGPUState.contextNew.VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE();
// handle special state 8 (clear as depth)
if (LatteGPUState.contextNew.GetSpecialStateValues()[8] != 0)
{
LatteDraw_handleSpecialState8_clearAsDepth();
LatteGPUState.drawCallCounter++;
return;
}
// update shaders and uniforms
if constexpr (!TIsMinimal)
{
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageShaderAndUniformMgr);
LatteSHRC_UpdateActiveShaders();
LatteDecompilerShader* vs = (LatteDecompilerShader*)LatteSHRC_GetActiveVertexShader();
LatteDecompilerShader* gs = (LatteDecompilerShader*)LatteSHRC_GetActiveGeometryShader();
LatteDecompilerShader* ps = (LatteDecompilerShader*)LatteSHRC_GetActivePixelShader();
if (vs)
shader_bind(vs->shader);
else
shader_unbind(RendererShader::ShaderType::kVertex);
if (ps && LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] == 0)
shader_bind(ps->shader);
else
shader_unbind(RendererShader::ShaderType::kFragment);
if (gs)
shader_bind(gs->shader);
else
shader_unbind(RendererShader::ShaderType::kGeometry);
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageShaderAndUniformMgr);
}
if (LatteGPUState.activeShaderHasError)
{
debug_printf("Skipped drawcall due to shader error\n");
return;
}
// check for blacklisted shaders
uint64 vsShaderHash = 0;
if (LatteSHRC_GetActiveVertexShader())
vsShaderHash = LatteSHRC_GetActiveVertexShader()->baseHash;
uint64 psShaderHash = 0;
if (LatteSHRC_GetActivePixelShader())
psShaderHash = LatteSHRC_GetActivePixelShader()->baseHash;
// setup streamout (if enabled)
bool rasterizerEnable = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_RASTERIZATION_KILL() == false;
if (!LatteGPUState.contextNew.PA_CL_VTE_CNTL.get_VPORT_X_OFFSET_ENA())
rasterizerEnable = true;
bool streamoutEnable = LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] != 0;
if (streamoutEnable)
{
if (glBeginTransformFeedback == nullptr)
{
cemu_assert_debug(false);
return; // transform feedback not supported
}
}
// skip draw if output is not used
if (rasterizerEnable == false && streamoutEnable == false)
{
// rasterizer and streamout disabled
LatteGPUState.drawCallCounter++;
return;
}
// get primitive
if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLES)
sGLActiveDrawMode = GL_TRIANGLES;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLE_STRIP)
sGLActiveDrawMode = GL_TRIANGLE_STRIP;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUADS)
sGLActiveDrawMode = GL_QUADS;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::TRIANGLE_FAN)
sGLActiveDrawMode = GL_TRIANGLE_FAN;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS)
sGLActiveDrawMode = GL_TRIANGLES;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::POINTS)
sGLActiveDrawMode = GL_POINTS;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINES)
sGLActiveDrawMode = GL_LINES;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINE_STRIP)
sGLActiveDrawMode = GL_LINE_STRIP;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::LINE_LOOP)
sGLActiveDrawMode = GL_LINE_LOOP;
else if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUAD_STRIP)
sGLActiveDrawMode = GL_QUAD_STRIP;
else
{
cemu_assert_debug(false); // unsupported primitive type
LatteGPUState.drawCallCounter++;
return;
}
if constexpr (!TIsMinimal)
{
// update render targets and textures
LatteGPUState.requiresTextureBarrier = false;
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageTextures);
while (true)
{
LatteGPUState.repeatTextureInitialization = false;
if (streamoutEnable == false)
{
// only handle rendertargets if streamout is inactive
if (LatteMRT::UpdateCurrentFBO() == false)
return; // no render target
if (hasValidFramebufferAttached == false)
return;
}
LatteTexture_updateTextures(); // caution: Do not call any functions that potentially modify texture bindings after this line
if (LatteGPUState.repeatTextureInitialization == false)
break;
catchOpenGLError();
}
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageTextures);
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageMRT);
LatteMRT::ApplyCurrentState();
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageMRT);
// texture barrier for write-read patterns
if (LatteGPUState.requiresTextureBarrier && glTextureBarrier)
{
glTextureBarrier();
}
}
catchOpenGLError();
// handle RECT primitive
if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS)
{
RendererShaderGL* rectsEmulationShader = rectsEmulationGS_getShaderGL(LatteSHRC_GetActiveVertexShader());
shader_bind(rectsEmulationShader);
}
// prepare index cache
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageIndexMgr);
LatteDrawGL_prepareIndicesWithGPUCache(indexDataMPTR, indexType, count, (sint32)primitiveMode);
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageIndexMgr);
// synchronize vertex and uniform buffers
LatteBufferCache_Sync(indexState.minIndex + baseVertex, indexState.maxIndex + baseVertex, baseInstance, instanceCount);
_setupVertexAttributes();
// update renderstate
LatteRenderTarget_updateViewport();
LatteRenderTarget_updateScissorBox();
renderstate_updateBlendingAndColorControl();
catchOpenGLError();
// handle special state 5 (convert depth to color)
if (LatteGPUState.contextNew.GetSpecialStateValues()[5] != 0)
{
debug_printf("GPU7 special state 5 used\n");
LatteTextureView* rt_color = LatteMRT::GetColorAttachment(0);
LatteTextureView* rt_depth = LatteMRT::GetDepthAttachment();
if (!rt_depth || !rt_color)
{
cemuLog_log(LogType::Force, "GPU7 special state 5 used but render target not setup correctly");
return;
}
surfaceCopy_copySurfaceWithFormatConversion(rt_depth->baseTexture, rt_depth->firstMip, rt_depth->firstSlice, rt_color->baseTexture, rt_color->firstMip, rt_color->firstSlice, rt_depth->baseTexture->width, rt_depth->baseTexture->height);
LatteGPUState.drawCallCounter++;
return;
}
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageShaderAndUniformMgr);
// update uniform values
uniformData_update();
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageShaderAndUniformMgr);
catchOpenGLError();
// upload special uniforms
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
if (vertexShader)
{
auto vertexShaderGL = (RendererShaderGL*)vertexShader->shader;
if (vertexShader->uniform.loc_windowSpaceToClipSpaceTransform >= 0)
{
sint32 viewportWidth;
sint32 viewportHeight;
LatteRenderTarget_GetCurrentVirtualViewportSize(&viewportWidth, &viewportHeight); // always call after _updateViewport()
float t[2];
t[0] = 2.0f / (float)viewportWidth;
t[1] = 2.0f / (float)viewportHeight;
glProgramUniform2fv(vertexShaderGL->GetProgram(), vertexShader->uniform.loc_windowSpaceToClipSpaceTransform, 1, t);
}
// update uf_texRescaleFactors
for (auto& entry : vertexShader->uniform.list_ufTexRescale)
{
float* xyScale = LatteTexture_getEffectiveTextureScale(LatteConst::ShaderType::Vertex, entry.texUnit);
if (memcmp(entry.currentValue, xyScale, sizeof(float) * 2) == 0)
continue; // value unchanged
memcpy(entry.currentValue, xyScale, sizeof(float) * 2);
glProgramUniform2fv(vertexShaderGL->GetProgram(), entry.uniformLocation, 1, xyScale);
}
// update uf_pointSize
if (vertexShader->uniform.loc_pointSize >= 0)
{
float t[1];
float pointWidth = (float)LatteGPUState.contextNew.PA_SU_POINT_SIZE.get_WIDTH() / 8.0f;
if (pointWidth == 0.0f)
pointWidth = 1.0f / 8.0f; // minimum size
t[0] = pointWidth;
glProgramUniform1fv(vertexShaderGL->GetProgram(), vertexShader->uniform.loc_pointSize, 1, t);
}
}
if (geometryShader)
{
auto geometryShaderGL = (RendererShaderGL*)geometryShader->shader;
// update uf_texRescaleFactors
for (auto& entry : geometryShader->uniform.list_ufTexRescale)
{
float* xyScale = LatteTexture_getEffectiveTextureScale(LatteConst::ShaderType::Geometry, entry.texUnit);
if (memcmp(entry.currentValue, xyScale, sizeof(float) * 2) == 0)
continue; // value unchanged
memcpy(entry.currentValue, xyScale, sizeof(float) * 2);
glProgramUniform2fv(geometryShaderGL->GetProgram(), entry.uniformLocation, 1, xyScale);
}
// update uf_pointSize
if (geometryShader->uniform.loc_pointSize >= 0)
{
float t[1];
float pointWidth = (float)LatteGPUState.contextNew.PA_SU_POINT_SIZE.get_WIDTH() / 8.0f;
if (pointWidth == 0.0f)
pointWidth = 1.0f / 8.0f; // minimum size
t[0] = pointWidth;
glProgramUniform1fv(geometryShaderGL->GetProgram(), geometryShader->uniform.loc_pointSize, 1, t);
}
}
if (pixelShader)
{
auto pixelShaderGL = (RendererShaderGL*)pixelShader->shader;
if (pixelShader->uniform.loc_alphaTestRef >= 0)
{
float t[1];
t[0] = LatteGPUState.contextNew.SX_ALPHA_REF.get_ALPHA_TEST_REF();
if (pixelShader->uniform.ufCurrentValueAlphaTestRef != t[0])
{
glProgramUniform1fv(pixelShaderGL->GetProgram(), pixelShader->uniform.loc_alphaTestRef, 1, t);
pixelShader->uniform.ufCurrentValueAlphaTestRef = t[0];
}
}
// update uf_fragCoordScale
if (pixelShader->uniform.loc_fragCoordScale >= 0)
{
float coordScale[4];
LatteMRT::GetCurrentFragCoordScale(coordScale);
if (pixelShader->uniform.ufCurrentValueFragCoordScale[0] != coordScale[0] || pixelShader->uniform.ufCurrentValueFragCoordScale[1] != coordScale[1])
{
glProgramUniform2fv(pixelShaderGL->GetProgram(), pixelShader->uniform.loc_fragCoordScale, 1, coordScale);
pixelShader->uniform.ufCurrentValueFragCoordScale[0] = coordScale[0];
pixelShader->uniform.ufCurrentValueFragCoordScale[1] = coordScale[1];
}
}
// update uf_texRescaleFactors
for (auto& entry : pixelShader->uniform.list_ufTexRescale)
{
float* xyScale = LatteTexture_getEffectiveTextureScale(LatteConst::ShaderType::Pixel, entry.texUnit);
if (memcmp(entry.currentValue, xyScale, sizeof(float) * 2) == 0)
continue; // value unchanged
memcpy(entry.currentValue, xyScale, sizeof(float) * 2);
glProgramUniform2fv(pixelShaderGL->GetProgram(), entry.uniformLocation, 1, xyScale);
}
}
catchOpenGLError();
// prepare streamout
LatteStreamout_PrepareDrawcall(count, instanceCount);
if (streamoutEnable && rasterizerEnable)
{
if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::QUADS)
sGLActiveDrawMode = GL_POINTS;
}
catchOpenGLError();
// render
beginPerfMonProfiling(performanceMonitor.gpuTime_dcStageDrawcallAPI);
LatteDrawGL_doDraw(indexType, baseVertex, baseInstance, instanceCount, count);
endPerfMonProfiling(performanceMonitor.gpuTime_dcStageDrawcallAPI);
// post-drawcall logic
if(pixelShader)
LatteRenderTarget_trackUpdates();
LatteStreamout_FinishDrawcall(false);
catchOpenGLError();
if (primitiveMode == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS)
shader_unbind(RendererShader::ShaderType::kGeometry);
LatteGPUState.drawCallCounter++;
if (streamoutEnable && rasterizerEnable)
{
// streamout and rasterizer enabled, repeat drawcall with streamout disabled
uint32 strmOutEnOrg = LatteGPUState.contextRegister[mmVGT_STRMOUT_EN];
LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] = 0;
draw_genericDrawHandler<false, THasProfiling>(baseVertex, baseInstance, instanceCount, count, indexDataMPTR, indexType);
LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] = strmOutEnOrg;
return;
}
LatteTextureReadback_Update();
uint32 dcSinceLastReadbackCheck = LatteGPUState.drawCallCounter - sPrevTextureReadbackDrawcallUpdate;
if (dcSinceLastReadbackCheck >= 150)
{
LatteTextureReadback_UpdateFinishedTransfers(false);
sPrevTextureReadbackDrawcallUpdate = LatteGPUState.drawCallCounter;
}
catchOpenGLError();
}
void OpenGLRenderer::draw_beginSequence()
{
// no-op
}
void OpenGLRenderer::draw_execute(uint32 baseVertex, uint32 baseInstance, uint32 instanceCount, uint32 count, MPTR indexDataMPTR, Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE indexType, bool isFirst)
{
bool isMinimal = !isFirst;
if (isMinimal)
draw_genericDrawHandler<true, false>(baseVertex, baseInstance, instanceCount, count, indexDataMPTR, indexType);
else
draw_genericDrawHandler<false, false>(baseVertex, baseInstance, instanceCount, count, indexDataMPTR, indexType);
}
void OpenGLRenderer::draw_endSequence()
{
// no-op
}
#define GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR (18*1024*1024) // 18MB
void OpenGLRenderer::draw_init()
{
if (indexState.initialized)
return;
indexState.initialized = true;
// create index buffer
glGenBuffers(1, &indexState.glIndexCacheBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexState.glIndexCacheBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR, NULL, GL_DYNAMIC_DRAW);
#if BOOST_OS_WINDOWS
indexState.mappedIndexBuffer = (uint8*)_aligned_malloc(GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR, 256);
#else
indexState.mappedIndexBuffer = (uint8*)aligned_alloc(256, GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR);
#endif
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
indexState.indexRingBuffer = LatteRingBuffer_create(indexState.mappedIndexBuffer, GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR);
indexState.tempIndexStorage = (uint8*)malloc(1024 * 1024 * 8);
// create virtual heap for index buffer
indexState.indexBufferVirtualHeap = virtualBufferHeap_create(GPU7_INDEX_BUFFER_CACHE_SIZE_DEPR);
}
void OpenGLRenderer::bufferCache_upload(uint8* buffer, sint32 size, uint32 bufferOffset)
{
attributeStream_bindVertexCacheBuffer();
glBufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, buffer);
}
void OpenGLRenderer::bufferCache_copy(uint32 srcOffset, uint32 dstOffset, uint32 size)
{
attributeStream_bindVertexCacheBuffer();
glCopyBufferSubData(GL_ARRAY_BUFFER, GL_ARRAY_BUFFER, srcOffset, dstOffset, size);
}
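// Lookup tables translating GPU7 sampler/texture register fields to their closest GL equivalents
// (texture wrap modes, component swizzles and depth compare functions).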
GLint glClampTable[] =
{
GL_REPEAT,
GL_MIRRORED_REPEAT,
GL_CLAMP_TO_EDGE,
GL_MIRROR_CLAMP_TO_EDGE,
GL_CLAMP_TO_EDGE,
GL_MIRROR_CLAMP_TO_BORDER_EXT,
GL_CLAMP_TO_BORDER,
GL_MIRROR_CLAMP_TO_BORDER_EXT
};
GLint glCompSelTable[8] =
{
GL_RED,
GL_GREEN,
GL_BLUE,
GL_ALPHA,
GL_ZERO,
GL_ONE,
0,
0
};
GLint glDepthCompareTable[8] = {
GL_NEVER,
GL_LESS,
GL_EQUAL,
GL_LEQUAL,
GL_GREATER,
GL_NOTEQUAL,
GL_GEQUAL,
GL_ALWAYS
};
// Remaps component selection if the underlying OpenGL texture format would behave differently than its GPU7 counterpart
uint32 _correctTextureCompSelGL(Latte::E_GX2SURFFMT format, uint32 compSel)
{
switch (format)
{
case Latte::E_GX2SURFFMT::R8_UNORM: // R8 is replicated on all channels (while OpenGL would return 1.0 for BGA instead)
case Latte::E_GX2SURFFMT::R8_SNORM: // probably the same as _UNORM, but needs testing
if (compSel >= 1 && compSel <= 3)
compSel = 0;
break;
case Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM: // order of components is reversed (RGBA -> ABGR)
if (compSel >= 0 && compSel <= 3)
compSel = 3 - compSel;
break;
case Latte::E_GX2SURFFMT::BC4_UNORM:
case Latte::E_GX2SURFFMT::BC4_SNORM:
if (compSel >= 1 && compSel <= 3)
compSel = 0;
break;
case Latte::E_GX2SURFFMT::BC5_UNORM:
case Latte::E_GX2SURFFMT::BC5_SNORM:
// RG maps to RG
// B maps to ?
// A maps to G (guessed)
if (compSel == 3)
compSel = 1; // read Alpha as Green
break;
case Latte::E_GX2SURFFMT::A2_B10_G10_R10_UNORM:
// reverse components (Wii U: ABGR, OpenGL: RGBA)
// used in Resident Evil Revelations
if (compSel >= 0 && compSel <= 3)
compSel = 3 - compSel;
break;
case Latte::E_GX2SURFFMT::X24_G8_UINT:
// map everything to alpha?
if (compSel >= 0 && compSel <= 3)
compSel = 3;
break;
default:
break;
}
return compSel;
}
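// Binds the texture to its target unit lazily, i.e. only once the first sampler/swizzle parameter
// actually needs to be updated. This avoids redundant texture binds when all cached state already matches.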
#define quickBindTexture() if( textureIsActive == false ) { texture_bindAndActivate(hostTextureView, hostTextureUnit); textureIsActive = true; }
uint32 _getGLMinFilter(Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER filterMin, Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER filterMip)
{
bool isMinPointFilter = (filterMin == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::POINT) || (filterMin == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::ANISO_POINT);
if (filterMip == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER::NONE)
{
// no mip
return isMinPointFilter ? GL_NEAREST : GL_LINEAR;
}
else if (filterMip == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_Z_FILTER::POINT)
{
// nearest neighbor
return isMinPointFilter ? GL_NEAREST_MIPMAP_NEAREST : GL_LINEAR_MIPMAP_NEAREST;
}
// else -> filterMip == LINEAR
return isMinPointFilter ? GL_NEAREST_MIPMAP_LINEAR : GL_LINEAR_MIPMAP_LINEAR;
}
/*
* Update channel swizzling and other texture settings for a texture unit
* hostTextureView is the texture unit view used on the host side
*/
void OpenGLRenderer::renderstate_updateTextureSettingsGL(LatteDecompilerShader* shaderContext, LatteTextureView* _hostTextureView, uint32 hostTextureUnit, const Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N texUnitWord4, uint32 texUnitIndex, bool isDepthSampler)
{
auto hostTextureView = (LatteTextureViewGL*)_hostTextureView;
LatteTexture* baseTexture = hostTextureView->baseTexture;
// get texture register word 0
catchOpenGLError();
bool textureIsActive = false;
uint32 compSelR = (uint32)texUnitWord4.get_DST_SEL_X();
uint32 compSelG = (uint32)texUnitWord4.get_DST_SEL_Y();
uint32 compSelB = (uint32)texUnitWord4.get_DST_SEL_Z();
uint32 compSelA = (uint32)texUnitWord4.get_DST_SEL_W();
// on OpenGL some channels might be mapped differently
compSelR = _correctTextureCompSelGL(hostTextureView->format, compSelR);
compSelG = _correctTextureCompSelGL(hostTextureView->format, compSelG);
compSelB = _correctTextureCompSelGL(hostTextureView->format, compSelB);
compSelA = _correctTextureCompSelGL(hostTextureView->format, compSelA);
// update swizzle parameters
if (hostTextureView->swizzleR != compSelR)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_SWIZZLE_R, glCompSelTable[compSelR]);
hostTextureView->swizzleR = compSelR;
}
if (hostTextureView->swizzleG != compSelG)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_SWIZZLE_G, glCompSelTable[compSelG]);
hostTextureView->swizzleG = compSelG;
}
if (hostTextureView->swizzleB != compSelB)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_SWIZZLE_B, glCompSelTable[compSelB]);
hostTextureView->swizzleB = compSelB;
}
if (hostTextureView->swizzleA != compSelA)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_SWIZZLE_A, glCompSelTable[compSelA]);
hostTextureView->swizzleA = compSelA;
}
catchOpenGLError();
uint32 stageSamplerIndex = shaderContext->textureUnitSamplerAssignment[texUnitIndex];
if (stageSamplerIndex != LATTE_DECOMPILER_SAMPLER_NONE)
{
uint32 samplerIndex = stageSamplerIndex;
samplerIndex += LatteDecompiler_getTextureSamplerBaseIndex(shaderContext->shaderType);
const _LatteRegisterSetSampler* samplerWords = LatteGPUState.contextNew.SQ_TEX_SAMPLER + samplerIndex;
auto filterMag = samplerWords->WORD0.get_XY_MAG_FILTER();
auto filterMin = samplerWords->WORD0.get_XY_MIN_FILTER();
//auto filterZ = samplerWords->WORD0.get_Z_FILTER();
auto filterMip = samplerWords->WORD0.get_MIP_FILTER();
// get OpenGL constant for min filter
uint32 filterMinGL = _getGLMinFilter(filterMin, filterMip);
uint32 filterMagGL = (filterMag == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::POINT || filterMag == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_XY_FILTER::ANISO_POINT) ? GL_NEAREST : GL_LINEAR;
// todo: z-filter is customizable for GPU7 but OpenGL doesn't offer the same functionality?
LatteSamplerState* samplerState = &hostTextureView->samplerState;
catchOpenGLError();
uint32 clampX = (uint32)samplerWords->WORD0.get_CLAMP_X();
uint32 clampY = (uint32)samplerWords->WORD0.get_CLAMP_Y();
uint32 clampZ = (uint32)samplerWords->WORD0.get_CLAMP_Z();
if (samplerState->clampS != clampX)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_WRAP_S, glClampTable[clampX]);
samplerState->clampS = clampX;
}
if (samplerState->clampT != clampY)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_WRAP_T, glClampTable[clampY]);
samplerState->clampT = clampY;
}
if (samplerState->clampR != clampZ)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_WRAP_R, glClampTable[clampZ]);
samplerState->clampR = clampZ;
}
catchOpenGLError();
uint32 maxAniso = (uint32)samplerWords->WORD0.get_MAX_ANISO_RATIO();
if (baseTexture->overwriteInfo.anisotropicLevel >= 0)
maxAniso = baseTexture->overwriteInfo.anisotropicLevel;
if (samplerState->maxAniso != maxAniso)
{
quickBindTexture();
glTexParameterf(hostTextureView->glTexTarget, GL_TEXTURE_MAX_ANISOTROPY_EXT, (float)(1 << maxAniso));
samplerState->maxAniso = maxAniso;
catchOpenGLError();
}
if (samplerState->filterMin != filterMinGL)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_MIN_FILTER, filterMinGL);
samplerState->filterMin = filterMinGL;
catchOpenGLError();
}
if (samplerState->filterMag != filterMagGL)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_MAG_FILTER, filterMagGL);
samplerState->filterMag = filterMagGL;
catchOpenGLError();
}
if (samplerState->maxMipLevels != hostTextureView->numMip)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_MAX_LEVEL, std::max(hostTextureView->numMip, 1) - 1);
samplerState->maxMipLevels = hostTextureView->numMip;
catchOpenGLError();
}
// lod
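// GPU7 LOD values are fixed point with 6 fractional bits (hence the /64.0f conversions below)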
uint32 iMinLOD = samplerWords->WORD1.get_MIN_LOD();
uint32 iMaxLOD = samplerWords->WORD1.get_MAX_LOD();
sint32 iLodBias = samplerWords->WORD1.get_LOD_BIAS();
// apply relative lod bias from graphic pack
if (baseTexture->overwriteInfo.hasRelativeLodBias)
{
iLodBias += baseTexture->overwriteInfo.relativeLodBias;
}
// apply absolute lod bias from graphic pack
if (baseTexture->overwriteInfo.hasLodBias)
{
iLodBias = baseTexture->overwriteInfo.lodBias;
}
if (samplerState->minLod != iMinLOD)
{
quickBindTexture();
glTexParameterf(hostTextureView->glTexTarget, GL_TEXTURE_MIN_LOD, (float)iMinLOD / 64.0f);
samplerState->minLod = iMinLOD;
}
if (samplerState->maxLod != iMaxLOD)
{
quickBindTexture();
glTexParameterf(hostTextureView->glTexTarget, GL_TEXTURE_MAX_LOD, (float)iMaxLOD / 64.0f);
samplerState->maxLod = iMaxLOD;
}
if (samplerState->lodBias != iLodBias)
{
quickBindTexture();
glTexParameterf(hostTextureView->glTexTarget, GL_TEXTURE_LOD_BIAS, (float)iLodBias / 64.0f);
samplerState->lodBias = iLodBias;
}
// depth compare
uint32 samplerDepthCompare = (uint32)samplerWords->WORD0.get_DEPTH_COMPARE_FUNCTION();
uint8 depthCompareMode = isDepthSampler ? 1 : 0;
if (samplerDepthCompare != samplerState->depthCompareFunc)
{
quickBindTexture();
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_COMPARE_FUNC, glDepthCompareTable[samplerDepthCompare]);
samplerState->depthCompareFunc = samplerDepthCompare;
}
if (depthCompareMode != samplerState->depthCompareMode)
{
quickBindTexture();
if (depthCompareMode != 0)
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
else
glTexParameteri(hostTextureView->glTexTarget, GL_TEXTURE_COMPARE_MODE, GL_NONE);
samplerState->depthCompareMode = depthCompareMode;
}
catchOpenGLError();
// border
auto borderType = samplerWords->WORD0.get_BORDER_COLOR_TYPE();
if (samplerState->borderType != (uint8)borderType || borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::REGISTER)
{
// todo: Should we use integer border color (glTexParameteriv) for integer texture formats?
GLfloat borderColor[4];
if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::TRANSPARENT_BLACK)
{
borderColor[0] = 0.0f;
borderColor[1] = 0.0f;
borderColor[2] = 0.0f;
borderColor[3] = 0.0f;
}
else if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::OPAQUE_BLACK)
{
borderColor[0] = 0.0f;
borderColor[1] = 0.0f;
borderColor[2] = 0.0f;
borderColor[3] = 1.0f;
}
else if (borderType == Latte::LATTE_SQ_TEX_SAMPLER_WORD0_0::E_BORDER_COLOR_TYPE::OPAQUE_WHITE)
{
borderColor[0] = 1.0f;
borderColor[1] = 1.0f;
borderColor[2] = 1.0f;
borderColor[3] = 1.0f;
}
else
{
// border color from register
_LatteRegisterSetSamplerBorderColor* borderColorReg;
if (shaderContext->shaderType == LatteConst::ShaderType::Vertex || shaderContext->shaderType == LatteConst::ShaderType::Compute)
borderColorReg = LatteGPUState.contextNew.TD_VS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
else if (shaderContext->shaderType == LatteConst::ShaderType::Pixel)
borderColorReg = LatteGPUState.contextNew.TD_PS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
else // geometry
borderColorReg = LatteGPUState.contextNew.TD_GS_SAMPLER_BORDER_COLOR + stageSamplerIndex;
borderColor[0] = borderColorReg->red.get_channelValue();
borderColor[1] = borderColorReg->green.get_channelValue();
borderColor[2] = borderColorReg->blue.get_channelValue();
borderColor[3] = borderColorReg->alpha.get_channelValue();
}
if (samplerState->borderColor[0] != borderColor[0] || samplerState->borderColor[1] != borderColor[1] || samplerState->borderColor[2] != borderColor[2] || samplerState->borderColor[3] != borderColor[3])
{
quickBindTexture();
glTexParameterfv(hostTextureView->glTexTarget, GL_TEXTURE_BORDER_COLOR, borderColor);
samplerState->borderColor[0] = borderColor[0];
samplerState->borderColor[1] = borderColor[1];
samplerState->borderColor[2] = borderColor[2];
samplerState->borderColor[3] = borderColor[3];
}
samplerState->borderType = (uint8)borderType;
}
catchOpenGLError();
}
}
| 57,665 | C++ | .cpp | 1,459 | 36.745031 | 335 | 0.76692 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,258 | ShaderDescription.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/ShaderInfo/ShaderDescription.cpp |
#include "Cafe/HW/Latte/ShaderInfo/ShaderInfo.h"
#include "Cafe/HW/Latte/ISA/LatteInstructions.h"
namespace Latte
{
bool ShaderDescription::analyzeShaderCode(void* shaderProgram, size_t sizeInBytes, LatteConst::ShaderType shaderType)
{
assert_dbg();
// parse CF flow
// we need to parse:
// - Export clauses to gather info about exported attributes and written render targets
// - ALU clauses to gather info about accessed uniforms (and the remapped uniform)
const LatteCFInstruction* cfCode = (const LatteCFInstruction*)shaderProgram;
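// each CF instruction is 64 bits wide, hence sizeInBytes / 8 entries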
const size_t cfMaxCount = sizeInBytes / 8;
size_t cfIndex = 0;
while (cfIndex < cfMaxCount)
{
const LatteCFInstruction* baseInstr = cfCode + cfIndex;
cfIndex++;
bool isALU = false;
if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_DEFAULT>())
{
cemu_assert_debug(cfInstr->getField_WHOLE_QUAD_MODE() == 0);
cemu_assert_debug(cfInstr->getField_CALL_COUNT() == 0); // todo
cemu_assert_debug(cfInstr->getField_POP_COUNT() == 0); // todo
auto cond = cfInstr->getField_COND();
assert_dbg();
}
else if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_ALU>())
{
assert_dbg();
isALU = true;
}
else if (const auto cfInstr = baseInstr->getParserIfOpcodeMatch<LatteCFInstruction_EXPORT_IMPORT>())
{
assert_dbg();
}
else
{
cemuLog_log(LogType::Force, "ShaderDescription::analyzeShaderCode(): Missing implementation for CF opcode 0x%02x\n", baseInstr->getField_Opcode());
cemu_assert_debug(false); // todo
}
if (!isALU && baseInstr->getField_END_OF_PROGRAM())
break;
}
return true;
}
};
| 1,691 | C++ | .cpp | 47 | 32.255319 | 151 | 0.721713 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,259 | LatteTextureLegacy.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTextureLegacy.cpp |
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureGL.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
struct TexScaleXY
{
float xy[2];
};
struct
{
TexScaleXY perUnit[Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE]; // stores actualResolution/effectiveResolution ratio for each texture
}LatteTextureScale[static_cast<size_t>(LatteConst::ShaderType::TotalCount)] = { };
float* LatteTexture_getEffectiveTextureScale(LatteConst::ShaderType shaderType, sint32 texUnit)
{
cemu_assert_debug(texUnit >= 0 && texUnit < Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE);
return LatteTextureScale[static_cast<size_t>(shaderType)].perUnit[texUnit].xy;
}
void LatteTexture_setEffectiveTextureScale(LatteConst::ShaderType shaderType, sint32 texUnit, float u, float v)
{
cemu_assert_debug(texUnit >= 0 && texUnit < Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE);
float* t = LatteTextureScale[static_cast<size_t>(shaderType)].perUnit[texUnit].xy;
t[0] = u;
t[1] = v;
}
void LatteTextureLoader_UpdateTextureSliceData(LatteTexture* tex, uint32 sliceIndex, uint32 mipIndex, MPTR physImagePtr, MPTR physMipPtr, Latte::E_DIM dim, uint32 width, uint32 height, uint32 depth, uint32 mipLevels, uint32 pitch, Latte::E_HWTILEMODE tileMode, uint32 swizzle, bool dumpTex);
void LatteTexture_ReloadData(LatteTexture* tex)
{
tex->reloadCount++;
for(sint32 mip=0; mip<tex->mipLevels; mip++)
{
if(tex->dim == Latte::E_DIM::DIM_2D_ARRAY ||
tex->dim == Latte::E_DIM::DIM_2D_ARRAY_MSAA )
{
sint32 numSlices = std::max(tex->depth, 1);
for(sint32 s=0; s<numSlices; s++)
LatteTextureLoader_UpdateTextureSliceData(tex, s, mip, tex->physAddress, tex->physMipAddress, tex->dim, tex->width, tex->height, tex->depth, tex->mipLevels, tex->pitch, tex->tileMode, tex->swizzle, true);
}
else if( tex->dim == Latte::E_DIM::DIM_CUBEMAP )
{
cemu_assert_debug((tex->depth % 6) == 0);
sint32 numFullCubeMaps = tex->depth/6; // number of cubemaps (if numFullCubeMaps is >1 then this texture is a cubemap array)
for(sint32 s=0; s<numFullCubeMaps*6; s++)
LatteTextureLoader_UpdateTextureSliceData(tex, s, mip, tex->physAddress, tex->physMipAddress, tex->dim, tex->width, tex->height, tex->depth, tex->mipLevels, tex->pitch, tex->tileMode, tex->swizzle, true);
}
else if( tex->dim == Latte::E_DIM::DIM_3D )
{
sint32 mipDepth = std::max(tex->depth>>mip, 1);
for(sint32 s=0; s<mipDepth; s++)
{
LatteTextureLoader_UpdateTextureSliceData(tex, s, mip, tex->physAddress, tex->physMipAddress, tex->dim, tex->width, tex->height, tex->depth, tex->mipLevels, tex->pitch, tex->tileMode, tex->swizzle, true);
}
}
else
{
// load slice 0
LatteTextureLoader_UpdateTextureSliceData(tex, 0, mip, tex->physAddress, tex->physMipAddress, tex->dim, tex->width, tex->height, tex->depth, tex->mipLevels, tex->pitch, tex->tileMode, tex->swizzle, true);
}
}
tex->lastUpdateEventCounter = LatteTexture_getNextUpdateEventCounter();
}
LatteTextureView* LatteTexture_CreateTexture(Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels, uint32 swizzle, Latte::E_HWTILEMODE tileMode, bool isDepth)
{
const auto tex = g_renderer->texture_createTextureEx(dim, physAddress, physMipAddress, format, width, height, depth, pitch, mipLevels, swizzle, tileMode, isDepth);
// init slice/mip info array
LatteTexture_InitSliceAndMipInfo(tex);
LatteTexture_RegisterTextureMemoryOccupancy(tex);
cemu_assert_debug(mipLevels != 0);
// calculate number of potential mip levels (from effective size)
sint32 effectiveWidth = width;
sint32 effectiveHeight = height;
sint32 effectiveDepth = depth;
if (tex->overwriteInfo.hasResolutionOverwrite)
{
effectiveWidth = tex->overwriteInfo.width;
effectiveHeight = tex->overwriteInfo.height;
effectiveDepth = tex->overwriteInfo.depth;
}
tex->maxPossibleMipLevels = 1;
if (dim != Latte::E_DIM::DIM_3D)
{
for (sint32 i = 0; i < 20; i++)
{
if ((effectiveWidth >> i) <= 1 && (effectiveHeight >> i) <= 1)
{
tex->maxPossibleMipLevels = i + 1;
break;
}
}
}
else
{
for (sint32 i = 0; i < 20; i++)
{
if ((effectiveWidth >> i) <= 1 && (effectiveHeight >> i) <= 1 && (effectiveDepth >> i) <= 1)
{
tex->maxPossibleMipLevels = i + 1;
break;
}
}
}
LatteTexture_ReloadData(tex);
LatteTC_MarkTextureStillInUse(tex);
LatteTC_RegisterTexture(tex);
// create initial view that maps to the whole texture
tex->baseView = tex->GetOrCreateView(0, tex->mipLevels, 0, tex->depth);
return tex->baseView;
}
Latte::E_GX2SURFFMT LatteTexture_ReconstructGX2Format(const Latte::LATTE_SQ_TEX_RESOURCE_WORD1_N& texUnitWord1, const Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N& texUnitWord4)
{
Latte::E_GX2SURFFMT gx2Format = (Latte::E_GX2SURFFMT)texUnitWord1.get_DATA_FORMAT();
auto nfa = texUnitWord4.get_NUM_FORM_ALL();
if (nfa == Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N::E_NUM_FORMAT_ALL::NUM_FORMAT_SCALED)
gx2Format |= Latte::E_GX2SURFFMT::FMT_BIT_FLOAT;
else if (nfa == Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N::E_NUM_FORMAT_ALL::NUM_FORMAT_INT)
gx2Format |= Latte::E_GX2SURFFMT::FMT_BIT_INT;
if(texUnitWord4.get_FORCE_DEGAMMA())
gx2Format |= Latte::E_GX2SURFFMT::FMT_BIT_SRGB;
if (texUnitWord4.get_FORMAT_COMP_X() == Latte::LATTE_SQ_TEX_RESOURCE_WORD4_N::E_FORMAT_COMP::COMP_SIGNED)
gx2Format |= Latte::E_GX2SURFFMT::FMT_BIT_SIGNED;
return gx2Format;
}
void LatteTexture_updateTexturesForStage(LatteDecompilerShader* shaderContext, uint32 glBackendBaseTexUnit, _LatteRegisterSetTextureUnit* texRegBase)
{
for (sint32 z = 0; z < shaderContext->textureUnitListCount; z++)
{
sint32 textureIndex = shaderContext->textureUnitList[z];
const auto& texRegister = texRegBase[textureIndex];
// get physical address of texture data
MPTR physAddr = (texRegister.word2.get_BASE_ADDRESS() << 8);
if (physAddr == MPTR_NULL)
continue; // invalid data
MPTR physMipAddr = (texRegister.word3.get_MIP_ADDRESS() << 8);
// word0
const auto word0 = texRegister.word0;
auto dim = word0.get_DIM();
uint32 pitch = (word0.get_PITCH() + 1) << 3;
uint32 width = word0.get_WIDTH() + 1;
auto tileMode = word0.get_TILE_MODE();
// word1
const auto word1 = texRegister.word1;
uint32 depth = word1.get_DEPTH();
if (dim == Latte::E_DIM::DIM_2D_ARRAY || dim == Latte::E_DIM::DIM_3D || dim == Latte::E_DIM::DIM_2D_ARRAY_MSAA || dim == Latte::E_DIM::DIM_1D_ARRAY)
{
depth = depth + 1;
}
else
{
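// for cubemaps the DEPTH field counts whole cubemaps, each consisting of 6 faces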
if (dim == Latte::E_DIM::DIM_CUBEMAP)
depth = 6 * (depth + 1);
if (depth == 0)
depth = 1;
}
uint32 height = word1.get_HEIGHT() + 1;
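// for block-compressed formats the pitch is divided by 4 to convert from texel units to block units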
if (Latte::IsCompressedFormat(word1.get_DATA_FORMAT()))
pitch /= 4;
// view slice
const auto word4 = texRegister.word4;
const auto word5 = texRegister.word5;
uint32 viewFirstSlice = word5.get_BASE_ARRAY();
uint32 viewNumSlices = word5.get_LAST_ARRAY() + 1 - viewFirstSlice;
uint32 viewFirstMip = word4.get_BASE_LEVEL();
uint32 viewNumMips = word5.get_LAST_LEVEL() + 1 - viewFirstMip;
cemu_assert_debug(viewNumMips != 0);
Latte::E_GX2SURFFMT format = LatteTexture_ReconstructGX2Format(word1, word4);
// todo - AA
if (dim == Latte::E_DIM::DIM_2D_MSAA)
{
// MSAA only supports one mip level?
// without this we encounter a crash in The Mysterious Cities of Gold: Secret Paths due to it setting mip count to 2 and leaving mip pointer on an invalid uninitialized value
viewFirstMip = 0;
viewNumMips = 1;
}
// swizzle
uint32 swizzle = 0;
if (Latte::TM_IsMacroTiled(tileMode))
{
// extract swizzle bits from pointer if macro-tiled
swizzle = (physAddr & 0x700);
physAddr &= ~0x700;
}
bool isDepthSampler = shaderContext->textureUsesDepthCompare[textureIndex];
// look for already existing texture
LatteTextureView* textureView;
if (!isDepthSampler)
textureView = LatteTextureViewLookupCache::lookup(physAddr, width, height, depth, pitch, viewFirstMip, viewNumMips, viewFirstSlice, viewNumSlices, format, dim);
else
textureView = LatteTextureViewLookupCache::lookupWithColorOrDepthType(physAddr, width, height, depth, pitch, viewFirstMip, viewNumMips, viewFirstSlice, viewNumSlices, format, dim, true);
if (!textureView)
{
// view not found, create a new mapping which will also create a new texture if necessary
textureView = LatteTexture_CreateMapping(physAddr, physMipAddr, width, height, depth, pitch, tileMode, swizzle, viewFirstMip, viewNumMips, viewFirstSlice, viewNumSlices, format, dim, dim, isDepthSampler);
if (textureView == nullptr)
continue;
LatteGPUState.repeatTextureInitialization = true;
}
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
// on OpenGL, texture views and sampler parameters are tied together (we are avoiding sampler objects due to driver bugs)
// in order to emulate different sampler parameters when a texture is bound multiple times we create extra views
OpenGLRenderer* rendererGL = static_cast<OpenGLRenderer*>(g_renderer.get());
// if this texture is bound multiple times then use alternative views
if (textureView->lastTextureBindIndex == LatteGPUState.textureBindCounter)
{
LatteTextureViewGL* textureViewGL = (LatteTextureViewGL*)textureView;
// get next unused alternative texture view
while (true)
{
textureViewGL = textureViewGL->GetAlternativeView();
if (textureViewGL->lastTextureBindIndex != LatteGPUState.textureBindCounter)
break;
}
textureView = textureViewGL;
}
textureView->lastTextureBindIndex = LatteGPUState.textureBindCounter;
rendererGL->renderstate_updateTextureSettingsGL(shaderContext, textureView, textureIndex + glBackendBaseTexUnit, word4, textureIndex, isDepthSampler);
}
g_renderer->texture_setLatteTexture(textureView, textureIndex + glBackendBaseTexUnit);
// update if data changed
bool swizzleChanged = false;
if (textureView->baseTexture->swizzle != swizzle)
{
debug_printf("BaseSwizzle diff prev %08x new %08x rt %08x tm %d\n", textureView->baseTexture->swizzle, swizzle, textureView->baseTexture->lastRenderTargetSwizzle, textureView->baseTexture->tileMode);
if (swizzle == textureView->baseTexture->lastRenderTargetSwizzle)
{
// last render to texture updated the swizzle and we can assume the texture data is still valid
textureView->baseTexture->swizzle = textureView->baseTexture->lastRenderTargetSwizzle;
}
else
{
// reload texture
swizzleChanged = true;
}
}
else if ((viewFirstMip + viewNumMips) > 1 && (textureView->baseTexture->physMipAddress != physMipAddr))
{
debug_printf("MipPhys/Swizzle change diff prev %08x new %08x tm %d\n", textureView->baseTexture->physMipAddress, physMipAddr, textureView->baseTexture->tileMode);
swizzleChanged = true;
cemu_assert_debug(physMipAddr != MPTR_NULL);
}
// check for changes
if (LatteTC_HasTextureChanged(textureView->baseTexture) || swizzleChanged)
{
debug_printf("Reload texture 0x%08x res %dx%d memRange %08x-%08x SwizzleChange: %s\n", textureView->baseTexture->physAddress, textureView->baseTexture->width, textureView->baseTexture->height, textureView->baseTexture->texDataPtrLow, textureView->baseTexture->texDataPtrHigh, swizzleChanged ? "yes" : "no");
// update swizzle / changed mip address
if (swizzleChanged)
{
textureView->baseTexture->swizzle = swizzle;
if ((viewFirstMip + viewNumMips) > 1)
{
textureView->baseTexture->physMipAddress = physMipAddr;
}
}
debug_printf("Reload reason: Data-change when bound as texture (new hash 0x%08x)\n", textureView->baseTexture->texDataHash2);
LatteTexture_ReloadData(textureView->baseTexture);
}
LatteTexture* baseTexture = textureView->baseTexture;
if (baseTexture->reloadFromDynamicTextures)
{
LatteTexture_UpdateCacheFromDynamicTextures(baseTexture);
baseTexture->reloadFromDynamicTextures = false;
}
LatteTC_MarkTextureStillInUse(baseTexture);
// check if barrier is necessary
if ((sint32)(LatteGPUState.drawCallCounter - baseTexture->lastUnflushedRTDrawcallIndex) < 2)
{
LatteGPUState.requiresTextureBarrier = true;
baseTexture->lastUnflushedRTDrawcallIndex = 0;
}
// update scale
float texScaleU, texScaleV;
if (baseTexture->overwriteInfo.hasResolutionOverwrite == false)
{
texScaleU = 1.0f;
texScaleV = 1.0f;
}
else
{
texScaleU = (float)baseTexture->overwriteInfo.width / (float)baseTexture->width;
texScaleV = (float)baseTexture->overwriteInfo.height / (float)baseTexture->height;
}
LatteTexture_setEffectiveTextureScale(shaderContext->shaderType, textureIndex, texScaleU, texScaleV);
}
}
// initialize textures used by the current drawcall
// Sets LatteGPUState.repeatTextureInitialization to true if a new texture mapping was created (indicating that this function must be called again)
// also sets LatteGPUState.requiresTextureBarrier to true if texture barrier is required
void LatteTexture_updateTextures()
{
LatteGPUState.textureBindCounter++;
// pixel shader
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
if (pixelShader)
LatteTexture_updateTexturesForStage(pixelShader, LATTE_CEMU_PS_TEX_UNIT_BASE, LatteGPUState.contextNew.SQ_TEX_START_PS);
// vertex shader
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
cemu_assert_debug(vertexShader != nullptr);
LatteTexture_updateTexturesForStage(vertexShader, LATTE_CEMU_VS_TEX_UNIT_BASE, LatteGPUState.contextNew.SQ_TEX_START_VS);
// geometry shader
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
if (geometryShader)
LatteTexture_updateTexturesForStage(geometryShader, LATTE_CEMU_GS_TEX_UNIT_BASE, LatteGPUState.contextNew.SQ_TEX_START_GS);
}
sint32 LatteTexture_getEffectiveWidth(LatteTexture* texture)
{
if (texture->overwriteInfo.hasResolutionOverwrite)
return texture->overwriteInfo.width;
return texture->width;
}
// returns true if the two textures have the same rescale factor
bool LatteTexture_doesEffectiveRescaleRatioMatch(LatteTexture* texture1, sint32 mipLevel1, LatteTexture* texture2, sint32 mipLevel2)
{
double widthRatio1 = (double)LatteTexture_getEffectiveWidth(texture1) / (double)texture1->width;
double widthRatio2 = (double)LatteTexture_getEffectiveWidth(texture2) / (double)texture2->width;
// the difference between the factors must be less than 5%
double diff = widthRatio1 / widthRatio2;
if (abs(1.0 - diff) > 0.05)
{
return false;
}
return true;
}
void LatteTexture_scaleToEffectiveSize(LatteTexture* texture, sint32* x, sint32* y, sint32 mipLevel)
{
if( texture->overwriteInfo.hasResolutionOverwrite == false )
return;
*x = *x * std::max(1,texture->overwriteInfo.width>>mipLevel) / std::max(1,texture->width>>mipLevel);
*y = *y * std::max(1,texture->overwriteInfo.height>>mipLevel) / std::max(1, texture->height>>mipLevel);
}
uint64 _textureUpdateEventCounter = 1;
uint64 LatteTexture_getNextUpdateEventCounter()
{
uint64 counter = _textureUpdateEventCounter;
_textureUpdateEventCounter++;
return counter;
}
void LatteTexture_init()
{
}
| 15,346 | C++ | .cpp | 341 | 42.064516 | 310 | 0.754157 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,260 | LatteShaderGL.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteShaderGL.cpp |
#include "Common/GLInclude/GLInclude.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/OpenGLRenderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "util/helpers/StringBuf.h"
bool gxShader_checkIfSuccessfullyLinked(GLuint glProgram)
{
int status = -1;
glGetProgramiv(glProgram, GL_LINK_STATUS, &status);
if( status == GL_TRUE )
return true;
// in debug mode, get and print shader error log
char infoLog[48*1024];
uint32 infoLogLength, tempLength;
glGetProgramiv(glProgram, GL_INFO_LOG_LENGTH, (GLint *)&infoLogLength);
tempLength = sizeof(infoLog)-1;
glGetProgramInfoLog(glProgram, std::min(tempLength, infoLogLength), (GLsizei*)&tempLength, (GLcharARB*)infoLog);
infoLog[tempLength] = '\0';
cemuLog_log(LogType::Force, "Link error in raw shader");
cemuLog_log(LogType::Force, infoLog);
return false;
}
void LatteShader_prepareSeparableUniforms(LatteDecompilerShader* shader)
{
if (g_renderer->GetType() == RendererAPI::Vulkan)
return;
auto shaderGL = (RendererShaderGL*)shader->shader;
// setup uniform info
if (shader->shaderType == LatteConst::ShaderType::Vertex)
{
shader->uniform.loc_remapped = glGetUniformLocation(shaderGL->GetProgram(), "uf_remappedVS");
shader->uniform.loc_uniformRegister = glGetUniformLocation(shaderGL->GetProgram(), "uf_uniformRegisterVS");
}
else if (shader->shaderType == LatteConst::ShaderType::Geometry)
{
shader->uniform.loc_remapped = glGetUniformLocation(shaderGL->GetProgram(), "uf_remappedGS");
shader->uniform.loc_uniformRegister = glGetUniformLocation(shaderGL->GetProgram(), "uf_uniformRegisterGS");
}
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
{
shader->uniform.loc_remapped = glGetUniformLocation(shaderGL->GetProgram(), "uf_remappedPS");
shader->uniform.loc_uniformRegister = glGetUniformLocation(shaderGL->GetProgram(), "uf_uniformRegisterPS");
}
catchOpenGLError();
shader->uniform.loc_windowSpaceToClipSpaceTransform = glGetUniformLocation(shaderGL->GetProgram(), "uf_windowSpaceToClipSpaceTransform");
shader->uniform.loc_alphaTestRef = glGetUniformLocation(shaderGL->GetProgram(), "uf_alphaTestRef");
shader->uniform.loc_pointSize = glGetUniformLocation(shaderGL->GetProgram(), "uf_pointSize");
shader->uniform.loc_fragCoordScale = glGetUniformLocation(shaderGL->GetProgram(), "uf_fragCoordScale");
cemu_assert_debug(shader->uniform.list_ufTexRescale.empty());
for (sint32 t = 0; t < LATTE_NUM_MAX_TEX_UNITS; t++)
{
char ufName[64];
sprintf(ufName, "uf_tex%dScale", t);
GLint uniformLocation = glGetUniformLocation(shaderGL->GetProgram(), ufName);
if (uniformLocation >= 0)
{
LatteUniformTextureScaleEntry_t entry = { 0 };
entry.texUnit = t;
entry.uniformLocation = uniformLocation;
shader->uniform.list_ufTexRescale.push_back(entry);
}
}
}
GLuint gpu7ShaderGLDepr_compileShader(const std::string& source, uint32_t type)
{
cemu_assert(type == GL_VERTEX_SHADER || type == GL_FRAGMENT_SHADER);
const GLuint shader_object = glCreateShader(type);
const char *c_str = source.c_str();
const GLint size = (GLint)source.size();
glShaderSource(shader_object, 1, &c_str, &size);
glCompileShader(shader_object);
GLint log_length;
glGetShaderiv(shader_object, GL_INFO_LOG_LENGTH, &log_length);
if (log_length > 0)
{
char log[2048]{};
GLsizei log_size;
glGetShaderInfoLog(shader_object, std::min(log_length, (GLint)sizeof(log) - 1), &log_size, log);
cemuLog_log(LogType::Force, "Error/Warning in vertex shader:");
cemuLog_log(LogType::Force, log);
}
return shader_object;
}
GLuint gpu7ShaderGLDepr_compileVertexShader(const std::string& source)
{
return gpu7ShaderGLDepr_compileShader(source, GL_VERTEX_SHADER);
}
GLuint gpu7ShaderGLDepr_compileFragmentShader(const std::string& source)
{
return gpu7ShaderGLDepr_compileShader(source, GL_FRAGMENT_SHADER);
}
GLuint gpu7ShaderGLDepr_compileVertexShader(const char* shaderSource, sint32 shaderSourceLength)
{
uint32 shaderObject = glCreateShader(GL_VERTEX_SHADER);
GLchar* srcPtr = (GLchar*)shaderSource;
GLint srcLen = shaderSourceLength;
glShaderSource(shaderObject, 1, &srcPtr, &srcLen);
glCompileShader(shaderObject);
uint32 shaderLogLengthInfo, shaderLogLen;
glGetShaderiv(shaderObject, GL_INFO_LOG_LENGTH, (GLint *)&shaderLogLengthInfo);
if (shaderLogLengthInfo > 0)
{
char messageLog[2048]{};
glGetShaderInfoLog(shaderObject, std::min<uint32>(shaderLogLengthInfo, sizeof(messageLog) - 1), (GLsizei*)&shaderLogLen, (GLcharARB*)messageLog);
cemuLog_log(LogType::Force, "Error/Warning in vertex shader:");
cemuLog_log(LogType::Force, messageLog);
}
return shaderObject;
}
GLuint gpu7ShaderGLDepr_compileFragmentShader(const char* shaderSource, sint32 shaderSourceLength)
{
uint32 shaderObject = glCreateShader(GL_FRAGMENT_SHADER);
GLchar* srcPtr = (GLchar*)shaderSource;
GLint srcLen = shaderSourceLength;
glShaderSource(shaderObject, 1, &srcPtr, &srcLen);
glCompileShader(shaderObject);
uint32 shaderLogLengthInfo, shaderLogLen;
char messageLog[2048];
glGetShaderiv(shaderObject, GL_INFO_LOG_LENGTH, (GLint *)&shaderLogLengthInfo);
if (shaderLogLengthInfo > 0)
{
memset(messageLog, 0, sizeof(messageLog));
glGetShaderInfoLog(shaderObject, std::min<uint32>(shaderLogLengthInfo, sizeof(messageLog) - 1), (GLsizei*)&shaderLogLen, (GLcharARB*)messageLog);
cemuLog_log(LogType::Force, "Error/Warning in fragment shader:");
cemuLog_log(LogType::Force, messageLog);
}
return shaderObject;
}
GLuint gxShaderDepr_compileRaw(StringBuf* strSourceVS, StringBuf* strSourceFS)
{
GLuint glShaderProgram = glCreateProgram();
GLuint vertexShader = gpu7ShaderGLDepr_compileVertexShader(strSourceVS->c_str(), strSourceVS->getLen());
glAttachShader(glShaderProgram, vertexShader);
GLuint fragmentShader = gpu7ShaderGLDepr_compileFragmentShader(strSourceFS->c_str(), strSourceFS->getLen());
glAttachShader(glShaderProgram, fragmentShader);
glLinkProgram(glShaderProgram);
if( gxShader_checkIfSuccessfullyLinked(glShaderProgram) == false )
{
return 0;
}
return glShaderProgram;
}
GLuint gxShaderDepr_compileRaw(const std::string& vertex_source, const std::string& fragment_source)
{
const GLuint programm = glCreateProgram();
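// std::launch::deferred delays compilation until .get() is called below, so both shaders are still compiled on this thread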
auto vertex_shader = std::async(std::launch::deferred, gpu7ShaderGLDepr_compileShader, vertex_source, GL_VERTEX_SHADER);
auto fragment_shader = std::async(std::launch::deferred, gpu7ShaderGLDepr_compileShader, fragment_source, GL_FRAGMENT_SHADER);
glAttachShader(programm, vertex_shader.get());
glAttachShader(programm, fragment_shader.get());
glLinkProgram(programm);
return gxShader_checkIfSuccessfullyLinked(programm) ? programm : 0;
}
| 6,787 | C++ | .cpp | 154 | 41.967532 | 147 | 0.786826 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,261 | LatteQuery.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteQuery.cpp |
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteQueryObject.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#define GPU7_QUERY_TYPE_OCCLUSION (1)
uint64 queryEventCounter = 1;
struct LatteGX2QueryInformation
{
MPTR queryMPTR;
uint64 queryEventStart;
uint64 queryEventEnd;
uint64 sampleSum;
bool queryEnded;
};
std::vector<LatteGX2QueryInformation*> list_activeGX2Queries2;
std::vector<LatteQueryObject*> list_queriesInFlight;
uint64 latestQueryFinishedEventId = 0;
LatteQueryObject* _currentlyActiveRendererQuery = {0};
uint64 LatteQuery_getNextEventId()
{
uint64 ev = queryEventCounter;
queryEventCounter++;
return ev;
}
void LatteQuery_begin(LatteQueryObject* queryObject, uint64 eventId)
{
queryObject->queryEventStart = eventId;
queryObject->begin();
}
void LatteQuery_end(LatteQueryObject* queryObject, uint64 eventId)
{
cemu_assert_debug(!queryObject->queryEnded);
queryObject->queryEnded = true;
queryObject->queryEventEnd = eventId;
queryObject->end();
}
LatteQueryObject* LatteQuery_createSamplePassedQuery()
{
return g_renderer->occlusionQuery_create();
}
void LatteQuery_finishGX2Query(LatteGX2QueryInformation* gx2Query)
{
uint32* queryObjectData = (uint32*)memory_getPointerFromVirtualOffset(gx2Query->queryMPTR);
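// the GX2 query result block is written as five consecutive 64-bit fields; the second one receives the accumulated sample count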
*(uint64*)(queryObjectData + 0) = 0;
*(uint64*)(queryObjectData + 2) = gx2Query->sampleSum;
*(uint64*)(queryObjectData + 4) = 0;
*(uint64*)(queryObjectData + 6) = 0;
*(uint64*)(queryObjectData + 8) = 0; // overwrites the 'OCPU' magic constant letting GX2QueryGetOcclusionResult know that the query is finished (for CPU queries)
}
void LatteQuery_UpdateFinishedQueries()
{
g_renderer->occlusionQuery_updateState();
for(uint32 i=0; i<list_queriesInFlight.size(); i++)
{
LatteQueryObject* queryObject = list_queriesInFlight[i];
cemu_assert_debug(queryObject->queryEnded);
if( queryObject->queryEnded == false )
continue;
// check if result is available
uint64 numSamplesPassed;
if (!queryObject->getResult(numSamplesPassed))
break;
cemu_assert_debug(latestQueryFinishedEventId < queryObject->queryEventEnd);
latestQueryFinishedEventId = queryObject->queryEventEnd;
// add number of passed samples to all gx2 queries that were active at the time
for (auto& it : list_activeGX2Queries2)
{
if (queryObject->queryEventStart >= it->queryEventStart && queryObject->queryEventEnd <= it->queryEventEnd)
it->sampleSum += numSamplesPassed;
}
list_queriesInFlight.erase(list_queriesInFlight.begin() + i);
i--;
g_renderer->occlusionQuery_destroy(queryObject);
}
// check for finished GX2 queries
for (sint32 i = 0; i < list_activeGX2Queries2.size(); i++)
{
auto gx2Query = list_activeGX2Queries2[i];
if (gx2Query->queryEnded && latestQueryFinishedEventId >= gx2Query->queryEventEnd)
{
LatteQuery_finishGX2Query(gx2Query);
free(gx2Query);
list_activeGX2Queries2.erase(list_activeGX2Queries2.begin() + i);
i--;
}
}
}
void LatteQuery_UpdateFinishedQueriesForceFinishAll()
{
cemu_assert_debug(_currentlyActiveRendererQuery == nullptr);
g_renderer->occlusionQuery_flush(); // guarantees that all query commands have been submitted and finished processing
while (true)
{
LatteQuery_UpdateFinishedQueries();
if (list_queriesInFlight.empty())
break;
}
}
sint32 checkQueriesCounter = 0;
void LatteQuery_endActiveRendererQuery(uint64 currentEventId)
{
if (_currentlyActiveRendererQuery != nullptr)
{
LatteQuery_end(_currentlyActiveRendererQuery, currentEventId);
list_queriesInFlight.emplace_back(_currentlyActiveRendererQuery);
_currentlyActiveRendererQuery = nullptr;
}
}
void LatteQuery_BeginOcclusionQuery(MPTR queryMPTR)
{
if (checkQueriesCounter < 7)
{
checkQueriesCounter++;
}
else
{
LatteQuery_UpdateFinishedQueries();
checkQueriesCounter = 0;
}
for(auto& it : list_activeGX2Queries2)
{
if (it->queryMPTR == queryMPTR)
{
debug_printf("itHLEBeginOcclusionQuery: Query 0x%08x is already active\n", queryMPTR);
return;
}
}
uint64 currentEventId = LatteQuery_getNextEventId();
// end any currently active query
LatteQuery_endActiveRendererQuery(currentEventId);
// create GX2 query binding
LatteGX2QueryInformation* queryBinding = (LatteGX2QueryInformation*)malloc(sizeof(LatteGX2QueryInformation));
memset(queryBinding, 0x00, sizeof(LatteGX2QueryInformation));
queryBinding->queryEventStart = currentEventId;
queryBinding->queryMPTR = queryMPTR;
list_activeGX2Queries2.emplace_back(queryBinding);
// start renderer query
LatteQueryObject* queryObject = LatteQuery_createSamplePassedQuery();
LatteQuery_begin(queryObject, currentEventId);
_currentlyActiveRendererQuery = queryObject;
}
void LatteQuery_EndOcclusionQuery(MPTR queryMPTR)
{
if (queryMPTR == MPTR_NULL)
return;
uint64 currentEventId = LatteQuery_getNextEventId();
// mark query binding as ended
for(auto& it : list_activeGX2Queries2)
{
if (it->queryMPTR == queryMPTR)
{
it->queryEventEnd = currentEventId;
it->queryEnded = true;
break;
}
}
// end currently active renderer query
LatteQuery_endActiveRendererQuery(currentEventId);
// check if there are still active GX2 queries
bool hasActiveGX2Query = false;
for (auto& it : list_activeGX2Queries2)
{
if (!it->queryEnded)
{
hasActiveGX2Query = true;
break;
}
}
// start a new renderer query if there are still active GX2 queries
if (hasActiveGX2Query)
{
LatteQueryObject* queryObject = LatteQuery_createSamplePassedQuery();
LatteQuery_begin(queryObject, currentEventId);
list_queriesInFlight.emplace_back(queryObject);
_currentlyActiveRendererQuery = queryObject;
catchOpenGLError();
}
}
void LatteQuery_CancelActiveGPU7Queries()
{
cemu_assert_debug(_currentlyActiveRendererQuery == nullptr);
}
void LatteQuery_Init()
{
}
| 5,884 | C++ | .cpp | 186 | 29.370968 | 162 | 0.789177 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,262 | LatteTextureReadback.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTextureReadback.cpp |
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Common/GLInclude/GLInclude.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/LatteTextureViewGL.h"
#define LOG_READBACK_TIME
struct LatteTextureReadbackQueueEntry
{
HRTick initiateTime;
uint32 lastUpdateDrawcallIndex;
LatteTextureView* textureView;
};
std::vector<LatteTextureReadbackQueueEntry> sTextureScheduledReadbacks; // readbacks that have been queued but the actual transfer has not yet been started
std::queue<LatteTextureReadbackInfo*> sTextureActiveReadbackQueue; // readbacks in flight
void LatteTextureReadback_StartTransfer(LatteTextureView* textureView)
{
cemuLog_log(LogType::TextureReadback, "[TextureReadback-Start] PhysAddr {:08x} Res {}x{} Fmt {} Slice {} Mip {}", textureView->baseTexture->physAddress, textureView->baseTexture->width, textureView->baseTexture->height, textureView->baseTexture->format, textureView->firstSlice, textureView->firstMip);
HRTick currentTick = HighResolutionTimer().now().getTick();
// create info entry and store in ordered linked list
LatteTextureReadbackInfo* readbackInfo = g_renderer->texture_createReadback(textureView);
sTextureActiveReadbackQueue.push(readbackInfo);
readbackInfo->StartTransfer();
readbackInfo->transferStartTime = currentTick;
}
/*
* Checks for queued transfers and starts them if at least five drawcalls have passed since the last write
* Called after a draw sequence is completed
* Returns true if at least one transfer was started
*/
bool LatteTextureReadback_Update(bool forceStart)
{
bool hasStartedTransfer = false;
for (size_t i = 0; i < sTextureScheduledReadbacks.size(); i++)
{
LatteTextureReadbackQueueEntry& entry = sTextureScheduledReadbacks[i];
uint32 numElapsedDrawcalls = LatteGPUState.drawCallCounter - entry.lastUpdateDrawcallIndex;
if (forceStart || numElapsedDrawcalls >= 5)
{
#ifdef LOG_READBACK_TIME
double elapsedSecondsSinceInitiate = HighResolutionTimer::getTimeDiff(entry.initiateTime, HighResolutionTimer().now().getTick());
cemuLog_log(LogType::TextureReadback, "[TextureReadback-Update] Starting transfer for {:08x} after {} elapsed drawcalls. Time since initiate: {:.4} Force-start: {}", entry.textureView->baseTexture->physAddress, numElapsedDrawcalls, elapsedSecondsSinceInitiate, forceStart?"yes":"no");
#endif
LatteTextureReadback_StartTransfer(entry.textureView);
// remove element
vectorRemoveByIndex(sTextureScheduledReadbacks, i);
i--;
hasStartedTransfer = true;
}
}
return hasStartedTransfer;
}
/*
* Called when a texture is deleted
*/
void LatteTextureReadback_NotifyTextureDeletion(LatteTexture* texture)
{
// delete from queue
for (size_t i = 0; i < sTextureScheduledReadbacks.size(); i++)
{
LatteTextureReadbackQueueEntry& entry = sTextureScheduledReadbacks[i];
if (entry.textureView->baseTexture == texture)
{
vectorRemoveByIndex(sTextureScheduledReadbacks, i);
break;
}
}
}
void LatteTextureReadback_Initate(LatteTextureView* textureView)
{
// currently we don't support readback for resized textures
if (textureView->baseTexture->overwriteInfo.hasResolutionOverwrite)
{
cemuLog_log(LogType::Force, "Texture readback is not supported for textures with modified resolution. Texture: {:08x} {}x{}", textureView->baseTexture->physAddress, textureView->baseTexture->width, textureView->baseTexture->height);
return;
}
// check if texture isn't already queued for transfer
for (size_t i = 0; i < sTextureScheduledReadbacks.size(); i++)
{
LatteTextureReadbackQueueEntry& entry = sTextureScheduledReadbacks[i];
if (entry.textureView == textureView)
{
entry.lastUpdateDrawcallIndex = LatteGPUState.drawCallCounter;
return;
}
}
// queue
LatteTextureReadbackQueueEntry queueEntry;
queueEntry.initiateTime = HighResolutionTimer().now().getTick();
queueEntry.textureView = textureView;
queueEntry.lastUpdateDrawcallIndex = LatteGPUState.drawCallCounter;
sTextureScheduledReadbacks.emplace_back(queueEntry);
}
void LatteTextureReadback_UpdateFinishedTransfers(bool forceFinish)
{
if (forceFinish)
{
// start any delayed transfers
LatteTextureReadback_Update(true);
}
performanceMonitor.gpuTime_waitForAsync.beginMeasuring();
while (!sTextureActiveReadbackQueue.empty())
{
LatteTextureReadbackInfo* readbackInfo = sTextureActiveReadbackQueue.front();
if (forceFinish)
{
if (!readbackInfo->IsFinished())
{
readbackInfo->waitStartTime = HighResolutionTimer().now().getTick();
#ifdef LOG_READBACK_TIME
if (cemuLog_isLoggingEnabled(LogType::TextureReadback))
{
double elapsedSecondsTransfer = HighResolutionTimer::getTimeDiff(readbackInfo->transferStartTime, HighResolutionTimer().now().getTick());
cemuLog_log(LogType::TextureReadback, "[Texture-Readback] Force-finish: {:08x} Res {:}/{:} TM {:} FMT {:04x} Transfer time so far: {:.4}ms", readbackInfo->hostTextureCopy.physAddress, readbackInfo->hostTextureCopy.width, readbackInfo->hostTextureCopy.height, readbackInfo->hostTextureCopy.tileMode, (uint32)readbackInfo->hostTextureCopy.format, elapsedSecondsTransfer * 1000.0);
}
#endif
readbackInfo->forceFinish = true;
readbackInfo->ForceFinish();
// rerun logic since ->ForceFinish() can recursively call this function and thus modify the queue
continue;
}
}
else
{
if (!readbackInfo->IsFinished())
break;
readbackInfo->waitStartTime = HighResolutionTimer().now().getTick();
}
// performance testing
#ifdef LOG_READBACK_TIME
if (cemuLog_isLoggingEnabled(LogType::TextureReadback))
{
HRTick currentTick = HighResolutionTimer().now().getTick();
double elapsedSecondsTransfer = HighResolutionTimer::getTimeDiff(readbackInfo->transferStartTime, currentTick);
double elapsedSecondsWaiting = HighResolutionTimer::getTimeDiff(readbackInfo->waitStartTime, currentTick);
cemuLog_log(LogType::TextureReadback, "[Texture-Readback] {:08x} Res {}/{} TM {} FMT {:04x} ReadbackLatency: {:6.3}ms WaitTime: {:6.3}ms ForcedWait {}", readbackInfo->hostTextureCopy.physAddress, readbackInfo->hostTextureCopy.width, readbackInfo->hostTextureCopy.height, readbackInfo->hostTextureCopy.tileMode, (uint32)readbackInfo->hostTextureCopy.format, elapsedSecondsTransfer * 1000.0, elapsedSecondsWaiting * 1000.0, readbackInfo->forceFinish ? "yes" : "no");
}
#endif
uint8* pixelData = readbackInfo->GetData();
LatteTextureLoader_writeReadbackTextureToMemory(&readbackInfo->hostTextureCopy, 0, 0, pixelData);
readbackInfo->ReleaseData();
// get the original texture if it still exists and invalidate the current data hash
LatteTextureView* origTexView = LatteTextureViewLookupCache::lookupSlice(readbackInfo->hostTextureCopy.physAddress, readbackInfo->hostTextureCopy.width, readbackInfo->hostTextureCopy.height, readbackInfo->hostTextureCopy.pitch, 0, 0, readbackInfo->hostTextureCopy.format);
if (origTexView)
LatteTC_ResetTextureChangeTracker(origTexView->baseTexture, true);
delete readbackInfo;
// remove from queue
cemu_assert_debug(!sTextureActiveReadbackQueue.empty());
cemu_assert_debug(readbackInfo == sTextureActiveReadbackQueue.front());
sTextureActiveReadbackQueue.pop();
}
performanceMonitor.gpuTime_waitForAsync.endMeasuring();
}
| 7,392 | C++ | .cpp | 154 | 45.415584 | 467 | 0.796071 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,263 | LatteTextureCache.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTextureCache.cpp |
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Common/cpu_features.h"
std::unordered_set<LatteTexture*> g_allTextures;
void LatteTC_Init()
{
cemu_assert_debug(g_allTextures.empty());
}
void LatteTC_RegisterTexture(LatteTexture* tex)
{
g_allTextures.emplace(tex);
}
void LatteTC_UnregisterTexture(LatteTexture* tex)
{
g_allTextures.erase(tex);
}
// sample few uint64s uniformly over memory range
uint32 _quickStochasticHash(void* texData, uint32 memRange)
{
uint64* texDataU64 = (uint64*)texData;
uint64 hashVal = 0;
memRange /= sizeof(uint64);
uint32 memStep = memRange / 37; // use prime here to avoid memStep aligning nicely with pitch of texture, leading to sampling only along the border of a texture
for (sint32 i = 0; i < 37; i++)
{
hashVal += *texDataU64;
hashVal = (hashVal << 3) | (hashVal >> 61);
texDataU64 += memStep;
}
return (uint32)hashVal ^ (uint32)(hashVal >> 32);
}
uint32 LatteTexture_CalculateTextureDataHash(LatteTexture* hostTexture)
{
if( hostTexture->texDataPtrHigh == hostTexture->texDataPtrLow )
{
return 0;
}
if (hostTexture->format == Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT)
{
// this is an exotic format that usually isn't generated or updated CPU-side
// therefore, as an optimization, we can risk checking only a minimal number of bytes at the beginning of the texture data
// updates which change the entire texture should still be detected this way
// this also helps with a bug in BotW which seems to fill the empty areas of the textures with other data which causes unnecessary invalidations and texture reloads
// Wonderful 101 generates this format in a 8x8x8 3D texture using tiling aperture
if (hostTexture->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THICK && hostTexture->depth == 8 && hostTexture->width == 8 && hostTexture->height == 8)
{
// special case for Wonderful 101
uint32* texDataU32 = (uint32*)memory_getPointerFromPhysicalOffset(hostTexture->texDataPtrLow);
return texDataU32[0] ^ texDataU32[0x100/4] ^ texDataU32[0x200/4] ^ texDataU32[0x300/4]; // check the first thick slice (each slice has 0x400 bytes, with 0x100 bytes between layers)
}
uint32* texDataU32 = (uint32*)memory_getPointerFromPhysicalOffset(hostTexture->texDataPtrLow);
return texDataU32[0] ^ texDataU32[1] ^ texDataU32[2] ^ texDataU32[3];
}
uint32 memRange = hostTexture->texDataPtrHigh - hostTexture->texDataPtrLow;
uint32* texDataU32 = (uint32*)memory_getPointerFromPhysicalOffset(hostTexture->texDataPtrLow);
uint32 hashVal = 0;
uint32 pixelCount = hostTexture->width*hostTexture->height;
bool isCompressedFormat = hostTexture->IsCompressedFormat();
if (isCompressedFormat || hostTexture->useLightHash)
{
// check only 32 samples of the texture
if (memRange < 256)
{
memRange /= sizeof(uint32);
while (memRange--)
{
hashVal += *texDataU32;
hashVal = (hashVal << 3) | (hashVal >> 29);
texDataU32++;
}
}
else
{
hashVal = _quickStochasticHash(texDataU32, memRange);
}
return hashVal;
}
if( pixelCount <= (700*700) )
{
// small texture size
bool isCompressedFormat = hostTexture->IsCompressedFormat();
if( isCompressedFormat == false || memRange < 0x200 )
{
memRange /= (4*sizeof(uint32));
while( memRange-- )
{
hashVal += *texDataU32;
hashVal = (hashVal<<3)|(hashVal>>29);
texDataU32 += 4;
}
}
else
{
memRange /= (32*sizeof(uint32));
while( memRange-- )
{
hashVal += *texDataU32;
hashVal = (hashVal<<3)|(hashVal>>29);
texDataU32 += 32;
}
}
}
else if( pixelCount <= (1200*1200) )
{
// medium texture size
bool isCompressedFormat = hostTexture->IsCompressedFormat();
if( isCompressedFormat == false )
{
memRange /= (12*sizeof(uint32));
while( memRange-- )
{
hashVal += *texDataU32;
hashVal = (hashVal<<3)|(hashVal>>29);
texDataU32 += 12;
}
}
else
{
memRange /= (96*sizeof(uint32));
while( memRange-- )
{
hashVal += *texDataU32;
hashVal = (hashVal<<3)|(hashVal>>29);
texDataU32 += 96;
}
}
}
else
{
// huge texture size
bool isCompressedFormat = hostTexture->IsCompressedFormat();
if( isCompressedFormat == false )
{
#if BOOST_OS_WINDOWS
if (g_CPUFeatures.x86.avx2)
{
__m256i h256 = { 0 };
__m256i* readPtr = (__m256i*)texDataU32;
memRange /= (288);
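// sample one 32-byte AVX2 load every 288 bytes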
while (memRange--)
{
__m256i temp = _mm256_load_si256(readPtr);
readPtr += (288 / 32);
h256 = _mm256_xor_si256(h256, temp);
}
#ifdef __clang__
hashVal = h256[0] + h256[1] + h256[2] + h256[3] + h256[4] + h256[5] + h256[6] + h256[7];
#else
hashVal = h256.m256i_u32[0] + h256.m256i_u32[1] + h256.m256i_u32[2] + h256.m256i_u32[3] + h256.m256i_u32[4] + h256.m256i_u32[5] + h256.m256i_u32[6] + h256.m256i_u32[7];
#endif
}
#else
if( false ) {}
#endif
else
{
memRange /= (32 * sizeof(uint64));
uint64 h64 = 0;
uint64* texDataU64 = (uint64*)texDataU32;
while (memRange--)
{
h64 += *texDataU64;
h64 = (h64 << 3) | (h64 >> 61);
texDataU64 += 32;
}
hashVal = (h64 & 0xFFFFFFFF) + (h64 >> 32);
}
}
else
{
memRange /= (512*sizeof(uint32));
while( memRange-- )
{
hashVal += *texDataU32;
hashVal = (hashVal<<3)|(hashVal>>29);
texDataU32 += 512;
}
}
}
return hashVal;
}
uint64 _botwLargeTexHax = 0;
bool LatteTC_HasTextureChanged(LatteTexture* hostTexture, bool force)
{
if (hostTexture->forceInvalidate)
{
force = true;
debug_printf("Force invalidate 0x%08x\n", hostTexture->physAddress);
hostTexture->forceInvalidate = false;
}
// if texture is written by GPU operations we switch to a faster hash implementation
if (hostTexture->isUpdatedOnGPU && hostTexture->useLightHash == false)
{
hostTexture->useLightHash = true;
// update hash
hostTexture->texDataHash2 = LatteTexture_CalculateTextureDataHash(hostTexture);
}
// only check each texture for updates once a frame
// todo: Instead of relying on frames, it would be better to recheck only after any GPU wait operation occurred.
if( hostTexture->lastDataUpdateFrameCounter == LatteGPUState.frameCounter && force == false)
return false;
hostTexture->lastDataUpdateFrameCounter = LatteGPUState.frameCounter;
// we assume that certain texture properties indicate that the texture will never be written by the CPU
if (hostTexture->width == 1280 && hostTexture->format != Latte::E_GX2SURFFMT::R8_UNORM && force == false)
{
// todo - remove this or find a better way to handle excluded texture invalidation checks (maybe via game profile?)
return false;
}
// workaround for corrupted terrain texture in BotW after video playback
// probably would be fixed if we added support for invalidating individual slices/mips of a texture
uint32 texDataHash = LatteTexture_CalculateTextureDataHash(hostTexture);
if( texDataHash != hostTexture->texDataHash2 )
{
hostTexture->texDataHash2 = texDataHash;
if (hostTexture->depth == 83 && hostTexture->width == 1024 && hostTexture->height == 1024)
{
_botwLargeTexHax = LatteGPUState.frameCounter;
}
return true;
}
if (_botwLargeTexHax != 0 && hostTexture->depth == 83 && hostTexture->width == 1024 && hostTexture->height == 1024 && _botwLargeTexHax != LatteGPUState.frameCounter)
{
_botwLargeTexHax = 0;
return true;
}
return false;
}
void LatteTC_ResetTextureChangeTracker(LatteTexture* hostTexture, bool force)
{
if( hostTexture->lastDataUpdateFrameCounter == LatteGPUState.frameCounter && force == false)
return;
hostTexture->lastDataUpdateFrameCounter = LatteGPUState.frameCounter;
LatteTC_HasTextureChanged(hostTexture, true);
}
/*
* This function should be called whenever the texture is still used in some form (any kind of access counts)
* The purpose of this function is to prevent garbage collection of textures that are still actively used
*/
void LatteTC_MarkTextureStillInUse(LatteTexture* texture)
{
texture->lastAccessTick = LatteGPUState.currentDrawCallTick;
texture->lastAccessFrameCount = LatteGPUState.frameCounter;
}
// check if a texture has been overwritten by another texture using GPU-writes
bool LatteTC_IsTextureDataOverwritten(LatteTexture* texture)
{
// check overlaps
sint32 mipLevels = texture->mipLevels;
sint32 sliceCount = texture->depth;
mipLevels = std::min(mipLevels, 3); // only check first 3 mip levels
for (sint32 mipIndex = 0; mipIndex < mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
mipSliceCount = std::max(1, sliceCount >> mipIndex);
else
mipSliceCount = sliceCount;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
bool isSliceMipOutdated = false;
for (auto& overlapData : sliceMipInfo->list_dataOverlap)
{
if (sliceMipInfo->lastDynamicUpdate < overlapData.destMipSliceInfo->lastDynamicUpdate)
{
isSliceMipOutdated = true;
break;
}
}
if (isSliceMipOutdated == false)
return false;
}
}
return true;
}
void LatteTexture_Delete(LatteTexture* texture)
{
LatteTC_UnregisterTexture(texture);
LatteMRT::NotifyTextureDeletion(texture);
LatteTextureReadback_NotifyTextureDeletion(texture);
LatteTexture_DeleteTextureRelations(texture);
// delete views
while (!texture->views.empty())
delete texture->views[0];
cemu_assert_debug(texture->views.empty());
cemu_assert_debug(texture->baseView == nullptr);
// free data overlap tracking
LatteTexture_DeleteDataOverlapTracking(texture);
// remove from lists
LatteTexture_UnregisterTextureMemoryOccupancy(texture);
// free memory
if (texture->sliceMipInfo)
{
delete[] texture->sliceMipInfo;
texture->sliceMipInfo = nullptr;
}
delete texture;
}
/*
* Checks if the texture can be dropped from the cache and if yes, delete it
* Returns true if the texture was deleted
*/
bool LatteTC_CleanupCheckTexture(LatteTexture* texture, uint32 currentTick)
{
uint32 currentFrameCount = LatteGPUState.frameCounter;
uint32 ticksSinceLastAccess = currentTick - texture->lastAccessTick;
uint32 framesSinceLastAccess = currentFrameCount - texture->lastAccessFrameCount;
if( !texture->isUpdatedOnGPU )
{
// RAM-only textures are safe to be deleted since we can always restore them from RAM
if( ticksSinceLastAccess >= (120*1000) && framesSinceLastAccess >= 2000 )
{
LatteTexture_Delete(texture);
return true;
}
}
if ((LatteGPUState.currentDrawCallTick - texture->lastAccessTick) >= 100 &&
LatteTC_IsTextureDataOverwritten(texture))
{
LatteTexture_Delete(texture);
return true;
}
// if unused for more than 5 seconds, start deleting views since they are cheap to recreate
if (ticksSinceLastAccess >= 5 * 1000 && framesSinceLastAccess >= 30)
{
for (sint32 i = 0; i < 3; i++)
{
if (texture->views.size() <= 1)
break;
LatteTextureView* view = texture->views[0];
if (view == texture->baseView)
view = texture->views[1];
delete view;
}
}
return false;
}
void LatteTexture_RefreshInfoCache();
/*
* Scans for unused textures and deletes them
* Called at the end of every frame
*/
void LatteTC_CleanupUnusedTextures()
{
static size_t currentScanIndex = 0;
uint32 currentTick = GetTickCount();
sint32 maxDelete = 10;
std::vector<LatteTexture*>& allTextures = LatteTexture::GetAllTextures();
if (!allTextures.empty())
{
for (sint32 c = 0; c < 25; c++)
{
if (currentScanIndex >= allTextures.size())
currentScanIndex = 0;
LatteTexture* texItr = allTextures[currentScanIndex];
currentScanIndex++;
if (!texItr)
continue;
if (LatteTC_CleanupCheckTexture(texItr, currentTick))
{
maxDelete--;
if (maxDelete <= 0)
					break; // deleting can be an expensive operation, don't delete too many at once to avoid micro stutter
if (allTextures.empty())
break;
}
}
}
LatteTexture_RefreshInfoCache(); // find a better place to call this from?
}
std::vector<LatteTexture*> LatteTC_GetDeleteableTextures()
{
std::vector<LatteTexture*> texList;
uint32 currentFrameCount = LatteGPUState.frameCounter;
for (auto& itr : g_allTextures)
{
if(itr->lastAccessFrameCount == 0)
continue; // not initialized
uint32 framesSinceLastAccess = currentFrameCount - itr->lastAccessFrameCount;
if(framesSinceLastAccess < 3)
continue;
if (itr->isUpdatedOnGPU)
{
if (LatteTC_IsTextureDataOverwritten(itr))
texList.emplace_back(itr);
}
else
{
texList.emplace_back(itr);
}
}
return texList;
}
void LatteTC_UnloadAllTextures()
{
std::vector<LatteTexture*> allTexturesCopy = LatteTexture::GetAllTextures();
for (auto& itr : allTexturesCopy)
{
if(itr)
LatteTexture_Delete(itr);
}
LatteRenderTarget_unloadAll();
}
| 12,913 | C++ | .cpp | 405 | 28.992593 | 183 | 0.73015 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,264 | LatteRenderTarget.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteRenderTarget.cpp |
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Core/LatteCachedFBO.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/GraphicPack/GraphicPack2.h"
#include "config/ActiveSettings.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "gui/guiWrapper.h"
#include "Cafe/OS/libs/erreula/erreula.h"
#include "input/InputManager.h"
#include "Cafe/OS/libs/swkbd/swkbd.h"
uint32 prevScissorX = 0;
uint32 prevScissorY = 0;
uint32 prevScissorWidth = 0;
uint32 prevScissorHeight = 0;
bool hasValidFramebufferAttached = false;
struct LatteMRTQuad
{
sint32 width;
sint32 height;
};
struct
{
LatteMRTQuad currentRenderSize;
LatteMRTQuad currentEffectiveSize;
struct
{
sint32 width;
sint32 height;
}currentGuestViewport;
bool renderTargetIsResized;
// tracking
sint32 rtUpdateListCount;
LatteTextureView* rtUpdateList[64];
sint32 rtUpdateListSlice[64];
sint32 rtUpdateListMip[64];
}sLatteRenderTargetState;
struct
{
struct
{
LatteTextureView* view{};
}colorBuffer[8];
struct
{
LatteTextureView* view{};
bool hasStencil{false};
}depthBuffer;
}sLatteCurrentRendertargets{};
LatteCachedFBO::LatteCachedFBO(uint64 key) : key(key)
{
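	// snapshot the currently bound color/depth attachments; each referenced view gets a back-reference to this FBO via list_associatedFbo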
for (sint32 i = 0; i < 8; i++)
{
LatteTextureView* colorTexView = sLatteCurrentRendertargets.colorBuffer[i].view;
colorBuffer[i].texture = colorTexView;
if (colorTexView)
{
vectorAppendUnique(colorTexView->list_associatedFbo, this);
m_referencedTextures.emplace_back(colorTexView->baseTexture);
}
}
if (sLatteCurrentRendertargets.depthBuffer.view)
{
LatteTextureView* depthTexView = sLatteCurrentRendertargets.depthBuffer.view;
depthBuffer.texture = depthTexView;
depthBuffer.hasStencil = sLatteCurrentRendertargets.depthBuffer.hasStencil;
if (depthTexView)
{
vectorAppendUnique(depthTexView->list_associatedFbo, this);
m_referencedTextures.emplace_back(depthTexView->baseTexture);
}
}
calculateEffectiveRenderAreaSize();
}
void LatteMRT::NotifyTextureDeletion(LatteTexture* texture)
{
for (sint32 i = 0; i < Latte::GPU_LIMITS::NUM_COLOR_ATTACHMENTS; i++)
{
if (sLatteCurrentRendertargets.colorBuffer[i].view && sLatteCurrentRendertargets.colorBuffer[i].view->baseTexture == texture)
{
sLatteCurrentRendertargets.colorBuffer[i].view = nullptr;
}
}
}
LatteCachedFBO* LatteMRT::CreateCachedFBO(uint64 key)
{
return g_renderer->rendertarget_createCachedFBO(key);
}
void LatteMRT::DeleteCachedFBO(LatteCachedFBO* cfbo)
{
// color textures
for (sint32 i = 0; i < 8; i++)
{
if (cfbo->colorBuffer[i].texture == NULL)
continue;
cfbo->colorBuffer[i].texture->list_fboLookup.erase(std::remove(cfbo->colorBuffer[i].texture->list_fboLookup.begin(), cfbo->colorBuffer[i].texture->list_fboLookup.end(), cfbo), cfbo->colorBuffer[i].texture->list_fboLookup.end());
cfbo->colorBuffer[i].texture->list_associatedFbo.erase(std::remove(cfbo->colorBuffer[i].texture->list_associatedFbo.begin(), cfbo->colorBuffer[i].texture->list_associatedFbo.end(), cfbo), cfbo->colorBuffer[i].texture->list_associatedFbo.end());
}
// depth texture
if (cfbo->depthBuffer.texture)
{
cfbo->depthBuffer.texture->list_fboLookup.erase(std::remove(cfbo->depthBuffer.texture->list_fboLookup.begin(), cfbo->depthBuffer.texture->list_fboLookup.end(), cfbo), cfbo->depthBuffer.texture->list_fboLookup.end());
cfbo->depthBuffer.texture->list_associatedFbo.erase(std::remove(cfbo->depthBuffer.texture->list_associatedFbo.begin(), cfbo->depthBuffer.texture->list_associatedFbo.end(), cfbo), cfbo->depthBuffer.texture->list_associatedFbo.end());
}
g_renderer->rendertarget_deleteCachedFBO(cfbo);
delete cfbo;
}
LatteCachedFBO* g_emptyFBO = nullptr;
void LatteMRT::SetColorAttachment(uint32 index, LatteTextureView* view)
{
sLatteCurrentRendertargets.colorBuffer[index].view = view;
}
void LatteMRT::SetDepthAndStencilAttachment(LatteTextureView* view, bool hasStencil)
{
sLatteCurrentRendertargets.depthBuffer.view = view;
sLatteCurrentRendertargets.depthBuffer.hasStencil = hasStencil;
}
LatteTextureView* LatteMRT::GetColorAttachment(uint32 index)
{
cemu_assert_debug(index < 8);
return sLatteCurrentRendertargets.colorBuffer[index].view;
}
LatteTextureView* LatteMRT::GetDepthAttachment()
{
return sLatteCurrentRendertargets.depthBuffer.view;
}
void LatteMRT::ApplyCurrentState()
{
uint64 key = 0;
LatteTextureView* fboLookupView = NULL;
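	// build a key by hashing the pointers of all attached views; the first valid attachment also serves as the lookup anchor for cached FBOs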
for (sint32 i = 0; i < 8; i++)
{
LatteTextureView* colorView = sLatteCurrentRendertargets.colorBuffer[i].view;
if (colorView)
{
key += ((uint64)colorView);
key = std::rotl<uint64>(key, 5);
fboLookupView = colorView;
}
key = std::rotl<uint64>(key, 7);
}
if (sLatteCurrentRendertargets.depthBuffer.view)
{
key += ((uint64)sLatteCurrentRendertargets.depthBuffer.view);
key = std::rotl<uint64>(key, 5);
key += (sLatteCurrentRendertargets.depthBuffer.hasStencil);
if (fboLookupView == NULL)
{
fboLookupView = sLatteCurrentRendertargets.depthBuffer.view;
}
}
// use fboLookupTexture to find cached FBO
if (fboLookupView == nullptr)
{
if (!g_emptyFBO)
g_emptyFBO = CreateCachedFBO(key);
g_renderer->rendertarget_bindFramebufferObject(g_emptyFBO);
return;
}
// look for FBO
for (auto& fbo : fboLookupView->list_fboLookup)
{
if (fbo->key == key)
{
// found matching FBO
g_renderer->rendertarget_bindFramebufferObject(fbo);
return;
}
}
// create new cached FBO
LatteCachedFBO* cfbo = CreateCachedFBO(key);
g_renderer->rendertarget_bindFramebufferObject(cfbo);
fboLookupView->list_fboLookup.push_back(cfbo);
// some extra checks to verify that looked up fbo matches active buffers
cemu_assert_debug(cfbo->colorBuffer[0].texture == sLatteCurrentRendertargets.colorBuffer[0].view);
cemu_assert_debug(cfbo->colorBuffer[1].texture == sLatteCurrentRendertargets.colorBuffer[1].view);
cemu_assert_debug(cfbo->colorBuffer[2].texture == sLatteCurrentRendertargets.colorBuffer[2].view);
cemu_assert_debug(cfbo->depthBuffer.texture == sLatteCurrentRendertargets.depthBuffer.view);
}
void LatteMRT::BindColorBufferOnly(LatteTextureView* view)
{
cemu_assert_debug(!view->baseTexture->isDepth);
SetColorAttachment(0, view);
for (sint32 i = 1; i < 8; i++)
SetColorAttachment(i, nullptr);
SetDepthAndStencilAttachment(nullptr, false);
ApplyCurrentState();
}
void LatteMRT::BindDepthBufferOnly(LatteTextureView* view)
{
cemu_assert_debug(view->baseTexture->isDepth);
for (sint32 i = 0; i < 8; i++)
SetColorAttachment(i, nullptr);
SetDepthAndStencilAttachment(view, view->baseTexture->hasStencil);
ApplyCurrentState();
}
LatteTextureView* LatteMRT_CreateDepthBuffer(MPTR depthBufferPhysMem, uint32 width, uint32 height, uint32 pitch, Latte::E_HWTILEMODE tileMode, Latte::E_GX2SURFFMT format, uint32 swizzle, sint32 viewSlice)
{
LatteTextureView* textureView = LatteTexture_CreateMapping(depthBufferPhysMem, MPTR_NULL, width, height, viewSlice+1, pitch, tileMode, swizzle, 0, 1, viewSlice, 1, format, viewSlice > 0 ? Latte::E_DIM::DIM_2D_ARRAY : Latte::E_DIM::DIM_2D, Latte::E_DIM::DIM_2D, true);
LatteMRT::SetDepthAndStencilAttachment(textureView, textureView->baseTexture->hasStencil);
return textureView;
}
sint32 _depthBufferSizeWarningCount = 0;
LatteTextureView* LatteMRT::GetColorAttachmentTexture(uint32 index, bool createNew, bool checkForTextureChanges)
{
uint32* colorBufferRegBase = LatteGPUState.contextRegister+(mmCB_COLOR0_BASE + index);
uint32 regColorBufferBase = colorBufferRegBase[mmCB_COLOR0_BASE - mmCB_COLOR0_BASE] & 0xFFFFFF00; // the low 8 bits are ignored? How to Survive seems to rely on this
uint32 regColorSize = colorBufferRegBase[mmCB_COLOR0_SIZE - mmCB_COLOR0_BASE];
uint32 regColorInfo = colorBufferRegBase[mmCB_COLOR0_INFO - mmCB_COLOR0_BASE];
uint32 regColorView = colorBufferRegBase[mmCB_COLOR0_VIEW - mmCB_COLOR0_BASE];
// decode color buffer reg info
Latte::E_HWTILEMODE colorBufferTileMode = (Latte::E_HWTILEMODE)((regColorInfo >> 8) & 0xF);
uint32 numberType = (regColorInfo >> 12) & 7;
Latte::E_GX2SURFFMT colorBufferFormat = GetColorBufferFormat(index, LatteGPUState.contextNew);
MPTR colorBufferPhysMem = regColorBufferBase;
uint32 colorBufferSwizzle = 0;
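	// for macro-tiled surfaces the swizzle bits are stored in bits 8-10 of the base address and have to be masked out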
if ( Latte::TM_IsMacroTiled(colorBufferTileMode) )
{
colorBufferSwizzle = colorBufferPhysMem & 0x700;
colorBufferPhysMem = colorBufferPhysMem & ~(7 << 8);
}
// get view slice and view slice num
uint32 viewFirstSlice = (regColorView & 0x7FF);
uint32 viewNumSlices = ((regColorView >> 13) & 0x7FF) - viewFirstSlice + 1;
if (viewNumSlices != 1)
{
debug_printf("viewNumSlices is not 1! (%d)\n", viewNumSlices);
}
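	// CB_COLORn_SIZE encodes the pitch in units of 8 pixels and the slice size (pitch * height) in units of 64 pixels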
uint32 colorBufferPitch = (((regColorSize >> 0) & 0x3FF) + 1);
colorBufferPitch <<= 3;
uint32 pitchHeight = (((regColorSize >> 10) & 0xFFFFF) + 1);
pitchHeight <<= 6;
uint32 colorBufferHeight = pitchHeight / colorBufferPitch;
uint32 colorBufferWidth = colorBufferPitch;
// colorbuffer width/height has to be padded to 8/32 alignment but the actual resolution might be smaller
// use the scissor box as a clue to figure out the original resolution if possible
if(LatteGPUState.allowFramebufferSizeOptimization)
{
uint32 scissorBoxWidth = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_X();
uint32 scissorBoxHeight = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_Y();
		if (((scissorBoxWidth + 7) & ~7) == colorBufferWidth)
			colorBufferWidth = scissorBoxWidth;
		if (((scissorBoxHeight + 31) & ~31) == colorBufferHeight)
			colorBufferHeight = scissorBoxHeight;
}
// log resolution changes if the above heuristic takes effect
// this is useful to find resolutions which need to be updated in gfx pack texture rules
#if 0
uint32 colorBufferHeight2 = pitchHeight / colorBufferPitch;
static std::unordered_set<uint64> s_foundColorBufferResMappings;
if (colorBufferPitch != colorBufferWidth || colorBufferHeight != colorBufferHeight2)
{
// only log unique, source and dest resolution. Encode into a key with 16 bits per component
uint64 resHash = (uint64)colorBufferWidth | ((uint64)colorBufferHeight << 16) | ((uint64)colorBufferPitch << 32) | ((uint64)colorBufferHeight2 << 48);
if( !s_foundColorBufferResMappings.contains(resHash) )
{
s_foundColorBufferResMappings.insert(resHash);
cemuLog_log(LogType::Force, "[COLORBUFFER-DBG] Using res {}x{} instead of {}x{}", colorBufferWidth, colorBufferHeight, colorBufferPitch, colorBufferHeight2);
}
}
#endif
bool colorBufferWasFound = false;
sint32 viewFirstMip = 0; // todo
cemu_assert_debug(viewNumSlices == 1);
LatteTextureView* colorBufferView = LatteTextureViewLookupCache::lookupSliceEx(colorBufferPhysMem, colorBufferWidth, colorBufferHeight, colorBufferPitch, viewFirstMip, viewFirstSlice, colorBufferFormat, false);
if (colorBufferView == nullptr)
{
// create color buffer view
colorBufferView = LatteTexture_CreateMapping(colorBufferPhysMem, 0, colorBufferWidth, colorBufferHeight, (viewFirstSlice + viewNumSlices), colorBufferPitch, colorBufferTileMode, colorBufferSwizzle>>8, viewFirstMip, 1, viewFirstSlice, viewNumSlices, (Latte::E_GX2SURFFMT)colorBufferFormat, (viewFirstSlice + viewNumSlices)>1? Latte::E_DIM::DIM_2D_ARRAY: Latte::E_DIM::DIM_2D, Latte::E_DIM::DIM_2D, false, true);
LatteGPUState.repeatTextureInitialization = true;
checkForTextureChanges = false;
}
if (colorBufferView->baseTexture->swizzle != colorBufferSwizzle)
{
colorBufferView->baseTexture->lastRenderTargetSwizzle = colorBufferSwizzle;
}
// check for texture changes
if (checkForTextureChanges)
LatteTexture_UpdateDataToLatest(colorBufferView->baseTexture);
// mark as used
LatteTC_MarkTextureStillInUse(colorBufferView->baseTexture);
return colorBufferView;
}
// get mask of all used color buffers
uint8 LatteMRT::GetActiveColorBufferMask(const LatteDecompilerShader* pixelShader, const LatteContextRegister& lcr)
{
const uint32* regView = lcr.GetRawView();
uint8 colorBufferMask = 0;
for (uint32 i = 0; i < 8; i++)
{
if (regView[mmCB_COLOR0_BASE + i] != MPTR_NULL)
colorBufferMask |= (1 << i);
}
// check if color buffer output is active
const Latte::LATTE_CB_COLOR_CONTROL& colorControlReg = lcr.CB_COLOR_CONTROL;
uint32 colorBufferDisable = colorControlReg.get_SPECIAL_OP() == Latte::LATTE_CB_COLOR_CONTROL::E_SPECIALOP::DISABLE;
if (colorBufferDisable)
return 0;
cemu_assert_debug(colorControlReg.get_DEGAMMA_ENABLE() == false); // not supported
// combine color buffer mask with pixel output mask from pixel shader
colorBufferMask &= (pixelShader ? pixelShader->pixelColorOutputMask : 0);
// combine color buffer mask with color channel mask from mmCB_TARGET_MASK (disable render buffer if all colors are blocked)
uint32 channelTargetMask = lcr.CB_TARGET_MASK.get_MASK();
for (uint32 i = 0; i < 8; i++)
{
if (((channelTargetMask >> (i * 4)) & 0xF) == 0)
colorBufferMask &= ~(1 << i);
}
// render targets smaller than the scissor size are not allowed
	// this fixes a few render issues in Cemu but we don't know if this matches HW behavior
cemu_assert_debug(lcr.PA_SC_GENERIC_SCISSOR_TL.get_WINDOW_OFFSET_DISABLE() == true); // todo (not exposed by GX2 API)
uint32 scissorAccessWidth = lcr.PA_SC_GENERIC_SCISSOR_BR.get_BR_X();
uint32 scissorAccessHeight = lcr.PA_SC_GENERIC_SCISSOR_BR.get_BR_Y();
for (uint32 i = 0; i < 8; i++)
{
if( (colorBufferMask&(1<<i)) == 0 )
continue;
// get width/height
uint32 regColorSize = regView[mmCB_COLOR0_SIZE + i];
uint32 regColorInfo = regView[mmCB_COLOR0_INFO + i];
// decode color buffer reg info
uint32 colorBufferPitch = (((regColorSize >> 0) & 0x3FF) + 1);
colorBufferPitch <<= 3;
uint32 pitchHeight = (((regColorSize >> 10) & 0xFFFFF) + 1);
pitchHeight <<= 6;
uint32 colorBufferHeight = pitchHeight / colorBufferPitch;
uint32 colorBufferWidth = colorBufferPitch;
if ((colorBufferWidth < (sint32)scissorAccessWidth) ||
(colorBufferHeight < (sint32)scissorAccessHeight))
{
// log this?
colorBufferMask &= ~(1<<i);
}
}
return colorBufferMask;
}
// returns true if depth/stencil buffer is used
bool LatteMRT::GetActiveDepthBufferMask(const LatteContextRegister& lcr)
{
bool depthBufferMask = true;
// if depth test is not used then detach the depth buffer
bool depthEnable = lcr.DB_DEPTH_CONTROL.get_Z_ENABLE();
bool stencilTestEnable = lcr.DB_DEPTH_CONTROL.get_STENCIL_ENABLE();
bool backStencilEnable = lcr.DB_DEPTH_CONTROL.get_BACK_STENCIL_ENABLE();
if (!depthEnable && !stencilTestEnable && !backStencilEnable)
depthBufferMask = false;
return depthBufferMask;
}
const uint32 _colorBufferFormatBits[] =
{
0, // 0
0x200, // 1
0, // 2
0, // 3
0x100, // 4
0x300, // 5
0x400, // 6
0x800, // 7
};
Latte::E_GX2SURFFMT LatteMRT::GetColorBufferFormat(const uint32 index, const LatteContextRegister& lcr)
{
cemu_assert_debug(index < Latte::GPU_LIMITS::NUM_COLOR_ATTACHMENTS);
uint32 regColorInfo = lcr.GetRawView()[mmCB_COLOR0_INFO + index];
uint32 colorBufferFormat = (regColorInfo >> 2) & 0x3F; // base HW format
uint32 numberType = (regColorInfo >> 12) & 7;
colorBufferFormat |= _colorBufferFormatBits[numberType];
return (Latte::E_GX2SURFFMT)colorBufferFormat;
}
// return GX2 format of current depth buffer
Latte::E_GX2SURFFMT LatteMRT::GetDepthBufferFormat(const LatteContextRegister& lcr)
{
uint32 regDepthBufferInfo = lcr.GetRawView()[mmDB_DEPTH_INFO];
switch (regDepthBufferInfo & 7)
{
case 1:
return Latte::E_GX2SURFFMT::D16_UNORM;
case 3:
return Latte::E_GX2SURFFMT::D24_S8_UNORM;
case 5:
return Latte::E_GX2SURFFMT::D24_S8_FLOAT;
case 6:
return Latte::E_GX2SURFFMT::D32_FLOAT;
case 7:
return Latte::E_GX2SURFFMT::D32_S8_FLOAT;
default:
debug_printf("Invalid DB_DEPTH_INFO depthbuffer format (%d)\n", (regDepthBufferInfo & 7));
break;
}
return Latte::E_GX2SURFFMT::D16_UNORM;
}
bool LatteMRT::UpdateCurrentFBO()
{
catchOpenGLError();
sLatteRenderTargetState.rtUpdateListCount = 0;
// combine color buffer mask with pixel output mask from pixel shader
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
uint8 colorBufferMask = GetActiveColorBufferMask(pixelShader, LatteGPUState.contextNew);
bool depthBufferMask = GetActiveDepthBufferMask(LatteGPUState.contextNew);
// if depth test is not used then detach the depth buffer
bool depthEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_Z_ENABLE();
bool stencilTestEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_STENCIL_ENABLE();
bool backStencilEnable = LatteGPUState.contextNew.DB_DEPTH_CONTROL.get_BACK_STENCIL_ENABLE();
if (!depthEnable && !stencilTestEnable && !backStencilEnable)
depthBufferMask = false;
bool hasResizedTexture = false; // set to true if any of the color buffers or the depth buffer reference a resized texture (via graphic pack texture rules)
sLatteRenderTargetState.renderTargetIsResized = false;
// real size
LatteMRTQuad* rtRealSize = &sLatteRenderTargetState.currentRenderSize;
rtRealSize->width = 0;
rtRealSize->height = 0;
// effective size (differs from real size only if graphic pack rules overwrite texture sizes)
LatteMRTQuad* rtEffectiveSize = &sLatteRenderTargetState.currentEffectiveSize;
rtEffectiveSize->width = 0;
rtEffectiveSize->height = 0;
// get scissor box
cemu_assert_debug(LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_TL.get_WINDOW_OFFSET_DISABLE() == true); // todo (not exposed by GX2 API?)
uint32 scissorX = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_TL.get_TL_X();
uint32 scissorY = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_TL.get_TL_Y();
uint32 scissorWidth = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_X() - scissorX;
uint32 scissorHeight = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_Y() - scissorY;
uint32 scissorAccessWidth = scissorX + scissorWidth;
uint32 scissorAccessHeight = scissorY + scissorHeight;
// color buffers
for (uint32 i = 0; i < Latte::GPU_LIMITS::NUM_COLOR_ATTACHMENTS; i++)
{
if (((colorBufferMask)&(1 << i)) == 0)
{
// unbind
SetColorAttachment(i, nullptr);
continue;
}
LatteTextureView* colorAttachmentView = GetColorAttachmentTexture(i, true, true);
SetColorAttachment(i, colorAttachmentView);
// after the drawcall mark the texture as updated
sLatteRenderTargetState.rtUpdateList[sLatteRenderTargetState.rtUpdateListCount] = colorAttachmentView;
sLatteRenderTargetState.rtUpdateListCount++;
sint32 colorAttachmentWidth, colorAttachmentHeight;
colorAttachmentView->baseTexture->GetSize(colorAttachmentWidth, colorAttachmentHeight, colorAttachmentView->firstMip);
// set effective size
sint32 effectiveWidth, effectiveHeight;
colorAttachmentView->baseTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, colorAttachmentView->firstMip);
if (rtEffectiveSize->width == 0 && rtEffectiveSize->height == 0)
{
rtEffectiveSize->width = effectiveWidth;
rtEffectiveSize->height = effectiveHeight;
}
else if (rtEffectiveSize->width != effectiveWidth && rtEffectiveSize->height != effectiveHeight)
{
cemuLog_logDebug(LogType::Force, "Color buffer size mismatch ({}x{}). Effective size: {}x{} Real size: {}x{} Mismatching texture: {:08x} {}x{} fmt {:04x}", rtEffectiveSize->width, rtEffectiveSize->height, effectiveWidth, effectiveHeight, colorAttachmentView->baseTexture->width, colorAttachmentView->baseTexture->height, colorAttachmentView->baseTexture->physAddress, colorAttachmentView->baseTexture->width, colorAttachmentView->baseTexture->height, (uint32)colorAttachmentView->baseTexture->format);
}
// currently the first color attachment defines the size of the current render target
if (rtRealSize->width == 0 && rtRealSize->height == 0)
{
rtRealSize->width = colorAttachmentWidth;
rtRealSize->height = colorAttachmentHeight;
}
if (colorAttachmentView)
continue;
}
// depth buffer
if (depthBufferMask)
{
uint32 regDepthBuffer = LatteGPUState.contextRegister[mmDB_HTILE_DATA_BASE];
uint32 regDepthSize = LatteGPUState.contextRegister[mmDB_DEPTH_SIZE];
uint32 regDepthBufferInfo = LatteGPUState.contextRegister[mmDB_DEPTH_INFO];
// get format and tileMode from info reg
Latte::E_GX2SURFFMT depthBufferFormat = GetDepthBufferFormat(LatteGPUState.contextNew);
Latte::E_HWTILEMODE depthBufferTileMode = (Latte::E_HWTILEMODE)((regDepthBufferInfo >> 15) & 0xF);
MPTR depthBufferPhysMem = regDepthBuffer << 8;
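		// DB_DEPTH_SIZE uses the same encoding as the color buffer size registers: pitch in units of 8 pixels, slice size in units of 64 pixels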
uint32 depthBufferPitch = (((regDepthSize >> 0) & 0x3FF) + 1);
uint32 depthBufferHeight = ((((regDepthSize >> 10) & 0xFFFFF) + 1) / depthBufferPitch);
depthBufferPitch <<= 3;
depthBufferHeight <<= 3;
uint32 depthBufferWidth = depthBufferPitch;
if (depthBufferWidth < 2)
{
debug_printf("depthBufferWidth has invalid value %d\n", depthBufferWidth);
depthBufferWidth = 2;
}
if (depthBufferHeight < 2)
{
debug_printf("depthBufferHeight has invalid value %d\n", depthBufferHeight);
depthBufferHeight = 2;
}
bool blockDepthBuffer = false;
if (scissorAccessWidth > depthBufferPitch || scissorAccessHeight > depthBufferHeight)
{
SetDepthAndStencilAttachment(nullptr, false);
blockDepthBuffer = true;
// set effective size
if( rtEffectiveSize->width == 0 && rtEffectiveSize->height == 0 )
{
rtEffectiveSize->width = rtRealSize->width;
rtEffectiveSize->height = rtRealSize->height;
}
}
if (blockDepthBuffer == false)
{
if (rtRealSize->width == 0)
{
rtRealSize->width = depthBufferWidth;
rtRealSize->height = depthBufferHeight;
}
uint32 regDepthView = LatteGPUState.contextRegister[mmDB_DEPTH_VIEW];
uint32 depthBufferViewFirstSlice = (regDepthView & 0x7FF);
uint32 depthBufferViewNumSlices = ((regDepthView >> 13) & 0x7FF) - depthBufferViewFirstSlice + 1;
cemu_assert_debug(depthBufferViewNumSlices == 1); // binding multiple layers makes no sense?
uint32 depthBufferSwizzle = 0;
if (Latte::TM_IsMacroTiled(depthBufferTileMode))
{
depthBufferSwizzle = (depthBufferPhysMem >> 8) & 7;
depthBufferPhysMem = depthBufferPhysMem & ~(7 << 8);
}
if (depthBufferPhysMem != MPTR_NULL)
{
LatteTextureView* depthBufferView = LatteTextureViewLookupCache::lookupSliceEx(depthBufferPhysMem, depthBufferWidth, depthBufferHeight, depthBufferPitch, 0, depthBufferViewFirstSlice, depthBufferFormat, true);
if (!depthBufferView)
{
					// create a new depth buffer view (and the underlying texture, if it doesn't exist yet)
depthBufferView = LatteTexture_CreateMapping(depthBufferPhysMem, 0, depthBufferWidth, depthBufferHeight, depthBufferViewFirstSlice+1, depthBufferPitch, depthBufferTileMode, depthBufferSwizzle, 0, 1, depthBufferViewFirstSlice, 1, depthBufferFormat, depthBufferViewFirstSlice > 0 ? Latte::E_DIM::DIM_2D_ARRAY : Latte::E_DIM::DIM_2D, Latte::E_DIM::DIM_2D, true, true);
LatteGPUState.repeatTextureInitialization = true;
}
else
{
// check for texture changes
LatteTexture_UpdateDataToLatest(depthBufferView->baseTexture);
}
// set effective size
sint32 effectiveWidth, effectiveHeight;
depthBufferView->baseTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, depthBufferView->firstMip);
if (rtEffectiveSize->width == 0 && rtEffectiveSize->height == 0)
{
rtEffectiveSize->width = effectiveWidth;
rtEffectiveSize->height = effectiveHeight;
}
else if (rtEffectiveSize->width > effectiveWidth && rtEffectiveSize->height > effectiveHeight)
{
if (_depthBufferSizeWarningCount < 100)
{
cemuLog_logDebug(LogType::Force, "Depth buffer size too small. Effective size: {}x{} Real size: {}x{} Mismatching texture: {:08x} {}x{} fmt {:04x}", effectiveWidth, effectiveHeight, depthBufferView->baseTexture->width, depthBufferView->baseTexture->height, depthBufferView->baseTexture->physAddress, depthBufferView->baseTexture->width, depthBufferView->baseTexture->height, (uint32)depthBufferView->baseTexture->format);
_depthBufferSizeWarningCount++;
}
}
LatteTC_MarkTextureStillInUse(depthBufferView->baseTexture);
// after the drawcall mark the texture as updated
sLatteRenderTargetState.rtUpdateList[sLatteRenderTargetState.rtUpdateListCount] = depthBufferView;
sLatteRenderTargetState.rtUpdateListCount++;
SetDepthAndStencilAttachment(depthBufferView, depthBufferView->baseTexture->hasStencil);
}
}
else
{
SetDepthAndStencilAttachment(nullptr, false);
}
}
else
{
SetDepthAndStencilAttachment(nullptr, false);
}
catchOpenGLError();
if (colorBufferMask || depthBufferMask)
{
hasValidFramebufferAttached = true;
}
else
{
hasValidFramebufferAttached = false;
return true;
}
if( rtEffectiveSize->width != rtRealSize->width || rtEffectiveSize->height != rtRealSize->height )
{
//debug_printf("RenderTarget rescaled. Real: %dx%d Resized: %dx%d\n", rtRealSize->width, rtRealSize->height, rtEffectiveSize->width, rtEffectiveSize->height);
sLatteRenderTargetState.renderTargetIsResized = true;
}
if (sLatteRenderTargetState.currentEffectiveSize.width == 0)
{
debug_printf("Render target effective size is 0. May indicate a bug in Cemu or invalid color/depth buffers\n");
return false;
}
return true;
}
// return a vec4 with xy being the scale ratio for the render target extent and zw being the virtual viewport dimensions
void LatteMRT::GetCurrentFragCoordScale(float* coordScale)
{
if (sLatteRenderTargetState.renderTargetIsResized)
{
coordScale[0] = (float)sLatteRenderTargetState.currentRenderSize.width / (float)sLatteRenderTargetState.currentEffectiveSize.width;
coordScale[1] = (float)sLatteRenderTargetState.currentRenderSize.height / (float)sLatteRenderTargetState.currentEffectiveSize.height;
coordScale[2] = sLatteRenderTargetState.currentGuestViewport.width;
coordScale[3] = sLatteRenderTargetState.currentGuestViewport.height;
}
else
{
coordScale[0] = 1.0f;
coordScale[1] = 1.0f;
coordScale[2] = sLatteRenderTargetState.currentGuestViewport.width;
coordScale[3] = sLatteRenderTargetState.currentGuestViewport.height;
}
}
void LatteMRT::GetVirtualViewportDimensions(sint32& width, sint32& height)
{
width = sLatteRenderTargetState.currentGuestViewport.width;
height = sLatteRenderTargetState.currentGuestViewport.height;
}
// flag all FBO textures as updated via GPU
// also handle texture readback
void LatteRenderTarget_trackUpdates()
{
// after the drawcall, mark the render target textures as dynamically updated
uint64 eventCounter = LatteTexture_getNextUpdateEventCounter();
for (sint32 i = 0; i < sLatteRenderTargetState.rtUpdateListCount; i++)
{
LatteTextureView* texView = sLatteRenderTargetState.rtUpdateList[i];
LatteTexture* baseTexture = texView->baseTexture;
LatteTexture_TrackTextureGPUWrite(baseTexture, texView->firstSlice, texView->firstMip, eventCounter);
// texture readback
if (baseTexture->enableReadback)
{
LatteTextureReadback_Initate(texView);
}
}
}
void LatteRenderTarget_itHLESwapScanBuffer()
{
performanceMonitor.cycle[performanceMonitor.cycleIndex].frameCounter++;
if(LatteGPUState.frameCounter > 5)
performanceMonitor.gpuTime_frameTime.endMeasuring();
LattePerformanceMonitor_frameEnd();
LatteGPUState.frameCounter++;
g_renderer->SwapBuffers(true, true);
catchOpenGLError();
performanceMonitor.gpuTime_frameTime.beginMeasuring();
LatteTC_CleanupUnusedTextures();
LatteDraw_cleanupAfterFrame();
LatteQuery_CancelActiveGPU7Queries();
LatteBufferCache_notifySwapTVScanBuffer();
LattePerformanceMonitor_frameBegin();
}
void LatteRenderTarget_applyTextureColorClear(LatteTexture* texture, uint32 sliceIndex, uint32 mipIndex, float r, float g, float b, float a, uint64 eventCounter)
{
if (texture->isDepth)
{
cemuLog_logDebug(LogType::Force, "Unsupported clear depth as color");
}
else
{
g_renderer->texture_clearColorSlice(texture, sliceIndex, mipIndex, r, g, b, a);
LatteTexture_MarkDynamicTextureAsChanged(texture->baseView, sliceIndex, mipIndex, eventCounter);
}
}
void LatteRenderTarget_applyTextureDepthClear(LatteTexture* texture, uint32 sliceIndex, uint32 mipIndex, bool hasDepthClear, bool hasStencilClear, float depthValue, uint8 stencilValue, uint64 eventCounter)
{
if(texture->isDepth)
{
g_renderer->texture_clearDepthSlice(texture, sliceIndex, mipIndex, hasDepthClear, hasStencilClear, depthValue, stencilValue);
}
else
{
// clearing a color texture using depth clear
if (hasStencilClear)
return; // operation likely not intended as a color clear
//cemu_assert_debug(!hasStencilClear);
if (hasDepthClear)
{
g_renderer->texture_clearColorSlice(texture, sliceIndex, mipIndex, depthValue, depthValue, depthValue, depthValue);
}
}
LatteTexture_MarkDynamicTextureAsChanged(texture->baseView, sliceIndex, mipIndex, eventCounter);
}
void LatteRenderTarget_itHLEClearColorDepthStencil(uint32 clearMask,
MPTR colorBufferMPTR, Latte::E_GX2SURFFMT colorBufferFormat, Latte::E_HWTILEMODE colorBufferTilemode, uint32 colorBufferWidth, uint32 colorBufferHeight, uint32 colorBufferPitch, uint32 colorBufferViewFirstSlice, uint32 colorBufferViewNumSlice,
MPTR depthBufferMPTR, Latte::E_GX2SURFFMT depthBufferFormat, Latte::E_HWTILEMODE depthBufferTileMode, sint32 depthBufferWidth, sint32 depthBufferHeight, sint32 depthBufferPitch, sint32 depthBufferViewFirstSlice, sint32 depthBufferViewNumSlice,
float r, float g, float b, float a, float clearDepth, uint32 clearStencil)
{
uint32 depthBufferMipIndex = 0; // todo
uint32 colorBufferMipIndex = 0; // todo
bool hasColorClear = (clearMask & 1);
bool hasDepthClear = (clearMask & 2);
bool hasStencilClear = (clearMask & 4);
// extract swizzle offset from pointer
uint32 colorBufferSwizzle = 0;
uint32 depthBufferSwizzle = 0;
if (Latte::TM_IsMacroTiled(colorBufferTilemode))
{
colorBufferSwizzle = (colorBufferMPTR >> 8) & 7;
colorBufferMPTR = colorBufferMPTR & ~(7 << 8);
}
if (Latte::TM_IsMacroTiled(depthBufferTileMode))
{
depthBufferSwizzle = (depthBufferMPTR >> 8) & 7;
depthBufferMPTR = depthBufferMPTR & ~(7 << 8);
}
cemu_assert_debug(colorBufferViewNumSlice <= 1);
cemu_assert_debug(depthBufferViewNumSlice <= 1);
// clear color buffer (if flag set)
uint64 eventCounter = LatteTexture_getNextUpdateEventCounter();
if ((clearMask & 1) != 0 && colorBufferMPTR != MPTR_NULL && colorBufferWidth > 0 && colorBufferHeight > 0)
{
// clear color
sint32 searchIndex = 0;
bool targetFound = false;
while (true)
{
LatteTextureView* colorView = LatteTC_LookupTextureByData(colorBufferMPTR, colorBufferWidth, colorBufferHeight, colorBufferPitch, 0, 1, colorBufferViewFirstSlice, 1, &searchIndex);
if (!colorView)
break;
if (Latte::GetFormatBits(colorBufferFormat) != Latte::GetFormatBits(colorView->baseTexture->format))
continue;
if (colorView->baseTexture->pitch == colorBufferPitch && colorView->baseTexture->height == colorBufferHeight)
targetFound = true;
LatteRenderTarget_applyTextureColorClear(colorView->baseTexture, colorBufferViewFirstSlice, colorBufferMipIndex, r, g, b, a, eventCounter);
}
if (targetFound == false)
{
// create new texture with matching format
cemu_assert_debug(colorBufferViewNumSlice <= 1);
LatteTextureView* newColorView = LatteTexture_CreateMapping(colorBufferMPTR, MPTR_NULL, colorBufferWidth, colorBufferHeight, colorBufferViewFirstSlice+1, colorBufferPitch, colorBufferTilemode, colorBufferSwizzle, 0, 1, colorBufferViewFirstSlice, 1, colorBufferFormat, colorBufferViewFirstSlice > 0 ? Latte::E_DIM::DIM_2D_ARRAY : Latte::E_DIM::DIM_2D, Latte::E_DIM::DIM_2D, false);
LatteRenderTarget_applyTextureColorClear(newColorView->baseTexture, colorBufferViewFirstSlice, colorBufferMipIndex, r, g, b, a, eventCounter);
}
}
// clear depth or stencil buffer (if flag set)
if ((hasDepthClear || hasStencilClear) && depthBufferMPTR != MPTR_NULL)
{
std::vector<LatteTexture*> list_depthClearTextures;
LatteTC_LookupTexturesByPhysAddr(depthBufferMPTR, list_depthClearTextures);
bool foundMatchingDepthBuffer = false;
// todo - support for clearing depth mips?
cemu_assert_debug(depthBufferViewNumSlice == 1);
for (auto& texItr : list_depthClearTextures)
{
			// only clear textures whose pitch does not exceed the pitch of the cleared region
if (texItr->pitch > depthBufferPitch)
continue;
if (depthBufferViewFirstSlice >= texItr->depth)
continue; // slice out of range
if (texItr->pitch == depthBufferPitch && texItr->height == depthBufferHeight)
foundMatchingDepthBuffer = true;
			// todo - calculate actual sliceIndex and mipIndex since the textures in list_depthClearTextures don't necessarily share the same base
LatteRenderTarget_applyTextureDepthClear(texItr, depthBufferViewFirstSlice, depthBufferMipIndex, hasDepthClear, hasStencilClear, clearDepth, clearStencil, eventCounter);
}
if (foundMatchingDepthBuffer == false)
{
LatteTextureView* newDepthBufferView = LatteMRT_CreateDepthBuffer(depthBufferMPTR, depthBufferWidth, depthBufferHeight, depthBufferPitch, depthBufferTileMode, (Latte::E_GX2SURFFMT)depthBufferFormat, depthBufferSwizzle, depthBufferViewFirstSlice);
LatteRenderTarget_applyTextureDepthClear(newDepthBufferView->baseTexture, depthBufferViewFirstSlice, depthBufferMipIndex, hasDepthClear, hasStencilClear, clearDepth, clearStencil, eventCounter);
}
}
}
sint32 _currentOutputImageWidth = 0;
sint32 _currentOutputImageHeight = 0;
void LatteRenderTarget_getScreenImageArea(sint32* x, sint32* y, sint32* width, sint32* height, sint32* fullWidth, sint32* fullHeight, bool padView)
{
int w, h;
if(padView && gui_isPadWindowOpen())
gui_getPadWindowPhysSize(w, h);
else
gui_getWindowPhysSize(w, h);
sint32 scaledOutputX;
sint32 scaledOutputY;
if (GetConfig().fullscreen_scaling == kKeepAspectRatio)
{
// calculate maximum possible resolution with intact aspect ratio
scaledOutputX = w;
scaledOutputY = _currentOutputImageHeight * w / std::max(_currentOutputImageWidth, 1);
if (scaledOutputY > h)
{
scaledOutputX = _currentOutputImageWidth * h / std::max(_currentOutputImageHeight, 1);
scaledOutputY = h;
}
}
else
{
scaledOutputX = w;
scaledOutputY = h;
}
*x = (w - scaledOutputX) / 2;
*y = (h - scaledOutputY) / 2;
*width = scaledOutputX;
*height = scaledOutputY;
if (fullWidth)
*fullWidth = w;
if (fullHeight)
*fullHeight = h;
}
void LatteRenderTarget_copyToBackbuffer(LatteTextureView* textureView, bool isPadView)
{
// make sure texture is updated to latest data in cache
LatteTexture_UpdateDataToLatest(textureView->baseTexture);
// mark source texture as still in use
LatteTC_MarkTextureStillInUse(textureView->baseTexture);
sint32 effectiveWidth, effectiveHeight;
textureView->baseTexture->GetEffectiveSize(effectiveWidth, effectiveHeight, 0);
_currentOutputImageWidth = effectiveWidth;
_currentOutputImageHeight = effectiveHeight;
sint32 imageX, imageY;
sint32 imageWidth, imageHeight;
sint32 fullscreenWidth, fullscreenHeight;
LatteRenderTarget_getScreenImageArea(&imageX, &imageY, &imageWidth, &imageHeight, &fullscreenWidth, &fullscreenHeight, isPadView);
bool clearBackground = false;
if (imageWidth != fullscreenWidth || imageHeight != fullscreenHeight)
clearBackground = true;
const bool renderUpsideDown = ActiveSettings::RenderUpsideDownEnabled();
// force disable bicubic scaling if output resolution is equal/smaller than input resolution
const bool downscaling = (imageWidth <= effectiveWidth || imageHeight <= effectiveHeight);
// check for graphic pack shaders
RendererOutputShader* shader = nullptr;
LatteTextureView::MagFilter filter = LatteTextureView::MagFilter::kLinear;
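	// graphic packs may override the output shader; scaling-direction specific shaders (up-/downscaling) take priority over the generic output shader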
for(const auto& gp : GraphicPack2::GetActiveGraphicPacks())
{
if(downscaling)
{
shader = gp->GetDownscalingShader(renderUpsideDown);
if (shader)
{
filter = gp->GetDownscalingMagFilter();
break;
}
}
else
{
shader = gp->GetUpscalingShader(renderUpsideDown);
if (shader)
{
filter = gp->GetUpscalingMagFilter();
break;
}
}
shader = gp->GetOuputShader(renderUpsideDown);
if (shader)
{
filter = downscaling ? gp->GetDownscalingMagFilter() : gp->GetUpscalingMagFilter();
break;
}
}
if (shader == nullptr)
{
sint32 scaling_filter = downscaling ? GetConfig().downscale_filter : GetConfig().upscale_filter;
if (g_renderer->GetType() == RendererAPI::Vulkan)
{
// force linear or nearest neighbor filter
if(scaling_filter != kLinearFilter && scaling_filter != kNearestNeighborFilter)
scaling_filter = kLinearFilter;
}
if (scaling_filter == kLinearFilter)
{
if(renderUpsideDown)
shader = RendererOutputShader::s_copy_shader_ud;
else
shader = RendererOutputShader::s_copy_shader;
filter = LatteTextureView::MagFilter::kLinear;
}
else if (scaling_filter == kBicubicFilter)
{
if (renderUpsideDown)
shader = RendererOutputShader::s_bicubic_shader_ud;
else
shader = RendererOutputShader::s_bicubic_shader;
filter = LatteTextureView::MagFilter::kNearestNeighbor;
}
else if (scaling_filter == kBicubicHermiteFilter)
{
if (renderUpsideDown)
shader = RendererOutputShader::s_hermit_shader_ud;
else
shader = RendererOutputShader::s_hermit_shader;
filter = LatteTextureView::MagFilter::kLinear;
}
else if (scaling_filter == kNearestNeighborFilter)
{
if (renderUpsideDown)
shader = RendererOutputShader::s_copy_shader_ud;
else
shader = RendererOutputShader::s_copy_shader;
filter = LatteTextureView::MagFilter::kNearestNeighbor;
}
}
cemu_assert(shader);
g_renderer->DrawBackbufferQuad(textureView, shader, filter==LatteTextureView::MagFilter::kLinear, imageX, imageY, imageWidth, imageHeight, isPadView, clearBackground);
g_renderer->HandleScreenshotRequest(textureView, isPadView);
if (!g_renderer->ImguiBegin(!isPadView))
return;
swkbd_render(!isPadView);
nn::erreula::render(!isPadView);
LatteOverlay_render(isPadView);
g_renderer->ImguiEnd();
}
void LatteRenderTarget_itHLECopyColorBufferToScanBuffer(MPTR colorBufferPtr, uint32 colorBufferWidth, uint32 colorBufferHeight, uint32 colorBufferSliceIndex, uint32 colorBufferFormat, uint32 colorBufferPitch, Latte::E_HWTILEMODE colorBufferTilemode, uint32 colorBufferSwizzle, uint32 renderTarget)
{
cemu_assert_debug(colorBufferSliceIndex == 0); // todo - support for non-zero slice
LatteTextureView* texView = LatteTC_GetTextureSliceViewOrTryCreate(colorBufferPtr, MPTR_NULL, (Latte::E_GX2SURFFMT)colorBufferFormat, colorBufferTilemode, colorBufferWidth, colorBufferHeight, 1, colorBufferPitch, colorBufferSwizzle, 0, 0, true);
if (!texView)
{
return;
}
auto getVPADScreenActive = [](size_t n) -> std::pair<bool, bool> {
auto controller = InputManager::instance().get_vpad_controller(n);
if (!controller)
return {false,false};
auto pressed = controller->is_screen_active();
auto toggle = controller->is_screen_active_toggle();
return {pressed && !toggle, pressed && toggle};
};
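	// TAB (or a gamepad's screen-swap button) temporarily swaps the displayed screen, while CTRL+TAB (or the toggle variant) flips which screen is primary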
const bool tabPressed = gui_isKeyDown(PlatformKeyCodes::TAB);
const bool ctrlPressed = gui_isKeyDown(PlatformKeyCodes::LCONTROL);
const auto [vpad0Active, vpad0Toggle] = getVPADScreenActive(0);
const auto [vpad1Active, vpad1Toggle] = getVPADScreenActive(1);
const bool altScreenRequested = (!ctrlPressed && tabPressed) || vpad0Active || vpad1Active;
const bool togglePressed = (ctrlPressed && tabPressed) || vpad0Toggle || vpad1Toggle;
static bool togglePressedLast = false;
bool& isDRCPrimary = LatteGPUState.isDRCPrimary;
if(togglePressed && !togglePressedLast)
isDRCPrimary = !isDRCPrimary;
togglePressedLast = togglePressed;
bool showDRC = swkbd_hasKeyboardInputHook() == false && (isDRCPrimary ^ altScreenRequested);
if ((renderTarget & RENDER_TARGET_DRC) && g_renderer->IsPadWindowActive())
LatteRenderTarget_copyToBackbuffer(texView, true);
if (((renderTarget & RENDER_TARGET_TV) && !showDRC) || ((renderTarget & RENDER_TARGET_DRC) && showDRC))
LatteRenderTarget_copyToBackbuffer(texView, false);
}
// returns the current size of the virtual viewport (not the same as effective size, which can be influenced by texture rules)
void LatteRenderTarget_GetCurrentVirtualViewportSize(sint32* viewportWidth, sint32* viewportHeight)
{
*viewportWidth = sLatteRenderTargetState.currentGuestViewport.width;
*viewportHeight = sLatteRenderTargetState.currentGuestViewport.height;
}
void LatteRenderTarget_updateViewport()
{
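	// reconstruct the guest viewport from the scale/offset registers: width = 2 * XSCALE, x = XOFFSET - XSCALE, height = -2 * YSCALE, y = YOFFSET + YSCALE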
float vpWidth = LatteGPUState.contextNew.PA_CL_VPORT_XSCALE.get_SCALE() / 0.5f;
float vpX = LatteGPUState.contextNew.PA_CL_VPORT_XOFFSET.get_OFFSET() - LatteGPUState.contextNew.PA_CL_VPORT_XSCALE.get_SCALE();
float vpHeight = LatteGPUState.contextNew.PA_CL_VPORT_YSCALE.get_SCALE() / -0.5f;
float vpY = LatteGPUState.contextNew.PA_CL_VPORT_YOFFSET.get_OFFSET() + LatteGPUState.contextNew.PA_CL_VPORT_YSCALE.get_SCALE();
bool halfZ = LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_CLIP_SPACE_DEF();
// calculate near/far
float farZ;
float nearZ;
float s = LatteGPUState.contextNew.PA_CL_VPORT_ZSCALE.get_SCALE();
float b = LatteGPUState.contextNew.PA_CL_VPORT_ZOFFSET.get_OFFSET();
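	// with GL-style clip space (halfZ == false) depth [-1,1] maps to [offset-scale, offset+scale]; with DX-style [0,1] clip space the near plane equals the offset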
if (halfZ == false)
{
farZ = s + b;
nearZ = b - s;
}
else
{
farZ = s + b;
nearZ = b;
}
sLatteRenderTargetState.currentGuestViewport.width = vpWidth;
sLatteRenderTargetState.currentGuestViewport.height = vpHeight;
if (sLatteRenderTargetState.renderTargetIsResized)
{
vpX *= ((float)sLatteRenderTargetState.currentEffectiveSize.width / (float)sLatteRenderTargetState.currentRenderSize.width);
vpY *= ((float)sLatteRenderTargetState.currentEffectiveSize.height / (float)sLatteRenderTargetState.currentRenderSize.height);
vpWidth *= ((float)sLatteRenderTargetState.currentEffectiveSize.width / (float)sLatteRenderTargetState.currentRenderSize.width);
vpHeight *= ((float)sLatteRenderTargetState.currentEffectiveSize.height / (float)sLatteRenderTargetState.currentRenderSize.height);
}
g_renderer->renderTarget_setViewport(vpX, vpY, vpWidth, vpHeight, nearZ, farZ, halfZ);
}
void LatteRenderTarget_updateScissorBox()
{
// update scissor box
uint32 scissorX = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_TL.get_TL_X();
uint32 scissorY = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_TL.get_TL_Y();
uint32 scissorWidth = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_X() - scissorX;
uint32 scissorHeight = LatteGPUState.contextNew.PA_SC_GENERIC_SCISSOR_BR.get_BR_Y() - scissorY;
if( sLatteRenderTargetState.renderTargetIsResized )
{
scissorX = (sint32)((float)scissorX * ((float)sLatteRenderTargetState.currentEffectiveSize.width / (float)sLatteRenderTargetState.currentRenderSize.width));
scissorY = (sint32)((float)scissorY * ((float)sLatteRenderTargetState.currentEffectiveSize.height / (float)sLatteRenderTargetState.currentRenderSize.height));
scissorWidth = (sint32)((float)scissorWidth * ((float)sLatteRenderTargetState.currentEffectiveSize.width / (float)sLatteRenderTargetState.currentRenderSize.width));
scissorHeight = (sint32)((float)scissorHeight * ((float)sLatteRenderTargetState.currentEffectiveSize.height / (float)sLatteRenderTargetState.currentRenderSize.height));
}
if( scissorX != prevScissorX || scissorY != prevScissorY || scissorWidth != prevScissorWidth || scissorHeight != prevScissorHeight )
{
g_renderer->renderTarget_setScissor(scissorX, scissorY, scissorWidth, scissorHeight);
prevScissorX = scissorX;
prevScissorY = scissorY;
prevScissorWidth = scissorWidth;
prevScissorHeight = scissorHeight;
}
}
void LatteRenderTarget_unloadAll()
{
if (g_emptyFBO)
{
LatteMRT::DeleteCachedFBO(g_emptyFBO);
g_emptyFBO = nullptr;
}
}
| 43,913 | C++ | .cpp | 994 | 41.479879 | 504 | 0.780434 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,265 | LatteShader.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteShader.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/ISA/LatteReg.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
#include "Cafe/OS/libs/gx2/GX2.h" // todo - remove dependency
#include "Cafe/GraphicPack/GraphicPack2.h"
#include "util/helpers/StringParser.h"
#include "config/ActiveSettings.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "util/containers/flat_hash_map.hpp"
#include <cinttypes>
// experimental new decompiler (WIP)
#include "util/Zir/EmitterGLSL/ZpIREmitGLSL.h"
#include "util/Zir/Core/ZpIRDebug.h"
#include "Cafe/HW/Latte/Transcompiler/LatteTC.h"
#include "Cafe/HW/Latte/ShaderInfo/ShaderInfo.h"
struct _ShaderHashCache
{
uint64 prevHash1;
uint64 prevHash2;
uint32* prevProgramCode;
uint32 prevProgramSize;
};
_ShaderHashCache hashCacheVS = { 0 };
_ShaderHashCache hashCacheGS = { 0 };
_ShaderHashCache hashCachePS = { 0 };
LatteFetchShader* _activeFetchShader = nullptr;
LatteDecompilerShader* _activeVertexShader = nullptr;
LatteDecompilerShader* _activeGeometryShader = nullptr;
LatteDecompilerShader* _activePixelShader = nullptr;
// runtime shader cache
using SHRC_CACHE_TYPE = ska::flat_hash_map<uint64, LatteDecompilerShader*>;
SHRC_CACHE_TYPE sVertexShaders(512);
SHRC_CACHE_TYPE sGeometryShaders(512);
SHRC_CACHE_TYPE sPixelShaders(512);
uint64 _shaderBaseHash_vs;
uint64 _shaderBaseHash_gs;
uint64 _shaderBaseHash_ps;
std::atomic_int g_compiled_shaders_total = 0;
std::atomic_int g_compiled_shaders_async = 0;
LatteFetchShader* LatteSHRC_GetActiveFetchShader()
{
return _activeFetchShader;
}
LatteDecompilerShader* LatteSHRC_GetActiveVertexShader()
{
return _activeVertexShader;
}
LatteDecompilerShader* LatteSHRC_GetActiveGeometryShader()
{
return _activeGeometryShader;
}
LatteDecompilerShader* LatteSHRC_GetActivePixelShader()
{
return _activePixelShader;
}
inline ska::flat_hash_map<uint64, LatteDecompilerShader*>& LatteSHRC_GetCacheByType(LatteConst::ShaderType shaderType)
{
if (shaderType == LatteConst::ShaderType::Vertex)
return sVertexShaders;
else if (shaderType == LatteConst::ShaderType::Geometry)
return sGeometryShaders;
cemu_assert_debug(shaderType == LatteConst::ShaderType::Pixel);
return sPixelShaders;
}
// calculate hash from shader binary
// this algorithm could be more efficient since we could leverage the fact that the size is always aligned to 8 bytes
// but since this is baked into the shader names used for gfx packs and shader caches we can't really change this
void _calcShaderHashGeneric(uint32* programCode, uint32 programSize, uint64& outputHash1, uint64& outputHash2)
{
outputHash1 = 0;
outputHash2 = 0;
for (uint32 i = 0; i < programSize / 4; i++)
{
uint32 temp = programCode[i];
outputHash1 += (uint64)temp;
outputHash2 ^= (uint64)temp;
outputHash1 = (outputHash1 << 3) | (outputHash1 >> 61);
outputHash2 = (outputHash2 >> 7) | (outputHash2 << 57);
}
}
void _calculateShaderProgramHash(uint32* programCode, uint32 programSize, _ShaderHashCache* hashCache, uint64* outputHash1, uint64* outputHash2)
{
uint64 progHash1 = 0;
uint64 progHash2 = 0;
if (!programCode)
{
hashCache->prevProgramCode = NULL;
hashCache->prevProgramSize = 0;
hashCache->prevHash1 = 0;
hashCache->prevHash2 = 0;
}
else if (hashCache->prevProgramCode != programCode || hashCache->prevProgramSize != programSize)
{
_calcShaderHashGeneric(programCode, programSize, progHash1, progHash2);
hashCache->prevProgramCode = programCode;
hashCache->prevProgramSize = programSize;
hashCache->prevHash1 = progHash1;
hashCache->prevHash2 = progHash2;
}
else
{
progHash1 = hashCache->prevHash1;
progHash2 = hashCache->prevHash2;
}
*outputHash1 = progHash1;
*outputHash2 = progHash2;
}
void LatteSHRC_ResetCachedShaderHash()
{
hashCacheVS.prevProgramCode = 0;
hashCacheVS.prevProgramSize = 0;
hashCacheGS.prevProgramCode = 0;
hashCacheGS.prevProgramSize = 0;
hashCachePS.prevProgramCode = 0;
hashCachePS.prevProgramSize = 0;
}
LatteShaderPSInputTable _activePSImportTable;
LatteShaderPSInputTable* LatteSHRC_GetPSInputTable()
{
return &_activePSImportTable;
}
void LatteSHRC_RemoveFromCache(LatteDecompilerShader* shader)
{
bool removed = false;
auto& cache = LatteSHRC_GetCacheByType(shader->shaderType);
// remove from hashtable
auto baseIt = cache.find(shader->baseHash);
if (baseIt == cache.end())
{
cemu_assert_suspicious(); // deleting from runtime cache but shader is not present?
}
else if (baseIt->second == shader)
{
cemu_assert_debug(baseIt->second == shader);
cache.erase(baseIt);
if (shader->next)
{
cemu_assert_debug(shader->baseHash == shader->next->baseHash);
cache.emplace(shader->baseHash, shader->next);
}
shader->next = 0;
removed = true;
}
else
{
// remove from chain
LatteDecompilerShader* shaderChain = baseIt->second;
while (shaderChain->next)
{
if (shaderChain->next == shader)
{
shaderChain->next = shaderChain->next->next;
removed = true;
break;
			}
			shaderChain = shaderChain->next; // advance to the next entry, otherwise the loop never terminates when the shader sits further down the chain
		}
}
cemu_assert(removed);
}
void LatteSHRC_RemoveFromCacheByHash(uint64 shader_base_hash, uint64 shader_aux_hash, LatteConst::ShaderType type)
{
LatteDecompilerShader* shader = nullptr;
if (type == LatteConst::ShaderType::Vertex)
shader = LatteSHRC_FindVertexShader(shader_base_hash, shader_aux_hash);
else if (type == LatteConst::ShaderType::Geometry)
shader = LatteSHRC_FindGeometryShader(shader_base_hash, shader_aux_hash);
else if (type == LatteConst::ShaderType::Pixel)
shader = LatteSHRC_FindPixelShader(shader_base_hash, shader_aux_hash);
if (shader)
LatteSHRC_RemoveFromCache(shader);
}
void LatteShader_free(LatteDecompilerShader* shader)
{
LatteSHRC_RemoveFromCache(shader);
if (shader->shader)
delete shader->shader;
shader->shader = nullptr;
delete shader;
}
// both vertex and geometry/pixel shader depend on PS inputs
// we prepare the PS import info in advance
void LatteShader_UpdatePSInputs(uint32* contextRegisters)
{
// PS control
uint32 psControl0 = contextRegisters[mmSPI_PS_IN_CONTROL_0];
uint32 spi0_positionEnable = (psControl0 >> 8) & 1;
uint32 spi0_positionCentroid = (psControl0 >> 9) & 1;
cemu_assert_debug(spi0_positionCentroid == 0); // controls gl_FragCoord
uint32 spi0_positionAddr = (psControl0 >> 10) & 0x1F; // controls gl_FragCoord
uint32 spi0_paramGen = (psControl0 >> 15) & 0xF; // used for gl_PointCoords
uint32 spi0_paramGenAddr = (psControl0 >> 19) & 0x7F;
sint32 importIndex = 0;
//cemu_assert_debug(((psControl0>>26)&3) == 1); // BARYC_SAMPLE_CNTL
//cemu_assert_debug((psControl0&(1 << 28)) == 0); // PERSP_GRADIENT_ENA
//cemu_assert_debug((psControl0&(1 << 29)) == 0); // LINEAR_GRADIENT_ENA
// if LINEAR_GRADIENT_ENA_bit is enabled, the pixel shader accesses gl_ClipSize?
// VS/GS parameters
uint32 numPSInputs = contextRegisters[mmSPI_PS_IN_CONTROL_0] & 0x3F;
uint64 key = 0;
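	// 'key' uniquely identifies the PS input configuration; it is stored in _activePSImportTable and later mixed into the vertex shader base hash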
if (spi0_positionEnable)
{
key += (uint64)spi0_positionAddr + 1;
}
// parameter gen
if (spi0_paramGen != 0)
{
key += std::rotr<uint64>(spi0_paramGen, 7);
key += std::rotr<uint64>(spi0_paramGenAddr, 3);
_activePSImportTable.paramGen = spi0_paramGen;
_activePSImportTable.paramGenGPR = spi0_paramGenAddr;
}
else
{
_activePSImportTable.paramGen = 0;
}
// semantic imports from vertex shader
#ifdef CEMU_DEBUG_ASSERT
uint8 semanticMask[256 / 8] = { 0 };
#endif
cemu_assert_debug(numPSInputs <= GPU7_PS_MAX_INPUTS);
numPSInputs = std::min<uint32>(numPSInputs, GPU7_PS_MAX_INPUTS);
for (uint32 f = 0; f < numPSInputs; f++)
{
uint32 psInputControl = contextRegisters[mmSPI_PS_INPUT_CNTL_0 + f];
uint32 psSemanticId = (psInputControl & 0xFF);
uint8 defaultValue = (psInputControl>>8)&3;
// default:
// 0 -> 0.0 0.0 0.0 0.0
// 1 -> 0.0 0.0 0.0 1.0
// 2 -> 1.0 1.0 1.0 0.0
// 3 -> 1.0 1.0 1.0 1.0
cemu_assert_debug(defaultValue <= 1);
uint32 uknBits = psInputControl & ~((0xFF)|(0x3<<8) | (1 << 10) | (1 << 12));
uknBits &= ~0x800; // FLAT_SHADE
//cemu_assert_debug(uknBits == 0);
//cemu_assert_debug(((psInputControl >> 11) & 1) == 0); // centroid
//cemu_assert_debug(((psInputControl >> 17) & 1) == 0); // point sprite coord
cemu_assert_debug(psSemanticId != 0xFF);
key += (uint64)psInputControl;
key = std::rotl<uint64>(key, 7);
if (spi0_positionEnable && f == spi0_positionAddr)
{
_activePSImportTable.import[f].semanticId = LATTE_ANALYZER_IMPORT_INDEX_SPIPOSITION;
_activePSImportTable.import[f].isFlat = false;
_activePSImportTable.import[f].isNoPerspective = false;
key += (uint64)0x33;
}
else
{
#ifdef CEMU_DEBUG_ASSERT
if (semanticMask[psSemanticId >> 3] & (1 << (psSemanticId & 7)))
{
cemuLog_logDebug(LogType::Force, "SemanticId already used");
}
semanticMask[psSemanticId >> 3] |= (1 << (psSemanticId & 7));
#endif
_activePSImportTable.import[f].semanticId = psSemanticId;
_activePSImportTable.import[f].isFlat = (psInputControl&(1 << 10)) != 0;
_activePSImportTable.import[f].isNoPerspective = (psInputControl&(1 << 12)) != 0;
}
}
_activePSImportTable.key = key;
_activePSImportTable.count = numPSInputs;
}
void LatteShader_CreateRendererShader(LatteDecompilerShader* shader, bool compileAsync)
{
if (shader->hasError )
{
cemuLog_log(LogType::Force, "Unable to compile shader {:016x}", shader->baseHash);
return;
}
GraphicPack2::GP_SHADER_TYPE gpShaderType;
RendererShader::ShaderType shaderType;
if (shader->shaderType == LatteConst::ShaderType::Vertex)
{
shaderType = RendererShader::ShaderType::kVertex;
gpShaderType = GraphicPack2::GP_SHADER_TYPE::VERTEX;
}
else if (shader->shaderType == LatteConst::ShaderType::Geometry)
{
shaderType = RendererShader::ShaderType::kGeometry;
gpShaderType = GraphicPack2::GP_SHADER_TYPE::GEOMETRY;
}
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
{
shaderType = RendererShader::ShaderType::kFragment;
gpShaderType = GraphicPack2::GP_SHADER_TYPE::PIXEL;
}
// check if a custom shader is present
std::string shaderSrc;
const std::string* customShaderSrc = GraphicPack2::FindCustomShaderSource(shader->baseHash, shader->auxHash, gpShaderType, g_renderer->GetType() == RendererAPI::Vulkan);
if (customShaderSrc)
{
shaderSrc.assign(*customShaderSrc);
shader->isCustomShader = true;
}
else
shaderSrc.assign(shader->strBuf_shaderSource->c_str());
if (shaderType == RendererShader::ShaderType::kVertex &&
(shader->baseHash == 0x15bc7edf9de2ed30 || shader->baseHash == 0x83a697d61a3b9202 ||
shader->baseHash == 0x97bc44a5028381c6 || shader->baseHash == 0x24838b84d15a1da1))
{
cemuLog_logDebug(LogType::Force, "Filtered shader to avoid AMD crash");
shader->shader = nullptr;
shader->hasError = true;
return;
}
// create shader
shader->shader = g_renderer->shader_create(shaderType, shader->baseHash, shader->auxHash, shaderSrc, true, shader->isCustomShader);
if (shader->shader == nullptr)
shader->hasError = true;
// after renderer shader creation we can throw away any intermediate info
LatteShader_CleanupAfterCompile(shader);
}
void LatteShader_FinishCompilation(LatteDecompilerShader* shader)
{
if (shader->hasError)
{
cemuLog_logDebug(LogType::Force, "LatteShader_finishCompilation(): Skipped because of error in shader {:x}", shader->baseHash);
return;
}
shader->shader->WaitForCompiled();
LatteShader_prepareSeparableUniforms(shader);
LatteShader_CleanupAfterCompile(shader);
}
void LatteSHRC_RegisterShader(LatteDecompilerShader* shader, uint64 baseHash, uint64 auxHash)
{
auto& cache = LatteSHRC_GetCacheByType(shader->shaderType);
shader->baseHash = baseHash;
shader->auxHash = auxHash;
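	// shaders are bucketed by baseHash; variants with a different auxHash are chained via the 'next' pointer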
auto it = cache.find(baseHash);
if (it == cache.end())
{
shader->next = nullptr;
cache.emplace(shader->baseHash, shader);
}
else
{
shader->next = it->second->next;
it->second->next = shader;
}
}
LatteDecompilerShader* LatteSHRC_GetFromChain(LatteDecompilerShader* baseShader, uint64 baseHash, uint64 auxHash)
{
while (baseShader && baseShader->auxHash != auxHash)
baseShader = baseShader->next;
return baseShader;
}
LatteDecompilerShader* LatteSHRC_Get(SHRC_CACHE_TYPE& cache, uint64 baseHash, uint64 auxHash)
{
auto it = cache.find(baseHash);
if (it == cache.end())
return nullptr;
LatteDecompilerShader* baseShader = it->second;
if (!baseShader)
return nullptr;
while (baseShader && baseShader->auxHash != auxHash)
baseShader = baseShader->next;
return baseShader;
}
LatteDecompilerShader* LatteSHRC_FindVertexShader(uint64 baseHash, uint64 auxHash)
{
return LatteSHRC_Get(sVertexShaders, baseHash, auxHash);
}
LatteDecompilerShader* LatteSHRC_FindGeometryShader(uint64 baseHash, uint64 auxHash)
{
return LatteSHRC_Get(sGeometryShaders, baseHash, auxHash);
}
LatteDecompilerShader* LatteSHRC_FindPixelShader(uint64 baseHash, uint64 auxHash)
{
return LatteSHRC_Get(sPixelShaders, baseHash, auxHash);
}
// update the currently active fetch shader
void LatteShaderSHRC_UpdateFetchShader()
{
_activeFetchShader = LatteFetchShader::FindByGPUState();
}
void LatteShader_CleanupAfterCompile(LatteDecompilerShader* shader)
{
if (shader->strBuf_shaderSource)
{
delete shader->strBuf_shaderSource;
shader->strBuf_shaderSource = nullptr;
}
}
void LatteShader_DumpShader(uint64 baseHash, uint64 auxHash, LatteDecompilerShader* shader)
{
if (!ActiveSettings::DumpShadersEnabled())
return;
const char* suffix = "";
if (shader->shaderType == LatteConst::ShaderType::Vertex)
suffix = "vs";
else if (shader->shaderType == LatteConst::ShaderType::Geometry)
suffix = "gs";
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
suffix = "ps";
fs::path dumpPath = "dump/shaders";
dumpPath /= fmt::format("{:016x}_{:016x}_{}.txt", baseHash, auxHash, suffix);
FileStream* fs = FileStream::createFile2(dumpPath);
if (fs)
{
if (shader->strBuf_shaderSource)
fs->writeData(shader->strBuf_shaderSource->c_str(), shader->strBuf_shaderSource->getLen());
delete fs;
}
}
void LatteShader_DumpRawShader(uint64 baseHash, uint64 auxHash, uint32 type, uint8* programCode, uint32 programLen)
{
if (!ActiveSettings::DumpShadersEnabled())
return;
const char* suffix = "";
if (type == SHADER_DUMP_TYPE_FETCH)
suffix = "fs";
else if (type == SHADER_DUMP_TYPE_VERTEX)
suffix = "vs";
else if (type == SHADER_DUMP_TYPE_GEOMETRY)
suffix = "gs";
else if (type == SHADER_DUMP_TYPE_PIXEL)
suffix = "ps";
else if (type == SHADER_DUMP_TYPE_COPY)
suffix = "copy";
else if (type == SHADER_DUMP_TYPE_COMPUTE)
suffix = "compute";
fs::path dumpPath = "dump/shaders";
dumpPath /= fmt::format("{:016x}_{:016x}_{}.bin", baseHash, auxHash, suffix);
FileStream* fs = FileStream::createFile2(dumpPath);
if (fs)
{
fs->writeData(programCode, programLen);
delete fs;
}
}
void LatteSHRC_UpdateVSBaseHash(uint8* vertexShaderPtr, uint32 vertexShaderSize, bool usesGeometryShader)
{
uint32* vsProgramCode = (uint32*)vertexShaderPtr;
// update hash from vertex shader data
uint64 vsHash1 = 0;
uint64 vsHash2 = 0;
_calculateShaderProgramHash(vsProgramCode, vertexShaderSize, &hashCacheVS, &vsHash1, &vsHash2);
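// besides the program hash, the base hash also incorporates the fetch shader key, PS import table key and other state that affects the generated shader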
uint64 vsHash = vsHash1 + vsHash2 + _activeFetchShader->key + _activePSImportTable.key + (usesGeometryShader ? 0x1111ULL : 0ULL);
uint32 tmp = LatteGPUState.contextNew.PA_CL_VTE_CNTL.getRawValue() ^ 0x43F;
vsHash += tmp;
auto primitiveType = LatteGPUState.contextNew.VGT_PRIMITIVE_TYPE.get_PRIMITIVE_MODE();
if (primitiveType == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::RECTS)
{
vsHash += 13ULL;
}
else if (primitiveType == Latte::LATTE_VGT_PRIMITIVE_TYPE::E_PRIMITIVE_TYPE::POINTS)
{
// required for Vulkan since we have to write the pointsize in the shader
vsHash += 71ULL;
}
vsHash += (LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] ? 21 : 0);
// halfZ
if (LatteGPUState.contextNew.PA_CL_CLIP_CNTL.get_DX_CLIP_SPACE_DEF())
vsHash += 0x1537;
_shaderBaseHash_vs = vsHash;
}
void LatteSHRC_UpdateGSBaseHash(uint8* geometryShaderPtr, uint32 geometryShaderSize, uint8* geometryCopyShader, uint32 geometryCopyShaderSize)
{
// update hash from geometry shader data
uint64 gsHash1 = 0;
uint64 gsHash2 = 0;
_calculateShaderProgramHash((uint32*)geometryShaderPtr, geometryShaderSize, &hashCacheGS, &gsHash1, &gsHash2);
// get geometry shader
uint64 gsHash = gsHash1 + gsHash2;
gsHash += (uint64)_activeVertexShader->ringParameterCount;
gsHash += (LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] ? 21 : 0);
_shaderBaseHash_gs = gsHash;
}
void LatteSHRC_UpdatePSBaseHash(uint8* pixelShaderPtr, uint32 pixelShaderSize, bool usesGeometryShader)
{
uint32* psProgramCode = (uint32*)pixelShaderPtr;
// update hash from pixel shader data
uint64 psHash1 = 0;
uint64 psHash2 = 0;
_calculateShaderProgramHash(psProgramCode, pixelShaderSize, &hashCachePS, &psHash1, &psHash2);
// get vertex shader
uint64 psHash = psHash1 + psHash2 + _activePSImportTable.key + (usesGeometryShader ? hashCacheGS.prevHash1 : 0ULL);
_shaderBaseHash_ps = psHash;
}
uint64 LatteSHRC_CalcVSAuxHash(LatteDecompilerShader* vertexShader, uint32* contextRegisters)
{
// todo - include texture types in aux hash similar to how it is already done in pixel shader
// or maybe there is a way to figure out the proper texture types?
uint64 auxHash = 0;
if(vertexShader->hasStreamoutBufferWrite)
{
// hash stride for streamout buffers
for (uint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if(!vertexShader->streamoutBufferWriteMask[i])
continue;
uint32 bufferStride = contextRegisters[mmVGT_STRMOUT_VTX_STRIDE_0 + i * 4];
auxHash = std::rotl<uint64>(auxHash, 7);
auxHash += (uint64)bufferStride;
}
}
// textures can affect the shader. Either by their type (2D, 3D, cubemap) or by their format (float vs integer)
uint64 auxHashTex = 0;
for (uint8 i = 0; i < vertexShader->textureUnitListCount; i++)
{
uint8 t = vertexShader->textureUnitList[i];
uint32 word4 = contextRegisters[Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_VS + t * 7 + 4];
if ((word4 & 0x300) == 0x100)
{
// integer format
auxHashTex = std::rotl<uint64>(auxHashTex, 7);
auxHashTex += 0x333;
}
}
return auxHash + auxHashTex;
}
uint64 LatteSHRC_CalcGSAuxHash(LatteDecompilerShader* geometryShader)
{
// todo - include texture types in aux hash similar to how it is already done in pixel shader
return 0;
}
uint64 LatteSHRC_CalcPSAuxHash(LatteDecompilerShader* pixelShader, uint32* contextRegisters)
{
uint64 auxHash = 0;
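// the hash is mixed by 64-bit rotations before each value is added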
// CB_SHADER_MASK can remap pixel shader outputs
auxHash = (auxHash >> 3) | (auxHash << 61);
auxHash += (uint64)contextRegisters[mmCB_SHADER_MASK];
// alpha test
uint8 alphaTestFunc = contextRegisters[Latte::REGADDR::SX_ALPHA_TEST_CONTROL] & 0x7;
uint8 alphaTestEnable = (contextRegisters[Latte::REGADDR::SX_ALPHA_TEST_CONTROL] >> 3) & 1;
if (alphaTestEnable)
{
auxHash += (uint64)alphaTestFunc;
auxHash = (auxHash >> 3) | (auxHash << 61);
auxHash += 1;
}
// texture types (2D, 3D, cubemap etc.) affect the shader too
for (uint8 i = 0; i < pixelShader->textureUnitListCount; i++)
{
uint8 t = pixelShader->textureUnitList[i];
uint32 word0 = contextRegisters[Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_PS + t * 7 + 0];
uint32 dim = (word0 & 7);
auxHash = (auxHash << 3) | (auxHash >> 61);
auxHash += (uint64)dim;
}
return auxHash;
}
LatteDecompilerShader* LatteShader_CreateShaderFromDecompilerOutput(LatteDecompilerOutput_t& decompilerOutput, uint64 baseHash, bool calculateAuxHash, uint64 optionalAuxHash, uint32* contextRegister)
{
LatteDecompilerShader* shader = decompilerOutput.shader;
shader->baseHash = baseHash;
// copy resource mapping
if(g_renderer->GetType() == RendererAPI::Vulkan)
shader->resourceMapping = decompilerOutput.resourceMappingVK;
else
shader->resourceMapping = decompilerOutput.resourceMappingGL;
// copy texture info
shader->textureUnitMask2 = decompilerOutput.textureUnitMask;
// copy streamout info
shader->streamoutBufferWriteMask = decompilerOutput.streamoutBufferWriteMask;
shader->hasStreamoutBufferWrite = decompilerOutput.streamoutBufferWriteMask.any();
// copy uniform offsets
// for OpenGL these are retrieved in _prepareSeparableUniforms()
if (g_renderer->GetType() == RendererAPI::Vulkan)
{
shader->uniform.loc_remapped = decompilerOutput.uniformOffsetsVK.offset_remapped;
shader->uniform.loc_uniformRegister = decompilerOutput.uniformOffsetsVK.offset_uniformRegister;
shader->uniform.count_uniformRegister = decompilerOutput.uniformOffsetsVK.count_uniformRegister;
shader->uniform.loc_windowSpaceToClipSpaceTransform = decompilerOutput.uniformOffsetsVK.offset_windowSpaceToClipSpaceTransform;
shader->uniform.loc_alphaTestRef = decompilerOutput.uniformOffsetsVK.offset_alphaTestRef;
shader->uniform.loc_pointSize = decompilerOutput.uniformOffsetsVK.offset_pointSize;
shader->uniform.loc_fragCoordScale = decompilerOutput.uniformOffsetsVK.offset_fragCoordScale;
for (sint32 t = 0; t < LATTE_NUM_MAX_TEX_UNITS; t++)
{
if (decompilerOutput.uniformOffsetsVK.offset_texScale[t] >= 0)
{
LatteUniformTextureScaleEntry_t entry = { 0 };
entry.texUnit = t;
entry.uniformLocation = decompilerOutput.uniformOffsetsVK.offset_texScale[t];
shader->uniform.list_ufTexRescale.push_back(entry);
}
}
shader->uniform.loc_verticesPerInstance = decompilerOutput.uniformOffsetsVK.offset_verticesPerInstance;
for (sint32 t = 0; t < LATTE_NUM_STREAMOUT_BUFFER; t++)
shader->uniform.loc_streamoutBufferBase[t] = decompilerOutput.uniformOffsetsVK.offset_streamoutBufferBase[t];
shader->uniform.uniformRangeSize = decompilerOutput.uniformOffsetsVK.offset_endOfBlock;
}
else
{
shader->uniform.count_uniformRegister = decompilerOutput.uniformOffsetsGL.count_uniformRegister;
}
// calculate aux hash
if (calculateAuxHash)
{
if (decompilerOutput.shaderType == LatteConst::ShaderType::Vertex)
{
uint64 vsAuxHash = LatteSHRC_CalcVSAuxHash(shader, contextRegister);
shader->auxHash = vsAuxHash;
}
else if (decompilerOutput.shaderType == LatteConst::ShaderType::Geometry)
{
uint64 gsAuxHash = LatteSHRC_CalcGSAuxHash(shader);
shader->auxHash = gsAuxHash;
}
else if (decompilerOutput.shaderType == LatteConst::ShaderType::Pixel)
{
uint64 psAuxHash = LatteSHRC_CalcPSAuxHash(shader, contextRegister);
shader->auxHash = psAuxHash;
}
else
cemu_assert_debug(false);
}
else
{
shader->auxHash = optionalAuxHash;
}
return shader;
}
void LatteShader_GetDecompilerOptions(LatteDecompilerOptions& options, LatteConst::ShaderType shaderType, bool geometryShaderEnabled)
{
options.usesGeometryShader = geometryShaderEnabled;
options.spirvInstrinsics.hasRoundingModeRTEFloat32 = false;
if (g_renderer->GetType() == RendererAPI::Vulkan)
{
options.useTFViaSSBO = VulkanRenderer::GetInstance()->UseTFViaSSBO();
options.spirvInstrinsics.hasRoundingModeRTEFloat32 = VulkanRenderer::GetInstance()->HasSPRIVRoundingModeRTE32();
}
options.strictMul = g_current_game_profile->GetAccurateShaderMul() != AccurateShaderMulOption::False;
}
LatteDecompilerShader* LatteShader_CompileSeparableVertexShader2(uint64 baseHash, uint64& vsAuxHash, uint8* vertexShaderPtr, uint32 vertexShaderSize, bool usesGeometryShader, LatteFetchShader* fetchShader)
{
/* Analyze shader to gather general information about inputs/outputs */
Latte::ShaderDescription shaderDescription;
if (!shaderDescription.analyzeShaderCode(vertexShaderPtr, vertexShaderSize, LatteConst::ShaderType::Vertex))
{
assert_dbg();
return nullptr;
}
/* Create context dependent IO info for this shader */
//Latte::ShaderInstanceInfo
assert_dbg();
// todo - Use ShaderInstanceInfo when generating the GLSL (GLSL::Emit() should take a 'GLSLInfoSource' class which has a bunch of virtual methods for retrieving uniform names etc. We then override this class and plug in logic using ShaderInstanceInfo
/* Translate R600Plus to GLSL */
ZpIR::DebugPrinter irDebugPrinter;
LatteTCGenIR genIR;
genIR.setVertexShaderContext(fetchShader, LatteGPUState.contextRegister + mmSQ_VTX_SEMANTIC_0);
auto irObj = genIR.transcompileLatteToIR(vertexShaderPtr, vertexShaderSize, LatteTCGenIR::VERTEX);
// debug output (before register allocation)
irDebugPrinter.setShowPhysicalRegisters(false);
irDebugPrinter.debugPrint(irObj);
// register allocation
ZirPass::RegisterAllocatorForGLSL ra(irObj);
ra.applyPass();
// debug output (after register allocation)
irDebugPrinter.setShowPhysicalRegisters(true);
irDebugPrinter.setPhysicalRegisterNameSource(ZirPass::RegisterAllocatorForGLSL::DebugPrintHelper_getPhysRegisterName);
irDebugPrinter.debugPrint(irObj);
// gen GLSL
StringBuf glslSourceBuffer(64 * 1024);
// emit GLSL header
assert_dbg(); // todo
// emit main
ZirEmitter::GLSL emitter;
emitter.Emit(irObj, &glslSourceBuffer);
// debug copy to string
std::string dbg;
dbg.insert(0, glslSourceBuffer.c_str(), glslSourceBuffer.getLen());
assert_dbg();
return nullptr;
}
// compile new vertex shader (relies partially on current state)
LatteDecompilerShader* LatteShader_CompileSeparableVertexShader(uint64 baseHash, uint64& vsAuxHash, uint8* vertexShaderPtr, uint32 vertexShaderSize, bool usesGeometryShader, LatteFetchShader* fetchShader)
{
// new decompiler test
//LatteShader_CompileSeparableVertexShader2(baseHash, vsAuxHash, vertexShaderPtr, vertexShaderSize, usesGeometryShader, fetchShader);
// legacy decompiler
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Vertex, usesGeometryShader);
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompileVertexShader(_shaderBaseHash_vs, LatteGPUState.contextRegister, vertexShaderPtr, vertexShaderSize, fetchShader, options, &decompilerOutput);
LatteDecompilerShader* vertexShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, baseHash, true, 0, LatteGPUState.contextRegister);
vsAuxHash = vertexShader->auxHash;
if (vertexShader->hasError == false)
{
uint8* fsProgramCode = (uint8*)memory_getPointerFromPhysicalOffset(LatteGPUState.contextRegister[mmSQ_PGM_START_FS + 0] << 8);
uint32 fsProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_FS + 1] << 3;
LatteShaderCache_writeSeparableVertexShader(vertexShader->baseHash, vertexShader->auxHash, fsProgramCode, fsProgramSize, vertexShaderPtr, vertexShaderSize, LatteGPUState.contextRegister, usesGeometryShader);
}
LatteShader_DumpShader(vertexShader->baseHash, vertexShader->auxHash, vertexShader);
LatteShader_DumpRawShader(vertexShader->baseHash, vertexShader->auxHash, SHADER_DUMP_TYPE_VERTEX, vertexShaderPtr, vertexShaderSize);
LatteShader_CreateRendererShader(vertexShader, false);
performanceMonitor.numCompiledVS++;
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
if (vertexShader->shader)
vertexShader->shader->PreponeCompilation(true);
LatteShader_FinishCompilation(vertexShader);
}
LatteSHRC_RegisterShader(vertexShader, vertexShader->baseHash, vertexShader->auxHash);
return vertexShader;
}
LatteDecompilerShader* LatteShader_CompileSeparableGeometryShader(uint64 baseHash, uint8* geometryShaderPtr, uint32 geometryShaderSize, uint8* geometryCopyShader, uint32 geometryCopyShaderSize)
{
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Geometry, true);
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompileGeometryShader(_shaderBaseHash_gs, LatteGPUState.contextRegister, geometryShaderPtr, geometryShaderSize, geometryCopyShader, geometryCopyShaderSize, _activeVertexShader->ringParameterCount, options, &decompilerOutput);
LatteDecompilerShader* geometryShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, baseHash, true, 0, LatteGPUState.contextRegister);
if (geometryShader->hasError == false)
{
LatteShaderCache_writeSeparableGeometryShader(geometryShader->baseHash, geometryShader->auxHash, geometryShaderPtr, geometryShaderSize, geometryCopyShader, geometryCopyShaderSize, LatteGPUState.contextRegister, LatteGPUState.contextNew.GetSpecialStateValues(), _activeVertexShader->ringParameterCount);
}
LatteShader_DumpShader(geometryShader->baseHash, geometryShader->auxHash, geometryShader);
LatteShader_DumpRawShader(geometryShader->baseHash, geometryShader->auxHash, SHADER_DUMP_TYPE_GEOMETRY, geometryShaderPtr, geometryShaderSize);
LatteShader_DumpRawShader(geometryShader->baseHash, geometryShader->auxHash, SHADER_DUMP_TYPE_COPY, geometryCopyShader, geometryCopyShaderSize);
LatteShader_CreateRendererShader(geometryShader, false);
performanceMonitor.numCompiledGS++;
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
if (geometryShader->shader)
geometryShader->shader->PreponeCompilation(true);
LatteShader_FinishCompilation(geometryShader);
}
LatteSHRC_RegisterShader(geometryShader, geometryShader->baseHash, geometryShader->auxHash);
return geometryShader;
}
LatteDecompilerShader* LatteShader_CompileSeparablePixelShader(uint64 baseHash, uint64& psAuxHash, uint8* pixelShaderPtr, uint32 pixelShaderSize, bool usesGeometryShader)
{
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Pixel, usesGeometryShader);
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompilePixelShader(baseHash, LatteGPUState.contextRegister, pixelShaderPtr, pixelShaderSize, options, &decompilerOutput);
LatteDecompilerShader* pixelShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, baseHash, true, 0, LatteGPUState.contextRegister);
psAuxHash = pixelShader->auxHash;
LatteShader_DumpShader(_shaderBaseHash_ps, psAuxHash, pixelShader);
LatteShader_DumpRawShader(_shaderBaseHash_ps, psAuxHash, SHADER_DUMP_TYPE_PIXEL, pixelShaderPtr, pixelShaderSize);
LatteShader_CreateRendererShader(pixelShader, false);
performanceMonitor.numCompiledPS++;
if (pixelShader->hasError == false)
{
LatteShaderCache_writeSeparablePixelShader(_shaderBaseHash_ps, psAuxHash, pixelShaderPtr, pixelShaderSize, LatteGPUState.contextRegister, usesGeometryShader);
}
if (g_renderer->GetType() == RendererAPI::OpenGL)
{
if (pixelShader->shader)
pixelShader->shader->PreponeCompilation(true);
LatteShader_FinishCompilation(pixelShader);
}
LatteSHRC_RegisterShader(pixelShader, _shaderBaseHash_ps, psAuxHash);
return pixelShader;
}
void LatteSHRC_UpdateVertexShader(uint8* vertexShaderPtr, uint32 vertexShaderSize, bool usesGeometryShader)
{
// todo - should include VTX_SEMANTIC table in state
LatteSHRC_UpdateVSBaseHash(vertexShaderPtr, vertexShaderSize, usesGeometryShader);
uint64 vsAuxHash = 0;
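// look up the base hash first, then resolve the matching aux-hash variant within its chain; compile a new shader if no match is found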
auto itBaseShader = sVertexShaders.find(_shaderBaseHash_vs);
LatteDecompilerShader* vertexShader = nullptr;
if (itBaseShader != sVertexShaders.end())
{
vsAuxHash = LatteSHRC_CalcVSAuxHash(itBaseShader->second, LatteGPUState.contextRegister);
vertexShader = LatteSHRC_GetFromChain(itBaseShader->second, _shaderBaseHash_vs, vsAuxHash);
}
if (!vertexShader)
vertexShader = LatteShader_CompileSeparableVertexShader(_shaderBaseHash_vs, vsAuxHash, vertexShaderPtr, vertexShaderSize, usesGeometryShader, _activeFetchShader);
if (vertexShader->hasError)
{
LatteGPUState.activeShaderHasError = true;
return;
}
_activeVertexShader = vertexShader;
}
void LatteSHRC_UpdateGeometryShader(bool usesGeometryShader, uint8* geometryShaderPtr, uint32 geometryShaderSize, uint8* geometryCopyShader, uint32 geometryCopyShaderSize)
{
if (!usesGeometryShader || !_activeVertexShader)
{
_shaderBaseHash_gs = 0;
_activeGeometryShader = nullptr;
return;
}
LatteSHRC_UpdateGSBaseHash(geometryShaderPtr, geometryShaderSize, geometryCopyShader, geometryCopyShaderSize);
auto itBaseShader = sGeometryShaders.find(_shaderBaseHash_gs);
LatteDecompilerShader* geometryShader;
if (itBaseShader != sGeometryShaders.end())
{
// geometry shader already known
geometryShader = itBaseShader->second;
cemu_assert_debug(LatteSHRC_CalcGSAuxHash(geometryShader) == 0);
}
else
{
// decompile geometry shader
geometryShader = LatteShader_CompileSeparableGeometryShader(_shaderBaseHash_gs, geometryShaderPtr, geometryShaderSize, geometryCopyShader, geometryCopyShaderSize);
}
if (geometryShader->hasError)
{
LatteGPUState.activeShaderHasError = true;
return;
}
_activeGeometryShader = geometryShader;
}
void LatteSHRC_UpdatePixelShader(uint8* pixelShaderPtr, uint32 pixelShaderSize, bool usesGeometryShader)
{
LatteSHRC_UpdatePSBaseHash(pixelShaderPtr, pixelShaderSize, usesGeometryShader);
uint64 psAuxHash = 0;
auto itBaseShader = sPixelShaders.find(_shaderBaseHash_ps);
LatteDecompilerShader* pixelShader = nullptr;
if (itBaseShader != sPixelShaders.end())
{
psAuxHash = LatteSHRC_CalcPSAuxHash(itBaseShader->second, LatteGPUState.contextRegister);
pixelShader = LatteSHRC_GetFromChain(itBaseShader->second, _shaderBaseHash_ps, psAuxHash);
}
if (!pixelShader)
pixelShader = LatteShader_CompileSeparablePixelShader(_shaderBaseHash_ps, psAuxHash, pixelShaderPtr, pixelShaderSize, usesGeometryShader);
if (pixelShader->hasError)
{
LatteGPUState.activeShaderHasError = true;
return;
}
_activePixelShader = pixelShader;
}
void LatteSHRC_UpdateActiveShaders()
{
// check if geometry shader is used
auto gsMode = LatteGPUState.contextNew.VGT_GS_MODE.get_MODE();
cemu_assert_debug(LatteGPUState.contextNew.VGT_GS_MODE.get_ES_PASSTHRU() == false);
// todo: Support for ES passthrough and cut mode in mmVGT_GS_MODE
bool geometryShaderUsed = false;
if (gsMode == Latte::LATTE_VGT_GS_MODE::E_MODE::OFF)
{
geometryShaderUsed = false;
}
else if (gsMode == Latte::LATTE_VGT_GS_MODE::E_MODE::SCENARIO_G)
{
// could also be compute shader?
geometryShaderUsed = true;
}
else
{
cemu_assert_debug(false);
}
// get shader programs
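// the shifts below assume the *_START registers hold the physical address in 256-byte units and the size registers hold the program size in 8-byte units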
uint8* psProgramCode = (uint8*)memory_getPointerFromPhysicalOffset((LatteGPUState.contextRegister[mmSQ_PGM_START_PS] & 0xFFFFFF) << 8);
uint32 psProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_PS + 1] << 3;
uint8* gsProgramCode = (uint8*)memory_getPointerFromPhysicalOffset((LatteGPUState.contextRegister[mmSQ_PGM_START_GS] & 0xFFFFFF) << 8);
uint32 gsProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_GS + 1] << 3;
uint8* vsProgramCode;
uint32 vsProgramSize;
uint8* copyProgramCode = NULL;
uint32 copyProgramSize = 0;
if (geometryShaderUsed)
{
vsProgramCode = (uint8*)memory_getPointerFromPhysicalOffset((LatteGPUState.contextRegister[mmSQ_PGM_START_ES] & 0xFFFFFF) << 8);
vsProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_ES + 1] << 3;
copyProgramCode = (uint8*)memory_getPointerFromPhysicalOffset((LatteGPUState.contextRegister[mmSQ_PGM_START_VS] & 0xFFFFFF) << 8);
if (LatteGPUState.contextRegister[mmSQ_PGM_START_VS] == 0)
{
copyProgramCode = NULL;
debug_printf("copyProgram is NULL but used. Might be because of unsupported vertex/geometry mode?");
}
copyProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_VS + 1] << 3;
}
else
{
if (LatteGPUState.contextRegister[mmSQ_PGM_START_VS] == 0)
{
debug_printf("No vertex shader program set\n");
LatteGPUState.activeShaderHasError = true;
return;
}
vsProgramCode = (uint8*)memory_getPointerFromPhysicalOffset((LatteGPUState.contextRegister[mmSQ_PGM_START_VS] & 0xFFFFFF) << 8);
vsProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_VS + 1] << 3;
}
// set new shaders
LatteGPUState.activeShaderHasError = false;
LatteShader_UpdatePSInputs(LatteGPUState.contextRegister);
LatteShaderSHRC_UpdateFetchShader();
LatteSHRC_UpdateVertexShader(vsProgramCode, vsProgramSize, geometryShaderUsed);
if (LatteGPUState.activeShaderHasError)
return;
LatteSHRC_UpdateGeometryShader(geometryShaderUsed, gsProgramCode, gsProgramSize, copyProgramCode, copyProgramSize);
if (LatteGPUState.activeShaderHasError)
return;
LatteSHRC_UpdatePixelShader(psProgramCode, psProgramSize, geometryShaderUsed);
if (LatteGPUState.activeShaderHasError)
return;
}
// returns the sampler base index for the given shader type
sint32 LatteDecompiler_getTextureSamplerBaseIndex(LatteConst::ShaderType shaderType)
{
uint32 samplerId = LATTE_DECOMPILER_SAMPLER_NONE;
if (shaderType == LatteConst::ShaderType::Vertex)
return Latte::SAMPLER_BASE_INDEX_VERTEX;
else if (shaderType == LatteConst::ShaderType::Pixel)
return Latte::SAMPLER_BASE_INDEX_PIXEL;
else if (shaderType == LatteConst::ShaderType::Geometry)
return Latte::SAMPLER_BASE_INDEX_GEOMETRY;
else
cemu_assert_suspicious();
return 0;
}
void LatteSHRC_Init()
{
cemu_assert_debug(sVertexShaders.empty());
cemu_assert_debug(sGeometryShaders.empty());
cemu_assert_debug(sPixelShaders.empty());
}
void LatteSHRC_UnloadAll()
{
while(!sVertexShaders.empty())
LatteShader_free(sVertexShaders.begin()->second);
cemu_assert_debug(sVertexShaders.empty());
while(!sGeometryShaders.empty())
LatteShader_free(sGeometryShaders.begin()->second);
cemu_assert_debug(sGeometryShaders.empty());
while(!sPixelShaders.empty())
LatteShader_free(sPixelShaders.begin()->second);
cemu_assert_debug(sPixelShaders.empty());
}
| 37,466 | C++ | .cpp | 922 | 38.28308 | 304 | 0.78388 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,266 | LatteTiming.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTiming.cpp |
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/OS/libs/gx2/GX2_Event.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VsyncDriver.h"
#include "util/highresolutiontimer/HighResolutionTimer.h"
#include "config/CemuConfig.h"
#include "Cafe/CafeSystem.h"
sint32 s_customVsyncFrequency = -1;
void LatteTiming_NotifyHostVSync();
// calculate time between vsync events in timer units
// the standard rate on the Wii U is 59.94 Hz; to prevent tearing and microstutter on ~60 Hz displays it is better to slightly overshoot 60 Hz
// can be modified by graphic packs
HRTick LatteTime_CalculateTimeBetweenVSync()
{
// default: scale the 60 Hz period by 1000/1002, giving an effective refresh rate of ~60.12 Hz (slightly above 60 Hz)
HRTick tick = HighResolutionTimer::getFrequency();
if (s_customVsyncFrequency > 0)
{
tick /= (uint64)s_customVsyncFrequency;
}
else
{
tick *= 1000ull;
tick /= 1002ull;
tick /= 60ull;
}
return tick;
}
void LatteTiming_setCustomVsyncFrequency(sint32 frequency)
{
s_customVsyncFrequency = frequency;
}
void LatteTiming_disableCustomVsyncFrequency()
{
s_customVsyncFrequency = -1;
}
bool LatteTiming_getCustomVsyncFrequency(sint32& customFrequency)
{
sint32 t = s_customVsyncFrequency;
if (t <= 0)
return false;
customFrequency = t;
return true;
}
bool s_usingHostDrivenVSync = false;
void LatteTiming_EnableHostDrivenVSync()
{
if (s_usingHostDrivenVSync)
return;
VsyncDriver_startThread(LatteTiming_NotifyHostVSync);
s_usingHostDrivenVSync = true;
}
bool LatteTiming_IsUsingHostDrivenVSync()
{
return s_usingHostDrivenVSync;
}
void LatteTiming_Init()
{
LatteGPUState.timer_frequency = HighResolutionTimer::getFrequency();
LatteGPUState.timer_bootUp = HighResolutionTimer::now().getTick();
LatteGPUState.timer_nextVSync = LatteGPUState.timer_bootUp + LatteTime_CalculateTimeBetweenVSync();
}
void LatteTiming_signalVsync()
{
static uint32 s_vsyncIntervalCounter = 0;
if (!LatteGPUState.gx2InitCalled)
return;
s_vsyncIntervalCounter++;
uint32 swapInterval = 1;
if (LatteGPUState.sharedArea)
swapInterval = LatteGPUState.sharedArea->swapInterval;
// flip
if (s_vsyncIntervalCounter >= swapInterval)
{
if (LatteGPUState.sharedArea)
{
// hack/workaround - only execute flip if GX2SwapScanBuffers() isn't lagging behind
uint64 currentTitleId = CafeSystem::GetForegroundTitleId();
if (currentTitleId == 0x00050000101c9500 || currentTitleId == 0x00050000101c9400 || currentTitleId == 0x0005000e101c9300)
{
uint32 currentFlipRequestCount = _swapEndianU32(LatteGPUState.sharedArea->flipRequestCountBE);
uint32 currentFlipExecuteCount = _swapEndianU32(LatteGPUState.sharedArea->flipExecuteCountBE);
if ((currentFlipRequestCount >= currentFlipExecuteCount) || (currentFlipExecuteCount - currentFlipRequestCount < 4))
{
LatteGPUState.sharedArea->flipExecuteCountBE = _swapEndianU32(_swapEndianU32(LatteGPUState.sharedArea->flipExecuteCountBE) + 1);
}
LatteGPUState.flipCounter++;
}
else
{
// old code for all other games
if (LatteGPUState.flipRequestCount > 0)
{
LatteGPUState.flipRequestCount.fetch_sub(1);
LatteGPUState.sharedArea->flipExecuteCountBE = _swapEndianU32(_swapEndianU32(LatteGPUState.sharedArea->flipExecuteCountBE) + 1);
}
}
}
GX2::__GX2NotifyEvent(GX2::GX2CallbackEventType::FLIP);
s_vsyncIntervalCounter = 0;
}
// vsync
GX2::__GX2NotifyEvent(GX2::GX2CallbackEventType::VSYNC);
}
HRTick s_lastHostVsync = 0;
// notify when host vsync event is triggered (on renderer canvas)
void LatteTiming_NotifyHostVSync()
{
if (!LatteTiming_IsUsingHostDrivenVSync())
return;
auto nowTimePoint = HighResolutionTimer::now().getTick();
auto dif = nowTimePoint - s_lastHostVsync;
auto vsyncPeriod = LatteTime_CalculateTimeBetweenVSync();
if (dif < vsyncPeriod)
{
// skip
return;
}
uint64 elapsedPeriods = dif / vsyncPeriod;
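// if vsync events were stalled for 10 or more periods, resync to the current time instead of accumulating the backlog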
if (elapsedPeriods >= 10)
{
s_lastHostVsync = nowTimePoint;
}
else
s_lastHostVsync += vsyncPeriod;
LatteTiming_signalVsync();
}
// handle timed vsync event
void LatteTiming_HandleTimedVsync()
{
// simulate VSync
uint64 currentTimer = HighResolutionTimer::now().getTick();
if( currentTimer >= LatteGPUState.timer_nextVSync )
{
if(!LatteTiming_IsUsingHostDrivenVSync())
LatteTiming_signalVsync();
// even if vsync is delegated to the host device, we still use this virtual vsync timer to check finished states
LatteQuery_UpdateFinishedQueries();
LatteTextureReadback_UpdateFinishedTransfers(false);
// update vsync timer
uint64 vsyncTime = LatteTime_CalculateTimeBetweenVSync();
uint64 missedVsyncCount = (currentTimer - LatteGPUState.timer_nextVSync) / vsyncTime;
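// if two or more periods were missed, skip ahead instead of firing a burst of catch-up vsyncs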
if (missedVsyncCount >= 2)
{
LatteGPUState.timer_nextVSync += vsyncTime*(missedVsyncCount+1ULL);
}
else
LatteGPUState.timer_nextVSync += vsyncTime;
}
}
| 4,769 | C++ | .cpp | 149 | 29.52349 | 141 | 0.783554 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,267 | LatteIndices.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteIndices.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Common/cpu_features.h"
#if defined(ARCH_X86_64) && defined(__GNUC__)
#include <immintrin.h>
#endif
struct
{
const void* lastPtr;
uint32 lastCount;
LattePrimitiveMode lastPrimitiveMode;
LatteIndexType lastIndexType;
// output
uint32 indexMin;
uint32 indexMax;
Renderer::INDEX_TYPE renderIndexType;
uint32 outputCount;
uint32 indexBufferOffset;
uint32 indexBufferIndex;
}LatteIndexCache{};
void LatteIndices_invalidate(const void* memPtr, uint32 size)
{
if (LatteIndexCache.lastPtr >= memPtr && (LatteIndexCache.lastPtr < ((uint8*)memPtr + size)) )
{
LatteIndexCache.lastPtr = nullptr;
LatteIndexCache.lastCount = 0;
}
}
void LatteIndices_invalidateAll()
{
LatteIndexCache.lastPtr = nullptr;
LatteIndexCache.lastCount = 0;
}
uint32 LatteIndices_calculateIndexOutputSize(LattePrimitiveMode primitiveMode, LatteIndexType indexType, uint32 count)
{
if (primitiveMode == LattePrimitiveMode::QUADS)
{
sint32 numQuads = count / 4;
if (indexType == LatteIndexType::AUTO)
{
if(count <= 0xFFFF)
return numQuads * 6 * sizeof(uint16);
return numQuads * 6 * sizeof(uint32);
}
if (indexType == LatteIndexType::U16_BE || indexType == LatteIndexType::U16_LE)
return numQuads * 6 * sizeof(uint16);
if (indexType == LatteIndexType::U32_BE || indexType == LatteIndexType::U32_LE)
return numQuads * 6 * sizeof(uint32);
cemu_assert_suspicious();
return 0;
}
else if (primitiveMode == LattePrimitiveMode::QUAD_STRIP)
{
if (count <= 3)
{
return 0;
}
sint32 numQuads = (count-2) / 2;
if (indexType == LatteIndexType::AUTO)
{
if (count <= 0xFFFF)
return numQuads * 6 * sizeof(uint16);
return numQuads * 6 * sizeof(uint32);
}
if (indexType == LatteIndexType::U16_BE || indexType == LatteIndexType::U16_LE)
return numQuads * 6 * sizeof(uint16);
if (indexType == LatteIndexType::U32_BE || indexType == LatteIndexType::U32_LE)
return numQuads * 6 * sizeof(uint32);
cemu_assert_suspicious();
return 0;
}
else if (primitiveMode == LattePrimitiveMode::LINE_LOOP)
{
count++; // one extra vertex to reconnect the LINE_STRIP to the beginning
if (indexType == LatteIndexType::AUTO)
{
if (count <= 0xFFFF)
return count * sizeof(uint16);
return count * sizeof(uint32);
}
if (indexType == LatteIndexType::U16_BE || indexType == LatteIndexType::U16_LE)
return count * sizeof(uint16);
if (indexType == LatteIndexType::U32_BE || indexType == LatteIndexType::U32_LE)
return count * sizeof(uint32);
cemu_assert_suspicious();
return 0;
}
else if(indexType == LatteIndexType::AUTO)
return 0;
else if (indexType == LatteIndexType::U16_BE || indexType == LatteIndexType::U16_LE)
return count * sizeof(uint16);
else if (indexType == LatteIndexType::U32_BE || indexType == LatteIndexType::U32_LE)
return count * sizeof(uint32);
return 0;
}
template<typename T>
void LatteIndices_convertBE(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
const betype<T>* src = (betype<T>*)indexDataInput;
T* dst = (T*)indexDataOutput;
for (uint32 i = 0; i < count; i++)
{
T v = *src;
*dst = v;
indexMin = std::min(indexMin, (uint32)v);
indexMax = std::max(indexMax, (uint32)v);
dst++;
src++;
}
}
template<typename T>
void LatteIndices_convertLE(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
const T* src = (T*)indexDataInput;
T* dst = (T*)indexDataOutput;
for (uint32 i = 0; i < count; i++)
{
T v = *src;
*dst = v;
indexMin = std::min(indexMin, (uint32)v);
indexMax = std::max(indexMax, (uint32)v);
dst++;
src++;
}
}
template<typename T>
void LatteIndices_unpackQuadsAndConvert(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
sint32 numQuads = count / 4;
const betype<T>* src = (betype<T>*)indexDataInput;
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < numQuads; i++)
{
T idx0 = src[0];
T idx1 = src[1];
T idx2 = src[2];
T idx3 = src[3];
indexMin = std::min(indexMin, (uint32)idx0);
indexMax = std::max(indexMax, (uint32)idx0);
indexMin = std::min(indexMin, (uint32)idx1);
indexMax = std::max(indexMax, (uint32)idx1);
indexMin = std::min(indexMin, (uint32)idx2);
indexMax = std::max(indexMax, (uint32)idx2);
indexMin = std::min(indexMin, (uint32)idx3);
indexMax = std::max(indexMax, (uint32)idx3);
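// split quad (v0,v1,v2,v3) into triangles (v0,v1,v2) and (v0,v2,v3)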
dst[0] = idx0;
dst[1] = idx1;
dst[2] = idx2;
dst[3] = idx0;
dst[4] = idx2;
dst[5] = idx3;
src += 4;
dst += 6;
}
}
template<typename T>
void LatteIndices_generateAutoQuadIndices(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
sint32 numQuads = count / 4;
const betype<T>* src = (betype<T>*)indexDataInput;
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < numQuads; i++)
{
T idx0 = i * 4 + 0;
T idx1 = i * 4 + 1;
T idx2 = i * 4 + 2;
T idx3 = i * 4 + 3;
dst[0] = idx0;
dst[1] = idx1;
dst[2] = idx2;
dst[3] = idx0;
dst[4] = idx2;
dst[5] = idx3;
src += 4;
dst += 6;
}
indexMin = 0;
indexMax = std::max(count, 1u) - 1;
}
template<typename T>
void LatteIndices_unpackQuadStripAndConvert(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
if (count <= 3)
return;
sint32 numQuads = (count - 2) / 2;
const betype<T>* src = (betype<T>*)indexDataInput;
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < numQuads; i++)
{
T idx0 = src[0];
T idx1 = src[1];
T idx2 = src[2];
T idx3 = src[3];
indexMin = std::min(indexMin, (uint32)idx0);
indexMax = std::max(indexMax, (uint32)idx0);
indexMin = std::min(indexMin, (uint32)idx1);
indexMax = std::max(indexMax, (uint32)idx1);
indexMin = std::min(indexMin, (uint32)idx2);
indexMax = std::max(indexMax, (uint32)idx2);
indexMin = std::min(indexMin, (uint32)idx3);
indexMax = std::max(indexMax, (uint32)idx3);
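// emit triangles (v0,v1,v2) and (v2,v1,v3); advance the source by 2 since consecutive quads in a strip share an edge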
dst[0] = idx0;
dst[1] = idx1;
dst[2] = idx2;
dst[3] = idx2;
dst[4] = idx1;
dst[5] = idx3;
src += 2;
dst += 6;
}
}
template<typename T>
void LatteIndices_unpackLineLoopAndConvert(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
if (count <= 0)
return;
const betype<T>* src = (betype<T>*)indexDataInput;
T firstIndex = *src;
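// remember the first index so the loop can be closed by appending it at the end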
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < (sint32)count; i++)
{
T idx = *src;
indexMin = std::min(indexMin, (uint32)idx);
indexMax = std::max(indexMax, (uint32)idx);
*dst = idx;
src++;
dst++;
}
*dst = firstIndex;
}
template<typename T>
void LatteIndices_generateAutoQuadStripIndices(void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
if (count <= 3)
return;
sint32 numQuads = (count - 2) / 2;
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < numQuads; i++)
{
T idx0 = i * 2 + 0;
T idx1 = i * 2 + 1;
T idx2 = i * 2 + 2;
T idx3 = i * 2 + 3;
dst[0] = idx0;
dst[1] = idx1;
dst[2] = idx2;
dst[3] = idx2;
dst[4] = idx1;
dst[5] = idx3;
dst += 6;
}
indexMin = 0;
indexMax = std::max(count, 1u) - 1;
}
template<typename T>
void LatteIndices_generateAutoLineLoopIndices(void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
if (count == 0)
return;
T* dst = (T*)indexDataOutput;
for (sint32 i = 0; i < (sint32)count; i++)
{
*dst = (T)i;
dst++;
}
*dst = 0;
dst++;
indexMin = 0;
indexMax = std::max(count, 1u) - 1;
}
#if defined(ARCH_X86_64)
ATTRIBUTE_AVX2
void LatteIndices_fastConvertU16_AVX2(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
// using AVX + AVX2 we can process 16 indices at a time
const uint16* indicesU16BE = (const uint16*)indexDataInput;
uint16* indexOutput = (uint16*)indexDataOutput;
sint32 count16 = count >> 4;
sint32 countRemaining = count & 15;
if (count16)
{
__m256i mMin = _mm256_set_epi16((sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF,
(sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF, (sint16)0xFFFF);
__m256i mMax = _mm256_set_epi16(0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000);
__m256i mShuffle16Swap = _mm256_set_epi8(30, 31, 28, 29, 26, 27, 24, 25, 22, 23, 20, 21, 18, 19, 16, 17, 14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
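// mShuffle16Swap swaps the two bytes of each 16-bit element (big endian -> little endian)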
do
{
__m256i mIndexData = _mm256_loadu_si256((const __m256i*)indicesU16BE);
indicesU16BE += 16;
_mm_prefetch((const char*)indicesU16BE, _MM_HINT_T0);
// endian swap
mIndexData = _mm256_shuffle_epi8(mIndexData, mShuffle16Swap);
_mm256_store_si256((__m256i*)indexOutput, mIndexData);
mMin = _mm256_min_epu16(mIndexData, mMin);
mMax = _mm256_max_epu16(mIndexData, mMax);
indexOutput += 16;
} while (--count16);
// fold 32 to 16 byte
mMin = _mm256_min_epu16(mMin, _mm256_permute2x128_si256(mMin, mMin, 1));
mMax = _mm256_max_epu16(mMax, _mm256_permute2x128_si256(mMax, mMax, 1));
// fold 16 to 8 byte
mMin = _mm256_min_epu16(mMin, _mm256_shuffle_epi32(mMin, (2 << 0) | (3 << 2) | (2 << 4) | (3 << 6)));
mMax = _mm256_max_epu16(mMax, _mm256_shuffle_epi32(mMax, (2 << 0) | (3 << 2) | (2 << 4) | (3 << 6)));
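// after the folds the overall min/max is contained in the lowest four uint16 lanes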
uint16* mMinU16 = (uint16*)&mMin;
uint16* mMaxU16 = (uint16*)&mMax;
indexMin = std::min(indexMin, (uint32)mMinU16[0]);
indexMin = std::min(indexMin, (uint32)mMinU16[1]);
indexMin = std::min(indexMin, (uint32)mMinU16[2]);
indexMin = std::min(indexMin, (uint32)mMinU16[3]);
indexMax = std::max(indexMax, (uint32)mMaxU16[0]);
indexMax = std::max(indexMax, (uint32)mMaxU16[1]);
indexMax = std::max(indexMax, (uint32)mMaxU16[2]);
indexMax = std::max(indexMax, (uint32)mMaxU16[3]);
}
// process remaining indices
uint32 _minIndex = 0xFFFFFFFF;
uint32 _maxIndex = 0;
for (sint32 i = countRemaining; (--i) >= 0;)
{
uint16 idx = _swapEndianU16(*indicesU16BE);
*indexOutput = idx;
indexOutput++;
indicesU16BE++;
_maxIndex = std::max(_maxIndex, (uint32)idx);
_minIndex = std::min(_minIndex, (uint32)idx);
}
// update min/max
indexMax = std::max(indexMax, _maxIndex);
indexMin = std::min(indexMin, _minIndex);
}
ATTRIBUTE_SSE41
void LatteIndices_fastConvertU16_SSE41(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
// SSSE3 & SSE4.1 optimized decoding
const uint16* indicesU16BE = (const uint16*)indexDataInput;
uint16* indexOutput = (uint16*)indexDataOutput;
sint32 count8 = count >> 3;
sint32 countRemaining = count & 7;
if (count8)
{
__m128i mMin = _mm_set_epi16((short)0xFFFF, (short)0xFFFF, (short)0xFFFF, (short)0xFFFF, (short)0xFFFF, (short)0xFFFF, (short)0xFFFF, (short)0xFFFF);
__m128i mMax = _mm_set_epi16(0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000);
__m128i mTemp;
__m128i* mRawIndices = (__m128i*)indicesU16BE;
indicesU16BE += count8 * 8;
__m128i* mOutputIndices = (__m128i*)indexOutput;
indexOutput += count8 * 8;
__m128i shufmask = _mm_set_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1);
while (count8--)
{
mTemp = _mm_loadu_si128(mRawIndices);
mRawIndices++;
mTemp = _mm_shuffle_epi8(mTemp, shufmask);
mMin = _mm_min_epu16(mMin, mTemp);
mMax = _mm_max_epu16(mMax, mTemp);
_mm_store_si128(mOutputIndices, mTemp);
mOutputIndices++;
}
uint16* mMaxU16 = (uint16*)&mMax;
uint16* mMinU16 = (uint16*)&mMin;
indexMax = std::max(indexMax, (uint32)mMaxU16[0]);
indexMax = std::max(indexMax, (uint32)mMaxU16[1]);
indexMax = std::max(indexMax, (uint32)mMaxU16[2]);
indexMax = std::max(indexMax, (uint32)mMaxU16[3]);
indexMax = std::max(indexMax, (uint32)mMaxU16[4]);
indexMax = std::max(indexMax, (uint32)mMaxU16[5]);
indexMax = std::max(indexMax, (uint32)mMaxU16[6]);
indexMax = std::max(indexMax, (uint32)mMaxU16[7]);
indexMin = std::min(indexMin, (uint32)mMinU16[0]);
indexMin = std::min(indexMin, (uint32)mMinU16[1]);
indexMin = std::min(indexMin, (uint32)mMinU16[2]);
indexMin = std::min(indexMin, (uint32)mMinU16[3]);
indexMin = std::min(indexMin, (uint32)mMinU16[4]);
indexMin = std::min(indexMin, (uint32)mMinU16[5]);
indexMin = std::min(indexMin, (uint32)mMinU16[6]);
indexMin = std::min(indexMin, (uint32)mMinU16[7]);
}
uint32 _minIndex = 0xFFFFFFFF;
uint32 _maxIndex = 0;
for (sint32 i = countRemaining; (--i) >= 0;)
{
uint16 idx = _swapEndianU16(*indicesU16BE);
*indexOutput = idx;
indexOutput++;
indicesU16BE++;
_maxIndex = std::max(_maxIndex, (uint32)idx);
_minIndex = std::min(_minIndex, (uint32)idx);
}
indexMax = std::max(indexMax, _maxIndex);
indexMin = std::min(indexMin, _minIndex);
}
ATTRIBUTE_AVX2
void LatteIndices_fastConvertU32_AVX2(const void* indexDataInput, void* indexDataOutput, uint32 count, uint32& indexMin, uint32& indexMax)
{
// using AVX + AVX2 we can process 8 indices at a time
const uint32* indicesU32BE = (const uint32*)indexDataInput;
uint32* indexOutput = (uint32*)indexDataOutput;
sint32 count8 = count >> 3;
sint32 countRemaining = count & 7;
if (count8)
{
__m256i mMin = _mm256_set_epi32((sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF, (sint32)0xFFFFFFFF);
__m256i mMax = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 0);
__m256i mShuffle32Swap = _mm256_set_epi8(28,29,30,31,
24,25,26,27,
20,21,22,23,
16,17,18,19,
12,13,14,15,
8,9,10,11,
4,5,6,7,
0,1,2,3);
// unaligned
do
{
__m256i mIndexData = _mm256_loadu_si256((const __m256i*)indicesU32BE);
indicesU32BE += 8;
_mm_prefetch((const char*)indicesU32BE, _MM_HINT_T0);
// endian swap
mIndexData = _mm256_shuffle_epi8(mIndexData, mShuffle32Swap);
_mm256_store_si256((__m256i*)indexOutput, mIndexData);
mMin = _mm256_min_epu32(mIndexData, mMin);
mMax = _mm256_max_epu32(mIndexData, mMax);
indexOutput += 8;
} while (--count8);
// fold 32 to 16 byte
mMin = _mm256_min_epu32(mMin, _mm256_permute2x128_si256(mMin, mMin, 1));
mMax = _mm256_max_epu32(mMax, _mm256_permute2x128_si256(mMax, mMax, 1));
// fold 16 to 8 byte
mMin = _mm256_min_epu32(mMin, _mm256_shuffle_epi32(mMin, (2 << 0) | (3 << 2) | (2 << 4) | (3 << 6)));
mMax = _mm256_max_epu32(mMax, _mm256_shuffle_epi32(mMax, (2 << 0) | (3 << 2) | (2 << 4) | (3 << 6)));
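// after the folds the overall min/max is contained in the lowest two uint32 lanes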
uint32* mMinU32 = (uint32*)&mMin;
uint32* mMaxU32 = (uint32*)&mMax;
indexMin = std::min(indexMin, (uint32)mMinU32[0]);
indexMin = std::min(indexMin, (uint32)mMinU32[1]);
indexMax = std::max(indexMax, (uint32)mMaxU32[0]);
indexMax = std::max(indexMax, (uint32)mMaxU32[1]);
}
// process remaining indices
uint32 _minIndex = 0xFFFFFFFF;
uint32 _maxIndex = 0;
for (sint32 i = countRemaining; (--i) >= 0;)
{
uint32 idx = _swapEndianU32(*indicesU32BE);
*indexOutput = idx;
indexOutput++;
indicesU32BE++;
_maxIndex = std::max(_maxIndex, (uint32)idx);
_minIndex = std::min(_minIndex, (uint32)idx);
}
// update min/max
indexMax = std::max(indexMax, _maxIndex);
indexMin = std::min(indexMin, _minIndex);
}
#endif
template<typename T>
void _LatteIndices_alternativeCalculateIndexMinMax(const void* indexData, uint32 count, uint32 primitiveRestartIndex, uint32& indexMin, uint32& indexMax)
{
cemu_assert_debug(count != 0);
const betype<T>* idxPtrT = (betype<T>*)indexData;
T _indexMin = *idxPtrT;
T _indexMax = *idxPtrT;
cemu_assert_debug(primitiveRestartIndex <= std::numeric_limits<T>::max());
T restartIndexT = (T)primitiveRestartIndex;
while (count)
{
T idx = *idxPtrT;
if (idx != restartIndexT)
{
_indexMin = std::min(_indexMin, idx);
_indexMax = std::max(_indexMax, idx);
}
idxPtrT++;
count--;
}
indexMin = _indexMin;
indexMax = _indexMax;
}
// calculate min and max index while taking primitive restart into account
// fallback implementation in case the fast path gives us invalid results
void LatteIndices_alternativeCalculateIndexMinMax(const void* indexData, LatteIndexType indexType, uint32 count, uint32& indexMin, uint32& indexMax)
{
if (count == 0)
{
indexMin = 0;
indexMax = 0;
return;
}
uint32 primitiveRestartIndex = LatteGPUState.contextNew.VGT_MULTI_PRIM_IB_RESET_INDX.get_RESTART_INDEX();
if (indexType == LatteIndexType::U16_BE)
{
_LatteIndices_alternativeCalculateIndexMinMax<uint16>(indexData, count, primitiveRestartIndex, indexMin, indexMax);
}
else if (indexType == LatteIndexType::U32_BE)
{
_LatteIndices_alternativeCalculateIndexMinMax<uint32>(indexData, count, primitiveRestartIndex, indexMin, indexMax);
}
else
{
cemu_assert_debug(false);
}
}
void LatteIndices_decode(const void* indexData, LatteIndexType indexType, uint32 count, LattePrimitiveMode primitiveMode, uint32& indexMin, uint32& indexMax, Renderer::INDEX_TYPE& renderIndexType, uint32& outputCount, uint32& indexBufferOffset, uint32& indexBufferIndex)
{
// what this should do:
// [x] use fast SIMD-based index decoding
// [x] unpack QUAD indices to triangle indices
// [x] calculate min and max index, be careful about primitive restart index
// [x] decode data directly into coherent memory buffer?
// [ ] better cache implementation, allow to cache across frames
// reuse from cache if data didn't change
if (LatteIndexCache.lastPtr == indexData &&
LatteIndexCache.lastCount == count &&
LatteIndexCache.lastPrimitiveMode == primitiveMode &&
LatteIndexCache.lastIndexType == indexType)
{
indexMin = LatteIndexCache.indexMin;
indexMax = LatteIndexCache.indexMax;
renderIndexType = LatteIndexCache.renderIndexType;
outputCount = LatteIndexCache.outputCount;
indexBufferOffset = LatteIndexCache.indexBufferOffset;
indexBufferIndex = LatteIndexCache.indexBufferIndex;
return;
}
outputCount = 0;
if (indexType == LatteIndexType::AUTO)
renderIndexType = Renderer::INDEX_TYPE::NONE;
else if (indexType == LatteIndexType::U16_BE || indexType == LatteIndexType::U16_LE)
renderIndexType = Renderer::INDEX_TYPE::U16;
else if (indexType == LatteIndexType::U32_BE)
renderIndexType = Renderer::INDEX_TYPE::U32;
else
cemu_assert_debug(false);
uint32 primitiveRestartIndex = LatteGPUState.contextNew.VGT_MULTI_PRIM_IB_RESET_INDX.get_RESTART_INDEX();
// calculate index output size
uint32 indexOutputSize = LatteIndices_calculateIndexOutputSize(primitiveMode, indexType, count);
if (indexOutputSize == 0)
{
outputCount = count;
indexMin = 0;
indexMax = std::max(count, 1u)-1;
renderIndexType = Renderer::INDEX_TYPE::NONE;
return; // no indices
}
// query index buffer from renderer
void* indexOutputPtr = g_renderer->indexData_reserveIndexMemory(indexOutputSize, indexBufferOffset, indexBufferIndex);
// decode indices
indexMin = std::numeric_limits<uint32>::max();
indexMax = std::numeric_limits<uint32>::min();
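// start with an inverted range (min = UINT32_MAX, max = 0) so the first decoded index initializes both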
if (primitiveMode == LattePrimitiveMode::QUADS)
{
// unpack quads into triangles
if (indexType == LatteIndexType::AUTO)
{
if (count <= 0xFFFF)
{
LatteIndices_generateAutoQuadIndices<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U16;
}
else
{
LatteIndices_generateAutoQuadIndices<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U32;
}
}
else if (indexType == LatteIndexType::U16_BE)
LatteIndices_unpackQuadsAndConvert<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
else if (indexType == LatteIndexType::U32_BE)
LatteIndices_unpackQuadsAndConvert<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
else
cemu_assert_debug(false);
outputCount = count / 4 * 6;
}
else if (primitiveMode == LattePrimitiveMode::QUAD_STRIP)
{
// unpack quad strip into triangles
if (indexType == LatteIndexType::AUTO)
{
if (count <= 0xFFFF)
{
LatteIndices_generateAutoQuadStripIndices<uint16>(indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U16;
}
else
{
LatteIndices_generateAutoQuadStripIndices<uint32>(indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U32;
}
}
else if (indexType == LatteIndexType::U16_BE)
LatteIndices_unpackQuadStripAndConvert<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
else if (indexType == LatteIndexType::U32_BE)
LatteIndices_unpackQuadStripAndConvert<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
else
cemu_assert_debug(false);
if (count >= 2)
outputCount = (count - 2) / 2 * 6;
else
outputCount = 0;
}
else if (primitiveMode == LattePrimitiveMode::LINE_LOOP)
{
// unpack line loop into line strip with extra reconnecting vertex
if (indexType == LatteIndexType::AUTO)
{
if (count <= 0xFFFF)
{
LatteIndices_generateAutoLineLoopIndices<uint16>(indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U16;
}
else
{
LatteIndices_generateAutoLineLoopIndices<uint32>(indexOutputPtr, count, indexMin, indexMax);
renderIndexType = Renderer::INDEX_TYPE::U32;
}
}
else if (indexType == LatteIndexType::U16_BE)
LatteIndices_unpackLineLoopAndConvert<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
else if (indexType == LatteIndexType::U32_BE)
LatteIndices_unpackLineLoopAndConvert<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
else
cemu_assert_debug(false);
outputCount = count + 1;
}
else
{
if (indexType == LatteIndexType::U16_BE)
{
#if defined(ARCH_X86_64)
if (g_CPUFeatures.x86.avx2)
LatteIndices_fastConvertU16_AVX2(indexData, indexOutputPtr, count, indexMin, indexMax);
else if (g_CPUFeatures.x86.sse4_1 && g_CPUFeatures.x86.ssse3)
LatteIndices_fastConvertU16_SSE41(indexData, indexOutputPtr, count, indexMin, indexMax);
else
LatteIndices_convertBE<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
#else
LatteIndices_convertBE<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
#endif
}
else if (indexType == LatteIndexType::U32_BE)
{
#if defined(ARCH_X86_64)
if (g_CPUFeatures.x86.avx2)
LatteIndices_fastConvertU32_AVX2(indexData, indexOutputPtr, count, indexMin, indexMax);
else
LatteIndices_convertBE<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
#else
LatteIndices_convertBE<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
#endif
}
else if (indexType == LatteIndexType::U16_LE)
{
LatteIndices_convertLE<uint16>(indexData, indexOutputPtr, count, indexMin, indexMax);
}
else if (indexType == LatteIndexType::U32_LE)
{
LatteIndices_convertLE<uint32>(indexData, indexOutputPtr, count, indexMin, indexMax);
}
else
cemu_assert_debug(false);
outputCount = count;
}
// the above algorithms use a simplistic approach to get indexMin/indexMax
// here we make sure primitive restart indices don't influence the index range
if (primitiveRestartIndex == indexMin || primitiveRestartIndex == indexMax)
{
// recalculate index range but filter out primitive restart index
LatteIndices_alternativeCalculateIndexMinMax(indexData, indexType, count, indexMin, indexMax);
}
g_renderer->indexData_uploadIndexMemory(indexBufferOffset, indexOutputSize);
// update cache
LatteIndexCache.lastPtr = indexData;
LatteIndexCache.lastCount = count;
LatteIndexCache.lastPrimitiveMode = primitiveMode;
LatteIndexCache.lastIndexType = indexType;
LatteIndexCache.indexMin = indexMin;
LatteIndexCache.indexMax = indexMax;
LatteIndexCache.renderIndexType = renderIndexType;
LatteIndexCache.outputCount = outputCount;
LatteIndexCache.indexBufferOffset = indexBufferOffset;
LatteIndexCache.indexBufferIndex = indexBufferIndex;
}
| 24,143 | C++ | .cpp | 682 | 32.546921 | 270 | 0.712133 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,268 | LatteTexture.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTexture.cpp |
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/LatteAddrLib/LatteAddrLib.h"
#include "Cafe/GraphicPack/GraphicPack2.h"
#include <boost/container/small_vector.hpp>
struct TexMemOccupancyEntry
{
uint32 addrStart;
uint32 addrEnd;
LatteTextureSliceMipInfo* sliceMipInfo;
};
#define TEX_OCCUPANCY_BUCKET_COUNT (0x800) // each bucket covers a range of 2MB
#define TEX_OCCUPANCY_BUCKET_SIZE (0x100000000/TEX_OCCUPANCY_BUCKET_COUNT)
#define loopItrMemOccupancyBuckets(__startAddr, __endAddr) for(sint32 startBucketIndex = ((__startAddr)/TEX_OCCUPANCY_BUCKET_SIZE), bucketIndex=startBucketIndex; bucketIndex<=((__endAddr-1)/TEX_OCCUPANCY_BUCKET_SIZE); bucketIndex++)
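// iterates 'bucketIndex' over every occupancy bucket touched by the address range [__startAddr, __endAddr)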
std::vector<TexMemOccupancyEntry> list_texMemOccupancyBucket[TEX_OCCUPANCY_BUCKET_COUNT];
std::atomic_bool s_refreshTextureQueryList;
std::vector<LatteTextureInformation> s_cacheInfoList;
std::vector<LatteTextureInformation> LatteTexture_QueryCacheInfo()
{
// raise request flag to refresh cache
s_refreshTextureQueryList.store(true);
// wait until cleared or until timeout occurred
auto begin = std::chrono::high_resolution_clock::now();
while (true)
{
if (!s_refreshTextureQueryList)
break;
auto dur = std::chrono::high_resolution_clock::now() - begin;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
if (ms >= 1000) // don't stall more than one second
return std::vector<LatteTextureInformation>();
}
return s_cacheInfoList;
}
void LatteTexture_RefreshInfoCache()
{
if (!s_refreshTextureQueryList)
return;
std::vector<LatteTextureInformation> infoCache;
std::set<LatteTexture*> visitedTextures;
std::unordered_set<LatteTextureView*> allViews = LatteTextureViewLookupCache::GetAllViews();
for (auto& it : allViews)
{
LatteTexture* baseTexture = it->baseTexture;
if(visitedTextures.find(baseTexture) != visitedTextures.end())
continue;
visitedTextures.emplace(baseTexture);
// add cache info
auto& entry = infoCache.emplace_back();
entry.physAddress = baseTexture->physAddress;
entry.physMipAddress = baseTexture->physMipAddress;
entry.width = baseTexture->width;
entry.height = baseTexture->height;
entry.depth = baseTexture->depth;
entry.pitch = baseTexture->pitch;
entry.mipLevels = baseTexture->mipLevels;
entry.format = baseTexture->format;
entry.isDepth = baseTexture->isDepth;
entry.dim = baseTexture->dim;
entry.tileMode = baseTexture->tileMode;
entry.lastAccessTick = baseTexture->lastAccessTick;
entry.lastAccessFrameCount = baseTexture->lastAccessFrameCount;
entry.isUpdatedOnGPU = baseTexture->isUpdatedOnGPU;
// overwrite info
entry.overwriteInfo.hasResolutionOverwrite = baseTexture->overwriteInfo.hasResolutionOverwrite;
entry.overwriteInfo.width = baseTexture->overwriteInfo.width;
entry.overwriteInfo.height = baseTexture->overwriteInfo.height;
entry.overwriteInfo.depth = baseTexture->overwriteInfo.depth;
// count number of alternative views
entry.alternativeViewCount = 0;
// views
for (auto& viewItr : baseTexture->views)
{
if(viewItr == baseTexture->baseView)
continue;
auto& viewEntry = entry.views.emplace_back();
viewEntry.physAddress = viewItr->baseTexture->physAddress;
viewEntry.physMipAddress = viewItr->baseTexture->physMipAddress;
viewEntry.width = viewItr->baseTexture->width;
viewEntry.height = viewItr->baseTexture->height;
viewEntry.pitch = viewItr->baseTexture->pitch;
viewEntry.firstMip = viewItr->firstMip;
viewEntry.numMip = viewItr->numMip;
viewEntry.firstSlice = viewItr->firstSlice;
viewEntry.numSlice = viewItr->numSlice;
viewEntry.format = viewItr->format;
viewEntry.dim = viewItr->dim;
}
}
std::swap(s_cacheInfoList, infoCache);
s_refreshTextureQueryList.store(false);
}
void LatteTexture_AddTexMemOccupancyInterval(LatteTextureSliceMipInfo* sliceMipInfo)
{
TexMemOccupancyEntry entry;
entry.addrStart = sliceMipInfo->addrStart;
entry.addrEnd = sliceMipInfo->addrEnd;
entry.sliceMipInfo = sliceMipInfo;
loopItrMemOccupancyBuckets(entry.addrStart, entry.addrEnd)
list_texMemOccupancyBucket[bucketIndex].push_back(entry);
}
void LatteTexture_RegisterTextureMemoryOccupancy(LatteTexture* texture)
{
sint32 mipLevels = texture->mipLevels;
sint32 sliceCount = texture->depth;
for (sint32 mipIndex = 0; mipIndex < mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
mipSliceCount = std::max(1, sliceCount >> mipIndex);
else
mipSliceCount = sliceCount;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
LatteTexture_AddTexMemOccupancyInterval(sliceMipInfo);
}
}
}
void LatteTexture_RemoveTexMemOccupancyInterval(LatteTexture* texture, LatteTextureSliceMipInfo* sliceMipInfo)
{
loopItrMemOccupancyBuckets(sliceMipInfo->addrStart, sliceMipInfo->addrEnd)
{
for (sint32 i = 0; i < list_texMemOccupancyBucket[bucketIndex].size(); i++)
{
if (list_texMemOccupancyBucket[bucketIndex][i].sliceMipInfo->texture == texture)
{
list_texMemOccupancyBucket[bucketIndex].erase(list_texMemOccupancyBucket[bucketIndex].begin() + i);
i--;
continue;
}
}
}
}
void LatteTexture_UnregisterTextureMemoryOccupancy(LatteTexture* texture)
{
sint32 mipLevels = texture->mipLevels;
sint32 sliceCount = texture->depth;
for (sint32 mipIndex = 0; mipIndex < mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
mipSliceCount = std::max(1, sliceCount >> mipIndex);
else
mipSliceCount = sliceCount;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
LatteTexture_RemoveTexMemOccupancyInterval(texture, sliceMipInfo);
}
}
}
// calculate the actually accessed data range
// the resulting range is an estimate and may be smaller than the actual slice size (but not larger)
void LatteTexture_EstimateMipSliceAccessedDataRange(LatteTexture* texture, sint32 sliceIndex, sint32 mipIndex, LatteTextureSliceMipInfo* sliceMipInfo)
{
uint32 estAddrStart;
uint32 estAddrEnd;
LatteTextureLoader_estimateAccessedDataRange(texture, sliceIndex, mipIndex, estAddrStart, estAddrEnd);
cemu_assert_debug(estAddrStart >= sliceMipInfo->addrStart);
cemu_assert_debug(estAddrEnd <= sliceMipInfo->addrEnd);
cemu_assert_debug(estAddrStart <= estAddrEnd);
sliceMipInfo->estDataAddrStart = estAddrStart;
sliceMipInfo->estDataAddrEnd = estAddrEnd;
}
void LatteTexture_InitSliceAndMipInfo(LatteTexture* texture)
{
cemu_assert_debug(texture->mipLevels > 0);
cemu_assert_debug(texture->depth > 0);
sint32 mipSliceCount = texture->GetSliceMipArraySize();
texture->sliceMipInfo = new LatteTextureSliceMipInfo[mipSliceCount]();
// todo - mipLevels can be greater than maximum possible mip count. How to handle this? Probably should differentiate between mipLevels and effective mip levels
sint32 mipLevels = texture->mipLevels;
sint32 sliceCount = texture->depth;
for (sint32 mipIndex = 0; mipIndex < mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
{
mipSliceCount = std::max(1, sliceCount >> mipIndex);
}
else
mipSliceCount = sliceCount;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
uint32 calcSliceAddr;
uint32 calcSliceSize;
sint32 calcSubSliceIndex;
LatteAddrLib::CalculateMipAndSliceAddr(texture->physAddress, texture->physMipAddress, texture->format, texture->width, texture->height, texture->depth, texture->dim, texture->tileMode, texture->swizzle, 0, mipIndex, sliceIndex, &calcSliceAddr, &calcSliceSize, &calcSubSliceIndex);
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
sliceMipInfo->addrStart = calcSliceAddr;
sliceMipInfo->addrEnd = calcSliceAddr + calcSliceSize;
sliceMipInfo->subIndex = calcSubSliceIndex;
sliceMipInfo->dataChecksum = 0;
sliceMipInfo->sliceIndex = sliceIndex;
sliceMipInfo->mipIndex = mipIndex;
sliceMipInfo->texture = texture;
// get additional slice/mip info
LatteAddrLib::AddrSurfaceInfo_OUT surfaceInfo;
LatteAddrLib::GX2CalculateSurfaceInfo(texture->format, texture->width, texture->height, texture->depth, texture->dim, Latte::MakeGX2TileMode(texture->tileMode), 0, mipIndex, &surfaceInfo);
sliceMipInfo->tileMode = surfaceInfo.hwTileMode;
if (mipIndex == 0)
sliceMipInfo->pitch = texture->pitch; // for the base level, use the pitch value configured in hardware
else
sliceMipInfo->pitch = surfaceInfo.pitch;
LatteTexture_EstimateMipSliceAccessedDataRange(texture, sliceIndex, mipIndex, sliceMipInfo);
}
}
}
// if this function returns false, textures will not be synchronized even if their data overlaps
bool LatteTexture_IsFormatViewCompatible(Latte::E_GX2SURFFMT formatA, Latte::E_GX2SURFFMT formatB)
{
if(formatA == formatB)
return true; // if the format is identical then compatibility must be guaranteed (otherwise we can't create the necessary default view of a texture)
// todo - find a better way to handle this
for (sint32 swap = 0; swap < 2; swap++)
{
// other formats
// seems like format 0x19 (RGB10_A2) has issues on OpenGL Intel and AMD when copying texture data
Latte::E_HWSURFFMT hwFormatA = Latte::GetHWFormat(formatA);
Latte::E_HWSURFFMT hwFormatB = Latte::GetHWFormat(formatB);
if (hwFormatA == Latte::E_HWSURFFMT::HWFMT_2_10_10_10 && formatB == Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT)
return false;
if (formatA == Latte::E_GX2SURFFMT::R11_G11_B10_FLOAT && hwFormatB == Latte::E_HWSURFFMT::HWFMT_2_10_10_10)
return false;
if (hwFormatA == Latte::E_HWSURFFMT::HWFMT_2_10_10_10 && formatB == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM)
return false;
if (formatA == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM && hwFormatB == Latte::E_HWSURFFMT::HWFMT_2_10_10_10)
return false;
// format A1B5G5R5 views are not compatible with other 16-bit formats in OpenGL
if (formatA == Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM || formatB == Latte::E_GX2SURFFMT::A1_B5_G5_R5_UNORM)
return false;
// used in N64 VC (E.g. Super Mario 64)
// used in Smash
if (formatA == Latte::E_GX2SURFFMT::D24_S8_UNORM && formatB == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SNORM)
return false;
if (formatA == Latte::E_GX2SURFFMT::R32_FLOAT && formatB == Latte::E_GX2SURFFMT::R10_G10_B10_A2_SNORM)
return false;
// loop again with swapped vars
Latte::E_GX2SURFFMT temp = formatA;
formatA = formatB;
formatB = temp;
}
return true;
}
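// returns true if both formats use the same number of bits per texel/block (with a few special-cased incompatible combinations)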
bool LatteTexture_IsTexelSizeCompatibleFormat(Latte::E_GX2SURFFMT formatA, Latte::E_GX2SURFFMT formatB)
{
// handle some special cases where formats are incompatible regardless of equal bpp
if (formatA == Latte::E_GX2SURFFMT::D24_S8_UNORM && formatB == Latte::E_GX2SURFFMT::D32_FLOAT)
return false;
if (Latte::IsCompressedFormat(formatA) && Latte::IsCompressedFormat(formatB))
{
if (Latte::GetHWFormat(formatA) != Latte::GetHWFormat(formatB))
return false; // compressed formats with different encodings are considered incompatible
}
return Latte::GetFormatBits((Latte::E_GX2SURFFMT)formatA) == Latte::GetFormatBits((Latte::E_GX2SURFFMT)formatB);
}
void LatteTexture_copyData(LatteTexture* srcTexture, LatteTexture* dstTexture, sint32 mipCount, sint32 sliceCount)
{
cemu_assert_debug(mipCount != 0);
cemu_assert_debug(sliceCount != 0);
sint32 effectiveCopyWidth = srcTexture->width;
sint32 effectiveCopyHeight = srcTexture->height;
if (LatteTexture_doesEffectiveRescaleRatioMatch(dstTexture, 0, srcTexture, 0))
{
// adjust copy size
LatteTexture_scaleToEffectiveSize(dstTexture, &effectiveCopyWidth, &effectiveCopyHeight, 0);
}
else
{
sint32 effectiveWidth_src, effectiveHeight_src;
srcTexture->GetEffectiveSize(effectiveWidth_src, effectiveHeight_src, 0);
sint32 effectiveWidth_dst, effectiveHeight_dst;
dstTexture->GetEffectiveSize(effectiveWidth_dst, effectiveHeight_dst, 0);
debug_printf("texture_copyData(): Effective size mismatch\n");
cemuLog_logDebug(LogType::Force, "texture_copyData(): Effective size mismatch (due to texture rule)");
cemuLog_logDebug(LogType::Force, "Destination: origResolution {:04}x{:04} effectiveResolution {:04}x{:04} fmt {:04x} mipIndex {}", dstTexture->width, dstTexture->height, effectiveWidth_dst, effectiveHeight_dst, (uint32)dstTexture->format, 0);
cemuLog_logDebug(LogType::Force, "Source: origResolution {:04}x{:04} effectiveResolution {:04}x{:04} fmt {:04x} mipIndex {}", srcTexture->width, srcTexture->height, effectiveWidth_src, effectiveHeight_src, (uint32)srcTexture->format, 0);
return;
}
for (sint32 mipIndex = 0; mipIndex < mipCount; mipIndex++)
{
sint32 sliceCopyWidth = std::max(effectiveCopyWidth >> mipIndex, 1);
sint32 sliceCopyHeight = std::max(effectiveCopyHeight >> mipIndex, 1);
g_renderer->texture_copyImageSubData(srcTexture, mipIndex, 0, 0, 0, dstTexture, mipIndex, 0, 0, 0, sliceCopyWidth, sliceCopyHeight, sliceCount);
sint32 mipSliceCount = sliceCount;
if (dstTexture->Is3DTexture())
mipSliceCount >>= mipIndex;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* srcTexSliceInfo = srcTexture->sliceMipInfo + srcTexture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
LatteTextureSliceMipInfo* dstTexSliceInfo = dstTexture->sliceMipInfo + dstTexture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
dstTexSliceInfo->lastDynamicUpdate = srcTexSliceInfo->lastDynamicUpdate;
}
}
}
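// compare the dimensions of two surfaces; compressed formats are scaled from block units to texel units (4x4 blocks) before comparison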
template<bool bothMustMatch>
bool LatteTexture_DoesWidthHeightMatch(Latte::E_GX2SURFFMT format1, uint32 width1, uint32 height1, Latte::E_GX2SURFFMT format2, uint32 width2, uint32 height2)
{
if (Latte::IsCompressedFormat(format1))
{
width1 <<= 2;
height1 <<= 2;
}
if (Latte::IsCompressedFormat(format2))
{
width2 <<= 2;
height2 <<= 2;
}
if constexpr(bothMustMatch)
return width1 == width2 && height1 == height2;
else
return width1 == width2 || height1 == height2;
}
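// copy a rectangular region of a single slice/mip between textures, taking resolution overwrites (graphic pack scaling) into account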
void LatteTexture_CopySlice(LatteTexture* srcTexture, sint32 srcSlice, sint32 srcMip, LatteTexture* dstTexture, sint32 dstSlice, sint32 dstMip, sint32 srcX, sint32 srcY, sint32 dstX, sint32 dstY, sint32 width, sint32 height)
{
if (srcTexture->isDepth != dstTexture->isDepth)
{
g_renderer->surfaceCopy_copySurfaceWithFormatConversion(srcTexture, srcMip, srcSlice, dstTexture, dstMip, dstSlice, width, height);
return;
}
// rescale copy size
sint32 effectiveCopyWidth = width;
sint32 effectiveCopyHeight = height;
LatteTexture_scaleToEffectiveSize(srcTexture, &effectiveCopyWidth, &effectiveCopyHeight, 0);
sint32 effectiveSrcX = srcX;
sint32 effectiveSrcY = srcY;
LatteTexture_scaleToEffectiveSize(srcTexture, &effectiveSrcX, &effectiveSrcY, 0);
sint32 effectiveDstX = dstX;
sint32 effectiveDstY = dstY;
LatteTexture_scaleToEffectiveSize(dstTexture, &effectiveDstX, &effectiveDstY, 0);
// check if rescale is compatible
if (LatteTexture_doesEffectiveRescaleRatioMatch(dstTexture, 0, srcTexture, 0) == false)
{
sint32 effectiveWidth_src = srcTexture->overwriteInfo.hasResolutionOverwrite ? srcTexture->overwriteInfo.width : srcTexture->width;
sint32 effectiveHeight_src = srcTexture->overwriteInfo.hasResolutionOverwrite ? srcTexture->overwriteInfo.height : srcTexture->height;
sint32 effectiveWidth_dst = dstTexture->overwriteInfo.hasResolutionOverwrite ? dstTexture->overwriteInfo.width : dstTexture->width;
sint32 effectiveHeight_dst = dstTexture->overwriteInfo.hasResolutionOverwrite ? dstTexture->overwriteInfo.height : dstTexture->height;
if (cemuLog_isLoggingEnabled(LogType::TextureCache))
{
cemuLog_log(LogType::Force, "_copySlice(): Unable to sync textures with mismatching scale ratio (due to texture rule)");
float ratioWidth_src = (float)effectiveWidth_src / (float)srcTexture->width;
float ratioHeight_src = (float)effectiveHeight_src / (float)srcTexture->height;
float ratioWidth_dst = (float)effectiveWidth_dst / (float)dstTexture->width;
float ratioHeight_dst = (float)effectiveHeight_dst / (float)dstTexture->height;
cemuLog_log(LogType::Force, "Source: {:08x} origResolution {:4}/{:4} effectiveResolution {:4}/{:4} fmt {:04x} mipIndex {} ratioW/H: {:.4}/{:.4}", srcTexture->physAddress, srcTexture->width, srcTexture->height, effectiveWidth_src, effectiveHeight_src, (uint32)srcTexture->format, srcMip, ratioWidth_src, ratioHeight_src);
cemuLog_log(LogType::Force, "Destination: {:08x} origResolution {:4}/{:4} effectiveResolution {:4}/{:4} fmt {:04x} mipIndex {} ratioW/H: {:.4}/{:.4}", dstTexture->physAddress, dstTexture->width, dstTexture->height, effectiveWidth_dst, effectiveHeight_dst, (uint32)dstTexture->format, dstMip, ratioWidth_dst, ratioHeight_dst);
}
//cemuLog_logDebug(LogType::Force, "If these textures are not meant to share data you can ignore this");
return;
}
// todo - store 'lastUpdated' value per slice/mip and copy its value when copying the slice data
g_renderer->texture_copyImageSubData(srcTexture, srcMip, effectiveSrcX, effectiveSrcY, srcSlice, dstTexture, dstMip, effectiveDstX, effectiveDstY, dstSlice, effectiveCopyWidth, effectiveCopyHeight, 1);
}
bool LatteTexture_GetSubtextureSliceAndMip(LatteTexture* baseTexture, LatteTexture* mipTexture, sint32* baseSliceIndex, sint32* baseMipIndex)
{
LatteTextureSliceMipInfo* mipTextureSliceInfo = mipTexture->sliceMipInfo + mipTexture->GetSliceMipArrayIndex(0, 0);
// todo - this can be optimized by first determining the mip level from pitch
for (sint32 mipIndex = 0; mipIndex < baseTexture->mipLevels; mipIndex++)
{
sint32 sliceCount;
if (baseTexture->Is3DTexture())
sliceCount = std::max(baseTexture->depth >> mipIndex, 1);
else
sliceCount = baseTexture->depth;
for (sint32 sliceIndex = 0; sliceIndex < sliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = baseTexture->sliceMipInfo + baseTexture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
if (sliceMipInfo->addrStart == mipTextureSliceInfo->addrStart && sliceMipInfo->subIndex == mipTextureSliceInfo->subIndex)
{
*baseSliceIndex = sliceIndex;
*baseMipIndex = mipIndex;
return true;
}
// todo - support overlapping textures with a non-zero y-offset
}
}
return false;
}
// if a texture shares memory with another texture then flag those textures as invalidated (on next use, synchronize data)
void LatteTexture_MarkDynamicTextureAsChanged(LatteTextureView* textureView, sint32 sliceIndex, sint32 mipIndex, uint64 eventCounter)
{
LatteTexture* baseTexture = textureView->baseTexture;
baseTexture->lastWriteEventCounter = eventCounter;
sint32 aSliceIndex = textureView->firstSlice + sliceIndex;
sint32 aMipIndex = textureView->firstMip + mipIndex;
LatteTextureSliceMipInfo* baseSliceMipInfo = baseTexture->sliceMipInfo + baseTexture->GetSliceMipArrayIndex(aSliceIndex, aMipIndex);
baseSliceMipInfo->lastDynamicUpdate = eventCounter;
LatteTexture_MarkConnectedTexturesForReloadFromDynamicTextures(textureView->baseTexture);
}
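// synchronize one slice/mip from srcTexture to dstTexture, converting between compressed and uncompressed units and handling a few game-specific memory layouts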
void LatteTexture_SyncSlice(LatteTexture* srcTexture, sint32 srcSliceIndex, sint32 srcMipIndex, LatteTexture* dstTexture, sint32 dstSliceIndex, sint32 dstMipIndex)
{
sint32 srcWidth = srcTexture->width;
sint32 srcHeight = srcTexture->height;
sint32 dstWidth = dstTexture->width;
sint32 dstHeight = dstTexture->height;
if(srcTexture->overwriteInfo.hasFormatOverwrite != dstTexture->overwriteInfo.hasFormatOverwrite)
return; // dont sync: format overwrite state needs to match. Not strictly necessary but it simplifies logic down the road
else if(srcTexture->overwriteInfo.hasFormatOverwrite && srcTexture->overwriteInfo.format != dstTexture->overwriteInfo.format)
return; // both are overwritten but with different formats
if (srcMipIndex == 0 && dstMipIndex == 0 && (srcTexture->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED || srcTexture->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THIN1) && srcTexture->height > dstTexture->height && (srcTexture->height % dstTexture->height) == 0)
{
bool isMatch = srcTexture->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED;
if (srcTexture->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THIN1 && srcTexture->width == 32)
{
// special case for CoD BO2, where 1024x32 and 32x32x32 textures share memory
isMatch = true;
}
if (isMatch && srcTexture->IsCompressedFormat() == false && dstTexture->IsCompressedFormat() == false)
{
sint32 virtualSlices = srcTexture->height / dstTexture->height;
if (dstTexture->depth == virtualSlices)
{
// special case for Ninja Gaiden
// it initializes a 24x24x24 texture array as a 24x576x1 2D texture (using tilemode 1)
sint32 copyWidth = std::min(srcWidth, dstWidth);
sint32 copyHeight = std::min(srcHeight, dstHeight);
for (sint32 slice = 0; slice < virtualSlices; slice++)
LatteTexture_CopySlice(srcTexture, srcSliceIndex, srcMipIndex, dstTexture, dstSliceIndex + slice, dstMipIndex, 0, slice * dstTexture->height, 0, 0, copyWidth, copyHeight);
}
return;
}
}
bool srcIsCompressed = srcTexture->IsCompressedFormat();
bool dstIsCompressed = dstTexture->IsCompressedFormat();
if (srcIsCompressed != dstIsCompressed)
{
// convert into unit of source texture
if (srcIsCompressed == false)
{
// destination compressed, source uncompressed (integer format)
dstWidth >>= 2;
dstHeight >>= 2;
}
else
{
// destination uncompressed (integer format), source compressed
dstWidth <<= 2;
dstHeight <<= 2;
}
}
srcWidth = std::max(srcWidth >> srcMipIndex, 1);
srcHeight = std::max(srcHeight >> srcMipIndex, 1);
dstWidth = std::max(dstWidth >> dstMipIndex, 1);
dstHeight = std::max(dstHeight >> dstMipIndex, 1);
sint32 copyWidth = std::min(srcWidth, dstWidth);
sint32 copyHeight = std::min(srcHeight, dstHeight);
LatteTexture_CopySlice(srcTexture, srcSliceIndex, srcMipIndex, dstTexture, dstSliceIndex, dstMipIndex, 0, 0, 0, 0, copyWidth, copyHeight);
}
void LatteTexture_UpdateTextureFromDynamicChanges(LatteTexture* texture)
{
// note: Currently this function assumes that only one other texture is updated per slice/mip (if multiple overlap, we should merge using the latest timestamp of each individually)
for (auto& texRel : texture->list_compatibleRelations)
{
LatteTexture* baseTexture = texRel->baseTexture;
LatteTexture* subTexture = texRel->subTexture;
for (sint32 cMipIndex = 0; cMipIndex < texRel->mipCount; cMipIndex++)
{
sint32 mipSliceCount = texRel->sliceCount;
if (texRel->baseTexture->Is3DTexture())
{
cemu_assert_debug(cMipIndex == 0); // values above 0 need testing
mipSliceCount >>= cMipIndex;
}
for (sint32 cSliceIndex = 0; cSliceIndex < mipSliceCount; cSliceIndex++)
{
LatteTextureSliceMipInfo* baseSliceMipInfo = baseTexture->sliceMipInfo + baseTexture->GetSliceMipArrayIndex(texRel->baseSliceIndex + cSliceIndex, texRel->baseMipIndex + cMipIndex);
LatteTextureSliceMipInfo* subSliceMipInfo = subTexture->sliceMipInfo + subTexture->GetSliceMipArrayIndex(cSliceIndex, cMipIndex);
if (texture == baseTexture)
{
// baseTexture is target texture
if (baseSliceMipInfo->lastDynamicUpdate < subSliceMipInfo->lastDynamicUpdate)
{
LatteTexture_SyncSlice(subTexture, cSliceIndex, cMipIndex, baseTexture, texRel->baseSliceIndex + cSliceIndex, texRel->baseMipIndex + cMipIndex);
baseSliceMipInfo->lastDynamicUpdate = subSliceMipInfo->lastDynamicUpdate;
if(subTexture->isUpdatedOnGPU)
texture->isUpdatedOnGPU = true;
}
}
else
{
// subTexture is target texture
if (subSliceMipInfo->lastDynamicUpdate < baseSliceMipInfo->lastDynamicUpdate)
{
LatteTexture_SyncSlice(baseTexture, texRel->baseSliceIndex + cSliceIndex, texRel->baseMipIndex + cMipIndex, subTexture, cSliceIndex, cMipIndex);
subSliceMipInfo->lastDynamicUpdate = baseSliceMipInfo->lastDynamicUpdate;
if (baseTexture->isUpdatedOnGPU)
texture->isUpdatedOnGPU = true;
}
}
}
}
}
}
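// two slices/mips are only considered layout-compatible if their tile modes match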
bool _LatteTexture_IsTileModeCompatible(LatteTexture* texture1, sint32 mipIndex1, LatteTexture* texture2, sint32 mipIndex2)
{
if (mipIndex1 == 0 && mipIndex2 == 0)
return texture1->tileMode == texture2->tileMode;
LatteTextureSliceMipInfo* texture1SliceInfo = texture1->sliceMipInfo + texture1->GetSliceMipArrayIndex(0, mipIndex1);
LatteTextureSliceMipInfo* texture2SliceInfo = texture2->sliceMipInfo + texture2->GetSliceMipArrayIndex(0, mipIndex2);
if (texture1SliceInfo->tileMode == texture2SliceInfo->tileMode)
return true;
return false;
}
bool __LatteTexture_IsBlockedFormatRelation(LatteTexture* texture1, LatteTexture* texture2)
{
if (texture1->isDepth && texture2->isDepth == false)
{
// necessary for Smash? (currently our depth to color copy always converts and the depth ends up in R only)
if (texture1->format == Latte::E_GX2SURFFMT::D32_FLOAT && Latte::GetHWFormat(texture2->format) == Latte::E_HWSURFFMT::HWFMT_8_8_8_8)
return true;
}
// Vulkan has stricter rules
if (g_renderer->GetType() == RendererAPI::Vulkan)
{
// found in Smash (Wii Fit Stage)
if (texture1->format == Latte::E_GX2SURFFMT::D32_FLOAT && Latte::GetHWFormat(texture2->format) == Latte::E_HWSURFFMT::HWFMT_8_24)
return true;
}
return false;
}
bool LatteTexture_IsBlockedFormatRelation(LatteTexture* texture1, LatteTexture* texture2)
{
if (__LatteTexture_IsBlockedFormatRelation(texture1, texture2))
return true;
return __LatteTexture_IsBlockedFormatRelation(texture2, texture1);
}
// called if two textures are known to overlap in memory
// this function then tries to figure out the details and registers the relation in texture*->list_compatibleRelations
void LatteTexture_TrackTextureRelation(LatteTexture* texture1, LatteTexture* texture2)
{
// make sure texture 2 is always at texture 1 mip level 0 or beyond
if (texture1->physAddress > texture2->physAddress)
return LatteTexture_TrackTextureRelation(texture2, texture1);
// check if this texture relation is already tracked
cemu_assert_debug(texture1->physAddress != 0);
cemu_assert_debug(texture2->physAddress != 0);
for (auto& it : texture1->list_compatibleRelations)
{
if (it->baseTexture == texture1 && it->subTexture == texture2)
return; // association already known
}
// check for blocked format combination
if (LatteTexture_IsBlockedFormatRelation(texture1, texture2))
return;
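// note: the "&& false" below makes this branch dead code; it appears to be intentionally disabled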
if (texture1->physAddress == texture2->physAddress && false)
{
// both textures overlap at mip level 0
cemu_assert_debug(texture1->swizzle == texture2->swizzle);
cemu_assert_debug(texture1->tileMode == texture2->tileMode);
if (LatteTexture_DoesWidthHeightMatch<false>(texture1->format, texture1->width, texture1->height, texture2->format, texture2->width, texture2->height))
{
cemu_assert_unimplemented();
}
}
else
{
sint32 baseSliceIndex;
sint32 baseMipIndex;
if (texture1->physAddress == texture2->physAddress)
{
baseSliceIndex = 0;
baseMipIndex = 0;
}
else
{
if (LatteTexture_GetSubtextureSliceAndMip(texture1, texture2, &baseSliceIndex, &baseMipIndex) == false)
{
return;
}
}
sint32 sharedMipLevels = 1;
// todo - support for multiple shared mip levels
// check if pitch is compatible
LatteTextureSliceMipInfo* texture1SliceInfo = texture1->sliceMipInfo + texture1->GetSliceMipArrayIndex(baseSliceIndex, baseMipIndex);
LatteTextureSliceMipInfo* texture2SliceInfo = texture2->sliceMipInfo + texture2->GetSliceMipArrayIndex(0, 0);
if (_LatteTexture_IsTileModeCompatible(texture1, baseMipIndex, texture2, 0) == false)
return; // not compatible
if (texture1SliceInfo->pitch != texture2SliceInfo->pitch)
return; // not compatible
// calculate compatible depth range
sint32 baseRemainingDepth = texture1->GetMipDepth(baseMipIndex) - baseSliceIndex;
cemu_assert_debug(baseRemainingDepth >= 0);
sint32 compatibleDepthRange = std::min(baseRemainingDepth, texture2->depth);
cemu_assert_debug(compatibleDepthRange > 0);
// create association
LatteTextureRelation* rel = (LatteTextureRelation*)malloc(sizeof(LatteTextureRelation));
memset(rel, 0, sizeof(LatteTextureRelation));
rel->baseTexture = texture1;
rel->subTexture = texture2;
rel->baseMipIndex = baseMipIndex;
rel->baseSliceIndex = baseSliceIndex;
rel->mipCount = sharedMipLevels;
rel->sliceCount = compatibleDepthRange;
rel->yOffset = 0; // todo
texture1->list_compatibleRelations.push_back(rel);
texture2->list_compatibleRelations.push_back(rel);
}
}
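// register a raw data overlap (address ranges intersect but the textures are not relation-compatible); the overlap is tracked in both directions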
void LatteTexture_TrackDataOverlap(LatteTexture* texture, LatteTextureSliceMipInfo* sliceMipInfo, TexMemOccupancyEntry& occupancy)
{
// todo - handle tile thickness and z offset
// todo - check address range overlap
auto& occMipSliceInfo = occupancy.sliceMipInfo;
if ((sliceMipInfo->addrEnd > occMipSliceInfo->addrStart && sliceMipInfo->addrStart < occMipSliceInfo->addrEnd) == false)
return;
// check if this overlap is already tracked
for (auto& it : sliceMipInfo->list_dataOverlap)
{
if (it.destMipSliceInfo == occupancy.sliceMipInfo)
return;
}
// register texture->dest
LatteTextureSliceMipDataOverlap_t overlapEntry;
overlapEntry.destMipSliceInfo = occupancy.sliceMipInfo;
overlapEntry.destTexture = occupancy.sliceMipInfo->texture;
sliceMipInfo->list_dataOverlap.push_back(overlapEntry);
// register dest->texture
LatteTextureSliceMipDataOverlap_t overlapEntry2;
overlapEntry2.destMipSliceInfo = sliceMipInfo;
overlapEntry2.destTexture = sliceMipInfo->texture;
occupancy.sliceMipInfo->list_dataOverlap.push_back(overlapEntry2);
}
void _LatteTexture_RemoveDataOverlapTracking(LatteTexture* texture, LatteTextureSliceMipInfo* sliceMipInfo, LatteTextureSliceMipDataOverlap_t& dataOverlap)
{
LatteTexture* destTexture = dataOverlap.destTexture;
LatteTextureSliceMipInfo* destSliceMipInfo = dataOverlap.destMipSliceInfo;
// delete from dest
for (auto it = destSliceMipInfo->list_dataOverlap.begin(); it != destSliceMipInfo->list_dataOverlap.end();)
{
if (it->destTexture == texture)
it = destSliceMipInfo->list_dataOverlap.erase(it);
else if (it->destTexture == destTexture)
cemu_assert_unimplemented();
else
it++;
}
}
void LatteTexture_DeleteDataOverlapTracking(LatteTexture* texture, LatteTextureSliceMipInfo* sliceMipInfo)
{
for(auto& it : sliceMipInfo->list_dataOverlap)
_LatteTexture_RemoveDataOverlapTracking(texture, sliceMipInfo, it);
sliceMipInfo->list_dataOverlap.resize(0);
}
void LatteTexture_DeleteDataOverlapTracking(LatteTexture* texture)
{
sint32 mipLevels = texture->mipLevels;
sint32 sliceCount = texture->depth;
for (sint32 mipIndex = 0; mipIndex < mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
mipSliceCount = std::max(1, sliceCount >> mipIndex);
else
mipSliceCount = sliceCount;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
LatteTexture_DeleteDataOverlapTracking(texture, sliceMipInfo);
}
}
}
void LatteTexture_GatherTextureRelations(LatteTexture* texture)
{
for (sint32 mipIndex = 0; mipIndex < texture->mipLevels; mipIndex++)
{
sint32 mipSliceCount;
if (texture->Is3DTexture())
mipSliceCount = std::max(1, texture->depth >> mipIndex);
else
mipSliceCount = texture->depth;
for (sint32 sliceIndex = 0; sliceIndex < mipSliceCount; sliceIndex++)
{
LatteTextureSliceMipInfo* sliceMipInfo = texture->sliceMipInfo + texture->GetSliceMipArrayIndex(sliceIndex, mipIndex);
loopItrMemOccupancyBuckets(sliceMipInfo->addrStart, sliceMipInfo->addrEnd)
{
for (auto& occupancy : list_texMemOccupancyBucket[bucketIndex])
{
LatteTexture* itrTexture = occupancy.sliceMipInfo->texture;
if (itrTexture == texture)
continue; // ignore self
if (sliceMipInfo->addrEnd >= occupancy.addrStart && sliceMipInfo->addrStart < occupancy.addrEnd)
{
if (sliceMipInfo->addrStart == occupancy.addrStart && sliceMipInfo->subIndex == occupancy.sliceMipInfo->subIndex)
{
// overlapping with zero x/y offset
if (sliceMipInfo->pitch == occupancy.sliceMipInfo->pitch && LatteTexture_IsTexelSizeCompatibleFormat(texture->format, itrTexture->format)
&& sliceMipInfo->tileMode == occupancy.sliceMipInfo->tileMode &&
LatteTexture_IsFormatViewCompatible(texture->format, itrTexture->format))
{
LatteTexture_TrackTextureRelation(texture, itrTexture);
}
else
{
// pitch not compatible or format not compatible
}
}
else
{
LatteTexture_TrackDataOverlap(texture, sliceMipInfo, occupancy);
}
}
}
}
}
}
}
void LatteTexture_DeleteTextureRelations(LatteTexture* texture)
{
while (texture->list_compatibleRelations.empty() == false)
{
LatteTextureRelation* rel = texture->list_compatibleRelations[0];
rel->baseTexture->list_compatibleRelations.erase(std::find(rel->baseTexture->list_compatibleRelations.begin(), rel->baseTexture->list_compatibleRelations.end(), rel));
rel->subTexture->list_compatibleRelations.erase(std::find(rel->subTexture->list_compatibleRelations.begin(), rel->subTexture->list_compatibleRelations.end(), rel));
free(rel);
}
texture->list_compatibleRelations.clear();
}
enum VIEWCOMPATIBILITY
{
VIEW_COMPATIBLE, // subtexture can be represented as view into base texture
VIEW_BASE_TOO_SMALL, // base texture must be extended (depth or mip levels) to fit sub texture
VIEW_NOT_COMPATIBLE,
};
bool IsDimensionCompatibleForGX2View(Latte::E_DIM baseDim, Latte::E_DIM viewDim)
{
// Note that some combinations depend on the exact view/slice index and count which we currently ignore (like a 3D view of a 3D texture)
bool isCompatible =
(baseDim == viewDim) ||
(baseDim == Latte::E_DIM::DIM_CUBEMAP && viewDim == Latte::E_DIM::DIM_2D) ||
(baseDim == Latte::E_DIM::DIM_2D && viewDim == Latte::E_DIM::DIM_2D_ARRAY) ||
(baseDim == Latte::E_DIM::DIM_2D_ARRAY && viewDim == Latte::E_DIM::DIM_2D) ||
(baseDim == Latte::E_DIM::DIM_CUBEMAP && viewDim == Latte::E_DIM::DIM_2D_ARRAY) ||
(baseDim == Latte::E_DIM::DIM_2D_ARRAY && viewDim == Latte::E_DIM::DIM_CUBEMAP) ||
(baseDim == Latte::E_DIM::DIM_3D && viewDim == Latte::E_DIM::DIM_2D_ARRAY);
if(isCompatible)
return true;
// these combinations have been seen in use by games and are considered incompatible:
// (baseDim == Latte::E_DIM::DIM_2D_ARRAY && viewDim == Latte::E_DIM::DIM_3D) -> Not allowed on OpenGL
// (baseDim == Latte::E_DIM::DIM_2D && viewDim == Latte::E_DIM::DIM_2D_MSAA)
// (baseDim == Latte::E_DIM::DIM_2D && viewDim == Latte::E_DIM::DIM_1D)
// (baseDim == Latte::E_DIM::DIM_2D && viewDim == Latte::E_DIM::DIM_3D)
// (baseDim == Latte::E_DIM::DIM_3D && viewDim == Latte::E_DIM::DIM_2D)
// (baseDim == Latte::E_DIM::DIM_3D && viewDim == Latte::E_DIM::DIM_3D) -> Only compatible if the same depth and shared at mip/slice 0
// (baseDim == Latte::E_DIM::DIM_2D && viewDim == Latte::E_DIM::DIM_CUBEMAP)
// (baseDim == Latte::E_DIM::DIM_2D_MSAA && viewDim == Latte::E_DIM::DIM_2D)
// (baseDim == Latte::E_DIM::DIM_1D && viewDim == Latte::E_DIM::DIM_2D)
return false;
}
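// check whether a texture with the given parameters can be represented as a view into baseTexture
// relativeMipIndex/relativeSliceIndex receive the mip/slice offset within the base texture if the view does not start at its first mip/slice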
VIEWCOMPATIBILITY LatteTexture_CanTextureBeRepresentedAsView(LatteTexture* baseTexture, uint32 physAddr, sint32 width, sint32 height, sint32 pitch, Latte::E_DIM dimView, Latte::E_GX2SURFFMT format, bool isDepth, sint32 firstMip, sint32 numMip, sint32 firstSlice, sint32 numSlice, sint32& relativeMipIndex, sint32& relativeSliceIndex)
{
relativeMipIndex = 0;
relativeSliceIndex = 0;
if (baseTexture->overwriteInfo.hasFormatOverwrite)
{
// if the base format is overwritten, then we only allow aliasing if the view format matches the base format
if (baseTexture->format != format)
return VIEW_NOT_COMPATIBLE;
}
if (LatteTexture_IsFormatViewCompatible(baseTexture->format, format) == false)
return VIEW_NOT_COMPATIBLE;
if (baseTexture->physAddress == physAddr && baseTexture->pitch == pitch)
{
if (baseTexture->isDepth != isDepth)
return VIEW_NOT_COMPATIBLE; // depth and non-depth formats are never compatible (on OpenGL)
if (!LatteTexture_IsTexelSizeCompatibleFormat(baseTexture->format, format) || baseTexture->width != width || baseTexture->height != height)
return VIEW_NOT_COMPATIBLE;
// 3D views are only compatible on Vulkan if they match the base texture in regards to mip and slice count
bool isCompatible3DView = dimView == Latte::E_DIM::DIM_3D && baseTexture->dim == dimView && firstSlice == 0 && firstMip == 0 && baseTexture->mipLevels == numMip && baseTexture->depth == numSlice;
if (!isCompatible3DView && !IsDimensionCompatibleForGX2View(baseTexture->dim, dimView))
return VIEW_NOT_COMPATIBLE;
if (baseTexture->isDepth && baseTexture->format != format)
{
// depth view with different format
cemuLog_logDebug(LogType::Force, "_createMapping(): Incompatible depth view format");
return VIEW_NOT_COMPATIBLE;
}
// AMD has a bug on OpenGL where it ignores the internal format of texture views when they are bound as render targets,
// as a result we can't use texture views when they have a different format
if (baseTexture->format != format)
return VIEW_NOT_COMPATIBLE;
if ((firstMip + numMip) > baseTexture->mipLevels || (firstSlice + numSlice) > baseTexture->depth)
{
// view has more slices or mips than existing texture
return VIEW_BASE_TOO_SMALL;
}
return VIEW_COMPATIBLE;
}
else
{
if (numMip > 1)
return VIEW_NOT_COMPATIBLE;
if (baseTexture->Is3DTexture())
return VIEW_NOT_COMPATIBLE; // todo - add support for mapping views into 3D textures
// if phys address or pitch differs then it might be pointing to a mip
for (sint32 m = 0; m < baseTexture->mipLevels; m++)
{
auto sliceMipInfo = baseTexture->sliceMipInfo + baseTexture->GetSliceMipArrayIndex(0, m);
// check pitch
if(sliceMipInfo->pitch != pitch)
continue;
// check all slices
if(LatteAddrLib::TM_IsThickAndMacroTiled(baseTexture->tileMode))
continue; // todo - check only every 4th slice?
for (sint32 s=0; s<baseTexture->GetMipDepth(m); s++)
{
sliceMipInfo = baseTexture->sliceMipInfo + baseTexture->GetSliceMipArrayIndex(s, m);
if (sliceMipInfo->addrStart != physAddr || sliceMipInfo->pitch != pitch)
continue;
if (baseTexture->isDepth != isDepth)
return VIEW_NOT_COMPATIBLE;
if (baseTexture->GetMipWidth(m) != width || baseTexture->GetMipHeight(m) != height)
return VIEW_NOT_COMPATIBLE;
if (!LatteTexture_IsTexelSizeCompatibleFormat(baseTexture->format, format) )
return VIEW_NOT_COMPATIBLE;
if (!IsDimensionCompatibleForGX2View(baseTexture->dim, dimView))
return VIEW_NOT_COMPATIBLE;
if (baseTexture->isDepth && baseTexture->format != format)
{
// depth view with different format
cemuLog_logDebug(LogType::Force, "_createMapping(): Incompatible depth view format");
return VIEW_NOT_COMPATIBLE;
}
// AMD has a bug on OpenGL where it ignores the internal format of texture views when they are bound as render targets,
// as a result we can't use texture views when they have a different format
if (baseTexture->format != format)
return VIEW_NOT_COMPATIBLE;
if ((m + firstMip + numMip) > baseTexture->mipLevels || (s + firstSlice + numSlice) > baseTexture->depth)
{
relativeMipIndex = m;
relativeSliceIndex = s;
return VIEW_BASE_TOO_SMALL;
}
relativeMipIndex = m;
relativeSliceIndex = s;
return VIEW_COMPATIBLE;
}
}
}
return VIEW_NOT_COMPATIBLE;
}
// deletes any related textures that have become redundant (aka textures that can also be represented entirely as a view into the new texture)
void LatteTexture_DeleteAbsorbedSubtextures(LatteTexture* texture)
{
for(size_t i=0; i<texture->list_compatibleRelations.size(); i++)
{
LatteTextureRelation* textureRelation = texture->list_compatibleRelations[i];
LatteTexture* relatedTexture = (textureRelation->baseTexture!=texture)? textureRelation->baseTexture:textureRelation->subTexture;
sint32 relativeMipIndex;
sint32 relativeSliceIndex;
if (LatteTexture_CanTextureBeRepresentedAsView(texture, relatedTexture->physAddress, relatedTexture->width, relatedTexture->height, relatedTexture->pitch, relatedTexture->dim, relatedTexture->format, relatedTexture->isDepth, 0, relatedTexture->mipLevels, 0, relatedTexture->depth, relativeMipIndex, relativeSliceIndex) == VIEW_COMPATIBLE)
{
LatteTexture_Delete(relatedTexture);
LatteGPUState.repeatTextureInitialization = true;
}
}
}
void LatteTexture_RecreateTextureWithDifferentMipSliceCount(LatteTexture* texture, MPTR physMipAddr, sint32 newMipCount, sint32 newDepth)
{
Latte::E_DIM newDim = texture->dim;
if (newDim == Latte::E_DIM::DIM_2D && newDepth > 1)
newDim = Latte::E_DIM::DIM_2D_ARRAY;
else if (newDim == Latte::E_DIM::DIM_1D && newDepth > 1)
newDim = Latte::E_DIM::DIM_1D_ARRAY;
LatteTextureView* view = LatteTexture_CreateTexture(newDim, texture->physAddress, physMipAddr, texture->format, texture->width, texture->height, newDepth, texture->pitch, newMipCount, texture->swizzle, texture->tileMode, texture->isDepth);
cemu_assert(!(view->baseTexture->mipLevels <= 1 && physMipAddr == MPTR_NULL && newMipCount > 1));
// copy data from old texture if it's dynamically updated
if (texture->isUpdatedOnGPU)
{
LatteTexture_copyData(texture, view->baseTexture, texture->mipLevels, texture->depth);
view->baseTexture->isUpdatedOnGPU = true;
}
// remove old texture
LatteTexture_Delete(texture);
// gather texture relations for new texture
LatteTexture_GatherTextureRelations(view->baseTexture);
LatteTexture_UpdateTextureFromDynamicChanges(view->baseTexture);
// todo - inherit 'isUpdatedOnGPU' flag for each mip/slice
// delete any individual smaller slices/mips that have become redundant
LatteTexture_DeleteAbsorbedSubtextures(view->baseTexture);
}
// create new texture representation
// if allowCreateNewDataTexture is true, a new texture will be created if necessary. If it is false, only existing textures may be used, except if a data-compatible version of the requested texture already exists and it's not view compatible (todo - we should differentiate between Latte compatible views and renderer compatible)
// the returned view will map to the provided mip and slice range within the created texture; this matches the behavior of lookupSliceEx
LatteTextureView* LatteTexture_CreateMapping(MPTR physAddr, MPTR physMipAddr, sint32 width, sint32 height, sint32 depth, sint32 pitch, Latte::E_HWTILEMODE tileMode, uint32 swizzle, sint32 firstMip, sint32 numMip, sint32 firstSlice, sint32 numSlice, Latte::E_GX2SURFFMT format, Latte::E_DIM dimBase, Latte::E_DIM dimView, bool isDepth, bool allowCreateNewDataTexture)
{
if (format == Latte::E_GX2SURFFMT::INVALID_FORMAT)
{
cemuLog_logDebug(LogType::Force, "LatteTexture_CreateMapping(): Invalid format");
return nullptr;
}
// note: When creating an existing texture, we only allow mip and slice expansion at the end
cemu_assert_debug(depth);
cemu_assert_debug(!(depth > 1 && dimBase == Latte::E_DIM::DIM_2D));
cemu_assert_debug(!(numSlice > 1 && dimView == Latte::E_DIM::DIM_2D));
// todo, depth and numSlice are redundant
sint32 sliceCount = firstSlice + numSlice;
boost::container::small_vector<LatteTexture*, 16> list_overlappingTextures;
for (sint32 sliceIndex = 0; sliceIndex < sliceCount; sliceIndex++)
{
sint32 mipIndex = 0;
uint32 calcSliceAddrStart;
uint32 calcSliceSize;
sint32 calcSubSliceIndex;
LatteAddrLib::CalculateMipAndSliceAddr(physAddr, physMipAddr, format, width, height, depth, dimBase, tileMode, swizzle, 0, mipIndex, sliceIndex, &calcSliceAddrStart, &calcSliceSize, &calcSubSliceIndex);
uint32 calcSliceAddrEnd = calcSliceAddrStart + calcSliceSize;
// attempt to create view in already existing texture first (we may have to recreate the texture with new specifications)
loopItrMemOccupancyBuckets(calcSliceAddrStart, calcSliceAddrEnd)
{
for (auto& occupancy : list_texMemOccupancyBucket[bucketIndex])
{
if (calcSliceAddrEnd >= occupancy.addrStart && calcSliceAddrStart < occupancy.addrEnd)
{
if (calcSliceAddrStart == occupancy.addrStart)
{
// overlapping with zero x/y offset
if (std::find(list_overlappingTextures.begin(), list_overlappingTextures.end(), occupancy.sliceMipInfo->texture) == list_overlappingTextures.end())
{
list_overlappingTextures.push_back(occupancy.sliceMipInfo->texture);
}
}
else
{
// overlapping but not matching directly
// todo - check if they match with a y offset
}
}
}
}
}
// try to merge textures if possible
for (auto& tex : list_overlappingTextures)
{
sint32 relativeMipIndex;
sint32 relativeSliceIndex;
VIEWCOMPATIBILITY viewCompatibility = LatteTexture_CanTextureBeRepresentedAsView(tex, physAddr, width, height, pitch, dimView, format, isDepth, firstMip, numMip, firstSlice, numSlice, relativeMipIndex, relativeSliceIndex);
if (viewCompatibility == VIEW_NOT_COMPATIBLE)
{
allowCreateNewDataTexture = true;
continue;
}
if (viewCompatibility == VIEW_BASE_TOO_SMALL)
{
if (relativeMipIndex != 0 || relativeSliceIndex != 0)
{
// not yet supported
allowCreateNewDataTexture = true;
continue;
}
// new mapping has more slices/mips than known texture -> expand texture
sint32 newDepth = std::max(relativeSliceIndex + firstSlice + numSlice, std::max(depth, tex->depth));
sint32 newMipCount = std::max(relativeMipIndex + firstMip + numMip, tex->mipLevels);
uint32 newPhysMipAddr;
if ((relativeMipIndex + firstMip + numMip) > 1)
{
newPhysMipAddr = physMipAddr;
}
else
{
newPhysMipAddr = tex->physMipAddress;
}
LatteTexture_RecreateTextureWithDifferentMipSliceCount(tex, newPhysMipAddr, newMipCount, newDepth);
return LatteTexture_CreateMapping(physAddr, physMipAddr, width, height, depth, pitch, tileMode, swizzle, firstMip, numMip, firstSlice, numSlice, format, dimBase, dimView, isDepth);
}
else if(viewCompatibility == VIEW_COMPATIBLE)
{
LatteTextureView* view = tex->GetOrCreateView(dimView, format, relativeMipIndex + firstMip, numMip, relativeSliceIndex + firstSlice, numSlice);
if (relativeMipIndex != 0 || relativeSliceIndex != 0)
{
// for accesses to mips/slices using a physAddress offset we manually need to create a new view lookup
// by default views only create a lookup for the base texture physAddress
view->CreateLookupForSubTexture(relativeMipIndex, relativeSliceIndex);
#ifdef CEMU_DEBUG_ASSERT
LatteTextureView* testView = LatteTextureViewLookupCache::lookup(physAddr, width, height, depth, pitch, firstMip, numMip, firstSlice, numSlice, format, dimView);
cemu_assert(testView);
#endif
}
return view;
}
else
{
cemu_assert_debug(false);
}
}
// create new texture
if (allowCreateNewDataTexture == false)
return nullptr;
LatteTextureView* view = LatteTexture_CreateTexture(dimBase, physAddr, physMipAddr, format, width, height, depth, pitch, firstMip + numMip, swizzle, tileMode, isDepth);
LatteTexture* newTexture = view->baseTexture;
LatteTexture_GatherTextureRelations(view->baseTexture);
LatteTexture_UpdateTextureFromDynamicChanges(view->baseTexture);
// delete any individual smaller slices/mips that have become redundant
LatteTexture_DeleteAbsorbedSubtextures(view->baseTexture);
// create view
sint32 relativeMipIndex;
sint32 relativeSliceIndex;
VIEWCOMPATIBILITY viewCompatibility = LatteTexture_CanTextureBeRepresentedAsView(newTexture, physAddr, width, height, pitch, dimView, format, isDepth, firstMip, numMip, firstSlice, numSlice, relativeMipIndex, relativeSliceIndex);
cemu_assert(viewCompatibility == VIEW_COMPATIBLE);
return view->baseTexture->GetOrCreateView(dimView, format, relativeMipIndex + firstMip, numMip, relativeSliceIndex + firstSlice, numSlice);
}
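// returns the searchIndex-th texture whose base address and pitch match; *searchIndex is advanced on a match so repeated calls enumerate all candidates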
LatteTextureView* LatteTC_LookupTextureByData(MPTR physAddr, sint32 width, sint32 height, sint32 pitch, sint32 firstMip, sint32 numMip, sint32 firstSlice, sint32 numSlice, sint32* searchIndex)
{
cemu_assert_debug(firstMip == 0);
sint32 cSearchIndex = 0;
loopItrMemOccupancyBuckets(physAddr, physAddr+1)
{
auto& bucket = list_texMemOccupancyBucket[bucketIndex];
for (sint32 i = 0; i < bucket.size(); i++)
{
if (bucket[i].addrStart == physAddr)
{
LatteTexture* tex = bucket[i].sliceMipInfo->texture;
if (tex->physAddress == physAddr && tex->pitch == pitch)
{
if (firstSlice >= 0 && firstSlice < (tex->depth))
{
if (cSearchIndex >= *searchIndex)
{
(*searchIndex)++;
return tex->baseView;
}
cSearchIndex++;
}
}
}
}
}
return nullptr;
}
void LatteTC_LookupTexturesByPhysAddr(MPTR physAddr, std::vector<LatteTexture*>& list_textures)
{
sint32 cSearchIndex = 0;
loopItrMemOccupancyBuckets(physAddr, physAddr + 1)
{
for (sint32 i = 0; i < list_texMemOccupancyBucket[bucketIndex].size(); i++)
{
if (list_texMemOccupancyBucket[bucketIndex][i].addrStart == physAddr)
{
LatteTexture* tex = list_texMemOccupancyBucket[bucketIndex][i].sliceMipInfo->texture;
if (tex->physAddress == physAddr)
{
vectorAppendUnique(list_textures, tex);
}
}
}
}
}
LatteTextureView* LatteTC_GetTextureSliceViewOrTryCreate(MPTR srcImagePtr, MPTR srcMipPtr, Latte::E_GX2SURFFMT srcFormat, Latte::E_HWTILEMODE srcTileMode, uint32 srcWidth, uint32 srcHeight, uint32 srcDepth, uint32 srcPitch, uint32 srcSwizzle, uint32 srcSlice, uint32 srcMip, const bool requireExactResolution)
{
LatteTextureView* sourceView;
if(requireExactResolution == false)
sourceView = LatteTextureViewLookupCache::lookupSliceMinSize(srcImagePtr, srcWidth, srcHeight, srcPitch, srcMip, srcSlice, srcFormat);
else
sourceView = LatteTextureViewLookupCache::lookupSlice(srcImagePtr, srcWidth, srcHeight, srcPitch, srcMip, srcSlice, srcFormat);
if (sourceView)
return sourceView;
return LatteTexture_CreateMapping(srcImagePtr, srcMipPtr, srcWidth, srcHeight, srcDepth, srcPitch, srcTileMode, srcSwizzle, srcMip, 1, srcSlice, 1, srcFormat, srcDepth > 1 ? Latte::E_DIM::DIM_2D_ARRAY : Latte::E_DIM::DIM_2D, Latte::E_DIM::DIM_2D, false, false);
}
void LatteTexture_UpdateDataToLatest(LatteTexture* texture)
{
if (LatteTC_HasTextureChanged(texture))
LatteTexture_ReloadData(texture);
if (texture->reloadFromDynamicTextures)
{
LatteTexture_UpdateCacheFromDynamicTextures(texture);
texture->reloadFromDynamicTextures = false;
}
}
LatteTextureSliceMipInfo* LatteTexture::GetSliceMipArrayEntry(sint32 sliceIndex, sint32 mipIndex)
{
return sliceMipInfo + GetSliceMipArrayIndex(sliceIndex, mipIndex);
}
std::vector<LatteTexture*> sAllTextures; // entries can be nullptr
std::vector<size_t> sAllTextureFreeIndices;
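// add the texture to the global texture list, reusing a previously freed slot if available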
void _AddTextureToGlobalList(LatteTexture* tex)
{
if (sAllTextureFreeIndices.empty())
{
tex->globalListIndex = sAllTextures.size();
sAllTextures.emplace_back(tex);
return;
}
size_t index = sAllTextureFreeIndices.back();
sAllTextureFreeIndices.pop_back();
sAllTextures[index] = tex;
tex->globalListIndex = index;
}
void _RemoveTextureFromGlobalList(LatteTexture* tex)
{
cemu_assert_debug(tex->globalListIndex >= 0 && tex->globalListIndex < sAllTextures.size());
cemu_assert_debug(sAllTextures[tex->globalListIndex] == tex);
if (tex->globalListIndex + 1 == sAllTextures.size())
{
// if the index is at the end, make the list smaller instead of freeing the index
sAllTextures.pop_back();
return;
}
sAllTextures[tex->globalListIndex] = nullptr;
sAllTextureFreeIndices.emplace_back(tex->globalListIndex);
}
std::vector<LatteTexture*>& LatteTexture::GetAllTextures()
{
return sAllTextures;
}
bool LatteTexture_GX2FormatHasStencil(bool isDepth, Latte::E_GX2SURFFMT format)
{
if (!isDepth)
return false;
return format == Latte::E_GX2SURFFMT::D24_S8_UNORM ||
format == Latte::E_GX2SURFFMT::D24_S8_FLOAT ||
format == Latte::E_GX2SURFFMT::D32_S8_FLOAT;
}
LatteTexture::LatteTexture(Latte::E_DIM dim, MPTR physAddress, MPTR physMipAddress, Latte::E_GX2SURFFMT format, uint32 width, uint32 height, uint32 depth, uint32 pitch, uint32 mipLevels, uint32 swizzle,
Latte::E_HWTILEMODE tileMode, bool isDepth)
{
_AddTextureToGlobalList(this);
if (depth < 1)
depth = 1;
// setup texture object
this->physAddress = physAddress;
this->dim = dim;
this->format = format;
this->width = width;
this->height = height;
this->depth = depth;
this->swizzle = swizzle;
this->pitch = pitch;
this->mipLevels = mipLevels;
this->tileMode = tileMode;
this->isDepth = isDepth;
this->hasStencil = LatteTexture_GX2FormatHasStencil(isDepth, format);
this->physMipAddress = physMipAddress;
this->lastUpdateEventCounter = LatteTexture_getNextUpdateEventCounter();
this->lastWriteEventCounter = LatteTexture_getNextUpdateEventCounter();
// handle graphic pack overwrite rules
for (const auto& gp : GraphicPack2::GetActiveGraphicPacks())
{
for (const auto& rule : gp->GetTextureRules())
{
if (!rule.filter_settings.format_whitelist.empty() && std::find(rule.filter_settings.format_whitelist.begin(), rule.filter_settings.format_whitelist.end(), (uint32)format) == rule.filter_settings.format_whitelist.end())
continue;
if (!rule.filter_settings.format_blacklist.empty() && std::find(rule.filter_settings.format_blacklist.begin(), rule.filter_settings.format_blacklist.end(), (uint32)format) != rule.filter_settings.format_blacklist.end())
continue;
if (!rule.filter_settings.tilemode_whitelist.empty() && std::find(rule.filter_settings.tilemode_whitelist.begin(), rule.filter_settings.tilemode_whitelist.end(), (int)tileMode) == rule.filter_settings.tilemode_whitelist.end())
continue;
if (!rule.filter_settings.tilemode_blacklist.empty() && std::find(rule.filter_settings.tilemode_blacklist.begin(), rule.filter_settings.tilemode_blacklist.end(), (int)tileMode) != rule.filter_settings.tilemode_blacklist.end())
continue;
if (rule.filter_settings.width != -1 && rule.filter_settings.width != width)
continue;
if (rule.filter_settings.height != -1 && rule.filter_settings.height != height)
continue;
if (rule.filter_settings.depth != -1 && rule.filter_settings.depth != depth)
continue;
if (rule.filter_settings.inMEM1 == GraphicPack2::TextureRule::FILTER_SETTINGS::MEM1_FILTER::OUTSIDE && mmuRange_MEM1.containsAddress(this->physAddress))
continue;
if (rule.filter_settings.inMEM1 == GraphicPack2::TextureRule::FILTER_SETTINGS::MEM1_FILTER::INSIDE && !mmuRange_MEM1.containsAddress(this->physAddress))
continue;
this->overwriteInfo.width = width;
this->overwriteInfo.height = height;
this->overwriteInfo.depth = depth;
if (rule.overwrite_settings.width != -1)
{
this->overwriteInfo.hasResolutionOverwrite = true;
this->overwriteInfo.width = rule.overwrite_settings.width;
}
if (rule.overwrite_settings.height != -1)
{
this->overwriteInfo.hasResolutionOverwrite = true;
this->overwriteInfo.height = rule.overwrite_settings.height;
}
if (rule.overwrite_settings.depth != -1)
{
this->overwriteInfo.hasResolutionOverwrite = true;
this->overwriteInfo.depth = rule.overwrite_settings.depth;
}
if (rule.overwrite_settings.format != -1)
{
this->overwriteInfo.hasFormatOverwrite = true;
this->overwriteInfo.format = rule.overwrite_settings.format;
}
if (rule.overwrite_settings.lod_bias != -1)
{
this->overwriteInfo.hasLodBias = true;
this->overwriteInfo.lodBias = rule.overwrite_settings.lod_bias;
}
if (rule.overwrite_settings.relative_lod_bias != -1)
{
this->overwriteInfo.hasRelativeLodBias = true;
this->overwriteInfo.relativeLodBias = rule.overwrite_settings.relative_lod_bias;
}
if (rule.overwrite_settings.anistropic_value != -1)
{
this->overwriteInfo.anisotropicLevel = rule.overwrite_settings.anistropic_value;
}
}
}
// determine if this texture should ever be mirrored to CPU RAM
if (this->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED)
{
this->enableReadback = true;
}
}
LatteTexture::~LatteTexture()
{
_RemoveTextureFromGlobalList(this);
cemu_assert_debug(baseView == nullptr);
cemu_assert_debug(views.empty());
};
// sync texture data between overlapping textures
void LatteTexture_UpdateCacheFromDynamicTextures(LatteTexture* textureDest)
{
LatteTexture_UpdateTextureFromDynamicChanges(textureDest);
}
void LatteTexture_MarkConnectedTexturesForReloadFromDynamicTextures(LatteTexture* texture)
{
for (auto& it : texture->list_compatibleRelations)
{
if (texture == it->baseTexture)
it->subTexture->reloadFromDynamicTextures = true;
else
it->baseTexture->reloadFromDynamicTextures = true;
}
}
void LatteTexture_TrackTextureGPUWrite(LatteTexture* texture, uint32 slice, uint32 mip, uint64 eventCounter)
{
LatteTexture_MarkDynamicTextureAsChanged(texture->baseView, slice, mip, eventCounter);
LatteTC_ResetTextureChangeTracker(texture);
texture->isUpdatedOnGPU = true;
texture->lastUnflushedRTDrawcallIndex = LatteGPUState.drawCallCounter;
}
| 57,595 | C++ | .cpp | 1,233 | 43.643147 | 366 | 0.765538 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,269 | LatteStreamoutGPU.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteStreamoutGPU.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "util/containers/IntervalBucketContainer.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/LatteRingBuffer.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
struct
{
sint32 currentRingbufferOffset;
VirtualBufferHeap_t* mainBufferHeap;
}streamoutManager;
sint32 LatteStreamout_GetRingBufferSize()
{
return 8 * 1024 * 1024; // 8MB
}
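// allocate a 256-byte aligned range from the streamout ring buffer; wraps back to the start when the end is reached (presumably older data has been consumed by then)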
sint32 LatteStreamout_allocateGPURingbufferMem(sint32 size)
{
// pad size to 256 byte alignment
size = (size + 255)&~255;
// get next offset
if ((streamoutManager.currentRingbufferOffset + size) > LatteStreamout_GetRingBufferSize())
{
streamoutManager.currentRingbufferOffset = 0;
}
sint32 allocOffset = streamoutManager.currentRingbufferOffset;
streamoutManager.currentRingbufferOffset += size;
return allocOffset;
}
void LatteStreamout_InitCache()
{
streamoutManager.currentRingbufferOffset = 0;
streamoutManager.mainBufferHeap = nullptr;
}
bool _transformFeedbackIsActive = false;
struct
{
uint32 vertexCount;
uint32 instanceCount;
uint32 streamoutWriteMask;
struct
{
bool isActive;
sint32 ringBufferOffset;
uint32 rangeAddr;
uint32 rangeSize; // size of written streamout data, bounded by buffer size
}streamoutBufferWrite[LATTE_NUM_STREAMOUT_BUFFER];
}activeStreamoutOperation;
uint32 LatteStreamout_getNumberOfWrittenVertices()
{
// todo: Currently we only handle GX2_POINTS
return activeStreamoutOperation.vertexCount * activeStreamoutOperation.instanceCount;
}
// returns the number of bytes written into the buffer by the current draw operation, clamped to the buffer size
uint32 LatteStreamout_getBufferWriteRangeSize(uint32 streamoutBufferIndex)
{
uint32 bufferStride = LatteGPUState.contextRegister[mmVGT_STRMOUT_VTX_STRIDE_0 + streamoutBufferIndex * 4] << 2;
uint32 bufferSize = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_SIZE_0 + streamoutBufferIndex * 4] << 2;
uint32 writeSize = LatteStreamout_getNumberOfWrittenVertices() * bufferStride;
if (bufferSize < writeSize)
writeSize = bufferSize;
return writeSize;
}
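// set up transform feedback for the upcoming draw call: determine which streamout buffers the active shaders write to and bind the corresponding ring buffer ranges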
void LatteStreamout_PrepareDrawcall(uint32 count, uint32 instanceCount)
{
if (LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] == 0)
{
_transformFeedbackIsActive = false;
return; // streamout inactive
}
// get active vertex shader
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
// if a geometry shader is used calculate how many vertices it outputs
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
sint32 maxVerticesInGS = 1;
if (geometryShader)
{
uint32 gsOutPrimType = LatteGPUState.contextRegister[mmVGT_GS_OUT_PRIM_TYPE];
uint32 bytesPerVertex = LatteGPUState.contextRegister[mmSQ_GS_VERT_ITEMSIZE] * 4;
maxVerticesInGS = ((LatteGPUState.contextRegister[mmSQ_GSVS_RING_ITEMSIZE] & 0x7FFF) * 4) / bytesPerVertex;
cemu_assert_debug(maxVerticesInGS > 0);
}
// setup active streamout operation struct
activeStreamoutOperation.vertexCount = count * maxVerticesInGS;
activeStreamoutOperation.instanceCount = instanceCount;
// get mask of all written streamout buffers
uint32 streamoutWriteMask = 0;
if (geometryShader)
{
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug(vertexShader->streamoutBufferWriteMask.any() == false);
#endif
for (sint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
if (geometryShader->streamoutBufferWriteMask[i])
streamoutWriteMask |= (1 << i);
}
else
{
for (sint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
if (vertexShader->streamoutBufferWriteMask[i])
streamoutWriteMask |= (1 << i);
}
activeStreamoutOperation.streamoutWriteMask = streamoutWriteMask;
// bind streamout buffers
for (uint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if ((streamoutWriteMask&(1 << i)) == 0)
{
activeStreamoutOperation.streamoutBufferWrite[i].isActive = false;
continue;
}
uint32 bufferBaseMPTR = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_BASE_0 + i * 4] << 8;
uint32 bufferSize = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_SIZE_0 + i * 4] << 2;
uint32 bufferOffset = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_OFFSET_0 + i * 4];
uint32 streamoutWriteSize = LatteStreamout_getBufferWriteRangeSize(i);
uint32 rangeAddr = bufferBaseMPTR + bufferOffset;
sint32 ringBufferOffset = LatteStreamout_allocateGPURingbufferMem(streamoutWriteSize); // allocate memory for the entire streamout write
// calculate write size after bounding it to the buffer
uint32 remainingBytesToWrite = bufferOffset > bufferSize ? 0 : (bufferSize - bufferOffset);
uint32 rangeSize = std::min(streamoutWriteSize, remainingBytesToWrite);
activeStreamoutOperation.streamoutBufferWrite[i].isActive = true;
activeStreamoutOperation.streamoutBufferWrite[i].ringBufferOffset = ringBufferOffset;
activeStreamoutOperation.streamoutBufferWrite[i].rangeAddr = rangeAddr;
activeStreamoutOperation.streamoutBufferWrite[i].rangeSize = rangeSize;
g_renderer->streamout_setupXfbBuffer(i, ringBufferOffset, rangeAddr, rangeSize);
}
g_renderer->streamout_begin();
_transformFeedbackIsActive = true;
}
void LatteStreamout_FinishDrawcall(bool useDirectMemoryMode)
{
if (_transformFeedbackIsActive)
{
_transformFeedbackIsActive = false;
for (uint32 i = 0; i < LATTE_NUM_STREAMOUT_BUFFER; i++)
{
if ((activeStreamoutOperation.streamoutWriteMask&(1 << i)) == 0)
continue;
if (activeStreamoutOperation.streamoutBufferWrite[i].rangeSize > 0)
{
if(useDirectMemoryMode)
g_renderer->bufferCache_copyStreamoutToMainBuffer(activeStreamoutOperation.streamoutBufferWrite[i].ringBufferOffset, activeStreamoutOperation.streamoutBufferWrite[i].rangeAddr, activeStreamoutOperation.streamoutBufferWrite[i].rangeSize);
else
LatteBufferCache_copyStreamoutDataToCache(activeStreamoutOperation.streamoutBufferWrite[i].rangeAddr, activeStreamoutOperation.streamoutBufferWrite[i].rangeSize, activeStreamoutOperation.streamoutBufferWrite[i].ringBufferOffset);
}
// advance streamout offset
uint32 newOffset = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_OFFSET_0 + i * 4] + activeStreamoutOperation.streamoutBufferWrite[i].rangeSize;
LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_OFFSET_0 + i * 4] = newOffset;
}
g_renderer->streamout_rendererFinishDrawcall();
}
}
| 6,629 | C++ | .cpp | 156 | 40.160256 | 242 | 0.804984 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
23,270 | LatteAsyncCommands.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteAsyncCommands.cpp |
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteAsyncCommands.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
void LatteThread_Exit();
SlimRWLock swl_gpuAsyncCommands;
typedef struct
{
uint32 type;
union
{
struct
{
MPTR physAddr;
MPTR mipAddr;
uint32 swizzle;
sint32 format;
sint32 width;
sint32 height;
sint32 depth;
uint32 pitch;
uint32 slice;
sint32 dim;
Latte::E_HWTILEMODE tilemode;
sint32 aa;
sint32 level;
}forceTextureReadback;
struct
{
uint64 shaderBaseHash;
uint64 shaderAuxHash;
LatteConst::ShaderType shaderType;
}deleteShader;
};
}LatteAsyncCommand_t;
#define ASYNC_CMD_FORCE_TEXTURE_READBACK 1
#define ASYNC_CMD_DELETE_SHADER 2
std::queue<LatteAsyncCommand_t> LatteAsyncCommandQueue;
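// Commands are queued by other threads and consumed on the GPU thread in
// LatteAsyncCommands_checkAndExecute(). A command is only popped after it has been
// executed, so an empty queue implies that all previously queued commands have fully
// completed.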
void LatteAsyncCommands_queueForceTextureReadback(MPTR physAddr, MPTR mipAddr, uint32 swizzle, sint32 format, sint32 width, sint32 height, sint32 depth, uint32 pitch, uint32 slice, sint32 dim, Latte::E_HWTILEMODE tilemode, sint32 aa, sint32 level)
{
LatteAsyncCommand_t asyncCommand = {};
// setup command
asyncCommand.type = ASYNC_CMD_FORCE_TEXTURE_READBACK;
asyncCommand.forceTextureReadback.physAddr = physAddr;
asyncCommand.forceTextureReadback.mipAddr = mipAddr;
asyncCommand.forceTextureReadback.swizzle = swizzle;
asyncCommand.forceTextureReadback.format = format;
asyncCommand.forceTextureReadback.width = width;
asyncCommand.forceTextureReadback.height = height;
asyncCommand.forceTextureReadback.depth = depth;
asyncCommand.forceTextureReadback.pitch = pitch;
asyncCommand.forceTextureReadback.slice = slice;
asyncCommand.forceTextureReadback.dim = dim;
asyncCommand.forceTextureReadback.tilemode = tilemode;
asyncCommand.forceTextureReadback.aa = aa;
asyncCommand.forceTextureReadback.level = level;
swl_gpuAsyncCommands.LockWrite();
LatteAsyncCommandQueue.push(asyncCommand);
swl_gpuAsyncCommands.UnlockWrite();
}
void LatteAsyncCommands_queueDeleteShader(uint64 shaderBaseHash, uint64 shaderAuxHash, LatteConst::ShaderType shaderType)
{
LatteAsyncCommand_t asyncCommand = {};
// setup command
asyncCommand.type = ASYNC_CMD_DELETE_SHADER;
asyncCommand.deleteShader.shaderBaseHash = shaderBaseHash;
asyncCommand.deleteShader.shaderAuxHash = shaderAuxHash;
asyncCommand.deleteShader.shaderType = shaderType;
swl_gpuAsyncCommands.LockWrite();
LatteAsyncCommandQueue.push(asyncCommand);
swl_gpuAsyncCommands.UnlockWrite();
}
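// Spin-waits (with _mm_pause) until the GPU thread has drained the async command queue.
// Combined with the pop-after-execute behaviour above, this also guarantees that the
// last command has finished executing, not just that it has been dequeued.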
void LatteAsyncCommands_waitUntilAllProcessed()
{
while (LatteAsyncCommandQueue.empty() == false)
{
_mm_pause();
}
}
/*
* Called by the GPU command processor frequently
*/
void LatteAsyncCommands_checkAndExecute()
{
// quick check if queue is empty (requires no lock)
if (Latte_GetStopSignal())
LatteThread_Exit();
if (LatteAsyncCommandQueue.empty())
return;
swl_gpuAsyncCommands.LockWrite();
while (LatteAsyncCommandQueue.empty() == false)
{
// get first command in queue
LatteAsyncCommand_t asyncCommand = LatteAsyncCommandQueue.front();
swl_gpuAsyncCommands.UnlockWrite();
if (asyncCommand.type == ASYNC_CMD_FORCE_TEXTURE_READBACK)
{
cemu_assert_debug(asyncCommand.forceTextureReadback.level == 0); // implement mip swizzle and verify
LatteTextureView* textureView = LatteTC_GetTextureSliceViewOrTryCreate(asyncCommand.forceTextureReadback.physAddr, asyncCommand.forceTextureReadback.mipAddr, (Latte::E_GX2SURFFMT)asyncCommand.forceTextureReadback.format, asyncCommand.forceTextureReadback.tilemode, asyncCommand.forceTextureReadback.width, asyncCommand.forceTextureReadback.height, asyncCommand.forceTextureReadback.depth, asyncCommand.forceTextureReadback.pitch, 0, asyncCommand.forceTextureReadback.slice, asyncCommand.forceTextureReadback.level);
if (textureView != nullptr)
{
LatteTexture_UpdateDataToLatest(textureView->baseTexture);
// start transfer
LatteTextureReadback_StartTransfer(textureView);
// wait until finished
LatteTextureReadback_UpdateFinishedTransfers(true);
}
else
{
cemuLog_logDebug(LogType::Force, "Texture not found for readback");
}
}
else if (asyncCommand.type == ASYNC_CMD_DELETE_SHADER)
{
LatteSHRC_RemoveFromCacheByHash(asyncCommand.deleteShader.shaderBaseHash, asyncCommand.deleteShader.shaderAuxHash, asyncCommand.deleteShader.shaderType);
}
else
{
cemu_assert_unimplemented();
}
swl_gpuAsyncCommands.LockWrite();
LatteAsyncCommandQueue.pop();
}
swl_gpuAsyncCommands.UnlockWrite();
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTextureView.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Core/LatteTextureView.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/GraphicPack/GraphicPack2.h"
LatteTextureView::LatteTextureView(LatteTexture* texture, sint32 firstMip, sint32 mipCount, sint32 firstSlice, sint32 sliceCount, Latte::E_DIM dim, Latte::E_GX2SURFFMT format, bool registerView)
{
this->baseTexture = texture;
this->firstMip = firstMip;
this->numMip = mipCount;
this->firstSlice = firstSlice;
this->numSlice = sliceCount;
this->dim = dim;
this->format = format;
if (registerView)
{
texture->views.emplace_back(this);
LatteTextureViewLookupCache::Add(this);
}
}
LatteTextureView::~LatteTextureView()
{
// unregister view
LatteTextureViewLookupCache::RemoveAll(this);
// remove from texture
vectorRemoveByValue(baseTexture->views, this);
if (baseTexture->baseView == this)
baseTexture->baseView = nullptr;
// delete all associated FBOs
while (!list_associatedFbo.empty())
LatteMRT::DeleteCachedFBO(list_associatedFbo[0]);
}
void LatteTextureView::CreateLookupForSubTexture(uint32 mipStart, uint32 sliceStart)
{
cemu_assert_debug(mipStart != 0 || sliceStart != 0); // This function should never be called with both parameters zero. Every view creates a base lookup on construction
LatteTextureViewLookupCache::Add(this, mipStart, sliceStart);
}
/* View lookup cache */
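// Key data describing a view for cache lookups. By default the key mirrors the base
// texture; SetParametersForSubTexture() rebases it onto a specific mip/slice so that a
// view into a larger texture can also be found via the address, pitch and size of that
// sub-region.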
struct LatteTexViewLookupDesc
{
LatteTexViewLookupDesc(LatteTextureView* view) : view(view)
{
this->physAddr = view->baseTexture->physAddress;
this->physMipAddr = view->baseTexture->physMipAddress;
this->width = view->baseTexture->width;
this->height = view->baseTexture->height;
this->pitch = view->baseTexture->pitch;
this->firstMip = view->firstMip;
this->numMip = view->numMip;
this->firstSlice = view->firstSlice;
this->numSlice = view->numSlice;
this->format = view->format;
this->dim = view->dim;
this->isDepth = view->baseTexture->isDepth;
}
void SetParametersForSubTexture(sint32 baseMip, sint32 baseSlice)
{
cemu_assert_debug(baseMip >= 0);
cemu_assert_debug(baseSlice >= 0);
LatteTextureSliceMipInfo* sliceMipInfo = view->baseTexture->GetSliceMipArrayEntry(baseSlice, baseMip);
physAddr = sliceMipInfo->addrStart;
pitch = sliceMipInfo->pitch;
cemu_assert_debug(format == view->baseTexture->format); // if the format is different then width/height calculation might differ. This only affects the case where an integer format is mapped onto a compressed format or vice versa.
width = view->baseTexture->GetMipWidth(baseMip);
height = view->baseTexture->GetMipHeight(baseMip);
// adjust firstMip and firstSlice to be relative to base of subtexture
cemu_assert(firstMip >= baseMip);
cemu_assert(firstSlice >= baseSlice);
firstMip -= baseMip;
firstSlice -= baseSlice;
}
// key data for looking up views
MPTR physAddr;
MPTR physMipAddr;
sint32 width;
sint32 height;
sint32 pitch;
sint32 firstMip;
sint32 numMip;
sint32 firstSlice;
sint32 numSlice;
Latte::E_GX2SURFFMT format;
Latte::E_DIM dim;
bool isDepth;
// associated view
LatteTextureView* view;
};
struct LatteTexViewBucket
{
std::vector<LatteTexViewLookupDesc> list;
};
#define TEXTURE_VIEW_BUCKETS (1061)
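// Views are hashed into a fixed number of buckets. The generic key mixes physical
// address, width, height and pitch with small prime multipliers; the "nores" variant
// drops the resolution so lookups that only know address and pitch (see
// lookupSliceMinSize) can still find candidates.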
inline uint32 _getViewBucketKey(MPTR physAddress, uint32 width, uint32 height, uint32 pitch)
{
return (physAddress + width * 7 + height * 11 + pitch * 13) % TEXTURE_VIEW_BUCKETS;
}
inline uint32 _getViewBucketKeyNoRes(MPTR physAddress, uint32 pitch)
{
return (physAddress + pitch * 13) % TEXTURE_VIEW_BUCKETS;
}
LatteTexViewBucket texViewBucket[TEXTURE_VIEW_BUCKETS] = { };
LatteTexViewBucket texViewBucket_nores[TEXTURE_VIEW_BUCKETS] = { };
void LatteTextureViewLookupCache::Add(LatteTextureView* view, uint32 baseMip, uint32 baseSlice)
{
LatteTexViewLookupDesc desc(view);
if (baseMip != 0 || baseSlice != 0)
desc.SetParametersForSubTexture(baseMip, baseSlice);
// generic bucket
uint32 key = _getViewBucketKey(desc.physAddr, desc.width, desc.height, desc.pitch);
texViewBucket[key].list.emplace_back(desc);
vectorAppendUnique(view->viewLookUpCacheKeys, key);
// resolution-independent bucket
key = _getViewBucketKeyNoRes(desc.physAddr, desc.pitch);
texViewBucket_nores[key].list.push_back(desc);
vectorAppendUnique(view->viewLookUpCacheKeysNoRes, key);
}
void LatteTextureViewLookupCache::RemoveAll(LatteTextureView* view)
{
for (auto& key : view->viewLookUpCacheKeys)
{
auto& bucket = texViewBucket[key].list;
bucket.erase(std::remove_if(bucket.begin(), bucket.end(), [view](const LatteTexViewLookupDesc& v) {
return v.view == view; }), bucket.end());
}
for (auto& key : view->viewLookUpCacheKeysNoRes)
{
auto& bucket = texViewBucket_nores[key].list;
bucket.erase(std::remove_if(bucket.begin(), bucket.end(), [view](const LatteTexViewLookupDesc& v) {
return v.view == view; }), bucket.end());
}
}
LatteTextureView* LatteTextureViewLookupCache::lookup(MPTR physAddr, sint32 width, sint32 height, sint32 depth, sint32 pitch, sint32 firstMip, sint32 numMip, sint32 firstSlice, sint32 numSlice, Latte::E_GX2SURFFMT format, Latte::E_DIM dim)
{
// todo - add tileMode param to this and the other lookup functions?
uint32 key = _getViewBucketKey(physAddr, width, height, pitch);
key %= TEXTURE_VIEW_BUCKETS;
for (auto& it : texViewBucket[key].list)
{
if (it.format == format && it.dim == dim &&
it.width == width && it.height == height && it.pitch == pitch && it.physAddr == physAddr
&& it.firstMip == firstMip && it.numMip == numMip
&& it.firstSlice == firstSlice && it.numSlice == numSlice
)
{
return it.view;
}
}
return nullptr;
}
LatteTextureView* LatteTextureViewLookupCache::lookupWithColorOrDepthType(MPTR physAddr, sint32 width, sint32 height, sint32 depth, sint32 pitch, sint32 firstMip, sint32 numMip, sint32 firstSlice, sint32 numSlice, Latte::E_GX2SURFFMT format, Latte::E_DIM dim, bool isDepth)
{
cemu_assert_debug(firstSlice == 0);
uint32 key = _getViewBucketKey(physAddr, width, height, pitch);
key %= TEXTURE_VIEW_BUCKETS;
for (auto& it : texViewBucket[key].list)
{
if (it.format == format && it.dim == dim && it.width == width && it.height == height && it.pitch == pitch && it.physAddr == physAddr
&& it.firstMip == firstMip && it.numMip == numMip
&& it.firstSlice == firstSlice && it.numSlice == numSlice &&
it.isDepth == isDepth
)
{
return it.view;
}
}
return nullptr;
}
// look up view with unspecified mipCount and sliceCount
LatteTextureView* LatteTextureViewLookupCache::lookupSlice(MPTR physAddr, sint32 width, sint32 height, sint32 pitch, sint32 firstMip, sint32 firstSlice, Latte::E_GX2SURFFMT format)
{
uint32 key = _getViewBucketKey(physAddr, width, height, pitch);
key %= TEXTURE_VIEW_BUCKETS;
for (auto& it : texViewBucket[key].list)
{
if (it.width == width && it.height == height && it.pitch == pitch && it.physAddr == physAddr && it.format == format)
{
if (firstSlice == it.firstSlice && firstMip == it.firstMip)
return it.view;
}
}
return nullptr;
}
// look up view with unspecified mipCount/sliceCount and only minimum width and height given
LatteTextureView* LatteTextureViewLookupCache::lookupSliceMinSize(MPTR physAddr, sint32 minWidth, sint32 minHeight, sint32 pitch, sint32 firstMip, sint32 firstSlice, Latte::E_GX2SURFFMT format)
{
uint32 key = _getViewBucketKeyNoRes(physAddr, pitch);
key %= TEXTURE_VIEW_BUCKETS;
for (auto& it : texViewBucket_nores[key].list)
{
if (it.width >= minWidth && it.height >= minHeight && it.pitch == pitch && it.physAddr == physAddr && it.format == format)
{
if (firstSlice == it.firstSlice && firstMip == it.firstMip)
return it.view;
}
}
return nullptr;
}
// similar to lookupSlice but also compares isDepth
LatteTextureView* LatteTextureViewLookupCache::lookupSliceEx(MPTR physAddr, sint32 width, sint32 height, sint32 pitch, sint32 firstMip, sint32 firstSlice, Latte::E_GX2SURFFMT format, bool isDepth)
{
cemu_assert_debug(firstMip == 0);
uint32 key = _getViewBucketKey(physAddr, width, height, pitch);
key %= TEXTURE_VIEW_BUCKETS;
for (auto& it : texViewBucket[key].list)
{
if (it.width == width && it.height == height && it.pitch == pitch && it.physAddr == physAddr && it.format == format && it.isDepth == isDepth)
{
if (firstSlice == it.firstSlice && firstMip == it.firstMip)
return it.view;
}
}
return nullptr;
}
std::unordered_set<LatteTextureView*> LatteTextureViewLookupCache::GetAllViews()
{
std::unordered_set<LatteTextureView*> viewSet;
for (uint32 i = 0; i < TEXTURE_VIEW_BUCKETS; i++)
{
for (auto& it : texViewBucket[i].list)
viewSet.emplace(it.view);
}
return viewSet;
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteBufferData.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanRenderer.h"
template<int vectorLen>
void rectGenerate4thVertex(uint32be* output, uint32be* input0, uint32be* input1, uint32be* input2)
{
float* v = (float*)output;
for (sint32 i = 0; i < vectorLen; i++)
output[vectorLen * 0 + i] = _swapEndianU32(input0[i]);
for (sint32 i = 0; i < vectorLen; i++)
output[vectorLen * 1 + i] = _swapEndianU32(input1[i]);
for (sint32 i = 0; i < vectorLen; i++)
output[vectorLen * 2 + i] = _swapEndianU32(input2[i]);
float minX = std::min(v[vectorLen * 0 + 0], std::min(v[vectorLen * 1 + 0], v[vectorLen * 2 + 0]));
float maxX = std::max(v[vectorLen * 0 + 0], std::max(v[vectorLen * 1 + 0], v[vectorLen * 2 + 0]));
float minY = std::min(v[vectorLen * 0 + 1], std::min(v[vectorLen * 1 + 1], v[vectorLen * 2 + 1]));
float maxY = std::max(v[vectorLen * 0 + 1], std::max(v[vectorLen * 1 + 1], v[vectorLen * 2 + 1]));
float totalX = minX;
totalX += maxX;
float halfX = totalX / 2.0f;
float totalY = minY;
totalY += maxY;
float halfY = totalY / 2.0f;
sint32 countX =
((v[vectorLen * 0 + 0] < halfX) ? 1 : 0) +
((v[vectorLen * 1 + 0] < halfX) ? 1 : 0) +
((v[vectorLen * 2 + 0] < halfX) ? 1 : 0);
sint32 countY =
((v[vectorLen * 0 + 1] < halfY) ? 1 : 0) +
((v[vectorLen * 1 + 1] < halfY) ? 1 : 0) +
((v[vectorLen * 2 + 1] < halfY) ? 1 : 0);
if (countX < 2)
v[vectorLen * 3 + 0] = minX;
else
v[vectorLen * 3 + 0] = maxX;
if (countY < 2)
v[vectorLen * 3 + 1] = minY;
else
v[vectorLen * 3 + 1] = maxY;
if (vectorLen >= 3)
v[vectorLen * 3 + 2] = v[vectorLen * 0 + 2]; // z from v0
if (vectorLen >= 4)
v[vectorLen * 3 + 3] = v[vectorLen * 0 + 3]; // w from v0
// order of rectangle vertices is
// v0 v1
// v2 v3
for (sint32 f = 0; f < vectorLen*4; f++)
output[f] = _swapEndianU32(output[f]);
}
#define ATTRIBUTE_CACHE_RING_SIZE (128) // up to 128 entries can be cached
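// Gathers the uniform values referenced by a shader whose uniform accesses were
// remapped at decompile time. Register-sourced entries are copied from the ALU constant
// registers, buffer-sourced entries from the uniform buffers referenced through the
// kcache bank registers; groups whose buffer pointer is null are zero-filled so the
// shader still reads defined data.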
void LatteBufferCache_LoadRemappedUniforms(LatteDecompilerShader* shader, float* uniformData)
{
uint32 shaderAluConst;
uint32 shaderUniformRegisterOffset;
switch (shader->shaderType)
{
case LatteConst::ShaderType::Vertex:
shaderAluConst = 0x400;
shaderUniformRegisterOffset = mmSQ_VTX_UNIFORM_BLOCK_START;
break;
case LatteConst::ShaderType::Pixel:
shaderAluConst = 0;
shaderUniformRegisterOffset = mmSQ_PS_UNIFORM_BLOCK_START;
break;
case LatteConst::ShaderType::Geometry:
shaderAluConst = 0; // geometry shader has no ALU const
shaderUniformRegisterOffset = mmSQ_GS_UNIFORM_BLOCK_START;
break;
default:
cemu_assert_debug(false);
}
// sourced from uniform registers
uint32* aluConstBase = LatteGPUState.contextRegister + mmSQ_ALU_CONSTANT0_0 + shaderAluConst;
for (auto it : shader->list_remappedUniformEntries_register)
{
uint64* uniformRegData = (uint64*)(aluConstBase + it.indexOffset / 4);
uint64* regDest = (uint64*)((uint8*)uniformData + it.mappedIndexOffset);
regDest[0] = uniformRegData[0];
regDest[1] = uniformRegData[1];
}
// sourced from uniform buffers
for (auto& bufferGroup : shader->list_remappedUniformEntries_bufferGroups)
{
MPTR physicalAddr = LatteGPUState.contextRegister[shaderUniformRegisterOffset + bufferGroup.kcacheBankIdOffset / 4];
if (physicalAddr)
{
uint8* uniformBase = memory_base + physicalAddr;
for (auto& it : bufferGroup.entries)
{
uint64* regDest = (uint64*)((uint8*)uniformData + it.mappedIndexOffset);
uint64* uniformEntrySrc = (uint64*)(uniformBase + it.indexOffset);
memcpy(regDest, uniformEntrySrc, 16);
}
}
else
{
for (auto& it : bufferGroup.entries)
{
uint64* regDest = (uint64*)((uint8*)uniformData + it.mappedIndexOffset);
regDest[0] = 0;
regDest[1] = 0;
}
}
}
}
void LatteBufferCache_syncGPUUniformBuffers(LatteDecompilerShader* shader, const uint32 uniformBufferRegOffset, LatteConst::ShaderType shaderType)
{
if (shader->uniformMode == LATTE_DECOMPILER_UNIFORM_MODE_FULL_CBANK)
{
for(const auto& buf : shader->list_quickBufferList)
{
sint32 i = buf.index;
MPTR physicalAddr = LatteGPUState.contextRegister[uniformBufferRegOffset + i * 7 + 0];
uint32 uniformSize = LatteGPUState.contextRegister[uniformBufferRegOffset + i * 7 + 1] + 1;
if (physicalAddr == MPTR_NULL) [[unlikely]]
{
g_renderer->buffer_bindUniformBuffer(shaderType, i, 0, 0);
continue;
}
uniformSize = std::min<uint32>(uniformSize, buf.size);
uint32 bindOffset = LatteBufferCache_retrieveDataInCache(physicalAddr, uniformSize);
g_renderer->buffer_bindUniformBuffer(shaderType, i, bindOffset, uniformSize);
}
}
}
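// Note on vertex buffer sizes: the size register written by the game is not reliable,
// so LatteBufferCache_Sync below recomputes a conservative size from the highest vertex
// or instance index referenced by the fetch shader (stride * (index + 1) plus the
// largest attribute offset) and pads it slightly when no index-based bound exists.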
// upload vertex and uniform buffers
bool LatteBufferCache_Sync(uint32 minIndex, uint32 maxIndex, uint32 baseInstance, uint32 instanceCount)
{
static uint32 s_syncBufferCounter = 0;
s_syncBufferCounter++;
if (s_syncBufferCounter >= 30)
{
LatteBufferCache_incrementalCleanup();
s_syncBufferCounter = 0;
}
LatteBufferCache_processDCFlushQueue();
// process queued deallocations from previous drawcall
LatteBufferCache_processDeallocations();
// sync and bind vertex buffers
LatteFetchShader* parsedFetchShader = LatteSHRC_GetActiveFetchShader();
if (!parsedFetchShader)
return false;
for (auto& bufferGroup : parsedFetchShader->bufferGroups)
{
uint32 bufferIndex = bufferGroup.attributeBufferIndex;
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
MPTR bufferAddress = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 0];
uint32 bufferSize = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 1] + 1;
uint32 bufferStride = (LatteGPUState.contextRegister[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
if (bufferAddress == MPTR_NULL)
{
g_renderer->buffer_bindVertexBuffer(bufferIndex, 0, 0);
continue;
}
// don't rely on the buffer size given by the game
uint32 fixedBufferSize = 0;
if (bufferGroup.hasVtxIndexAccess)
fixedBufferSize = bufferStride * (maxIndex + 1) + bufferGroup.maxOffset;
if (bufferGroup.hasInstanceIndexAccess)
{
uint32 fixedBufferSizeInstance = bufferStride * ((baseInstance + instanceCount) + 1) + bufferGroup.maxOffset;
fixedBufferSize = std::max(fixedBufferSize, fixedBufferSizeInstance);
}
if (fixedBufferSize == 0 || bufferStride == 0)
fixedBufferSize += 128;
#if BOOST_OS_MACOS
if(bufferStride % 4 != 0)
{
if (VulkanRenderer* vkRenderer = VulkanRenderer::GetInstance())
{
auto fixedBuffer = vkRenderer->buffer_genStrideWorkaroundVertexBuffer(bufferAddress, fixedBufferSize, bufferStride);
vkRenderer->buffer_bindVertexStrideWorkaroundBuffer(fixedBuffer.first, fixedBuffer.second, bufferIndex, fixedBufferSize);
continue;
}
}
#endif
uint32 bindOffset = LatteBufferCache_retrieveDataInCache(bufferAddress, fixedBufferSize);
g_renderer->buffer_bindVertexBuffer(bufferIndex, bindOffset, fixedBufferSize);
}
// sync uniform buffers
LatteDecompilerShader* vertexShader = LatteSHRC_GetActiveVertexShader();
if (vertexShader)
LatteBufferCache_syncGPUUniformBuffers(vertexShader, mmSQ_VTX_UNIFORM_BLOCK_START, LatteConst::ShaderType::Vertex);
LatteDecompilerShader* geometryShader = LatteSHRC_GetActiveGeometryShader();
if (geometryShader)
LatteBufferCache_syncGPUUniformBuffers(geometryShader, mmSQ_GS_UNIFORM_BLOCK_START, LatteConst::ShaderType::Geometry);
LatteDecompilerShader* pixelShader = LatteSHRC_GetActivePixelShader();
if (pixelShader)
LatteBufferCache_syncGPUUniformBuffers(pixelShader, mmSQ_PS_UNIFORM_BLOCK_START, LatteConst::ShaderType::Pixel);
return true;
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteDefaultShaders.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteDefaultShaders.h"
#include "util/helpers/StringBuf.h"
LatteDefaultShader_t* _copyShader_depthToColor;
LatteDefaultShader_t* _copyShader_colorToDepth;
void LatteDefaultShader_pixelCopyShader_generateVSBody(StringBuf* vs)
{
vs->add("#version 420\r\n");
vs->add("out vec2 passUV;\r\n");
vs->add("uniform vec4 uf_vertexOffsets[4];\r\n");
vs->add("\r\n");
vs->add("void main(){\r\n");
vs->add("int vID = gl_VertexID;\r\n");
vs->add("passUV = uf_vertexOffsets[vID].zw;\r\n");
vs->add("gl_Position = vec4(uf_vertexOffsets[vID].xy, 0.0, 1.0);\r\n");
vs->add("}\r\n");
}
GLuint gxShaderDepr_compileRaw(StringBuf* strSourceVS, StringBuf* strSourceFS);
GLuint gxShaderDepr_compileRaw(const std::string& vertex_source, const std::string& fragment_source);
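// Lazily builds and caches a full-screen copy shader that samples a depth texture and
// writes the sampled value into the red channel of a color target. The companion
// function below does the inverse (red channel -> gl_FragDepth). Both reuse the shared
// vertex shader generated above, which places the quad corners from uf_vertexOffsets.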
LatteDefaultShader_t* LatteDefaultShader_getPixelCopyShader_depthToColor()
{
if (_copyShader_depthToColor != 0)
return _copyShader_depthToColor;
catchOpenGLError();
LatteDefaultShader_t* defaultShader = (LatteDefaultShader_t*)malloc(sizeof(LatteDefaultShader_t));
memset(defaultShader, 0, sizeof(LatteDefaultShader_t));
StringBuf fCStr_vertexShader(1024 * 16);
LatteDefaultShader_pixelCopyShader_generateVSBody(&fCStr_vertexShader);
StringBuf fCStr_defaultFragShader(1024 * 16);
fCStr_defaultFragShader.add("#version 420\r\n");
fCStr_defaultFragShader.add("in vec2 passUV;\r\n");
fCStr_defaultFragShader.add("uniform sampler2D textureSrc;\r\n");
fCStr_defaultFragShader.add("layout(location = 0) out vec4 colorOut0;\r\n");
fCStr_defaultFragShader.add("\r\n");
fCStr_defaultFragShader.add("void main(){\r\n");
fCStr_defaultFragShader.add("colorOut0 = vec4(texture(textureSrc, passUV).r,0.0,0.0,1.0);\r\n");
fCStr_defaultFragShader.add("}\r\n");
defaultShader->glProgamId = gxShaderDepr_compileRaw(&fCStr_vertexShader, &fCStr_defaultFragShader);
catchOpenGLError();
defaultShader->copyShaderUniforms.uniformLoc_textureSrc = glGetUniformLocation(defaultShader->glProgamId, "textureSrc");
defaultShader->copyShaderUniforms.uniformLoc_vertexOffsets = glGetUniformLocation(defaultShader->glProgamId, "uf_vertexOffsets");
_copyShader_depthToColor = defaultShader;
catchOpenGLError();
return defaultShader;
}
LatteDefaultShader_t* LatteDefaultShader_getPixelCopyShader_colorToDepth()
{
if (_copyShader_colorToDepth != 0)
return _copyShader_colorToDepth;
catchOpenGLError();
LatteDefaultShader_t* defaultShader = (LatteDefaultShader_t*)malloc(sizeof(LatteDefaultShader_t));
memset(defaultShader, 0, sizeof(LatteDefaultShader_t));
StringBuf fCStr_vertexShader(1024 * 16);
LatteDefaultShader_pixelCopyShader_generateVSBody(&fCStr_vertexShader);
StringBuf fCStr_defaultFragShader(1024 * 16);
fCStr_defaultFragShader.add("#version 420\r\n");
fCStr_defaultFragShader.add("in vec2 passUV;\r\n");
fCStr_defaultFragShader.add("uniform sampler2D textureSrc;\r\n");
fCStr_defaultFragShader.add("layout(location = 0) out vec4 colorOut0;\r\n");
fCStr_defaultFragShader.add("\r\n");
fCStr_defaultFragShader.add("void main(){\r\n");
fCStr_defaultFragShader.add("gl_FragDepth = texture(textureSrc, passUV).r;\r\n");
fCStr_defaultFragShader.add("}\r\n");
defaultShader->glProgamId = gxShaderDepr_compileRaw(&fCStr_vertexShader, &fCStr_defaultFragShader);
defaultShader->copyShaderUniforms.uniformLoc_textureSrc = glGetUniformLocation(defaultShader->glProgamId, "textureSrc");
defaultShader->copyShaderUniforms.uniformLoc_vertexOffsets = glGetUniformLocation(defaultShader->glProgamId, "uf_vertexOffsets");
_copyShader_colorToDepth = defaultShader;
catchOpenGLError();
return defaultShader;
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteSoftware.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/Core/LattePM4.h"
#define GPU7_ENDIAN_8IN32 2
typedef struct
{
union
{
float f[4];
uint32 u32[4];
sint32 s32[4];
};
}LatteReg_t;
#define REG_AR (128)
typedef struct
{
LatteReg_t reg[128+1];
union
{
uint32 u32[5];
sint32 s32[5];
float f[5];
}pvps;
union
{
uint32 u32[5];
sint32 s32[5];
float f[5];
}pvpsUpdate;
void* cfilePtr;
LatteReg_t* literalPtr;
// cbank
LatteReg_t* cbank0;
LatteReg_t* cbank1;
// vertex shader exports
LatteReg_t export_pos;
uint32* shaderBase;
sint32 shaderSize;
// shaders
LatteFetchShader* fetchShader;
}LatteSoftwareExecContext_t;
LatteSoftwareExecContext_t LatteSWCtx;
char _tempStringBuf[2048];
sint32 tempStringIndex = 0;
char* getTempString()
{
tempStringIndex = (tempStringIndex + 1) % 10;
return _tempStringBuf + tempStringIndex * (sizeof(_tempStringBuf) / 10);
}
const char* _getSrcName(uint32 srcSel, uint32 srcChan)
{
if (GPU7_ALU_SRC_IS_GPR(srcSel))
{
char* tempStr = getTempString();
sprintf(tempStr, "R%d", srcSel & 0x7F);
if (srcChan == 0)
strcat(tempStr, ".x");
else if (srcChan == 1)
strcat(tempStr, ".y");
else if (srcChan == 2)
strcat(tempStr, ".z");
else
strcat(tempStr, ".w");
return tempStr;
}
else if (GPU7_ALU_SRC_IS_CFILE(srcSel))
{
return "CFILE";
}
else if (GPU7_ALU_SRC_IS_PV(srcSel))
{
return "PV";
}
else if (GPU7_ALU_SRC_IS_PS(srcSel))
{
return "PS";
}
else if (GPU7_ALU_SRC_IS_CBANK0(srcSel))
{
return "CBANK0";
}
else if (GPU7_ALU_SRC_IS_CBANK1(srcSel))
{
return "CBANK1";
}
else if (GPU7_ALU_SRC_IS_CONST_0F(srcSel))
{
return "0.0";
}
else if (GPU7_ALU_SRC_IS_CONST_1F(srcSel))
{
return "1.0";
}
else if (GPU7_ALU_SRC_IS_CONST_0_5F(srcSel))
{
return "0.5";
}
else if (GPU7_ALU_SRC_IS_LITERAL(srcSel))
{
return "LITERAL";
}
else
{
cemu_assert_unimplemented();
}
return "UKN";
}
sint32 _getSrc_genericS32(uint32 srcSel, uint32 srcChan, uint32 srcRel, uint32 indexMode)
{
sint32 v = 0;
if (GPU7_ALU_SRC_IS_GPR(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = LatteSWCtx.reg[GPU7_ALU_SRC_GET_GPR_INDEX(srcSel)].s32[srcChan];
}
else if (GPU7_ALU_SRC_IS_CFILE(srcSel))
{
if (srcRel)
{
if (indexMode <= GPU7_INDEX_AR_W)
{
v = ((sint32*)LatteSWCtx.cfilePtr)[LatteSWCtx.reg[REG_AR].s32[indexMode] * 4 + GPU7_ALU_SRC_GET_CFILE_INDEX(srcSel) * 4 + srcChan];
}
else
cemu_assert_debug(false);
}
else
{
v = ((sint32*)LatteSWCtx.cfilePtr)[GPU7_ALU_SRC_GET_CFILE_INDEX(srcSel) * 4 + srcChan];
}
}
else if (GPU7_ALU_SRC_IS_PV(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = LatteSWCtx.pvps.s32[srcChan];
}
else if (GPU7_ALU_SRC_IS_PS(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = LatteSWCtx.pvps.s32[4];
}
else if (GPU7_ALU_SRC_IS_CBANK0(srcSel))
{
if (srcRel)
{
if (indexMode <= GPU7_INDEX_AR_W)
{
v = LatteSWCtx.cbank0[LatteSWCtx.reg[REG_AR].s32[indexMode] + GPU7_ALU_SRC_GET_CBANK0_INDEX(srcSel)].s32[srcChan];
}
else
assert_dbg();
}
else
{
v = LatteSWCtx.cbank0[GPU7_ALU_SRC_GET_CBANK0_INDEX(srcSel)].s32[srcChan];
}
}
else if (GPU7_ALU_SRC_IS_CBANK1(srcSel))
{
if (srcRel)
{
if (indexMode <= GPU7_INDEX_AR_W)
{
v = LatteSWCtx.cbank1[LatteSWCtx.reg[REG_AR].s32[indexMode] + GPU7_ALU_SRC_GET_CBANK1_INDEX(srcSel)].s32[srcChan];
}
else
assert_dbg();
}
else
{
v = LatteSWCtx.cbank1[GPU7_ALU_SRC_GET_CBANK1_INDEX(srcSel)].s32[srcChan];
}
}
else if (GPU7_ALU_SRC_IS_CONST_0F(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = 0; // 0.0f
}
else if (GPU7_ALU_SRC_IS_CONST_1F(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = 0x3f800000; // 1.0f
}
else if (GPU7_ALU_SRC_IS_CONST_0_5F(srcSel))
{
cemu_assert_debug(srcRel == 0);
v = 0x3f000000; // 0.5f
}
else if (GPU7_ALU_SRC_IS_LITERAL(srcSel))
{
v = LatteSWCtx.literalPtr->s32[srcChan];
}
else
assert_dbg();
return v;
}
sint32 _getSrc_s32(uint32 srcSel, uint32 srcChan, uint32 srcNeg, uint32 srcAbs, uint32 srcRel, uint32 indexMode)
{
sint32 v = _getSrc_genericS32(srcSel, srcChan, srcRel, indexMode);
cemu_assert_debug(srcNeg == 0);
cemu_assert_debug(srcAbs == 0);
return v;
}
float _getSrc_f(uint32 srcSel, uint32 srcChan, uint32 srcNeg, uint32 srcAbs, uint32 srcRel, uint32 indexMode)
{
float v = 0;
*(sint32*)&v = _getSrc_genericS32(srcSel, srcChan, srcRel, indexMode);
if (srcAbs) // todo - how does this interact with srcNeg
v = fabs(v);
if (srcNeg)
v = -v;
return v;
}
#define _updateGPR_S32(__gprIdx,__channel,__v) {gprUpdate[updateQueueLength].gprIndex = __gprIdx; gprUpdate[updateQueueLength].channel = __channel; gprUpdate[updateQueueLength].s32 = __v; updateQueueLength++;}
#define _updateGPR_F(__gprIdx,__channel,__v) {gprUpdate[updateQueueLength].gprIndex = __gprIdx; gprUpdate[updateQueueLength].channel = __channel; gprUpdate[updateQueueLength].f = __v; updateQueueLength++;}
#define _updatePVPS_S32(__pvIndex, __v) {LatteSWCtx.pvpsUpdate.s32[__pvIndex] = __v;}
#define _updatePVPS_F(__pvIndex, __v) {LatteSWCtx.pvpsUpdate.f[__pvIndex] = __v;}
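// GPR writes produced inside an ALU instruction group are queued through the macros
// above and only committed after the whole group has executed; PV/PS results are staged
// in pvpsUpdate the same way. This is presumably to match the hardware, where all ALU
// units of a group read the register state from before the group.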
float LatteSoftware_omod(uint32 omod, float f)
{
switch (omod)
{
case ALU_OMOD_NONE:
return f;
case ALU_OMOD_MUL2:
return f * 2.0f;
case ALU_OMOD_MUL4:
return f * 4.0f;
case ALU_OMOD_DIV2:
return f / 2.0f;
}
cemu_assert_debug(false);
return 0.0f;
}
#ifdef CEMU_DEBUG_ASSERT
#define _clamp(__v) if(destClamp != 0) cemu_assert_unimplemented()
#else
#define _clamp(__v) // todo
#endif
#define _omod(__v) __v = LatteSoftware_omod(omod, __v)
bool LatteDecompiler_IsALUTransInstruction(bool isOP3, uint32 opcode);
void LatteSoftware_setupCBankPointers(uint32 cBank0Index, uint32 cBank1Index, uint32 cBank0AddrBase, uint32 cBank1AddrBase)
{
MPTR cBank0Ptr = LatteGPUState.contextRegister[mmSQ_VTX_UNIFORM_BLOCK_START + cBank0Index * 7 + 0];
MPTR cBank1Ptr = LatteGPUState.contextRegister[mmSQ_VTX_UNIFORM_BLOCK_START + cBank1Index * 7 + 0];
LatteSWCtx.cbank0 = (LatteReg_t*)memory_getPointerFromPhysicalOffset(cBank0Ptr + cBank0AddrBase);
LatteSWCtx.cbank1 = (LatteReg_t*)memory_getPointerFromPhysicalOffset(cBank1Ptr + cBank1AddrBase);
}
void LatteSoftware_executeALUClause(uint32 cfType, uint32 addr, uint32 count, uint32 cBank0Index, uint32 cBank1Index, uint32 cBank0AddrBase, uint32 cBank1AddrBase)
{
cemu_assert_debug(cfType == GPU7_CF_INST_ALU); // todo - handle other ALU clauses
uint32* aluWordPtr = LatteSWCtx.shaderBase + addr * 2;
uint32* aluWordPtrEnd = aluWordPtr + count * 2;
LatteSoftware_setupCBankPointers(cBank0Index, cBank1Index, cBank0AddrBase, cBank1AddrBase);
struct
{
sint16 gprIndex;
sint16 channel;
union
{
float f;
uint32 u32;
sint32 s32;
};
}gprUpdate[16];
sint32 updateQueueLength = 0;
uint32 aluUnitWriteMask = 0;
uint8 literalAccessMask = 0;
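// ALU clauses are processed one instruction group at a time: each instruction is a
// 64-bit word pair, the last instruction of a group has bit 31 of its first word set,
// and any literal constants used by the group follow it directly (tracked via
// literalAccessMask and skipped once the group has been applied).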
while (aluWordPtr < aluWordPtrEnd)
{
// calculate number of instructions in group
sint32 groupInstructionCount;
for (sint32 i = 0; i < 6; i++)
{
if (aluWordPtr[i * 2] & 0x80000000)
{
groupInstructionCount = i + 1;
break;
}
cemu_assert_debug(i < 5);
}
LatteSWCtx.literalPtr = (LatteReg_t*)(aluWordPtr + groupInstructionCount*2);
// process group
bool hasReductionInstruction = false;
float reductionResult = 0.0f;
for (sint32 s = 0; s < groupInstructionCount; s++)
{
uint32 aluWord0 = aluWordPtr[0];
uint32 aluWord1 = aluWordPtr[1];
aluWordPtr += 2;
uint32 alu_inst13_5 = (aluWord1 >> 13) & 0x1F;
// parameters from ALU word 0 (shared for ALU OP2 and OP3)
uint32 src0Sel = (aluWord0 >> 0) & 0x1FF; // source selection
uint32 src1Sel = (aluWord0 >> 13) & 0x1FF;
uint32 src0Rel = (aluWord0 >> 9) & 0x1; // relative addressing mode
uint32 src1Rel = (aluWord0 >> 22) & 0x1;
uint32 src0Chan = (aluWord0 >> 10) & 0x3; // component selection x/y/z/w
uint32 src1Chan = (aluWord0 >> 23) & 0x3;
uint32 src0Neg = (aluWord0 >> 12) & 0x1; // negate input
uint32 src1Neg = (aluWord0 >> 25) & 0x1;
uint32 indexMode = (aluWord0 >> 26) & 7;
uint32 predSel = (aluWord0 >> 29) & 3;
if (GPU7_ALU_SRC_IS_LITERAL(src0Sel))
literalAccessMask |= (1 << src0Chan);
if (GPU7_ALU_SRC_IS_LITERAL(src1Sel))
literalAccessMask |= (1 << src1Chan);
if (alu_inst13_5 >= 0x8)
{
// op3
// parameters from ALU word 1
uint32 src2Sel = (aluWord1 >> 0) & 0x1FF; // source selection
uint32 src2Rel = (aluWord1 >> 9) & 0x1; // relative addressing mode
uint32 src2Chan = (aluWord1 >> 10) & 0x3; // component selection x/y/z/w
uint32 src2Neg = (aluWord1 >> 12) & 0x1; // negate input
if (GPU7_ALU_SRC_IS_LITERAL(src2Sel))
literalAccessMask |= (1 << src2Chan);
uint32 destGpr = (aluWord1 >> 21) & 0x7F;
uint32 destRel = (aluWord1 >> 28) & 1;
uint32 destElem = (aluWord1 >> 29) & 3;
uint32 destClamp = (aluWord1 >> 31) & 1;
uint32 aluUnit = destElem;
if (aluUnitWriteMask&(1 << destElem))
{
aluUnit = 4; // PV
}
else
aluUnitWriteMask |= (1 << destElem);
switch (alu_inst13_5)
{
case ALU_OP3_INST_CMOVE:
{
float f = _getSrc_f(src0Sel, src0Chan, src0Neg, 0, src0Rel, indexMode);
sint32 result;
if (f == 0.0f)
result = _getSrc_s32(src1Sel, src1Chan, src1Neg, 0, src1Rel, indexMode);
else
result = _getSrc_s32(src2Sel, src2Chan, src2Neg, 0, src2Rel, indexMode);
cemu_assert_debug(destClamp == 0);
_updateGPR_S32(destGpr, destElem, result);
_updatePVPS_S32(aluUnit, result);
break;
}
case ALU_OP3_INST_CMOVGT:
{
float f = _getSrc_f(src0Sel, src0Chan, src0Neg, 0, src0Rel, indexMode);
sint32 result;
if (f > 0.0f)
result = _getSrc_s32(src1Sel, src1Chan, src1Neg, 0, src1Rel, indexMode);
else
result = _getSrc_s32(src2Sel, src2Chan, src2Neg, 0, src2Rel, indexMode);
cemu_assert_debug(destClamp == 0);
_updateGPR_S32(destGpr, destElem, result);
_updatePVPS_S32(aluUnit, result);
break;
}
case ALU_OP3_INST_MULADD:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, 0, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, 0, src1Rel, indexMode);
float f2 = _getSrc_f(src2Sel, src2Chan, src2Neg, 0, src2Rel, indexMode);
float f = f0*f1+f2;
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
default:
cemu_assert_debug(false);
}
}
else
{
uint32 alu_inst7_11 = (aluWord1 >> 7) & 0x7FF;
uint32 src0Abs = (aluWord1 >> 0) & 1;
uint32 src1Abs = (aluWord1 >> 1) & 1;
uint32 updateExecuteMask = (aluWord1 >> 2) & 1;
uint32 updatePredicate = (aluWord1 >> 3) & 1;
uint32 writeMask = (aluWord1 >> 4) & 1;
uint32 omod = (aluWord1 >> 5) & 3;
uint32 destGpr = (aluWord1 >> 21) & 0x7F;
uint32 destRel = (aluWord1 >> 28) & 1;
uint32 destElem = (aluWord1 >> 29) & 3;
uint32 destClamp = (aluWord1 >> 31) & 1;
uint32 aluUnit = destElem;
if (LatteDecompiler_IsALUTransInstruction(false, alu_inst7_11))
aluUnit = 4;
if (aluUnitWriteMask&(1 << destElem))
{
aluUnit = 4; // PV
}
else
aluUnitWriteMask |= (1 << destElem);
switch (alu_inst7_11)
{
case ALU_OP2_INST_NOP:
{
break;
}
case ALU_OP2_INST_MOV:
{
if (src0Neg || src0Abs || omod != 0)
{
float v = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
_omod(v);
_clamp(v);
if (writeMask)
_updateGPR_F(destGpr, destElem, v);
_updatePVPS_F(aluUnit, v);
}
else
{
sint32 v = _getSrc_s32(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
// todo - omod/clamp for float moves
if (writeMask)
_updateGPR_S32(destGpr, destElem, v);
_updatePVPS_S32(aluUnit, v);
}
break;
}
case ALU_OP2_INST_ADD:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, src1Abs, src1Rel, indexMode);
float f = f0 + f1;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_MUL:
case ALU_OP2_INST_MUL_IEEE:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, src1Abs, src1Rel, indexMode);
float f = f0 * f1;
if (f0 == 0.0f || f1 == 0.0f)
f = 0.0f;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_TRUNC:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f = truncf(f0);
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_RECIP_IEEE:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f = 1.0f / f0;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_SETGT:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, src1Abs, src1Rel, indexMode);
float f = (f0 > f1) ? 1.0f : 0.0f;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_SETGE:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, src1Abs, src1Rel, indexMode);
float f = (f0 >= f1) ? 1.0f : 0.0f;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_INT_TO_FLOAT:
{
sint32 v = _getSrc_s32(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f = (float)v;
_omod(f);
_clamp(f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_FLT_TO_INT:
{
float v = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
sint32 f = (sint32)v;
if (writeMask)
_updateGPR_S32(destGpr, destElem, f);
_updatePVPS_S32(aluUnit, f);
break;
}
case ALU_OP2_INST_MOVA_FLOOR:
{
float f = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
f = floor(f);
f = std::min(std::max(f, -256.0f), 255.0f);
// omod, clamp?
_updateGPR_S32(REG_AR, destElem, (sint32)f);
if (writeMask)
_updateGPR_F(destGpr, destElem, f);
_updatePVPS_F(aluUnit, f);
break;
}
case ALU_OP2_INST_DOT4:
{
float f0 = _getSrc_f(src0Sel, src0Chan, src0Neg, src0Abs, src0Rel, indexMode);
float f1 = _getSrc_f(src1Sel, src1Chan, src1Neg, src1Abs, src1Rel, indexMode);
float f = f0 * f1;
reductionResult += f;
_omod(f);
_clamp(f);
if (writeMask)
{
_updateGPR_F(destGpr, destElem, f);
}
_updatePVPS_F(aluUnit, f);
hasReductionInstruction = true;
break;
}
default:
cemu_assert_debug(false);
}
}
}
// apply updates
if (hasReductionInstruction == false)
{
for (sint32 i = 0; i < updateQueueLength; i++)
{
LatteSWCtx.reg[gprUpdate[i].gprIndex].s32[gprUpdate[i].channel] = gprUpdate[i].s32;
}
LatteSWCtx.pvps.s32[0] = LatteSWCtx.pvpsUpdate.s32[0];
LatteSWCtx.pvps.s32[1] = LatteSWCtx.pvpsUpdate.s32[1];
LatteSWCtx.pvps.s32[2] = LatteSWCtx.pvpsUpdate.s32[2];
LatteSWCtx.pvps.s32[3] = LatteSWCtx.pvpsUpdate.s32[3];
LatteSWCtx.pvps.s32[4] = LatteSWCtx.pvpsUpdate.s32[4];
}
else
{
for (sint32 i = 0; i < updateQueueLength; i++)
{
LatteSWCtx.reg[gprUpdate[i].gprIndex].f[gprUpdate[i].channel] = reductionResult;
}
LatteSWCtx.pvps.f[0] = reductionResult;
LatteSWCtx.pvps.f[1] = reductionResult;
LatteSWCtx.pvps.f[2] = reductionResult;
LatteSWCtx.pvps.f[3] = reductionResult;
}
updateQueueLength = 0;
// skip literals
if (literalAccessMask&(3 << 0))
aluWordPtr += 2;
if (literalAccessMask&(3 << 2))
{
cemu_assert_debug((literalAccessMask &3) != 0);
aluWordPtr += 2;
}
// reset group state tracking variables
aluUnitWriteMask = 0;
literalAccessMask = 0;
}
}
sint32 _getRegValueByCompSel(uint32 gprIndex, uint32 compSel)
{
if (compSel < 4)
return LatteSWCtx.reg[gprIndex].s32[compSel];
cemu_assert_unimplemented();
return 0;
}
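// Interprets the control-flow (CF) program of the shader set up via
// LatteSoftware_setupVertexShader. Each CF word pair is either a general CF instruction
// (exports, NOP, CALL_FS, ...) or an ALU clause that is forwarded to
// LatteSoftware_executeALUClause. Only the subset needed so far is implemented;
// everything else hits cemu_assert_unimplemented.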
void LatteSoftware_singleRun()
{
uint32* cfWords = LatteSWCtx.shaderBase;
sint32 instructionIndex = 0;
while (true)
{
uint32 cfWord0 = cfWords[instructionIndex + 0];
uint32 cfWord1 = cfWords[instructionIndex + 1];
instructionIndex += 2;
uint32 cf_inst23_7 = (cfWord1 >> 23) & 0x7F;
if (cf_inst23_7 < 0x40) // starting at 0x40 the bits overlap with the ALU instruction encoding
{
bool isEndOfProgram = ((cfWord1 >> 21) & 1) != 0;
uint32 addr = cfWord0 & 0xFFFFFFFF;
uint32 count = (cfWord1 >> 10) & 7;
if (((cfWord1 >> 19) & 1) != 0)
count |= 0x8;
count++;
if (cf_inst23_7 == GPU7_CF_INST_CALL_FS)
{
}
else if (cf_inst23_7 == GPU7_CF_INST_NOP)
{
// nop
if (((cfWord1 >> 0) & 7) != 0)
cemu_assert_debug(false); // pop count is not zero
}
else if (cf_inst23_7 == GPU7_CF_INST_EXPORT || cf_inst23_7 == GPU7_CF_INST_EXPORT_DONE)
{
// export
uint32 edType = (cfWord0 >> 13) & 0x3;
uint32 edIndexGpr = (cfWord0 >> 23) & 0x7F;
uint32 edRWRel = (cfWord0 >> 22) & 1;
if (edRWRel != 0 || edIndexGpr != 0)
cemu_assert_debug(false);
uint8 exportComponentSel[4];
exportComponentSel[0] = (cfWord1 >> 0) & 0x7;
exportComponentSel[1] = (cfWord1 >> 3) & 0x7;
exportComponentSel[2] = (cfWord1 >> 6) & 0x7;
exportComponentSel[3] = (cfWord1 >> 9) & 0x7;
uint32 exportArrayBase = (cfWord0 >> 0) & 0x1FFF;
uint32 exportBurstCount = (cfWord1 >> 17) & 0xF;
uint32 exportSourceGPR = (cfWord0 >> 15) & 0x7F;
uint32 memWriteElemSize = (cfWord0>>29)&3; // unused
cemu_assert_debug(exportBurstCount == 0);
if (edType == 1 && exportArrayBase == GPU7_DECOMPILER_CF_EXPORT_BASE_POSITION)
{
LatteSWCtx.export_pos.s32[0] = _getRegValueByCompSel(exportSourceGPR, exportComponentSel[0]);
LatteSWCtx.export_pos.s32[1] = _getRegValueByCompSel(exportSourceGPR, exportComponentSel[1]);
LatteSWCtx.export_pos.s32[2] = _getRegValueByCompSel(exportSourceGPR, exportComponentSel[2]);
LatteSWCtx.export_pos.s32[3] = _getRegValueByCompSel(exportSourceGPR, exportComponentSel[3]);
}
else
{
// unhandled export
cemu_assert_unimplemented();
}
}
else if (cf_inst23_7 == GPU7_CF_INST_TEX)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_ELSE ||
cf_inst23_7 == GPU7_CF_INST_POP)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_JUMP)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_LOOP_START_DX10 || cf_inst23_7 == GPU7_CF_INST_LOOP_END)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_LOOP_BREAK)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cf_inst23_7 == GPU7_CF_INST_MEM_STREAM1_WRITE)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_MEM_RING_WRITE)
{
cemu_assert_unimplemented();
}
else if (cf_inst23_7 == GPU7_CF_INST_EMIT_VERTEX)
{
cemu_assert_unimplemented();
}
else
{
cemu_assert_unimplemented();
}
if (isEndOfProgram)
{
return;
}
}
else
{
// ALU instruction
uint32 cf_inst26_4 = ((cfWord1 >> 26) & 0xF) | GPU7_CF_INST_ALU_MASK;
if (cf_inst26_4 == GPU7_CF_INST_ALU || cf_inst26_4 == GPU7_CF_INST_ALU_PUSH_BEFORE || cf_inst26_4 == GPU7_CF_INST_ALU_POP_AFTER || cf_inst26_4 == GPU7_CF_INST_ALU_POP2_AFTER || cf_inst26_4 == GPU7_CF_INST_ALU_BREAK || cf_inst26_4 == GPU7_CF_INST_ALU_ELSE_AFTER)
{
uint32 addr = (cfWord0 >> 0) & 0x3FFFFF;
uint32 count = ((cfWord1 >> 18) & 0x7F) + 1;
uint32 cBank0Index = (cfWord0 >> 22) & 0xF;
uint32 cBank1Index = (cfWord0 >> 26) & 0xF;
uint32 cBank0AddrBase = ((cfWord1 >> 2) & 0xFF) * 16;
uint32 cBank1AddrBase = ((cfWord1 >> 10) & 0xFF) * 16;
LatteSoftware_executeALUClause(cf_inst26_4, addr, count, cBank0Index, cBank1Index, cBank0AddrBase, cBank1AddrBase);
}
else
{
cemu_assert_unimplemented();
}
}
}
cemu_assert_debug(false);
}
template<int endianMode>
uint32 _readVtxU32(void* ptr)
{
uint32 v = *(uint32*)ptr;
if constexpr (endianMode == GPU7_ENDIAN_8IN32)
v = _swapEndianU32(v);
return v;
}
template<int endianMode, int nfa>
void _readAttr_FLOAT_32_32(void* ptr, LatteReg_t& output)
{
output.s32[0] = _readVtxU32<endianMode>((uint32*)ptr);
output.s32[1] = _readVtxU32<endianMode>((uint32*)ptr + 1);
output.s32[2] = 0;
output.s32[3] = 0;
}
template<int endianMode, int nfa>
void _readAttr_FLOAT_32_32_32(void* ptr, LatteReg_t& output)
{
output.s32[0] = _readVtxU32<endianMode>((uint32*)ptr);
output.s32[1] = _readVtxU32<endianMode>((uint32*)ptr + 1);
output.s32[2] = _readVtxU32<endianMode>((uint32*)ptr + 2);
output.s32[3] = 0;
}
template<int endianMode, int nfa>
void _readAttr_FLOAT_32_32_32_32(void* ptr, LatteReg_t& output)
{
output.s32[0] = _readVtxU32<endianMode>((uint32*)ptr);
output.s32[1] = _readVtxU32<endianMode>((uint32*)ptr + 1);
output.s32[2] = _readVtxU32<endianMode>((uint32*)ptr + 2);
output.s32[3] = _readVtxU32<endianMode>((uint32*)ptr + 3);
}
#define _fmtKey(__fmt, __endianSwap, __nfa, __isSigned) ((__endianSwap)|((__nfa)<<2)|((__isSigned)<<4)|((__fmt)<<5))
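// _fmtKey packs format, endian swap mode, num-format and signedness into a single
// integer so attribute decoding can switch over exact combinations. Only the 32-bit
// float formats with 8-in-32 endian swap are handled below; other combinations trigger
// a debug assert.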
void LatteSoftware_loadVertexAttributes(sint32 index)
{
LatteSWCtx.reg[0].s32[0] = index;
for (auto& bufferGroup : LatteSWCtx.fetchShader->bufferGroups)
{
for (sint32 f = 0; f < bufferGroup.attribCount; f++)
{
auto attrib = bufferGroup.attrib + f;
// calculate element index
sint32 elementIndex = index;
// todo - handle instance index and attr divisor
// get buffer
uint32 bufferIndex = attrib->attributeBufferIndex;
if (bufferIndex >= 0x10)
{
continue;
}
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
MPTR bufferAddress = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 0];
uint32 bufferSize = LatteGPUState.contextRegister[bufferBaseRegisterIndex + 1] + 1;
uint32 bufferStride = (LatteGPUState.contextRegister[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
if (bufferAddress == MPTR_NULL)
{
debug_printf("Warning: Attribute uses NULL buffer during software emulation\n");
continue;
}
// translate semanticId to gpr index
uint32 gprIndex = 0xFFFFFFFF;
for (sint32 f = 0; f < 32; f++)
{
if (LatteGPUState.contextRegister[mmSQ_VTX_SEMANTIC_0 + f] == attrib->semanticId)
{
gprIndex = f;
break;
}
}
if (gprIndex == 0xFFFFFFFF)
continue; // attribute is not mapped to VS input
gprIndex = gprIndex + 1;
sint32 formatKey = _fmtKey((sint32)attrib->format, (sint32)attrib->endianSwap, (sint32)attrib->nfa, (sint32)attrib->isSigned);
void* inputData = memory_getPointerFromPhysicalOffset(bufferAddress + elementIndex * bufferStride);
LatteReg_t attrData;
switch (formatKey)
{
case _fmtKey(FMT_32_32_FLOAT, GPU7_ENDIAN_8IN32, LATTE_NFA_2, LATTE_VTX_UNSIGNED):
_readAttr_FLOAT_32_32<GPU7_ENDIAN_8IN32, LATTE_NFA_2>(inputData, attrData);
break;
case _fmtKey(FMT_32_32_32_FLOAT, GPU7_ENDIAN_8IN32, LATTE_NFA_2, LATTE_VTX_UNSIGNED):
_readAttr_FLOAT_32_32_32<GPU7_ENDIAN_8IN32, LATTE_NFA_2>(inputData, attrData);
break;
case _fmtKey(FMT_32_32_32_32_FLOAT, GPU7_ENDIAN_8IN32, LATTE_NFA_2, LATTE_VTX_UNSIGNED):
_readAttr_FLOAT_32_32_32_32<GPU7_ENDIAN_8IN32, LATTE_NFA_2>(inputData, attrData);
break;
default:
cemu_assert_debug(false);
}
LatteReg_t* gprOutput = LatteSWCtx.reg+gprIndex;
for (uint32 f = 0; f < 4; f++)
{
if (attrib->ds[f] < 4)
gprOutput->s32[f] = attrData.s32[f];
else if (attrib->ds[f] == 4)
gprOutput->s32[f] = 0;
else if (attrib->ds[f] == 5)
gprOutput->f[f] = 1.0;
else
cemu_assert_debug(false);
}
}
}
}
float* LatteSoftware_getPositionExport()
{
return LatteSWCtx.export_pos.f;
}
void LatteSoftware_executeVertex(sint32 index)
{
LatteSoftware_loadVertexAttributes(index);
LatteSoftware_singleRun();
}
void LatteSoftware_setupVertexShader(LatteFetchShader* fetchShader, void* shaderPtr, sint32 size)
{
LatteSWCtx.fetchShader = fetchShader;
LatteSWCtx.shaderBase = (uint32*)shaderPtr;
LatteSWCtx.shaderSize = size;
LatteSWCtx.cfilePtr = (void*)(LatteGPUState.contextRegister + LATTE_REG_BASE_ALU_CONST + 0x400);
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteOverlay.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "gui/guiWrapper.h"
#include "config/CemuConfig.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "config/ActiveSettings.h"
#include <imgui.h>
#include "resource/IconsFontAwesome5.h"
#include "imgui/imgui_extension.h"
#include "input/InputManager.h"
#include "util/SystemInfo/SystemInfo.h"
#include <cinttypes>
struct OverlayStats
{
OverlayStats() {};
int processor_count = 1;
ProcessorTime processor_time_cemu;
std::vector<ProcessorTime> processor_times;
double fps{};
uint32 draw_calls_per_frame{};
uint32 fast_draw_calls_per_frame{};
float cpu_usage{}; // cemu cpu usage in %
std::vector<float> cpu_per_core; // global cpu usage in % per core
uint32 ram_usage{}; // ram usage in MB
int vramUsage{}, vramTotal{}; // vram usage in mb
} g_state{};
extern std::atomic_int g_compiled_shaders_total;
extern std::atomic_int g_compiled_shaders_async;
std::atomic_int g_compiling_pipelines;
std::atomic_int g_compiling_pipelines_async;
std::atomic_uint64_t g_compiling_pipelines_syncTimeSum;
extern std::mutex g_friend_notification_mutex;
extern std::vector< std::pair<std::string, int> > g_friend_notifications;
std::mutex g_notification_mutex;
std::vector< std::pair<std::string, int> > g_notifications;
void LatteOverlay_pushNotification(const std::string& text, sint32 duration)
{
std::unique_lock lock(g_notification_mutex);
g_notifications.emplace_back(text, duration);
}
struct OverlayList
{
std::wstring text;
float pos_x = 0;
float pos_y = 0;
float width;
OverlayList(std::wstring text, float width)
: text(std::move(text)), width(width) {}
};
const auto kPopupFlags = ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_AlwaysAutoResize | ImGuiWindowFlags_NoSavedSettings | ImGuiWindowFlags_NoFocusOnAppearing | ImGuiWindowFlags_NoNav;
const float kBackgroundAlpha = 0.65f;
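// Renders the statistics overlay (FPS, draw calls, CPU/RAM/VRAM usage, optional debug
// info) as a borderless ImGui window at the given position. The caller advances
// `position` between overlays; `direction` decides whether subsequent windows stack
// downwards or upwards on screen.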
void LatteOverlay_renderOverlay(ImVec2& position, ImVec2& pivot, sint32 direction, float fontSize, bool pad)
{
auto& config = GetConfig();
const auto font = ImGui_GetFont(fontSize);
ImGui::PushFont(font);
const ImVec4 color = ImGui::ColorConvertU32ToFloat4(config.overlay.text_color);
ImGui::PushStyleColor(ImGuiCol_Text, color);
// stats overlay
if (config.overlay.fps || config.overlay.drawcalls || config.overlay.cpu_usage || config.overlay.cpu_per_core_usage || config.overlay.ram_usage)
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Stats overlay", nullptr, kPopupFlags))
{
if (config.overlay.fps)
ImGui::Text("FPS: %.2lf", g_state.fps);
if (config.overlay.drawcalls)
ImGui::Text("Draws/f: %d (fast: %d)", g_state.draw_calls_per_frame, g_state.fast_draw_calls_per_frame);
if (config.overlay.cpu_usage)
ImGui::Text("CPU: %.2lf%%", g_state.cpu_usage);
if (config.overlay.cpu_per_core_usage)
{
for (sint32 i = 0; i < g_state.processor_count; ++i)
{
ImGui::Text("CPU #%d: %.2lf%%", i + 1, g_state.cpu_per_core[i]);
}
}
if (config.overlay.ram_usage)
ImGui::Text("RAM: %dMB", g_state.ram_usage);
if(config.overlay.vram_usage && g_state.vramUsage != -1 && g_state.vramTotal != -1)
ImGui::Text("VRAM: %dMB / %dMB", g_state.vramUsage, g_state.vramTotal);
if (config.overlay.debug)
g_renderer->AppendOverlayDebugInfo();
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
ImGui::PopStyleColor();
ImGui::PopFont();
}
void LatteOverlay_RenderNotifications(ImVec2& position, ImVec2& pivot, sint32 direction, float fontSize, bool pad)
{
auto& config = GetConfig();
const auto font = ImGui_GetFont(fontSize);
ImGui::PushFont(font);
const ImVec4 color = ImGui::ColorConvertU32ToFloat4(config.notification.text_color);
ImGui::PushStyleColor(ImGuiCol_Text, color);
// selected controller profiles in the beginning
if (config.notification.controller_profiles)
{
static bool s_init_overlay = false;
if (!s_init_overlay)
{
static std::chrono::steady_clock::time_point s_started = tick_cached();
const auto now = tick_cached();
if (std::chrono::duration_cast<std::chrono::milliseconds>(now - s_started).count() <= 5000)
{
// active account
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Active account", nullptr, kPopupFlags))
{
ImGui::TextUnformatted((const char*)ICON_FA_USER);
ImGui::SameLine();
static std::string s_mii_name;
if (s_mii_name.empty())
{
auto tmp_view = Account::GetAccount(ActiveSettings::GetPersistentId()).GetMiiName();
std::wstring tmp{ tmp_view };
s_mii_name = boost::nowide::narrow(tmp);
}
ImGui::TextUnformatted(s_mii_name.c_str());
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
// controller
std::vector<std::pair<int, std::string>> profiles;
auto& input_manager = InputManager::instance();
for (int i = 0; i < InputManager::kMaxController; ++i)
{
const auto controller = input_manager.get_controller(i);
if (!controller)
continue;
const auto& profile_name = controller->get_profile_name();
if (profile_name.empty())
continue;
profiles.emplace_back(i, profile_name);
}
if (!profiles.empty())
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Controller profile names", nullptr, kPopupFlags))
{
auto it = profiles.cbegin();
ImGui::TextUnformatted((const char*)ICON_FA_GAMEPAD);
ImGui::SameLine();
ImGui::Text("Player %d: %s", it->first + 1, it->second.c_str());
for (++it; it != profiles.cend(); ++it)
{
ImGui::Separator();
ImGui::TextUnformatted((const char*)ICON_FA_GAMEPAD);
ImGui::SameLine();
ImGui::Text("Player %d: %s", it->first + 1, it->second.c_str());
}
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
else
s_init_overlay = true;
}
else
s_init_overlay = true;
}
}
if (config.notification.friends)
{
static std::vector< std::pair<std::string, std::chrono::steady_clock::time_point> > s_friend_list;
std::unique_lock lock(g_friend_notification_mutex);
if (!g_friend_notifications.empty())
{
const auto tick = tick_cached();
for (const auto& entry : g_friend_notifications)
{
s_friend_list.emplace_back(entry.first, tick + std::chrono::milliseconds(entry.second));
}
g_friend_notifications.clear();
}
if (!s_friend_list.empty())
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Friends overlay", nullptr, kPopupFlags))
{
const auto tick = tick_cached();
for (auto it = s_friend_list.cbegin(); it != s_friend_list.cend();)
{
ImGui::TextUnformatted(it->first.c_str(), it->first.c_str() + it->first.size());
if (tick >= it->second)
it = s_friend_list.erase(it);
else
++it;
}
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
}
// low battery warning
if (config.notification.controller_battery)
{
std::vector<int> batteries;
auto& input_manager = InputManager::instance();
for (int i = 0; i < InputManager::kMaxController; ++i)
{
const auto controller = input_manager.get_controller(i);
if (!controller)
continue;
if (controller->is_battery_low())
batteries.emplace_back(i);
}
if (!batteries.empty())
{
static std::chrono::steady_clock::time_point s_last_tick = tick_cached();
static bool s_blink_state = false;
const auto now = tick_cached();
if (std::chrono::duration_cast<std::chrono::milliseconds>(now - s_last_tick).count() >= 750)
{
s_blink_state = !s_blink_state;
s_last_tick = now;
}
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Low battery overlay", nullptr, kPopupFlags))
{
auto it = batteries.cbegin();
ImGui::TextUnformatted((const char*)(s_blink_state ? ICON_FA_BATTERY_EMPTY : ICON_FA_BATTERY_QUARTER));
ImGui::SameLine();
ImGui::Text("Player %d", *it + 1);
for (++it; it != batteries.cend(); ++it)
{
ImGui::Separator();
ImGui::TextUnformatted((const char*)(s_blink_state ? ICON_FA_BATTERY_EMPTY : ICON_FA_BATTERY_QUARTER));
ImGui::SameLine();
ImGui::Text("Player %d", *it + 1);
}
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
}
if (config.notification.shader_compiling)
{
static int32_t s_shader_count = 0;
static int32_t s_shader_count_async = 0;
if (s_shader_count > 0 || g_compiled_shaders_total > 0)
{
const int tmp = g_compiled_shaders_total.exchange(0);
const int tmpAsync = g_compiled_shaders_async.exchange(0);
s_shader_count += tmp;
s_shader_count_async += tmpAsync;
static std::chrono::steady_clock::time_point s_last_tick = tick_cached();
const auto now = tick_cached();
if (tmp > 0)
s_last_tick = now;
if (std::chrono::duration_cast<std::chrono::milliseconds>(now - s_last_tick).count() >= 2500)
{
s_shader_count = 0;
s_shader_count_async = 0;
}
if (s_shader_count > 0)
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Compiling shaders overlay", nullptr, kPopupFlags))
{
ImRotateStart();
ImGui::TextUnformatted((const char*)ICON_FA_SPINNER);
const auto ticks = std::chrono::time_point_cast<std::chrono::milliseconds>(now);
ImRotateEnd(0.001f * ticks.time_since_epoch().count());
ImGui::SameLine();
if (s_shader_count_async > 0 && GetConfig().async_compile) // the latter condition ensures the async count is never shown when async compile is disabled, since that could confuse the user
{
if(s_shader_count > 1)
ImGui::Text("Compiled %d new shaders... (%d async)", s_shader_count, s_shader_count_async);
else
ImGui::Text("Compiled %d new shader... (%d async)", s_shader_count, s_shader_count_async);
}
else
{
if (s_shader_count > 1)
ImGui::Text("Compiled %d new shaders...", s_shader_count);
else
ImGui::Text("Compiled %d new shader...", s_shader_count);
}
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
}
static int32_t s_pipeline_count = 0;
static int32_t s_pipeline_count_async = 0;
if (s_pipeline_count > 0 || g_compiling_pipelines > 0)
{
const int tmp = g_compiling_pipelines.exchange(0);
const int tmpAsync = g_compiling_pipelines_async.exchange(0);
s_pipeline_count += tmp;
s_pipeline_count_async += tmpAsync;
static std::chrono::steady_clock::time_point s_last_tick = tick_cached();
const auto now = tick_cached();
if (tmp > 0)
s_last_tick = now;
if (std::chrono::duration_cast<std::chrono::milliseconds>(now - s_last_tick).count() >= 2500)
{
s_pipeline_count = 0;
s_pipeline_count_async = 0;
}
if (s_pipeline_count > 0)
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Compiling pipeline overlay", nullptr, kPopupFlags))
{
ImRotateStart();
ImGui::TextUnformatted((const char*)ICON_FA_SPINNER);
const auto ticks = std::chrono::time_point_cast<std::chrono::milliseconds>(now);
ImRotateEnd(0.001f * ticks.time_since_epoch().count());
ImGui::SameLine();
#ifdef CEMU_DEBUG_ASSERT
uint64 totalTime = g_compiling_pipelines_syncTimeSum / 1000000ull;
if (s_pipeline_count_async > 0)
{
if (s_pipeline_count > 1)
ImGui::Text("Compiled %d new pipelines... (%d async) TotalSync: %" PRIu64 "ms", s_pipeline_count, s_pipeline_count_async, totalTime);
else
ImGui::Text("Compiled %d new pipeline... (%d async) TotalSync: %" PRIu64 "ms", s_pipeline_count, s_pipeline_count_async, totalTime);
}
else
{
if (s_pipeline_count > 1)
ImGui::Text("Compiled %d new pipelines... TotalSync: %" PRIu64 "ms", s_pipeline_count, totalTime);
else
ImGui::Text("Compiled %d new pipeline... TotalSync: %" PRIu64 "ms", s_pipeline_count, totalTime);
}
#else
if (s_pipeline_count_async > 0)
{
if (s_pipeline_count > 1)
ImGui::Text("Compiled %d new pipelines... (%d async)", s_pipeline_count, s_pipeline_count_async);
else
ImGui::Text("Compiled %d new pipeline... (%d async)", s_pipeline_count, s_pipeline_count_async);
}
else
{
if (s_pipeline_count > 1)
ImGui::Text("Compiled %d new pipelines...", s_pipeline_count);
else
ImGui::Text("Compiled %d new pipeline...", s_pipeline_count);
}
#endif
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
}
}
// misc notifications
static std::vector< std::pair<std::string, std::chrono::steady_clock::time_point> > s_misc_notifications;
std::unique_lock misc_lock(g_notification_mutex);
if (!g_notifications.empty())
{
const auto tick = tick_cached();
for (const auto& entry : g_notifications)
{
s_misc_notifications.emplace_back(entry.first, tick + std::chrono::milliseconds(entry.second));
}
g_notifications.clear();
}
misc_lock.unlock();
if (!s_misc_notifications.empty())
{
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(kBackgroundAlpha);
if (ImGui::Begin("Misc notifications", nullptr, kPopupFlags))
{
const auto tick = tick_cached();
for (auto it = s_misc_notifications.cbegin(); it != s_misc_notifications.cend();)
{
ImGui::TextUnformatted(it->first.c_str(), it->first.c_str() + it->first.size());
if (tick >= it->second)
it = s_misc_notifications.erase(it);
else
++it;
}
position.y += (ImGui::GetWindowSize().y + 10.0f) * direction;
}
ImGui::End();
}
ImGui::PopStyleColor();
ImGui::PopFont();
}
void LatteOverlay_translateScreenPosition(ScreenPosition pos, const Vector2f& window_size, ImVec2& position, ImVec2& pivot, sint32& direction)
{
switch (pos)
{
case ScreenPosition::kTopLeft:
position = { 10, 10 };
pivot = { 0, 0 };
direction = 1;
break;
case ScreenPosition::kTopCenter:
position = { window_size.x / 2.0f, 10 };
pivot = { 0.5f, 0 };
direction = 1;
break;
case ScreenPosition::kTopRight:
position = { window_size.x - 10, 10 };
pivot = { 1, 0 };
direction = 1;
break;
case ScreenPosition::kBottomLeft:
position = { 10, window_size.y - 10 };
pivot = { 0, 1 };
direction = -1;
break;
case ScreenPosition::kBottomCenter:
position = { window_size.x / 2.0f, window_size.y - 10 };
pivot = { 0.5f, 1 };
direction = -1;
break;
case ScreenPosition::kBottomRight:
position = { window_size.x - 10, window_size.y - 10 };
pivot = { 1, 1 };
direction = -1;
break;
default:
UNREACHABLE;
}
}
void LatteOverlay_render(bool pad_view)
{
const auto& config = GetConfig();
if(config.overlay.position == ScreenPosition::kDisabled && config.notification.position == ScreenPosition::kDisabled)
return;
sint32 w = 0, h = 0;
if (pad_view && gui_isPadWindowOpen())
gui_getPadWindowPhysSize(w, h);
else
gui_getWindowPhysSize(w, h);
if (w == 0 || h == 0)
return;
const Vector2f window_size{ (float)w,(float)h };
float fontDPIScale = !pad_view ? gui_getWindowDPIScale() : gui_getPadDPIScale();
float overlayFontSize = 14.0f * (float)config.overlay.text_scale / 100.0f * fontDPIScale;
// test if fonts are already precached
if (!ImGui_GetFont(overlayFontSize))
return;
float notificationsFontSize = 14.0f * (float)config.notification.text_scale / 100.0f * fontDPIScale;
if (!ImGui_GetFont(notificationsFontSize))
return;
ImVec2 position{}, pivot{};
sint32 direction{};
if (config.overlay.position != ScreenPosition::kDisabled)
{
LatteOverlay_translateScreenPosition(config.overlay.position, window_size, position, pivot, direction);
LatteOverlay_renderOverlay(position, pivot, direction, overlayFontSize, pad_view);
}
if (config.notification.position != ScreenPosition::kDisabled)
{
if(config.overlay.position != config.notification.position)
LatteOverlay_translateScreenPosition(config.notification.position, window_size, position, pivot, direction);
LatteOverlay_RenderNotifications(position, pivot, direction, notificationsFontSize, pad_view);
}
}
void LatteOverlay_init()
{
g_state.processor_count = GetProcessorCount();
g_state.processor_times.resize(g_state.processor_count);
g_state.cpu_per_core.resize(g_state.processor_count);
}
static void UpdateStats_CemuCpu()
{
ProcessorTime now;
QueryProcTime(now);
double cpu = ProcessorTime::Compare(g_state.processor_time_cemu, now);
cpu /= g_state.processor_count;
g_state.cpu_usage = cpu * 100;
g_state.processor_time_cemu = now;
}
static void UpdateStats_CpuPerCore()
{
std::vector<ProcessorTime> now(g_state.processor_count);
QueryCoreTimes(g_state.processor_count, now);
for (int32_t i = 0; i < g_state.processor_count; ++i)
{
double cpu = ProcessorTime::Compare(g_state.processor_times[i], now[i]);
g_state.cpu_per_core[i] = cpu * 100;
g_state.processor_times[i] = now[i];
}
}
void LatteOverlay_updateStats(double fps, sint32 drawcalls, sint32 fastDrawcalls)
{
if (GetConfig().overlay.position == ScreenPosition::kDisabled)
return;
g_state.fps = fps;
g_state.draw_calls_per_frame = drawcalls;
g_state.fast_draw_calls_per_frame = fastDrawcalls;
UpdateStats_CemuCpu();
UpdateStats_CpuPerCore();
// update ram
g_state.ram_usage = (QueryRamUsage() / 1000) / 1000;
// update vram
g_renderer->GetVRAMInfo(g_state.vramUsage, g_state.vramTotal);
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LattePerformanceMonitor.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteOverlay.h"
#include "gui/guiWrapper.h"
performanceMonitor_t performanceMonitor{};
void LattePerformanceMonitor_frameEnd()
{
// per-frame stats
performanceMonitor.gpuTime_shaderCreate.frameFinished();
performanceMonitor.gpuTime_frameTime.frameFinished();
performanceMonitor.gpuTime_idleTime.frameFinished();
performanceMonitor.gpuTime_fenceTime.frameFinished();
performanceMonitor.gpuTime_dcStageTextures.frameFinished();
performanceMonitor.gpuTime_dcStageVertexMgr.frameFinished();
performanceMonitor.gpuTime_dcStageShaderAndUniformMgr.frameFinished();
performanceMonitor.gpuTime_dcStageIndexMgr.frameFinished();
performanceMonitor.gpuTime_dcStageMRT.frameFinished();
performanceMonitor.gpuTime_dcStageDrawcallAPI.frameFinished();
performanceMonitor.gpuTime_waitForAsync.frameFinished();
uint32 elapsedTime = GetTickCount() - performanceMonitor.cycle[performanceMonitor.cycleIndex].lastUpdate;
if (elapsedTime >= 1000)
{
bool isFirstUpdate = performanceMonitor.cycle[performanceMonitor.cycleIndex].lastUpdate == 0;
// sum up raw stats
uint32 totalElapsedTime = GetTickCount() - performanceMonitor.cycle[(performanceMonitor.cycleIndex + 1) % PERFORMANCE_MONITOR_TRACK_CYCLES].lastUpdate;
uint32 totalElapsedTimeFPS = GetTickCount() - performanceMonitor.cycle[(performanceMonitor.cycleIndex + PERFORMANCE_MONITOR_TRACK_CYCLES - 1) % PERFORMANCE_MONITOR_TRACK_CYCLES].lastUpdate;
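// cycle[] is a ring buffer of per-second sample slots; (cycleIndex + 1) % PERFORMANCE_MONITOR_TRACK_CYCLES is the oldest slot
// and is used for averages over the whole window, while the current and previous slots feed the ~2 second FPS estimate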
uint32 elapsedFrames = 0;
uint32 elapsedFrames2S = 0; // elapsed frames for last two entries (seconds)
uint64 skippedCycles = 0;
uint64 vertexDataUploaded = 0;
uint64 vertexDataCached = 0;
uint64 uniformBankUploadedData = 0;
uint64 uniformBankUploadedCount = 0;
uint64 indexDataUploaded = 0;
uint64 indexDataCached = 0;
uint32 frameCounter = 0;
uint32 drawCallCounter = 0;
uint32 fastDrawCallCounter = 0;
uint32 shaderBindCounter = 0;
uint32 recompilerLeaveCount = 0;
uint32 threadLeaveCount = 0;
for (sint32 i = 0; i < PERFORMANCE_MONITOR_TRACK_CYCLES; i++)
{
elapsedFrames += performanceMonitor.cycle[i].frameCounter;
skippedCycles += performanceMonitor.cycle[i].skippedCycles;
vertexDataUploaded += performanceMonitor.cycle[i].vertexDataUploaded;
vertexDataCached += performanceMonitor.cycle[i].vertexDataCached;
uniformBankUploadedData += performanceMonitor.cycle[i].uniformBankUploadedData;
uniformBankUploadedCount += performanceMonitor.cycle[i].uniformBankUploadedCount;
indexDataUploaded += performanceMonitor.cycle[i].indexDataUploaded;
indexDataCached += performanceMonitor.cycle[i].indexDataCached;
frameCounter += performanceMonitor.cycle[i].frameCounter;
drawCallCounter += performanceMonitor.cycle[i].drawCallCounter;
fastDrawCallCounter += performanceMonitor.cycle[i].fastDrawCallCounter;
shaderBindCounter += performanceMonitor.cycle[i].shaderBindCount;
recompilerLeaveCount += performanceMonitor.cycle[i].recompilerLeaveCount;
threadLeaveCount += performanceMonitor.cycle[i].threadLeaveCount;
}
elapsedFrames = std::max<uint32>(elapsedFrames, 1);
elapsedFrames2S = performanceMonitor.cycle[(performanceMonitor.cycleIndex + PERFORMANCE_MONITOR_TRACK_CYCLES - 0) % PERFORMANCE_MONITOR_TRACK_CYCLES].frameCounter;
elapsedFrames2S += performanceMonitor.cycle[(performanceMonitor.cycleIndex + PERFORMANCE_MONITOR_TRACK_CYCLES - 1) % PERFORMANCE_MONITOR_TRACK_CYCLES].frameCounter;
elapsedFrames2S = std::max<uint32>(elapsedFrames2S, 1);
// calculate stats
uint64 passedCycles = PPCInterpreter_getMainCoreCycleCounter() - performanceMonitor.cycle[(performanceMonitor.cycleIndex + 1) % PERFORMANCE_MONITOR_TRACK_CYCLES].lastCycleCount;
passedCycles -= skippedCycles;
uint64 vertexDataUploadPerFrame = (vertexDataUploaded / (uint64)elapsedFrames);
vertexDataUploadPerFrame /= 1024ULL;
uint64 vertexDataCachedPerFrame = (vertexDataCached / (uint64)elapsedFrames);
vertexDataCachedPerFrame /= 1024ULL;
uint64 uniformBankDataUploadedPerFrame = (uniformBankUploadedData / (uint64)elapsedFrames);
uniformBankDataUploadedPerFrame /= 1024ULL;
uint32 uniformBankCountUploadedPerFrame = (uint32)(uniformBankUploadedCount / (uint64)elapsedFrames);
uint64 indexDataUploadPerFrame = (indexDataUploaded / (uint64)elapsedFrames);
indexDataUploadPerFrame /= 1024ULL;
double fps = (double)elapsedFrames2S * 1000.0 / (double)totalElapsedTimeFPS;
uint32 shaderBindsPerFrame = shaderBindCounter / elapsedFrames;
passedCycles = passedCycles * 1000ULL / totalElapsedTime;
uint32 rlps = (uint32)((uint64)recompilerLeaveCount * 1000ULL / (uint64)totalElapsedTime);
uint32 tlps = (uint32)((uint64)threadLeaveCount * 1000ULL / (uint64)totalElapsedTime);
// set stats
// next counter cycle
sint32 nextCycleIndex = (performanceMonitor.cycleIndex + 1) % PERFORMANCE_MONITOR_TRACK_CYCLES;
performanceMonitor.cycle[nextCycleIndex].drawCallCounter = 0;
performanceMonitor.cycle[nextCycleIndex].fastDrawCallCounter = 0;
performanceMonitor.cycle[nextCycleIndex].frameCounter = 0;
performanceMonitor.cycle[nextCycleIndex].shaderBindCount = 0;
performanceMonitor.cycle[nextCycleIndex].lastCycleCount = PPCInterpreter_getMainCoreCycleCounter();
performanceMonitor.cycle[nextCycleIndex].skippedCycles = 0;
performanceMonitor.cycle[nextCycleIndex].vertexDataUploaded = 0;
performanceMonitor.cycle[nextCycleIndex].vertexDataCached = 0;
performanceMonitor.cycle[nextCycleIndex].uniformBankUploadedData = 0;
performanceMonitor.cycle[nextCycleIndex].uniformBankUploadedCount = 0;
performanceMonitor.cycle[nextCycleIndex].indexDataUploaded = 0;
performanceMonitor.cycle[nextCycleIndex].indexDataCached = 0;
performanceMonitor.cycle[nextCycleIndex].recompilerLeaveCount = 0;
performanceMonitor.cycle[nextCycleIndex].threadLeaveCount = 0;
performanceMonitor.cycleIndex = nextCycleIndex;
// next update in 1 second
performanceMonitor.cycle[performanceMonitor.cycleIndex].lastUpdate = GetTickCount();
if (isFirstUpdate)
{
LatteOverlay_updateStats(0.0, 0, 0);
gui_updateWindowTitles(false, false, 0.0);
}
else
{
LatteOverlay_updateStats(fps, drawCallCounter / elapsedFrames, fastDrawCallCounter / elapsedFrames);
gui_updateWindowTitles(false, false, fps);
}
}
}
void LattePerformanceMonitor_frameBegin()
{
performanceMonitor.vk.numDrawBarriersPerFrame.reset();
performanceMonitor.vk.numBeginRenderpassPerFrame.reset();
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteSurfaceCopy.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteDefaultShaders.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
void LatteSurfaceCopy_copySurfaceNew(MPTR srcPhysAddr, MPTR srcMipAddr, uint32 srcSwizzle, Latte::E_GX2SURFFMT srcSurfaceFormat, sint32 srcWidth, sint32 srcHeight, sint32 srcDepth, uint32 srcPitch, sint32 srcSlice, Latte::E_DIM srcDim, Latte::E_HWTILEMODE srcTilemode, sint32 srcAA, sint32 srcLevel, MPTR dstPhysAddr, MPTR dstMipAddr, uint32 dstSwizzle, Latte::E_GX2SURFFMT dstSurfaceFormat, sint32 dstWidth, sint32 dstHeight, sint32 dstDepth, uint32 dstPitch, sint32 dstSlice, Latte::E_DIM dstDim, Latte::E_HWTILEMODE dstTilemode, sint32 dstAA, sint32 dstLevel)
{
// check if source is within valid mip range
if (srcDim == Latte::E_DIM::DIM_3D && (srcDepth >> srcLevel) == 0 && (srcWidth >> srcLevel) == 0 && (srcHeight >> srcLevel) == 0)
return;
else if ((srcWidth >> srcLevel) == 0 && (srcHeight >> srcLevel) == 0)
return;
// look up source texture
LatteTexture* sourceTexture = nullptr;
LatteTextureView* sourceView = LatteTC_GetTextureSliceViewOrTryCreate(srcPhysAddr, srcMipAddr, srcSurfaceFormat, srcTilemode, srcWidth, srcHeight, srcDepth, srcPitch, srcSwizzle, srcSlice, srcLevel);
if (sourceView == nullptr)
{
debug_printf("HLECopySurface(): Source texture is not in list of dynamic textures\n");
return;
}
sourceTexture = sourceView->baseTexture;
if (sourceTexture->reloadFromDynamicTextures)
{
LatteTexture_UpdateCacheFromDynamicTextures(sourceTexture);
sourceTexture->reloadFromDynamicTextures = false;
}
// look up destination texture
LatteTexture* destinationTexture = nullptr;
LatteTextureView* destinationView = LatteTextureViewLookupCache::lookupSlice(dstPhysAddr, dstWidth, dstHeight, dstPitch, dstLevel, dstSlice, dstSurfaceFormat);
if (destinationView)
destinationTexture = destinationView->baseTexture;
// create destination texture if it doesn't exist
if (!destinationTexture)
{
LatteTexture* renderTargetConf = nullptr;
destinationView = LatteTexture_CreateMapping(dstPhysAddr, dstMipAddr, dstWidth, dstHeight, dstDepth, dstPitch, dstTilemode, dstSwizzle, dstLevel, 1, dstSlice, 1, dstSurfaceFormat, dstDim, Latte::IsMSAA(dstDim) ? Latte::E_DIM::DIM_2D_MSAA : Latte::E_DIM::DIM_2D, false);
destinationTexture = destinationView->baseTexture;
}
// copy texture
if (sourceTexture && destinationTexture)
{
// mark source and destination texture as still in use
LatteTC_MarkTextureStillInUse(destinationTexture);
LatteTC_MarkTextureStillInUse(sourceTexture);
sint32 realSrcSlice = srcSlice;
if (LatteTexture_doesEffectiveRescaleRatioMatch(sourceTexture, sourceView->firstMip, destinationTexture, destinationView->firstMip))
{
// adjust copy size
sint32 copyWidth = std::max(srcWidth >> srcLevel, 1);
sint32 copyHeight = std::max(srcHeight >> srcLevel, 1);
// use the smaller width/height as copy size
copyWidth = std::min(copyWidth, std::max(dstWidth >> dstLevel, 1));
copyHeight = std::min(copyHeight, std::max(dstHeight >> dstLevel, 1));
sint32 effectiveCopyWidth = copyWidth;
sint32 effectiveCopyHeight = copyHeight;
LatteTexture_scaleToEffectiveSize(sourceTexture, &effectiveCopyWidth, &effectiveCopyHeight, 0);
// copy slice
if (sourceView->baseTexture->isDepth != destinationView->baseTexture->isDepth)
g_renderer->surfaceCopy_copySurfaceWithFormatConversion(sourceTexture, sourceView->firstMip, sourceView->firstSlice, destinationTexture, destinationView->firstMip, destinationView->firstSlice, copyWidth, copyHeight);
else
g_renderer->texture_copyImageSubData(sourceTexture, sourceView->firstMip, 0, 0, realSrcSlice, destinationTexture, destinationView->firstMip, 0, 0, destinationView->firstSlice, effectiveCopyWidth, effectiveCopyHeight, 1);
const uint64 eventCounter = LatteTexture_getNextUpdateEventCounter();
LatteTexture_MarkDynamicTextureAsChanged(destinationTexture->baseView, destinationView->firstSlice, destinationView->firstMip, eventCounter);
}
else
{
debug_printf("gx2CP_itHLECopySurface(): Copy texture with non-matching effective size\n");
}
LatteTC_ResetTextureChangeTracker(destinationTexture);
// flag texture as updated
destinationTexture->lastUpdateEventCounter = LatteTexture_getNextUpdateEventCounter();
destinationTexture->isUpdatedOnGPU = true; // todo - also track update flag per-slice
}
else
debug_printf("Source or destination texture does not exist\n");
// download destination texture if it matches known accessed formats
if (destinationTexture->width == 8 && destinationTexture->height == 8 && destinationTexture->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THIN1)
{
cemuLog_logDebug(LogType::Force, "Texture readback after copy for Bayonetta 2 (phys: 0x{:08x})", destinationTexture->physAddress);
LatteTextureReadback_Initate(destinationView);
}
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteThread.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h" // todo - remove dependency
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteAsyncCommands.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "Cafe/GraphicPack/GraphicPack2.h"
#include "gui/guiWrapper.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/LatteTexture.h"
#include "util/helpers/helpers.h"
#include <imgui.h>
#include "config/ActiveSettings.h"
#include "Cafe/CafeSystem.h"
LatteGPUState_t LatteGPUState = {};
std::atomic_bool sLatteThreadRunning = false;
std::atomic_bool sLatteThreadFinishedInit = false;
void LatteThread_Exit();
void Latte_LoadInitialRegisters()
{
LatteGPUState.contextNew.CB_TARGET_MASK.set_MASK(0xFFFFFFFF);
LatteGPUState.contextNew.VGT_MULTI_PRIM_IB_RESET_INDX.set_RESTART_INDEX(0xFFFFFFFF);
LatteGPUState.contextRegister[Latte::REGADDR::PA_CL_CLIP_CNTL] = 0;
*(float*)&LatteGPUState.contextRegister[mmDB_DEPTH_CLEAR] = 1.0f;
}
extern bool gx2WriteGatherInited;
LatteTextureView* osScreenTVTex[2] = { nullptr };
LatteTextureView* osScreenDRCTex[2] = { nullptr };
LatteTextureView* LatteHandleOSScreen_getOrCreateScreenTex(MPTR physAddress, uint32 width, uint32 height, uint32 pitch)
{
LatteTextureView* texView = LatteTextureViewLookupCache::lookup(physAddress, width, height, 1, pitch, 0, 1, 0, 1, Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM, Latte::E_DIM::DIM_2D);
if (texView)
return texView;
return LatteTexture_CreateTexture(Latte::E_DIM::DIM_2D, physAddress, 0, Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM, width, height, 1, pitch, 1, 0, Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED, false);
}
void LatteHandleOSScreen_prepareTextures()
{
osScreenTVTex[0] = LatteHandleOSScreen_getOrCreateScreenTex(LatteGPUState.osScreen.screen[0].physPtr, 1280, 720, 1280);
osScreenTVTex[1] = LatteHandleOSScreen_getOrCreateScreenTex(LatteGPUState.osScreen.screen[0].physPtr + 1280 * 720 * 4, 1280, 720, 1280);
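// the DRC framebuffers are 854x480 but stored with an aligned pitch of 0x380 (896) pixels, so the second buffer starts 896*480*4 bytes after the first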
osScreenDRCTex[0] = LatteHandleOSScreen_getOrCreateScreenTex(LatteGPUState.osScreen.screen[1].physPtr, 854, 480, 0x380);
osScreenDRCTex[1] = LatteHandleOSScreen_getOrCreateScreenTex(LatteGPUState.osScreen.screen[1].physPtr + 896 * 480 * 4, 854, 480, 0x380);
}
void LatteRenderTarget_copyToBackbuffer(LatteTextureView* textureView, bool isPadView);
bool LatteHandleOSScreen_TV()
{
if (!LatteGPUState.osScreen.screen[0].isEnabled)
return false;
if (LatteGPUState.osScreen.screen[0].flipExecuteCount == LatteGPUState.osScreen.screen[0].flipRequestCount)
return false;
LatteHandleOSScreen_prepareTextures();
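// the parity of the flip request counter selects which of the two buffers is displayed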
sint32 bufferDisplayTV = (LatteGPUState.osScreen.screen[0].flipRequestCount & 1) ^ 1;
sint32 bufferDisplayDRC = (LatteGPUState.osScreen.screen[1].flipRequestCount & 1) ^ 1;
const uint32 bufferIndexTV = (bufferDisplayTV);
const uint32 bufferIndexDRC = bufferDisplayDRC;
LatteTexture_ReloadData(osScreenTVTex[bufferIndexTV]->baseTexture);
// TV screen
LatteRenderTarget_copyToBackbuffer(osScreenTVTex[bufferIndexTV]->baseTexture->baseView, false);
if (LatteGPUState.osScreen.screen[0].flipExecuteCount != LatteGPUState.osScreen.screen[0].flipRequestCount)
LatteGPUState.osScreen.screen[0].flipExecuteCount.store(LatteGPUState.osScreen.screen[0].flipRequestCount);
return true;
}
bool LatteHandleOSScreen_DRC()
{
if (!LatteGPUState.osScreen.screen[1].isEnabled)
return false;
if (LatteGPUState.osScreen.screen[1].flipExecuteCount == LatteGPUState.osScreen.screen[1].flipRequestCount)
return false;
LatteHandleOSScreen_prepareTextures();
sint32 bufferDisplayDRC = (LatteGPUState.osScreen.screen[1].flipRequestCount & 1) ^ 1;
const uint32 bufferIndexDRC = bufferDisplayDRC;
LatteTexture_ReloadData(osScreenDRCTex[bufferIndexDRC]->baseTexture);
// GamePad screen
LatteRenderTarget_copyToBackbuffer(osScreenDRCTex[bufferIndexDRC]->baseTexture->baseView, true);
if (LatteGPUState.osScreen.screen[1].flipExecuteCount != LatteGPUState.osScreen.screen[1].flipRequestCount)
LatteGPUState.osScreen.screen[1].flipExecuteCount.store(LatteGPUState.osScreen.screen[1].flipRequestCount);
return true;
}
void LatteThread_HandleOSScreen()
{
bool swapTV = LatteHandleOSScreen_TV();
bool swapDRC = LatteHandleOSScreen_DRC();
if(swapTV || swapDRC)
g_renderer->SwapBuffers(swapTV, swapDRC);
}
int Latte_ThreadEntry()
{
SetThreadName("LatteThread");
sint32 w,h;
gui_getWindowPhysSize(w,h);
// renderer
g_renderer->Initialize();
RendererOutputShader::InitializeStatic();
LatteTiming_Init();
LatteTexture_init();
LatteTC_Init();
LatteBufferCache_init(164 * 1024 * 1024);
LatteQuery_Init();
LatteSHRC_Init();
LatteStreamout_InitCache();
g_renderer->renderTarget_setViewport(0, 0, w, h, 0.0f, 1.0f);
// enable GLSL gl_PointSize support
// glEnable(GL_PROGRAM_POINT_SIZE); // breaks shader caching on AMD (as of 2018)
LatteGPUState.glVendor = GLVENDOR_UNKNOWN;
switch(g_renderer->GetVendor())
{
case GfxVendor::AMD:
LatteGPUState.glVendor = GLVENDOR_AMD;
break;
case GfxVendor::Intel:
LatteGPUState.glVendor = GLVENDOR_INTEL;
break;
case GfxVendor::Nvidia:
LatteGPUState.glVendor = GLVENDOR_NVIDIA;
break;
case GfxVendor::Apple:
LatteGPUState.glVendor = GLVENDOR_APPLE;
break;
default:
break;
}
sLatteThreadFinishedInit = true;
// register debug handler
if (cemuLog_isLoggingEnabled(LogType::OpenGLLogging))
g_renderer->EnableDebugMode();
// wait till a game is started
while( true )
{
if( CafeSystem::IsTitleRunning() )
break;
g_renderer->DrawEmptyFrame(true);
g_renderer->DrawEmptyFrame(false);
gui_hasScreenshotRequest(); // keep the screenshot request queue empty
std::this_thread::sleep_for(std::chrono::milliseconds(1000/60));
}
g_renderer->DrawEmptyFrame(true);
// before doing anything with game specific shaders, we need to wait for graphic packs to finish loading
GraphicPack2::WaitUntilReady();
// if legacy packs are enabled we cannot use the colorbuffer resolution optimization
LatteGPUState.allowFramebufferSizeOptimization = true;
for(auto& pack : GraphicPack2::GetActiveGraphicPacks())
{
if(pack->AllowRendertargetSizeOptimization())
continue;
for(auto& rule : pack->GetTextureRules())
{
if(rule.filter_settings.width >= 0 || rule.filter_settings.height >= 0 || rule.filter_settings.depth >= 0 ||
rule.overwrite_settings.width >= 0 || rule.overwrite_settings.height >= 0 || rule.overwrite_settings.depth >= 0)
{
LatteGPUState.allowFramebufferSizeOptimization = false;
cemuLog_log(LogType::Force, "Graphic pack \"{}\" prevents rendertarget size optimization. This warning can be ignored and is intended for graphic pack developers", pack->GetName());
break;
}
}
}
// load disk shader cache
LatteShaderCache_Load();
// init registers
Latte_LoadInitialRegisters();
// let CPU thread know the GPU is done initializing
g_isGPUInitFinished = true;
// wait until CPU has called GX2Init()
while (LatteGPUState.gx2InitCalled == 0)
{
std::this_thread::yield();
std::this_thread::sleep_for(std::chrono::milliseconds(1));
LatteThread_HandleOSScreen();
if (Latte_GetStopSignal())
LatteThread_Exit();
}
gxRingBufferReadPtr = gx2WriteGatherPipe.gxRingBuffer;
LatteCP_ProcessRingbuffer();
cemu_assert_debug(false); // should never reach
return 0;
}
std::thread sLatteThread;
std::mutex sLatteThreadStateMutex;
// initializes GPU thread which in turn also activates graphic packs
// does not return until the thread finished initialization
void Latte_Start()
{
std::unique_lock _lock(sLatteThreadStateMutex);
cemu_assert_debug(!sLatteThreadRunning);
sLatteThreadRunning = true;
sLatteThreadFinishedInit = false;
sLatteThread = std::thread(Latte_ThreadEntry);
// wait until initialized
while (!sLatteThreadFinishedInit)
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
void Latte_Stop()
{
std::unique_lock _lock(sLatteThreadStateMutex);
sLatteThreadRunning = false;
_lock.unlock();
sLatteThread.join();
}
bool Latte_GetStopSignal()
{
return !sLatteThreadRunning;
}
void LatteThread_Exit()
{
if (g_renderer)
g_renderer->Shutdown();
// clean up vertex/uniform cache
LatteBufferCache_UnloadAll();
// clean up texture cache
LatteTC_UnloadAllTextures();
// clean up runtime shader cache
LatteSHRC_UnloadAll();
// close disk cache
LatteShaderCache_Close();
// destroy renderer but make sure that g_renderer remains valid until the destructor has finished
if (g_renderer)
{
Renderer* renderer = g_renderer.get();
delete renderer;
g_renderer.release();
}
// reset GPU7 state
std::memset(&LatteGPUState, 0, sizeof(LatteGPUState));
#if BOOST_OS_WINDOWS
ExitThread(0);
#else
pthread_exit(nullptr);
#endif
cemu_assert_unimplemented();
}
// ==== cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteBufferCache.cpp (cemu-project/Cemu, MPL-2.0) ====
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "util/ChunkedHeap/ChunkedHeap.h"
#include "util/helpers/fspinlock.h"
#include "config/ActiveSettings.h"
#define CACHE_PAGE_SIZE 0x400
#define CACHE_PAGE_SIZE_M1 (CACHE_PAGE_SIZE-1)
uint32 g_currentCacheChronon = 0;
template<typename TRangeData, typename TNodeObject>
class IntervalTree2
{
// TNodeObject will be interfaced with via callbacks to static methods
// static TNodeObject* Create(TRangeData rangeBegin, TRangeData rangeEnd, std::span<TNodeObject*> overlappingObjects)
// Create a new node with the given range. overlappingObjects contains all the nodes that are replaced by this operation. The callee has to delete all objects in overlappingObjects (Delete callback won't be invoked)
// static void Delete(TNodeObject* nodeObject)
// Delete a node object. Replacement operations won't trigger this callback and instead pass the objects to Create()
// static void Resize(TNodeObject* nodeObject, TRangeData rangeBegin, TRangeData rangeEnd)
// Shrink or extend an existing range
// static TNodeObject* Split(TNodeObject* nodeObject, TRangeData firstRangeBegin, TRangeData firstRangeEnd, TRangeData secondRangeBegin, TRangeData secondRangeEnd)
// Cut a hole into an existing range and split it in two. Should return the newly created node object after the hole
static_assert(std::is_pointer<TNodeObject>::value == false, "TNodeObject must be a non-pointer type");
struct InternalRange
{
InternalRange() = default;
InternalRange(TRangeData _rangeBegin, TRangeData _rangeEnd) : rangeBegin(_rangeBegin), rangeEnd(_rangeEnd) { cemu_assert_debug(_rangeBegin < _rangeEnd); };
TRangeData rangeBegin;
TRangeData rangeEnd;
bool operator<(const InternalRange& rhs) const
{
// use <= instead of < because ranges are allowed to touch (e.g. 10-20 and 20-30 don't get merged)
return this->rangeEnd <= rhs.rangeBegin;
}
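// with this ordering two ranges compare equivalent exactly when they overlap, so std::map::find() with a probe range
// returns an overlapping stored entry (touching ranges such as 10-20 and 20-30 stay distinct keys)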
};
std::map<InternalRange, TNodeObject*> m_map;
std::vector<TNodeObject*> m_tempObjectArray;
public:
TNodeObject* getRange(TRangeData rangeBegin, TRangeData rangeEnd)
{
auto itr = m_map.find(InternalRange(rangeBegin, rangeEnd));
if (itr == m_map.cend())
return nullptr;
if (rangeBegin < (*itr).first.rangeBegin)
return nullptr;
if (rangeEnd > (*itr).first.rangeEnd)
return nullptr;
return (*itr).second;
}
TNodeObject* getRangeByPoint(TRangeData rangeOffset)
{
auto itr = m_map.find(InternalRange(rangeOffset, rangeOffset+1)); // todo - better to use custom comparator instead of +1?
if (itr == m_map.cend())
return nullptr;
cemu_assert_debug(rangeOffset >= (*itr).first.rangeBegin);
cemu_assert_debug(rangeOffset < (*itr).first.rangeEnd);
return (*itr).second;
}
void addRange(TRangeData rangeBegin, TRangeData rangeEnd)
{
if (rangeEnd == rangeBegin)
return;
InternalRange range(rangeBegin, rangeEnd);
auto itr = m_map.find(range);
if (itr == m_map.cend())
{
// new entry
m_map.emplace(range, TNodeObject::Create(rangeBegin, rangeEnd, std::span<TNodeObject*>()));
}
else
{
// overlap detected
if (rangeBegin >= (*itr).first.rangeBegin && rangeEnd <= (*itr).first.rangeEnd)
return; // do nothing if added range is already covered
rangeBegin = (std::min)(rangeBegin, (*itr).first.rangeBegin);
// DEBUG - make sure this is the start point of the merge process (the first entry that starts below minValue)
#ifdef CEMU_DEBUG_ASSERT
if (itr != m_map.cbegin())
{
// check previous result
auto itrCopy = itr;
--itrCopy;
if ((*itrCopy).first.rangeEnd > rangeBegin)
{
assert_dbg(); // n-1 entry is also overlapping
rangeBegin = (std::min)(rangeBegin, (*itrCopy).first.rangeBegin);
}
}
#endif
// DEBUG - END
// collect and remove all overlapping ranges
size_t count = 0;
while (itr != m_map.cend() && (*itr).first.rangeBegin < rangeEnd)
{
rangeEnd = (std::max)(rangeEnd, (*itr).first.rangeEnd);
if (m_tempObjectArray.size() <= count)
m_tempObjectArray.resize(count + 8);
m_tempObjectArray[count] = (*itr).second;
count++;
auto tempItr = itr;
++itr;
m_map.erase(tempItr);
}
// create callback
TNodeObject* newObject = TNodeObject::Create(rangeBegin, rangeEnd, std::span<TNodeObject*>(m_tempObjectArray.data(), count));
m_map.emplace(InternalRange(rangeBegin, rangeEnd), newObject);
}
}
void removeRange(TRangeData rangeBegin, TRangeData rangeEnd)
{
InternalRange range(rangeBegin, rangeEnd);
auto itr = m_map.find(range);
if (itr == m_map.cend())
return;
cemu_assert_debug(itr == m_map.lower_bound(range));
while (itr != m_map.cend() && (*itr).first.rangeBegin < rangeEnd)
{
if ((*itr).first.rangeBegin >= rangeBegin && (*itr).first.rangeEnd <= rangeEnd)
{
// delete entire range
auto itrCopy = itr;
TNodeObject* t = (*itr).second;
++itr;
m_map.erase(itrCopy);
TNodeObject::Delete(t);
continue;
}
if (rangeBegin > (*itr).first.rangeBegin && rangeEnd < (*itr).first.rangeEnd)
{
// cut hole into existing range
TRangeData firstRangeBegin = (*itr).first.rangeBegin;
TRangeData firstRangeEnd = rangeBegin;
TRangeData secondRangeBegin = rangeEnd;
TRangeData secondRangeEnd = (*itr).first.rangeEnd;
TNodeObject* newObject = TNodeObject::Split((*itr).second, firstRangeBegin, firstRangeEnd, secondRangeBegin, secondRangeEnd);
// modify key
auto nh = m_map.extract(itr);
nh.key().rangeBegin = firstRangeBegin;
nh.key().rangeEnd = firstRangeEnd;
m_map.insert(std::move(nh));
// insert new object after hole
m_map.emplace(InternalRange(secondRangeBegin, secondRangeEnd), newObject);
return; // done
}
// shrink (trim either beginning or end)
TRangeData newRangeBegin;
TRangeData newRangeEnd;
if ((rangeBegin <= (*itr).first.rangeBegin && rangeEnd < (*itr).first.rangeEnd))
{
// trim from beginning
newRangeBegin = (std::max)((*itr).first.rangeBegin, rangeEnd);
newRangeEnd = (*itr).first.rangeEnd;
}
else if ((rangeBegin > (*itr).first.rangeBegin && rangeEnd >= (*itr).first.rangeEnd))
{
// trim from end
newRangeBegin = (*itr).first.rangeBegin;
newRangeEnd = (std::min)((*itr).first.rangeEnd, rangeBegin);
}
else
{
assert_dbg(); // should not happen
}
TNodeObject::Resize((*itr).second, newRangeBegin, newRangeEnd);
// modify key and increment iterator
auto itrCopy = itr;
++itr;
auto nh = m_map.extract(itrCopy);
nh.key().rangeBegin = newRangeBegin;
nh.key().rangeEnd = newRangeEnd;
m_map.insert(std::move(nh));
}
}
// remove existing range that matches given begin and end
void removeRangeSingle(TRangeData rangeBegin, TRangeData rangeEnd)
{
InternalRange range(rangeBegin, rangeEnd);
auto itr = m_map.find(range);
cemu_assert_debug(itr != m_map.cend());
if (itr == m_map.cend())
return;
cemu_assert_debug((*itr).first.rangeBegin == rangeBegin && (*itr).first.rangeEnd == rangeEnd);
// delete entire range
TNodeObject* t = (*itr).second;
m_map.erase(itr);
TNodeObject::Delete(t);
}
// remove existing range that matches given begin and end without calling delete callback
void removeRangeSingleWithoutCallback(TRangeData rangeBegin, TRangeData rangeEnd)
{
InternalRange range(rangeBegin, rangeEnd);
auto itr = m_map.find(range);
cemu_assert_debug(itr != m_map.cend());
if (itr == m_map.cend())
return;
cemu_assert_debug((*itr).first.rangeBegin == rangeBegin && (*itr).first.rangeEnd == rangeEnd);
// remove entire range without invoking the delete callback
m_map.erase(itr);
}
void splitRange(TRangeData rangeOffset)
{
// not well tested
removeRange(rangeOffset, rangeOffset+1);
}
template<typename TFunc>
void forEachOverlapping(TRangeData rangeBegin, TRangeData rangeEnd, TFunc f)
{
InternalRange range(rangeBegin, rangeEnd);
auto itr = m_map.find(range);
if (itr == m_map.cend())
return;
cemu_assert_debug(itr == m_map.lower_bound(range));
while (itr != m_map.cend() && (*itr).first.rangeBegin < rangeEnd)
{
f((*itr).second, rangeBegin, rangeEnd);
++itr;
}
}
void validate()
{
if (m_map.empty())
return;
auto itr = m_map.begin();
if ((*itr).first.rangeBegin > (*itr).first.rangeEnd)
assert_dbg();
TRangeData currentLoc = (*itr).first.rangeEnd;
++itr;
while (itr != m_map.end())
{
if ((*itr).first.rangeBegin >= (*itr).first.rangeEnd)
assert_dbg(); // negative or zero size ranges are not allowed
if (currentLoc > (*itr).first.rangeBegin)
assert_dbg(); // stored ranges must not overlap
currentLoc = (*itr).first.rangeEnd;
++itr;
}
}
bool empty() const
{
return m_map.empty();
}
const std::map<InternalRange, TNodeObject*>& getAll() const { return m_map; };
};
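// Illustrative sketch (not part of the original cache code): a minimal node type that satisfies the TNodeObject
// callback interface documented above. The type name and the trivial merge/split behavior are assumptions chosen
// purely for demonstration; the real node type used below is BufferCacheNode.
namespace
{
	struct ExampleRangeNode
	{
		uint32 begin;
		uint32 end;
		static ExampleRangeNode* Create(uint32 rangeBegin, uint32 rangeEnd, std::span<ExampleRangeNode*> overlappingObjects)
		{
			// the new node replaces all overlapping nodes; Create() owns and deletes them
			for (ExampleRangeNode* node : overlappingObjects)
				delete node;
			return new ExampleRangeNode{ rangeBegin, rangeEnd };
		}
		static void Delete(ExampleRangeNode* node)
		{
			delete node;
		}
		static void Resize(ExampleRangeNode* node, uint32 rangeBegin, uint32 rangeEnd)
		{
			node->begin = rangeBegin;
			node->end = rangeEnd;
		}
		static ExampleRangeNode* Split(ExampleRangeNode* node, uint32 firstRangeBegin, uint32 firstRangeEnd, uint32 secondRangeBegin, uint32 secondRangeEnd)
		{
			// shrink the existing node to the first sub-range and return a new node for the second one
			node->begin = firstRangeBegin;
			node->end = firstRangeEnd;
			return new ExampleRangeNode{ secondRangeBegin, secondRangeEnd };
		}
	};
	// usage sketch: IntervalTree2<uint32, ExampleRangeNode> exampleTree; exampleTree.addRange(0x1000, 0x2000);
}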
std::unique_ptr<VHeap> g_gpuBufferHeap = nullptr;
std::vector<uint8> s_pageUploadBuffer;
std::vector<class BufferCacheNode*> s_allCacheNodes;
void LatteBufferCache_removeSingleNodeFromTree(BufferCacheNode* node);
class BufferCacheNode
{
static inline constexpr uint64 c_streamoutSig0 = 0xF0F0F0F0155C5B6Aull;
static inline constexpr uint64 c_streamoutSig1 = 0x8BE6336411814F4Full;
public:
// returns false if not enough space is available
bool allocateCacheMemory()
{
cemu_assert_debug(m_hasCacheAlloc == false);
cemu_assert_debug(m_rangeEnd > m_rangeBegin);
m_hasCacheAlloc = g_gpuBufferHeap->allocOffset(m_rangeEnd - m_rangeBegin, CACHE_PAGE_SIZE, m_cacheOffset);
return m_hasCacheAlloc;
}
void ReleaseCacheMemoryImmediately()
{
if (m_hasCacheAlloc)
{
cemu_assert_debug(isInUse() == false);
g_gpuBufferHeap->freeOffset(m_cacheOffset);
m_hasCacheAlloc = false;
}
}
uint32 getBufferOffset(MPTR physAddr) const
{
cemu_assert_debug(m_hasCacheAlloc);
cemu_assert_debug(physAddr >= m_rangeBegin);
cemu_assert_debug(physAddr < m_rangeEnd);
uint32 relOffset = physAddr - m_rangeBegin;
return m_cacheOffset + relOffset;
}
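// Streamout (transform feedback) output only exists in the GPU-side cache buffer. To keep the hash-based
// modification tracking working, the affected 16-byte blocks in emulated RAM are overwritten with a magic
// signature (c_streamoutSig0/1); later uploads skip blocks that still carry the signature so the GPU-only
// data is not overwritten with stale RAM contents.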
void writeStreamout(MPTR rangeBegin, MPTR rangeEnd)
{
if ((rangeBegin & 0xF))
{
cemuLog_logDebugOnce(LogType::Force, "writeStreamout(): RangeBegin not aligned to 16. Begin {:08x} End {:08x}", rangeBegin, rangeEnd);
rangeBegin = (rangeBegin + 0xF) & ~0xF;
rangeEnd = std::max(rangeBegin, rangeEnd);
}
if (rangeEnd & 0xF)
{
// todo - add support for 4 byte granularity for streamout writes and cache
// used by Affordable Space Adventures and YWW Level 1-8
// also used by CoD Ghosts (8 byte granularity)
//cemuLog_logDebug(LogType::Force, "Streamout write size is not aligned to 16 bytes");
rangeEnd &= ~0xF;
}
//cemu_assert_debug((rangeEnd & 0xF) == 0);
rangeBegin = std::max(rangeBegin, m_rangeBegin);
rangeEnd = std::min(rangeEnd, m_rangeEnd);
if (rangeBegin >= rangeEnd)
return;
sint32 numPages = getPageCountFromRange(rangeBegin, rangeEnd);
sint32 pageIndex = getPageIndexFromAddr(rangeBegin);
cemu_assert_debug((m_rangeBegin + pageIndex * CACHE_PAGE_SIZE) <= rangeBegin);
cemu_assert_debug((m_rangeBegin + (pageIndex + numPages) * CACHE_PAGE_SIZE) >= rangeEnd);
for (sint32 i = 0; i < numPages; i++)
{
pageWriteStreamoutSignatures(pageIndex, rangeBegin, rangeEnd);
pageIndex++;
//pageInfo->hasStreamoutData = true;
//pageInfo++;
}
if (numPages > 0)
m_hasStreamoutData = true;
}
void checkAndSyncModifications(MPTR rangeBegin, MPTR rangeEnd, bool uploadData)
{
cemu_assert_debug(rangeBegin >= m_rangeBegin);
cemu_assert_debug(rangeEnd <= m_rangeEnd);
cemu_assert_debug(rangeBegin < m_rangeEnd);
cemu_assert_debug((rangeBegin % CACHE_PAGE_SIZE) == 0);
cemu_assert_debug((rangeEnd % CACHE_PAGE_SIZE) == 0);
sint32 basePageIndex = getPageIndexFromAddrAligned(rangeBegin);
sint32 numPages = getPageCountFromRangeAligned(rangeBegin, rangeEnd);
uint8* pagePtr = memory_getPointerFromPhysicalOffset(rangeBegin);
sint32 uploadPageBegin = -1;
CachePageInfo* pageInfo = m_pageInfo.data() + basePageIndex;
for (sint32 i = 0; i < numPages; i++)
{
if (pageInfo->hasStreamoutData)
{
// first upload any pending sequence of pages
if (uploadPageBegin != -1)
{
// upload range
if (uploadData)
uploadPages(uploadPageBegin, basePageIndex + i);
uploadPageBegin = -1;
}
// check if hash changed
uint64 pageHash = hashPage(pagePtr);
if (pageInfo->hash != pageHash)
{
pageInfo->hash = pageHash;
// for pages that contain streamout data we do uploads with a much smaller granularity
// and skip uploading any data that is marked with streamout filler bytes
if (!uploadPageWithStreamoutFiltered(basePageIndex + i))
pageInfo->hasStreamoutData = false; // all streamout data was replaced
}
pagePtr += CACHE_PAGE_SIZE;
pageInfo++;
continue;
}
uint64 pageHash = hashPage(pagePtr);
pagePtr += CACHE_PAGE_SIZE;
if (pageInfo->hash != pageHash)
{
if (uploadPageBegin == -1)
uploadPageBegin = i + basePageIndex;
pageInfo->hash = pageHash;
}
else
{
if (uploadPageBegin != -1)
{
// upload range
if (uploadData)
uploadPages(uploadPageBegin, basePageIndex + i);
uploadPageBegin = -1;
}
}
pageInfo++;
}
if (uploadPageBegin != -1)
{
if (uploadData)
uploadPages(uploadPageBegin, basePageIndex + numPages);
}
}
void checkAndSyncModifications(bool uploadData)
{
checkAndSyncModifications(m_rangeBegin, m_rangeEnd, uploadData);
m_lastModifyCheckCronon = g_currentCacheChronon;
m_hasInvalidation = false;
}
void checkAndSyncModificationsIfChrononChanged(MPTR reservePhysAddress, uint32 reserveSize)
{
if (m_lastModifyCheckCronon != g_currentCacheChronon)
{
m_lastModifyCheckCronon = g_currentCacheChronon;
checkAndSyncModifications(m_rangeBegin, m_rangeEnd, true);
m_hasInvalidation = false;
}
if (m_hasInvalidation)
{
// ideally we would only upload the pages that intersect both the reserve range and the invalidation range
// but this would require complex per-page tracking of invalidation. Since this is on a hot path we do a cheap approximation
// where we only track one continuous invalidation range
// try to bound uploads to the reserve range within the invalidation
uint32 resRangeBegin = reservePhysAddress & ~CACHE_PAGE_SIZE_M1;
uint32 resRangeEnd = ((reservePhysAddress + reserveSize) + CACHE_PAGE_SIZE_M1) & ~CACHE_PAGE_SIZE_M1;
uint32 uploadBegin = std::max(m_invalidationRangeBegin, resRangeBegin);
uint32 uploadEnd = std::min(resRangeEnd, m_invalidationRangeEnd);
if (uploadBegin >= uploadEnd)
return; // reserve range not within invalidation or range is zero sized
if (uploadBegin == m_invalidationRangeBegin)
{
m_invalidationRangeBegin = uploadEnd;
checkAndSyncModifications(uploadBegin, uploadEnd, true);
}
if (uploadEnd == m_invalidationRangeEnd)
{
m_invalidationRangeEnd = uploadBegin;
checkAndSyncModifications(uploadBegin, uploadEnd, true);
}
else
{
// upload all of invalidation
checkAndSyncModifications(m_invalidationRangeBegin, m_invalidationRangeEnd, true);
m_invalidationRangeBegin = m_invalidationRangeEnd;
}
if(m_invalidationRangeEnd <= m_invalidationRangeBegin)
m_hasInvalidation = false;
}
}
void invalidate(MPTR rangeBegin, MPTR rangeEnd)
{
rangeBegin = std::max(rangeBegin, m_rangeBegin);
rangeEnd = std::min(rangeEnd, m_rangeEnd);
if (rangeBegin >= rangeEnd)
return;
if (m_hasInvalidation)
{
m_invalidationRangeBegin = std::min(m_invalidationRangeBegin, rangeBegin);
m_invalidationRangeEnd = std::max(m_invalidationRangeEnd, rangeEnd);
}
else
{
m_invalidationRangeBegin = rangeBegin;
m_invalidationRangeEnd = rangeEnd;
m_hasInvalidation = true;
}
cemu_assert_debug(m_invalidationRangeBegin >= m_rangeBegin);
cemu_assert_debug(m_invalidationRangeEnd <= m_rangeEnd);
cemu_assert_debug(m_invalidationRangeBegin < m_invalidationRangeEnd);
m_invalidationRangeBegin = m_invalidationRangeBegin & ~CACHE_PAGE_SIZE_M1;
m_invalidationRangeEnd = (m_invalidationRangeEnd + CACHE_PAGE_SIZE_M1) & ~CACHE_PAGE_SIZE_M1;
}
void flagInUse()
{
m_lastDrawcall = LatteGPUState.drawCallCounter;
m_lastFrame = LatteGPUState.frameCounter;
}
bool isInUse() const
{
return m_lastDrawcall == LatteGPUState.drawCallCounter;
}
// returns true if the range does not contain any GPU-cache-only data and can be fully restored from RAM
bool isRAMOnly() const
{
return !m_hasStreamoutData;
}
MPTR GetRangeBegin() const { return m_rangeBegin; }
MPTR GetRangeEnd() const { return m_rangeEnd; }
uint32 GetDrawcallAge() const { return LatteGPUState.drawCallCounter - m_lastDrawcall; };
uint32 GetFrameAge() const { return LatteGPUState.frameCounter - m_lastFrame; };
bool HasStreamoutData() const { return m_hasStreamoutData; };
private:
struct CachePageInfo
{
uint64 hash{ 0 };
bool hasStreamoutData{ false };
};
MPTR m_rangeBegin;
MPTR m_rangeEnd; // (exclusive)
bool m_hasCacheAlloc{ false };
uint32 m_cacheOffset{ 0 };
// usage
uint32 m_lastDrawcall;
uint32 m_lastFrame;
uint32 m_arrayIndex;
// state tracking
uint32 m_lastModifyCheckCronon{ g_currentCacheChronon - 1 };
std::vector<CachePageInfo> m_pageInfo;
bool m_hasStreamoutData{ false };
// invalidation
bool m_hasInvalidation{false};
MPTR m_invalidationRangeBegin;
MPTR m_invalidationRangeEnd;
BufferCacheNode(MPTR rangeBegin, MPTR rangeEnd): m_rangeBegin(rangeBegin), m_rangeEnd(rangeEnd)
{
flagInUse();
cemu_assert_debug(rangeBegin < rangeEnd);
size_t numPages = getPageCountFromRangeAligned(rangeBegin, rangeEnd);
m_pageInfo.resize(numPages);
// append to array
m_arrayIndex = (uint32)s_allCacheNodes.size();
s_allCacheNodes.emplace_back(this);
};
~BufferCacheNode()
{
if (m_hasCacheAlloc)
g_deallocateQueue.emplace_back(m_cacheOffset); // release after current drawcall
// remove from array
auto temp = s_allCacheNodes.back();
s_allCacheNodes.pop_back();
if (this != temp)
{
s_allCacheNodes[m_arrayIndex] = temp;
temp->m_arrayIndex = m_arrayIndex;
}
}
uint32 getPageIndexFromAddrAligned(uint32 offset) const
{
cemu_assert_debug((offset % CACHE_PAGE_SIZE) == 0);
return (offset - m_rangeBegin) / CACHE_PAGE_SIZE;
}
uint32 getPageIndexFromAddr(uint32 offset) const
{
offset &= ~CACHE_PAGE_SIZE_M1;
return (offset - m_rangeBegin) / CACHE_PAGE_SIZE;
}
uint32 getPageCountFromRangeAligned(MPTR rangeBegin, MPTR rangeEnd) const
{
cemu_assert_debug((rangeBegin % CACHE_PAGE_SIZE) == 0);
cemu_assert_debug((rangeEnd % CACHE_PAGE_SIZE) == 0);
cemu_assert_debug(rangeBegin <= rangeEnd);
return (rangeEnd - rangeBegin) / CACHE_PAGE_SIZE;
}
uint32 getPageCountFromRange(MPTR rangeBegin, MPTR rangeEnd) const
{
rangeEnd = (rangeEnd + CACHE_PAGE_SIZE_M1) & ~CACHE_PAGE_SIZE_M1;
rangeBegin &= ~CACHE_PAGE_SIZE_M1;
cemu_assert_debug(rangeBegin <= rangeEnd);
return (rangeEnd - rangeBegin) / CACHE_PAGE_SIZE;
}
void syncFromRAM(MPTR rangeBegin, MPTR rangeEnd)
{
cemu_assert_debug(rangeBegin >= m_rangeBegin);
cemu_assert_debug(rangeEnd <= m_rangeEnd);
cemu_assert_debug(rangeEnd > rangeBegin);
cemu_assert_debug(m_hasCacheAlloc);
// reset write tracking
checkAndSyncModifications(rangeBegin, rangeEnd, false);
g_renderer->bufferCache_upload(memory_getPointerFromPhysicalOffset(rangeBegin), rangeEnd - rangeBegin, getBufferOffset(rangeBegin));
}
void syncFromNode(BufferCacheNode* srcNode)
{
// get shared range
MPTR rangeBegin = std::max(m_rangeBegin, srcNode->m_rangeBegin);
MPTR rangeEnd = std::min(m_rangeEnd, srcNode->m_rangeEnd);
cemu_assert_debug(rangeBegin < rangeEnd);
g_renderer->bufferCache_copy(srcNode->getBufferOffset(rangeBegin), this->getBufferOffset(rangeBegin), rangeEnd - rangeBegin);
// copy page checksums and information
sint32 numPages = getPageCountFromRangeAligned(rangeBegin, rangeEnd);
CachePageInfo* pageInfoDst = this->m_pageInfo.data() + this->getPageIndexFromAddrAligned(rangeBegin);
CachePageInfo* pageInfoSrc = srcNode->m_pageInfo.data() + srcNode->getPageIndexFromAddrAligned(rangeBegin);
for (sint32 i = 0; i < numPages; i++)
{
pageInfoDst[i] = pageInfoSrc[i];
if (pageInfoSrc[i].hasStreamoutData)
m_hasStreamoutData = true;
}
}
void uploadPages(uint32 firstPage, uint32 lastPagePlusOne)
{
cemu_assert_debug(lastPagePlusOne > firstPage);
uint32 uploadRangeBegin = m_rangeBegin + firstPage * CACHE_PAGE_SIZE;
uint32 uploadRangeEnd = m_rangeBegin + lastPagePlusOne * CACHE_PAGE_SIZE;
cemu_assert_debug(uploadRangeEnd > uploadRangeBegin);
// make sure uploaded pages and hashes match
uint32 numPages = lastPagePlusOne - firstPage;
if (s_pageUploadBuffer.size() < (numPages * CACHE_PAGE_SIZE))
s_pageUploadBuffer.resize(numPages * CACHE_PAGE_SIZE);
// todo - improve performance by merging memcpy + hashPage() ?
memcpy(s_pageUploadBuffer.data(), memory_getPointerFromPhysicalOffset(uploadRangeBegin), numPages * CACHE_PAGE_SIZE);
for (uint32 i = 0; i < numPages; i++)
{
m_pageInfo[firstPage + i].hash = hashPage(s_pageUploadBuffer.data() + i * CACHE_PAGE_SIZE);
}
g_renderer->bufferCache_upload(s_pageUploadBuffer.data(), uploadRangeEnd - uploadRangeBegin, getBufferOffset(uploadRangeBegin));
}
// upload only non-streamout data of a single page
// returns true if at least one streamout 16-byte block is present
// also updates the page hash to match the uploaded data (strict match)
sint32 uploadPageWithStreamoutFiltered(uint32 pageIndex)
{
uint8 pageCopy[CACHE_PAGE_SIZE];
memcpy(pageCopy, memory_getPointerFromPhysicalOffset(m_rangeBegin + pageIndex * CACHE_PAGE_SIZE), CACHE_PAGE_SIZE);
MPTR pageBase = m_rangeBegin + pageIndex * CACHE_PAGE_SIZE;
sint32 blockBegin = -1;
uint64* pagePtrU64 = (uint64*)pageCopy;
m_pageInfo[pageIndex].hash = hashPage(pageCopy);
bool hasStreamoutBlocks = false;
for (sint32 i = 0; i < CACHE_PAGE_SIZE / 16; i++)
{
if (pagePtrU64[0] == c_streamoutSig0 && pagePtrU64[1] == c_streamoutSig1)
{
hasStreamoutBlocks = true;
if (blockBegin != -1)
{
uint32 uploadRelRangeBegin = blockBegin * 16;
uint32 uploadRelRangeEnd = i * 16;
cemu_assert_debug(uploadRelRangeEnd > uploadRelRangeBegin);
g_renderer->bufferCache_upload(pageCopy + uploadRelRangeBegin, uploadRelRangeEnd - uploadRelRangeBegin, getBufferOffset(pageBase + uploadRelRangeBegin));
blockBegin = -1;
}
pagePtrU64 += 2;
continue;
}
else if (blockBegin == -1)
blockBegin = i;
pagePtrU64 += 2;
}
if (blockBegin != -1)
{
uint32 uploadRelRangeBegin = blockBegin * 16;
uint32 uploadRelRangeEnd = CACHE_PAGE_SIZE;
cemu_assert_debug(uploadRelRangeEnd > uploadRelRangeBegin);
g_renderer->bufferCache_upload(pageCopy + uploadRelRangeBegin, uploadRelRangeEnd - uploadRelRangeBegin, getBufferOffset(pageBase + uploadRelRangeBegin));
blockBegin = -1;
}
return hasStreamoutBlocks;
}
void shrink(MPTR newRangeBegin, MPTR newRangeEnd)
{
cemu_assert_debug(newRangeBegin >= m_rangeBegin);
cemu_assert_debug(newRangeEnd <= m_rangeEnd);
cemu_assert_debug(newRangeEnd > m_rangeBegin);
assert_dbg(); // todo (resize page array)
m_rangeBegin = newRangeBegin;
m_rangeEnd = newRangeEnd;
}
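// computes a 64-bit content hash over one CACHE_PAGE_SIZE page using four independent multiply/rotate lanes
// processed in 32-byte strides; used to detect CPU-side writes to cached pages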
static uint64 hashPage(uint8* mem)
{
static const uint64 k0 = 0x55F23EAD;
static const uint64 k1 = 0x185FDC6D;
static const uint64 k2 = 0xF7431F49;
static const uint64 k3 = 0xA4C7AE9D;
cemu_assert_debug((CACHE_PAGE_SIZE % 32) == 0);
const uint64* ptr = (const uint64*)mem;
const uint64* end = ptr + (CACHE_PAGE_SIZE / sizeof(uint64));
uint64 h0 = 0;
uint64 h1 = 0;
uint64 h2 = 0;
uint64 h3 = 0;
while (ptr < end)
{
h0 = std::rotr(h0, 7);
h1 = std::rotr(h1, 7);
h2 = std::rotr(h2, 7);
h3 = std::rotr(h3, 7);
h0 += ptr[0] * k0;
h1 += ptr[1] * k1;
h2 += ptr[2] * k2;
h3 += ptr[3] * k3;
ptr += 4;
}
return h0 + h1 + h2 + h3;
}
// flag page as having streamout data, also write streamout signatures to page memory
// also incrementally updates the page hash to include the written signatures, this prevents signature writes from triggering a data upload
void pageWriteStreamoutSignatures(uint32 pageIndex, MPTR rangeBegin, MPTR rangeEnd)
{
uint32 pageRangeBegin = m_rangeBegin + pageIndex * CACHE_PAGE_SIZE;
uint32 pageRangeEnd = pageRangeBegin + CACHE_PAGE_SIZE;
rangeBegin = std::max(pageRangeBegin, rangeBegin);
rangeEnd = std::min(pageRangeEnd, rangeEnd);
cemu_assert_debug(rangeEnd > rangeBegin);
cemu_assert_debug(rangeBegin >= pageRangeBegin);
cemu_assert_debug(rangeEnd <= pageRangeEnd);
cemu_assert_debug((rangeBegin & 0xF) == 0);
cemu_assert_debug((rangeEnd & 0xF) == 0);
auto pageInfo = m_pageInfo.data() + pageIndex;
pageInfo->hasStreamoutData = true;
// if the whole page is replaced we can use a cached hash
if (pageRangeBegin == rangeBegin && pageRangeEnd == rangeEnd)
{
uint64* pageMem = (uint64*)memory_getPointerFromPhysicalOffset(rangeBegin);
uint32 numBlocks = (rangeEnd - rangeBegin) / 16;
for (uint32 i = 0; i < numBlocks; i++)
{
pageMem[0] = c_streamoutSig0;
pageMem[1] = c_streamoutSig1;
pageMem += 2;
}
pageInfo->hash = c_fullStreamoutPageHash;
return;
}
uint64* pageMem = (uint64*)memory_getPointerFromPhysicalOffset(rangeBegin);
uint32 numBlocks = (rangeEnd - rangeBegin) / 16;
uint32 indexHashBlock = (rangeBegin - pageRangeBegin) / sizeof(uint64);
for (uint32 i = 0; i < numBlocks; i++)
{
pageMem[0] = c_streamoutSig0;
pageMem[1] = c_streamoutSig1;
pageMem += 2;
}
pageInfo->hash = 0; // reset hash
}
static uint64 genStreamoutPageHash()
{
uint8 pageMem[CACHE_PAGE_SIZE];
uint64* pageMemU64 = (uint64*)pageMem;
for (uint32 i = 0; i < sizeof(pageMem) / sizeof(uint64) / 2; i++)
{
pageMemU64[0] = c_streamoutSig0;
pageMemU64[1] = c_streamoutSig1;
pageMemU64 += 2;
}
return hashPage(pageMem);
}
static inline uint64 c_fullStreamoutPageHash = genStreamoutPageHash();
static std::vector<uint32> g_deallocateQueue;
public:
static void UnloadAll()
{
size_t i = 0;
while (i < s_allCacheNodes.size())
{
BufferCacheNode* node = s_allCacheNodes[i];
node->ReleaseCacheMemoryImmediately();
LatteBufferCache_removeSingleNodeFromTree(node);
delete node;
}
for(auto& it : s_allCacheNodes)
delete it;
s_allCacheNodes.clear();
g_deallocateQueue.clear();
}
static void ProcessDeallocations()
{
for(auto& itr : g_deallocateQueue)
g_gpuBufferHeap->freeOffset(itr);
g_deallocateQueue.clear();
}
// drops everything from the cache that isn't considered in use or unrestorable (ranges with streamout)
static void CleanupCacheAggressive(MPTR excludedRangeBegin, MPTR excludedRangeEnd)
{
size_t i = 0;
while (i < s_allCacheNodes.size())
{
BufferCacheNode* node = s_allCacheNodes[i];
if (node->isInUse())
{
i++;
continue;
}
if(!node->isRAMOnly())
{
i++;
continue;
}
if(node->GetRangeBegin() < excludedRangeEnd && node->GetRangeEnd() > excludedRangeBegin)
{
i++;
continue;
}
// delete range
node->ReleaseCacheMemoryImmediately();
LatteBufferCache_removeSingleNodeFromTree(node);
delete node;
}
}
/* callbacks from IntervalTree */
static BufferCacheNode* Create(MPTR rangeBegin, MPTR rangeEnd, std::span<BufferCacheNode*> overlappingObjects)
{
auto newRange = new BufferCacheNode(rangeBegin, rangeEnd);
if (!newRange->allocateCacheMemory())
{
// not enough memory available, try to drop ram-only ranges from the ones we replace
for (size_t i = 0; i < overlappingObjects.size(); i++)
{
BufferCacheNode* nodeItr = overlappingObjects[i];
if (!nodeItr->isInUse() && nodeItr->isRAMOnly())
{
nodeItr->ReleaseCacheMemoryImmediately();
delete nodeItr;
overlappingObjects[i] = nullptr;
}
}
// retry allocation
if (!newRange->allocateCacheMemory())
{
cemuLog_log(LogType::Force, "Out-of-memory in GPU buffer (trying to allocate: {}KB) Cleaning up cache...", (rangeEnd - rangeBegin + 1023) / 1024);
CleanupCacheAggressive(rangeBegin, rangeEnd);
if (!newRange->allocateCacheMemory())
{
cemuLog_log(LogType::Force, "Failed to free enough memory in GPU buffer");
cemu_assert(false);
}
}
}
newRange->syncFromRAM(rangeBegin, rangeEnd); // possible small optimization: only load the ranges from RAM which are not overwritten by ->syncFromNode()
for (auto itr : overlappingObjects)
{
if(itr == nullptr)
continue;
newRange->syncFromNode(itr);
delete itr;
}
return newRange;
}
static void Delete(BufferCacheNode* nodeObject)
{
delete nodeObject;
}
static void Resize(BufferCacheNode* nodeObject, MPTR rangeBegin, MPTR rangeEnd)
{
nodeObject->shrink(rangeBegin, rangeEnd);
}
static BufferCacheNode* Split(BufferCacheNode* nodeObject, MPTR firstRangeBegin, MPTR firstRangeEnd, MPTR secondRangeBegin, MPTR secondRangeEnd)
{
auto newRange = new BufferCacheNode(secondRangeBegin, secondRangeEnd);
// todo - add support for splitting BufferCacheNode memory allocations, then we don't need to do a separate allocation
if (!newRange->allocateCacheMemory())
{
cemuLog_log(LogType::Force, "Out-of-memory in GPU buffer during split operation");
cemu_assert(false);
}
newRange->syncFromNode(nodeObject);
nodeObject->shrink(firstRangeBegin, firstRangeEnd);
return newRange;
}
};
std::vector<uint32> BufferCacheNode::g_deallocateQueue;
IntervalTree2<MPTR, BufferCacheNode> g_gpuBufferCache;
void LatteBufferCache_removeSingleNodeFromTree(BufferCacheNode* node)
{
g_gpuBufferCache.removeRangeSingleWithoutCallback(node->GetRangeBegin(), node->GetRangeEnd());
}
BufferCacheNode* LatteBufferCache_reserveRange(MPTR physAddress, uint32 size)
{
MPTR rangeStart = physAddress - (physAddress % CACHE_PAGE_SIZE);
MPTR rangeEnd = (physAddress + size + CACHE_PAGE_SIZE_M1) & ~CACHE_PAGE_SIZE_M1;
auto range = g_gpuBufferCache.getRange(rangeStart, rangeEnd);
if (!range)
{
g_gpuBufferCache.addRange(rangeStart, rangeEnd);
range = g_gpuBufferCache.getRange(rangeStart, rangeEnd);
cemu_assert_debug(range);
}
cemu_assert_debug(range->GetRangeBegin() <= physAddress);
cemu_assert_debug(range->GetRangeEnd() >= (physAddress + size));
return range;
}
uint32 LatteBufferCache_retrieveDataInCache(MPTR physAddress, uint32 size)
{
auto range = LatteBufferCache_reserveRange(physAddress, size);
range->flagInUse();
range->checkAndSyncModificationsIfChrononChanged(physAddress, size);
return range->getBufferOffset(physAddress);
}
void LatteBufferCache_copyStreamoutDataToCache(MPTR physAddress, uint32 size, uint32 streamoutBufferOffset)
{
if (size == 0)
return;
cemu_assert_debug(size >= 16);
auto range = LatteBufferCache_reserveRange(physAddress, size);
range->flagInUse();
g_renderer->bufferCache_copyStreamoutToMainBuffer(streamoutBufferOffset, range->getBufferOffset(physAddress), size);
// write streamout signatures, flag affected pages
range->writeStreamout(physAddress, (physAddress + size));
}
void LatteBufferCache_invalidate(MPTR physAddress, uint32 size)
{
if (size == 0)
return;
g_gpuBufferCache.forEachOverlapping(physAddress, physAddress + size, [](BufferCacheNode* node, MPTR invalidationRangeBegin, MPTR invalidationRangeEnd)
{
node->invalidate(invalidationRangeBegin, invalidationRangeEnd);
}
);
}
// optimized version of LatteBufferCache_invalidate() for invalidating a single page; physAddress must point to the beginning of the page
void LatteBufferCache_invalidatePage(MPTR physAddress)
{
cemu_assert_debug((physAddress & CACHE_PAGE_SIZE_M1) == 0);
BufferCacheNode* node = g_gpuBufferCache.getRangeByPoint(physAddress);
if (node)
node->invalidate(physAddress, physAddress+CACHE_PAGE_SIZE);
}
void LatteBufferCache_processDeallocations()
{
BufferCacheNode::ProcessDeallocations();
}
void LatteBufferCache_init(size_t bufferSize)
{
cemu_assert_debug(g_gpuBufferCache.empty());
g_gpuBufferHeap.reset(new VHeap(nullptr, (uint32)bufferSize));
g_renderer->bufferCache_init((uint32)bufferSize);
}
void LatteBufferCache_UnloadAll()
{
BufferCacheNode::UnloadAll();
}
void LatteBufferCache_getStats(uint32& heapSize, uint32& allocationSize, uint32& allocNum)
{
g_gpuBufferHeap->getStats(heapSize, allocationSize, allocNum);
}
FSpinlock g_spinlockDCFlushQueue;
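// SparseBitset tracks a set of page indices without allocating storage proportional to the
// address space. Indices are bucketed into 256 vectors by their low 8 bits (index & TABLE_MASK),
// and a side list of non-empty buckets lets ForAllAndClear()/Clear() touch only the buckets
// that actually received entries since the last drain.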
class SparseBitset
{
static inline constexpr size_t TABLE_MASK = 0xFF;
public:
bool Empty() const
{
return m_numNonEmptyVectors == 0;
}
void Set(uint32 index)
{
auto& v = m_bits[index & TABLE_MASK];
if (std::find(v.cbegin(), v.cend(), index) != v.end())
return;
if (v.empty())
{
m_nonEmptyVectors[m_numNonEmptyVectors] = &v;
m_numNonEmptyVectors++;
}
v.emplace_back(index);
}
template<typename TFunc>
void ForAllAndClear(TFunc callbackFunc)
{
auto vCurrent = m_nonEmptyVectors + 0;
auto vEnd = m_nonEmptyVectors + m_numNonEmptyVectors;
while (vCurrent < vEnd)
{
std::vector<uint32>* vec = *vCurrent;
vCurrent++;
for (const auto& it : *vec)
callbackFunc(it);
vec->clear();
}
m_numNonEmptyVectors = 0;
}
void Clear()
{
auto vCurrent = m_nonEmptyVectors + 0;
auto vEnd = m_nonEmptyVectors + m_numNonEmptyVectors;
while (vCurrent < vEnd)
{
std::vector<uint32>* vec = *vCurrent;
vCurrent++;
vec->clear();
}
m_numNonEmptyVectors = 0;
}
private:
std::vector<uint32> m_bits[TABLE_MASK + 1];
std::vector<uint32>* m_nonEmptyVectors[TABLE_MASK + 1];
size_t m_numNonEmptyVectors{ 0 };
};
SparseBitset* s_DCFlushQueue = new SparseBitset();
SparseBitset* s_DCFlushQueueAlternate = new SparseBitset();
void LatteBufferCache_notifyDCFlush(MPTR address, uint32 size)
{
if (address == 0 || size == 0xFFFFFFFF)
return; // global flushes are ignored for now
uint32 firstPage = address / CACHE_PAGE_SIZE;
uint32 lastPage = (address + size - 1) / CACHE_PAGE_SIZE;
g_spinlockDCFlushQueue.lock();
for (uint32 i = firstPage; i <= lastPage; i++)
s_DCFlushQueue->Set(i);
g_spinlockDCFlushQueue.unlock();
}
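// Drains the DC flush queue on the GPU thread. The two queue instances are swapped under the
// spinlock so that the (potentially slow) per-page invalidation runs without holding the lock,
// keeping LatteBufferCache_notifyDCFlush() cheap for the CPU-side caller.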
void LatteBufferCache_processDCFlushQueue()
{
if (s_DCFlushQueue->Empty()) // quick check to avoid locking if there is no work to do
return;
g_spinlockDCFlushQueue.lock();
std::swap(s_DCFlushQueue, s_DCFlushQueueAlternate);
g_spinlockDCFlushQueue.unlock();
s_DCFlushQueueAlternate->ForAllAndClear([](uint32 index) {LatteBufferCache_invalidatePage(index * CACHE_PAGE_SIZE); });
}
void LatteBufferCache_notifyDrawDone()
{
}
void LatteBufferCache_notifySwapTVScanBuffer()
{
if( ActiveSettings::FlushGPUCacheOnSwap() )
g_currentCacheChronon++;
}
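// Called periodically; inspects a single cache node per invocation (round-robin via s_counter)
// and evicts it if its frame age exceeds a threshold. The threshold shrinks as the heap fills
// up, so eviction becomes more aggressive under memory pressure (see the tiers below).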
void LatteBufferCache_incrementalCleanup()
{
static uint32 s_counter = 0;
if (s_allCacheNodes.empty())
return;
s_counter++;
s_counter %= (uint32)s_allCacheNodes.size();
auto range = s_allCacheNodes[s_counter];
if (range->HasStreamoutData())
{
// currently we never delete streamout ranges
// todo - check if streamout pages got overwritten + if the range would lose the hasStreamoutData flag
return;
}
uint32 heapSize;
uint32 allocationSize;
uint32 allocNum;
g_gpuBufferHeap->getStats(heapSize, allocationSize, allocNum);
if (allocationSize >= (heapSize * 4 / 5))
{
		// heap is at least 80% filled
if (range->GetFrameAge() >= 2)
{
g_gpuBufferCache.removeRangeSingle(range->GetRangeBegin(), range->GetRangeEnd());
}
}
else if (allocationSize >= (heapSize * 3 / 4))
{
		// heap is 75-80% filled
if (range->GetFrameAge() >= 4)
{
g_gpuBufferCache.removeRangeSingle(range->GetRangeBegin(), range->GetRangeEnd());
}
}
else if (allocationSize >= (heapSize / 2))
{
		// heap is 50-75% filled
if (range->GetFrameAge() >= 20)
{
g_gpuBufferCache.removeRangeSingle(range->GetRangeBegin(), range->GetRangeEnd());
}
}
else
{
// heap is under 50% capacity
if (range->GetFrameAge() >= 500)
{
g_gpuBufferCache.removeRangeSingle(range->GetRangeBegin(), range->GetRangeEnd());
}
}
}
// cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteCommandProcessor.cpp
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h" // for write gatherer and special state. Get rid of dependency
#include "Cafe/OS/libs/gx2/GX2_Misc.h" // for GX2::sGX2MainCoreIndex. Legacy dependency
#include "Cafe/OS/libs/gx2/GX2_Event.h" // for notification callbacks
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/Core/LatteAsyncCommands.h"
#include "Cafe/HW/Latte/Core/LattePerformanceMonitor.h"
#include "Cafe/HW/Latte/Core/LatteIndices.h"
#include "Cafe/HW/Latte/Core/LatteBufferCache.h"
#include "Cafe/HW/Latte/Core/LattePM4.h"
#include "Cafe/OS/libs/coreinit/coreinit_Time.h"
#include "Cafe/CafeSystem.h"
#include <boost/container/small_vector.hpp>
void LatteCP_DebugPrintCmdBuffer(uint32be* bufferPtr, uint32 size);
#define CP_TIMER_RECHECK 1024
//#define LATTE_CP_LOGGING
typedef uint32be* LatteCMDPtr;
#define LatteReadCMD() ((uint32)*(cmd++))
#define LatteSkipCMD(_nWords) cmd += (_nWords)
uint8* gxRingBufferReadPtr; // currently active read pointer (gx2 ring buffer or display list)
uint8* gx2CPParserDisplayListPtr;
uint8* gx2CPParserDisplayListStart; // used for debugging
uint8* gx2CPParserDisplayListEnd;
void LatteThread_HandleOSScreen();
void LatteThread_Exit();
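// DrawPassContext tracks the state of a draw sequence ("draw pass") across consecutive draw
// commands and also maintains a small stack of command-queue positions, so that parsing can
// descend into indirect (sub) command buffers and later resume the parent buffer.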
class DrawPassContext
{
struct CmdQueuePos
{
CmdQueuePos(LatteCMDPtr current, LatteCMDPtr start, LatteCMDPtr end) : current(current), start(start), end(end) {};
LatteCMDPtr current;
LatteCMDPtr start;
LatteCMDPtr end;
};
public:
bool isWithinDrawPass() const
{
return m_drawPassActive;
}
void beginDrawPass()
{
m_drawPassActive = true;
m_isFirstDraw = true;
m_vertexBufferChanged = true;
m_uniformBufferChanged = true;
g_renderer->draw_beginSequence();
}
void executeDraw(uint32 count, bool isAutoIndex, MPTR physIndices)
{
uint32 baseVertex = LatteGPUState.contextRegister[mmSQ_VTX_BASE_VTX_LOC];
uint32 baseInstance = LatteGPUState.contextRegister[mmSQ_VTX_START_INST_LOC];
uint32 numInstances = LatteGPUState.drawContext.numInstances;
if (numInstances == 0)
return;
/*
if (GetAsyncKeyState('B'))
{
cemuLog_force("[executeDraw] {} Count {} BaseVertex {} BaseInstance {}", m_isFirstDraw?"Init":"Fast", count, baseVertex, baseInstance);
}
*/
if (!isAutoIndex)
{
cemu_assert_debug(physIndices != MPTR_NULL);
if (physIndices == MPTR_NULL)
return;
auto indexType = LatteGPUState.contextNew.VGT_DMA_INDEX_TYPE.get_INDEX_TYPE();
g_renderer->draw_execute(baseVertex, baseInstance, numInstances, count, physIndices, indexType, m_isFirstDraw);
}
else
{
g_renderer->draw_execute(baseVertex, baseInstance, numInstances, count, MPTR_NULL, Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE::AUTO, m_isFirstDraw);
}
performanceMonitor.cycle[performanceMonitor.cycleIndex].drawCallCounter++;
if (!m_isFirstDraw)
performanceMonitor.cycle[performanceMonitor.cycleIndex].fastDrawCallCounter++;
m_isFirstDraw = false;
m_vertexBufferChanged = false;
m_uniformBufferChanged = false;
}
void endDrawPass()
{
g_renderer->draw_endSequence();
m_drawPassActive = false;
}
void notifyModifiedVertexBuffer()
{
m_vertexBufferChanged = true;
}
void notifyModifiedUniformBuffer()
{
m_uniformBufferChanged = true;
}
// command buffer processing position
void PushCurrentCommandQueuePos(LatteCMDPtr current, LatteCMDPtr start, LatteCMDPtr end)
{
m_queuePosStack.emplace_back(current, start, end);
}
bool PopCurrentCommandQueuePos(LatteCMDPtr& current, LatteCMDPtr& start, LatteCMDPtr& end)
{
if (m_queuePosStack.empty())
return false;
const auto& it = m_queuePosStack.back();
current = it.current;
start = it.start;
end = it.end;
m_queuePosStack.pop_back();
return true;
}
private:
bool m_drawPassActive{ false };
bool m_isFirstDraw{false};
bool m_vertexBufferChanged{ false };
bool m_uniformBufferChanged{ false };
boost::container::small_vector<CmdQueuePos, 4> m_queuePosStack;
};
void LatteCP_processCommandBuffer(DrawPassContext& drawPassCtx);
/*
* Read a U32 from the command buffer
* If no data is available then wait in a busy loop
*/
uint32 LatteCP_readU32Deprc()
{
uint32 v;
uint8* gxRingBufferWritePtr;
sint32 readDistance;
// no display list active
while (true)
{
gxRingBufferWritePtr = gx2WriteGatherPipe.writeGatherPtrGxBuffer[GX2::sGX2MainCoreIndex];
readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr);
if (readDistance != 0)
break;
g_renderer->NotifyLatteCommandProcessorIdle(); // let the renderer know in case it wants to flush any commands
performanceMonitor.gpuTime_idleTime.beginMeasuring();
// no command data available, spin in a busy loop for a bit then check again
for (sint32 busy = 0; busy < 80; busy++)
{
_mm_pause();
}
LatteThread_HandleOSScreen(); // check if new frame was presented via OSScreen API
readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr);
if (readDistance != 0)
break;
if (Latte_GetStopSignal())
LatteThread_Exit();
// still no command data available, do some other tasks
LatteTiming_HandleTimedVsync();
LatteAsyncCommands_checkAndExecute();
std::this_thread::yield();
performanceMonitor.gpuTime_idleTime.endMeasuring();
}
v = *(uint32*)gxRingBufferReadPtr;
gxRingBufferReadPtr += 4;
#ifdef CEMU_DEBUG_ASSERT
if (v == 0xcdcdcdcd)
assert_dbg();
#endif
v = _swapEndianU32(v);
return v;
}
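// Blocks until at least numWords words of command data are available in the GX2 ring buffer.
// A negative read distance means the write pointer has wrapped around, which implies at least
// one complete command (including the wrap-around packet) is queued after the current position.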
void LatteCP_waitForNWords(uint32 numWords)
{
uint8* gxRingBufferWritePtr;
sint32 readDistance;
bool isFlushed = false;
sint32 waitDistance = numWords * sizeof(uint32be);
// no display list active
while (true)
{
gxRingBufferWritePtr = gx2WriteGatherPipe.writeGatherPtrGxBuffer[GX2::sGX2MainCoreIndex];
readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr);
if (readDistance < 0)
return; // wrap around means there is at least one full command queued after this
if (readDistance >= waitDistance)
break;
g_renderer->NotifyLatteCommandProcessorIdle(); // let the renderer know in case it wants to flush any commands
performanceMonitor.gpuTime_idleTime.beginMeasuring();
// no command data available, spin in a busy loop for a while then check again
for (sint32 busy = 0; busy < 80; busy++)
{
_mm_pause();
}
readDistance = (sint32)(gxRingBufferWritePtr - gxRingBufferReadPtr);
if (readDistance < 0)
return; // wrap around means there is at least one full command queued after this
if (readDistance >= waitDistance)
break;
if (Latte_GetStopSignal())
LatteThread_Exit();
// still no command data available, do some other tasks
LatteTiming_HandleTimedVsync();
LatteAsyncCommands_checkAndExecute();
std::this_thread::yield();
performanceMonitor.gpuTime_idleTime.endMeasuring();
}
}
template<uint32 readU32()>
void LatteCP_skipWords(uint32 wordsToSkip)
{
while (wordsToSkip)
{
readU32();
wordsToSkip--;
}
}
LatteCMDPtr LatteCP_itSurfaceSync(LatteCMDPtr cmd)
{
uint32 invalidationFlags = LatteReadCMD();
uint32 size = LatteReadCMD() << 8;
MPTR addressPhys = LatteReadCMD() << 8;
uint32 pollInterval = LatteReadCMD();
if (addressPhys == MPTR_NULL || size == 0xFFFFFFFF)
return cmd; // block global invalidations because they are too expensive
if (invalidationFlags & 0x800000)
{
// invalidate uniform or attribute buffer
LatteBufferCache_invalidate(addressPhys, size);
}
return cmd;
}
// called from TCL command queue. Executes a memory command buffer
void LatteCP_itIndirectBufferDepr(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 3);
uint32 physicalAddress = LatteReadCMD();
uint32 physicalAddressHigh = LatteReadCMD(); // unused
uint32 sizeInDWords = LatteReadCMD();
uint32 displayListSize = sizeInDWords * 4;
DrawPassContext drawPassCtx;
#ifdef LATTE_CP_LOGGING
if (GetAsyncKeyState('A'))
LatteCP_DebugPrintCmdBuffer(MEMPTR<uint32be>(physicalAddress), displayListSize);
#endif
uint32be* buf = MEMPTR<uint32be>(physicalAddress).GetPtr();
drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInDWords);
LatteCP_processCommandBuffer(drawPassCtx);
if (drawPassCtx.isWithinDrawPass())
drawPassCtx.endDrawPass();
}
// pushes the command buffer to the stack
void LatteCP_itIndirectBuffer(LatteCMDPtr cmd, uint32 nWords, DrawPassContext& drawPassCtx)
{
cemu_assert_debug(nWords == 3);
uint32 physicalAddress = LatteReadCMD();
uint32 physicalAddressHigh = LatteReadCMD(); // unused
uint32 sizeInDWords = LatteReadCMD();
uint32 displayListSize = sizeInDWords * 4;
cemu_assert_debug(displayListSize >= 4);
uint32be* buf = MEMPTR<uint32be>(physicalAddress).GetPtr();
drawPassCtx.PushCurrentCommandQueuePos(buf, buf, buf + sizeInDWords);
}
LatteCMDPtr LatteCP_itStreamoutBufferUpdate(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 5);
uint32 updateControl = LatteReadCMD();
uint32 physicalAddressWrite = LatteReadCMD();
uint32 ukn1 = LatteReadCMD();
uint32 physicalAddressRead = LatteReadCMD();
uint32 ukn3 = LatteReadCMD();
uint32 mode = (updateControl >> 1) & 3;
uint32 soIndex = (updateControl >> 8) & 3;
if (mode == 0)
{
// reset pointer
MPTR virtualAddress = memory_physicalToVirtual(physicalAddressRead);
uint32 bufferOffset = 0;
LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_OFFSET_0 + 4 * soIndex] = bufferOffset;
}
else if (mode == 3)
{
// store current offset to memory
MPTR virtualAddress = memory_physicalToVirtual(physicalAddressWrite);
uint32 bufferOffset = LatteGPUState.contextRegister[mmVGT_STRMOUT_BUFFER_OFFSET_0 + 4 * soIndex];
memory_writeU32(virtualAddress + 0x00, bufferOffset);
}
else
{
cemu_assert_unimplemented();
}
return cmd;
}
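// Some register writes have side effects beyond storing the value. Currently the only handled
// case is a context-register write that covers mmSQ_VTX_SEMANTIC_CLEAR, which resets all 32
// SQ_VTX_SEMANTIC_* registers to 0xFF.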
template<uint32 registerBaseMode>
void LatteCP_itSetRegistersGeneric_handleSpecialRanges(uint32 registerStartIndex, uint32 registerEndIndex)
{
if constexpr (registerBaseMode == IT_SET_CONTEXT_REG)
{
if (registerStartIndex <= mmSQ_VTX_SEMANTIC_CLEAR && registerEndIndex >= mmSQ_VTX_SEMANTIC_CLEAR)
{
for (uint32 i = 0; i < 32; i++)
{
LatteGPUState.contextRegister[mmSQ_VTX_SEMANTIC_0 + i] = 0xFF;
}
}
}
}
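// Generic handler for the IT_SET_* register packets. The first payload word is the register
// offset relative to TRegisterBase, followed by (nWords - 1) register values. When state
// shadowing is enabled (contextControl0 == 0x80000077), each written value is also mirrored to
// the guest-memory shadow address registered via the IT_LOAD_* packets (see LatteCP_itLoadReg).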
template<uint32 TRegisterBase>
LatteCMDPtr LatteCP_itSetRegistersGeneric(LatteCMDPtr cmd, uint32 nWords)
{
uint32 registerOffset = LatteReadCMD();
uint32 registerIndex = TRegisterBase + registerOffset;
uint32 registerStartIndex = registerIndex;
uint32 registerEndIndex = registerStartIndex + nWords;
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug((registerIndex + nWords) <= LATTE_MAX_REGISTER);
#endif
uint32* outputReg = (uint32*)(LatteGPUState.contextRegister + registerIndex);
if (LatteGPUState.contextControl0 == 0x80000077)
{
// state shadowing enabled
uint32* shadowAddrs = LatteGPUState.contextRegisterShadowAddr + registerIndex;
sint32 indexCounter = 0;
while (--nWords)
{
uint32 dataWord = LatteReadCMD();
MPTR regShadowAddr = shadowAddrs[indexCounter];
if (regShadowAddr)
*(uint32*)(memory_base + regShadowAddr) = _swapEndianU32(dataWord);
outputReg[indexCounter] = dataWord;
indexCounter++;
}
}
else
{
// state shadowing disabled
sint32 indexCounter = 0;
while (--nWords)
{
*outputReg = LatteReadCMD();
outputReg++;
}
}
// some register writes trigger special behavior
LatteCP_itSetRegistersGeneric_handleSpecialRanges<TRegisterBase>(registerStartIndex, registerEndIndex);
return cmd;
}
template<uint32 TRegisterBase, typename TRegRangeCallback>
LatteCMDPtr LatteCP_itSetRegistersGeneric(LatteCMDPtr cmd, uint32 nWords, TRegRangeCallback cbRegRange)
{
uint32 registerOffset = LatteReadCMD();
uint32 registerIndex = TRegisterBase + registerOffset;
uint32 registerStartIndex = registerIndex;
uint32 registerEndIndex = registerStartIndex + nWords;
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug((registerIndex + nWords) <= LATTE_MAX_REGISTER);
#endif
cbRegRange(registerStartIndex, registerEndIndex);
uint32* outputReg = (uint32*)(LatteGPUState.contextRegister + registerIndex);
if (LatteGPUState.contextControl0 == 0x80000077)
{
// state shadowing enabled
uint32* shadowAddrs = LatteGPUState.contextRegisterShadowAddr + registerIndex;
sint32 indexCounter = 0;
while (--nWords)
{
uint32 dataWord = LatteReadCMD();
MPTR regShadowAddr = shadowAddrs[indexCounter];
if (regShadowAddr)
*(uint32*)(memory_base + regShadowAddr) = _swapEndianU32(dataWord);
outputReg[indexCounter] = dataWord;
indexCounter++;
}
}
else
{
// state shadowing disabled
sint32 indexCounter = 0;
while (--nWords)
{
*outputReg = LatteReadCMD();
outputReg++;
}
}
// some register writes trigger special behavior
LatteCP_itSetRegistersGeneric_handleSpecialRanges<TRegisterBase>(registerStartIndex, registerEndIndex);
return cmd;
}
LatteCMDPtr LatteCP_itIndexType(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
LatteGPUState.contextNew.VGT_DMA_INDEX_TYPE.set_INDEX_TYPE((Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE)LatteReadCMD());
return cmd;
}
LatteCMDPtr LatteCP_itNumInstances(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
LatteGPUState.drawContext.numInstances = LatteReadCMD();
return cmd;
}
LatteCMDPtr LatteCP_itWaitRegMem(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 6);
uint32 word0 = LatteReadCMD();
uint32 word1 = LatteReadCMD();
uint32 word2 = LatteReadCMD();
uint32 word3 = LatteReadCMD();
uint32 word4 = LatteReadCMD();
uint32 word5 = LatteReadCMD();
uint32 compareOp = (word0) & 7;
uint32 physAddr = word1 & ~3;
cemu_assert_debug((physAddr&3) == 0);
uint32 fenceValue = word3;
uint32 fenceMask = word4;
uint32* fencePtr = (uint32*)memory_getPointerFromPhysicalOffset(physAddr);
const uint32 GPU7_WAIT_MEM_OP_ALWAYS = 0;
const uint32 GPU7_WAIT_MEM_OP_LESS = 1;
const uint32 GPU7_WAIT_MEM_OP_LEQUAL = 2;
const uint32 GPU7_WAIT_MEM_OP_EQUAL = 3;
const uint32 GPU7_WAIT_MEM_OP_NOTEQUAL = 4;
const uint32 GPU7_WAIT_MEM_OP_GEQUAL = 5;
const uint32 GPU7_WAIT_MEM_OP_GREATER = 6;
const uint32 GPU7_WAIT_MEM_OP_NEVER = 7;
bool stalls = false;
if ((word0 & 0x10) != 0)
{
// wait for memory address
performanceMonitor.gpuTime_fenceTime.beginMeasuring();
while (true)
{
uint32 fenceMemValue = _swapEndianU32(*fencePtr);
fenceMemValue &= fenceMask;
if (compareOp == GPU7_WAIT_MEM_OP_LESS)
{
if (fenceMemValue < fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_LEQUAL)
{
if (fenceMemValue <= fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_EQUAL)
{
if (fenceMemValue == fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_NOTEQUAL)
{
if (fenceMemValue != fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_GEQUAL)
{
if (fenceMemValue >= fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_GREATER)
{
if (fenceMemValue > fenceValue)
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_ALWAYS)
{
break;
}
else if (compareOp == GPU7_WAIT_MEM_OP_NEVER)
{
cemuLog_logOnce(LogType::Force, "Latte: WAIT_MEM_OP_NEVER encountered");
break;
}
else
assert_dbg();
if (!stalls)
{
g_renderer->NotifyLatteCommandProcessorIdle();
stalls = true;
}
// check if any GPU events happened
LatteTiming_HandleTimedVsync();
LatteAsyncCommands_checkAndExecute();
}
performanceMonitor.gpuTime_fenceTime.endMeasuring();
}
else
{
// wait for register
debugBreakpoint();
}
return cmd;
}
LatteCMDPtr LatteCP_itMemWrite(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 4);
uint32 word0 = LatteReadCMD();
uint32 word1 = LatteReadCMD();
uint32 word2 = LatteReadCMD();
uint32 word3 = LatteReadCMD();
MPTR valuePhysAddr = (word0 & ~3);
if (valuePhysAddr == 0)
{
cemuLog_log(LogType::Force, "GPU: Invalid itMemWrite to null pointer");
return cmd;
}
uint32be* memPtr = (uint32be*)memory_getPointerFromPhysicalOffset(valuePhysAddr);
if (word1 == 0x40000)
{
// write U32
*memPtr = word2;
}
else if (word1 == 0x00000)
{
// write U64 (as two U32)
// note: The U32s are swapped
memPtr[0] = word2;
memPtr[1] = word3;
}
else if (word1 == 0x20000)
{
// write U64 (little endian)
memPtr[0] = _swapEndianU32(word2);
memPtr[1] = _swapEndianU32(word3);
}
else
cemu_assert_unimplemented();
return cmd;
}
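// GPU-side semaphore on a 64-bit counter in guest memory. SEM_SIGNAL 6 increments the counter,
// SEM_SIGNAL 7 busy-waits (yielding after a while) until the counter is non-zero and then
// atomically decrements it.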
LatteCMDPtr LatteCP_itMemSemaphore(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 2);
MPTR semaphorePhysicalAddress = LatteReadCMD();
uint32 semaphoreControl = LatteReadCMD();
uint8 SEM_SIGNAL = (semaphoreControl >> 29) & 7;
std::atomic<uint64le>* semaphoreData = _rawPtrToAtomic((uint64le*)memory_getPointerFromPhysicalOffset(semaphorePhysicalAddress));
static_assert(sizeof(std::atomic<uint64le>) == sizeof(uint64le));
if (SEM_SIGNAL == 6)
{
// signal
semaphoreData->fetch_add(1);
}
else if(SEM_SIGNAL == 7)
{
// wait
size_t loopCount = 0;
while (true)
{
uint64le oldVal = semaphoreData->load();
if (oldVal == 0)
{
loopCount++;
if (loopCount > 2000)
std::this_thread::yield();
continue;
}
if (semaphoreData->compare_exchange_strong(oldVal, oldVal - 1))
break;
}
}
else
{
cemu_assert_debug(false);
}
return cmd;
}
LatteCMDPtr LatteCP_itContextControl(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 2);
uint32 word0 = LatteReadCMD();
uint32 word1 = LatteReadCMD();
LatteGPUState.contextControl0 = word0;
LatteGPUState.contextControl1 = word1;
return cmd;
}
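// IT_LOAD_* packet handler. After the physical address of the shadow area and a waitForIdle
// word, the packet contains (nWords - 2) / 2 pairs of (register offset, register count). For
// each described register the shadow address is recorded and the current value is loaded from
// guest memory.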
LatteCMDPtr LatteCP_itLoadReg(LatteCMDPtr cmd, uint32 nWords, uint32 regBase)
{
if (nWords < 2 || (nWords & 1) != 0)
{
cemuLog_logDebug(LogType::Force, "itLoadReg: Invalid nWords value");
return cmd;
}
MPTR physAddressRegArea = LatteReadCMD();
uint32 waitForIdle = LatteReadCMD();
uint32 loadEntries = (nWords - 2) / 2;
uint32 regShadowMemAddr = physAddressRegArea;
for (uint32 i = 0; i < loadEntries; i++)
{
uint32 regOffset = LatteReadCMD();
uint32 regCount = LatteReadCMD();
cemu_assert_debug(regCount != 0);
uint32 regAddr = regBase + regOffset;
for (uint32 f = 0; f < regCount; f++)
{
LatteGPUState.contextRegisterShadowAddr[regAddr] = regShadowMemAddr;
LatteGPUState.contextRegister[regAddr] = memory_read<uint32>(regShadowMemAddr);
regAddr++;
regShadowMemAddr += 4;
}
}
return cmd;
}
bool conditionalRenderActive = false;
LatteCMDPtr LatteCP_itSetPredication(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 2);
MPTR physQueryInfo = LatteReadCMD();
uint32 flags = LatteReadCMD();
uint32 queryTypeFlag = (flags >> 13) & 7;
uint32 pixelsMustPassFlag = (flags >> 31) & 1;
uint32 dontWaitFlag = (flags >> 1) & 19;
if (queryTypeFlag == 0)
{
// disable conditional render
if (conditionalRenderActive == false)
debug_printf("conditionalRenderActive already inactive\n");
conditionalRenderActive = false;
}
else
{
		// enable conditional render
if (conditionalRenderActive == true)
debug_printf("conditionalRenderActive already active\n");
conditionalRenderActive = true;
}
return cmd;
}
LatteCMDPtr LatteCP_itDrawIndex2(LatteCMDPtr cmd, uint32 nWords, DrawPassContext& drawPassCtx)
{
cemu_assert_debug(nWords == 5);
uint32 ukn1 = LatteReadCMD();
MPTR physIndices = LatteReadCMD();
uint32 ukn2 = LatteReadCMD();
uint32 count = LatteReadCMD();
uint32 ukn3 = LatteReadCMD();
LatteGPUState.currentDrawCallTick = GetTickCount();
drawPassCtx.executeDraw(count, false, physIndices);
return cmd;
}
LatteCMDPtr LatteCP_itDrawIndexAuto(LatteCMDPtr cmd, uint32 nWords, DrawPassContext& drawPassCtx)
{
cemu_assert_debug(nWords == 2);
uint32 count = LatteReadCMD();
uint32 ukn = LatteReadCMD();
if (LatteGPUState.drawContext.numInstances == 0)
return cmd;
LatteGPUState.currentDrawCallTick = GetTickCount();
// todo - better way to identify compute drawcalls
if ((LatteGPUState.contextRegister[mmSQ_CONFIG] >> 24) == 0xE4)
{
uint32 vsProgramCode = ((LatteGPUState.contextRegister[mmSQ_PGM_START_ES] & 0xFFFFFF) << 8);
uint32 vsProgramSize = LatteGPUState.contextRegister[mmSQ_PGM_START_ES + 1] << 3;
cemuLog_logDebug(LogType::Force, "Compute {} {:08x} {:08x} (unsupported)", count, vsProgramCode, vsProgramSize);
}
else
{
drawPassCtx.executeDraw(count, true, MPTR_NULL);
}
return cmd;
}
MPTR _tempIndexArrayMPTR = MPTR_NULL;
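// Draw with index data embedded directly in the packet. The indices are copied into a scratch
// buffer allocated from the system area; 16-bit big-endian indices are packed two per word, so
// the two halves of every u32 are swapped before the draw is issued.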
LatteCMDPtr LatteCP_itDrawImmediate(LatteCMDPtr cmd, uint32 nWords, DrawPassContext& drawPassCtx)
{
uint32 count = LatteReadCMD();
uint32 ukn1 = LatteReadCMD();
// reserve array for index data
if (_tempIndexArrayMPTR == MPTR_NULL)
_tempIndexArrayMPTR = coreinit_allocFromSysArea(0x4000 * sizeof(uint32), 0x4);
LatteGPUState.currentDrawCallTick = GetTickCount();
// calculate size of index data in packet and read indices
uint32 numIndexU32s;
auto indexType = LatteGPUState.contextNew.VGT_DMA_INDEX_TYPE.get_INDEX_TYPE();
if (indexType == Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE::U16_BE)
{
// 16bit indices
numIndexU32s = (count + 1) / 2;
memcpy(memory_getPointerFromVirtualOffset(_tempIndexArrayMPTR), cmd, numIndexU32s * sizeof(uint32));
LatteSkipCMD(numIndexU32s);
// swap pairs
uint32* indexDataU32 = (uint32*)memory_getPointerFromVirtualOffset(_tempIndexArrayMPTR);
for (uint32 i = 0; i < numIndexU32s; i++)
{
indexDataU32[i] = (indexDataU32[i] >> 16) | (indexDataU32[i] << 16);
}
LatteIndices_invalidate(memory_getPointerFromVirtualOffset(_tempIndexArrayMPTR), numIndexU32s * sizeof(uint32));
}
else if (indexType == Latte::LATTE_VGT_DMA_INDEX_TYPE::E_INDEX_TYPE::U32_BE)
{
// 32bit indices
cemu_assert_debug(false); // testing needed
numIndexU32s = count;
memcpy(memory_getPointerFromVirtualOffset(_tempIndexArrayMPTR), cmd, numIndexU32s * sizeof(uint32));
LatteSkipCMD(numIndexU32s);
LatteIndices_invalidate(memory_getPointerFromVirtualOffset(_tempIndexArrayMPTR), numIndexU32s * sizeof(uint32));
}
else
{
cemuLog_log(LogType::Force, "itDrawImmediate - Unsupported index type");
return cmd;
}
// verify packet size
if (nWords != (2 + numIndexU32s))
debugBreakpoint();
uint32 baseVertex = LatteGPUState.contextRegister[mmSQ_VTX_BASE_VTX_LOC];
uint32 baseInstance = LatteGPUState.contextRegister[mmSQ_VTX_START_INST_LOC];
uint32 numInstances = LatteGPUState.drawContext.numInstances;
drawPassCtx.executeDraw(count, false, _tempIndexArrayMPTR);
return cmd;
}
LatteCMDPtr LatteCP_itHLEFifoWrapAround(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
uint32 unused = LatteReadCMD();
gxRingBufferReadPtr = gx2WriteGatherPipe.gxRingBuffer;
cmd = (LatteCMDPtr)gxRingBufferReadPtr;
return cmd;
}
LatteCMDPtr LatteCP_itHLESampleTimer(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
MPTR timerMPTR = (MPTR)LatteReadCMD();
memory_writeU64(timerMPTR, coreinit::coreinit_getTimerTick());
return cmd;
}
LatteCMDPtr LatteCP_itHLESpecialState(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 2);
uint32 stateId = LatteReadCMD();
uint32 stateValue = LatteReadCMD();
if (stateId > GX2_SPECIAL_STATE_COUNT)
{
cemu_assert_suspicious();
}
else
{
LatteGPUState.contextNew.GetSpecialStateValues()[stateId] = stateValue;
}
return cmd;
}
LatteCMDPtr LatteCP_itHLESetRetirementTimestamp(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 2);
uint32 timestampHigh = (uint32)LatteReadCMD();
uint32 timestampLow = (uint32)LatteReadCMD();
uint64 timestamp = ((uint64)timestampHigh << 32ULL) | (uint64)timestampLow;
GX2::__GX2NotifyNewRetirementTimestamp(timestamp);
return cmd;
}
LatteCMDPtr LatteCP_itHLEBeginOcclusionQuery(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
MPTR queryMPTR = (MPTR)LatteReadCMD();
LatteQuery_BeginOcclusionQuery(queryMPTR);
return cmd;
}
LatteCMDPtr LatteCP_itHLEEndOcclusionQuery(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 1);
MPTR queryMPTR = (MPTR)LatteReadCMD();
LatteQuery_EndOcclusionQuery(queryMPTR);
return cmd;
}
LatteCMDPtr LatteCP_itHLEBottomOfPipeCB(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 3);
MPTR timestampMPTR = (uint32)LatteReadCMD();
uint32 timestampHigh = (uint32)LatteReadCMD();
uint32 timestampLow = (uint32)LatteReadCMD();
uint64 timestamp = ((uint64)timestampHigh << 32ULL) | (uint64)timestampLow;
// write timestamp
*(uint32*)memory_getPointerFromPhysicalOffset(timestampMPTR) = _swapEndianU32((uint32)(timestamp >> 32));
*(uint32*)memory_getPointerFromPhysicalOffset(timestampMPTR + 4) = _swapEndianU32((uint32)timestamp);
// send event
GX2::__GX2NotifyEvent(GX2::GX2CallbackEventType::TIMESTAMP_BOTTOM);
return cmd;
}
// GPU-side handler for GX2CopySurface/GX2CopySurfaceEx and similar
LatteCMDPtr LatteCP_itHLECopySurfaceNew(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 26);
// src
MPTR srcPhysAddr = LatteReadCMD();
MPTR srcMipAddr = LatteReadCMD();
uint32 srcSwizzle = LatteReadCMD();
Latte::E_GX2SURFFMT srcSurfaceFormat = (Latte::E_GX2SURFFMT)LatteReadCMD();
sint32 srcWidth = LatteReadCMD();
sint32 srcHeight = LatteReadCMD();
sint32 srcDepth = LatteReadCMD();
uint32 srcPitch = LatteReadCMD();
uint32 srcSlice = LatteReadCMD();
Latte::E_DIM srcDim = (Latte::E_DIM)LatteReadCMD();
Latte::E_HWTILEMODE srcTilemode = (Latte::E_HWTILEMODE)LatteReadCMD();
sint32 srcAA = LatteReadCMD();
sint32 srcLevel = LatteReadCMD();
// dst
MPTR dstPhysAddr = LatteReadCMD();
MPTR dstMipAddr = LatteReadCMD();
uint32 dstSwizzle = LatteReadCMD();
Latte::E_GX2SURFFMT dstSurfaceFormat = (Latte::E_GX2SURFFMT)LatteReadCMD();
sint32 dstWidth = LatteReadCMD();
sint32 dstHeight = LatteReadCMD();
sint32 dstDepth = LatteReadCMD();
uint32 dstPitch = LatteReadCMD();
uint32 dstSlice = LatteReadCMD();
Latte::E_DIM dstDim = (Latte::E_DIM)LatteReadCMD();
Latte::E_HWTILEMODE dstTilemode = (Latte::E_HWTILEMODE)LatteReadCMD();
sint32 dstAA = LatteReadCMD();
sint32 dstLevel = LatteReadCMD();
LatteSurfaceCopy_copySurfaceNew(srcPhysAddr, srcMipAddr, srcSwizzle, srcSurfaceFormat, srcWidth, srcHeight, srcDepth, srcPitch, srcSlice, srcDim, srcTilemode, srcAA, srcLevel, dstPhysAddr, dstMipAddr, dstSwizzle, dstSurfaceFormat, dstWidth, dstHeight, dstDepth, dstPitch, dstSlice, dstDim, dstTilemode, dstAA, dstLevel);
return cmd;
}
LatteCMDPtr LatteCP_itHLEClearColorDepthStencil(LatteCMDPtr cmd, uint32 nWords)
{
cemu_assert_debug(nWords == 23);
uint32 clearMask = LatteReadCMD(); // color (1), depth (2), stencil (4)
// color buffer
MPTR colorBufferMPTR = LatteReadCMD(); // physical address for color buffer
Latte::E_GX2SURFFMT colorBufferFormat = (Latte::E_GX2SURFFMT)LatteReadCMD();
Latte::E_HWTILEMODE colorBufferTilemode = (Latte::E_HWTILEMODE)LatteReadCMD();
uint32 colorBufferWidth = LatteReadCMD();
uint32 colorBufferHeight = LatteReadCMD();
uint32 colorBufferPitch = LatteReadCMD();
uint32 colorBufferViewFirstSlice = LatteReadCMD();
uint32 colorBufferViewNumSlice = LatteReadCMD();
// depth buffer
MPTR depthBufferMPTR = LatteReadCMD(); // physical address for depth buffer
Latte::E_GX2SURFFMT depthBufferFormat = (Latte::E_GX2SURFFMT)LatteReadCMD();
Latte::E_HWTILEMODE depthBufferTileMode = (Latte::E_HWTILEMODE)LatteReadCMD();
uint32 depthBufferWidth = LatteReadCMD();
uint32 depthBufferHeight = LatteReadCMD();
uint32 depthBufferPitch = LatteReadCMD();
uint32 depthBufferViewFirstSlice = LatteReadCMD();
uint32 depthBufferViewNumSlice = LatteReadCMD();
float r = (float)LatteReadCMD() / 255.0f;
float g = (float)LatteReadCMD() / 255.0f;
float b = (float)LatteReadCMD() / 255.0f;
float a = (float)LatteReadCMD() / 255.0f;
float clearDepth;
*(uint32*)&clearDepth = LatteReadCMD();
uint32 clearStencil = LatteReadCMD();
LatteRenderTarget_itHLEClearColorDepthStencil(
clearMask,
colorBufferMPTR, colorBufferFormat, colorBufferTilemode, colorBufferWidth, colorBufferHeight, colorBufferPitch, colorBufferViewFirstSlice, colorBufferViewNumSlice,
depthBufferMPTR, depthBufferFormat, depthBufferTileMode, depthBufferWidth, depthBufferHeight, depthBufferPitch, depthBufferViewFirstSlice, depthBufferViewNumSlice,
r, g, b, a,
clearDepth, clearStencil);
return cmd;
}
LatteCMDPtr LatteCP_itHLERequestSwapBuffers(LatteCMDPtr cmd, uint32 nWords)
{
catchOpenGLError();
cemu_assert_debug(nWords == 1);
MPTR reserved1 = LatteReadCMD();
// request flip counter increase (will be increased on next flip)
LatteGPUState.flipRequestCount.fetch_add(1);
return cmd;
}
LatteCMDPtr LatteCP_itHLESwapScanBuffer(LatteCMDPtr cmd, uint32 nWords)
{
catchOpenGLError();
cemu_assert_debug(nWords == 1);
MPTR reserved1 = LatteReadCMD(); // reserved
LatteRenderTarget_itHLESwapScanBuffer();
return cmd;
}
LatteCMDPtr LatteCP_itHLEWaitForFlip(LatteCMDPtr cmd, uint32 nWords)
{
catchOpenGLError();
cemu_assert_debug(nWords == 1);
MPTR reserved1 = LatteReadCMD(); // reserved
// wait for flip
uint32 currentFlipCount = LatteGPUState.flipCounter;
while (true)
{
_mm_pause();
if (currentFlipCount != LatteGPUState.flipCounter)
{
break;
}
// check if any GPU events happened
LatteTiming_HandleTimedVsync();
std::this_thread::yield();
}
return cmd;
}
LatteCMDPtr LatteCP_itHLECopyColorBufferToScanBuffer(LatteCMDPtr cmd, uint32 nWords)
{
MPTR colorBufferPtr = LatteReadCMD(); // physical address
uint32 colorBufferWidth = LatteReadCMD();
uint32 colorBufferHeight = LatteReadCMD();
uint32 colorBufferPitch = LatteReadCMD();
Latte::E_HWTILEMODE colorBufferTilemode = (Latte::E_HWTILEMODE)LatteReadCMD();
uint32 colorBufferSwizzle = LatteReadCMD();
uint32 colorBufferSliceIndex = LatteReadCMD();
uint32 colorBufferFormat = LatteReadCMD();
uint32 renderTarget = LatteReadCMD();
LatteRenderTarget_itHLECopyColorBufferToScanBuffer(colorBufferPtr, colorBufferWidth, colorBufferHeight, colorBufferSliceIndex, colorBufferFormat, colorBufferPitch, colorBufferTilemode, colorBufferSwizzle, renderTarget);
return cmd;
}
void LatteCP_dumpCommandBufferError(LatteCMDPtr cmdStart, LatteCMDPtr cmdEnd, LatteCMDPtr cmdError)
{
cemuLog_log(LogType::Force, "Detected error in GPU command buffer");
cemuLog_log(LogType::Force, "Dumping contents and info");
cemuLog_log(LogType::Force, "Buffer 0x{0:08x} Size 0x{1:08x}", memory_getVirtualOffsetFromPointer(cmdStart), memory_getVirtualOffsetFromPointer(cmdEnd));
cemuLog_log(LogType::Force, "Error at 0x{0:08x}", memory_getVirtualOffsetFromPointer(cmdError));
for (LatteCMDPtr p = cmdStart; p < cmdEnd; p += 4)
{
if(cmdError >= p && cmdError < (p+4) )
cemuLog_log(LogType::Force, "0x{0:08x}: {1:08x} {2:08x} {3:08x} {4:08x} <<<<<", memory_getVirtualOffsetFromPointer(p), p[0], p[1], p[2], p[3]);
else
cemuLog_log(LogType::Force, "0x{0:08x}: {1:08x} {2:08x} {3:08x} {4:08x}", memory_getVirtualOffsetFromPointer(p), p[0], p[1], p[2], p[3]);
}
cemuLog_waitForFlush();
cemu_assert_debug(false);
}
// any drawcalls issued without changing textures, framebuffers, shaders or other complex state can be done quickly without having to reinitialize the entire pipeline state
// we implement this optimization by having a specialized version of LatteCP_processCommandBuffer, called right after drawcalls, which only handles commands that don't interfere with fast drawing. Any other command causes this function to return to the complex, generic parser
void LatteCP_processCommandBuffer_continuousDrawPass(DrawPassContext& drawPassCtx)
{
cemu_assert_debug(drawPassCtx.isWithinDrawPass());
// quit early if there are parameters set which are generally incompatible with fast drawing
if (LatteGPUState.contextRegister[mmVGT_STRMOUT_EN] != 0)
{
drawPassCtx.endDrawPass();
return;
}
// check for other special states?
while (true)
{
LatteCMDPtr cmd, cmdStart, cmdEnd;
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd))
{
drawPassCtx.endDrawPass();
return;
}
while (cmd < cmdEnd)
{
LatteCMDPtr cmdBeforeCommand = cmd;
uint32 itHeader = LatteReadCMD();
uint32 itHeaderType = (itHeader >> 30) & 3;
if (itHeaderType == 3)
{
uint32 itCode = (itHeader >> 8) & 0xFF;
uint32 nWords = ((itHeader >> 16) & 0x3FFF) + 1;
LatteCMDPtr cmdData = cmd;
cmd += nWords;
switch (itCode)
{
case IT_SET_RESOURCE: // attribute buffers, uniform buffers or texture units
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_RESOURCE>(cmdData, nWords, [&drawPassCtx](uint32 registerStart, uint32 registerEnd)
{
if ((registerStart >= Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_PS && registerStart < (Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_PS + Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE * 7)) ||
(registerStart >= Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_VS && registerStart < (Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_VS + Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE * 7)) ||
(registerStart >= Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_GS && registerStart < (Latte::REGADDR::SQ_TEX_RESOURCE_WORD0_N_GS + Latte::GPU_LIMITS::NUM_TEXTURES_PER_STAGE * 7)))
drawPassCtx.endDrawPass(); // texture updates end the current draw sequence
else if (registerStart >= mmSQ_VTX_ATTRIBUTE_BLOCK_START && registerEnd <= mmSQ_VTX_ATTRIBUTE_BLOCK_END)
drawPassCtx.notifyModifiedVertexBuffer();
else
drawPassCtx.notifyModifiedUniformBuffer();
});
if (!drawPassCtx.isWithinDrawPass())
{
drawPassCtx.PushCurrentCommandQueuePos(cmd, cmdStart, cmdEnd);
return;
}
break;
}
case IT_SET_ALU_CONST: // uniform register
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_ALU_CONST>(cmdData, nWords);
break;
}
case IT_SET_CTL_CONST:
{
LatteCP_itSetRegistersGeneric<mmSQ_VTX_BASE_VTX_LOC>(cmdData, nWords);
break;
}
case IT_SET_CONFIG_REG:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_CONFIG>(cmdData, nWords);
break;
}
case IT_INDEX_TYPE:
{
LatteCP_itIndexType(cmdData, nWords);
break;
}
case IT_NUM_INSTANCES:
{
LatteCP_itNumInstances(cmdData, nWords);
break;
}
case IT_DRAW_INDEX_2:
{
LatteCP_itDrawIndex2(cmdData, nWords, drawPassCtx);
break;
}
case IT_SET_CONTEXT_REG:
{
drawPassCtx.endDrawPass();
drawPassCtx.PushCurrentCommandQueuePos(cmdBeforeCommand, cmdStart, cmdEnd);
return;
}
case IT_INDIRECT_BUFFER_PRIV:
{
drawPassCtx.PushCurrentCommandQueuePos(cmd, cmdStart, cmdEnd);
LatteCP_itIndirectBuffer(cmdData, nWords, drawPassCtx);
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd)) // switch to sub buffer
cemu_assert_debug(false);
//if (!drawPassCtx.isWithinDrawPass())
// return cmdData;
break;
}
default:
// unsupported command for fast draw
drawPassCtx.endDrawPass();
drawPassCtx.PushCurrentCommandQueuePos(cmdBeforeCommand, cmdStart, cmdEnd);
return;
}
}
else if (itHeaderType == 2)
{
// filler packet
}
else
{
// unsupported command for fast draw
drawPassCtx.endDrawPass();
drawPassCtx.PushCurrentCommandQueuePos(cmdBeforeCommand, cmdStart, cmdEnd);
return;
}
}
}
if (drawPassCtx.isWithinDrawPass())
drawPassCtx.endDrawPass();
}
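// Generic command buffer parser. Packets follow the PM4-style layout used throughout this file:
// bits 30-31 of the header word select the packet type, type-3 packets carry the opcode in bits
// 8-15 and a payload size of ((header >> 16) & 0x3FFF) + 1 words, type-2 packets are fillers
// with no body, and type-0 packets encode a register base and count directly in the header.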
void LatteCP_processCommandBuffer(DrawPassContext& drawPassCtx)
{
while (true)
{
LatteCMDPtr cmd, cmdStart, cmdEnd;
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd))
break;
while (cmd < cmdEnd)
{
uint32 itHeader = LatteReadCMD();
uint32 itHeaderType = (itHeader >> 30) & 3;
if (itHeaderType == 3)
{
uint32 itCode = (itHeader >> 8) & 0xFF;
uint32 nWords = ((itHeader >> 16) & 0x3FFF) + 1;
LatteCMDPtr cmdData = cmd;
cmd += nWords;
switch (itCode)
{
case IT_SET_CONTEXT_REG:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_CONTEXT>(cmdData, nWords);
}
break;
case IT_SET_RESOURCE:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_RESOURCE>(cmdData, nWords);
}
break;
case IT_SET_ALU_CONST:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_ALU_CONST>(cmdData, nWords);
}
break;
case IT_SET_CTL_CONST:
{
LatteCP_itSetRegistersGeneric<mmSQ_VTX_BASE_VTX_LOC>(cmdData, nWords);
}
break;
case IT_SET_SAMPLER:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_SAMPLER>(cmdData, nWords);
}
break;
case IT_SET_CONFIG_REG:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_CONFIG>(cmdData, nWords);
}
break;
case IT_SET_LOOP_CONST:
{
// todo
}
break;
case IT_SURFACE_SYNC:
{
LatteCP_itSurfaceSync(cmdData);
}
break;
case IT_INDIRECT_BUFFER_PRIV:
{
drawPassCtx.PushCurrentCommandQueuePos(cmd, cmdStart, cmdEnd);
LatteCP_itIndirectBuffer(cmdData, nWords, drawPassCtx);
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd)) // switch to sub buffer
cemu_assert_debug(false);
}
break;
case IT_STRMOUT_BUFFER_UPDATE:
{
LatteCP_itStreamoutBufferUpdate(cmdData, nWords);
}
break;
case IT_INDEX_TYPE:
{
LatteCP_itIndexType(cmdData, nWords);
}
break;
case IT_NUM_INSTANCES:
{
LatteCP_itNumInstances(cmdData, nWords);
}
break;
case IT_DRAW_INDEX_2:
{
drawPassCtx.beginDrawPass();
LatteCP_itDrawIndex2(cmdData, nWords, drawPassCtx);
// enter fast draw mode
drawPassCtx.PushCurrentCommandQueuePos(cmd, cmdStart, cmdEnd);
LatteCP_processCommandBuffer_continuousDrawPass(drawPassCtx);
cemu_assert_debug(!drawPassCtx.isWithinDrawPass());
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd))
return;
}
break;
case IT_DRAW_INDEX_AUTO:
{
drawPassCtx.beginDrawPass();
LatteCP_itDrawIndexAuto(cmdData, nWords, drawPassCtx);
// enter fast draw mode
drawPassCtx.PushCurrentCommandQueuePos(cmd, cmdStart, cmdEnd);
LatteCP_processCommandBuffer_continuousDrawPass(drawPassCtx);
cemu_assert_debug(!drawPassCtx.isWithinDrawPass());
if (!drawPassCtx.PopCurrentCommandQueuePos(cmd, cmdStart, cmdEnd))
return;
}
break;
case IT_DRAW_INDEX_IMMD:
{
DrawPassContext drawPassCtx;
drawPassCtx.beginDrawPass();
LatteCP_itDrawImmediate(cmdData, nWords, drawPassCtx);
drawPassCtx.endDrawPass();
break;
}
case IT_WAIT_REG_MEM:
{
LatteCP_itWaitRegMem(cmdData, nWords);
LatteTiming_HandleTimedVsync();
LatteAsyncCommands_checkAndExecute();
break;
}
case IT_MEM_WRITE:
{
LatteCP_itMemWrite(cmdData, nWords);
break;
}
case IT_CONTEXT_CONTROL:
{
LatteCP_itContextControl(cmdData, nWords);
break;
}
case IT_MEM_SEMAPHORE:
{
LatteCP_itMemSemaphore(cmdData, nWords);
break;
}
case IT_LOAD_CONFIG_REG:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_CONFIG);
break;
}
case IT_LOAD_CONTEXT_REG:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_CONTEXT);
break;
}
case IT_LOAD_ALU_CONST:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_ALU_CONST);
break;
}
case IT_LOAD_LOOP_CONST:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_LOOP_CONST);
break;
}
case IT_LOAD_RESOURCE:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_RESOURCE);
break;
}
case IT_LOAD_SAMPLER:
{
LatteCP_itLoadReg(cmdData, nWords, LATTE_REG_BASE_SAMPLER);
break;
}
case IT_SET_PREDICATION:
{
LatteCP_itSetPredication(cmdData, nWords);
break;
}
case IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER:
{
LatteCP_itHLECopyColorBufferToScanBuffer(cmdData, nWords);
break;
}
case IT_HLE_TRIGGER_SCANBUFFER_SWAP:
{
LatteCP_itHLESwapScanBuffer(cmdData, nWords);
break;
}
case IT_HLE_WAIT_FOR_FLIP:
{
LatteCP_itHLEWaitForFlip(cmdData, nWords);
break;
}
case IT_HLE_REQUEST_SWAP_BUFFERS:
{
LatteCP_itHLERequestSwapBuffers(cmdData, nWords);
break;
}
case IT_HLE_CLEAR_COLOR_DEPTH_STENCIL:
{
LatteCP_itHLEClearColorDepthStencil(cmdData, nWords);
break;
}
case IT_HLE_COPY_SURFACE_NEW:
{
LatteCP_itHLECopySurfaceNew(cmdData, nWords);
break;
}
case IT_HLE_SAMPLE_TIMER:
{
LatteCP_itHLESampleTimer(cmdData, nWords);
break;
}
case IT_HLE_SPECIAL_STATE:
{
LatteCP_itHLESpecialState(cmdData, nWords);
break;
}
case IT_HLE_BEGIN_OCCLUSION_QUERY:
{
LatteCP_itHLEBeginOcclusionQuery(cmdData, nWords);
break;
}
case IT_HLE_END_OCCLUSION_QUERY:
{
LatteCP_itHLEEndOcclusionQuery(cmdData, nWords);
break;
}
case IT_HLE_SET_CB_RETIREMENT_TIMESTAMP:
{
LatteCP_itHLESetRetirementTimestamp(cmdData, nWords);
break;
}
case IT_HLE_BOTTOM_OF_PIPE_CB:
{
LatteCP_itHLEBottomOfPipeCB(cmdData, nWords);
break;
}
case IT_HLE_SYNC_ASYNC_OPERATIONS:
{
LatteTextureReadback_UpdateFinishedTransfers(true);
LatteQuery_UpdateFinishedQueriesForceFinishAll();
break;
}
default:
debug_printf("Unhandled IT %02x\n", itCode);
cemu_assert_debug(false);
break;
}
}
else if (itHeaderType == 2)
{
// filler packet
// has no body
}
else if (itHeaderType == 0)
{
uint32 registerBase = (itHeader & 0xFFFF);
uint32 registerCount = ((itHeader >> 16) & 0x3FFF) + 1;
if (registerBase == 0x304A)
{
GX2::__GX2NotifyEvent(GX2::GX2CallbackEventType::TIMESTAMP_TOP);
LatteSkipCMD(registerCount);
}
else if (registerBase == 0x304B)
{
LatteSkipCMD(registerCount);
}
else
{
LatteCP_dumpCommandBufferError(cmdStart, cmdEnd, cmd);
cemu_assert_debug(false);
}
}
else
{
debug_printf("invalid itHeaderType %08x\n", itHeaderType);
LatteCP_dumpCommandBufferError(cmdStart, cmdEnd, cmd);
cemu_assert_debug(false);
}
}
cemu_assert_debug(cmd == cmdEnd);
}
}
void LatteCP_ProcessRingbuffer()
{
sint32 timerRecheck = 0; // estimates how much CP processing time has elapsed based on the executed commands, if the value exceeds CP_TIMER_RECHECK then _handleTimers() is called
while (true)
{
uint32 itHeader = LatteCP_readU32Deprc();
uint32 itHeaderType = (itHeader >> 30) & 3;
if (itHeaderType == 3)
{
uint32 itCode = (itHeader >> 8) & 0xFF;
uint32 nWords = ((itHeader >> 16) & 0x3FFF) + 1;
LatteCP_waitForNWords(nWords);
LatteCMDPtr cmd = (LatteCMDPtr)gxRingBufferReadPtr;
uint8* cmdEnd = gxRingBufferReadPtr + nWords * 4;
gxRingBufferReadPtr = cmdEnd;
switch (itCode)
{
case IT_SURFACE_SYNC:
{
LatteCP_itSurfaceSync(cmd);
timerRecheck += CP_TIMER_RECHECK / 512;
}
break;
case IT_SET_CONTEXT_REG:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_CONTEXT>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
}
break;
case IT_SET_RESOURCE:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_RESOURCE>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
}
break;
case IT_SET_ALU_CONST:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_ALU_CONST>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_SET_CTL_CONST:
{
LatteCP_itSetRegistersGeneric<mmSQ_VTX_BASE_VTX_LOC>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_SET_SAMPLER:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_SAMPLER>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_SET_CONFIG_REG:
{
LatteCP_itSetRegistersGeneric<LATTE_REG_BASE_CONFIG>(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_INDIRECT_BUFFER_PRIV:
{
LatteCP_itIndirectBufferDepr(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_STRMOUT_BUFFER_UPDATE:
{
LatteCP_itStreamoutBufferUpdate(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_INDEX_TYPE:
{
LatteCP_itIndexType(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 1024;
break;
}
case IT_NUM_INSTANCES:
{
LatteCP_itNumInstances(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 1024;
break;
}
case IT_DRAW_INDEX_2:
{
DrawPassContext drawPassCtx;
drawPassCtx.beginDrawPass();
LatteCP_itDrawIndex2(cmd, nWords, drawPassCtx);
drawPassCtx.endDrawPass();
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_DRAW_INDEX_AUTO:
{
DrawPassContext drawPassCtx;
drawPassCtx.beginDrawPass();
LatteCP_itDrawIndexAuto(cmd, nWords, drawPassCtx);
drawPassCtx.endDrawPass();
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_DRAW_INDEX_IMMD:
{
DrawPassContext drawPassCtx;
drawPassCtx.beginDrawPass();
LatteCP_itDrawImmediate(cmd, nWords, drawPassCtx);
drawPassCtx.endDrawPass();
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_WAIT_REG_MEM:
{
LatteCP_itWaitRegMem(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 16;
break;
}
case IT_MEM_WRITE:
{
LatteCP_itMemWrite(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 128;
break;
}
case IT_CONTEXT_CONTROL:
{
LatteCP_itContextControl(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 128;
break;
}
case IT_MEM_SEMAPHORE:
{
LatteCP_itMemSemaphore(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 128;
break;
}
case IT_LOAD_CONFIG_REG:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_CONFIG);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_LOAD_CONTEXT_REG:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_CONTEXT);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_LOAD_ALU_CONST:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_ALU_CONST);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_LOAD_LOOP_CONST:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_LOOP_CONST);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_LOAD_RESOURCE:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_RESOURCE);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_LOAD_SAMPLER:
{
LatteCP_itLoadReg(cmd, nWords, LATTE_REG_BASE_SAMPLER);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_SET_LOOP_CONST:
{
// todo
break;
}
case IT_SET_PREDICATION:
{
LatteCP_itSetPredication(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER:
{
LatteCP_itHLECopyColorBufferToScanBuffer(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_HLE_TRIGGER_SCANBUFFER_SWAP:
{
LatteCP_itHLESwapScanBuffer(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 64;
break;
}
case IT_HLE_WAIT_FOR_FLIP:
{
LatteCP_itHLEWaitForFlip(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 1;
break;
}
case IT_HLE_REQUEST_SWAP_BUFFERS:
{
LatteCP_itHLERequestSwapBuffers(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 32;
break;
}
case IT_HLE_CLEAR_COLOR_DEPTH_STENCIL:
{
LatteCP_itHLEClearColorDepthStencil(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 128;
break;
}
case IT_HLE_COPY_SURFACE_NEW:
{
LatteCP_itHLECopySurfaceNew(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 128;
break;
}
case IT_HLE_FIFO_WRAP_AROUND:
{
LatteCP_itHLEFifoWrapAround(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_SAMPLE_TIMER:
{
LatteCP_itHLESampleTimer(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_SPECIAL_STATE:
{
LatteCP_itHLESpecialState(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_BEGIN_OCCLUSION_QUERY:
{
LatteCP_itHLEBeginOcclusionQuery(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_END_OCCLUSION_QUERY:
{
LatteCP_itHLEEndOcclusionQuery(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_SET_CB_RETIREMENT_TIMESTAMP:
{
LatteCP_itHLESetRetirementTimestamp(cmd, nWords);
timerRecheck += CP_TIMER_RECHECK / 512;
break;
}
case IT_HLE_BOTTOM_OF_PIPE_CB:
{
LatteCP_itHLEBottomOfPipeCB(cmd, nWords);
break;
}
case IT_HLE_SYNC_ASYNC_OPERATIONS:
{
//LatteCP_skipWords<LatteCP_readU32Deprc>(nWords);
LatteTextureReadback_UpdateFinishedTransfers(true);
LatteQuery_UpdateFinishedQueriesForceFinishAll();
break;
}
default:
cemu_assert_debug(false);
}
}
else if (itHeaderType == 2)
{
// filler packet, skip this
cemu_assert_debug(itHeader == 0x80000000);
}
else if (itHeaderType == 0)
{
uint32 registerBase = (itHeader & 0xFFFF);
uint32 registerCount = ((itHeader >> 16) & 0x3FFF) + 1;
if (registerBase == 0x304A)
{
GX2::__GX2NotifyEvent(GX2::GX2CallbackEventType::TIMESTAMP_TOP);
LatteCP_skipWords<LatteCP_readU32Deprc>(registerCount);
}
else if (registerBase == 0x304B)
{
LatteCP_skipWords<LatteCP_readU32Deprc>(registerCount);
}
else
{
cemu_assert_debug(false);
}
}
else
{
debug_printf("invalid itHeaderType %08x\n", itHeaderType);
cemu_assert_debug(false);
}
if (timerRecheck >= CP_TIMER_RECHECK)
{
LatteTiming_HandleTimedVsync();
LatteAsyncCommands_checkAndExecute();
timerRecheck = 0;
}
}
}
#ifdef LATTE_CP_LOGGING
void LatteCP_DebugPrintCmdBuffer(uint32be* bufferPtr, uint32 size)
{
uint32be* bufferPtrInitial = bufferPtr;
uint32be* bufferPtrEnd = bufferPtr + (size/4);
while (bufferPtr < bufferPtrEnd)
{
std::string strPrefix = fmt::format("[PM4 Buf {:08x} Offs {:04x}]", MEMPTR<void>(bufferPtr).GetMPTR(), (bufferPtr - bufferPtrInitial) * 4);
uint32 itHeader = *bufferPtr;
bufferPtr++;
uint32 itHeaderType = (itHeader >> 30) & 3;
if (itHeaderType == 3)
{
uint32 itCode = (itHeader >> 8) & 0xFF;
uint32 nWords = ((itHeader >> 16) & 0x3FFF) + 1;
uint32be* cmdData = bufferPtr;
bufferPtr += nWords;
switch (itCode)
{
case IT_SURFACE_SYNC:
{
cemuLog_log(LogType::Force, "{} IT_SURFACE_SYNC", strPrefix);
break;
}
case IT_SET_CONTEXT_REG:
{
std::string regVals;
for (uint32 i = 0; i < std::min<uint32>(nWords - 1, 8); i++)
regVals.append(fmt::format("{:08x} ", cmdData[1 + i].value()));
cemuLog_log(LogType::Force, "{} IT_SET_CONTEXT_REG Reg {:04x} RegValues {}", strPrefix, cmdData[0].value(), regVals);
}
case IT_SET_RESOURCE:
{
std::string regVals;
for (uint32 i = 0; i < std::min<uint32>(nWords - 1, 8); i++)
regVals.append(fmt::format("{:08x} ", cmdData[1+i].value()));
cemuLog_log(LogType::Force, "{} IT_SET_RESOURCE Reg {:04x} RegValues {}", strPrefix, cmdData[0].value(), regVals);
break;
}
case IT_SET_ALU_CONST:
{
cemuLog_log(LogType::Force, "{} IT_SET_ALU_CONST", strPrefix);
break;
}
case IT_SET_CTL_CONST:
{
cemuLog_log(LogType::Force, "{} IT_SET_CTL_CONST", strPrefix);
break;
}
case IT_SET_SAMPLER:
{
cemuLog_log(LogType::Force, "{} IT_SET_SAMPLER", strPrefix);
break;
}
case IT_SET_CONFIG_REG:
{
cemuLog_log(LogType::Force, "{} IT_SET_CONFIG_REG", strPrefix);
break;
}
case IT_INDIRECT_BUFFER_PRIV:
{
if (nWords != 3)
{
cemuLog_log(LogType::Force, "{} IT_INDIRECT_BUFFER_PRIV (malformed!)", strPrefix);
}
else
{
uint32 physicalAddress = cmdData[0];
uint32 physicalAddressHigh = cmdData[1];
uint32 sizeInDWords = cmdData[2];
cemuLog_log(LogType::Force, "{} IT_INDIRECT_BUFFER_PRIV Addr {:08x} Size {:08x}", strPrefix, physicalAddress, sizeInDWords*4);
LatteCP_DebugPrintCmdBuffer(MEMPTR<uint32be>(physicalAddress), sizeInDWords * 4);
}
break;
}
case IT_STRMOUT_BUFFER_UPDATE:
{
cemuLog_log(LogType::Force, "{} IT_STRMOUT_BUFFER_UPDATE", strPrefix);
break;
}
case IT_INDEX_TYPE:
{
cemuLog_log(LogType::Force, "{} IT_INDEX_TYPE", strPrefix);
break;
}
case IT_NUM_INSTANCES:
{
cemuLog_log(LogType::Force, "{} IT_NUM_INSTANCES", strPrefix);
break;
}
case IT_DRAW_INDEX_2:
{
if (nWords != 5)
{
cemuLog_log(LogType::Force, "{} IT_DRAW_INDEX_2 (malformed!)", strPrefix);
}
else
{
uint32 ukn1 = cmdData[0];
MPTR physIndices = cmdData[1];
uint32 ukn2 = cmdData[2];
uint32 count = cmdData[3];
uint32 ukn3 = cmdData[4];
cemuLog_log(LogType::Force, "{} IT_DRAW_INDEX_2 | Count {}", strPrefix, count);
}
break;
}
case IT_DRAW_INDEX_AUTO:
{
cemuLog_log(LogType::Force, "{} IT_DRAW_INDEX_AUTO", strPrefix);
break;
}
case IT_DRAW_INDEX_IMMD:
{
cemuLog_log(LogType::Force, "{} IT_DRAW_INDEX_IMMD", strPrefix);
break;
}
case IT_WAIT_REG_MEM:
{
cemuLog_log(LogType::Force, "{} IT_WAIT_REG_MEM", strPrefix);
break;
}
case IT_MEM_WRITE:
{
cemuLog_log(LogType::Force, "{} IT_MEM_WRITE", strPrefix);
break;
}
case IT_CONTEXT_CONTROL:
{
cemuLog_log(LogType::Force, "{} IT_CONTEXT_CONTROL", strPrefix);
break;
}
case IT_MEM_SEMAPHORE:
{
cemuLog_log(LogType::Force, "{} IT_MEM_SEMAPHORE", strPrefix);
break;
}
case IT_LOAD_CONFIG_REG:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_CONFIG_REG", strPrefix);
break;
}
case IT_LOAD_CONTEXT_REG:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_CONTEXT_REG", strPrefix);
break;
}
case IT_LOAD_ALU_CONST:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_ALU_CONST", strPrefix);
break;
}
case IT_LOAD_LOOP_CONST:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_LOOP_CONST", strPrefix);
break;
}
case IT_LOAD_RESOURCE:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_RESOURCE", strPrefix);
break;
}
case IT_LOAD_SAMPLER:
{
cemuLog_log(LogType::Force, "{} IT_LOAD_SAMPLER", strPrefix);
break;
}
case IT_SET_LOOP_CONST:
{
cemuLog_log(LogType::Force, "{} IT_SET_LOOP_CONST", strPrefix);
break;
}
case IT_SET_PREDICATION:
{
cemuLog_log(LogType::Force, "{} IT_SET_PREDICATION", strPrefix);
break;
}
case IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER:
{
cemuLog_log(LogType::Force, "{} IT_HLE_COPY_COLORBUFFER_TO_SCANBUFFER", strPrefix);
break;
}
case IT_HLE_TRIGGER_SCANBUFFER_SWAP:
{
cemuLog_log(LogType::Force, "{} IT_HLE_TRIGGER_SCANBUFFER_SWAP", strPrefix);
break;
}
case IT_HLE_WAIT_FOR_FLIP:
{
cemuLog_log(LogType::Force, "{} IT_HLE_WAIT_FOR_FLIP", strPrefix);
break;
}
case IT_HLE_REQUEST_SWAP_BUFFERS:
{
cemuLog_log(LogType::Force, "{} IT_HLE_REQUEST_SWAP_BUFFERS", strPrefix);
break;
}
case IT_HLE_CLEAR_COLOR_DEPTH_STENCIL:
{
cemuLog_log(LogType::Force, "{} IT_HLE_CLEAR_COLOR_DEPTH_STENCIL", strPrefix);
break;
}
case IT_HLE_COPY_SURFACE_NEW:
{
cemuLog_log(LogType::Force, "{} IT_HLE_COPY_SURFACE_NEW", strPrefix);
break;
}
case IT_HLE_FIFO_WRAP_AROUND:
{
cemuLog_log(LogType::Force, "{} IT_HLE_FIFO_WRAP_AROUND", strPrefix);
break;
}
case IT_HLE_SAMPLE_TIMER:
{
cemuLog_log(LogType::Force, "{} IT_HLE_SAMPLE_TIMER", strPrefix);
break;
}
case IT_HLE_SPECIAL_STATE:
{
cemuLog_log(LogType::Force, "{} IT_HLE_SPECIAL_STATE", strPrefix);
break;
}
case IT_HLE_BEGIN_OCCLUSION_QUERY:
{
cemuLog_log(LogType::Force, "{} IT_HLE_BEGIN_OCCLUSION_QUERY", strPrefix);
break;
}
case IT_HLE_END_OCCLUSION_QUERY:
{
cemuLog_log(LogType::Force, "{} IT_HLE_END_OCCLUSION_QUERY", strPrefix);
break;
}
case IT_HLE_SET_CB_RETIREMENT_TIMESTAMP:
{
cemuLog_log(LogType::Force, "{} IT_HLE_SET_CB_RETIREMENT_TIMESTAMP", strPrefix);
break;
}
case IT_HLE_BOTTOM_OF_PIPE_CB:
{
cemuLog_log(LogType::Force, "{} IT_HLE_BOTTOM_OF_PIPE_CB", strPrefix);
break;
}
case IT_HLE_SYNC_ASYNC_OPERATIONS:
{
cemuLog_log(LogType::Force, "{} IT_HLE_SYNC_ASYNC_OPERATIONS", strPrefix);
break;
}
default:
cemuLog_log(LogType::Force, "{} Unsupported operation code", strPrefix);
return;
}
}
else if (itHeaderType == 2)
{
// filler packet
}
else if (itHeaderType == 0)
{
uint32 registerBase = (itHeader & 0xFFFF);
uint32 registerCount = ((itHeader >> 16) & 0x3FFF) + 1;
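			// type-0 packets are plain register writes: registerCount consecutive payload dwords for
			// registers starting at registerBase; this debug path only skips the payload and logs the base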
LatteCP_skipWords<LatteCP_readU32Deprc>(registerCount);
cemuLog_log(LogType::Force, "[LatteCP] itType=0 registerBase={:04x}", registerBase);
}
else
{
cemuLog_log(LogType::Force, "Invalid itHeaderType %08x\n", itHeaderType);
return;
}
}
}
#endif
| 57,170 | C++ | .cpp | 1,879 | 26.844066 | 321 | 0.722181 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,281 | LatteTextureLoader.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteTextureLoader.cpp |
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/LatteAddrLib/LatteAddrLib.h"
#include "config/ActiveSettings.h"
#include "Cafe/CafeSystem.h"
//#define BENCHMARK_TEXTURE_DECODING // if defined, time it takes to decode textures will be measured and logged to log.txt
#ifdef BENCHMARK_TEXTURE_DECODING
uint64 textureDecodeBenchmark_perFormatSum[0x40] = { 0 }; // duration sum per texture format (hw format) - in microseconds
uint64 textureDecodeBenchmark_totalSum = 0;
#endif
void LatteTextureLoader_begin(LatteTextureLoaderCtx* textureLoader, uint32 sliceIndex, uint32 mipIndex, MPTR physImagePtr, MPTR physMipPtr, Latte::E_GX2SURFFMT format, Latte::E_DIM dim, uint32 width, uint32 height, uint32 depth, uint32 mipLevels, uint32 pitch, Latte::E_HWTILEMODE tileMode, uint32 swizzle)
{
textureLoader->physAddress = physImagePtr;
textureLoader->physMipAddress = physMipPtr;
textureLoader->sliceIndex = sliceIndex;
cemu_assert_debug(mipLevels != 0);
textureLoader->mipLevels = std::max<uint32>(1, mipLevels);
textureLoader->tileMode = tileMode;
textureLoader->bpp = Latte::GetFormatBits(format);
textureLoader->stepX = 1;
textureLoader->stepY = 1;
if (Latte::IsCompressedFormat(format))
{
textureLoader->stepX = 4;
textureLoader->stepY = 4;
}
textureLoader->pipeSwizzle = (swizzle >> 8) & 1;
textureLoader->bankSwizzle = ((swizzle >> 9) & 3);
uint32 surfaceAA = 0; // todo
if (mipIndex > 0 && Latte::TM_IsMacroTiled(tileMode))
{
		// if the mip chain is macro-tiled (and therefore swizzled), separate the swizzle bits from the mip pointer
LatteAddrLib::AddrSurfaceInfo_OUT surfaceInfo;
LatteAddrLib::GX2CalculateSurfaceInfo(format, width, height, depth, dim, Latte::MakeGX2TileMode(tileMode), surfaceAA, 1, &surfaceInfo);
if (Latte::TM_IsMacroTiled(surfaceInfo.hwTileMode))
{
uint32 mipSwizzle = physMipPtr&0x700;
physMipPtr &= ~0x700;
textureLoader->physMipAddress = physMipPtr;
textureLoader->pipeSwizzle = (mipSwizzle >> 8) & 1;
textureLoader->bankSwizzle = ((mipSwizzle >> 9) & 3);
}
}
// calculate surface info
uint32 level = mipIndex;
LatteAddrLib::AddrSurfaceInfo_OUT surfaceInfo;
LatteAddrLib::GX2CalculateSurfaceInfo(format, width, height, depth, dim, Latte::MakeGX2TileMode(tileMode), surfaceAA, level, &surfaceInfo);
textureLoader->levelOffset = LatteAddrLib::CalculateMipOffset(format, width, height, depth, dim, (Latte::E_HWTILEMODE)tileMode, swizzle, surfaceAA, level);
textureLoader->tileMode = surfaceInfo.hwTileMode;
textureLoader->minOffsetOutdated = 0;
textureLoader->maxOffsetOutdated = (sint32)surfaceInfo.surfSize;
textureLoader->surfaceInfoHeight = surfaceInfo.height;
textureLoader->surfaceInfoDepth = surfaceInfo.depth;
// correct handling for LINEAR_ALIGNED pitch alignment is still not fully understood:
//seems like sometimes there is a conditional pitch alignment to 0x40 OR there is no pitch alignment at all and we have a bug somewhere else
uint64 titleId = CafeSystem::GetForegroundTitleId();
titleId &= ~0x300ULL;
if (tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED && titleId == (0x000500301001200aULL))
{
// examples of titles that use linear textures:
// Minecraft - Uses sprite atlases with mips and linear tilemode. Expects padding of pitch for smaller mips to be 0x40
// Browser - Linear pitch must be used as-is, padding/alignment will break textures (uses a weird way to calculate pitch by using GX2CalcSurface on a texture with tileMode 0/4)
// BotW - uses linear textures as render targets. With the smallest resolution being 3x3 with no pitch alignment expected at all (pitch = 3)? -> Not possible because both textures and rendertargets require a minimum alignment of 8 for pitch?
surfaceInfo.pitch = std::max<uint32>(1, pitch >> mipIndex);
}
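	// worked example (illustrative): with a base pitch of 256, mip 3 of such a linear texture gets pitch max(1, 256 >> 3) = 32 here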
textureLoader->width = width >> (mipIndex);
textureLoader->width = std::max(textureLoader->width, 1);
textureLoader->height = height >> (mipIndex);
textureLoader->height = std::max(textureLoader->height, 1);
textureLoader->pitch = surfaceInfo.pitch;
// calculate start address
if (level == 0)
textureLoader->inputData = (uint8*)memory_getPointerFromPhysicalOffset(physImagePtr);
else
textureLoader->inputData = (uint8*)memory_getPointerFromPhysicalOffset(physMipPtr) + textureLoader->levelOffset;
SetupCachedSurfaceAddrInfo(&textureLoader->computeAddrInfo, textureLoader->sliceIndex, 0, textureLoader->bpp, textureLoader->pitch, surfaceInfo.height, depth, 1 * 1, textureLoader->tileMode, false, textureLoader->pipeSwizzle, textureLoader->bankSwizzle);
}
uint8* LatteTextureLoader_GetInput(LatteTextureLoaderCtx* textureLoader, sint32 x, sint32 y)
{
// calculate address of input tile
uint32 offset = 0;
if (textureLoader->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_GENERAL || textureLoader->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED)
offset = LatteAddrLib::ComputeSurfaceAddrFromCoordLinear(x / textureLoader->stepX, y / textureLoader->stepY, textureLoader->sliceIndex, 0, textureLoader->bpp, textureLoader->pitch, textureLoader->surfaceInfoHeight, textureLoader->surfaceInfoDepth);
else if (textureLoader->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THIN1 || textureLoader->tileMode == Latte::E_HWTILEMODE::TM_1D_TILED_THICK)
offset = LatteAddrLib::ComputeSurfaceAddrFromCoordMicroTiled(x / textureLoader->stepX, y / textureLoader->stepY, textureLoader->sliceIndex, textureLoader->bpp, textureLoader->pitch, textureLoader->surfaceInfoHeight, (Latte::E_HWTILEMODE)textureLoader->tileMode, false);
else
offset = LatteAddrLib::ComputeSurfaceAddrFromCoordMacroTiledCached(x / textureLoader->stepX, y / textureLoader->stepY, &textureLoader->computeAddrInfo);
uint8* blockData = textureLoader->inputData + offset;
return blockData;
}
/*
* Optimized version which assumes tileMode == 1
* Also does not do any min/max offset tracking
*/
uint8* LatteTextureLoader_getInputLinearOptimized(LatteTextureLoaderCtx* textureLoader, sint32 x, sint32 y)
{
// calculate address of input tile
uint32 bitPos = 0;
uint32 offset = 0;
offset = LatteAddrLib::ComputeSurfaceAddrFromCoordLinear(x / textureLoader->stepX, y / textureLoader->stepY, textureLoader->sliceIndex, 0, textureLoader->bpp, textureLoader->pitch, textureLoader->surfaceInfoHeight, textureLoader->surfaceInfoDepth);
return textureLoader->inputData + offset;
}
#define LatteTextureLoader_getInputLinearOptimized_(__textureLoader,__x,__y,__stepX,__stepY,__bpp,__sliceIndex,__numSlices,__sample,__pitch,__height) (textureLoader->inputData+((__x/__stepX) + __pitch * (__y/__stepY) + (__sliceIndex + __numSlices * __sample) * __height * __pitch)*(__bpp/8))
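// In the common case (slice 0, sample 0) the macro above reduces to
//   byteOffset = ((x / stepX) + pitch * (y / stepY)) * (bpp / 8)
// e.g. x=5, y=2, stepX=stepY=1, pitch=256, bpp=32 -> (5 + 2*256) * 4 = 2068 bytes into inputData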
float SRGB_to_RGB(float cs)
{
float cl;
if (cs <= 0.04045f)
cl = cs / 12.92f;
else
cl = powf(((cs + 0.055f) / 1.055f), 2.4f);
return cl;
}
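// worked example: cs = 0.5 lies above the linear segment, so cl = ((0.5 + 0.055) / 1.055)^2.4 ≈ 0.214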
void decodeBC1Block(uint8* inputData, float* output4x4RGBA)
{
// read colors
uint16 c0 = *(uint16*)(inputData + 0);
uint16 c1 = *(uint16*)(inputData + 2);
// decode colors (RGB565 -> RGB888)
float r[4];
float g[4];
float b[4];
float a[4];
b[0] = (float)((c0 >> 0) & 0x1F) / 31.0f;
b[1] = (float)((c1 >> 0) & 0x1F) / 31.0f;
g[0] = (float)((c0 >> 5) & 0x3F) / 63.0f;
g[1] = (float)((c1 >> 5) & 0x3F) / 63.0f;
r[0] = (float)((c0 >> 11) & 0x1F) / 31.0f;
r[1] = (float)((c1 >> 11) & 0x1F) / 31.0f;
a[0] = 1.0f;
a[1] = 1.0f;
a[2] = 1.0f;
if (c0 > c1)
{
r[2] = (r[0] * 2.0f + r[1]) / 3.0f;
r[3] = (r[0] * 1.0f + r[1] * 2.0f) / 3.0f;
g[2] = (g[0] * 2.0f + g[1]) / 3.0f;
g[3] = (g[0] * 1.0f + g[1] * 2.0f) / 3.0f;
b[2] = (b[0] * 2.0f + b[1]) / 3.0f;
b[3] = (b[0] * 1.0f + b[1] * 2.0f) / 3.0f;
a[3] = 1.0f;
}
else
{
r[2] = (r[0] + r[1]) / 2.0f;
r[3] = 0.0f;
g[2] = (g[0] + g[1]) / 2.0f;
g[3] = 0.0f;
b[2] = (b[0] + b[1]) / 2.0f;
b[3] = 0.0f;
a[3] = 0.0f;
}
uint8* indexData = inputData + 4;
float* colorOutputRGBA = output4x4RGBA;
for (sint32 row = 0; row < 4; row++)
{
uint8 i0 = ((*indexData) >> 0) & 3;
uint8 i1 = ((*indexData) >> 2) & 3;
uint8 i2 = ((*indexData) >> 4) & 3;
uint8 i3 = ((*indexData) >> 6) & 3;
colorOutputRGBA[0] = r[i0];
colorOutputRGBA[1] = g[i0];
colorOutputRGBA[2] = b[i0];
colorOutputRGBA[3] = a[i0];
colorOutputRGBA += 4;
colorOutputRGBA[0] = r[i1];
colorOutputRGBA[1] = g[i1];
colorOutputRGBA[2] = b[i1];
colorOutputRGBA[3] = a[i1];
colorOutputRGBA += 4;
colorOutputRGBA[0] = r[i2];
colorOutputRGBA[1] = g[i2];
colorOutputRGBA[2] = b[i2];
colorOutputRGBA[3] = a[i2];
colorOutputRGBA += 4;
colorOutputRGBA[0] = r[i3];
colorOutputRGBA[1] = g[i3];
colorOutputRGBA[2] = b[i3];
colorOutputRGBA[3] = a[i3];
colorOutputRGBA += 4;
indexData++;
}
}
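// Worked example for the endpoint logic above (illustrative block, assuming a little-endian host as the
// raw uint16 reads imply; this snippet is not called anywhere in this file):
//   uint8 block[8] = { 0x00, 0xF8, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00 }; // c0 = 0xF800 (red), c1 = 0x001F (blue)
//   float rgba[4 * 4 * 4];
//   decodeBC1Block(block, rgba);
// c0 > c1 selects the 4-color mode: index 2 = 2/3*red + 1/3*blue, index 3 = 1/3*red + 2/3*blue, alpha stays 1.0;
// with all index bytes zero every texel resolves to pure red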
void decodeBC2Block_UNORM(uint8* inputData, float* imageRGBA)
{
uint32 color0 = *(uint16*)(inputData + 8);
uint32 color1 = *(uint16*)(inputData + 10);
uint32 colorIndices = *(uint32*)(inputData + 12);
uint8 r0 = (color0 >> 11) & 0x1F;
uint8 g0 = (color0 >> 5) & 0x3F;
uint8 b0 = (color0 >> 0) & 0x1F;
uint8 r1 = (color1 >> 11) & 0x1F;
uint8 g1 = (color1 >> 5) & 0x3F;
uint8 b1 = (color1 >> 0) & 0x1F;
float r[4];
float g[4];
float b[4];
r[0] = (float)r0 / 31.0f;
r[1] = (float)r1 / 31.0f;
r[2] = (r[0] * 2.0f + r[1]) / 3.0f;
r[3] = (r[0] + r[1] * 2.0f) / 3.0f;
g[0] = (float)g0 / 63.0f;
g[1] = (float)g1 / 63.0f;
g[2] = (g[0] * 2.0f + g[1]) / 3.0f;
g[3] = (g[0] + g[1] * 2.0f) / 3.0f;
b[0] = (float)b0 / 31.0f;
b[1] = (float)b1 / 31.0f;
b[2] = (b[0] * 2.0f + b[1]) / 3.0f;
b[3] = (b[0] + b[1] * 2.0f) / 3.0f;
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
uint8 colorIndex = (colorIndices >> (2 * (px + 4 * py))) & 0x03;
sint32 pixelOffset = (px + py * 4) * 4;
imageRGBA[pixelOffset + 0] = r[colorIndex];
imageRGBA[pixelOffset + 1] = g[colorIndex];
imageRGBA[pixelOffset + 2] = b[colorIndex];
}
}
// decode alpha
uint8* alphaData = (uint8*)(inputData + 0);
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
uint32 alphaIndex = (px + py * 4);
uint8 alphaCode = (alphaData[alphaIndex / 2] >> ((alphaIndex & 1) * 4)) & 0xF;
alphaCode |= (alphaCode << 4);
sint32 pixelOffset = (px + py * 4) * 4;
imageRGBA[pixelOffset + 3] = (float)alphaCode / 255.0f; // alpha
}
}
}
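// note on the alpha expansion above: replicating the 4-bit code into both nibbles (e.g. 0xA -> 0xAA = 170)
// rescales it onto 0..255 so that 0xF maps to exactly 255 (alpha 1.0); 170/255 == 10/15 ≈ 0.667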
void decodeBC3Block_UNORM(uint8* inputData, float* imageRGBA)
{
uint32 color0 = *(uint16*)(inputData + 8);
uint32 color1 = *(uint16*)(inputData + 10);
uint32 colorIndices = *(uint32*)(inputData + 12);
uint8 r0 = (color0 >> 11) & 0x1F;
uint8 g0 = (color0 >> 5) & 0x3F;
uint8 b0 = (color0 >> 0) & 0x1F;
uint8 r1 = (color1 >> 11) & 0x1F;
uint8 g1 = (color1 >> 5) & 0x3F;
uint8 b1 = (color1 >> 0) & 0x1F;
float r[4];
float g[4];
float b[4];
r[0] = (float)r0 / 31.0f;
r[1] = (float)r1 / 31.0f;
r[2] = (r[0] * 2.0f + r[1]) / 3.0f;
r[3] = (r[0] + r[1] * 2.0f) / 3.0f;
g[0] = (float)g0 / 63.0f;
g[1] = (float)g1 / 63.0f;
g[2] = (g[0] * 2.0f + g[1]) / 3.0f;
g[3] = (g[0] + g[1] * 2.0f) / 3.0f;
b[0] = (float)b0 / 31.0f;
b[1] = (float)b1 / 31.0f;
b[2] = (b[0] * 2.0f + b[1]) / 3.0f;
b[3] = (b[0] + b[1] * 2.0f) / 3.0f;
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
uint8 colorIndex = (colorIndices >> (2 * (px + 4 * py))) & 0x03;
sint32 pixelOffset = (px + py * 4) * 4;
imageRGBA[pixelOffset + 0] = r[colorIndex];
imageRGBA[pixelOffset + 1] = g[colorIndex];
imageRGBA[pixelOffset + 2] = b[colorIndex];
//imageRGBA[pixelOffset+3] = 1.0f; // alpha
}
}
// decode alpha
uint8 alpha0 = *(uint8*)(inputData + 0);
uint8 alpha1 = *(uint8*)(inputData + 1);
uint32 alphaCodeRow[2] = { 0 };
alphaCodeRow[0] |= ((*(uint8*)(inputData + 2)) << 0);
alphaCodeRow[0] |= ((*(uint8*)(inputData + 3)) << 8);
alphaCodeRow[0] |= ((*(uint8*)(inputData + 4)) << 16);
alphaCodeRow[1] |= ((*(uint8*)(inputData + 5)) << 0);
alphaCodeRow[1] |= ((*(uint8*)(inputData + 6)) << 8);
alphaCodeRow[1] |= ((*(uint8*)(inputData + 7)) << 16);
float a[8];
a[0] = (float)alpha0 / 255.0f;
a[1] = (float)alpha1 / 255.0f;
if (alpha0 > alpha1)
{
// 6 interpolated alpha values.
a[2] = (a[0] * 6.0f + a[1] * 1.0f) / 7.0f;
a[3] = (a[0] * 5.0f + a[1] * 2.0f) / 7.0f;
a[4] = (a[0] * 4.0f + a[1] * 3.0f) / 7.0f;
a[5] = (a[0] * 3.0f + a[1] * 4.0f) / 7.0f;
a[6] = (a[0] * 2.0f + a[1] * 5.0f) / 7.0f;
a[7] = (a[0] * 1.0f + a[1] * 6.0f) / 7.0f;
}
else
{
// 4 interpolated alpha values.
a[2] = (a[0] * 4.0f + a[1] * 1.0f) / 5.0f;
a[3] = (a[0] * 3.0f + a[1] * 2.0f) / 5.0f;
a[4] = (a[0] * 2.0f + a[1] * 3.0f) / 5.0f;
a[5] = (a[0] * 1.0f + a[1] * 4.0f) / 5.0f;
a[6] = 0.0f;
a[7] = 1.0f;
}
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
uint8 alphaCode = (alphaCodeRow[py / 2] >> 3 * (px + 4 * (py & 1))) & 0x07;
sint32 pixelOffset = (px + py * 4) * 4;
imageRGBA[pixelOffset + 3] = a[alphaCode]; // alpha
}
}
}
void decodeBC4Block_UNORM(uint8* blockStorage, float* rOutput)
{
uint8* blockInput = (uint8*)blockStorage;
float red[8];
red[0] = ((float)(*(uint8*)(blockInput + 0))) / 255.0f;
red[1] = ((float)(*(uint8*)(blockInput + 1))) / 255.0f;
if (blockInput[0] > blockInput[1])
{
// 6 interpolated color values
red[2] = (6 * red[0] + 1 * red[1]) / 7.0f; // bit code 010
red[3] = (5 * red[0] + 2 * red[1]) / 7.0f; // bit code 011
red[4] = (4 * red[0] + 3 * red[1]) / 7.0f; // bit code 100
red[5] = (3 * red[0] + 4 * red[1]) / 7.0f; // bit code 101
red[6] = (2 * red[0] + 5 * red[1]) / 7.0f; // bit code 110
red[7] = (1 * red[0] + 6 * red[1]) / 7.0f; // bit code 111
}
else
{
// 4 interpolated color values
red[2] = (4 * red[0] + 1 * red[1]) / 5.0f; // bit code 010
red[3] = (3 * red[0] + 2 * red[1]) / 5.0f; // bit code 011
red[4] = (2 * red[0] + 3 * red[1]) / 5.0f; // bit code 100
red[5] = (1 * red[0] + 4 * red[1]) / 5.0f; // bit code 101
red[6] = 0.0f; // bit code 110
red[7] = 1.0f; // bit code 111
}
uint8* bitIndices = blockInput + 2;
uint32 redRow0 = (((uint32)bitIndices[2]) << 16) | (((uint32)bitIndices[1]) << 8) | (((uint32)bitIndices[0]) << 0);
uint32 redRow1 = (((uint32)bitIndices[5]) << 16) | (((uint32)bitIndices[4]) << 8) | (((uint32)bitIndices[3]) << 0);
uint8 pRed[16];
for (sint32 i = 0; i < 8; i++)
{
pRed[i] = (redRow0 >> (i * 3)) & 7;
pRed[i + 8] = (redRow1 >> (i * 3)) & 7;
}
float* pixelOutput = rOutput;
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
float c = red[pRed[px + py * 4]];
*pixelOutput = c;
pixelOutput++;
}
}
}
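// The two 24-bit rows above pack 16 3-bit indices (block bytes 2..7). Worked example:
// bitIndices[0..2] = { 0x88, 0xC6, 0xFA } gives redRow0 = 0xFAC688, and texel i (0..7) reads
// (redRow0 >> (i * 3)) & 7, which yields the index sequence 0,1,2,3,4,5,6,7 for this byte pattern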
void decodeBC5Block_UNORM(uint8* blockStorage, float* rgOutput)
{
uint8* blockInput = (uint8*)blockStorage;
float red[8];
float green[8];
red[0] = ((float)(*(uint8*)(blockInput + 0))) / 255.0f;
red[1] = ((float)(*(uint8*)(blockInput + 1))) / 255.0f;
if (red[0] > red[1])
{
// 6 interpolated color values
red[2] = (6 * red[0] + 1 * red[1]) / 7.0f; // bit code 010
red[3] = (5 * red[0] + 2 * red[1]) / 7.0f; // bit code 011
red[4] = (4 * red[0] + 3 * red[1]) / 7.0f; // bit code 100
red[5] = (3 * red[0] + 4 * red[1]) / 7.0f; // bit code 101
red[6] = (2 * red[0] + 5 * red[1]) / 7.0f; // bit code 110
red[7] = (1 * red[0] + 6 * red[1]) / 7.0f; // bit code 111
}
else
{
// 4 interpolated color values
red[2] = (4 * red[0] + 1 * red[1]) / 5.0f; // bit code 010
red[3] = (3 * red[0] + 2 * red[1]) / 5.0f; // bit code 011
red[4] = (2 * red[0] + 3 * red[1]) / 5.0f; // bit code 100
red[5] = (1 * red[0] + 4 * red[1]) / 5.0f; // bit code 101
red[6] = 0.0f; // bit code 110
red[7] = 1.0f; // bit code 111
}
green[0] = ((float)(*(uint8*)(blockInput + 8))) / 255.0f;
green[1] = ((float)(*(uint8*)(blockInput + 9))) / 255.0f;
if (green[0] > green[1])
{
// 6 interpolated color values
green[2] = (6 * green[0] + 1 * green[1]) / 7.0f; // bit code 010
green[3] = (5 * green[0] + 2 * green[1]) / 7.0f; // bit code 011
green[4] = (4 * green[0] + 3 * green[1]) / 7.0f; // bit code 100
green[5] = (3 * green[0] + 4 * green[1]) / 7.0f; // bit code 101
green[6] = (2 * green[0] + 5 * green[1]) / 7.0f; // bit code 110
green[7] = (1 * green[0] + 6 * green[1]) / 7.0f; // bit code 111
}
else
{
// 4 interpolated color values
green[2] = (4 * green[0] + 1 * green[1]) / 5.0f; // bit code 010
green[3] = (3 * green[0] + 2 * green[1]) / 5.0f; // bit code 011
green[4] = (2 * green[0] + 3 * green[1]) / 5.0f; // bit code 100
green[5] = (1 * green[0] + 4 * green[1]) / 5.0f; // bit code 101
green[6] = 0.0f; // bit code 110
green[7] = 1.0f; // bit code 111
}
uint8* bitIndices = blockInput + 2;
uint32 redRow0 = (((uint32)bitIndices[2]) << 16) | (((uint32)bitIndices[1]) << 8) | (((uint32)bitIndices[0]) << 0);
uint32 redRow1 = (((uint32)bitIndices[5]) << 16) | (((uint32)bitIndices[4]) << 8) | (((uint32)bitIndices[3]) << 0);
bitIndices = blockInput + 8 + 2;
uint32 greenRow0 = (((uint32)bitIndices[2]) << 16) | (((uint32)bitIndices[1]) << 8) | (((uint32)bitIndices[0]) << 0);
uint32 greenRow1 = (((uint32)bitIndices[5]) << 16) | (((uint32)bitIndices[4]) << 8) | (((uint32)bitIndices[3]) << 0);
uint8 pRed[16];
uint8 pGreen[16];
for (sint32 i = 0; i < 8; i++)
{
pRed[i] = (redRow0 >> (i * 3)) & 7;
pRed[i + 8] = (redRow1 >> (i * 3)) & 7;
pGreen[i] = (greenRow0 >> (i * 3)) & 7;
pGreen[i + 8] = (greenRow1 >> (i * 3)) & 7;
}
float* pixelOutput = rgOutput;
for (sint32 py = 0; py < 4; py++)
{
for (sint32 px = 0; px < 4; px++)
{
float c = red[pRed[px + py * 4]];
*pixelOutput = c;
pixelOutput++;
c = green[pGreen[px + py * 4]];
*pixelOutput = c;
pixelOutput++;
}
}
}
void decodeBC5Block_SNORM(uint8* blockStorage, float* rgOutput) // todo - can merge this with the UNORM implementation by using a template?
{
uint8* blockInput = (uint8*)blockStorage;
float red[8];
float green[8];
red[0] = ((float)(*(sint8*)(blockInput + 0)) + 128.0f) / 255.0f;
red[1] = ((float)(*(sint8*)(blockInput + 1)) + 128.0f) / 255.0f;
red[0] = (red[0] * 2.0f - 1.0f);
red[1] = (red[1] * 2.0f - 1.0f);
if (red[0] > red[1])
{
// 6 interpolated color values
red[2] = (6 * red[0] + 1 * red[1]) / 7.0f; // bit code 010
red[3] = (5 * red[0] + 2 * red[1]) / 7.0f; // bit code 011
red[4] = (4 * red[0] + 3 * red[1]) / 7.0f; // bit code 100
red[5] = (3 * red[0] + 4 * red[1]) / 7.0f; // bit code 101
red[6] = (2 * red[0] + 5 * red[1]) / 7.0f; // bit code 110
red[7] = (1 * red[0] + 6 * red[1]) / 7.0f; // bit code 111
}
else
{
// 4 interpolated color values
red[2] = (4 * red[0] + 1 * red[1]) / 5.0f; // bit code 010
red[3] = (3 * red[0] + 2 * red[1]) / 5.0f; // bit code 011
red[4] = (2 * red[0] + 3 * red[1]) / 5.0f; // bit code 100
red[5] = (1 * red[0] + 4 * red[1]) / 5.0f; // bit code 101
red[6] = -1.0f; // bit code 110
red[7] = 1.0f; // bit code 111
}
green[0] = ((float)(*(sint8*)(blockInput + 8)) + 128.0f) / 255.0f;
green[1] = ((float)(*(sint8*)(blockInput + 9)) + 128.0f) / 255.0f;
green[0] = (green[0] * 2.0f - 1.0f);
green[1] = (green[1] * 2.0f - 1.0f);
if (green[0] > green[1])
{
// 6 interpolated color values
green[2] = (6 * green[0] + 1 * green[1]) / 7.0f; // bit code 010
green[3] = (5 * green[0] + 2 * green[1]) / 7.0f; // bit code 011
green[4] = (4 * green[0] + 3 * green[1]) / 7.0f; // bit code 100
green[5] = (3 * green[0] + 4 * green[1]) / 7.0f; // bit code 101
green[6] = (2 * green[0] + 5 * green[1]) / 7.0f; // bit code 110
green[7] = (1 * green[0] + 6 * green[1]) / 7.0f; // bit code 111
}
else
{
// 4 interpolated color values
green[2] = (4 * green[0] + 1 * green[1]) / 5.0f; // bit code 010
green[3] = (3 * green[0] + 2 * green[1]) / 5.0f; // bit code 011
green[4] = (2 * green[0] + 3 * green[1]) / 5.0f; // bit code 100
green[5] = (1 * green[0] + 4 * green[1]) / 5.0f; // bit code 101
green[6] = -1.0f; // bit code 110
green[7] = 1.0f; // bit code 111
}
uint8* bitIndices = blockInput + 2;
uint32 redRow0 = (((uint32)bitIndices[2]) << 16) | (((uint32)bitIndices[1]) << 8) | (((uint32)bitIndices[0]) << 0);
uint32 redRow1 = (((uint32)bitIndices[5]) << 16) | (((uint32)bitIndices[4]) << 8) | (((uint32)bitIndices[3]) << 0);
bitIndices = blockInput + 8 + 2;
uint32 greenRow0 = (((uint32)bitIndices[2]) << 16) | (((uint32)bitIndices[1]) << 8) | (((uint32)bitIndices[0]) << 0);
uint32 greenRow1 = (((uint32)bitIndices[5]) << 16) | (((uint32)bitIndices[4]) << 8) | (((uint32)bitIndices[3]) << 0);
uint8 pRed[16];
uint8 pGreen[16];
for (sint32 i = 0; i < 8; i++)
{
pRed[i] = (redRow0 >> (i * 3)) & 7;
pRed[i + 8] = (redRow1 >> (i * 3)) & 7;
pGreen[i] = (greenRow0 >> (i * 3)) & 7;
pGreen[i + 8] = (greenRow1 >> (i * 3)) & 7;
}
for (sint32 py = 0; py < 4; py++)
{
float* pixelOutput = rgOutput + (py * 4) * 2;
for (sint32 px = 0; px < 4; px++)
{
float c = red[pRed[px + py * 4]];
pixelOutput[0] = c;
c = green[pGreen[px + py * 4]];
pixelOutput[1] = c;
pixelOutput += 2;
}
}
}
void LatteTextureLoader_loadTextureDataIntoSlice(LatteTexture* hostTexture, sint32 width, sint32 height, sint32 depth, sint32 mipLevels, void* pixelData, sint32 sliceIndex, sint32 mipIndex, uint32 compressedImageSize)
{
if (mipIndex == 0)
{
cemu_assert_debug(width == hostTexture->width);
cemu_assert_debug(height == hostTexture->height);
cemu_assert_debug(depth == hostTexture->depth);
}
cemu_assert_debug(mipLevels == hostTexture->mipLevels);
if (hostTexture->overwriteInfo.hasResolutionOverwrite || hostTexture->overwriteInfo.hasFormatOverwrite)
{
// todo - ideally, we should scale/convert the data to the new format and resolution
g_renderer->texture_clearSlice(hostTexture, sliceIndex, mipIndex);
}
else
{
g_renderer->texture_loadSlice(hostTexture, width, height, depth, pixelData, sliceIndex, mipIndex, compressedImageSize);
}
}
void LatteTextureLoader_UpdateTextureSliceData(LatteTexture* tex, uint32 sliceIndex, uint32 mipIndex, MPTR physImagePtr, MPTR physMipPtr, Latte::E_DIM dim, uint32 width, uint32 height, uint32 depth, uint32 mipLevels, uint32 pitch, Latte::E_HWTILEMODE tileMode, uint32 swizzle, bool dumpTex)
{
LatteTextureLoaderCtx textureLoader = { 0 };
Latte::E_GX2SURFFMT format = tex->format;
LatteTextureLoader_begin(&textureLoader, sliceIndex, mipIndex, physImagePtr, physMipPtr, format, dim, width, height, depth, mipLevels, pitch, tileMode, swizzle);
// enable texture dumping
textureLoader.dump = ActiveSettings::DumpTexturesEnabled();
if (textureLoader.dump)
{
uint32 dumpSize = (((textureLoader.width + 4)&~4) * ((textureLoader.height + 4)&~4)) * 4;
textureLoader.dumpRGBA = (uint8*)malloc(dumpSize);
memset(textureLoader.dumpRGBA, 0x00, dumpSize);
}
// query texture decoder from renderer
TextureDecoder* texDecoder = nullptr;
texDecoder = g_renderer->texture_chooseDecodedFormat(format, tex->isDepth, dim, width, height);
if (tex->isDataDefined == false)
{
tex->AllocateOnHost();
tex->isDataDefined = true;
// if decoder is not set then clear texture
// on Vulkan this is used to make sure the texture is no longer in UNDEFINED layout
if (!texDecoder)
{
if(tex->isDepth)
g_renderer->texture_clearDepthSlice(tex, 0, 0, true, tex->hasStencil, 0.0f, 0);
else
g_renderer->texture_clearColorSlice(tex, 0, 0, 0.0f, 0.0f, 0.0f, 0.0f);
}
}
if (texDecoder == nullptr)
return;
textureLoader.decodedTexelCountX = texDecoder->getTexelCountX(&textureLoader);
textureLoader.decodedTexelCountY = texDecoder->getTexelCountY(&textureLoader);
// allocate memory for decoded texture
uint32 imageSize = texDecoder->calculateImageSize(&textureLoader);
uint8* pixelData = (uint8*)g_renderer->texture_acquireTextureUploadBuffer(imageSize);
// decode texture (if data is required)
#ifdef BENCHMARK_TEXTURE_DECODING
LARGE_INTEGER benchmark_begin;
LARGE_INTEGER benchmark_end;
LARGE_INTEGER benchmark_freq;
QueryPerformanceCounter(&benchmark_begin);
#endif
if (tex->overwriteInfo.hasFormatOverwrite == false && tex->overwriteInfo.hasResolutionOverwrite == false)
{
texDecoder->decode(&textureLoader, pixelData);
}
#ifdef BENCHMARK_TEXTURE_DECODING
QueryPerformanceCounter(&benchmark_end);
QueryPerformanceFrequency(&benchmark_freq);
uint64 benchmarkResultMicroSeconds = (benchmark_end.QuadPart - benchmark_begin.QuadPart) * 1000000ULL / benchmark_freq.QuadPart;
textureDecodeBenchmark_perFormatSum[(int)tex->format & 0x3F] += benchmarkResultMicroSeconds;
textureDecodeBenchmark_totalSum += benchmarkResultMicroSeconds;
cemuLog_log(LogType::Force, "TexDecode {:04}x{:04}x{:04} Fmt {:04x} Dim {} TileMode {:02x} Took {:03}.{:03}ms Sum(format) {:06}ms Sum(total) {:06}ms", textureLoader.width, textureLoader.height, textureLoader.surfaceInfoDepth, (int)tex->format, (int)tex->dim, textureLoader.tileMode, (uint32)(benchmarkResultMicroSeconds / 1000ULL), (uint32)(benchmarkResultMicroSeconds % 1000ULL), (uint32)(textureDecodeBenchmark_perFormatSum[tex->gx2Format & 0x3F] / 1000ULL), (uint32)(textureDecodeBenchmark_totalSum / 1000ULL));
#endif
// convert texture to RGBA when dumping is enabled
if (textureLoader.dump)
{
for (sint32 y = 0; y < textureLoader.height; y++)
{
sint32 pixelOffset = (y * textureLoader.width) * 4;
uint8* pixelOutput = textureLoader.dumpRGBA + pixelOffset;
for (sint32 x = 0; x < textureLoader.width; x++)
{
uint8* blockData = LatteTextureLoader_GetInput(&textureLoader, x, y);
texDecoder->decodePixelToRGBA(blockData, pixelOutput, x % textureLoader.stepX, y % textureLoader.stepY);
pixelOutput += 4;
}
}
}
// update texture data offsets and hashes
// this has to be done before the texture data is decoded & uploaded to prevent a race condition where updates during upload are missed
if (mipIndex == 0 || (tex->texDataPtrLow == 0 && tex->texDataPtrHigh == 0))
{
tex->texDataPtrLow = physImagePtr + textureLoader.minOffsetOutdated; // always zero
tex->texDataPtrHigh = physImagePtr + textureLoader.maxOffsetOutdated; // currently set to surface size
LatteTC_ResetTextureChangeTracker(tex, true);
}
// load slice
//debug_printf("[Load Slice] Addr: %08x MIP: %02d Slice: %02d Res %04x/%04x Texel Res %04x/%04x Fmt %04x Tm %d\n", textureLoader.physAddress, mipIndex, sliceIndex, textureLoader.width, textureLoader.height, textureLoader.texelCountX, textureLoader.texelCountY, (int)format, tileMode);
LatteTextureLoader_loadTextureDataIntoSlice(tex, textureLoader.width, textureLoader.height, depth, mipLevels, pixelData, sliceIndex, mipIndex, imageSize);
// write texture dump
if (textureLoader.dump)
{
wchar_t path[1024];
swprintf(path, 1024, L"dump/textures/%08x_fmt%04x_slice%d_mip%02d_%dx%d_tm%02d.tga", physImagePtr, (uint32)tex->format, sliceIndex, mipIndex, tex->width, tex->height, tileMode);
tga_write_rgba(path, textureLoader.width, textureLoader.height, textureLoader.dumpRGBA);
free(textureLoader.dumpRGBA);
}
// clean up
g_renderer->texture_releaseTextureUploadBuffer(pixelData);
catchOpenGLError();
}
template<typename copyType>
void optimizedLinearReadbackWriteLoop(LatteTextureLoaderCtx* textureLoader, uint8* linearPixelData)
{
uint32 pitch = textureLoader->width;
// optimized for linear
for (sint32 y = 0; y < textureLoader->height; y++)
{
sint32 yc = y;
sint32 pixelOffset = yc * pitch;
copyType* rowPixelData = (copyType*)(linearPixelData + pixelOffset * sizeof(copyType));
copyType* blockData = (copyType*)LatteTextureLoader_getInputLinearOptimized_(textureLoader, 0, y, 1, 1, sizeof(copyType) * 8, 0, 1, 0, textureLoader->pitch, textureLoader->height);
if constexpr (sizeof(copyType) == 4)
{
memcpy_dwords(blockData, rowPixelData, textureLoader->width);
}
else
{
for (sint32 x = 0; x < textureLoader->width; x++)
{
*blockData = *rowPixelData;
rowPixelData++;
blockData++;
}
}
}
}
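// The element width is picked per format at the call sites below: uint32 rows for R8_G8_B8_A8,
// uint64 for R16_G16_B16_A16_UNORM and uint16 for the two-byte formats; only the 4-byte case
// takes the memcpy_dwords fast path, everything else falls back to the per-texel copy loop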
void LatteTextureLoader_writeReadbackTextureToMemory(LatteTextureDefinition* textureData, uint32 sliceIndex, uint32 mipIndex, uint8* linearPixelData)
{
LatteTextureLoaderCtx textureLoader = { 0 };
LatteTextureLoader_begin(&textureLoader, sliceIndex, mipIndex, textureData->physAddress, textureData->physMipAddress, textureData->format, textureData->dim, textureData->width, textureData->height, textureData->depth, textureData->mipLevels, textureData->pitch, textureData->tileMode, textureData->swizzle);
#ifdef CEMU_DEBUG_ASSERT
if (textureData->depth != 1)
cemuLog_log(LogType::Force, "_writeReadbackTextureToMemory(): Texture has multiple slices (not supported)");
#endif
if (textureLoader.physAddress == MPTR_NULL)
{
cemuLog_log(LogType::Force, "_writeReadbackTextureToMemory(): Texture has invalid address");
return;
}
cemuLog_log(LogType::TextureReadback, "[TextureReadback-Write] PhysAddr {:08x} Res {}x{} Fmt {} Slice {} Mip {}", textureData->physAddress, textureData->width, textureData->height, textureData->format, sliceIndex, mipIndex);
if (textureData->tileMode == Latte::E_HWTILEMODE::TM_LINEAR_ALIGNED)
{
uint32 pitch = textureLoader.width;
if (textureData->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_UNORM ||
textureData->format == Latte::E_GX2SURFFMT::R8_G8_B8_A8_SRGB)
{
optimizedLinearReadbackWriteLoop<uint32>(&textureLoader, linearPixelData);
}
else if (textureData->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
{
optimizedLinearReadbackWriteLoop<uint64>(&textureLoader, linearPixelData);
}
else if (textureData->format == Latte::E_GX2SURFFMT::R32_G32_B32_A32_FLOAT)
{
for (sint32 y = 0; y < textureLoader.height; y += textureLoader.stepY)
{
sint32 yc = y;
sint32 pixelOffset = (0 + yc * pitch) * 16;
for (sint32 x = 0; x < textureLoader.width; x += textureLoader.stepX)
{
uint8* blockData = LatteTextureLoader_getInputLinearOptimized(&textureLoader, x, y);
(*(uint32*)(blockData + 0)) = *(uint32*)(linearPixelData + pixelOffset + 0);
(*(uint32*)(blockData + 4)) = *(uint32*)(linearPixelData + pixelOffset + 4);
(*(uint32*)(blockData + 8)) = *(uint32*)(linearPixelData + pixelOffset + 8);
(*(uint32*)(blockData + 12)) = *(uint32*)(linearPixelData + pixelOffset + 12);
pixelOffset += 16;
}
}
}
else if (textureData->format == Latte::E_GX2SURFFMT::R32_FLOAT)
{
for (sint32 y = 0; y < textureLoader.height; y += textureLoader.stepY)
{
sint32 yc = y;
for (sint32 x = 0; x < textureLoader.width; x += textureLoader.stepX)
{
uint8* blockData = LatteTextureLoader_getInputLinearOptimized(&textureLoader, x, y);
sint32 pixelOffset = (x + yc * pitch) * 4;
(*(uint32*)(blockData + 0)) = *(uint32*)(linearPixelData + pixelOffset + 0);
}
}
}
else if (textureData->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_FLOAT)
{
for (sint32 y = 0; y < textureLoader.height; y += textureLoader.stepY)
{
sint32 yc = y;
for (sint32 x = 0; x < textureLoader.width; x += textureLoader.stepX)
{
uint8* blockData = LatteTextureLoader_getInputLinearOptimized(&textureLoader, x, y);
sint32 pixelOffset = (x + yc * pitch) * 8;
(*(uint32*)(blockData + 0)) = *(uint32*)(linearPixelData + pixelOffset + 0);
(*(uint32*)(blockData + 4)) = *(uint32*)(linearPixelData + pixelOffset + 4);
}
}
}
else if (textureData->format == Latte::E_GX2SURFFMT::R8_G8_UNORM)
{
optimizedLinearReadbackWriteLoop<uint16>(&textureLoader, linearPixelData);
}
else if (textureData->format == Latte::E_GX2SURFFMT::R16_G16_B16_A16_UNORM)
{
cemu_assert_unimplemented();
}
else if (textureData->format == Latte::E_GX2SURFFMT::R16_UNORM)
{
optimizedLinearReadbackWriteLoop<uint16>(&textureLoader, linearPixelData);
}
else
{
cemuLog_logDebug(LogType::Force, "Linear texture readback unsupported for format 0x{:04x}", (uint32)textureData->format);
debugBreakpoint();
}
return;
}
// generic and slow decode loops
Latte::E_HWSURFFMT hwFormat = Latte::GetHWFormat(textureData->format);
if (hwFormat == Latte::E_HWSURFFMT::HWFMT_8_8_8_8)
{
// used in Bayonetta 2
for (sint32 y = 0; y < textureLoader.height; y++)
{
uint8* pixelInput = linearPixelData + (y * textureLoader.width) * 4;
for (sint32 x = 0; x < textureLoader.width; x++)
{
uint8* outputData = LatteTextureLoader_GetInput(&textureLoader, x, y);
*(uint32*)(outputData + 0) = *(uint32*)pixelInput;
pixelInput += 4;
}
}
}
else if (hwFormat == Latte::E_HWSURFFMT::HWFMT_32_FLOAT)
{
// required by Wind Waker for direct access to depth buffer
// Bayonetta 2 also uses this but it converts the depth buffer to a color texture first
for (sint32 y = 0; y < textureLoader.height; y++)
{
uint8* pixelInput = linearPixelData + (y * textureLoader.width) * 4;
for (sint32 x = 0; x < textureLoader.width; x++)
{
uint8* outputData = LatteTextureLoader_GetInput(&textureLoader, x, y);
*(uint32*)(outputData + 0) = *(uint32*)pixelInput;
pixelInput += 4;
}
}
}
else
{
cemuLog_logDebug(LogType::Force, "Texture readback unsupported format {:04x} for tileMode 0x{:02x}", (uint32)textureData->format, textureData->tileMode);
}
}
void LatteTextureLoader_estimateAccessedDataRange(LatteTexture* texture, sint32 sliceIndex, sint32 mipIndex, uint32& addrStart, uint32& addrEnd)
{
LatteTextureLoaderCtx textureLoader = { 0 };
LatteTextureLoader_begin(&textureLoader, sliceIndex, mipIndex, texture->physAddress, texture->physMipAddress, texture->format, texture->dim, texture->width, texture->height, texture->depth, texture->mipLevels, texture->pitch, texture->tileMode, texture->swizzle);
cemu_assert_debug(textureLoader.width > 0);
cemu_assert_debug(textureLoader.height > 0);
// estimate data range by checking addresses of corner pixels
// this isn't very reliable, find a better solution
uint32 estimatedMinAddr = 0xFFFFFFFF;
uint32 estimatedMaxAddr = 0x00000000;
uint32 tempAddr;
tempAddr = memory_getVirtualOffsetFromPointer(LatteTextureLoader_GetInput(&textureLoader, 0, 0));
estimatedMinAddr = std::min(estimatedMinAddr, tempAddr);
estimatedMaxAddr = std::max(estimatedMaxAddr, tempAddr);
tempAddr = memory_getVirtualOffsetFromPointer(LatteTextureLoader_GetInput(&textureLoader, textureLoader.width - 1, 0));
estimatedMinAddr = std::min(estimatedMinAddr, tempAddr);
estimatedMaxAddr = std::max(estimatedMaxAddr, tempAddr);
tempAddr = memory_getVirtualOffsetFromPointer(LatteTextureLoader_GetInput(&textureLoader, 0, textureLoader.height - 1));
estimatedMinAddr = std::min(estimatedMinAddr, tempAddr);
estimatedMaxAddr = std::max(estimatedMaxAddr, tempAddr);
tempAddr = memory_getVirtualOffsetFromPointer(LatteTextureLoader_GetInput(&textureLoader, textureLoader.width - 1, textureLoader.height - 1));
estimatedMinAddr = std::min(estimatedMinAddr, tempAddr);
estimatedMaxAddr = std::max(estimatedMaxAddr, tempAddr);
addrStart = estimatedMinAddr;
addrEnd = estimatedMaxAddr;
}
| 34,986 | C++ | .cpp | 808 | 40.607673 | 515 | 0.662922 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,282 | LatteGSCopyShaderParser.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteGSCopyShaderParser.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
void LatteGSCopyShaderParser_addFetchedParam(LatteParsedGSCopyShader* shaderContext, uint32 offset, uint32 gprIndex)
{
if( shaderContext->numParam >= GPU7_COPY_SHADER_MAX_PARAMS )
{
debug_printf("Copy shader: Too many fetched parameters\n");
cemu_assert_suspicious();
return;
}
shaderContext->paramMapping[shaderContext->numParam].exportParam = 0xFF;
shaderContext->paramMapping[shaderContext->numParam].offset = offset;
shaderContext->paramMapping[shaderContext->numParam].gprIndex = gprIndex;
shaderContext->numParam++;
}
void LatteGSCopyShaderParser_assignRegisterParameterOutput(LatteParsedGSCopyShader* shaderContext, uint32 gprIndex, uint32 exportType, uint32 exportParam)
{
// scan backwards to catch the most recently added entry in case a register has multiple entries
for(sint32 i=shaderContext->numParam-1; i>=0; i--)
{
if( shaderContext->paramMapping[i].gprIndex == gprIndex )
{
if( shaderContext->paramMapping[i].exportParam != 0xFF )
cemu_assert_debug(false);
if( exportParam >= 0x100 )
cemu_assert_debug(false);
shaderContext->paramMapping[i].exportType = (uint8)exportType;
shaderContext->paramMapping[i].exportParam = (uint8)exportParam;
return;
}
}
cemu_assert_debug(false); // register is exported but never initialized?
}
void LatteGSCopyShaderParser_addStreamWrite(LatteParsedGSCopyShader* shaderContext, uint32 bufferIndex, uint32 exportSourceGPR, uint32 exportArrayBase, uint32 memWriteArraySize, uint32 memWriteCompMask)
{
// get info about current state of GPR
for (sint32 i = shaderContext->numParam - 1; i >= 0; i--)
{
if (shaderContext->paramMapping[i].gprIndex == exportSourceGPR)
{
LatteGSCopyShaderStreamWrite_t streamWrite;
streamWrite.bufferIndex = (uint8)bufferIndex;
streamWrite.offset = shaderContext->paramMapping[i].offset;
streamWrite.exportArrayBase = exportArrayBase;
streamWrite.memWriteArraySize = memWriteArraySize;
streamWrite.memWriteCompMask = memWriteCompMask;
shaderContext->list_streamWrites.push_back(streamWrite);
return;
}
}
cemu_assert_debug(false); // GPR not initialized?
}
bool LatteGSCopyShaderParser_getExportTypeByOffset(LatteParsedGSCopyShader* shaderContext, uint32 offset, uint32* exportType, uint32* exportParam)
{
for(sint32 i=0; i<shaderContext->numParam; i++)
{
if( shaderContext->paramMapping[i].offset == offset )
{
*exportType = shaderContext->paramMapping[i].exportType;
*exportParam = shaderContext->paramMapping[i].exportParam;
return true;
}
}
return false;
}
bool LatteGSCopyShaderParser_parseClauseVtx(LatteParsedGSCopyShader* shaderContext, uint8* programData, uint32 programSize, uint32 addr, uint32 count)
{
for(uint32 i=0; i<count; i++)
{
uint32 instructionAddr = addr*2+i*4;
uint32 word0 = *(uint32*)(programData+instructionAddr*4+0);
uint32 word1 = *(uint32*)(programData+instructionAddr*4+4);
uint32 word2 = *(uint32*)(programData+instructionAddr*4+8);
uint32 word3 = *(uint32*)(programData+instructionAddr*4+12);
uint32 inst0_4 = (word0>>0)&0x1F;
if( inst0_4 == GPU7_TEX_INST_VFETCH )
{
// data fetch
uint32 fetchType = (word0>>5)&3;
uint32 bufferId = (word0>>8)&0xFF;
uint32 offset = (word2>>0)&0xFFFF;
uint32 endianSwap = (word2>>16)&0x3;
uint32 constNoStride = (word2>>18)&0x1;
uint32 srcGpr = (word0>>16)&0x7F;
uint32 srcRel = (word0>>23)&1;
if( srcRel != 0 )
debugBreakpoint();
uint32 destGpr = (word1>>0)&0x7F;
uint32 destRel = (word1>>7)&1;
if( destRel != 0 )
debugBreakpoint();
uint32 dstSelX = (word1>>9)&0x7;
uint32 dstSelY = (word1>>12)&0x7;
uint32 dstSelZ = (word1>>15)&0x7;
uint32 dstSelW = (word1>>18)&0x7;
uint32 srcSelX = (word0>>24)&0x3;
uint32 srcSelY = 0;
uint32 srcSelZ = 0;
uint32 srcSelW = 0;
if( bufferId != 0x9F )
{
debugBreakpoint(); // data not fetched from GS ring buffer
return false;
}
if( endianSwap != 0 )
debugBreakpoint();
if( fetchType != 2 )
debugBreakpoint();
if( srcSelX != 0 || srcGpr != 0 )
debugBreakpoint();
if( dstSelX != 0 || dstSelY != 1 || dstSelZ != 2 || dstSelW != 3 )
debugBreakpoint();
// remember imported parameter
LatteGSCopyShaderParser_addFetchedParam(shaderContext, offset, destGpr);
}
else
{
return false;
}
}
return true;
}
LatteParsedGSCopyShader* LatteGSCopyShaderParser_parse(uint8* programData, uint32 programSize)
{
cemu_assert_debug((programSize & 3) == 0);
LatteParsedGSCopyShader* shaderContext = new LatteParsedGSCopyShader();
shaderContext->numParam = 0;
// parse control flow instructions
for(uint32 i=0; i<programSize/8; i++)
{
uint32 cfWord0 = *(uint32*)(programData+i*8+0);
uint32 cfWord1 = *(uint32*)(programData+i*8+4);
uint32 cf_inst23_7 = (cfWord1>>23)&0x7F;
// check the bigger opcode fields first
if( cf_inst23_7 < 0x40 ) // at 0x40 the bits overlap with the ALU instruction encoding
{
bool isEndOfProgram = ((cfWord1>>21)&1)!=0;
uint32 addr = cfWord0&0xFFFFFFFF;
uint32 count = (cfWord1>>10)&7;
if( ((cfWord1>>19)&1) != 0 )
count |= 0x8;
count++;
if( cf_inst23_7 == GPU7_CF_INST_CALL_FS )
{
// nop
}
else if( cf_inst23_7 == GPU7_CF_INST_NOP )
{
// nop
if( ((cfWord1>>0)&7) != 0 )
debugBreakpoint(); // pop count is not zero,
}
else if( cf_inst23_7 == GPU7_CF_INST_EXPORT || cf_inst23_7 == GPU7_CF_INST_EXPORT_DONE )
{
// export
uint32 edType = (cfWord0>>13)&0x3;
uint32 edIndexGpr = (cfWord0>>23)&0x7F;
uint32 edRWRel = (cfWord0>>22)&1;
if( edRWRel != 0 || edIndexGpr != 0 )
debugBreakpoint();
// set export component selection
uint8 exportComponentSel[4];
exportComponentSel[0] = (cfWord1>>0)&0x7;
exportComponentSel[1] = (cfWord1>>3)&0x7;
exportComponentSel[2] = (cfWord1>>6)&0x7;
exportComponentSel[3] = (cfWord1>>9)&0x7;
// set export array base, index and burstcount (export field)
uint32 exportArrayBase = (cfWord0>>0)&0x1FFF;
uint32 exportBurstCount = (cfWord1>>17)&0xF;
// set export source GPR and type
uint32 exportSourceGPR = (cfWord0>>15)&0x7F;
uint32 exportType = edType;
if (exportArrayBase == GPU7_DECOMPILER_CF_EXPORT_BASE_POSITION && exportComponentSel[0] == 4 && exportComponentSel[1] == 4 && exportComponentSel[2] == 4 && exportComponentSel[3] == 4)
{
// aka gl_Position = vec4(0.0)
// this instruction form is generated when the original shader doesn't assign gl_Position a value?
}
else if (exportComponentSel[0] != 0 || exportComponentSel[1] != 1 || exportComponentSel[2] != 2 || exportComponentSel[3] != 3)
{
cemu_assert_debug(false);
}
else
{
// register as param
for (uint32 f = 0; f < exportBurstCount + 1; f++)
{
LatteGSCopyShaderParser_assignRegisterParameterOutput(shaderContext, exportSourceGPR + f, exportType, exportArrayBase + f);
}
}
}
else if( cf_inst23_7 == GPU7_CF_INST_VTX )
{
LatteGSCopyShaderParser_parseClauseVtx(shaderContext, programData, programSize, addr, count);
}
else if (cf_inst23_7 == GPU7_CF_INST_MEM_STREAM0_WRITE ||
cf_inst23_7 == GPU7_CF_INST_MEM_STREAM1_WRITE )
{
// streamout
uint32 bufferIndex;
if (cf_inst23_7 == GPU7_CF_INST_MEM_STREAM0_WRITE)
bufferIndex = 0;
else if (cf_inst23_7 == GPU7_CF_INST_MEM_STREAM1_WRITE)
bufferIndex = 1;
else
cemu_assert_debug(false);
uint32 exportArrayBase = (cfWord0 >> 0) & 0x1FFF;
uint32 memWriteArraySize = (cfWord1 >> 0) & 0xFFF;
uint32 memWriteCompMask = (cfWord1 >> 12) & 0xF;
uint32 exportSourceGPR = (cfWord0 >> 15) & 0x7F;
LatteGSCopyShaderParser_addStreamWrite(shaderContext, bufferIndex, exportSourceGPR, exportArrayBase, memWriteArraySize, memWriteCompMask);
}
else
{
cemuLog_log(LogType::Force, "Copyshader: Unknown 23_7 clause 0x{:x} found", cf_inst23_7);
cemu_assert_debug(false);
}
if( isEndOfProgram )
{
break;
}
}
else
{
// ALU clauses not supported
debug_printf("Copyshader has ALU clause?\n");
cemu_assert_debug(false);
delete shaderContext;
return nullptr;
}
}
// verify if all registers are exported
for(sint32 i=0; i<shaderContext->numParam; i++)
{
if( shaderContext->paramMapping[i].exportParam == 0xFF )
debugBreakpoint();
}
return shaderContext;
}
| 8,526 | C++ | .cpp | 239 | 32.004184 | 202 | 0.709557 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

23,283 | FetchShader.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/FetchShader.cpp |
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/LatteShaderAssembly.h"
#include "Cafe/HW/Latte/ISA/RegDefines.h"
#include "Cafe/OS/libs/gx2/GX2.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteDraw.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompilerInstructions.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cafe/HW/Latte/ISA/LatteInstructions.h"
#include "util/containers/LookupTableL3.h"
#include "util/helpers/fspinlock.h"
#include <openssl/sha.h> /* SHA1_DIGEST_LENGTH */
#include <openssl/evp.h> /* EVP_Digest */
uint32 LatteShaderRecompiler_getAttributeSize(LatteParsedFetchShaderAttribute_t* attrib)
{
if (attrib->format == FMT_32_32_32_32 || attrib->format == FMT_32_32_32_32_FLOAT)
return 4 * 4;
else if (attrib->format == FMT_32_32_32 || attrib->format == FMT_32_32_32_FLOAT)
return 3 * 4;
else if (attrib->format == FMT_32_32 || attrib->format == FMT_32_32_FLOAT)
return 2 * 4;
else if (attrib->format == FMT_32 || attrib->format == FMT_32_FLOAT)
return 1 * 4;
else if (attrib->format == FMT_16_16_16_16 || attrib->format == FMT_16_16_16_16_FLOAT)
return 4 * 2;
else if (attrib->format == FMT_16_16 || attrib->format == FMT_16_16_FLOAT)
return 2 * 2;
else if (attrib->format == FMT_16 || attrib->format == FMT_16_FLOAT)
return 1 * 2;
else if (attrib->format == FMT_8_8_8_8)
return 4 * 1;
else if (attrib->format == FMT_8_8)
return 2 * 1;
else if (attrib->format == FMT_8)
return 1 * 1;
else if (attrib->format == FMT_2_10_10_10)
return 4;
else
cemu_assert_unimplemented();
return 0;
}
uint32 LatteShaderRecompiler_getAttributeAlignment(LatteParsedFetchShaderAttribute_t* attrib)
{
if (attrib->format == FMT_32_32_32_32 || attrib->format == FMT_32_32_32_32_FLOAT)
return 4;
else if (attrib->format == FMT_32_32_32 || attrib->format == FMT_32_32_32_FLOAT)
return 4;
else if (attrib->format == FMT_32_32 || attrib->format == FMT_32_32_FLOAT)
return 4;
else if (attrib->format == FMT_32 || attrib->format == FMT_32_FLOAT)
return 4;
else if (attrib->format == FMT_16_16_16_16 || attrib->format == FMT_16_16_16_16_FLOAT)
return 2;
else if (attrib->format == FMT_16_16 || attrib->format == FMT_16_16_FLOAT)
return 2;
else if (attrib->format == FMT_16 || attrib->format == FMT_16_FLOAT)
return 2;
else if (attrib->format == FMT_8_8_8_8)
return 1;
else if (attrib->format == FMT_8_8)
return 1;
else if (attrib->format == FMT_8)
return 1;
else if (attrib->format == FMT_2_10_10_10)
return 4;
else
cemu_assert_unimplemented();
return 4;
}
void LatteShader_calculateFSKey(LatteFetchShader* fetchShader)
{
uint64 key = 0;
for (sint32 g = 0; g < fetchShader->bufferGroups.size(); g++)
{
LatteParsedFetchShaderBufferGroup_t& group = fetchShader->bufferGroups[g];
for (sint32 f = 0; f < group.attribCount; f++)
{
LatteParsedFetchShaderAttribute_t* attrib = group.attrib + f;
key += (uint64)attrib->endianSwap;
key = std::rotl<uint64>(key, 3);
key += (uint64)attrib->nfa;
key = std::rotl<uint64>(key, 3);
key += (uint64)(attrib->isSigned?1:0);
key = std::rotl<uint64>(key, 1);
key += (uint64)attrib->format;
key = std::rotl<uint64>(key, 7);
key += (uint64)attrib->fetchType;
key = std::rotl<uint64>(key, 8);
key += (uint64)attrib->ds[0];
key = std::rotl<uint64>(key, 2);
key += (uint64)attrib->ds[1];
key = std::rotl<uint64>(key, 2);
key += (uint64)attrib->ds[2];
key = std::rotl<uint64>(key, 2);
key += (uint64)attrib->ds[3];
key = std::rotl<uint64>(key, 2);
key += (uint64)(attrib->aluDivisor+1);
key = std::rotl<uint64>(key, 2);
key += (uint64)attrib->attributeBufferIndex;
key = std::rotl<uint64>(key, 8);
key += (uint64)attrib->semanticId;
key = std::rotl<uint64>(key, 8);
key += (uint64)(attrib->offset & 3);
key = std::rotl<uint64>(key, 2);
}
}
// todo - also hash invalid buffer groups?
fetchShader->key = key;
}
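// The key above is a rolling add+rotate over every attribute property, so fetch shaders that differ in
// format, divisor, buffer index, semantic, destination swizzle or offset alignment get different
// (though not guaranteed collision-free) keys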
uint32 LatteParsedFetchShaderBufferGroup_t::getCurrentBufferStride(uint32* contextRegister) const
{
uint32 bufferIndex = this->attributeBufferIndex;
uint32 bufferBaseRegisterIndex = mmSQ_VTX_ATTRIBUTE_BLOCK_START + bufferIndex * 7;
uint32 bufferStride = (contextRegister[bufferBaseRegisterIndex + 2] >> 11) & 0xFFFF;
return bufferStride;
}
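// note: each attribute buffer appears to occupy 7 consecutive context registers starting at
// mmSQ_VTX_ATTRIBUTE_BLOCK_START; word 2 of that block carries the stride in bits 11..26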
void LatteFetchShader::CalculateFetchShaderVkHash()
{
// calculate SHA1 of all states that are part of the Vulkan graphics pipeline
EVP_MD_CTX *ctx = EVP_MD_CTX_new();
EVP_DigestInit(ctx, EVP_sha1());
for(auto& group : bufferGroups)
{
// offsets
for (sint32 t = 0; t < group.attribCount; t++)
{
uint32 offset = group.attrib[t].offset;
EVP_DigestUpdate(ctx, &t, sizeof(t));
EVP_DigestUpdate(ctx, &offset, sizeof(offset));
}
}
uint8 shaDigest[SHA_DIGEST_LENGTH];
EVP_DigestFinal_ex(ctx, shaDigest, NULL);
EVP_MD_CTX_free(ctx);
// fold SHA1 hash into a 64bit value
uint64 h = *(uint64*)(shaDigest + 0);
h += *(uint64*)(shaDigest + 8);
h += (uint64)*(uint32*)(shaDigest + 16);
this->vkPipelineHashFragment = h;
}
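// i.e. the 20-byte SHA1 digest is folded as two 64-bit words plus the trailing 32-bit word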
void _fetchShaderDecompiler_parseInstruction_VTX_SEMANTIC(LatteFetchShader* parsedFetchShader, uint32* contextRegister, const LatteClauseInstruction_VTX* instr)
{
uint32 semanticId = instr->getFieldSEM_SEMANTIC_ID(); // location (attribute index inside shader)
uint32 bufferId = instr->getField_BUFFER_ID(); // the index used for GX2SetAttribBuffer (+0xA0)
LatteConst::VertexFetchType2 fetchType = instr->getField_FETCH_TYPE();
auto srcSelX = instr->getField_SRC_SEL_X();
auto dsx = instr->getField_DST_SEL(0);
auto dsy = instr->getField_DST_SEL(1);
auto dsz = instr->getField_DST_SEL(2);
auto dsw = instr->getField_DST_SEL(3);
auto dataFormat = instr->getField_DATA_FORMAT();
uint32 offset = instr->getField_OFFSET();
auto nfa = instr->getField_NUM_FORMAT_ALL();
bool isSigned = instr->getField_FORMAT_COMP_ALL() == LatteClauseInstruction_VTX::FORMAT_COMP::COMP_SIGNED;
auto endianSwap = instr->getField_ENDIAN_SWAP();
// get buffer
cemu_assert_debug(bufferId >= 0xA0 && bufferId < 0xB0);
uint32 bufferIndex = (bufferId - 0xA0);
// get or add new attribute group (by buffer index)
LatteParsedFetchShaderBufferGroup_t* attribGroup = nullptr;
if (LatteFetchShader::isValidBufferIndex(bufferIndex))
{
auto bufferGroupItr = std::find_if(parsedFetchShader->bufferGroups.begin(), parsedFetchShader->bufferGroups.end(), [bufferIndex](LatteParsedFetchShaderBufferGroup_t& bufferGroup) {return bufferGroup.attributeBufferIndex == bufferIndex; });
if (bufferGroupItr != parsedFetchShader->bufferGroups.end())
attribGroup = &(*bufferGroupItr);
}
else
{
auto bufferGroupItr = std::find_if(parsedFetchShader->bufferGroupsInvalid.begin(), parsedFetchShader->bufferGroupsInvalid.end(), [bufferIndex](LatteParsedFetchShaderBufferGroup_t& bufferGroup) {return bufferGroup.attributeBufferIndex == bufferIndex; });
if (bufferGroupItr != parsedFetchShader->bufferGroupsInvalid.end())
attribGroup = &(*bufferGroupItr);
}
// create new group if none found
if (attribGroup == nullptr)
{
if (LatteFetchShader::isValidBufferIndex(bufferIndex))
attribGroup = &parsedFetchShader->bufferGroups.emplace_back();
else
attribGroup = &parsedFetchShader->bufferGroupsInvalid.emplace_back();
attribGroup->attributeBufferIndex = bufferIndex;
attribGroup->minOffset = offset;
attribGroup->maxOffset = offset;
}
// add attribute
sint32 groupAttribIndex = attribGroup->attribCount;
if (attribGroup->attribCount < (groupAttribIndex + 1))
{
attribGroup->attribCount = (groupAttribIndex + 1);
attribGroup->attrib = (LatteParsedFetchShaderAttribute_t*)realloc(attribGroup->attrib, sizeof(LatteParsedFetchShaderAttribute_t) * attribGroup->attribCount);
}
attribGroup->attrib[groupAttribIndex].semanticId = semanticId;
attribGroup->attrib[groupAttribIndex].format = (uint8)dataFormat;
attribGroup->attrib[groupAttribIndex].fetchType = fetchType;
attribGroup->attrib[groupAttribIndex].nfa = (uint8)nfa;
attribGroup->attrib[groupAttribIndex].isSigned = isSigned;
attribGroup->attrib[groupAttribIndex].offset = offset;
attribGroup->attrib[groupAttribIndex].ds[0] = (uint8)dsx;
attribGroup->attrib[groupAttribIndex].ds[1] = (uint8)dsy;
attribGroup->attrib[groupAttribIndex].ds[2] = (uint8)dsz;
attribGroup->attrib[groupAttribIndex].ds[3] = (uint8)dsw;
attribGroup->attrib[groupAttribIndex].attributeBufferIndex = bufferIndex;
attribGroup->attrib[groupAttribIndex].endianSwap = endianSwap;
attribGroup->minOffset = (std::min)(attribGroup->minOffset, offset);
attribGroup->maxOffset = (std::max)(attribGroup->maxOffset, offset);
// get alu divisor
if (srcSelX == LatteClauseInstruction_VTX::SRC_SEL::SEL_X)
{
cemu_assert_debug(fetchType != LatteConst::VertexFetchType2::INSTANCE_DATA); // aluDivisor 0 in combination with instanced data is not allowed?
attribGroup->attrib[groupAttribIndex].aluDivisor = -1;
}
else if (srcSelX == LatteClauseInstruction_VTX::SRC_SEL::SEL_W)
{
cemu_assert_debug(fetchType == LatteConst::VertexFetchType2::INSTANCE_DATA); // using constant divisor 1 with per-vertex data seems strange? (divisor is instance-only)
// aluDivisor is constant 1
attribGroup->attrib[groupAttribIndex].aluDivisor = 1;
}
else if (srcSelX == LatteClauseInstruction_VTX::SRC_SEL::SEL_Y)
{
// use alu divisor 1
attribGroup->attrib[groupAttribIndex].aluDivisor = (sint32)contextRegister[Latte::REGADDR::VGT_INSTANCE_STEP_RATE_0];
cemu_assert_debug(attribGroup->attrib[groupAttribIndex].aluDivisor > 0);
}
else if (srcSelX == LatteClauseInstruction_VTX::SRC_SEL::SEL_Z)
{
// use alu divisor 2
attribGroup->attrib[groupAttribIndex].aluDivisor = (sint32)contextRegister[Latte::REGADDR::VGT_INSTANCE_STEP_RATE_1];
cemu_assert_debug(attribGroup->attrib[groupAttribIndex].aluDivisor > 0);
}
}
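// Summary of the SRC_SEL_X handling above: SEL_X -> per-vertex fetch (aluDivisor -1, i.e. unused),
// SEL_W -> constant divisor 1, SEL_Y -> divisor taken from VGT_INSTANCE_STEP_RATE_0,
// SEL_Z -> divisor taken from VGT_INSTANCE_STEP_RATE_1 (both expected to be > 0 for instanced data)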
void _fetchShaderDecompiler_parseVTXClause(LatteFetchShader* parsedFetchShader, uint32* contextRegister, std::span<uint8> clauseCode, size_t numInstructions)
{
const LatteClauseInstruction_VTX* instr = (LatteClauseInstruction_VTX*)clauseCode.data();
const LatteClauseInstruction_VTX* end = instr + numInstructions;
while (instr < end)
{
if (instr->getField_VTX_INST() == LatteClauseInstruction_VTX::VTX_INST::_VTX_INST_SEMANTIC)
{
_fetchShaderDecompiler_parseInstruction_VTX_SEMANTIC(parsedFetchShader, contextRegister, instr);
}
else
{
assert_dbg();
}
instr++;
}
}
void _fetchShaderDecompiler_parseCF(LatteFetchShader* parsedFetchShader, uint32* contextRegister, std::span<uint8> programCode)
{
size_t maxCountCFInstructions = programCode.size_bytes() / sizeof(LatteCFInstruction);
const LatteCFInstruction* cfInstruction = (LatteCFInstruction*)programCode.data();
const LatteCFInstruction* end = cfInstruction + maxCountCFInstructions;
while (cfInstruction < end)
{
if (cfInstruction->getField_Opcode() == LatteCFInstruction::INST_VTX_TC)
{
auto vtxInstruction = cfInstruction->getParserIfOpcodeMatch<LatteCFInstruction_DEFAULT>();
cemu_assert_debug(vtxInstruction->getField_COND() == LatteCFInstruction::CF_COND::CF_COND_ACTIVE);
_fetchShaderDecompiler_parseVTXClause(parsedFetchShader, contextRegister, vtxInstruction->getClauseCode(programCode), vtxInstruction->getField_COUNT());
}
else if (cfInstruction->getField_Opcode() == LatteCFInstruction::INST_RETURN)
{
cemu_assert_debug(!cfInstruction->getField_END_OF_PROGRAM());
return;
}
else
{
cemu_assert_debug(false); // unhandled / unexpected CF instruction
}
if (cfInstruction->getField_END_OF_PROGRAM())
{
cemu_assert_debug(false); // unusual for fetch shader? They should end with a return instruction
break;
}
cfInstruction++;
}
cemu_assert_debug(false); // program must be terminated with an instruction that has EOP set?
}
// parse fetch shader and create LatteFetchShader object
// also registers the fs in the cache (s_fetchShaderByHash)
// can be assumed to be thread-safe, if called simultaneously on the same fetch shader only one shader will become registered. The others will be destroyed
LatteFetchShader* LatteShaderRecompiler_createFetchShader(LatteFetchShader::CacheHash fsHash, uint32* contextRegister, uint32* fsProgramCode, uint32 fsProgramSize)
{
LatteFetchShader* newFetchShader = new LatteFetchShader();
newFetchShader->m_cacheHash = fsHash;
if( (fsProgramSize&0xF) != 0 )
debugBreakpoint();
uint32 index = 0;
// if the first instruction is a CF instruction then parse shader properly
// otherwise fall back to our broken legacy method (where we assumed fetch shaders had no CF program)
	// this workaround is required to make sure old shader caches don't break
// from old fetch shader gen (CF part missing):
// {0x0000a001, 0x27961000, 0x00020000, 0x00000000}
// {0x0000a001, 0x2c151002, 0x00020000, 0x00000000, 0x0000a001, 0x068d1000, 0x0000000c, ...}
// {0x0000a001, 0x2c151000, 0x00020000, 0x00000000}
// {0x0300aa21, 0x28cd1006, 0x00000000, 0x00000000, 0x0300ab21, 0x28cd1007, 0x00000000, ...}
// shaders shipped with games (e.g. BotW):
// {0x00000002, 0x01800400, 0x00000000, 0x8a000000, 0x1c00a001, 0x280d1000, 0x00090000, ...}
// {0x00000002, 0x01800000, 0x00000000, 0x8a000000, 0x1c00a001, 0x27961000, 0x000a0000, ...}
// {0x00000002, 0x01800c00, 0x00000000, 0x8a000000, 0x2c00a001, 0x2c151000, 0x000a0000, ...} // size 0x50
// {0x00000002, 0x01801000, 0x00000000, 0x8a000000, 0x1c00a001, 0x280d1000, 0x00090000, ...} // size 0x60
// {0x00000002, 0x01801c00, 0x00000000, 0x8a000000, 0x1c00a001, 0x280d1000, 0x00090000, ...} // size 0x90
// our new implementation:
// {0x00000002, 0x01800400, 0x00000000, 0x8a000000, 0x0000a001, 0x2c151000, 0x00020000, ...}
// for ALU instructions everything except the 01 is dynamic
newFetchShader->bufferGroups.reserve(16);
if (fsProgramSize == 0)
{
// empty fetch shader, seen in Minecraft
// these only make sense when vertex shader does not call FS?
LatteShader_calculateFSKey(newFetchShader);
newFetchShader->CalculateFetchShaderVkHash();
return newFetchShader;
}
if ((fsProgramCode[0] & 1) == 0 && fsProgramCode[0] <= 0x30 && (fsProgramCode[1]&~((3 << 10)| (1 << 19))) == 0x01800000)
{
// very likely a CF instruction
_fetchShaderDecompiler_parseCF(newFetchShader, contextRegister, { (uint8*)fsProgramCode, fsProgramSize });
}
else
{
while (index < (fsProgramSize / 4))
{
uint32 dword0 = fsProgramCode[index];
uint32 opcode = dword0 & 0x1F;
index++;
if (opcode == VTX_INST_MEM)
{
// this might be the clause initialization instruction? (it always seems to be the first instruction)
// todo - upon further investigation, it seems like fetch shaders also start with a CF program. Our implementation doesn't emit one right now
uint32 opcode2 = (dword0 >> 8) & 7;
index += 3;
}
else if (opcode == VTX_INST_SEMANTIC)
{
_fetchShaderDecompiler_parseInstruction_VTX_SEMANTIC(newFetchShader, contextRegister, (const LatteClauseInstruction_VTX*)(fsProgramCode + index - 1));
index += 3;
}
}
}
newFetchShader->bufferGroups.shrink_to_fit();
// calculate group information
// VBO offsets and stride
uint32 vboOffset = 0;
for (auto& bufferGroup : newFetchShader->bufferGroups)
{
for(sint32 i=0; i< bufferGroup.attribCount; i++)
{
uint32 attribSize = LatteShaderRecompiler_getAttributeSize(bufferGroup.attrib+i);
uint32 attribAlignment = LatteShaderRecompiler_getAttributeAlignment(bufferGroup.attrib+i);
// fix alignment
vboOffset = (vboOffset+attribAlignment-1)&~(attribAlignment-1);
vboOffset += attribSize;
// index type
if(bufferGroup.attrib[i].fetchType == LatteConst::VERTEX_DATA)
bufferGroup.hasVtxIndexAccess = true;
else if (bufferGroup.attrib[i].fetchType == LatteConst::INSTANCE_DATA)
bufferGroup.hasInstanceIndexAccess = true;
}
// fix alignment of whole vertex
if(bufferGroup.attribCount > 0 )
{
uint32 attribAlignment = LatteShaderRecompiler_getAttributeAlignment(bufferGroup.attrib+0);
vboOffset = (vboOffset+attribAlignment-1)&~(attribAlignment-1);
}
bufferGroup.vboStride = vboOffset;
}
LatteShader_calculateFSKey(newFetchShader);
newFetchShader->CalculateFetchShaderVkHash();
// register in cache
// it's possible that during multi-threaded shader cache loading, two identical (same hash) fetch shaders get created simultaneously
// we catch and handle this case here. RegisterInCache() is atomic and if another fetch shader is already registered, we abandon the local instance
LatteFetchShader* registeredFS = newFetchShader->RegisterInCache(fsHash);
if (registeredFS)
{
delete newFetchShader;
newFetchShader = registeredFS;
}
else
{
newFetchShader->m_isRegistered = true;
}
return newFetchShader;
}
LatteFetchShader::~LatteFetchShader()
{
UnregisterInCache();
}
struct FetchShaderLookupInfo
{
LatteFetchShader* fetchShader;
uint32 programSize;
uint32 lastFrameAccessed;
};
LookupTableL3<8, 8, 8, FetchShaderLookupInfo*> g_fetchShaderLookupCache;
LatteFetchShader::CacheHash LatteFetchShader::CalculateCacheHash(void* programCode, uint32 programSize)
{
uint32* programCodeU32 = (uint32*)programCode;
uint64 progHash1 = 0;
uint64 progHash2 = 0;
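// two rolling 64-bit accumulators over the program dwords: progHash1 adds each dword and rotates left by 3,
// progHash2 XORs each dword and rotates right by 7; the final hash is the sum of both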
for (uint32 i = 0; i < programSize / 4; i++)
{
uint32 temp = programCodeU32[i];
progHash1 += (uint64)temp;
progHash2 ^= (uint64)temp;
progHash1 = (progHash1 << 3) | (progHash1 >> 61);
progHash2 = (progHash2 >> 7) | (progHash2 << 57);
}
// todo - we should incorporate the value of VGT_INSTANCE_STEP_RATE_0/1 into the hash since it affects the generated LatteFetchShader object
// However, this would break compatibility with shader caches and gfx packs due to altering the shader base hashes
return progHash1 + progHash2;
}
LatteFetchShader* LatteFetchShader::FindInCacheByHash(LatteFetchShader::CacheHash fsHash)
{
// does not acquire s_spinlockFetchShaderCache for better performance. Be careful not to call this while another thread invokes RegisterInCache()
auto itr = s_fetchShaderByHash.find(fsHash);
if (itr == s_fetchShaderByHash.end())
return nullptr;
return itr->second;
}
void* _getFSProgramPtr()
{
return memory_getPointerFromPhysicalOffset(LatteGPUState.contextRegister[mmSQ_PGM_START_FS + 0] << 8);
}
uint32 _getFSProgramSize()
{
return LatteGPUState.contextRegister[mmSQ_PGM_START_FS + 1] << 3;
}
LatteFetchShader* LatteFetchShader::FindByGPUState()
{
// retrieve fetch shader that matches the currently set GPU context registers
uint32 fsPhysAddr24 = LatteGPUState.contextRegister[mmSQ_PGM_START_FS + 0];
cemu_assert_debug(fsPhysAddr24 < 0x1000000); // should only contain the upper 24 bits of the address in the lower 24 bits of the register
FetchShaderLookupInfo* lookupInfo = g_fetchShaderLookupCache.lookup(fsPhysAddr24);
if (lookupInfo)
{
// return fetch shader if still the same
uint32 fsSize = _getFSProgramSize();
uint32 framesSinceLastAccess = LatteGPUState.frameCounter - lookupInfo->lastFrameAccessed;
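// the cached address->fetch-shader association is only trusted within the same frame; otherwise the program data is re-hashed below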
if (lookupInfo->programSize == fsSize && framesSinceLastAccess == 0)
{
lookupInfo->lastFrameAccessed = LatteGPUState.frameCounter;
return lookupInfo->fetchShader;
}
// update lookup info
CacheHash fsHash = CalculateCacheHash(_getFSProgramPtr(), _getFSProgramSize());
LatteFetchShader* fetchShader = FindInCacheByHash(fsHash);
if (!fetchShader)
{
fetchShader = LatteShaderRecompiler_createFetchShader(fsHash, LatteGPUState.contextNew.GetRawView(), (uint32*)_getFSProgramPtr(), _getFSProgramSize());
cemu_assert(fetchShader);
}
lookupInfo->fetchShader = fetchShader;
lookupInfo->programSize = fsSize;
lookupInfo->lastFrameAccessed = LatteGPUState.frameCounter;
return fetchShader;
}
else
{
// try to find fetch shader by data hash
CacheHash fsHash = CalculateCacheHash(_getFSProgramPtr(), _getFSProgramSize());
LatteFetchShader* fetchShader = FindInCacheByHash(fsHash);
if (!fetchShader)
{
fetchShader = LatteShaderRecompiler_createFetchShader(fsHash, LatteGPUState.contextNew.GetRawView(), (uint32*)_getFSProgramPtr(), _getFSProgramSize());
cemu_assert(fetchShader);
}
// create new lookup entry
lookupInfo = new FetchShaderLookupInfo();
lookupInfo->fetchShader = fetchShader;
lookupInfo->programSize = _getFSProgramSize();
lookupInfo->lastFrameAccessed = LatteGPUState.frameCounter;
g_fetchShaderLookupCache.store(fsPhysAddr24, lookupInfo);
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug(g_fetchShaderLookupCache.lookup(fsPhysAddr24) == lookupInfo);
#endif
}
return lookupInfo->fetchShader;
}
FSpinlock s_spinlockFetchShaderCache;
LatteFetchShader* LatteFetchShader::RegisterInCache(CacheHash fsHash)
{
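// check-and-insert under the spinlock: if another thread already registered a fetch shader with this hash, return that instance so the caller can discard its own copy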
s_spinlockFetchShaderCache.lock();
auto itr = s_fetchShaderByHash.find(fsHash);
if (itr != s_fetchShaderByHash.end())
{
LatteFetchShader* fs = itr->second;
s_spinlockFetchShaderCache.unlock();
return fs;
}
s_fetchShaderByHash.emplace(fsHash, this);
s_spinlockFetchShaderCache.unlock();
return nullptr;
}
void LatteFetchShader::UnregisterInCache()
{
if (!m_isRegistered)
return;
s_spinlockFetchShaderCache.lock();
auto itr = s_fetchShaderByHash.find(m_cacheHash);
cemu_assert(itr != s_fetchShaderByHash.end()); // a registered fetch shader must still be present in the cache
s_fetchShaderByHash.erase(itr);
s_spinlockFetchShaderCache.unlock();
}
std::unordered_map<LatteFetchShader::CacheHash, LatteFetchShader*> LatteFetchShader::s_fetchShaderByHash;
| 21,426 | C++ | .cpp | 505 | 39.908911 | 255 | 0.754993 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,284 | LatteRingBuffer.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteRingBuffer.cpp |
#include "Cafe/HW/Latte/Core/LatteRingBuffer.h"
LatteRingBuffer_t* LatteRingBuffer_create(uint8* data, uint32 size)
{
LatteRingBuffer_t* rb = (LatteRingBuffer_t*)malloc(sizeof(LatteRingBuffer_t));
rb->data = data;
rb->size = size;
rb->writeIndex = 0;
return rb;
}
uint8* LatteRingBuffer_allocate(LatteRingBuffer_t* rb, sint32 size, sint32 alignment)
{
#ifdef CEMU_DEBUG_ASSERT
cemu_assert_debug(size < rb->size);
#endif
// align
rb->writeIndex = (rb->writeIndex + alignment - 1)&~(alignment-1);
// handle wrap-around
if ((rb->writeIndex + size) >= rb->size)
rb->writeIndex = 0;
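// note: allocations never straddle the end of the buffer, the write cursor simply restarts at offset 0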
// allocate range
uint8* data = rb->data + rb->writeIndex;
rb->writeIndex += size;
return data;
}
| 693 | C++ | .cpp | 24 | 27.166667 | 85 | 0.727545 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,285 | LatteShaderCache.cpp | cemu-project_Cemu/src/Cafe/HW/Latte/Core/LatteShaderCache.cpp |
#include "Cafe/CafeSystem.h"
#include "Cafe/HW/Latte/Core/LatteConst.h"
#include "Cafe/HW/Latte/Core/Latte.h"
#include "Cafe/HW/Latte/Core/LatteShader.h"
#include "Cafe/HW/Latte/LegacyShaderDecompiler/LatteDecompiler.h"
#include "Cafe/HW/Latte/Core/FetchShader.h"
#include "Cemu/FileCache/FileCache.h"
#include "Cafe/GameProfile/GameProfile.h"
#include "gui/guiWrapper.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "Cafe/HW/Latte/Renderer/OpenGL/RendererShaderGL.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/RendererShaderVk.h"
#include "Cafe/HW/Latte/Renderer/Vulkan/VulkanPipelineStableCache.h"
#include <imgui.h>
#include "imgui/imgui_extension.h"
#include "config/ActiveSettings.h"
#include "Cafe/TitleList/GameInfo.h"
#include "util/helpers/SystemException.h"
#include "Cafe/HW/Latte/Common/RegisterSerializer.h"
#include "Cafe/HW/Latte/Common/ShaderSerializer.h"
#include "util/helpers/Serializer.h"
#include <wx/msgdlg.h>
#if BOOST_OS_WINDOWS
#include <psapi.h>
#endif
#define SHADER_CACHE_COMPILE_QUEUE_SIZE (32)
struct
{
sint32 compiledShaderCount;
// number of loaded shaders
sint32 vertexShaderCount;
sint32 geometryShaderCount;
sint32 pixelShaderCount;
}shaderCacheScreenStats;
struct
{
ImTextureID textureTVId;
ImTextureID textureDRCId;
// shader loading
sint32 loadedShaderFiles;
sint32 shaderFileCount;
// pipeline loading
uint32 loadedPipelines;
sint32 pipelineFileCount;
}g_shaderCacheLoaderState;
FileCache* s_shaderCacheGeneric = nullptr; // contains hardware and version independent shader information
#define SHADER_CACHE_GENERIC_EXTRA_VERSION 2 // changing this constant will invalidate all hardware-independent cache files
#define SHADER_CACHE_TYPE_VERTEX (0)
#define SHADER_CACHE_TYPE_GEOMETRY (1)
#define SHADER_CACHE_TYPE_PIXEL (2)
bool LatteShaderCache_readSeparableShader(uint8* shaderInfoData, sint32 shaderInfoSize);
void LatteShaderCache_LoadVulkanPipelineCache(uint64 cacheTitleId);
bool LatteShaderCache_updatePipelineLoadingProgress();
void LatteShaderCache_ShowProgress(const std::function <bool(void)>& loadUpdateFunc, bool isPipelines);
void LatteShaderCache_handleDeprecatedCacheFiles(fs::path pathGeneric, fs::path pathGenericPre1_25_0, fs::path pathGenericPre1_16_0);
struct
{
struct
{
LatteDecompilerShader* shader;
}entry[SHADER_CACHE_COMPILE_QUEUE_SIZE];
sint32 count;
}shaderCompileQueue;
void LatteShaderCache_initCompileQueue()
{
shaderCompileQueue.count = 0;
}
void LatteShaderCache_addToCompileQueue(LatteDecompilerShader* shader)
{
cemu_assert(shaderCompileQueue.count < SHADER_CACHE_COMPILE_QUEUE_SIZE);
shaderCompileQueue.entry[shaderCompileQueue.count].shader = shader;
shaderCompileQueue.count++;
}
void LatteShaderCache_removeFromCompileQueue(sint32 index)
{
for (sint32 i = index; i<shaderCompileQueue.count-1; i++)
shaderCompileQueue.entry[i].shader = shaderCompileQueue.entry[i + 1].shader;
shaderCompileQueue.count--;
}
/*
* Process entries from compile queue until there are equal or less entries
* left than specified by maxRemainingEntries
*/
void LatteShaderCache_updateCompileQueue(sint32 maxRemainingEntries)
{
while (true)
{
if (shaderCompileQueue.count <= maxRemainingEntries)
break;
auto shader = shaderCompileQueue.entry[0].shader;
if (shader)
LatteShader_FinishCompilation(shader);
LatteShaderCache_removeFromCompileQueue(0);
}
}
typedef struct
{
unsigned char imageTypeCode;
short int imageWidth;
short int imageHeight;
unsigned char bitCount;
std::vector<uint8> imageData;
} TGAFILE;
bool LoadTGAFile(const std::vector<uint8>& buffer, TGAFILE *tgaFile)
{
if (buffer.size() <= 18)
return false;
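// fixed 18-byte TGA header: image type code at offset 2, width at offset 12, height at offset 14, bits per pixel at offset 16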
tgaFile->imageTypeCode = buffer[2];
if (tgaFile->imageTypeCode != 2 && tgaFile->imageTypeCode != 3)
return false;
tgaFile->imageWidth = *(uint16*)(buffer.data() + 12);
tgaFile->imageHeight = *(uint16*)(buffer.data() + 14);
tgaFile->bitCount = buffer[16];
// Color mode -> 3 = BGR, 4 = BGRA.
const uint8 colorMode = tgaFile->bitCount / 8;
if (colorMode != 3)
return false;
const uint32 imageSize = tgaFile->imageWidth * tgaFile->imageHeight * colorMode;
if (imageSize + 18 >= buffer.size())
return false;
tgaFile->imageData.resize(imageSize);
std::copy(buffer.data() + 18, buffer.data() + 18 + imageSize, tgaFile->imageData.begin());
// Change from BGR to RGB so OpenGL can read the image data.
for (uint32 imageIdx = 0; imageIdx < imageSize; imageIdx += colorMode)
{
std::swap(tgaFile->imageData[imageIdx], tgaFile->imageData[imageIdx + 2]);
}
return true;
}
void LatteShaderCache_finish()
{
if (g_renderer->GetType() == RendererAPI::Vulkan)
RendererShaderVk::ShaderCacheLoading_end();
else if (g_renderer->GetType() == RendererAPI::OpenGL)
RendererShaderGL::ShaderCacheLoading_end();
}
uint32 LatteShaderCache_getShaderCacheExtraVersion(uint64 titleId)
{
// encode the titleId in the version to prevent users from swapping caches between titles
const uint32 cacheFileVersion = 1;
uint32 extraVersion = ((uint32)(titleId >> 32) + ((uint32)titleId) * 3) + cacheFileVersion + 0xe97af1ad;
return extraVersion;
}
uint32 LatteShaderCache_getPipelineCacheExtraVersion(uint64 titleId)
{
const uint32 cacheFileVersion = 1;
uint32 extraVersion = ((uint32)(titleId >> 32) + ((uint32)titleId) * 3) + cacheFileVersion;
return extraVersion;
}
void LatteShaderCache_drawBackgroundImage(ImTextureID texture, int width, int height)
{
// clear framebuffers and clean up
const auto kPopupFlags =
ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_NoSavedSettings |
ImGuiWindowFlags_NoFocusOnAppearing | ImGuiWindowFlags_NoNav | ImGuiWindowFlags_AlwaysAutoResize |
ImGuiWindowFlags_NoBringToFrontOnFocus;
auto& io = ImGui::GetIO();
ImGui::SetNextWindowPos({0, 0}, ImGuiCond_Always);
ImGui::SetNextWindowSize(io.DisplaySize, ImGuiCond_Always);
ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0);
ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, {0, 0});
if (ImGui::Begin("Background texture", nullptr, kPopupFlags))
{
if (texture)
{
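// fit the image to the display: scale to full width first; if that overflows vertically, scale to full height instead and pillarbox horizontally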
float imageDisplayWidth = io.DisplaySize.x;
float imageDisplayHeight = height * imageDisplayWidth / width;
float paddingLeftAndRight = 0.0f;
float paddingTopAndBottom = (io.DisplaySize.y - imageDisplayHeight) / 2.0f;
if (imageDisplayHeight > io.DisplaySize.y)
{
imageDisplayHeight = io.DisplaySize.y;
imageDisplayWidth = width * imageDisplayHeight / height;
paddingLeftAndRight = (io.DisplaySize.x - imageDisplayWidth) / 2.0f;
paddingTopAndBottom = 0.0f;
}
ImGui::GetWindowDrawList()->AddImage(texture, ImVec2(paddingLeftAndRight, paddingTopAndBottom),
ImVec2(io.DisplaySize.x - paddingLeftAndRight,
io.DisplaySize.y - paddingTopAndBottom), {0, 1}, {1, 0});
}
}
ImGui::End();
ImGui::PopStyleVar(2);
}
void LatteShaderCache_Load()
{
shaderCacheScreenStats.compiledShaderCount = 0;
shaderCacheScreenStats.vertexShaderCount = 0;
shaderCacheScreenStats.geometryShaderCount = 0;
shaderCacheScreenStats.pixelShaderCount = 0;
uint64 cacheTitleId = CafeSystem::GetForegroundTitleId();
const auto timeLoadStart = now_cached();
// remember current amount of committed memory
#if BOOST_OS_WINDOWS
PROCESS_MEMORY_COUNTERS pmc1;
GetProcessMemoryInfo(GetCurrentProcess(), &pmc1, sizeof(PROCESS_MEMORY_COUNTERS));
LONGLONG totalMem1 = pmc1.PagefileUsage;
#endif
// init shader parallel compile queue
LatteShaderCache_initCompileQueue();
// create directories
std::error_code ec;
fs::create_directories(ActiveSettings::GetCachePath("shaderCache/transferable"), ec);
fs::create_directories(ActiveSettings::GetCachePath("shaderCache/precompiled"), ec);
// initialize renderer specific caches
if (g_renderer->GetType() == RendererAPI::Vulkan)
RendererShaderVk::ShaderCacheLoading_begin(cacheTitleId);
else if (g_renderer->GetType() == RendererAPI::OpenGL)
RendererShaderGL::ShaderCacheLoading_begin(cacheTitleId);
// get cache file name
const auto pathGeneric = ActiveSettings::GetCachePath("shaderCache/transferable/{:016x}_shaders.bin", cacheTitleId);
const auto pathGenericPre1_25_0 = ActiveSettings::GetCachePath("shaderCache/transferable/{:016x}.bin", cacheTitleId); // before 1.25.0
const auto pathGenericPre1_16_0 = ActiveSettings::GetCachePath("shaderCache/transferable/{:08x}.bin", CafeSystem::GetRPXHashBase()); // before 1.16.0
LatteShaderCache_handleDeprecatedCacheFiles(pathGeneric, pathGenericPre1_25_0, pathGenericPre1_16_0);
// calculate extraVersion for transferable and precompiled shader cache
uint32 transferableExtraVersion = SHADER_CACHE_GENERIC_EXTRA_VERSION;
s_shaderCacheGeneric = FileCache::Open(pathGeneric, false, transferableExtraVersion); // legacy extra version (1.25.0 - 1.25.1b)
if(!s_shaderCacheGeneric)
s_shaderCacheGeneric = FileCache::Open(pathGeneric, true, LatteShaderCache_getShaderCacheExtraVersion(cacheTitleId));
if(!s_shaderCacheGeneric)
{
// no shader cache available yet
cemuLog_log(LogType::Force, "Unable to open or create shader cache file \"{}\"", _pathToUtf8(pathGeneric));
LatteShaderCache_finish();
return;
}
s_shaderCacheGeneric->UseCompression(false);
// load/compile cached shaders
sint32 entryCount = s_shaderCacheGeneric->GetMaximumFileIndex();
g_shaderCacheLoaderState.shaderFileCount = s_shaderCacheGeneric->GetFileCount();
g_shaderCacheLoaderState.loadedShaderFiles = 0;
// get game background loading image
auto loadBackgroundTexture = [](bool isTV, ImTextureID& out)
{
TGAFILE file{};
out = nullptr;
std::string fileName = isTV ? "bootTvTex.tga" : "bootDRCTex.tga";
std::string texPath = fmt::format("{}/meta/{}", CafeSystem::GetMlcStoragePath(CafeSystem::GetForegroundTitleId()), fileName);
sint32 status;
auto fscfile = fsc_open(texPath.c_str(), FSC_ACCESS_FLAG::OPEN_FILE | FSC_ACCESS_FLAG::READ_PERMISSION, &status);
if (fscfile)
{
uint32 size = fsc_getFileSize(fscfile);
if (size > 0)
{
std::vector<uint8> tmpData(size);
fsc_readFile(fscfile, tmpData.data(), size);
const bool backgroundLoaded = LoadTGAFile(tmpData, &file);
if (backgroundLoaded)
out = g_renderer->GenerateTexture(file.imageData, { file.imageWidth, file.imageHeight });
}
fsc_close(fscfile);
}
};
loadBackgroundTexture(true, g_shaderCacheLoaderState.textureTVId);
loadBackgroundTexture(false, g_shaderCacheLoaderState.textureDRCId);
sint32 numLoadedShaders = 0;
uint32 loadIndex = 0;
auto LoadShadersUpdate = [&]() -> bool
{
if (loadIndex >= (uint32)s_shaderCacheGeneric->GetMaximumFileIndex())
return false;
LatteShaderCache_updateCompileQueue(SHADER_CACHE_COMPILE_QUEUE_SIZE - 2);
uint64 name1;
uint64 name2;
std::vector<uint8> fileData;
if (!s_shaderCacheGeneric->GetFileByIndex(loadIndex, &name1, &name2, fileData))
{
loadIndex++;
return true;
}
g_shaderCacheLoaderState.loadedShaderFiles++;
if (LatteShaderCache_readSeparableShader(fileData.data(), fileData.size()) == false)
{
// something is wrong with the stored shader, remove entry from shader cache files
cemuLog_log(LogType::Force, "Shader cache entry {} invalid, deleting...", loadIndex);
s_shaderCacheGeneric->DeleteFile({name1, name2 });
}
numLoadedShaders++;
loadIndex++;
return true;
};
LatteShaderCache_ShowProgress(LoadShadersUpdate, false);
LatteShaderCache_updateCompileQueue(0);
// write load time and RAM usage to log file (in dev build)
#if BOOST_OS_WINDOWS
const auto timeLoadEnd = now_cached();
const auto timeLoad = std::chrono::duration_cast<std::chrono::milliseconds>(timeLoadEnd - timeLoadStart).count();
PROCESS_MEMORY_COUNTERS pmc2;
GetProcessMemoryInfo(GetCurrentProcess(), &pmc2, sizeof(PROCESS_MEMORY_COUNTERS));
LONGLONG totalMem2 = pmc2.PagefileUsage;
LONGLONG memCommited = totalMem2 - totalMem1;
cemuLog_log(LogType::Force, "Shader cache loaded with {} shaders. Commited mem {}MB. Took {}ms", numLoadedShaders, (sint32)(memCommited/1024/1024), timeLoad);
#endif
LatteShaderCache_finish();
// if Vulkan then also load pipeline cache
if (g_renderer->GetType() == RendererAPI::Vulkan)
LatteShaderCache_LoadVulkanPipelineCache(cacheTitleId);
g_renderer->BeginFrame(true);
if (g_renderer->ImguiBegin(true))
{
LatteShaderCache_drawBackgroundImage(g_shaderCacheLoaderState.textureTVId, 1280, 720);
g_renderer->ImguiEnd();
}
g_renderer->BeginFrame(false);
if (g_renderer->ImguiBegin(false))
{
LatteShaderCache_drawBackgroundImage(g_shaderCacheLoaderState.textureDRCId, 854, 480);
g_renderer->ImguiEnd();
}
g_renderer->SwapBuffers(true, true);
if (g_shaderCacheLoaderState.textureTVId)
g_renderer->DeleteTexture(g_shaderCacheLoaderState.textureTVId);
if (g_shaderCacheLoaderState.textureDRCId)
g_renderer->DeleteTexture(g_shaderCacheLoaderState.textureDRCId);
}
void LatteShaderCache_ShowProgress(const std::function <bool(void)>& loadUpdateFunc, bool isPipelines)
{
const auto kPopupFlags = ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_NoSavedSettings | ImGuiWindowFlags_NoFocusOnAppearing | ImGuiWindowFlags_NoNav | ImGuiWindowFlags_AlwaysAutoResize;
const auto textColor = 0xFF888888;
auto lastFrameUpdate = tick_cached();
while (true)
{
if (Latte_GetStopSignal())
break; // thread stop requested, cancel shader loading
bool r = loadUpdateFunc();
if (!r)
break;
// in order to slightly speed up shader loading, we don't update the display if little time passed
// this also avoids delayed loading in case third party software caps the framerate at 30
if ((tick_cached() - lastFrameUpdate) < std::chrono::milliseconds(1000 / 20)) // -> aim for 20 FPS
continue;
int w, h;
gui_getWindowPhysSize(w, h);
const Vector2f window_size{ (float)w,(float)h };
ImGui_GetFont(window_size.y / 32.0f); // = 24 by default
ImGui_GetFont(window_size.y / 48.0f); // = 16
g_renderer->BeginFrame(true);
if (g_renderer->ImguiBegin(true))
{
auto& io = ImGui::GetIO();
// render background texture
LatteShaderCache_drawBackgroundImage(g_shaderCacheLoaderState.textureTVId, 1280, 720);
const auto progress_font = ImGui_GetFont(window_size.y / 32.0f); // = 24 by default
const auto shader_count_font = ImGui_GetFont(window_size.y / 48.0f); // = 16
ImVec2 position = { window_size.x / 2.0f, window_size.y / 2.0f };
ImVec2 pivot = { 0.5f, 0.5f };
ImVec2 progress_size = { io.DisplaySize.x * 0.5f, 0 };
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowSize(progress_size, ImGuiCond_Always);
ImGui::SetNextWindowBgAlpha(0.8f);
ImGui::PushStyleColor(ImGuiCol_PlotHistogram, textColor);
ImGui::PushStyleColor(ImGuiCol_WindowBg, 0);
ImGui::PushFont(progress_font);
std::string titleText = "Shader progress";
if (ImGui::Begin(titleText.c_str(), nullptr, kPopupFlags))
{
const float width = ImGui::GetWindowSize().x / 2.0f;
std::string text;
if (isPipelines)
{
text = "Loading cached Vulkan pipelines...";
}
else
{
if (shaderCacheScreenStats.compiledShaderCount >= 3)
text = "Compiling cached shaders...";
else
text = "Loading cached shaders...";
}
ImGui::SetCursorPosX(width - ImGui::CalcTextSize(text.c_str()).x / 2);
ImGui::Text("%s", text.c_str());
float percentLoaded;
if(isPipelines)
percentLoaded = (float)g_shaderCacheLoaderState.loadedPipelines / (float)g_shaderCacheLoaderState.pipelineFileCount;
else
percentLoaded = (float)g_shaderCacheLoaderState.loadedShaderFiles / (float)g_shaderCacheLoaderState.shaderFileCount;
ImGui::ProgressBar(percentLoaded, { -1, 0 }, "");
if (isPipelines)
text = fmt::format("{}/{} ({}%)", g_shaderCacheLoaderState.loadedPipelines, g_shaderCacheLoaderState.pipelineFileCount, (int)(percentLoaded * 100));
else
text = fmt::format("{}/{} ({}%)", g_shaderCacheLoaderState.loadedShaderFiles, g_shaderCacheLoaderState.shaderFileCount, (int)(percentLoaded * 100));
ImGui::SetCursorPosX(width - ImGui::CalcTextSize(text.c_str()).x / 2);
ImGui::Text("%s", text.c_str());
}
ImGui::End();
ImGui::PopFont();
ImGui::PopStyleColor(2);
if (!isPipelines)
{
position = { 10, window_size.y - 10 };
pivot = { 0, 1 };
ImGui::SetNextWindowPos(position, ImGuiCond_Always, pivot);
ImGui::SetNextWindowBgAlpha(0.8f);
ImGui::PushStyleColor(ImGuiCol_WindowBg, 0);
ImGui::PushFont(shader_count_font);
if (ImGui::Begin("Shader count", nullptr, kPopupFlags))
{
const float offset = shader_count_font->FallbackAdvanceX * 25.f;
ImGui::Text("Vertex shaders");
ImGui::SameLine(offset);
ImGui::Text("%d", shaderCacheScreenStats.vertexShaderCount);
ImGui::Text("Pixel shaders");
ImGui::SameLine(offset);
ImGui::Text("%d", shaderCacheScreenStats.pixelShaderCount);
ImGui::Text("Geometry shaders");
ImGui::SameLine(offset);
ImGui::Text("%d", shaderCacheScreenStats.geometryShaderCount);
}
ImGui::End();
ImGui::PopStyleColor();
ImGui::PopFont();
}
g_renderer->ImguiEnd();
lastFrameUpdate = tick_cached();
}
g_renderer->BeginFrame(false);
if (g_renderer->ImguiBegin(false))
{
LatteShaderCache_drawBackgroundImage(g_shaderCacheLoaderState.textureDRCId, 854, 480);
g_renderer->ImguiEnd();
}
// finish frame
g_renderer->SwapBuffers(true, true);
}
}
void LatteShaderCache_LoadVulkanPipelineCache(uint64 cacheTitleId)
{
auto& pipelineCache = VulkanPipelineStableCache::GetInstance();
g_shaderCacheLoaderState.pipelineFileCount = pipelineCache.BeginLoading(cacheTitleId);
g_shaderCacheLoaderState.loadedPipelines = 0;
LatteShaderCache_ShowProgress(LatteShaderCache_updatePipelineLoadingProgress, true);
pipelineCache.EndLoading();
if(Latte_GetStopSignal())
LatteThread_Exit();
}
bool LatteShaderCache_updatePipelineLoadingProgress()
{
uint32 pipelinesMissingShaders = 0;
return VulkanPipelineStableCache::GetInstance().UpdateLoading(g_shaderCacheLoaderState.loadedPipelines, pipelinesMissingShaders);
}
uint64 LatteShaderCache_getShaderNameInTransferableCache(uint64 baseHash, uint32 shaderType)
{
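// encode the shader type in the top 3 bits of the base hash so that vertex/geometry/pixel entries
// derived from the same base hash map to distinct keys in the transferable cache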
baseHash &= ~(7ULL << 61ULL);
baseHash |= ((uint64)shaderType << 61ULL);
return baseHash;
}
void LatteShaderCache_writeSeparableVertexShader(uint64 shaderBaseHash, uint64 shaderAuxHash, uint8* fetchShader, uint32 fetchShaderSize, uint8* vertexShader, uint32 vertexShaderSize, uint32* contextRegisters, bool usesGeometryShader)
{
if (!s_shaderCacheGeneric)
return;
MemStreamWriter streamWriter(128 * 1024);
// header
streamWriter.writeBE<uint8>(1 | (SHADER_CACHE_TYPE_VERTEX << 4)); // version and type (shared field)
streamWriter.writeBE<uint64>(shaderBaseHash);
streamWriter.writeBE<uint64>(shaderAuxHash);
streamWriter.writeBE<uint8>(usesGeometryShader ? 1 : 0);
// register state
Latte::GPUCompactedRegisterState compactRegState;
Latte::StoreGPURegisterState(*(LatteContextRegister*)contextRegisters, compactRegState);
Latte::SerializeRegisterState(compactRegState, streamWriter);
// fetch shader
Latte::SerializeShaderProgram(fetchShader, fetchShaderSize, streamWriter);
// vertex shader
Latte::SerializeShaderProgram(vertexShader, vertexShaderSize, streamWriter);
// write to cache
uint64 shaderCacheName = LatteShaderCache_getShaderNameInTransferableCache(shaderBaseHash, SHADER_CACHE_TYPE_VERTEX);
std::span<uint8> dataBlob = streamWriter.getResult();
s_shaderCacheGeneric->AddFileAsync({shaderCacheName, shaderAuxHash }, dataBlob.data(), dataBlob.size());
}
void LatteShaderCache_writeSeparableGeometryShader(uint64 shaderBaseHash, uint64 shaderAuxHash, uint8* geometryShader, uint32 geometryShaderSize, uint8* gsCopyShader, uint32 gsCopyShaderSize, uint32* contextRegisters, uint32* hleSpecialState, uint32 vsRingParameterCount)
{
if (!s_shaderCacheGeneric)
return;
MemStreamWriter streamWriter(128 * 1024);
// header
streamWriter.writeBE<uint8>(1 | (SHADER_CACHE_TYPE_GEOMETRY << 4)); // version and type (shared field)
streamWriter.writeBE<uint64>(shaderBaseHash);
streamWriter.writeBE<uint64>(shaderAuxHash);
cemu_assert_debug(vsRingParameterCount < 0x10000);
streamWriter.writeBE<uint16>(vsRingParameterCount);
// register state
Latte::GPUCompactedRegisterState compactRegState;
Latte::StoreGPURegisterState(*(LatteContextRegister*)contextRegisters, compactRegState);
Latte::SerializeRegisterState(compactRegState, streamWriter);
// geometry copy shader
Latte::SerializeShaderProgram(gsCopyShader, gsCopyShaderSize, streamWriter);
// geometry shader
Latte::SerializeShaderProgram(geometryShader, geometryShaderSize, streamWriter);
// write to cache
uint64 shaderCacheName = LatteShaderCache_getShaderNameInTransferableCache(shaderBaseHash, SHADER_CACHE_TYPE_GEOMETRY);
std::span<uint8> dataBlob = streamWriter.getResult();
s_shaderCacheGeneric->AddFileAsync({shaderCacheName, shaderAuxHash }, dataBlob.data(), dataBlob.size());
}
void LatteShaderCache_writeSeparablePixelShader(uint64 shaderBaseHash, uint64 shaderAuxHash, uint8* pixelShader, uint32 pixelShaderSize, uint32* contextRegisters, bool usesGeometryShader)
{
if (!s_shaderCacheGeneric)
return;
MemStreamWriter streamWriter(128 * 1024);
streamWriter.writeBE<uint8>(1 | (SHADER_CACHE_TYPE_PIXEL << 4)); // version and type (shared field)
streamWriter.writeBE<uint64>(shaderBaseHash);
streamWriter.writeBE<uint64>(shaderAuxHash);
streamWriter.writeBE<uint8>(usesGeometryShader ? 1 : 0);
// register state
Latte::GPUCompactedRegisterState compactRegState;
Latte::StoreGPURegisterState(*(LatteContextRegister*)contextRegisters, compactRegState);
Latte::SerializeRegisterState(compactRegState, streamWriter);
// pixel shader
Latte::SerializeShaderProgram(pixelShader, pixelShaderSize, streamWriter);
// write to cache
uint64 shaderCacheName = LatteShaderCache_getShaderNameInTransferableCache(shaderBaseHash, SHADER_CACHE_TYPE_PIXEL);
std::span<uint8> dataBlob = streamWriter.getResult();
s_shaderCacheGeneric->AddFileAsync({shaderCacheName, shaderAuxHash }, dataBlob.data(), dataBlob.size());
}
void LatteShaderCache_loadOrCompileSeparableShader(LatteDecompilerShader* shader, uint64 shaderBaseHash, uint64 shaderAuxHash)
{
RendererShader::ShaderType shaderType;
if (shader->shaderType == LatteConst::ShaderType::Vertex)
{
shaderType = RendererShader::ShaderType::kVertex;
shaderCacheScreenStats.vertexShaderCount++;
}
else if (shader->shaderType == LatteConst::ShaderType::Geometry)
{
shaderType = RendererShader::ShaderType::kGeometry;
shaderCacheScreenStats.geometryShaderCount++;
}
else if (shader->shaderType == LatteConst::ShaderType::Pixel)
{
shaderType = RendererShader::ShaderType::kFragment;
shaderCacheScreenStats.pixelShaderCount++;
}
// compile shader
shaderCacheScreenStats.compiledShaderCount++;
LatteShader_CreateRendererShader(shader, true);
if (shader->shader == nullptr)
return;
LatteShaderCache_addToCompileQueue(shader);
}
bool LatteShaderCache_readSeparableVertexShader(MemStreamReader& streamReader, uint8 version)
{
auto lcr = std::make_unique<LatteContextRegister>();
if (version != 1)
return false;
uint64 shaderBaseHash = streamReader.readBE<uint64>();
uint64 shaderAuxHash = streamReader.readBE<uint64>();
bool usesGeometryShader = streamReader.readBE<uint8>() != 0;
// context registers
Latte::GPUCompactedRegisterState regState;
if (!Latte::DeserializeRegisterState(regState, streamReader))
return false;
Latte::LoadGPURegisterState(*lcr, regState);
if (streamReader.hasError())
return false;
// fetch shader
std::vector<uint8> fetchShaderData;
if (!Latte::DeserializeShaderProgram(fetchShaderData, streamReader))
return false;
if (streamReader.hasError())
return false;
// vertex shader
std::vector<uint8> vertexShaderData;
if (!Latte::DeserializeShaderProgram(vertexShaderData, streamReader))
return false;
if (streamReader.hasError() || !streamReader.isEndOfStream())
return false;
// update PS inputs (affects VS shader outputs)
LatteShader_UpdatePSInputs(lcr->GetRawView());
// get fetch shader
LatteFetchShader::CacheHash fsHash = LatteFetchShader::CalculateCacheHash((uint32*)fetchShaderData.data(), fetchShaderData.size());
LatteFetchShader* fetchShader = LatteShaderRecompiler_createFetchShader(fsHash, lcr->GetRawView(), (uint32*)fetchShaderData.data(), fetchShaderData.size());
// determine decompiler options
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Vertex, usesGeometryShader);
// decompile vertex shader
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompileVertexShader(shaderBaseHash, lcr->GetRawView(), vertexShaderData.data(), vertexShaderData.size(), fetchShader, options, &decompilerOutput);
LatteDecompilerShader* vertexShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, shaderBaseHash, false, shaderAuxHash, lcr->GetRawView());
// compile
LatteShader_DumpShader(shaderBaseHash, shaderAuxHash, vertexShader);
LatteShader_DumpRawShader(shaderBaseHash, shaderAuxHash, SHADER_DUMP_TYPE_VERTEX, vertexShaderData.data(), vertexShaderData.size());
LatteShaderCache_loadOrCompileSeparableShader(vertexShader, shaderBaseHash, shaderAuxHash);
LatteSHRC_RegisterShader(vertexShader, shaderBaseHash, shaderAuxHash);
return true;
}
bool LatteShaderCache_readSeparableGeometryShader(MemStreamReader& streamReader, uint8 version)
{
if (version != 1)
return false;
auto lcr = std::make_unique<LatteContextRegister>();
uint64 shaderBaseHash = streamReader.readBE<uint64>();
uint64 shaderAuxHash = streamReader.readBE<uint64>();
uint32 vsRingParameterCount = streamReader.readBE<uint16>();
// context registers
Latte::GPUCompactedRegisterState regState;
if (!Latte::DeserializeRegisterState(regState, streamReader))
return false;
Latte::LoadGPURegisterState(*lcr, regState);
if (streamReader.hasError())
return false;
// geometry copy shader
std::vector<uint8> geometryCopyShaderData;
if (!Latte::DeserializeShaderProgram(geometryCopyShaderData, streamReader))
return false;
// geometry shader
std::vector<uint8> geometryShaderData;
if (!Latte::DeserializeShaderProgram(geometryShaderData, streamReader))
return false;
if (streamReader.hasError() || !streamReader.isEndOfStream())
return false;
// update PS inputs
LatteShader_UpdatePSInputs(lcr->GetRawView());
// determine decompiler options
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Geometry, true);
// decompile geometry shader
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompileGeometryShader(shaderBaseHash, lcr->GetRawView(), geometryShaderData.data(), geometryShaderData.size(), geometryCopyShaderData.data(), geometryCopyShaderData.size(), vsRingParameterCount, options, &decompilerOutput);
LatteDecompilerShader* geometryShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, shaderBaseHash, false, shaderAuxHash, lcr->GetRawView());
// compile
LatteShader_DumpShader(shaderBaseHash, shaderAuxHash, geometryShader);
LatteShader_DumpRawShader(shaderBaseHash, shaderAuxHash, SHADER_DUMP_TYPE_GEOMETRY, geometryShaderData.data(), geometryShaderData.size());
LatteShaderCache_loadOrCompileSeparableShader(geometryShader, shaderBaseHash, shaderAuxHash);
LatteSHRC_RegisterShader(geometryShader, shaderBaseHash, shaderAuxHash);
return true;
}
bool LatteShaderCache_readSeparablePixelShader(MemStreamReader& streamReader, uint8 version)
{
if (version != 1)
return false;
auto lcr = std::make_unique<LatteContextRegister>();
uint64 shaderBaseHash = streamReader.readBE<uint64>();
uint64 shaderAuxHash = streamReader.readBE<uint64>();
bool usesGeometryShader = streamReader.readBE<uint8>() != 0;
// context registers
Latte::GPUCompactedRegisterState regState;
if (!Latte::DeserializeRegisterState(regState, streamReader))
return false;
Latte::LoadGPURegisterState(*lcr, regState);
if (streamReader.hasError())
return false;
// pixel shader
std::vector<uint8> pixelShaderData;
if (!Latte::DeserializeShaderProgram(pixelShaderData, streamReader))
return false;
if (streamReader.hasError() || !streamReader.isEndOfStream())
return false;
// update PS inputs
LatteShader_UpdatePSInputs(lcr->GetRawView());
// determine decompiler options
LatteDecompilerOptions options;
LatteShader_GetDecompilerOptions(options, LatteConst::ShaderType::Pixel, usesGeometryShader);
// decompile pixel shader
LatteDecompilerOutput_t decompilerOutput{};
LatteDecompiler_DecompilePixelShader(shaderBaseHash, lcr->GetRawView(), pixelShaderData.data(), pixelShaderData.size(), options, &decompilerOutput);
LatteDecompilerShader* pixelShader = LatteShader_CreateShaderFromDecompilerOutput(decompilerOutput, shaderBaseHash, false, shaderAuxHash, lcr->GetRawView());
// compile
LatteShader_DumpShader(shaderBaseHash, shaderAuxHash, pixelShader);
LatteShader_DumpRawShader(shaderBaseHash, shaderAuxHash, SHADER_DUMP_TYPE_PIXEL, pixelShaderData.data(), pixelShaderData.size());
LatteShaderCache_loadOrCompileSeparableShader(pixelShader, shaderBaseHash, shaderAuxHash);
LatteSHRC_RegisterShader(pixelShader, shaderBaseHash, shaderAuxHash);
return true;
}
// read shader info from shader cache
bool LatteShaderCache_readSeparableShader(uint8* shaderInfoData, sint32 shaderInfoSize)
{
if (shaderInfoSize < 8)
return false;
MemStreamReader streamReader(shaderInfoData, shaderInfoSize);
uint8 versionAndType = streamReader.readBE<uint8>();
uint8 version = versionAndType & 0xF;
uint8 type = (versionAndType >> 4) & 0xF;
if (type == SHADER_CACHE_TYPE_VERTEX)
return LatteShaderCache_readSeparableVertexShader(streamReader, version);
else if (type == SHADER_CACHE_TYPE_GEOMETRY)
return LatteShaderCache_readSeparableGeometryShader(streamReader, version);
else if (type == SHADER_CACHE_TYPE_PIXEL)
return LatteShaderCache_readSeparablePixelShader(streamReader, version);
return false;
}
void LatteShaderCache_Close()
{
if(s_shaderCacheGeneric)
{
delete s_shaderCacheGeneric;
s_shaderCacheGeneric = nullptr;
}
if (g_renderer->GetType() == RendererAPI::Vulkan)
RendererShaderVk::ShaderCacheLoading_Close();
else if (g_renderer->GetType() == RendererAPI::OpenGL)
RendererShaderGL::ShaderCacheLoading_Close();
// if Vulkan then also close pipeline cache
if (g_renderer->GetType() == RendererAPI::Vulkan)
VulkanPipelineStableCache::GetInstance().Close();
}
#include <wx/msgdlg.h>
void LatteShaderCache_handleDeprecatedCacheFiles(fs::path pathGeneric, fs::path pathGenericPre1_25_0, fs::path pathGenericPre1_16_0)
{
std::error_code ec;
bool hasOldCacheFiles = fs::exists(pathGenericPre1_25_0, ec) || fs::exists(pathGenericPre1_16_0, ec);
bool hasNewCacheFiles = fs::exists(pathGeneric, ec);
if (hasOldCacheFiles && !hasNewCacheFiles)
{
// ask user if they want to delete or keep the old cache file
auto infoMsg = _("Cemu detected that the shader cache for this game is outdated.\nOnly shader caches generated with Cemu 1.25.0 or above are supported.\n\nWe recommend deleting the outdated cache file as it will no longer be used by Cemu.");
wxMessageDialog dialog(nullptr, infoMsg, _("Outdated shader cache"),
wxYES_NO | wxCENTRE | wxICON_EXCLAMATION);
dialog.SetYesNoLabels(_("Delete outdated cache file [recommended]"), _("Keep outdated cache file"));
const auto result = dialog.ShowModal();
if (result == wxID_YES)
{
fs::remove(pathGenericPre1_16_0, ec);
fs::remove(pathGenericPre1_25_0, ec);
}
}
}
| 31,674 | C++ | .cpp | 716 | 41.511173 | 271 | 0.785166 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,286 | SI.cpp | cemu-project_Cemu/src/Cafe/HW/SI/SI.cpp |
#include "Cafe/HW/MMU/MMU.h"
#include "Cafe/HW/Common/HwReg.h"
#include "si.h"
namespace HW_SI
{
struct
{
struct
{
HWREG::SICOMCSR sicomcsr{};
HWREG::SIPOLL sipoll{};
HWREG::SICOUTBUF outBuf[4]{};
}registerState;
struct
{
uint8 cmd{};
uint8 buf[2]{};
}outputBufferState[4];
struct
{
bool hasErrorNoResponse{};
}channelStatus[4];
}g_si;
// normally we should call this periodically according to the parameters set in SIPOLL
// but for now we just call it whenever status registers are read
void handlePollUpdate()
{
for (uint32 i = 0; i < 4; i++)
{
// note: Order of EN and VBCPY is from MSB to LSB
bool isEnabled = ((g_si.registerState.sipoll.get_EN() >> (3 - i))&1) != 0;
if (isEnabled)
{
g_si.channelStatus[i].hasErrorNoResponse = true;
}
}
}
void handleQueuedTransfers()
{
}
void flushAllOutputBuffers()
{
for (uint32 i = 0; i < 4; i++)
{
g_si.outputBufferState[i].cmd = g_si.registerState.outBuf[i].get_CMD();
g_si.outputBufferState[i].buf[0] = g_si.registerState.outBuf[i].get_OUTPUT0();
g_si.outputBufferState[i].buf[1] = g_si.registerState.outBuf[i].get_OUTPUT1();
}
}
/* +0x6400/0x640C/0x6418/0x6424 | SI0COUTBUF - SI3COUTBUF */
HWREG::SICOUTBUF SI_COUTBUF_R32(PAddr addr)
{
uint32 joyChannelIndex = (addr & 0xFF) / 0xC;
cemu_assert_debug(false);
return HWREG::SICOUTBUF();
}
void SI_COUTBUF_W32(PAddr addr, HWREG::SICOUTBUF newValue)
{
uint32 joyChannelIndex = (addr & 0xFF) / 0xC;
g_si.registerState.outBuf[joyChannelIndex] = newValue;
}
/* +0x6430 | SIPOLL */
HWREG::SIPOLL SI_POLL_R32(PAddr addr)
{
cemu_assert_debug(false);
return g_si.registerState.sipoll;
}
void SI_POLL_W32(PAddr addr, HWREG::SIPOLL newValue)
{
g_si.registerState.sipoll = newValue;
}
/* +0x6434 | SICOMCSR */
HWREG::SICOMCSR SI_COMCSR_R32(PAddr addr)
{
//cemuLog_logDebug(LogType::Force, "Read SICOMCSR");
return g_si.registerState.sicomcsr;
}
void SI_COMCSR_W32(PAddr addr, HWREG::SICOMCSR newValue)
{
uint32 unhandledBits = g_si.registerState.sicomcsr.getRawValue() & ~(0x80000000);
cemu_assert_debug(unhandledBits == 0);
// clear transfer complete interrupt
if (newValue.get_TCINT())
{
g_si.registerState.sicomcsr.set_TCINT(0);
}
if (newValue.get_TRANSFER_START())
{
cemu_assert_debug(false);
handleQueuedTransfers();
}
}
/* +0x6438 | SISR */
HWREG::SISR SI_SR_R32(PAddr addr)
{
handlePollUpdate();
HWREG::SISR reg;
// no response error
if (g_si.channelStatus[0].hasErrorNoResponse)
reg.set_NOREP0(1);
if (g_si.channelStatus[1].hasErrorNoResponse)
reg.set_NOREP1(1);
if (g_si.channelStatus[2].hasErrorNoResponse)
reg.set_NOREP2(1);
if (g_si.channelStatus[3].hasErrorNoResponse)
reg.set_NOREP3(1);
// todo - other status fields
return reg;
}
void SI_SR_W32(PAddr addr, HWREG::SISR newValue)
{
if (newValue.get_NOREP0())
g_si.channelStatus[0].hasErrorNoResponse = false;
if (newValue.get_NOREP1())
g_si.channelStatus[1].hasErrorNoResponse = false;
if (newValue.get_NOREP2())
g_si.channelStatus[2].hasErrorNoResponse = false;
if (newValue.get_NOREP3())
g_si.channelStatus[3].hasErrorNoResponse = false;
if (newValue.get_WR())
{
// copies contents of SICOUTBUF to the internal shadow buffers
flushAllOutputBuffers();
}
}
void Initialize()
{
MMU::RegisterMMIO_32<HWREG::SICOUTBUF, SI_COUTBUF_R32, SI_COUTBUF_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6400);
MMU::RegisterMMIO_32<HWREG::SICOUTBUF, SI_COUTBUF_R32, SI_COUTBUF_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x640C);
MMU::RegisterMMIO_32<HWREG::SICOUTBUF, SI_COUTBUF_R32, SI_COUTBUF_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6418);
MMU::RegisterMMIO_32<HWREG::SICOUTBUF, SI_COUTBUF_R32, SI_COUTBUF_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6424);
MMU::RegisterMMIO_32<HWREG::SIPOLL, SI_POLL_R32, SI_POLL_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6430);
MMU::RegisterMMIO_32<HWREG::SICOMCSR, SI_COMCSR_R32, SI_COMCSR_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6434);
MMU::RegisterMMIO_32<HWREG::SISR, SI_SR_R32, SI_SR_W32>(MMU::MMIOInterface::INTERFACE_0D000000, 0x6438);
}
}
| 4,219 | C++ | .cpp | 136 | 27.977941 | 121 | 0.716646 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,287 | GameProfile.cpp | cemu-project_Cemu/src/Cafe/GameProfile/GameProfile.cpp |
#include "Cafe/GameProfile/GameProfile.h"
#include "util/helpers/helpers.h"
#include "boost/nowide/convert.hpp"
#include "config/ActiveSettings.h"
#include "Common/FileStream.h"
#include "util/IniParser/IniParser.h"
#include "util/helpers/StringHelpers.h"
#include "Cafe/CafeSystem.h"
std::unique_ptr<GameProfile> g_current_game_profile = std::make_unique<GameProfile>();
struct gameProfileBooleanOption_t
{
bool isPresent = false;
bool value;
};
/*
* Attempts to load a boolean option
* If the option exists, true is returned.
* The boolean is stored in *optionValue
*/
bool gameProfile_loadBooleanOption(IniParser* iniParser, char* optionName, gameProfileBooleanOption_t* option)
{
auto option_value = iniParser->FindOption(optionName);
option->isPresent = false;
option->value = false;
if (!option_value)
return false;
// parse option
if (boost::iequals(*option_value, "false") || boost::iequals(*option_value, "0"))
{
option->isPresent = true;
option->value = false;
return true;
}
else if (boost::iequals(*option_value, "true") || boost::iequals(*option_value, "1"))
{
option->isPresent = true;
option->value = true;
return true;
}
else
cemuLog_log(LogType::Force, "Unknown value '{}' for option '{}' in game profile", *option_value, optionName);
return false;
}
bool gameProfile_loadBooleanOption2(IniParser& iniParser, const char* optionName, bool& option)
{
auto option_value = iniParser.FindOption(optionName);
if (!option_value)
return false;
if (boost::iequals(*option_value, "1") || boost::iequals(*option_value, "true"))
{
option = true;
return true;
}
else if (boost::iequals(*option_value, "0") || boost::iequals(*option_value, "false"))
{
option = false;
return true;
}
else
cemuLog_log(LogType::Force, "Unknown value '{}' for option '{}' in game profile", *option_value, optionName);
return false;
}
bool gameProfile_loadBooleanOption2(IniParser& iniParser, const char* optionName, std::optional<bool>& option)
{
bool tmp;
const auto result = gameProfile_loadBooleanOption2(iniParser, optionName, tmp);
if(result)
option = tmp;
return result;
}
/*
* Attempts to load an integer option
* Allows specifying a min and max value (an error is logged if the value is out of range and the default value is picked)
*/
bool gameProfile_loadIntegerOption(IniParser* iniParser, const char* optionName, gameProfileIntegerOption_t* option, sint32 defaultValue, sint32 minVal, sint32 maxVal)
{
auto option_value = iniParser->FindOption(optionName);
option->isPresent = false;
if (!option_value)
{
option->value = defaultValue;
return false;
}
// parse option
sint32 val = StringHelpers::ToInt(*option_value, defaultValue);
if (val < minVal || val > maxVal)
{
cemuLog_log(LogType::Force, "Value '{}' is out of range for option '{}' in game profile", *option_value, optionName);
option->value = defaultValue;
return false;
}
option->isPresent = true;
option->value = val;
return true;
}
template <typename T>
bool gameProfile_loadIntegerOption(IniParser& iniParser, const char* optionName, T& option, T minVal, T maxVal)
{
static_assert(std::is_integral<T>::value);
auto option_value = iniParser.FindOption(optionName);
if (!option_value)
return false;
// parse option
try
{
T val = ConvertString<T>(*option_value);
if (val < minVal || val > maxVal)
{
cemuLog_log(LogType::Force, "Value '{}' is out of range for option '{}' in game profile", *option_value, optionName);
return false;
}
option = val;
return true;
}
catch(std::exception&)
{
cemuLog_log(LogType::Force, "Value '{}' is out of range for option '{}' in game profile", *option_value, optionName);
return false;
}
}
template<typename T>
bool gameProfile_loadEnumOption(IniParser& iniParser, const char* optionName, T& option)
{
static_assert(std::is_enum<T>::value);
auto option_value = iniParser.FindOption(optionName);
if (!option_value)
return false;
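// iterate all values of the enum (relies on the project's enum range support); accept either the
// numeric underlying value or the formatted enum name, compared case-insensitively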
for(const T& v : T())
{
// test integer option
if (boost::iequals(fmt::format("{}", static_cast<typename std::underlying_type<T>::type>(v)), *option_value))
{
option = v;
return true;
}
// test enum name
if(boost::iequals(fmt::format("{}", v), *option_value))
{
option = v;
return true;
}
}
return false;
}
template<typename T>
bool gameProfile_loadEnumOption(IniParser& iniParser, const char* optionName, std::optional<T>& option)
{
T tmp;
const auto result = gameProfile_loadEnumOption(iniParser, optionName, tmp);
if(result)
option = tmp;
return result;
}
void gameProfile_load()
{
g_current_game_profile->ResetOptional(); // reset with global values as optional
g_current_game_profile->Load(CafeSystem::GetForegroundTitleId());
// apply some settings immediately
ppcThreadQuantum = g_current_game_profile->GetThreadQuantum();
if (ppcThreadQuantum != GameProfile::kThreadQuantumDefault)
cemuLog_log(LogType::Force, "Thread quantum set to {}", ppcThreadQuantum);
}
bool GameProfile::Load(uint64_t title_id)
{
auto gameProfilePath = ActiveSettings::GetConfigPath("gameProfiles/{:016x}.ini", title_id);
std::optional<std::vector<uint8>> profileContents = FileStream::LoadIntoMemory(gameProfilePath);
if (!profileContents)
{
gameProfilePath = ActiveSettings::GetDataPath("gameProfiles/default/{:016x}.ini", title_id);
profileContents = FileStream::LoadIntoMemory(gameProfilePath);
if (!profileContents)
return false;
m_is_default = true;
}
else
m_is_default = false;
m_is_loaded = true;
// most official game profiles start with a "# gamename" comment line
std::vector<char> game_name;
if (profileContents->size() > 0 && profileContents->data()[0] == '#')
{
char c;
size_t idx = 1;
while (idx < profileContents->size() && (c = profileContents->data()[idx]) != '\n' && idx < 128)
{
game_name.emplace_back(c);
idx++;
}
m_gameName = std::string(game_name.begin(), game_name.end());
trim(m_gameName.value());
}
IniParser iniParser(*profileContents, _pathToUtf8(gameProfilePath));
// parse ini
while (iniParser.NextSection())
{
if (boost::iequals(iniParser.GetCurrentSectionName(), "General"))
{
gameProfile_loadBooleanOption2(iniParser, "loadSharedLibraries", m_loadSharedLibraries);
gameProfile_loadBooleanOption2(iniParser, "startWithPadView", m_startWithPadView);
}
else if (boost::iequals(iniParser.GetCurrentSectionName(), "Graphics"))
{
gameProfileIntegerOption_t graphicsApi;
gameProfile_loadIntegerOption(&iniParser, "graphics_api", &graphicsApi, -1, 0, 1);
if (graphicsApi.value != -1)
m_graphics_api = (GraphicAPI)graphicsApi.value;
gameProfile_loadEnumOption(iniParser, "accurateShaderMul", m_accurateShaderMul);
// legacy support
auto option_precompiledShaders = iniParser.FindOption("precompiledShaders");
if (option_precompiledShaders)
{
if (boost::iequals(*option_precompiledShaders, "1") || boost::iequals(*option_precompiledShaders, "true"))
m_precompiledShaders = PrecompiledShaderOption::Enable;
else if (boost::iequals(*option_precompiledShaders, "0") || boost::iequals(*option_precompiledShaders, "false"))
m_precompiledShaders = PrecompiledShaderOption::Disable;
else
m_precompiledShaders = PrecompiledShaderOption::Auto;
}
else
m_precompiledShaders = PrecompiledShaderOption::Auto;
}
else if (boost::iequals(iniParser.GetCurrentSectionName(), "Audio"))
{
gameProfile_loadBooleanOption2(iniParser, "disableAudio", m_disableAudio);
}
else if (boost::iequals(iniParser.GetCurrentSectionName(), "CPU"))
{
gameProfile_loadIntegerOption(iniParser, "threadQuantum", m_threadQuantum, 1000U, 536870912U);
if (!gameProfile_loadEnumOption(iniParser, "cpuMode", m_cpuMode))
{
// try to load the old enum value strings
std::optional<CPUModeLegacy> cpu_mode_legacy;
if (gameProfile_loadEnumOption(iniParser, "cpuMode", cpu_mode_legacy) && cpu_mode_legacy.has_value())
{
m_cpuMode = (CPUMode)cpu_mode_legacy.value();
if (m_cpuMode == CPUMode::DualcoreRecompiler)
m_cpuMode = CPUMode::MulticoreRecompiler;
}
}
}
else if (boost::iequals(iniParser.GetCurrentSectionName(), "Controller"))
{
for (int i = 0; i < 8; ++i)
{
auto option_value = iniParser.FindOption(fmt::format("controller{}", (i + 1)));
if (option_value)
m_controllerProfile[i] = std::string(*option_value);
}
}
}
return true;
}
void GameProfile::Save(uint64_t title_id)
{
auto gameProfileDir = ActiveSettings::GetConfigPath("gameProfiles");
if (std::error_code ex_ec; !fs::exists(gameProfileDir, ex_ec))
fs::create_directories(gameProfileDir, ex_ec);
auto gameProfilePath = gameProfileDir / fmt::format("{:016x}.ini", title_id);
FileStream* fs = FileStream::createFile2(gameProfilePath);
if (!fs)
{
cemuLog_log(LogType::Force, "Failed to write game profile");
return;
}
if (m_gameName)
fs->writeLine(fmt::format("# {}\n", m_gameName.value()).c_str());
#define WRITE_OPTIONAL_ENTRY(__NAME) if (m_##__NAME) fs->writeLine(fmt::format("{} = {}", #__NAME, m_##__NAME.value()).c_str());
#define WRITE_ENTRY(__NAME) fs->writeLine(fmt::format("{} = {}", #__NAME, m_##__NAME).c_str());
fs->writeLine("[General]");
WRITE_OPTIONAL_ENTRY(loadSharedLibraries);
WRITE_ENTRY(startWithPadView);
fs->writeLine("");
fs->writeLine("[CPU]");
WRITE_OPTIONAL_ENTRY(cpuMode);
WRITE_ENTRY(threadQuantum);
fs->writeLine("");
fs->writeLine("[Graphics]");
WRITE_ENTRY(accurateShaderMul);
WRITE_OPTIONAL_ENTRY(precompiledShaders);
WRITE_OPTIONAL_ENTRY(graphics_api);
fs->writeLine("");
fs->writeLine("[Controller]");
for (int i = 0; i < 8; ++i)
{
if (m_controllerProfile[i])
fs->writeLine(fmt::format("controller{} = {}", (i + 1), m_controllerProfile[i].value()).c_str());
}
fs->writeLine("");
#undef WRITE_OPTIONAL_ENTRY
#undef WRITE_ENTRY
delete fs;
}
void GameProfile::ResetOptional()
{
m_gameName.reset();
// general settings
m_loadSharedLibraries.reset(); // true;
m_startWithPadView = false;
// graphic settings
m_accurateShaderMul = AccurateShaderMulOption::True;
// cpu settings
m_threadQuantum = kThreadQuantumDefault;
m_cpuMode.reset(); // CPUModeOption::kSingleCoreRecompiler;
// audio
m_disableAudio = false;
// controller settings
for (auto& profile : m_controllerProfile)
profile.reset();
}
void GameProfile::Reset()
{
m_gameName.reset();
// general settings
m_loadSharedLibraries = true;
m_startWithPadView = false;
// graphic settings
m_accurateShaderMul = AccurateShaderMulOption::True;
m_precompiledShaders = PrecompiledShaderOption::Auto;
// cpu settings
m_threadQuantum = kThreadQuantumDefault;
m_cpuMode = CPUMode::Auto;
// audio
m_disableAudio = false;
// controller settings
for (auto& profile : m_controllerProfile)
profile.reset();
}
| 10,780 | C++ | .cpp | 327 | 30.409786 | 167 | 0.729727 | cemu-project/Cemu | 7,119 | 558 | 254 | MPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 23,288 | imgui_extension.cpp | cemu-project_Cemu/src/imgui/imgui_extension.cpp |
#include "imgui_extension.h"
#include "gui/guiWrapper.h"
#include "Cafe/HW/Latte/Renderer/Renderer.h"
#include "resource/IconsFontAwesome5.h"
#include "imgui_impl_opengl3.h"
#include "resource/resource.h"
#include "imgui_impl_vulkan.h"
#include "input/InputManager.h"
// <imgui_internal.h>
template<typename T> static T ImMin(T lhs, T rhs) { return lhs < rhs ? lhs : rhs; }
template<typename T> static T ImMax(T lhs, T rhs) { return lhs >= rhs ? lhs : rhs; }
static ImVec2 ImRotate(const ImVec2& v, float cos_a, float sin_a) { return ImVec2(v.x * cos_a - v.y * sin_a, v.x * sin_a + v.y * cos_a); }
int rotation_start_index;
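// helpers to rotate all ImGui vertices emitted between ImRotateStart() and ImRotateEnd() around a given center point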
void ImRotateStart()
{
rotation_start_index = ImGui::GetWindowDrawList()->VtxBuffer.Size;
}
ImVec2 ImRotationCenter()
{
ImVec2 l(FLT_MAX, FLT_MAX), u(-FLT_MAX, -FLT_MAX); // bounds
const auto& buf = ImGui::GetWindowDrawList()->VtxBuffer;
for (int i = rotation_start_index; i < buf.Size; i++)
l = ImMin(l, buf[i].pos), u = ImMax(u, buf[i].pos);
return ImVec2((l.x + u.x) / 2, (l.y + u.y) / 2); // or use _ClipRectStack?
}
void ImRotateEnd(float rad, ImVec2 center)
{
const float s = sin(rad);
const float c = cos(rad);
center = ImRotate(center, s, c) - center;
auto& buf = ImGui::GetWindowDrawList()->VtxBuffer;
for (int i = rotation_start_index; i < buf.Size; i++)
buf[i].pos = ImRotate(buf[i].pos, s, c) - center;
}
uint8* extractCafeDefaultFont(sint32* size);
sint32 g_font_size = 0;
uint8* g_font_data = nullptr;
#if !BOOST_OS_WINDOWS
extern int const g_fontawesome_size;
extern char const g_fontawesome_data[];
#endif
std::unordered_map<int, ImFont*> g_imgui_fonts;
std::stack<int> g_font_requests;
void ImGui_PrecacheFonts()
{
while (!g_font_requests.empty())
{
const int size = g_font_requests.top();
g_font_requests.pop();
auto& io = ImGui::GetIO();
cemu_assert(io.Fonts->Locked == false);
if (g_font_size == 0)
g_font_data = extractCafeDefaultFont(&g_font_size);
ImFontConfig cfg{};
cfg.FontDataOwnedByAtlas = false;
//cfg.FontData = g_font_data;
//cfg.FontDataSize = g_font_size;
//cfg.SizePixels = size;
ImFont* font = io.Fonts->AddFontFromMemoryTTF(g_font_data, g_font_size, (float)size, &cfg);
ImFontConfig cfgmerge{};
cfgmerge.FontDataOwnedByAtlas = false;
cfgmerge.MergeMode = true;
cfgmerge.GlyphMinAdvanceX = 20.0f;
//cfgmerge.GlyphOffset = { 2,2 };
static const ImWchar icon_ranges[] = { ICON_MIN_FA, ICON_MAX_FA, 0 };
#if BOOST_OS_WINDOWS
const auto hinstance = GetModuleHandle(nullptr);
const HRSRC res = FindResource(hinstance, MAKEINTRESOURCE(IDR_FONTAWESOME), RT_RCDATA);
if (res)
{
const HGLOBAL mem = ::LoadResource(hinstance, res);
if (mem)
{
void* data = LockResource(mem);
const size_t len = SizeofResource(hinstance, res);
io.Fonts->AddFontFromMemoryTTF(data, (int)len, (float)size, &cfgmerge, icon_ranges);
}
}
#else
io.Fonts->AddFontFromMemoryTTF((void*)g_fontawesome_data, (int)g_fontawesome_size, (float)size, &cfgmerge, icon_ranges);
#endif
g_imgui_fonts[(int)size] = font;
// Vulkan doesn't let us destroy resources that are still being used, so we flush here
g_renderer->Flush(true);
g_renderer->DeleteFontTextures();
}
}
void ImGui_ClearFonts()
{
g_imgui_fonts.clear();
}
ImFont* ImGui_GetFont(float size)
{
const auto it = g_imgui_fonts.find((int)size);
if (it != g_imgui_fonts.cend())
return it->second;
g_font_requests.emplace((int)size);
return nullptr; // will create the font in next precache call
}
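// Usage sketch (illustrative, not part of the original file): font creation is deferred.
// ImGui_GetFont() returns nullptr until the queued size has been built by a later
// ImGui_PrecacheFonts() call, so callers have to tolerate a first-frame miss.
static void DrawWithCustomFontExample()
{
	if (ImFont* font = ImGui_GetFont(32.0f))
	{
		ImGui::PushFont(font);
		ImGui::Text("large text");
		ImGui::PopFont();
	}
}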
void ImGui_UpdateWindowInformation(bool mainWindow)
{
extern WindowInfo g_window_info;
static std::map<uint32, ImGuiKey> keyboard_mapping;
static uint32 current_key = 0;
ImGuiIO& io = ImGui::GetIO();
io.BackendFlags |= ImGuiBackendFlags_HasMouseCursors;
io.ConfigFlags |= ImGuiConfigFlags_NavEnableGamepad;
#if BOOST_OS_WINDOWS
io.ImeWindowHandle = mainWindow ? g_window_info.window_main.hwnd : g_window_info.window_pad.hwnd;
#else
io.ImeWindowHandle = nullptr;
#endif
io.MousePos = ImVec2(-FLT_MAX, -FLT_MAX);
auto& instance = InputManager::instance();
const auto mousePos = instance.get_mouse_position(!mainWindow);
io.MousePos = { (float)mousePos.x, (float)mousePos.y };
bool padDown;
const auto pos = instance.get_left_down_mouse_info(&padDown);
io.MouseDown[0] = padDown != mainWindow && pos.has_value();
auto get_mapping = [&](uint32 key_code)
{
auto key = keyboard_mapping.find(key_code);
if (key != keyboard_mapping.end())
return key->second;
ImGuiKey mapped_key = (ImGuiKey)((uint32)current_key + ImGuiKey_NamedKey_BEGIN);
current_key = (current_key + 1) % (uint32)ImGuiKey_NamedKey_COUNT;
keyboard_mapping[key_code] = mapped_key;
return mapped_key;
};
g_window_info.iter_keystates([&](auto&& el){ io.AddKeyEvent(get_mapping(el.first), el.second); });
// printf("%f %f %d\n", io.MousePos.x, io.MousePos.y, io.MouseDown[0]);
for (auto i = 0; i < InputManager::kMaxController; ++i)
{
const auto controller = instance.get_controller(i);
if (!controller)
continue;
if (controller->is_start_down())
io.NavInputs[ImGuiNavInput_Input] = 1.0f;
if (controller->is_a_down())
io.NavInputs[ImGuiNavInput_Activate] = 1.0f;
if (controller->is_b_down())
io.NavInputs[ImGuiNavInput_Cancel] = 1.0f;
if (controller->is_left_down())
io.NavInputs[ImGuiNavInput_DpadLeft] = 1.0f;
if (controller->is_right_down())
io.NavInputs[ImGuiNavInput_DpadRight] = 1.0f;
if (controller->is_up_down())
io.NavInputs[ImGuiNavInput_DpadUp] = 1.0f;
if (controller->is_down_down())
io.NavInputs[ImGuiNavInput_DpadDown] = 1.0f;
}
}
// File: cemu-project_Cemu/dependencies/discord-rpc/examples/unrealstatus/Source/unrealstatus/unrealstatusGameModeBase.cpp
// Fill out your copyright notice in the Description page of Project Settings.
#include "unrealstatusGameModeBase.h"
// File: cemu-project_Cemu/src/util/EventService.h
#pragma once
#include "util/helpers/Singleton.h"
#include <boost/signals2.hpp>
#include <boost/bind/placeholders.hpp>
enum class Events : int32_t
{
ControllerChanged,
};
using ControllerChangedFunc = void(void);
class EventService : public Singleton<EventService>
{
friend class Singleton<EventService>;
EventService() = default;
public:
template <Events event, typename TFunc, typename TClass>
boost::signals2::connection connect(TFunc function, TClass thisptr)
{
using namespace boost::placeholders;
if constexpr (event == Events::ControllerChanged)
return m_controller_changed_signal.connect(boost::bind(function, thisptr));
else
{
cemu_assert_suspicious();
}
}
template <Events event>
void disconnect(const boost::signals2::connection& slot)
{
using namespace boost::placeholders;
if constexpr (event == Events::ControllerChanged)
m_controller_changed_signal.disconnect(slot);
else
{
cemu_assert_suspicious();
}
}
template <Events event, typename ... TArgs>
void signal(TArgs&&... args)
{
try
{
if constexpr (event == Events::ControllerChanged)
m_controller_changed_signal(std::forward<TArgs>(args)...);
else
{
cemu_assert_suspicious();
}
}
catch (const std::exception& ex)
{
cemuLog_log(LogType::Force, "error when signaling {}: {}", event, ex.what());
}
}
private:
boost::signals2::signal<ControllerChangedFunc> m_controller_changed_signal;
};
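// Usage sketch (illustrative): a listener connects a member function to the
// ControllerChanged event and disconnects it again on destruction. The class below is
// made up for demonstration and assumes the Singleton base exposes an instance() accessor.
class ControllerObserverExample
{
public:
	ControllerObserverExample()
	{
		m_connection = EventService::instance().connect<Events::ControllerChanged>(
			&ControllerObserverExample::OnControllerChanged, this);
	}
	~ControllerObserverExample()
	{
		EventService::instance().disconnect<Events::ControllerChanged>(m_connection);
	}
private:
	void OnControllerChanged() { /* refresh controller-dependent state */ }
	boost::signals2::connection m_connection;
};
// Producers raise the event with: EventService::instance().signal<Events::ControllerChanged>();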
// File: cemu-project_Cemu/src/util/IniParser/IniParser.h
#pragma once
#include <vector>
#include <span>
#include <string>
#include <optional>
class IniParser
{
private:
class IniSection
{
public:
IniSection(std::string_view sectionName, size_t lineNumber) : m_sectionName(sectionName), m_lineNumber(lineNumber) {}
std::string_view m_sectionName;
size_t m_lineNumber;
std::vector<std::pair<std::string_view, std::string_view>> m_optionPairs;
};
public:
IniParser(std::span<char> iniContents, std::string_view name = {});
IniParser(std::span<unsigned char> iniContents, std::string_view name = {}) : IniParser(std::span<char>((char*)iniContents.data(), iniContents.size()), name) {};
// section and option iterating
bool NextSection();
std::string_view GetCurrentSectionName();
size_t GetCurrentSectionLineNumber();
std::optional<std::string_view> FindOption(std::string_view optionName);
std::span<std::pair<std::string_view, std::string_view>> GetAllOptions();
private:
// parsing
bool parse();
bool ReadNextLine(std::string_view& lineString);
void TrimWhitespaces(std::string_view& str);
void StartSection(std::string_view sectionName, size_t lineNumber);
void PrintWarning(int lineNumber, std::string_view msg, std::string_view lineView);
std::vector<char> m_iniFileData;
std::string m_name;
bool m_isValid{ false };
size_t m_parseOffset{ 0 };
std::vector<IniSection> m_sectionList;
size_t m_currentSectionIndex{std::numeric_limits<size_t>::max()};
};
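// Usage sketch (illustrative): iterate the sections of an in-memory INI buffer and read
// one option. The buffer contents, section and option names are assumptions.
static bool ParseIniExample()
{
	std::string text = "[Graphics]\naccurateShaderMul = true\n";
	IniParser ini(std::span<char>(text.data(), text.size()), "example.ini");
	bool accurate = false;
	while (ini.NextSection())
	{
		if (ini.GetCurrentSectionName() != "Graphics")
			continue;
		if (std::optional<std::string_view> value = ini.FindOption("accurateShaderMul"))
			accurate = (*value == "true");
	}
	return accurate;
}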
// File: cemu-project_Cemu/src/util/Fiber/Fiber.h
#pragma once
#if BOOST_OS_WINDOWS
#endif
class Fiber
{
public:
Fiber(void(*FiberEntryPoint)(void* userParam), void* userParam, void* privateData);
~Fiber();
static Fiber* PrepareCurrentThread(void* privateData = nullptr);
static void Switch(Fiber& targetFiber);
static void* GetFiberPrivateData();
private:
Fiber(void* privateData); // fiber from current thread
void* m_implData{nullptr};
void* m_privateData;
void* m_stackPtr{ nullptr };
};
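// Usage sketch (illustrative): cooperative switching between the calling thread and a
// worker fiber. The scheduling pattern below is an assumption based on the interface above.
static Fiber* s_mainFiberExample = nullptr;
static void FiberEntryExample(void* userParam)
{
	// ... do some cooperative work ...
	Fiber::Switch(*s_mainFiberExample); // yield back to the thread that started us
}
static void RunFiberExample()
{
	s_mainFiberExample = Fiber::PrepareCurrentThread(); // current thread becomes a fiber
	Fiber worker(FiberEntryExample, nullptr, nullptr);
	Fiber::Switch(worker); // runs FiberEntryExample until it switches back
}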
// File: cemu-project_Cemu/src/util/containers/LookupTableL3.h
#pragma once
// staged lookup table suited for cases where the lookup index range can be very large (e.g. memory addresses)
// performs 3 consecutive table lookups, where each table's width is defined by TBitsN
// empty subtables consume no memory beyond the initial two default tables for TBitsY and TBitsZ
template<int TBitsX, int TBitsY, int TBitsZ, typename T>
class LookupTableL3
{
struct TableZ // z lookup
{
T arr[1 << TBitsZ]{};
};
struct TableY // y lookup
{
TableZ* arr[1 << TBitsY];
};
// by generating placeholder tables we can avoid conditionals in the lookup code since no null-pointer checking is necessary
TableY* m_placeholderTableY{};
TableZ* m_placeholderTableZ{};
public:
LookupTableL3()
{
// init placeholder table Z
m_placeholderTableZ = GenerateNewTableZ();
// init placeholder table Y (all entries point to placeholder table Z)
m_placeholderTableY = GenerateNewTableY();
// init x table
for (auto& itr : m_tableXArr)
itr = m_placeholderTableY;
}
~LookupTableL3()
{
delete m_placeholderTableY;
delete m_placeholderTableZ;
}
// lookup
// only the bottom most N bits bits are used of the offset
// N = TBitsX + TBitsY + TBitsZ
// if no match is found a default-constructed object is returned
T lookup(uint32 offset)
{
uint32 indexZ = offset & ((1u << TBitsZ) - 1);
offset >>= TBitsZ;
uint32 indexY = offset & ((1u << TBitsY) - 1);
offset >>= TBitsY;
uint32 indexX = offset & ((1u << TBitsX) - 1);
//offset >>= TBitsX;
return m_tableXArr[indexX]->arr[indexY]->arr[indexZ];
}
void store(uint32 offset, T& t)
{
uint32 indexZ = offset & ((1u << TBitsZ) - 1);
offset >>= TBitsZ;
uint32 indexY = offset & ((1u << TBitsY) - 1);
offset >>= TBitsY;
uint32 indexX = offset & ((1u << TBitsX) - 1);
if (m_tableXArr[indexX] == m_placeholderTableY)
m_tableXArr[indexX] = GenerateNewTableY();
TableY* lookupY = m_tableXArr[indexX];
if (lookupY->arr[indexY] == m_placeholderTableZ)
lookupY->arr[indexY] = GenerateNewTableZ();
TableZ* lookupZ = lookupY->arr[indexY];
lookupZ->arr[indexZ] = t;
}
private:
// generate a new Y lookup table which will initially contain only pointers to m_placeholderTableZ
TableY* GenerateNewTableY()
{
TableY* tableY = new TableY();
for (auto& itr : tableY->arr)
itr = m_placeholderTableZ;
return tableY;
}
// generate a new Z lookup table which will initially contain only default constructed T
TableZ* GenerateNewTableZ()
{
TableZ* tableZ = new TableZ();
return tableZ;
}
TableY* m_tableXArr[1 << TBitsX]; // x lookup
};
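// Usage sketch (illustrative): a 4+12+16 bit split covers a full 32-bit address space.
// store() populates subtables on demand; lookup() returns a default-constructed value
// (nullptr here) for addresses that were never stored, as noted in the comments above.
static void LookupTableExample()
{
	LookupTableL3<4, 12, 16, void*> table;
	void* obj = &table; // arbitrary payload, purely for demonstration
	table.store(0x10001000u, obj);
	void* hit = table.lookup(0x10001000u);  // == obj
	void* miss = table.lookup(0x20000000u); // == nullptr (placeholder tables)
	(void)hit; (void)miss;
}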
// File: cemu-project_Cemu/src/util/containers/RangeStore.h
#pragma once
template<typename _OBJ, typename _ADDR, size_t count, size_t granularity>
class RangeStore
{
public:
typedef struct
{
_ADDR start;
_ADDR end;
_OBJ data;
size_t lastIterationIndex;
}rangeEntry_t;
RangeStore() = default;
size_t getBucket(_ADDR addr)
{
size_t index = addr / granularity;
index %= count;
return index;
}
void getBucketRange(_ADDR addrStart, _ADDR addrEnd, size_t& bucketFirst, size_t& bucketCount)
{
bucketFirst = getBucket(addrStart);
size_t indexStart = addrStart / granularity;
size_t indexEnd = std::max(addrStart, addrEnd - 1) / granularity;
bucketCount = indexEnd - indexStart + 1;
}
// end address should be supplied as start+size
void* storeRange(_OBJ data, _ADDR start, _ADDR end)
{
size_t bucketFirst;
size_t bucketCount;
getBucketRange(start, end, bucketFirst, bucketCount);
bucketCount = std::min(bucketCount, count);
// create range
rangeEntry_t* rangeEntry = new rangeEntry_t();
rangeEntry->data = data;
rangeEntry->start = start;
rangeEntry->end = end;
rangeEntry->lastIterationIndex = currentIterationIndex;
// register range in every bucket it touches
size_t idx = bucketFirst;
for (size_t i = 0; i < bucketCount; i++)
{
rangeBuckets[idx].list_ranges.push_back(rangeEntry);
idx = (idx + 1) % count;
}
return rangeEntry;
}
void deleteRange(void* rangePtr)
{
rangeEntry_t* rangeEntry = (rangeEntry_t*)rangePtr;
// get bucket range
size_t bucketFirst;
size_t bucketCount;
getBucketRange(rangeEntry->start, rangeEntry->end, bucketFirst, bucketCount);
bucketCount = std::min(bucketCount, count);
// remove from buckets
size_t idx = bucketFirst;
for (size_t i = 0; i < bucketCount; i++)
{
rangeBuckets[idx].list_ranges.erase(std::remove(rangeBuckets[idx].list_ranges.begin(), rangeBuckets[idx].list_ranges.end(), rangeEntry), rangeBuckets[idx].list_ranges.end());
idx = (idx + 1) % count;
}
delete rangeEntry;
}
void findRanges(_ADDR start, _ADDR end, std::function <void(_ADDR start, _ADDR end, _OBJ data)> f)
{
currentIterationIndex++;
size_t bucketFirst;
size_t bucketCount;
getBucketRange(start, end, bucketFirst, bucketCount);
bucketCount = std::min(bucketCount, count);
size_t idx = bucketFirst;
for (size_t i = 0; i < bucketCount; i++)
{
for (auto r : rangeBuckets[idx].list_ranges)
{
if (start < r->end && end > r->start && r->lastIterationIndex != currentIterationIndex)
{
r->lastIterationIndex = currentIterationIndex;
f(r->start, r->end, r->data);
}
}
idx = (idx + 1) % count;
}
}
bool findFirstRange(_ADDR start, _ADDR end, _ADDR& rStart, _ADDR& rEnd, _OBJ& rData)
{
currentIterationIndex++;
size_t bucketFirst;
size_t bucketCount;
getBucketRange(start, end, bucketFirst, bucketCount);
bucketCount = std::min(bucketCount, count);
size_t idx = bucketFirst;
for (size_t i = 0; i < bucketCount; i++)
{
for (auto r : rangeBuckets[idx].list_ranges)
{
if (start < r->end && end > r->start && r->lastIterationIndex != currentIterationIndex)
{
r->lastIterationIndex = currentIterationIndex;
rStart = r->start;
rEnd = r->end;
rData = r->data;
return true;
}
}
idx = (idx + 1) % count;
}
return false;
}
void clear()
{
for(auto& bucket : rangeBuckets)
{
while(!bucket.list_ranges.empty())
deleteRange(bucket.list_ranges[0]);
}
}
private:
typedef struct
{
std::vector<rangeEntry_t*> list_ranges;
}rangeBucket_t;
std::array<rangeBucket_t, count> rangeBuckets;
size_t currentIterationIndex{};
};
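// Usage sketch (illustrative): track two address ranges and enumerate the ones that
// overlap a query window. Bucket count, granularity and addresses are arbitrary assumptions.
static void RangeStoreExample()
{
	RangeStore<int, uint32, 64, 0x1000> store; // 64 buckets, 4KiB granularity
	void* rangeA = store.storeRange(1, 0x1000, 0x3000); // [0x1000, 0x3000)
	void* rangeB = store.storeRange(2, 0x8000, 0x9000); // [0x8000, 0x9000)
	store.findRanges(0x2000, 0x4000, [](uint32 start, uint32 end, int data) {
		// invoked once here, with data == 1 (only rangeA overlaps the query)
	});
	store.deleteRange(rangeA);
	store.deleteRange(rangeB);
}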
// File: cemu-project_Cemu/src/util/containers/IntervalBucketContainer.h
#pragma once
template<typename TData, typename TAddr, TAddr TAddressGranularity, int TBucketCount>
class IntervalBucketContainer
{
struct bucketEntry_t
{
TAddr rangeStart;
TAddr rangeEnd;
TData* data;
int bucketStartIndex;
bucketEntry_t(TAddr rangeStart, TAddr rangeEnd, TData* data, int bucketStartIndex) : rangeStart(rangeStart), rangeEnd(rangeEnd), data(data), bucketStartIndex(bucketStartIndex) {};
};
std::vector<bucketEntry_t> list_bucket[TBucketCount];
public:
IntervalBucketContainer() = default;
// range is defined as inclusive rangeStart and exclusive rangeEnd
void addRange(TAddr rangeStart, TAddr rangeEnd, TData* data)
{
assert(rangeStart < rangeEnd);
int bucketStartIndex = (rangeStart / TAddressGranularity);
int bucketEndIndex = ((rangeEnd + TAddressGranularity - 1) / TAddressGranularity);
int bucketItrCount = bucketEndIndex - bucketStartIndex;
bucketStartIndex %= TBucketCount;
int bucketFirstIndex = bucketStartIndex;
bucketItrCount = std::min(bucketItrCount, TBucketCount);
assert(bucketItrCount != 0);
while (bucketItrCount--)
{
list_bucket[bucketStartIndex].emplace_back(rangeStart, rangeEnd, data, bucketFirstIndex);
bucketStartIndex = (bucketStartIndex + 1) % TBucketCount;
}
}
void removeRange(TAddr rangeStart, TAddr rangeEnd, TData* data)
{
assert(rangeStart < rangeEnd);
int bucketStartIndex = (rangeStart / TAddressGranularity);
int bucketEndIndex = ((rangeEnd + TAddressGranularity - 1) / TAddressGranularity);
int bucketItrCount = bucketEndIndex - bucketStartIndex;
bucketStartIndex %= TBucketCount;
bucketItrCount = std::min(bucketItrCount, TBucketCount);
assert(bucketItrCount != 0);
int eraseCountVerifier = bucketItrCount;
while (bucketItrCount--)
{
for (auto it = list_bucket[bucketStartIndex].begin(); it != list_bucket[bucketStartIndex].end(); it++)
{
if (it->data == data)
{
assert(it->rangeStart == rangeStart && it->rangeEnd == rangeEnd);
// erase
list_bucket[bucketStartIndex].erase(it);
eraseCountVerifier--;
break;
}
}
bucketStartIndex = (bucketStartIndex + 1) % TBucketCount;
}
assert(eraseCountVerifier == 0); // triggers if rangeStart/End doesn't match up with any registered range
}
template<typename TRangeCallback>
void lookupRanges(TAddr rangeStart, TAddr rangeEnd, TRangeCallback cb)
{
assert(rangeStart < rangeEnd);
int bucketStartIndex = (rangeStart / TAddressGranularity);
int bucketEndIndex = ((rangeEnd + TAddressGranularity - 1) / TAddressGranularity);
int bucketItrCount = bucketEndIndex - bucketStartIndex;
bucketStartIndex %= TBucketCount;
bucketItrCount = std::min(bucketItrCount, TBucketCount);
assert(bucketItrCount != 0);
// in first round we dont need to check if bucket was already visited
for (auto& itr : list_bucket[bucketStartIndex])
{
if (itr.rangeStart < rangeEnd && itr.rangeEnd > rangeStart)
{
cb(itr.data);
}
}
bucketItrCount--;
bucketStartIndex = (bucketStartIndex + 1) % TBucketCount;
// for remaining buckets check if the range starts in the current bucket
while (bucketItrCount--)
{
for (auto& itr : list_bucket[bucketStartIndex])
{
if (itr.rangeStart < rangeEnd && itr.rangeEnd > rangeStart && itr.bucketStartIndex == bucketStartIndex)
{
cb(itr.data);
}
}
bucketStartIndex = (bucketStartIndex + 1) % TBucketCount;
}
}
};
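// Usage sketch (illustrative): register a range and collect every entry overlapping a
// query interval. The payload type and addresses are assumptions for demonstration.
static void IntervalBucketExample()
{
	IntervalBucketContainer<int, uint32, 0x100, 32> container; // 256-byte buckets, 32 of them
	int payload = 42;
	container.addRange(0x200, 0x280, &payload); // [0x200, 0x280)
	container.lookupRanges(0x250, 0x300, [](int* data) {
		// invoked once here, with data pointing at payload
	});
	container.removeRange(0x200, 0x280, &payload);
}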
// File: cemu-project_Cemu/src/util/containers/SmallBitset.h
// optimized and compact version of std::bitset with no error checking in release mode
// uses a single uint32 to store the bitmask, thus allowing up to 32 bool values
template<size_t N>
class SmallBitset
{
public:
SmallBitset() = default;
static_assert(N <= 32);
bool test(size_t index) const
{
cemu_assert_debug(index < N);
return ((m_bits >> index) & 1) != 0;
}
void set(size_t index, bool val)
{
cemu_assert_debug(index < N);
m_bits &= ~(1u << index);
if (val)
m_bits |= (1u << index);
}
void set(size_t index)
{
cemu_assert_debug(index < N);
m_bits |= (1u << index);
}
private:
uint32 m_bits{};
};
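// Usage sketch (illustrative): a compact flag set for up to 32 entries.
static void SmallBitsetExample()
{
	SmallBitset<8> flags;       // 8 usable bits, stored in a single uint32
	flags.set(3);               // set bit 3
	flags.set(5, false);        // explicitly clear bit 5
	bool isSet = flags.test(3); // true
	(void)isSet;
}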
// File: cemu-project_Cemu/src/util/ThreadPool/ThreadPool.h
#include <thread>
class ThreadPool
{
public:
template<class TFunction, class... TArgs>
static void FireAndForget(TFunction&& f, TArgs&&... args)
{
// todo - find a way to use std::async here so we can utilize thread pooling?
std::thread t(std::forward<TFunction>(f), std::forward<TArgs>(args)...);
t.detach();
}
};
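// Usage sketch (illustrative): run a callable with arguments on a detached background
// thread. The lambda body and argument are assumptions for demonstration.
static void FireAndForgetExample()
{
	ThreadPool::FireAndForget([](int iterations) {
		for (int i = 0; i < iterations; ++i)
		{
			// background work
		}
	}, 100);
}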
// File: cemu-project_Cemu/src/util/crypto/aes128.h
#ifndef _AES_H_
#define _AES_H_
void AES128_init();
extern void(*AES128_ECB_encrypt)(uint8* input, const uint8* key, uint8* output);
void AES128_ECB_decrypt(uint8* input, const uint8* key, uint8 *output);
void AES128_CBC_encrypt(uint8* output, uint8* input, uint32 length, const uint8* key, const uint8* iv);
extern void(*AES128_CBC_decrypt)(uint8* output, uint8* input, uint32 length, const uint8* key, const uint8* iv);
void AES128_CBC_decrypt_updateIV(uint8* output, uint8* input, uint32 length, const uint8* key, uint8* iv);
void AES128CTR_transform(uint8* data, sint32 length, uint8* key, uint8* nonceIv);
#endif //_AES_H_
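// Usage sketch (illustrative, appended here for clarity): AES128_init() fills in the
// function pointers declared above (presumably selecting an accelerated implementation
// where available, which is an assumption based on the pointer-based declarations), so it
// must run before any of the other calls.
static void Aes128CbcDecryptExample(uint8* output, uint8* input, uint32 length, const uint8* key, const uint8* iv)
{
	AES128_init();
	AES128_CBC_decrypt(output, input, length, key, iv);
}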
// File: cemu-project_Cemu/src/util/crypto/crc32.h
#pragma once
uint32 crc32_calc(uint32 c, const void* data, size_t length);
inline uint32 crc32_calc(const void* data, size_t length)
{
return crc32_calc(0, data, length);
}
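// Usage sketch (illustrative): the two-argument overload starts from zero, while the
// explicit seed overload lets a checksum be continued across multiple buffers.
static uint32 Crc32ChunkedExample(const uint8* chunkA, size_t lenA, const uint8* chunkB, size_t lenB)
{
	uint32 crc = crc32_calc(chunkA, lenA); // first chunk
	crc = crc32_calc(crc, chunkB, lenB);   // continue with the running value
	return crc;
}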
// File: cemu-project_Cemu/src/util/highresolutiontimer/HighResolutionTimer.h
#pragma once
using HRTick = uint64;
class HighResolutionTimer
{
public:
HighResolutionTimer()
{
m_timePoint = 0;
}
HRTick getTick() const
{
return m_timePoint;
}
uint64 getTickInSeconds() const
{
return m_timePoint / m_freq;
}
// return time difference in seconds, this is an utility function mainly intended for debugging/benchmarking purposes. Avoid using doubles for precise timing
static double getTimeDiff(HRTick startTime, HRTick endTime)
{
return (double)(endTime - startTime) / (double)m_freq;
}
// returns tick difference and frequency
static uint64 getTimeDiffEx(HRTick startTime, HRTick endTime, uint64& freq)
{
freq = m_freq;
return endTime - startTime;
}
static HighResolutionTimer now();
static HRTick getFrequency();
static HRTick microsecondsToTicks(uint64 microseconds)
{
return microseconds * m_freq / 1000000;
}
static uint64 ticksToMicroseconds(HRTick ticks)
{
return ticks * 1000000 / m_freq;
}
private:
HighResolutionTimer(uint64 timePoint) : m_timePoint(timePoint) {};
uint64 m_timePoint;
static uint64 m_freq;
};
// benchmark helper utility
// measures time between Start() and Stop() call
class BenchmarkTimer
{
public:
void Start()
{
m_startTime = HighResolutionTimer::now().getTick();
}
void Stop()
{
m_stopTime = HighResolutionTimer::now().getTick();
}
double GetElapsedMilliseconds() const
{
cemu_assert_debug(m_startTime != 0 && m_stopTime != 0);
cemu_assert_debug(m_startTime <= m_stopTime);
uint64 tickDif = m_stopTime - m_startTime;
double freq = (double)HighResolutionTimer::now().getFrequency();
double elapsedMS = (double)tickDif * 1000.0 / freq;
return elapsedMS;
}
private:
HRTick m_startTime{};
HRTick m_stopTime{};
};
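// Usage sketch (illustrative): measure a block of work in milliseconds with BenchmarkTimer.
static void BenchmarkExample()
{
	BenchmarkTimer timer;
	timer.Start();
	// ... workload to measure ...
	timer.Stop();
	double ms = timer.GetElapsedMilliseconds();
	(void)ms;
}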
// File: cemu-project_Cemu/src/util/boost/bluetooth.h
#pragma once
#include "platform/platform.h"
#include <boost/asio.hpp>
namespace boost
{
namespace asio
{
template <typename Protocol>
class device_endpoint
{
public:
typedef Protocol protocol_type;
typedef detail::socket_addr_type data_type;
struct device_t
{
device_t(long long device_addr)
: addr(device_addr)
{
}
long long addr;
};
device_endpoint()
{
memset(&addr, 0x00, sizeof(addr));
}
device_endpoint(device_t device_address)
{
memset(&addr, 0x00, sizeof(addr));
addr.addressFamily = AF_BLUETOOTH;
addr.btAddr = device_address.addr;
addr.serviceClassId = RFCOMM_PROTOCOL_UUID;
addr.port = BT_PORT_ANY;
}
device_endpoint(const device_endpoint& other)
: addr(other.addr)
{
}
device_endpoint& operator=(const device_endpoint& other)
{
addr = other.addr;
return *this;
}
protocol_type protocol() const
{
return protocol_type();
}
data_type* data()
{
return reinterpret_cast<data_type*>(&addr);
}
const data_type* data() const
{
return reinterpret_cast<const data_type*>(&addr);
}
size_t size() const
{
return sizeof(SOCKADDR_BTH);
}
size_t capacity() const
{
return size();
}
private:
SOCKADDR_BTH addr;
};
class bluetooth
{
public:
using endpoint = device_endpoint<bluetooth>;
using socket = basic_stream_socket<bluetooth>;
using acceptor = basic_socket_acceptor<bluetooth>;
using iostream = basic_socket_iostream<bluetooth>;
bluetooth() = default;
int type() const
{
return SOCK_STREAM;
}
int protocol() const
{
return BTPROTO_RFCOMM;
}
int family() const
{
return AF_BLUETOOTH;
}
};
}
}
// File: cemu-project_Cemu/src/util/MemMapper/MemMapper.h
#pragma once
namespace MemMapper
{
enum class PAGE_PERMISSION : uint32
{
P_READ = (0x01),
P_WRITE = (0x02),
P_EXECUTE = (0x04),
// combined
P_NONE = 0,
P_RW = (0x03),
P_RWX = (0x07)
};
DEFINE_ENUM_FLAG_OPERATORS(PAGE_PERMISSION);
size_t GetPageSize();
void* ReserveMemory(void* baseAddr, size_t size, PAGE_PERMISSION permissionFlags);
void FreeReservation(void* baseAddr, size_t size);
void* AllocateMemory(void* baseAddr, size_t size, PAGE_PERMISSION permissionFlags, bool fromReservation = false);
void FreeMemory(void* baseAddr, size_t size, bool fromReservation = false);
};
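// Usage sketch (illustrative): reserve a large address range up front, then commit and
// release a slice of it on demand. This pattern is inferred from the fromReservation flag
// and is an assumption, not a documented contract; sizes below are arbitrary.
static void MemMapperExample()
{
	const size_t pageSize = MemMapper::GetPageSize();
	void* reservation = MemMapper::ReserveMemory(nullptr, pageSize * 256, MemMapper::PAGE_PERMISSION::P_NONE);
	void* pages = MemMapper::AllocateMemory(reservation, pageSize * 4, MemMapper::PAGE_PERMISSION::P_RW, true);
	// ... use the committed pages ...
	MemMapper::FreeMemory(pages, pageSize * 4, true);
	MemMapper::FreeReservation(reservation, pageSize * 256);
}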
// File: cemu-project_Cemu/src/util/Zir/EmitterGLSL/ZpIREmitGLSL.h
#pragma once
#include "util/Zir/Core/IR.h"
#include "util/Zir/Core/ZpIRPasses.h"
#include "util/helpers/StringBuf.h"
class DualStringBuffer;
namespace ZirEmitter
{
class GLSL
{
public:
GLSL() {};
// emit function code and append to output string buffer
void Emit(ZpIR::ZpIRFunction* irFunction, StringBuf* output);
private:
void GenerateBasicBlockCode(ZpIR::ZpIRBasicBlock& basicBlock);
void HandleInstruction(ZpIR::IR::InsRR* ins);
void HandleInstruction(ZpIR::IR::InsRRR* ins);
void HandleInstruction(ZpIR::IR::InsIMPORT* ins);
void HandleInstruction(ZpIR::IR::InsEXPORT* ins);
void appendSourceString(DualStringBuffer* buf, ZpIR::IRReg irReg);
void getRegisterName(char buf[16], ZpIR::IRReg irReg);
private:
ZpIR::ZpIRFunction* m_irFunction{};
StringBuf* m_glslSource{};
struct
{
ZpIR::ZpIRBasicBlock* currentBasicBlock{ nullptr };
std::vector<uint8> regReadTracking;
std::vector<DualStringBuffer*> regInlinedExpression;
bool CanInlineRegister(ZpIR::IRReg reg) const
{
cemu_assert_debug(ZpIR::isRegVar(reg));
return regReadTracking[ZpIR::getRegIndex(reg)] <= 1;
};
}m_blockContext;
void AssignResult(ZpIR::IRReg irReg, DualStringBuffer* buf, bool forceNoInline = false);
// inlined expression cache
void SetRegInlinedExpression(ZpIR::IRReg irReg, DualStringBuffer* buf);
void ResetRegInlinedExpression(ZpIR::IRReg irReg);
DualStringBuffer* GetRegInlinedExpression(ZpIR::IRReg irReg);
// memory pool for StringBuffer
DualStringBuffer* GetStringBuffer();
void ReleaseStringBuffer(DualStringBuffer* buf);
std::vector<DualStringBuffer*> m_stringBufferCache;
};
}
// File: cemu-project_Cemu/src/util/Zir/Core/ZpIRDebug.h
#pragma once
#include "util/Zir/Core/IR.h"
namespace ZpIR
{
class DebugPrinter
{
public:
void debugPrint(ZpIRFunction* irFunction);
void setShowPhysicalRegisters(bool showPhys)
{
m_showPhysicalRegisters = showPhys;
}
void setVirtualRegisterNameSource(std::string(*getRegisterNameCustom)(ZpIRBasicBlock* block, IRReg r))
{
m_getRegisterNameCustom = getRegisterNameCustom;
}
void setPhysicalRegisterNameSource(std::string(*getRegisterNameCustom)(ZpIRBasicBlock* block, ZpIRPhysicalReg r))
{
m_getPhysicalRegisterNameCustom = getRegisterNameCustom;
}
private:
std::string getRegisterName(ZpIRBasicBlock* block, IRReg r);
std::string getInstructionHRF(ZpIRBasicBlock* block, IR::__InsBase* cmd);
void debugPrintBlock(ZpIRBasicBlock* block);
std::string(*m_getRegisterNameCustom)(ZpIRBasicBlock* block, IRReg r) { nullptr };
std::string(*m_getPhysicalRegisterNameCustom)(ZpIRBasicBlock* block, ZpIRPhysicalReg r) { nullptr };
bool m_showPhysicalRegisters{}; // show global/physical register mapping instead of local IRReg indices
};
}
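// Usage sketch (illustrative): dump a function's IR, showing physical register mappings
// instead of local IRReg indices.
static void DebugPrintExample(ZpIR::ZpIRFunction* func)
{
	ZpIR::DebugPrinter printer;
	printer.setShowPhysicalRegisters(true);
	printer.debugPrint(func);
}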