hexsha stringlengths 40 40 | size int64 19 11.4M | ext stringclasses 13
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 270 | max_stars_repo_name stringlengths 5 110 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 270 | max_issues_repo_name stringlengths 5 116 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 270 | max_forks_repo_name stringlengths 5 116 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 19 11.4M | avg_line_length float64 1.93 229k | max_line_length int64 12 688k | alphanum_fraction float64 0.07 0.99 | matches listlengths 1 10 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33fe511f775d6f1f769df05ee7693bf92ca4cfab | 10,935 | cxx | C++ | Common/Core/Testing/Cxx/TestCxxFeatures.cxx | isi-research/VTK | 56a615b4e54233b65072d3eddd89dd6c0df78dd6 | [
"BSD-3-Clause"
] | null | null | null | Common/Core/Testing/Cxx/TestCxxFeatures.cxx | isi-research/VTK | 56a615b4e54233b65072d3eddd89dd6c0df78dd6 | [
"BSD-3-Clause"
] | null | null | null | Common/Core/Testing/Cxx/TestCxxFeatures.cxx | isi-research/VTK | 56a615b4e54233b65072d3eddd89dd6c0df78dd6 | [
"BSD-3-Clause"
] | null | null | null | /*=========================================================================
Program: Visualization Toolkit
Module: TestCxxFeatures.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// .NAME TestCxxFeatures
// .SECTION Description
// Provides a reference for the set of C++ features that can be used
// by VTK.
#include "vtkConfigure.h"
//----------------------------------------------------------------------------
/* Check for known compilers. */
#if defined(__HP_aCC)
# define VTK_CXX_ACC
#endif
#if defined(__SUNPRO_CC)
# define VTK_CXX_SUNPRO
#endif
//----------------------------------------------------------------------------
/* Check for known compiler limitations. */
// Assume standard behavior if symbol is not already defined.
#if !defined(VTK_TYPENAME)
# define VTK_TYPENAME typename
#endif
// Assume standard behavior if symbol is not already defined.
#if !defined(VTK_CLASS_TEMPLATE_SPECIALIZATION)
# define VTK_CLASS_TEMPLATE_SPECIALIZATION template <>
#endif
//----------------------------------------------------------------------------
#include "vtkSystemIncludes.h"
//----------------------------------------------------------------------------
/* Test inclusion of typeinfo header. */
#include <typeinfo>
//----------------------------------------------------------------------------
/* Test nested classes defined outside. */
class NestedTestOuter
{
public:
  NestedTestOuter();
  ~NestedTestOuter();
private:
  // Copy operations are declared private and intentionally left
  // undefined (pre-C++11 noncopyable idiom, matching the vintage of
  // this feature-test file): the compiler-generated copy would
  // duplicate the Inner pointer and the destructor would then delete
  // the same object twice.
  NestedTestOuter(const NestedTestOuter&);
  NestedTestOuter& operator=(const NestedTestOuter&);
  // Nested class is only declared here; its definition appears outside
  // the enclosing class below -- that placement is the feature under test.
  class NestedTestInner;
  NestedTestInner* Inner;
};
// Out-of-class definition of the nested class.
class NestedTestOuter::NestedTestInner
{
public:
  NestedTestInner() {}
  ~NestedTestInner() {}
};
// Allocates the inner helper; released in the destructor.
NestedTestOuter::NestedTestOuter()
{
  this->Inner = new NestedTestInner;
}
NestedTestOuter::~NestedTestOuter()
{
  delete this->Inner;
}
//----------------------------------------------------------------------------
/* Test inclusion of some stl headers. */
#ifdef _MSC_VER
#pragma warning (push, 2)
#endif
#include <vector>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
//----------------------------------------------------------------------------
/* Test full template specialization of functions. */
// Primary template: any pointer type that is not explicitly
// specialized reports 0.
template <class T>
int FullySpecializedFunction(T*)
{
  const int genericResult = 0;
  return genericResult;
}
// Full specialization for int*: reports 1, allowing callers to detect
// that overload resolution selected the specialization.
template <>
int FullySpecializedFunction<int>(int*)
{
  const int specializedResult = 1;
  return specializedResult;
}
// Verifies that overload resolution picks the primary template for
// float* and the full specialization for int*.  Returns 1 on success,
// 0 on any mismatch.
int TestFullySpecializedFunction()
{
  int ok = 1;
  const int genericValue = FullySpecializedFunction(static_cast<float*>(0));
  if(genericValue != 0)
    {
    cerr << "FullySpecializedFunction<float*>() returned "
         << genericValue << ", not 0.\n";
    ok = 0;
    }
  const int specializedValue = FullySpecializedFunction(static_cast<int*>(0));
  if(specializedValue != 1)
    {
    cerr << "FullySpecializedFunction(int*) returned "
         << specializedValue << ", not 1.\n";
    ok = 0;
    }
  return ok;
}
//----------------------------------------------------------------------------
/* Test member template of non-template. */
// Non-template class exercising member function templates: Set() stores
// any pointer type-erased as void*, Get() recovers it via static_cast.
class NonTemplate
{
  void* Pointer;
public:
  template <class T> void Set(T* value) { this->Pointer = value; }
  template <class T> void Get(T*& value) { value = static_cast<T*>(this->Pointer); }
};
// Round-trips an int* through NonTemplate; returns 1 when the recovered
// pointer dereferences to the stored value.
int TestNonTemplateMemberTemplate()
{
  int stored = 123;
  int* recovered = 0;
  NonTemplate holder;
  holder.Set(&stored);
  holder.Get(recovered);
  return (*recovered == 123);
}
//----------------------------------------------------------------------------
/* Test member template of template. */
// Class template exercising member templates of a template: the member
// templates introduce their own parameter U independent of T.
template <class T>
class OuterTemplate
{
  T* Pointer;
public:
  template <class U> void Set(U* value) { this->Pointer = value; }
  template <class U> void Get(U*& value) { value = static_cast<U*>(this->Pointer); }
};
// Round-trips an int* through OuterTemplate<void>; returns 1 when the
// recovered pointer dereferences to the stored value.
int TestTemplateMemberTemplate()
{
  int stored = 123;
  int* recovered = 0;
  OuterTemplate<void> holder;
  holder.Set(&stored);
  holder.Get(recovered);
  return (*recovered == 123);
}
//----------------------------------------------------------------------------
/* Test full template specialization of classes. */
// Primary template: Method() reports 0 and Type echoes the parameter.
template <class T>
struct FullySpecializedClass
{
  static int Method() { return 0; }
  typedef T Type;
};
// Full specialization for float: Method() reports 1 and -- deliberately
// -- maps Type to int, so the trait test below can detect which
// definition was selected.  VTK_CLASS_TEMPLATE_SPECIALIZATION expands
// to "template <>" unless a compiler workaround overrides it.
VTK_CLASS_TEMPLATE_SPECIALIZATION
struct FullySpecializedClass<float>
{
  static int Method() { return 1; }
  typedef int Type;
};
// Returns 1 when FullySpecializedClass<T>::Type is an integral type:
// casting 3.1 to an integral Type truncates it, so the comparison with
// 3.1 fails.  Returns 0 if Type preserved the fractional value.
// VTK_TYPENAME expands to "typename" unless overridden.
template <class T>
int TestFullySpecializedClassTrait(T*)
{
  typedef VTK_TYPENAME FullySpecializedClass<T>::Type Type;
  if(static_cast<Type>(3.1) == 3.1)
    {
    return 0;
    }
  return 1;
}
// Checks both the primary template (int -> 0) and the float
// specialization (-> 1) of FullySpecializedClass, plus the trait
// lookup.  Returns 1 on success, 0 on any mismatch.
int TestFullySpecializedClass()
{
  int ok = 1;
  const int primaryValue = FullySpecializedClass<int>::Method();
  if(primaryValue != 0)
    {
    cerr << "FullySpecializedClass<int>::Method() returned "
         << primaryValue << ", not 0.\n";
    ok = 0;
    }
  const int specializedValue = FullySpecializedClass<float>::Method();
  if(specializedValue != 1)
    {
    cerr << "FullySpecializedClass<float>::Method() returned "
         << specializedValue << ", not 1.\n";
    ok = 0;
    }
  if(!TestFullySpecializedClassTrait(static_cast<float*>(0)))
    {
    cerr << "Trait lookup of float didn't produce int.";
    ok = 0;
    }
  return ok;
}
//----------------------------------------------------------------------------
/* Test if(int x = f()) style scoping. */
// Exercises the "if (int x = expr)" declaration-in-condition feature:
// the variable must be visible in BOTH branches, and its scope must end
// with the if/else so the name can be reused afterwards.
// Returns 1 on success, 0 on failure.
int TestIfScopeHelper(int i)
{
  int ok = 1;
  if(int cond = i)
    {
    if(cond != i)
      {
      cerr << "TestIfScope: x != " << i << "\n";
      ok = 0;
      }
    }
  else
    {
    if(cond != i)
      {
      cerr << "TestIfScope: x != " << i << "\n";
      ok = 0;
      }
    }
  // The condition variable's scope ended above, so the name "x" can be
  // (re)declared at function scope -- part of what is being tested.
  int x = ok;
  return x;
}
// Drives the helper with a non-zero and a zero argument so both the
// true branch and the false branch of the scoped-if are exercised.
// Returns 1 only if both runs succeed.
int TestIfScope()
{
  int ok = 1;
  const int nonzeroCase = TestIfScopeHelper(1);
  const int zeroCase = TestIfScopeHelper(0);
  if(!nonzeroCase)
    {
    ok = 0;
    }
  if(!zeroCase)
    {
    ok = 0;
    }
  return ok;
}
//----------------------------------------------------------------------------
/* Test non-type template parameter. */
// Class template with a non-type (integer) parameter; GetValue()
// surfaces the compile-time constant at run time.
template <int N>
struct NonTypeTemplate
{
  static int GetValue() { return N; }
};
// Instantiates NonTypeTemplate with three different constants and
// checks each instantiation reports its own value.  Returns 1 on
// success, 0 on any mismatch.
int TestNonTypeTemplate()
{
  int ok = 1;
  if(NonTypeTemplate<0>::GetValue() != 0)
    {
    cerr << "NonTypeTemplate<0>::GetValue() != 0\n";
    ok = 0;
    }
  if(NonTypeTemplate<1>::GetValue() != 1)
    {
    cerr << "NonTypeTemplate<1>::GetValue() != 1\n";
    ok = 0;
    }
  if(NonTypeTemplate<2>::GetValue() != 2)
    {
    cerr << "NonTypeTemplate<2>::GetValue() != 2\n";
    ok = 0;
    }
  return ok;
}
//----------------------------------------------------------------------------
/* Test mixed type and non-type template arguments in a non-trival way. */
#if !(defined(__BORLANDC__) && (__BORLANDC__ < 0x660))
// Borland does not support this fancy array template.
// Deduces both the element type and the array extent from a
// pointer-to-array argument and returns the extent.
template <class ElementType, int Extent>
int TestMixedTypeTemplateFunction(ElementType (*)[Extent])
{
  return Extent;
}
// Passes pointers to arrays with different element types and extents
// and checks the deduced extent comes back.  Returns 1 on success.
int TestMixedTypeTemplate()
{
  int ok = 1;
  int x2[2];
  float x3[3];
  if(TestMixedTypeTemplateFunction(&x2) != 2)
    {
    cerr << "TestMixedTypeTemplateFunction(&x2) != 2\n";
    ok = 0;
    }
  if(TestMixedTypeTemplateFunction(&x3) != 3)
    {
    cerr << "TestMixedTypeTemplateFunction(&x3) != 3\n";
    ok = 0;
    }
  return ok;
}
#endif
//----------------------------------------------------------------------------
// Pre-C++11 "safe bool" idiom: instead of converting to bool (which
// would allow nonsense like implicit conversion to int), the class
// converts to a pointer-to-member-function of a private helper type,
// which is testable in conditions but otherwise unusable.
class SafeBoolIdiomClass
{
private:
  struct SafeBoolDummy { void Dummy() {} };
  typedef void (SafeBoolDummy::* SafeBool)();
public:
  SafeBoolIdiomClass(int x): Value(x) {}
  // Non-null member pointer when Value is truthy, null otherwise.
  operator SafeBool()
    {
    if(this->Value)
      {
      return &SafeBoolDummy::Dummy;
      }
    return 0;
    }
  // Logical negation mirrors the conversion operator.
  SafeBool operator !()
    {
    if(this->Value)
      {
      return 0;
      }
    return &SafeBoolDummy::Dummy;
    }
protected:
  int Value;
};
// Exercises SafeBoolIdiomClass in the four boolean contexts: a "true"
// object tested directly and negated, and a "false" object likewise.
// Returns 1 if all four behave as expected, 0 otherwise.
int TestSafeBoolIdiom()
{
  int result = 1;
  SafeBoolIdiomClass cTrue(1);
  SafeBoolIdiomClass cFalse(0);
  // A truthy object must convert to a non-null member pointer.
  if(cTrue) {}
  else
    {
    cerr << "if(cTrue) evaluates to false.\n";
    result = 0;
    }
  if(!cTrue)
    {
    cerr << "if(!cTrue) evaluates to true.\n";
    result = 0;
    }
  if(cFalse)
    {
    cerr << "if(cFalse) evaluates to true.\n";
    result = 0;
    }
  if(!cFalse) {}
  else
    {
    cerr << "if(!cFalse) evaluates to false.\n";
    result = 0;
    }
  return result;
}
//----------------------------------------------------------------------------
/* Test use of exceptions. */
#if defined(_MSC_VER)
# pragma warning (push)
# pragma warning (disable: 4702) /* Unreachable code. */
#endif
// RAII sentinel: writes 1 through the stored pointer on destruction, so
// a caller can observe whether stack unwinding ran its destructor.
class TestExceptionUnwind
{
  int* Flag; // set to 1 by the destructor
public:
  TestExceptionUnwind(int* flag): Flag(flag) {}
  ~TestExceptionUnwind() { *this->Flag = 1; }
  void Use() {} // no-op; keeps the local from looking unused
};
// Empty tag type used only for throw/catch matching.
class ExceptionClass {};
// Constructs the unwind sentinel on the stack, then throws; the
// sentinel's destructor must run during unwinding.
void TestThrowException(int* p)
{
  TestExceptionUnwind guard(p);
  guard.Use();
  throw ExceptionClass();
}
// Throws through TestThrowException and verifies two things: the
// exception is caught by its exact type, and stack unwinding destroyed
// the local RAII sentinel (which sets `value` to 1) before the handler
// runs.  Returns 1 on success, 0 on any failure.
int TestException()
{
  int value = 0;
  try
    {
    TestThrowException(&value);
    }
  catch(ExceptionClass&)
    {
    // Unwinding must have run ~TestExceptionUnwind before we get here.
    if(value)
      {
      return 1;
      }
    else
      {
      cerr << "TestExceptionUnwind object not destroyed!" << endl;
      return 0;
      }
    }
  catch(...)
    {
    // Falling into the catch-all means the typed handler was skipped.
    cerr << "ExceptionClass not caught!" << endl;
    return 0;
    }
  cerr << "No exception caught!" << endl;
  return 0;
}
#if defined(_MSC_VER)
# pragma warning (pop)
#endif
//-------------------------------------------------------------------
// See if the following code works on all platforms
#if defined(_MSC_VER) && defined(_DEBUG)
/* MSVC debug hook to prevent dialogs when running from DART. */
# include <crtdbg.h>
// CRT debug-report hook: prints the report text to stderr and aborts
// the process so MSVC debug builds never raise interactive dialogs
// when run from the dashboard.  Return value is never reached.
static int TestDriverDebugReport(int type, char* message, int* retVal)
{
  (void)type; (void)retVal;
  // Print via an explicit "%s" format.  Passing `message` directly as
  // the format string was a format-string bug: any '%' in the report
  // text would be interpreted as a conversion specifier (undefined
  // behavior / garbage output).
  fprintf(stderr, "%s", message);
  exit(1);
}
#endif
//----------------------------------------------------------------------------
/* Test setlocale */
#include <locale.h>
// Saves the current LC_NUMERIC locale string, switches to "English"
// (a Windows-style locale name; setlocale simply fails elsewhere), then
// restores the saved locale.  Returns 1 when the original locale could
// be captured and restored, 0 otherwise.
int TestSetLocale()
{
  char *saved = strdup(setlocale(LC_NUMERIC,NULL));
  setlocale(LC_NUMERIC,"English");
  // restore the original locale
  if (!saved)
    {
    return 0;
    }
  setlocale(LC_NUMERIC,saved);
  free(saved);
  return 1;
}
//----------------------------------------------------------------------------
// Runs test function x(), reports pass/fail on stdout, and latches any
// failure into `result` (0 = all passed, 1 = at least one failure).
#define DO_TEST(x) \
if(x()) { cout << "Passed: " #x "\n"; } \
else { cout << "Failed: " #x "\n"; result = 1; }
// Entry point: exercises each C++ feature test above.  Exit status is
// 0 when every test passes, non-zero otherwise.
int main()
{
  int result = 0;
  DO_TEST(TestFullySpecializedFunction);
  DO_TEST(TestNonTemplateMemberTemplate);
  DO_TEST(TestTemplateMemberTemplate);
  DO_TEST(TestFullySpecializedClass);
  DO_TEST(TestIfScope);
  DO_TEST(TestNonTypeTemplate);
#if !(defined(__BORLANDC__) && (__BORLANDC__ < 0x660))
  DO_TEST(TestMixedTypeTemplate);
#endif
  DO_TEST(TestSafeBoolIdiom);
  DO_TEST(TestException);
  DO_TEST(TestSetLocale);
#if defined(_MSC_VER) && defined(_DEBUG)
  // just call the code to shut up a linker warning
  int retVal = 0;
  if (result)
    {
    // really shouldn't be called unless something else failed
    // just want to make the compiler think it might get called
    // all this will be yanked once I see the results of this test
    TestDriverDebugReport(0, "a temp test", &retVal);
    }
#endif
  return result;
}
| 21.15087 | 78 | 0.55775 | [
"object",
"vector"
] |
33fe551e71685bf86c810f8607f3a1c95f6a53c1 | 7,159 | cpp | C++ | lib/Core/PassManager.cpp | brchiu/onnc | 0d1ffe42ee108bdf4d5c8c93f94608347ee21e0b | [
"BSD-3-Clause"
] | 1 | 2018-08-27T02:51:59.000Z | 2018-08-27T02:51:59.000Z | lib/Core/PassManager.cpp | ffk0716/onnc | 91e4955ade64b479db17aaeccacf4b7339fe44d2 | [
"BSD-3-Clause"
] | null | null | null | lib/Core/PassManager.cpp | ffk0716/onnc | 91e4955ade64b479db17aaeccacf4b7339fe44d2 | [
"BSD-3-Clause"
] | null | null | null | //===- PassManager.cpp ----------------------------------------------------===//
//
// The ONNC Project
//
// See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <onnc/Core/PassManager.h>
#include <onnc/Core/PassRegistry.h>
#include <onnc/Core/AnalysisUsage.h>
#include <onnc/Core/AnalysisResolver.h>
#include <onnc/Diagnostic/MsgHandling.h>
#include <onnc/ADT/Bits/DigraphArc.h>
#include <stack>
#include <set>
using namespace onnc;
char PassManager::StartPass::ID = 0;
//===----------------------------------------------------------------------===//
// PassManager
//===----------------------------------------------------------------------===//
/// Default constructor: uses the process-wide pass registry.  The
/// dependency graph is seeded with a StartPass sentinel node, which
/// anchors passes that declare no analysis dependencies.
PassManager::PassManager()
  : m_pPassRegistry(onnc::GetPassRegistry()),
    m_Dependencies(), m_AvailableAnalysis(),
    m_RunState(),
    m_pStart(m_Dependencies.addNode(new StartPass())) {
}
/// Constructor using an explicit pass registry instead of the global one.
PassManager::PassManager(PassRegistry& pRegistry)
  : m_pPassRegistry(&pRegistry),
    m_Dependencies(), m_AvailableAnalysis(),
    m_RunState(),
    m_pStart(m_Dependencies.addNode(new StartPass())) {
}
/// Destructor: clearing the dependency graph releases its nodes.
/// NOTE(review): ownership of the Pass objects held by the nodes is not
/// visible here -- confirm the graph's clear() deletes them.
PassManager::~PassManager()
{
  m_Dependencies.clear();
}
// Use depth search first to build up a sub-graph of dependenciess.
/// Adds pPass (and, via depth-first traversal in doAdd, the passes it
/// depends on) to the dependency graph, queuing it in pState.
void PassManager::add(Pass* pPass, State& pState)
{
  doAdd(pPass, nullptr, pState);
}
/// Same as above, but dependency passes created on demand are
/// constructed for the given target backend.
void PassManager::add(Pass* pPass, TargetBackend* pBackend, State& pState)
{
  doAdd(pPass, pBackend, pState);
}
// Use depth-first search to build up a sub-graph of dependencies.
/// Convenience overload using the manager's own run state.
void PassManager::add(Pass* pPass)
{
  add(pPass, m_RunState);
}
/// Convenience overload with a backend, using the manager's own run state.
void PassManager::add(Pass* pPass, TargetBackend* pBackend)
{
  add(pPass, pBackend, m_RunState);
}
/// Add a pass by DSF order
/// Queues pPass for execution, then walks its analysis dependencies
/// depth-first: for every required analysis, either wires an edge to
/// the already-registered node or creates the dependency pass via the
/// registry's PassInfo factory and recurses into it.
/// @param pPass    the pass being added (queued for execution first).
/// @param pBackend backend handed to factory-created dependency passes;
///                 may be null.
/// @param pState   run state whose execution queue is extended.
void PassManager::doAdd(Pass* pPass, TargetBackend* pBackend, State& pState)
{
  pState.execution.push_back(pPass->getPassID());
  // If the pass is already in the dependency graph, then we don't
  // need to add it into the graph.
  if (hasAdded(pPass->getPassID()))
    return;
  std::stack<DepNode*> stack;
  DepNode* cur_node = addNode(*pPass);
  stack.push(cur_node);
  // Iterative depth-first traversal of the dependency closure.
  while (!stack.empty()) {
    cur_node = stack.top();
    stack.pop();
    AnalysisUsage usage;
    cur_node->pass->getAnalysisUsage(usage);
    if (usage.isEmpty()) {
      // No dependencies: anchor the pass on the StartPass sentinel.
      m_Dependencies.connect(*m_pStart, *cur_node);
      continue;
    }
    // Create a resolver on demand.  The pass might have been added to
    // another PassManager already, in which case its resolver exists.
    AnalysisResolver* resolver = cur_node->pass->getResolver();
    if (!resolver) {
      // NOTE(review): heap-allocated here; presumably released by the
      // Pass when it is destroyed -- confirm.
      resolver = new AnalysisResolver(*this);
      cur_node->pass->setResolver(*resolver);
    }
    for (Pass::AnalysisID& use : usage) {
      if (hasAdded(use)) {
        DepNode* dep_node = findNode(use);
        assert(dep_node && "dependency node doesn't exist?!");
        // Wire the existing dependency into the graph and the resolver.
        m_Dependencies.connect(*dep_node, *cur_node);
        resolver->add(use, *dep_node->pass);
        continue;
      }
      // Create the dependent pass via the registry.
      const PassInfo* info = getPassRegistry()->getPassInfo(use);
      if (nullptr == info) {
        error(pass_not_registered) << "nullptr";
        return;
      }
      Pass* new_pass = info->makePass(pBackend);
      // Register the newly created pass.
      DepNode* new_node = addNode(*new_pass);
      // Add the dependency edge for cur_node.
      m_Dependencies.connect(*new_node, *cur_node);
      resolver->add(use, *new_pass);
      // Continue traversing the dependencies of the new node.
      stack.push(new_node);
    } // for each usage
  } // leave stacking
}
/// Runs every queued pass in pState on pModule until the queue drains.
/// @return true if all passes succeed; false on the first failing step
///         (remaining passes are not run).
bool PassManager::run(Module& pModule, State& pState)
{
  while (!pState.execution.empty()) {
    if (!step(pModule, pState))
      return false;
  } // end of while
  return true;
}
/// Convenience overload running with the manager's own run state.
bool PassManager::run(Module& pModule)
{
  return run(pModule, m_RunState);
}
/// Runs the next queued pass (pState.execution.front()) on pModule.
/// On success the pass is popped from the queue unless it requested a
/// retry, in which case it stays at the front (and, if it also revised
/// the module, the execution order is recomputed first).
/// @return true if stepping may continue; false if the pass failed or
///         the queued pass ID has no node in the dependency graph.
bool PassManager::step(Module& pModule, State& pState)
{
  DepNode* node = findNode(pState.execution.front());
  if (nullptr == node)
    return false; // Fixed: previously returned Pass::kPassFailure from
                  // a bool function; a non-zero enum encoding converts
                  // to `true`, reporting success for a missing pass.
  pState.pass = node->pass;
  Pass::ReturnType result = doRun(*pState.pass, pModule);
  if (Pass::IsFailed(result))
    return false;
  if (Pass::IsRetry(result)) {
    // The pass stays at the front of the queue to run again; a revised
    // module additionally invalidates the current execution order.
    if (Pass::IsRevised(result)) {
      UpdateExecutionOrder(pState.execution);
      pState.changed = false;
    }
  }
  else { //< not retry: record whether the module changed and move on.
    if (Pass::IsRevised(result))
      pState.changed = true;
    pState.execution.pop_front();
  }
  return true;
}
/// Executes one pass through its full lifecycle: doInitialization,
/// run, doFinalization.  Result flags from each stage are OR-ed
/// together; the sequence stops early (returning the flags accumulated
/// so far) as soon as a stage reports retry or failure.
Pass::ReturnType PassManager::doRun(Pass& pPass, Module& pModule)
{
  // initialize the pass
  Pass::ReturnType result = pPass.doInitialization(pModule);
  if (Pass::IsRetry(result) || Pass::IsFailed(result))
    return result;
  // run the pass
  result |= pPass.run(pModule);
  if (Pass::IsRetry(result) || Pass::IsFailed(result))
    return result;
  // finalize the pass
  result |= pPass.doFinalization(pModule);
  return result;
}
/// Looks up the dependency-graph node for the given analysis ID.
/// @return the node, or nullptr if no pass with that ID was added.
PassManager::DepNode* PassManager::findNode(Pass::AnalysisID pID)
{
  AvailableAnalysisMap::iterator entry = m_AvailableAnalysis.find(pID);
  if (m_AvailableAnalysis.end() == entry)
    return nullptr;
  return entry->second;
}
/// Wraps pPass in a new dependency-graph node and indexes the node by
/// the pass's ID for later lookup.
PassManager::DepNode* PassManager::addNode(Pass& pPass)
{
  DepNode* cur_node = m_Dependencies.addNode(&pPass);
  m_AvailableAnalysis[cur_node->pass->getPassID()] = cur_node;
  return cur_node;
}
/// Number of distinct passes registered in this manager.
unsigned int PassManager::size() const
{
  return m_AvailableAnalysis.size();
}
/// Returns the Pass object registered under pID, or nullptr.
Pass* PassManager::lookup(Pass::AnalysisID pID)
{
  DepNode* node = findNode(pID);
  if (nullptr == node)
    return nullptr;
  return node->pass;
}
/// True if a pass with the given ID is already in the graph.
bool PassManager::hasAdded(Pass::AnalysisID pID) const
{
  return (m_AvailableAnalysis.end() != m_AvailableAnalysis.find(pID));
}
/// Recomputes the front of pOrder after the front pass requested a
/// retry with a revised module: walks that pass's dependency sub-graph
/// with an iterative DFS over incoming arcs, builds a post-order, and
/// prepends the dependencies so they run again before the retry.
void PassManager::UpdateExecutionOrder(ExecutionOrder& pOrder)
{
  std::unordered_set<DepNode*> visited;
  // Each stack entry is (expanded?, node): a node is first pushed as
  // unexpanded (false); when popped it is re-pushed as expanded (true)
  // beneath its predecessors, so it re-surfaces after them -- the
  // classic two-phase trick that yields a post-order without recursion.
  std::deque<std::pair<bool, DepNode*> > stack;
  std::vector<DepNode*> post_order;
  stack.push_back(std::make_pair(false, findNode(pOrder.front())));
  while (!stack.empty()) {
    std::pair<bool, DepNode*> node = stack.back();
    stack.pop_back();
    if (node.first) { // get a parent
      // Linear duplicate check keeps each node in post_order once.
      if (post_order.end() == std::find(post_order.begin(), post_order.end(), node.second)) {
        post_order.push_back(node.second);
      }
    }
    else { // get a child
      // turn the child to parent
      visited.insert(node.second);
      stack.push_back(std::make_pair(true, node.second));
      // Push all predecessors (arc sources) that feed this node.
      digraph::ArcBase* iter = node.second->first_in;
      while (nullptr != iter) {
        // if not visited
        DepNode* target = static_cast<DepNode*>(iter->source);
        if (visited.end() == visited.find(target)) {
          stack.push_back(std::make_pair(false, target));
        }
        iter = iter->next_in;
      }
    }
  } // end of while
  // Iterate the post-order in reverse (dependencies last -> first),
  // skipping the first reverse entry (the retrying pass itself, which
  // is already at the front of pOrder) and the final entry (presumably
  // the StartPass sentinel -- TODO confirm), prepending the rest.
  std::vector<DepNode*>::reverse_iterator ele = post_order.rbegin();
  std::vector<DepNode*>::reverse_iterator eEnd = post_order.rend();
  ++ele;
  while (eEnd != ele) {
    if (eEnd != (ele + 1)) {
      pOrder.push_front((*ele)->pass->getPassID());
    }
    ++ele;
  }
}
| 26.127737 | 93 | 0.634446 | [
"vector"
] |
d501cb3d9e6984c00a02f4759f5b1e30d7fb91fd | 6,147 | cpp | C++ | openbr/plugins/imgproc/affine.cpp | wittayaatt/openbr | 26cb128f740f46b7c18b346e2bcf2af7a8de29da | [
"Apache-2.0"
] | 1,883 | 2015-01-04T07:04:24.000Z | 2022-03-30T13:33:37.000Z | openbr/plugins/imgproc/affine.cpp | wittayaatt/openbr | 26cb128f740f46b7c18b346e2bcf2af7a8de29da | [
"Apache-2.0"
] | 272 | 2015-01-02T09:53:20.000Z | 2022-03-29T08:04:33.000Z | openbr/plugins/imgproc/affine.cpp | wittayaatt/openbr | 26cb128f740f46b7c18b346e2bcf2af7a8de29da | [
"Apache-2.0"
] | 718 | 2015-01-02T18:51:07.000Z | 2022-03-29T08:10:53.000Z | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright 2012 The MITRE Corporation *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <opencv2/imgproc/imgproc.hpp>
#include <openbr/plugins/openbr_internal.h>
#include <openbr/core/opencvutils.h>
using namespace cv;
namespace br
{
/*!
* \ingroup transforms
* \brief Performs a two or three point registration.
* \author Josh Klontz \cite jklontz
* \note Method: Area should be used for shrinking an image, Cubic for slow but accurate enlargment, Bilin for fast enlargement.
*/
class AffineTransform : public UntrainableTransform
{
    Q_OBJECT
    Q_ENUMS(Method)
    Q_ENUMS(BorderMode)
public:
    /*!< Interpolation methods; values mirror OpenCV's interpolation flags. */
    enum Method { Near = INTER_NEAREST,
                  Area = INTER_AREA,
                  Bilin = INTER_LINEAR,
                  Cubic = INTER_CUBIC,
                  Lanczo = INTER_LANCZOS4 };
    /*!< Border handling for warpAffine; values mirror OpenCV's border types. */
    enum BorderMode { Replicate = BORDER_REPLICATE,
                      Constant = BORDER_CONSTANT,
                      Reflect = BORDER_REFLECT,
                      Wrap = BORDER_WRAP,
                      Reflect_101 = BORDER_REFLECT_101,
                      Transparent = BORDER_TRANSPARENT,
                      Isolated = BORDER_ISOLATED };
private:
    // Output size, and up to three destination points given as fractions
    // of (width, height).  x2/y2/x3/y3 default to -1 meaning "derive":
    // (x2,y2) defaults to the horizontal mirror of point 1, and a
    // missing third point switches to two-point registration.
    Q_PROPERTY(int width READ get_width WRITE set_width RESET reset_width STORED false)
    Q_PROPERTY(int height READ get_height WRITE set_height RESET reset_height STORED false)
    Q_PROPERTY(float x1 READ get_x1 WRITE set_x1 RESET reset_x1 STORED false)
    Q_PROPERTY(float y1 READ get_y1 WRITE set_y1 RESET reset_y1 STORED false)
    Q_PROPERTY(float x2 READ get_x2 WRITE set_x2 RESET reset_x2 STORED false)
    Q_PROPERTY(float y2 READ get_y2 WRITE set_y2 RESET reset_y2 STORED false)
    Q_PROPERTY(float x3 READ get_x3 WRITE set_x3 RESET reset_x3 STORED false)
    Q_PROPERTY(float y3 READ get_y3 WRITE set_y3 RESET reset_y3 STORED false)
    Q_PROPERTY(Method method READ get_method WRITE set_method RESET reset_method STORED false)
    Q_PROPERTY(BorderMode borderMode READ get_borderMode WRITE set_borderMode RESET reset_borderMode STORED false)
    Q_PROPERTY(bool storeAffine READ get_storeAffine WRITE set_storeAffine RESET reset_storeAffine STORED false)
    Q_PROPERTY(bool warpPoints READ get_warpPoints WRITE set_warpPoints RESET reset_warpPoints STORED false)
    BR_PROPERTY(int, width, 64)
    BR_PROPERTY(int, height, 64)
    BR_PROPERTY(float, x1, 0)
    BR_PROPERTY(float, y1, 0)
    BR_PROPERTY(float, x2, -1)
    BR_PROPERTY(float, y2, -1)
    BR_PROPERTY(float, x3, -1)
    BR_PROPERTY(float, y3, -1)
    BR_PROPERTY(Method, method, Bilin)
    BR_PROPERTY(BorderMode, borderMode, Constant)
    BR_PROPERTY(bool, storeAffine, false)
    BR_PROPERTY(bool, warpPoints, false)
    // Synthesizes a third correspondence point from two: rotates the
    // a->b vector by a quarter turn about a, giving a non-collinear
    // point so getAffineTransform has the three pairs it requires.
    static Point2f getThirdAffinePoint(const Point2f &a, const Point2f &b)
    {
        float dx = b.x - a.x;
        float dy = b.y - a.y;
        return Point2f(a.x - dy, a.y + dx);
    }
    // Warps src onto a width x height canvas so that the source
    // registration points land on the configured destination points.
    // Source points come from "Affine_0".."Affine_2" file metadata when
    // present, otherwise from the template's landmark points; with too
    // few landmarks the image is simply resized.
    void project(const Template &src, Template &dst) const
    {
        // Two-point mode when no explicit third destination point is set.
        const bool twoPoints = ((x3 == -1) || (y3 == -1));
        Point2f dstPoints[3];
        dstPoints[0] = Point2f(x1*width, y1*height);
        dstPoints[1] = Point2f((x2 == -1 ? 1 - x1 : x2)*width, (y2 == -1 ? y1 : y2)*height);
        if (twoPoints) dstPoints[2] = getThirdAffinePoint(dstPoints[0], dstPoints[1]);
        else dstPoints[2] = Point2f(x3*width, y3*height);
        Point2f srcPoints[3];
        if (src.file.contains("Affine_0") &&
            src.file.contains("Affine_1") &&
            (src.file.contains("Affine_2") || twoPoints)) {
            srcPoints[0] = OpenCVUtils::toPoint(src.file.get<QPointF>("Affine_0"));
            srcPoints[1] = OpenCVUtils::toPoint(src.file.get<QPointF>("Affine_1"));
            if (!twoPoints) srcPoints[2] = OpenCVUtils::toPoint(src.file.get<QPointF>("Affine_2"));
        } else {
            const QList<Point2f> landmarks = OpenCVUtils::toPoints(src.file.points());
            if ((landmarks.size() < 2) || (!twoPoints && (landmarks.size() < 3))) {
                // Not enough landmarks to register; fall back to a resize.
                resize(src, dst, Size(width, height));
                return;
            } else {
                srcPoints[0] = landmarks[0];
                srcPoints[1] = landmarks[1];
                if (!twoPoints) srcPoints[2] = landmarks[2];
            }
        }
        if (twoPoints) srcPoints[2] = getThirdAffinePoint(srcPoints[0], srcPoints[1]);
        Mat affineTransform = getAffineTransform(srcPoints, dstPoints);
        warpAffine(src, dst, affineTransform, Size(width, height), method, borderMode);
        // Optionally carry the landmark points through the same transform.
        if (warpPoints)
            dst.file.setPoints(OpenCVUtils::rotatePoints(src.file.points(), affineTransform));
        // Optionally expose the 2x3 matrix as metadata for later stages.
        if (storeAffine) {
            QList<float> affineParams;
            for (int i = 0 ; i < 2; i++)
                for (int j = 0; j < 3; j++)
                    affineParams.append(affineTransform.at<double>(i, j));
            dst.file.setList("affineParameters", affineParams);
        }
    }
};
BR_REGISTER(Transform, AffineTransform)
} // namespace br
#include "imgproc/affine.moc"
| 44.223022 | 128 | 0.585326 | [
"transform"
] |
d502a92cfa49be8b895821377776aac046999fc0 | 18,021 | cpp | C++ | src/tests/conduit/t_conduit_array.cpp | jschueller/conduit | de18c819b6a11e9d50f7adbe5a0eac5831267a81 | [
"BSD-3-Clause"
] | null | null | null | src/tests/conduit/t_conduit_array.cpp | jschueller/conduit | de18c819b6a11e9d50f7adbe5a0eac5831267a81 | [
"BSD-3-Clause"
] | null | null | null | src/tests/conduit/t_conduit_array.cpp | jschueller/conduit | de18c819b6a11e9d50f7adbe5a0eac5831267a81 | [
"BSD-3-Clause"
] | null | null | null | //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
//
// Produced at the Lawrence Livermore National Laboratory
//
// LLNL-CODE-666778
//
// All rights reserved.
//
// This file is part of Conduit.
//
// For details, see: http://software.llnl.gov/conduit/.
//
// Please also read conduit/LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the disclaimer below.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the disclaimer (as noted below) in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of the LLNS/LLNL nor the names of its contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
// LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
//-----------------------------------------------------------------------------
///
/// file: conduit_array.cpp
///
//-----------------------------------------------------------------------------
#include "conduit.hpp"
#include <iostream>
#include "gtest/gtest.h"
using namespace conduit;
//-----------------------------------------------------------------------------
// Exercises DataArray construction over raw mutable (void*) and const
// (const void*) buffers, copy construction, assignment, and the
// to_string/to_json printing paths.  Writes through a DataArray must be
// visible in the backing std::vector.
TEST(conduit_array, basic_construction)
{
    std::vector<int8> data1(10,8);
    std::vector<int8> data2(10,-8);
    void *data1_ptr = &data1[0];
    const void *cdata2_ptr = &data2[0];
    // Array over the mutable buffer: all ten entries read back as 8.
    DataArray<int8> da_1(data1_ptr,DataType::int8(10));
    std::cout << da_1.to_string() << std::endl;
    for(index_t i=0;i<10;i++)
    {
        EXPECT_EQ(8,da_1[i]);
    }
    // Array over the const buffer: all ten entries read back as -8.
    DataArray<int8> da_2(cdata2_ptr,DataType::int8(10));
    std::cout << da_2.to_string() << std::endl;
    for(index_t i=0;i<10;i++)
    {
        EXPECT_EQ(-8,da_2[i]);
    }
    // Copy construction: da_3 reads the same values as da_1.
    DataArray<int8> da_3(da_1);
    for(index_t i=0;i<10;i++)
    {
        EXPECT_EQ(8,da_3[i]);
    }
    da_3[0] = 16;
    // Assignment rebinds da_3; da_2's values are unchanged by it.
    da_3 = da_2;
    for(index_t i=0;i<10;i++)
    {
        EXPECT_EQ(-8,da_2[i]);
    }
    da_3[0] = -16;
    std::cout << da_3.to_string() << std::endl;
    // test other variants of to_string and to stream, etc
    da_3.to_string_stream(std::cout);
    da_3.to_json_stream(std::cout);
    // The writes above must have landed in the original vectors:
    // da_3[0]=16 hit data1 (before reassignment), -16 hit data2 (after).
    EXPECT_EQ(16,data1[0]);
    EXPECT_EQ(-16,data2[0]);
}
//-----------------------------------------------------------------------------
// Verifies strided int8 access: a DataType with a 2-byte stride over a
// 20-element buffer views every other element, both via set_external on
// a Node and via the external Node constructor (with a 1-byte offset),
// and writes through the strided view hit the correct backing slot.
TEST(conduit_array, array_stride_int8)
{
    // Even slots hold 0..9; odd slots hold -(i/2), i.e. 0,-1,-1,...,-9.
    std::vector<int8> data(20,0);
    for(int i=0;i<20;i+=2)
    {
        data[i] = i/2;
    }
    for(int i=1;i<20;i+=2)
    {
        data[i] = -i/2;
    }
    std::cout << "Full Data" << std::endl;
    for(int i=0;i<20;i++)
    {
        std::cout << (int64) data[i] << " ";
    }
    std::cout << std::endl;
    // 10 int8 elements, offset 0, stride 2 bytes: selects the even slots.
    DataType arr_t(DataType::INT8_ID,
                   10,
                   0,
                   sizeof(int8)*2, // stride
                   sizeof(int8),
                   Endianness::DEFAULT_ID);
    Node n;
    n["value"].set_external(arr_t,&data[0]);
    int8_array arr = n["value"].as_int8_array();
    for(int i=0;i<10;i++)
    {
        // note: the cast is for proper printing to std::out
        std::cout << "value[" << i << "] = " << ((int64)arr[i] ) << std::endl;
    }
    std::cout << std::endl;
    EXPECT_EQ(arr[5],5);
    EXPECT_EQ(arr[9],9);
    // Writing strided element 1 must land in backing slot 2.
    arr[1] = 100;
    EXPECT_EQ(data[2],100);
    std::cout << "Full Data" << std::endl;
    for(int i=0;i<20;i++)
    {
        std::cout << (int64) data[i] << " ";
    }
    std::cout << std::endl;
    // Same stride with a 1-byte offset: selects the odd (negative) slots.
    Node n2(DataType::int8(10,sizeof(int8),sizeof(int8)*2),
            &data[0],
            true); /// true for external
    int8_array arr_2 = n2.as_int8_array();
    for(int i=0;i<10;i++)
    {
        // note: the cast is for proper printing to std::out
        std::cout << "value[" << i << "] = " << ((int64)arr_2[i] ) << std::endl;
    }
    std::cout << std::endl;
    EXPECT_EQ(arr_2[0],0);
    EXPECT_EQ(arr_2[9],-9);
}
//-----------------------------------------------------------------------------
// Verifies zero-copy semantics of set_external over a std::vector of
// int64: mutating the vector after the Node is bound must be visible
// through the Node's array view.
TEST(conduit_array, array_stride_int8_external)
{
    // Even slots hold 0..9; odd slots hold -(i/2).
    std::vector<int64> data(20,0);
    for(int i=0;i<20;i+=2)
    {
        data[i] = i/2;
    }
    for(int i=1;i<20;i+=2)
    {
        data[i] = -i/2;
    }
    std::cout << "Full Data" << std::endl;
    for(int i=0;i<20;i++)
    {
        std::cout << (int64) data[i] << " ";
    }
    std::cout << std::endl;
    Node n;
    n["value"].set_external(data);
    int64_array arr = n["value"].as_int64_array();
    for(int i=0;i<20;i++)
    {
        // note: the cast is for proper printing to std::out
        std::cout << "value[" << i << "] = " << arr[i] << std::endl;
    }
    std::cout << std::endl;
    // Mutations to the vector (1 -> 10, -1 -> -10) show through the view.
    data[2]*=10;
    data[3]*=10;
    EXPECT_EQ(arr[2],10);
    EXPECT_EQ(arr[3],-10);
}
//-----------------------------------------------------------------------------
// For each of the ten bitwidth-style numeric types: allocate a Node of
// that type, copy ten values in via DataArray::set(ptr, count), and
// confirm the Node's buffer matches the source, element by element.
TEST(conduit_array, set_using_ptrs)
{
    //in this case we are using std vectors to init data conveniently
    // we are actually testing the pointer set cases
    // we test std vector set cases directly in "set_using_std_vectors"
    std::vector<int8> v_int8(10,-8);
    std::vector<int16> v_int16(10,-16);
    std::vector<int32> v_int32(10,-32);
    std::vector<int64> v_int64(10,-64);
    std::vector<uint8> v_uint8(10,8);
    std::vector<uint16> v_uint16(10,16);
    std::vector<uint32> v_uint32(10,32);
    std::vector<uint64> v_uint64(10,64);
    std::vector<float32> v_float32(10,32.0);
    std::vector<float64> v_float64(10,64.0);
    Node n;
    // int8_array
    n["vint8"].set(DataType::int8(10));
    n["vint8"].as_int8_array().set(&v_int8[0],10);
    int8 *n_int8_ptr = n["vint8"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_int8_ptr[i],v_int8[i]);
    }
    // int16_array
    n["vint16"].set(DataType::int16(10));
    n["vint16"].as_int16_array().set(&v_int16[0],10);
    int16 *n_int16_ptr = n["vint16"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_int16_ptr[i],v_int16[i]);
    }
    // int32_array
    n["vint32"].set(DataType::int32(10));
    n["vint32"].as_int32_array().set(&v_int32[0],10);
    int32 *n_int32_ptr = n["vint32"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_int32_ptr[i],v_int32[i]);
    }
    // int64_array
    n["vint64"].set(DataType::int64(10));
    n["vint64"].as_int64_array().set(&v_int64[0],10);
    int64 *n_int64_ptr = n["vint64"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_int64_ptr[i],v_int64[i]);
    }
    // uint8_array
    n["vuint8"].set(DataType::uint8(10));
    n["vuint8"].as_uint8_array().set(&v_uint8[0],10);
    uint8 *n_uint8_ptr = n["vuint8"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_uint8_ptr[i],v_uint8[i]);
    }
    // uint16_array
    n["vuint16"].set(DataType::uint16(10));
    n["vuint16"].as_uint16_array().set(&v_uint16[0],10);
    uint16 *n_uint16_ptr = n["vuint16"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_uint16_ptr[i],v_uint16[i]);
    }
    // uint32_array
    n["vuint32"].set(DataType::uint32(10));
    n["vuint32"].as_uint32_array().set(&v_uint32[0],10);
    uint32 *n_uint32_ptr = n["vuint32"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_uint32_ptr[i],v_uint32[i]);
    }
    // uint64_array
    n["vuint64"].set(DataType::uint64(10));
    n["vuint64"].as_uint64_array().set(&v_uint64[0],10);
    uint64 *n_uint64_ptr = n["vuint64"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_uint64_ptr[i],v_uint64[i]);
    }
    // float32_array
    n["vfloat32"].set(DataType::float32(10));
    n["vfloat32"].as_float32_array().set(&v_float32[0],10);
    float32 *n_float32_ptr = n["vfloat32"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_float32_ptr[i],v_float32[i]);
    }
    // float64_array
    n["vfloat64"].set(DataType::float64(10));
    n["vfloat64"].as_float64_array().set(&v_float64[0],10);
    float64 *n_float64_ptr = n["vfloat64"].value();
    for(size_t i=0;i<10;i++)
    {
        EXPECT_EQ(n_float64_ptr[i],v_float64[i]);
    }
}
//-----------------------------------------------------------------------------
TEST(conduit_array, set_using_data_array)
{
std::vector<int8> v_int8(10,-8);
std::vector<int16> v_int16(10,-16);
std::vector<int32> v_int32(10,-32);
std::vector<int64> v_int64(10,-64);
std::vector<uint8> v_uint8(10,8);
std::vector<uint16> v_uint16(10,16);
std::vector<uint32> v_uint32(10,32);
std::vector<uint64> v_uint64(10,64);
std::vector<float32> v_float32(10,32.0);
std::vector<float64> v_float64(10,64.0);
int8_array va_int8(&v_int8[0],DataType::int8(10));
int16_array va_int16(&v_int16[0],DataType::int16(10));
int32_array va_int32(&v_int32[0],DataType::int32(10));
int64_array va_int64(&v_int64[0],DataType::int64(10));
uint8_array va_uint8(&v_uint8[0],DataType::uint8(10));
uint16_array va_uint16(&v_uint16[0],DataType::uint16(10));
uint32_array va_uint32(&v_uint32[0],DataType::uint32(10));
uint64_array va_uint64(&v_uint64[0],DataType::uint64(10));
float32_array va_float32(&v_float32[0],DataType::float32(10));
float64_array va_float64(&v_float64[0],DataType::float64(10));
Node n;
// int8_array
n["vint8"].set(DataType::int8(10));
n["vint8"].as_int8_array().set(va_int8);
int8 *n_int8_ptr = n["vint8"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_int8_ptr[i],va_int8[i]);
}
// int16_array
n["vint16"].set(DataType::int16(10));
n["vint16"].as_int16_array().set(va_int16);
int16 *n_int16_ptr = n["vint16"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_int16_ptr[i],va_int16[i]);
}
// int32_array
n["vint32"].set(DataType::int32(10));
n["vint32"].as_int32_array().set(va_int32);
int32 *n_int32_ptr = n["vint32"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_int32_ptr[i],va_int32[i]);
}
// int64_array
n["vint64"].set(DataType::int64(10));
n["vint64"].as_int64_array().set(va_int64);
int64 *n_int64_ptr = n["vint64"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_int64_ptr[i],va_int64[i]);
}
// uint8_array
n["vuint8"].set(DataType::uint8(10));
n["vuint8"].as_uint8_array().set(va_uint8);
uint8 *n_uint8_ptr = n["vuint8"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint8_ptr[i],va_uint8[i]);
}
// uint16_array
n["vuint16"].set(DataType::uint16(10));
n["vuint16"].as_uint16_array().set(va_uint16);
uint16 *n_uint16_ptr = n["vuint16"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint16_ptr[i],va_uint16[i]);
}
// uint32_array
n["vuint32"].set(DataType::uint32(10));
n["vuint32"].as_uint32_array().set(va_uint32);
uint32 *n_uint32_ptr = n["vuint32"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint32_ptr[i],va_uint32[i]);
}
// uint64_array
n["vuint64"].set(DataType::uint64(10));
n["vuint64"].as_uint64_array().set(va_uint64);
uint64 *n_uint64_ptr = n["vuint64"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint64_ptr[i],va_uint64[i]);
}
// float32_array
n["vfloat32"].set(DataType::float32(10));
n["vfloat32"].as_float32_array().set(va_float32);
float32 *n_float32_ptr = n["vfloat32"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_float32_ptr[i],va_float32[i]);
}
// float64_array
n["vfloat64"].set(DataType::float64(10));
n["vfloat64"].as_float64_array().set(va_float64);
float64 *n_float64_ptr = n["vfloat64"].value();
for(index_t i=0;i<10;i++)
{
EXPECT_EQ(n_float64_ptr[i],va_float64[i]);
}
}
//-----------------------------------------------------------------------------
TEST(conduit_array, set_using_std_vectors)
{
std::vector<int8> v_int8(10,-8);
std::vector<int16> v_int16(10,-16);
std::vector<int32> v_int32(10,-32);
std::vector<int64> v_int64(10,-64);
std::vector<uint8> v_uint8(10,8);
std::vector<uint16> v_uint16(10,16);
std::vector<uint32> v_uint32(10,32);
std::vector<uint64> v_uint64(10,64);
std::vector<float32> v_float32(10,32.0);
std::vector<float64> v_float64(10,64.0);
Node n;
// int8_array
n["vint8"].set(DataType::int8(10));
n["vint8"].as_int8_array().set(v_int8);
int8 *n_int8_ptr = n["vint8"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_int8_ptr[i],v_int8[i]);
}
// int16_array
n["vint16"].set(DataType::int16(10));
n["vint16"].as_int16_array().set(v_int16);
int16 *n_int16_ptr = n["vint16"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_int16_ptr[i],v_int16[i]);
}
// int32_array
n["vint32"].set(DataType::int32(10));
n["vint32"].as_int32_array().set(v_int32);
int32 *n_int32_ptr = n["vint32"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_int32_ptr[i],v_int32[i]);
}
// int64_array
n["vint64"].set(DataType::int64(10));
n["vint64"].as_int64_array().set(v_int64);
int64 *n_int64_ptr = n["vint64"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_int64_ptr[i],v_int64[i]);
}
// uint8_array
n["vuint8"].set(DataType::uint8(10));
n["vuint8"].as_uint8_array().set(v_uint8);
uint8 *n_uint8_ptr = n["vuint8"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint8_ptr[i],v_uint8[i]);
}
// uint16_array
n["vuint16"].set(DataType::uint16(10));
n["vuint16"].as_uint16_array().set(v_uint16);
uint16 *n_uint16_ptr = n["vuint16"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint16_ptr[i],v_uint16[i]);
}
// uint32_array
n["vuint32"].set(DataType::uint32(10));
n["vuint32"].as_uint32_array().set(v_uint32);
uint32 *n_uint32_ptr = n["vuint32"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint32_ptr[i],v_uint32[i]);
}
// uint64_array
n["vuint64"].set(DataType::uint64(10));
n["vuint64"].as_uint64_array().set(v_uint64);
uint64 *n_uint64_ptr = n["vuint64"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_uint64_ptr[i],v_uint64[i]);
}
// float32_array
n["vfloat32"].set(DataType::float32(10));
n["vfloat32"].as_float32_array().set(v_float32);
float32 *n_float32_ptr = n["vfloat32"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_float32_ptr[i],v_float32[i]);
}
// float64_array
n["vfloat64"].set(DataType::float64(10));
n["vfloat64"].as_float64_array().set(v_float64);
float64 *n_float64_ptr = n["vfloat64"].value();
for(size_t i=0;i<10;i++)
{
EXPECT_EQ(n_float64_ptr[i],v_float64[i]);
}
}
//-----------------------------------------------------------------------------
TEST(conduit_array, print_bells_and_whistles)
{
Node n;
n["int32_1"].set(DataType::int32(1));
n["int32_2"].set(DataType::int32(2));
int32_array va_int32_1 = n["int32_1"].value();
int32_array va_int32_2 = n["int32_2"].value();
va_int32_1[0] = 1;
va_int32_2[0] = 1;
va_int32_2[1] = 2;
std::string s_json_int32_1 = va_int32_1.to_json();
std::string s_json_int32_2 = va_int32_2.to_json();
std::string s_yaml_int32_1 = va_int32_1.to_yaml();
std::string s_yaml_int32_2 = va_int32_2.to_yaml();
std::cout << "int32_1: " << s_json_int32_1 << std::endl;
std::cout << "int32_2: " << s_json_int32_2 << std::endl;
EXPECT_EQ(s_json_int32_1,"1");
EXPECT_EQ(s_json_int32_2,"[1, 2]");
EXPECT_EQ(s_json_int32_1,s_yaml_int32_1);
EXPECT_EQ(s_json_int32_2,s_yaml_int32_2);
std::vector<float64> v_float64(10,64.0);
float64_array va_float64(&v_float64[0],DataType::float64(10));
std::cout << "to_string(\"yaml\")" << std::endl;
std::cout << va_float64.to_string("yaml") << std::endl;
std::cout << "to_string(\"json\")" << std::endl;
std::cout << va_float64.to_string("json") << std::endl;
std::cout << "to_json()" << std::endl;
std::cout << va_float64.to_json() << std::endl;
std::cout << "to_yaml()" << std::endl;
std::cout << va_float64.to_yaml() << std::endl;
std::cout << "to_string_stream(..., yaml)" << std::endl;
va_float64.to_string_stream(std::cout,"yaml");
std::cout << std::endl;
std::cout << "to_string_stream(..., json)" << std::endl;
va_float64.to_string_stream(std::cout,"json");
std::cout << std::endl;
std::cout << "to_json_stream()" << std::endl;
va_float64.to_json_stream(std::cout);
std::cout << std::endl;
std::cout << "to_yaml_stream()" << std::endl;
va_float64.to_yaml_stream(std::cout);
std::cout << std::endl;
}
| 27.810185 | 81 | 0.571111 | [
"vector"
] |
d50323896028aaf3d7b257194a5ec2d1a08cc602 | 1,713 | cpp | C++ | JSON/src/Stringifier.cpp | CiaranWelsh/poco | c85929fc8338bedd28a7392b59b09a6d93f60e1e | [
"BSL-1.0"
] | 1 | 2021-03-25T23:10:39.000Z | 2021-03-25T23:10:39.000Z | JSON/src/Stringifier.cpp | sys-bio/poco | c85929fc8338bedd28a7392b59b09a6d93f60e1e | [
"BSL-1.0"
] | null | null | null | JSON/src/Stringifier.cpp | sys-bio/poco | c85929fc8338bedd28a7392b59b09a6d93f60e1e | [
"BSL-1.0"
] | null | null | null | //
// Stringifier.cpp
//
// $Id$
//
// Library: JSON
// Package: JSON
// Module: Stringifier
//
// Copyright (c) 2012, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//
#include "Poco/JSON/Stringifier.h"
#include "Poco/JSON/Array.h"
#include "Poco/JSON/Object.h"
#include <iomanip>
using Poco::Dynamic::Var;
namespace Poco {
namespace JSON {
void Stringifier::stringify(const Var& any, std::ostream& out, unsigned int indent, int step, bool preserveInsertionOrder)
{
if (step == -1) step = indent;
if ( any.type() == typeid(Object) )
{
const Object& o = any.extract<Object>();
o.stringify(out, indent == 0 ? 0 : indent, step);
}
else if ( any.type() == typeid(Array) )
{
const Array& a = any.extract<Array>();
a.stringify(out, indent == 0 ? 0 : indent, step);
}
else if ( any.type() == typeid(Object::Ptr) )
{
const Object::Ptr& o = any.extract<Object::Ptr>();
o->stringify(out, indent == 0 ? 0 : indent, step);
}
else if ( any.type() == typeid(Array::Ptr) )
{
const Array::Ptr& a = any.extract<Array::Ptr>();
a->stringify(out, indent == 0 ? 0 : indent, step);
}
else if ( any.isEmpty() )
{
out << "null";
}
else if ( any.isString() )
{
std::string value = any.convert<std::string>();
formatString(value, out);
}
else
{
out << any.convert<std::string>();
}
}
void Stringifier::formatString(const std::string& value, std::ostream& out)
{
out << '"';
for (std::string::const_iterator it = value.begin(),
end = value.end(); it != end; ++it)
{
if (*it <= 0x1F || *it == '"' || *it == '\\' || *it == '/')
{
out << '\\';
}
out << *it;
}
out << '"';
}
} } // Namespace Poco::JSON
| 19.689655 | 122 | 0.594279 | [
"object"
] |
d5037dc03ef25e414e395fca8608f6a33ea9f1b0 | 9,928 | hpp | C++ | kfusion/src/utils/dual_quaternion.hpp | Algomorph/dynamicfusion | bcc5c47f90b2ad0e6e4b1667c4ca5cb5d027638c | [
"BSD-3-Clause"
] | 336 | 2017-06-01T09:39:36.000Z | 2022-03-25T10:55:10.000Z | kfusion/src/utils/dual_quaternion.hpp | Algomorph/dynamicfusion | bcc5c47f90b2ad0e6e4b1667c4ca5cb5d027638c | [
"BSD-3-Clause"
] | 51 | 2017-07-18T18:24:59.000Z | 2021-06-25T04:34:38.000Z | kfusion/src/utils/dual_quaternion.hpp | Algomorph/dynamicfusion | bcc5c47f90b2ad0e6e4b1667c4ca5cb5d027638c | [
"BSD-3-Clause"
] | 112 | 2017-08-26T12:17:47.000Z | 2022-03-30T11:44:13.000Z | #ifndef DYNAMIC_FUSION_DUAL_QUATERNION_HPP
#define DYNAMIC_FUSION_DUAL_QUATERNION_HPP
#include<iostream>
#include<quaternion.hpp>
//Adapted from https://github.com/Poofjunior/QPose
/**
* \brief a dual quaternion class for encoding transformations.
* \details transformations are stored as first a translation; then a
* rotation. It is possible to switch the order. See this paper:
* https://www.thinkmind.org/download.php?articleid=intsys_v6_n12_2013_5
*/
namespace kfusion {
namespace utils {
static float epsilon()
{
return 1e-6;
}
template<typename T>
class DualQuaternion {
public:
/**
* \brief default constructor.
*/
DualQuaternion()
{
rotation_ = Quaternion<float>();
translation_ = Quaternion<float>();
};
~DualQuaternion(){};
/**
* \brief constructor that takes cartesian coordinates and Euler angles as
* arguments.
*/
// FIXME: always use Rodrigues angles, not Euler
DualQuaternion(T x, T y, T z, T roll, T pitch, T yaw)
{
// convert here.
rotation_.w_ = cos(roll / 2) * cos(pitch / 2) * cos(yaw / 2) +
sin(roll / 2) * sin(pitch / 2) * sin(yaw / 2);
rotation_.x_ = sin(roll / 2) * cos(pitch / 2) * cos(yaw / 2) -
cos(roll / 2) * sin(pitch / 2) * sin(yaw / 2);
rotation_.y_ = cos(roll / 2) * sin(pitch / 2) * cos(yaw / 2) +
sin(roll / 2) * cos(pitch / 2) * sin(yaw / 2);
rotation_.z_ = cos(roll / 2) * cos(pitch / 2) * sin(yaw / 2) -
sin(roll / 2) * sin(pitch / 2) * cos(yaw / 2);
translation_ = 0.5 * Quaternion<T>(0, x, y, z) * rotation_;
}
/**
* \brief constructor that takes two quaternions as arguments.
* \details The rotation
* quaternion has the conventional encoding for a rotation as a
* quaternion. The translation quaternion is a quaternion with
* cartesian coordinates encoded as (0, x, y, z)
*/
DualQuaternion(Quaternion<T> translation, Quaternion<T> rotation)
{
rotation_ = rotation;
translation_ = 0.5 * translation * rotation;
}
/**
* \brief store a rotation
* \param angle is in radians
*/
void encodeRotation(T angle, T x, T y, T z)
{
rotation_.encodeRotation(angle, x, y, z);
}
/**
* \brief store a rotation
* \param angle is in radians
*/
void encodeRotation(T x, T y, T z)
{
rotation_.encodeRotation(sqrt(x*x+y*y+z*z), x, y, z);
}
void encodeTranslation(T x, T y, T z)
{
translation_ = 0.5 * Quaternion<T>(0, x, y, z) * rotation_;
}
/// handle accumulating error.
void normalize()
{
T x, y, z;
getTranslation(x, y, z);
rotation_.normalize();
encodeTranslation(x, y, z);
}
/**
* \brief a reference-based method for acquiring the latest
* translation data.
*/
void getTranslation(T &x, T &y, T &z) const
{
Quaternion<T> result = getTranslation();
/// note: inverse of a quaternion is the same as the conjugate.
x = result.x_;
y = result.y_;
z = result.z_;
}
/**
* \brief a reference-based method for acquiring the latest
* translation data.
*/
void getTranslation(Vec3f& vec3f) const
{
getTranslation(vec3f[0], vec3f[1], vec3f[2]);
}
Quaternion<T> getTranslation() const
{
auto rot = rotation_;
rot.normalize();
return 2 * translation_ * rot.conjugate();
}
/**
* \brief a reference-based method for acquiring the latest rotation data.
*/
void getEuler(T &roll, T &pitch, T &yaw)
{
// FIXME: breaks for some value around PI.
roll = getRoll();
pitch = getPitch();
yaw = getYaw();
}
Quaternion<T> getRotation() const
{
return rotation_;
}
DualQuaternion operator+(const DualQuaternion &other)
{
DualQuaternion result;
result.rotation_ = rotation_ + other.rotation_;
result.translation_ = translation_ + other.translation_;
return result;
}
DualQuaternion operator-(const DualQuaternion &other)
{
DualQuaternion result;
result.rotation_ = rotation_ - other.rotation_;
result.translation_ = translation_ - other.translation_;
return result;
}
DualQuaternion operator*(const DualQuaternion &other)
{
DualQuaternion<T> result;
result.rotation_ = rotation_ * other.rotation_;
// result.translation_ = (rotation_ * other.translation_) + (translation_ * other.rotation_);
result.translation_ = translation_ + other.translation_;
return result;
}
DualQuaternion operator/(const std::pair<T,T> divisor)
{
DualQuaternion<T> result;
result.rotation_ = 1 / divisor.first * rotation_;
result.translation_ = 1 / divisor.second * translation_;
return result;
}
/// (left) Scalar Multiplication
/**
* \fn template <typename U> friend Quaternion operator*(const U scalar,
* \brief implements scalar multiplication for arbitrary scalar types
*/
template<typename U>
friend DualQuaternion operator*(const U scalar, const DualQuaternion &q)
{
DualQuaternion<T> result;
result.rotation_ = scalar * q.rotation_;
result.translation_ = scalar * q.translation_;
return result;
}
DualQuaternion conjugate()
{
DualQuaternion<T> result;
result.rotation_ = rotation_.conjugate();
result.translation_ = translation_.conjugate();
return result;
}
inline DualQuaternion identity()
{
return DualQuaternion(Quaternion<T>(0, 0, 0, 0),Quaternion<T>(0, 1, 0, 0));
}
void transform(Vec3f& point) // TODO: this should be a lot more generic
{
Vec3f translation;
getTranslation(translation);
rotation_.rotate(point);
point += translation;
}
void from_twist(const float &r0, const float &r1, const float &r2,
const float &x, const float &y, const float &z)
{
float norm = sqrt(r0*r0 + r1 * r1 + r2 * r2);
Quaternion<T> rotation;
if (norm > epsilon())
{
float cosNorm = cos(norm);
float sign = (cosNorm > 0.f) - (cosNorm < 0.f);
cosNorm *= sign;
float sinNorm_norm = sign * sin(norm) / norm;
rotation = Quaternion<T>(cosNorm, r0 * sinNorm_norm, r1 * sinNorm_norm, r2 * sinNorm_norm);
}
else
rotation = Quaternion<T>();
*this = DualQuaternion<T>(Quaternion<T>(0, x, y, z), rotation);
}
std::pair<T,T> magnitude()
{
DualQuaternion result = (*this) * (*this).conjugate();
return std::make_pair(result.rotation_.w_, result.translation_.w_);
}
private:
Quaternion<T> rotation_;
Quaternion<T> translation_;
T position_[3] = {}; /// default initialize vector to zeros.
T rotAxis_[3] = {}; /// default initialize vector to zeros.
T rotAngle_;
T getRoll()
{
// TODO: test this!
return atan2(2*((rotation_.w_ * rotation_.x_) + (rotation_.y_ * rotation_.z_)),
(1 - 2*((rotation_.x_*rotation_.x_) + (rotation_.y_*rotation_.y_))));
}
T getPitch()
{
// TODO: test this!
return asin(2*(rotation_.w_ * rotation_.y_ - rotation_.z_ * rotation_.x_));
}
T getYaw()
{
// TODO: test this!
return atan2(2*((rotation_.w_ * rotation_.z_) + (rotation_.x_ * rotation_.y_)),
(1 - 2*((rotation_.y_*rotation_.y_) + (rotation_.z_*rotation_.z_))));
}
};
template <typename T>
std::ostream &operator<<(std::ostream &os, const DualQuaternion<T> &q)
{
os << "[" << q.getRotation() << ", " << q.getTranslation()<< ", " << "]" << std::endl;
return os;
}
}
}
#endif //DYNAMIC_FUSION_DUAL_QUATERNION_HPP | 35.971014 | 111 | 0.475624 | [
"vector",
"transform"
] |
d507d119650a119c9ee215619c6e12e9f87419c0 | 1,291 | cpp | C++ | buildspecs/apps/amrex/test/Source/LevelBldAdv.cpp | liuyangzhuan/buildtest-cori | 59f5428e06336134537876723a7906bbfb16ada9 | [
"MIT"
] | 6 | 2021-04-01T16:37:18.000Z | 2021-12-09T19:05:56.000Z | buildspecs/apps/amrex/test/Source/LevelBldAdv.cpp | liuyangzhuan/buildtest-cori | 59f5428e06336134537876723a7906bbfb16ada9 | [
"MIT"
] | 33 | 2021-01-08T20:39:55.000Z | 2022-01-05T17:57:45.000Z | buildspecs/apps/amrex/test/Source/LevelBldAdv.cpp | liuyangzhuan/buildtest-cori | 59f5428e06336134537876723a7906bbfb16ada9 | [
"MIT"
] | 8 | 2021-03-18T06:51:55.000Z | 2021-11-24T19:26:25.000Z |
#include <AMReX_LevelBld.H>
#include <AmrLevelAdv.H>
using namespace amrex;
class LevelBldAdv
:
public LevelBld
{
virtual void variableSetUp () override;
virtual void variableCleanUp () override;
virtual AmrLevel *operator() () override;
virtual AmrLevel *operator() (Amr& papa,
int lev,
const Geometry& level_geom,
const BoxArray& ba,
const DistributionMapping& dm,
Real time) override;
};
LevelBldAdv Adv_bld;
LevelBld*
getLevelBld ()
{
return &Adv_bld;
}
void
LevelBldAdv::variableSetUp ()
{
AmrLevelAdv::variableSetUp();
}
void
LevelBldAdv::variableCleanUp ()
{
AmrLevelAdv::variableCleanUp();
}
AmrLevel*
LevelBldAdv::operator() ()
{
return new AmrLevelAdv;
}
AmrLevel*
LevelBldAdv::operator() (Amr& papa,
int lev,
const Geometry& level_geom,
const BoxArray& ba,
const DistributionMapping& dm,
Real time)
{
return new AmrLevelAdv(papa, lev, level_geom, ba, dm, time);
}
| 22.258621 | 65 | 0.515105 | [
"geometry"
] |
d50a27f53d86471106468a896737bcc0ec05d64f | 65,561 | hpp | C++ | include/x/cba/addons/main/script_macros_common.hpp | SzwedzikPL/SOGVanillaMainMenu | 2b2fef44ae6bf570a0a9afc1ccba2d99260b76d6 | [
"MIT"
] | 1 | 2021-05-21T09:07:32.000Z | 2021-05-21T09:07:32.000Z | include/x/cba/addons/main/script_macros_common.hpp | SzwedzikPL/SOGVanillaMainMenu | 2b2fef44ae6bf570a0a9afc1ccba2d99260b76d6 | [
"MIT"
] | 3 | 2021-05-09T19:52:22.000Z | 2021-05-21T22:05:19.000Z | include/x/cba/addons/main/script_macros_common.hpp | SzwedzikPL/SOGVanillaMainMenu | 2b2fef44ae6bf570a0a9afc1ccba2d99260b76d6 | [
"MIT"
] | 5 | 2021-04-25T14:01:54.000Z | 2022-01-28T19:27:40.000Z | /*
Header: script_macros_common.hpp
Description:
A general set of useful macro functions for use by CBA itself or by any module that uses CBA.
Authors:
Sickboy <sb_at_dev-heaven.net> and Spooner
*/
/* ****************************************************
New - Should be exported to general addon
Aim:
- Simplify (shorten) the amount of characters required for repetitive tasks
- Provide a solid structure that can be dynamic and easy editable (Which sometimes means we cannot adhere to Aim #1 ;-)
An example is the path that is built from defines. Some available in this file, others in mods and addons.
Follows Standard:
Object variables: PREFIX_COMPONENT
Main-object variables: PREFIX_main
Paths: MAINPREFIX\PREFIX\SUBPREFIX\COMPONENT\SCRIPTNAME.sqf
e.g: x\six\addons\sys_menu\fDate.sqf
Usage:
define PREFIX and COMPONENT, then include this file
(Note, you could have a main addon for your mod, define the PREFIX in a macros.hpp,
and include this script_macros_common.hpp file.
Then in your addons, add a component.hpp, define the COMPONENT,
and include your mod's script_macros.hpp
In your scripts you can then include the addon's component.hpp with relative path)
TODO:
- Try only to use 1 string type " vs '
- Evaluate double functions, and simplification
- Evaluate naming scheme; current = prototype
- Evaluate "Debug" features..
- Evaluate "create mini function per precompiled script, that will load the script on first usage, rather than on init"
- Also saw "Namespace" typeName, evaluate which we need :P
- Single/Multi player gamelogics? (Incase of MP, you would want only 1 gamelogic per component, which is pv'ed from server, etc)
*/
#ifndef MAINPREFIX
#define MAINPREFIX x
#endif
#ifndef SUBPREFIX
#define SUBPREFIX addons
#endif
#ifndef MAINLOGIC
#define MAINLOGIC main
#endif
#define ADDON DOUBLES(PREFIX,COMPONENT)
#define MAIN_ADDON DOUBLES(PREFIX,main)
/* -------------------------------------------
Macro: VERSION_CONFIG
Define CBA Versioning System config entries.
VERSION should be a floating-point number (1 separator).
VERSION_STR is a string representation of the version.
VERSION_AR is an array representation of the version.
VERSION must always be defined, otherwise it is 0.
VERSION_STR and VERSION_AR default to VERSION if undefined.
Parameters:
None
Example:
(begin example)
#define VERSION 1.0
#define VERSION_STR 1.0.1
#define VERSION_AR 1,0,1
class CfgPatches {
class MyMod_main {
VERSION_CONFIG;
};
};
(end)
Author:
?, Jonpas
------------------------------------------- */
#ifndef VERSION
#define VERSION 0
#endif
#ifndef VERSION_STR
#define VERSION_STR VERSION
#endif
#ifndef VERSION_AR
#define VERSION_AR VERSION
#endif
#ifndef VERSION_CONFIG
#define VERSION_CONFIG version = VERSION; versionStr = QUOTE(VERSION_STR); versionAr[] = {VERSION_AR}
#endif
/* -------------------------------------------
Group: Debugging
------------------------------------------- */
/* -------------------------------------------
Macros: DEBUG_MODE_x
Managing debugging based on debug level.
According to the *highest* level of debugging that has been defined *before* script_macros_common.hpp is included,
only the appropriate debugging commands will be functional. With no level explicitely defined, assume DEBUG_MODE_NORMAL.
DEBUG_MODE_FULL - Full debugging output.
DEBUG_MODE_NORMAL - All debugging except <TRACE_n()> and <LOG()> (Default setting if none specified).
DEBUG_MODE_MINIMAL - Only <ERROR()> and <ERROR_WITH_TITLE()> enabled.
Examples:
In order to turn on full debugging for a single file,
(begin example)
// Top of individual script file.
#define DEBUG_MODE_FULL
#include "script_component.hpp"
(end)
In order to force minimal debugging for a single component,
(begin example)
// Top of addons\<component>\script_component.hpp
// Ensure that any FULL and NORMAL setting from the individual files are undefined and MINIMAL is set.
#ifdef DEBUG_MODE_FULL
#undef DEBUG_MODE_FULL
#endif
#ifdef DEBUG_MODE_NORMAL
#undef DEBUG_MODE_NORMAL
#endif
#ifndef DEBUG_MODE_MINIMAL
#define DEBUG_MODE_MINIMAL
#endif
#include "script_macros.hpp"
(end)
In order to turn on full debugging for a whole addon,
(begin example)
// Top of addons\main\script_macros.hpp
#ifndef DEBUG_MODE_FULL
#define DEBUG_MODE_FULL
#endif
#include "\x\cba\addons\main\script_macros_common.hpp"
(end)
Author:
Spooner
------------------------------------------- */
// If DEBUG_MODE_FULL, then also enable DEBUG_MODE_NORMAL.
#ifdef DEBUG_MODE_FULL
#define DEBUG_MODE_NORMAL
#endif
// If DEBUG_MODE_NORMAL, then also enable DEBUG_MODE_MINIMAL.
#ifdef DEBUG_MODE_NORMAL
#define DEBUG_MODE_MINIMAL
#endif
// If no debug modes specified, use DEBUG_MODE_NORMAL (+ DEBUG_MODE_MINIMAL).
#ifndef DEBUG_MODE_MINIMAL
#define DEBUG_MODE_NORMAL
#define DEBUG_MODE_MINIMAL
#endif
#define LOG_SYS_FORMAT(LEVEL,MESSAGE) format ['[%1] (%2) %3: %4', toUpper 'PREFIX', 'COMPONENT', LEVEL, MESSAGE]
#ifdef DEBUG_SYNCHRONOUS
#define LOG_SYS(LEVEL,MESSAGE) diag_log text LOG_SYS_FORMAT(LEVEL,MESSAGE)
#else
#define LOG_SYS(LEVEL,MESSAGE) LOG_SYS_FORMAT(LEVEL,MESSAGE) call CBA_fnc_log
#endif
#define LOG_SYS_FILELINENUMBERS(LEVEL,MESSAGE) LOG_SYS(LEVEL,format [ARR_4('%1 %2:%3',MESSAGE,__FILE__,__LINE__ + 1)])
/* -------------------------------------------
Macro: LOG()
Log a debug message into the RPT log.
Only run if <DEBUG_MODE_FULL> is defined.
Parameters:
MESSAGE - Message to record <STRING>
Example:
(begin example)
LOG("Initiated clog-dancing simulator.");
(end)
Author:
Spooner
------------------------------------------- */
#ifdef DEBUG_MODE_FULL
#define LOG(MESSAGE) LOG_SYS('LOG',MESSAGE)
#define LOG_1(MESSAGE,ARG1) LOG(FORMAT_1(MESSAGE,ARG1))
#define LOG_2(MESSAGE,ARG1,ARG2) LOG(FORMAT_2(MESSAGE,ARG1,ARG2))
#define LOG_3(MESSAGE,ARG1,ARG2,ARG3) LOG(FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define LOG_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) LOG(FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define LOG_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) LOG(FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define LOG_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) LOG(FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define LOG_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) LOG(FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define LOG_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) LOG(FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
#else
#define LOG(MESSAGE) /* disabled */
#define LOG_1(MESSAGE,ARG1) /* disabled */
#define LOG_2(MESSAGE,ARG1,ARG2) /* disabled */
#define LOG_3(MESSAGE,ARG1,ARG2,ARG3) /* disabled */
#define LOG_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) /* disabled */
#define LOG_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) /* disabled */
#define LOG_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) /* disabled */
#define LOG_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) /* disabled */
#define LOG_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) /* disabled */
#endif
/* -------------------------------------------
Macro: INFO()
Record a message without file and line number in the RPT log.
Parameters:
MESSAGE - Message to record <STRING>
Example:
(begin example)
INFO("Mod X is loaded, do Y");
(end)
Author:
commy2
------------------------------------------- */
#define INFO(MESSAGE) LOG_SYS('INFO',MESSAGE)
#define INFO_1(MESSAGE,ARG1) INFO(FORMAT_1(MESSAGE,ARG1))
#define INFO_2(MESSAGE,ARG1,ARG2) INFO(FORMAT_2(MESSAGE,ARG1,ARG2))
#define INFO_3(MESSAGE,ARG1,ARG2,ARG3) INFO(FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define INFO_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) INFO(FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define INFO_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) INFO(FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define INFO_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) INFO(FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define INFO_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) INFO(FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define INFO_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) INFO(FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
/* -------------------------------------------
Macro: WARNING()
Record a non-critical error in the RPT log.
Only run if <DEBUG_MODE_NORMAL> or higher is defined.
Parameters:
MESSAGE - Message to record <STRING>
Example:
(begin example)
WARNING("This function has been deprecated. Please don't use it in future!");
(end)
Author:
Spooner
------------------------------------------- */
#ifdef DEBUG_MODE_NORMAL
#define WARNING(MESSAGE) LOG_SYS('WARNING',MESSAGE)
#define WARNING_1(MESSAGE,ARG1) WARNING(FORMAT_1(MESSAGE,ARG1))
#define WARNING_2(MESSAGE,ARG1,ARG2) WARNING(FORMAT_2(MESSAGE,ARG1,ARG2))
#define WARNING_3(MESSAGE,ARG1,ARG2,ARG3) WARNING(FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define WARNING_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) WARNING(FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define WARNING_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) WARNING(FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define WARNING_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) WARNING(FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define WARNING_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) WARNING(FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define WARNING_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) WARNING(FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
#else
#define WARNING(MESSAGE) /* disabled */
#define WARNING_1(MESSAGE,ARG1) /* disabled */
#define WARNING_2(MESSAGE,ARG1,ARG2) /* disabled */
#define WARNING_3(MESSAGE,ARG1,ARG2,ARG3) /* disabled */
#define WARNING_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) /* disabled */
#define WARNING_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) /* disabled */
#define WARNING_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) /* disabled */
#define WARNING_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) /* disabled */
#define WARNING_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) /* disabled */
#endif
/* -------------------------------------------
Macro: ERROR()
Record a critical error in the RPT log.
Parameters:
MESSAGE - Message to record <STRING>
Example:
(begin example)
ERROR("value of frog not found in config ...yada...yada...");
(end)
Author:
Spooner
------------------------------------------- */
#define ERROR(MESSAGE) LOG_SYS('ERROR',MESSAGE)
#define ERROR_1(MESSAGE,ARG1) ERROR(FORMAT_1(MESSAGE,ARG1))
#define ERROR_2(MESSAGE,ARG1,ARG2) ERROR(FORMAT_2(MESSAGE,ARG1,ARG2))
#define ERROR_3(MESSAGE,ARG1,ARG2,ARG3) ERROR(FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define ERROR_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) ERROR(FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define ERROR_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) ERROR(FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define ERROR_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) ERROR(FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define ERROR_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) ERROR(FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define ERROR_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) ERROR(FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
/* -------------------------------------------
Macro: ERROR_MSG()
Record a critical error in the RPT log and display on screen error message.
Newlines (\n) in the MESSAGE will be put on separate lines.
Parameters:
MESSAGE - Message to record <STRING>
Example:
(begin example)
ERROR_MSG("value of frog not found in config ...yada...yada...");
(end)
Author:
commy2
------------------------------------------- */
#define ERROR_MSG(MESSAGE) ['PREFIX', 'COMPONENT', nil, MESSAGE, __FILE__, __LINE__ + 1] call CBA_fnc_error
#define ERROR_MSG_1(MESSAGE,ARG1) ERROR_MSG(FORMAT_1(MESSAGE,ARG1))
#define ERROR_MSG_2(MESSAGE,ARG1,ARG2) ERROR_MSG(FORMAT_2(MESSAGE,ARG1,ARG2))
#define ERROR_MSG_3(MESSAGE,ARG1,ARG2,ARG3) ERROR_MSG(FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define ERROR_MSG_4(MESSAGE,ARG1,ARG2,ARG3,ARG4) ERROR_MSG(FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define ERROR_MSG_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) ERROR_MSG(FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define ERROR_MSG_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) ERROR_MSG(FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define ERROR_MSG_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) ERROR_MSG(FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define ERROR_MSG_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) ERROR_MSG(FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
/* -------------------------------------------
Macro: ERROR_WITH_TITLE()
Record a critical error in the RPT log.
The title can be specified (in <ERROR()> the heading is always just "ERROR")
Newlines (\n) in the MESSAGE will be put on separate lines.
Parameters:
TITLE - Title of error message <STRING>
MESSAGE - Body of error message <STRING>
Example:
(begin example)
ERROR_WITH_TITLE("Value not found","Value of frog not found in config ...yada...yada...");
(end)
Author:
Spooner
------------------------------------------- */
#define ERROR_WITH_TITLE(TITLE,MESSAGE) ['PREFIX', 'COMPONENT', TITLE, MESSAGE, __FILE__, __LINE__ + 1] call CBA_fnc_error
#define ERROR_WITH_TITLE_1(TITLE,MESSAGE,ARG1) ERROR_WITH_TITLE(TITLE,FORMAT_1(MESSAGE,ARG1))
#define ERROR_WITH_TITLE_2(TITLE,MESSAGE,ARG1,ARG2) ERROR_WITH_TITLE(TITLE,FORMAT_2(MESSAGE,ARG1,ARG2))
#define ERROR_WITH_TITLE_3(TITLE,MESSAGE,ARG1,ARG2,ARG3) ERROR_WITH_TITLE(TITLE,FORMAT_3(MESSAGE,ARG1,ARG2,ARG3))
#define ERROR_WITH_TITLE_4(TITLE,MESSAGE,ARG1,ARG2,ARG3,ARG4) ERROR_WITH_TITLE(TITLE,FORMAT_4(MESSAGE,ARG1,ARG2,ARG3,ARG4))
#define ERROR_WITH_TITLE_5(TITLE,MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5) ERROR_WITH_TITLE(TITLE,FORMAT_5(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5))
#define ERROR_WITH_TITLE_6(TITLE,MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) ERROR_WITH_TITLE(TITLE,FORMAT_6(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6))
#define ERROR_WITH_TITLE_7(TITLE,MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) ERROR_WITH_TITLE(TITLE,FORMAT_7(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7))
#define ERROR_WITH_TITLE_8(TITLE,MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) ERROR_WITH_TITLE(TITLE,FORMAT_8(MESSAGE,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8))
/* -------------------------------------------
Macro: MESSAGE_WITH_TITLE()
Record a single line in the RPT log.
Parameters:
TITLE - Title of log message <STRING>
MESSAGE - Body of message <STRING>
Example:
(begin example)
MESSAGE_WITH_TITLE("Value found","Value of frog found in config <someconfig>");
(end)
Author:
Killswitch
------------------------------------------- */
#define MESSAGE_WITH_TITLE(TITLE,MESSAGE) LOG_SYS_FILELINENUMBERS(TITLE,MESSAGE)
/* -------------------------------------------
Macro: RETDEF()
If a variable is undefined, return the default value. Otherwise, return the
variable itself.
Parameters:
VARIABLE - the variable to check
DEFAULT_VALUE - the default value to use if variable is undefined
Example:
(begin example)
// _var is undefined
hintSilent format ["_var=%1", RETDEF(_var,5)]; // "_var=5"
_var = 7;
hintSilent format ["_var=%1", RETDEF(_var,5)]; // "_var=7"
(end example)
Author:
654wak654
------------------------------------------- */
#define RETDEF(VARIABLE,DEFAULT_VALUE) (if (isNil {VARIABLE}) then [{DEFAULT_VALUE}, {VARIABLE}])
/* -------------------------------------------
Macro: RETNIL()
If a variable is undefined, return the value nil. Otherwise, return the
variable itself.
Parameters:
VARIABLE - the variable to check
Example:
(begin example)
// _var is undefined
hintSilent format ["_var=%1", RETNIL(_var)]; // "_var=any"
(end example)
Author:
Alef (see CBA issue #8514)
------------------------------------------- */
#define RETNIL(VARIABLE) RETDEF(VARIABLE,nil)
/* -------------------------------------------
Macros: TRACE_n()
Log a message and 1-9 variables to the RPT log.
Only run if <DEBUG_MODE_FULL> is defined.
TRACE_1(MESSAGE,A) - Log 1 variable.
TRACE_2(MESSAGE,A,B) - Log 2 variables.
TRACE_3(MESSAGE,A,B,C) - Log 3 variables.
TRACE_4(MESSAGE,A,B,C,D) - Log 4 variables.
TRACE_5(MESSAGE,A,B,C,D,E) - Log 5 variables.
TRACE_6(MESSAGE,A,B,C,D,E,F) - Log 6 variables.
TRACE_7(MESSAGE,A,B,C,D,E,F,G) - Log 7 variables.
TRACE_8(MESSAGE,A,B,C,D,E,F,G,H) - Log 8 variables.
TRACE_9(MESSAGE,A,B,C,D,E,F,G,H,I) - Log 9 variables.
Parameters:
MESSAGE - Message to add to the trace [String]
A..I - Variable names to log values of [Any]
Example:
(begin example)
TRACE_3("After takeoff",_vehicle player,getPos (_vehicle player), getPosASL (_vehicle player));
(end)
Author:
Spooner
------------------------------------------- */
#define PFORMAT_1(MESSAGE,A) \
format ['%1: A=%2', MESSAGE, RETNIL(A)]
#define PFORMAT_2(MESSAGE,A,B) \
format ['%1: A=%2, B=%3', MESSAGE, RETNIL(A), RETNIL(B)]
#define PFORMAT_3(MESSAGE,A,B,C) \
format ['%1: A=%2, B=%3, C=%4', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C)]
#define PFORMAT_4(MESSAGE,A,B,C,D) \
format ['%1: A=%2, B=%3, C=%4, D=%5', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D)]
#define PFORMAT_5(MESSAGE,A,B,C,D,E) \
format ['%1: A=%2, B=%3, C=%4, D=%5, E=%6', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D), RETNIL(E)]
#define PFORMAT_6(MESSAGE,A,B,C,D,E,F) \
format ['%1: A=%2, B=%3, C=%4, D=%5, E=%6, F=%7', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D), RETNIL(E), RETNIL(F)]
#define PFORMAT_7(MESSAGE,A,B,C,D,E,F,G) \
format ['%1: A=%2, B=%3, C=%4, D=%5, E=%6, F=%7, G=%8', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D), RETNIL(E), RETNIL(F), RETNIL(G)]
#define PFORMAT_8(MESSAGE,A,B,C,D,E,F,G,H) \
format ['%1: A=%2, B=%3, C=%4, D=%5, E=%6, F=%7, G=%8, H=%9', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D), RETNIL(E), RETNIL(F), RETNIL(G), RETNIL(H)]
#define PFORMAT_9(MESSAGE,A,B,C,D,E,F,G,H,I) \
format ['%1: A=%2, B=%3, C=%4, D=%5, E=%6, F=%7, G=%8, H=%9, I=%10', MESSAGE, RETNIL(A), RETNIL(B), RETNIL(C), RETNIL(D), RETNIL(E), RETNIL(F), RETNIL(G), RETNIL(H), RETNIL(I)]
#ifdef DEBUG_MODE_FULL
#define TRACE_1(MESSAGE,A) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_1(str diag_frameNo + ' ' + (MESSAGE),A))
#define TRACE_2(MESSAGE,A,B) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_2(str diag_frameNo + ' ' + (MESSAGE),A,B))
#define TRACE_3(MESSAGE,A,B,C) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_3(str diag_frameNo + ' ' + (MESSAGE),A,B,C))
#define TRACE_4(MESSAGE,A,B,C,D) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_4(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D))
#define TRACE_5(MESSAGE,A,B,C,D,E) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_5(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D,E))
#define TRACE_6(MESSAGE,A,B,C,D,E,F) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_6(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D,E,F))
#define TRACE_7(MESSAGE,A,B,C,D,E,F,G) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_7(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D,E,F,G))
#define TRACE_8(MESSAGE,A,B,C,D,E,F,G,H) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_8(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D,E,F,G,H))
#define TRACE_9(MESSAGE,A,B,C,D,E,F,G,H,I) LOG_SYS_FILELINENUMBERS('TRACE',PFORMAT_9(str diag_frameNo + ' ' + (MESSAGE),A,B,C,D,E,F,G,H,I))
#else
#define TRACE_1(MESSAGE,A) /* disabled */
#define TRACE_2(MESSAGE,A,B) /* disabled */
#define TRACE_3(MESSAGE,A,B,C) /* disabled */
#define TRACE_4(MESSAGE,A,B,C,D) /* disabled */
#define TRACE_5(MESSAGE,A,B,C,D,E) /* disabled */
#define TRACE_6(MESSAGE,A,B,C,D,E,F) /* disabled */
#define TRACE_7(MESSAGE,A,B,C,D,E,F,G) /* disabled */
#define TRACE_8(MESSAGE,A,B,C,D,E,F,G,H) /* disabled */
#define TRACE_9(MESSAGE,A,B,C,D,E,F,G,H,I) /* disabled */
#endif
/* -------------------------------------------
Group: General
------------------------------------------- */
// *************************************
// Internal Functions
// Token-pasting helpers: DOUBLES/TRIPLES join identifiers with underscores
// (e.g. DOUBLES(CBA,events) -> CBA_events); QUOTE stringifies its argument.
#define DOUBLES(var1,var2) var1##_##var2
#define TRIPLES(var1,var2,var3) var1##_##var2##_##var3
#define QUOTE(var1) #var1
// Component name variants. With MODULAR defined, each gets a single-letter
// prefix (presumably t/m/s/c distinguish sub-PBO flavours such as textures,
// models, sounds and configs -- TODO confirm against build docs); otherwise
// all variants collapse to the plain COMPONENT name.
#ifdef MODULAR
#define COMPONENT_T DOUBLES(t,COMPONENT)
#define COMPONENT_M DOUBLES(m,COMPONENT)
#define COMPONENT_S DOUBLES(s,COMPONENT)
#define COMPONENT_C DOUBLES(c,COMPONENT)
#define COMPONENT_F COMPONENT_C
#else
#define COMPONENT_T COMPONENT
#define COMPONENT_M COMPONENT
#define COMPONENT_S COMPONENT
#define COMPONENT_F COMPONENT
#define COMPONENT_C COMPONENT
#endif
/* -------------------------------------------
Macro: INC()
Description:
Increase a number by one.
Parameters:
VAR - Variable to increment [Number]
Example:
(begin example)
_counter = 0;
INC(_counter);
// _counter => 1
(end)
Author:
Spooner
------------------------------------------- */
#define INC(var) var = (var) + 1
/* -------------------------------------------
Macro: DEC()
Description:
Decrease a number by one.
Parameters:
VAR - Variable to decrement [Number]
Example:
(begin example)
_counter = 99;
DEC(_counter);
// _counter => 98
(end)
Author:
Spooner
------------------------------------------- */
#define DEC(var) var = (var) - 1
/* -------------------------------------------
Macro: ADD()
Description:
Add a value to a variable. Variable and value should be both Numbers or both Strings.
Parameters:
VAR - Variable to add to [Number or String]
VALUE - Value to add [Number or String]
Examples:
(begin example)
_counter = 2;
ADD(_counter,3);
// _counter => 5
(end)
(begin example)
_str = "hello";
ADD(_str," ");
ADD(_str,"Fred");
// _str => "hello Fred"
(end)
Author:
Sickboy
------------------------------------------- */
#define ADD(var1,var2) var1 = (var1) + (var2)
/* -------------------------------------------
Macro: SUB()
Description:
Subtract a value from a number variable. VAR and VALUE should both be Numbers.
Parameters:
VAR - Variable to subtract from [Number]
VALUE - Value to subtract [Number]
Examples:
(begin example)
_numChickens = 2;
SUB(_numChickens,3);
// _numChickens => -1
(end)
------------------------------------------- */
#define SUB(var1,var2) var1 = (var1) - (var2)
/* -------------------------------------------
Macro: REM()
Description:
Remove an element from an array each time it occurs.
This recreates the entire array, so use BIS_fnc_removeIndex if modification of the original array is required
or if only one of the elements that matches ELEMENT needs to be removed.
Parameters:
ARRAY - Array to modify [Array]
ELEMENT - Element to remove [Any]
Examples:
(begin example)
_array = [1, 2, 3, 4, 3, 8];
REM(_array,3);
// _array = [1, 2, 4, 8];
(end)
Author:
Spooner
------------------------------------------- */
#define REM(var1,var2) SUB(var1,[var2])
/* -------------------------------------------
Macro: PUSH()
Description:
Appends a single value onto the end of an ARRAY. Change is made to the ARRAY itself, not creating a new array.
Parameters:
ARRAY - Array to push element onto [Array]
ELEMENT - Element to push [Any]
Examples:
(begin example)
_fish = ["blue", "green", "smelly"];
PUSH(_fish,"monkey-flavoured");
// _fish => ["blue", "green", "smelly", "monkey-flavoured"]
(end)
Author:
Spooner
------------------------------------------- */
#define PUSH(var1,var2) (var1) pushBack (var2)
/* -------------------------------------------
Macro: MAP()
Description:
Applies given code to each element of the array, then assigns the
resulting array to the original
Parameters:
ARRAY - Array to be modified
CODE - Code that'll be applied to each element of the array.
Example:
(begin example)
_array = [1, 2, 3, 4, 3, 8];
MAP(_array,_x + 1);
// _array is now [2, 3, 4, 5, 4, 9];
(end)
Author:
654wak654
------------------------------------------- */
#define MAP(ARR,CODE) ARR = ARR apply {CODE}
/* -------------------------------------------
Macro: FILTER()
Description:
Filters an array based on given code, then assigns the resulting array
to the original
Parameters:
ARRAY - Array to be filtered
CODE - Condition to pick elements
Example:
(begin example)
_array = [1, 2, 3, 4, 3, 8];
FILTER(_array,_x % 2 == 0)
// _array is now [2, 4, 8];
(end)
Author:
Commy2
------------------------------------------- */
#define FILTER(ARR,CODE) ARR = ARR select {CODE}
/* -------------------------------------------
Macro: UNIQUE()
Description:
Removes duplicate values in given array
Parameters:
ARRAY - The array to be modified
Example:
(begin example)
_someArray = [4, 4, 5, 5, 5, 2];
UNIQUE(_someArray);
// _someArray is now [4, 5, 2]
(end)
Author:
Commy2
------------------------------------------- */
#define UNIQUE(ARR) ARR = ARR arrayIntersect ARR
/* -------------------------------------------
Macro: INTERSECTION()
Description:
Finds unique common elements between two arrays and assigns them
to the first array
Parameters:
ARRAY0 - The array to be modified
ARRAY1 - The array to find intersections with
Example:
(begin example)
_someArray = [1, 2, 3, 4, 5, 5];
_anotherArray = [4, 5, 6, 7];
INTERSECTION(_someArray,_anotherArray);
// _someArray is now [4, 5]
(end)
Author:
654wak654
------------------------------------------- */
#define INTERSECTION(ARG0,ARG1) ARG0 = ARG0 arrayIntersect (ARG1)
/* -------------------------------------------
Macro: ISNILS()
Description:
Sets a variable with a value, but only if it is undefined.
Parameters:
VARIABLE - Variable to set [Any, not nil]
DEFAULT_VALUE - Value to set VARIABLE to if it is undefined [Any, not nil]
Examples:
(begin example)
// _fish is undefined
ISNILS(_fish,0);
// _fish => 0
(end)
(begin example)
_fish = 12;
// ...later...
ISNILS(_fish,0);
// _fish => 12
(end)
Author:
Sickboy
------------------------------------------- */
#define ISNILS(VARIABLE,DEFAULT_VALUE) if (isNil #VARIABLE) then { VARIABLE = DEFAULT_VALUE }
#define ISNILS2(var1,var2,var3,var4) ISNILS(TRIPLES(var1,var2,var3),var4)
#define ISNILS3(var1,var2,var3) ISNILS(DOUBLES(var1,var2),var3)
#define ISNIL(var1,var2) ISNILS2(PREFIX,COMPONENT,var1,var2)
#define ISNILMAIN(var1,var2) ISNILS3(PREFIX,var1,var2)
#define CREATELOGICS(var1,var2) var1##_##var2 = ([sideLogic] call CBA_fnc_getSharedGroup) createUnit ["LOGIC", [0, 0, 0], [], 0, "NONE"]
#define CREATELOGICLOCALS(var1,var2) var1##_##var2 = "LOGIC" createVehicleLocal [0, 0, 0]
#define CREATELOGICGLOBALS(var1,var2) var1##_##var2 = ([sideLogic] call CBA_fnc_getSharedGroup) createUnit ["LOGIC", [0, 0, 0], [], 0, "NONE"]; publicVariable QUOTE(DOUBLES(var1,var2))
#define CREATELOGICGLOBALTESTS(var1,var2) var1##_##var2 = ([sideLogic] call CBA_fnc_getSharedGroup) createUnit [QUOTE(DOUBLES(ADDON,logic)), [0, 0, 0], [], 0, "NONE"]
#define GETVARS(var1,var2,var3) (var1##_##var2 getVariable #var3)
#define GETVARMAINS(var1,var2) GETVARS(var1,MAINLOGIC,var2)
#ifndef PATHTO_SYS
#define PATHTO_SYS(var1,var2,var3) \MAINPREFIX\var1\SUBPREFIX\var2\var3.sqf
#endif
#ifndef PATHTOF_SYS
#define PATHTOF_SYS(var1,var2,var3) \MAINPREFIX\var1\SUBPREFIX\var2\var3
#endif
#ifndef PATHTOF2_SYS
#define PATHTOF2_SYS(var1,var2,var3) MAINPREFIX\var1\SUBPREFIX\var2\var3
#endif
#define PATHTO_R(var1) PATHTOF2_SYS(PREFIX,COMPONENT_C,var1)
#define PATHTO_T(var1) PATHTOF_SYS(PREFIX,COMPONENT_T,var1)
#define PATHTO_M(var1) PATHTOF_SYS(PREFIX,COMPONENT_M,var1)
#define PATHTO_S(var1) PATHTOF_SYS(PREFIX,COMPONENT_S,var1)
#define PATHTO_C(var1) PATHTOF_SYS(PREFIX,COMPONENT_C,var1)
#define PATHTO_F(var1) PATHTO_SYS(PREFIX,COMPONENT_F,var1)
// Already quoted ""
#define QPATHTO_R(var1) QUOTE(PATHTO_R(var1))
#define QPATHTO_T(var1) QUOTE(PATHTO_T(var1))
#define QPATHTO_M(var1) QUOTE(PATHTO_M(var1))
#define QPATHTO_S(var1) QUOTE(PATHTO_S(var1))
#define QPATHTO_C(var1) QUOTE(PATHTO_C(var1))
#define QPATHTO_F(var1) QUOTE(PATHTO_F(var1))
// This only works for binarized configs after recompiling the pbos
// TODO: Reduce amount of calls / code..
#define COMPILE_FILE2_CFG_SYS(var1) compile preprocessFileLineNumbers var1
#define COMPILE_FILE2_SYS(var1) COMPILE_FILE2_CFG_SYS(var1)
#define COMPILE_FILE_SYS(var1,var2,var3) COMPILE_FILE2_SYS('PATHTO_SYS(var1,var2,var3)')
#define COMPILE_FILE_CFG_SYS(var1,var2,var3) COMPILE_FILE2_CFG_SYS('PATHTO_SYS(var1,var2,var3)')
#define SETVARS(var1,var2) var1##_##var2 setVariable
#define SETVARMAINS(var1) SETVARS(var1,MAINLOGIC)
#define GVARMAINS(var1,var2) var1##_##var2
#define CFGSETTINGSS(var1,var2) configFile >> "CfgSettings" >> #var1 >> #var2
//#define SETGVARS(var1,var2,var3) var1##_##var2##_##var3 =
//#define SETGVARMAINS(var1,var2) var1##_##var2 =
// Compile-Once, JIT: On first use.
// #define PREPMAIN_SYS(var1,var2,var3) var1##_fnc_##var3 = { var1##_fnc_##var3 = COMPILE_FILE_SYS(var1,var2,DOUBLES(fnc,var3)); if (isNil "_this") then { call var1##_fnc_##var3 } else { _this call var1##_fnc_##var3 } }
// #define PREP_SYS(var1,var2,var3) var1##_##var2##_fnc_##var3 = { var1##_##var2##_fnc_##var3 = COMPILE_FILE_SYS(var1,var2,DOUBLES(fnc,var3)); if (isNil "_this") then { call var1##_##var2##_fnc_##var3 } else { _this call var1##_##var2##_fnc_##var3 } }
// #define PREP_SYS2(var1,var2,var3,var4) var1##_##var2##_fnc_##var4 = { var1##_##var2##_fnc_##var4 = COMPILE_FILE_SYS(var1,var3,DOUBLES(fnc,var4)); if (isNil "_this") then { call var1##_##var2##_fnc_##var4 } else { _this call var1##_##var2##_fnc_##var4 } }
// Compile-Once, at Macro. As opposed to Compile-Once, on first use.
#define PREPMAIN_SYS(var1,var2,var3) var1##_fnc_##var3 = COMPILE_FILE_SYS(var1,var2,DOUBLES(fnc,var3))
#define PREP_SYS(var1,var2,var3) var1##_##var2##_fnc_##var3 = COMPILE_FILE_SYS(var1,var2,DOUBLES(fnc,var3))
#define PREP_SYS2(var1,var2,var3,var4) var1##_##var2##_fnc_##var4 = COMPILE_FILE_SYS(var1,var3,DOUBLES(fnc,var4))
#define LSTR(var1) TRIPLES(ADDON,STR,var1)
#ifndef DEBUG_SETTINGS
#define DEBUG_SETTINGS [false, true, false]
#endif
#define MSG_INIT QUOTE(Initializing: ADDON version: VERSION)
// *************************************
// User Functions
#define CFGSETTINGS CFGSETTINGSS(PREFIX,COMPONENT)
#define PATHTO(var1) PATHTO_SYS(PREFIX,COMPONENT_F,var1)
#define PATHTOF(var1) PATHTOF_SYS(PREFIX,COMPONENT,var1)
#define PATHTOEF(var1,var2) PATHTOF_SYS(PREFIX,var1,var2)
#define QPATHTOF(var1) QUOTE(PATHTOF(var1))
#define QPATHTOEF(var1,var2) QUOTE(PATHTOEF(var1,var2))
#define COMPILE_FILE(var1) COMPILE_FILE_SYS(PREFIX,COMPONENT_F,var1)
#define COMPILE_FILE_CFG(var1) COMPILE_FILE_CFG_SYS(PREFIX,COMPONENT_F,var1)
#define COMPILE_FILE2(var1) COMPILE_FILE2_SYS('var1')
#define COMPILE_FILE2_CFG(var1) COMPILE_FILE2_CFG_SYS('var1')
#define VERSIONING_SYS(var1) class CfgSettings \
{ \
class CBA \
{ \
class Versioning \
{ \
class var1 \
{ \
}; \
}; \
}; \
};
#define VERSIONING VERSIONING_SYS(PREFIX)
/* -------------------------------------------
Macro: GVAR()
Get full variable identifier for a global variable owned by this component.
Parameters:
VARIABLE - Partial name of global variable owned by this component [Any].
Example:
(begin example)
GVAR(frog) = 12;
// In SPON_FrogDancing component, equivalent to SPON_FrogDancing_frog = 12
(end)
Author:
Sickboy
------------------------------------------- */
#define GVAR(var1) DOUBLES(ADDON,var1)
#define EGVAR(var1,var2) TRIPLES(PREFIX,var1,var2)
#define QGVAR(var1) QUOTE(GVAR(var1))
#define QEGVAR(var1,var2) QUOTE(EGVAR(var1,var2))
#define QQGVAR(var1) QUOTE(QGVAR(var1))
#define QQEGVAR(var1,var2) QUOTE(QEGVAR(var1,var2))
/* -------------------------------------------
Macro: GVARMAIN()
Get full variable identifier for a global variable owned by this addon.
Parameters:
VARIABLE - Partial name of global variable owned by this addon [Any].
Example:
(begin example)
GVARMAIN(frog) = 12;
// In SPON_FrogDancing component, equivalent to SPON_frog = 12
(end)
Author:
Sickboy
------------------------------------------- */
#define GVARMAIN(var1) GVARMAINS(PREFIX,var1)
#define QGVARMAIN(var1) QUOTE(GVARMAIN(var1))
#define QQGVARMAIN(var1) QUOTE(QGVARMAIN(var1))
// TODO: What's this?
#define SETTINGS DOUBLES(PREFIX,settings)
#define CREATELOGIC CREATELOGICS(PREFIX,COMPONENT)
#define CREATELOGICGLOBAL CREATELOGICGLOBALS(PREFIX,COMPONENT)
#define CREATELOGICGLOBALTEST CREATELOGICGLOBALTESTS(PREFIX,COMPONENT)
#define CREATELOGICLOCAL CREATELOGICLOCALS(PREFIX,COMPONENT)
#define CREATELOGICMAIN CREATELOGICS(PREFIX,MAINLOGIC)
#define GETVAR(var1) GETVARS(PREFIX,COMPONENT,var1)
#define SETVAR SETVARS(PREFIX,COMPONENT)
#define SETVARMAIN SETVARMAINS(PREFIX)
#define IFCOUNT(var1,var2,var3) if (count var1 > var2) then { var3 = var1 select var2 };
/* -------------------------------------------
Macro: PREP()
Description:
Defines a function.
Full file path:
'\MAINPREFIX\PREFIX\SUBPREFIX\COMPONENT\fnc_<FNC>.sqf'
Resulting function name:
'PREFIX_COMPONENT_<FNC>'
The PREP macro should be placed in a script run by a XEH preStart and XEH preInit event.
The PREP macro allows for CBA function caching, which drastically speeds up load times.
Beware though that function caching is enabled by default and as such to disable it, you need to
#define DISABLE_COMPILE_CACHE above your #include "script_components.hpp" include!
The function will be defined in ui and mission namespace. It can not be overwritten without
a mission restart.
Parameters:
FUNCTION NAME - Name of the function, unquoted <STRING>
Examples:
(begin example)
PREP(banana);
call FUNC(banana);
(end)
Author:
dixon13
------------------------------------------- */
//#define PREP(var1) PREP_SYS(PREFIX,COMPONENT_F,var1)
#ifdef DISABLE_COMPILE_CACHE
#define PREP(var1) TRIPLES(ADDON,fnc,var1) = compile preProcessFileLineNumbers 'PATHTO_SYS(PREFIX,COMPONENT_F,DOUBLES(fnc,var1))'
#define PREPMAIN(var1) TRIPLES(PREFIX,fnc,var1) = compile preProcessFileLineNumbers 'PATHTO_SYS(PREFIX,COMPONENT_F,DOUBLES(fnc,var1))'
#else
#define PREP(var1) ['PATHTO_SYS(PREFIX,COMPONENT_F,DOUBLES(fnc,var1))', 'TRIPLES(ADDON,fnc,var1)'] call SLX_XEH_COMPILE_NEW
#define PREPMAIN(var1) ['PATHTO_SYS(PREFIX,COMPONENT_F,DOUBLES(fnc,var1))', 'TRIPLES(PREFIX,fnc,var1)'] call SLX_XEH_COMPILE_NEW
#endif
/* -------------------------------------------
Macro: PATHTO_FNC()
Description:
Defines a function inside CfgFunctions.
Full file path in addons:
'\MAINPREFIX\PREFIX\SUBPREFIX\COMPONENT\fnc_<FNC>.sqf'
Define 'RECOMPILE' to enable recompiling.
Define 'SKIP_FUNCTION_HEADER' to skip adding function header.
Parameters:
FUNCTION NAME - Name of the function, unquoted <STRING>
Examples:
(begin example)
// file name: fnc_addPerFrameHandler.sqf
class CfgFunctions {
class CBA {
class Misc {
PATHTO_FNC(addPerFrameHandler);
};
};
};
// -> CBA_fnc_addPerFrameHandler
(end)
Author:
dixon13, commy2
------------------------------------------- */
#ifdef RECOMPILE
#undef RECOMPILE
#define RECOMPILE recompile = 1
#else
#define RECOMPILE recompile = 0
#endif
// Set function header type: -1 - no header; 0 - default header; 1 - system header.
#ifdef SKIP_FUNCTION_HEADER
#define CFGFUNCTION_HEADER headerType = -1
#else
#define CFGFUNCTION_HEADER headerType = 0
#endif
#define PATHTO_FNC(func) class func {\
file = QPATHTOF(DOUBLES(fnc,func).sqf);\
CFGFUNCTION_HEADER;\
RECOMPILE;\
}
// Function-name builders:
//   FUNC(x)            -> ADDON_fnc_x        (function of this component)
//   FUNCMAIN(x)        -> PREFIX_fnc_x       (function of the main addon)
//   FUNC_INNER(c,x)    -> PREFIX_c_fnc_x     (function of another component c)
//   EFUNC(c,x)         -> alias for FUNC_INNER
#define FUNC(var1) TRIPLES(ADDON,fnc,var1)
#define FUNCMAIN(var1) TRIPLES(PREFIX,fnc,var1)
#define FUNC_INNER(var1,var2) TRIPLES(DOUBLES(PREFIX,var1),fnc,var2)
#define EFUNC(var1,var2) FUNC_INNER(var1,var2)
// Q* variants wrap the generated name in quotes; QQ* variants quote the
// already-quoted form (useful when expanded inside a quoted macro argument).
#define QFUNC(var1) QUOTE(FUNC(var1))
#define QFUNCMAIN(var1) QUOTE(FUNCMAIN(var1))
#define QFUNC_INNER(var1,var2) QUOTE(FUNC_INNER(var1,var2))
#define QEFUNC(var1,var2) QUOTE(EFUNC(var1,var2))
#define QQFUNC(var1) QUOTE(QFUNC(var1))
#define QQFUNCMAIN(var1) QUOTE(QFUNCMAIN(var1))
#define QQFUNC_INNER(var1,var2) QUOTE(QFUNC_INNER(var1,var2))
#define QQEFUNC(var1,var2) QUOTE(QEFUNC(var1,var2))
#ifndef PRELOAD_ADDONS
#define PRELOAD_ADDONS class CfgAddons \
{ \
class PreloadAddons \
{ \
class ADDON \
{ \
list[]={ QUOTE(ADDON) }; \
}; \
}; \
}
#endif
/* -------------------------------------------
Macros: ARG_#()
Select from list of array arguments
Parameters:
ARRAY - Nested array to select from; INDEX(1-8) - indices applied in sequence, each selecting one level deeper
Author:
Rommel
------------------------------------------- */
// ARG_1(ARRAY,I) -> (ARRAY select I); higher arities chain selects to walk
// nested arrays: ARG_2(A,I,J) -> ((A select I) select J), and so on.
#define ARG_1(A,B) ((A) select (B))
#define ARG_2(A,B,C) (ARG_1(ARG_1(A,B),C))
#define ARG_3(A,B,C,D) (ARG_1(ARG_2(A,B,C),D))
#define ARG_4(A,B,C,D,E) (ARG_1(ARG_3(A,B,C,D),E))
#define ARG_5(A,B,C,D,E,F) (ARG_1(ARG_4(A,B,C,D,E),F))
#define ARG_6(A,B,C,D,E,F,G) (ARG_1(ARG_5(A,B,C,D,E,F),G))
// Fixed: ARG_7/ARG_8 previously passed a duplicated E to the inner macro
// (8 arguments to 7-parameter ARG_6, 9 to 8-parameter ARG_7), which both
// over-supplied the macro and shifted every subsequent index by one.
#define ARG_7(A,B,C,D,E,F,G,H) (ARG_1(ARG_6(A,B,C,D,E,F,G),H))
#define ARG_8(A,B,C,D,E,F,G,H,I) (ARG_1(ARG_7(A,B,C,D,E,F,G,H),I))
/* -------------------------------------------
Macros: ARR_#()
Create list from arguments. Useful for working around , in macro parameters.
1-8 arguments possible.
Parameters:
VARIABLE(1-8) - elements for the list
Author:
Nou
------------------------------------------- */
#define ARR_1(ARG1) ARG1
#define ARR_2(ARG1,ARG2) ARG1, ARG2
#define ARR_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3
#define ARR_4(ARG1,ARG2,ARG3,ARG4) ARG1, ARG2, ARG3, ARG4
#define ARR_5(ARG1,ARG2,ARG3,ARG4,ARG5) ARG1, ARG2, ARG3, ARG4, ARG5
#define ARR_6(ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) ARG1, ARG2, ARG3, ARG4, ARG5, ARG6
#define ARR_7(ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7
#define ARR_8(ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8
/* -------------------------------------------
Macros: FORMAT_#(STR, ARG1)
Format - Useful for working around , in macro parameters.
1-8 arguments possible.
Parameters:
STRING - string used by format
VARIABLE(1-8) - elements for usage in format
Author:
Nou & Sickboy
------------------------------------------- */
#define FORMAT_1(STR,ARG1) format[STR, ARG1]
#define FORMAT_2(STR,ARG1,ARG2) format[STR, ARG1, ARG2]
#define FORMAT_3(STR,ARG1,ARG2,ARG3) format[STR, ARG1, ARG2, ARG3]
#define FORMAT_4(STR,ARG1,ARG2,ARG3,ARG4) format[STR, ARG1, ARG2, ARG3, ARG4]
#define FORMAT_5(STR,ARG1,ARG2,ARG3,ARG4,ARG5) format[STR, ARG1, ARG2, ARG3, ARG4, ARG5]
#define FORMAT_6(STR,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6) format[STR, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6]
#define FORMAT_7(STR,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7) format[STR, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7]
#define FORMAT_8(STR,ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7,ARG8) format[STR, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8]
// CONTROL(46) 12
#define DISPLAY(A) (findDisplay A)
#define CONTROL(A) DISPLAY(A) displayCtrl
/* -------------------------------------------
Macros: IS_x()
Checking the data types of variables.
IS_ARRAY() - Array
IS_BOOL() - Boolean
IS_BOOLEAN() - Boolean (synonym for <IS_BOOL()>)
IS_CODE() - Code block (i.e a compiled function)
IS_CONFIG() - Configuration
IS_CONTROL() - UI control handle.
IS_DISPLAY() - UI display handle.
IS_FUNCTION() - A compiled function (synonym for <IS_CODE()>)
IS_GROUP() - Group.
IS_INTEGER() - Is a number a whole number?
IS_LOCATION() - World location.
IS_NUMBER() - A floating point number (synonym for <IS_SCALAR()>)
IS_OBJECT() - World object.
IS_SCALAR() - Floating point number.
IS_SCRIPT() - A script handle (as returned by execVM and spawn commands).
IS_SIDE() - Game side.
IS_STRING() - String.
IS_TEXT() - Structured text.
Parameters:
VARIABLE - Variable to check if it is of a particular type [Any, not nil]
Author:
Spooner
------------------------------------------- */
// Generic type test: nil-safe comparison of VAR's type against a sample
// literal of the wanted type (isNil guard first, since isEqualType cannot
// accept nil).
#define IS_META_SYS(VAR,TYPE) (if (isNil {VAR}) then {false} else {(VAR) isEqualType TYPE})
#define IS_ARRAY(VAR) IS_META_SYS(VAR,[])
#define IS_BOOL(VAR) IS_META_SYS(VAR,false)
#define IS_CODE(VAR) IS_META_SYS(VAR,{})
#define IS_CONFIG(VAR) IS_META_SYS(VAR,configNull)
#define IS_CONTROL(VAR) IS_META_SYS(VAR,controlNull)
#define IS_DISPLAY(VAR) IS_META_SYS(VAR,displayNull)
#define IS_GROUP(VAR) IS_META_SYS(VAR,grpNull)
#define IS_OBJECT(VAR) IS_META_SYS(VAR,objNull)
#define IS_SCALAR(VAR) IS_META_SYS(VAR,0)
#define IS_SCRIPT(VAR) IS_META_SYS(VAR,scriptNull)
#define IS_SIDE(VAR) IS_META_SYS(VAR,west)
#define IS_STRING(VAR) IS_META_SYS(VAR,"STRING")
#define IS_TEXT(VAR) IS_META_SYS(VAR,text "")
#define IS_LOCATION(VAR) IS_META_SYS(VAR,locationNull)
// Synonyms for the above.
#define IS_BOOLEAN(VAR) IS_BOOL(VAR)
#define IS_FUNCTION(VAR) IS_CODE(VAR)
// Integer check: a scalar whose floor equals itself; false for non-scalars.
#define IS_INTEGER(VAR) (if (IS_SCALAR(VAR)) then {floor (VAR) == (VAR)} else {false})
#define IS_NUMBER(VAR) IS_SCALAR(VAR)
// Number -> string without scientific notation, built from the integer and
// fractional parts separately. NOTE(review): relies on (_this % _this)
// evaluating to 0 to seed the integer part, and on the `num` parameter being
// unused (the macro operates on _this) -- confirm both against callers.
#define FLOAT_TO_STRING(num) (if (_this == 0) then {"0"} else {str parseNumber (str (_this % _this) + str floor abs _this) + "." + (str (abs _this - floor abs _this) select [2]) + "0"})
/* -------------------------------------------
Macro: SCRIPT()
Sets name of script (relies on PREFIX and COMPONENT values being #defined).
Define 'SKIP_SCRIPT_NAME' to skip adding scriptName.
Parameters:
NAME - Name of script [Identifier]
Example:
(begin example)
SCRIPT(eradicateMuppets);
(end)
Author:
Spooner
------------------------------------------- */
#ifndef SKIP_SCRIPT_NAME
#define SCRIPT(NAME) scriptName 'PREFIX\COMPONENT\NAME'
#else
#define SCRIPT(NAME) /* nope */
#endif
/* -------------------------------------------
Macros: EXPLODE_n()
DEPRECATED - Use param/params commands added in Arma 3 1.48
Splitting an ARRAY into a number of variables (A, B, C, etc).
Note that this NOT does make the created variables private.
_PVT variants do.
EXPLODE_1(ARRAY,A) - Split a 1-element array into a separate variable.
EXPLODE_2(ARRAY,A,B) - Split a 2-element array into separate variables.
EXPLODE_3(ARRAY,A,B,C) - Split a 3-element array into separate variables.
EXPLODE_4(ARRAY,A,B,C,D) - Split a 4-element array into separate variables.
EXPLODE_5(ARRAY,A,B,C,D,E) - Split a 5-element array into separate variables.
EXPLODE_6(ARRAY,A,B,C,D,E,F) - Split a 6-element array into separate variables.
EXPLODE_7(ARRAY,A,B,C,D,E,F,G) - Split a 7-element array into separate variables.
EXPLODE_8(ARRAY,A,B,C,D,E,F,G,H) - Split a 8-element array into separate variables.
EXPLODE_9(ARRAY,A,B,C,D,E,F,G,H,I) - Split a 9-element array into separate variables.
Parameters:
ARRAY - Array to read from [Array]
A..I - Names of variables to set from array [Identifier]
Example:
(begin example)
_array = ["fred", 156.8, 120.9];
EXPLODE_3(_array,_name,_height,_weight);
(end)
Author:
Spooner
------------------------------------------- */
// EXPLODE_n family: unpack the first n elements of ARRAY into named variables.
//   EXPLODE_n_SYS - plain assignments only, no debug output
//   EXPLODE_n     - assignments plus TRACE_n debug logging
//   EXPLODE_n_PVT - declares the targets as private via the params command, plus TRACE_n logging
#define EXPLODE_1_SYS(ARRAY,A) A = ARRAY param [0]
#define EXPLODE_1(ARRAY,A) EXPLODE_1_SYS(ARRAY,A); TRACE_1("EXPLODE_1, " + QUOTE(ARRAY),A)
#define EXPLODE_1_PVT(ARRAY,A) ARRAY params [#A]; TRACE_1("EXPLODE_1, " + QUOTE(ARRAY),A)
#define EXPLODE_2_SYS(ARRAY,A,B) EXPLODE_1_SYS(ARRAY,A); B = ARRAY param [1]
#define EXPLODE_2(ARRAY,A,B) EXPLODE_2_SYS(ARRAY,A,B); TRACE_2("EXPLODE_2, " + QUOTE(ARRAY),A,B)
#define EXPLODE_2_PVT(ARRAY,A,B) ARRAY params [#A,#B]; TRACE_2("EXPLODE_2, " + QUOTE(ARRAY),A,B)
#define EXPLODE_3_SYS(ARRAY,A,B,C) EXPLODE_2_SYS(ARRAY,A,B); C = ARRAY param [2]
#define EXPLODE_3(ARRAY,A,B,C) EXPLODE_3_SYS(ARRAY,A,B,C); TRACE_3("EXPLODE_3, " + QUOTE(ARRAY),A,B,C)
#define EXPLODE_3_PVT(ARRAY,A,B,C) ARRAY params [#A,#B,#C]; TRACE_3("EXPLODE_3, " + QUOTE(ARRAY),A,B,C)
#define EXPLODE_4_SYS(ARRAY,A,B,C,D) EXPLODE_3_SYS(ARRAY,A,B,C); D = ARRAY param [3]
#define EXPLODE_4(ARRAY,A,B,C,D) EXPLODE_4_SYS(ARRAY,A,B,C,D); TRACE_4("EXPLODE_4, " + QUOTE(ARRAY),A,B,C,D)
#define EXPLODE_4_PVT(ARRAY,A,B,C,D) ARRAY params [#A,#B,#C,#D]; TRACE_4("EXPLODE_4, " + QUOTE(ARRAY),A,B,C,D)
#define EXPLODE_5_SYS(ARRAY,A,B,C,D,E) EXPLODE_4_SYS(ARRAY,A,B,C,D); E = ARRAY param [4]
#define EXPLODE_5(ARRAY,A,B,C,D,E) EXPLODE_5_SYS(ARRAY,A,B,C,D,E); TRACE_5("EXPLODE_5, " + QUOTE(ARRAY),A,B,C,D,E)
#define EXPLODE_5_PVT(ARRAY,A,B,C,D,E) ARRAY params [#A,#B,#C,#D,#E]; TRACE_5("EXPLODE_5, " + QUOTE(ARRAY),A,B,C,D,E)
#define EXPLODE_6_SYS(ARRAY,A,B,C,D,E,F) EXPLODE_5_SYS(ARRAY,A,B,C,D,E); F = ARRAY param [5]
#define EXPLODE_6(ARRAY,A,B,C,D,E,F) EXPLODE_6_SYS(ARRAY,A,B,C,D,E,F); TRACE_6("EXPLODE_6, " + QUOTE(ARRAY),A,B,C,D,E,F)
#define EXPLODE_6_PVT(ARRAY,A,B,C,D,E,F) ARRAY params [#A,#B,#C,#D,#E,#F]; TRACE_6("EXPLODE_6, " + QUOTE(ARRAY),A,B,C,D,E,F)
#define EXPLODE_7_SYS(ARRAY,A,B,C,D,E,F,G) EXPLODE_6_SYS(ARRAY,A,B,C,D,E,F); G = ARRAY param [6]
#define EXPLODE_7(ARRAY,A,B,C,D,E,F,G) EXPLODE_7_SYS(ARRAY,A,B,C,D,E,F,G); TRACE_7("EXPLODE_7, " + QUOTE(ARRAY),A,B,C,D,E,F,G)
#define EXPLODE_7_PVT(ARRAY,A,B,C,D,E,F,G) ARRAY params [#A,#B,#C,#D,#E,#F,#G]; TRACE_7("EXPLODE_7, " + QUOTE(ARRAY),A,B,C,D,E,F,G)
#define EXPLODE_8_SYS(ARRAY,A,B,C,D,E,F,G,H) EXPLODE_7_SYS(ARRAY,A,B,C,D,E,F,G); H = ARRAY param [7]
#define EXPLODE_8(ARRAY,A,B,C,D,E,F,G,H) EXPLODE_8_SYS(ARRAY,A,B,C,D,E,F,G,H); TRACE_8("EXPLODE_8, " + QUOTE(ARRAY),A,B,C,D,E,F,G,H)
#define EXPLODE_8_PVT(ARRAY,A,B,C,D,E,F,G,H) ARRAY params [#A,#B,#C,#D,#E,#F,#G,#H]; TRACE_8("EXPLODE_8, " + QUOTE(ARRAY),A,B,C,D,E,F,G,H)
#define EXPLODE_9_SYS(ARRAY,A,B,C,D,E,F,G,H,I) EXPLODE_8_SYS(ARRAY,A,B,C,D,E,F,G,H); I = ARRAY param [8]
#define EXPLODE_9(ARRAY,A,B,C,D,E,F,G,H,I) EXPLODE_9_SYS(ARRAY,A,B,C,D,E,F,G,H,I); TRACE_9("EXPLODE_9, " + QUOTE(ARRAY),A,B,C,D,E,F,G,H,I)
#define EXPLODE_9_PVT(ARRAY,A,B,C,D,E,F,G,H,I) ARRAY params [#A,#B,#C,#D,#E,#F,#G,#H,#I]; TRACE_9("EXPLODE_9, " + QUOTE(ARRAY),A,B,C,D,E,F,G,H,I)
/* -------------------------------------------
Macro: xSTRING()
Get full string identifier from a stringtable owned by this component.
Parameters:
VARIABLE - Partial name of global variable owned by this component [Any].
Example:
ADDON is CBA_Balls.
(begin example)
// Localized String (localize command must still be used with it)
LSTRING(Example); // STR_CBA_Balls_Example;
// Config String (note the $)
CSTRING(Example); // $STR_CBA_Balls_Example;
(end)
Author:
Jonpas
------------------------------------------- */
#ifndef STRING_MACROS_GUARD
#define STRING_MACROS_GUARD
// LSTRING/CSTRING build a stringtable key for this component (ADDON prefix);
// the E* variants take an explicit component name instead.
#define LSTRING(var1) QUOTE(TRIPLES(STR,ADDON,var1))
#define ELSTRING(var1,var2) QUOTE(TRIPLES(STR,DOUBLES(PREFIX,var1),var2))
// CSTRING variants prepend '$' for use in configs.
#define CSTRING(var1) QUOTE(TRIPLES($STR,ADDON,var1))
#define ECSTRING(var1,var2) QUOTE(TRIPLES($STR,DOUBLES(PREFIX,var1),var2))
// LLSTRING variants apply the localize command directly (script use).
#define LLSTRING(var1) localize QUOTE(TRIPLES(STR,ADDON,var1))
#define LELSTRING(var1,var2) localize QUOTE(TRIPLES(STR,DOUBLES(PREFIX,var1),var2))
#endif
/* -------------------------------------------
Group: Managing Function Parameters
------------------------------------------- */
/* -------------------------------------------
Macros: PARAMS_n()
DEPRECATED - Use param/params commands added in Arma 3 1.48
Setting variables based on parameters passed to a function.
Each parameter is defined as private and set to the appropriate value from _this.
PARAMS_1(A) - Get 1 parameter from the _this array (or _this if it's not an array).
PARAMS_2(A,B) - Get 2 parameters from the _this array.
PARAMS_3(A,B,C) - Get 3 parameters from the _this array.
PARAMS_4(A,B,C,D) - Get 4 parameters from the _this array.
PARAMS_5(A,B,C,D,E) - Get 5 parameters from the _this array.
PARAMS_6(A,B,C,D,E,F) - Get 6 parameters from the _this array.
PARAMS_7(A,B,C,D,E,F,G) - Get 7 parameters from the _this array.
PARAMS_8(A,B,C,D,E,F,G,H) - Get 8 parameters from the _this array.
Parameters:
A..H - Name of variable to read from _this [Identifier]
Example:
A function called like this:
(begin example)
[_name,_address,_telephone] call recordPersonalDetails;
(end)
expects 3 parameters and those variables could be initialised at the start of the function definition with:
(begin example)
recordPersonalDetails = {
PARAMS_3(_name,_address,_telephone);
// Rest of function follows...
};
(end)
Author:
Spooner
------------------------------------------- */
// Declare private variables from the _this array (deprecated; prefer the
// engine's params command). Each forwards to the matching EXPLODE_n_PVT.
#define PARAMS_1(A) EXPLODE_1_PVT(_this,A)
#define PARAMS_2(A,B) EXPLODE_2_PVT(_this,A,B)
#define PARAMS_3(A,B,C) EXPLODE_3_PVT(_this,A,B,C)
#define PARAMS_4(A,B,C,D) EXPLODE_4_PVT(_this,A,B,C,D)
#define PARAMS_5(A,B,C,D,E) EXPLODE_5_PVT(_this,A,B,C,D,E)
#define PARAMS_6(A,B,C,D,E,F) EXPLODE_6_PVT(_this,A,B,C,D,E,F)
#define PARAMS_7(A,B,C,D,E,F,G) EXPLODE_7_PVT(_this,A,B,C,D,E,F,G)
#define PARAMS_8(A,B,C,D,E,F,G,H) EXPLODE_8_PVT(_this,A,B,C,D,E,F,G,H)
#define PARAMS_9(A,B,C,D,E,F,G,H,I) EXPLODE_9_PVT(_this,A,B,C,D,E,F,G,H,I)
/* -------------------------------------------
Macro: DEFAULT_PARAM()
DEPRECATED - Use param/params commands added in Arma 3 1.48
Getting a default function parameter. This may be used together with <PARAMS_n()> to have a mix of required and
optional parameters.
Parameters:
INDEX - Index of parameter in _this [Integer, 0+]
NAME - Name of the variable to set [Identifier]
DEF_VALUE - Default value to use in case the array is too short or the value at INDEX is nil [Any]
Example:
A function called with optional parameters:
(begin example)
[_name] call myFunction;
[_name, _numberOfLegs] call myFunction;
[_name, _numberOfLegs, _hasAHead] call myFunction;
(end)
1 required parameter and 2 optional parameters. Those variables could be initialised at the start of the function
definition with:
(begin example)
myFunction = {
PARAMS_1(_name);
DEFAULT_PARAM(1,_numberOfLegs,2);
DEFAULT_PARAM(2,_hasAHead,true);
// Rest of function follows...
};
(end)
Author:
Spooner
------------------------------------------- */
// Read _this[INDEX] into private NAME, falling back to DEF_VALUE when the
// element is missing or nil. _this itself defaults to [] if undefined.
#define DEFAULT_PARAM(INDEX,NAME,DEF_VALUE) \
private [#NAME,"_this"]; \
ISNILS(_this,[]); \
NAME = _this param [INDEX, DEF_VALUE]; \
TRACE_3("DEFAULT_PARAM",INDEX,NAME,DEF_VALUE)
/* -------------------------------------------
Macro: KEY_PARAM()
Get value from key in _this list, return default when key is not included in list.
Parameters:
KEY - Key name [String]
NAME - Name of the variable to set [Identifier]
DEF_VALUE - Default value to use in case key not found [ANY]
Example:
Author:
Muzzleflash
------------------------------------------- */
// Look up KEY (case-insensitively, via both lower and upper variants) in the
// _this key/value list and store the result in private NAME, or DEF_VALUE.
#define KEY_PARAM(KEY,NAME,DEF_VALUE) \
private #NAME; \
NAME = [toLower KEY, toUpper KEY, DEF_VALUE, RETNIL(_this)] call CBA_fnc_getArg; \
TRACE_3("KEY_PARAM",KEY,NAME,DEF_VALUE)
/* -------------------------------------------
Group: Assertions
------------------------------------------- */
#define ASSERTION_ERROR(MESSAGE) ERROR_WITH_TITLE("Assertion failed!",MESSAGE)
/* -------------------------------------------
Macro: ASSERT_TRUE()
Asserts that a CONDITION is true. When an assertion fails, an error is raised with the given MESSAGE.
Parameters:
CONDITION - Condition to assert as true [Boolean]
MESSAGE - Message to display if CONDITION is false [String]
Example:
(begin example)
ASSERT_TRUE(_frogIsDead,"The frog is alive");
(end)
Author:
Spooner
------------------------------------------- */
// Raise an assertion error with MESSAGE when CONDITION is false.
#define ASSERT_TRUE(CONDITION,MESSAGE) \
if (not (CONDITION)) then \
{ \
ASSERTION_ERROR('Assertion (CONDITION) failed!\n\n' + (MESSAGE)); \
}
/* -------------------------------------------
Macro: ASSERT_FALSE()
Asserts that a CONDITION is false. When an assertion fails, an error is raised with the given MESSAGE.
Parameters:
CONDITION - Condition to assert as false [Boolean]
MESSAGE - Message to display if CONDITION is true [String]
Example:
(begin example)
ASSERT_FALSE(_frogIsDead,"The frog died");
(end)
Author:
Spooner
------------------------------------------- */
// Raise an assertion error with MESSAGE when CONDITION is true.
#define ASSERT_FALSE(CONDITION,MESSAGE) \
if (CONDITION) then \
{ \
ASSERTION_ERROR('Assertion (not (CONDITION)) failed!\n\n' + (MESSAGE)) \
}
/* -------------------------------------------
Macro: ASSERT_OP()
Asserts that (A OPERATOR B) is true. When an assertion fails, an error is raised with the given MESSAGE.
Parameters:
A - First value [Any]
OPERATOR - Binary operator to use [Operator]
B - Second value [Any]
MESSAGE - Message to display if (A OPERATOR B) is false. [String]
Example:
(begin example)
ASSERT_OP(_fish,>,5,"Too few fish!");
(end)
Author:
Spooner
------------------------------------------- */
// Raise an assertion error (including both operand values) when
// (A OPERATOR B) does not hold.
#define ASSERT_OP(A,OPERATOR,B,MESSAGE) \
if (not ((A) OPERATOR (B))) then \
{ \
ASSERTION_ERROR('Assertion (A OPERATOR B) failed!\n' + 'A: ' + (str (A)) + '\n' + 'B: ' + (str (B)) + "\n\n" + (MESSAGE)); \
}
/* -------------------------------------------
Macro: ASSERT_DEFINED()
Asserts that a VARIABLE is defined. When an assertion fails, an error is raised with the given MESSAGE.
Parameters:
VARIABLE - Variable to test if defined [String or Function].
MESSAGE - Message to display if variable is undefined [String].
Examples:
(begin example)
ASSERT_DEFINED("_anUndefinedVar","Too few fish!");
ASSERT_DEFINED({ obj getVariable "anUndefinedVar" },"Too many fish!");
(end)
Author:
Spooner
------------------------------------------- */
// Raise an assertion error when VARIABLE (a variable-name string or a code
// block yielding a value) is undefined/nil.
#define ASSERT_DEFINED(VARIABLE,MESSAGE) \
if (isNil VARIABLE) then \
{ \
ASSERTION_ERROR('Assertion (VARIABLE is defined) failed!\n\n' + (MESSAGE)); \
}
/* -------------------------------------------
Group: Unit tests
------------------------------------------- */
// Shared pass/fail reporters for the TEST_* macros below.
#define TEST_SUCCESS(MESSAGE) MESSAGE_WITH_TITLE("Test OK",MESSAGE)
#define TEST_FAIL(MESSAGE) ERROR_WITH_TITLE("Test FAIL",MESSAGE)
/* -------------------------------------------
Macro: TEST_TRUE()
Tests that a CONDITION is true.
If the condition is not true, an error is raised with the given MESSAGE.
Parameters:
CONDITION - Condition to assert as true [Boolean]
MESSAGE - Message to display if CONDITION is false [String]
Example:
(begin example)
TEST_TRUE(_frogIsDead,"The frog is alive");
(end)
Author:
Killswitch
------------------------------------------- */
// Log a test pass when CONDITION holds, otherwise a failure with MESSAGE.
#define TEST_TRUE(CONDITION, MESSAGE) \
if (CONDITION) then \
{ \
TEST_SUCCESS('(CONDITION)'); \
} \
else \
{ \
TEST_FAIL('(CONDITION) ' + (MESSAGE)); \
}
/* -------------------------------------------
Macro: TEST_FALSE()
Tests that a CONDITION is false.
If the condition is not false, an error is raised with the given MESSAGE.
Parameters:
CONDITION - Condition to test as false [Boolean]
MESSAGE - Message to display if CONDITION is true [String]
Example:
(begin example)
TEST_FALSE(_frogIsDead,"The frog died");
(end)
Author:
Killswitch
------------------------------------------- */
// Log a test pass when CONDITION is false, otherwise a failure with MESSAGE.
#define TEST_FALSE(CONDITION, MESSAGE) \
if (not (CONDITION)) then \
{ \
TEST_SUCCESS('(not (CONDITION))'); \
} \
else \
{ \
TEST_FAIL('(not (CONDITION)) ' + (MESSAGE)); \
}
/* -------------------------------------------
Macro: TEST_OP()
Tests that (A OPERATOR B) is true.
If the test fails, an error is raised with the given MESSAGE.
Parameters:
A - First value [Any]
OPERATOR - Binary operator to use [Operator]
B - Second value [Any]
MESSAGE - Message to display if (A OPERATOR B) is false. [String]
Example:
(begin example)
TEST_OP(_fish,>,5,"Too few fish!");
(end)
Author:
Killswitch
------------------------------------------- */
// Log a test pass when (A OPERATOR B) holds, otherwise a failure.
// Fix: MESSAGE was previously accepted but never used; it is now appended to
// the failure report, matching TEST_TRUE/TEST_FALSE/TEST_DEFINED_AND_OP.
#define TEST_OP(A,OPERATOR,B,MESSAGE) \
if ((A) OPERATOR (B)) then \
{ \
TEST_SUCCESS('(A OPERATOR B)') \
} \
else \
{ \
TEST_FAIL('(A OPERATOR B) ' + (MESSAGE)) \
};
/* -------------------------------------------
Macro: TEST_DEFINED_AND_OP()
Tests that A and B are defined and (A OPERATOR B) is true.
If the test fails, an error is raised with the given MESSAGE.
Parameters:
A - First value [Any]
OPERATOR - Binary operator to use [Operator]
B - Second value [Any]
MESSAGE - Message to display [String]
Example:
(begin example)
TEST_DEFINED_AND_OP(_fish,>,5,"Too few fish!");
(end)
Author:
Killswitch, PabstMirror
------------------------------------------- */
// Like TEST_OP, but first verifies that both operands are defined, reporting
// a distinct failure for an undefined A or B before comparing them.
#define TEST_DEFINED_AND_OP(A,OPERATOR,B,MESSAGE) \
if (isNil #A) then { \
TEST_FAIL('(A is not defined) ' + (MESSAGE)); \
} else { \
if (isNil #B) then { \
TEST_FAIL('(B is not defined) ' + (MESSAGE)); \
} else { \
if ((A) OPERATOR (B)) then { \
TEST_SUCCESS('(A OPERATOR B) ' + (MESSAGE)) \
} else { \
TEST_FAIL('(A OPERATOR B) ' + (MESSAGE)) \
}; }; };
/* -------------------------------------------
Macro: TEST_DEFINED()
Tests that a VARIABLE is defined.
Parameters:
VARIABLE - Variable to test if defined [String or Function].
MESSAGE - Message to display if variable is undefined [String].
Examples:
(begin example)
TEST_DEFINED("_anUndefinedVar","Too few fish!");
TEST_DEFINED({ obj getVariable "anUndefinedVar" },"Too many fish!");
(end)
Author:
Killswitch
------------------------------------------- */
// Log a test pass when VARIABLE (name string or code block) is defined,
// otherwise a failure with MESSAGE.
#define TEST_DEFINED(VARIABLE,MESSAGE) \
if (not isNil VARIABLE) then \
{ \
TEST_SUCCESS('(' + VARIABLE + ' is defined)'); \
} \
else \
{ \
TEST_FAIL('(' + VARIABLE + ' is not defined)' + (MESSAGE)); \
}
/* -------------------------------------------
Group: Managing Deprecation
------------------------------------------- */
/* -------------------------------------------
Macro: DEPRECATE_SYS()
Allow deprecation of a function that has been renamed.
Replaces an old OLD_FUNCTION (which will have PREFIX_ prepended) with a NEW_FUNCTION
(PREFIX_ prepended) with the intention that the old function will be disabled in the future.
Shows a warning in RPT each time the deprecated function is used, but runs the new function.
Parameters:
OLD_FUNCTION - Full name of old function [Identifier for function that does not exist any more]
NEW_FUNCTION - Full name of new function [Function]
Example:
(begin example)
// After renaming CBA_fnc_frog as CBA_fnc_fish
DEPRECATE_SYS(CBA_fnc_frog,CBA_fnc_fish);
(end)
Author:
Sickboy
------------------------------------------- */
// Redefine OLD_FUNCTION as a shim that warns in the RPT log and forwards the
// original arguments (or no arguments) to NEW_FUNCTION.
#define DEPRECATE_SYS(OLD_FUNCTION,NEW_FUNCTION) \
OLD_FUNCTION = { \
WARNING('Deprecated function used: OLD_FUNCTION (new: NEW_FUNCTION) in ADDON'); \
if (isNil "_this") then { call NEW_FUNCTION } else { _this call NEW_FUNCTION }; \
}
/* -------------------------------------------
Macro: DEPRECATE()
Allow deprecation of a function, in the current component, that has been renamed.
Replaces an OLD_FUNCTION (which will have PREFIX_ prepended) with a NEW_FUNCTION
(PREFIX_ prepended) with the intention that the old function will be disabled in the future.
Shows a warning in RPT each time the deprecated function is used, but runs the new function.
Parameters:
OLD_FUNCTION - Name of old function, assuming PREFIX [Identifier for function that does not exist any more]
NEW_FUNCTION - Name of new function, assuming PREFIX [Function]
Example:
(begin example)
// After renaming CBA_fnc_frog as CBA_fnc_fish
DEPRECATE(fnc_frog,fnc_fish);
(end)
Author:
Sickboy
------------------------------------------- */
// Component-local variant of DEPRECATE_SYS: both names get the PREFIX_ prefix.
#define DEPRECATE(OLD_FUNCTION,NEW_FUNCTION) \
DEPRECATE_SYS(DOUBLES(PREFIX,OLD_FUNCTION),DOUBLES(PREFIX,NEW_FUNCTION))
/* -------------------------------------------
Macro: OBSOLETE_SYS()
Replace a function that has become obsolete.
Replace an obsolete OLD_FUNCTION with a simple COMMAND_FUNCTION, with the intention that anyone
using the function should replace it with the simple command, since the function will be disabled in the future.
Shows a warning in RPT each time the deprecated function is used, and runs the command function.
Parameters:
OLD_FUNCTION - Full name of old function [Identifier for function that does not exist any more]
COMMAND_CODE - Code to replace the old function [Function]
Example:
(begin example)
// In Arma2, currentWeapon command made the CBA_fMyWeapon function obsolete:
OBSOLETE_SYS(CBA_fMyWeapon,{ currentWeapon player });
(end)
Author:
Spooner
------------------------------------------- */
// Redefine OLD_FUNCTION as a shim that warns in the RPT log and runs
// COMMAND_CODE (forwarding arguments when present).
#define OBSOLETE_SYS(OLD_FUNCTION,COMMAND_CODE) \
OLD_FUNCTION = { \
WARNING('Obsolete function used: (use: OLD_FUNCTION) in ADDON'); \
if (isNil "_this") then { call COMMAND_CODE } else { _this call COMMAND_CODE }; \
}
/* -------------------------------------------
Macro: OBSOLETE()
Replace a function, in the current component, that has become obsolete.
Replace an obsolete OLD_FUNCTION (which will have PREFIX_ prepended) with a simple
COMMAND_CODE, with the intention that anyone using the function should replace it with the simple
command.
Shows a warning in RPT each time the deprecated function is used.
Parameters:
OLD_FUNCTION - Name of old function, assuming PREFIX [Identifier for function that does not exist any more]
COMMAND_CODE - Code to replace the old function [Function]
Example:
(begin example)
// In Arma2, currentWeapon command made the CBA_fMyWeapon function obsolete:
OBSOLETE(fMyWeapon,{ currentWeapon player });
(end)
Author:
Spooner
------------------------------------------- */
// Component-local variant of OBSOLETE_SYS: OLD_FUNCTION gets the PREFIX_ prefix.
#define OBSOLETE(OLD_FUNCTION,COMMAND_CODE) \
OBSOLETE_SYS(DOUBLES(PREFIX,OLD_FUNCTION),COMMAND_CODE)
// Boilerplate CfgPatches class body for an addon config (NAME is the class name).
#define BWC_CONFIG(NAME) class NAME { \
units[] = {}; \
weapons[] = {}; \
requiredVersion = REQUIRED_VERSION; \
requiredAddons[] = {}; \
version = VERSION; \
}
// XEH Specific: helpers for CBA Extended Event Handlers.
#define XEH_CLASS CBA_Extended_EventHandlers
#define XEH_CLASS_BASE DOUBLES(XEH_CLASS,base)
// Opt a vehicle class out of / in to XEH; also sets the SLX_XEH_DISABLED flag.
#define XEH_DISABLED class EventHandlers { class XEH_CLASS {}; }; SLX_XEH_DISABLED = 1
#define XEH_ENABLED class EventHandlers { class XEH_CLASS { EXTENDED_EVENTHANDLERS }; }; SLX_XEH_DISABLED = 0
// TODO: These are actually outdated; _Once ?
// Compile-and-call strings for the pre/post init entry point scripts.
#define XEH_PRE_INIT QUOTE(call COMPILE_FILE(XEH_PreInit_Once))
#define XEH_PRE_CINIT QUOTE(call COMPILE_FILE(XEH_PreClientInit_Once))
#define XEH_PRE_SINIT QUOTE(call COMPILE_FILE(XEH_PreServerInit_Once))
#define XEH_POST_INIT QUOTE(call COMPILE_FILE(XEH_PostInit_Once))
#define XEH_POST_CINIT QUOTE(call COMPILE_FILE(XEH_PostClientInit_Once))
#define XEH_POST_SINIT QUOTE(call COMPILE_FILE(XEH_PostServerInit_Once))
/* -------------------------------------------
Macro: IS_ADMIN
Check if the local machine is an admin in the multiplayer environment.
Reports 'true' for logged and voted in admins.
Parameters:
None
Example:
(begin example)
// print "true" if player is admin
systemChat str IS_ADMIN;
(end)
Author:
commy2
------------------------------------------- */
// Builds the '#kick' server command inside the string at preprocess time;
// serverCommandAvailable '#kick' reports true for logged-in and voted admins.
#define IS_ADMIN_SYS(x) x##kick
#define IS_ADMIN serverCommandAvailable 'IS_ADMIN_SYS(#)'
/* -------------------------------------------
Macro: IS_ADMIN_LOGGED
Check if the local machine is a logged in admin in the multiplayer environment.
Reports 'false' if the player was voted to be the admin.
Parameters:
None
Example:
(begin example)
// print "true" if player is admin and entered in the server password
systemChat str IS_ADMIN_LOGGED;
(end)
Author:
commy2
------------------------------------------- */
// Same construction as IS_ADMIN, but '#shutdown' is only available to
// password-logged-in admins (false for voted admins).
#define IS_ADMIN_LOGGED_SYS(x) x##shutdown
#define IS_ADMIN_LOGGED serverCommandAvailable 'IS_ADMIN_LOGGED_SYS(#)'
/* -------------------------------------------
Macro: FILE_EXISTS
Check if a file exists
Reports "false" if the file does not exist.
Parameters:
FILE - Path to the file
Example:
(begin example)
// print "true" if file exists
systemChat str FILE_EXISTS("\A3\ui_f\data\igui\cfg\cursors\weapon_ca.paa");
(end)
Author:
commy2
------------------------------------------- */
#define FILE_EXISTS(FILE) (fileExists (FILE))
| 35.786572 | 257 | 0.640259 | [
"object",
"solid"
] |
d50d668f9b2c19a377a080fb10c08f82c4b368af | 13,000 | cc | C++ | src/systat.cc | hellojixian/node-systat | 7d1601a1d27a85e6bb6d5cd936701263c847ea21 | [
"BSD-3-Clause"
] | 1 | 2017-10-23T13:37:54.000Z | 2017-10-23T13:37:54.000Z | src/systat.cc | hellojixian/node-systat | 7d1601a1d27a85e6bb6d5cd936701263c847ea21 | [
"BSD-3-Clause"
] | null | null | null | src/systat.cc | hellojixian/node-systat | 7d1601a1d27a85e6bb6d5cd936701263c847ea21 | [
"BSD-3-Clause"
] | null | null | null | #include <node.h>
#include <stdio.h>
#include <string>
#include <vector>
#include <iostream>
#include "sensor/sensor.h"
#include "system/system.h"
namespace shadowgrid {
using v8::FunctionCallbackInfo;
using v8::Isolate;
using v8::Local;
using v8::Object;
using v8::String;
using v8::Value;
using v8::Array;
using v8::Boolean;
using v8::Number;
// JS binding: returns whether the sensor chip was detected (Boolean).
void checkChip(const FunctionCallbackInfo<Value>& args){
    Isolate* isolate = args.GetIsolate();
    args.GetReturnValue().Set(Boolean::New(isolate, Sensor::checkChip()));
}
void getSystemTemperatures(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Array> results = Array::New(isolate);
//get values
std::vector<int> ret = Sensor::getSystemTemperatures();
//assign value to v8
for (std::vector<int>::iterator it = ret.begin() ; it != ret.end(); ++it){
results->Set(std::distance(ret.begin(),it), //index
v8::Number::New(isolate,*it)); //value
}
args.GetReturnValue().Set(results);
}
void getCPUTemperatures(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Array> results = Array::New(isolate);
//get values
std::vector<int> ret = System::getCPUTemperatures();
//assign value to v8
for (std::vector<int>::iterator it = ret.begin() ; it != ret.end(); ++it){
results->Set(std::distance(ret.begin(),it), //index
v8::Number::New(isolate,*it)); //value
}
args.GetReturnValue().Set(results);
}
void getFanSpeeds(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Array> results = Array::New(isolate);
//get values
std::vector<int> ret = Sensor::getFanSpeeds();
//assign value to v8
for (std::vector<int>::iterator it = ret.begin() ; it != ret.end(); ++it){
results->Set(std::distance(ret.begin(),it), //index
v8::Number::New(isolate,*it)); //value
}
args.GetReturnValue().Set(results);
}
void getVoltages(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Array> results = Array::New(isolate);
//get values
std::vector<int> ret = Sensor::getVoltages();
//assign value to v8
for (std::vector<int>::iterator it = ret.begin() ; it != ret.end(); ++it){
results->Set(std::distance(ret.begin(),it), //index
v8::Number::New(isolate,*it)); //value
}
args.GetReturnValue().Set(results);
}
void getNICStat(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Object> results = Object::New(isolate);
Local<Object> framesSentDetail = Object::New(isolate);
Local<Object> framesReceivedDetail = Object::New(isolate);
//get arugments
v8::String::Utf8Value nic(args[0]->ToString());
//call system
NICStatInfo info = System::getNICStat(*nic);
results->Set(String::NewFromUtf8(isolate,"packetsSent"),
Number::New(isolate,info.packetsSent));
results->Set(String::NewFromUtf8(isolate,"packetsReceived"),
Number::New(isolate,info.packetsReceived));
results->Set(String::NewFromUtf8(isolate,"bytesSent"),
Number::New(isolate,info.bytesSent));
results->Set(String::NewFromUtf8(isolate,"bytesReceived"),
Number::New(isolate,info.bytesReceived));
results->Set(String::NewFromUtf8(isolate,"framesSent"),
Number::New(isolate,info.framesSent));
results->Set(String::NewFromUtf8(isolate,"framesReceived"),
Number::New(isolate,info.framesReceived));
results->Set(String::NewFromUtf8(isolate,"framesSentDetail"),
framesSentDetail);
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames1024_1522"),
Number::New(isolate,info.framesSentDetail.frames1024_1522));
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames512_1023"),
Number::New(isolate,info.framesSentDetail.frames512_1023));
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames256_511"),
Number::New(isolate,info.framesSentDetail.frames256_511));
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames128_255"),
Number::New(isolate,info.framesSentDetail.frames256_511));
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames65_127"),
Number::New(isolate,info.framesSentDetail.frames65_127));
framesSentDetail->Set(String::NewFromUtf8(isolate,"frames64"),
Number::New(isolate,info.framesSentDetail.frames64));
results->Set(String::NewFromUtf8(isolate,"framesReceviedDetail"),
framesReceivedDetail);
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames1024_1522"),
Number::New(isolate,info.framesReceivedDetail.frames1024_1522));
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames512_1023"),
Number::New(isolate,info.framesReceivedDetail.frames512_1023));
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames256_511"),
Number::New(isolate,info.framesReceivedDetail.frames256_511));
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames128_255"),
Number::New(isolate,info.framesReceivedDetail.frames256_511));
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames65_127"),
Number::New(isolate,info.framesReceivedDetail.frames65_127));
framesReceivedDetail->Set(String::NewFromUtf8(isolate,"frames64"),
Number::New(isolate,info.framesReceivedDetail.frames64));
results->Set(String::NewFromUtf8(isolate,"framesSentDetail"),
framesSentDetail);
results->Set(String::NewFromUtf8(isolate,"boardcastPacketsSent"),
Number::New(isolate,info.boardcastPacketsSent));
results->Set(String::NewFromUtf8(isolate,"boardcastPacketsReceived"),
Number::New(isolate,info.boardcastPacketsReceived));
results->Set(String::NewFromUtf8(isolate,"multicastPacketsSent"),
Number::New(isolate,info.multicastPacketsSent));
results->Set(String::NewFromUtf8(isolate,"multicastPacketsReceived"),
Number::New(isolate,info.multicastPacketsReceived));
//prepare output
args.GetReturnValue().Set(results);
}
void getDiskIOStat(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Object> result = Object::New(isolate);
//call system
std::vector<DiskIOStat> disks = System::getDiskIOStat();
for (auto it=disks.begin(); it != disks.end(); ++it) {
Local<Object> disk = Object::New(isolate);
disk->Set(String::NewFromUtf8(isolate,"readOperations"),
Number::New(isolate,it->readOperations));
disk->Set(String::NewFromUtf8(isolate,"writeOperations"),
Number::New(isolate,it->writeOperations));
disk->Set(String::NewFromUtf8(isolate,"readBytes"),
Number::New(isolate,it->readBytes));
disk->Set(String::NewFromUtf8(isolate,"writeBytes"),
Number::New(isolate,it->writeBytes));
disk->Set(String::NewFromUtf8(isolate,"readTime"),
Number::New(isolate,it->readTime));
disk->Set(String::NewFromUtf8(isolate,"writeTime"),
Number::New(isolate,it->writeTime));
disk->Set(String::NewFromUtf8(isolate,"busyTime"),
Number::New(isolate,it->busyTime));
result->Set(String::NewFromUtf8(isolate,it->name),disk);
}
args.GetReturnValue().Set(result);
}
void getDiskPartitions(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Array> result = Array::New(isolate);
//call system
std::vector<DiskPartitionInfo> partitions = System::getDiskPartitions();
for (auto it=partitions.begin(); it != partitions.end(); ++it) {
Local<Object> partition = Object::New(isolate);
partition->Set(String::NewFromUtf8(isolate,"device"),
String::NewFromUtf8(isolate,it->device));
partition->Set(String::NewFromUtf8(isolate,"mountPoint"),
String::NewFromUtf8(isolate,it->mountPoint));
partition->Set(String::NewFromUtf8(isolate,"fileSystem"),
String::NewFromUtf8(isolate,it->fileSystem));
result->Set(std::distance(partitions.begin(),it),partition);
}
args.GetReturnValue().Set(result);
}
void getDiskUsage(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Object> result = Object::New(isolate);
//get arugments
v8::String::Utf8Value mountPoint(args[0]->ToString());
//call system
DiskUsageInfo disk = System::getDiskUsage(*mountPoint);
Local<Object> space = Object::New(isolate);
space->Set(String::NewFromUtf8(isolate,"total"),
Number::New(isolate,disk.space.total));
space->Set(String::NewFromUtf8(isolate,"used"),
Number::New(isolate,disk.space.used));
space->Set(String::NewFromUtf8(isolate,"free"),
Number::New(isolate,disk.space.free));
space->Set(String::NewFromUtf8(isolate,"percent"),
Number::New(isolate,disk.space.percent));
result->Set(String::NewFromUtf8(isolate,"space"),space);
Local<Object> inode = Object::New(isolate);
inode->Set(String::NewFromUtf8(isolate,"total"),
Number::New(isolate,disk.inode.total));
inode->Set(String::NewFromUtf8(isolate,"used"),
Number::New(isolate,disk.inode.used));
inode->Set(String::NewFromUtf8(isolate,"free"),
Number::New(isolate,disk.inode.free));
inode->Set(String::NewFromUtf8(isolate,"percent"),
Number::New(isolate,disk.inode.percent));
result->Set(String::NewFromUtf8(isolate,"inode"),inode);
args.GetReturnValue().Set(result);
}
void getNetworkIOStat(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Object> result = Object::New(isolate);
//call system
std::vector<NetworkIOStat> stats = System::getNetworkIOStat();
for(auto it = stats.begin(); it!= stats.end(); ++it){
Local<Object> interface = Object::New(isolate);
interface->Set(String::NewFromUtf8(isolate,"bytesReceived"),
Number::New(isolate, it->bytesReceived));
interface->Set(String::NewFromUtf8(isolate,"bytesSent"),
Number::New(isolate, it->bytesSent));
interface->Set(String::NewFromUtf8(isolate,"packetsReceived"),
Number::New(isolate, it->packetsReceived));
interface->Set(String::NewFromUtf8(isolate,"packetsSent"),
Number::New(isolate, it->packetsSent));
interface->Set(String::NewFromUtf8(isolate,"multicastPacketsReceived"),
Number::New(isolate, it->multicastPacketsReceived));
interface->Set(String::NewFromUtf8(isolate,"multicastPacketsSent"),
Number::New(isolate, it->multicastPacketsSent));
interface->Set(String::NewFromUtf8(isolate,"errorIn"),
Number::New(isolate, it->errorIn));
interface->Set(String::NewFromUtf8(isolate,"errorOut"),
Number::New(isolate, it->errorOut));
interface->Set(String::NewFromUtf8(isolate,"dropIn"),
Number::New(isolate, it->dropIn));
interface->Set(String::NewFromUtf8(isolate,"dropOut"),
Number::New(isolate, it->dropOut));
interface->Set(String::NewFromUtf8(isolate,"collisions"),
Number::New(isolate, it->collisions));
result->Set(String::NewFromUtf8(isolate, it->name), interface);
}
args.GetReturnValue().Set(result);
}
void getNetworkInterfaceStatus(const FunctionCallbackInfo<Value>& args){
Isolate* isolate = args.GetIsolate();
Local<Object> result = Object::New(isolate);
//call system
std::vector<NetworkInterfaceStatus> stats = System::getNetworkInterfaceStatus();
for(auto it = stats.begin(); it!= stats.end(); ++it){
Local<Object> interface = Object::New(isolate);
interface->Set(String::NewFromUtf8(isolate,"isUp"),
Boolean::New(isolate, it->isUp));
interface->Set(String::NewFromUtf8(isolate,"speed"),
Number::New(isolate, it->speed));
interface->Set(String::NewFromUtf8(isolate,"mtu"),
Number::New(isolate, it->mtu));
interface->Set(String::NewFromUtf8(isolate,"duplex"),
Number::New(isolate, it->duplex));
result->Set(String::NewFromUtf8(isolate, it->name), interface);
}
args.GetReturnValue().Set(result);
}
// Addon entry point: registers every native method on the module's exports.
void init(Local<Object> exports) {
// Sensor-chip readings.
NODE_SET_METHOD(exports, "checkChip", checkChip);
NODE_SET_METHOD(exports, "getFanSpeeds", getFanSpeeds);
NODE_SET_METHOD(exports, "getSystemTemperatures",getSystemTemperatures);
NODE_SET_METHOD(exports, "getCPUTemperatures", getCPUTemperatures);
NODE_SET_METHOD(exports, "getVoltages", getVoltages);
// OS-level statistics.
NODE_SET_METHOD(exports, "getNICStat", getNICStat);
NODE_SET_METHOD(exports, "getDiskIOStat", getDiskIOStat);
NODE_SET_METHOD(exports, "getDiskPartitions", getDiskPartitions);
NODE_SET_METHOD(exports, "getDiskUsage", getDiskUsage);
NODE_SET_METHOD(exports, "getNetworkIOStat", getNetworkIOStat);
NODE_SET_METHOD(exports, "getNetworkInterfaceStatus", getNetworkInterfaceStatus);
}
} | 34.759358 | 86 | 0.691231 | [
"object",
"vector"
] |
d50e2060acfe64ea09980d0b27639cd6daeb421d | 4,898 | cc | C++ | tensorflow/core/kernels/unique_op.cc | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 13 | 2017-02-22T02:20:06.000Z | 2018-06-06T04:18:03.000Z | tensorflow/core/kernels/unique_op.cc | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 3 | 2020-03-24T18:15:52.000Z | 2021-02-02T22:28:38.000Z | tensorflow/core/kernels/unique_op.cc | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 3 | 2017-06-09T10:39:33.000Z | 2021-04-08T16:13:30.000Z | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <utility>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
// Computes the unique elements of a 1-D input tensor.
//
// Outputs:
//   0: the unique values, ordered by first occurrence in the input.
//   1: for each input element, the index of its value within output 0.
//   2 (only when registered as UniqueWithCounts): occurrence count per
//      unique value.
template <typename T>
class UniqueOp : public OpKernel {
 public:
  explicit UniqueOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    OP_REQUIRES(context, TensorShapeUtils::IsVector(input.shape()),
                errors::InvalidArgument("unique expects a 1D vector."));
    // TODO(dga): Make unique polymorphic for returning int32 and int64
    // vectors to support large tensors.
    OP_REQUIRES(context,
                input.NumElements() <= std::numeric_limits<int32>::max(),
                errors::InvalidArgument(
                    "unique does not support input tensors larger than ",
                    std::numeric_limits<int32>::max(), " elements"));

    auto Tin = input.vec<T>();
    const int64 N = static_cast<int64>(Tin.size());

    // The index output has the same shape as the input, so the input buffer
    // can be forwarded instead of allocating a fresh one.
    Tensor* idx = nullptr;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {0}, 1, input.shape(), &idx));
    auto idx_vec = idx->template vec<int32>();

    // Maps each distinct value to the dense index of its first occurrence.
    gtl::FlatMap<T, int32> uniq(N);
    for (int64 i = 0, j = 0; i < N; ++i) {
      auto it = uniq.emplace(Tin(i), j);
      idx_vec(i) = it.first->second;
      if (it.second) {
        ++j;  // A previously unseen value was inserted; advance dense index.
      }
    }

    int64 uniq_size = static_cast<int64>(uniq.size());
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(
                                0, TensorShape({uniq_size}), &output));
    auto output_vec = output->template vec<T>();

    // Iterate by const reference: copying the map entries would needlessly
    // duplicate potentially expensive keys (e.g. strings).
    for (const auto& it : uniq) {
      output_vec(it.second) = it.first;
    }

    // UniqueWithCounts registers a third output holding per-value counts.
    if (num_outputs() > 2) {
      OP_REQUIRES_OK(context, context->allocate_output(
                                  2, TensorShape({uniq_size}), &output));
      auto count_output_vec = output->template vec<int32>();
      count_output_vec.setZero();
      for (int64 i = 0; i < N; ++i) {
        count_output_vec(idx_vec(i))++;
      }
    }
  }
};
// Registers UniqueOp<type> on CPU for both "Unique" and "UniqueWithCounts",
// constrained to int32 index outputs.
#define REGISTER_UNIQUE(type)                                    \
  REGISTER_KERNEL_BUILDER(Name("Unique")                         \
                              .Device(DEVICE_CPU)                \
                              .TypeConstraint<type>("T")         \
                              .TypeConstraint<int32>("out_idx"), \
                          UniqueOp<type>);                       \
  REGISTER_KERNEL_BUILDER(Name("UniqueWithCounts")               \
                              .Device(DEVICE_CPU)                \
                              .TypeConstraint<type>("T")         \
                              .TypeConstraint<int32>("out_idx"), \
                          UniqueOp<type>)
// Instantiate for all real number types plus string.
TF_CALL_REAL_NUMBER_TYPES(REGISTER_UNIQUE);
REGISTER_UNIQUE(string)
#undef REGISTER_UNIQUE
// Fake integer GPU kernels so that the use of Unique in optimizers (to
// de-duplicate sparse gradient indices) does not conflict with gradients being
// located on a GPU. These kernels run on the CPU, their inputs and outputs
// residing in host (not GPU) memory.
// int32 variant: all tensors pinned to host memory.
REGISTER_KERNEL_BUILDER(Name("Unique")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<int32>("T")
                            .TypeConstraint<int32>("out_idx")
                            .HostMemory("x")
                            .HostMemory("y")
                            .HostMemory("idx"),
                        UniqueOp<int32>);
// int64 variant: all tensors pinned to host memory.
REGISTER_KERNEL_BUILDER(Name("Unique")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<int64>("T")
                            .TypeConstraint<int32>("out_idx")
                            .HostMemory("x")
                            .HostMemory("y")
                            .HostMemory("idx"),
                        UniqueOp<int64>);
} // namespace tensorflow
| 41.159664 | 80 | 0.565741 | [
"shape",
"vector"
] |
d50f3cfd1876cab1fd165589c2b4c99e23c06586 | 8,688 | cpp | C++ | iree/compiler/Translation/SPIRV/EmbeddedKernels.cpp | so-man/iree | 36be189aa74d6f5bc7eb665e5a7baf3effaca12d | [
"Apache-2.0"
] | 1 | 2021-11-04T02:47:40.000Z | 2021-11-04T02:47:40.000Z | iree/compiler/Translation/SPIRV/EmbeddedKernels.cpp | so-man/iree | 36be189aa74d6f5bc7eb665e5a7baf3effaca12d | [
"Apache-2.0"
] | null | null | null | iree/compiler/Translation/SPIRV/EmbeddedKernels.cpp | so-man/iree | 36be189aa74d6f5bc7eb665e5a7baf3effaca12d | [
"Apache-2.0"
] | null | null | null | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "iree/compiler/Translation/SPIRV/EmbeddedKernels.h"
#include "iree/compiler/Translation/SPIRV/Kernels/Kernels.h"
#include "iree/schemas/spirv_executable_def_generated.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "tensorflow/compiler/mlir/xla/ir/hlo_ops.h"
namespace mlir {
namespace iree_compiler {
namespace {
// Reads the SPIR-V code for the embedded kernel with the given file name.
// If the kernel under Kernels/ is 'matmul.comp' then |kernelName| would be
// 'matmul.spv' (because it's been compiled).
// Returns an empty vector if no embedded kernel matches |kernelName|.
std::vector<uint32_t> readEmbeddedKernelCode(const std::string &kernelName) {
  const auto *fileToc = spirv_kernels::Kernels_create();
  for (int i = 0; i < spirv_kernels::Kernels_size(); ++i) {
    if (kernelName == fileToc[i].name) {
      // SPIR-V modules are streams of 32-bit words. Round the word count up
      // so that a (malformed) byte size that is not a multiple of 4 cannot
      // make the memcpy below overflow the destination buffer; the original
      // code truncated (size / 4) but still copied |size| bytes.
      std::vector<uint32_t> code((fileToc[i].size + 3) / 4, 0);
      std::memcpy(code.data(), fileToc[i].data, fileToc[i].size);
      return code;
    }
  }
  return {};
}
// Appends one storage-buffer binding at slot |binding| to the descriptor set
// layout |dsl|, visible to the compute stage only.
void addDescriptorSetLayoutBinding(uint32_t binding,
                                   iree::VkDescriptorSetLayoutDefT *dsl) {
  auto entry = std::make_unique<iree::VkDescriptorSetLayoutBindingDefT>();
  entry->binding = binding;
  entry->descriptor_type = 7;           // VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
  entry->descriptor_count = 1;
  entry->stage_flags = 0x00000020;      // VK_SHADER_STAGE_COMPUTE_BIT
  dsl->bindings.push_back(std::move(entry));
}
// Records a 4-byte integer specialization constant: |constant_id| takes
// |value| in the given specialization info.
void addSpecializationMapEntry(
    uint32_t constant_id, uint32_t value,
    iree::VkSpecializationInfoDefT *specializationInfoDef) {
  auto entry = std::make_unique<iree::VkSpecializationMapEntryDefT>();
  entry->uint32_value = value;
  entry->constant_id = constant_id;
  specializationInfoDef->map_entries.push_back(std::move(entry));
}
// Builds a SPIR-V executable for a whole-function reduction that earlier
// passes tagged with iree.executable.reduction.* attributes, splatting in the
// embedded 'reduce_untiled' shader specialized by shape and operator.
// Fails for non-f32 element types and for apply functions whose body does not
// contain a supported (add/max/min) reduction op.
LogicalResult buildReductionExecutable(IREE::ExecutableOp executableOp,
                                       FuncOp entryFuncOp,
                                       iree::SpirVExecutableDefT *out_def) {
  auto funcType = entryFuncOp.getType();
  auto arg0 = funcType.getInput(0).cast<ShapedType>();
  if (!arg0.getElementType().isF32()) {
    // When we do other types we'll need other shaders.
    return entryFuncOp.emitOpError()
           << "Only floating point reduction is implemented";
  }
  // The reduction "apply" function (the combining computation) is referenced
  // by symbol from an attribute on the entry function.
  auto module = executableOp.getInnerModule();
  auto applyFuncAttr = entryFuncOp.getAttrOfType<SymbolRefAttr>(
      "iree.executable.reduction.apply");
  auto applyFuncOp = module.lookupSymbol(applyFuncAttr.getValue());
  // TODO(benvanik): specialize (template on shapes/types/etc).
  std::string kernelName = "reduce_untiled.spv";
  // Identify the combining operator by scanning the apply function body; the
  // id is consumed by the shader as a specialization constant (kOperationId).
  llvm::Optional<uint32_t> operationId;
  applyFuncOp->walk([&](Operation *op) {
    if (isa<xla_hlo::AddOp>(op)) {
      operationId = 0;
    } else if (isa<xla_hlo::MaxOp>(op)) {
      operationId = 1;
    } else if (isa<xla_hlo::MinOp>(op)) {
      operationId = 2;
    }
  });
  if (!operationId.hasValue()) {
    applyFuncOp->dump();
    return applyFuncOp->emitOpError() << "Unsupported reduction operator";
  }
  out_def->tag = "__reduce__";
  out_def->entry_points = {"main"};
  out_def->code = readEmbeddedKernelCode(kernelName);
  // arg0, arg1, ret0
  // Pipeline layout: three storage buffers in descriptor set 0.
  auto pipelineLayoutDef = std::make_unique<iree::VkPipelineLayoutDefT>();
  pipelineLayoutDef->buffer_binding_set = 0;
  auto dsl = std::make_unique<iree::VkDescriptorSetLayoutDefT>();
  addDescriptorSetLayoutBinding(0, dsl.get());
  addDescriptorSetLayoutBinding(1, dsl.get());
  addDescriptorSetLayoutBinding(2, dsl.get());
  pipelineLayoutDef->descriptor_set_layouts.push_back(std::move(dsl));
  out_def->pipeline_layout = std::move(pipelineLayoutDef);
  // See the shader source for documentation on the values of A/B/C/R.
  // R is the extent of the reduced dimension; A/B are the products of the
  // dimensions before/after it.
  int64_t reductionDimension =
      entryFuncOp
          .getAttrOfType<IntegerAttr>("iree.executable.reduction.dimension")
          .getInt();
  uint32_t r = arg0.getDimSize(reductionDimension);
  uint32_t a = 1;
  for (int i = 0; i < reductionDimension; ++i) {
    a *= arg0.getDimSize(i);
  }
  uint32_t b = 1;
  for (int i = reductionDimension + 1; i < arg0.getRank(); ++i) {
    b *= arg0.getDimSize(i);
  }
  uint32_t c = b;
  auto specializationInfoDef =
      std::make_unique<iree::VkSpecializationInfoDefT>();
  addSpecializationMapEntry(/*kOperationId*/ 100, operationId.getValue(),
                            specializationInfoDef.get());
  addSpecializationMapEntry(/*kA*/ 101, a, specializationInfoDef.get());
  addSpecializationMapEntry(/*kB*/ 102, b, specializationInfoDef.get());
  addSpecializationMapEntry(/*kC*/ 103, c, specializationInfoDef.get());
  addSpecializationMapEntry(/*kR*/ 104, r, specializationInfoDef.get());
  out_def->specialization_info = std::move(specializationInfoDef);
  return success();
}
// Builds a SPIR-V executable from a well-known matmul executable.
// |out_def| will be populated with all required information for serialization.
// The embedded 'matmul' shader is specialized with the M/K/N extents taken
// from the dot op's operand shapes.
LogicalResult buildMatMulExecutable(IREE::ExecutableOp executableOp,
                                    FuncOp entryFuncOp, xla_hlo::DotOp dotOp,
                                    iree::SpirVExecutableDefT *out_def) {
  auto arg0 = dotOp.getOperand(0)->getType().cast<ShapedType>();
  auto arg1 = dotOp.getOperand(1)->getType().cast<ShapedType>();
  out_def->tag = "__matmul__";
  out_def->entry_points = {"main"};
  // TODO(benvanik): specialize (template on shapes/types/etc).
  out_def->code = readEmbeddedKernelCode("matmul.spv");
  // arg0, arg1, ret0
  // Pipeline layout: three storage buffers in descriptor set 0.
  auto pipelineLayoutDef = std::make_unique<iree::VkPipelineLayoutDefT>();
  pipelineLayoutDef->buffer_binding_set = 0;
  auto dsl = std::make_unique<iree::VkDescriptorSetLayoutDefT>();
  addDescriptorSetLayoutBinding(0, dsl.get());
  addDescriptorSetLayoutBinding(1, dsl.get());
  addDescriptorSetLayoutBinding(2, dsl.get());
  pipelineLayoutDef->descriptor_set_layouts.push_back(std::move(dsl));
  out_def->pipeline_layout = std::move(pipelineLayoutDef);
  // Shapes of [arg0, arg1, ret0].
  // arg0 = [b0, m, k]
  // arg1 = [b0, k, n]
  // ret0 = [b0, m, n]
  // Note that we handle both batched (rank 3) and unbatched (rank 2).
  uint32_t m = arg0.getRank() == 3 ? arg0.getDimSize(1) : arg0.getDimSize(0);
  uint32_t k = arg0.getRank() == 3 ? arg0.getDimSize(2) : arg0.getDimSize(1);
  uint32_t n = arg1.getRank() == 3 ? arg1.getDimSize(2) : arg1.getDimSize(1);
  // Feed the extents to the shader as specialization constants.
  auto specializationInfoDef =
      std::make_unique<iree::VkSpecializationInfoDefT>();
  addSpecializationMapEntry(/*kMatrixM*/ 100, m, specializationInfoDef.get());
  addSpecializationMapEntry(/*kMatrixK*/ 101, k, specializationInfoDef.get());
  addSpecializationMapEntry(/*kMatrixN*/ 102, n, specializationInfoDef.get());
  out_def->specialization_info = std::move(specializationInfoDef);
  return success();
}
} // namespace
// Attempts to replace |executableOp| with one of the handwritten embedded
// SPIR-V kernels. Returns true iff |out_def| was populated; returns false
// when no known pattern matched or when splatting the kernel failed (an
// error is emitted in the latter cases where appropriate).
bool tryEmbeddedKernelRewrite(IREE::ExecutableOp executableOp,
                              iree::SpirVExecutableDefT *out_def) {
  auto innerModule = executableOp.getInnerModule();
  for (auto funcOp : innerModule.getOps<FuncOp>()) {
    // Whole-function reductions are tagged by earlier passes and take
    // precedence over per-op matching.
    if (funcOp.getAttr("iree.executable.reduction")) {
      if (succeeded(buildReductionExecutable(executableOp, funcOp, out_def))) {
        return true;
      }
      executableOp.emitOpError() << "Failed to splat in the reduction kernel";
      return false;
    }
    // Otherwise scan the function body for well-known ops we can replace.
    for (auto &block : funcOp) {
      for (auto &op : block) {
        if (isa<xla_hlo::ConvOp>(&op)) {
          executableOp.emitOpError() << "Conv not yet implemented";
          return false;
        }
        if (auto dotOp = dyn_cast_or_null<xla_hlo::DotOp>(&op)) {
          if (succeeded(buildMatMulExecutable(executableOp, funcOp, dotOp,
                                              out_def))) {
            return true;
          }
          executableOp.emitOpError() << "Failed to splat in the matmul kernel";
          return false;
        }
      }
    }
  }
  return false;
}
} // namespace iree_compiler
} // namespace mlir
| 39.490909 | 80 | 0.685889 | [
"vector"
] |
d5118ba073d9940cfcbab353c88cefec72ad6863 | 12,578 | cc | C++ | webrtc_dsp/modules/audio_processing/gain_control_impl.cc | sdanny/libtgvoip | c4a48b3627f7d08c7be40dc63b84f8b16eaf281a | [
"Unlicense"
] | 851 | 2018-02-05T09:54:56.000Z | 2022-03-24T23:13:10.000Z | references/WebRTC/modules/audio_processing/gain_control_impl.cc | azrael11/alcinoe | 98e92421321ef5df4be876f8d818dbfdfdca6757 | [
"Apache-2.0"
] | 200 | 2018-02-06T18:52:39.000Z | 2022-03-24T19:59:14.000Z | references/WebRTC/modules/audio_processing/gain_control_impl.cc | azrael11/alcinoe | 98e92421321ef5df4be876f8d818dbfdfdca6757 | [
"Apache-2.0"
] | 197 | 2018-03-20T20:49:55.000Z | 2022-03-21T17:38:14.000Z | /*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/audio_processing/gain_control_impl.h"
#include <cstdint>
#include "absl/types/optional.h"
#include "modules/audio_processing/agc/legacy/gain_control.h"
#include "modules/audio_processing/audio_buffer.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"
#include "rtc_base/checks.h"
#include "rtc_base/constructormagic.h"
namespace webrtc {
typedef void Handle;
namespace {
// Translates the public GainControl::Mode enum into the matching legacy
// WebRtcAgc mode constant; -1 is returned only on an (unreachable) unknown
// value so callers can detect invalid modes.
int16_t MapSetting(GainControl::Mode mode) {
  switch (mode) {
    case GainControl::kFixedDigital:
      return kAgcModeFixedDigital;
    case GainControl::kAdaptiveDigital:
      return kAgcModeAdaptiveDigital;
    case GainControl::kAdaptiveAnalog:
      return kAgcModeAdaptiveAnalog;
  }
  RTC_NOTREACHED();
  return -1;
}
} // namespace
// RAII wrapper around one legacy WebRtcAgc handle plus the most recently
// applied capture (mic) level for the channel it serves.
class GainControlImpl::GainController {
 public:
  explicit GainController() : state_(WebRtcAgc_Create()) { RTC_CHECK(state_); }

  ~GainController() {
    RTC_DCHECK(state_);
    WebRtcAgc_Free(state_);
  }

  // Returns the underlying legacy AGC handle (never null once constructed).
  Handle* state() {
    RTC_DCHECK(state_);
    return state_;
  }

  // (Re)initializes the legacy AGC and seeds the stored capture level.
  void Initialize(int minimum_capture_level,
                  int maximum_capture_level,
                  Mode mode,
                  int sample_rate_hz,
                  int capture_level) {
    RTC_DCHECK(state_);
    const int error =
        WebRtcAgc_Init(state_, minimum_capture_level, maximum_capture_level,
                       MapSetting(mode), sample_rate_hz);
    RTC_DCHECK_EQ(0, error);
    set_capture_level(capture_level);
  }

  void set_capture_level(int capture_level) { capture_level_ = capture_level; }

  // Must not be called before a capture level has been set.
  int get_capture_level() {
    RTC_DCHECK(capture_level_);
    return *capture_level_;
  }

 private:
  Handle* state_;
  // TODO(peah): Remove the optional once the initialization is moved into the
  // ctor.
  absl::optional<int> capture_level_;

  RTC_DISALLOW_COPY_AND_ASSIGN(GainController);
};
// Shared counter used only to give each instance's data dumper a unique id.
int GainControlImpl::instance_counter_ = 0;

// Both critical sections are owned by the surrounding audio processing module
// and serialize access from the render and capture paths; defaults below
// match the legacy AGC defaults (adaptive analog, mic range [0, 255],
// target -3 dBFS, 9 dB compression, limiter on).
GainControlImpl::GainControlImpl(rtc::CriticalSection* crit_render,
                                 rtc::CriticalSection* crit_capture)
    : crit_render_(crit_render),
      crit_capture_(crit_capture),
      data_dumper_(new ApmDataDumper(instance_counter_)),
      mode_(kAdaptiveAnalog),
      minimum_capture_level_(0),
      maximum_capture_level_(255),
      limiter_enabled_(true),
      target_level_dbfs_(3),
      compression_gain_db_(9),
      analog_capture_level_(0),
      was_analog_level_set_(false),
      stream_is_saturated_(false) {
  RTC_DCHECK(crit_render);
  RTC_DCHECK(crit_capture);
}

GainControlImpl::~GainControlImpl() {}
// Feeds the packed far-end (render) audio to every per-channel AGC; no-op
// while the gain control is disabled.
void GainControlImpl::ProcessRenderAudio(
    rtc::ArrayView<const int16_t> packed_render_audio) {
  rtc::CritScope capture_lock(crit_capture_);
  if (!enabled_) {
    return;
  }

  for (auto& agc : gain_controllers_) {
    WebRtcAgc_AddFarend(agc->state(), packed_render_audio.data(),
                        packed_render_audio.size());
  }
}
// Copies the mixed low-pass band of |audio| into |packed_buffer|, replacing
// any previous contents.
void GainControlImpl::PackRenderAudioBuffer(
    AudioBuffer* audio,
    std::vector<int16_t>* packed_buffer) {
  RTC_DCHECK_GE(160, audio->num_frames_per_band());
  const auto* band_start = audio->mixed_low_pass_data();
  packed_buffer->assign(band_start, band_start + audio->num_frames_per_band());
}
// Feeds near-end (capture) audio to the per-channel AGCs ahead of
// ProcessCaptureAudio. In adaptive-analog mode the app-reported mic level is
// pushed down to each controller; in adaptive-digital mode the legacy
// "virtual mic" simulates an analog level and the resulting level is stored.
// Returns kNoError, or kUnspecifiedError if a legacy AGC call fails.
int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
  rtc::CritScope cs(crit_capture_);
  if (!enabled_) {
    return AudioProcessing::kNoError;
  }
  RTC_DCHECK(num_proc_channels_);
  RTC_DCHECK_GE(160, audio->num_frames_per_band());
  RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
  RTC_DCHECK_LE(*num_proc_channels_, gain_controllers_.size());
  if (mode_ == kAdaptiveAnalog) {
    // Each controller starts from the level the application last reported.
    int capture_channel = 0;
    for (auto& gain_controller : gain_controllers_) {
      gain_controller->set_capture_level(analog_capture_level_);
      int err = WebRtcAgc_AddMic(
          gain_controller->state(), audio->split_bands(capture_channel),
          audio->num_bands(), audio->num_frames_per_band());
      if (err != AudioProcessing::kNoError) {
        return AudioProcessing::kUnspecifiedError;
      }
      ++capture_channel;
    }
  } else if (mode_ == kAdaptiveDigital) {
    // The virtual mic emulates analog gain; the level it computes is kept
    // per channel for the subsequent WebRtcAgc_Process() call.
    int capture_channel = 0;
    for (auto& gain_controller : gain_controllers_) {
      int32_t capture_level_out = 0;
      int err = WebRtcAgc_VirtualMic(
          gain_controller->state(), audio->split_bands(capture_channel),
          audio->num_bands(), audio->num_frames_per_band(),
          analog_capture_level_, &capture_level_out);
      gain_controller->set_capture_level(capture_level_out);
      if (err != AudioProcessing::kNoError) {
        return AudioProcessing::kUnspecifiedError;
      }
      ++capture_channel;
    }
  }
  // kFixedDigital mode needs no analysis step.
  return AudioProcessing::kNoError;
}
// Applies gain control in place to the capture audio. In adaptive-analog
// mode the application must have reported the mic level for this frame via
// set_stream_analog_level(), otherwise kStreamParameterNotSetError is
// returned. Records whether any channel warned about saturation and, in
// adaptive-analog mode, averages the recommended mic level across channels.
int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio,
                                         bool stream_has_echo) {
  rtc::CritScope cs(crit_capture_);
  if (!enabled_) {
    return AudioProcessing::kNoError;
  }
  if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
    return AudioProcessing::kStreamParameterNotSetError;
  }
  RTC_DCHECK(num_proc_channels_);
  RTC_DCHECK_GE(160, audio->num_frames_per_band());
  RTC_DCHECK_EQ(audio->num_channels(), *num_proc_channels_);
  stream_is_saturated_ = false;
  int capture_channel = 0;
  for (auto& gain_controller : gain_controllers_) {
    int32_t capture_level_out = 0;
    uint8_t saturation_warning = 0;
    // The call to stream_has_echo() is ok from a deadlock perspective
    // as the capture lock is already held.
    int err = WebRtcAgc_Process(
        gain_controller->state(), audio->split_bands_const(capture_channel),
        audio->num_bands(), audio->num_frames_per_band(),
        audio->split_bands(capture_channel),
        gain_controller->get_capture_level(), &capture_level_out,
        stream_has_echo, &saturation_warning);
    if (err != AudioProcessing::kNoError) {
      return AudioProcessing::kUnspecifiedError;
    }
    gain_controller->set_capture_level(capture_level_out);
    if (saturation_warning == 1) {
      stream_is_saturated_ = true;
    }
    ++capture_channel;
  }
  RTC_DCHECK_LT(0ul, *num_proc_channels_);
  if (mode_ == kAdaptiveAnalog) {
    // Take the analog level to be the average across the handles.
    analog_capture_level_ = 0;
    for (auto& gain_controller : gain_controllers_) {
      analog_capture_level_ += gain_controller->get_capture_level();
    }
    analog_capture_level_ /= (*num_proc_channels_);
  }
  // Require a fresh level report from the app before the next frame.
  was_analog_level_set_ = false;
  return AudioProcessing::kNoError;
}
// Returns the currently configured fixed compression gain (dB).
int GainControlImpl::compression_gain_db() const {
  rtc::CritScope lock(crit_capture_);
  return compression_gain_db_;
}
// TODO(ajm): ensure this is called under kAdaptiveAnalog.
// Records the analog mic level the application actually applied for this
// frame; must lie inside the configured [minimum, maximum] range.
int GainControlImpl::set_stream_analog_level(int level) {
  rtc::CritScope lock(crit_capture_);
  data_dumper_->DumpRaw("gain_control_set_stream_analog_level", 1, &level);
  was_analog_level_set_ = true;
  const bool in_range =
      level >= minimum_capture_level_ && level <= maximum_capture_level_;
  if (!in_range) {
    return AudioProcessing::kBadParameterError;
  }
  analog_capture_level_ = level;
  return AudioProcessing::kNoError;
}
// Returns the analog mic level recommended by the AGC for the next frame.
int GainControlImpl::stream_analog_level() {
  rtc::CritScope lock(crit_capture_);
  data_dumper_->DumpRaw("gain_control_stream_analog_level", 1,
                        &analog_capture_level_);
  // TODO(ajm): enable this assertion?
  // RTC_DCHECK_EQ(kAdaptiveAnalog, mode_);
  return analog_capture_level_;
}
// Enables or disables the gain control. The controllers are (re)initialized
// only on the disabled -> enabled transition.
int GainControlImpl::Enable(bool enable) {
  rtc::CritScope render_lock(crit_render_);
  rtc::CritScope capture_lock(crit_capture_);
  const bool turning_on = enable && !enabled_;
  enabled_ = enable;  // Must be set before Initialize() is called.
  if (turning_on) {
    RTC_DCHECK(num_proc_channels_);
    RTC_DCHECK(sample_rate_hz_);
    Initialize(*num_proc_channels_, *sample_rate_hz_);
  }
  return AudioProcessing::kNoError;
}
// True while the gain control is active.
bool GainControlImpl::is_enabled() const {
  rtc::CritScope lock(crit_capture_);
  return enabled_;
}
// Switches the operating mode (rejecting unknown values) and reinitializes
// the controllers so the legacy AGC picks the new mode up.
int GainControlImpl::set_mode(Mode mode) {
  rtc::CritScope render_lock(crit_render_);
  rtc::CritScope capture_lock(crit_capture_);
  if (MapSetting(mode) == -1) {
    return AudioProcessing::kBadParameterError;
  }

  mode_ = mode;
  RTC_DCHECK(num_proc_channels_);
  RTC_DCHECK(sample_rate_hz_);
  Initialize(*num_proc_channels_, *sample_rate_hz_);
  return AudioProcessing::kNoError;
}
// Returns the current operating mode.
GainControl::Mode GainControlImpl::mode() const {
  rtc::CritScope lock(crit_capture_);
  return mode_;
}
int GainControlImpl::set_analog_level_limits(int minimum, int maximum) {
if (minimum < 0) {
return AudioProcessing::kBadParameterError;
}
if (maximum > 65535) {
return AudioProcessing::kBadParameterError;
}
if (maximum < minimum) {
return AudioProcessing::kBadParameterError;
}
size_t num_proc_channels_local = 0u;
int sample_rate_hz_local = 0;
{
rtc::CritScope cs(crit_capture_);
minimum_capture_level_ = minimum;
maximum_capture_level_ = maximum;
RTC_DCHECK(num_proc_channels_);
RTC_DCHECK(sample_rate_hz_);
num_proc_channels_local = *num_proc_channels_;
sample_rate_hz_local = *sample_rate_hz_;
}
Initialize(num_proc_channels_local, sample_rate_hz_local);
return AudioProcessing::kNoError;
}
// Lower bound of the analog mic level range.
int GainControlImpl::analog_level_minimum() const {
  rtc::CritScope lock(crit_capture_);
  return minimum_capture_level_;
}
// Upper bound of the analog mic level range.
int GainControlImpl::analog_level_maximum() const {
  rtc::CritScope lock(crit_capture_);
  return maximum_capture_level_;
}
// True if any channel reported a saturation warning for the last frame.
bool GainControlImpl::stream_is_saturated() const {
  rtc::CritScope lock(crit_capture_);
  return stream_is_saturated_;
}
// Sets the target peak level; valid range is [0, 31] (interpreted by the
// legacy AGC relative to full scale). Reconfigures the controllers.
int GainControlImpl::set_target_level_dbfs(int level) {
  if (level < 0 || level > 31) {
    return AudioProcessing::kBadParameterError;
  }
  {
    rtc::CritScope lock(crit_capture_);
    target_level_dbfs_ = level;
  }
  return Configure();
}
// Returns the configured target level value.
int GainControlImpl::target_level_dbfs() const {
  rtc::CritScope lock(crit_capture_);
  return target_level_dbfs_;
}
// Sets the fixed digital compression gain; valid range is [0, 90] dB.
// Reconfigures the controllers with the new value.
int GainControlImpl::set_compression_gain_db(int gain) {
  const bool valid = gain >= 0 && gain <= 90;
  if (!valid) {
    return AudioProcessing::kBadParameterError;
  }
  {
    rtc::CritScope lock(crit_capture_);
    compression_gain_db_ = gain;
  }
  return Configure();
}
// Toggles the hard limiter and reconfigures the controllers.
int GainControlImpl::enable_limiter(bool enable) {
  {
    rtc::CritScope lock(crit_capture_);
    limiter_enabled_ = enable;
  }
  return Configure();
}
// True when the hard limiter is active.
bool GainControlImpl::is_limiter_enabled() const {
  rtc::CritScope lock(crit_capture_);
  return limiter_enabled_;
}
// (Re)initializes for the given channel count and sample rate. Always
// records the new stream parameters; the per-channel controllers are only
// (lazily) created and initialized while the gain control is enabled.
// Takes both locks, so must not be called while either is already held.
void GainControlImpl::Initialize(size_t num_proc_channels, int sample_rate_hz) {
  rtc::CritScope cs_render(crit_render_);
  rtc::CritScope cs_capture(crit_capture_);
  data_dumper_->InitiateNewSetOfRecordings();

  num_proc_channels_ = num_proc_channels;
  sample_rate_hz_ = sample_rate_hz;

  if (!enabled_) {
    return;
  }

  // Existing controllers are reused; only missing slots allocate.
  gain_controllers_.resize(*num_proc_channels_);
  for (auto& gain_controller : gain_controllers_) {
    if (!gain_controller) {
      gain_controller.reset(new GainController());
    }
    gain_controller->Initialize(minimum_capture_level_, maximum_capture_level_,
                                mode_, *sample_rate_hz_, analog_capture_level_);
  }

  Configure();
}
// Pushes the current target level / compression gain / limiter settings to
// every per-channel legacy AGC. Returns the last error reported by any
// controller, or kNoError if all succeeded.
int GainControlImpl::Configure() {
  rtc::CritScope cs_render(crit_render_);
  rtc::CritScope cs_capture(crit_capture_);
  WebRtcAgcConfig config;
  // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
  // change the interface.
  // RTC_DCHECK_LE(target_level_dbfs_, 0);
  // config.targetLevelDbfs = static_cast<int16_t>(-target_level_dbfs_);
  config.targetLevelDbfs = static_cast<int16_t>(target_level_dbfs_);
  config.compressionGaindB = static_cast<int16_t>(compression_gain_db_);
  config.limiterEnable = limiter_enabled_;

  int error = AudioProcessing::kNoError;
  for (auto& gain_controller : gain_controllers_) {
    const int handle_error =
        WebRtcAgc_set_config(gain_controller->state(), config);
    if (handle_error != AudioProcessing::kNoError) {
      // Keep configuring the remaining channels; remember the failure.
      error = handle_error;
    }
  }
  return error;
}
} // namespace webrtc
| 28.586364 | 80 | 0.719033 | [
"vector"
] |
d512421c7c13a0747681be637006f7730707ffa7 | 196,276 | cpp | C++ | Meet & Greet MR (AR)/Library/Il2cppBuildCache/Android/arm64-v8a/il2cppOutput/Il2CppCCalculateFieldValues2.cpp | 23SAMY23/Meet-and-Greet-MR | a85d2ccbda6d82651c5ef46469b0fa1c7b048d86 | [
"Unlicense"
] | null | null | null | Meet & Greet MR (AR)/Library/Il2cppBuildCache/Android/arm64-v8a/il2cppOutput/Il2CppCCalculateFieldValues2.cpp | 23SAMY23/Meet-and-Greet-MR | a85d2ccbda6d82651c5ef46469b0fa1c7b048d86 | [
"Unlicense"
] | null | null | null | Meet & Greet MR (AR)/Library/Il2cppBuildCache/Android/arm64-v8a/il2cppOutput/Il2CppCCalculateFieldValues2.cpp | 23SAMY23/Meet-and-Greet-MR | a85d2ccbda6d82651c5ef46469b0fa1c7b048d86 | [
"Unlicense"
] | null | null | null | #include "pch-cpp.hpp"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include <stdint.h>
#include <limits>
// System.Collections.Generic.Dictionary`2<System.String,Photon.Chat.Demo.FriendItem>
struct Dictionary_2_t57A8BB163A6245E3061BCE534FCD51622968502F;
// System.Collections.Generic.Dictionary`2<System.String,UnityEngine.UI.Toggle>
struct Dictionary_2_t832D46F27B23EC7C03258EEC3ACC466A27782A0E;
// System.Collections.Generic.List`1<Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef>
struct List_1_tBB19EB51B7E4AE8EE288B3D1F29CE27282724C8A;
// System.Collections.Generic.List`1<System.String>
struct List_1_t6C9F81EDBF0F4A31A9B0DA372D2EF34BDA3A1AF3;
// System.Collections.Generic.Queue`1<RockVR.Video.VideoCapture/FrameData>
struct Queue_1_tD8A602F88CB1BAFC9053BC90676C759C80917D8C;
// System.Byte[]
struct ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726;
// System.Char[]
struct CharU5BU5D_t7B7FC5BC8091AA3B9CB0B29CDD80B5EE9254AA34;
// System.Delegate[]
struct DelegateU5BU5D_t677D8FE08A5F99E8EE49150B73966CD6E9BF7DB8;
// UnityEngine.MeshFilter[]
struct MeshFilterU5BU5D_tE8AA77783A24784C69A8083B4F3E482D866FD503;
// UnityEngine.MeshRenderer[]
struct MeshRendererU5BU5D_t535468079DEF88AD38546DC5D04E9102C401D228;
// System.String[]
struct StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A;
// RockVR.Video.VideoCaptureBase[]
struct VideoCaptureBaseU5BU5D_t7AE282114378AE52385D1AC298E457A80C251C2C;
// Photon.Realtime.AppSettings
struct AppSettings_tABB056AEAFF5113D2D970906784B48C42DF13906;
// System.AsyncCallback
struct AsyncCallback_tA7921BEF974919C46FF8F9D9867C567B200BB0EA;
// RockVR.Video.AudioCapture
struct AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8;
// UnityEngine.Camera
struct Camera_tC44E094BAB53AFC8A014C6F9CFCE11F4FC38006C;
// Photon.Chat.ChatAppSettings
struct ChatAppSettings_tC09074621647F01C35C1373F86530BBC8D9DF2C8;
// Photon.Chat.ChatClient
struct ChatClient_t5E3E39382FC13DF25585EFE46374B01B3BC377DF;
// Photon.Chat.Demo.ChatGui
struct ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F;
// Photon.Realtime.ConnectionHandler
struct ConnectionHandler_t85A89B8EDF5CB1D550EEB28590A5E610034A06FE;
// UnityEngine.Cubemap
struct Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938;
// System.DelegateData
struct DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288;
// UnityEngine.UI.Dropdown
struct Dropdown_t099F5232BB75810BC79EED6E27DDCED46C3BCD96;
// RockVR.Common.EventDelegate
struct EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54;
// UnityEngine.GameObject
struct GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319;
// System.IAsyncResult
struct IAsyncResult_tC9F97BF36FCF122D29D3101D80642278297BF370;
// UnityEngine.UI.Image
struct Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C;
// UnityEngine.InputSystem.InputActionReference
struct InputActionReference_tB2E9E368D60A4C8E066C7CE0EE2A80C62320C28E;
// UnityEngine.UI.InputField
struct InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0;
// Photon.Realtime.LoadBalancingClient
struct LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A;
// UnityEngine.Material
struct Material_t8927C00353A72755313F046D0CE85178AE8218EE;
// UnityEngine.MeshFilter
struct MeshFilter_t763BB2BBF3881176AD25E4570E6DD215BA0AA51A;
// UnityEngine.MeshRenderer
struct MeshRenderer_tCD983A2F635E12BCB0BAA2E635D96A318757908B;
// System.Reflection.MethodInfo
struct MethodInfo_t;
// LylekGames.RPGGoblin.MouseLook
struct MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D;
// System.Random
struct Random_t6C9E9775A149D0ADCFEB4B252C408F03EE870118;
// Photon.Voice.Unity.Recorder
struct Recorder_t13D849D3581771415EF2D39F877106A1AC72C6C5;
// UnityEngine.RectTransform
struct RectTransform_t8A6A306FB29A6C8C22010CF9040E319753571072;
// UnityEngine.RenderTexture
struct RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849;
// RockVR.Video.Screenshot
struct Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80;
// UnityEngine.UI.Selectable
struct Selectable_t34088A3677CC9D344F81B0D91999D8C5963D7DBD;
// Photon.Voice.Unity.Speaker
struct Speaker_tDF067516E356439A1F51638A7725C56C1DF3EFEA;
// System.String
struct String_t;
// UnityEngine.UI.Text
struct Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1;
// UnityEngine.Texture2D
struct Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF;
// System.Threading.Thread
struct Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414;
// UnityEngine.UI.Toggle
struct Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E;
// RockVR.Video.VideoCapture
struct VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB;
// RockVR.Video.VideoCaptureCtrlBase
struct VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9;
// UnityEngine.Video.VideoPlayer
struct VideoPlayer_t47DCC396CBA28512CF97C6CC4F55878E8D62FE86;
// Photon.Voice.Unity.VoiceConnection
struct VoiceConnection_t65A719BDA53623411E6DC2E6CC34B120E0EBA704;
// System.Void
struct Void_t700C6383A2A510C2CF4DD86DABD5CA9FF70ADAC5;
// RockVR.Common.EventDelegate/CompleteDelegate
struct CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4;
// RockVR.Common.EventDelegate/ErrorDelegate
struct ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C;
struct Delegate_t_marshaled_com;
struct Delegate_t_marshaled_pinvoke;
IL2CPP_EXTERN_C_BEGIN
IL2CPP_EXTERN_C_END
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
// System.Object
// Photon.Chat.Demo.AppSettingsExtensions
// IL2CPP-generated instance layout for the managed type above; it declares
// no instance fields (presumably a static helper class — generated code).
struct AppSettingsExtensions_t4C63FC053FCF2F826B594D0E29A33B5DC98966FC : public RuntimeObject
{
public:

public:
};
// RockVR.Common.CmdProcess
// IL2CPP-generated instance layout for the managed type above; it declares
// no instance fields (generated code).
struct CmdProcess_tE1D68715DDE99C903B3690372D9ADE0CCC7DB125 : public RuntimeObject
{
public:

public:
};
// RockVR.Common.EventDelegate
// IL2CPP-generated instance layout: two managed delegate references plus the
// generated offset/get/set accessors (with GC write barriers) used by the
// IL2CPP runtime and generated method bodies.
struct EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 : public RuntimeObject
{
public:
	// RockVR.Common.EventDelegate/ErrorDelegate RockVR.Common.EventDelegate::OnError
	ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C * ___OnError_0;
	// RockVR.Common.EventDelegate/CompleteDelegate RockVR.Common.EventDelegate::OnComplete
	CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4 * ___OnComplete_1;

public:
	inline static int32_t get_offset_of_OnError_0() { return static_cast<int32_t>(offsetof(EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54, ___OnError_0)); }
	inline ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C * get_OnError_0() const { return ___OnError_0; }
	inline ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C ** get_address_of_OnError_0() { return &___OnError_0; }
	inline void set_OnError_0(ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C * value)
	{
		___OnError_0 = value;
		// Write barrier keeps the GC aware of the stored managed reference.
		Il2CppCodeGenWriteBarrier((void**)(&___OnError_0), (void*)value);
	}

	inline static int32_t get_offset_of_OnComplete_1() { return static_cast<int32_t>(offsetof(EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54, ___OnComplete_1)); }
	inline CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4 * get_OnComplete_1() const { return ___OnComplete_1; }
	inline CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4 ** get_address_of_OnComplete_1() { return &___OnComplete_1; }
	inline void set_OnComplete_1(CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4 * value)
	{
		___OnComplete_1 = value;
		// Write barrier keeps the GC aware of the stored managed reference.
		Il2CppCodeGenWriteBarrier((void**)(&___OnComplete_1), (void*)value);
	}
};
// RockVR.Video.MathUtils
// IL2CPP-generated instance layout for the managed type above; it declares
// no instance fields (generated code).
struct MathUtils_t52E1D7752FC35F06897AC8508046BC5ABAED6984 : public RuntimeObject
{
public:

public:
};
// RockVR.Video.PathConfig
// IL2CPP-generated instance layout for the managed type above; it declares
// no instance fields — its managed static fields live in the separate
// *_StaticFields struct generated below.
struct PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE : public RuntimeObject
{
public:

public:
};
// Static-field storage for RockVR.Video.PathConfig (all fields are managed
// System.String references). Field order and types define the runtime layout
// referenced via offsetof — do not reorder.
struct PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields
{
public:
	String_t* ___persistentDataPath_0;   // RockVR.Video.PathConfig::persistentDataPath
	String_t* ___streamingAssetsPath_1;  // RockVR.Video.PathConfig::streamingAssetsPath
	String_t* ___myDocumentsPath_2;      // RockVR.Video.PathConfig::myDocumentsPath
	String_t* ___saveFolder_3;           // RockVR.Video.PathConfig::saveFolder
	String_t* ___lastVideoFile_4;        // RockVR.Video.PathConfig::lastVideoFile

public:
	// Offset helpers plus get/set accessors; every setter routes through the
	// GC write barrier because the stored value is a managed reference.
	inline static int32_t get_offset_of_persistentDataPath_0() { return static_cast<int32_t>(offsetof(PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields, ___persistentDataPath_0)); }
	inline String_t* get_persistentDataPath_0() const { return ___persistentDataPath_0; }
	inline String_t** get_address_of_persistentDataPath_0() { return &___persistentDataPath_0; }
	inline void set_persistentDataPath_0(String_t* value) { ___persistentDataPath_0 = value; Il2CppCodeGenWriteBarrier((void**)(&___persistentDataPath_0), (void*)value); }
	inline static int32_t get_offset_of_streamingAssetsPath_1() { return static_cast<int32_t>(offsetof(PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields, ___streamingAssetsPath_1)); }
	inline String_t* get_streamingAssetsPath_1() const { return ___streamingAssetsPath_1; }
	inline String_t** get_address_of_streamingAssetsPath_1() { return &___streamingAssetsPath_1; }
	inline void set_streamingAssetsPath_1(String_t* value) { ___streamingAssetsPath_1 = value; Il2CppCodeGenWriteBarrier((void**)(&___streamingAssetsPath_1), (void*)value); }
	inline static int32_t get_offset_of_myDocumentsPath_2() { return static_cast<int32_t>(offsetof(PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields, ___myDocumentsPath_2)); }
	inline String_t* get_myDocumentsPath_2() const { return ___myDocumentsPath_2; }
	inline String_t** get_address_of_myDocumentsPath_2() { return &___myDocumentsPath_2; }
	inline void set_myDocumentsPath_2(String_t* value) { ___myDocumentsPath_2 = value; Il2CppCodeGenWriteBarrier((void**)(&___myDocumentsPath_2), (void*)value); }
	inline static int32_t get_offset_of_saveFolder_3() { return static_cast<int32_t>(offsetof(PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields, ___saveFolder_3)); }
	inline String_t* get_saveFolder_3() const { return ___saveFolder_3; }
	inline String_t** get_address_of_saveFolder_3() { return &___saveFolder_3; }
	inline void set_saveFolder_3(String_t* value) { ___saveFolder_3 = value; Il2CppCodeGenWriteBarrier((void**)(&___saveFolder_3), (void*)value); }
	inline static int32_t get_offset_of_lastVideoFile_4() { return static_cast<int32_t>(offsetof(PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields, ___lastVideoFile_4)); }
	inline String_t* get_lastVideoFile_4() const { return ___lastVideoFile_4; }
	inline String_t** get_address_of_lastVideoFile_4() { return &___lastVideoFile_4; }
	inline void set_lastVideoFile_4(String_t* value) { ___lastVideoFile_4 = value; Il2CppCodeGenWriteBarrier((void**)(&___lastVideoFile_4), (void*)value); }
};
// Photon.Voice.Unity.Demos.DemoVoiceUI.PhotonDemoExtensions
// Extension-method holder: no instance fields in the managed type, so the
// native struct is empty.
struct PhotonDemoExtensions_tC689085D327FB7D9001E521B1467589AF31358E6 : public RuntimeObject
{
};
// RockVR.Video.StringUtils
// Static utility holder with no instance state.
struct StringUtils_t6D8E52FFF50257DFB4E33A9AF03DF9EC8B1C014A : public RuntimeObject
{
};
// System.ValueType
// Abstract base for value types in the managed hierarchy; contributes no
// fields of its own.
struct ValueType_tDBF999C1B75C48C68621878250DBF6CDBCF51E52 : public RuntimeObject
{
};
// Native definition for P/Invoke marshalling of System.ValueType (no fields to marshal)
struct ValueType_tDBF999C1B75C48C68621878250DBF6CDBCF51E52_marshaled_pinvoke {};
// Native definition for COM marshalling of System.ValueType (no fields to marshal)
struct ValueType_tDBF999C1B75C48C68621878250DBF6CDBCF51E52_marshaled_com {};
// RockVR.Video.VideoMuxing
// Holds a target file path plus references to the video and audio capture
// objects being muxed. All three fields are managed references, so setters
// go through the GC write barrier.
struct VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3 : public RuntimeObject
{
public:
	String_t* ___filePath_0;                                             // RockVR.Video.VideoMuxing::filePath
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * ___videoCapture_1;  // RockVR.Video.VideoMuxing::videoCapture
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * ___audioCapture_2;  // RockVR.Video.VideoMuxing::audioCapture

public:
	inline static int32_t get_offset_of_filePath_0() { return static_cast<int32_t>(offsetof(VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3, ___filePath_0)); }
	inline String_t* get_filePath_0() const { return ___filePath_0; }
	inline String_t** get_address_of_filePath_0() { return &___filePath_0; }
	inline void set_filePath_0(String_t* value) { ___filePath_0 = value; Il2CppCodeGenWriteBarrier((void**)(&___filePath_0), (void*)value); }
	inline static int32_t get_offset_of_videoCapture_1() { return static_cast<int32_t>(offsetof(VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3, ___videoCapture_1)); }
	inline VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * get_videoCapture_1() const { return ___videoCapture_1; }
	inline VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB ** get_address_of_videoCapture_1() { return &___videoCapture_1; }
	inline void set_videoCapture_1(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * value) { ___videoCapture_1 = value; Il2CppCodeGenWriteBarrier((void**)(&___videoCapture_1), (void*)value); }
	inline static int32_t get_offset_of_audioCapture_2() { return static_cast<int32_t>(offsetof(VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3, ___audioCapture_2)); }
	inline AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * get_audioCapture_2() const { return ___audioCapture_2; }
	inline AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 ** get_address_of_audioCapture_2() { return &___audioCapture_2; }
	inline void set_audioCapture_2(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * value) { ___audioCapture_2 = value; Il2CppCodeGenWriteBarrier((void**)(&___audioCapture_2), (void*)value); }
};
// RockVR.Video.Screenshot/<AutoTakeScreenshot>d__8
// Compiler-generated C# iterator state machine (note the <>1__state /
// <>2__current field pair) for Screenshot.AutoTakeScreenshot, with the
// captured `seconds` argument and `this` reference hoisted into fields.
struct U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA : public RuntimeObject
{
public:
	int32_t ___U3CU3E1__state_0;                                          // <>1__state
	RuntimeObject * ___U3CU3E2__current_1;                                // <>2__current
	int32_t ___seconds_2;                                                 // hoisted `seconds` parameter
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80 * ___U3CU3E4__this_3;  // hoisted `this`

public:
	inline static int32_t get_offset_of_U3CU3E1__state_0() { return static_cast<int32_t>(offsetof(U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA, ___U3CU3E1__state_0)); }
	inline int32_t get_U3CU3E1__state_0() const { return ___U3CU3E1__state_0; }
	inline int32_t* get_address_of_U3CU3E1__state_0() { return &___U3CU3E1__state_0; }
	inline void set_U3CU3E1__state_0(int32_t value) { ___U3CU3E1__state_0 = value; }
	inline static int32_t get_offset_of_U3CU3E2__current_1() { return static_cast<int32_t>(offsetof(U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA, ___U3CU3E2__current_1)); }
	inline RuntimeObject * get_U3CU3E2__current_1() const { return ___U3CU3E2__current_1; }
	inline RuntimeObject ** get_address_of_U3CU3E2__current_1() { return &___U3CU3E2__current_1; }
	inline void set_U3CU3E2__current_1(RuntimeObject * value) { ___U3CU3E2__current_1 = value; Il2CppCodeGenWriteBarrier((void**)(&___U3CU3E2__current_1), (void*)value); }
	inline static int32_t get_offset_of_seconds_2() { return static_cast<int32_t>(offsetof(U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA, ___seconds_2)); }
	inline int32_t get_seconds_2() const { return ___seconds_2; }
	inline int32_t* get_address_of_seconds_2() { return &___seconds_2; }
	inline void set_seconds_2(int32_t value) { ___seconds_2 = value; }
	inline static int32_t get_offset_of_U3CU3E4__this_3() { return static_cast<int32_t>(offsetof(U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA, ___U3CU3E4__this_3)); }
	inline Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80 * get_U3CU3E4__this_3() const { return ___U3CU3E4__this_3; }
	inline Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80 ** get_address_of_U3CU3E4__this_3() { return &___U3CU3E4__this_3; }
	inline void set_U3CU3E4__this_3(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80 * value) { ___U3CU3E4__this_3 = value; Il2CppCodeGenWriteBarrier((void**)(&___U3CU3E4__this_3), (void*)value); }
};
// RockVR.Video.StringUtils/<>c__DisplayClass1_0
// Compiler-generated closure class: holds the captured System.Random
// instance for a lambda inside StringUtils.
struct U3CU3Ec__DisplayClass1_0_t4A54FA79F4C6132585A7F8E94CA69963916772F2 : public RuntimeObject
{
public:
	Random_t6C9E9775A149D0ADCFEB4B252C408F03EE870118 * ___random_0;  // captured `random`

public:
	inline static int32_t get_offset_of_random_0() { return static_cast<int32_t>(offsetof(U3CU3Ec__DisplayClass1_0_t4A54FA79F4C6132585A7F8E94CA69963916772F2, ___random_0)); }
	inline Random_t6C9E9775A149D0ADCFEB4B252C408F03EE870118 * get_random_0() const { return ___random_0; }
	inline Random_t6C9E9775A149D0ADCFEB4B252C408F03EE870118 ** get_address_of_random_0() { return &___random_0; }
	inline void set_random_0(Random_t6C9E9775A149D0ADCFEB4B252C408F03EE870118 * value) { ___random_0 = value; Il2CppCodeGenWriteBarrier((void**)(&___random_0), (void*)value); }
};
// RockVR.Video.VideoCapture/<CaptureFrameAsync>d__26
// Compiler-generated C# iterator state machine for
// VideoCapture.CaptureFrameAsync (state / current / hoisted `this`).
struct U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C : public RuntimeObject
{
public:
	int32_t ___U3CU3E1__state_0;                                          // <>1__state
	RuntimeObject * ___U3CU3E2__current_1;                                // <>2__current
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * ___U3CU3E4__this_2;  // hoisted `this`

public:
	inline static int32_t get_offset_of_U3CU3E1__state_0() { return static_cast<int32_t>(offsetof(U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C, ___U3CU3E1__state_0)); }
	inline int32_t get_U3CU3E1__state_0() const { return ___U3CU3E1__state_0; }
	inline int32_t* get_address_of_U3CU3E1__state_0() { return &___U3CU3E1__state_0; }
	inline void set_U3CU3E1__state_0(int32_t value) { ___U3CU3E1__state_0 = value; }
	inline static int32_t get_offset_of_U3CU3E2__current_1() { return static_cast<int32_t>(offsetof(U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C, ___U3CU3E2__current_1)); }
	inline RuntimeObject * get_U3CU3E2__current_1() const { return ___U3CU3E2__current_1; }
	inline RuntimeObject ** get_address_of_U3CU3E2__current_1() { return &___U3CU3E2__current_1; }
	inline void set_U3CU3E2__current_1(RuntimeObject * value) { ___U3CU3E2__current_1 = value; Il2CppCodeGenWriteBarrier((void**)(&___U3CU3E2__current_1), (void*)value); }
	inline static int32_t get_offset_of_U3CU3E4__this_2() { return static_cast<int32_t>(offsetof(U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C, ___U3CU3E4__this_2)); }
	inline VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * get_U3CU3E4__this_2() const { return ___U3CU3E4__this_2; }
	inline VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB ** get_address_of_U3CU3E4__this_2() { return &___U3CU3E4__this_2; }
	inline void set_U3CU3E4__this_2(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB * value) { ___U3CU3E4__this_2 = value; Il2CppCodeGenWriteBarrier((void**)(&___U3CU3E4__this_2), (void*)value); }
};
// UnityEngine.Color
// Plain value type: four floats (r, g, b, a) laid out contiguously in that
// order. No managed references, so setters need no GC write barrier.
struct Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659
{
public:
	float ___r_0;  // red
	float ___g_1;  // green
	float ___b_2;  // blue
	float ___a_3;  // alpha

public:
	inline static int32_t get_offset_of_r_0() { return static_cast<int32_t>(offsetof(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659, ___r_0)); }
	inline float get_r_0() const { return ___r_0; }
	inline float* get_address_of_r_0() { return &___r_0; }
	inline void set_r_0(float value) { ___r_0 = value; }
	inline static int32_t get_offset_of_g_1() { return static_cast<int32_t>(offsetof(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659, ___g_1)); }
	inline float get_g_1() const { return ___g_1; }
	inline float* get_address_of_g_1() { return &___g_1; }
	inline void set_g_1(float value) { ___g_1 = value; }
	inline static int32_t get_offset_of_b_2() { return static_cast<int32_t>(offsetof(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659, ___b_2)); }
	inline float get_b_2() const { return ___b_2; }
	inline float* get_address_of_b_2() { return &___b_2; }
	inline void set_b_2(float value) { ___b_2 = value; }
	inline static int32_t get_offset_of_a_3() { return static_cast<int32_t>(offsetof(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659, ___a_3)); }
	inline float get_a_3() const { return ___a_3; }
	inline float* get_address_of_a_3() { return &___a_3; }
	inline void set_a_3(float value) { ___a_3 = value; }
};
// System.Enum
// Abstract base for enum types; no instance fields beyond ValueType.
struct Enum_t23B90B40F60E677A8025267341651C94AE079CDA : public ValueType_tDBF999C1B75C48C68621878250DBF6CDBCF51E52
{
};
// Static-field storage for System.Enum. The single field is a managed char[]
// reference (the mono/BCL "enumSeperatorCharArray" — spelling as emitted),
// so the setter goes through the GC write barrier.
struct Enum_t23B90B40F60E677A8025267341651C94AE079CDA_StaticFields
{
public:
	CharU5BU5D_t7B7FC5BC8091AA3B9CB0B29CDD80B5EE9254AA34* ___enumSeperatorCharArray_0;

public:
	inline static int32_t get_offset_of_enumSeperatorCharArray_0() { return static_cast<int32_t>(offsetof(Enum_t23B90B40F60E677A8025267341651C94AE079CDA_StaticFields, ___enumSeperatorCharArray_0)); }
	inline CharU5BU5D_t7B7FC5BC8091AA3B9CB0B29CDD80B5EE9254AA34* get_enumSeperatorCharArray_0() const { return ___enumSeperatorCharArray_0; }
	inline CharU5BU5D_t7B7FC5BC8091AA3B9CB0B29CDD80B5EE9254AA34** get_address_of_enumSeperatorCharArray_0() { return &___enumSeperatorCharArray_0; }
	inline void set_enumSeperatorCharArray_0(CharU5BU5D_t7B7FC5BC8091AA3B9CB0B29CDD80B5EE9254AA34* value) { ___enumSeperatorCharArray_0 = value; Il2CppCodeGenWriteBarrier((void**)(&___enumSeperatorCharArray_0), (void*)value); }
};
// Native definition for P/Invoke marshalling of System.Enum (no fields to marshal)
struct Enum_t23B90B40F60E677A8025267341651C94AE079CDA_marshaled_pinvoke {};
// Native definition for COM marshalling of System.Enum (no fields to marshal)
struct Enum_t23B90B40F60E677A8025267341651C94AE079CDA_marshaled_com {};
// System.Int32
// Boxed-value layout: a single int32_t payload at offset 0.
struct Int32_tFDE5F8CD43D10453F6A2E0C77FE48C6CC7009046
{
public:
	int32_t ___m_value_0;  // the wrapped integer value

public:
	inline static int32_t get_offset_of_m_value_0() { return static_cast<int32_t>(offsetof(Int32_tFDE5F8CD43D10453F6A2E0C77FE48C6CC7009046, ___m_value_0)); }
	inline int32_t get_m_value_0() const { return ___m_value_0; }
	inline int32_t* get_address_of_m_value_0() { return &___m_value_0; }
	inline void set_m_value_0(int32_t value) { ___m_value_0 = value; }
};
// System.IntPtr
// Pointer-sized value type: a single raw void* payload at offset 0.
struct IntPtr_t
{
public:
	void* ___m_value_0;  // the wrapped native pointer

public:
	inline static int32_t get_offset_of_m_value_0() { return static_cast<int32_t>(offsetof(IntPtr_t, ___m_value_0)); }
	inline void* get_m_value_0() const { return ___m_value_0; }
	inline void** get_address_of_m_value_0() { return &___m_value_0; }
	inline void set_m_value_0(void* value) { ___m_value_0 = value; }
};
// Static-field storage for System.IntPtr: the IntPtr.Zero singleton, stored
// as a plain intptr_t (not a managed reference — no write barrier needed).
struct IntPtr_t_StaticFields
{
public:
	intptr_t ___Zero_1;  // System.IntPtr::Zero

public:
	inline static int32_t get_offset_of_Zero_1() { return static_cast<int32_t>(offsetof(IntPtr_t_StaticFields, ___Zero_1)); }
	inline intptr_t get_Zero_1() const { return ___Zero_1; }
	inline intptr_t* get_address_of_Zero_1() { return &___Zero_1; }
	inline void set_Zero_1(intptr_t value) { ___Zero_1 = value; }
};
// UnityEngine.Matrix4x4
// Plain value type of 16 floats, emitted in the order m00,m10,m20,m30,
// m01,... (each group of four shares a column index). No managed
// references, so setters need no GC write barrier.
struct Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461
{
public:
	float ___m00_0;
	float ___m10_1;
	float ___m20_2;
	float ___m30_3;
	float ___m01_4;
	float ___m11_5;
	float ___m21_6;
	float ___m31_7;
	float ___m02_8;
	float ___m12_9;
	float ___m22_10;
	float ___m32_11;
	float ___m03_12;
	float ___m13_13;
	float ___m23_14;
	float ___m33_15;

public:
	// One offset helper plus get/address/set accessor triple per element.
	inline static int32_t get_offset_of_m00_0() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m00_0)); }
	inline float get_m00_0() const { return ___m00_0; }
	inline float* get_address_of_m00_0() { return &___m00_0; }
	inline void set_m00_0(float value) { ___m00_0 = value; }
	inline static int32_t get_offset_of_m10_1() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m10_1)); }
	inline float get_m10_1() const { return ___m10_1; }
	inline float* get_address_of_m10_1() { return &___m10_1; }
	inline void set_m10_1(float value) { ___m10_1 = value; }
	inline static int32_t get_offset_of_m20_2() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m20_2)); }
	inline float get_m20_2() const { return ___m20_2; }
	inline float* get_address_of_m20_2() { return &___m20_2; }
	inline void set_m20_2(float value) { ___m20_2 = value; }
	inline static int32_t get_offset_of_m30_3() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m30_3)); }
	inline float get_m30_3() const { return ___m30_3; }
	inline float* get_address_of_m30_3() { return &___m30_3; }
	inline void set_m30_3(float value) { ___m30_3 = value; }
	inline static int32_t get_offset_of_m01_4() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m01_4)); }
	inline float get_m01_4() const { return ___m01_4; }
	inline float* get_address_of_m01_4() { return &___m01_4; }
	inline void set_m01_4(float value) { ___m01_4 = value; }
	inline static int32_t get_offset_of_m11_5() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m11_5)); }
	inline float get_m11_5() const { return ___m11_5; }
	inline float* get_address_of_m11_5() { return &___m11_5; }
	inline void set_m11_5(float value) { ___m11_5 = value; }
	inline static int32_t get_offset_of_m21_6() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m21_6)); }
	inline float get_m21_6() const { return ___m21_6; }
	inline float* get_address_of_m21_6() { return &___m21_6; }
	inline void set_m21_6(float value) { ___m21_6 = value; }
	inline static int32_t get_offset_of_m31_7() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m31_7)); }
	inline float get_m31_7() const { return ___m31_7; }
	inline float* get_address_of_m31_7() { return &___m31_7; }
	inline void set_m31_7(float value) { ___m31_7 = value; }
	inline static int32_t get_offset_of_m02_8() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m02_8)); }
	inline float get_m02_8() const { return ___m02_8; }
	inline float* get_address_of_m02_8() { return &___m02_8; }
	inline void set_m02_8(float value) { ___m02_8 = value; }
	inline static int32_t get_offset_of_m12_9() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m12_9)); }
	inline float get_m12_9() const { return ___m12_9; }
	inline float* get_address_of_m12_9() { return &___m12_9; }
	inline void set_m12_9(float value) { ___m12_9 = value; }
	inline static int32_t get_offset_of_m22_10() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m22_10)); }
	inline float get_m22_10() const { return ___m22_10; }
	inline float* get_address_of_m22_10() { return &___m22_10; }
	inline void set_m22_10(float value) { ___m22_10 = value; }
	inline static int32_t get_offset_of_m32_11() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m32_11)); }
	inline float get_m32_11() const { return ___m32_11; }
	inline float* get_address_of_m32_11() { return &___m32_11; }
	inline void set_m32_11(float value) { ___m32_11 = value; }
	inline static int32_t get_offset_of_m03_12() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m03_12)); }
	inline float get_m03_12() const { return ___m03_12; }
	inline float* get_address_of_m03_12() { return &___m03_12; }
	inline void set_m03_12(float value) { ___m03_12 = value; }
	inline static int32_t get_offset_of_m13_13() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m13_13)); }
	inline float get_m13_13() const { return ___m13_13; }
	inline float* get_address_of_m13_13() { return &___m13_13; }
	inline void set_m13_13(float value) { ___m13_13 = value; }
	inline static int32_t get_offset_of_m23_14() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m23_14)); }
	inline float get_m23_14() const { return ___m23_14; }
	inline float* get_address_of_m23_14() { return &___m23_14; }
	inline void set_m23_14(float value) { ___m23_14 = value; }
	inline static int32_t get_offset_of_m33_15() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461, ___m33_15)); }
	inline float get_m33_15() const { return ___m33_15; }
	inline float* get_address_of_m33_15() { return &___m33_15; }
	inline void set_m33_15(float value) { ___m33_15 = value; }
};
// Static-field storage for UnityEngine.Matrix4x4: the zeroMatrix and
// identityMatrix singletons, stored by value (value types — no write barrier).
struct Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461_StaticFields
{
public:
	Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 ___zeroMatrix_16;      // UnityEngine.Matrix4x4::zeroMatrix
	Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 ___identityMatrix_17;  // UnityEngine.Matrix4x4::identityMatrix

public:
	inline static int32_t get_offset_of_zeroMatrix_16() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461_StaticFields, ___zeroMatrix_16)); }
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 get_zeroMatrix_16() const { return ___zeroMatrix_16; }
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 * get_address_of_zeroMatrix_16() { return &___zeroMatrix_16; }
	inline void set_zeroMatrix_16(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 value) { ___zeroMatrix_16 = value; }
	inline static int32_t get_offset_of_identityMatrix_17() { return static_cast<int32_t>(offsetof(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461_StaticFields, ___identityMatrix_17)); }
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 get_identityMatrix_17() const { return ___identityMatrix_17; }
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 * get_address_of_identityMatrix_17() { return &___identityMatrix_17; }
	inline void set_identityMatrix_17(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 value) { ___identityMatrix_17 = value; }
};
// System.Void
// IL2CPP models System.Void as a concrete struct: the anonymous union overlays
// an empty struct with a one-byte padding array so the type has a well-defined
// non-zero size in C++. (Empty anonymous structs/unions rely on compiler
// extensions accepted by the IL2CPP toolchains.) Kept byte-identical —
// generated layout code.
struct Void_t700C6383A2A510C2CF4DD86DABD5CA9FF70ADAC5
{
public:
	union
	{
		struct
		{
		};
		uint8_t Void_t700C6383A2A510C2CF4DD86DABD5CA9FF70ADAC5__padding[1];
	};
public:
};
// <PrivateImplementationDetails>/__StaticArrayInitTypeSize=24
// Blob type backing a compiler-emitted static array initializer: the 24-byte
// padding array in the union fixes the size of the raw initializer data
// (see the CD9A54... static field below in this file). Kept byte-identical —
// generated layout code.
struct __StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2
{
public:
	union
	{
		struct
		{
			union
			{
			};
		};
		uint8_t __StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2__padding[24];
	};
public:
};
// RockVR.Video.VideoCapture/FrameData
// A captured frame: a managed byte[] of pixel data (write-barriered
// reference) plus a plain int32 count.
struct FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4
{
public:
	ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* ___pixels_0;  // FrameData::pixels
	int32_t ___count_1;                                                 // FrameData::count

public:
	inline static int32_t get_offset_of_pixels_0() { return static_cast<int32_t>(offsetof(FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4, ___pixels_0)); }
	inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* get_pixels_0() const { return ___pixels_0; }
	inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726** get_address_of_pixels_0() { return &___pixels_0; }
	inline void set_pixels_0(ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* value) { ___pixels_0 = value; Il2CppCodeGenWriteBarrier((void**)(&___pixels_0), (void*)value); }
	inline static int32_t get_offset_of_count_1() { return static_cast<int32_t>(offsetof(FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4, ___count_1)); }
	inline int32_t get_count_1() const { return ___count_1; }
	inline int32_t* get_address_of_count_1() { return &___count_1; }
	inline void set_count_1(int32_t value) { ___count_1 = value; }
};
// Native definition for P/Invoke marshalling of RockVR.Video.VideoCapture/FrameData
// The managed byte[] field is marshalled as a safe-array pointer; the int32
// count passes through unchanged.
struct FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4_marshaled_pinvoke
{
	Il2CppSafeArray/*NONE*/* ___pixels_0;
	int32_t ___count_1;
};
// Native definition for COM marshalling of RockVR.Video.VideoCapture/FrameData
// Same layout as the P/Invoke form.
struct FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4_marshaled_com
{
	Il2CppSafeArray/*NONE*/* ___pixels_0;
	int32_t ___count_1;
};
// <PrivateImplementationDetails>
// Compiler-generated container for embedded initializer data; no instance
// fields (state lives in the companion _StaticFields struct).
struct U3CPrivateImplementationDetailsU3E_t6BC7664D9CD46304D39A7D175BB8FFBE0B9F4528 : public RuntimeObject
{
};
// Static-field storage for <PrivateImplementationDetails>: one 24-byte
// initializer blob (field name is the SHA of the data). Value type — no
// write barrier in the setter.
struct U3CPrivateImplementationDetailsU3E_t6BC7664D9CD46304D39A7D175BB8FFBE0B9F4528_StaticFields
{
public:
	__StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2 ___CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0;

public:
	inline static int32_t get_offset_of_CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0() { return static_cast<int32_t>(offsetof(U3CPrivateImplementationDetailsU3E_t6BC7664D9CD46304D39A7D175BB8FFBE0B9F4528_StaticFields, ___CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0)); }
	inline __StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2 get_CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0() const { return ___CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0; }
	inline __StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2 * get_address_of_CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0() { return &___CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0; }
	inline void set_CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0(__StaticArrayInitTypeSizeU3D24_t2F23740D8943FC7C06AD3DD80B71D65744F140F2 value) { ___CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0 = value; }
};
// System.Delegate
// Native layout of the BCL delegate: raw invoke pointers (intptr_t fields, no
// write barrier) plus managed references for target/method-info/data, whose
// setters go through the GC write barrier. Field order defines the runtime
// layout — do not reorder.
struct Delegate_t : public RuntimeObject
{
public:
	Il2CppMethodPointer ___method_ptr_0;                              // System.Delegate::method_ptr
	intptr_t ___invoke_impl_1;                                        // System.Delegate::invoke_impl
	RuntimeObject * ___m_target_2;                                    // System.Delegate::m_target
	intptr_t ___method_3;                                             // System.Delegate::method
	intptr_t ___delegate_trampoline_4;                                // System.Delegate::delegate_trampoline
	intptr_t ___extra_arg_5;                                          // System.Delegate::extra_arg
	intptr_t ___method_code_6;                                        // System.Delegate::method_code
	MethodInfo_t * ___method_info_7;                                  // System.Delegate::method_info
	MethodInfo_t * ___original_method_info_8;                         // System.Delegate::original_method_info
	DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 * ___data_9;  // System.Delegate::data
	bool ___method_is_virtual_10;                                     // System.Delegate::method_is_virtual

public:
	inline static int32_t get_offset_of_method_ptr_0() { return static_cast<int32_t>(offsetof(Delegate_t, ___method_ptr_0)); }
	inline Il2CppMethodPointer get_method_ptr_0() const { return ___method_ptr_0; }
	inline Il2CppMethodPointer* get_address_of_method_ptr_0() { return &___method_ptr_0; }
	inline void set_method_ptr_0(Il2CppMethodPointer value) { ___method_ptr_0 = value; }
	inline static int32_t get_offset_of_invoke_impl_1() { return static_cast<int32_t>(offsetof(Delegate_t, ___invoke_impl_1)); }
	inline intptr_t get_invoke_impl_1() const { return ___invoke_impl_1; }
	inline intptr_t* get_address_of_invoke_impl_1() { return &___invoke_impl_1; }
	inline void set_invoke_impl_1(intptr_t value) { ___invoke_impl_1 = value; }
	inline static int32_t get_offset_of_m_target_2() { return static_cast<int32_t>(offsetof(Delegate_t, ___m_target_2)); }
	inline RuntimeObject * get_m_target_2() const { return ___m_target_2; }
	inline RuntimeObject ** get_address_of_m_target_2() { return &___m_target_2; }
	inline void set_m_target_2(RuntimeObject * value) { ___m_target_2 = value; Il2CppCodeGenWriteBarrier((void**)(&___m_target_2), (void*)value); }
	inline static int32_t get_offset_of_method_3() { return static_cast<int32_t>(offsetof(Delegate_t, ___method_3)); }
	inline intptr_t get_method_3() const { return ___method_3; }
	inline intptr_t* get_address_of_method_3() { return &___method_3; }
	inline void set_method_3(intptr_t value) { ___method_3 = value; }
	inline static int32_t get_offset_of_delegate_trampoline_4() { return static_cast<int32_t>(offsetof(Delegate_t, ___delegate_trampoline_4)); }
	inline intptr_t get_delegate_trampoline_4() const { return ___delegate_trampoline_4; }
	inline intptr_t* get_address_of_delegate_trampoline_4() { return &___delegate_trampoline_4; }
	inline void set_delegate_trampoline_4(intptr_t value) { ___delegate_trampoline_4 = value; }
	inline static int32_t get_offset_of_extra_arg_5() { return static_cast<int32_t>(offsetof(Delegate_t, ___extra_arg_5)); }
	inline intptr_t get_extra_arg_5() const { return ___extra_arg_5; }
	inline intptr_t* get_address_of_extra_arg_5() { return &___extra_arg_5; }
	inline void set_extra_arg_5(intptr_t value) { ___extra_arg_5 = value; }
	inline static int32_t get_offset_of_method_code_6() { return static_cast<int32_t>(offsetof(Delegate_t, ___method_code_6)); }
	inline intptr_t get_method_code_6() const { return ___method_code_6; }
	inline intptr_t* get_address_of_method_code_6() { return &___method_code_6; }
	inline void set_method_code_6(intptr_t value) { ___method_code_6 = value; }
	inline static int32_t get_offset_of_method_info_7() { return static_cast<int32_t>(offsetof(Delegate_t, ___method_info_7)); }
	inline MethodInfo_t * get_method_info_7() const { return ___method_info_7; }
	inline MethodInfo_t ** get_address_of_method_info_7() { return &___method_info_7; }
	inline void set_method_info_7(MethodInfo_t * value) { ___method_info_7 = value; Il2CppCodeGenWriteBarrier((void**)(&___method_info_7), (void*)value); }
	inline static int32_t get_offset_of_original_method_info_8() { return static_cast<int32_t>(offsetof(Delegate_t, ___original_method_info_8)); }
	inline MethodInfo_t * get_original_method_info_8() const { return ___original_method_info_8; }
	inline MethodInfo_t ** get_address_of_original_method_info_8() { return &___original_method_info_8; }
	inline void set_original_method_info_8(MethodInfo_t * value) { ___original_method_info_8 = value; Il2CppCodeGenWriteBarrier((void**)(&___original_method_info_8), (void*)value); }
	inline static int32_t get_offset_of_data_9() { return static_cast<int32_t>(offsetof(Delegate_t, ___data_9)); }
	inline DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 * get_data_9() const { return ___data_9; }
	inline DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 ** get_address_of_data_9() { return &___data_9; }
	inline void set_data_9(DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 * value) { ___data_9 = value; Il2CppCodeGenWriteBarrier((void**)(&___data_9), (void*)value); }
	inline static int32_t get_offset_of_method_is_virtual_10() { return static_cast<int32_t>(offsetof(Delegate_t, ___method_is_virtual_10)); }
	inline bool get_method_is_virtual_10() const { return ___method_is_virtual_10; }
	inline bool* get_address_of_method_is_virtual_10() { return &___method_is_virtual_10; }
	inline void set_method_is_virtual_10(bool value) { ___method_is_virtual_10 = value; }
};
// Native definition for P/Invoke marshalling of System.Delegate
// NOTE(review): IL2CPP-generated blittable mirror of the managed Delegate_t
// layout above — same fields in the same order. Do not edit by hand; this file
// is regenerated by il2cpp and accessor offsets depend on exact field order.
struct Delegate_t_marshaled_pinvoke
{
	intptr_t ___method_ptr_0;
	intptr_t ___invoke_impl_1;
	Il2CppIUnknown* ___m_target_2;
	intptr_t ___method_3;
	intptr_t ___delegate_trampoline_4;
	intptr_t ___extra_arg_5;
	intptr_t ___method_code_6;
	MethodInfo_t * ___method_info_7;
	MethodInfo_t * ___original_method_info_8;
	DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 * ___data_9;
	int32_t ___method_is_virtual_10; // bool in the managed layout, widened to int32_t for marshalling
};
// Native definition for COM marshalling of System.Delegate
// Identical field layout to the P/Invoke variant above for this type.
struct Delegate_t_marshaled_com
{
	intptr_t ___method_ptr_0;
	intptr_t ___invoke_impl_1;
	Il2CppIUnknown* ___m_target_2;
	intptr_t ___method_3;
	intptr_t ___delegate_trampoline_4;
	intptr_t ___extra_arg_5;
	intptr_t ___method_code_6;
	MethodInfo_t * ___method_info_7;
	MethodInfo_t * ___original_method_info_8;
	DelegateData_t17DD30660E330C49381DAA99F934BE75CB11F288 * ___data_9;
	int32_t ___method_is_virtual_10; // bool in the managed layout, widened to int32_t for marshalling
};
// UnityEngine.Object
// IL2CPP representation of the managed UnityEngine.Object base class.
// Holds a single intptr_t; NOTE(review): presumably a cached pointer to the
// native engine object (set by engine internals not visible in this file).
struct Object_tF2F3778131EFF286AF62B7B013A170F95A91571A : public RuntimeObject
{
public:
	// System.IntPtr UnityEngine.Object::m_CachedPtr
	intptr_t ___m_CachedPtr_0;
public:
	inline static int32_t get_offset_of_m_CachedPtr_0() { return static_cast<int32_t>(offsetof(Object_tF2F3778131EFF286AF62B7B013A170F95A91571A, ___m_CachedPtr_0)); }
	inline intptr_t get_m_CachedPtr_0() const { return ___m_CachedPtr_0; }
	inline intptr_t* get_address_of_m_CachedPtr_0() { return &___m_CachedPtr_0; }
	inline void set_m_CachedPtr_0(intptr_t value)
	{
		___m_CachedPtr_0 = value;
	}
};
// Static (per-type, not per-instance) fields of UnityEngine.Object.
struct Object_tF2F3778131EFF286AF62B7B013A170F95A91571A_StaticFields
{
public:
	// System.Int32 UnityEngine.Object::OffsetOfInstanceIDInCPlusPlusObject
	int32_t ___OffsetOfInstanceIDInCPlusPlusObject_1;
public:
	inline static int32_t get_offset_of_OffsetOfInstanceIDInCPlusPlusObject_1() { return static_cast<int32_t>(offsetof(Object_tF2F3778131EFF286AF62B7B013A170F95A91571A_StaticFields, ___OffsetOfInstanceIDInCPlusPlusObject_1)); }
	inline int32_t get_OffsetOfInstanceIDInCPlusPlusObject_1() const { return ___OffsetOfInstanceIDInCPlusPlusObject_1; }
	inline int32_t* get_address_of_OffsetOfInstanceIDInCPlusPlusObject_1() { return &___OffsetOfInstanceIDInCPlusPlusObject_1; }
	inline void set_OffsetOfInstanceIDInCPlusPlusObject_1(int32_t value)
	{
		___OffsetOfInstanceIDInCPlusPlusObject_1 = value;
	}
};
// Native definition for P/Invoke marshalling of UnityEngine.Object
struct Object_tF2F3778131EFF286AF62B7B013A170F95A91571A_marshaled_pinvoke
{
	intptr_t ___m_CachedPtr_0;
};
// Native definition for COM marshalling of UnityEngine.Object
struct Object_tF2F3778131EFF286AF62B7B013A170F95A91571A_marshaled_com
{
	intptr_t ___m_CachedPtr_0;
};
// ---------------------------------------------------------------------------
// IL2CPP value-type representations of managed enums. Each struct below wraps
// the enum's System.Int32 backing field (___value___2) and exposes the
// generated offset / get / get_address / set accessors. All sixteen structs
// follow the identical generated pattern; only the type name (with its
// metadata hash suffix) and the managed enum it mirrors differ.
// ---------------------------------------------------------------------------
// RockVR.Common.PlatformType
struct PlatformType_tCFE7573EF622F67BA0192D2FEC2110E741383AEE
{
public:
	// System.Int32 RockVR.Common.PlatformType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(PlatformType_tCFE7573EF622F67BA0192D2FEC2110E741383AEE, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// LylekGames.RPGGoblin.EnableMouseLook/InputType
struct InputType_t6A74EB06ABBA29763640ADBAC38137D11C0A4E83
{
public:
	// System.Int32 LylekGames.RPGGoblin.EnableMouseLook/InputType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(InputType_t6A74EB06ABBA29763640ADBAC38137D11C0A4E83, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// LylekGames.RPGGoblin.MouseLook/RotationAxes
struct RotationAxes_t160C17846428E420A1BEA024DC3BCF176F9BAF45
{
public:
	// System.Int32 LylekGames.RPGGoblin.MouseLook/RotationAxes::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(RotationAxes_t160C17846428E420A1BEA024DC3BCF176F9BAF45, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// Photon.Voice.Unity.Recorder/MicType
struct MicType_t25290DF0A5AF1BBC22A27D10C65C62AC81B71DD9
{
public:
	// System.Int32 Photon.Voice.Unity.Recorder/MicType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(MicType_t25290DF0A5AF1BBC22A27D10C65C62AC81B71DD9, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/AntiAliasingType
struct AntiAliasingType_t9585D007F94A7825158CD94711B0EA0D2DF968E7
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/AntiAliasingType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(AntiAliasingType_t9585D007F94A7825158CD94711B0EA0D2DF968E7, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/CubemapSizeType
struct CubemapSizeType_t1BEAF3986B85123F0367EECF74C0F29B129A9FE3
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/CubemapSizeType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(CubemapSizeType_t1BEAF3986B85123F0367EECF74C0F29B129A9FE3, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/EncodeQualityType
struct EncodeQualityType_t99A80555020D4522474B5612A040EF116670C18C
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/EncodeQualityType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(EncodeQualityType_t99A80555020D4522474B5612A040EF116670C18C, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/FormatType
struct FormatType_t5146714865CD8A744D0E48ADFB21C8B0E672E71E
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/FormatType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(FormatType_t5146714865CD8A744D0E48ADFB21C8B0E672E71E, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/FrameSizeType
struct FrameSizeType_tDCE15CC8D96EC55F0CDCF884134DD6DA7F1032B2
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/FrameSizeType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(FrameSizeType_tDCE15CC8D96EC55F0CDCF884134DD6DA7F1032B2, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/ModeType
struct ModeType_tB12C34FF8E101F43B612F86E972D44D7E5A9C57C
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/ModeType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(ModeType_tB12C34FF8E101F43B612F86E972D44D7E5A9C57C, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/PanoramaProjectionType
struct PanoramaProjectionType_t4C5251BE54E854B3B8420D92325AA2EE569C0955
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/PanoramaProjectionType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(PanoramaProjectionType_t4C5251BE54E854B3B8420D92325AA2EE569C0955, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/StereoFormatType
struct StereoFormatType_t03F79A41E5E55051234CB0B28BC5BE95C2A2720E
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/StereoFormatType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(StereoFormatType_t03F79A41E5E55051234CB0B28BC5BE95C2A2720E, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/StereoType
struct StereoType_t52A5ED4AB47A17794922C7A01210256589B531BA
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/StereoType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(StereoType_t52A5ED4AB47A17794922C7A01210256589B531BA, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureBase/TargetFramerateType
struct TargetFramerateType_t9F547BDB989E136FFECAE00E2F3A4D085F1AA84A
{
public:
	// System.Int32 RockVR.Video.VideoCaptureBase/TargetFramerateType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(TargetFramerateType_t9F547BDB989E136FFECAE00E2F3A4D085F1AA84A, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureCtrlBase/ErrorCodeType
struct ErrorCodeType_t15B8A59B00A4723B91B9856AEFBA575D83A260E4
{
public:
	// System.Int32 RockVR.Video.VideoCaptureCtrlBase/ErrorCodeType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(ErrorCodeType_t15B8A59B00A4723B91B9856AEFBA575D83A260E4, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// RockVR.Video.VideoCaptureCtrlBase/StatusType
struct StatusType_t43810E5CF4B0DF784CD154F58165F9C460E49582
{
public:
	// System.Int32 RockVR.Video.VideoCaptureCtrlBase/StatusType::value__
	int32_t ___value___2;
public:
	inline static int32_t get_offset_of_value___2() { return static_cast<int32_t>(offsetof(StatusType_t43810E5CF4B0DF784CD154F58165F9C460E49582, ___value___2)); }
	inline int32_t get_value___2() const { return ___value___2; }
	inline int32_t* get_address_of_value___2() { return &___value___2; }
	inline void set_value___2(int32_t value)
	{
		___value___2 = value;
	}
};
// UnityEngine.Component
// Empty subclass stub: adds no fields, only mirrors the managed inheritance
// chain (Component -> Object) in the native type hierarchy.
struct Component_t62FBC8D2420DA4BE9037AFE430740F6B3EECA684 : public Object_tF2F3778131EFF286AF62B7B013A170F95A91571A
{
public:
public:
};
// Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef
// IL2CPP mirror of the managed MicRef value type: a microphone descriptor
// (MicType enum as int32, display name, Photon device id and its string form).
// Reference-typed stores go through Il2CppCodeGenWriteBarrier (generated after
// every reference-field write; presumably the GC write barrier).
struct MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94
{
public:
	// Photon.Voice.Unity.Recorder/MicType Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef::MicType
	int32_t ___MicType_0;
	// System.String Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef::Name
	String_t* ___Name_1;
	// System.Int32 Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef::PhotonId
	int32_t ___PhotonId_2;
	// System.String Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef::PhotonIdString
	String_t* ___PhotonIdString_3;
public:
	inline static int32_t get_offset_of_MicType_0() { return static_cast<int32_t>(offsetof(MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94, ___MicType_0)); }
	inline int32_t get_MicType_0() const { return ___MicType_0; }
	inline int32_t* get_address_of_MicType_0() { return &___MicType_0; }
	inline void set_MicType_0(int32_t value)
	{
		___MicType_0 = value;
	}
	inline static int32_t get_offset_of_Name_1() { return static_cast<int32_t>(offsetof(MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94, ___Name_1)); }
	inline String_t* get_Name_1() const { return ___Name_1; }
	inline String_t** get_address_of_Name_1() { return &___Name_1; }
	inline void set_Name_1(String_t* value)
	{
		___Name_1 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Name_1), (void*)value);
	}
	inline static int32_t get_offset_of_PhotonId_2() { return static_cast<int32_t>(offsetof(MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94, ___PhotonId_2)); }
	inline int32_t get_PhotonId_2() const { return ___PhotonId_2; }
	inline int32_t* get_address_of_PhotonId_2() { return &___PhotonId_2; }
	inline void set_PhotonId_2(int32_t value)
	{
		___PhotonId_2 = value;
	}
	inline static int32_t get_offset_of_PhotonIdString_3() { return static_cast<int32_t>(offsetof(MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94, ___PhotonIdString_3)); }
	inline String_t* get_PhotonIdString_3() const { return ___PhotonIdString_3; }
	inline String_t** get_address_of_PhotonIdString_3() { return &___PhotonIdString_3; }
	inline void set_PhotonIdString_3(String_t* value)
	{
		___PhotonIdString_3 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___PhotonIdString_3), (void*)value);
	}
};
// Native definition for P/Invoke marshalling of Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef
// Strings marshal as narrow char* for P/Invoke.
struct MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94_marshaled_pinvoke
{
	int32_t ___MicType_0;
	char* ___Name_1;
	int32_t ___PhotonId_2;
	char* ___PhotonIdString_3;
};
// Native definition for COM marshalling of Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef
// Strings marshal as wide Il2CppChar* for COM.
struct MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94_marshaled_com
{
	int32_t ___MicType_0;
	Il2CppChar* ___Name_1;
	int32_t ___PhotonId_2;
	Il2CppChar* ___PhotonIdString_3;
};
// System.MulticastDelegate
// Extends Delegate_t with the invocation-list array (field index 11 continues
// the base class's 0-10 numbering).
struct MulticastDelegate_t : public Delegate_t
{
public:
	// System.Delegate[] System.MulticastDelegate::delegates
	DelegateU5BU5D_t677D8FE08A5F99E8EE49150B73966CD6E9BF7DB8* ___delegates_11;
public:
	inline static int32_t get_offset_of_delegates_11() { return static_cast<int32_t>(offsetof(MulticastDelegate_t, ___delegates_11)); }
	inline DelegateU5BU5D_t677D8FE08A5F99E8EE49150B73966CD6E9BF7DB8* get_delegates_11() const { return ___delegates_11; }
	inline DelegateU5BU5D_t677D8FE08A5F99E8EE49150B73966CD6E9BF7DB8** get_address_of_delegates_11() { return &___delegates_11; }
	inline void set_delegates_11(DelegateU5BU5D_t677D8FE08A5F99E8EE49150B73966CD6E9BF7DB8* value)
	{
		___delegates_11 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___delegates_11), (void*)value);
	}
};
// Native definition for P/Invoke marshalling of System.MulticastDelegate
struct MulticastDelegate_t_marshaled_pinvoke : public Delegate_t_marshaled_pinvoke
{
	Delegate_t_marshaled_pinvoke** ___delegates_11;
};
// Native definition for COM marshalling of System.MulticastDelegate
struct MulticastDelegate_t_marshaled_com : public Delegate_t_marshaled_com
{
	Delegate_t_marshaled_com** ___delegates_11;
};
// The six structs below add no fields of their own; they exist solely to
// reproduce the managed inheritance chains (e.g. MonoBehaviour -> Behaviour ->
// Component, and the two EventDelegate delegate types -> MulticastDelegate)
// in the native type hierarchy.
// RockVR.Common.Platform
struct Platform_t1A0D4D265C56D6A23288AFBCE8B143AAC4DAF814 : public RuntimeObject
{
public:
public:
};
// UnityEngine.Behaviour
struct Behaviour_t1A3DDDCF73B4627928FBFE02ED52B7251777DBD9 : public Component_t62FBC8D2420DA4BE9037AFE430740F6B3EECA684
{
public:
public:
};
// RockVR.Common.EventDelegate/CompleteDelegate
struct CompleteDelegate_t45443F293492379B818DB003E631998EBE0EA0D4 : public MulticastDelegate_t
{
public:
public:
};
// RockVR.Common.EventDelegate/ErrorDelegate
struct ErrorDelegate_t9C20F46B8E1CCFF35DDF3B0717119F0D3507522C : public MulticastDelegate_t
{
public:
public:
};
// UnityEngine.MonoBehaviour
struct MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A : public Behaviour_t1A3DDDCF73B4627928FBFE02ED52B7251777DBD9
{
public:
public:
};
// RockVR.Common.Singleton`1<RockVR.Video.VideoCaptureCtrlBase>
struct Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
public:
};
// Static fields for the closed generic Singleton`1<VideoCaptureCtrlBase>:
// the cached instance, the lock object used by the managed implementation,
// and the application-quitting flag.
struct Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4_StaticFields
{
public:
	// T RockVR.Common.Singleton`1::_instance
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9 * ____instance_4;
	// System.Object RockVR.Common.Singleton`1::_lock
	RuntimeObject * ____lock_5;
	// System.Boolean RockVR.Common.Singleton`1::applicationIsQuitting
	bool ___applicationIsQuitting_6;
public:
	inline static int32_t get_offset_of__instance_4() { return static_cast<int32_t>(offsetof(Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4_StaticFields, ____instance_4)); }
	inline VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9 * get__instance_4() const { return ____instance_4; }
	inline VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9 ** get_address_of__instance_4() { return &____instance_4; }
	inline void set__instance_4(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9 * value)
	{
		____instance_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____instance_4), (void*)value);
	}
	inline static int32_t get_offset_of__lock_5() { return static_cast<int32_t>(offsetof(Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4_StaticFields, ____lock_5)); }
	inline RuntimeObject * get__lock_5() const { return ____lock_5; }
	inline RuntimeObject ** get_address_of__lock_5() { return &____lock_5; }
	inline void set__lock_5(RuntimeObject * value)
	{
		____lock_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____lock_5), (void*)value);
	}
	inline static int32_t get_offset_of_applicationIsQuitting_6() { return static_cast<int32_t>(offsetof(Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4_StaticFields, ___applicationIsQuitting_6)); }
	inline bool get_applicationIsQuitting_6() const { return ___applicationIsQuitting_6; }
	inline bool* get_address_of_applicationIsQuitting_6() { return &___applicationIsQuitting_6; }
	inline void set_applicationIsQuitting_6(bool value)
	{
		___applicationIsQuitting_6 = value;
	}
};
// RockVR.Video.AudioCapture
// Instance fields + generated accessors for the AudioCapture MonoBehaviour:
// capture status/file path backing fields, the event-delegate holder, two
// native library handles (intptr_t) and the audio byte buffer.
// NOTE(review): field indices start at 4; earlier slots presumably belong to
// the MonoBehaviour base chain — confirm against the generated base types.
struct AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// RockVR.Video.VideoCaptureCtrlBase/StatusType RockVR.Video.AudioCapture::<status>k__BackingField
	int32_t ___U3CstatusU3Ek__BackingField_4;
	// System.String RockVR.Video.AudioCapture::<filePath>k__BackingField
	String_t* ___U3CfilePathU3Ek__BackingField_5;
	// RockVR.Common.EventDelegate RockVR.Video.AudioCapture::eventDelegate
	EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * ___eventDelegate_6;
	// System.IntPtr RockVR.Video.AudioCapture::libAPI
	intptr_t ___libAPI_7;
	// System.IntPtr RockVR.Video.AudioCapture::audioPointer
	intptr_t ___audioPointer_8;
	// System.Byte[] RockVR.Video.AudioCapture::audioByteBuffer
	ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* ___audioByteBuffer_9;
public:
	inline static int32_t get_offset_of_U3CstatusU3Ek__BackingField_4() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___U3CstatusU3Ek__BackingField_4)); }
	inline int32_t get_U3CstatusU3Ek__BackingField_4() const { return ___U3CstatusU3Ek__BackingField_4; }
	inline int32_t* get_address_of_U3CstatusU3Ek__BackingField_4() { return &___U3CstatusU3Ek__BackingField_4; }
	inline void set_U3CstatusU3Ek__BackingField_4(int32_t value)
	{
		___U3CstatusU3Ek__BackingField_4 = value;
	}
	inline static int32_t get_offset_of_U3CfilePathU3Ek__BackingField_5() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___U3CfilePathU3Ek__BackingField_5)); }
	inline String_t* get_U3CfilePathU3Ek__BackingField_5() const { return ___U3CfilePathU3Ek__BackingField_5; }
	inline String_t** get_address_of_U3CfilePathU3Ek__BackingField_5() { return &___U3CfilePathU3Ek__BackingField_5; }
	inline void set_U3CfilePathU3Ek__BackingField_5(String_t* value)
	{
		___U3CfilePathU3Ek__BackingField_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___U3CfilePathU3Ek__BackingField_5), (void*)value);
	}
	inline static int32_t get_offset_of_eventDelegate_6() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___eventDelegate_6)); }
	inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * get_eventDelegate_6() const { return ___eventDelegate_6; }
	inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 ** get_address_of_eventDelegate_6() { return &___eventDelegate_6; }
	inline void set_eventDelegate_6(EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * value)
	{
		___eventDelegate_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___eventDelegate_6), (void*)value);
	}
	inline static int32_t get_offset_of_libAPI_7() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___libAPI_7)); }
	inline intptr_t get_libAPI_7() const { return ___libAPI_7; }
	inline intptr_t* get_address_of_libAPI_7() { return &___libAPI_7; }
	inline void set_libAPI_7(intptr_t value)
	{
		___libAPI_7 = value;
	}
	inline static int32_t get_offset_of_audioPointer_8() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___audioPointer_8)); }
	inline intptr_t get_audioPointer_8() const { return ___audioPointer_8; }
	inline intptr_t* get_address_of_audioPointer_8() { return &___audioPointer_8; }
	inline void set_audioPointer_8(intptr_t value)
	{
		___audioPointer_8 = value;
	}
	inline static int32_t get_offset_of_audioByteBuffer_9() { return static_cast<int32_t>(offsetof(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8, ___audioByteBuffer_9)); }
	inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* get_audioByteBuffer_9() const { return ___audioByteBuffer_9; }
	inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726** get_address_of_audioByteBuffer_9() { return &___audioByteBuffer_9; }
	inline void set_audioByteBuffer_9(ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* value)
	{
		___audioByteBuffer_9 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___audioByteBuffer_9), (void*)value);
	}
};
// RockVR.Video.Demo.AutoRotate
// Field-less MonoBehaviour subclass stub (no serialized/instance state).
struct AutoRotate_t8F887A59B7947408E6372250D0B2A1A6B5567C96 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
public:
};
// Photon.Chat.Demo.ChannelSelector
// MonoBehaviour with a single managed-string field holding the channel name.
struct ChannelSelector_t47871E6B0E0C5E7304CC0AA4E9C98DDCC883D96E : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// System.String Photon.Chat.Demo.ChannelSelector::Channel
	String_t* ___Channel_4;
public:
	inline static int32_t get_offset_of_Channel_4() { return static_cast<int32_t>(offsetof(ChannelSelector_t47871E6B0E0C5E7304CC0AA4E9C98DDCC883D96E, ___Channel_4)); }
	inline String_t* get_Channel_4() const { return ___Channel_4; }
	inline String_t** get_address_of_Channel_4() { return &___Channel_4; }
	inline void set_Channel_4(String_t* value)
	{
		___Channel_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Channel_4), (void*)value);
	}
};
// Photon.Chat.Demo.ChatAppIdCheckerUI
// MonoBehaviour holding one UI.Text reference used as its description label.
struct ChatAppIdCheckerUI_t679D1BB36AB9116707587AFBDDDA0C9BFC99B182 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.UI.Text Photon.Chat.Demo.ChatAppIdCheckerUI::Description
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___Description_4;
public:
	inline static int32_t get_offset_of_Description_4() { return static_cast<int32_t>(offsetof(ChatAppIdCheckerUI_t679D1BB36AB9116707587AFBDDDA0C9BFC99B182, ___Description_4)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_Description_4() const { return ___Description_4; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_Description_4() { return &___Description_4; }
	inline void set_Description_4(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___Description_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Description_4), (void*)value);
	}
};
// Photon.Chat.Demo.ChatGui
struct ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
// System.String[] Photon.Chat.Demo.ChatGui::ChannelsToJoinOnConnect
StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* ___ChannelsToJoinOnConnect_4;
// System.String[] Photon.Chat.Demo.ChatGui::FriendsList
StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* ___FriendsList_5;
// System.Int32 Photon.Chat.Demo.ChatGui::HistoryLengthToFetch
int32_t ___HistoryLengthToFetch_6;
// System.String Photon.Chat.Demo.ChatGui::<UserName>k__BackingField
String_t* ___U3CUserNameU3Ek__BackingField_7;
// System.String Photon.Chat.Demo.ChatGui::selectedChannelName
String_t* ___selectedChannelName_8;
// Photon.Chat.ChatClient Photon.Chat.Demo.ChatGui::chatClient
ChatClient_t5E3E39382FC13DF25585EFE46374B01B3BC377DF * ___chatClient_9;
// Photon.Chat.ChatAppSettings Photon.Chat.Demo.ChatGui::chatAppSettings
ChatAppSettings_tC09074621647F01C35C1373F86530BBC8D9DF2C8 * ___chatAppSettings_10;
// UnityEngine.GameObject Photon.Chat.Demo.ChatGui::missingAppIdErrorPanel
GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___missingAppIdErrorPanel_11;
// UnityEngine.GameObject Photon.Chat.Demo.ChatGui::ConnectingLabel
GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___ConnectingLabel_12;
// UnityEngine.RectTransform Photon.Chat.Demo.ChatGui::ChatPanel
RectTransform_t8A6A306FB29A6C8C22010CF9040E319753571072 * ___ChatPanel_13;
// UnityEngine.GameObject Photon.Chat.Demo.ChatGui::UserIdFormPanel
GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___UserIdFormPanel_14;
// UnityEngine.UI.InputField Photon.Chat.Demo.ChatGui::InputFieldChat
InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * ___InputFieldChat_15;
// UnityEngine.UI.Text Photon.Chat.Demo.ChatGui::CurrentChannelText
Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___CurrentChannelText_16;
// UnityEngine.UI.Toggle Photon.Chat.Demo.ChatGui::ChannelToggleToInstantiate
Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * ___ChannelToggleToInstantiate_17;
// UnityEngine.GameObject Photon.Chat.Demo.ChatGui::FriendListUiItemtoInstantiate
GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___FriendListUiItemtoInstantiate_18;
// System.Collections.Generic.Dictionary`2<System.String,UnityEngine.UI.Toggle> Photon.Chat.Demo.ChatGui::channelToggles
Dictionary_2_t832D46F27B23EC7C03258EEC3ACC466A27782A0E * ___channelToggles_19;
// System.Collections.Generic.Dictionary`2<System.String,Photon.Chat.Demo.FriendItem> Photon.Chat.Demo.ChatGui::friendListItemLUT
Dictionary_2_t57A8BB163A6245E3061BCE534FCD51622968502F * ___friendListItemLUT_20;
// System.Boolean Photon.Chat.Demo.ChatGui::ShowState
bool ___ShowState_21;
// UnityEngine.GameObject Photon.Chat.Demo.ChatGui::Title
GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___Title_22;
// UnityEngine.UI.Text Photon.Chat.Demo.ChatGui::StateText
Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___StateText_23;
// UnityEngine.UI.Text Photon.Chat.Demo.ChatGui::UserIdText
Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___UserIdText_24;
// System.Int32 Photon.Chat.Demo.ChatGui::TestLength
int32_t ___TestLength_26;
// System.Byte[] Photon.Chat.Demo.ChatGui::testBytes
ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* ___testBytes_27;
public:
inline static int32_t get_offset_of_ChannelsToJoinOnConnect_4() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___ChannelsToJoinOnConnect_4)); }
inline StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* get_ChannelsToJoinOnConnect_4() const { return ___ChannelsToJoinOnConnect_4; }
inline StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A** get_address_of_ChannelsToJoinOnConnect_4() { return &___ChannelsToJoinOnConnect_4; }
inline void set_ChannelsToJoinOnConnect_4(StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* value)
{
___ChannelsToJoinOnConnect_4 = value;
Il2CppCodeGenWriteBarrier((void**)(&___ChannelsToJoinOnConnect_4), (void*)value);
}
inline static int32_t get_offset_of_FriendsList_5() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___FriendsList_5)); }
inline StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* get_FriendsList_5() const { return ___FriendsList_5; }
inline StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A** get_address_of_FriendsList_5() { return &___FriendsList_5; }
inline void set_FriendsList_5(StringU5BU5D_tACEBFEDE350025B554CD507C9AE8FFE49359549A* value)
{
___FriendsList_5 = value;
Il2CppCodeGenWriteBarrier((void**)(&___FriendsList_5), (void*)value);
}
inline static int32_t get_offset_of_HistoryLengthToFetch_6() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___HistoryLengthToFetch_6)); }
inline int32_t get_HistoryLengthToFetch_6() const { return ___HistoryLengthToFetch_6; }
inline int32_t* get_address_of_HistoryLengthToFetch_6() { return &___HistoryLengthToFetch_6; }
inline void set_HistoryLengthToFetch_6(int32_t value)
{
___HistoryLengthToFetch_6 = value;
}
inline static int32_t get_offset_of_U3CUserNameU3Ek__BackingField_7() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___U3CUserNameU3Ek__BackingField_7)); }
inline String_t* get_U3CUserNameU3Ek__BackingField_7() const { return ___U3CUserNameU3Ek__BackingField_7; }
inline String_t** get_address_of_U3CUserNameU3Ek__BackingField_7() { return &___U3CUserNameU3Ek__BackingField_7; }
inline void set_U3CUserNameU3Ek__BackingField_7(String_t* value)
{
___U3CUserNameU3Ek__BackingField_7 = value;
Il2CppCodeGenWriteBarrier((void**)(&___U3CUserNameU3Ek__BackingField_7), (void*)value);
}
inline static int32_t get_offset_of_selectedChannelName_8() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___selectedChannelName_8)); }
inline String_t* get_selectedChannelName_8() const { return ___selectedChannelName_8; }
inline String_t** get_address_of_selectedChannelName_8() { return &___selectedChannelName_8; }
inline void set_selectedChannelName_8(String_t* value)
{
___selectedChannelName_8 = value;
Il2CppCodeGenWriteBarrier((void**)(&___selectedChannelName_8), (void*)value);
}
inline static int32_t get_offset_of_chatClient_9() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___chatClient_9)); }
inline ChatClient_t5E3E39382FC13DF25585EFE46374B01B3BC377DF * get_chatClient_9() const { return ___chatClient_9; }
inline ChatClient_t5E3E39382FC13DF25585EFE46374B01B3BC377DF ** get_address_of_chatClient_9() { return &___chatClient_9; }
inline void set_chatClient_9(ChatClient_t5E3E39382FC13DF25585EFE46374B01B3BC377DF * value)
{
___chatClient_9 = value;
Il2CppCodeGenWriteBarrier((void**)(&___chatClient_9), (void*)value);
}
inline static int32_t get_offset_of_chatAppSettings_10() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___chatAppSettings_10)); }
inline ChatAppSettings_tC09074621647F01C35C1373F86530BBC8D9DF2C8 * get_chatAppSettings_10() const { return ___chatAppSettings_10; }
inline ChatAppSettings_tC09074621647F01C35C1373F86530BBC8D9DF2C8 ** get_address_of_chatAppSettings_10() { return &___chatAppSettings_10; }
inline void set_chatAppSettings_10(ChatAppSettings_tC09074621647F01C35C1373F86530BBC8D9DF2C8 * value)
{
___chatAppSettings_10 = value;
Il2CppCodeGenWriteBarrier((void**)(&___chatAppSettings_10), (void*)value);
}
inline static int32_t get_offset_of_missingAppIdErrorPanel_11() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___missingAppIdErrorPanel_11)); }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_missingAppIdErrorPanel_11() const { return ___missingAppIdErrorPanel_11; }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_missingAppIdErrorPanel_11() { return &___missingAppIdErrorPanel_11; }
inline void set_missingAppIdErrorPanel_11(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
{
___missingAppIdErrorPanel_11 = value;
Il2CppCodeGenWriteBarrier((void**)(&___missingAppIdErrorPanel_11), (void*)value);
}
inline static int32_t get_offset_of_ConnectingLabel_12() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___ConnectingLabel_12)); }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_ConnectingLabel_12() const { return ___ConnectingLabel_12; }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_ConnectingLabel_12() { return &___ConnectingLabel_12; }
inline void set_ConnectingLabel_12(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
{
___ConnectingLabel_12 = value;
Il2CppCodeGenWriteBarrier((void**)(&___ConnectingLabel_12), (void*)value);
}
inline static int32_t get_offset_of_ChatPanel_13() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___ChatPanel_13)); }
inline RectTransform_t8A6A306FB29A6C8C22010CF9040E319753571072 * get_ChatPanel_13() const { return ___ChatPanel_13; }
inline RectTransform_t8A6A306FB29A6C8C22010CF9040E319753571072 ** get_address_of_ChatPanel_13() { return &___ChatPanel_13; }
inline void set_ChatPanel_13(RectTransform_t8A6A306FB29A6C8C22010CF9040E319753571072 * value)
{
___ChatPanel_13 = value;
Il2CppCodeGenWriteBarrier((void**)(&___ChatPanel_13), (void*)value);
}
inline static int32_t get_offset_of_UserIdFormPanel_14() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___UserIdFormPanel_14)); }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_UserIdFormPanel_14() const { return ___UserIdFormPanel_14; }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_UserIdFormPanel_14() { return &___UserIdFormPanel_14; }
inline void set_UserIdFormPanel_14(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
{
___UserIdFormPanel_14 = value;
Il2CppCodeGenWriteBarrier((void**)(&___UserIdFormPanel_14), (void*)value);
}
inline static int32_t get_offset_of_InputFieldChat_15() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___InputFieldChat_15)); }
inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * get_InputFieldChat_15() const { return ___InputFieldChat_15; }
inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 ** get_address_of_InputFieldChat_15() { return &___InputFieldChat_15; }
inline void set_InputFieldChat_15(InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * value)
{
___InputFieldChat_15 = value;
Il2CppCodeGenWriteBarrier((void**)(&___InputFieldChat_15), (void*)value);
}
inline static int32_t get_offset_of_CurrentChannelText_16() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___CurrentChannelText_16)); }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_CurrentChannelText_16() const { return ___CurrentChannelText_16; }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_CurrentChannelText_16() { return &___CurrentChannelText_16; }
inline void set_CurrentChannelText_16(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
{
___CurrentChannelText_16 = value;
Il2CppCodeGenWriteBarrier((void**)(&___CurrentChannelText_16), (void*)value);
}
inline static int32_t get_offset_of_ChannelToggleToInstantiate_17() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___ChannelToggleToInstantiate_17)); }
inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * get_ChannelToggleToInstantiate_17() const { return ___ChannelToggleToInstantiate_17; }
inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E ** get_address_of_ChannelToggleToInstantiate_17() { return &___ChannelToggleToInstantiate_17; }
inline void set_ChannelToggleToInstantiate_17(Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * value)
{
___ChannelToggleToInstantiate_17 = value;
Il2CppCodeGenWriteBarrier((void**)(&___ChannelToggleToInstantiate_17), (void*)value);
}
inline static int32_t get_offset_of_FriendListUiItemtoInstantiate_18() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___FriendListUiItemtoInstantiate_18)); }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_FriendListUiItemtoInstantiate_18() const { return ___FriendListUiItemtoInstantiate_18; }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_FriendListUiItemtoInstantiate_18() { return &___FriendListUiItemtoInstantiate_18; }
inline void set_FriendListUiItemtoInstantiate_18(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
{
___FriendListUiItemtoInstantiate_18 = value;
Il2CppCodeGenWriteBarrier((void**)(&___FriendListUiItemtoInstantiate_18), (void*)value);
}
inline static int32_t get_offset_of_channelToggles_19() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___channelToggles_19)); }
inline Dictionary_2_t832D46F27B23EC7C03258EEC3ACC466A27782A0E * get_channelToggles_19() const { return ___channelToggles_19; }
inline Dictionary_2_t832D46F27B23EC7C03258EEC3ACC466A27782A0E ** get_address_of_channelToggles_19() { return &___channelToggles_19; }
inline void set_channelToggles_19(Dictionary_2_t832D46F27B23EC7C03258EEC3ACC466A27782A0E * value)
{
___channelToggles_19 = value;
Il2CppCodeGenWriteBarrier((void**)(&___channelToggles_19), (void*)value);
}
inline static int32_t get_offset_of_friendListItemLUT_20() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___friendListItemLUT_20)); }
inline Dictionary_2_t57A8BB163A6245E3061BCE534FCD51622968502F * get_friendListItemLUT_20() const { return ___friendListItemLUT_20; }
inline Dictionary_2_t57A8BB163A6245E3061BCE534FCD51622968502F ** get_address_of_friendListItemLUT_20() { return &___friendListItemLUT_20; }
inline void set_friendListItemLUT_20(Dictionary_2_t57A8BB163A6245E3061BCE534FCD51622968502F * value)
{
___friendListItemLUT_20 = value;
Il2CppCodeGenWriteBarrier((void**)(&___friendListItemLUT_20), (void*)value);
}
inline static int32_t get_offset_of_ShowState_21() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___ShowState_21)); }
inline bool get_ShowState_21() const { return ___ShowState_21; }
inline bool* get_address_of_ShowState_21() { return &___ShowState_21; }
inline void set_ShowState_21(bool value)
{
___ShowState_21 = value;
}
inline static int32_t get_offset_of_Title_22() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___Title_22)); }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_Title_22() const { return ___Title_22; }
inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_Title_22() { return &___Title_22; }
inline void set_Title_22(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
{
___Title_22 = value;
Il2CppCodeGenWriteBarrier((void**)(&___Title_22), (void*)value);
}
inline static int32_t get_offset_of_StateText_23() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___StateText_23)); }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_StateText_23() const { return ___StateText_23; }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_StateText_23() { return &___StateText_23; }
inline void set_StateText_23(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
{
___StateText_23 = value;
Il2CppCodeGenWriteBarrier((void**)(&___StateText_23), (void*)value);
}
inline static int32_t get_offset_of_UserIdText_24() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___UserIdText_24)); }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_UserIdText_24() const { return ___UserIdText_24; }
inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_UserIdText_24() { return &___UserIdText_24; }
inline void set_UserIdText_24(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
{
___UserIdText_24 = value;
Il2CppCodeGenWriteBarrier((void**)(&___UserIdText_24), (void*)value);
}
inline static int32_t get_offset_of_TestLength_26() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___TestLength_26)); }
inline int32_t get_TestLength_26() const { return ___TestLength_26; }
inline int32_t* get_address_of_TestLength_26() { return &___TestLength_26; }
inline void set_TestLength_26(int32_t value)
{
___TestLength_26 = value;
}
inline static int32_t get_offset_of_testBytes_27() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F, ___testBytes_27)); }
inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* get_testBytes_27() const { return ___testBytes_27; }
inline ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726** get_address_of_testBytes_27() { return &___testBytes_27; }
inline void set_testBytes_27(ByteU5BU5D_tDBBEB0E8362242FA7223000D978B0DD19D4B0726* value)
{
___testBytes_27 = value;
Il2CppCodeGenWriteBarrier((void**)(&___testBytes_27), (void*)value);
}
};
// IL2CPP-generated layout for the static fields of Photon.Chat.Demo.ChatGui.
// Generated code: field declaration order fixes the runtime layout and the
// values returned by the get_offset_of_* helpers — do not reorder fields.
struct ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F_StaticFields
{
public:
	// System.String Photon.Chat.Demo.ChatGui::HelpText
	String_t* ___HelpText_25;
public:
	// Byte offset of the field within this struct (computed via offsetof).
	inline static int32_t get_offset_of_HelpText_25() { return static_cast<int32_t>(offsetof(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F_StaticFields, ___HelpText_25)); }
	inline String_t* get_HelpText_25() const { return ___HelpText_25; }
	inline String_t** get_address_of_HelpText_25() { return &___HelpText_25; }
	inline void set_HelpText_25(String_t* value)
	{
		___HelpText_25 = value;
		// GC write barrier: records the updated managed reference; must
		// immediately follow the store above.
		Il2CppCodeGenWriteBarrier((void**)(&___HelpText_25), (void*)value);
	}
};
// LylekGames.CombineMeshes
// IL2CPP-generated layout for the managed MonoBehaviour LylekGames.CombineMeshes.
// Field order mirrors the managed class and is ABI-critical; each field gets the
// generated accessor quartet: get_offset_of_* / getter / address-of / setter
// (setters for reference-typed fields also invoke the GC write barrier).
struct CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.Matrix4x4 LylekGames.CombineMeshes::myMatrix
	Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461  ___myMatrix_4;
	// UnityEngine.MeshFilter LylekGames.CombineMeshes::myMeshFilter
	MeshFilter_t763BB2BBF3881176AD25E4570E6DD215BA0AA51A * ___myMeshFilter_5;
	// UnityEngine.MeshRenderer LylekGames.CombineMeshes::myMeshRenderer
	MeshRenderer_tCD983A2F635E12BCB0BAA2E635D96A318757908B * ___myMeshRenderer_6;
	// UnityEngine.MeshFilter[] LylekGames.CombineMeshes::meshFilters
	MeshFilterU5BU5D_tE8AA77783A24784C69A8083B4F3E482D866FD503* ___meshFilters_7;
	// UnityEngine.MeshRenderer[] LylekGames.CombineMeshes::meshRenderers
	MeshRendererU5BU5D_t535468079DEF88AD38546DC5D04E9102C401D228* ___meshRenderers_8;
public:
	inline static int32_t get_offset_of_myMatrix_4() { return static_cast<int32_t>(offsetof(CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28, ___myMatrix_4)); }
	// Value-type field (Matrix4x4): returned/assigned by value, no write barrier.
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461  get_myMatrix_4() const { return ___myMatrix_4; }
	inline Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461 * get_address_of_myMatrix_4() { return &___myMatrix_4; }
	inline void set_myMatrix_4(Matrix4x4_tDE7FF4F2E2EA284F6EFE00D627789D0E5B8B4461  value)
	{
		___myMatrix_4 = value;
	}
	inline static int32_t get_offset_of_myMeshFilter_5() { return static_cast<int32_t>(offsetof(CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28, ___myMeshFilter_5)); }
	inline MeshFilter_t763BB2BBF3881176AD25E4570E6DD215BA0AA51A * get_myMeshFilter_5() const { return ___myMeshFilter_5; }
	inline MeshFilter_t763BB2BBF3881176AD25E4570E6DD215BA0AA51A ** get_address_of_myMeshFilter_5() { return &___myMeshFilter_5; }
	inline void set_myMeshFilter_5(MeshFilter_t763BB2BBF3881176AD25E4570E6DD215BA0AA51A * value)
	{
		___myMeshFilter_5 = value;
		// GC write barrier: must immediately follow the reference store.
		Il2CppCodeGenWriteBarrier((void**)(&___myMeshFilter_5), (void*)value);
	}
	inline static int32_t get_offset_of_myMeshRenderer_6() { return static_cast<int32_t>(offsetof(CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28, ___myMeshRenderer_6)); }
	inline MeshRenderer_tCD983A2F635E12BCB0BAA2E635D96A318757908B * get_myMeshRenderer_6() const { return ___myMeshRenderer_6; }
	inline MeshRenderer_tCD983A2F635E12BCB0BAA2E635D96A318757908B ** get_address_of_myMeshRenderer_6() { return &___myMeshRenderer_6; }
	inline void set_myMeshRenderer_6(MeshRenderer_tCD983A2F635E12BCB0BAA2E635D96A318757908B * value)
	{
		___myMeshRenderer_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___myMeshRenderer_6), (void*)value);
	}
	inline static int32_t get_offset_of_meshFilters_7() { return static_cast<int32_t>(offsetof(CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28, ___meshFilters_7)); }
	inline MeshFilterU5BU5D_tE8AA77783A24784C69A8083B4F3E482D866FD503* get_meshFilters_7() const { return ___meshFilters_7; }
	inline MeshFilterU5BU5D_tE8AA77783A24784C69A8083B4F3E482D866FD503** get_address_of_meshFilters_7() { return &___meshFilters_7; }
	inline void set_meshFilters_7(MeshFilterU5BU5D_tE8AA77783A24784C69A8083B4F3E482D866FD503* value)
	{
		___meshFilters_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___meshFilters_7), (void*)value);
	}
	inline static int32_t get_offset_of_meshRenderers_8() { return static_cast<int32_t>(offsetof(CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28, ___meshRenderers_8)); }
	inline MeshRendererU5BU5D_t535468079DEF88AD38546DC5D04E9102C401D228* get_meshRenderers_8() const { return ___meshRenderers_8; }
	inline MeshRendererU5BU5D_t535468079DEF88AD38546DC5D04E9102C401D228** get_address_of_meshRenderers_8() { return &___meshRenderers_8; }
	inline void set_meshRenderers_8(MeshRendererU5BU5D_t535468079DEF88AD38546DC5D04E9102C401D228* value)
	{
		___meshRenderers_8 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___meshRenderers_8), (void*)value);
	}
};
// Photon.Realtime.Demo.ConnectAndJoinRandomLb
// IL2CPP-generated layout for this managed MonoBehaviour. Field declaration
// order is ABI-critical; accessors follow the generator's standard pattern
// (offset helper, getter, address-of, setter with GC write barrier for
// reference-typed fields).
struct ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// Photon.Realtime.AppSettings Photon.Realtime.Demo.ConnectAndJoinRandomLb::appSettings
	AppSettings_tABB056AEAFF5113D2D970906784B48C42DF13906 * ___appSettings_4;
	// Photon.Realtime.LoadBalancingClient Photon.Realtime.Demo.ConnectAndJoinRandomLb::lbc
	LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * ___lbc_5;
	// Photon.Realtime.ConnectionHandler Photon.Realtime.Demo.ConnectAndJoinRandomLb::ch
	ConnectionHandler_t85A89B8EDF5CB1D550EEB28590A5E610034A06FE * ___ch_6;
	// UnityEngine.UI.Text Photon.Realtime.Demo.ConnectAndJoinRandomLb::StateUiText
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___StateUiText_7;
public:
	inline static int32_t get_offset_of_appSettings_4() { return static_cast<int32_t>(offsetof(ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B, ___appSettings_4)); }
	inline AppSettings_tABB056AEAFF5113D2D970906784B48C42DF13906 * get_appSettings_4() const { return ___appSettings_4; }
	inline AppSettings_tABB056AEAFF5113D2D970906784B48C42DF13906 ** get_address_of_appSettings_4() { return &___appSettings_4; }
	inline void set_appSettings_4(AppSettings_tABB056AEAFF5113D2D970906784B48C42DF13906 * value)
	{
		___appSettings_4 = value;
		// GC write barrier: must immediately follow the reference store.
		Il2CppCodeGenWriteBarrier((void**)(&___appSettings_4), (void*)value);
	}
	inline static int32_t get_offset_of_lbc_5() { return static_cast<int32_t>(offsetof(ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B, ___lbc_5)); }
	inline LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * get_lbc_5() const { return ___lbc_5; }
	inline LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A ** get_address_of_lbc_5() { return &___lbc_5; }
	inline void set_lbc_5(LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * value)
	{
		___lbc_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___lbc_5), (void*)value);
	}
	inline static int32_t get_offset_of_ch_6() { return static_cast<int32_t>(offsetof(ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B, ___ch_6)); }
	inline ConnectionHandler_t85A89B8EDF5CB1D550EEB28590A5E610034A06FE * get_ch_6() const { return ___ch_6; }
	inline ConnectionHandler_t85A89B8EDF5CB1D550EEB28590A5E610034A06FE ** get_address_of_ch_6() { return &___ch_6; }
	inline void set_ch_6(ConnectionHandler_t85A89B8EDF5CB1D550EEB28590A5E610034A06FE * value)
	{
		___ch_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___ch_6), (void*)value);
	}
	inline static int32_t get_offset_of_StateUiText_7() { return static_cast<int32_t>(offsetof(ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B, ___StateUiText_7)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_StateUiText_7() const { return ___StateUiText_7; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_StateUiText_7() { return &___StateUiText_7; }
	inline void set_StateUiText_7(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___StateUiText_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___StateUiText_7), (void*)value);
	}
};
// LylekGames.RPGGoblin.EnableMouseLook
// IL2CPP-generated layout for this managed MonoBehaviour.
// Field order is ABI-critical; do not reorder.
struct EnableMouseLook_t16A036A73774449E0297494E66A79E751D2E619E : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// LylekGames.RPGGoblin.MouseLook LylekGames.RPGGoblin.EnableMouseLook::mouseLook
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D * ___mouseLook_4;
	// LylekGames.RPGGoblin.EnableMouseLook/InputType LylekGames.RPGGoblin.EnableMouseLook::click
	// Managed enum stored as its underlying int32 representation.
	int32_t ___click_5;
public:
	inline static int32_t get_offset_of_mouseLook_4() { return static_cast<int32_t>(offsetof(EnableMouseLook_t16A036A73774449E0297494E66A79E751D2E619E, ___mouseLook_4)); }
	inline MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D * get_mouseLook_4() const { return ___mouseLook_4; }
	inline MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D ** get_address_of_mouseLook_4() { return &___mouseLook_4; }
	inline void set_mouseLook_4(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D * value)
	{
		___mouseLook_4 = value;
		// GC write barrier: must immediately follow the reference store.
		Il2CppCodeGenWriteBarrier((void**)(&___mouseLook_4), (void*)value);
	}
	inline static int32_t get_offset_of_click_5() { return static_cast<int32_t>(offsetof(EnableMouseLook_t16A036A73774449E0297494E66A79E751D2E619E, ___click_5)); }
	inline int32_t get_click_5() const { return ___click_5; }
	inline int32_t* get_address_of_click_5() { return &___click_5; }
	inline void set_click_5(int32_t value)
	{
		// Plain value store: no write barrier needed for non-reference fields.
		___click_5 = value;
	}
};
// Photon.Chat.UtilityScripts.EventSystemSpawner
// IL2CPP-generated layout: the managed class declares no instance fields of
// its own, so this struct only carries the MonoBehaviour base-class state.
struct EventSystemSpawner_t867D964087ADB2057AA1FB60869D8F51390F6562 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
public:
};
// RockVR.Common.FPSDisplay
// IL2CPP-generated layout for this managed MonoBehaviour.
// Field order is ABI-critical; do not reorder.
struct FPSDisplay_t0150EDC42472BF4ABEAB6910E5B4921E26CDB4CC : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// System.Single RockVR.Common.FPSDisplay::deltaTime
	float ___deltaTime_4;
	// UnityEngine.UI.Text RockVR.Common.FPSDisplay::text
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___text_5;
public:
	inline static int32_t get_offset_of_deltaTime_4() { return static_cast<int32_t>(offsetof(FPSDisplay_t0150EDC42472BF4ABEAB6910E5B4921E26CDB4CC, ___deltaTime_4)); }
	inline float get_deltaTime_4() const { return ___deltaTime_4; }
	inline float* get_address_of_deltaTime_4() { return &___deltaTime_4; }
	inline void set_deltaTime_4(float value)
	{
		// Plain value store: no write barrier needed for non-reference fields.
		___deltaTime_4 = value;
	}
	inline static int32_t get_offset_of_text_5() { return static_cast<int32_t>(offsetof(FPSDisplay_t0150EDC42472BF4ABEAB6910E5B4921E26CDB4CC, ___text_5)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_text_5() const { return ___text_5; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_text_5() { return &___text_5; }
	inline void set_text_5(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___text_5 = value;
		// GC write barrier: must immediately follow the reference store.
		Il2CppCodeGenWriteBarrier((void**)(&___text_5), (void*)value);
	}
};
// Photon.Chat.Demo.FriendItem
// IL2CPP-generated layout for this managed MonoBehaviour (three UI.Text
// references). Field order is ABI-critical; accessors follow the generator's
// standard pattern.
struct FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.UI.Text Photon.Chat.Demo.FriendItem::NameLabel
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___NameLabel_4;
	// UnityEngine.UI.Text Photon.Chat.Demo.FriendItem::StatusLabel
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___StatusLabel_5;
	// UnityEngine.UI.Text Photon.Chat.Demo.FriendItem::Health
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___Health_6;
public:
	inline static int32_t get_offset_of_NameLabel_4() { return static_cast<int32_t>(offsetof(FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592, ___NameLabel_4)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_NameLabel_4() const { return ___NameLabel_4; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_NameLabel_4() { return &___NameLabel_4; }
	inline void set_NameLabel_4(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___NameLabel_4 = value;
		// GC write barrier: must immediately follow the reference store.
		Il2CppCodeGenWriteBarrier((void**)(&___NameLabel_4), (void*)value);
	}
	inline static int32_t get_offset_of_StatusLabel_5() { return static_cast<int32_t>(offsetof(FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592, ___StatusLabel_5)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_StatusLabel_5() const { return ___StatusLabel_5; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_StatusLabel_5() { return &___StatusLabel_5; }
	inline void set_StatusLabel_5(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___StatusLabel_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___StatusLabel_5), (void*)value);
	}
	inline static int32_t get_offset_of_Health_6() { return static_cast<int32_t>(offsetof(FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592, ___Health_6)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_Health_6() const { return ___Health_6; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_Health_6() { return &___Health_6; }
	inline void set_Health_6(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___Health_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Health_6), (void*)value);
	}
};
// Photon.Chat.Demo.IgnoreUiRaycastWhenInactive
// IL2CPP-generated layout: the managed class declares no instance fields of
// its own, so this struct only carries the MonoBehaviour base-class state.
struct IgnoreUiRaycastWhenInactive_tC968D3FF74D6954F66346D6C2995862F3F84CFD1 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
public:
};
// Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller
// IL2CPP-generated layout for this managed MonoBehaviour. Field declaration
// order fixes the runtime object layout (and the offsets reported by the
// get_offset_of_* helpers) — do not reorder. Every setter for a
// reference-typed field invokes the GC write barrier right after the store.
struct MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// System.Collections.Generic.List`1<Photon.Voice.Unity.Demos.DemoVoiceUI.MicRef> Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::micOptions
	List_1_tBB19EB51B7E4AE8EE288B3D1F29CE27282724C8A * ___micOptions_4;
	// UnityEngine.UI.Dropdown Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::micDropdown
	Dropdown_t099F5232BB75810BC79EED6E27DDCED46C3BCD96 * ___micDropdown_5;
	// Photon.Voice.Unity.Recorder Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::recorder
	Recorder_t13D849D3581771415EF2D39F877106A1AC72C6C5 * ___recorder_6;
	// UnityEngine.GameObject Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::refreshButton
	GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___refreshButton_7;
	// UnityEngine.GameObject Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::toggleButton
	GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___toggleButton_8;
	// UnityEngine.UI.Toggle Photon.Voice.Unity.Demos.DemoVoiceUI.MicrophoneDropdownFiller::photonToggle
	Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * ___photonToggle_9;
public:
	inline static int32_t get_offset_of_micOptions_4() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___micOptions_4)); }
	inline List_1_tBB19EB51B7E4AE8EE288B3D1F29CE27282724C8A * get_micOptions_4() const { return ___micOptions_4; }
	inline List_1_tBB19EB51B7E4AE8EE288B3D1F29CE27282724C8A ** get_address_of_micOptions_4() { return &___micOptions_4; }
	inline void set_micOptions_4(List_1_tBB19EB51B7E4AE8EE288B3D1F29CE27282724C8A * value)
	{
		___micOptions_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___micOptions_4), (void*)value);
	}
	inline static int32_t get_offset_of_micDropdown_5() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___micDropdown_5)); }
	inline Dropdown_t099F5232BB75810BC79EED6E27DDCED46C3BCD96 * get_micDropdown_5() const { return ___micDropdown_5; }
	inline Dropdown_t099F5232BB75810BC79EED6E27DDCED46C3BCD96 ** get_address_of_micDropdown_5() { return &___micDropdown_5; }
	inline void set_micDropdown_5(Dropdown_t099F5232BB75810BC79EED6E27DDCED46C3BCD96 * value)
	{
		___micDropdown_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___micDropdown_5), (void*)value);
	}
	inline static int32_t get_offset_of_recorder_6() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___recorder_6)); }
	inline Recorder_t13D849D3581771415EF2D39F877106A1AC72C6C5 * get_recorder_6() const { return ___recorder_6; }
	inline Recorder_t13D849D3581771415EF2D39F877106A1AC72C6C5 ** get_address_of_recorder_6() { return &___recorder_6; }
	inline void set_recorder_6(Recorder_t13D849D3581771415EF2D39F877106A1AC72C6C5 * value)
	{
		___recorder_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___recorder_6), (void*)value);
	}
	inline static int32_t get_offset_of_refreshButton_7() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___refreshButton_7)); }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_refreshButton_7() const { return ___refreshButton_7; }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_refreshButton_7() { return &___refreshButton_7; }
	inline void set_refreshButton_7(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
	{
		___refreshButton_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___refreshButton_7), (void*)value);
	}
	inline static int32_t get_offset_of_toggleButton_8() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___toggleButton_8)); }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_toggleButton_8() const { return ___toggleButton_8; }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_toggleButton_8() { return &___toggleButton_8; }
	inline void set_toggleButton_8(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
	{
		___toggleButton_8 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___toggleButton_8), (void*)value);
	}
	inline static int32_t get_offset_of_photonToggle_9() { return static_cast<int32_t>(offsetof(MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911, ___photonToggle_9)); }
	inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * get_photonToggle_9() const { return ___photonToggle_9; }
	inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E ** get_address_of_photonToggle_9() { return &___photonToggle_9; }
	inline void set_photonToggle_9(Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * value)
	{
		___photonToggle_9 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___photonToggle_9), (void*)value);
	}
};
// LylekGames.RPGGoblin.MouseLook
// IL2CPP-generated native layout for LylekGames.RPGGoblin.MouseLook.
// Field declaration order and types mirror the managed class exactly; generated
// callers locate fields through the get_offset_of_* accessors (offsetof on this
// precise layout), so the declarations below must not be reordered or retyped.
struct MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	int32_t ___axes_4;        // LylekGames.RPGGoblin.MouseLook/RotationAxes axes (underlying int32)
	float ___sensitivityX_5;  // System.Single sensitivityX
	float ___sensitivityY_6;  // System.Single sensitivityY
	float ___minimumX_7;      // System.Single minimumX
	float ___maximumX_8;      // System.Single maximumX
	float ___minimumY_9;      // System.Single minimumY
	float ___maximumY_10;     // System.Single maximumY
	float ___rotationY_11;    // System.Single rotationY
	float ___rotationX_12;    // System.Single rotationX
public:
	// Accessor quartet per field: static offset query, getter, address-of, setter.
	// Every field is a plain value type, so no GC write barrier is required on store.
	inline static int32_t get_offset_of_axes_4() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___axes_4)); }
	inline int32_t get_axes_4() const { return ___axes_4; }
	inline int32_t* get_address_of_axes_4() { return &___axes_4; }
	inline void set_axes_4(int32_t v) { ___axes_4 = v; }
	inline static int32_t get_offset_of_sensitivityX_5() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___sensitivityX_5)); }
	inline float get_sensitivityX_5() const { return ___sensitivityX_5; }
	inline float* get_address_of_sensitivityX_5() { return &___sensitivityX_5; }
	inline void set_sensitivityX_5(float v) { ___sensitivityX_5 = v; }
	inline static int32_t get_offset_of_sensitivityY_6() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___sensitivityY_6)); }
	inline float get_sensitivityY_6() const { return ___sensitivityY_6; }
	inline float* get_address_of_sensitivityY_6() { return &___sensitivityY_6; }
	inline void set_sensitivityY_6(float v) { ___sensitivityY_6 = v; }
	inline static int32_t get_offset_of_minimumX_7() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___minimumX_7)); }
	inline float get_minimumX_7() const { return ___minimumX_7; }
	inline float* get_address_of_minimumX_7() { return &___minimumX_7; }
	inline void set_minimumX_7(float v) { ___minimumX_7 = v; }
	inline static int32_t get_offset_of_maximumX_8() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___maximumX_8)); }
	inline float get_maximumX_8() const { return ___maximumX_8; }
	inline float* get_address_of_maximumX_8() { return &___maximumX_8; }
	inline void set_maximumX_8(float v) { ___maximumX_8 = v; }
	inline static int32_t get_offset_of_minimumY_9() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___minimumY_9)); }
	inline float get_minimumY_9() const { return ___minimumY_9; }
	inline float* get_address_of_minimumY_9() { return &___minimumY_9; }
	inline void set_minimumY_9(float v) { ___minimumY_9 = v; }
	inline static int32_t get_offset_of_maximumY_10() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___maximumY_10)); }
	inline float get_maximumY_10() const { return ___maximumY_10; }
	inline float* get_address_of_maximumY_10() { return &___maximumY_10; }
	inline void set_maximumY_10(float v) { ___maximumY_10 = v; }
	inline static int32_t get_offset_of_rotationY_11() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___rotationY_11)); }
	inline float get_rotationY_11() const { return ___rotationY_11; }
	inline float* get_address_of_rotationY_11() { return &___rotationY_11; }
	inline void set_rotationY_11(float v) { ___rotationY_11 = v; }
	inline static int32_t get_offset_of_rotationX_12() { return static_cast<int32_t>(offsetof(MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D, ___rotationX_12)); }
	inline float get_rotationX_12() const { return ___rotationX_12; }
	inline float* get_address_of_rotationX_12() { return &___rotationX_12; }
	inline void set_rotationX_12(float v) { ___rotationX_12 = v; }
};
// Photon.Chat.Demo.NamePickGui
// IL2CPP-generated native layout for Photon.Chat.Demo.NamePickGui.
// Field order/types mirror the managed class. Setters of reference-typed fields
// call Il2CppCodeGenWriteBarrier immediately after the store so the GC is told
// about the managed reference now held by this object.
struct NamePickGui_tFE24EAFC067F5417D9F66DFF06A7A89767A2E181 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F * ___chatNewComponent_5;  // Photon.Chat.Demo.ChatGui chatNewComponent
	InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * ___idInput_6;        // UnityEngine.UI.InputField idInput
public:
	inline static int32_t get_offset_of_chatNewComponent_5() { return static_cast<int32_t>(offsetof(NamePickGui_tFE24EAFC067F5417D9F66DFF06A7A89767A2E181, ___chatNewComponent_5)); }
	inline ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F * get_chatNewComponent_5() const { return ___chatNewComponent_5; }
	inline ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F ** get_address_of_chatNewComponent_5() { return &___chatNewComponent_5; }
	inline void set_chatNewComponent_5(ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F * v) { ___chatNewComponent_5 = v; Il2CppCodeGenWriteBarrier((void**)(&___chatNewComponent_5), (void*)v); }
	inline static int32_t get_offset_of_idInput_6() { return static_cast<int32_t>(offsetof(NamePickGui_tFE24EAFC067F5417D9F66DFF06A7A89767A2E181, ___idInput_6)); }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * get_idInput_6() const { return ___idInput_6; }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 ** get_address_of_idInput_6() { return &___idInput_6; }
	inline void set_idInput_6(InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * v) { ___idInput_6 = v; Il2CppCodeGenWriteBarrier((void**)(&___idInput_6), (void*)v); }
};
// Photon.Chat.UtilityScripts.OnStartDelete
// IL2CPP-generated native layout for Photon.Chat.UtilityScripts.OnStartDelete.
// The managed class adds no instance fields beyond MonoBehaviour's, so the
// struct body is intentionally empty.
struct OnStartDelete_tA0F1DFAFBC46814B905F637B420BA27F546FD5E6 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
};
// Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI
// IL2CPP-generated native layout for Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI.
// Field declaration order and types mirror the managed class; generated code reaches
// these fields through the get_offset_of_* accessors (offsetof on this exact layout),
// so nothing below may be reordered, retyped, or removed.
struct RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.UI.Text Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::nameText
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___nameText_4;
	// UnityEngine.UI.Image Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::remoteIsMuting
	Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * ___remoteIsMuting_5;
	// UnityEngine.UI.Image Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::remoteIsTalking
	Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * ___remoteIsTalking_6;
	// UnityEngine.UI.InputField Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::minDelaySoftInputField
	InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * ___minDelaySoftInputField_7;
	// UnityEngine.UI.InputField Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::maxDelaySoftInputField
	InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * ___maxDelaySoftInputField_8;
	// UnityEngine.UI.InputField Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::maxDelayHardInputField
	InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * ___maxDelayHardInputField_9;
	// UnityEngine.UI.Text Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::bufferLagText
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ___bufferLagText_10;
	// Photon.Voice.Unity.Speaker Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::speaker
	Speaker_tDF067516E356439A1F51638A7725C56C1DF3EFEA * ___speaker_11;
	// Photon.Voice.Unity.VoiceConnection Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::voiceConnection
	VoiceConnection_t65A719BDA53623411E6DC2E6CC34B120E0EBA704 * ___voiceConnection_12;
	// Photon.Realtime.LoadBalancingClient Photon.Voice.Unity.Demos.DemoVoiceUI.RemoteSpeakerUI::loadBalancingClient
	LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * ___loadBalancingClient_13;
public:
	// Generated accessor quartet per field: static offset query, getter, address-of,
	// setter. Every field here is reference-typed, so each setter follows its store
	// with Il2CppCodeGenWriteBarrier to record the managed reference for the GC;
	// the store/barrier ordering must be preserved exactly as emitted.
	inline static int32_t get_offset_of_nameText_4() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___nameText_4)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_nameText_4() const { return ___nameText_4; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_nameText_4() { return &___nameText_4; }
	inline void set_nameText_4(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___nameText_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___nameText_4), (void*)value);
	}
	inline static int32_t get_offset_of_remoteIsMuting_5() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___remoteIsMuting_5)); }
	inline Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * get_remoteIsMuting_5() const { return ___remoteIsMuting_5; }
	inline Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C ** get_address_of_remoteIsMuting_5() { return &___remoteIsMuting_5; }
	inline void set_remoteIsMuting_5(Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * value)
	{
		___remoteIsMuting_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___remoteIsMuting_5), (void*)value);
	}
	inline static int32_t get_offset_of_remoteIsTalking_6() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___remoteIsTalking_6)); }
	inline Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * get_remoteIsTalking_6() const { return ___remoteIsTalking_6; }
	inline Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C ** get_address_of_remoteIsTalking_6() { return &___remoteIsTalking_6; }
	inline void set_remoteIsTalking_6(Image_t4021FF27176E44BFEDDCBE43C7FE6B713EC70D3C * value)
	{
		___remoteIsTalking_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___remoteIsTalking_6), (void*)value);
	}
	inline static int32_t get_offset_of_minDelaySoftInputField_7() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___minDelaySoftInputField_7)); }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * get_minDelaySoftInputField_7() const { return ___minDelaySoftInputField_7; }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 ** get_address_of_minDelaySoftInputField_7() { return &___minDelaySoftInputField_7; }
	inline void set_minDelaySoftInputField_7(InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * value)
	{
		___minDelaySoftInputField_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___minDelaySoftInputField_7), (void*)value);
	}
	inline static int32_t get_offset_of_maxDelaySoftInputField_8() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___maxDelaySoftInputField_8)); }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * get_maxDelaySoftInputField_8() const { return ___maxDelaySoftInputField_8; }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 ** get_address_of_maxDelaySoftInputField_8() { return &___maxDelaySoftInputField_8; }
	inline void set_maxDelaySoftInputField_8(InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * value)
	{
		___maxDelaySoftInputField_8 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___maxDelaySoftInputField_8), (void*)value);
	}
	inline static int32_t get_offset_of_maxDelayHardInputField_9() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___maxDelayHardInputField_9)); }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * get_maxDelayHardInputField_9() const { return ___maxDelayHardInputField_9; }
	inline InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 ** get_address_of_maxDelayHardInputField_9() { return &___maxDelayHardInputField_9; }
	inline void set_maxDelayHardInputField_9(InputField_tB41A2814F31A3E9373D443EDEBBB2856006324D0 * value)
	{
		___maxDelayHardInputField_9 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___maxDelayHardInputField_9), (void*)value);
	}
	inline static int32_t get_offset_of_bufferLagText_10() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___bufferLagText_10)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get_bufferLagText_10() const { return ___bufferLagText_10; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of_bufferLagText_10() { return &___bufferLagText_10; }
	inline void set_bufferLagText_10(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		___bufferLagText_10 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___bufferLagText_10), (void*)value);
	}
	inline static int32_t get_offset_of_speaker_11() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___speaker_11)); }
	inline Speaker_tDF067516E356439A1F51638A7725C56C1DF3EFEA * get_speaker_11() const { return ___speaker_11; }
	inline Speaker_tDF067516E356439A1F51638A7725C56C1DF3EFEA ** get_address_of_speaker_11() { return &___speaker_11; }
	inline void set_speaker_11(Speaker_tDF067516E356439A1F51638A7725C56C1DF3EFEA * value)
	{
		___speaker_11 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___speaker_11), (void*)value);
	}
	inline static int32_t get_offset_of_voiceConnection_12() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___voiceConnection_12)); }
	inline VoiceConnection_t65A719BDA53623411E6DC2E6CC34B120E0EBA704 * get_voiceConnection_12() const { return ___voiceConnection_12; }
	inline VoiceConnection_t65A719BDA53623411E6DC2E6CC34B120E0EBA704 ** get_address_of_voiceConnection_12() { return &___voiceConnection_12; }
	inline void set_voiceConnection_12(VoiceConnection_t65A719BDA53623411E6DC2E6CC34B120E0EBA704 * value)
	{
		___voiceConnection_12 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___voiceConnection_12), (void*)value);
	}
	inline static int32_t get_offset_of_loadBalancingClient_13() { return static_cast<int32_t>(offsetof(RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730, ___loadBalancingClient_13)); }
	inline LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * get_loadBalancingClient_13() const { return ___loadBalancingClient_13; }
	inline LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A ** get_address_of_loadBalancingClient_13() { return &___loadBalancingClient_13; }
	inline void set_loadBalancingClient_13(LoadBalancingClient_tBEEEE3B7EAB2BE4F38AF50B935F7C73C0F8DC86A * value)
	{
		___loadBalancingClient_13 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___loadBalancingClient_13), (void*)value);
	}
};
// RockVR.Video.Screenshot
// IL2CPP-generated native layout for RockVR.Video.Screenshot.
// Field declaration order and types mirror the managed class; generated code reaches
// these fields via the get_offset_of_* accessors, so the layout must stay untouched.
struct Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.Material RockVR.Video.Screenshot::transformMaterial
	Material_t8927C00353A72755313F046D0CE85178AE8218EE * ___transformMaterial_4;
	// System.Int32 RockVR.Video.Screenshot::startSeconds
	int32_t ___startSeconds_5;
	// UnityEngine.Texture2D RockVR.Video.Screenshot::frameTexture
	Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * ___frameTexture_6;
	// UnityEngine.RenderTexture RockVR.Video.Screenshot::frameRenderTexture
	RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___frameRenderTexture_7;
	// UnityEngine.Cubemap RockVR.Video.Screenshot::frameCubemap
	Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * ___frameCubemap_8;
public:
	// Generated accessors. Reference-typed setters pair the store with
	// Il2CppCodeGenWriteBarrier for the GC; the int32 field's setter does not,
	// since value types need no barrier.
	inline static int32_t get_offset_of_transformMaterial_4() { return static_cast<int32_t>(offsetof(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80, ___transformMaterial_4)); }
	inline Material_t8927C00353A72755313F046D0CE85178AE8218EE * get_transformMaterial_4() const { return ___transformMaterial_4; }
	inline Material_t8927C00353A72755313F046D0CE85178AE8218EE ** get_address_of_transformMaterial_4() { return &___transformMaterial_4; }
	inline void set_transformMaterial_4(Material_t8927C00353A72755313F046D0CE85178AE8218EE * value)
	{
		___transformMaterial_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___transformMaterial_4), (void*)value);
	}
	inline static int32_t get_offset_of_startSeconds_5() { return static_cast<int32_t>(offsetof(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80, ___startSeconds_5)); }
	inline int32_t get_startSeconds_5() const { return ___startSeconds_5; }
	inline int32_t* get_address_of_startSeconds_5() { return &___startSeconds_5; }
	inline void set_startSeconds_5(int32_t value)
	{
		___startSeconds_5 = value;
	}
	inline static int32_t get_offset_of_frameTexture_6() { return static_cast<int32_t>(offsetof(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80, ___frameTexture_6)); }
	inline Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * get_frameTexture_6() const { return ___frameTexture_6; }
	inline Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF ** get_address_of_frameTexture_6() { return &___frameTexture_6; }
	inline void set_frameTexture_6(Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * value)
	{
		___frameTexture_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameTexture_6), (void*)value);
	}
	inline static int32_t get_offset_of_frameRenderTexture_7() { return static_cast<int32_t>(offsetof(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80, ___frameRenderTexture_7)); }
	inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_frameRenderTexture_7() const { return ___frameRenderTexture_7; }
	inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_frameRenderTexture_7() { return &___frameRenderTexture_7; }
	inline void set_frameRenderTexture_7(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
	{
		___frameRenderTexture_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameRenderTexture_7), (void*)value);
	}
	inline static int32_t get_offset_of_frameCubemap_8() { return static_cast<int32_t>(offsetof(Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80, ___frameCubemap_8)); }
	inline Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * get_frameCubemap_8() const { return ___frameCubemap_8; }
	inline Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 ** get_address_of_frameCubemap_8() { return &___frameCubemap_8; }
	inline void set_frameCubemap_8(Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * value)
	{
		___frameCubemap_8 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameCubemap_8), (void*)value);
	}
};
// LylekGames.Demo.SimpleCameraMove
// IL2CPP-generated native layout for LylekGames.Demo.SimpleCameraMove.
// Single value-type field; setter needs no GC write barrier.
struct SimpleCameraMove_t92A85B2CE71DA14B3312C8800D30A689E88AC885 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	float ___sensitivity_4; // System.Single sensitivity
public:
	inline static int32_t get_offset_of_sensitivity_4() { return static_cast<int32_t>(offsetof(SimpleCameraMove_t92A85B2CE71DA14B3312C8800D30A689E88AC885, ___sensitivity_4)); }
	inline float get_sensitivity_4() const { return ___sensitivity_4; }
	inline float* get_address_of_sensitivity_4() { return &___sensitivity_4; }
	inline void set_sensitivity_4(float v) { ___sensitivity_4 = v; }
};
// LylekGames.RPGGoblin.SimpleCameraScroll
// IL2CPP-generated native layout for LylekGames.RPGGoblin.SimpleCameraScroll.
// The managed class adds no instance fields beyond MonoBehaviour's, so the
// struct body is intentionally empty.
struct SimpleCameraScroll_t9F3DB7BD2C82FB7202E557D272E9293C364D6536 : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
};
// Photon.Chat.UtilityScripts.TextButtonTransition
// IL2CPP-generated native layout for Photon.Chat.UtilityScripts.TextButtonTransition.
// Field declaration order and types mirror the managed class; generated code reaches
// these fields via the get_offset_of_* accessors, so the layout must stay untouched.
struct TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.UI.Text Photon.Chat.UtilityScripts.TextButtonTransition::_text
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ____text_4;
	// UnityEngine.UI.Selectable Photon.Chat.UtilityScripts.TextButtonTransition::Selectable
	Selectable_t34088A3677CC9D344F81B0D91999D8C5963D7DBD * ___Selectable_5;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextButtonTransition::NormalColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___NormalColor_6;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextButtonTransition::HoverColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___HoverColor_7;
public:
	// Generated accessors. The two reference-typed setters pair the store with
	// Il2CppCodeGenWriteBarrier for the GC; the Color fields are value types
	// (stored inline, returned/assigned by value), so their setters omit it.
	inline static int32_t get_offset_of__text_4() { return static_cast<int32_t>(offsetof(TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F, ____text_4)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get__text_4() const { return ____text_4; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of__text_4() { return &____text_4; }
	inline void set__text_4(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		____text_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____text_4), (void*)value);
	}
	inline static int32_t get_offset_of_Selectable_5() { return static_cast<int32_t>(offsetof(TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F, ___Selectable_5)); }
	inline Selectable_t34088A3677CC9D344F81B0D91999D8C5963D7DBD * get_Selectable_5() const { return ___Selectable_5; }
	inline Selectable_t34088A3677CC9D344F81B0D91999D8C5963D7DBD ** get_address_of_Selectable_5() { return &___Selectable_5; }
	inline void set_Selectable_5(Selectable_t34088A3677CC9D344F81B0D91999D8C5963D7DBD * value)
	{
		___Selectable_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Selectable_5), (void*)value);
	}
	inline static int32_t get_offset_of_NormalColor_6() { return static_cast<int32_t>(offsetof(TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F, ___NormalColor_6)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_NormalColor_6() const { return ___NormalColor_6; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_NormalColor_6() { return &___NormalColor_6; }
	inline void set_NormalColor_6(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___NormalColor_6 = value;
	}
	inline static int32_t get_offset_of_HoverColor_7() { return static_cast<int32_t>(offsetof(TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F, ___HoverColor_7)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_HoverColor_7() const { return ___HoverColor_7; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_HoverColor_7() { return &___HoverColor_7; }
	inline void set_HoverColor_7(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___HoverColor_7 = value;
	}
};
// Photon.Chat.UtilityScripts.TextToggleIsOnTransition
// IL2CPP-generated native layout for Photon.Chat.UtilityScripts.TextToggleIsOnTransition.
// Field declaration order and types mirror the managed class; generated code reaches
// these fields via the get_offset_of_* accessors, so the layout must stay untouched.
struct TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.UI.Toggle Photon.Chat.UtilityScripts.TextToggleIsOnTransition::toggle
	Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * ___toggle_4;
	// UnityEngine.UI.Text Photon.Chat.UtilityScripts.TextToggleIsOnTransition::_text
	Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * ____text_5;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextToggleIsOnTransition::NormalOnColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___NormalOnColor_6;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextToggleIsOnTransition::NormalOffColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___NormalOffColor_7;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextToggleIsOnTransition::HoverOnColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___HoverOnColor_8;
	// UnityEngine.Color Photon.Chat.UtilityScripts.TextToggleIsOnTransition::HoverOffColor
	Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 ___HoverOffColor_9;
	// System.Boolean Photon.Chat.UtilityScripts.TextToggleIsOnTransition::isHover
	bool ___isHover_10;
public:
	// Generated accessors. The two reference-typed setters pair the store with
	// Il2CppCodeGenWriteBarrier for the GC; Color and bool are value types, so
	// their setters omit it.
	inline static int32_t get_offset_of_toggle_4() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___toggle_4)); }
	inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * get_toggle_4() const { return ___toggle_4; }
	inline Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E ** get_address_of_toggle_4() { return &___toggle_4; }
	inline void set_toggle_4(Toggle_t68F5A84CDD2BBAEA866F42EB4E0C9F2B431D612E * value)
	{
		___toggle_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___toggle_4), (void*)value);
	}
	inline static int32_t get_offset_of__text_5() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ____text_5)); }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * get__text_5() const { return ____text_5; }
	inline Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 ** get_address_of__text_5() { return &____text_5; }
	inline void set__text_5(Text_t6A2339DA6C05AE2646FC1A6C8FCC127391BE7FA1 * value)
	{
		____text_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____text_5), (void*)value);
	}
	inline static int32_t get_offset_of_NormalOnColor_6() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___NormalOnColor_6)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_NormalOnColor_6() const { return ___NormalOnColor_6; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_NormalOnColor_6() { return &___NormalOnColor_6; }
	inline void set_NormalOnColor_6(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___NormalOnColor_6 = value;
	}
	inline static int32_t get_offset_of_NormalOffColor_7() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___NormalOffColor_7)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_NormalOffColor_7() const { return ___NormalOffColor_7; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_NormalOffColor_7() { return &___NormalOffColor_7; }
	inline void set_NormalOffColor_7(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___NormalOffColor_7 = value;
	}
	inline static int32_t get_offset_of_HoverOnColor_8() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___HoverOnColor_8)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_HoverOnColor_8() const { return ___HoverOnColor_8; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_HoverOnColor_8() { return &___HoverOnColor_8; }
	inline void set_HoverOnColor_8(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___HoverOnColor_8 = value;
	}
	inline static int32_t get_offset_of_HoverOffColor_9() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___HoverOffColor_9)); }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 get_HoverOffColor_9() const { return ___HoverOffColor_9; }
	inline Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 * get_address_of_HoverOffColor_9() { return &___HoverOffColor_9; }
	inline void set_HoverOffColor_9(Color_tF40DAF76C04FFECF3FE6024F85A294741C9CC659 value)
	{
		___HoverOffColor_9 = value;
	}
	inline static int32_t get_offset_of_isHover_10() { return static_cast<int32_t>(offsetof(TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D, ___isHover_10)); }
	inline bool get_isHover_10() const { return ___isHover_10; }
	inline bool* get_address_of_isHover_10() { return &___isHover_10; }
	inline void set_isHover_10(bool value)
	{
		___isHover_10 = value;
	}
};
// RockVR.Video.VideoCaptureBase
struct VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
// RockVR.Video.VideoCaptureBase/FormatType RockVR.Video.VideoCaptureBase::format
int32_t ___format_4;
// RockVR.Video.VideoCaptureBase/StereoType RockVR.Video.VideoCaptureBase::stereo
int32_t ___stereo_5;
// RockVR.Video.VideoCaptureBase/StereoFormatType RockVR.Video.VideoCaptureBase::stereoFormat
int32_t ___stereoFormat_6;
// RockVR.Video.VideoCaptureBase/FrameSizeType RockVR.Video.VideoCaptureBase::frameSize
int32_t ___frameSize_7;
// RockVR.Video.VideoCaptureBase/CubemapSizeType RockVR.Video.VideoCaptureBase::_cubemapSize
int32_t ____cubemapSize_8;
// RockVR.Video.VideoCaptureBase/PanoramaProjectionType RockVR.Video.VideoCaptureBase::panoramaProjection
int32_t ___panoramaProjection_9;
// RockVR.Video.VideoCaptureBase/EncodeQualityType RockVR.Video.VideoCaptureBase::encodeQuality
int32_t ___encodeQuality_10;
// RockVR.Video.VideoCaptureBase/AntiAliasingType RockVR.Video.VideoCaptureBase::_antiAliasing
int32_t ____antiAliasing_11;
// RockVR.Video.VideoCaptureBase/TargetFramerateType RockVR.Video.VideoCaptureBase::_targetFramerate
int32_t ____targetFramerate_12;
// UnityEngine.Camera RockVR.Video.VideoCaptureBase::captureCamera
Camera_tC44E094BAB53AFC8A014C6F9CFCE11F4FC38006C * ___captureCamera_13;
// System.Boolean RockVR.Video.VideoCaptureBase::isDedicated
bool ___isDedicated_14;
// System.Boolean RockVR.Video.VideoCaptureBase::captureGUI
bool ___captureGUI_15;
// System.Single RockVR.Video.VideoCaptureBase::deltaFrameTime
float ___deltaFrameTime_16;
// RockVR.Video.VideoCaptureBase/ModeType RockVR.Video.VideoCaptureBase::mode
int32_t ___mode_17;
// System.String RockVR.Video.VideoCaptureBase::<filePath>k__BackingField
String_t* ___U3CfilePathU3Ek__BackingField_18;
// System.Boolean RockVR.Video.VideoCaptureBase::customPath
bool ___customPath_19;
// System.String RockVR.Video.VideoCaptureBase::customPathFolder
String_t* ___customPathFolder_20;
// System.String RockVR.Video.VideoCaptureBase::streamingAddress
String_t* ___streamingAddress_21;
// RockVR.Common.EventDelegate RockVR.Video.VideoCaptureBase::eventDelegate
EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * ___eventDelegate_22;
// System.Single RockVR.Video.VideoCaptureBase::interPupillaryDistance
float ___interPupillaryDistance_23;
// UnityEngine.Material RockVR.Video.VideoCaptureBase::stereoPackMaterial
Material_t8927C00353A72755313F046D0CE85178AE8218EE * ___stereoPackMaterial_24;
// UnityEngine.Material RockVR.Video.VideoCaptureBase::copyReverseMaterial
Material_t8927C00353A72755313F046D0CE85178AE8218EE * ___copyReverseMaterial_25;
// UnityEngine.Material RockVR.Video.VideoCaptureBase::cubemap2Equirectangular
Material_t8927C00353A72755313F046D0CE85178AE8218EE * ___cubemap2Equirectangular_26;
// UnityEngine.Material RockVR.Video.VideoCaptureBase::blitMaterial
Material_t8927C00353A72755313F046D0CE85178AE8218EE * ___blitMaterial_27;
// UnityEngine.RenderTexture RockVR.Video.VideoCaptureBase::frameRenderTexture
RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___frameRenderTexture_28;
// UnityEngine.RenderTexture RockVR.Video.VideoCaptureBase::stereoTargetTexture
RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___stereoTargetTexture_29;
// UnityEngine.RenderTexture RockVR.Video.VideoCaptureBase::finalTargetTexture
RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___finalTargetTexture_30;
// UnityEngine.RenderTexture RockVR.Video.VideoCaptureBase::panoramaTempRenderTexture
RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___panoramaTempRenderTexture_31;
// UnityEngine.RenderTexture RockVR.Video.VideoCaptureBase::faceTarget
RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * ___faceTarget_32;
public:
inline static int32_t get_offset_of_format_4() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___format_4)); }
inline int32_t get_format_4() const { return ___format_4; }
inline int32_t* get_address_of_format_4() { return &___format_4; }
inline void set_format_4(int32_t value)
{
___format_4 = value;
}
inline static int32_t get_offset_of_stereo_5() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___stereo_5)); }
inline int32_t get_stereo_5() const { return ___stereo_5; }
inline int32_t* get_address_of_stereo_5() { return &___stereo_5; }
inline void set_stereo_5(int32_t value)
{
___stereo_5 = value;
}
inline static int32_t get_offset_of_stereoFormat_6() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___stereoFormat_6)); }
inline int32_t get_stereoFormat_6() const { return ___stereoFormat_6; }
inline int32_t* get_address_of_stereoFormat_6() { return &___stereoFormat_6; }
inline void set_stereoFormat_6(int32_t value)
{
___stereoFormat_6 = value;
}
inline static int32_t get_offset_of_frameSize_7() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___frameSize_7)); }
inline int32_t get_frameSize_7() const { return ___frameSize_7; }
inline int32_t* get_address_of_frameSize_7() { return &___frameSize_7; }
inline void set_frameSize_7(int32_t value)
{
___frameSize_7 = value;
}
inline static int32_t get_offset_of__cubemapSize_8() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ____cubemapSize_8)); }
inline int32_t get__cubemapSize_8() const { return ____cubemapSize_8; }
inline int32_t* get_address_of__cubemapSize_8() { return &____cubemapSize_8; }
inline void set__cubemapSize_8(int32_t value)
{
____cubemapSize_8 = value;
}
inline static int32_t get_offset_of_panoramaProjection_9() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___panoramaProjection_9)); }
inline int32_t get_panoramaProjection_9() const { return ___panoramaProjection_9; }
inline int32_t* get_address_of_panoramaProjection_9() { return &___panoramaProjection_9; }
inline void set_panoramaProjection_9(int32_t value)
{
___panoramaProjection_9 = value;
}
inline static int32_t get_offset_of_encodeQuality_10() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___encodeQuality_10)); }
inline int32_t get_encodeQuality_10() const { return ___encodeQuality_10; }
inline int32_t* get_address_of_encodeQuality_10() { return &___encodeQuality_10; }
inline void set_encodeQuality_10(int32_t value)
{
___encodeQuality_10 = value;
}
inline static int32_t get_offset_of__antiAliasing_11() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ____antiAliasing_11)); }
inline int32_t get__antiAliasing_11() const { return ____antiAliasing_11; }
inline int32_t* get_address_of__antiAliasing_11() { return &____antiAliasing_11; }
inline void set__antiAliasing_11(int32_t value)
{
____antiAliasing_11 = value;
}
inline static int32_t get_offset_of__targetFramerate_12() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ____targetFramerate_12)); }
inline int32_t get__targetFramerate_12() const { return ____targetFramerate_12; }
inline int32_t* get_address_of__targetFramerate_12() { return &____targetFramerate_12; }
inline void set__targetFramerate_12(int32_t value)
{
____targetFramerate_12 = value;
}
inline static int32_t get_offset_of_captureCamera_13() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___captureCamera_13)); }
inline Camera_tC44E094BAB53AFC8A014C6F9CFCE11F4FC38006C * get_captureCamera_13() const { return ___captureCamera_13; }
inline Camera_tC44E094BAB53AFC8A014C6F9CFCE11F4FC38006C ** get_address_of_captureCamera_13() { return &___captureCamera_13; }
inline void set_captureCamera_13(Camera_tC44E094BAB53AFC8A014C6F9CFCE11F4FC38006C * value)
{
___captureCamera_13 = value;
Il2CppCodeGenWriteBarrier((void**)(&___captureCamera_13), (void*)value);
}
inline static int32_t get_offset_of_isDedicated_14() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___isDedicated_14)); }
inline bool get_isDedicated_14() const { return ___isDedicated_14; }
inline bool* get_address_of_isDedicated_14() { return &___isDedicated_14; }
inline void set_isDedicated_14(bool value)
{
___isDedicated_14 = value;
}
inline static int32_t get_offset_of_captureGUI_15() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___captureGUI_15)); }
inline bool get_captureGUI_15() const { return ___captureGUI_15; }
inline bool* get_address_of_captureGUI_15() { return &___captureGUI_15; }
inline void set_captureGUI_15(bool value)
{
___captureGUI_15 = value;
}
inline static int32_t get_offset_of_deltaFrameTime_16() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___deltaFrameTime_16)); }
inline float get_deltaFrameTime_16() const { return ___deltaFrameTime_16; }
inline float* get_address_of_deltaFrameTime_16() { return &___deltaFrameTime_16; }
inline void set_deltaFrameTime_16(float value)
{
___deltaFrameTime_16 = value;
}
inline static int32_t get_offset_of_mode_17() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___mode_17)); }
inline int32_t get_mode_17() const { return ___mode_17; }
inline int32_t* get_address_of_mode_17() { return &___mode_17; }
inline void set_mode_17(int32_t value)
{
___mode_17 = value;
}
inline static int32_t get_offset_of_U3CfilePathU3Ek__BackingField_18() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___U3CfilePathU3Ek__BackingField_18)); }
inline String_t* get_U3CfilePathU3Ek__BackingField_18() const { return ___U3CfilePathU3Ek__BackingField_18; }
inline String_t** get_address_of_U3CfilePathU3Ek__BackingField_18() { return &___U3CfilePathU3Ek__BackingField_18; }
inline void set_U3CfilePathU3Ek__BackingField_18(String_t* value)
{
___U3CfilePathU3Ek__BackingField_18 = value;
Il2CppCodeGenWriteBarrier((void**)(&___U3CfilePathU3Ek__BackingField_18), (void*)value);
}
inline static int32_t get_offset_of_customPath_19() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___customPath_19)); }
inline bool get_customPath_19() const { return ___customPath_19; }
inline bool* get_address_of_customPath_19() { return &___customPath_19; }
inline void set_customPath_19(bool value)
{
___customPath_19 = value;
}
inline static int32_t get_offset_of_customPathFolder_20() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___customPathFolder_20)); }
inline String_t* get_customPathFolder_20() const { return ___customPathFolder_20; }
inline String_t** get_address_of_customPathFolder_20() { return &___customPathFolder_20; }
inline void set_customPathFolder_20(String_t* value)
{
___customPathFolder_20 = value;
Il2CppCodeGenWriteBarrier((void**)(&___customPathFolder_20), (void*)value);
}
inline static int32_t get_offset_of_streamingAddress_21() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___streamingAddress_21)); }
inline String_t* get_streamingAddress_21() const { return ___streamingAddress_21; }
inline String_t** get_address_of_streamingAddress_21() { return &___streamingAddress_21; }
inline void set_streamingAddress_21(String_t* value)
{
___streamingAddress_21 = value;
Il2CppCodeGenWriteBarrier((void**)(&___streamingAddress_21), (void*)value);
}
inline static int32_t get_offset_of_eventDelegate_22() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___eventDelegate_22)); }
inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * get_eventDelegate_22() const { return ___eventDelegate_22; }
inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 ** get_address_of_eventDelegate_22() { return &___eventDelegate_22; }
inline void set_eventDelegate_22(EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * value)
{
___eventDelegate_22 = value;
Il2CppCodeGenWriteBarrier((void**)(&___eventDelegate_22), (void*)value);
}
inline static int32_t get_offset_of_interPupillaryDistance_23() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___interPupillaryDistance_23)); }
inline float get_interPupillaryDistance_23() const { return ___interPupillaryDistance_23; }
inline float* get_address_of_interPupillaryDistance_23() { return &___interPupillaryDistance_23; }
inline void set_interPupillaryDistance_23(float value)
{
___interPupillaryDistance_23 = value;
}
inline static int32_t get_offset_of_stereoPackMaterial_24() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___stereoPackMaterial_24)); }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE * get_stereoPackMaterial_24() const { return ___stereoPackMaterial_24; }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE ** get_address_of_stereoPackMaterial_24() { return &___stereoPackMaterial_24; }
inline void set_stereoPackMaterial_24(Material_t8927C00353A72755313F046D0CE85178AE8218EE * value)
{
___stereoPackMaterial_24 = value;
Il2CppCodeGenWriteBarrier((void**)(&___stereoPackMaterial_24), (void*)value);
}
inline static int32_t get_offset_of_copyReverseMaterial_25() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___copyReverseMaterial_25)); }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE * get_copyReverseMaterial_25() const { return ___copyReverseMaterial_25; }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE ** get_address_of_copyReverseMaterial_25() { return &___copyReverseMaterial_25; }
inline void set_copyReverseMaterial_25(Material_t8927C00353A72755313F046D0CE85178AE8218EE * value)
{
___copyReverseMaterial_25 = value;
Il2CppCodeGenWriteBarrier((void**)(&___copyReverseMaterial_25), (void*)value);
}
inline static int32_t get_offset_of_cubemap2Equirectangular_26() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___cubemap2Equirectangular_26)); }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE * get_cubemap2Equirectangular_26() const { return ___cubemap2Equirectangular_26; }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE ** get_address_of_cubemap2Equirectangular_26() { return &___cubemap2Equirectangular_26; }
inline void set_cubemap2Equirectangular_26(Material_t8927C00353A72755313F046D0CE85178AE8218EE * value)
{
___cubemap2Equirectangular_26 = value;
Il2CppCodeGenWriteBarrier((void**)(&___cubemap2Equirectangular_26), (void*)value);
}
inline static int32_t get_offset_of_blitMaterial_27() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___blitMaterial_27)); }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE * get_blitMaterial_27() const { return ___blitMaterial_27; }
inline Material_t8927C00353A72755313F046D0CE85178AE8218EE ** get_address_of_blitMaterial_27() { return &___blitMaterial_27; }
inline void set_blitMaterial_27(Material_t8927C00353A72755313F046D0CE85178AE8218EE * value)
{
___blitMaterial_27 = value;
Il2CppCodeGenWriteBarrier((void**)(&___blitMaterial_27), (void*)value);
}
inline static int32_t get_offset_of_frameRenderTexture_28() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___frameRenderTexture_28)); }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_frameRenderTexture_28() const { return ___frameRenderTexture_28; }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_frameRenderTexture_28() { return &___frameRenderTexture_28; }
inline void set_frameRenderTexture_28(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
{
___frameRenderTexture_28 = value;
Il2CppCodeGenWriteBarrier((void**)(&___frameRenderTexture_28), (void*)value);
}
inline static int32_t get_offset_of_stereoTargetTexture_29() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___stereoTargetTexture_29)); }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_stereoTargetTexture_29() const { return ___stereoTargetTexture_29; }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_stereoTargetTexture_29() { return &___stereoTargetTexture_29; }
inline void set_stereoTargetTexture_29(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
{
___stereoTargetTexture_29 = value;
Il2CppCodeGenWriteBarrier((void**)(&___stereoTargetTexture_29), (void*)value);
}
inline static int32_t get_offset_of_finalTargetTexture_30() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___finalTargetTexture_30)); }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_finalTargetTexture_30() const { return ___finalTargetTexture_30; }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_finalTargetTexture_30() { return &___finalTargetTexture_30; }
inline void set_finalTargetTexture_30(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
{
___finalTargetTexture_30 = value;
Il2CppCodeGenWriteBarrier((void**)(&___finalTargetTexture_30), (void*)value);
}
inline static int32_t get_offset_of_panoramaTempRenderTexture_31() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___panoramaTempRenderTexture_31)); }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_panoramaTempRenderTexture_31() const { return ___panoramaTempRenderTexture_31; }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_panoramaTempRenderTexture_31() { return &___panoramaTempRenderTexture_31; }
inline void set_panoramaTempRenderTexture_31(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
{
___panoramaTempRenderTexture_31 = value;
Il2CppCodeGenWriteBarrier((void**)(&___panoramaTempRenderTexture_31), (void*)value);
}
inline static int32_t get_offset_of_faceTarget_32() { return static_cast<int32_t>(offsetof(VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA, ___faceTarget_32)); }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * get_faceTarget_32() const { return ___faceTarget_32; }
inline RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 ** get_address_of_faceTarget_32() { return &___faceTarget_32; }
inline void set_faceTarget_32(RenderTexture_t5FE7A5B47EF962A0E8D7BEBA05E9FC87D49A1849 * value)
{
___faceTarget_32 = value;
Il2CppCodeGenWriteBarrier((void**)(&___faceTarget_32), (void*)value);
}
};
// RockVR.Video.Demo.VideoCaptureUI
// IL2CPP-generated native mirror of the managed MonoBehaviour
// RockVR.Video.Demo.VideoCaptureUI. Field declaration order and the numeric
// suffixes (_4, _5, ...) encode the managed instance-field layout, so they
// must not be reordered or retyped by hand. For every field the generator
// emits four accessors: get_offset_of_* (byte offset via offsetof, used by
// the IL2CPP runtime/metadata), a const getter, an address-of getter, and a
// setter. Setters for managed-reference fields call Il2CppCodeGenWriteBarrier
// so the garbage collector is notified of the pointer store.
// NOTE(review): auto-generated code — do not hand-edit.
struct VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// UnityEngine.GameObject RockVR.Video.Demo.VideoCaptureUI::UIController
	GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___UIController_4;
	// UnityEngine.GameObject RockVR.Video.Demo.VideoCaptureUI::BaseController
	GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___BaseController_5;
	// UnityEngine.InputSystem.InputActionReference RockVR.Video.Demo.VideoCaptureUI::inputActionReference_VideoRecord
	InputActionReference_tB2E9E368D60A4C8E066C7CE0EE2A80C62320C28E * ___inputActionReference_VideoRecord_6;
	// UnityEngine.GameObject RockVR.Video.Demo.VideoCaptureUI::Icon
	GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * ___Icon_7;

public:
	// Generated accessors: offset, getter, address-of, and write-barriered setter per field.
	inline static int32_t get_offset_of_UIController_4() { return static_cast<int32_t>(offsetof(VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C, ___UIController_4)); }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_UIController_4() const { return ___UIController_4; }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_UIController_4() { return &___UIController_4; }
	inline void set_UIController_4(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
	{
		___UIController_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___UIController_4), (void*)value);
	}
	inline static int32_t get_offset_of_BaseController_5() { return static_cast<int32_t>(offsetof(VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C, ___BaseController_5)); }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_BaseController_5() const { return ___BaseController_5; }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_BaseController_5() { return &___BaseController_5; }
	inline void set_BaseController_5(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
	{
		___BaseController_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___BaseController_5), (void*)value);
	}
	inline static int32_t get_offset_of_inputActionReference_VideoRecord_6() { return static_cast<int32_t>(offsetof(VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C, ___inputActionReference_VideoRecord_6)); }
	inline InputActionReference_tB2E9E368D60A4C8E066C7CE0EE2A80C62320C28E * get_inputActionReference_VideoRecord_6() const { return ___inputActionReference_VideoRecord_6; }
	inline InputActionReference_tB2E9E368D60A4C8E066C7CE0EE2A80C62320C28E ** get_address_of_inputActionReference_VideoRecord_6() { return &___inputActionReference_VideoRecord_6; }
	inline void set_inputActionReference_VideoRecord_6(InputActionReference_tB2E9E368D60A4C8E066C7CE0EE2A80C62320C28E * value)
	{
		___inputActionReference_VideoRecord_6 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___inputActionReference_VideoRecord_6), (void*)value);
	}
	inline static int32_t get_offset_of_Icon_7() { return static_cast<int32_t>(offsetof(VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C, ___Icon_7)); }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * get_Icon_7() const { return ___Icon_7; }
	inline GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 ** get_address_of_Icon_7() { return &___Icon_7; }
	inline void set_Icon_7(GameObject_tC000A2E1A7CF1E10FD7BA08863287C072207C319 * value)
	{
		___Icon_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___Icon_7), (void*)value);
	}
};
// RockVR.Video.VideoPlayer
// IL2CPP-generated native mirror of the managed MonoBehaviour
// RockVR.Video.VideoPlayer. Holds the playlist of recorded video file paths,
// the underlying UnityEngine.Video.VideoPlayer component it wraps, and the
// index of the currently selected file. Field order/suffixes encode managed
// layout; accessors follow the standard generated pattern (offset / getter /
// address-of / setter, with a GC write barrier on reference stores).
// NOTE(review): auto-generated code — do not hand-edit.
struct VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF : public MonoBehaviour_t37A501200D970A8257124B0EAE00A0FF3DDC354A
{
public:
	// System.Collections.Generic.List`1<System.String> RockVR.Video.VideoPlayer::videoFiles
	List_1_t6C9F81EDBF0F4A31A9B0DA372D2EF34BDA3A1AF3 * ___videoFiles_4;
	// UnityEngine.Video.VideoPlayer RockVR.Video.VideoPlayer::videoPlayerImpl
	VideoPlayer_t47DCC396CBA28512CF97C6CC4F55878E8D62FE86 * ___videoPlayerImpl_5;
	// System.Int32 RockVR.Video.VideoPlayer::index
	int32_t ___index_6;

public:
	// Generated accessors: offset, getter, address-of, and setter per field.
	inline static int32_t get_offset_of_videoFiles_4() { return static_cast<int32_t>(offsetof(VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF, ___videoFiles_4)); }
	inline List_1_t6C9F81EDBF0F4A31A9B0DA372D2EF34BDA3A1AF3 * get_videoFiles_4() const { return ___videoFiles_4; }
	inline List_1_t6C9F81EDBF0F4A31A9B0DA372D2EF34BDA3A1AF3 ** get_address_of_videoFiles_4() { return &___videoFiles_4; }
	inline void set_videoFiles_4(List_1_t6C9F81EDBF0F4A31A9B0DA372D2EF34BDA3A1AF3 * value)
	{
		___videoFiles_4 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___videoFiles_4), (void*)value);
	}
	inline static int32_t get_offset_of_videoPlayerImpl_5() { return static_cast<int32_t>(offsetof(VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF, ___videoPlayerImpl_5)); }
	inline VideoPlayer_t47DCC396CBA28512CF97C6CC4F55878E8D62FE86 * get_videoPlayerImpl_5() const { return ___videoPlayerImpl_5; }
	inline VideoPlayer_t47DCC396CBA28512CF97C6CC4F55878E8D62FE86 ** get_address_of_videoPlayerImpl_5() { return &___videoPlayerImpl_5; }
	inline void set_videoPlayerImpl_5(VideoPlayer_t47DCC396CBA28512CF97C6CC4F55878E8D62FE86 * value)
	{
		___videoPlayerImpl_5 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___videoPlayerImpl_5), (void*)value);
	}
	inline static int32_t get_offset_of_index_6() { return static_cast<int32_t>(offsetof(VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF, ___index_6)); }
	inline int32_t get_index_6() const { return ___index_6; }
	inline int32_t* get_address_of_index_6() { return &___index_6; }
	inline void set_index_6(int32_t value)
	{
		___index_6 = value;
	}
};
// IL2CPP-generated container for the static fields of RockVR.Video.VideoPlayer.
// Static fields are split out of the instance struct into a separate
// *_StaticFields type; here it holds the class's singleton `instance` pointer.
// NOTE(review): auto-generated code — do not hand-edit.
struct VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF_StaticFields
{
public:
	// RockVR.Video.VideoPlayer RockVR.Video.VideoPlayer::instance
	VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF * ___instance_7;

public:
	// Generated accessors: offset, getter, address-of, and write-barriered setter.
	inline static int32_t get_offset_of_instance_7() { return static_cast<int32_t>(offsetof(VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF_StaticFields, ___instance_7)); }
	inline VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF * get_instance_7() const { return ___instance_7; }
	inline VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF ** get_address_of_instance_7() { return &___instance_7; }
	inline void set_instance_7(VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF * value)
	{
		___instance_7 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___instance_7), (void*)value);
	}
};
// RockVR.Video.VideoCapture
// IL2CPP-generated native mirror of the managed RockVR.Video.VideoCapture,
// which derives from VideoCaptureBase. Field numbering continues from the
// base class (base ends at _32, this type starts at _33). Holds capture
// status, per-frame textures, frame/encode counters, a native library handle
// (libAPI, stored as intptr_t), a queue of pending frame data, and the
// encoding thread reference. Field order/suffixes encode managed layout;
// accessors follow the standard generated pattern (offset / getter /
// address-of / setter, with a GC write barrier on reference stores).
// NOTE(review): auto-generated code — do not hand-edit.
struct VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB : public VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA
{
public:
	// RockVR.Video.VideoCaptureCtrlBase/StatusType RockVR.Video.VideoCapture::<status>k__BackingField
	int32_t ___U3CstatusU3Ek__BackingField_33;
	// System.Boolean RockVR.Video.VideoCapture::offlineRender
	bool ___offlineRender_34;
	// UnityEngine.Texture2D RockVR.Video.VideoCapture::frameTexture
	Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * ___frameTexture_35;
	// UnityEngine.Cubemap RockVR.Video.VideoCapture::frameCubemap
	Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * ___frameCubemap_36;
	// System.Boolean RockVR.Video.VideoCapture::isCapturingFrame
	bool ___isCapturingFrame_37;
	// System.Boolean RockVR.Video.VideoCapture::isCreateRenderTexture
	bool ___isCreateRenderTexture_38;
	// System.Single RockVR.Video.VideoCapture::capturingTime
	float ___capturingTime_39;
	// System.Int32 RockVR.Video.VideoCapture::capturedFrameCount
	int32_t ___capturedFrameCount_40;
	// System.Int32 RockVR.Video.VideoCapture::encodedFrameCount
	int32_t ___encodedFrameCount_41;
	// System.IntPtr RockVR.Video.VideoCapture::libAPI
	intptr_t ___libAPI_42;
	// System.Single RockVR.Video.VideoCapture::originalMaximumDeltaTime
	float ___originalMaximumDeltaTime_43;
	// System.Collections.Generic.Queue`1<RockVR.Video.VideoCapture/FrameData> RockVR.Video.VideoCapture::frameQueue
	Queue_1_tD8A602F88CB1BAFC9053BC90676C759C80917D8C * ___frameQueue_44;
	// System.Threading.Thread RockVR.Video.VideoCapture::encodeThread
	Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * ___encodeThread_45;

public:
	// Generated accessors: offset, getter, address-of, and setter per field.
	// Reference-typed setters call Il2CppCodeGenWriteBarrier; value-typed
	// setters (int32_t, bool, float, intptr_t) assign directly.
	inline static int32_t get_offset_of_U3CstatusU3Ek__BackingField_33() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___U3CstatusU3Ek__BackingField_33)); }
	inline int32_t get_U3CstatusU3Ek__BackingField_33() const { return ___U3CstatusU3Ek__BackingField_33; }
	inline int32_t* get_address_of_U3CstatusU3Ek__BackingField_33() { return &___U3CstatusU3Ek__BackingField_33; }
	inline void set_U3CstatusU3Ek__BackingField_33(int32_t value)
	{
		___U3CstatusU3Ek__BackingField_33 = value;
	}
	inline static int32_t get_offset_of_offlineRender_34() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___offlineRender_34)); }
	inline bool get_offlineRender_34() const { return ___offlineRender_34; }
	inline bool* get_address_of_offlineRender_34() { return &___offlineRender_34; }
	inline void set_offlineRender_34(bool value)
	{
		___offlineRender_34 = value;
	}
	inline static int32_t get_offset_of_frameTexture_35() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___frameTexture_35)); }
	inline Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * get_frameTexture_35() const { return ___frameTexture_35; }
	inline Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF ** get_address_of_frameTexture_35() { return &___frameTexture_35; }
	inline void set_frameTexture_35(Texture2D_t9B604D0D8E28032123641A7E7338FA872E2698BF * value)
	{
		___frameTexture_35 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameTexture_35), (void*)value);
	}
	inline static int32_t get_offset_of_frameCubemap_36() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___frameCubemap_36)); }
	inline Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * get_frameCubemap_36() const { return ___frameCubemap_36; }
	inline Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 ** get_address_of_frameCubemap_36() { return &___frameCubemap_36; }
	inline void set_frameCubemap_36(Cubemap_tB48EEA79C233417AF4D7BF03EA1BE4AA07A5B938 * value)
	{
		___frameCubemap_36 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameCubemap_36), (void*)value);
	}
	inline static int32_t get_offset_of_isCapturingFrame_37() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___isCapturingFrame_37)); }
	inline bool get_isCapturingFrame_37() const { return ___isCapturingFrame_37; }
	inline bool* get_address_of_isCapturingFrame_37() { return &___isCapturingFrame_37; }
	inline void set_isCapturingFrame_37(bool value)
	{
		___isCapturingFrame_37 = value;
	}
	inline static int32_t get_offset_of_isCreateRenderTexture_38() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___isCreateRenderTexture_38)); }
	inline bool get_isCreateRenderTexture_38() const { return ___isCreateRenderTexture_38; }
	inline bool* get_address_of_isCreateRenderTexture_38() { return &___isCreateRenderTexture_38; }
	inline void set_isCreateRenderTexture_38(bool value)
	{
		___isCreateRenderTexture_38 = value;
	}
	inline static int32_t get_offset_of_capturingTime_39() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___capturingTime_39)); }
	inline float get_capturingTime_39() const { return ___capturingTime_39; }
	inline float* get_address_of_capturingTime_39() { return &___capturingTime_39; }
	inline void set_capturingTime_39(float value)
	{
		___capturingTime_39 = value;
	}
	inline static int32_t get_offset_of_capturedFrameCount_40() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___capturedFrameCount_40)); }
	inline int32_t get_capturedFrameCount_40() const { return ___capturedFrameCount_40; }
	inline int32_t* get_address_of_capturedFrameCount_40() { return &___capturedFrameCount_40; }
	inline void set_capturedFrameCount_40(int32_t value)
	{
		___capturedFrameCount_40 = value;
	}
	inline static int32_t get_offset_of_encodedFrameCount_41() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___encodedFrameCount_41)); }
	inline int32_t get_encodedFrameCount_41() const { return ___encodedFrameCount_41; }
	inline int32_t* get_address_of_encodedFrameCount_41() { return &___encodedFrameCount_41; }
	inline void set_encodedFrameCount_41(int32_t value)
	{
		___encodedFrameCount_41 = value;
	}
	inline static int32_t get_offset_of_libAPI_42() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___libAPI_42)); }
	inline intptr_t get_libAPI_42() const { return ___libAPI_42; }
	inline intptr_t* get_address_of_libAPI_42() { return &___libAPI_42; }
	inline void set_libAPI_42(intptr_t value)
	{
		___libAPI_42 = value;
	}
	inline static int32_t get_offset_of_originalMaximumDeltaTime_43() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___originalMaximumDeltaTime_43)); }
	inline float get_originalMaximumDeltaTime_43() const { return ___originalMaximumDeltaTime_43; }
	inline float* get_address_of_originalMaximumDeltaTime_43() { return &___originalMaximumDeltaTime_43; }
	inline void set_originalMaximumDeltaTime_43(float value)
	{
		___originalMaximumDeltaTime_43 = value;
	}
	inline static int32_t get_offset_of_frameQueue_44() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___frameQueue_44)); }
	inline Queue_1_tD8A602F88CB1BAFC9053BC90676C759C80917D8C * get_frameQueue_44() const { return ___frameQueue_44; }
	inline Queue_1_tD8A602F88CB1BAFC9053BC90676C759C80917D8C ** get_address_of_frameQueue_44() { return &___frameQueue_44; }
	inline void set_frameQueue_44(Queue_1_tD8A602F88CB1BAFC9053BC90676C759C80917D8C * value)
	{
		___frameQueue_44 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___frameQueue_44), (void*)value);
	}
	inline static int32_t get_offset_of_encodeThread_45() { return static_cast<int32_t>(offsetof(VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB, ___encodeThread_45)); }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * get_encodeThread_45() const { return ___encodeThread_45; }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 ** get_address_of_encodeThread_45() { return &___encodeThread_45; }
	inline void set_encodeThread_45(Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * value)
	{
		___encodeThread_45 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___encodeThread_45), (void*)value);
	}
};
// RockVR.Video.VideoCaptureCtrlBase
// IL2CPP-generated native mirror of the managed VideoCaptureCtrlBase class
// (singleton controller coordinating one or more video captures).
// NOTE(review): generated code — field order/offsets must not be hand-edited.
struct VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9 : public Singleton_1_t326812034F9AD281C5527A5119A3DF775F3B68D4
{
public:
	// RockVR.Video.VideoCaptureCtrlBase/StatusType RockVR.Video.VideoCaptureCtrlBase::<status>k__BackingField
	int32_t ___U3CstatusU3Ek__BackingField_7;
	// System.Boolean RockVR.Video.VideoCaptureCtrlBase::debug
	bool ___debug_8;
	// System.Boolean RockVR.Video.VideoCaptureCtrlBase::startOnAwake
	bool ___startOnAwake_9;
	// System.Single RockVR.Video.VideoCaptureCtrlBase::captureTime
	float ___captureTime_10;
	// System.Boolean RockVR.Video.VideoCaptureCtrlBase::quitAfterCapture
	bool ___quitAfterCapture_11;
	// RockVR.Common.EventDelegate RockVR.Video.VideoCaptureCtrlBase::eventDelegate
	EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * ___eventDelegate_12;
	// RockVR.Video.VideoCaptureBase[] RockVR.Video.VideoCaptureCtrlBase::_videoCaptures
	VideoCaptureBaseU5BU5D_t7AE282114378AE52385D1AC298E457A80C251C2C* ____videoCaptures_13;
public:
	inline static int32_t get_offset_of_U3CstatusU3Ek__BackingField_7() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___U3CstatusU3Ek__BackingField_7)); }
	inline int32_t get_U3CstatusU3Ek__BackingField_7() const { return ___U3CstatusU3Ek__BackingField_7; }
	inline int32_t* get_address_of_U3CstatusU3Ek__BackingField_7() { return &___U3CstatusU3Ek__BackingField_7; }
	inline void set_U3CstatusU3Ek__BackingField_7(int32_t value)
	{
		___U3CstatusU3Ek__BackingField_7 = value;
	}
	inline static int32_t get_offset_of_debug_8() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___debug_8)); }
	inline bool get_debug_8() const { return ___debug_8; }
	inline bool* get_address_of_debug_8() { return &___debug_8; }
	inline void set_debug_8(bool value)
	{
		___debug_8 = value;
	}
	inline static int32_t get_offset_of_startOnAwake_9() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___startOnAwake_9)); }
	inline bool get_startOnAwake_9() const { return ___startOnAwake_9; }
	inline bool* get_address_of_startOnAwake_9() { return &___startOnAwake_9; }
	inline void set_startOnAwake_9(bool value)
	{
		___startOnAwake_9 = value;
	}
	inline static int32_t get_offset_of_captureTime_10() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___captureTime_10)); }
	inline float get_captureTime_10() const { return ___captureTime_10; }
	inline float* get_address_of_captureTime_10() { return &___captureTime_10; }
	inline void set_captureTime_10(float value)
	{
		___captureTime_10 = value;
	}
	inline static int32_t get_offset_of_quitAfterCapture_11() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___quitAfterCapture_11)); }
	inline bool get_quitAfterCapture_11() const { return ___quitAfterCapture_11; }
	inline bool* get_address_of_quitAfterCapture_11() { return &___quitAfterCapture_11; }
	inline void set_quitAfterCapture_11(bool value)
	{
		___quitAfterCapture_11 = value;
	}
	inline static int32_t get_offset_of_eventDelegate_12() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ___eventDelegate_12)); }
	inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * get_eventDelegate_12() const { return ___eventDelegate_12; }
	inline EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 ** get_address_of_eventDelegate_12() { return &___eventDelegate_12; }
	inline void set_eventDelegate_12(EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54 * value)
	{
		___eventDelegate_12 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___eventDelegate_12), (void*)value);
	}
	inline static int32_t get_offset_of__videoCaptures_13() { return static_cast<int32_t>(offsetof(VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9, ____videoCaptures_13)); }
	inline VideoCaptureBaseU5BU5D_t7AE282114378AE52385D1AC298E457A80C251C2C* get__videoCaptures_13() const { return ____videoCaptures_13; }
	inline VideoCaptureBaseU5BU5D_t7AE282114378AE52385D1AC298E457A80C251C2C** get_address_of__videoCaptures_13() { return &____videoCaptures_13; }
	inline void set__videoCaptures_13(VideoCaptureBaseU5BU5D_t7AE282114378AE52385D1AC298E457A80C251C2C* value)
	{
		____videoCaptures_13 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____videoCaptures_13), (void*)value);
	}
};
// RockVR.Video.VideoCaptureCtrl
// IL2CPP-generated native mirror of the managed VideoCaptureCtrl class
// (concrete controller: audio capture, merge/GC worker threads, flags).
// NOTE(review): generated code — field order/offsets must not be hand-edited.
struct VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE : public VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9
{
public:
	// RockVR.Video.AudioCapture RockVR.Video.VideoCaptureCtrl::_audioCapture
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * ____audioCapture_14;
	// System.Int32 RockVR.Video.VideoCaptureCtrl::videoCaptureFinishCount
	int32_t ___videoCaptureFinishCount_15;
	// System.Int32 RockVR.Video.VideoCaptureCtrl::videoCaptureRequiredCount
	int32_t ___videoCaptureRequiredCount_16;
	// System.Threading.Thread RockVR.Video.VideoCaptureCtrl::videoMergeThread
	Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * ___videoMergeThread_17;
	// System.Threading.Thread RockVR.Video.VideoCaptureCtrl::garbageCollectionThread
	Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * ___garbageCollectionThread_18;
	// System.Boolean RockVR.Video.VideoCaptureCtrl::isCaptureAudio
	bool ___isCaptureAudio_19;
	// System.Boolean RockVR.Video.VideoCaptureCtrl::isOfflineRender
	bool ___isOfflineRender_20;
public:
	inline static int32_t get_offset_of__audioCapture_14() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ____audioCapture_14)); }
	inline AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * get__audioCapture_14() const { return ____audioCapture_14; }
	inline AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 ** get_address_of__audioCapture_14() { return &____audioCapture_14; }
	inline void set__audioCapture_14(AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8 * value)
	{
		____audioCapture_14 = value;
		Il2CppCodeGenWriteBarrier((void**)(&____audioCapture_14), (void*)value);
	}
	inline static int32_t get_offset_of_videoCaptureFinishCount_15() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___videoCaptureFinishCount_15)); }
	inline int32_t get_videoCaptureFinishCount_15() const { return ___videoCaptureFinishCount_15; }
	inline int32_t* get_address_of_videoCaptureFinishCount_15() { return &___videoCaptureFinishCount_15; }
	inline void set_videoCaptureFinishCount_15(int32_t value)
	{
		___videoCaptureFinishCount_15 = value;
	}
	inline static int32_t get_offset_of_videoCaptureRequiredCount_16() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___videoCaptureRequiredCount_16)); }
	inline int32_t get_videoCaptureRequiredCount_16() const { return ___videoCaptureRequiredCount_16; }
	inline int32_t* get_address_of_videoCaptureRequiredCount_16() { return &___videoCaptureRequiredCount_16; }
	inline void set_videoCaptureRequiredCount_16(int32_t value)
	{
		___videoCaptureRequiredCount_16 = value;
	}
	inline static int32_t get_offset_of_videoMergeThread_17() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___videoMergeThread_17)); }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * get_videoMergeThread_17() const { return ___videoMergeThread_17; }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 ** get_address_of_videoMergeThread_17() { return &___videoMergeThread_17; }
	inline void set_videoMergeThread_17(Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * value)
	{
		___videoMergeThread_17 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___videoMergeThread_17), (void*)value);
	}
	inline static int32_t get_offset_of_garbageCollectionThread_18() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___garbageCollectionThread_18)); }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * get_garbageCollectionThread_18() const { return ___garbageCollectionThread_18; }
	inline Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 ** get_address_of_garbageCollectionThread_18() { return &___garbageCollectionThread_18; }
	inline void set_garbageCollectionThread_18(Thread_tB9EB71664220EE16451AF3276D78DE6614D2A414 * value)
	{
		___garbageCollectionThread_18 = value;
		Il2CppCodeGenWriteBarrier((void**)(&___garbageCollectionThread_18), (void*)value);
	}
	inline static int32_t get_offset_of_isCaptureAudio_19() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___isCaptureAudio_19)); }
	inline bool get_isCaptureAudio_19() const { return ___isCaptureAudio_19; }
	inline bool* get_address_of_isCaptureAudio_19() { return &___isCaptureAudio_19; }
	inline void set_isCaptureAudio_19(bool value)
	{
		___isCaptureAudio_19 = value;
	}
	inline static int32_t get_offset_of_isOfflineRender_20() { return static_cast<int32_t>(offsetof(VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE, ___isOfflineRender_20)); }
	inline bool get_isOfflineRender_20() const { return ___isOfflineRender_20; }
	inline bool* get_address_of_isOfflineRender_20() { return &___isOfflineRender_20; }
	inline void set_isOfflineRender_20(bool value)
	{
		___isOfflineRender_20 = value;
	}
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
// IL2CPP-generated field-offset tables (types 5780-5794): one int32 entry per
// managed field, indexed in metadata order. Value-type tables add
// sizeof(RuntimeObject) to skip the boxed object header; 0 marks
// static/threadstatic/literal fields resolved elsewhere. Do not hand-edit.
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5780[4] = 
{
	MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94::get_offset_of_MicType_0() + static_cast<int32_t>(sizeof(RuntimeObject)),
	MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94::get_offset_of_Name_1() + static_cast<int32_t>(sizeof(RuntimeObject)),
	MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94::get_offset_of_PhotonId_2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	MicRef_tA64BD32143F5FA1D26559FB61CF16F09B3A94C94::get_offset_of_PhotonIdString_3() + static_cast<int32_t>(sizeof(RuntimeObject)),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5781[6] = 
{
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_micOptions_4(),
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_micDropdown_5(),
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_recorder_6(),
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_refreshButton_7(),
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_toggleButton_8(),
	MicrophoneDropdownFiller_t7EE7E45185226E7E6099341DB6D315F2A7002911::get_offset_of_photonToggle_9(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5782[1] = 
{
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5783[10] = 
{
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_nameText_4(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_remoteIsMuting_5(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_remoteIsTalking_6(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_minDelaySoftInputField_7(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_maxDelaySoftInputField_8(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_maxDelayHardInputField_9(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_bufferLagText_10(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_speaker_11(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_voiceConnection_12(),
	RemoteSpeakerUI_t4EBDFA8C541F3CEB6088908D041384C68BE50730::get_offset_of_loadBalancingClient_13(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5784[4] = 
{
	ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B::get_offset_of_appSettings_4(),
	ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B::get_offset_of_lbc_5(),
	ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B::get_offset_of_ch_6(),
	ConnectAndJoinRandomLb_t8A2B7E9487A2305C4713FE75288DDBDA75B9209B::get_offset_of_StateUiText_7(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5786[1] = 
{
	ChannelSelector_t47871E6B0E0C5E7304CC0AA4E9C98DDCC883D96E::get_offset_of_Channel_4(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5787[1] = 
{
	ChatAppIdCheckerUI_t679D1BB36AB9116707587AFBDDDA0C9BFC99B182::get_offset_of_Description_4(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5788[24] = 
{
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_ChannelsToJoinOnConnect_4(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_FriendsList_5(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_HistoryLengthToFetch_6(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_U3CUserNameU3Ek__BackingField_7(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_selectedChannelName_8(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_chatClient_9(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_chatAppSettings_10(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_missingAppIdErrorPanel_11(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_ConnectingLabel_12(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_ChatPanel_13(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_UserIdFormPanel_14(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_InputFieldChat_15(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_CurrentChannelText_16(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_ChannelToggleToInstantiate_17(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_FriendListUiItemtoInstantiate_18(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_channelToggles_19(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_friendListItemLUT_20(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_ShowState_21(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_Title_22(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_StateText_23(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_UserIdText_24(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F_StaticFields::get_offset_of_HelpText_25(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_TestLength_26(),
	ChatGui_t6943DDCBC27D148758239BACA7BBA62A266D9B0F::get_offset_of_testBytes_27(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5789[3] = 
{
	FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592::get_offset_of_NameLabel_4(),
	FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592::get_offset_of_StatusLabel_5(),
	FriendItem_tF80B41237BF241C90F2D1DAC6082DC433E2DD592::get_offset_of_Health_6(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5791[3] = 
{
	0,
	NamePickGui_tFE24EAFC067F5417D9F66DFF06A7A89767A2E181::get_offset_of_chatNewComponent_5(),
	NamePickGui_tFE24EAFC067F5417D9F66DFF06A7A89767A2E181::get_offset_of_idInput_6(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5794[4] = 
{
	TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F::get_offset_of__text_4(),
	TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F::get_offset_of_Selectable_5(),
	TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F::get_offset_of_NormalColor_6(),
	TextButtonTransition_t51491A0543434FBDA97FB36A7A95C483FB6EE16F::get_offset_of_HoverColor_7(),
};
// IL2CPP-generated field-offset tables (types 5795-5807), RockVR.Video capture
// components and their enums. Enum tables expose only value__ (plus zeros for
// the literal members). Generated — do not hand-edit.
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5795[7] = 
{
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_toggle_4(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of__text_5(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_NormalOnColor_6(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_NormalOffColor_7(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_HoverOnColor_8(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_HoverOffColor_9(),
	TextToggleIsOnTransition_t138BBDEFB7B6957F27ED0A5E0B4776503FA1436D::get_offset_of_isHover_10(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5796[6] = 
{
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_U3CstatusU3Ek__BackingField_4(),
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_U3CfilePathU3Ek__BackingField_5(),
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_eventDelegate_6(),
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_libAPI_7(),
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_audioPointer_8(),
	AudioCapture_tAF87A71022D987367D112D48136C96AD0E2196A8::get_offset_of_audioByteBuffer_9(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5797[3] = 
{
	ModeType_tB12C34FF8E101F43B612F86E972D44D7E5A9C57C::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5798[3] = 
{
	FormatType_t5146714865CD8A744D0E48ADFB21C8B0E672E71E::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5799[3] = 
{
	PanoramaProjectionType_t4C5251BE54E854B3B8420D92325AA2EE569C0955::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5800[4] = 
{
	StereoType_t52A5ED4AB47A17794922C7A01210256589B531BA::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5801[3] = 
{
	StereoFormatType_t03F79A41E5E55051234CB0B28BC5BE95C2A2720E::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5802[10] = 
{
	FrameSizeType_tDCE15CC8D96EC55F0CDCF884134DD6DA7F1032B2::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5803[4] = 
{
	CubemapSizeType_t1BEAF3986B85123F0367EECF74C0F29B129A9FE3::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5804[4] = 
{
	EncodeQualityType_t99A80555020D4522474B5612A040EF116670C18C::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5805[5] = 
{
	AntiAliasingType_t9585D007F94A7825158CD94711B0EA0D2DF968E7::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5806[6] = 
{
	TargetFramerateType_t9F547BDB989E136FFECAE00E2F3A4D085F1AA84A::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5807[29] = 
{
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_format_4(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_stereo_5(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_stereoFormat_6(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_frameSize_7(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of__cubemapSize_8(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_panoramaProjection_9(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_encodeQuality_10(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of__antiAliasing_11(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of__targetFramerate_12(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_captureCamera_13(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_isDedicated_14(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_captureGUI_15(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_deltaFrameTime_16(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_mode_17(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_U3CfilePathU3Ek__BackingField_18(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_customPath_19(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_customPathFolder_20(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_streamingAddress_21(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_eventDelegate_22(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_interPupillaryDistance_23(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_stereoPackMaterial_24(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_copyReverseMaterial_25(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_cubemap2Equirectangular_26(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_blitMaterial_27(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_frameRenderTexture_28(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_stereoTargetTexture_29(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_finalTargetTexture_30(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_panoramaTempRenderTexture_31(),
	VideoCaptureBase_t681EB3FA3A86654DC876A5C526E13D324D2A80EA::get_offset_of_faceTarget_32(),
};
// IL2CPP-generated field-offset tables (types 5808-5820): capture status/error
// enums, controller base, path config statics, coroutine state machines and
// the VideoCapture component. Generated — do not hand-edit.
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5808[6] = 
{
	StatusType_t43810E5CF4B0DF784CD154F58165F9C460E49582::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5809[4] = 
{
	ErrorCodeType_t15B8A59B00A4723B91B9856AEFBA575D83A260E4::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5810[7] = 
{
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_U3CstatusU3Ek__BackingField_7(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_debug_8(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_startOnAwake_9(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_captureTime_10(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_quitAfterCapture_11(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of_eventDelegate_12(),
	VideoCaptureCtrlBase_tC8EFDDF51B9F6BF21AAD8BD587A95A6E12F8B4C9::get_offset_of__videoCaptures_13(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5811[5] = 
{
	PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields::get_offset_of_persistentDataPath_0(),
	PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields::get_offset_of_streamingAssetsPath_1(),
	PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields::get_offset_of_myDocumentsPath_2(),
	PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields::get_offset_of_saveFolder_3(),
	PathConfig_t0DBE1019CCE7BADF3780A8276A65F3AEC40138AE_StaticFields::get_offset_of_lastVideoFile_4(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5812[4] = 
{
	U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA::get_offset_of_U3CU3E1__state_0(),
	U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA::get_offset_of_U3CU3E2__current_1(),
	U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA::get_offset_of_seconds_2(),
	U3CAutoTakeScreenshotU3Ed__8_tF24CC8DACA967AA9CF08BAAD92AA69BB99243DFA::get_offset_of_U3CU3E4__this_3(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5813[5] = 
{
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80::get_offset_of_transformMaterial_4(),
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80::get_offset_of_startSeconds_5(),
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80::get_offset_of_frameTexture_6(),
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80::get_offset_of_frameRenderTexture_7(),
	Screenshot_tA0FF0B7359459C6837DCE33D269E93C27AC15B80::get_offset_of_frameCubemap_8(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5814[1] = 
{
	U3CU3Ec__DisplayClass1_0_t4A54FA79F4C6132585A7F8E94CA69963916772F2::get_offset_of_random_0(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5817[2] = 
{
	FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4::get_offset_of_pixels_0() + static_cast<int32_t>(sizeof(RuntimeObject)),
	FrameData_tBAF7FFEA70EBD988E28C240481C06DA90C4E1FE4::get_offset_of_count_1() + static_cast<int32_t>(sizeof(RuntimeObject)),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5818[3] = 
{
	U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C::get_offset_of_U3CU3E1__state_0(),
	U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C::get_offset_of_U3CU3E2__current_1(),
	U3CCaptureFrameAsyncU3Ed__26_t5429C11B856B49F1B958909BB0800EF09F86023C::get_offset_of_U3CU3E4__this_2(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5819[13] = 
{
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_U3CstatusU3Ek__BackingField_33(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_offlineRender_34(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_frameTexture_35(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_frameCubemap_36(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_isCapturingFrame_37(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_isCreateRenderTexture_38(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_capturingTime_39(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_capturedFrameCount_40(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_encodedFrameCount_41(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_libAPI_42(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_originalMaximumDeltaTime_43(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_frameQueue_44(),
	VideoCapture_t2BA39B2620AC5A0CC894310FF3A5CE45AD11C3FB::get_offset_of_encodeThread_45(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5820[3] = 
{
	VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3::get_offset_of_filePath_0(),
	VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3::get_offset_of_videoCapture_1(),
	VideoMuxing_t25D06A6E9370F76FDBC1B5D056F9BE0255AB8FE3::get_offset_of_audioCapture_2(),
};
// IL2CPP-generated field-offset tables (types 5821-5841): VideoCaptureCtrl,
// players/UI helpers, camera/mouse-look demo scripts and compiler-generated
// <PrivateImplementationDetails>. Generated — do not hand-edit.
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5821[7] = 
{
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of__audioCapture_14(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_videoCaptureFinishCount_15(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_videoCaptureRequiredCount_16(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_videoMergeThread_17(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_garbageCollectionThread_18(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_isCaptureAudio_19(),
	VideoCaptureCtrl_tE7C7D44FB4083DFD1EC5A827BF281298FB4F57AE::get_offset_of_isOfflineRender_20(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5822[4] = 
{
	VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF::get_offset_of_videoFiles_4(),
	VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF::get_offset_of_videoPlayerImpl_5(),
	VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF::get_offset_of_index_6(),
	VideoPlayer_t39CA97EB2A9AB274DBE4BE3E0D0F41DF29F010AF_StaticFields::get_offset_of_instance_7(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5824[4] = 
{
	VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C::get_offset_of_UIController_4(),
	VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C::get_offset_of_BaseController_5(),
	VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C::get_offset_of_inputActionReference_VideoRecord_6(),
	VideoCaptureUI_t9D33A122A5901DE765EAD49928EB6B56D3A9B27C::get_offset_of_Icon_7(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5827[2] = 
{
	EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54::get_offset_of_OnError_0(),
	EventDelegate_t25FC2A0C7474DD9C0674EB6B0BCE63D0FDFA7B54::get_offset_of_OnComplete_1(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5828[2] = 
{
	FPSDisplay_t0150EDC42472BF4ABEAB6910E5B4921E26CDB4CC::get_offset_of_deltaTime_4(),
	FPSDisplay_t0150EDC42472BF4ABEAB6910E5B4921E26CDB4CC::get_offset_of_text_5(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5829[9] = 
{
	PlatformType_tCFE7573EF622F67BA0192D2FEC2110E741383AEE::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5830[1] = 
{
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5832[3] = 
{
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5833[5] = 
{
	CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28::get_offset_of_myMatrix_4(),
	CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28::get_offset_of_myMeshFilter_5(),
	CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28::get_offset_of_myMeshRenderer_6(),
	CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28::get_offset_of_meshFilters_7(),
	CombineMeshes_t5FBF5FA265991F3ECA765F4260F9CC6DC78E1B28::get_offset_of_meshRenderers_8(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5834[1] = 
{
	SimpleCameraMove_t92A85B2CE71DA14B3312C8800D30A689E88AC885::get_offset_of_sensitivity_4(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5835[3] = 
{
	InputType_t6A74EB06ABBA29763640ADBAC38137D11C0A4E83::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5836[2] = 
{
	EnableMouseLook_t16A036A73774449E0297494E66A79E751D2E619E::get_offset_of_mouseLook_4(),
	EnableMouseLook_t16A036A73774449E0297494E66A79E751D2E619E::get_offset_of_click_5(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5837[4] = 
{
	RotationAxes_t160C17846428E420A1BEA024DC3BCF176F9BAF45::get_offset_of_value___2() + static_cast<int32_t>(sizeof(RuntimeObject)),
	0,
	0,
	0,
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5838[9] = 
{
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_axes_4(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_sensitivityX_5(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_sensitivityY_6(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_minimumX_7(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_maximumX_8(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_minimumY_9(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_maximumY_10(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_rotationY_11(),
	MouseLook_t0FA6420BB99D5DE117BB116C3ABDAA146270627D::get_offset_of_rotationX_12(),
};
IL2CPP_EXTERN_C const int32_t g_FieldOffsetTable5841[1] = 
{
	U3CPrivateImplementationDetailsU3E_t6BC7664D9CD46304D39A7D175BB8FFBE0B9F4528_StaticFields::get_offset_of_CD9A54ED1F18BF97DB08914E280EA7349E11CA2C4885A4D8052552CEBA84208D_0(),
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
| 50.612687 | 310 | 0.848081 | [
"object"
] |
d514d2ac73765d0d3340d2ab7ba6758bf5df71f2 | 3,087 | cpp | C++ | Src/Drawable.cpp | Condzi/Engine | 10f6acdf33b24b8d0aab9efcec570821a73423a1 | [
"MIT"
] | 1 | 2018-07-30T10:51:23.000Z | 2018-07-30T10:51:23.000Z | Src/Drawable.cpp | Condzi/Engine | 10f6acdf33b24b8d0aab9efcec570821a73423a1 | [
"MIT"
] | null | null | null | Src/Drawable.cpp | Condzi/Engine | 10f6acdf33b24b8d0aab9efcec570821a73423a1 | [
"MIT"
] | null | null | null | /*
Conrad 'Condzi' Kubacki 2018
https://github.com/condzi
*/
#include "Engine/EnginePCH.hpp"
#include "Engine/Drawable.hpp"
#include "Engine/Renderer.hpp"
namespace con
{
// Registers this drawable with the global renderer so it is included in the
// next render pass.
IDrawable::IDrawable()
{
	Global._Renderer.add( this );
}
// Unregisters this drawable from the global renderer so no dangling pointer
// is left behind after destruction.
// NOTE(review): ctor/dtor register raw `this` — copying an IDrawable would
// double-register; verify copies are disallowed in the header.
IDrawable::~IDrawable()
{
	Global._Renderer.remove( this );
}
// Sets the draw layer; the renderer uses this value to order drawables.
// @param layer_ new layer index (may be negative).
void IDrawable::setDrawLayer( int16_t layer_ )
{
	layer = layer_;
}
// @return the current draw layer index set via setDrawLayer().
int16_t IDrawable::getDrawLayer() const
{
	return layer;
}
void IDrawable::boundWithEntity( Entity* boundedEntity_ )
{
boundedEntity = boundedEntity_;
}
Entity* IDrawable::getBoundedEntity() const
{
return boundedEntity;
}
bool IDrawable::isBoundedWithEntity() const
{
return boundedEntity != nullptr;
}
void RectangleShape::render( sf::RenderWindow& window )
{
renderInternal( window, *this );
}
void RectangleShape::update()
{
updateFrameTime();
if ( auto opt = getFrameRect(); opt.has_value() ) {
setTexture( animationInfo.sheet );
setTextureRect( opt.value() );
}
}
void CircleShape::render( sf::RenderWindow& window )
{
renderInternal( window, *this );
}
void CircleShape::update()
{
updateFrameTime();
if ( auto opt = getFrameRect(); opt.has_value() ) {
setTexture( animationInfo.sheet );
setTextureRect( opt.value() );
}
}
void Sprite::render( sf::RenderWindow& window )
{
renderInternal( window, *this );
}
void Sprite::update()
{
updateFrameTime();
if ( auto opt = getFrameRect(); opt.has_value() ) {
// Incosistency - rest of drawables need pointer, Sprite needs a reference.
setTexture( *animationInfo.sheet );
setTextureRect( opt.value() );
}
}
void Text::render( sf::RenderWindow& window )
{
renderInternal( window, *this );
}
void IAnimation::updateFrameTime()
{
const auto& ai = animationInfo;
if ( !isAnimation )
return;
if ( errorWithInfo() ) {
isAnimation = false;
return;
}
currentFrameTime += Global.FrameTime;
const sf::Time fps = sf::seconds( 1.f / ai.fps );
if ( currentFrameTime > fps ) {
currentFrameTime -= fps;
if ( ++currentFrameNumber == ai.framesCount )
currentFrameNumber = 0;
}
}
std::optional<sf::IntRect> IAnimation::getFrameRect()
{
const auto& ai = animationInfo;
if ( !isAnimation )
return {};
auto newFrameXpos = ( currentFrameNumber + ai.begin ) * ai.frameSize.x;
return sf::IntRect( newFrameXpos, 0, ai.frameSize.x, ai.frameSize.y );
}
bool IAnimation::errorWithInfo()
{
const auto& ai = animationInfo;
if ( !ai.sheet ) {
print( LogPriority::Error, "null sheet." );
} else if ( ai.sheet->getSize().x < ( ai.framesCount + ai.begin ) * ai.frameSize.x ) {
auto has = ai.sheet->getSize().x;
auto need = ai.framesCount * ai.frameSize.x;
print( LogPriority::Error, "sheet is smaller in X than declared (is %, but % needed).", has, need );
} else if ( ai.sheet->getSize().y < ai.frameSize.y ) {
auto has = ai.sheet->getSize().y;
auto need = ai.frameSize.y;
print( LogPriority::Error, "sheet is smaller in Y than declared (is %, but % needed).", has, need );
} else
return false;
return true;
}
void BitmapText::render( sf::RenderWindow& window )
{
renderInternal( window, *this );
}
} | 20.176471 | 102 | 0.684159 | [
"render"
] |
d515cf4afbd1aff83aa197246725e8bfa424e97e | 1,894 | cpp | C++ | example/1-HelloWindow.cpp | sltn011/OpenGL-Learning | 7f3b8cd730ba9d300406cdd6608afb1db6d23b31 | [
"MIT"
] | 1 | 2020-10-26T17:53:33.000Z | 2020-10-26T17:53:33.000Z | example/1-HelloWindow.cpp | sltn011/OpenGL-Learning | 7f3b8cd730ba9d300406cdd6608afb1db6d23b31 | [
"MIT"
] | null | null | null | example/1-HelloWindow.cpp | sltn011/OpenGL-Learning | 7f3b8cd730ba9d300406cdd6608afb1db6d23b31 | [
"MIT"
] | null | null | null | #include "glad/glad.h"
#include "GLFW/glfw3.h"
#include <iostream>
enum Screen {
width = 1000,
height = 800
};
void framebufferSizeCallback(
GLFWwindow *window,
int width,
int height
) {
glViewport(0, 0, width, height);
}
void processInput(
GLFWwindow *window
) {
if (glfwGetKey(window, GLFW_KEY_BACKSPACE) == GLFW_PRESS) {
glfwSetWindowShouldClose(window, true);
}
}
void render(
GLFWwindow *window
) {
glClearColor(0.3f, 0.15f, 0.6f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
int main(
) {
// Initialize GLFW
if(!glfwInit()) {
return 1;
}
// Configure GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Create window
GLFWwindow *window = glfwCreateWindow(Screen::width, Screen::height, "Hello, Window", NULL, NULL);
if (!window) {
std::cout << "Error creating window!" << std::endl;
glfwTerminate();
return 1;
}
// Set window as current context
glfwMakeContextCurrent(window);
// Load OpenGL functions pointer given by GLFW
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
std::cout << "Failed to initialize GLAD" << std::endl;
return 1;
}
// Set dimensions of rendering window
glViewport(0, 0, Screen::width, Screen::height);
// Set callback function for window resize
glfwSetFramebufferSizeCallback(window, framebufferSizeCallback);
// Render loop
while (!glfwWindowShouldClose(window)) {
processInput(window);
render(window);
// Swap front and back buffers
glfwSwapBuffers(window);
// Process pending events
glfwPollEvents();
}
// Deallocate GLFW resources
glfwTerminate();
return 0;
} | 22.282353 | 102 | 0.642555 | [
"render"
] |
d515e29221ad4d539067fe55502d603232961e6a | 18,423 | cpp | C++ | eraepub/src/RectHelper.cpp | Mimars-Project/OpenReadEra-20.03.26 | 8db0c096e681947d02fa15c3eaa283e389ccf8a9 | [
"FTL"
] | null | null | null | eraepub/src/RectHelper.cpp | Mimars-Project/OpenReadEra-20.03.26 | 8db0c096e681947d02fa15c3eaa283e389ccf8a9 | [
"FTL"
] | null | null | null | eraepub/src/RectHelper.cpp | Mimars-Project/OpenReadEra-20.03.26 | 8db0c096e681947d02fa15c3eaa283e389ccf8a9 | [
"FTL"
] | 1 | 2021-07-21T07:50:33.000Z | 2021-07-21T07:50:33.000Z | /*
* Copyright (C) 2013-2020 READERA LLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Developers: ReadEra Team (2013-2020), Tarasus (2018-2020).
*/
#include "include/RectHelper.h"
#include "include/crconfig.h"
void RectHelper::Invalidate()
{
finalNode_ = NULL;
absRect_ = lvRect();
NodeIndex_ = -1;
LineIndex_ = 0;
NodeLineIndex_ = 0;
NodeIsInvisible_ = false;
}
void RectHelper::Init(ldomXRange *range)
{
ldomNode * Node = range->getStartNode();
Init(Node);
}
void RectHelper::Init(ldomNode *Node)
{
if(Node == Node_)
{
return;
}
Node_ = Node;
ldomNode * finalNode = GetFinalNode();
if (finalNode != finalNode_)
{
#if DEBUG_GETRECT_LOGS
CRLog::warn("Init FinalNode");
#endif
InitFinalNode(finalNode);
}
#if DEBUG_GETRECT_LOGS
CRLog::warn("Update node");
#endif
InitNode(Node_);
#if DEBUG_GETRECT_LOGS
//CRLog::error("NEW FINALNODE = [%s]",LCSTR(finalNode_->getXPath()));
//CRLog::error("Node_ Text = %s",LCSTR(Node_->getText()));
#endif
}
bool RectHelper::NodeIsInvisible(ldomNode *node)
{
if (node == NULL)
{
return false;
}
if (node->getRendMethod() == erm_invisible)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("INVISIBLE NODE FOUND");
CRLog::error("node = [%s]", LCSTR(node->getXPath()));
#endif
return true;
}
return false;
}
ldomNode *RectHelper::GetFinalNode()
{
ldomNode * p = Node_->isElement() ? Node_: Node_->getParentNode() ;
ldomNode * mainNode = p->getCrDom()->getRootNode();
ldomNode * finalNode = NULL;
for (; p; p = p->getParentNode())
{
int rm = p->getRendMethod();
if (rm == erm_final || rm == erm_list_item)
{
finalNode = p; // found final block
}
else if (p->getRendMethod() == erm_invisible)
{
return p;
//return false; // invisible !!!
}
if (p == mainNode)
{
break;
}
}
return finalNode;
}
void RectHelper::InitFinalNode(ldomNode *finalNode)
{
if (isInit)
{
Invalidate();
}
isInit = true;
NodeIsInvisible_ = NodeIsInvisible(finalNode);
finalNode_ = finalNode;
if (finalNode == NULL)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("INITFINALNODE NULL");
#endif
return;
}
finalNode_->getAbsRect(absRect_);
RenderRectAccessor r(finalNode_);
finalNode_->renderFinalBlock(txtform_, &r, r.getWidth());
#if DEBUG_GETRECT_LOGS
//CRLog::error("txtform_->GetLineCount() = %d", txtform_->GetLineCount());
//CRLog::error("finalnode = [%s]", LCSTR(finalNode_->getXPath()));
//CRLog::error("finalnode childcount = %d", finalNode_->getChildCount());
#endif
}
void RectHelper::InitNode(ldomNode *Node)
{
// text node
srcIndex_ = -1;
srcLen_ = -1;
lastIndex_ = -1;
lastLen_ = -1;
lastOffset_ = -1;
if (finalNode_ == NULL || NodeIsInvisible_)
{
return;
}
int index = FindNodeIndex(Node, NodeIndex_);
NodeIndex_ = index;
int count = txtform_->GetSrcCount();
if (index > 0)
{
const src_text_fragment_t *src = txtform_->GetSrcInfo(index);
bool isObject = (src->flags & LTEXT_SRC_IS_OBJECT) != 0;
srcIndex_ = index;
srcLen_ = isObject ? 0 : src->t.len;
lastIndex_ = index - 1;
const src_text_fragment_t *src2 = txtform_->GetSrcInfo(lastIndex_);
bool isObject2 = (src2->flags & LTEXT_SRC_IS_OBJECT) != 0;
lastLen_ = isObject2 ? 0 : src2->t.len;
lastOffset_ = isObject2 ? 0 : src2->t.offset;
}
else if (index == 0)
{
srcIndex_ = index;
const src_text_fragment_t *src = txtform_->GetSrcInfo(index);
bool isObject = (src->flags & LTEXT_SRC_IS_OBJECT) != 0;
srcLen_ = isObject ? 0 : src->t.len;
}
else if (count > 0) // srcindex < 0
{
lastIndex_ = (FindLastIndexEnable_) ? FindLastIndex(Node) : count - 1;
const src_text_fragment_t *src = txtform_->GetSrcInfo(lastIndex_);
bool isObject = (src->flags & LTEXT_SRC_IS_OBJECT) != 0;
srcIndex_ = lastIndex_;
srcLen_ = isObject ? 0 : src->t.len;
lastLen_ = srcLen_;
lastOffset_ = isObject ? 0 : src->t.offset;
}
else
{
#if DEBUG_GETRECT_LOGS
CRLog::error("GetSrcCount: Final node contains no text nodes!");
#endif
}
LineIndex_ = FindLineIndex(LineIndex_);
NodeLineIndex_ = LineIndex_ ;
}
bool RectHelper::ifnull(ldomXPointerEx xpointer, lvRect &rect)
{
// no base final node, using blocks
//lvRect rc;
ldomNode *node = xpointer.getNode();
//CRLog::error("NEW NODE = [%s]",LCSTR(node->getXPath()));
//CRLog::error("Node Text = %s",LCSTR(node->getText()));
int offset = xpointer.getOffset();
if (offset < 0 || node->getChildCount() == 0)
{
CRLog::error("Ifnull 1");
node->getAbsRect(rect);
return true;
//return rc.topLeft();
}
if (offset < node->getChildCount())
{
CRLog::error("Ifnull 2");
node->getChildNode(offset)->getAbsRect(rect);
return true;
//return rc.topLeft();
}
CRLog::error("Ifnull 3");
node->getChildNode(node->getChildCount() - 1)->getAbsRect(rect);
return true;
//return rc.bottomRight();
}
//get line where current node starts
int RectHelper::FindLineIndex(int start)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("start = %d, linecount = %d", start, txtform_->GetLineCount());
#endif
int l = 0;
int w = 0;
bool found = false;
for (l = start; l < txtform_->GetLineCount(); l++)
{
const formatted_line_t *frmline = txtform_->GetLineInfo(l);
for (w = 0; w < (int) frmline->word_count; w++)
{
const formatted_word_t *word = &frmline->words[w];
if (word->src_text_index == srcIndex_)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("Found Line Index1 = %d", l);
#endif
found = true;
break;
}
}
if (found)
{
break;
}
}
if (found == false)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("NOT Found Line Index = 0");
#endif
return 0;
}
if (start > 0 && l == start)
{
for (l = start; l >= 0; l--)
{
const formatted_line_t *frmline = txtform_->GetLineInfo(l);
w = (l == start) ? w - 1 : (int) frmline->word_count;
for (; w >= 0; w--)
{
formatted_word_t frmword = frmline->words[w];
if (frmword.src_text_index != srcIndex_)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("Found Line Index2 = %d", l);
#endif
return l;
}
}
}
}
else
{
return l;
}
}
int RectHelper::FindNodeIndex(ldomNode *node, int start)
{
start = (start < 0) ? 0 : start;
int count = txtform_->GetSrcCount();
for (int i = start; i < count; i++)
{
const src_text_fragment_t *src = txtform_->GetSrcInfo(i);
if (src->object == node)
{
return i;
}
}
//RLog::error("start = %d, count = %d",start,count);
#if DEBUG_GETRECT_LOGS
CRLog::error("NOT FOUND NODE INDEX RETRYING");
#endif
//nodes in txtform appear to be able not to be in order,
// so we retry search cycle from zero, to find node index again
if (start > 0)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("start = %d, count = %d", start, count);
#endif
return FindNodeIndex(node, 0);
}
return -1;
}
int RectHelper::FindLastIndex(ldomNode *node)
{
// ПЕРЕНЕСЕНО ИЗ СТАРОЙ РЕАЛИЗАЦИИ
// Проблемный участок, от которого одновременно зависит
// генерация оглавлений (Гаррисон), и генерация хитбоксов в некоторых случаях (Storia della mafia)
// К функции GetRect добавлен параметр forlvpoint, который при вызове в генерации оглавлений, включает этот кусок.
int count = txtform_->GetSrcCount();
if (count <= 0)
{
return -1;
}
ldomXPointerEx xp1(node, 0);
for (int i = 0; i < count; i++)
{
const src_text_fragment_t *src = txtform_->GetSrcInfo(i);
bool isObject = (src->flags & LTEXT_SRC_IS_OBJECT) != 0;
int offset = isObject ? 0 : src->t.offset;
ldomXPointerEx xp2((ldomNode *) src->object, offset);
if (xp2.compare(xp1) > 0)
{
//CRLog::error("FindLastIndex = %d",i);
return i;
}
}
#if DEBUG_GETRECT_LOGS
CRLog::error("FindLastIndex Not found = %d", count - 1);
#endif
return count - 1;
}
void RectHelper::ResetLineIndex()
{
LineIndex_ = NodeLineIndex_;
}
lvRect RectHelper::getRect(ldomWord word, bool init)
{
if(init)
{
Init(word.getNode());
}
return getRect(word);
}
lvRect RectHelper::getRect(ldomXPointer xPointer)
{
Init(xPointer.getNode());
lvRect rect;
processRect(xPointer,rect);
return rect;
}
lvRect RectHelper::getRect(ldomWord word)
{
#if DEBUG_GETRECT_LOGS
CRLog::trace("GetRect START");
#endif
lvRect rect;
if (word.isNull())
{
#if DEBUG_GETRECT_LOGS
CRLog::error("word is null STOP");
#endif
return rect;
}
// get start and end rects
lvRect rc1;
lvRect rc1old;
lvRect rc2;
lvRect rc2old;
ldomXPointerEx xp1 = word.getStartXPointer();
ldomXPointerEx xp2 = word.getEndXPointer();
#if DEBUG_GETRECT_LOGS
CRLog::error("Nodetext = [%s] , LETTER = [%s]", LCSTR(word.getNode()->getText()), LCSTR(word.getText()));
#endif
//bool a1 = false;
//bool a2 = false;
//a1 = this->processRect(xp1, rc1);
//a2 = this->processRect(xp2, rc2);
//xp1.getRect(rc1old,false);
//xp2.getRect(rc2old,false);
#if DEBUG_GETRECT_LOGS
CRLog::trace("GetRect STOP1");
#endif
//if(rc1 != rc1old)
//{
// CRLog::warn("RC1 New rect != old rect! ^ [%d:%d][%d:%d] != [%d:%d][%d:%d]",rc1.left,rc1.right,rc1.top,rc1.bottom,rc1old.left,rc1old.right,rc1old.top,rc1old.bottom);
//}
//if (rc2 != rc2old)
//{
// CRLog::warn("RC2 New rect != old rect! ^ [%d:%d][%d:%d] != [%d:%d][%d:%d]",rc2.left,rc2.right,rc2.top,rc2.bottom,rc2old.left,rc2old.right,rc2old.top,rc2old.bottom);
//}
//if (!xp1.getRect(rc1,false) || !xp2.getRect(rc2,false)) //OLD OLD
//if (!xp1.getRect(rc1,false) || !this->processRect(xp2, rc2)) //OLD NEW
if (!this->processRect(xp1, rc1) || !this->processRect(xp2, rc2)) //NEW NEW
//if (!a1 || !a2)
{
return rect;
}
if (rc1.top == rc2.top && rc1.bottom == rc2.bottom)
{
// on same line
rect.left = rc1.left;
rect.top = rc1.top;
rect.right = rc2.right;
rect.bottom = rc2.bottom;
#if DEBUG_GETRECT_LOGS
CRLog::trace("GetRect STOP2");
#endif
return rect;
}
// on different lines
ldomXRange range(xp1, xp2);
ldomNode *parent = range.getNearestCommonParent();
if (!parent)
{
#if DEBUG_GETRECT_LOGS
CRLog::trace("GetRect STOP3");
#endif
return rect;
}
parent->getAbsRect(rect);
rect.top = rc1.top;
rect.bottom = rc2.bottom;
rect.left = rc1.left < rc2.left ? rc1.left : rc2.left;
rect.right = rc1.right > rc2.right ? rc1.right : rc2.right;
return rect;
}
bool RectHelper::processRect(ldomXPointerEx xpointer, lvRect &rect)
{
if (NodeIsInvisible_)
{
return false;
}
//return xpointer.getRect(rect, false);
if (finalNode_ == NULL)
{
bool res = ifnull(xpointer, rect);
#if DEBUG_GETRECT_LOGS
CRLog::error("Rect ifnull = [%d:%d][%d:%d]", rect.left, rect.right, rect.top, rect.bottom);
#endif
return res;
}
lvRect rc;
finalNode_->getAbsRect(rc);
//CRLog::error("absrect = [%d:%d][%d:%d]",rc.left,rc.right,rc.top,rc.bottom);
if (rc.height() == 0 && rc.width() > 0)
{
rect = rc;
rect.bottom++;
#if DEBUG_GETRECT_LOGS
CRLog::error("1");
#endif
return true;
}
if (NodeIndex_ < 0 && lastIndex_ < 0)
{
//CRLog::error("LastIndex < 0");
return false;
}
ldomNode *node = xpointer.getNode();
#if DEBUG_GETRECT_LOGS
CRLog::error("NEW FINAL NODE = [%s]", LCSTR(finalNode_->getXPath()));
// CRLog::error("FINAL NODE TEXT = [%s]",LCSTR(finalNode_->getText()));
// CRLog::error("NODE = [%s]",LCSTR(Node_->getXPath()));
// CRLog::error("NODE TEXT = [%s]",LCSTR(Node_->getText()));
#endif
int offset = xpointer.getOffset();
//CRLog::error("NEW offset = %d",offset);
//CRLog::error("NEW lastoffset = %d",lastOffset_);
if (NodeIndex_ < 0)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("NEW replacing offset with last offset");
#endif
offset = lastOffset_;
}
#if DEBUG_GETRECT_LOGS
CRLog::error("srcIndex = %d srcLen = %d lastIndex = %d lastLen = %d lastOffset = %d nodeIndex =%d ", srcIndex_, srcLen_, lastIndex_, lastLen_, lastOffset_, NodeIndex_);
#endif
int count = txtform_->GetLineCount();
int start = LineIndex_;
for (int l = start; l < count; l++)
{
const formatted_line_t *frmline = txtform_->GetLineInfo(l);
for (int w = 0; w < (int) frmline->word_count; w++)
{
const formatted_word_t *word = &frmline->words[w];
bool lastWord = (l == txtform_->GetLineCount() - 1 && w == frmline->word_count - 1);
if (word->src_text_index >= srcIndex_ || lastWord)
{
#if DEBUG_GETRECT_LOGS
CRLog::error("l = %d, w = %d lastword = %d line_index = %d", l, w, (lastWord) ? 1 : 0, LineIndex_);
//CRLog::error("word->src_text_index > srcIndex || offset <= word->t.start");
//CRLog::error("%d>%d || %d <= %d",word->src_text_index,srcIndex_,offset,word->t.start);
//CRLog::error("(offset < word->t.start + word->t.len) || (offset == srcLen && offset == word->t.start + word->t.len)");
//CRLog::error("(%d < %d + %d) || (%d == %d && %d == %d + %d)",offset, word->t.start , word->t.len,offset , srcLen_ ,offset , word->t.start , word->t.len);
#endif
// found word from same src line
if (word->src_text_index > srcIndex_ || offset <= word->t.start)
{
// before this word
rect.left = word->x + rc.left + frmline->x;
//rect.top = word->y + rc.top + frmline->y + frmline->baseline;
rect.top = rc.top + frmline->y;
rect.right = rect.left + 1;
rect.bottom = rect.top + frmline->height;
LineIndex_ = l;
#if DEBUG_GETRECT_LOGS
CRLog::error("word->x = %d, rc.left = %d, frmline->x = %d", word->x, rc.left, frmline->x);
CRLog::error("Rect1 = [%d:%d][%d:%d]", rect.left, rect.right, rect.top, rect.bottom);
//CRLog::error("LINEINDEX END = %d",LineIndex_);
#endif
return true;
}
else if ((offset < word->t.start + word->t.len) || (offset == srcLen_ && offset == word->t.start + word->t.len))
{
// pointer inside this word
LVFont *font = (LVFont *) txtform_->GetSrcInfo(srcIndex_)->t.font;
lUInt16 widths[512];
lUInt8 flg[512];
lString16 str = node->getText();
font->measureText(str.c_str() + word->t.start, offset - word->t.start, widths, flg, word->width + 50, '?', txtform_->GetSrcInfo(srcIndex_)->letter_spacing);
int chx = widths[offset - word->t.start - 1];
rect.left = word->x + rc.left + frmline->x + chx;
//rect.top = word->y + rc.top + frmline->y + frmline->baseline;
rect.top = rc.top + frmline->y;
rect.right = rect.left + 1;
rect.bottom = rect.top + frmline->height;
LineIndex_ = l;
#if DEBUG_GETRECT_LOGS
CRLog::error("Rect2 = [%d:%d][%d:%d]", rect.left, rect.right, rect.top, rect.bottom);
//CRLog::error("LINEINDEX END = %d",LineIndex_);
#endif
return true;
}
else if (lastWord)
{
// after last word
rect.left = word->x + rc.left + frmline->x + word->width;
//rect.top = word->y + rc.top + frmline->y + frmline->baseline;
rect.top = rc.top + frmline->y;
rect.right = rect.left + 1;
rect.bottom = rect.top + frmline->height;
LineIndex_ = l;
#if DEBUG_GETRECT_LOGS
CRLog::error("Rect3 = [%d:%d][%d:%d]", rect.left, rect.right, rect.top, rect.bottom);
//CRLog::error("LINEINDEX END = %d",LineIndex_);
#endif
return true;
}
}
}
}
#if DEBUG_GETRECT_LOGS
//CRLog::error("new getrect return false");
#endif
return false;
} | 31.438567 | 176 | 0.548391 | [
"object"
] |
d51926eae1f76139d1228e05e4ea0dbaef5b3b70 | 23,772 | cpp | C++ | lib/Base/Tensor.cpp | Ewenwan/glow | d1a5dda6f6419a9cb3ae20182e132a55b138098c | [
"Apache-2.0"
] | null | null | null | lib/Base/Tensor.cpp | Ewenwan/glow | d1a5dda6f6419a9cb3ae20182e132a55b138098c | [
"Apache-2.0"
] | null | null | null | lib/Base/Tensor.cpp | Ewenwan/glow | d1a5dda6f6419a9cb3ae20182e132a55b138098c | [
"Apache-2.0"
] | null | null | null | /**
* Copyright (c) Glow Contributors. See CONTRIBUTORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "glow/Base/Tensor.h"
#include "glow/Base/Type.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"
#include <glog/logging.h>
using namespace glow;
namespace {
/// This is a helper method that's used in the visualization of tensors.
template <class ElemTy> static char valueToChar(ElemTy input) {
char ch = ' ';
const double val = input;
if (val > 0.2) {
ch = '.';
}
if (val > 0.4) {
ch = ',';
}
if (val > 0.6) {
ch = ':';
}
if (val > 0.8) {
ch = 'o';
}
if (val > 1.0) {
ch = 'O';
}
if (val > 1.5) {
ch = '0';
}
if (val > 2.0) {
ch = '@';
}
if (val < -0.1) {
ch = '-';
}
if (val < -0.2) {
ch = '~';
}
if (val < -0.4) {
ch = '=';
}
if (val < -1.0) {
ch = '#';
}
return ch;
}
static void dumpShape(llvm::ArrayRef<dim_t> shape, llvm::raw_ostream &os) {
os << "shape: ( ";
for (auto &d : shape) {
os << d << " ";
}
os << ")";
}
template <class ElemTy>
static void dumpGenericImpl(Handle<ElemTy> handle, llvm::raw_ostream &os,
unsigned maxNumElem) {
auto shape = handle.dims();
size_t numDims = shape.size();
auto &Ty = handle.getType();
// Check for 0-dimensional tensor.
if (!numDims) {
os << "[ Scalar containing: ";
llvm::write_double(os, handle.raw(0), llvm::FloatStyle::Fixed, 3);
os << " ]\n";
return;
}
// Output shape.
dumpShape(shape, os);
os << "\n";
// Check for tensor of size 0.
if (handle.getUnpaddedSizeInBytes() == 0) {
os << "[ tensor has no elements ]\n";
return;
}
ElemTy mx = handle.raw(0);
ElemTy mn = handle.raw(0);
for (auto elem : handle) {
mx = std::max(mx, elem);
mn = std::min(mn, elem);
}
// Check for zero tensor.
if (mn == ElemTy(.0) && mx == ElemTy(.0)) {
os << "[ Zero tensor ]\n";
return;
}
// Output max and min.
os << "max: ";
llvm::write_double(os, mx, llvm::FloatStyle::Fixed, 3);
os << " min: ";
llvm::write_double(os, mn, llvm::FloatStyle::Fixed, 3);
os << "\n";
os << "[";
for (size_t i = 0, e = std::min<size_t>(maxNumElem, handle.size()); i < e;
i++) {
// Print one open brace at the beginning of every row, slice, and tensor.
for (size_t j = 0, e = numDims - 1; numDims > 1 && j < e; j++) {
if (i % Ty.getSliceSize(j + 1) == 0) {
// This iteration of outer loop is a new row, slice or tensor.
os << "[";
}
}
// Print the value at the current index.
llvm::write_double(os, handle.raw(i), llvm::FloatStyle::Fixed, 3);
// Print one closed brace at the end of every row, slice, or tensor.
for (size_t j = 0, e = numDims - 1; numDims > 1 && j < e; j++) {
size_t next_index = i + 1;
if (next_index % Ty.getSliceSize(j + 1) == 0u) {
os << "]";
}
}
os << ", ";
// Print one newline at the end of every row, slice, or tensor.
for (size_t j = 0, e = numDims - 1; numDims > 1 && j < e; j++) {
size_t next_index = i + 1;
if (next_index % Ty.getSliceSize(j + 1) == 0u) {
// Next iteration of outer loop will be a new row, slice or tensor.
os << "\n";
}
}
}
if (handle.size() > maxNumElem) {
os << "...";
}
os << "]\n";
os.flush();
}
template <class ElemTy>
static void dumpAsciiGenericImpl(Handle<ElemTy> handle, llvm::raw_ostream &os) {
auto d = handle.dims();
if (d.size() == 2) {
for (dim_t x = 0; x < d[0]; x++) {
for (dim_t y = 0; y < d[1]; y++) {
auto val = handle.at({x, y});
os << valueToChar(val);
}
os << "\n";
}
} else if (d.size() == 3) {
// Print monochrome (one-color channel) tensors:
if (d[2] == 1) {
for (dim_t x = 0; x < d[0]; x++) {
for (dim_t y = 0; y < d[1]; y++) {
auto val = handle.at({x, y, 0});
os << valueToChar(val);
}
os << "\n";
}
} else {
for (dim_t z = 0; z < d[2]; z++) {
os << "\n";
for (dim_t x = 0; x < d[0]; x++) {
for (dim_t y = 0; y < d[1]; y++) {
auto val = handle.at({x, y, z});
os << valueToChar(val);
}
os << "\n";
}
}
}
} else {
llvm_unreachable("Invalid tensor size");
}
os.flush();
}
/// This is a slow generic transpose. This method performs a single for loop
/// over a single dimension, or if we've reached the last dimension perform a
/// single copy of a single element.
template <class ElemTy>
static void
transposeGenericImpl(const Handle<ElemTy> &src, Handle<ElemTy> &dest,
dim_t *srcCoor, dim_t *destCoor,
llvm::ArrayRef<unsigned_t> shuffle, unsigned depth = 0) {
if (depth == shuffle.size()) {
auto srcIdx = llvm::ArrayRef<dim_t>(srcCoor, depth);
auto destIdx = llvm::ArrayRef<dim_t>(destCoor, depth);
dest.at(destIdx) = src.at(srcIdx);
return;
}
// Iterate over one dimension and continue recursively to the next dim.
for (dim_t x = 0, e = dest.dims()[depth]; x < e; x++) {
unsigned_t swizzledDepth = shuffle[depth];
srcCoor[swizzledDepth] = x;
destCoor[depth] = x;
transposeGenericImpl(src, dest, srcCoor, destCoor, shuffle, depth + 1);
}
}
/// Faster function for transposing a tensor for important/common tensor
/// shapes. If a transpose successfully occurs, the function \returns true;
/// otherwise it \returns false, representing no transpose occurred and some
/// other transpose function (e.g. transposeGenericImpl) must be called. \p
/// dest is the tensor to transpose, and \p shuffle defines how to transpose.
template <class ElemTy>
static bool tryTransposeFastImpl(const Handle<ElemTy> &src,
Handle<ElemTy> &dest,
llvm::ArrayRef<unsigned_t> shuffle) {
const dim_t numDims = dest.dims().size();
dim_t srcCoorArr[max_tensor_dimensions];
dim_t destCoorArr[max_tensor_dimensions] = {0};
auto srcCoor = llvm::ArrayRef<dim_t>(srcCoorArr, numDims);
auto destCoor = llvm::ArrayRef<dim_t>(destCoorArr, numDims);
/// This defines a single depth of the for loop used to iterate over the
/// source and destination tensors for transposing.
#define TRANSPOSE_LOOP_LEVEL(DEPTH_) \
for (srcCoorArr[shuffle[DEPTH_]] = 0, destCoorArr[DEPTH_] = 0; \
destCoorArr[DEPTH_] < dest.dims()[DEPTH_]; \
srcCoorArr[shuffle[DEPTH_]]++, destCoorArr[DEPTH_]++)
switch (numDims) {
case 2:
TRANSPOSE_LOOP_LEVEL(1) {
TRANSPOSE_LOOP_LEVEL(0) { dest.at(destCoor) = src.at(srcCoor); }
}
return true;
case 4:
TRANSPOSE_LOOP_LEVEL(1) {
TRANSPOSE_LOOP_LEVEL(2) {
TRANSPOSE_LOOP_LEVEL(0) {
TRANSPOSE_LOOP_LEVEL(3) { dest.at(destCoor) = src.at(srcCoor); }
}
}
}
return true;
}
return false;
}
template <class ElemTy>
static void transposeSelectImpl(const Handle<ElemTy> &src, Handle<ElemTy> &dest,
llvm::ArrayRef<unsigned_t> shuffle) {
bool transposeOccurred = tryTransposeFastImpl(src, dest, shuffle);
if (!transposeOccurred) {
dim_t srcCoor[max_tensor_dimensions];
dim_t destCoor[max_tensor_dimensions];
transposeGenericImpl(src, dest, srcCoor, destCoor, shuffle);
}
}
} // namespace
void glow::dumpAsciiImpl(const Tensor *T, llvm::raw_ostream &os) {
switch (T->getElementType()) {
case ElemKind::FloatTy:
return dumpAsciiGenericImpl(T->getHandle<float>(), os);
case ElemKind::Float16Ty:
return dumpAsciiGenericImpl(T->getHandle<float16_t>(), os);
case ElemKind::Int8QTy:
return dumpAsciiGenericImpl(T->getHandle<int8_t>(), os);
case ElemKind::UInt8QTy:
return dumpAsciiGenericImpl(T->getHandle<uint8_t>(), os);
case ElemKind::Int16QTy:
return dumpAsciiGenericImpl(T->getHandle<int16_t>(), os);
case ElemKind::Int32QTy:
return dumpAsciiGenericImpl(T->getHandle<int32_t>(), os);
case ElemKind::Int32ITy:
return dumpAsciiGenericImpl(T->getHandle<int32_t>(), os);
case ElemKind::Int64ITy:
return dumpAsciiGenericImpl(T->getHandle<int64_t>(), os);
case ElemKind::UInt8FusedQTy:
return dumpAsciiGenericImpl(T->getHandle<uint8_t>(), os);
case ElemKind::UInt8FusedFP16QTy:
return dumpAsciiGenericImpl(T->getHandle<uint8_t>(), os);
case ElemKind::UInt4FusedFP16QTy:
return dumpAsciiGenericImpl(T->getHandle<uint8_t>(), os);
case ElemKind::BoolTy:
return dumpAsciiGenericImpl(T->getHandle<bool>(), os);
}
}
void glow::dumpAsciiImpl(const Tensor *T) { dumpAsciiImpl(T, llvm::outs()); }
void glow::dumpImpl(const Tensor *T, llvm::raw_ostream &os,
unsigned maxNumElem) {
switch (T->getElementType()) {
case ElemKind::FloatTy:
return dumpGenericImpl(T->getHandle<float>(), os, maxNumElem);
case ElemKind::Float16Ty:
return dumpGenericImpl(T->getHandle<float16_t>(), os, maxNumElem);
case ElemKind::Int8QTy:
return dumpGenericImpl(T->getHandle<int8_t>(), os, maxNumElem);
case ElemKind::UInt8QTy:
return dumpGenericImpl(T->getHandle<uint8_t>(), os, maxNumElem);
case ElemKind::Int16QTy:
return dumpGenericImpl(T->getHandle<int16_t>(), os, maxNumElem);
case ElemKind::Int32QTy:
return dumpGenericImpl(T->getHandle<int32_t>(), os, maxNumElem);
case ElemKind::Int32ITy:
return dumpGenericImpl(T->getHandle<int32_t>(), os, maxNumElem);
case ElemKind::Int64ITy:
return dumpGenericImpl(T->getHandle<int64_t>(), os, maxNumElem);
case ElemKind::UInt8FusedQTy:
return dumpGenericImpl(T->getHandle<uint8_t>(), os, maxNumElem);
case ElemKind::UInt8FusedFP16QTy:
return dumpGenericImpl(T->getHandle<uint8_t>(), os, maxNumElem);
case ElemKind::UInt4FusedFP16QTy:
return dumpGenericImpl(T->getHandle<uint8_t>(), os, maxNumElem);
case ElemKind::BoolTy:
return dumpGenericImpl(T->getHandle<bool>(), os, maxNumElem);
}
}
void glow::dumpImpl(const Tensor *T, unsigned maxNumElem) {
dumpImpl(T, llvm::outs(), maxNumElem);
}
void glow::dumpImpl(const Tensor *T) { dumpImpl(T, llvm::outs()); }
// Dump functions.
void Tensor::dump(llvm::raw_ostream &os) const { dumpImpl(this, os); }
void Tensor::dump() const { dumpImpl(this, llvm::outs()); }
std::string Tensor::toString() const {
std::string storage;
llvm::raw_string_ostream os(storage);
dumpImpl(this, os);
return os.str();
}
std::string Tensor::getShapeToString() const {
std::string storage;
llvm::raw_string_ostream os(storage);
dumpShape(dims(), os);
return os.str();
}
void Tensor::dump(llvm::raw_ostream &os, unsigned maxNumElem) const {
dumpImpl(this, os, maxNumElem);
}
void Tensor::dump(unsigned maxNumElem) const {
dumpImpl(this, llvm::outs(), maxNumElem);
}
std::string Tensor::toString(unsigned maxNumElem) const {
std::string storage;
llvm::raw_string_ostream os(storage);
dumpImpl(this, os, maxNumElem);
return os.str();
}
/// Dump a textual representation of a specific number of elements in the Tensor
/// to std::string.
void glow::genericTranspose(const Tensor *src, Tensor *dest,
llvm::ArrayRef<unsigned_t> shuffle) {
DCHECK(src->dims().size() == shuffle.size())
<< "Invalid dimensions " << src->dims().size()
<< " != " << src->dims().size();
dim_t newSizes[max_tensor_dimensions];
// Generate the swizzled dimensions.
auto origDims = src->dims();
for (unsigned i = 0; i < origDims.size(); i++) {
newSizes[i] = origDims[shuffle[i]];
}
// Resize the tensor to the transposed shape.
auto destType = Type::newShape(src->getType(), {newSizes, origDims.size()});
// genericTranspose function doesn't know how to set non-trivial strides and
// alignments and it cannot figure out the correct ones as it can be
// backend-specific. Therefore set the type to destType only if it is not set
// properly by the caller yet.
// Reset should be called anyways to allocate memory for the tensor.
if (dest->dims() != destType.dims()) {
dest->reset(destType);
} else {
dest->reset(dest->getType());
}
switch (src->getElementType()) {
case ElemKind::FloatTy: {
auto srcH = src->getHandle<float>();
auto destH = dest->getHandle<float>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Float16Ty: {
auto srcH = src->getHandle<float16_t>();
auto destH = dest->getHandle<float16_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Int8QTy: {
auto srcH = src->getHandle<int8_t>();
auto destH = dest->getHandle<int8_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::UInt8QTy: {
auto srcH = src->getHandle<uint8_t>();
auto destH = dest->getHandle<uint8_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Int16QTy: {
auto srcH = src->getHandle<int16_t>();
auto destH = dest->getHandle<int16_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Int32QTy: {
auto srcH = src->getHandle<int32_t>();
auto destH = dest->getHandle<int32_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Int32ITy: {
auto srcH = src->getHandle<int32_t>();
auto destH = dest->getHandle<int32_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::Int64ITy: {
auto srcH = src->getHandle<int64_t>();
auto destH = dest->getHandle<int64_t>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
case ElemKind::UInt8FusedQTy: {
llvm_unreachable("Transposing UInt8FusedQTy is unsupported.");
}
case ElemKind::UInt8FusedFP16QTy: {
llvm_unreachable("Transposing UInt8FusedFP16QTy is unsupported.");
}
case ElemKind::UInt4FusedFP16QTy: {
llvm_unreachable("Transposing UInt4FusedFP16QTy is unsupported.");
}
case ElemKind::BoolTy: {
auto srcH = src->getHandle<bool>();
auto destH = dest->getHandle<bool>();
transposeSelectImpl(srcH, destH, shuffle);
return;
}
}
}
/// Pad \p currDims with trailing singleton dimensions until it contains
/// max_tensor_dimensions entries, and return the padded shape.
ShapeVector glow::expandDimsToMax(llvm::ArrayRef<dim_t> currDims) {
  ShapeVector padded(currDims.begin(), currDims.end());
  while (padded.size() < max_tensor_dimensions) {
    padded.push_back(1);
  }
  return padded;
}
/// Initialize the tensor payload according to \p init:
/// - Zero: fill with zeros.
/// - Broadcast: fill every element with \p val (cast to the element type).
/// - Xavier: draw values using \p PRNG; only supported for float kinds.
void Tensor::init(InitKind init, float val, PseudoRNG &PRNG) {
  assert(!isDeviceResident() && "Tensor must reside on host to access data.");
  switch (init) {
  case InitKind::Zero:
    zero();
    break;

  case InitKind::Broadcast: {
    // Dispatch on the element kind so the fill goes through a typed handle.
    switch (getElementType()) {
    case ElemKind::FloatTy: {
      getHandle<float>().clear(val);
      break;
    }
    case ElemKind::Float16Ty: {
      getHandle<float16_t>().clear(float16_t(val));
      break;
    }
    case ElemKind::Int8QTy: {
      getHandle<int8_t>().clear(val);
      break;
    }
    case ElemKind::UInt8QTy: {
      getHandle<uint8_t>().clear(val);
      break;
    }
    case ElemKind::Int16QTy: {
      getHandle<int16_t>().clear(val);
      break;
    }
    case ElemKind::Int32QTy: {
      getHandle<int32_t>().clear(val);
      break;
    }
    case ElemKind::Int32ITy: {
      getHandle<int32_t>().clear(val);
      break;
    }
    case ElemKind::Int64ITy: {
      getHandle<int64_t>().clear(val);
      break;
    }
// Fused rows end in a scale/offset pair of DATA_TYPE; only the payload
// columns (all but the last 2 * sizeof(DATA_TYPE) bytes of each row) are
// filled with val.
#define FUSED_CASE(ELEM_KIND, DATA_TYPE)                                       \
  case ElemKind::ELEM_KIND: {                                                  \
    DCHECK(dims().size() == 2)                                                 \
        << "Fused tensor must be 2-dimensional but instead has "               \
        << dims().size() << " dimensions.";                                    \
    DCHECK(dims()[1] > 2 * sizeof(DATA_TYPE))                                  \
        << "Fused tensor must have space for scale/offset, but only has "      \
        << dims()[1] << " columns.";                                           \
    auto H = getHandle<uint8_t>();                                             \
    for (dim_t i = 0; i < dims()[0]; i++) {                                    \
      for (dim_t j = 0, f = dims()[1] - 2 * sizeof(DATA_TYPE); j < f; j++) {   \
        H.at({i, j}) = val;                                                    \
      }                                                                        \
    }                                                                          \
    break;                                                                     \
  }
      FUSED_CASE(UInt8FusedQTy, float);
      FUSED_CASE(UInt8FusedFP16QTy, float16_t);
      FUSED_CASE(UInt4FusedFP16QTy, float16_t);
#undef FUSED_CASE

    case ElemKind::BoolTy: {
      getHandle<bool>().clear(val);
      break;
    }
    }
    break;
  }

  case InitKind::Xavier: {
    // Xavier initialization is only defined for floating-point tensors.
    switch (getElementType()) {
    case ElemKind::FloatTy: {
      getHandle<float>().initXavier(val, PRNG);
      break;
    }
    case ElemKind::Float16Ty: {
      getHandle<float16_t>().initXavier(val, PRNG);
      break;
    }
    default: {
      llvm_unreachable("Undefined to Xavier-initialize non-Float Tensors.");
    }
    }
    break;
  }
  }
}
/// Convert this tensor in place to element kind \p newTy by replacing it with
/// a converted copy (see getCopyConvertedToType for supported conversions).
void Tensor::convertToType(ElemKind newTy) {
  assert(!isDeviceResident() && "Tensor must reside on host to access data.");
  *this = this->getCopyConvertedToType(newTy);
}
/// \returns a copy of this tensor converted to element kind \p newKind.
/// Only the conversions whitelisted in the DCHECK below are implemented;
/// fused conversion currently supports UInt8FusedQTy -> UInt8FusedFP16QTy.
Tensor Tensor::getCopyConvertedToType(ElemKind newKind) const {
  assert(!isDeviceResident() && "Tensor must reside on host to access data.");
  const ElemKind origKind = getElementType();
  DCHECK((origKind == ElemKind::FloatTy && newKind == ElemKind::Float16Ty) ||
         (origKind == ElemKind::FloatTy && newKind == ElemKind::Int32ITy) ||
         (origKind == ElemKind::FloatTy && newKind == ElemKind::Int64ITy) ||
         (origKind == ElemKind::Float16Ty && newKind == ElemKind::FloatTy) ||
         (origKind == ElemKind::Int64ITy && newKind == ElemKind::Int32ITy) ||
         (origKind == ElemKind::Int64ITy && newKind == ElemKind::FloatTy) ||
         (origKind == ElemKind::Int32ITy && newKind == ElemKind::Int64ITy) ||
         (origKind == ElemKind::Int32ITy && newKind == ElemKind::FloatTy) ||
         (origKind == ElemKind::UInt8FusedQTy &&
          newKind == ElemKind::UInt8FusedFP16QTy))
      << "Conversion from " << Type::getElementName(origKind).str() << " to "
      << Type::getElementName(newKind).str() << " is not yet implemented";

  if (!isQuantizedElemKind(newKind)) {
    Tensor tmp(newKind, dims());
    switch (newKind) {
    case ElemKind::Float16Ty:
      tmp.copyWithCast<float16_t, float>(this);
      break;
    case ElemKind::FloatTy:
      if (getElementType() == ElemKind::Int32ITy) {
        tmp.copyWithCast<float, int32_t>(this);
      } else if (getElementType() == ElemKind::Int64ITy) {
        tmp.copyWithCast<float, int64_t>(this);
      } else if (getElementType() == ElemKind::Float16Ty) {
        tmp.copyWithCast<float, float16_t>(this);
      } else if (getElementType() == ElemKind::FloatTy) {
        tmp.copyRawFrom(this);
      } else {
        llvm_unreachable("Invalid conversion to FLOAT.");
      }
      break;
    case ElemKind::Int32ITy:
      if (getElementType() == ElemKind::Int64ITy) {
        tmp.copyWithCast<int32_t, int64_t>(this);
      } else if (getElementType() == ElemKind::FloatTy) {
        tmp.copyWithCast<int32_t, float>(this);
      } else {
        llvm_unreachable("Invalid conversion to INT32.");
      }
      break;
    case ElemKind::Int64ITy:
      if (getElementType() == ElemKind::Int32ITy) {
        tmp.copyWithCast<int64_t, int32_t>(this);
      } else if (getElementType() == ElemKind::FloatTy) {
        // Fix: FloatTy -> Int64ITy is permitted by the DCHECK above but was
        // previously missing here and fell into llvm_unreachable.
        tmp.copyWithCast<int64_t, float>(this);
      } else {
        llvm_unreachable("Invalid conversion to INT64.");
      }
      break;
    default:
      llvm_unreachable("Type not supported");
    }
    return tmp;
  }

  // Handle Fused conversion. Currently only supports UInt8FusedQTy ->
  // UInt8FusedFP16QTy.
  DCHECK(origKind == ElemKind::UInt8FusedQTy && dims().size() == 2)
      << "UInt8FusedQTy must be 2 dimensional.";
  // The destination rows are narrower: the float scale/offset pair shrinks to
  // float16_t.
  Tensor tmp(newKind,
             {dims()[0], dims()[1] - 2 * ((dim_t)sizeof(float) -
                                          (dim_t)sizeof(float16_t))},
             1.0, 0);

  const size_t dstWidth = tmp.dims()[1];
  auto srcH = getHandle<uint8_t>();
  auto dstH = tmp.getHandle<uint8_t>();
  for (dim_t i = 0, e = dims()[0]; i < e; i++) {
    // Copy the scale/offset from src to dst.
    float scale, offset;
    std::tie(scale, offset) = srcH.getFusedScaleOffsetFromRow<float>(i);
    dstH.setFusedScaleOffsetInRow<float16_t>(i, static_cast<float16_t>(scale),
                                             static_cast<float16_t>(offset));

    // Copy over the row's uint8 data from src to dst; scales and offsets were
    // already copied over above.
    for (dim_t j = 0, f = dstWidth - 2 * sizeof(float16_t); j < f; j++) {
      dstH.at({i, j}) = srcH.at({i, j});
    }
  }
  return tmp;
}
namespace glow {
/// Stream a textual dump of \p t into \p os.
llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const Tensor &t) {
  t.dump(os);
  return os;
}

/// Stream a textual dump of \p t into \p os; \p t must not be null.
llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const Tensor *t) {
  assert(t != nullptr && "Null Pointer.");
  t->dump(os);
  return os;
}
/// Mark this tensor as resident on a device managed by \p deviceManager,
/// remembering \p locationContext so later transfers can locate the payload.
void Tensor::moveToDevice(DeviceTensorTransferManager *deviceManager,
                          void *locationContext) {
  if (deviceResidency_ == nullptr) {
    deviceResidency_ = new DeviceResidencyInfo();
  }
  deviceResidency_->deviceManager_ = deviceManager;
  deviceResidency_->locationContext_ = locationContext;
  deviceResidency_->tensorResidency_ =
      DeviceResidencyInfo::TensorResidency::Device;
}

/// Transfer the payload back to the host if it currently lives on a device;
/// no-op when the tensor is already host-resident.
void Tensor::ensureOnHost() {
  if (deviceResidency_ == nullptr) {
    // already on host.
    return;
  }
  if (deviceResidency_->isDeviceResident()) {
    deviceResidency_->deviceManager_->transferFromDevice(*this);
  }
  assert(!isDeviceResident());
}

/// Overwrite the device payload with the raw bytes of \p t: temporarily drop
/// residency, copy raw data on the host, then push the result back to the
/// device at the remembered location.
void Tensor::copyRawToDevice(const Tensor *t) {
  assert(isDeviceResident());
  void *locationContext = deviceResidency_->locationContext_;
  DeviceTensorTransferManager *DM = deviceResidency_->deviceManager_;
  clearDeviceResidency();
  copyRawFrom(t);
  DM->transferToDevice(*this, locationContext);
}
/// \returns true when a slice of shape \p sliceShape extracted from a tensor
/// of shape \p tensorShape occupies contiguous memory: only the first
/// non-singleton slice dimension may be partially extracted; all following
/// dimensions must span the full tensor extent.
bool isSliceContiguous(llvm::ArrayRef<dim_t> sliceShape,
                       llvm::ArrayRef<dim_t> tensorShape) {
  assert(sliceShape.size() == tensorShape.size() &&
         "Array length mismatch for slice/tensor sizes!");
  const size_t rank = sliceShape.size();
  // Locate the first slice dimension that is not 1. If every dimension is a
  // singleton, that index is by convention the slice rank itself.
  size_t dim = 0;
  while (dim < rank && sliceShape[dim] == 1) {
    ++dim;
  }
  // Every dimension after the first non-singleton one must be fully
  // extracted for the slice to be contiguous.
  for (++dim; dim < rank; ++dim) {
    if (sliceShape[dim] != tensorShape[dim]) {
      return false;
    }
  }
  return true;
}
} // namespace glow
| 31.908725 | 80 | 0.609583 | [
"shape"
] |
d51927409198d7302e8d25683ff9df162adbe10a | 1,777 | cpp | C++ | src/Wrapper.cpp | inkrement/fastplm | 198b4b33f0dc5f2455fe17aa86d0bb881e45ad95 | [
"MIT"
] | 7 | 2019-09-02T17:33:32.000Z | 2021-06-06T12:54:00.000Z | src/Wrapper.cpp | inkrement/fastplm | 198b4b33f0dc5f2455fe17aa86d0bb881e45ad95 | [
"MIT"
] | 4 | 2020-01-07T17:07:24.000Z | 2021-08-20T05:58:47.000Z | src/Wrapper.cpp | inkrement/fastplm | 198b4b33f0dc5f2455fe17aa86d0bb881e45ad95 | [
"MIT"
] | 6 | 2019-09-02T00:58:46.000Z | 2022-01-22T00:55:38.000Z | #include "CrushQueue.h"
#include "FixedEffects.h"
#include "FixedEffectModel.h"
using Rcpp::List;
using Rcpp::XPtr;
// Build a FixedEffects object from R-side inputs and hand it back to R as an
// external pointer so later calls can reuse it without copying.
// [[Rcpp::export()]]
SEXP CreateFixedEffects(arma::uvec levelCounts, arma::mat indsR,
                        arma::uvec simpleEffects,
                        arma::uvec complexEffects,
                        arma::uvec complexInfluences,
                        List wrappedWeights) {
    // R indices are 1-based; shift to the 0-based indexing C++ expects.
    simpleEffects -= 1u;
    complexEffects -= 1u;
    complexInfluences -= 1u;

    // Materialize each element of the R list as an arma::mat.
    std::vector<arma::mat> weights;
    for (const SEXP& elem : wrappedWeights) {
        arma::mat m = Rcpp::as<arma::mat>(elem);
        weights.push_back(std::move(m));
    }

    // Ownership transfers from the unique_ptr into the Rcpp external pointer,
    // which will delete the object when R garbage-collects it.
    auto ptr = FixedEffects::create(levelCounts, indsR,
        simpleEffects, complexEffects, complexInfluences, weights);
    return XPtr<const FixedEffects>(ptr.release());
}
// \returns true when the wrapped FixedEffects carries component tables
// (i.e. was built with complex effects requiring component checking).
// [[Rcpp::export()]]
bool ContainMultipleComponents(SEXP wrappedFixedEffects) {
    auto fixedEffects = XPtr<const FixedEffects>(wrappedFixedEffects);
    return static_cast<bool>(fixedEffects->componentTables);
}

// Run the component check on `indicators` and return the errors found,
// converted element-by-element into R lists.
// [[Rcpp::export()]]
List CheckComponents(SEXP wrappedFixedEffects, arma::mat indicators) {
    auto fixedEffects = XPtr<const FixedEffects>(wrappedFixedEffects);
    auto cppErrors = fixedEffects->checkComponents(indicators);

    List rErrors;
    for (const auto& error : cppErrors)
        rErrors.push_back(static_cast<List>(error));
    return rErrors;
}
// Solve the fixed-effect model for `data`, optionally using `coreNum` worker
// threads. The global work queue exists only for the duration of this call;
// the ScopeGuard tears it down even if solve() throws.
// [[Rcpp::export()]]
List SolveFixedEffects(arma::mat data, SEXP wrappedFixedEffects, std::size_t coreNum = 1) {
    mainQueue = new CrushQueue(coreNum);
    ScopeGuard _([]{ delete mainQueue; mainQueue = nullptr; });

    auto fixedEffects = XPtr<const FixedEffects>(wrappedFixedEffects);
    return FixedEffectModel::solve(data, *fixedEffects);
}
| 31.732143 | 91 | 0.673607 | [
"vector"
] |
d51a0f7342ae6094c4d5425b3fd53375c701d6f7 | 5,367 | cpp | C++ | src/shogun/classifier/svm/SGDQN.cpp | rka97/shogun | 93d7afa8073fcb5a9f3d9e6492a6fd3c8a2e48be | [
"BSD-3-Clause"
] | null | null | null | src/shogun/classifier/svm/SGDQN.cpp | rka97/shogun | 93d7afa8073fcb5a9f3d9e6492a6fd3c8a2e48be | [
"BSD-3-Clause"
] | null | null | null | src/shogun/classifier/svm/SGDQN.cpp | rka97/shogun | 93d7afa8073fcb5a9f3d9e6492a6fd3c8a2e48be | [
"BSD-3-Clause"
] | null | null | null | /*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
* Authors: Soeren Sonnenburg, Shashwat Lal Das, Giovanni De Toni,
* Sergey Lisitsyn, Thoralf Klein, Evan Shelhamer, Bjoern Esser
*/
#include <shogun/base/Parameter.h>
#include <shogun/base/progress.h>
#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/lib/Signal.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/mathematics/Math.h>
using namespace shogun;
// Default constructor: hinge loss, C1 = C2 = 1 (see init()).
CSGDQN::CSGDQN()
: CLinearMachine()
{
	init();
}

// Construct with a single cost constant applied to both classes.
CSGDQN::CSGDQN(float64_t C)
: CLinearMachine()
{
	init();

	C1=C;
	C2=C;
}

// Construct with cost constant, training features and labels.
CSGDQN::CSGDQN(float64_t C, CDotFeatures* traindat, CLabels* trainlab)
: CLinearMachine()
{
	init();
	C1=C;
	C2=C;

	set_features(traindat);
	set_labels(trainlab);
}

CSGDQN::~CSGDQN()
{
	// Drop our reference to the loss function.
	SG_UNREF(loss);
}

// Replace the loss function, ref-counting the new one before releasing the
// old one (safe even if both are the same object).
void CSGDQN::set_loss_function(CLossFunction* loss_func)
{
	SG_REF(loss_func);
	SG_UNREF(loss);
	loss=loss_func;
}
// Accumulate the per-coordinate secant ratio between the old weights W and
// the updated weights W_1 into B; coordinates that did not move fall back to
// the plain 1/lambda contribution.
void CSGDQN::compute_ratio(float64_t* W,float64_t* W_1,float64_t* B,float64_t* dst,int32_t dim,float64_t lambda,float64_t loss_val)
{
	for (int32_t idx = 0; idx < dim; idx++)
	{
		const float64_t delta = W_1[idx] - W[idx];
		if (delta != 0.0)
		{
			B[idx] += delta / (lambda * delta + loss_val * dst[idx]);
		}
		else
		{
			B[idx] += 1 / lambda;
		}
	}
}
// Blend B into the running scaling estimate Bc with weights c1/c2 and clamp
// each updated entry into [v1, v2]. Entries where B is exactly zero are left
// untouched.
void CSGDQN::combine_and_clip(float64_t* Bc,float64_t* B,int32_t dim,float64_t c1,float64_t c2,float64_t v1,float64_t v2)
{
	for (int32_t idx = 0; idx < dim; idx++)
	{
		if (!B[idx])
			continue;
		const float64_t blended = Bc[idx] * c1 + B[idx] * c2;
		Bc[idx] = CMath::min(CMath::max(blended, v1), v2);
	}
}
// Train the linear machine with SGD-QN: plain stochastic gradient steps
// interleaved with quasi-Newton updates of the diagonal scaling vector Bc.
// Expects binary labels and dot-product features; returns true on success.
bool CSGDQN::train(CFeatures* data)
{
	ASSERT(m_labels)
	ASSERT(m_labels->get_label_type() == LT_BINARY)

	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n")
		set_features((CDotFeatures*) data);
	}

	ASSERT(features)

	int32_t num_train_labels=m_labels->get_num_labels();
	int32_t num_vec=features->get_num_vectors();

	ASSERT(num_vec==num_train_labels)
	ASSERT(num_vec>0)

	SGVector<float64_t> w(features->get_dim_feature_space());
	w.zero();

	float64_t lambda= 1.0/(C1*num_vec);

	// Shift t in order to have a
	// reasonable initial learning rate.
	// This assumes |x| \approx 1.
	float64_t maxw = 1.0 / sqrt(lambda);
	float64_t typw = sqrt(maxw);
	float64_t eta0 = typw / CMath::max(1.0,-loss->first_derivative(-typw,1));
	t = 1 / (eta0 * lambda);

	SG_INFO("lambda=%f, epochs=%d, eta0=%f\n", lambda, epochs, eta0)

	// Bc is the diagonal scaling estimate; B and result are scratch buffers.
	float64_t* Bc=SG_MALLOC(float64_t, w.vlen);
	SGVector<float64_t>::fill_vector(Bc, w.vlen, 1/lambda);

	float64_t* result=SG_MALLOC(float64_t, w.vlen);
	float64_t* B=SG_MALLOC(float64_t, w.vlen);

	//Calibrate
	calibrate();

	SG_INFO("Training on %d vectors\n", num_vec)

	ELossType loss_type = loss->get_loss_type();
	bool is_log_loss = false;
	if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
		is_log_loss = true;

	for (auto e : SG_PROGRESS(range(epochs)))
	{
		COMPUTATION_CONTROLLERS
		count = skip;
		bool updateB=false;
		for (int32_t i=0; i<num_vec; i++)
		{
			SGVector<float64_t> v = features->get_computed_dot_feature_vector(i);
			ASSERT(w.vlen==v.vlen)
			float64_t eta = 1.0/t;
			float64_t y = ((CBinaryLabels*) m_labels)->get_label(i);
			float64_t z = y * features->dot(i, w);
			if(updateB==true)
			{
				// Quasi-Newton step: take an SGD step, measure the change of
				// the loss derivative, and update Bc from the secant ratio.
				if (z < 1 || is_log_loss)
				{
					SGVector<float64_t> w_1=w.clone();
					float64_t loss_1=-loss->first_derivative(z,1);
					SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
					SGVector<float64_t>::add(w.vector,eta*loss_1*y,result,1.0,w.vector,w.vlen);
					float64_t z2 = y * features->dot(i, w);
					float64_t diffloss = -loss->first_derivative(z2,1) - loss_1;
					if(diffloss)
					{
						compute_ratio(w.vector,w_1.vector,B,v.vector,w.vlen,lambda,y*diffloss);
						if(t>skip)
							combine_and_clip(Bc,B,w.vlen,(t-skip)/(t+skip),2*skip/(t+skip),1/(100*lambda),100/lambda);
						else
							combine_and_clip(Bc,B,w.vlen,t/(t+skip),skip/(t+skip),1/(100*lambda),100/lambda);
					}
				}
				updateB=false;
			}
			else
			{
				// Every `skip` iterations apply the (scaled) weight decay and
				// schedule a quasi-Newton update for the next iteration.
				if(--count<=0)
				{
					SGVector<float64_t>::vector_multiply(result,Bc,w.vector,w.vlen);
					SGVector<float64_t>::add(w.vector,-skip*lambda*eta,result,1.0,w.vector,w.vlen);
					count = skip;
					updateB=true;
				}
				if (z < 1 || is_log_loss)
				{
					SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
					SGVector<float64_t>::add(w.vector,eta*-loss->first_derivative(z,1)*y,result,1.0,w.vector,w.vlen);
				}
			}
			t++;
		}
	}

	SG_FREE(result);
	SG_FREE(B);
	// Fix: Bc was previously leaked (allocated with SG_MALLOC, never freed).
	SG_FREE(Bc);

	set_w(w);

	return true;
}
// Estimate the sparsity of the feature matrix and derive `skip`, the number
// of iterations between explicit weight-decay updates in train().
void CSGDQN::calibrate()
{
	ASSERT(features)
	int32_t num_vec=features->get_num_vectors();
	int32_t c_dim=features->get_dim_feature_space();

	ASSERT(num_vec>0)
	ASSERT(c_dim>0)

	SG_INFO("Estimating sparsity num_vec=%d num_feat=%d.\n", num_vec, c_dim)

	// n counts the vectors, r accumulates their non-zero feature counts.
	int32_t n = 0;
	float64_t r = 0;

	for (int32_t j=0; j<num_vec ; j++, n++)
		r += features->get_nnz_features_for_vector(j);

	// compute weight decay skip
	skip = (int32_t) ((16 * n * c_dim) / r);
}
// Shared constructor body: set default hyper-parameters, create the default
// hinge loss and register members with shogun's parameter framework.
void CSGDQN::init()
{
	// t is the (shifted) SGD iteration counter; skip/count control how often
	// the weight-decay update runs (recomputed in calibrate()).
	t=0;
	C1=1;
	C2=1;
	epochs=5;
	skip=1000;
	count=1000;

	loss=new CHingeLoss();
	SG_REF(loss);

	SG_ADD(&C1, "C1", "Cost constant 1.", ParameterProperties::HYPER);
	SG_ADD(&C2, "C2", "Cost constant 2.", ParameterProperties::HYPER);
	SG_ADD(&epochs, "epochs", "epochs", ParameterProperties::HYPER);
	SG_ADD(&skip, "skip", "skip");
	SG_ADD(&count, "count", "count");
}
| 22.838298 | 131 | 0.671325 | [
"vector"
] |
d51cc66914c4f1746afa91b4183452ae35735c3d | 17,801 | cpp | C++ | Libraries/LibCrypto/BigInt/UnsignedBigInteger.cpp | shadowfacts/serenity | 7dd49047f38cf9b2e19fd05cb8700fe5d03c38e7 | [
"BSD-2-Clause"
] | null | null | null | Libraries/LibCrypto/BigInt/UnsignedBigInteger.cpp | shadowfacts/serenity | 7dd49047f38cf9b2e19fd05cb8700fe5d03c38e7 | [
"BSD-2-Clause"
] | null | null | null | Libraries/LibCrypto/BigInt/UnsignedBigInteger.cpp | shadowfacts/serenity | 7dd49047f38cf9b2e19fd05cb8700fe5d03c38e7 | [
"BSD-2-Clause"
] | null | null | null | /*
* Copyright (c) 2020, Itamar S. <itamar8910@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "UnsignedBigInteger.h"
#include <AK/StringBuilder.h>
namespace Crypto {
// Construct a big integer whose invalid flag is set; used to signal error
// results such as subtraction underflow.
UnsignedBigInteger UnsignedBigInteger::create_invalid()
{
    UnsignedBigInteger result(0);
    result.invalidate();
    return result;
}
// FIXME: in great need of optimisation
// Deserialize a big-endian byte buffer of `length` bytes into a big integer.
UnsignedBigInteger UnsignedBigInteger::import_data(const u8* ptr, size_t length)
{
    UnsignedBigInteger integer { 0 };

    for (size_t i = 0; i < length; ++i) {
        // Bytes are most-significant first: byte at offset k contributes its
        // value shifted left by 8 * (length - 1 - k) bits.
        auto part = UnsignedBigInteger { ptr[length - i - 1] }.shift_left(8 * i);
        integer = integer.plus(part);
    }

    return integer;
}

// Serialize this integer into `data` as big-endian bytes by repeatedly
// peeling off the least significant byte and dividing by 256.
// \returns the number of bytes written (at most trimmed_length() words' worth).
size_t UnsignedBigInteger::export_data(AK::ByteBuffer& data)
{
    UnsignedBigInteger copy { *this };
    UnsignedBigInteger quotient;
    UnsignedBigInteger remainder;

    size_t size = trimmed_length() * sizeof(u32);
    size_t i = 0;
    for (; i < size; ++i) {
        if (copy.trimmed_length() == 0)
            break;
        data[size - i - 1] = copy.m_words[0] & 0xff;
        divide_u16_without_allocation(copy, 256, quotient, remainder);
        copy.set_to(quotient);
    }
    return i;
}
// Parse a string of ASCII decimal digits into a big integer. No validation
// is performed; the caller must supply digits only.
UnsignedBigInteger UnsignedBigInteger::from_base10(const String& str)
{
    UnsignedBigInteger value;
    UnsignedBigInteger base { 10 };

    for (auto& digit : str)
        value = value.multiplied_by(base).plus(digit - '0');

    return value;
}
// Render this integer in base 10 by repeatedly dividing by 10 and collecting
// the remainders (least significant digit first), then reversing the digits.
String UnsignedBigInteger::to_base10() const
{
    StringBuilder builder;

    UnsignedBigInteger temp(*this);
    UnsignedBigInteger quotient;
    UnsignedBigInteger remainder;

    // Fix: without this special case a zero value produced an empty string.
    if (temp == UnsignedBigInteger { 0 })
        return "0";

    while (temp != UnsignedBigInteger { 0 }) {
        divide_u16_without_allocation(temp, 10, quotient, remainder);
        ASSERT(remainder.words()[0] < 10);
        builder.append(static_cast<char>(remainder.words()[0] + '0'));
        temp.set_to(quotient);
    }

    auto reversed_string = builder.to_string();
    builder.clear();
    for (int i = reversed_string.length() - 1; i >= 0; --i) {
        builder.append(reversed_string[i]);
    }

    return builder.to_string();
}
// Reset the value to zero while keeping the word storage allocated.
void UnsignedBigInteger::set_to_0()
{
    m_words.clear_with_capacity();
    m_is_invalid = false;
}

// Assign a single-word value.
void UnsignedBigInteger::set_to(u32 other)
{
    m_is_invalid = false;
    m_words.resize_and_keep_capacity(1);
    m_words[0] = other;
}

// Copy-assign from another big integer, reusing our existing capacity.
void UnsignedBigInteger::set_to(const UnsignedBigInteger& other)
{
    m_is_invalid = other.m_is_invalid;
    m_words.resize_and_keep_capacity(other.m_words.size());
    __builtin_memcpy(m_words.data(), other.m_words.data(), other.m_words.size() * sizeof(u32));
}

// Number of words when high-order zero words are ignored.
size_t UnsignedBigInteger::trimmed_length() const
{
    size_t num_leading_zeroes = 0;
    for (int i = length() - 1; i >= 0; --i, ++num_leading_zeroes) {
        if (m_words[i] != 0)
            break;
    }
    return length() - num_leading_zeroes;
}
// Non-destructive addition; allocates the result.
FLATTEN UnsignedBigInteger UnsignedBigInteger::plus(const UnsignedBigInteger& other) const
{
    UnsignedBigInteger result;

    add_without_allocation(*this, other, result);

    return result;
}

// Non-destructive subtraction; the result is invalid when other > *this.
FLATTEN UnsignedBigInteger UnsignedBigInteger::minus(const UnsignedBigInteger& other) const
{
    UnsignedBigInteger result;

    subtract_without_allocation(*this, other, result);

    return result;
}

// Non-destructive left shift by num_bits; allocates scratch temporaries.
FLATTEN UnsignedBigInteger UnsignedBigInteger::shift_left(size_t num_bits) const
{
    UnsignedBigInteger output;
    UnsignedBigInteger temp_result;
    UnsignedBigInteger temp_plus;

    shift_left_without_allocation(*this, num_bits, temp_result, temp_plus, output);

    return output;
}

// Non-destructive multiplication; the temporaries are scratch space for the
// shift-and-add implementation.
FLATTEN UnsignedBigInteger UnsignedBigInteger::multiplied_by(const UnsignedBigInteger& other) const
{
    UnsignedBigInteger result;
    UnsignedBigInteger temp_shift_result;
    UnsignedBigInteger temp_shift_plus;
    UnsignedBigInteger temp_shift;
    UnsignedBigInteger temp_plus;

    multiply_without_allocation(*this, other, temp_shift_result, temp_shift_plus, temp_shift, temp_plus, result);

    return result;
}
// Divide by `divisor`, returning quotient and remainder together.
FLATTEN UnsignedDivisionResult UnsignedBigInteger::divided_by(const UnsignedBigInteger& divisor) const
{
    UnsignedBigInteger quotient;
    UnsignedBigInteger remainder;

    // If we actually have a u16-compatible divisor, short-circuit to the
    // less computationally-intensive "divide_u16_without_allocation" method.
    if (divisor.trimmed_length() == 1 && divisor.m_words[0] < (1 << 16)) {
        divide_u16_without_allocation(*this, divisor.m_words[0], quotient, remainder);
        return UnsignedDivisionResult { quotient, remainder };
    }

    UnsignedBigInteger temp_shift_result;
    UnsignedBigInteger temp_shift_plus;
    UnsignedBigInteger temp_shift;
    UnsignedBigInteger temp_minus;

    divide_without_allocation(*this, divisor, temp_shift_result, temp_shift_plus, temp_shift, temp_minus, quotient, remainder);
    return UnsignedDivisionResult { quotient, remainder };
}
// Set the bit at `bit_index` (0 = least significant), growing the word
// vector with zero words as needed.
void UnsignedBigInteger::set_bit_inplace(size_t bit_index)
{
    const size_t word_index = bit_index / UnsignedBigInteger::BITS_IN_WORD;
    const size_t inner_word_index = bit_index % UnsignedBigInteger::BITS_IN_WORD;

    // Fix: reserve word_index + 1 slots; the loop below appends up to index
    // word_index inclusive, so reserving only word_index was one too few for
    // the unchecked appends.
    m_words.ensure_capacity(word_index + 1);

    for (size_t i = length(); i <= word_index; ++i) {
        m_words.unchecked_append(0);
    }
    // Fix: shift an unsigned literal; `1 << 31` overflows a signed int.
    m_words[word_index] |= (1u << inner_word_index);
}
// Value equality: invalid flags must match, trimmed lengths must match, and
// all significant words must be identical.
bool UnsignedBigInteger::operator==(const UnsignedBigInteger& other) const
{
    if (is_invalid() != other.is_invalid())
        return false;

    auto length = trimmed_length();

    if (length != other.trimmed_length())
        return false;

    // Fix: memcmp counts bytes, but `length` counts u32 words. The previous
    // code compared only `length` bytes and so ignored most of each word.
    return !__builtin_memcmp(m_words.data(), other.words().data(), length * sizeof(u32));
}

bool UnsignedBigInteger::operator!=(const UnsignedBigInteger& other) const
{
    return !(*this == other);
}
// Magnitude comparison: a number with fewer (trimmed) words is always
// smaller; equal-length numbers are compared word by word starting from the
// most significant end.
bool UnsignedBigInteger::operator<(const UnsignedBigInteger& other) const
{
    auto my_length = trimmed_length();
    auto other_length = other.trimmed_length();

    if (my_length != other_length)
        return my_length < other_length;

    // Decide at the first differing word; two empty (zero) numbers never
    // enter the loop and compare as not-less-than.
    for (int i = my_length - 1; i >= 0; --i) {
        if (m_words[i] != other.m_words[i])
            return m_words[i] < other.m_words[i];
    }
    return false;
}
/**
 * Add `left` and `right` into `output` (word-wise with carry propagation).
 * Complexity: O(N) where N is the number of words in the larger number
 */
void UnsignedBigInteger::add_without_allocation(
    const UnsignedBigInteger& left,
    const UnsignedBigInteger& right,
    UnsignedBigInteger& output)
{
    const UnsignedBigInteger* const longer = (left.length() > right.length()) ? &left : &right;
    const UnsignedBigInteger* const shorter = (longer == &right) ? &left : &right;

    u8 carry = 0;

    output.set_to_0();
    output.m_words.resize_and_keep_capacity(longer->length());

    for (size_t i = 0; i < shorter->length(); ++i) {
        u32 word_addition_result = shorter->m_words[i] + longer->m_words[i];
        u8 carry_out = 0;
        // If the raw addition wrapped, the result is smaller than either operand.
        if (word_addition_result < shorter->m_words[i]) {
            carry_out = 1;
        }
        if (carry) {
            word_addition_result++;
            // Fix: incorporating the incoming carry can wrap as well
            // (0xffffffff + 1 == 0). The old combined check
            // `result + carry < shorter` wrapped to 0 in that case and, when
            // the shorter word was 0, silently dropped the carry.
            if (word_addition_result == 0) {
                carry_out = 1;
            }
        }
        carry = carry_out;
        output.m_words[i] = word_addition_result;
    }

    for (size_t i = shorter->length(); i < longer->length(); ++i) {
        u32 word_addition_result = longer->m_words[i] + carry;

        carry = 0;
        if (word_addition_result < longer->m_words[i]) {
            carry = 1;
        }
        output.m_words[i] = word_addition_result;
    }
    if (carry) {
        output.m_words.append(carry);
    }
}
/**
* Complexity: O(N) where N is the number of words in the larger number
*/
void UnsignedBigInteger::subtract_without_allocation(
const UnsignedBigInteger& left,
const UnsignedBigInteger& right,
UnsignedBigInteger& output)
{
if (left < right) {
output.invalidate();
return;
}
u8 borrow = 0;
auto own_length = left.length();
auto other_length = right.length();
output.set_to_0();
output.m_words.resize_and_keep_capacity(own_length);
for (size_t i = 0; i < own_length; ++i) {
u32 other_word = (i < other_length) ? right.m_words[i] : 0;
i64 temp = static_cast<i64>(left.m_words[i]) - static_cast<i64>(other_word) - static_cast<i64>(borrow);
// If temp < 0, we had an underflow
borrow = (temp >= 0) ? 0 : 1;
if (temp < 0) {
temp += (UINT32_MAX + 1);
}
output.m_words[i] = temp;
}
// This assertion should not fail, because we verified that *this>=other at the beginning of the function
ASSERT(borrow == 0);
}
/**
 * Shift `number` left by `num_bits` into `output`, using the provided
 * temporaries as scratch space.
 * Complexity : O(N + num_bits % 8) where N is the number of words in the number
 * Shift method :
 * Start by shifting by whole words in num_bits (by putting missing words at the start),
 * then shift the number's words two by two by the remaining amount of bits.
 */
FLATTEN void UnsignedBigInteger::shift_left_without_allocation(
    const UnsignedBigInteger& number,
    size_t num_bits,
    UnsignedBigInteger& temp_result,
    UnsignedBigInteger& temp_plus,
    UnsignedBigInteger& output)
{
    // We can only do shift operations on individual words
    // where the shift amount is <= size of word (32).
    // But we do know how to shift by a multiple of word size (e.g 64=32*2)
    // So we first shift the result by how many whole words fit in 'num_bits'
    shift_left_by_n_words(number, num_bits / UnsignedBigInteger::BITS_IN_WORD, temp_result);

    output.set_to(temp_result);

    // And now we shift by the leftover amount of bits
    num_bits %= UnsignedBigInteger::BITS_IN_WORD;

    if (num_bits == 0) {
        return;
    }

    // Each output word combines bits from two adjacent input words.
    for (size_t i = 0; i < temp_result.length(); ++i) {
        u32 current_word_of_temp_result = shift_left_get_one_word(temp_result, num_bits, i);
        output.m_words[i] = current_word_of_temp_result;
    }

    // Shifting the last word can produce a carry
    u32 carry_word = shift_left_get_one_word(temp_result, num_bits, temp_result.length());
    if (carry_word != 0) {

        // output += (carry_word << temp_result.length())
        // FIXME : Using temp_plus this way to transform carry_word into a bigint is not
        // efficient nor pretty. Maybe we should have an "add_with_shift" method ?
        temp_plus.set_to_0();
        temp_plus.m_words.append(carry_word);
        shift_left_by_n_words(temp_plus, temp_result.length(), temp_result);
        add_without_allocation(output, temp_result, temp_plus);
        output.set_to(temp_plus);
    }
}
/**
 * Multiply `left` by `right` into `output`; the temporaries are scratch
 * space so the hot path performs no allocations of its own.
 * Complexity: O(N^2) where N is the number of words in the larger number
 * Multiplication method:
 * An integer is equal to the sum of the powers of two
 * according to the indexes of its 'on' bits.
 * So to multiple x*y, we go over each '1' bit in x (say the i'th bit),
 * and add y<<i to the result.
 */
FLATTEN void UnsignedBigInteger::multiply_without_allocation(
    const UnsignedBigInteger& left,
    const UnsignedBigInteger& right,
    UnsignedBigInteger& temp_shift_result,
    UnsignedBigInteger& temp_shift_plus,
    UnsignedBigInteger& temp_shift,
    UnsignedBigInteger& temp_plus,
    UnsignedBigInteger& output)
{
    output.set_to_0();

    // iterate all bits
    for (size_t word_index = 0; word_index < left.length(); ++word_index) {
        for (size_t bit_index = 0; bit_index < UnsignedBigInteger::BITS_IN_WORD; ++bit_index) {
            // If the bit is off - skip over it
            if (!(left.m_words[word_index] & (1 << bit_index)))
                continue;

            const size_t shift_amount = word_index * UnsignedBigInteger::BITS_IN_WORD + bit_index;

            // output += (right << shift_amount);
            shift_left_without_allocation(right, shift_amount, temp_shift_result, temp_shift_plus, temp_shift);
            add_without_allocation(output, temp_shift, temp_plus);
            output.set_to(temp_plus);
        }
    }
}
/**
 * Long division of `numerator` by `denominator`, producing `quotient` and
 * `remainder`; the temporaries are scratch space for the shift/subtract steps.
 * Complexity: O(N^2) where N is the number of words in the larger number
 * Division method:
 * We loop over the bits of the divisor, attempting to subtract divisor<<i from the dividend.
 * If the result is non-negative, it means that divisor*2^i "fits" in the dividend,
 * so we set the ith bit in the quotient and reduce divisor<<i from the dividend.
 * When we're done, what's left from the dividend is the remainder.
 */
FLATTEN void UnsignedBigInteger::divide_without_allocation(
    const UnsignedBigInteger& numerator,
    const UnsignedBigInteger& denominator,
    UnsignedBigInteger& temp_shift_result,
    UnsignedBigInteger& temp_shift_plus,
    UnsignedBigInteger& temp_shift,
    UnsignedBigInteger& temp_minus,
    UnsignedBigInteger& quotient,
    UnsignedBigInteger& remainder)
{
    quotient.set_to_0();
    remainder.set_to(numerator);

    // iterate all bits, from the most significant candidate bit downwards
    for (int word_index = numerator.trimmed_length() - 1; word_index >= 0; --word_index) {
        for (int bit_index = UnsignedBigInteger::BITS_IN_WORD - 1; bit_index >= 0; --bit_index) {
            const size_t shift_amount = word_index * UnsignedBigInteger::BITS_IN_WORD + bit_index;
            shift_left_without_allocation(denominator, shift_amount, temp_shift_result, temp_shift_plus, temp_shift);

            // An invalid subtraction result signals underflow, i.e. the
            // shifted denominator did not fit into the current remainder.
            subtract_without_allocation(remainder, temp_shift, temp_minus);
            if (!temp_minus.is_invalid()) {
                remainder.set_to(temp_minus);
                quotient.set_bit_inplace(shift_amount);
            }
        }
    }
}
/**
 * Fast path for dividing by a denominator that fits in 16 bits.
 * Complexity : O(N) where N is the number of digits in the numerator
 * Division method :
 * Starting from the most significant one, for each half-word of the numerator, combine it
 * with the existing remainder if any, divide the combined number as a u32 operation and
 * update the quotient / remainder as needed.
 */
FLATTEN void UnsignedBigInteger::divide_u16_without_allocation(
    const UnsignedBigInteger& numerator,
    u32 denominator,
    UnsignedBigInteger& quotient,
    UnsignedBigInteger& remainder)
{
    // The denominator must fit in 16 bits so that (remainder << 16) | half
    // never overflows a u32.
    ASSERT(denominator < (1 << 16));
    u32 remainder_word = 0;
    auto numerator_length = numerator.trimmed_length();
    quotient.set_to_0();
    quotient.m_words.resize(numerator_length);
    for (int word_index = numerator_length - 1; word_index >= 0; --word_index) {
        auto word_high = numerator.m_words[word_index] >> 16;
        auto word_low = numerator.m_words[word_index] & ((1 << 16) - 1);

        auto number_to_divide_high = (remainder_word << 16) | word_high;
        auto quotient_high = number_to_divide_high / denominator;
        remainder_word = number_to_divide_high % denominator;

        auto number_to_divide_low = remainder_word << 16 | word_low;
        auto quotient_low = number_to_divide_low / denominator;
        remainder_word = number_to_divide_low % denominator;

        quotient.m_words[word_index] = (quotient_high << 16) | quotient_low;
    }
    remainder.set_to(remainder_word);
}
// Shift `number` left by a whole number of words into `output`.
ALWAYS_INLINE void UnsignedBigInteger::shift_left_by_n_words(
    const UnsignedBigInteger& number,
    const size_t number_of_words,
    UnsignedBigInteger& output)
{
    // shifting left by N words means just inserting N zeroes to the beginning of the words vector
    output.set_to_0();
    output.m_words.resize_and_keep_capacity(number_of_words + number.length());

    __builtin_memset(output.m_words.data(), 0, number_of_words * sizeof(unsigned));
    __builtin_memcpy(&output.m_words.data()[number_of_words], number.m_words.data(), number.m_words.size() * sizeof(unsigned));
}
/**
 * Returns the word at a requested index in the result of a shift operation
 */
ALWAYS_INLINE u32 UnsignedBigInteger::shift_left_get_one_word(
    const UnsignedBigInteger& number,
    const size_t num_bits,
    const size_t result_word_index)
{
    // "<= length()" (rather than "< length()") is intentional: the index equal
    // to length() is queried when computing the final carry word.
    ASSERT(result_word_index <= number.length());
    ASSERT(num_bits <= UnsignedBigInteger::BITS_IN_WORD);
    u32 result = 0;
    // Bits carried in from the lower neighboring word. The num_bits != 0 guard
    // matters: shifting right by a full 32 bits is undefined behavior.
    if (num_bits != 0 && result_word_index > 0) {
        result += number.m_words[result_word_index - 1] >> (UnsignedBigInteger::BITS_IN_WORD - num_bits);
    }
    // Bits contributed by the word at this index itself, moved up by num_bits
    // (again guarding against an undefined full-width shift).
    if (num_bits < 32 && result_word_index < number.length()) {
        result += number.m_words[result_word_index] << num_bits;
    }
    return result;
}
}
| 33.971374 | 127 | 0.691815 | [
"vector",
"transform"
] |
d522d9a32862812e60fe7f65b178a44851422e87 | 48,178 | hpp | C++ | library/src/level2/rocsparse_csrmv.hpp | akilaMD/rocSPARSE | 2694e68938cefa711a50b286fd9fd0baff712099 | [
"MIT"
] | null | null | null | library/src/level2/rocsparse_csrmv.hpp | akilaMD/rocSPARSE | 2694e68938cefa711a50b286fd9fd0baff712099 | [
"MIT"
] | null | null | null | library/src/level2/rocsparse_csrmv.hpp | akilaMD/rocSPARSE | 2694e68938cefa711a50b286fd9fd0baff712099 | [
"MIT"
] | null | null | null | /* ************************************************************************
* Copyright (c) 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
* ************************************************************************ */
#pragma once
#ifndef ROCSPARSE_CSRMV_HPP
#define ROCSPARSE_CSRMV_HPP
#include "rocsparse.h"
#include "csrmv_device.h"
#include "handle.h"
#include "utility.h"
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 1024
#define BLOCK_MULTIPLIER 3
#define ROWS_FOR_VECTOR 1
#define WG_BITS 24
#define ROW_BITS 32
#define WG_SIZE 256
// Rounds x down to the largest power of two that does not exceed it; returns
// 0 for x == 0. Portable fallback used when no count-leading-zeros
// intrinsic is available in numThreadsForReduction().
__attribute__((unused)) static unsigned int flp2(unsigned int x)
{
    // Smear the highest set bit into every lower position, then strip all
    // bits below the top one.
    for(unsigned int shift = 1; shift < 8 * sizeof(unsigned int); shift <<= 1)
    {
        x |= (x >> shift);
    }
    return x - (x >> 1);
}
// Short rows in CSR-Adaptive are batched together into a single row block.
// If there are a relatively small number of these, then we choose to do
// a horizontal reduction (groups of threads all reduce the same row).
// If there are many threads (e.g. more threads than the maximum size
// of our workgroup) then we choose to have each thread serially reduce
// the row.
// This function calculates the number of threads that could team up
// to reduce these groups of rows. For instance, if you have a
// workgroup size of 256 and 4 rows, you could have 64 threads
// working on each row. If you have 5 rows, only 32 threads could
// reliably work on each row because our reduction assumes power-of-2.
//
// Precondition: num_rows >= 2. The intrinsic-based branches evaluate
// clz/bsr of (num_rows - 1), which is undefined for an argument of 0; all
// call sites guard with "> ROWS_FOR_VECTOR" (i.e. > 1) before calling.
static unsigned long long numThreadsForReduction(unsigned long long num_rows)
{
#if defined(__INTEL_COMPILER)
    return WG_SIZE >> (_bit_scan_reverse(num_rows - 1) + 1);
#elif(defined(__clang__) && __has_builtin(__builtin_clz)) \
    || !defined(__clang__) && defined(__GNUG__)           \
        && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 30202)
    // Bug fix: the macro clang defines is __clang__ (with trailing
    // underscores); testing the non-existent __clang let this GNU branch be
    // selected on clang builds where the first condition was false.
    return (WG_SIZE >> (8 * sizeof(int) - __builtin_clz(num_rows - 1)));
#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
    // Bug fix: _BitScanReverse takes an "unsigned long*" index argument;
    // passing an unsigned long long* does not compile on MSVC.
    unsigned long bit_returned;
    _BitScanReverse(&bit_returned, (num_rows - 1));
    return WG_SIZE >> (bit_returned + 1);
#else
    // Portable fallback: round WG_SIZE / num_rows down to a power of two.
    return flp2(WG_SIZE / num_rows);
#endif
}
// Partitions the rows of a CSR matrix into "row blocks" for the CSR-Adaptive
// SpMV algorithm. Each 64-bit entry stores a starting row in its upper
// ROW_BITS bits; the low bits hold either the ordinal of a workgroup
// cooperating on one very long row (csr-vector case) or the number of threads
// teaming up for a parallel reduction over a batch of short rows (csr-stream
// case).
//
// rowBlocks            output array of row block entries (ignored when
//                      allocate_row_blocks is false)
// rowBlockSize         out: required/used element count for rowBlocks
// rowDelimiters        host copy of the CSR row pointer array (nRows + 1
//                      entries)
// nRows                number of matrix rows; must fit in ROW_BITS bits
// allocate_row_blocks  false = sizing pass only (rowBlocks may be null),
//                      true = actually fill in rowBlocks
static void ComputeRowBlocks(unsigned long long* rowBlocks,
                             size_t& rowBlockSize,
                             const rocsparse_int* rowDelimiters,
                             rocsparse_int nRows,
                             bool allocate_row_blocks = true)
{
    unsigned long long* rowBlocksBase;
    // Start at one because of rowBlock[0]
    rocsparse_int total_row_blocks = 1;
    if(allocate_row_blocks)
    {
        rowBlocksBase = rowBlocks;
        *rowBlocks = 0;
        ++rowBlocks;
    }
    unsigned long long sum = 0;
    unsigned long long i;
    unsigned long long last_i = 0;
    // Check to ensure nRows can fit in 32 bits
    if(static_cast<unsigned long long>(nRows)
       > static_cast<unsigned long long>(std::pow(2, ROW_BITS)))
    {
        fprintf(stderr, "nrow does not fit in 32 bits\n");
        exit(1);
    }
    rocsparse_int consecutive_long_rows = 0;
    for(i = 1; i <= static_cast<unsigned long long>(nRows); ++i)
    {
        rocsparse_int row_length = (rowDelimiters[i] - rowDelimiters[i - 1]);
        sum += row_length;
        // The following section of code calculates whether you're moving between
        // a series of "short" rows and a series of "long" rows.
        // This is because the reduction in CSR-Adaptive likes things to be
        // roughly the same length. Long rows can be reduced horizontally.
        // Short rows can be reduced one-thread-per-row. Try not to mix them.
        if(row_length > 128)
        {
            ++consecutive_long_rows;
        }
        else if(consecutive_long_rows > 0)
        {
            // If it turns out we WERE in a long-row region, cut it off now.
            if(row_length < 32) // Now we're in a short-row region
            {
                consecutive_long_rows = -1;
            }
            else
            {
                consecutive_long_rows++;
            }
        }
        // If you just entered into a "long" row from a series of short rows,
        // then we need to make sure we cut off those short rows. Put them in
        // their own workgroup.
        if(consecutive_long_rows == 1)
        {
            // Assuming there *was* a previous workgroup. If not, nothing to do here.
            if(i - last_i > 1)
            {
                if(allocate_row_blocks)
                {
                    *rowBlocks = ((i - 1) << (64 - ROW_BITS));
                    // If this row fits into CSR-Stream, calculate how many rows
                    // can be used to do a parallel reduction.
                    // Fill in the low-order bits with the numThreadsForRed
                    if(((i - 1) - last_i) > static_cast<unsigned long long>(ROWS_FOR_VECTOR))
                    {
                        *(rowBlocks - 1) |= numThreadsForReduction((i - 1) - last_i);
                    }
                    ++rowBlocks;
                }
                ++total_row_blocks;
                last_i = i - 1;
                sum = row_length;
            }
        }
        else if(consecutive_long_rows == -1)
        {
            // We see the first short row after some long ones that
            // didn't previously fill up a row block.
            if(allocate_row_blocks)
            {
                *rowBlocks = ((i - 1) << (64 - ROW_BITS));
                if(((i - 1) - last_i) > static_cast<unsigned long long>(ROWS_FOR_VECTOR))
                {
                    *(rowBlocks - 1) |= numThreadsForReduction((i - 1) - last_i);
                }
                ++rowBlocks;
            }
            ++total_row_blocks;
            last_i = i - 1;
            sum = row_length;
            consecutive_long_rows = 0;
        }
        // Now, what's up with this row? What did it do?
        // exactly one row results in non-zero elements to be greater than blockSize
        // This is csr-vector case; bottom WGBITS == workgroup ID
        if((i - last_i == 1) && sum > static_cast<unsigned long long>(BLOCK_SIZE))
        {
            rocsparse_int numWGReq = static_cast<rocsparse_int>(
                std::ceil(static_cast<double>(row_length) / (BLOCK_MULTIPLIER * BLOCK_SIZE)));
            // Check to ensure #workgroups can fit in WGBITS bits, if not
            // then the last workgroup will do all the remaining work
            numWGReq = (numWGReq < static_cast<rocsparse_int>(std::pow(2, WG_BITS)))
                           ? numWGReq
                           : static_cast<rocsparse_int>(std::pow(2, WG_BITS));
            if(allocate_row_blocks)
            {
                for(rocsparse_int w = 1; w < numWGReq; ++w)
                {
                    *rowBlocks = ((i - 1) << (64 - ROW_BITS));
                    *rowBlocks |= static_cast<unsigned long long>(w);
                    ++rowBlocks;
                }
                *rowBlocks = (i << (64 - ROW_BITS));
                ++rowBlocks;
            }
            total_row_blocks += numWGReq;
            last_i = i;
            sum = 0;
            consecutive_long_rows = 0;
        }
        // more than one row results in non-zero elements to be greater than blockSize
        // This is csr-stream case; bottom WGBITS = number of parallel reduction threads
        else if((i - last_i > 1) && sum > static_cast<unsigned long long>(BLOCK_SIZE))
        {
            // This row won't fit, so back off one.
            --i;
            if(allocate_row_blocks)
            {
                *rowBlocks = (i << (64 - ROW_BITS));
                if((i - last_i) > static_cast<unsigned long long>(ROWS_FOR_VECTOR))
                {
                    *(rowBlocks - 1) |= numThreadsForReduction(i - last_i);
                }
                ++rowBlocks;
            }
            ++total_row_blocks;
            last_i = i;
            sum = 0;
            consecutive_long_rows = 0;
        }
        // This is csr-stream case; bottom WGBITS = number of parallel reduction threads
        else if(sum == static_cast<unsigned long long>(BLOCK_SIZE))
        {
            if(allocate_row_blocks)
            {
                *rowBlocks = (i << (64 - ROW_BITS));
                if((i - last_i) > static_cast<unsigned long long>(ROWS_FOR_VECTOR))
                {
                    *(rowBlocks - 1) |= numThreadsForReduction(i - last_i);
                }
                ++rowBlocks;
            }
            ++total_row_blocks;
            last_i = i;
            sum = 0;
            consecutive_long_rows = 0;
        }
    }
    // If we didn't fill a row block with the last row, make sure we don't lose it.
    if(allocate_row_blocks
       && (*(rowBlocks - 1) >> (64 - ROW_BITS)) != static_cast<unsigned long long>(nRows))
    {
        *rowBlocks = (static_cast<unsigned long long>(nRows) << (64 - ROW_BITS));
        if((nRows - last_i) > static_cast<unsigned long long>(ROWS_FOR_VECTOR))
        {
            // Bug fix: this trailing block covers rows [last_i, nRows). After
            // the loop above, i == nRows + 1, so the previous
            // numThreadsForReduction(i - last_i) over-counted the batch by one
            // row and disagreed with the (nRows - last_i) condition just above.
            *(rowBlocks - 1) |= numThreadsForReduction(nRows - last_i);
        }
        ++rowBlocks;
    }
    ++total_row_blocks;
    if(allocate_row_blocks)
    {
        size_t dist = std::distance(rowBlocksBase, rowBlocks);
        assert((2 * dist) <= rowBlockSize);
        // Update the size of rowBlocks to reflect the actual amount of memory used
        // We're multiplying the size by two because the extended precision form of
        // CSR-Adaptive requires more space for the final global reduction.
        rowBlockSize = 2 * dist;
    }
    else
    {
        rowBlockSize = 2 * total_row_blocks;
    }
}
// Precomputes the CSR-Adaptive "row blocks" partition for the given CSR
// matrix and caches it (together with the matrix properties) in
// info->csrmv_info, so that rocsparse_csrmv_template() can later dispatch to
// the adaptive SpMV kernel.
//
// Returns rocsparse_status_success on success, or an error status for null
// handles/pointers, negative sizes, or unsupported index base / matrix type.
// HIP allocation and copy failures propagate through the RETURN_IF_* macros.
template <typename T>
rocsparse_status rocsparse_csrmv_analysis_template(rocsparse_handle handle,
                                                   rocsparse_operation trans,
                                                   rocsparse_int m,
                                                   rocsparse_int n,
                                                   rocsparse_int nnz,
                                                   const rocsparse_mat_descr descr,
                                                   const T* csr_val,
                                                   const rocsparse_int* csr_row_ptr,
                                                   const rocsparse_int* csr_col_ind,
                                                   rocsparse_mat_info info)
{
    // Check for valid handle and matrix descriptor
    if(handle == nullptr)
    {
        return rocsparse_status_invalid_handle;
    }
    else if(descr == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(info == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    // Logging
    log_trace(handle,
              "rocsparse_csrmv_analysis",
              trans,
              m,
              n,
              nnz,
              (const void*&)descr,
              (const void*&)csr_val,
              (const void*&)csr_row_ptr,
              (const void*&)csr_col_ind,
              (const void*&)info);
    // Check index base
    if(descr->base != rocsparse_index_base_zero && descr->base != rocsparse_index_base_one)
    {
        return rocsparse_status_invalid_value;
    }
    if(descr->type != rocsparse_matrix_type_general)
    {
        // TODO
        return rocsparse_status_not_implemented;
    }
    // Check sizes
    if(m < 0)
    {
        return rocsparse_status_invalid_size;
    }
    else if(n < 0)
    {
        return rocsparse_status_invalid_size;
    }
    else if(nnz < 0)
    {
        return rocsparse_status_invalid_size;
    }
    // Quick return if possible
    if(m == 0 || n == 0 || nnz == 0)
    {
        return rocsparse_status_success;
    }
    // Check pointer arguments
    if(csr_row_ptr == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(csr_col_ind == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(csr_val == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    // Clear csrmv info
    RETURN_IF_ROCSPARSE_ERROR(rocsparse_destroy_csrmv_info(info->csrmv_info));
    // Create csrmv info
    RETURN_IF_ROCSPARSE_ERROR(rocsparse_create_csrmv_info(&info->csrmv_info));
    // Stream
    hipStream_t stream = handle->stream;
    // row blocks size
    info->csrmv_info->size = 0;
    // Temporary arrays to hold device data
    // ComputeRowBlocks needs the row pointer on the host, so copy it down
    // and synchronize before reading it.
    std::vector<rocsparse_int> hptr(m + 1);
    RETURN_IF_HIP_ERROR(hipMemcpyAsync(
        hptr.data(), csr_row_ptr, sizeof(rocsparse_int) * (m + 1), hipMemcpyDeviceToHost, stream));
    // Wait for host transfer to finish
    RETURN_IF_HIP_ERROR(hipStreamSynchronize(stream));
    // Determine row blocks array size
    // First pass only sizes the array (allocate_row_blocks == false), second
    // pass fills it in.
    ComputeRowBlocks((unsigned long long*)NULL, info->csrmv_info->size, hptr.data(), m, false);
    // Create row blocks structure
    std::vector<unsigned long long> row_blocks(info->csrmv_info->size, 0);
    ComputeRowBlocks(row_blocks.data(), info->csrmv_info->size, hptr.data(), m, true);
    // Allocate memory on device to hold csrmv info, if required
    // NOTE(review): ownership of this allocation presumably passes to
    // info->csrmv_info and is released by rocsparse_destroy_csrmv_info --
    // verify in the info implementation.
    if(info->csrmv_info->size > 0)
    {
        RETURN_IF_HIP_ERROR(hipMalloc((void**)&info->csrmv_info->row_blocks,
                                      sizeof(unsigned long long) * info->csrmv_info->size));
        // Copy row blocks information to device
        RETURN_IF_HIP_ERROR(hipMemcpyAsync(info->csrmv_info->row_blocks,
                                           row_blocks.data(),
                                           sizeof(unsigned long long) * info->csrmv_info->size,
                                           hipMemcpyHostToDevice,
                                           stream));
        // Wait for device transfer to finish
        RETURN_IF_HIP_ERROR(hipStreamSynchronize(stream));
    }
    // Store some pointers to verify correct execution
    // rocsparse_csrmv_adaptive_template later refuses to run if these cached
    // properties do not match the matrix passed to the compute call.
    info->csrmv_info->trans = trans;
    info->csrmv_info->m = m;
    info->csrmv_info->n = n;
    info->csrmv_info->nnz = nnz;
    info->csrmv_info->descr = descr;
    info->csrmv_info->csr_row_ptr = csr_row_ptr;
    info->csrmv_info->csr_col_ind = csr_col_ind;
    return rocsparse_status_success;
}
// Kernel entry point for the general CSR SpMV when the rocSPARSE handle is
// in host pointer mode: the scalars alpha and beta are passed by value.
// WF_SIZE is the number of threads cooperating on each matrix row.
template <typename T, rocsparse_int WF_SIZE>
__global__ void csrmvn_general_kernel_host_pointer(rocsparse_int m,
                                                   T alpha,
                                                   const rocsparse_int* __restrict__ csr_row_ptr,
                                                   const rocsparse_int* __restrict__ csr_col_ind,
                                                   const T* __restrict__ csr_val,
                                                   const T* __restrict__ x,
                                                   T beta,
                                                   T* __restrict__ y,
                                                   rocsparse_index_base idx_base)
{
    csrmvn_general_device<T, WF_SIZE>(
        m, alpha, csr_row_ptr, csr_col_ind, csr_val, x, beta, y, idx_base);
}
// Kernel entry point for the general CSR SpMV when the rocSPARSE handle is
// in device pointer mode: alpha and beta reside in device memory and are
// dereferenced here, on the device.
template <typename T, rocsparse_int WF_SIZE>
__global__ void csrmvn_general_kernel_device_pointer(rocsparse_int m,
                                                     const T* alpha,
                                                     const rocsparse_int* __restrict__ csr_row_ptr,
                                                     const rocsparse_int* __restrict__ csr_col_ind,
                                                     const T* __restrict__ csr_val,
                                                     const T* __restrict__ x,
                                                     const T* beta,
                                                     T* __restrict__ y,
                                                     rocsparse_index_base idx_base)
{
    csrmvn_general_device<T, WF_SIZE>(
        m, *alpha, csr_row_ptr, csr_col_ind, csr_val, x, *beta, y, idx_base);
}
// Kernel entry point for the CSR-Adaptive SpMV in host pointer mode (alpha
// and beta passed by value). The launch-bounds annotation pins the workgroup
// size to the WG_SIZE the row blocks structure was built for.
template <typename T>
__launch_bounds__(WG_SIZE) __global__
    void csrmvn_adaptive_kernel_host_pointer(unsigned long long* __restrict__ row_blocks,
                                             T alpha,
                                             const rocsparse_int* __restrict__ csr_row_ptr,
                                             const rocsparse_int* __restrict__ csr_col_ind,
                                             const T* __restrict__ csr_val,
                                             const T* __restrict__ x,
                                             T beta,
                                             T* __restrict__ y,
                                             rocsparse_index_base idx_base)
{
    csrmvn_adaptive_device<T,
                           BLOCK_SIZE,
                           BLOCK_MULTIPLIER,
                           ROWS_FOR_VECTOR,
                           WG_BITS,
                           ROW_BITS,
                           WG_SIZE>(
        row_blocks, alpha, csr_row_ptr, csr_col_ind, csr_val, x, beta, y, idx_base);
}
// Kernel entry point for the CSR-Adaptive SpMV in device pointer mode: the
// scalars live in device memory and are dereferenced on the device.
template <typename T>
__launch_bounds__(WG_SIZE) __global__
    void csrmvn_adaptive_kernel_device_pointer(unsigned long long* __restrict__ row_blocks,
                                               const T* alpha,
                                               const rocsparse_int* __restrict__ csr_row_ptr,
                                               const rocsparse_int* __restrict__ csr_col_ind,
                                               const T* __restrict__ csr_val,
                                               const T* __restrict__ x,
                                               const T* beta,
                                               T* __restrict__ y,
                                               rocsparse_index_base idx_base)
{
    csrmvn_adaptive_device<T,
                           BLOCK_SIZE,
                           BLOCK_MULTIPLIER,
                           ROWS_FOR_VECTOR,
                           WG_BITS,
                           ROW_BITS,
                           WG_SIZE>(
        row_blocks, *alpha, csr_row_ptr, csr_col_ind, csr_val, x, *beta, y, idx_base);
}
// Computes y = alpha * op(A) * x + beta * y for a sparse m x n matrix A in
// CSR format. All arguments are validated first; the call is then dispatched
// either to the general CSR kernel (no meta info available) or to the
// CSR-Adaptive kernel using the row blocks cached in info->csrmv_info by
// rocsparse_csrmv_analysis_template().
template <typename T>
rocsparse_status rocsparse_csrmv_template(rocsparse_handle handle,
                                          rocsparse_operation trans,
                                          rocsparse_int m,
                                          rocsparse_int n,
                                          rocsparse_int nnz,
                                          const T* alpha,
                                          const rocsparse_mat_descr descr,
                                          const T* csr_val,
                                          const rocsparse_int* csr_row_ptr,
                                          const rocsparse_int* csr_col_ind,
                                          rocsparse_mat_info info,
                                          const T* x,
                                          const T* beta,
                                          T* y)
{
    // Check for valid handle and matrix descriptor
    if(handle == nullptr)
    {
        return rocsparse_status_invalid_handle;
    }
    else if(descr == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    // Bug fix: alpha and beta are dereferenced (*alpha / *beta) by the host
    // pointer mode logging below, so they must be validated before logging
    // to avoid a null pointer dereference.
    else if(alpha == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(beta == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    // Logging
    if(handle->pointer_mode == rocsparse_pointer_mode_host)
    {
        log_trace(handle,
                  replaceX<T>("rocsparse_Xcsrmv"),
                  trans,
                  m,
                  n,
                  nnz,
                  *alpha,
                  (const void*&)descr,
                  (const void*&)csr_val,
                  (const void*&)csr_row_ptr,
                  (const void*&)csr_col_ind,
                  (const void*&)x,
                  *beta,
                  (const void*&)y,
                  (const void*&)info);
        log_bench(handle,
                  "./rocsparse-bench -f csrmv -r",
                  replaceX<T>("X"),
                  "--mtx <matrix.mtx> "
                  "--alpha",
                  *alpha,
                  "--beta",
                  *beta);
    }
    else
    {
        log_trace(handle,
                  replaceX<T>("rocsparse_Xcsrmv"),
                  trans,
                  m,
                  n,
                  nnz,
                  (const void*&)alpha,
                  (const void*&)descr,
                  (const void*&)csr_val,
                  (const void*&)csr_row_ptr,
                  (const void*&)csr_col_ind,
                  (const void*&)x,
                  (const void*&)beta,
                  (const void*&)y);
    }
    // Check index base
    if(descr->base != rocsparse_index_base_zero && descr->base != rocsparse_index_base_one)
    {
        return rocsparse_status_invalid_value;
    }
    if(descr->type != rocsparse_matrix_type_general)
    {
        // TODO
        return rocsparse_status_not_implemented;
    }
    // Check sizes
    if(m < 0)
    {
        return rocsparse_status_invalid_size;
    }
    else if(n < 0)
    {
        return rocsparse_status_invalid_size;
    }
    else if(nnz < 0)
    {
        return rocsparse_status_invalid_size;
    }
    // Quick return if possible
    if(m == 0 || n == 0 || nnz == 0)
    {
        return rocsparse_status_success;
    }
    // Check pointer arguments (alpha and beta were already validated above)
    if(csr_val == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(csr_row_ptr == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(csr_col_ind == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(x == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(y == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    if(info == nullptr || info->csrmv_info == nullptr)
    {
        // If csrmv info is not available, call csrmv general
        return rocsparse_csrmv_general_template(
            handle, trans, m, n, nnz, alpha, descr, csr_val, csr_row_ptr, csr_col_ind, x, beta, y);
    }
    else
    {
        // If csrmv info is available, call csrmv adaptive
        return rocsparse_csrmv_adaptive_template(handle,
                                                 trans,
                                                 m,
                                                 n,
                                                 nnz,
                                                 alpha,
                                                 descr,
                                                 csr_val,
                                                 csr_row_ptr,
                                                 csr_col_ind,
                                                 info->csrmv_info,
                                                 x,
                                                 beta,
                                                 y);
    }
}
// General-purpose CSR SpMV: computes y = alpha * A * x + beta * y in a single
// kernel launch, assigning WF_SIZE threads to each matrix row. WF_SIZE is
// chosen from the average number of non-zeros per row (nnz / m) so short rows
// do not waste lanes, capped at the hardware wavefront width (32 or 64).
// Only trans == rocsparse_operation_none is implemented; other operations
// return rocsparse_status_not_implemented.
//
// NOTE(review): nnz_per_row = nnz / m assumes m > 0. The public entry point
// (rocsparse_csrmv_template) returns early when m == 0; confirm before
// calling this helper directly.
template <typename T>
rocsparse_status rocsparse_csrmv_general_template(rocsparse_handle handle,
                                                  rocsparse_operation trans,
                                                  rocsparse_int m,
                                                  rocsparse_int n,
                                                  rocsparse_int nnz,
                                                  const T* alpha,
                                                  const rocsparse_mat_descr descr,
                                                  const T* csr_val,
                                                  const rocsparse_int* csr_row_ptr,
                                                  const rocsparse_int* csr_col_ind,
                                                  const T* x,
                                                  const T* beta,
                                                  T* y)
{
    // Stream
    hipStream_t stream = handle->stream;
    // Run different csrmv kernels
    if(trans == rocsparse_operation_none)
    {
#define CSRMVN_DIM 512
        // Average non-zeros per row selects the WF_SIZE template argument of
        // the kernels launched below.
        rocsparse_int nnz_per_row = nnz / m;
        dim3 csrmvn_blocks((m - 1) / CSRMVN_DIM + 1);
        dim3 csrmvn_threads(CSRMVN_DIM);
        if(handle->pointer_mode == rocsparse_pointer_mode_device)
        {
            // Device pointer mode: alpha/beta stay in device memory and are
            // dereferenced inside the kernel; their values cannot be
            // inspected here for an early-out.
            if(handle->wavefront_size == 32)
            {
                if(nnz_per_row < 4)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 2>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 8)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 4>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 16)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 8>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 32)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 16>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 32>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
            }
            else if(handle->wavefront_size == 64)
            {
                if(nnz_per_row < 4)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 2>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 8)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 4>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 16)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 8>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 32)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 16>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 64)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 32>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
                else
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_device_pointer<T, 64>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       beta,
                                       y,
                                       descr->base);
                }
            }
            else
            {
                return rocsparse_status_arch_mismatch;
            }
        }
        else
        {
            // Host pointer mode: the scalar values are known here, so skip
            // the launch entirely when alpha == 0 and beta == 1 (y unchanged).
            if(*alpha == static_cast<T>(0) && *beta == static_cast<T>(1))
            {
                return rocsparse_status_success;
            }
            if(handle->wavefront_size == 32)
            {
                if(nnz_per_row < 4)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 2>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 8)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 4>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 16)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 8>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 32)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 16>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 32>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
            }
            else if(handle->wavefront_size == 64)
            {
                if(nnz_per_row < 4)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 2>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 8)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 4>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 16)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 8>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 32)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 16>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else if(nnz_per_row < 64)
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 32>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
                else
                {
                    hipLaunchKernelGGL((csrmvn_general_kernel_host_pointer<T, 64>),
                                       csrmvn_blocks,
                                       csrmvn_threads,
                                       0,
                                       stream,
                                       m,
                                       *alpha,
                                       csr_row_ptr,
                                       csr_col_ind,
                                       csr_val,
                                       x,
                                       *beta,
                                       y,
                                       descr->base);
                }
            }
            else
            {
                return rocsparse_status_arch_mismatch;
            }
        }
#undef CSRMVN_DIM
    }
    else
    {
        // TODO
        return rocsparse_status_not_implemented;
    }
    return rocsparse_status_success;
}
// CSR-Adaptive SpMV: computes y = alpha * A * x + beta * y using the row
// blocks structure previously built by rocsparse_csrmv_analysis_template().
// Before launching, verifies that the cached csrmv info actually describes
// the matrix passed in (same operation, sizes, descriptor and CSR arrays).
// Only trans == rocsparse_operation_none is implemented.
template <typename T>
rocsparse_status rocsparse_csrmv_adaptive_template(rocsparse_handle handle,
                                                   rocsparse_operation trans,
                                                   rocsparse_int m,
                                                   rocsparse_int n,
                                                   rocsparse_int nnz,
                                                   const T* alpha,
                                                   const rocsparse_mat_descr descr,
                                                   const T* csr_val,
                                                   const rocsparse_int* csr_row_ptr,
                                                   const rocsparse_int* csr_col_ind,
                                                   rocsparse_csrmv_info info,
                                                   const T* x,
                                                   const T* beta,
                                                   T* y)
{
    // Check if info matches current matrix and options
    if(info->trans != trans)
    {
        return rocsparse_status_invalid_value;
    }
    else if(info->m != m)
    {
        return rocsparse_status_invalid_size;
    }
    else if(info->n != n)
    {
        return rocsparse_status_invalid_size;
    }
    else if(info->nnz != nnz)
    {
        return rocsparse_status_invalid_size;
    }
    else if(info->descr != descr)
    {
        return rocsparse_status_invalid_value;
    }
    else if(info->csr_row_ptr != csr_row_ptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(info->csr_col_ind != csr_col_ind)
    {
        return rocsparse_status_invalid_pointer;
    }
    // Stream
    hipStream_t stream = handle->stream;
    // Run different csrmv kernels
    if(trans == rocsparse_operation_none)
    {
        // One workgroup per row block: ComputeRowBlocks sets info->size to
        // twice the number of boundary entries it wrote, and the boundary
        // array holds one entry more than there are row blocks.
        dim3 csrmvn_blocks((info->size / 2) - 1);
        dim3 csrmvn_threads(WG_SIZE);
        if(handle->pointer_mode == rocsparse_pointer_mode_device)
        {
            // Device pointer mode: alpha/beta are dereferenced on the device.
            hipLaunchKernelGGL((csrmvn_adaptive_kernel_device_pointer<T>),
                               csrmvn_blocks,
                               csrmvn_threads,
                               0,
                               stream,
                               info->row_blocks,
                               alpha,
                               csr_row_ptr,
                               csr_col_ind,
                               csr_val,
                               x,
                               beta,
                               y,
                               descr->base);
        }
        else
        {
            // Host pointer mode: skip the launch when alpha == 0 and
            // beta == 1, since y would be left unchanged.
            if(*alpha == static_cast<T>(0) && *beta == static_cast<T>(1))
            {
                return rocsparse_status_success;
            }
            hipLaunchKernelGGL((csrmvn_adaptive_kernel_host_pointer<T>),
                               csrmvn_blocks,
                               csrmvn_threads,
                               0,
                               stream,
                               info->row_blocks,
                               *alpha,
                               csr_row_ptr,
                               csr_col_ind,
                               csr_val,
                               x,
                               *beta,
                               y,
                               descr->base);
        }
    }
    else
    {
        // TODO
        return rocsparse_status_not_implemented;
    }
    return rocsparse_status_success;
}
#endif // ROCSPARSE_CSRMV_HPP
| 39.457821 | 99 | 0.377019 | [
"vector"
] |
d52453253c075c647c001347692aabdfa1dc0547 | 209,197 | cc | C++ | tensorflow/stream_executor/rocm/rocm_dnn.cc | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 54 | 2017-06-17T14:07:48.000Z | 2022-03-29T02:11:20.000Z | tensorflow/stream_executor/rocm/rocm_dnn.cc | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 19 | 2021-12-28T12:44:55.000Z | 2022-01-13T08:11:28.000Z | tensorflow/stream_executor/rocm/rocm_dnn.cc | wainshine/tensorflow | dc7a8dc8546c679b9c7b3df7494ce4506bfc1a6d | [
"Apache-2.0"
] | 11 | 2018-04-19T22:36:01.000Z | 2021-08-02T08:44:43.000Z | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/stream_executor/rocm/rocm_dnn.h"
#include <functional>
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "third_party/eigen3/Eigen/Core"
#include "rocm/include/miopen/miopen.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/stream_executor/dnn.h"
#include "tensorflow/stream_executor/gpu/gpu_activation.h"
#include "tensorflow/stream_executor/gpu/gpu_driver.h"
#include "tensorflow/stream_executor/gpu/gpu_executor.h"
#include "tensorflow/stream_executor/gpu/gpu_stream.h"
#include "tensorflow/stream_executor/gpu/gpu_timer.h"
#include "tensorflow/stream_executor/lib/env.h"
#include "tensorflow/stream_executor/lib/error.h"
#include "tensorflow/stream_executor/lib/initialize.h"
#include "tensorflow/stream_executor/lib/threadpool.h"
#include "tensorflow/stream_executor/platform/dso_loader.h"
#include "tensorflow/stream_executor/platform/logging.h"
#include "tensorflow/stream_executor/plugin_registry.h"
#include "tensorflow/stream_executor/rocm/rocm_diagnostics.h"
#include "tensorflow/stream_executor/rocm/rocm_platform_id.h"
#include "tensorflow/stream_executor/scratch_allocator.h"
#include "tensorflow/stream_executor/stream.h"
#include "tensorflow/stream_executor/stream_executor_pimpl.h"
namespace {

// Narrowing conversion from WideT to NarrowT that CHECK-fails whenever the
// conversion loses information, i.e. the value does not survive a round trip
// through the narrower type.
template <typename WideT, typename NarrowT>
NarrowT CheckedNarrowing(const WideT& wide) {
  const NarrowT narrow = static_cast<NarrowT>(wide);
  CHECK_EQ(narrow, wide)
      << "checked narrowing failed; values not equal post-conversion";
  return narrow;
}

// VLOG level at which per-call convolution debugging output is emitted.
const int kConvDebugVlogLevel = 3;

}  // namespace
namespace stream_executor {
using dnn::AlgorithmDesc;
using dnn::BatchDescriptor;
using dnn::ConvolutionDescriptor;
using dnn::FilterDescriptor;
using dnn::NormalizeDescriptor;
using dnn::PoolingDescriptor;
namespace gpu {
PLUGIN_REGISTRY_DEFINE_PLUGIN_ID(kMIOpenPlugin);
// Converts a miopenStatus_t enumerator into its symbolic name; values outside
// the known set are rendered as "<unknown miopen status: N>".
string ToString(miopenStatus_t status) {
  const char* name = nullptr;
  switch (status) {
    case miopenStatusSuccess:
      name = "miopenStatusSuccess";
      break;
    case miopenStatusNotInitialized:
      name = "miopenStatusNotInitialized";
      break;
    case miopenStatusAllocFailed:
      name = "miopenStatusAllocFailed";
      break;
    case miopenStatusBadParm:
      name = "miopenStatusBadParm";
      break;
    case miopenStatusInternalError:
      name = "miopenStatusInternalError";
      break;
    case miopenStatusInvalidValue:
      name = "miopenStatusInvalidValue";
      break;
    case miopenStatusNotImplemented:
      name = "miopenStatusNotImplemented";
      break;
    case miopenStatusUnknownError:
      name = "miopenStatusUnknownError";
      break;
    default:
      return absl::StrCat("<unknown miopen status: ", static_cast<int>(status),
                          ">");
  }
  return name;
}
// Returns a short human-readable label for a MIOpen forward convolution
// algorithm enumerator (empty string for values outside the enum).
string ToString(miopenConvFwdAlgorithm_t algorithm) {
  switch (algorithm) {
    case miopenConvolutionFwdAlgoGEMM:
      return "GEMM";
    case miopenConvolutionFwdAlgoDirect:
      return "Direct";
    case miopenConvolutionFwdAlgoFFT:
      return "FFT";
    case miopenConvolutionFwdAlgoWinograd:
      return "Winograd";
    case miopenConvolutionFwdAlgoImplicitGEMM:
      return "Implicit GEMM";
  }
  return "";
}
// Returns a short human-readable label for a MIOpen backward-weights
// convolution algorithm enumerator (empty string for values outside the enum).
string ToString(miopenConvBwdWeightsAlgorithm_t algorithm) {
  switch (algorithm) {
    case miopenConvolutionBwdWeightsAlgoGEMM:
      return "GEMM";
    case miopenConvolutionBwdWeightsAlgoDirect:
      return "Direct";
    case miopenConvolutionBwdWeightsAlgoWinograd:
      return "Winograd";
    case miopenConvolutionBwdWeightsAlgoImplicitGEMM:
      return "Implicit GEMM";
  }
  return "";
}
// Returns a short human-readable label for a MIOpen backward-data convolution
// algorithm enumerator (empty string for values outside the enum).
string ToString(miopenConvBwdDataAlgorithm_t algorithm) {
  switch (algorithm) {
    case miopenConvolutionBwdDataAlgoGEMM:
      return "GEMM";
    case miopenConvolutionBwdDataAlgoDirect:
      return "Direct";
    case miopenConvolutionBwdDataAlgoFFT:
      return "FFT";
    case miopenConvolutionBwdDataAlgoWinograd:
      return "Winograd";
    case miopenTransposeBwdDataAlgoGEMM:
      return "Transpose GEMM";
    case miopenConvolutionBwdDataAlgoImplicitGEMM:
      return "Implicit GEMM";
  }
  return "";
}
// Returns a short human-readable label for a generic MIOpen convolution
// algorithm enumerator (empty string for values outside the enum).
string ToString(miopenConvAlgorithm_t algorithm) {
  switch (algorithm) {
    case miopenConvolutionAlgoGEMM:
      return "GEMM";
    case miopenConvolutionAlgoDirect:
      return "Direct";
    case miopenConvolutionAlgoFFT:
      return "FFT";
    case miopenConvolutionAlgoWinograd:
      return "Winograd";
    case miopenConvolutionAlgoImplicitGEMM:
      return "Implicit GEMM";
  }
  return "";
}
// RAII wrapper for all calls to MIOpen with a MIOpen handle argument.
//
// See MIOpenAccess::GetHandle() for details.
class MIOpenHandle {
 public:
  // Takes ownership of the executor context and the lock to access MIOpen
  // using handle.
  MIOpenHandle(gpu::ScopedActivateExecutorContext context,
               std::unique_ptr<absl::MutexLock> lock, miopenHandle_t handle)
      : context_(std::move(context)),
        lock_(std::move(lock)),
        miopen_handle_(handle) {}

  // Returns MIOpen handle. To be passed directly to MIOpen APIs, don't keep
  // a copy.
  miopenHandle_t handle() const { return miopen_handle_; }

 private:
  // Declaration order matters: members are destroyed in reverse order, so the
  // mutex lock is released before the executor context is deactivated.
  gpu::ScopedActivateExecutorContext context_;
  std::unique_ptr<absl::MutexLock> lock_;
  miopenHandle_t miopen_handle_;  // Not owned.
};
namespace wrap {
#ifdef PLATFORM_GOOGLE
// In Google builds the MIOpen symbols are linked in directly, so each wrapper
// simply forwards to the globally-qualified function.
#define STREAM_EXECUTOR_MIOPEN_WRAP(__name) \
  struct WrapperShim__##__name { \
    template <typename... Args> \
    miopenStatus_t operator()(Args... args) { \
      miopenStatus_t retval = ::__name(args...); \
      return retval; \
    } \
  } __name;
#else
// In open-source builds the MIOpen DSO is loaded lazily: the first call to a
// wrapped function resolves the symbol from the DSO (CHECK-failing if it is
// missing) and caches the resulting function pointer for subsequent calls.
#define STREAM_EXECUTOR_MIOPEN_WRAP(__name) \
  struct DynLoadShim__##__name { \
    static const char* kName; \
    using FuncPtrT = std::add_pointer<decltype(::__name)>::type; \
    static void* GetDsoHandle() { \
      auto s = internal::CachedDsoLoader::GetMiopenDsoHandle(); \
      return s.ValueOrDie(); \
    } \
    static FuncPtrT LoadOrDie() { \
      void* f; \
      auto s = port::Env::Default()->GetSymbolFromLibrary(GetDsoHandle(), \
                                                          kName, &f); \
      CHECK(s.ok()) << "could not find " << kName \
                    << " in miopen DSO; dlerror: " << s.error_message(); \
      return reinterpret_cast<FuncPtrT>(f); \
    } \
    static FuncPtrT DynLoad() { \
      static FuncPtrT f = LoadOrDie(); \
      return f; \
    } \
    template <typename... Args> \
    miopenStatus_t operator()(Args... args) { \
      return DynLoad()(args...); \
    } \
  } __name; \
  const char* DynLoadShim__##__name::kName = #__name;
#endif
// List of every MIOpen entry point used by this file; each line expands to one
// wrapper object named after the routine.
// clang-format off
#define MIOPEN_DNN_ROUTINE_EACH(__macro) \
  __macro(miopenBatchNormalizationBackward) \
  __macro(miopenBatchNormalizationForwardInference) \
  __macro(miopenBatchNormalizationForwardTraining) \
  __macro(miopenGetConvolutionForwardOutputDim) \
  __macro(miopenGetConvolutionNdForwardOutputDim) \
  __macro(miopenFindConvolutionForwardAlgorithm) \
  __macro(miopenCreateTensorDescriptor) \
  __macro(miopenDestroyTensorDescriptor) \
  __macro(miopenSetNdPoolingDescriptor) \
  __macro(miopenSetPoolingIndexType) \
  __macro(miopenSetLRNDescriptor) \
  __macro(miopenLRNGetWorkSpaceSize) \
  __macro(miopenCreateConvolutionDescriptor) \
  __macro(miopenCreatePoolingDescriptor) \
  __macro(miopenDestroyPoolingDescriptor) \
  __macro(miopenCreateLRNDescriptor) \
  __macro(miopenDestroyLRNDescriptor) \
  __macro(miopenDestroyConvolutionDescriptor) \
  __macro(miopenCreateWithStream) \
  __macro(miopenDestroy) \
  __macro(miopenSetStream) \
  __macro(miopenSetAllocator) \
  __macro(miopenActivationForward) \
  __macro(miopenConvolutionForward) \
  __macro(miopenConvolutionBackwardBias) \
  __macro(miopenConvolutionForwardGetWorkSpaceSize) \
  __macro(miopenInitConvolutionDescriptor) \
  __macro(miopenInitConvolutionNdDescriptor) \
  __macro(miopenGetConvolutionDescriptor) \
  __macro(miopenGetConvolutionNdDescriptor) \
  __macro(miopenSetConvolutionGroupCount) \
  __macro(miopenSet4dTensorDescriptor) \
  __macro(miopenGetTensorDescriptor) \
  __macro(miopenSetTensorDescriptor) \
  __macro(miopenGetTensorDescriptorSize) \
  __macro(miopenPoolingForward) \
  __macro(miopenPoolingGetWorkSpaceSizeV2) \
  __macro(miopenPoolingBackward) \
  __macro(miopenLRNForward) \
  __macro(miopenLRNBackward) \
  __macro(miopenOpTensor) \
  __macro(miopenConvolutionBackwardData) \
  __macro(miopenConvolutionBackwardWeights) \
  __macro(miopenConvolutionBackwardWeightsGetWorkSpaceSize) \
  __macro(miopenFindConvolutionBackwardDataAlgorithm) \
  __macro(miopenFindConvolutionBackwardWeightsAlgorithm) \
  __macro(miopenConvolutionBackwardDataGetWorkSpaceSize) \
  __macro(miopenCreateRNNDescriptor) \
  __macro(miopenSetRNNDescriptor) \
  __macro(miopenDestroyRNNDescriptor) \
  __macro(miopenGetRNNParamsSize) \
  __macro(miopenGetRNNLayerParam) \
  __macro(miopenGetRNNLayerBias) \
  __macro(miopenGetRNNWorkspaceSize) \
  __macro(miopenGetRNNTrainingReserveSize) \
  __macro(miopenRNNForwardInference) \
  __macro(miopenRNNForwardTraining) \
  __macro(miopenRNNBackwardData) \
  __macro(miopenRNNBackwardWeights) \
  __macro(miopenGetRNNLayerParamOffset) \
  __macro(miopenGetRNNLayerParamSize) \
  __macro(miopenGetRNNLayerBiasOffset) \
  __macro(miopenGetRNNLayerBiasSize) \
  __macro(miopenGetRNNParamsDescriptor) \
  __macro(miopenCreateActivationDescriptor) \
  __macro(miopenSetActivationDescriptor) \
  __macro(miopenGetActivationDescriptor) \
  __macro(miopenDestroyActivationDescriptor) \
  __macro(miopenCreateFusionPlan) \
  __macro(miopenCreateOpConvForward) \
  __macro(miopenCreateOpBiasForward) \
  __macro(miopenCreateOpActivationForward) \
  __macro(miopenCreateOpActivationBackward) \
  __macro(miopenCreateOpBatchNormInference) \
  __macro(miopenCreateOpBatchNormForward) \
  __macro(miopenCreateOpBatchNormBackward) \
  __macro(miopenCompileFusionPlan) \
  __macro(miopenFusionPlanGetOp) \
  __macro(miopenCreateOperatorArgs) \
  __macro(miopenSetOpArgsConvForward) \
  __macro(miopenSetOpArgsBiasForward) \
  __macro(miopenSetOpArgsActivForward) \
  __macro(miopenSetOpArgsActivBackward) \
  __macro(miopenSetOpArgsBatchNormInference) \
  __macro(miopenSetOpArgsBatchNormForward) \
  __macro(miopenSetOpArgsBatchNormBackward) \
  __macro(miopenExecuteFusionPlan) \
  __macro(miopenDestroyOperatorArgs) \
  __macro(miopenDestroyFusionPlan) \
  __macro(miopenConvolutionForwardGetSolutionCount) \
  __macro(miopenConvolutionForwardGetSolution) \
  __macro(miopenConvolutionForwardGetSolutionWorkspaceSize) \
  __macro(miopenConvolutionForwardCompileSolution) \
  __macro(miopenConvolutionForwardImmediate) \
  __macro(miopenConvolutionBackwardDataGetSolutionCount) \
  __macro(miopenConvolutionBackwardDataGetSolution) \
  __macro(miopenConvolutionBackwardDataGetSolutionWorkspaceSize) \
  __macro(miopenConvolutionBackwardDataCompileSolution) \
  __macro(miopenConvolutionBackwardDataImmediate) \
  __macro(miopenConvolutionBackwardWeightsGetSolutionCount) \
  __macro(miopenConvolutionBackwardWeightsGetSolution) \
  __macro(miopenConvolutionBackwardWeightsGetSolutionWorkspaceSize) \
  __macro(miopenConvolutionBackwardWeightsCompileSolution) \
  __macro(miopenConvolutionBackwardWeightsImmediate) \
  __macro(miopenCreateCTCLossDescriptor) \
  __macro(miopenSetCTCLossDescriptor) \
  __macro(miopenGetCTCLossWorkspaceSize) \
  __macro(miopenCTCLoss) \
  __macro(miopenDestroyCTCLossDescriptor)
// clang-format on
MIOPEN_DNN_ROUTINE_EACH(STREAM_EXECUTOR_MIOPEN_WRAP)
#undef MIOPEN_DNN_ROUTINE_EACH
}  // namespace wrap
namespace {
// These routines should ideally be provided as an MIOpen API.
// They are called for *every* _ROCMmFusedOp*::Compute call, and they need to be
// efficient! Instead of calculating the hash value by quering the MIOpen Get*
// APIs for the descriptor components, it would be a lot more efficient if,
// MIOpen calculated the hash value when creating the descriptor, stored it on
// the descriptor datastructure, and provided an API routine to query it.
// Maximum tensor rank this hashing helper supports.
const int kMaxMIOpenTensorSize = 5;
// Computes a hash over a MIOpen tensor descriptor by folding its datatype,
// dimensions, and strides into a single 64-bit value.
uint64_t GetHashValue(miopenTensorDescriptor_t tensor_desc) {
  miopenDataType_t datatype = miopenFloat;
  int dims[kMaxMIOpenTensorSize] = {0};
  int strides[kMaxMIOpenTensorSize] = {0};
  wrap::miopenGetTensorDescriptor(tensor_desc, &datatype, dims, strides);
  uint64_t hash = tensorflow::hash<int>()(datatype);
  auto combine = [&hash](int value) {
    hash = tensorflow::Hash64Combine(hash, tensorflow::hash<int>()(value));
  };
  for (int dim : dims) combine(dim);
  for (int stride : strides) combine(stride);
  return hash;
}
// Computes a hash over a MIOpen convolution descriptor by folding its mode,
// padding, strides, and dilations into a single 64-bit value.
uint64_t GetHashValue(miopenConvolutionDescriptor_t conv_desc) {
  miopenConvolutionMode_t c_mode = miopenConvolution;
  int nd = 0;
  // First call only queries the number of spatial dims so the arrays below
  // can be sized correctly.
  wrap::miopenGetConvolutionNdDescriptor(conv_desc, 0, &nd, nullptr, nullptr,
                                         nullptr, &c_mode);
  std::vector<int> stride(nd);
  std::vector<int> pad(nd);
  std::vector<int> dilation(nd);
  wrap::miopenGetConvolutionNdDescriptor(
      conv_desc, nd, &nd, pad.data(), stride.data(), dilation.data(), &c_mode);
  uint64_t hash_value = tensorflow::hash<int>()(c_mode);
  // BUG FIX: the combined hash must be assigned back to hash_value.
  // Previously the result of Hash64Combine was discarded, so the pad, stride
  // and dilation values never contributed to the hash, and descriptors
  // differing only in those fields hashed identically.
  auto hash64Combine = [&hash_value](int element) {
    hash_value = tensorflow::Hash64Combine(hash_value,
                                           tensorflow::hash<int>()(element));
  };
  std::for_each(pad.begin(), pad.end(), hash64Combine);
  std::for_each(stride.begin(), stride.end(), hash64Combine);
  std::for_each(dilation.begin(), dilation.end(), hash64Combine);
  return hash_value;
}
// Class to implement a cache of compiled fusion plans.
//
// The cache is process-wide (static members) so that fusion plans can be
// shared across streams. All access is serialized by cached_plans_mutex.
class CachedFusionPlans {
 public:
  // Check if we already have a fusion_plan corresponding to the given hash
  // value.
  // If we do, then
  //   return true (+ the cached fusion plan via given pointer)
  // Else
  //   create a new fusion plan descriptor,
  //   associate it with the given hash value in the cache
  //   return false (+ newly created fusion plan via given pointer)
  static bool FindOrCreate(uint64_t hash,
                           miopenFusionPlanDescriptor_t* fusion_plan,
                           miopenFusionDirection_t fusion_direction,
                           miopenTensorDescriptor_t input_descriptor) {
    absl::MutexLock lock{&cached_plans_mutex};
    bool found_cached_plan = false;
    auto it = cached_plans.find(hash);
    if (it != cached_plans.end()) {
      *fusion_plan = it->second;
      found_cached_plan = true;
    } else {
      auto status = wrap::miopenCreateFusionPlan(fusion_plan, fusion_direction,
                                                 input_descriptor);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenCreateFusionPlan failed: "
                   << ToString(status);
      } else {
        cached_plans[hash] = *fusion_plan;
      }
    }
    return found_cached_plan;
  }
  // Destroys all cached fusion plans and clears both the cache and the set of
  // unsupported plans.
  // TODO: Need to figure out the right place to call this routine.
  static void Clear() {
    absl::MutexLock lock{&cached_plans_mutex};
    // Iterate by const reference to avoid copying each map entry.
    for (const auto& entry : cached_plans) {
      auto status = wrap::miopenDestroyFusionPlan(entry.second);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenDestroyFusionPlan failed: "
                   << ToString(status);
      }
    }
    cached_plans.clear();
    unsupported_plans.clear();
  }
  // Is the Fusion plan corresponding to this hash unsupported.
  static bool IsUnsupportedFusionPlan(uint64_t hash) {
    absl::MutexLock lock{&cached_plans_mutex};
    return unsupported_plans.count(hash) > 0;
  }
  // Mark the given hash value as corresponding to an unsupported fusion plan.
  static void MarkFusionPlanUnsupported(uint64_t hash) {
    absl::MutexLock lock{&cached_plans_mutex};
    unsupported_plans.insert(hash);
  }
 private:
  // Mutex to guard access to all data within this class.
  static absl::Mutex cached_plans_mutex;
  // Map of hash-value to MIOpen Fusion plan descriptors.
  // Need to be able share this across more than one stream and hence static.
  static std::map<uint64_t, miopenFusionPlanDescriptor_t> cached_plans;
  // Set of hash-values that correspond to MIOpen Fusion plans that will fail
  // compile and hence are not supported.
  static std::set<uint64_t> unsupported_plans;
};
absl::Mutex CachedFusionPlans::cached_plans_mutex;
std::map<uint64_t, miopenFusionPlanDescriptor_t>
    CachedFusionPlans::cached_plans;
std::set<uint64_t> CachedFusionPlans::unsupported_plans;
// Converts a MIOpen immediate-mode solution record into a
// dnn::ProfileResult (algorithm id, elapsed time, scratch requirement).
dnn::ProfileResult GetProfileResultFromConvSolution(
    miopenConvSolution_t solution) {
  dnn::ProfileResult result;
  result.set_algorithm({solution.solution_id, false, solution.workspace_size});
  result.set_elapsed_time_in_ms(solution.time);
  result.set_scratch_size(solution.workspace_size);
  return result;
}
// Converts a MIOpen find-mode perf record into a dnn::ProfileResult.
// The algorithm id is read from the union member matching `kind`; an
// unexpected kind is fatal.
dnn::ProfileResult GetProfileResultFromConvAlgoPerf(
    dnn::ConvolutionKind kind, miopenConvAlgoPerf_t algorithm) {
  int64_t id = 0;
  switch (kind) {
    case dnn::ConvolutionKind::FORWARD:
      id = algorithm.fwd_algo;
      break;
    case dnn::ConvolutionKind::BACKWARD_DATA:
      id = algorithm.bwd_data_algo;
      break;
    case dnn::ConvolutionKind::BACKWARD_FILTER:
      id = algorithm.bwd_weights_algo;
      break;
    default:
      LOG(FATAL) << "Unexpected convolution kind " << static_cast<int>(kind);
      break;
  }
  dnn::ProfileResult result;
  result.set_algorithm({id, false, algorithm.memory});
  result.set_elapsed_time_in_ms(algorithm.time);
  result.set_scratch_size(algorithm.memory);
  return result;
}
} // namespace
// Wraps a MIOpen handle and provides access to it through miopenHandle_t
// instances, which also locks a mutex, acquires the ROCm context, and sets
// the stream that MIOpen should use to enqueue any work.
//
// Note: MIOpenSupport::miopen_ should be the only instantiation of this class.
class MIOpenAccess {
 public:
  // Takes ownership of the handle.
  explicit MIOpenAccess(miopenHandle_t handle) : handle_(handle) {}
  ~MIOpenAccess() {
    absl::MutexLock lock(&mutex_);
    wrap::miopenDestroy(handle_);
  }
  // Creates a MIOpenHandle instance for stream.
  //
  // MIOpen API calls using the same handle instance need to be serialized
  // across threads. This is guaranteed by MIOpenHandle instances locking the
  // mutex owned by this class.
  //
  // Most MIOpen APIs taking a handle perform work on a HIP stream. The
  // MIOpenHandle instance acquires the executor's ROCm context and sets MIOpen
  // to use the provided stream.
  //
  // The stream argument may be null, which translates to the null stream.
  // The null stream synchronizes with all other streams and it is
  // therefore a bad idea (performance wise) to call any MIOpen APIs that
  // enqueue work in the stream.
  MIOpenHandle GetHandle(GpuExecutor* executor, Stream* stream) {
    // The lock is transferred into the returned MIOpenHandle and is only
    // released when that handle object is destroyed.
    auto lock = absl::make_unique<absl::MutexLock>(&mutex_);
    mutex_.AssertHeld();
    gpu::ScopedActivateExecutorContext context(executor);
    hipStream_t hip_stream = stream ? AsGpuStreamValue(stream) : nullptr;
    auto status = wrap::miopenSetStream(handle_, hip_stream);
    CHECK_EQ(status, miopenStatusSuccess) << "Failed to set MIOpen stream.";
    return MIOpenHandle(std::move(context), std::move(lock), handle_);
  }
 private:
  // Guards the enqueueing of MIOpen operations via the handle_ below.
  absl::Mutex mutex_;
  // MIOpen library handle.
  miopenHandle_t handle_ TF_GUARDED_BY(mutex_);  // Owned.
};
// Constructs MIOpen support for `parent`, reading behavior toggles from the
// environment. Does not create the MIOpen handle itself; see Init().
MIOpenSupport::MIOpenSupport(GpuExecutor* parent) : parent_(parent) {
  // by default, the Get*Algorithm API will return the list of all applicable
  // algorithms
  return_best_algo_only_ = false;
  // but if the env var TF_ROCM_RETURN_BEST_ALGO_ONLY is set, only the best
  // (i.e. most efficient) algorithm will be returned
  tensorflow::ReadBoolFromEnvVar("TF_ROCM_RETURN_BEST_ALGO_ONLY", false,
                                 &return_best_algo_only_);
  // by default, use Find Mode APIs for convolution
  use_immediate_mode_ = false;
  // switch to Immediate Mode if the env var TF_ROCM_USE_IMMEDIATE_MODE is set
  tensorflow::ReadBoolFromEnvVar("TF_ROCM_USE_IMMEDIATE_MODE", false,
                                 &use_immediate_mode_);
  // backward-pooling results may be cached if TF_ROCM_BW_POOL_CACHE is set
  bool enable_pooling_cache = false;
  tensorflow::ReadBoolFromEnvVar("TF_ROCM_BW_POOL_CACHE", false,
                                 &enable_pooling_cache);
  if (enable_pooling_cache) m_pooling_cache_allowed = true;
}
// Creates the MIOpen handle on the null stream. Returns OK on success;
// otherwise logs diagnostics (including a probable driver-version mismatch
// when MIOpen reports it is not initialized) and returns an INTERNAL error.
port::Status MIOpenSupport::Init() {
  ScopedActivateExecutorContext context(parent_);
  miopenHandle_t miopen_handle = nullptr;
  auto status = wrap::miopenCreateWithStream(
      reinterpret_cast<miopenHandle_t*>(&miopen_handle), (hipStream_t)(0));
  if (status == miopenStatusSuccess) {
    miopen_.reset(new MIOpenAccess(miopen_handle));
    return port::Status::OK();
  }
  // On failure miopenCreateWithStream must not have produced a handle.
  CHECK_EQ(miopen_handle, nullptr);
  LOG(ERROR) << "could not create miopen handle: " << ToString(status);
  if (status == miopenStatusNotInitialized) {
    // A common cause is a kernel driver that is too old; surface its version
    // to help diagnose.
    auto result = rocm::Diagnostician::FindKernelDriverVersion();
    if (!result.ok()) {
      LOG(ERROR) << "error retrieving driver version: "
                 << rocm::DriverVersionStatusToString(result);
    } else {
      const auto& version = result.ValueOrDie();
      LOG(INFO) << "possibly insufficient driver version: "
                << rocm::DriverVersionToString(version);
    }
  }
  return port::Status{port::error::INTERNAL,
                      absl::StrCat("miopen library could not create a handle: ",
                                   ToString(status))};
}
// Returns the MIOpen version this support layer reports.
// ROCM TODO: retrieve MIOpen version with its API; until then this is a
// hard-coded placeholder (1.3.0), not the actual library version.
port::StatusOr<perftools::gputools::dnn::VersionInfo>
MIOpenSupport::GetVersion() {
  return perftools::gputools::dnn::VersionInfo(1, 3, 0);
}
// Turns a BatchDescriptor structure into a miopen tensor handle within a scope.
class ScopedTensorDescriptor {
 public:
  // Builds a MIOpen tensor descriptor from `batch_descriptor`, always
  // emitting dims/strides in BDYX (batch-depth-spatial) order regardless of
  // the input layout. LOG(FATAL)s if creation or initialization fails.
  ScopedTensorDescriptor(const BatchDescriptor& batch_descriptor,
                         miopenDataType_t elem_type)
      : handle_(nullptr) {
    auto status = wrap::miopenCreateTensorDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not create miopen tensor descriptor: "
                 << ToString(status);
    }
    switch (batch_descriptor.layout()) {
      case dnn::DataLayout::kBatchYXDepth:
      case dnn::DataLayout::kBatchDepthYX: {
        const int nd = batch_descriptor.ndims() + 2;
        // MIOpen requires the strides and dims to be ordered as BDYX.
        std::vector<int64_t> strides64 =
            batch_descriptor.full_strides(dnn::DataLayout::kBatchDepthYX);
        std::vector<int64_t> dims64 =
            batch_descriptor.full_dims(dnn::DataLayout::kBatchDepthYX);
        // MIOpen requires arrays of ints.
        std::vector<int> strides(nd);
        std::vector<int> dims(nd);
        std::transform(strides64.cbegin(), strides64.cend(), strides.begin(),
                       &CheckedNarrowing<int64_t, int>);
        std::transform(dims64.cbegin(), dims64.cend(), dims.begin(),
                       &CheckedNarrowing<int64_t, int>);
        status = wrap::miopenSetTensorDescriptor(handle_, elem_type, nd,
                                                 dims.data(), strides.data());
        if (status != miopenStatusSuccess) {
          LOG(FATAL) << "could not convert BatchDescriptor "
                     << batch_descriptor.ToString()
                     << " to miopen tensor descriptor: " << ToString(status);
        }
      } break;
      default:
        LOG(FATAL) << "Unsupported tensor format "
                   << DataLayoutString(batch_descriptor.layout());
        break;
    }
  }
  // Failure to destroy the descriptor is logged but not fatal.
  ~ScopedTensorDescriptor() {
    auto status = wrap::miopenDestroyTensorDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "could not destroy miopen tensor descriptor: "
                 << ToString(status);
    }
  }
  miopenTensorDescriptor_t handle() const { return handle_; }
 private:
  miopenTensorDescriptor_t handle_;  // Owned.
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedTensorDescriptor);
};
// Turns a FilterDescriptor structure into a miopen filter handle within a
// scope.
class ScopedFilterDescriptor {
 public:
  // Builds a MIOpen tensor descriptor for a convolution filter, always
  // emitting dims/strides in OIYX (output-input-spatial) order regardless of
  // the filter layout. LOG(FATAL)s if creation or initialization fails.
  ScopedFilterDescriptor(const FilterDescriptor& filter_descriptor,
                         miopenDataType_t elem_type)
      : handle_(nullptr) {
    auto status = wrap::miopenCreateTensorDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not create miopen filter descriptor: "
                 << ToString(status);
    }
    // We need to pass two vectors to the miopenSetTensorDescriptor routine
    // "dims" (length == number of dims, elem value == dimension size)
    // "strides" (length == number of dims, elem value == stride size)
    //
    // Irrespective of the actual filter layout, the indexing of both those
    // vectors must be the following (because that is what MIOpen expects)
    // dims[0] = strides[0] = N or output
    // dims[1] = strides[1] = C or input
    // dims[2] = strides[2] = H or spatial dim 0
    // dims[3] = strides[3] = W or spatial dim 1
    //
    // assume you have a tensor with dimensions
    // batch descriptor name    filter descriptor name    value
    //   N (batch size)            O (output features)    256
    //   C (channels)              I (input features)       3
    //   H (height)                H (height)               7
    //   W (width)                 W (width)                5
    //
    // The content of "dims" will be the same irrespective of layout
    // layout (NCHW or NHWC), and MIOpen expects it should be
    //                           NCHW layout   NHWC layout
    // dims[0] = size of N dim =    256           256
    // dims[1] = size of C dim =      3             3
    // dims[2] = size of H dim =      7             7
    // dims[3] = size of W dim =      5             5
    //
    // The content of "strides" will be different based on layout
    //                               NCHW layout   NHWC layout
    //  strides[0] = stride of N dim =  7x5x3        7x5x3
    //  strides[1] = stride of C dim =  7x5          1
    //  strides[2] = stride of H dim =  5            5x3
    //  strides[3] = stride of W dim =  1            3
    switch (filter_descriptor.layout()) {
      case dnn::FilterLayout::kOutputYXInput:
      case dnn::FilterLayout::kOutputInputYX: {
        const int nd = filter_descriptor.ndims() + 2;
        // MIOpen requires the strides and dims to be ordered as BDYX.
        std::vector<int64_t> strides64 =
            filter_descriptor.full_strides(dnn::FilterLayout::kOutputInputYX);
        std::vector<int64_t> dims64 =
            filter_descriptor.full_dims(dnn::FilterLayout::kOutputInputYX);
        // MIOpen requires arrays of ints.
        std::vector<int> strides;
        std::vector<int> dims;
        absl::c_transform(strides64, std::back_inserter(strides),
                          &CheckedNarrowing<int64_t, int>);
        absl::c_transform(dims64, std::back_inserter(dims),
                          &CheckedNarrowing<int64_t, int>);
        status = wrap::miopenSetTensorDescriptor(handle_, elem_type, nd,
                                                 dims.data(), strides.data());
        if (status != miopenStatusSuccess) {
          LOG(FATAL) << "could not convert FilterDescriptor "
                     << filter_descriptor.ToString()
                     << " to miopen tensor descriptor: " << ToString(status);
        }
      } break;
      default:
        LOG(FATAL) << "Unsupported tensor format "
                   << FilterLayoutString(filter_descriptor.layout());
        break;
    }
  }
  // Failure to destroy the descriptor is logged but not fatal.
  ~ScopedFilterDescriptor() {
    auto status = wrap::miopenDestroyTensorDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "could not destroy miopen filter descriptor: "
                 << ToString(status);
    }
  }
  miopenTensorDescriptor_t handle() const { return handle_; }
 private:
  // miopen filter descriptor this object creates. Owned.
  miopenTensorDescriptor_t handle_;
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedFilterDescriptor);
};
// Turns a ConvolutionDescriptor structure into a miopen convolution handle
// within a scope.
class ScopedConvolutionDescriptor {
 public:
  // Builds a MIOpen convolution descriptor (padding, strides, dilations and
  // group count) from `convolution_descriptor`. LOG(FATAL)s on any MIOpen
  // failure.
  // NOTE(review): `data_type` is not used in this constructor's body —
  // confirm whether it should be folded into the descriptor.
  ScopedConvolutionDescriptor(
      const ConvolutionDescriptor& convolution_descriptor,
      miopenDataType_t data_type)
      : handle_(nullptr) {
    auto status = wrap::miopenCreateConvolutionDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not create miopen convolution descriptor: "
                 << ToString(status);
    }
    const auto& strides64 = convolution_descriptor.strides();
    const auto& padding64 = convolution_descriptor.padding();
    if (convolution_descriptor.pad_alignment() ==
        dnn::PadAlignment::kTensorFlowPadding) {
      LOG(ERROR) << "TensorFlow padding alignment is not supported.";
    }
    // MIOpen requires arrays of ints.
    std::vector<int> strides(convolution_descriptor.ndims());
    std::vector<int> padding(convolution_descriptor.ndims());
    std::transform(strides64.cbegin(), strides64.cend(), strides.begin(),
                   &CheckedNarrowing<int64_t, int>);
    std::transform(padding64.cbegin(), padding64.cend(), padding.begin(),
                   &CheckedNarrowing<int64_t, int>);
    // "upscale" is MIOpen's name for the dilation factors.
    std::vector<int> upscale(convolution_descriptor.ndims());
    const auto& dilations64 = convolution_descriptor.dilations();
    std::transform(dilations64.cbegin(), dilations64.cend(), upscale.begin(),
                   &CheckedNarrowing<int64_t, int>);
    status = wrap::miopenInitConvolutionNdDescriptor(
        handle_, convolution_descriptor.ndims(), padding.data(), strides.data(),
        upscale.data(), miopenConvolution);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not set miopen convolution descriptor: "
                 << ToString(status);
    }
    VLOG(2) << "Requesting grouped convolution: "
            << convolution_descriptor.group_count();
    status = wrap::miopenSetConvolutionGroupCount(
        handle_, convolution_descriptor.group_count());
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not set miopen convolution group count: "
                 << ToString(status);
    }
  }
  // Failure to destroy the descriptor is logged but not fatal.
  ~ScopedConvolutionDescriptor() {
    auto status = wrap::miopenDestroyConvolutionDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "could not destroy miopen convolution descriptor: "
                 << ToString(status);
    }
  }
  miopenConvolutionDescriptor_t handle() const { return handle_; }
 private:
  miopenConvolutionDescriptor_t handle_;  // Owned.
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedConvolutionDescriptor);
};
// Turns a PoolingDescriptor structure into a miopen pooling descriptor handle
// within a scope.
class ScopedPoolingDescriptor {
 public:
  // Builds a MIOpen Nd pooling descriptor (max or average) from
  // `pooling_descriptor`, then forces the pooling index type to uint32.
  // LOG(FATAL)s on any MIOpen failure.
  ScopedPoolingDescriptor(const PoolingDescriptor& pooling_descriptor)
      : handle_(nullptr) {
    auto status = wrap::miopenCreatePoolingDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not create miopen pooling descriptor: "
                 << ToString(status);
    }
    absl::Span<const int64_t> strides64 = pooling_descriptor.strides();
    absl::Span<const int64_t> padding64 = pooling_descriptor.padding();
    absl::Span<const int64_t> shape64 = pooling_descriptor.window();
    const int nd = pooling_descriptor.ndims();
    // MIOpen requires arrays of ints.
    std::vector<int> shape(nd);
    std::vector<int> padding(nd);
    std::vector<int> strides(nd);
    std::transform(strides64.cbegin(), strides64.cend(), strides.begin(),
                   &CheckedNarrowing<int64_t, int>);
    std::transform(padding64.cbegin(), padding64.cend(), padding.begin(),
                   &CheckedNarrowing<int64_t, int>);
    std::transform(shape64.cbegin(), shape64.cend(), shape.begin(),
                   &CheckedNarrowing<int64_t, int>);
    status = wrap::miopenSetNdPoolingDescriptor(
        handle_,
        (pooling_descriptor.mode() == dnn::PoolingMode::kMaximum
             ? miopenPoolingMax
             : miopenPoolingAverage),
        nd, shape.data(), padding.data(), strides.data());
    // BUG FIX: this status was previously overwritten by the
    // miopenSetPoolingIndexType call below before being checked, so a failure
    // of miopenSetNdPoolingDescriptor went undetected.
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not set miopen pooling descriptor: "
                 << ToString(status);
    }
    // Note: The index type has to be uint32 type for now because MIOpen
    // API assumes all input indexes to be the same type. Since a tensor
    // descriptor can only use int32 type, the index type here needs to be
    // aligned with the tensor index type of the (input) tensor descriptor
    status = wrap::miopenSetPoolingIndexType(handle_, miopenIndexUint32);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not set miopen pooling index type: "
                 << ToString(status);
    }
  }
  // Failure to destroy the descriptor is logged but not fatal.
  ~ScopedPoolingDescriptor() {
    auto status = wrap::miopenDestroyPoolingDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "could not destroy miopen pooling descriptor: "
                 << ToString(status);
    }
  }
  miopenPoolingDescriptor_t handle() const { return handle_; }
 private:
  miopenPoolingDescriptor_t handle_;  // Owned.
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedPoolingDescriptor);
};
// Turns a NormalizeDescriptor structure into a miopen LRN descriptor handle.
class ScopedNormalizeDescriptor {
 public:
  // Builds a MIOpen cross-channel LRN descriptor from `normalize_descriptor`,
  // converting between the StreamExecutor and MIOpen parameterizations.
  // LOG(FATAL)s on any MIOpen failure.
  ScopedNormalizeDescriptor(const NormalizeDescriptor& normalize_descriptor)
      : handle_(nullptr) {
    auto status = wrap::miopenCreateLRNDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not create miopen LRN descriptor: "
                 << ToString(status);
    }
    // The range specifies that the indices in the closed range
    // [i - range, i + range] should be included in the normalization for index
    // i. The lrnN value is the total number of elements in the range, so
    // lrnN = 2*range + 1.
    unsigned lrn_N = 2 * normalize_descriptor.range() + 1;
    // Note that SE defines the normalization operation as
    //
    //  U_i = V_i / ((bias +  alpha      * (sum_j V_j^2)) ^ beta)
    //
    // but MIOpen defines it as
    //
    //  U_i = V_i / ((bias + (alpha / n) * (sum_j V_j^2)) ^ beta)
    //
    // i.e. there is a factor of n difference between the meaning of the alphas
    // in the two contexts. The MIOpen alpha is n times the SE alpha.
    double lrn_alpha = lrn_N * normalize_descriptor.alpha();
    double lrn_beta = normalize_descriptor.beta();
    double lrn_k = normalize_descriptor.bias();
    status = wrap::miopenSetLRNDescriptor(handle_, miopenLRNCrossChannel, lrn_N,
                                          lrn_alpha, lrn_beta, lrn_k);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "could not set miopen LRN descriptor: " << ToString(status);
    }
  }
  // Failure to destroy the descriptor is logged but not fatal.
  ~ScopedNormalizeDescriptor() {
    auto status = wrap::miopenDestroyLRNDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "could not destroy miopen LRN descriptor: "
                 << ToString(status);
    }
  }
  miopenLRNDescriptor_t handle() const { return handle_; }
 private:
  miopenLRNDescriptor_t handle_;  // Owned.
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedNormalizeDescriptor);
};
// Turns a activation mode into a miopen activation mode descriptor with a scope
// around it
class ScopedActivationDescriptor {
 public:
  // Builds a MIOpen activation descriptor from a dnn::ActivationMode.
  // Unsupported modes are fatal. The mode and alpha/beta/gamma coefficients
  // are cached as public members for fast hashing (see note below).
  ScopedActivationDescriptor(dnn::ActivationMode activation_mode)
      : handle_(nullptr),
        miopen_activation_mode_(miopenActivationPASTHRU),
        alpha_(0.0),
        beta_(0.0),
        gamma_(0.0) {
    auto status = wrap::miopenCreateActivationDescriptor(&handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenCreateActivationDescriptor failed: "
                 << ToString(status);
    } else {
      switch (activation_mode) {
        case dnn::ActivationMode::kNone:
          miopen_activation_mode_ = miopenActivationPASTHRU;
          break;
        case dnn::ActivationMode::kSigmoid:
          miopen_activation_mode_ = miopenActivationLOGISTIC;
          break;
        case dnn::ActivationMode::kRelu:
          miopen_activation_mode_ = miopenActivationRELU;
          break;
        case dnn::ActivationMode::kRelu6:
          // NOTE(review): kRelu6 is mapped to plain RELU with alpha_ = 6.0.
          // Confirm that MIOpen's RELU honors alpha as a clipping threshold
          // here (miopenActivationCLIPPEDRELU is the explicitly clipped
          // variant).
          miopen_activation_mode_ = miopenActivationRELU;
          alpha_ = 6.0;
          break;
        case dnn::ActivationMode::kTanh:
          miopen_activation_mode_ = miopenActivationTANH;
          break;
        default:
          LOG(FATAL) << "Activation mode ("
                     << dnn::ActivationModeString(activation_mode)
                     << ") not yet implemented";
          break;
      }
      status = wrap::miopenSetActivationDescriptor(
          handle_, miopen_activation_mode_, alpha_, beta_, gamma_);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenSetActivationDescriptor failed: "
                   << ToString(status);
      }
    }
  }
  ~ScopedActivationDescriptor() {
    auto status = wrap::miopenDestroyActivationDescriptor(handle_);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenDestroyActivationDescriptor failed: "
                 << ToString(status);
    }
  }
  miopenActivationDescriptor_t handle() const { return handle_; }
  // Hash of the activation mode and its coefficients; used when hashing
  // fusion plans that contain an activation op.
  uint64_t GetHashValue() {
    uint64_t hash_value = tensorflow::hash<int>()(miopen_activation_mode_);
    hash_value = tensorflow::Hash64Combine(hash_value,
                                           tensorflow::hash<double>()(alpha_));
    hash_value = tensorflow::Hash64Combine(hash_value,
                                           tensorflow::hash<double>()(beta_));
    hash_value = tensorflow::Hash64Combine(hash_value,
                                           tensorflow::hash<double>()(gamma_));
    return hash_value;
  }
 private:
  miopenActivationDescriptor_t handle_;  // Owned.
  SE_DISALLOW_COPY_AND_ASSIGN(ScopedActivationDescriptor);
 public:
  // caching these values here to avoid calling miopenGetActivationDescriptor
  // to do the same. miopenGetActivationDescriptor gets called twice during each
  // call to execute a fusion plan (that involves the activation op)...once call
  // during calculating hashvalue for the fusion op, and another before calling
  // SetOpArgs for the activation op
  miopenActivationMode_t miopen_activation_mode_;
  double alpha_;
  double beta_;
  double gamma_;
};
// base class for all fusion plan implementations to derive from
class ScopedFusionPlanBase {
public:
ScopedFusionPlanBase(miopenHandle_t miopen_handle,
const miopenFusionDirection_t fuse_direction,
const miopenTensorDescriptor_t input_descriptor)
: miopen_handle_(miopen_handle),
fusion_plan_(nullptr),
fusion_args_(nullptr),
fusion_plan_compiled_(false) {
auto status = wrap::miopenCreateOperatorArgs(&fusion_args_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOperatorArgs failed: "
<< ToString(status);
}
}
virtual ~ScopedFusionPlanBase() {
auto status = wrap::miopenDestroyOperatorArgs(fusion_args_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenDestroyoperatorArgs failed: "
<< ToString(status);
}
}
miopenStatus_t Execute(miopenTensorDescriptor_t input_descriptor,
const void* input_data,
miopenTensorDescriptor_t output_descriptor,
void* output_data) {
auto status = wrap::miopenExecuteFusionPlan(
miopen_handle_, fusion_plan_, input_descriptor, input_data,
output_descriptor, output_data, fusion_args_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenExecuteFusionPlan failed: "
<< ToString(status);
}
return status;
}
bool CompilationSucceeded() { return fusion_plan_compiled_; }
protected:
miopenStatus_t SetConvolutionArgs(const int op_idx, const float* alpha,
const float* beta, const void* data) {
miopenFusionOpDescriptor_t conv_op;
auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &conv_op);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
<< ToString(status);
}
status = wrap::miopenSetOpArgsConvForward(fusion_args_, conv_op, alpha,
beta, data);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenSetOpArgsConvForward failed: "
<< ToString(status);
}
return status;
}
miopenStatus_t SetBiasArgs(const int op_idx, const float* alpha,
const float* beta, const void* data) {
miopenFusionOpDescriptor_t bias_op;
auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &bias_op);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
<< ToString(status);
}
status = wrap::miopenSetOpArgsBiasForward(fusion_args_, bias_op, alpha,
beta, data);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenSetOpArgsBiasForward failed: "
<< ToString(status);
}
return status;
}
  // Binds batchnorm-inference arguments (scale/offset plus pre-computed
  // population mean/variance) to the fusion-plan op at index |op_idx|.
  // Aborts on MIOpen failure.
  miopenStatus_t SetBatchNormInferenceArgs(const int op_idx, const float* alpha,
                                           const float* beta, const void* scale,
                                           const void* offset, const void* mean,
                                           const void* variance,
                                           double epsilon) {
    miopenFusionOpDescriptor_t batchnorm_op;
    auto status =
        wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
                 << ToString(status);
    }
    status = wrap::miopenSetOpArgsBatchNormInference(fusion_args_, batchnorm_op,
                                                     alpha, beta, scale, offset,
                                                     mean, variance, epsilon);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenSetOpArgsBatchNormInference failed: "
                 << ToString(status);
    }
    return status;
  }
  // Binds batchnorm training-forward arguments to the fusion-plan op at
  // index |op_idx|.  Note the deliberate argument reordering below: this
  // wrapper takes running_* before saved_*, but the MIOpen API expects
  // saved_* (per-batch statistics) before running_* (moving averages).
  // Aborts on MIOpen failure.
  miopenStatus_t SetBatchNormForwardArgs(
      const int op_idx, const float* alpha, const float* beta,
      const void* scale, const void* offset, void* running_mean,
      void* running_variance, void* saved_mean, void* saved_inv_variance,
      double epsilon, double exponential_average_factor) {
    miopenFusionOpDescriptor_t batchnorm_op;
    auto status =
        wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
                 << ToString(status);
    }
    // saved_* precede running_* here by design (MIOpen argument order).
    status = wrap::miopenSetOpArgsBatchNormForward(
        fusion_args_, batchnorm_op, alpha, beta, scale, offset, saved_mean,
        saved_inv_variance, running_mean, running_variance, epsilon,
        exponential_average_factor);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenSetOpArgsBatchNormForward failed: "
                 << ToString(status);
    }
    return status;
  }
  // Binds batchnorm training-backward arguments (input x, parameters,
  // parameter-gradient buffers, and the statistics saved by the forward
  // pass) to the fusion-plan op at index |op_idx|.  Aborts on failure.
  miopenStatus_t SetBatchNormBackwardArgs(const int op_idx, const float* alpha,
                                          const float* beta, const void* x,
                                          const void* scale, const void* offset,
                                          void* scale_grad, void* offset_grad,
                                          const void* saved_mean,
                                          const void* saved_inv_variance) {
    miopenFusionOpDescriptor_t batchnorm_op;
    auto status =
        wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &batchnorm_op);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
                 << ToString(status);
    }
    status = wrap::miopenSetOpArgsBatchNormBackward(
        fusion_args_, batchnorm_op, alpha, beta, x, scale, offset, scale_grad,
        offset_grad, saved_mean, saved_inv_variance);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenSetOpArgsBatchNormBackward failed: "
                 << ToString(status);
    }
    return status;
  }
miopenStatus_t SetActivationForwardArgs(const int op_idx, const float* alpha,
const float* beta, double activ_alpha,
double activ_beta,
double activ_gamma) {
miopenFusionOpDescriptor_t actv_op;
auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &actv_op);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
<< ToString(status);
}
status =
wrap::miopenSetOpArgsActivForward(fusion_args_, actv_op, alpha, beta,
activ_alpha, activ_beta, activ_gamma);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenSetOpArgsActivForward failed: "
<< ToString(status);
}
return status;
}
  // Binds activation-backward arguments to the fusion-plan op at index
  // |op_idx|.  |y| is the activation's forward output.  Aborts on failure.
  miopenStatus_t SetActivationBackwardArgs(const int op_idx, const float* alpha,
                                           const float* beta, const void* y,
                                           double activ_alpha,
                                           double activ_beta,
                                           double activ_gamma) {
    miopenFusionOpDescriptor_t actv_op;
    auto status = wrap::miopenFusionPlanGetOp(fusion_plan_, op_idx, &actv_op);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenFusionPlanGetOp failed: "
                 << ToString(status);
    }
    // nullptr is passed for the x (forward input) argument slot —
    // apparently not required by MIOpen for this op; TODO confirm.
    status = wrap::miopenSetOpArgsActivBackward(fusion_args_, actv_op, alpha,
                                                beta, y, nullptr, activ_alpha,
                                                activ_beta, activ_gamma);
    if (status != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenSetOpArgsActivBackward failed: "
                 << ToString(status);
    }
    return status;
  }
  // MIOpen handle supplied at construction; presumably not owned by this
  // class (it is passed in externally) — TODO confirm against the ctor.
  miopenHandle_t miopen_handle_;
  // Fusion plan obtained via CachedFusionPlans::FindOrCreate in derived
  // constructors; lifetime is managed by that cache.
  miopenFusionPlanDescriptor_t fusion_plan_;
  miopenOperatorArgs_t fusion_args_;  // Owned.
  // True once miopenCompileFusionPlan succeeded for fusion_plan_.
  bool fusion_plan_compiled_;

  SE_DISALLOW_COPY_AND_ASSIGN(ScopedFusionPlanBase);
};
// class to represent the Convolution+Bias+Activation fusion plan
class ScopedFusionPlanConvolutionBiasActivation : public ScopedFusionPlanBase {
public:
ScopedFusionPlanConvolutionBiasActivation(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t filter_descriptor,
miopenConvolutionDescriptor_t conv_descriptor,
miopenTensorDescriptor_t bias_descriptor,
ScopedActivationDescriptor& activation_descriptor)
: ScopedFusionPlanBase(miopen_handle, miopenVerticalFusion,
input_descriptor) {
uint64_t hash = GetFusionOpHashValue(
miopen_handle, input_descriptor, filter_descriptor, conv_descriptor,
bias_descriptor, activation_descriptor);
bool is_compiled = CachedFusionPlans::FindOrCreate(
hash, &fusion_plan_, miopenVerticalFusion, input_descriptor);
if (!is_compiled) {
miopenFusionOpDescriptor_t conv_op;
auto status = wrap::miopenCreateOpConvForward(
fusion_plan_, &conv_op, conv_descriptor, filter_descriptor);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpConvForward failed: "
<< ToString(status);
}
miopenFusionOpDescriptor_t bias_op;
status = wrap::miopenCreateOpBiasForward(fusion_plan_, &bias_op,
bias_descriptor);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpBiasForward failed: "
<< ToString(status);
}
miopenFusionOpDescriptor_t actv_op;
status = wrap::miopenCreateOpActivationForward(
fusion_plan_, &actv_op,
activation_descriptor.miopen_activation_mode_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpActivationForward failed: "
<< ToString(status);
}
status = wrap::miopenCompileFusionPlan(miopen_handle_, fusion_plan_);
if (status != miopenStatusSuccess) {
VLOG(2) << "call to miopenCompileFusionPlan (CBA) failed: "
<< ToString(status);
CachedFusionPlans::MarkFusionPlanUnsupported(hash);
} else {
VLOG(2) << "Fusion Plan compile succedded (CBA) ";
fusion_plan_compiled_ = true;
}
} else {
// fusion plan was already compiled...check whether it failed to compile
fusion_plan_compiled_ = !CachedFusionPlans::IsUnsupportedFusionPlan(hash);
}
}
miopenStatus_t SetConvolutionArgs(const void* filter_data) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetConvolutionArgs(k_conv_op_idx, &alpha,
&beta, filter_data);
}
miopenStatus_t SetBiasArgs(const void* bias_data) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetBiasArgs(k_bias_op_idx, &alpha, &beta,
bias_data);
}
miopenStatus_t SetActivationForwardArgs(
ScopedActivationDescriptor& activation_descriptor) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetActivationForwardArgs(
k_actv_op_idx, &alpha, &beta, activation_descriptor.alpha_,
activation_descriptor.beta_, activation_descriptor.gamma_);
}
uint64_t GetFusionOpHashValue(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t filter_descriptor,
miopenConvolutionDescriptor_t conv_descriptor,
miopenTensorDescriptor_t bias_descriptor,
ScopedActivationDescriptor& activation_descriptor) {
uint64_t hash_value = tensorflow::Hash64("ConvolutionBiasActivation");
hash_value = tensorflow::Hash64Combine(
hash_value, tensorflow::hash<miopenHandle_t>()(miopen_handle));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(input_descriptor));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(filter_descriptor));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(conv_descriptor));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(bias_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, activation_descriptor.GetHashValue());
return hash_value;
}
private:
const int k_conv_op_idx = 0;
const int k_bias_op_idx = 1;
const int k_actv_op_idx = 2;
SE_DISALLOW_COPY_AND_ASSIGN(ScopedFusionPlanConvolutionBiasActivation);
};
// class to represent the BatchNorm+Activation (inference) fusion plan
class ScopedFusionPlanBatchNormActivationInference
: public ScopedFusionPlanBase {
public:
ScopedFusionPlanBatchNormActivationInference(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor)
: ScopedFusionPlanBase(miopen_handle, miopenVerticalFusion,
input_descriptor) {
uint64_t hash = GetFusionOpHashValue(miopen_handle, input_descriptor,
scale_offset_mean_variance_descriptor,
activation_descriptor);
bool is_compiled = CachedFusionPlans::FindOrCreate(
hash, &fusion_plan_, miopenVerticalFusion, input_descriptor);
if (!is_compiled) {
miopenFusionOpDescriptor_t batchnorm_op;
auto status = wrap::miopenCreateOpBatchNormInference(
fusion_plan_, &batchnorm_op, miopenBNSpatial,
scale_offset_mean_variance_descriptor);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpBatchNormInference failed: "
<< ToString(status);
}
miopenFusionOpDescriptor_t actv_op;
status = wrap::miopenCreateOpActivationForward(
fusion_plan_, &actv_op,
activation_descriptor.miopen_activation_mode_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpActivationForward failed: "
<< ToString(status);
}
status = wrap::miopenCompileFusionPlan(miopen_handle_, fusion_plan_);
if (status != miopenStatusSuccess) {
VLOG(2) << "call to miopenCompileFusionPlan (BnA inference) failed: "
<< ToString(status);
CachedFusionPlans::MarkFusionPlanUnsupported(hash);
} else {
VLOG(2) << "Fusion Plan compile succedded (BnA inference) ";
fusion_plan_compiled_ = true;
}
} else {
// fusion plan was already compiled...check whether it failed to compile
fusion_plan_compiled_ = !CachedFusionPlans::IsUnsupportedFusionPlan(hash);
}
}
miopenStatus_t SetBatchNormInferenceArgs(const void* scale,
const void* offset, const void* mean,
const void* variance,
double epsilon) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetBatchNormInferenceArgs(
k_batchnorm_op_idx, &alpha, &beta, scale, offset, mean, variance,
epsilon);
}
miopenStatus_t SetActivationForwardArgs(
ScopedActivationDescriptor& activation_descriptor) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetActivationForwardArgs(
k_actv_op_idx, &alpha, &beta, activation_descriptor.alpha_,
activation_descriptor.beta_, activation_descriptor.gamma_);
}
uint64_t GetFusionOpHashValue(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor) {
uint64_t hash_value = tensorflow::Hash64("BatchNormActivationInference");
hash_value = tensorflow::Hash64Combine(
hash_value, tensorflow::hash<miopenHandle_t>()(miopen_handle));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(input_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, GetHashValue(scale_offset_mean_variance_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, activation_descriptor.GetHashValue());
return hash_value;
}
private:
const int k_batchnorm_op_idx = 0;
const int k_actv_op_idx = 1;
SE_DISALLOW_COPY_AND_ASSIGN(ScopedFusionPlanBatchNormActivationInference);
};
// class to represent the BatchNorm+Activation (training-forward) fusion plan
class ScopedFusionPlanBatchNormActivationForward : public ScopedFusionPlanBase {
public:
ScopedFusionPlanBatchNormActivationForward(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor)
: ScopedFusionPlanBase(miopen_handle, miopenVerticalFusion,
input_descriptor) {
uint64_t hash = GetFusionOpHashValue(miopen_handle, input_descriptor,
scale_offset_mean_variance_descriptor,
activation_descriptor);
bool is_compiled = CachedFusionPlans::FindOrCreate(
hash, &fusion_plan_, miopenVerticalFusion, input_descriptor);
if (!is_compiled) {
miopenFusionOpDescriptor_t batchnorm_op;
auto status = wrap::miopenCreateOpBatchNormForward(
fusion_plan_, &batchnorm_op, miopenBNSpatial,
true /* runningMeanVariance */);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpBatchNormForward failed: "
<< ToString(status);
}
miopenFusionOpDescriptor_t actv_op;
status = wrap::miopenCreateOpActivationForward(
fusion_plan_, &actv_op,
activation_descriptor.miopen_activation_mode_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpActivationForward failed: "
<< ToString(status);
}
status = wrap::miopenCompileFusionPlan(miopen_handle_, fusion_plan_);
if (status != miopenStatusSuccess) {
VLOG(2) << "call to miopenCompileFusionPlan (BnA forward) failed: "
<< ToString(status);
CachedFusionPlans::MarkFusionPlanUnsupported(hash);
} else {
VLOG(2) << "Fusion Plan compile succedded (BnA forward) ";
fusion_plan_compiled_ = true;
}
} else {
// fusion plan was already compiled...check whether it failed to compile
fusion_plan_compiled_ = !CachedFusionPlans::IsUnsupportedFusionPlan(hash);
}
}
miopenStatus_t SetBatchNormForwardArgs(const void* scale, const void* offset,
void* batch_mean, void* batch_var,
void* saved_mean, void* saved_var,
double epsilon) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetBatchNormForwardArgs(
k_batchnorm_op_idx, &alpha, &beta, scale, offset, batch_mean, batch_var,
saved_mean, saved_var, epsilon, /*exponential_average_factor=*/1.0);
}
miopenStatus_t SetActivationForwardArgs(
ScopedActivationDescriptor& activation_descriptor) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetActivationForwardArgs(
k_actv_op_idx, &alpha, &beta, activation_descriptor.alpha_,
activation_descriptor.beta_, activation_descriptor.gamma_);
}
uint64_t GetFusionOpHashValue(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor) {
uint64_t hash_value = tensorflow::Hash64("BatchNormActivationForward");
hash_value = tensorflow::Hash64Combine(
hash_value, tensorflow::hash<miopenHandle_t>()(miopen_handle));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(input_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, GetHashValue(scale_offset_mean_variance_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, activation_descriptor.GetHashValue());
return hash_value;
}
private:
const int k_batchnorm_op_idx = 0;
const int k_actv_op_idx = 1;
SE_DISALLOW_COPY_AND_ASSIGN(ScopedFusionPlanBatchNormActivationForward);
};
// class to represent the BatchNorm+Activation (training-backward) fusion plan
class ScopedFusionPlanBatchNormActivationBackward
: public ScopedFusionPlanBase {
public:
ScopedFusionPlanBatchNormActivationBackward(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor)
: ScopedFusionPlanBase(miopen_handle, miopenVerticalFusion,
input_descriptor) {
uint64_t hash = GetFusionOpHashValue(miopen_handle, input_descriptor,
scale_offset_mean_variance_descriptor,
activation_descriptor);
bool is_compiled = CachedFusionPlans::FindOrCreate(
hash, &fusion_plan_, miopenVerticalFusion, input_descriptor);
if (!is_compiled) {
miopenFusionOpDescriptor_t batchnorm_op;
auto status = wrap::miopenCreateOpBatchNormBackward(
fusion_plan_, &batchnorm_op, miopenBNSpatial);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpBatchNormBackward failed: "
<< ToString(status);
}
miopenFusionOpDescriptor_t actv_op;
status = wrap::miopenCreateOpActivationBackward(
fusion_plan_, &actv_op,
activation_descriptor.miopen_activation_mode_);
if (status != miopenStatusSuccess) {
LOG(FATAL) << "call to miopenCreateOpActivationBackward failed: "
<< ToString(status);
}
status = wrap::miopenCompileFusionPlan(miopen_handle_, fusion_plan_);
if (status != miopenStatusSuccess) {
VLOG(2) << "call to miopenCompileFusionPlan (BnA backward) failed: "
<< ToString(status);
CachedFusionPlans::MarkFusionPlanUnsupported(hash);
} else {
VLOG(2) << "Fusion Plan compile succedded (BnA backward) ";
fusion_plan_compiled_ = true;
}
} else {
// fusion plan was already compiled...check whether it failed to compile
fusion_plan_compiled_ = !CachedFusionPlans::IsUnsupportedFusionPlan(hash);
}
}
miopenStatus_t SetBatchNormBackwardArgs(const void* x, const void* scale,
const void* offset,
const void* saved_mean,
const void* saved_var,
void* scale_grad, void* offset_grad) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetBatchNormBackwardArgs(
k_batchnorm_op_idx, &alpha, &beta, x, scale, offset, scale_grad,
offset_grad, saved_mean, saved_var);
}
miopenStatus_t SetActivationBackwardArgs(
ScopedActivationDescriptor& activation_descriptor, const void* y) {
float alpha = 1.0;
float beta = 0.0;
return ScopedFusionPlanBase::SetActivationBackwardArgs(
k_actv_op_idx, &alpha, &beta, y, activation_descriptor.alpha_,
activation_descriptor.beta_, activation_descriptor.gamma_);
}
uint64_t GetFusionOpHashValue(
miopenHandle_t miopen_handle, miopenTensorDescriptor_t input_descriptor,
miopenTensorDescriptor_t scale_offset_mean_variance_descriptor,
ScopedActivationDescriptor& activation_descriptor) {
uint64_t hash_value = tensorflow::Hash64("BatchNormActivationBackward");
hash_value = tensorflow::Hash64Combine(
hash_value, tensorflow::hash<miopenHandle_t>()(miopen_handle));
hash_value =
tensorflow::Hash64Combine(hash_value, GetHashValue(input_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, GetHashValue(scale_offset_mean_variance_descriptor));
hash_value = tensorflow::Hash64Combine(
hash_value, activation_descriptor.GetHashValue());
return hash_value;
}
private:
const int k_batchnorm_op_idx = 0;
const int k_actv_op_idx = 1;
SE_DISALLOW_COPY_AND_ASSIGN(ScopedFusionPlanBatchNormActivationBackward);
};
namespace {
// Converts a StreamExecutor DNN data type to the MIOpen equivalent.
// |data_layout| is accepted for API symmetry but is not consulted.
// Aborts on unsupported types (including double, which MIOpen here
// does not handle).
miopenDataType_t ToMIOpenDataType(
    dnn::DataType data_type,
    dnn::DataLayout data_layout = dnn::DataLayout::kBatchDepthYX) {
  if (data_type == dnn::DataType::kFloat) {
    return miopenFloat;
  }
  if (data_type == dnn::DataType::kHalf) {
    return miopenHalf;
  }
  LOG(FATAL) << "Invalid DNN data type: " << static_cast<int>(data_type);
}
// Converts a StreamExecutor RNN input mode to the MIOpen equivalent.
// Aborts on unknown modes.
miopenRNNInputMode_t ToMIOpenRnnInputMode(dnn::RnnInputMode input_mode) {
  if (input_mode == dnn::RnnInputMode::kRnnLinearSkip) {
    return miopenRNNlinear;
  }
  if (input_mode == dnn::RnnInputMode::kRnnSkipInput) {
    return miopenRNNskip;
  }
  LOG(FATAL) << "Invalid RNN input mode: " << static_cast<int>(input_mode);
}
// Converts a StreamExecutor RNN direction mode to the MIOpen equivalent.
// Aborts on unknown modes.
miopenRNNDirectionMode_t ToMIOpenRnnDirectionMode(
    dnn::RnnDirectionMode direction_mode) {
  if (direction_mode == dnn::RnnDirectionMode::kRnnUnidirectional) {
    return miopenRNNunidirection;
  }
  if (direction_mode == dnn::RnnDirectionMode::kRnnBidirectional) {
    return miopenRNNbidirection;
  }
  LOG(FATAL) << "Invalid RNN direction mode: "
             << static_cast<int>(direction_mode);
}
// Converts a StreamExecutor RNN cell type to the MIOpen equivalent.
// Aborts on unknown modes.
miopenRNNMode_t ToMIOpenRnnMode(dnn::RnnMode rnn_mode) {
  if (rnn_mode == dnn::RnnMode::kRnnRelu) {
    return miopenRNNRELU;
  }
  if (rnn_mode == dnn::RnnMode::kRnnTanh) {
    return miopenRNNTANH;
  }
  if (rnn_mode == dnn::RnnMode::kRnnLstm) {
    return miopenLSTM;
  }
  if (rnn_mode == dnn::RnnMode::kRnnGru) {
    return miopenGRU;
  }
  LOG(FATAL) << "Invalid RNN Mode: " << static_cast<int>(rnn_mode);
}
// Helper for optional inheritance: MixinBase<Base> derives from Base,
// while the MixinBase<void> specialization derives from nothing.  This
// lets MIOpenDescriptorCommon layer onto a dnn:: base class or stand
// alone.
template <typename Base>
class MixinBase : public Base {};
template <>
class MixinBase<void> {};
} // namespace
// On a non-success MIOpen status: records the failure on the enclosing
// object (via SetFailure, see MIOpenDescriptorCommon), logs the message,
// and returns from the current (void-returning) function.  No-op when
// STATUS is miopenStatusSuccess.
#define RETURN_IF_MIOPEN_ERROR(STATUS, ...)                              \
  if (!SE_PREDICT_TRUE((STATUS) == miopenStatusSuccess)) {               \
    string error_msg = absl::StrCat(ToString(STATUS), " ", __VA_ARGS__); \
    SetFailure(port::Status(port::error::UNKNOWN, error_msg));           \
    LOG(ERROR) << error_msg;                                             \
    return;                                                              \
  }
// Common base for MIOpen descriptor wrappers: accumulates the first
// failure status reported by any MIOpen call (via SetFailure, which is
// invoked by RETURN_IF_MIOPEN_ERROR).
template <typename Base>
class MIOpenDescriptorCommon : public MixinBase<Base> {
 public:
  // True while no MIOpen failure has been recorded.
  bool ok() const { return status_.ok(); }
  port::Status Status() const { return status_; }

 protected:
  // Accumulates |status| into the stored status (Update keeps the first
  // error).
  void SetFailure(const port::Status& status) { status_.Update(status); }
  port::Status status_;
};
// Wraps the MIOpen tensor descriptor that describes an RNN's parameter
// (weights + biases) buffer, along with the byte size and per-region
// layout extracted from the RNN descriptor.
class MIOpenRnnParamsDescriptor : public MIOpenDescriptorCommon<void> {
 public:
  typedef dnn::RnnDescriptor::ParamsRegion ParamsRegion;
  typedef dnn::RnnDescriptor::ParamsRegions ParamsRegions;
  // Defined elsewhere in this file; populates handle_, sizes and regions
  // from |rnn_desc|.
  MIOpenRnnParamsDescriptor(miopenHandle_t miopen_handle,
                            const MIOpenRnnDescriptor& rnn_desc);
  ~MIOpenRnnParamsDescriptor() {
    auto status = wrap::miopenDestroyTensorDescriptor(handle_);
    RETURN_IF_MIOPEN_ERROR(status, "Failed to destroy RNN tensor descriptor");
  }
  // Returns the underlying descriptor, or nullptr if construction failed.
  miopenTensorDescriptor_t handle() const {
    if (!ok()) return nullptr;
    return handle_;
  }
  int64_t params_size_in_bytes() const { return params_size_in_bytes_; }
  // Weight regions within the parameter buffer; empty on failure.
  ParamsRegions params_weights() const {
    if (!ok()) return ParamsRegions();
    return weights_;
  }
  // Bias regions within the parameter buffer; empty on failure.
  ParamsRegions params_biases() const {
    if (!ok()) return ParamsRegions();
    return biases_;
  }

 private:
  // Number of weight/bias regions per layer for the RNN mode (see the
  // definition below).
  int GetRegionCountPerLayer() const;
  miopenTensorDescriptor_t handle_;
  const MIOpenRnnDescriptor* rnn_desc_;  // Not owned.
  int64_t params_size_in_bytes_;
  ParamsRegions weights_;
  ParamsRegions biases_;
  port::Status status_;
  SE_DISALLOW_COPY_AND_ASSIGN(MIOpenRnnParamsDescriptor);
};
// RAII wrapper around a miopenRNNDescriptor_t plus its associated
// parameter descriptor.  Failure during construction is recorded on the
// MIOpenDescriptorCommon base instead of throwing; callers check ok().
class MIOpenRnnDescriptor : public MIOpenDescriptorCommon<dnn::RnnDescriptor> {
 public:
  // Note: |dropout|, |seed| and |state_allocator| are accepted for
  // interface parity with the cuDNN backend but are unused here (MIOpen
  // has no dropout descriptor; see comment on the members below).
  MIOpenRnnDescriptor(miopenHandle_t miopen_handle, int num_layers,
                      int hidden_size, int input_size,
                      miopenRNNInputMode_t input_mode,
                      miopenRNNDirectionMode_t direction_mode,
                      miopenRNNMode_t rnn_mode, miopenDataType_t data_type,
                      float dropout, uint64_t seed,
                      ScratchAllocator* state_allocator)
      : rnn_desc_(nullptr),
        num_layers_(num_layers),
        hidden_size_(hidden_size),
        input_size_(input_size),
        input_mode_(input_mode),
        direction_mode_(direction_mode),
        rnn_mode_(rnn_mode),
        data_type_(data_type) {
    // Create the RNN handle
    auto status = wrap::miopenCreateRNNDescriptor(&rnn_desc_);
    RETURN_IF_MIOPEN_ERROR(status, "Unable to create RNN descriptor");
    // Bias mode and algorithm are fixed: always with bias, default algo.
    status = wrap::miopenSetRNNDescriptor(
        rnn_desc_ /*rnnDesc*/, hidden_size /*hiddenSize*/,
        num_layers /*numLayers*/, input_mode /*inputMode*/,
        direction_mode /*direction*/, rnn_mode /*mode*/,
        miopenRNNwithBias /*biasMode*/, miopenRNNdefault /*algo*/,
        data_type /*dataType*/);
    RETURN_IF_MIOPEN_ERROR(status, "Unable to update RNN descriptor");
    // Create the params handle.
    miopen_params_desc_.reset(
        new MIOpenRnnParamsDescriptor(miopen_handle, *this));
    if (!miopen_params_desc_->ok()) {
      // Propagate the params-descriptor failure to this object.
      SetFailure(miopen_params_desc_->Status());
      return;
    }
  }
  ~MIOpenRnnDescriptor() override {
    if (rnn_desc_) {
      auto status = wrap::miopenDestroyRNNDescriptor(rnn_desc_);
      RETURN_IF_MIOPEN_ERROR(status, "Unable to destroy RNN descriptor");
    }
  }
  // Returns the underlying descriptor, or nullptr if construction failed.
  miopenRNNDescriptor_t handle() const {
    if (!ok()) return nullptr;
    return rnn_desc_;
  }
  int num_layers() const { return num_layers_; }
  int hidden_size() const { return hidden_size_; }
  int input_size() const { return input_size_; }
  miopenRNNInputMode_t input_mode() const { return input_mode_; }
  miopenRNNDirectionMode_t direction_mode() const { return direction_mode_; }
  miopenRNNMode_t rnn_mode() const { return rnn_mode_; }
  miopenDataType_t data_type() const { return data_type_; }
  int64_t ParamsSizeInBytes() const override {
    return miopen_params_desc_->params_size_in_bytes();
  }
  miopenTensorDescriptor_t params_handle() const {
    if (!miopen_params_desc_) return nullptr;
    return miopen_params_desc_->handle();
  }
  ParamsRegions ParamsWeightRegions() const override {
    if (!ok()) return ParamsRegions();
    return miopen_params_desc_->params_weights();
  }
  ParamsRegions ParamsBiasRegions() const override {
    if (!ok()) return ParamsRegions();
    return miopen_params_desc_->params_biases();
  }

 private:
  miopenRNNDescriptor_t rnn_desc_;
  int num_layers_;
  int hidden_size_;
  int input_size_;
  miopenRNNInputMode_t input_mode_;
  miopenRNNDirectionMode_t direction_mode_;
  miopenRNNMode_t rnn_mode_;
  miopenDataType_t data_type_;
  port::Status status_;
  // no dropout in MIOpen.
  // std::unique_ptr<miopenDropoutDescriptor> miopen_dropout_desc_;
  std::unique_ptr<MIOpenRnnParamsDescriptor> miopen_params_desc_;
  SE_DISALLOW_COPY_AND_ASSIGN(MIOpenRnnDescriptor);
};
// Returns the number of parameter regions (weight/bias groupings) per
// layer for the RNN mode configured in rnn_desc_.
int MIOpenRnnParamsDescriptor::GetRegionCountPerLayer() const {
  const auto mode = rnn_desc_->rnn_mode();
  if (mode == miopenRNNRELU || mode == miopenRNNTANH) {
    return 2;
  }
  if (mode == miopenLSTM) {
    return 8;
  }
  if (mode == miopenGRU) {
    return 6;
  }
  LOG(FATAL) << "Invalid RNN Mode: " << static_cast<int>(mode);
}
// Describes the per-timestep input/output tensors of an RNN: a single
// {batch_size, data_size} tensor descriptor replicated seq_length times.
class MIOpenRnnSequenceTensorDescriptor
    : public MIOpenDescriptorCommon<dnn::RnnSequenceTensorDescriptor> {
 public:
  MIOpenRnnSequenceTensorDescriptor(int seq_length, int batch_size,
                                    int data_size, miopenDataType_t data_type)
      : seq_length_(seq_length),
        batch_size_(batch_size),
        data_size_(data_size),
        data_type_(data_type) {
    miopenTensorDescriptor_t handle = nullptr;
    if (seq_length <= 0) {
      string error_msg =
          absl::StrCat("sequence length must be positive: ", seq_length);
      LOG(ERROR) << error_msg;
      SetFailure(port::Status(port::error::UNKNOWN, error_msg));
      return;
    }
    auto status = wrap::miopenCreateTensorDescriptor(&handle);
    RETURN_IF_MIOPEN_ERROR(status, "Failed to create tensor descriptor");
    std::array<int, 2> dims = {{batch_size, data_size}};
    status = wrap::miopenSetTensorDescriptor(
        handle /*tensorDesc*/, data_type /*dataType*/, 2 /*nbDims*/,
        dims.data() /*dimA*/, nullptr /*strideA*/);
    RETURN_IF_MIOPEN_ERROR(status, "Failed to update tensor descriptor");
    // Replicate handle across the number of steps.
    handles_.assign(seq_length, handle);
  }
  ~MIOpenRnnSequenceTensorDescriptor() override {
    // Only the first handle needs to be destroyed: all entries alias it.
    // handles_ may be empty if the constructor bailed out early (e.g.
    // non-positive seq_length), in which case there is nothing to free.
    if (!handles_.empty()) {
      auto status = wrap::miopenDestroyTensorDescriptor(handles_[0]);
      RETURN_IF_MIOPEN_ERROR(status,
                             "Failed to destroy sequence tensor descriptor");
    }
  }
  // Returns the replicated descriptor array, or nullptr on failure.
  const miopenTensorDescriptor_t* handles() const {
    if (!ok()) return nullptr;
    CHECK(!handles_.empty()) << "handles cannot be empty";
    return handles_.data();
  }
  int seq_length() const { return seq_length_; }
  int batch_size() const { return batch_size_; }
  int data_size() const { return data_size_; }

 private:
  int seq_length_;
  int batch_size_;
  int data_size_;
  miopenDataType_t data_type_;
  std::vector<miopenTensorDescriptor_t> handles_;
  port::Status status_;
  SE_DISALLOW_COPY_AND_ASSIGN(MIOpenRnnSequenceTensorDescriptor);
};
// Describes an RNN hidden/cell state tensor of shape
// {num_layers, batch_size, data_size}.
class MIOpenRnnStateTensorDescriptor
    : public MIOpenDescriptorCommon<dnn::RnnStateTensorDescriptor> {
 public:
  MIOpenRnnStateTensorDescriptor(int num_layers, int batch_size, int data_size,
                                 miopenDataType_t data_type)
      : handle_(nullptr),
        num_layers_(num_layers),
        batch_size_(batch_size),
        data_size_(data_size),
        data_type_(data_type) {
    auto status = wrap::miopenCreateTensorDescriptor(&handle_);
    RETURN_IF_MIOPEN_ERROR(status, "Failed to create tensor descriptor");
    std::array<int, 3> dims = {{num_layers, batch_size, data_size}};
    status = wrap::miopenSetTensorDescriptor(
        handle_ /*tensorDesc*/, data_type /*dataType*/, 3 /*nbDims*/,
        dims.data() /*dimA*/, nullptr /*strideA*/);
    RETURN_IF_MIOPEN_ERROR(status, "Failed to update tensor descriptor");
  }
  ~MIOpenRnnStateTensorDescriptor() override {
    // Fixed inverted condition: the original destroyed the descriptor
    // only when handle_ was null, leaking every valid descriptor.
    if (handle_) {
      auto status = wrap::miopenDestroyTensorDescriptor(handle_);
      RETURN_IF_MIOPEN_ERROR(status, "Unable to destroy RNN state tensor");
    }
  }
  // Returns the underlying descriptor, or nullptr on failure.
  miopenTensorDescriptor_t handle() const {
    if (!ok()) return nullptr;
    return handle_;
  }
  int num_layers() const { return num_layers_; }
  int batch_size() const { return batch_size_; }
  int data_size() const { return data_size_; }

 private:
  miopenTensorDescriptor_t handle_;
  int num_layers_;
  int batch_size_;
  int data_size_;
  port::Status status_;
  miopenDataType_t data_type_;
  SE_DISALLOW_COPY_AND_ASSIGN(MIOpenRnnStateTensorDescriptor);
};
namespace {
// Dimensions extracted from the RNN descriptors, used to cross-check the
// tensors passed to the RNN forward/backward entry points (see
// ExtractAndCheckRnnForward below).
struct RnnModelDims {
  int num_layers = 0;
  int batch_size = 0;
  int seq_length = 0;
  int hidden_size = 0;
  int input_size = 0;
  int dir_count = 0;  // 2 for bidirectional RNNs, 1 otherwise.
};
template <class T>
bool ExtractAndCheckRnnForward(
const MIOpenRnnDescriptor& rnn_desc,
const MIOpenRnnSequenceTensorDescriptor& input_desc,
const DeviceMemory<T>& input_data,
const MIOpenRnnStateTensorDescriptor& input_h_desc,
const DeviceMemory<T>& input_h_data,
const MIOpenRnnStateTensorDescriptor& input_c_desc,
const DeviceMemory<T>& input_c_data, const DeviceMemory<T>& params,
const MIOpenRnnSequenceTensorDescriptor& output_desc,
const DeviceMemory<T>& output_data,
const MIOpenRnnStateTensorDescriptor& output_h_desc,
const DeviceMemory<T>& output_h_data,
const MIOpenRnnStateTensorDescriptor& output_c_desc,
const DeviceMemory<T>& output_c_data, RnnModelDims* model_dims) {
// extract model parameters
model_dims->num_layers = rnn_desc.num_layers();
model_dims->batch_size = input_desc.batch_size();
model_dims->seq_length = input_desc.seq_length();
model_dims->hidden_size = rnn_desc.hidden_size();
model_dims->input_size = input_desc.data_size();
model_dims->dir_count =
(rnn_desc.direction_mode() == miopenRNNbidirection) ? 2 : 1;
// check parameters
if (!(input_h_desc.num_layers() ==
model_dims->num_layers * model_dims->dir_count &&
input_h_desc.batch_size() == model_dims->batch_size &&
input_h_desc.data_size() == model_dims->hidden_size)) {
LOG(ERROR) << "Invalid input_h shape";
return false;
}
if (!(input_h_desc.num_layers() == input_c_desc.num_layers() &&
input_h_desc.batch_size() == input_c_desc.batch_size() &&
input_h_desc.data_size() == input_c_desc.data_size())) {
LOG(ERROR) << "Invalid input_c shape";
return false;
}
if (!(output_desc.seq_length() == model_dims->seq_length &&
output_desc.batch_size() == model_dims->batch_size &&
output_desc.data_size() ==
model_dims->hidden_size * model_dims->dir_count)) {
LOG(ERROR) << "Invalid output shape";
return false;
}
if (!(input_h_desc.num_layers() == output_h_desc.num_layers() &&
input_h_desc.batch_size() == output_h_desc.batch_size() &&
input_h_desc.data_size() == output_h_desc.data_size())) {
LOG(ERROR) << "Invalid output_h shape";
return false;
}
if (!(input_h_desc.num_layers() == output_c_desc.num_layers() &&
input_h_desc.batch_size() == output_c_desc.batch_size() &&
input_h_desc.data_size() == output_c_desc.data_size())) {
LOG(ERROR) << "Invalid output_h shape";
return false;
}
return true;
}
// Queries MIOpen for the expected RNN parameter-buffer size and verifies
// that it matches the size recorded in |rnn_desc|.  Returns false (with
// an error log) if the query fails or the sizes disagree.
// (Also repairs a mis-encoded "&params_size_in_bytes" — the source had a
// garbled "¶" where "&pa" belonged, which does not compile.)
bool CheckRNNParameterSize(
    miopenHandle_t miopen_handle, const MIOpenRnnDescriptor& rnn_desc,
    const MIOpenRnnSequenceTensorDescriptor& input_desc) {
  size_t params_size_in_bytes = 0;
  auto status = wrap::miopenGetRNNParamsSize(
      miopen_handle /*handle*/, rnn_desc.handle() /*rnnDesc*/,
      input_desc.handles()[0] /*xDesc*/, &params_size_in_bytes /*sizeInBytes*/,
      rnn_desc.data_type() /*dataType*/);
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "Unable to check RNN param size: " << ToString(status);
    return false;
  }
  return static_cast<int64_t>(params_size_in_bytes) ==
         rnn_desc.ParamsSizeInBytes();
}
// Queries the workspace size required for an RNN call, allocates it from
// |workspace_allocator|, and zero-fills it on |stream|.  If no workspace
// is needed, *workspace is set to an empty DeviceMemory.  Returns false
// (with an error log) on query or allocation failure.
bool CreateRnnWorkspace(Stream* stream, miopenHandle_t miopen_handle,
                        const MIOpenRnnDescriptor& rnn_desc,
                        const MIOpenRnnSequenceTensorDescriptor& input_desc,
                        ScratchAllocator* workspace_allocator,
                        DeviceMemory<uint8>* workspace) {
  // Query the workspace size.
  size_t workspace_size_in_bytes = 0;
  auto status = wrap::miopenGetRNNWorkspaceSize(
      miopen_handle /*handle*/, rnn_desc.handle() /*rnnDesc*/,
      input_desc.seq_length() /*seqLength*/, input_desc.handles() /*xDesc*/,
      &workspace_size_in_bytes /*sizeInBytes*/);
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "Unable to query workspace size: " << ToString(status);
    return false;
  }
  // Allocate the workspace.
  if (workspace_size_in_bytes > 0) {
    auto allocated =
        workspace_allocator->AllocateBytes(workspace_size_in_bytes);
    if (!allocated.ok() || (*workspace = allocated.ValueOrDie()) == nullptr) {
      LOG(ERROR) << "Failed to allocate RNN workspace";
      return false;
    }
    // Zero the workspace so stale device memory cannot leak into results.
    stream->ThenMemZero(workspace, workspace_size_in_bytes);
  } else {
    *workspace = DeviceMemory<uint8>();
  }
  return true;
}
} // namespace
// Runs the RNN forward pass through MIOpen.
//
// Validates the model dimensions and parameter sizes, allocates (and zeroes)
// the MIOpen workspace, and then dispatches to either
// miopenRNNForwardInference or miopenRNNForwardTraining depending on
// |is_training|.  When training, a reserve-space buffer is also allocated
// from |reserve_space_allocator| so the backward pass can reuse the
// intermediate activations.  Returns false (after logging) on any failure.
//
// Fix: the training-branch error log was missing the ": " separator before
// the status string (the inference branch already had it).
template <class T>
bool MIOpenSupport::DoRnnForwardImpl(
    Stream* stream, const MIOpenRnnDescriptor& rnn_desc,
    const MIOpenRnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<T>& input_data,
    const MIOpenRnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<T>& input_h_data,
    const MIOpenRnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<T>& input_c_data, const DeviceMemory<T>& params,
    const MIOpenRnnSequenceTensorDescriptor& output_desc,
    DeviceMemory<T>* output_data,
    const MIOpenRnnStateTensorDescriptor& output_h_desc,
    DeviceMemory<T>* output_h_data,
    const MIOpenRnnStateTensorDescriptor& output_c_desc,
    DeviceMemory<T>* output_c_data, bool is_training,
    ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator) {
  // extract model parameters
  RnnModelDims model_dims;
  bool res = ExtractAndCheckRnnForward(
      rnn_desc, input_desc, input_data, input_h_desc, input_h_data,
      input_c_desc, input_c_data, params, output_desc, *output_data,
      output_h_desc, *output_h_data, output_c_desc, *output_c_data,
      &model_dims);
  if (!res) {
    LOG(ERROR) << "Invalid parameters for RNN Model";
    return false;
  }

  auto miopen = miopen_->GetHandle(parent_, stream);

  // check params size
  if (!CheckRNNParameterSize(miopen.handle(), rnn_desc, input_desc)) {
    LOG(ERROR) << "Invalid parameters";
    return false;
  }

  // create the workspace
  DeviceMemory<uint8> workspace;
  if (!CreateRnnWorkspace(stream, miopen.handle(), rnn_desc, input_desc,
                          workspace_allocator, &workspace)) {
    LOG(ERROR) << "Unable to create rnn workspace";
    return false;
  }

  // query the reserve space size
  // allocate the reserve space
  DeviceMemory<uint8> reserve_space;
  if (is_training) {
    // Only training needs the reserve space, which holds intermediate state
    // consumed later by the backward pass.
    size_t reserve_space_size_in_bytes = 0;
    auto status = wrap::miopenGetRNNTrainingReserveSize(
        miopen.handle() /*handle*/, rnn_desc.handle() /*rnnDesc*/,
        model_dims.seq_length /*seqLength*/, input_desc.handles() /*xDesc*/,
        &reserve_space_size_in_bytes /*sizeInBytes*/);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "Unable to query reserve space size: " << ToString(status);
      return false;
    }

    if (reserve_space_size_in_bytes > 0) {
      auto allocated =
          reserve_space_allocator->AllocateBytes(reserve_space_size_in_bytes);
      if (!allocated.ok() ||
          (reserve_space = allocated.ValueOrDie()) == nullptr) {
        LOG(ERROR) << "Fail to allocate RNN reserve space";
        return false;
      }
      stream->ThenMemZero(&reserve_space, reserve_space_size_in_bytes);
    }
  }

  // make the forward call
  if (!is_training) {
    auto status = wrap::miopenRNNForwardInference(
        miopen.handle() /*handle*/, rnn_desc.handle() /*rnnDesc*/,
        model_dims.seq_length /*seqLength*/, input_desc.handles() /*xDesc*/,
        input_data.opaque() /*x*/, input_h_desc.handle() /*hxDesc*/,
        input_h_data.opaque() /*hx*/, input_c_desc.handle() /*cxDesc*/,
        input_c_data.opaque() /*cx*/, rnn_desc.params_handle() /*wDesc*/,
        params.opaque() /*w*/, output_desc.handles() /*yDesc*/,
        output_data->opaque() /*y*/, output_h_desc.handle() /*hyDesc*/,
        output_h_data->opaque() /*hy*/, output_c_desc.handle() /*cyDesc*/,
        output_c_data->opaque() /*cy*/, workspace.opaque() /*workspace*/,
        workspace.size() /*workSpaceSizeInBytes*/);

    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "Failed to call miopenRNNForwardInference: "
                 << ToString(status);
      return false;
    }
  } else {
    auto status = wrap::miopenRNNForwardTraining(
        miopen.handle() /*handle*/, rnn_desc.handle() /*rnnDesc*/,
        model_dims.seq_length /*seqLength*/, input_desc.handles() /*xDesc*/,
        input_data.opaque() /*x*/, input_h_desc.handle() /*hxDesc*/,
        input_h_data.opaque() /*hx*/, input_c_desc.handle() /*cxDesc*/,
        input_c_data.opaque() /*cx*/, rnn_desc.params_handle() /*wDesc*/,
        params.opaque() /*w*/, output_desc.handles() /*yDesc*/,
        output_data->opaque() /*y*/, output_h_desc.handle() /*hyDesc*/,
        output_h_data->opaque() /*hy*/, output_c_desc.handle() /*cyDesc*/,
        output_c_data->opaque() /*cy*/, workspace.opaque() /*workspace*/,
        workspace.size() /*workSpaceSizeInBytes*/,
        reserve_space.opaque() /*reserveSpace*/,
        reserve_space.size() /*reserveSpaceSizeInBytes*/);
    if (status != miopenStatusSuccess) {
      // Fixed: previously this log line was missing the ": " separator.
      LOG(ERROR) << "Failed to call miopenRNNForwardTraining: "
                 << ToString(status);
      return false;
    }
  }
  return true;
}
// Runs the RNN backward pass through MIOpen.
//
// Validates the model dimensions and parameter size, allocates the MIOpen
// workspace, zero-fills the input-gradient buffers, then calls
// miopenRNNBackwardData and — when |params_backprop_data| is non-null —
// miopenRNNBackwardWeights.  |reserve_space_data| must be the reserve space
// produced by the corresponding forward (training) pass.  Returns false
// (after logging) on any failure.
template <class T>
bool MIOpenSupport::DoRnnBackwardImpl(
    Stream* stream, const MIOpenRnnDescriptor& rnn_desc,
    const MIOpenRnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<T>& input_data,
    const MIOpenRnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<T>& input_h_data,
    const MIOpenRnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<T>& input_c_data, const DeviceMemory<T>& params,
    const MIOpenRnnSequenceTensorDescriptor& output_desc,
    const DeviceMemory<T>& output_data,
    const MIOpenRnnStateTensorDescriptor& output_h_desc,
    const DeviceMemory<T>& output_h_data,
    const MIOpenRnnStateTensorDescriptor& output_c_desc,
    const DeviceMemory<T>& output_c_data,
    const DeviceMemory<T>& output_backprop_data,
    const DeviceMemory<T>& output_h_backprop_data,
    const DeviceMemory<T>& output_c_backprop_data,
    DeviceMemory<T>* input_backprop_data,
    DeviceMemory<T>* input_h_backprop_data,
    DeviceMemory<T>* input_c_backprop_data,
    DeviceMemory<T>* params_backprop_data,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator) {
  // extract model parameters
  RnnModelDims model_dims;
  bool res = ExtractAndCheckRnnForward(
      rnn_desc, input_desc, input_data, input_h_desc, input_h_data,
      input_c_desc, input_c_data, params, output_desc, output_data,
      output_h_desc, output_h_data, output_c_desc, output_c_data, &model_dims);
  if (!res) {
    LOG(ERROR) << "Invalid parameters for RNN Model";
    return false;
  }

  auto miopen = miopen_->GetHandle(parent_, stream);

  // check params size
  if (!CheckRNNParameterSize(miopen.handle(), rnn_desc, input_desc)) {
    LOG(ERROR) << "Invalid parameters";
    return false;
  }

  // create the workspace
  DeviceMemory<uint8> workspace;
  if (!CreateRnnWorkspace(stream, miopen.handle(), rnn_desc, input_desc,
                          workspace_allocator, &workspace)) {
    LOG(ERROR) << "Unable to create rnn workspace";
    return false;
  }

  // workaround for missing initialization support in MIOpen.
  // TODO: remove this when MIOpen is ready.
  // Zero the three input-gradient buffers before the call.  Eigen::half is
  // special-cased to 2 bytes; for other T the element size is sizeof(T).
  auto type_size = std::is_same<T, Eigen::half>::value ? 2 : sizeof(T);
  // dx buffer: seq_length x batch_size x data_size elements.
  auto size_data = input_desc.seq_length() * input_desc.batch_size() *
                   input_desc.data_size();
  if ((size_data > 0) && (input_backprop_data->opaque() != nullptr))
    stream->ThenMemZero(input_backprop_data, size_data * type_size);

  // dhx buffer: num_layers x batch_size x data_size elements.
  size_data = input_h_desc.num_layers() * input_h_desc.batch_size() *
              input_h_desc.data_size();
  if ((size_data > 0) && (input_h_backprop_data->opaque() != nullptr))
    stream->ThenMemZero(input_h_backprop_data, size_data * type_size);

  // dcx buffer: num_layers x batch_size x data_size elements.
  size_data = input_c_desc.num_layers() * input_c_desc.batch_size() *
              input_c_desc.data_size();
  if ((size_data > 0) && (input_c_backprop_data->opaque() != nullptr))
    stream->ThenMemZero(input_c_backprop_data, size_data * type_size);

  // make the backward data call
  auto status = wrap::miopenRNNBackwardData(
      miopen.handle() /*handle*/, rnn_desc.handle() /*rnnDesc*/,
      model_dims.seq_length /*seqLength*/, output_desc.handles() /*yDesc*/,
      output_data.opaque() /*y*/, output_desc.handles() /*dyDesc*/,
      output_backprop_data.opaque() /*dy*/, output_h_desc.handle() /*dhyDesc*/,
      output_h_backprop_data.opaque() /*dhy*/,
      output_c_desc.handle() /*dcyDesc*/,
      output_c_backprop_data.opaque() /*dcy*/,
      rnn_desc.params_handle() /*wDesc*/, params.opaque() /*w*/,
      input_h_desc.handle() /*hxDesc*/, input_h_data.opaque() /*hx*/,
      input_c_desc.handle() /*cxDesc*/, input_c_data.opaque() /*cx*/,
      input_desc.handles() /*dxDesc*/, input_backprop_data->opaque() /*dx*/,
      input_h_desc.handle() /*dhxDesc*/,
      input_h_backprop_data->opaque() /*dhx*/,
      input_c_desc.handle() /*dcxDesc*/,
      input_c_backprop_data->opaque() /*dcx*/, workspace.opaque() /*workspace*/,
      workspace.size() /*workSpaceSizeInBytes*/,
      reserve_space_data->opaque() /*reserveSpace*/,
      reserve_space_data->size() /*reserveSpaceSizeInBytes*/);
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "Failed to call miopenRNNBackwardData: " << ToString(status);
    return false;
  }

  // Weight gradients are optional: callers that only need data gradients
  // pass a null params_backprop_data.
  if (params_backprop_data != nullptr) {
    // Clear the dw to zeros.
    stream->ThenMemZero(params_backprop_data, params_backprop_data->size());
    // make the backward weight call
    status = wrap::miopenRNNBackwardWeights(
        miopen.handle() /*handle*/, rnn_desc.handle() /*rnnDesc*/,
        model_dims.seq_length /*seqLength*/, input_desc.handles() /*xDesc*/,
        input_data.opaque() /*x*/, input_h_desc.handle() /*hxDesc*/,
        input_h_data.opaque() /*hx*/, output_desc.handles() /*yDesc*/,
        output_data.opaque() /*y*/, rnn_desc.params_handle() /*dwDesc*/,
        params_backprop_data->opaque() /*dw*/, workspace.opaque() /*workspace*/,
        workspace.size() /*workSpaceSizeInBytes*/,
        reserve_space_data->opaque() /*reserveSpace*/,
        reserve_space_data->size() /*reserveSpaceSizeInBytes*/);
    if (status != miopenStatusSuccess) {
      LOG(ERROR) << "Failed to call miopenRNNBackwardWeights: "
                 << ToString(status);
      return false;
    }
  }

  return true;
}
// Builds the MIOpen tensor descriptor for the RNN's parameter (weight) blob.
// A temporary 1 x input_size input descriptor is created solely so MIOpen can
// report the parameter size and shape; it is destroyed before returning.
// On any MIOpen failure, RETURN_IF_MIOPEN_ERROR aborts construction early,
// so callers should consult the object's status/handle before use.
MIOpenRnnParamsDescriptor::MIOpenRnnParamsDescriptor(
    miopenHandle_t miopen_handle, const MIOpenRnnDescriptor& rnn_desc)
    : handle_(nullptr), rnn_desc_(&rnn_desc), params_size_in_bytes_(0) {
  miopenTensorDescriptor_t input_desc = nullptr;
  {
    // Query the params size.
    auto status = wrap::miopenCreateTensorDescriptor(&input_desc);
    RETURN_IF_MIOPEN_ERROR(status, "MIOpen fails to create tensor descriptor");
    // Dummy single-timestep input: 1 x input_size, default (nullptr) strides.
    std::array<int, 2> dims = {{1, rnn_desc.input_size()}};
    status = wrap::miopenSetTensorDescriptor(
        input_desc /*tensorDesc*/, rnn_desc.data_type() /*dataType*/,
        2 /*nbDims*/, dims.data() /*dimA*/, nullptr /*strideA*/);
    RETURN_IF_MIOPEN_ERROR(status, "MIOpen fails to set tensor descriptor");
    size_t params_size = 0;
    status = wrap::miopenGetRNNParamsSize(
        miopen_handle /*handle*/, rnn_desc.handle() /*rnnDesc*/,
        input_desc /*xDesc*/, &params_size /*sizeInBytes*/,
        rnn_desc.data_type() /*dataType*/);
    RETURN_IF_MIOPEN_ERROR(status, "MIOpen fails to get RNN parameter size");
    params_size_in_bytes_ = static_cast<int64_t>(params_size);
  }
  {
    // Create the params descriptor.
    auto status = wrap::miopenCreateTensorDescriptor(&handle_);
    RETURN_IF_MIOPEN_ERROR(status,
                           "MIOpen fails to create RNN params descriptor");
    status = wrap::miopenGetRNNParamsDescriptor(miopen_handle,
                                                rnn_desc.handle(), input_desc,
                                                handle_, rnn_desc.data_type());
    RETURN_IF_MIOPEN_ERROR(status,
                           "MIOpen fails to update RNN filter descriptor");
  }
  {
    // Release the dummy input tensor descriptor.
    auto status = wrap::miopenDestroyTensorDescriptor(input_desc);
    RETURN_IF_MIOPEN_ERROR(status, "MIOpen fails to destroy tensor descriptor");
  }
}
// RAII wrapper for a miopenCTCLossDescriptor_t: the descriptor is created
// and configured in the constructor and destroyed in the destructor.  Any
// MIOpen failure during either phase is fatal.
class MIOpenCTCLossDescriptor {
 public:
  explicit MIOpenCTCLossDescriptor(miopenDataType_t data_type) {
    auto rc = wrap::miopenCreateCTCLossDescriptor(&handle_);
    if (rc != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenCreateCTCLossDescriptor failed: "
                 << ToString(rc);
    }

    // Have MIOpen apply the softmax layer itself.  The third argument (0)
    // is forwarded verbatim to MIOpen — presumably the blank-label index;
    // confirm against the MIOpen API docs.
    const bool apply_softmax_layer = true;
    rc = wrap::miopenSetCTCLossDescriptor(handle_, data_type, 0,
                                          apply_softmax_layer);
    if (rc != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenSetCTCLossDescriptor failed: "
                 << ToString(rc);
    }
  }

  ~MIOpenCTCLossDescriptor() {
    auto rc = wrap::miopenDestroyCTCLossDescriptor(handle_);
    if (rc != miopenStatusSuccess) {
      LOG(FATAL) << "call to miopenDestroyCTCLossDescriptor failed: "
                 << ToString(rc);
    }
  }

  miopenCTCLossDescriptor_t handle() const { return handle_; }

 private:
  miopenCTCLossDescriptor_t handle_;  // Owned

  SE_DISALLOW_COPY_AND_ASSIGN(MIOpenCTCLossDescriptor);
};
// Prepares scratch memory for a MIOpen CTC-loss computation: queries the
// workspace size required for the given probability/gradient descriptors and
// label/length data, then allocates that much from |scratch_allocator|.  On
// success |*scratch_memory| holds the workspace (empty when none is needed).
// |ctc_loss_algo_id| is not consumed here.
//
// Fix: the workspace-size failure message previously blamed
// miopenDestroyCTCLossDescriptor; the failing call is actually
// miopenGetCTCLossWorkspaceSize.
port::Status MIOpenSupport::DoPrepareForCtcLoss(
    Stream* stream, dnn::DataType element_type,
    const dnn::RnnStateTensorDescriptor& probs_desc,
    const dnn::RnnStateTensorDescriptor& grads_desc,
    absl::Span<const int> labels_data,
    absl::Span<const int> labels_lengths_data,
    absl::Span<const int> input_lengths_data,
    ScratchAllocator* scratch_allocator, DeviceMemory<uint8>* scratch_memory,
    int* ctc_loss_algo_id) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  MIOpenCTCLossDescriptor miopen_ctc_loss_desc(ToMIOpenDataType(element_type));

  // Query the workspace size.
  size_t workspace_size_in_bytes = 0;

  const MIOpenRnnStateTensorDescriptor& miopen_probs_desc =
      static_cast<const MIOpenRnnStateTensorDescriptor&>(probs_desc);

  const MIOpenRnnStateTensorDescriptor& miopen_grads_desc =
      static_cast<const MIOpenRnnStateTensorDescriptor&>(grads_desc);

  auto status = wrap::miopenGetCTCLossWorkspaceSize(
      miopen.handle(), miopen_probs_desc.handle(), miopen_grads_desc.handle(),
      labels_data.data(), labels_lengths_data.data(), input_lengths_data.data(),
      MIOPEN_CTC_LOSS_ALGO_DETERMINISTIC, miopen_ctc_loss_desc.handle(),
      &workspace_size_in_bytes);

  if (status != miopenStatusSuccess) {
    // LOG(FATAL) is expected to abort; the return below keeps the
    // non-aborting control-flow path well formed.
    LOG(FATAL) << "call to miopenGetCTCLossWorkspaceSize failed: "
               << ToString(status);
    return port::InternalError(
        "Failed to determine scratch memory size for MIOpen CTC Loss");
  }

  *scratch_memory = DeviceMemory<uint8>();

  // Allocate the workspace.
  if (workspace_size_in_bytes != 0) {
    if (scratch_allocator == nullptr) {
      return port::InternalError(
          absl::StrCat("An allocator must be specified when scratch memory is "
                       "needed"));
    }
    auto scratch_or = scratch_allocator->AllocateBytes(workspace_size_in_bytes);
    if (scratch_or.ok()) {
      *scratch_memory = scratch_or.ValueOrDie();
    } else {
      LOG(ERROR)
          << "Failed to allocate scratch memory - "
          << scratch_or.status().error_message() << "\n"
          << "\tYou can set the env var TF_CUDNN_WORKSPACE_LIMIT_IN_MB to a "
             "larger number (e.g. 8192) to increase the max memory limit.\n"
          << "\tIncreasing the max memory limit might help resolve this "
             "error";
      return port::InternalError(absl::StrCat(
          "Failed to allocate scratch memory for MIOpen CTC Loss, of size: ",
          workspace_size_in_bytes));
    }
  }

  return port::Status::OK();
}
// Invokes MIOpen's CTC loss kernel with the supplied descriptors, label
// data, and pre-allocated scratch memory.  Writes per-sequence losses into
// |costs_data| and gradients into |grads_data|.  A MIOpen failure is fatal.
port::Status MIOpenSupport::DoCtcLossImpl(
    Stream* stream, const MIOpenRnnStateTensorDescriptor& probs_desc,
    const DeviceMemoryBase probs_data, absl::Span<const int> labels_data,
    absl::Span<const int> labels_lengths_data,
    absl::Span<const int> input_lengths_data, DeviceMemoryBase costs_data,
    const MIOpenRnnStateTensorDescriptor& grads_desc,
    DeviceMemoryBase grads_data, const MIOpenCTCLossDescriptor& ctc_loss_desc,
    DeviceMemory<uint8> scratch_memory, int ctc_loss_algo_id) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // Problem dimensions, computed for debugging only; not consumed below.
  const int num_timestamps = probs_desc.num_layers();
  const int batch_size = probs_desc.batch_size();
  const int num_labels = probs_desc.data_size();
  const int total_size = num_labels * num_timestamps * batch_size;
  (void)total_size;

  const auto rc = wrap::miopenCTCLoss(
      miopen.handle(), probs_desc.handle(), probs_data.opaque(),
      labels_data.data(), labels_lengths_data.data(), input_lengths_data.data(),
      costs_data.opaque(), grads_desc.handle(), grads_data.opaque(),
      MIOPEN_CTC_LOSS_ALGO_DETERMINISTIC, ctc_loss_desc.handle(),
      scratch_memory.opaque(), scratch_memory.size());
  if (rc != miopenStatusSuccess) {
    // LOG(FATAL) is expected to abort; the return keeps the non-aborting
    // control-flow path well formed.
    LOG(FATAL) << "call to miopenCTCLoss failed: " << ToString(rc);
    return port::InternalError("Failure during MIOpen CTC Loss");
  }

  return port::Status::OK();
}
// Public CTC-loss entry point.  Rejects non-float element types (the only
// type MIOpen's CTC loss currently supports), builds a CTC-loss descriptor,
// and forwards to DoCtcLossImpl with the descriptors down-cast to their
// MIOpen implementations.
port::Status MIOpenSupport::DoCtcLoss(
    Stream* stream, dnn::DataType element_type,
    const dnn::RnnStateTensorDescriptor& probs_desc,
    const DeviceMemoryBase probs_data, absl::Span<const int> labels_data,
    absl::Span<const int> labels_lengths_data,
    absl::Span<const int> input_lengths_data, DeviceMemoryBase costs_data,
    const dnn::RnnStateTensorDescriptor& grads_desc,
    DeviceMemoryBase grads_data, DeviceMemory<uint8> scratch_memory,
    int ctc_loss_algo_id) {
  // Current MIOPen CTC Loss only supports the float datatype
  if (element_type != dnn::DataType::kFloat) {
    return port::Status(port::error::INVALID_ARGUMENT,
                        "MIOpenCTCLossDescriptor is supported only when the "
                        "DataType is float");
  }

  MIOpenCTCLossDescriptor ctc_desc(ToMIOpenDataType(element_type));

  return DoCtcLossImpl(
      stream, static_cast<const MIOpenRnnStateTensorDescriptor&>(probs_desc),
      probs_data, labels_data, labels_lengths_data, input_lengths_data,
      costs_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(grads_desc),
      grads_data, ctc_desc, scratch_memory, ctc_loss_algo_id);
}
// Creates an RNN descriptor for MIOpen.  Rejects padded I/O and projection
// layers (cell_size > hidden_size), neither of which MIOpen supports, then
// constructs a MIOpenRnnDescriptor and returns it (or its error status).
port::StatusOr<std::unique_ptr<dnn::RnnDescriptor>>
MIOpenSupport::createRnnDescriptor(
    int num_layers, int hidden_size, int input_size, int cell_size,
    int batch_size, dnn::RnnInputMode input_mode,
    dnn::RnnDirectionMode direction_mode, dnn::RnnMode rnn_mode,
    dnn::DataType data_type, const dnn::AlgorithmConfig& algorithm_config,
    float dropout, uint64_t seed, ScratchAllocator* state_allocator,
    bool use_padded_io) {
  // ROCM TODO: batch_size is used in dynamic persistent RNN algorithm and is
  // not supported by MIOpen now.
  if (use_padded_io) {
    return port::Status(port::error::INVALID_ARGUMENT,
                        "ROCm MIOpen only supports packed input output.");
  }

  // A nonzero cell_size exceeding hidden_size requests a projection layer.
  if (cell_size != 0 && hidden_size < cell_size) {
    return port::Status(
        port::error::INVALID_ARGUMENT,
        "ROCm MIOpen does not support RNN ProjectionLayers yet.");
  }

  auto miopen = miopen_->GetHandle(parent_, nullptr);
  auto rnn_desc = std::make_unique<MIOpenRnnDescriptor>(
      miopen.handle(), num_layers, hidden_size, input_size,
      ToMIOpenRnnInputMode(input_mode),
      ToMIOpenRnnDirectionMode(direction_mode), ToMIOpenRnnMode(rnn_mode),
      ToMIOpenDataType(data_type), dropout, seed, state_allocator);
  if (!rnn_desc->ok()) {
    return rnn_desc->Status();
  }
  return port::StatusOr<std::unique_ptr<dnn::RnnDescriptor>>(
      std::move(rnn_desc));
}
// Creates a sequence tensor descriptor (seq_length x batch_size x data_size)
// for MIOpen RNN input/output, returning it or the construction error.
port::StatusOr<std::unique_ptr<dnn::RnnSequenceTensorDescriptor>>
MIOpenSupport::createRnnSequenceTensorDescriptor(int seq_length, int batch_size,
                                                 int data_size,
                                                 dnn::DataType data_type) {
  auto seq_desc = std::make_unique<MIOpenRnnSequenceTensorDescriptor>(
      seq_length, batch_size, data_size, ToMIOpenDataType(data_type));
  if (!seq_desc->ok()) {
    return seq_desc->Status();
  }
  return port::StatusOr<std::unique_ptr<dnn::RnnSequenceTensorDescriptor>>(
      std::move(seq_desc));
}
// Creates a state tensor descriptor (num_layer x batch_size x data_size)
// for MIOpen RNN hidden/cell state, returning it or the construction error.
port::StatusOr<std::unique_ptr<dnn::RnnStateTensorDescriptor>>
MIOpenSupport::createRnnStateTensorDescriptor(int num_layer, int batch_size,
                                              int data_size,
                                              dnn::DataType data_type) {
  auto state_desc = std::make_unique<MIOpenRnnStateTensorDescriptor>(
      num_layer, batch_size, data_size, ToMIOpenDataType(data_type));
  if (!state_desc->ok()) {
    return state_desc->Status();
  }
  return port::StatusOr<std::unique_ptr<dnn::RnnStateTensorDescriptor>>(
      std::move(state_desc));
}
// Half-precision RNN forward: down-casts the generic descriptor interfaces
// to their MIOpen implementations and forwards to DoRnnForwardImpl.
// seq_lengths_data and output_profile_result are not consumed
// (ROCM TODO: profiling is ignored for now).
bool MIOpenSupport::DoRnnForward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<Eigen::half>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<Eigen::half>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<Eigen::half>& input_c_data,
    const DeviceMemory<Eigen::half>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    DeviceMemory<Eigen::half>* output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    DeviceMemory<Eigen::half>* output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    DeviceMemory<Eigen::half>* output_c_data, bool is_training,
    ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  return DoRnnForwardImpl<Eigen::half>(
      stream, static_cast<const MIOpenRnnDescriptor&>(rnn_desc),
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(input_desc),
      input_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_h_desc),
      input_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_c_desc),
      input_c_data, params,
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(output_desc),
      output_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_h_desc),
      output_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_c_desc),
      output_c_data, is_training, reserve_space_allocator,
      workspace_allocator);
}
// Single-precision RNN forward: down-casts the generic descriptor interfaces
// to their MIOpen implementations and forwards to DoRnnForwardImpl.
// seq_lengths_data and output_profile_result are not consumed
// (ROCM TODO: profiling is ignored for now).
bool MIOpenSupport::DoRnnForward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<float>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<float>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<float>& input_c_data, const DeviceMemory<float>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    DeviceMemory<float>* output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    DeviceMemory<float>* output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    DeviceMemory<float>* output_c_data, bool is_training,
    ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  return DoRnnForwardImpl<float>(
      stream, static_cast<const MIOpenRnnDescriptor&>(rnn_desc),
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(input_desc),
      input_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_h_desc),
      input_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_c_desc),
      input_c_data, params,
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(output_desc),
      output_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_h_desc),
      output_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_c_desc),
      output_c_data, is_training, reserve_space_allocator,
      workspace_allocator);
}
// Double-precision RNN forward is not implemented in MIOpen; this overload
// always logs an error and reports failure.
bool MIOpenSupport::DoRnnForward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<double>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<double>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<double>& input_c_data,
    const DeviceMemory<double>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    DeviceMemory<double>* output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    DeviceMemory<double>* output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    DeviceMemory<double>* output_c_data, bool is_training,
    ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  LOG(ERROR) << "miopen does not support double type RNN fwd yet";
  return false;
}
// Half-precision RNN backward: down-casts the generic descriptor interfaces
// to their MIOpen implementations and forwards to DoRnnBackwardImpl.
// seq_lengths_data and output_profile_result are not consumed
// (ROCM TODO: profiling is ignored for now).
bool MIOpenSupport::DoRnnBackward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<Eigen::half>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<Eigen::half>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<Eigen::half>& input_c_data,
    const DeviceMemory<Eigen::half>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    const DeviceMemory<Eigen::half>& output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    const DeviceMemory<Eigen::half>& output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    const DeviceMemory<Eigen::half>& output_c_data,
    const DeviceMemory<Eigen::half>& output_backprop_data,
    const DeviceMemory<Eigen::half>& output_h_backprop_data,
    const DeviceMemory<Eigen::half>& output_c_backprop_data,
    DeviceMemory<Eigen::half>* input_backprop_data,
    DeviceMemory<Eigen::half>* input_h_backprop_data,
    DeviceMemory<Eigen::half>* input_c_backprop_data,
    DeviceMemory<Eigen::half>* params_backprop_data,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  return DoRnnBackwardImpl<Eigen::half>(
      stream, static_cast<const MIOpenRnnDescriptor&>(rnn_desc),
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(input_desc),
      input_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_h_desc),
      input_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_c_desc),
      input_c_data, params,
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(output_desc),
      output_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_h_desc),
      output_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_c_desc),
      output_c_data, output_backprop_data, output_h_backprop_data,
      output_c_backprop_data, input_backprop_data, input_h_backprop_data,
      input_c_backprop_data, params_backprop_data, reserve_space_data,
      workspace_allocator);
}
// Single-precision RNN backward: down-casts the generic descriptor
// interfaces to their MIOpen implementations and forwards to
// DoRnnBackwardImpl.  seq_lengths_data and output_profile_result are not
// consumed (ROCM TODO: profiling is ignored for now).
bool MIOpenSupport::DoRnnBackward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<float>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<float>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<float>& input_c_data, const DeviceMemory<float>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    const DeviceMemory<float>& output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    const DeviceMemory<float>& output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    const DeviceMemory<float>& output_c_data,
    const DeviceMemory<float>& output_backprop_data,
    const DeviceMemory<float>& output_h_backprop_data,
    const DeviceMemory<float>& output_c_backprop_data,
    DeviceMemory<float>* input_backprop_data,
    DeviceMemory<float>* input_h_backprop_data,
    DeviceMemory<float>* input_c_backprop_data,
    DeviceMemory<float>* params_backprop_data,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  return DoRnnBackwardImpl<float>(
      stream, static_cast<const MIOpenRnnDescriptor&>(rnn_desc),
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(input_desc),
      input_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_h_desc),
      input_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(input_c_desc),
      input_c_data, params,
      static_cast<const MIOpenRnnSequenceTensorDescriptor&>(output_desc),
      output_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_h_desc),
      output_h_data,
      static_cast<const MIOpenRnnStateTensorDescriptor&>(output_c_desc),
      output_c_data, output_backprop_data, output_h_backprop_data,
      output_c_backprop_data, input_backprop_data, input_h_backprop_data,
      input_c_backprop_data, params_backprop_data, reserve_space_data,
      workspace_allocator);
}
// Double-precision RNN backward is not implemented in MIOpen; this overload
// always logs an error and reports failure.
//
// Fix: the error message previously said "half type" even though this is
// the double-precision overload (cf. the forward overload's message).
bool MIOpenSupport::DoRnnBackward(
    Stream* stream, const dnn::RnnDescriptor& rnn_desc,
    const dnn::RnnSequenceTensorDescriptor& input_desc,
    const DeviceMemory<double>& input_data,
    const DeviceMemory<int>& seq_lengths_data,
    const dnn::RnnStateTensorDescriptor& input_h_desc,
    const DeviceMemory<double>& input_h_data,
    const dnn::RnnStateTensorDescriptor& input_c_desc,
    const DeviceMemory<double>& input_c_data,
    const DeviceMemory<double>& params,
    const dnn::RnnSequenceTensorDescriptor& output_desc,
    const DeviceMemory<double>& output_data,
    const dnn::RnnStateTensorDescriptor& output_h_desc,
    const DeviceMemory<double>& output_h_data,
    const dnn::RnnStateTensorDescriptor& output_c_desc,
    const DeviceMemory<double>& output_c_data,
    const DeviceMemory<double>& output_backprop_data,
    const DeviceMemory<double>& output_h_backprop_data,
    const DeviceMemory<double>& output_c_backprop_data,
    DeviceMemory<double>* input_backprop_data,
    DeviceMemory<double>* input_h_backprop_data,
    DeviceMemory<double>* input_c_backprop_data,
    DeviceMemory<double>* params_backprop_data,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator,
    dnn::ProfileResult* output_profile_result) {
  LOG(ERROR) << "miopen does not support double type RNN bwd yet";
  return false;
}
// This is the context required to use the TF scratch allocator:
// it bundles the scratch allocator with the stream it allocates for so the
// pair can be threaded through MIOpen's opaque void* callback context
// (see MIOpenAllocatorCallback below).
struct MIOpenAllocatorContext {
  MIOpenAllocatorContext(ScratchAllocator* scratch_allocator, Stream* stream)
      : scratch_allocator_(scratch_allocator), stream_(stream) {}

  // Raw pointers; this struct never frees them — the caller must keep both
  // alive for as long as the context is registered with MIOpen.
  ScratchAllocator* scratch_allocator_;
  Stream* stream_;
};
// Allocation callback registered with MIOpen.  |ctx| is a
// MIOpenAllocatorContext*; allocates |size_in_bytes| from its scratch
// allocator and returns the device pointer, or nullptr on failure.
void* MIOpenAllocatorCallback(void* ctx, size_t size_in_bytes) {
  auto* mac = static_cast<MIOpenAllocatorContext*>(ctx);
  auto allocated = mac->scratch_allocator_->AllocateBytes(size_in_bytes);
  if (!allocated.ok()) {
    return nullptr;
  }
  DeviceMemory<uint8> scratch = allocated.ValueOrDie();
  return scratch.opaque();
}
// Deallocation callback registered with MIOpen; intentionally a no-op.
void MIOpenDeallocatorCallback(void* ctx, void* mem) {
  // Don't need deallocator since the TensorFlow heap will automatically
  // reclaim the memory
}
// Prepares scratch memory for a convolution whose algorithm (and scratch
// requirement) were already selected and recorded in |algorithm_config|.
// Copies the algorithm into |*algorithm_desc| and, when a nonzero scratch
// size was recorded, allocates it from |scratch_allocator| into
// |*scratch_memory|.  The tensor/filter descriptors and data arguments are
// not consumed here.
port::Status MIOpenSupport::DoPrepareForConvolution(
    dnn::ConvolutionKind kind, dnn::DataType element_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    const dnn::AlgorithmConfig& algorithm_config,
    ScratchAllocator* scratch_allocator, dnn::AlgorithmDesc* algorithm_desc,
    DeviceMemory<uint8>* scratch_memory) {
  // An algorithm must have been chosen beforehand and stored in the config.
  absl::optional<dnn::AlgorithmDesc> chosen_algo =
      algorithm_config.algorithm();
  assert(chosen_algo.has_value());
  *algorithm_desc = *chosen_algo;

  assert(algorithm_config.scratch_size().has_value());
  const size_t scratch_memory_size = *(algorithm_config.scratch_size());

  if (scratch_memory_size == 0) {
    // No scratch space recorded for this algorithm; nothing to allocate.
    return port::Status::OK();
  }

  // allocate scratch memory
  if (scratch_allocator == nullptr) {
    return port::InternalError(
        absl::StrCat("An allocator must be specified when scratch memory is "
                     "needed"));
  }
  auto allocated = scratch_allocator->AllocateBytes(scratch_memory_size);
  if (!allocated.ok()) {
    LOG(ERROR)
        << "Failed to allocate scratch memory - "
        << allocated.status().error_message() << "\n"
        << "\tYou can set the env var TF_CUDNN_WORKSPACE_LIMIT_IN_MB to a "
           "larger number (e.g. 8192) to increase the max memory limit.\n"
        << "\tIncreasing the max memory limit might help resolve this "
           "error";
    return port::InternalError(absl::StrCat(
        "Failed to allocate scratch memory of size: ", scratch_memory_size));
  }
  *scratch_memory = allocated.ValueOrDie();

  return port::Status::OK();
}
// A dnn::ConvRunner bound to one pre-selected MIOpen convolution algorithm
// (forward, backward-data, or backward-filter). Construction captures the
// algorithm id, workspace size, and the MIOpen tensor/filter/convolution
// descriptors; operator() then enqueues the matching MIOpen call on the
// supplied stream.
class RocmConvRunner : public dnn::ConvRunner {
 public:
  RocmConvRunner(GpuExecutor* parent, MIOpenAccess* miopen, int64_t algo_id,
                 size_t workspace_size, dnn::ConvolutionKind kind,
                 dnn::DataType input_type, bool use_immediate_mode,
                 BatchDescriptor input_descriptor,
                 BatchDescriptor output_descriptor,
                 FilterDescriptor filter_descriptor,
                 ConvolutionDescriptor conv_descriptor)
      : parent_(parent),
        miopen_(miopen),
        algo_id_(algo_id),
        workspace_size_(workspace_size),
        kind_(kind),
        use_immediate_mode_(use_immediate_mode),
        // All four MIOpen descriptors use the *input* element type; callers
        // reject mixed input/output types before constructing a runner.
        input_desc_{input_descriptor, ToMIOpenDataType(input_type)},
        output_desc_{output_descriptor, ToMIOpenDataType(input_type)},
        filter_desc_{filter_descriptor, ToMIOpenDataType(input_type)},
        conv_desc_{conv_descriptor, ToMIOpenDataType(input_type)} {}

  // Human-readable description of the wrapped algorithm.
  std::string ToString() const override {
    return dnn::AlgorithmDesc{algo_id_, false, workspace_size_}.ToString();
  }

  // Scratch-space requirement, fixed at construction time.
  size_t GetWorkspaceSize() const override { return workspace_size_; }

  port::StatusOr<AlgorithmDesc> ToAlgorithmDesc() const override {
    return {{algo_id_, false, workspace_size_}};
  }

  // Enqueues the convolution on `stream`. When `profile_result` is non-null
  // the MIOpen call is bracketed with a GPU timer, and the elapsed time,
  // algorithm, and scratch size are recorded on success.
  port::Status operator()(Stream* stream, DeviceMemoryBase input_data,
                          DeviceMemoryBase filter_data,
                          DeviceMemoryBase output_data,
                          DeviceMemoryBase scratch_memory,
                          dnn::ProfileResult* profile_result) const override {
    auto miopen = miopen_->GetHandle(parent_, stream);
    // Alpha is the scaling factor for input.
    float alpha = 1.0;
    // Beta is the scaling factor for output.
    float beta = 0.0;

    const bool is_profiling = profile_result != nullptr;

    std::unique_ptr<GpuTimer> timer;
    if (is_profiling) {
      timer.reset(new GpuTimer(parent_));
      if (!timer->Init()) {
        return port::Status(port::error::INTERNAL, "Failed to init timer");
      }
      // The start and stop of the timer should be as close to the MIOpen call
      // as possible. It is still possible for other threads to issue workload
      // on to this stream. So it could take multiple profiling measurements.
      if (!timer->Start(AsGpuStream(stream))) {
        timer->Destroy();
        return port::Status(port::error::INTERNAL, "Failed to start timer");
      }
    }

    miopenStatus_t status = miopenStatusSuccess;
    // In immediate mode algo_id_ holds a MIOpen solution id; in find mode it
    // holds the corresponding miopenConv*Algorithm_t enum value.
    switch (kind_) {
      case dnn::ConvolutionKind::FORWARD: {
        if (use_immediate_mode_) {
          status = wrap::miopenConvolutionForwardImmediate(
              miopen.handle(), filter_desc_.handle(), filter_data.opaque(),
              input_desc_.handle(), input_data.opaque(), conv_desc_.handle(),
              output_desc_.handle(), output_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size(),
              static_cast<uint64_t>(algo_id_));
        } else {
          status = wrap::miopenConvolutionForward(
              miopen.handle(), &alpha, input_desc_.handle(),
              input_data.opaque(), filter_desc_.handle(), filter_data.opaque(),
              conv_desc_.handle(),
              static_cast<miopenConvFwdAlgorithm_t>(algo_id_), &beta,
              output_desc_.handle(), output_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size());
        }
        break;
      }
      case dnn::ConvolutionKind::BACKWARD_DATA: {
        if (use_immediate_mode_) {
          status = wrap::miopenConvolutionBackwardDataImmediate(
              miopen.handle(), output_desc_.handle(), output_data.opaque(),
              filter_desc_.handle(), filter_data.opaque(), conv_desc_.handle(),
              input_desc_.handle(), input_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size(),
              static_cast<uint64_t>(algo_id_));
        } else {
          status = wrap::miopenConvolutionBackwardData(
              miopen.handle(), &alpha, output_desc_.handle(),
              output_data.opaque(), filter_desc_.handle(), filter_data.opaque(),
              conv_desc_.handle(),
              static_cast<miopenConvBwdDataAlgorithm_t>(algo_id_), &beta,
              input_desc_.handle(), input_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size());
        }
        break;
      }
      case dnn::ConvolutionKind::BACKWARD_FILTER: {
        if (use_immediate_mode_) {
          status = wrap::miopenConvolutionBackwardWeightsImmediate(
              miopen.handle(), output_desc_.handle(), output_data.opaque(),
              input_desc_.handle(), input_data.opaque(), conv_desc_.handle(),
              filter_desc_.handle(), filter_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size(),
              static_cast<uint64_t>(algo_id_));
        } else {
          status = wrap::miopenConvolutionBackwardWeights(
              miopen.handle(), &alpha, output_desc_.handle(),
              output_data.opaque(), input_desc_.handle(), input_data.opaque(),
              conv_desc_.handle(),
              static_cast<miopenConvBwdWeightsAlgorithm_t>(algo_id_), &beta,
              filter_desc_.handle(), filter_data.opaque(),
              scratch_memory.opaque(), scratch_memory.size());
        }
        break;
      }
      default:
        return port::InternalError(absl::StrCat("Unexpected convolution kind ",
                                                static_cast<int>(kind_)));
    }

    if (is_profiling) {
      if (!timer->Stop(AsGpuStream(stream))) {
        timer->Destroy();
        return port::Status(port::error::INTERNAL, "Failed to stop timer");
      }
      // Only record a profile entry when the convolution itself succeeded.
      if (status == miopenStatusSuccess) {
        dnn::AlgorithmDesc algotype(algo_id_, false);
        profile_result->set_algorithm(algotype);
        profile_result->set_elapsed_time_in_ms(timer->GetElapsedMilliseconds());
        profile_result->set_scratch_size(scratch_memory.size());
      }
      timer->Destroy();
    }

    if (status != miopenStatusSuccess) {
      return port::InternalError(
          absl::StrCat("Failed to enqueue convolution on stream: ",
                       ::stream_executor::gpu::ToString(status)));
    }

    return port::Status::OK();
  }

 private:
  GpuExecutor* parent_;        // Not owned.
  MIOpenAccess* miopen_;       // Not owned.
  int64_t algo_id_;            // MIOpen solution id or algorithm enum value.
  size_t workspace_size_;      // Required scratch bytes for this algorithm.
  dnn::ConvolutionKind kind_;
  bool use_immediate_mode_;

  ScopedTensorDescriptor input_desc_;
  ScopedTensorDescriptor output_desc_;
  ScopedFilterDescriptor filter_desc_;
  ScopedConvolutionDescriptor conv_desc_;
};
// Enqueues a single convolution using the caller-specified algorithm by
// building a one-shot RocmConvRunner and invoking it immediately.
port::Status MIOpenSupport::DoConvolve(
    dnn::ConvolutionKind kind, dnn::DataType element_type,
    dnn::DataType output_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    dnn::AlgorithmDesc algorithm_desc, DeviceMemory<uint8> scratch_memory,
    dnn::ProfileResult* output_profile_result) {
  auto runner_or = ConvolveRunnerFromDesc(
      stream, algorithm_desc, kind, element_type, output_type,
      input_descriptor, filter_descriptor, output_descriptor,
      convolution_descriptor);
  if (!runner_or.ok()) {
    return runner_or.status();
  }
  auto runner = std::move(runner_or).ValueOrDie();
  return (*runner)(stream, input_data, filter_data, output_data,
                   scratch_memory, output_profile_result);
}
// Reports the fixed set of MIOpen forward-convolution algorithms; the
// compute capability is ignored on this backend.
bool MIOpenSupport::GetConvolveAlgorithms(
    // ROCM TODO: refactor cc_major / cc_minor
    CudaComputeCapability cuda_compute_capability,
    std::vector<dnn::AlgorithmDesc>* out_algorithms) {
  out_algorithms->clear();
  for (const auto algo :
       {miopenConvolutionFwdAlgoGEMM, miopenConvolutionFwdAlgoDirect,
        miopenConvolutionFwdAlgoFFT, miopenConvolutionFwdAlgoWinograd}) {
    out_algorithms->push_back(dnn::AlgorithmDesc(algo, false));
  }
  return true;
}
// Builds one ConvRunner per algorithm that MIOpen reports as applicable to
// this convolution. MIOpen requires matching input/output element types, so
// mixed-type requests are rejected up front.
port::Status MIOpenSupport::GetConvolveRunners(
    bool use_cudnn_frontend, dnn::ConvolutionKind kind,
    dnn::DataType input_type, dnn::DataType output_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor, bool use_fallback,
    ScratchAllocator* scratch_allocator,
    std::vector<std::unique_ptr<const dnn::ConvRunner>>* out_runners) {
  if (input_type != output_type) {
    return port::UnimplementedError(
        absl::StrFormat("MIOpen backend does not support different input and "
                        "output types: %d != %d",
                        input_type, output_type));
  }

  // Enumerate candidate algorithms (immediate or find mode, depending on
  // configuration).
  std::vector<dnn::ProfileResult> profile_results;
  const bool got_algorithms = GetMIOpenConvolveAlgorithms(
      kind, input_type, stream, input_descriptor, input_data, filter_descriptor,
      filter_data, output_descriptor, output_data, convolution_descriptor,
      scratch_allocator, &profile_results);
  if (!got_algorithms) {
    return port::Status(
        port::error::UNKNOWN,
        "GetConvolveRunners: GetMIOpenConvolveAlgorithms failed");
  }

  // Wrap each candidate algorithm in a runner.
  for (const auto& profile_result : profile_results) {
    auto runner_or = ConvolveRunnerFromDesc(
        stream, profile_result.algorithm(), kind, input_type, output_type,
        input_descriptor, filter_descriptor, output_descriptor,
        convolution_descriptor);
    if (!runner_or.ok()) {
      return runner_or.status();
    }
    out_runners->push_back(std::move(runner_or).ValueOrDie());
  }

  return port::Status::OK();
}
// Constructs a RocmConvRunner for the given algorithm descriptor. Requires
// matching input/output types and an algorithm that carries its workspace
// size (MIOpen runners cannot query it later).
port::StatusOr<std::unique_ptr<const dnn::ConvRunner>>
MIOpenSupport::ConvolveRunnerFromDesc(
    Stream* stream, const dnn::AlgorithmDesc& algorithm_desc,
    dnn::ConvolutionKind kind, dnn::DataType input_type,
    dnn::DataType output_type, const dnn::BatchDescriptor& input_descriptor,
    const dnn::FilterDescriptor& filter_descriptor,
    const dnn::BatchDescriptor& output_descriptor,
    const dnn::ConvolutionDescriptor& convolution_descriptor) {
  if (input_type != output_type) {
    return port::UnimplementedError(
        absl::StrFormat("MIOpen backend does not support different input and "
                        "output types: %d != %d",
                        input_type, output_type));
  }

  const auto workspace_size = algorithm_desc.workspace_size();
  if (!workspace_size.has_value()) {
    return port::InvalidArgumentError(
        "MIOpenSupport::ConvolveRunnerFromDesc requires "
        "AlgorithmProto.workspace_size, but it was missing.");
  }

  auto runner = std::make_unique<RocmConvRunner>(
      parent_, miopen_.get(), algorithm_desc.algo_id(), *workspace_size, kind,
      input_type, use_immediate_mode_, input_descriptor, output_descriptor,
      filter_descriptor, convolution_descriptor);
  return {std::move(runner)};
}
// Dispatches algorithm enumeration to the immediate-mode or find-mode
// implementation, based on the backend's use_immediate_mode_ setting.
bool MIOpenSupport::GetMIOpenConvolveAlgorithms(
    dnn::ConvolutionKind kind, dnn::DataType element_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    ScratchAllocator* scratch_allocator,
    std::vector<dnn::ProfileResult>* out_algorithms) {
  if (use_immediate_mode_) {
    return GetMIOpenConvolveAlgorithmsImmediateMode(
        kind, element_type, stream, input_descriptor, input_data,
        filter_descriptor, filter_data, output_descriptor, output_data,
        convolution_descriptor, scratch_allocator, out_algorithms);
  }
  return GetMIOpenConvolveAlgorithmsFindMode(
      kind, element_type, stream, input_descriptor, input_data,
      filter_descriptor, filter_data, output_descriptor, output_data,
      convolution_descriptor, scratch_allocator, out_algorithms);
}
// Enumerates convolution algorithms using MIOpen's "immediate" mode:
// GetSolutionCount -> GetSolution -> CompileSolution for each solution,
// without executing the convolution. Each compiled solution is appended to
// out_algorithms as a ProfileResult.
//
// NOTE(review): the error paths below use LOG(FATAL), which normally
// terminates the process, so the `return false` statements after them look
// unreachable in practice.
bool MIOpenSupport::GetMIOpenConvolveAlgorithmsImmediateMode(
    dnn::ConvolutionKind kind, dnn::DataType element_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    ScratchAllocator* scratch_allocator,
    std::vector<dnn::ProfileResult>* out_algorithms) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // MIOpen descriptors for the operands; all use the same element type.
  ScopedTensorDescriptor input_nd{input_descriptor,
                                  ToMIOpenDataType(element_type)};
  ScopedTensorDescriptor output_nd{output_descriptor,
                                   ToMIOpenDataType(element_type)};
  ScopedFilterDescriptor filter{filter_descriptor,
                                ToMIOpenDataType(element_type)};
  ScopedConvolutionDescriptor conv{convolution_descriptor,
                                   ToMIOpenDataType(element_type)};

  // First determine the number of algorithms available
  size_t maxSolutionCount = 0;

  switch (kind) {
    case dnn::ConvolutionKind::FORWARD: {
      auto status = wrap::miopenConvolutionForwardGetSolutionCount(
          miopen.handle(), filter.handle(), input_nd.handle(), conv.handle(),
          output_nd.handle(), &maxSolutionCount);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionForwardGetSolutionCount failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_DATA: {
      auto status = wrap::miopenConvolutionBackwardDataGetSolutionCount(
          miopen.handle(), output_nd.handle(), filter.handle(), conv.handle(),
          input_nd.handle(), &maxSolutionCount);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenConvolutionBackwardDataGetSolutionCount "
                      "failed: "
                   << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_FILTER: {
      auto status = wrap::miopenConvolutionBackwardWeightsGetSolutionCount(
          miopen.handle(), output_nd.handle(), input_nd.handle(), conv.handle(),
          filter.handle(), &maxSolutionCount);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionBackwardWeightsGetSolutionCount "
               "failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected convolution kind " << static_cast<int>(kind);
      return false;
      break;
    }
  }

  VLOG(kConvDebugVlogLevel)
      << "Number of conv solutions max: " << maxSolutionCount;

  // Optionally limit the enumeration to MIOpen's top-ranked solution.
  if (return_best_algo_only_) {
    VLOG(kConvDebugVlogLevel) << "TF_ROCM_RETURN_BEST_ALGO_ONLY is set, "
                              << "setting maxSolutionCount to 1";
    maxSolutionCount = 1;
  }

  size_t solutionCount = 0;
  std::unique_ptr<miopenConvSolution_t[]> solutions(
      new miopenConvSolution_t[maxSolutionCount]);

  // Fetch and compile each solution, then record it as a candidate.
  switch (kind) {
    case dnn::ConvolutionKind::FORWARD: {
      auto status = wrap::miopenConvolutionForwardGetSolution(
          miopen.handle(), filter.handle(), input_nd.handle(), conv.handle(),
          output_nd.handle(), maxSolutionCount, &solutionCount,
          solutions.get());
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenConvolutionForwardGetSolution failed: "
                   << ToString(status);
        return false;
      }
      VLOG(kConvDebugVlogLevel)
          << "Number of conv solutions actual: " << solutionCount;

      for (size_t i = 0; i < solutionCount; i++) {
        miopenConvSolution_t solution = solutions[i];

        VLOG(kConvDebugVlogLevel)
            << "solution " << i << " (time, mem, id, algo) = " << solution.time
            << ", " << solution.workspace_size << ", " << solution.solution_id
            << ", " << ToString(solution.algorithm);

        // Compiling up front ensures the solution is ready to run when the
        // corresponding runner is later invoked.
        status = wrap::miopenConvolutionForwardCompileSolution(
            miopen.handle(), filter.handle(), input_nd.handle(), conv.handle(),
            output_nd.handle(), solution.solution_id);
        if (status != miopenStatusSuccess) {
          LOG(FATAL)
              << "call to miopenConvolutionForwardCompileSolution failed: "
              << ToString(status);
          return false;
        }

        out_algorithms->emplace_back(
            GetProfileResultFromConvSolution(solution));
      }
      break;
    }

    case dnn::ConvolutionKind::BACKWARD_DATA: {
      auto status = wrap::miopenConvolutionBackwardDataGetSolution(
          miopen.handle(), output_nd.handle(), filter.handle(), conv.handle(),
          input_nd.handle(), maxSolutionCount, &solutionCount, solutions.get());
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionBackwardDataGetSolution failed: "
            << ToString(status);
        return false;
      }
      VLOG(kConvDebugVlogLevel)
          << "Number of conv solutions actual: " << solutionCount;

      for (size_t i = 0; i < solutionCount; i++) {
        miopenConvSolution_t solution = solutions[i];

        VLOG(kConvDebugVlogLevel)
            << "solution " << i << " (time, mem, id, algo) = " << solution.time
            << ", " << solution.workspace_size << ", " << solution.solution_id
            << ", " << ToString(solution.algorithm);

        status = wrap::miopenConvolutionBackwardDataCompileSolution(
            miopen.handle(), output_nd.handle(), filter.handle(), conv.handle(),
            input_nd.handle(), solution.solution_id);
        if (status != miopenStatusSuccess) {
          LOG(FATAL) << " call to miopenConvolutionBackwardDataCompileSolution "
                        "failed: "
                     << ToString(status);
          return false;
        }

        out_algorithms->emplace_back(
            GetProfileResultFromConvSolution(solution));
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_FILTER: {
      auto status = wrap::miopenConvolutionBackwardWeightsGetSolution(
          miopen.handle(), output_nd.handle(), input_nd.handle(), conv.handle(),
          filter.handle(), maxSolutionCount, &solutionCount, solutions.get());
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionBackwardWeightsGetSolution failed: "
            << ToString(status);
        return false;
      }
      VLOG(kConvDebugVlogLevel)
          << "Number of conv solutions actual: " << solutionCount;

      for (size_t i = 0; i < solutionCount; i++) {
        miopenConvSolution_t solution = solutions[i];

        VLOG(kConvDebugVlogLevel)
            << "solution " << i << " (time, mem, id, algo) = " << solution.time
            << ", " << solution.workspace_size << ", " << solution.solution_id
            << ", " << ToString(solution.algorithm);

        status = wrap::miopenConvolutionBackwardWeightsCompileSolution(
            miopen.handle(), output_nd.handle(), input_nd.handle(),
            conv.handle(), filter.handle(), solution.solution_id);
        if (status != miopenStatusSuccess) {
          LOG(FATAL)
              << "call to miopenConvolutionBackwardWeightsCompileSolution "
                 "failed: "
              << ToString(status);
          return false;
        }

        out_algorithms->emplace_back(
            GetProfileResultFromConvSolution(solution));
      }
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected convolution kind " << static_cast<int>(kind);
      return false;
      break;
    }
  }

  return true;
}
// Enumerates convolution algorithms using MIOpen's "find" mode, which
// actually executes candidate kernels against the provided device buffers
// and returns timing data. Only the single best algorithm is requested, and
// a scratch buffer of the size required by the Find call is allocated first.
//
// NOTE(review): the error paths below use LOG(FATAL), which normally
// terminates the process, so the `return false` statements after them look
// unreachable in practice.
bool MIOpenSupport::GetMIOpenConvolveAlgorithmsFindMode(
    dnn::ConvolutionKind kind, dnn::DataType element_type, Stream* stream,
    const dnn::BatchDescriptor& input_descriptor, DeviceMemoryBase input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data, const dnn::BatchDescriptor& output_descriptor,
    DeviceMemoryBase output_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    ScratchAllocator* scratch_allocator,
    std::vector<dnn::ProfileResult>* out_algorithms) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // MIOpen descriptors for the operands; all use the same element type.
  ScopedTensorDescriptor input_nd{input_descriptor,
                                  ToMIOpenDataType(element_type)};
  ScopedTensorDescriptor output_nd{output_descriptor,
                                   ToMIOpenDataType(element_type)};
  ScopedFilterDescriptor filter{filter_descriptor,
                                ToMIOpenDataType(element_type)};
  ScopedConvolutionDescriptor conv{convolution_descriptor,
                                   ToMIOpenDataType(element_type)};

  // Determine the workspace memory size that will need by the call to Find
  size_t scratch_memory_size = 0;
  switch (kind) {
    case dnn::ConvolutionKind::FORWARD: {
      auto status = wrap::miopenConvolutionForwardGetWorkSpaceSize(
          miopen.handle(), filter.handle(), input_nd.handle(), conv.handle(),
          output_nd.handle(), &scratch_memory_size);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionForwardGetWorkspaceSize failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_DATA: {
      auto status = wrap::miopenConvolutionBackwardDataGetWorkSpaceSize(
          miopen.handle(), output_nd.handle(), filter.handle(), conv.handle(),
          input_nd.handle(), &scratch_memory_size);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionBackwardDataGetWorkspaceSize failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_FILTER: {
      auto status = wrap::miopenConvolutionBackwardWeightsGetWorkSpaceSize(
          miopen.handle(), output_nd.handle(), input_nd.handle(), conv.handle(),
          filter.handle(), &scratch_memory_size);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenConvolutionBackwardWeightsGetWorkspaceSize "
               "failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected convolution kind " << static_cast<int>(kind);
      return false;
      break;
    }
  }

  // allocate scratch memory
  DeviceMemory<uint8> scratch_memory;
  if (scratch_memory_size != 0) {
    if (scratch_allocator == nullptr) {
      LOG(FATAL)
          << "An allocator must be specified when scratch memory is needed";
      return false;
    }
    auto allocated = scratch_allocator->AllocateBytes(scratch_memory_size);
    if (allocated.ok()) {
      scratch_memory = allocated.ValueOrDie();
    } else {
      LOG(FATAL)
          << "Failed to allocate scratch memory - "
          << allocated.status().error_message() << "\n"
          << "\tYou can set the env var TF_CUDNN_WORKSPACE_LIMIT_IN_MB to a "
             "larger number (e.g. 8192) to increase the max memory limit.\n"
          << "\tIncreasing the max memory limit might help resolve this "
             "error";
      return false;
    }
  }

  // Only get the best algorithm for Find Mode
  size_t requestedAlgorithmCount = 1;

  VLOG(kConvDebugVlogLevel)
      << "Number of conv algortihms to request: " << requestedAlgorithmCount;

  miopenConvAlgoPerf_t returnedAlgorithm;
  int returnedAlgorithmCount = 0;
  // Exhaustive search is disabled: the Find call uses its quick heuristic.
  bool exhaustiveSearch = false;

  switch (kind) {
    case dnn::ConvolutionKind::FORWARD: {
      auto status = wrap::miopenFindConvolutionForwardAlgorithm(
          miopen.handle(), input_nd.handle(), input_data.opaque(),
          filter.handle(), filter_data.opaque(), conv.handle(),
          output_nd.handle(), output_data.opaque(), requestedAlgorithmCount,
          &returnedAlgorithmCount, &returnedAlgorithm, scratch_memory.opaque(),
          scratch_memory_size, exhaustiveSearch);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenFindConvolutionForwardAlgorithm failed: "
                   << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_DATA: {
      auto status = wrap::miopenFindConvolutionBackwardDataAlgorithm(
          miopen.handle(), output_nd.handle(), output_data.opaque(),
          filter.handle(), filter_data.opaque(), conv.handle(),
          input_nd.handle(), input_data.opaque(), requestedAlgorithmCount,
          &returnedAlgorithmCount, &returnedAlgorithm, scratch_memory.opaque(),
          scratch_memory_size, exhaustiveSearch);
      if (status != miopenStatusSuccess) {
        LOG(FATAL)
            << "call to miopenFindConvolutionBackwardDataAlgorithm failed: "
            << ToString(status);
        return false;
      }
      break;
    }
    case dnn::ConvolutionKind::BACKWARD_FILTER: {
      auto status = wrap::miopenFindConvolutionBackwardWeightsAlgorithm(
          miopen.handle(), output_nd.handle(), output_data.opaque(),
          input_nd.handle(), input_data.opaque(), conv.handle(),
          filter.handle(), filter_data.opaque(), requestedAlgorithmCount,
          &returnedAlgorithmCount, &returnedAlgorithm, scratch_memory.opaque(),
          scratch_memory_size, exhaustiveSearch);
      if (status != miopenStatusSuccess) {
        LOG(FATAL) << "call to miopenConvolutionBackwardWeightsAlgorithm "
                      "failed: "
                   << ToString(status);
        return false;
      }
      break;
    }
    default: {
      LOG(FATAL) << "Unexpected convolution kind " << static_cast<int>(kind);
      return false;
      break;
    }
  }

  out_algorithms->emplace_back(
      GetProfileResultFromConvAlgoPerf(kind, returnedAlgorithm));

  return true;
}
// RNN algorithm enumeration is not implemented for MIOpen: the output
// vector is left untouched and success is reported.
// ROCM TODO: implement this with proper MIOpen API
bool MIOpenSupport::GetRnnAlgorithms(
    std::vector<dnn::AlgorithmDesc>* out_algorithms) {
  return true;
}
// Reports the fixed set of MIOpen backward-data convolution algorithms; the
// compute capability is ignored on this backend.
bool MIOpenSupport::GetConvolveBackwardDataAlgorithms(
    // ROCM TODO: refactor cc_major / cc_minor
    CudaComputeCapability cuda_compute_capability,
    std::vector<dnn::AlgorithmDesc>* out_algorithms) {
  out_algorithms->clear();
  for (const auto algo :
       {miopenConvolutionBwdDataAlgoGEMM, miopenConvolutionBwdDataAlgoDirect,
        miopenConvolutionBwdDataAlgoFFT,
        miopenConvolutionBwdDataAlgoWinograd}) {
    out_algorithms->push_back(dnn::AlgorithmDesc(algo, false));
  }
  return true;
}
// Reports the fixed set of MIOpen backward-filter convolution algorithms;
// the compute capability is ignored on this backend.
bool MIOpenSupport::GetConvolveBackwardFilterAlgorithms(
    // ROCM TODO: refactor cc_major / cc_minor
    CudaComputeCapability cuda_compute_capability,
    std::vector<dnn::AlgorithmDesc>* out_algorithms) {
  out_algorithms->clear();
  for (const auto algo : {miopenConvolutionBwdWeightsAlgoGEMM,
                          miopenConvolutionBwdWeightsAlgoDirect}) {
    out_algorithms->push_back(dnn::AlgorithmDesc(algo, false));
  }
  return true;
}
// Batch normalization forward pass for fp16 activations with fp32
// scale/offset/statistics. Thin overload that forwards to
// DoBatchNormalizationForwardImpl with the matching dnn::DataType tags.
// reserve_space_allocator and workspace_allocator are accepted for
// interface compatibility but not forwarded.
bool MIOpenSupport::DoBatchNormalizationForward(
    Stream* stream, const DeviceMemory<Eigen::half>& x,
    const DeviceMemory<float>& scale, const DeviceMemory<float>& offset,
    const DeviceMemory<float>& estimated_mean,
    const DeviceMemory<float>& estimated_variance,
    const DeviceMemory<Eigen::half>& side_input,
    const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    const double exponential_average_factor,
    dnn::ActivationMode activation_mode, DeviceMemory<Eigen::half>* y,
    DeviceMemory<float>* batch_mean, DeviceMemory<float>* batch_var,
    DeviceMemory<float>* saved_mean, DeviceMemory<float>* saved_inv_var,
    bool is_training, ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator) {
  return DoBatchNormalizationForwardImpl<Eigen::half, float>(
      stream, dnn::DataType::kHalf, dnn::DataType::kFloat, x, scale, offset,
      estimated_mean, estimated_variance, side_input, x_desc, scale_offset_desc,
      epsilon, exponential_average_factor, activation_mode, y, batch_mean,
      batch_var, saved_mean, saved_inv_var, is_training);
}
// Batch normalization forward pass for fp32 activations. Thin overload that
// forwards to DoBatchNormalizationForwardImpl with the matching
// dnn::DataType tags. reserve_space_allocator and workspace_allocator are
// accepted for interface compatibility but not forwarded.
bool MIOpenSupport::DoBatchNormalizationForward(
    Stream* stream, const DeviceMemory<float>& x,
    const DeviceMemory<float>& scale, const DeviceMemory<float>& offset,
    const DeviceMemory<float>& estimated_mean,
    const DeviceMemory<float>& estimated_variance,
    const DeviceMemory<float>& side_input, const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    const double exponential_average_factor,
    dnn::ActivationMode activation_mode, DeviceMemory<float>* y,
    DeviceMemory<float>* batch_mean, DeviceMemory<float>* batch_var,
    DeviceMemory<float>* saved_mean, DeviceMemory<float>* saved_inv_var,
    bool is_training, ScratchAllocator* reserve_space_allocator,
    ScratchAllocator* workspace_allocator) {
  return DoBatchNormalizationForwardImpl<float, float>(
      stream, dnn::DataType::kFloat, dnn::DataType::kFloat, x, scale, offset,
      estimated_mean, estimated_variance, side_input, x_desc, scale_offset_desc,
      epsilon, exponential_average_factor, activation_mode, y, batch_mean,
      batch_var, saved_mean, saved_inv_var, is_training);
}
// Shared implementation of the batch-norm forward pass. T is the activation
// element type, U the scale/statistics type. Always uses miopenBNSpatial
// mode. Note: side_input and activation_mode are accepted but not used by
// the MIOpen calls below.
//
// Returns true on success; logs and returns false if MIOpen reports an
// error.
template <class T, class U>
bool MIOpenSupport::DoBatchNormalizationForwardImpl(
    Stream* stream, dnn::DataType input_data_type,
    dnn::DataType scale_data_type, const DeviceMemory<T>& x,
    const DeviceMemory<U>& scale, const DeviceMemory<U>& offset,
    const DeviceMemory<U>& estimated_mean,
    const DeviceMemory<U>& estimated_variance,
    const DeviceMemory<T>& side_input, const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    const double exponential_average_factor,
    dnn::ActivationMode activation_mode, DeviceMemory<T>* y,
    DeviceMemory<U>* batch_mean, DeviceMemory<U>* batch_var,
    DeviceMemory<U>* saved_mean, DeviceMemory<U>* saved_inv_var,
    bool is_training) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  ScopedTensorDescriptor x_descriptor{x_desc,
                                      ToMIOpenDataType(input_data_type)};
  ScopedTensorDescriptor scale_offset_descriptor{
      scale_offset_desc, ToMIOpenDataType(scale_data_type)};
  miopenBatchNormMode_t mode = miopenBNSpatial;
  // Blend factors passed to MIOpen: result = 1 * computed + 0 * prior.
  float one = 1.0;
  float zero = 0.0;

  auto status = miopenStatusInvalidValue;
  if (is_training) {
    // Training: compute batch statistics, update the running mean/variance
    // using exponential_average_factor, and save the batch mean / inverse
    // variance for the backward pass.
    status = wrap::miopenBatchNormalizationForwardTraining(
        miopen.handle(), mode, &one, &zero, x_descriptor.handle(), x.opaque(),
        x_descriptor.handle(), y->opaque(), scale_offset_descriptor.handle(),
        const_cast<void*>(scale.opaque()), const_cast<void*>(offset.opaque()),
        exponential_average_factor, batch_mean->opaque(), batch_var->opaque(),
        epsilon, saved_mean->opaque(), saved_inv_var->opaque());
  } else {
    // Inference: normalize using the caller-provided estimated statistics.
    const void* maybe_inv_var = estimated_variance.opaque();
    status = wrap::miopenBatchNormalizationForwardInference(
        miopen.handle(), mode, &one, &zero, x_descriptor.handle(), x.opaque(),
        x_descriptor.handle(), y->opaque(), scale_offset_descriptor.handle(),
        const_cast<void*>(scale.opaque()), const_cast<void*>(offset.opaque()),
        const_cast<void*>(estimated_mean.opaque()),
        const_cast<void*>(maybe_inv_var), epsilon);
  }
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to enqueue forward batch normalization on stream: "
               << ToString(status);
    return false;
  }
  return true;
}
// Batch normalization backward pass for fp16 activations with fp32
// statistics. Only y_backprop, x, scale, mean, inv_var and the gradient
// outputs are forwarded to the impl; offset, y, activation_mode,
// side_input_backprop, reserve_space_data, and workspace_allocator are
// accepted for interface compatibility but unused here.
bool MIOpenSupport::DoBatchNormalizationBackward(
    Stream* stream, const DeviceMemory<Eigen::half>& y_backprop,
    const DeviceMemory<Eigen::half>& x, const DeviceMemory<float>& scale,
    const DeviceMemory<float>& offset, const DeviceMemory<float>& mean,
    const DeviceMemory<float>& inv_var, const DeviceMemory<Eigen::half>& y,
    const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<Eigen::half>* x_backprop,
    DeviceMemory<float>* scale_backprop, DeviceMemory<float>* offset_backprop,
    DeviceMemory<Eigen::half>* side_input_backprop,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator) {
  return DoBatchNormalizationBackwardImpl<Eigen::half, float>(
      stream, miopenHalf, miopenFloat, y_backprop, x, scale, mean, inv_var,
      x_desc, scale_offset_desc, epsilon, x_backprop, scale_backprop,
      offset_backprop);
}
// Batch normalization backward pass for fp32 activations. Only y_backprop,
// x, scale, mean, variance and the gradient outputs are forwarded to the
// impl; offset, y, activation_mode, side_input_backprop,
// reserve_space_data, and workspace_allocator are accepted for interface
// compatibility but unused here.
bool MIOpenSupport::DoBatchNormalizationBackward(
    Stream* stream, const DeviceMemory<float>& y_backprop,
    const DeviceMemory<float>& x, const DeviceMemory<float>& scale,
    const DeviceMemory<float>& offset, const DeviceMemory<float>& mean,
    const DeviceMemory<float>& variance, const DeviceMemory<float>& y,
    const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<float>* x_backprop,
    DeviceMemory<float>* scale_backprop, DeviceMemory<float>* offset_backprop,
    DeviceMemory<float>* side_input_backprop,
    DeviceMemory<uint8>* reserve_space_data,
    ScratchAllocator* workspace_allocator) {
  return DoBatchNormalizationBackwardImpl<float, float>(
      stream, miopenFloat, miopenFloat, y_backprop, x, scale, mean, variance,
      x_desc, scale_offset_desc, epsilon, x_backprop, scale_backprop,
      offset_backprop);
}
// Shared implementation of the batch-norm backward pass. T is the
// activation element type, U the scale/statistics type; the MIOpen data
// types are passed as plain ints and cast back to miopenDataType_t.
// Always uses miopenBNSpatial mode.
//
// Returns true on success; logs and returns false if MIOpen reports an
// error.
template <class T, class U>
bool MIOpenSupport::DoBatchNormalizationBackwardImpl(
    Stream* stream, int miopen_input_type, int miopen_scale_type,
    const DeviceMemory<T>& y_backprop, const DeviceMemory<T>& x,
    const DeviceMemory<U>& scale, const DeviceMemory<U>& mean,
    const DeviceMemory<U>& variance, const dnn::BatchDescriptor& x_desc,
    const dnn::BatchDescriptor& scale_offset_desc, const double epsilon,
    DeviceMemory<T>* x_backprop, DeviceMemory<U>* scale_backprop,
    DeviceMemory<U>* offset_backprop) {
  auto miopen = miopen_->GetHandle(parent_, stream);
  ScopedTensorDescriptor x_descriptor{
      x_desc, static_cast<miopenDataType_t>(miopen_input_type)};
  ScopedTensorDescriptor scale_offset_descriptor{
      scale_offset_desc, static_cast<miopenDataType_t>(miopen_scale_type)};
  miopenBatchNormMode_t mode = miopenBNSpatial;
  // Blend factors passed to MIOpen: result = 1 * computed + 0 * prior.
  float one = 1.0;
  float zero = 0.0;

  auto status = wrap::miopenBatchNormalizationBackward(
      miopen.handle(), mode, &one, &zero, &one, &zero, x_descriptor.handle(),
      x.opaque(), x_descriptor.handle(), y_backprop.opaque(),
      x_descriptor.handle(), x_backprop->opaque(),
      scale_offset_descriptor.handle(), scale.opaque(),
      scale_backprop->opaque(), offset_backprop->opaque(), epsilon,
      mean.opaque(), variance.opaque());
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to enqueue backward batch normalization on stream: "
               << ToString(status);
    return false;
  }
  return true;
}
// Fused convolution (conv + scaled side input + bias + activation) is not
// implemented on the MIOpen backend; this always returns
// UnimplementedError without touching any of its arguments.
port::Status MIOpenSupport::DoFusedConvolve(
    Stream* stream, dnn::DataType input_type, dnn::DataType side_input_type,
    dnn::DataType bias_type, dnn::DataType output_type,
    const dnn::BatchDescriptor& conv_input_descriptor,
    DeviceMemoryBase conv_input_data, double conv_input_scale,
    const dnn::FilterDescriptor& filter_descriptor,
    DeviceMemoryBase filter_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    DeviceMemoryBase side_input_data, double side_input_scale,
    const dnn::BatchDescriptor& bias_descriptor, DeviceMemoryBase biases,
    dnn::ActivationMode activation_mode,
    const dnn::BatchDescriptor& output_descriptor, DeviceMemoryBase output_data,
    ScratchAllocator* scratch_allocator,
    const dnn::AlgorithmConfig& algorithm_config,
    dnn::ProfileResult* output_profile_result) {
  return port::UnimplementedError("fused convolve not implemented yet");
}
// Tensor layout/type transformation is not implemented on the MIOpen
// backend; this always logs an error and returns false without touching
// output_data.
bool MIOpenSupport::DoTransformTensor(Stream* stream,
                                      const dnn::BatchDescriptor& input_desc,
                                      dnn::DataType input_type,
                                      const DeviceMemoryBase& input_data,
                                      const dnn::BatchDescriptor& output_desc,
                                      dnn::DataType output_type, float scale,
                                      DeviceMemoryBase* output_data) {
  // ROCM TODO implement this operation
  LOG(ERROR) << "transform tensor not implemented yet";
  return false;
}
// Computes output = input * weights (a fully-connected layer) via BLAS GEMM.
// Two paths: a single GEMM when the output has no spatial extent, and a
// batched GEMM (one sub-matrix product per (y,x) output coordinate) otherwise.
// Returns false on unsupported layouts or if the stream errored.
bool MIOpenSupport::DoMatMul(Stream* stream,
                             const DeviceMemory<float>& input_data,
                             const DeviceMemory<float>& weights,
                             const dnn::BatchDescriptor& input_dimensions,
                             const dnn::BatchDescriptor& output_dimensions,
                             DeviceMemory<float>* output_data) {
  if (input_dimensions.count() != output_dimensions.count()) {
    LOG(ERROR) << "MatMul input and output dimensions are not compatible.";
    return false;
  }
  // We do not permute the input or output, instead we just
  // reinterpret the layout. We are working with row-major matrices
  // and the rows of the input and output correspond to batch, so
  // batch has to be outermost in both the input and output.
  //
  // By adding transposes to the BLAS gemm call we could perhaps make
  // the kYXDepthBatch layout work as well, but there has been no need
  // for that so far.
  if (input_dimensions.layout() != dnn::DataLayout::kBatchYXDepth &&
      input_dimensions.layout() != dnn::DataLayout::kBatchDepthYX) {
    LOG(ERROR) << "Unsupported MatMul input layout.";
    return false;
  }
  if (output_dimensions.layout() != dnn::DataLayout::kBatchYXDepth &&
      output_dimensions.layout() != dnn::DataLayout::kBatchDepthYX) {
    LOG(ERROR) << "Unsupported MatMul output layout.";
    return false;
  }
  if (output_dimensions.width() == 1 && output_dimensions.height() == 1) {
    // This is a fast path that also supports the kBatchYXDepth layout.
    // The matrices here are in row-major format while BLAS expects
    // column-major, i.e. our matrices are transposed as far as BLAS
    // is concerned. So we need to compute output^T =
    // input^T*weights^T. There is no parameter for transposing the
    // output in BLAS gemm, but instead we can transpose both sides of
    // the equality to see that this is equivalent to
    // output=weights*input. So we only need to swap the order of
    // weights and input in the matrix product to correct for the
    // row-major versus column-major difference.
    const int64_t m = output_dimensions.NodesAcrossFeatureMaps();
    const int64_t n = input_dimensions.count();
    const int64_t k = input_dimensions.NodesAcrossFeatureMaps();
    if (!stream
             ->ThenBlasGemm(blas::Transpose::kNoTranspose,
                            blas::Transpose::kNoTranspose, m, n, k, weights, m,
                            input_data, k, output_data, m)
             .ok()) {
      return false;
    }
  } else {
    // This is a slower and more complex path that supports output
    // width() * height() > 1, though it only supports the
    // kBatchYXDepth layout. Does support kBatchDepthYX if output
    // feature_map_count() == 1, as then there is no difference
    // between the two layouts.
    //
    // The operation here is the same as above, except that we have to
    // do the matrix multiplication for each (y,x) output coordinate
    // separately. We then interpret weights as containing K = width()
    // * height() different matrices, which we all multiply onto the
    // matrix from input_data, yielding K matrix products. We then
    // combine these together into one matrix by concatenating all the
    // first rows of these matrices, then all the seconds rows and so
    // on. We can do this with a batched matrix multiplication, where
    // the result is written to a different submatrix of the output
    // for each matrix multiplication.
    //
    // The reason that we only support the kBatchYXDepth output layout
    // is that we have to do something in the depth for each (y,x)
    // coordinate. The kBatchYXDepth layout has the depth information
    // for each point (y,x) in contiguous memory while the
    // kBatchDepthYX layout does not.
    //
    // TODO(broune): Consider a special case for when output depth ==
    // 1, as then possibly this could all be done as one matrix
    // multiplication instead of a batched one, which should be
    // faster. Another possibility would be to add a weights layout
    // parameter and then support kBatchDepthYX for a different
    // weights layout.
    if (output_dimensions.layout() != dnn::DataLayout::kBatchYXDepth &&
        !(output_dimensions.layout() == dnn::DataLayout::kBatchDepthYX &&
          output_dimensions.feature_map_count() == 1)) {
      LOG(ERROR) << "Unsupported MatMul output layout.";
      return false;
    }
    const float alpha = 1.0f;  // Take the matrix product without scaling it.
    const float beta = 0.0f;   // Ignore the original values in output_data.
    const uint64_t m = output_dimensions.feature_map_count();
    const uint64_t n = input_dimensions.count();
    const uint64_t k = input_dimensions.NodesAcrossFeatureMaps();
    const int lda = m;
    const int ldb = k;
    const int ldc = output_dimensions.NodesAcrossFeatureMaps();
    const int batch_count = output_dimensions.NodesPerFeatureMap();
    // Build per-(y,x) views: a[i] is the i-th weight sub-matrix, b[i] the
    // shared input, c[i] the i-th output sub-matrix.
    std::vector<DeviceMemory<float>> a(batch_count);
    std::vector<DeviceMemory<float>> b(batch_count);
    std::vector<DeviceMemory<float>> c(batch_count);
    for (int i = 0; i < batch_count; ++i) {
      const int weights_offset = i * input_dimensions.NodesAcrossFeatureMaps() *
                                 output_dimensions.feature_map_count();
      // NOTE(review): MakeFromByteSize is handed an *element* count
      // (ElementCount() - offset), not a byte count. The GEMM reads through
      // the pointer, so the recorded size may be advisory only — confirm.
      a[i] = DeviceMemory<float>::MakeFromByteSize(
          const_cast<float*>(reinterpret_cast<const float*>(weights.opaque())) +
              weights_offset,
          weights.ElementCount() - weights_offset);
      b[i] = input_data;
      const int output_offset = i * output_dimensions.feature_map_count();
      c[i] = DeviceMemory<float>::MakeFromByteSize(
          const_cast<float*>(
              reinterpret_cast<const float*>(output_data->opaque())) +
              output_offset,
          output_data->ElementCount() - output_offset);
    }
    // The batched-GEMM API takes vectors of pointers, not values.
    const auto toPtrs = [](std::vector<DeviceMemory<float>>& v) {
      std::vector<DeviceMemory<float>*> ptrs;
      ptrs.reserve(v.size());
      for (auto& mem : v) {
        ptrs.push_back(&mem);
      }
      return ptrs;
    };
    stream->ThenBlasGemmBatched(blas::Transpose::kNoTranspose,
                                blas::Transpose::kNoTranspose, m, n, k, alpha,
                                toPtrs(a), lda, toPtrs(b), ldb, beta, toPtrs(c),
                                ldc, batch_count);
  }
  return stream->ok();
}
// Adds a per-feature-map bias to input_data, writing the result to
// output_data. If input and output buffers differ, the input is first copied
// to the output; the bias is then accumulated in place via miopenOpTensor.
bool MIOpenSupport::DoBiasAdd(Stream* stream,
                              const DeviceMemory<float>& input_data,
                              const DeviceMemory<float>& biases,
                              const dnn::BatchDescriptor& dimensions,
                              DeviceMemory<float>* output_data) {
  ScopedTensorDescriptor input_descriptor{dimensions, miopenFloat};

  // The bias is a 1x1 tensor per feature map, broadcast across batch/H/W.
  BatchDescriptor bias_dimensions;
  bias_dimensions.set_count(1)
      .set_feature_map_count(dimensions.feature_map_count())
      .set_height(1)
      .set_width(1)
      .set_layout(dnn::DataLayout::kBatchYXDepth);
  ScopedTensorDescriptor bias_descriptor{bias_dimensions, miopenFloat};

  // Stage the input into the output buffer so the OpTensor call below can
  // accumulate onto it (beta = 1 keeps the copied values).
  if (input_data.opaque() != output_data->opaque()) {
    stream->ThenMemcpy(output_data, input_data,
                       dimensions.ElementCount() * sizeof(float));
    if (!stream->ok()) {
      LOG(ERROR)
          << "stream " << stream
          << " could not enqueue a tensor copy as part of bias addition.";
      return false;
    }
  }

  auto miopen = miopen_->GetHandle(parent_, stream);

  // With alpha1 = 1, alpha2 = 0, beta = 1 this effectively computes
  // output += bias (the second bias operand is zeroed out by alpha2).
  const float alpha1 = 1.0f;
  const float alpha2 = 0.0f;
  const float beta = 1.0f;

  auto status = wrap::miopenOpTensor(
      miopen.handle(), miopenTensorOpAdd, &alpha1, bias_descriptor.handle(),
      biases.opaque(), &alpha2, bias_descriptor.handle(), biases.opaque(),
      &beta, input_descriptor.handle(), output_data->opaque());

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "stream " << stream << " could not enqueue bias addition.";
    return false;
  }

  return true;
}
// Standalone activation op. Not implemented on the MIOpen backend: always
// logs an error and returns false.
bool MIOpenSupport::DoActivate(Stream* stream,
                               dnn::ActivationMode activation_mode,
                               const dnn::BatchDescriptor& dimensions,
                               const DeviceMemory<float>& input_data,
                               DeviceMemory<float>* output_data,
                               uint64_t options) {
  LOG(ERROR) << "miopen does not support activation yet";
  return false;
}
// Forward pooling, double-precision overload. MIOpen pooling does not support
// double, so this always fails; see the float/half overloads below.
bool MIOpenSupport::DoPoolForward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<double>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    DeviceMemory<double>* output_data, ScratchAllocator* workspace_allocator) {
  LOG(ERROR) << "miopen does not support pooling for double type yet";
  return false;
}
// Returns true when this cached workspace entry was produced by exactly the
// same pooling configuration: element type, input/output shapes (compared in
// BatchDepthYX order), and every pooling attribute (mode/window/pad/stride).
bool PoolingWorkspaceDescriptor::IsSame(
    const dnn::BatchDescriptor& input_dimensions,
    const dnn::BatchDescriptor& output_dimensions,
    const dnn::PoolingDescriptor& pooling_dimensions, int _type) {
  if (dtype != _type) {
    return false;
  }
  if (input_dims !=
      input_dimensions.full_dims(dnn::DataLayout::kBatchDepthYX)) {
    return false;
  }
  if (output_dims !=
      output_dimensions.full_dims(dnn::DataLayout::kBatchDepthYX)) {
    return false;
  }
  return op.mode() == pooling_dimensions.mode() &&
         op.window() == pooling_dimensions.window() &&
         op.padding() == pooling_dimensions.padding() &&
         op.strides() == pooling_dimensions.strides();
}
// Looks up the cached workspace keyed by tensor pointer `p`. On a hit,
// `pdesc` points at the cache entry and true is returned; on any miss
// (no entry, or an entry with mismatched attributes) `pdesc` is null.
bool PoolingWorkspaceCache::find(
    const void* p, const dnn::BatchDescriptor& input_dimensions,
    const dnn::BatchDescriptor& output_dimensions,
    const dnn::PoolingDescriptor& pooling_dimensions, int _type,
    PoolingWorkspaceDescriptor*& pdesc) {
  pdesc = nullptr;
  auto entry = cache.find(p);
  const bool hit = entry != cache.end() &&
                   entry->second.IsSame(input_dimensions, output_dimensions,
                                        pooling_dimensions, _type);
  if (hit) {
    pdesc = &entry->second;
  }
  return hit;
}
// Inserts (or replaces) the cached workspace for tensor pointer `p`, taking
// ownership of `workspace`. Updates the running memory_used total and trims
// the cache afterwards if budgets are exceeded.
void PoolingWorkspaceCache::insert(
    const void* p, const dnn::BatchDescriptor& input_dimensions,
    const dnn::BatchDescriptor& output_dimensions,
    const dnn::PoolingDescriptor& pooling_dimensions, int _type,
    std::unique_ptr<TemporaryDeviceMemory<uint8>>& workspace, size_t wsp_size,
    hipStream_t hip_stream) {
  PoolingWorkspaceDescriptor* desc = 0;
  auto it = cache.find(p);
  if (it != cache.end()) {
    // replacing an entry with the same pointer but different attributes
    // (if everything matches, the caller is expected to reuse the entry)
    desc = &it->second;
    // Sync so the GPU is done with the old workspace before it is released
    // by the std::move assignment below.
    hipStreamSynchronize(hip_stream);
    memory_used -= desc->workspace_size;
  } else {
    cache[p] = PoolingWorkspaceDescriptor();
    desc = &cache[p];
  }
  desc->input_dims = input_dimensions.full_dims(dnn::DataLayout::kBatchDepthYX);
  desc->output_dims =
      output_dimensions.full_dims(dnn::DataLayout::kBatchDepthYX);
  desc->op = pooling_dimensions;
  desc->dtype = _type;
  // Timestamps are a monotonically increasing insertion counter used by
  // trim() for age-based eviction.
  desc->timestamp = timestamp;
  timestamp++;
  desc->workspace = std::move(workspace);
  desc->workspace_size = wsp_size;
  memory_used += wsp_size;
  trim(hip_stream);
}
// Evicts stale entries until the byte budget and entry-count budget are both
// respected (or the cache is already small). No-op while within budget.
void PoolingWorkspaceCache::trim(hipStream_t hip_stream) {
  if (memory_used < memory_budget && cache.size() < trim_size) return;
  bool must_sync = true;
  while (true) {
    // Target roughly 3/4 of the current entry count per pass.
    int new_size = cache.size() - (cache.size() >> 2);
    std::vector<const void*> old_entries;
    for (auto& x : cache)
      // An entry is "old" if more than new_size insertions have happened
      // since it was (re)inserted — age-based (approx. LRU) eviction.
      if (x.second.timestamp + new_size < timestamp)
        old_entries.push_back(x.first);
    if (old_entries.empty()) break;
    // Synchronize before freeing workspaces the GPU may still be using.
    // NOTE(review): must_sync starts true and is re-set to true below, so
    // the guard never actually skips the sync — confirm if intended.
    if (must_sync) hipStreamSynchronize(hip_stream);
    must_sync = true;
    for (auto x : old_entries) {
      memory_used -= cache[x].workspace_size;
      cache.erase(x);
    }
    if (memory_used < memory_budget || cache.size() < 10) break;
  }
}
// Forward pooling for float. When the pooling cache is enabled, the forward
// pass is asked to also produce the backward workspace (do_backward = true),
// which is stashed in m_pooling_cache keyed by the input pointer so a later
// DoPoolBackwardImpl can reuse it instead of re-running the forward pass.
bool MIOpenSupport::DoPoolForward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<float>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    DeviceMemory<float>* output_data, ScratchAllocator* workspace_allocator) {
  auto miopen = miopen_->GetHandle(parent_, stream);
  // Alpha is the scaling factor for input.
  float alpha = 1.0;
  // Beta is the scaling factor for output.
  float beta = 0.0;

  ScopedTensorDescriptor src_desc{input_dimensions, miopenFloat};
  ScopedTensorDescriptor dest_desc{output_dimensions, miopenFloat};
  ScopedPoolingDescriptor pooling_desc{pooling_dimensions};

  bool do_backward = false;
  uint8* workspace = 0;
  size_t workspace_size = 0;
  std::unique_ptr<TemporaryDeviceMemory<uint8>> wsp_mem;
  if (m_pooling_cache_enabled) {
    do_backward = true;
    auto status = wrap::miopenPoolingGetWorkSpaceSizeV2(
        pooling_desc.handle(), dest_desc.handle(), &workspace_size);
    if (status != miopenStatusSuccess) {
      LOG(ERROR)
          << "failed to obtain workspace size for backward pooling on stream: "
          << ToString(status);
      return false;
    }
    if (workspace_size != 0) {
      PoolingWorkspaceDescriptor* pdesc = 0;
      // Reuse a previously cached workspace if one exists for this exact
      // input pointer and pooling configuration.
      bool cache_hit =
          m_pooling_cache_allowed &&
          m_pooling_cache.find(input_data.opaque(), input_dimensions,
                               output_dimensions, pooling_dimensions,
                               miopenFloat, pdesc);
      if (cache_hit) {
        // reusing the same buffer
        workspace = reinterpret_cast<uint8*>(
            pdesc->workspace->mutable_device_memory()->opaque());
      } else {
        // Allocate a fresh workspace and hand ownership to the cache.
        wsp_mem = stream->AllocateTemporaryArray<uint8>(workspace_size)
                      .ConsumeValueOrDie();
        workspace = reinterpret_cast<uint8*>(
            wsp_mem->mutable_device_memory()->opaque());
        m_pooling_cache.insert(input_data.opaque(), input_dimensions,
                               output_dimensions, pooling_dimensions,
                               miopenFloat, wsp_mem, workspace_size,
                               AsGpuStreamValue(stream));
      }
    }
  }

  auto status = wrap::miopenPoolingForward(
      miopen.handle(), pooling_desc.handle(), &alpha, src_desc.handle(),
      input_data.opaque(), &beta, dest_desc.handle(), output_data->opaque(),
      do_backward, workspace, workspace_size);

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to enqueue forward pooling on stream: "
               << ToString(status);
    return false;
  }
  return true;
}
// Forward pooling for Eigen::half. Unlike the float overload, this path does
// not request or cache a backward workspace: the forward op is launched with
// do_backward = false and no workspace buffer.
bool MIOpenSupport::DoPoolForward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<Eigen::half>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    DeviceMemory<Eigen::half>* output_data,
    ScratchAllocator* workspace_allocator) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // Scaling factors: take the pooling result as-is (alpha = 1) and overwrite
  // any existing output contents (beta = 0).
  const float alpha = 1.0;
  const float beta = 0.0;

  ScopedTensorDescriptor in_desc{input_dimensions, miopenHalf};
  ScopedTensorDescriptor out_desc{output_dimensions, miopenHalf};
  ScopedPoolingDescriptor pool_desc{pooling_dimensions};

  const auto status = wrap::miopenPoolingForward(
      miopen.handle(), pool_desc.handle(), &alpha, in_desc.handle(),
      input_data.opaque(), &beta, out_desc.handle(), output_data->opaque(),
      false, nullptr, 0);

  const bool succeeded = (status == miopenStatusSuccess);
  if (!succeeded) {
    LOG(ERROR) << "failed to enqueue forward pooling on stream: "
               << ToString(status);
  }
  return succeeded;
}
// Backward pooling shared by the float and half overloads. MIOpen's backward
// pooling requires the workspace produced by the matching forward pass: on a
// cache hit the stored workspace is reused; on a miss the forward pass is
// re-run here (into a scratch `dest2` buffer) just to regenerate it.
template <class T>
bool MIOpenSupport::DoPoolBackwardImpl(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<T>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    const DeviceMemory<T>& output_data, const DeviceMemory<T>& input_diff_data,
    DeviceMemory<T>* output_diff_data, ScratchAllocator* workspace_allocator) {
  auto miopen = miopen_->GetHandle(parent_, stream);
  // First backward call turns caching on, so subsequent forward passes
  // save their workspaces (see DoPoolForward).
  if (m_pooling_cache_allowed) m_pooling_cache_enabled = true;
  // Alpha is the scaling factor for input.
  float alpha = 1.0;
  // Beta is the scaling factor for output.
  float beta = 0.0;

  // Map the template type to the MIOpen element type (float or half only).
  auto type =
      std::is_same<T, float>::value
          ? miopenFloat
          : (std::is_same<T, Eigen::half>::value ? miopenHalf
                                                 : (miopenDataType_t)-1);

  ScopedTensorDescriptor src_desc{input_dimensions, type};
  ScopedTensorDescriptor dest_desc{output_dimensions, type};
  ScopedPoolingDescriptor pooling_desc{pooling_dimensions};

  uint8* workspace_ptr = 0;
  DeviceMemory<uint8> workspace;
  PoolingWorkspaceDescriptor* pdesc = 0;

  size_t workspace_size_in_bytes = 0;
  auto status = wrap::miopenPoolingGetWorkSpaceSizeV2(
      pooling_desc.handle(), dest_desc.handle(), &workspace_size_in_bytes);
  if (status != miopenStatusSuccess) {
    LOG(ERROR)
        << "failed to obtain workspace size for backward pooling on stream: "
        << ToString(status);
    return false;
  }

  // Allocate the workspace.
  if (workspace_size_in_bytes > 0) {
    bool cache_hit = m_pooling_cache_allowed &&
                     m_pooling_cache.find(input_data.opaque(), input_dimensions,
                                          output_dimensions, pooling_dimensions,
                                          type, pdesc);
    if (cache_hit) {
      assert(pdesc != 0);
      workspace_ptr = reinterpret_cast<uint8*>(
          pdesc->workspace->mutable_device_memory()->opaque());
      VLOG(1) << "Pooling cache hit";
    } else {
      VLOG(1) << "Pooling cache miss";
      assert(workspace_allocator);
      auto allocated =
          workspace_allocator->AllocateBytes(workspace_size_in_bytes);
      if (!allocated.ok() || (workspace = allocated.ValueOrDie()) == nullptr) {
        LOG(ERROR) << "Failed to allocate backward pooling workspace";
        return false;
      }
      DeviceMemory<uint8> dest2;  // duplicated dest from forward:
      int64_t dest2_size = 0;

      // miopen requires the strides and dims to be ordered as BDYX.
      std::vector<int64_t> dims64 =
          output_dimensions.full_dims(dnn::DataLayout::kBatchDepthYX);
      // miopen does not use strides and must have 4D tensor.
      // std::vector<int> dims(pooling_dimensions.ndims() + 2);

      dest2_size = sizeof(T);
      for (auto& x : dims64) dest2_size *= x;

      if (dest2_size > 0) {
        assert(workspace_allocator);
        auto allocated = workspace_allocator->AllocateBytes(dest2_size);
        if (!allocated.ok() || (dest2 = allocated.ValueOrDie()) == nullptr) {
          LOG(ERROR) << "Failed to allocate backward pooling workspace";
          return false;
        }
      } else {
        // NOTE(review): execution falls through here with a null dest2 —
        // confirm whether this degenerate (zero-element) case should instead
        // return false.
        LOG(ERROR) << "Failed to calculate tensor size to chain forward and "
                      "backward pooling";
      }

      // Replay the forward pass with do_backward=true purely to populate
      // the workspace required by miopenPoolingBackward below.
      status = wrap::miopenPoolingForward(
          miopen.handle(), pooling_desc.handle(), &alpha, src_desc.handle(),
          input_data.opaque(), &beta, dest_desc.handle(), dest2.opaque(), true,
          workspace.opaque(), workspace_size_in_bytes);

      if (status != miopenStatusSuccess) {
        LOG(ERROR)
            << "failed to enqueue forward pooling (before backward) on stream: "
            << ToString(status);
        return false;
      }
      workspace_ptr = reinterpret_cast<uint8*>(workspace.opaque());
    }
  }
  status = wrap::miopenPoolingBackward(
      miopen.handle(), pooling_desc.handle(), &alpha, dest_desc.handle(),
      output_data.opaque(), dest_desc.handle(), input_diff_data.opaque(),
      src_desc.handle(), input_data.opaque(), &beta, src_desc.handle(),
      output_diff_data->opaque(), workspace_ptr);

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to enqueue backward pooling on stream: "
               << ToString(status);
    return false;
  }
  return true;
}
// Backward pooling, double-precision overload. MIOpen pooling does not
// support double, so this always fails.
bool MIOpenSupport::DoPoolBackward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<double>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    const DeviceMemory<double>& output_data,
    const DeviceMemory<double>& input_diff_data,
    DeviceMemory<double>* output_diff_data,
    ScratchAllocator* workspace_allocator) {
  LOG(ERROR) << "miopen does not support backward pooling on double type yet";
  return false;
}
// Backward pooling, float overload: forwards to the shared templated
// implementation (DoPoolBackwardImpl<float>).
bool MIOpenSupport::DoPoolBackward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<float>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    const DeviceMemory<float>& output_data,
    const DeviceMemory<float>& input_diff_data,
    DeviceMemory<float>* output_diff_data,
    ScratchAllocator* workspace_allocator) {
  return DoPoolBackwardImpl(stream, pooling_dimensions, input_dimensions,
                            input_data, output_dimensions, output_data,
                            input_diff_data, output_diff_data,
                            workspace_allocator);
}
// Backward pooling, half-precision overload: forwards to the shared
// templated implementation (DoPoolBackwardImpl<Eigen::half>).
bool MIOpenSupport::DoPoolBackward(
    Stream* stream, const dnn::PoolingDescriptor& pooling_dimensions,
    const dnn::BatchDescriptor& input_dimensions,
    const DeviceMemory<Eigen::half>& input_data,
    const dnn::BatchDescriptor& output_dimensions,
    const DeviceMemory<Eigen::half>& output_data,
    const DeviceMemory<Eigen::half>& input_diff_data,
    DeviceMemory<Eigen::half>* output_diff_data,
    ScratchAllocator* workspace_allocator) {
  return DoPoolBackwardImpl(stream, pooling_dimensions, input_dimensions,
                            input_data, output_dimensions, output_data,
                            input_diff_data, output_diff_data,
                            workspace_allocator);
}
// Forward local response normalization (LRN) via miopenLRNForward.
// Rejects descriptor modes MIOpen's LRN cannot express, then launches the
// kernel with plain-overwrite scaling (alpha = 1, beta = 0).
bool MIOpenSupport::DoNormalizeWithDimensions(
    Stream* stream, const dnn::NormalizeDescriptor& normalize_descriptor,
    const dnn::BatchDescriptor& dimensions,
    const DeviceMemory<float>& input_data, DeviceMemory<float>* output_data) {
  // Reject unsupported modes up front.
  if (normalize_descriptor.wrap_around()) {
    LOG(ERROR) << "MIOpen LRN does not support wrap-around mode";
    return false;
  }
  if (normalize_descriptor.segment_size()) {
    LOG(ERROR) << "MIOpen LRN does not support segmentation";
    return false;
  }

  auto miopen = miopen_->GetHandle(parent_, stream);

  ScopedTensorDescriptor tensor_desc{dimensions, miopenFloat};
  ScopedNormalizeDescriptor lrn_desc{normalize_descriptor};

  // Input scale of one, output blend of zero: plain overwrite semantics.
  const float alpha = 1.0f;
  const float beta = 0.0f;

  // No workspace and no backward chaining for the inference-only path.
  const auto status = wrap::miopenLRNForward(
      miopen.handle(), lrn_desc.handle(), &alpha, tensor_desc.handle(),
      input_data.opaque(), &beta, tensor_desc.handle(), output_data->opaque(),
      false, nullptr);

  if (status == miopenStatusSuccess) {
    return true;
  }
  LOG(ERROR) << "failed to run miopenLRNForward";
  return false;
}
// Backward local response normalization (LRN). MIOpen's LRN backward needs
// the workspace from a matching forward pass, so the forward pass is re-run
// here (into a scratch `dest2` buffer) with workspace capture enabled before
// miopenLRNBackward is invoked.
//
// Fixes vs. previous revision: the scratch-buffer byte count is computed in
// int64_t (a 32-bit int overflows for tensors >= 2^31 bytes), matching
// DoPoolBackwardImpl, and the workspace-allocation error message correctly
// says LRN rather than pooling.
bool MIOpenSupport::DoNormalizeBackwardWithDimensions(
    Stream* stream, const dnn::NormalizeDescriptor& normalize_descriptor,
    const dnn::BatchDescriptor& dimensions, const DeviceMemory<float>& raw_data,
    const DeviceMemory<float>& normalized_data,
    const DeviceMemory<float>& normalized_variable_gradient,
    DeviceMemory<float>* raw_variable_gradient,
    ScratchAllocator* workspace_allocator) {
  // Check for unsupported modes.
  if (normalize_descriptor.wrap_around()) {
    LOG(ERROR) << "MIOpen LRN does not support wrap-around mode";
    return false;
  }
  if (normalize_descriptor.segment_size()) {
    LOG(ERROR) << "MIOpen LRN does not support segmentation";
    return false;
  }

  auto miopen = miopen_->GetHandle(parent_, stream);

  ScopedTensorDescriptor dims{dimensions, miopenFloat};
  ScopedNormalizeDescriptor normalize{normalize_descriptor};

  float alpha = 1.0f;
  float beta = 0.0f;

  DeviceMemory<uint8> workspace;
  size_t workspace_size_in_bytes = 0;
  auto status =
      wrap::miopenLRNGetWorkSpaceSize(dims.handle(), &workspace_size_in_bytes);

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to obtain workspace size for miopenLRNBackward";
    return false;
  }

  // Allocate the workspace.
  if (workspace_size_in_bytes > 0) {
    assert(workspace_allocator);
    auto allocated =
        workspace_allocator->AllocateBytes(workspace_size_in_bytes);
    if (!allocated.ok() || (workspace = allocated.ValueOrDie()) == nullptr) {
      LOG(ERROR) << "Failed to allocate backward LRN workspace";
      return false;
    }
  }

  DeviceMemory<uint8> dest2;  // duplicated dest from forward:
  // miopen requires the strides and dims to be ordered as BDYX.
  std::vector<int64_t> dims64 =
      dimensions.full_dims(dnn::DataLayout::kBatchDepthYX);
  // 64-bit byte count: the element product of a large tensor can overflow a
  // 32-bit int (consistent with DoPoolBackwardImpl above).
  int64_t dest2_size = sizeof(float);
  for (const auto& dim : dims64) dest2_size *= dim;

  if (dest2_size > 0) {
    assert(workspace_allocator);
    auto allocated = workspace_allocator->AllocateBytes(dest2_size);
    if (!allocated.ok() || (dest2 = allocated.ValueOrDie()) == nullptr) {
      LOG(ERROR)
          << "Failed to allocate tensor to chain forward and backward LRN";
      return false;
    }
  } else {
    // NOTE(review): execution intentionally falls through here with a null
    // dest2 (degenerate zero-element tensor), matching the pre-existing
    // behavior of DoPoolBackwardImpl.
    LOG(ERROR) << "Failed to calculate tensor size to chain forward and "
                  "backward LRN";
  }

  // Re-run the forward pass with workspace capture enabled so the backward
  // call below has the data it needs.
  status = wrap::miopenLRNForward(miopen.handle(), normalize.handle(), &alpha,
                                  dims.handle(), raw_data.opaque(), &beta,
                                  dims.handle(), dest2.opaque(), true,
                                  workspace.opaque());

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to run miopenLRNForward";
    return false;
  }

  status = wrap::miopenLRNBackward(
      miopen.handle(), normalize.handle(), &alpha, dims.handle(),
      normalized_data.opaque(), dims.handle(),
      normalized_variable_gradient.opaque(), dims.handle(), raw_data.opaque(),
      &beta, dims.handle(), raw_variable_gradient->opaque(),
      workspace.opaque());

  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "failed to run miopenLRNBackward";
    return false;
  }
  return true;
}
bool MIOpenSupport::DoDepthConcatenate(
Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
port::ArraySlice<const DeviceMemory<float>*> input_data,
DeviceMemory<float>* output_data) {
CHECK_EQ(input_dimensions.size(), input_data.size());
for (const auto& dimensions : input_dimensions) {
if (dimensions.layout() != dnn::DataLayout::kBatchDepthYX) {
LOG(ERROR) << "MIOpenSupport::DoDepthConcatenate currently only "
"supports the kBatchDepthYX layout.";
return false;
}
}
if (input_dimensions.empty()) {
return true; // Nothing to do.
}
dnn::BatchDescriptor output_dimensions =
dnn::BatchDescriptor::DepthConcatenateOutputDescriptor(input_dimensions);
const int64_t area = output_dimensions.width() * output_dimensions.height();
const auto index = [area](int64_t batch, int64_t depth, int64_t yx,
int64_t max_depth) {
return (batch * max_depth + depth) * area + yx;
};
std::vector<float> output_host(output_dimensions.ElementCount());
std::vector<float> tmp;
int64_t depth_sum = 0;
for (size_t i = 0; i < input_data.size(); ++i) {
const auto& dimensions = input_dimensions[i];
tmp.resize(dimensions.ElementCount());
stream->ThenMemcpyD2H<float>(*input_data[i], absl::MakeSpan(tmp));
port::Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
LOG(ERROR) << "BlockHostUntilDone failed: " << block_status;
return false;
}
for (int64_t batch = 0; batch < output_dimensions.count(); ++batch) {
for (int64_t yx = 0; yx < area; ++yx) {
for (int64_t depth = 0; depth < dimensions.feature_map_count();
++depth) {
LOG(INFO) << output_dimensions.ElementCount() << ' ' << batch << ' '
<< yx << ' ' << depth;
output_host[index(batch, depth + depth_sum, yx,
output_dimensions.feature_map_count())] =
tmp[index(batch, depth, yx, dimensions.feature_map_count())];
}
}
}
depth_sum += dimensions.feature_map_count();
}
stream->ThenMemcpyH2D<float>(output_host, output_data);
return true;
}
// Generic elementwise op across inputs. Unimplemented: aborts via LOG(FATAL).
bool MIOpenSupport::DoElementwiseOperate(
    Stream* stream, dnn::ElementwiseOperation operation,
    port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
    port::ArraySlice<const DeviceMemory<float>*> input_data,
    const dnn::BatchDescriptor& output_dimensions,
    DeviceMemory<float>* output_data) {
  LOG(FATAL) << "not yet implemented";  // TODO(leary)
  return false;
}
// Pads the X/Y (spatial) extent of a tensor. Unimplemented: aborts via
// LOG(FATAL).
bool MIOpenSupport::DoXYPad(Stream* stream,
                            const dnn::BatchDescriptor& dimensions,
                            const DeviceMemory<float>& input_data,
                            int64_t left_pad, int64_t right_pad,
                            int64_t top_pad, int64_t bottom_pad,
                            DeviceMemory<float>* output_data) {
  LOG(FATAL) << "not yet implemented";  // TODO(leary)
  return false;
}
// Trims the X/Y (spatial) extent of a tensor. Unimplemented: aborts via
// LOG(FATAL).
bool MIOpenSupport::DoXYSlice(Stream* stream,
                              const dnn::BatchDescriptor& dimensions,
                              const DeviceMemory<float>& input_data,
                              int64_t left_trim, int64_t right_trim,
                              int64_t top_trim, int64_t bottom_trim,
                              DeviceMemory<float>* output_data) {
  LOG(FATAL) << "not yet implemented";  // TODO(leary)
  return false;
}
// Device-to-host copy with on-the-fly quantization. Not supported by the
// MIOpen backend: logs and returns false.
bool MIOpenSupport::DoMemcpyD2HQuantized(
    Stream* stream, const DeviceMemory<float>& gpu_unquantized_src,
    dnn::QuantizedActivationMode mode, void* host_dst, int64_t size) {
  LOG(ERROR) << "quantized memcpy not supported by MIOpen";
  return false;
}
// Host-to-device copy with on-the-fly dequantization. Not supported by the
// MIOpen backend: logs and returns false.
bool MIOpenSupport::DoMemcpyH2DQuantized(
    Stream* stream, const void* host_src, int64_t size,
    dnn::QuantizedActivationMode mode,
    DeviceMemory<float>* gpu_unquantized_dst) {
  LOG(ERROR) << "quantized memcpy not supported by MIOpen";
  return false;
}
// Derives the output BatchDescriptor that the given input/filter/convolution
// combination would produce, by querying MIOpen for the forward output
// dimensions. Returns false if MIOpen cannot compute the output shape.
bool MIOpenSupport::DeriveOutputBatchDescriptor(
    const BatchDescriptor& batch_descriptor,
    const FilterDescriptor& filter_descriptor,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    dnn::BatchDescriptor* output_batch_descriptor) {
  ScopedTensorDescriptor input_nd{batch_descriptor, miopenFloat};
  ScopedFilterDescriptor filter{filter_descriptor, miopenFloat};
  ScopedConvolutionDescriptor conv{convolution_descriptor, miopenFloat};

  // Ask MIOpen for the forward output dims, returned in BDYX order.
  int rank = batch_descriptor.ndims() + 2;
  std::vector<int> out_dims(rank);
  const auto status = wrap::miopenGetConvolutionNdForwardOutputDim(
      conv.handle(), input_nd.handle(), filter.handle(), &rank,
      out_dims.data());
  if (status != miopenStatusSuccess) {
    LOG(ERROR) << "could not get output tensor for convolution: "
               << ToString(status);
    return false;
  }

  output_batch_descriptor->set_count(out_dims[0])
      .set_feature_map_count(out_dims[1])
      .set_layout(batch_descriptor.layout());

  // Spatial dimensions are read back-to-front from the BDYX list.
  for (int i = 0; i < batch_descriptor.ndims(); ++i) {
    output_batch_descriptor->set_spatial_dim(static_cast<dnn::DimIndex>(i),
                                             out_dims.rbegin()[i]);
  }

  return true;
}
// Executes a fused convolution + bias + activation via MIOpen's fusion-plan
// API. If the fusion plan fails to compile, returns false so the caller can
// fall back to unfused ops. When output_profile_result is non-null the launch
// is timed with a GpuTimer and failures are reported silently (no FATAL).
template <typename T>
bool MIOpenSupport::DoFusedConvolutionBiasActivationImpl(
    Stream* stream,
    int miopen_type,  // Actually miopenDataType_t.
    const dnn::BatchDescriptor& conv_input_descriptor,
    const DeviceMemory<T>& conv_input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    const DeviceMemory<T>& filter_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    const dnn::BatchDescriptor& bias_descriptor,
    const DeviceMemory<T>& bias_data, dnn::ActivationMode activation_mode,
    const dnn::BatchDescriptor& output_descriptor, DeviceMemory<T>* output_data,
    dnn::ProfileResult* output_profile_result) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  ScopedTensorDescriptor conv_input_nd{
      conv_input_descriptor, static_cast<miopenDataType_t>(miopen_type)};

  ScopedTensorDescriptor bias_nd{bias_descriptor,
                                 static_cast<miopenDataType_t>(miopen_type)};

  ScopedTensorDescriptor output_nd{output_descriptor,
                                   static_cast<miopenDataType_t>(miopen_type)};

  ScopedConvolutionDescriptor conv{convolution_descriptor,
                                   static_cast<miopenDataType_t>(miopen_type)};

  ScopedFilterDescriptor filter{filter_descriptor,
                                static_cast<miopenDataType_t>(miopen_type)};

  ScopedActivationDescriptor activation_desc{activation_mode};

  ScopedFusionPlanConvolutionBiasActivation fusion_plan{
      miopen.handle(), conv_input_nd.handle(), filter.handle(),
      conv.handle(),   bias_nd.handle(),       activation_desc};

  bool retval = false;

  if (fusion_plan.CompilationSucceeded()) {
    const bool is_profiling = output_profile_result != nullptr;

    std::unique_ptr<GpuTimer> timer;
    if (is_profiling) {
      timer.reset(new GpuTimer(parent_));
      timer->Init();
      timer->Start(AsGpuStream(stream));
    }

    miopenStatus_t status = miopenStatusSuccess;

    if (status == miopenStatusSuccess) {
      // NOTE(review): unlike SetBiasArgs/SetActivationForwardArgs below, the
      // return value of SetConvolutionArgs is not captured, so a failure here
      // would go unnoticed — confirm whether this is intentional.
      fusion_plan.SetConvolutionArgs(filter_data.opaque());
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.SetBiasArgs(bias_data.opaque());
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.SetActivationForwardArgs(activation_desc);
    }

    if (status == miopenStatusSuccess) {
      status =
          fusion_plan.Execute(conv_input_nd.handle(), conv_input_data.opaque(),
                              output_nd.handle(), output_data->opaque());
    }

    if (is_profiling) {
      timer->Stop(AsGpuStream(stream));
      if (status == miopenStatusSuccess) {
        output_profile_result->set_elapsed_time_in_ms(
            timer->GetElapsedMilliseconds());
      }
      timer->Destroy();
    }

    if (status != miopenStatusSuccess) {
      // Silently return when we are profiling.
      if (!is_profiling) {
        LOG(FATAL) << "failed to enqueue fused-convolution on stream: "
                   << ToString(status);
      }
    }

    retval = true;
  }

  return retval;
}
// Public float entry point for fused convolution + bias + activation:
// forwards to the templated implementation with miopenFloat.
bool MIOpenSupport::DoFusedConvolutionBiasActivation(
    Stream* stream, const dnn::BatchDescriptor& conv_input_descriptor,
    const DeviceMemory<float>& conv_input_data,
    const dnn::FilterDescriptor& filter_descriptor,
    const DeviceMemory<float>& filter_data,
    const dnn::ConvolutionDescriptor& convolution_descriptor,
    const dnn::BatchDescriptor& bias_descriptor,
    const DeviceMemory<float>& bias_data, dnn::ActivationMode activation_mode,
    const dnn::BatchDescriptor& output_descriptor,
    DeviceMemory<float>* output_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedConvolutionBiasActivationImpl<float>(
      stream, miopenFloat, conv_input_descriptor, conv_input_data,
      filter_descriptor, filter_data, convolution_descriptor, bias_descriptor,
      bias_data, activation_mode, output_descriptor, output_data,
      output_profile_result);
}
// Executes fused batch-norm (inference) + activation via MIOpen's fusion-plan
// API. If the fusion plan fails to compile, returns false so the caller can
// fall back to unfused ops. When output_profile_result is non-null the launch
// is timed with a GpuTimer and failures are reported silently (no FATAL).
//
// Fix vs. previous revision: the fatal error message said "fused-convolution"
// (copy-pasted from DoFusedConvolutionBiasActivationImpl); it now names the
// batchnorm-activation op actually being enqueued.
template <typename T, typename U>
bool MIOpenSupport::DoFusedBatchNormActivationInferenceImpl(
    Stream* stream,
    int miopen_type,  // Actually miopenDataType_t.
    const dnn::BatchDescriptor& x_descriptor, const DeviceMemory<T>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<U>& scale_data, const DeviceMemory<U>& offset_data,
    const DeviceMemory<U>& mean_data, const DeviceMemory<U>& variance_data,
    double epsilon, dnn::ActivationMode activation_mode,
    DeviceMemory<T>* y_data, dnn::ProfileResult* output_profile_result) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  ScopedTensorDescriptor x_nd{x_descriptor,
                              static_cast<miopenDataType_t>(miopen_type)};

  ScopedTensorDescriptor scale_offset_mean_variance_nd{
      scale_offset_mean_variance_descriptor,
      static_cast<miopenDataType_t>(miopen_type)};

  ScopedActivationDescriptor activation_desc{activation_mode};

  ScopedFusionPlanBatchNormActivationInference fusion_plan{
      miopen.handle(), x_nd.handle(), scale_offset_mean_variance_nd.handle(),
      activation_desc};

  bool retval = false;

  if (fusion_plan.CompilationSucceeded()) {
    const bool is_profiling = output_profile_result != nullptr;

    std::unique_ptr<GpuTimer> timer;
    if (is_profiling) {
      timer.reset(new GpuTimer(parent_));
      timer->Init();
      timer->Start(AsGpuStream(stream));
    }

    miopenStatus_t status = miopenStatusSuccess;

    if (status == miopenStatusSuccess) {
      // NOTE(review): the return value of SetBatchNormInferenceArgs is not
      // captured (matching DoFusedConvolutionBiasActivationImpl) — confirm
      // whether it can report errors.
      fusion_plan.SetBatchNormInferenceArgs(
          scale_data.opaque(), offset_data.opaque(), mean_data.opaque(),
          variance_data.opaque(), epsilon);
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.SetActivationForwardArgs(activation_desc);
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.Execute(x_nd.handle(), x_data.opaque(),
                                   x_nd.handle(), y_data->opaque());
    }

    if (is_profiling) {
      timer->Stop(AsGpuStream(stream));
      if (status == miopenStatusSuccess) {
        output_profile_result->set_elapsed_time_in_ms(
            timer->GetElapsedMilliseconds());
      }
      timer->Destroy();
    }

    if (status != miopenStatusSuccess) {
      // Silently return when we are profiling.
      if (!is_profiling) {
        LOG(FATAL)
            << "failed to enqueue fused batchnorm-activation inference on "
               "stream: "
            << ToString(status);
      }
    }

    retval = true;
  }

  return retval;
}
// Public float entry point: forwards to the shared templated implementation
// with miopenFloat for both the data and scale/offset element types.
bool MIOpenSupport::DoFusedBatchNormActivationInference(
    Stream* stream, const dnn::BatchDescriptor& x_descriptor,
    const DeviceMemory<float>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data,
    const DeviceMemory<float>& mean_data,
    const DeviceMemory<float>& variance_data, double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<float>* y_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationInferenceImpl<float, float>(
      stream, miopenFloat, x_descriptor, x_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data, mean_data,
      variance_data, epsilon, activation_mode, y_data, output_profile_result);
}
// Public half-precision entry point: data tensors are Eigen::half (miopenHalf)
// while scale/offset/mean/variance remain float, matching the Impl's <T, U>.
bool MIOpenSupport::DoFusedBatchNormActivationInference(
    Stream* stream, const dnn::BatchDescriptor& x_descriptor,
    const DeviceMemory<Eigen::half>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data,
    const DeviceMemory<float>& mean_data,
    const DeviceMemory<float>& variance_data, double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<Eigen::half>* y_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationInferenceImpl<Eigen::half, float>(
      stream, miopenHalf, x_descriptor, x_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data, mean_data,
      variance_data, epsilon, activation_mode, y_data, output_profile_result);
}
// Shared implementation behind the float and Eigen::half overloads of
// DoFusedBatchNormActivationForward. Builds a MIOpen fusion plan for the
// training forward pass of batch-norm followed by |activation_mode| and
// enqueues it on |stream|, producing |y_data| plus the batch/saved
// mean-and-variance outputs used later by the backward pass.
//
// Returns false only when the fusion plan fails to compile; after that,
// true is returned even if a later step failed (original best-effort
// semantics). With a non-null |output_profile_result| the run is timed and
// failures are reported silently by leaving the elapsed time unset.
template <typename T, typename U>
bool MIOpenSupport::DoFusedBatchNormActivationForwardImpl(
    Stream* stream,
    int miopen_type,  // Actually miopenDataType_t.
    const dnn::BatchDescriptor& x_descriptor, const DeviceMemory<T>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<U>& scale_data, const DeviceMemory<U>& offset_data,
    double epsilon, dnn::ActivationMode activation_mode,
    DeviceMemory<T>* y_data, DeviceMemory<U>* batch_mean_data,
    DeviceMemory<U>* batch_var_data, DeviceMemory<U>* saved_mean_data,
    DeviceMemory<U>* saved_var_data,
    dnn::ProfileResult* output_profile_result) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // Descriptors for the data tensor, per-channel parameters, and activation.
  ScopedTensorDescriptor x_nd{x_descriptor,
                              static_cast<miopenDataType_t>(miopen_type)};
  ScopedTensorDescriptor scale_offset_mean_variance_nd{
      scale_offset_mean_variance_descriptor,
      static_cast<miopenDataType_t>(miopen_type)};
  ScopedActivationDescriptor activation_desc{activation_mode};
  ScopedFusionPlanBatchNormActivationForward fusion_plan{
      miopen.handle(), x_nd.handle(), scale_offset_mean_variance_nd.handle(),
      activation_desc};

  bool retval = false;

  if (fusion_plan.CompilationSucceeded()) {
    const bool is_profiling = output_profile_result != nullptr;

    std::unique_ptr<GpuTimer> timer;
    if (is_profiling) {
      timer.reset(new GpuTimer(parent_));
      timer->Init();
      timer->Start(AsGpuStream(stream));
    }

    miopenStatus_t status = miopenStatusSuccess;
    if (status == miopenStatusSuccess) {
      // NOTE(review): unlike the calls below, the result of
      // SetBatchNormForwardArgs is discarded here -- confirm it cannot fail.
      fusion_plan.SetBatchNormForwardArgs(
          scale_data.opaque(), offset_data.opaque(), batch_mean_data->opaque(),
          batch_var_data->opaque(), saved_mean_data->opaque(),
          saved_var_data->opaque(), epsilon);
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.SetActivationForwardArgs(activation_desc);
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.Execute(x_nd.handle(), x_data.opaque(),
                                   x_nd.handle(), y_data->opaque());
    }

    if (is_profiling) {
      timer->Stop(AsGpuStream(stream));
      if (status == miopenStatusSuccess) {
        output_profile_result->set_elapsed_time_in_ms(
            timer->GetElapsedMilliseconds());
      }
      timer->Destroy();
    }

    if (status != miopenStatusSuccess) {
      // Silently return when we are profiling.
      if (!is_profiling) {
        // Fixed copy-paste in the message: this op is fused
        // batchnorm+activation forward, not a fused convolution.
        LOG(FATAL)
            << "failed to enqueue fused-batchnorm-activation-forward on "
            << "stream: " << ToString(status);
      }
    }

    retval = true;
  }

  return retval;
}
// Public float entry point for the training-mode fused batchnorm+activation
// forward pass; dispatches to the shared implementation with miopenFloat.
bool MIOpenSupport::DoFusedBatchNormActivationForward(
    Stream* stream, const dnn::BatchDescriptor& x_descriptor,
    const DeviceMemory<float>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data, double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<float>* y_data,
    DeviceMemory<float>* batch_mean_data, DeviceMemory<float>* batch_var_data,
    DeviceMemory<float>* saved_mean_data, DeviceMemory<float>* saved_var_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationForwardImpl<float, float>(
      stream, miopenFloat, x_descriptor, x_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data, epsilon,
      activation_mode, y_data, batch_mean_data, batch_var_data, saved_mean_data,
      saved_var_data, output_profile_result);
}
// Public half-precision entry point: Eigen::half data (miopenHalf) with
// float batch-norm statistics, matching the Impl's <T, U> parameters.
bool MIOpenSupport::DoFusedBatchNormActivationForward(
    Stream* stream, const dnn::BatchDescriptor& x_descriptor,
    const DeviceMemory<Eigen::half>& x_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data, double epsilon,
    dnn::ActivationMode activation_mode, DeviceMemory<Eigen::half>* y_data,
    DeviceMemory<float>* batch_mean_data, DeviceMemory<float>* batch_var_data,
    DeviceMemory<float>* saved_mean_data, DeviceMemory<float>* saved_var_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationForwardImpl<Eigen::half, float>(
      stream, miopenHalf, x_descriptor, x_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data, epsilon,
      activation_mode, y_data, batch_mean_data, batch_var_data, saved_mean_data,
      saved_var_data, output_profile_result);
}
// Shared implementation behind the float and Eigen::half overloads of
// DoFusedBatchNormActivationBackward. Builds a MIOpen fusion plan for the
// backward pass of activation-then-batch-norm: from the activation-output
// gradient |y_act_backprop_data| it produces the batch-norm input gradient
// |x_bn_backprop_data| and the scale/offset gradients, using the mean and
// variance saved by the forward pass.
//
// Returns false only when the fusion plan fails to compile; after that,
// true is returned even if a later step failed (original best-effort
// semantics). With a non-null |output_profile_result| the run is timed and
// failures are reported silently by leaving the elapsed time unset.
template <typename T, typename U>
bool MIOpenSupport::DoFusedBatchNormActivationBackwardImpl(
    Stream* stream,
    int miopen_type,  // Actually miopenDataType_t.
    const dnn::BatchDescriptor& y_act_backprop_descriptor,
    const DeviceMemory<T>& y_act_backprop_data,
    const DeviceMemory<T>& y_act_data, dnn::ActivationMode activation_mode,
    const DeviceMemory<T>& x_bn_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<U>& scale_data, const DeviceMemory<U>& offset_data,
    const DeviceMemory<U>& saved_mean_data,
    const DeviceMemory<U>& saved_var_data, DeviceMemory<T>* x_bn_backprop_data,
    DeviceMemory<U>* scale_backprop_data, DeviceMemory<U>* offset_backprop_data,
    dnn::ProfileResult* output_profile_result) {
  auto miopen = miopen_->GetHandle(parent_, stream);

  // Descriptors for the gradient tensor, per-channel parameters, and
  // activation; these feed the fusion plan below.
  ScopedTensorDescriptor y_act_backprop_nd{
      y_act_backprop_descriptor, static_cast<miopenDataType_t>(miopen_type)};
  ScopedTensorDescriptor scale_offset_mean_variance_nd{
      scale_offset_mean_variance_descriptor,
      static_cast<miopenDataType_t>(miopen_type)};
  ScopedActivationDescriptor activation_desc{activation_mode};
  ScopedFusionPlanBatchNormActivationBackward fusion_plan{
      miopen.handle(), y_act_backprop_nd.handle(),
      scale_offset_mean_variance_nd.handle(), activation_desc};

  bool retval = false;

  if (fusion_plan.CompilationSucceeded()) {
    const bool is_profiling = output_profile_result != nullptr;

    std::unique_ptr<GpuTimer> timer;
    if (is_profiling) {
      timer.reset(new GpuTimer(parent_));
      timer->Init();
      timer->Start(AsGpuStream(stream));
    }

    miopenStatus_t status = miopenStatusSuccess;
    if (status == miopenStatusSuccess) {
      // NOTE(review): unlike the calls below, the result of
      // SetBatchNormBackwardArgs is discarded here -- confirm it cannot fail.
      fusion_plan.SetBatchNormBackwardArgs(
          x_bn_data.opaque(), scale_data.opaque(), offset_data.opaque(),
          saved_mean_data.opaque(), saved_var_data.opaque(),
          scale_backprop_data->opaque(), offset_backprop_data->opaque());
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.SetActivationBackwardArgs(activation_desc,
                                                     y_act_data.opaque());
    }

    if (status == miopenStatusSuccess) {
      status = fusion_plan.Execute(
          y_act_backprop_nd.handle(), y_act_backprop_data.opaque(),
          y_act_backprop_nd.handle(), x_bn_backprop_data->opaque());
    }

    if (is_profiling) {
      timer->Stop(AsGpuStream(stream));
      if (status == miopenStatusSuccess) {
        output_profile_result->set_elapsed_time_in_ms(
            timer->GetElapsedMilliseconds());
      }
      timer->Destroy();
    }

    if (status != miopenStatusSuccess) {
      // Silently return when we are profiling.
      if (!is_profiling) {
        // Fixed copy-paste in the message: this op is fused
        // batchnorm+activation backward, not a fused convolution.
        LOG(FATAL)
            << "failed to enqueue fused-batchnorm-activation-backward on "
            << "stream: " << ToString(status);
      }
    }

    retval = true;
  }

  return retval;
}
// Public float entry point for the fused batchnorm+activation backward pass;
// dispatches to the shared implementation with miopenFloat.
bool MIOpenSupport::DoFusedBatchNormActivationBackward(
    Stream* stream, const dnn::BatchDescriptor& y_act_backprop_descriptor,
    const DeviceMemory<float>& y_act_backprop_data,
    const DeviceMemory<float>& y_act_data, dnn::ActivationMode activation_mode,
    const DeviceMemory<float>& x_bn_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data,
    const DeviceMemory<float>& saved_mean_data,
    const DeviceMemory<float>& saved_var_data,
    DeviceMemory<float>* x_bn_backprop_data,
    DeviceMemory<float>* scale_backprop_data,
    DeviceMemory<float>* offset_backprop_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationBackwardImpl<float, float>(
      stream, miopenFloat, y_act_backprop_descriptor, y_act_backprop_data,
      y_act_data, activation_mode, x_bn_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data,
      saved_mean_data, saved_var_data, x_bn_backprop_data, scale_backprop_data,
      offset_backprop_data, output_profile_result);
}
// Public half-precision entry point: Eigen::half gradients/data (miopenHalf)
// with float batch-norm statistics, matching the Impl's <T, U> parameters.
bool MIOpenSupport::DoFusedBatchNormActivationBackward(
    Stream* stream, const dnn::BatchDescriptor& y_act_backprop_descriptor,
    const DeviceMemory<Eigen::half>& y_act_backprop_data,
    const DeviceMemory<Eigen::half>& y_act_data,
    dnn::ActivationMode activation_mode,
    const DeviceMemory<Eigen::half>& x_bn_data,
    const dnn::BatchDescriptor& scale_offset_mean_variance_descriptor,
    const DeviceMemory<float>& scale_data,
    const DeviceMemory<float>& offset_data,
    const DeviceMemory<float>& saved_mean_data,
    const DeviceMemory<float>& saved_var_data,
    DeviceMemory<Eigen::half>* x_bn_backprop_data,
    DeviceMemory<float>* scale_backprop_data,
    DeviceMemory<float>* offset_backprop_data,
    dnn::ProfileResult* output_profile_result) {
  return DoFusedBatchNormActivationBackwardImpl<Eigen::half, float>(
      stream, miopenHalf, y_act_backprop_descriptor, y_act_backprop_data,
      y_act_data, activation_mode, x_bn_data,
      scale_offset_mean_variance_descriptor, scale_data, offset_data,
      saved_mean_data, saved_var_data, x_bn_backprop_data, scale_backprop_data,
      offset_backprop_data, output_profile_result);
}
} // namespace gpu
// Registers the MIOpen DNN plugin with the ROCm platform's PluginRegistry
// (once) and makes it the platform's default DNN factory. The registered
// factory lambda builds a MIOpenSupport for a ROCm GpuExecutor, returning
// nullptr (with an error logged) for non-ROCm executors or failed Init().
void initialize_miopen() {
  // Skip re-registration if a MIOpen factory is already present.
  auto miopenAlreadyRegistered = PluginRegistry::Instance()->HasFactory(
      rocm::kROCmPlatformId, PluginKind::kDnn, gpu::kMIOpenPlugin);

  if (!miopenAlreadyRegistered) {
    port::Status status =
        PluginRegistry::Instance()->RegisterFactory<PluginRegistry::DnnFactory>(
            rocm::kROCmPlatformId, gpu::kMIOpenPlugin, "MIOpen",
            [](internal::StreamExecutorInterface* parent) -> dnn::DnnSupport* {
              // The factory only works with ROCm executors; reject others.
              gpu::GpuExecutor* rocm_executor =
                  dynamic_cast<gpu::GpuExecutor*>(parent);
              if (rocm_executor == nullptr) {
                LOG(ERROR)
                    << "Attempting to initialize an instance of the MIOpen "
                    << "support library with a non-ROCM StreamExecutor";
                return nullptr;
              }

              gpu::MIOpenSupport* dnn = new gpu::MIOpenSupport(rocm_executor);
              if (!dnn->Init().ok()) {
                // Note: Init() will log a more specific error.
                delete dnn;
                return nullptr;
              }
              return dnn;
            });

    if (!status.ok()) {
      LOG(ERROR) << "Unable to register MIOpen factory: "
                 << status.error_message();
    }

    // Make MIOpen the default DNN implementation for the ROCm platform.
    PluginRegistry::Instance()->SetDefaultFactory(
        rocm::kROCmPlatformId, PluginKind::kDnn, gpu::kMIOpenPlugin);
  }
}
} // namespace stream_executor
// Run the MIOpen plugin registration automatically at module-load time.
REGISTER_MODULE_INITIALIZER(register_miopen,
                            { stream_executor::initialize_miopen(); });
| 40.755309 | 80 | 0.675077 | [
"object",
"shape",
"vector",
"model",
"transform"
] |
d5250a16671760c3dbda131238892b9079680af1 | 22,106 | cpp | C++ | src/tests/end2end/D3D12VideoViewsTests.cpp | softwarecapital/google.dawn | 045a02adc0c6b2d6b406507bc58131644b41dc0c | [
"Apache-2.0"
] | null | null | null | src/tests/end2end/D3D12VideoViewsTests.cpp | softwarecapital/google.dawn | 045a02adc0c6b2d6b406507bc58131644b41dc0c | [
"Apache-2.0"
] | null | null | null | src/tests/end2end/D3D12VideoViewsTests.cpp | softwarecapital/google.dawn | 045a02adc0c6b2d6b406507bc58131644b41dc0c | [
"Apache-2.0"
] | null | null | null | // Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/DawnTest.h"
#include <d3d11.h>
#include <d3d12.h>
#include <dxgi1_4.h>
#include <wrl/client.h>
#include "dawn_native/D3D12Backend.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"
using Microsoft::WRL::ComPtr;
namespace {
// Fixture for D3D12 multi-planar (NV12) texture-view tests. It creates an
// NV12 texture with known contents on a D3D11 device, shares it into Dawn's
// D3D12 device via a DXGI shared handle + keyed mutex, and lets individual
// tests sample the per-plane views.
class D3D12VideoViewsTests : public DawnTest {
  protected:
    // Sets up the D3D11 device on the same adapter as Dawn's D3D12 device and
    // verifies the shared-resource tier required for NV12 interop.
    void SetUp() override {
        DawnTest::SetUp();
        DAWN_SKIP_TEST_IF(UsesWire());
        DAWN_SKIP_TEST_IF(!IsMultiPlanarFormatsSupported());

        // Create the D3D11 device/contexts that will be used in subsequent tests
        ComPtr<ID3D12Device> d3d12Device = dawn_native::d3d12::GetD3D12Device(device.Get());

        // Match the D3D11 device to the same adapter (by LUID) as Dawn's
        // D3D12 device so resources can be shared between them.
        const LUID adapterLuid = d3d12Device->GetAdapterLuid();

        ComPtr<IDXGIFactory4> dxgiFactory;
        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
        ASSERT_EQ(hr, S_OK);

        ComPtr<IDXGIAdapter> dxgiAdapter;
        hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
        ASSERT_EQ(hr, S_OK);

        ComPtr<ID3D11Device> d3d11Device;
        D3D_FEATURE_LEVEL d3dFeatureLevel;
        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
        hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0,
                                 nullptr, 0, D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
                                 &d3d11DeviceContext);
        ASSERT_EQ(hr, S_OK);

        // Runtime of the created texture (D3D11 device) and OpenSharedHandle runtime (Dawn's
        // D3D12 device) must agree on resource sharing capability. For NV12 formats, D3D11
        // requires at-least D3D11_SHARED_RESOURCE_TIER_2 support.
        // https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_shared_resource_tier
        D3D11_FEATURE_DATA_D3D11_OPTIONS5 featureOptions5{};
        hr = d3d11Device->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS5, &featureOptions5,
                                              sizeof(featureOptions5));
        ASSERT_EQ(hr, S_OK);

        ASSERT_GE(featureOptions5.SharedResourceTier, D3D11_SHARED_RESOURCE_TIER_2);

        mD3d11Device = std::move(d3d11Device);
    }

    // Requests the multiplanar_formats extension when the adapter supports
    // it; otherwise records the lack of support so tests can skip.
    std::vector<const char*> GetRequiredExtensions() override {
        mIsMultiPlanarFormatsSupported = SupportsExtensions({"multiplanar_formats"});
        if (!mIsMultiPlanarFormatsSupported) {
            return {};
        }

        return {"multiplanar_formats"};
    }

    bool IsMultiPlanarFormatsSupported() const {
        return mIsMultiPlanarFormatsSupported;
    }

    // Maps a wgpu multi-planar format to the DXGI format used for the
    // underlying D3D11 texture.
    static DXGI_FORMAT GetDXGITextureFormat(wgpu::TextureFormat format) {
        switch (format) {
            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
                return DXGI_FORMAT_NV12;
            default:
                UNREACHABLE();
                return DXGI_FORMAT_UNKNOWN;
        }
    }

    // Returns a pre-prepared multi-planar formatted texture
    // The encoded texture data represents a 4x4 converted image. When |isCheckerboard| is true,
    // the top left is a 2x2 yellow block, bottom right is a 2x2 red block, top right is a 2x2
    // blue block, and bottom left is a 2x2 white block. When |isCheckerboard| is false, the
    // image is converted from a solid yellow 4x4 block.
    static std::vector<uint8_t> GetTestTextureData(wgpu::TextureFormat format,
                                                   bool isCheckerboard) {
        constexpr uint8_t Yy = kYellowYUVColor[kYUVLumaPlaneIndex].r;
        constexpr uint8_t Yu = kYellowYUVColor[kYUVChromaPlaneIndex].r;
        constexpr uint8_t Yv = kYellowYUVColor[kYUVChromaPlaneIndex].g;

        switch (format) {
            // The first 16 bytes is the luma plane (Y), followed by the chroma plane (UV) which
            // is half the number of bytes (subsampled by 2) but same bytes per line as luma
            // plane.
            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
                if (isCheckerboard) {
                    constexpr uint8_t Wy = kWhiteYUVColor[kYUVLumaPlaneIndex].r;
                    constexpr uint8_t Wu = kWhiteYUVColor[kYUVChromaPlaneIndex].r;
                    constexpr uint8_t Wv = kWhiteYUVColor[kYUVChromaPlaneIndex].g;

                    constexpr uint8_t Ry = kRedYUVColor[kYUVLumaPlaneIndex].r;
                    constexpr uint8_t Ru = kRedYUVColor[kYUVChromaPlaneIndex].r;
                    constexpr uint8_t Rv = kRedYUVColor[kYUVChromaPlaneIndex].g;

                    constexpr uint8_t By = kBlueYUVColor[kYUVLumaPlaneIndex].r;
                    constexpr uint8_t Bu = kBlueYUVColor[kYUVChromaPlaneIndex].r;
                    constexpr uint8_t Bv = kBlueYUVColor[kYUVChromaPlaneIndex].g;

                    // clang-format off
                    return {
                        Wy, Wy, Ry, Ry,  // plane 0, start + 0
                        Wy, Wy, Ry, Ry,
                        Yy, Yy, By, By,
                        Yy, Yy, By, By,
                        Wu, Wv, Ru, Rv,  // plane 1, start + 16
                        Yu, Yv, Bu, Bv,
                    };
                    // clang-format on
                } else {
                    // clang-format off
                    return {
                        Yy, Yy, Yy, Yy,  // plane 0, start + 0
                        Yy, Yy, Yy, Yy,
                        Yy, Yy, Yy, Yy,
                        Yy, Yy, Yy, Yy,
                        Yu, Yv, Yu, Yv,  // plane 1, start + 16
                        Yu, Yv, Yu, Yv,
                    };
                    // clang-format on
                }
            default:
                UNREACHABLE();
                return {};
        }
    }

    // Creates an NV12 texture on the D3D11 device filled with the test image,
    // shares it through a DXGI shared handle + keyed mutex, and imports it
    // into Dawn as |*dawnTextureOut|.
    void CreateVideoTextureForTest(wgpu::TextureFormat format,
                                   wgpu::TextureUsage usage,
                                   bool isCheckerboard,
                                   wgpu::Texture* dawnTextureOut) {
        wgpu::TextureDescriptor textureDesc;
        textureDesc.format = format;
        textureDesc.dimension = wgpu::TextureDimension::e2D;
        textureDesc.usage = usage;
        textureDesc.size = {kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels, 1};

        // Create a DX11 texture with data then wrap it in a shared handle.
        D3D11_TEXTURE2D_DESC d3dDescriptor;
        d3dDescriptor.Width = kYUVImageDataWidthInTexels;
        d3dDescriptor.Height = kYUVImageDataHeightInTexels;
        d3dDescriptor.MipLevels = 1;
        d3dDescriptor.ArraySize = 1;
        d3dDescriptor.Format = GetDXGITextureFormat(format);
        d3dDescriptor.SampleDesc.Count = 1;
        d3dDescriptor.SampleDesc.Quality = 0;
        d3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
        d3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE;
        d3dDescriptor.CPUAccessFlags = 0;
        d3dDescriptor.MiscFlags =
            D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;

        std::vector<uint8_t> initialData = GetTestTextureData(format, isCheckerboard);

        D3D11_SUBRESOURCE_DATA subres;
        subres.pSysMem = initialData.data();
        subres.SysMemPitch = kYUVImageDataWidthInTexels;

        ComPtr<ID3D11Texture2D> d3d11Texture;
        HRESULT hr = mD3d11Device->CreateTexture2D(&d3dDescriptor, &subres, &d3d11Texture);
        ASSERT_EQ(hr, S_OK);

        ComPtr<IDXGIResource1> dxgiResource;
        hr = d3d11Texture.As(&dxgiResource);
        ASSERT_EQ(hr, S_OK);

        HANDLE sharedHandle;
        hr = dxgiResource->CreateSharedHandle(
            nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
            &sharedHandle);
        ASSERT_EQ(hr, S_OK);

        // DX11 texture should be initialized upon CreateTexture2D. However, if we do not
        // acquire/release the keyed mutex before using the wrapped WebGPU texture, the WebGPU
        // texture is left uninitialized. This is required for D3D11 and D3D12 interop.
        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
        hr = d3d11Texture.As(&dxgiKeyedMutex);
        ASSERT_EQ(hr, S_OK);

        hr = dxgiKeyedMutex->AcquireSync(0, INFINITE);
        ASSERT_EQ(hr, S_OK);

        hr = dxgiKeyedMutex->ReleaseSync(1);
        ASSERT_EQ(hr, S_OK);

        // Open the DX11 texture in Dawn from the shared handle and return it as a WebGPU
        // texture.
        dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
        externalImageDesc.cTextureDescriptor =
            reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
        externalImageDesc.sharedHandle = sharedHandle;

        std::unique_ptr<dawn_native::d3d12::ExternalImageDXGI> externalImage =
            dawn_native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);

        // Handle is no longer needed once resources are created.
        ::CloseHandle(sharedHandle);

        // Acquire with the key released above (1) so Dawn can use the texture.
        dawn_native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
        externalAccessDesc.acquireMutexKey = 1;
        externalAccessDesc.isInitialized = true;
        externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(textureDesc.usage);

        *dawnTextureOut = wgpu::Texture::Acquire(
            externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
    }

    // Vertex shader used to render a sampled texture into a quad.
    wgpu::ShaderModule GetTestVertexShaderModule() const {
        return utils::CreateShaderModule(device, R"(
            struct VertexOut {
                [[location(0)]] texCoord : vec2 <f32>;
                [[builtin(position)]] position : vec4<f32>;
            };

            [[stage(vertex)]]
            fn main([[builtin(vertex_index)]] VertexIndex : u32) -> VertexOut {
                let pos : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
                    vec2<f32>(-1.0, 1.0),
                    vec2<f32>(-1.0, -1.0),
                    vec2<f32>(1.0, -1.0),
                    vec2<f32>(-1.0, 1.0),
                    vec2<f32>(1.0, -1.0),
                    vec2<f32>(1.0, 1.0)
                );
                var output : VertexOut;
                output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
                output.texCoord = vec2<f32>(output.position.xy * 0.5) + vec2<f32>(0.5, 0.5);
                return output;
            })");
    }

    // The width and height in texels are 4 for all YUV formats.
    static constexpr uint32_t kYUVImageDataWidthInTexels = 4;
    static constexpr uint32_t kYUVImageDataHeightInTexels = 4;

    static constexpr size_t kYUVLumaPlaneIndex = 0;
    static constexpr size_t kYUVChromaPlaneIndex = 1;

    // RGB colors converted into YUV (per plane), for testing.
    // RGB colors are mapped to the BT.601 definition of luma.
    // https://docs.microsoft.com/en-us/windows/win32/medfound/about-yuv-video
    static constexpr std::array<RGBA8, 2> kYellowYUVColor = {RGBA8{210, 0, 0, 0xFF},    // Y
                                                             RGBA8{16, 146, 0, 0xFF}};  // UV

    static constexpr std::array<RGBA8, 2> kWhiteYUVColor = {RGBA8{235, 0, 0, 0xFF},      // Y
                                                            RGBA8{128, 128, 0, 0xFF}};   // UV

    static constexpr std::array<RGBA8, 2> kBlueYUVColor = {RGBA8{41, 0, 0, 0xFF},       // Y
                                                           RGBA8{240, 110, 0, 0xFF}};   // UV

    static constexpr std::array<RGBA8, 2> kRedYUVColor = {RGBA8{81, 0, 0, 0xFF},       // Y
                                                          RGBA8{90, 240, 0, 0xFF}};    // UV

    // D3D11 device used to create and share the source NV12 textures.
    ComPtr<ID3D11Device> mD3d11Device;

    // Set by GetRequiredExtensions(); gates all tests in this fixture.
    bool mIsMultiPlanarFormatsSupported = false;
};
} // namespace
// Draws a full-screen quad that samples the luminance (Y) plane of an
// imported solid-yellow NV12 texture into the red channel of an RGBA
// attachment, then checks the rendered pixel value.
TEST_P(D3D12VideoViewsTests, NV12SampleYtoR) {
    // Import a solid-color (non-checkerboard) NV12 texture from D3D11.
    wgpu::Texture nv12Texture;
    CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
                              wgpu::TextureUsage::Sampled, /*isCheckerboard*/ false,
                              &nv12Texture);
    ASSERT_NE(nv12Texture.Get(), nullptr);

    // View only plane 0 (the luma plane) of the texture.
    wgpu::TextureViewDescriptor planeViewDesc;
    planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
    wgpu::TextureView lumaPlaneView = nv12Texture.CreateView(&planeViewDesc);

    // Render target sized to the video frame.
    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);

    // Pipeline: shared quad vertex shader, fragment shader copies Y into R.
    utils::ComboRenderPipelineDescriptor2 pipelineDesc;
    pipelineDesc.vertex.module = GetTestVertexShaderModule();
    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
            [[set(0), binding(0)]] var sampler0 : sampler;
            [[set(0), binding(1)]] var texture : texture_2d<f32>;

            [[stage(fragment)]]
            fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
               let y : f32 = textureSample(texture, sampler0, texCoord).r;
               return vec4<f32>(y, 0.0, 0.0, 1.0);
            })");
    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
    pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline2(&pipelineDesc);

    wgpu::Sampler defaultSampler = device.CreateSampler();

    // Record a single draw of the quad and submit it.
    wgpu::CommandEncoder cmdEncoder = device.CreateCommandEncoder();
    {
        wgpu::RenderPassEncoder passEncoder =
            cmdEncoder.BeginRenderPass(&renderPass.renderPassInfo);
        passEncoder.SetPipeline(pipeline);
        passEncoder.SetBindGroup(
            0, utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
                                    {{0, defaultSampler}, {1, lumaPlaneView}}));
        passEncoder.Draw(6);
        passEncoder.EndPass();
    }
    wgpu::CommandBuffer cmdBuffer = cmdEncoder.Finish();
    queue.Submit(1, &cmdBuffer);

    // Test the luma plane in the top left corner of RGB image.
    EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVLumaPlaneIndex], renderPass.color, 0, 0);
}
// Draws a full-screen quad that samples the chrominance (UV) plane of an
// imported solid-yellow NV12 texture into the red and green channels of an
// RGBA attachment, then checks the rendered pixel value.
TEST_P(D3D12VideoViewsTests, NV12SampleUVtoRG) {
    // Import a solid-color (non-checkerboard) NV12 texture from D3D11.
    wgpu::Texture nv12Texture;
    CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
                              wgpu::TextureUsage::Sampled, /*isCheckerboard*/ false,
                              &nv12Texture);
    ASSERT_NE(nv12Texture.Get(), nullptr);

    // View only plane 1 (the chroma plane) of the texture.
    wgpu::TextureViewDescriptor planeViewDesc;
    planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
    wgpu::TextureView chromaPlaneView = nv12Texture.CreateView(&planeViewDesc);

    // Render target sized to the video frame.
    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);

    // Pipeline: shared quad vertex shader, fragment shader copies U/V into R/G.
    utils::ComboRenderPipelineDescriptor2 pipelineDesc;
    pipelineDesc.vertex.module = GetTestVertexShaderModule();
    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
            [[set(0), binding(0)]] var sampler0 : sampler;
            [[set(0), binding(1)]] var texture : texture_2d<f32>;

            [[stage(fragment)]]
            fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
               let u : f32 = textureSample(texture, sampler0, texCoord).r;
               let v : f32 = textureSample(texture, sampler0, texCoord).g;
               return vec4<f32>(u, v, 0.0, 1.0);
            })");
    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
    pipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline2(&pipelineDesc);

    wgpu::Sampler defaultSampler = device.CreateSampler();

    // Record a single draw of the quad and submit it.
    wgpu::CommandEncoder cmdEncoder = device.CreateCommandEncoder();
    {
        wgpu::RenderPassEncoder passEncoder =
            cmdEncoder.BeginRenderPass(&renderPass.renderPassInfo);
        passEncoder.SetPipeline(pipeline);
        passEncoder.SetBindGroup(
            0, utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
                                    {{0, defaultSampler}, {1, chromaPlaneView}}));
        passEncoder.Draw(6);
        passEncoder.EndPass();
    }
    wgpu::CommandBuffer cmdBuffer = cmdEncoder.Finish();
    queue.Submit(1, &cmdBuffer);

    // Test the chroma plane in the top left corner of RGB image.
    EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVChromaPlaneIndex], renderPass.color, 0, 0);
}
// Renders a NV12 "checkerboard" texture into a RGB quad then checks the color at specific
// points to ensure the image has not been flipped.
TEST_P(D3D12VideoViewsTests, NV12SampleYUVtoRGB) {
    // TODO(https://crbug.com/dawn/733): Figure out why Nvidia bot occasionally fails testing all
    // four corners.
    DAWN_SKIP_TEST_IF(IsNvidia());

    // Import the 2x2-block checkerboard NV12 texture from D3D11.
    wgpu::Texture wgpuTexture;
    CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
                              wgpu::TextureUsage::Sampled, /*isCheckerboard*/ true, &wgpuTexture);
    ASSERT_NE(wgpuTexture.Get(), nullptr);

    // Separate views for the luma (plane 0) and chroma (plane 1) planes.
    wgpu::TextureViewDescriptor lumaViewDesc;
    lumaViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
    wgpu::TextureView lumaTextureView = wgpuTexture.CreateView(&lumaViewDesc);

    wgpu::TextureViewDescriptor chromaViewDesc;
    chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
    wgpu::TextureView chromaTextureView = wgpuTexture.CreateView(&chromaViewDesc);

    // Fragment shader samples both planes and writes (Y, U, V) into (R, G, B).
    utils::ComboRenderPipelineDescriptor2 renderPipelineDescriptor;
    renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
            [[set(0), binding(0)]] var sampler0 : sampler;
            [[set(0), binding(1)]] var lumaTexture : texture_2d<f32>;
            [[set(0), binding(2)]] var chromaTexture : texture_2d<f32>;

            [[stage(fragment)]]
            fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
               let y : f32 = textureSample(lumaTexture, sampler0, texCoord).r;
               let u : f32 = textureSample(chromaTexture, sampler0, texCoord).r;
               let v : f32 = textureSample(chromaTexture, sampler0, texCoord).g;
               return vec4<f32>(y, u, v, 1.0);
            })");

    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
        device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
    renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline2(&renderPipelineDescriptor);

    wgpu::Sampler sampler = device.CreateSampler();

    // Record a single quad draw with both plane views bound, then submit.
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    {
        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
        pass.SetPipeline(renderPipeline);
        pass.SetBindGroup(
            0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
                                    {{0, sampler}, {1, lumaTextureView}, {2, chromaTextureView}}));
        pass.Draw(6);
        pass.EndPass();
    }

    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    // Test four corners of the checkerboard image (YUV color space).
    RGBA8 yellowYUV(kYellowYUVColor[kYUVLumaPlaneIndex].r, kYellowYUVColor[kYUVChromaPlaneIndex].r,
                    kYellowYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
    EXPECT_PIXEL_RGBA8_EQ(yellowYUV, renderPass.color, 0, 0);  // top left

    RGBA8 redYUV(kRedYUVColor[kYUVLumaPlaneIndex].r, kRedYUVColor[kYUVChromaPlaneIndex].r,
                 kRedYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
    EXPECT_PIXEL_RGBA8_EQ(redYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
                          kYUVImageDataHeightInTexels - 1);  // bottom right

    RGBA8 blueYUV(kBlueYUVColor[kYUVLumaPlaneIndex].r, kBlueYUVColor[kYUVChromaPlaneIndex].r,
                  kBlueYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
    EXPECT_PIXEL_RGBA8_EQ(blueYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
                          0);  // top right

    RGBA8 whiteYUV(kWhiteYUVColor[kYUVLumaPlaneIndex].r, kWhiteYUVColor[kYUVChromaPlaneIndex].r,
                   kWhiteYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
    EXPECT_PIXEL_RGBA8_EQ(whiteYUV, renderPass.color, 0,
                          kYUVImageDataHeightInTexels - 1);  // bottom left
}
DAWN_INSTANTIATE_TEST(D3D12VideoViewsTests, D3D12Backend()); | 47.437768 | 107 | 0.618113 | [
"render",
"vector",
"solid"
] |
d525d2e9f233dd87d0e994ed4dfb3f231030cc1f | 4,133 | hpp | C++ | src/fixed_geometry.hpp | Rouslan/NTracer | f28710ac8f9d5afbba8d31dafa288a7716a5a2cc | [
"MIT"
] | 1 | 2015-11-17T04:45:33.000Z | 2015-11-17T04:45:33.000Z | src/fixed_geometry.hpp | Rouslan/NTracer | f28710ac8f9d5afbba8d31dafa288a7716a5a2cc | [
"MIT"
] | null | null | null | src/fixed_geometry.hpp | Rouslan/NTracer | f28710ac8f9d5afbba8d31dafa288a7716a5a2cc | [
"MIT"
] | 2 | 2017-05-21T17:05:56.000Z | 2018-04-24T06:54:49.000Z | #ifndef fixed_geometry_hpp
#define fixed_geometry_hpp
#include "compatibility.hpp"
#include <type_traits>
#include "geometry.hpp"
namespace fixed {
/* an array that can be initialized with a call-back */
/* An array whose elements are constructed from a call-back: element i is
 * initialized with f(i). Storage is raw aligned bytes, so elements are only
 * constructed in the constructor; if f or T's constructor throws part-way
 * through, the already-constructed elements are destroyed before the
 * exception propagates. */
template<typename T,size_t Size> struct init_array {
    typename std::aligned_storage<sizeof(T) * Size,alignof(T)>::type data;

    T *begin() { return reinterpret_cast<T*>(&data); }
    const T *begin() const { return reinterpret_cast<const T*>(&data); }
    T *end() { return begin() + Size; }
    const T *end() const { return begin() + Size; }

    T &front() { return begin()[0]; }
    const T &front() const { return begin()[0]; }
    T &back() { return begin()[Size-1]; }
    const T &back() const { return begin()[Size-1]; }

    T &operator[](size_t i) { return begin()[i]; }
    const T &operator[](size_t i) const { return begin()[i]; }

    operator T*() { return begin(); }
    operator const T*() const { return begin(); }

    /* BUG FIX: this previously returned "size" -- the member function itself,
       not a value -- which fails to compile as soon as size() is
       instantiated. The element count is the template parameter Size. */
    size_t size() const { return Size; }

    /* |size| is taken only to match a dynamic-array interface; it must equal
       the compile-time Size. */
    template<typename F> init_array(size_t size,F f) {
        assert(size == Size);
        subinit(f,0);
    }

    ~init_array() {
        for(T &x : *this) x.~T();
    }

private:
    /* Recursively construct element i, then i+1, ...; on exception, destroy
       element i before rethrowing so the enclosing frames clean up theirs. */
    template<typename F> void subinit(F f,size_t i) {
        new(begin() + i) T(f(i));
        if(i < Size-1) {
            try {
                subinit(f,i+1);
            } catch(...) {
                (*this)[i].~T();
                throw;
            }
        }
    }
};
// Force the padding elements [start, end) to 1 so that SIMD lanes holding
// padding never produce 0/infinity/NaN when divided. Non-arithmetic element
// types are left untouched.
template<typename T> inline void item_array_init(T *items,size_t start,size_t end) {
    if constexpr(std::is_arithmetic_v<T>) {
        T *pad = items + start;
        T *const stop = items + end;
        while(pad != stop) *pad++ = 1;
    }
}
/* Fixed-size storage for N-dimensional items, padded up to a SIMD-friendly
 * size (_real_size). RealItems::get(N) gives the number of meaningful
 * elements; the remainder is padding initialized to 1 by item_array_init.
 * The allocator parameters exist only to match the dynamically-sized
 * variant's interface and are ignored here. */
template<size_t N,typename RealItems,typename T> struct item_array {
    static constexpr size_t _real_size = simd::padded_size<T>(RealItems::get(N));
    static constexpr size_t max_items = _real_size;

    explicit item_array(size_t d,v_array_allocator* =nullptr) {
        assert(d == N);
        item_array_init(items.raw,RealItems::get(N),_real_size);
    }
    /* All copy constructors (plain, allocator-tagged, shallow-copy-tagged)
       delegate to operator=, which copies the whole packed storage. */
    item_array(const item_array &b) {
        (*this) = b;
    }
    item_array(const item_array &b,v_array_allocator*) {
        (*this) = b;
    }
    item_array(const item_array &b,shallow_copy_t) {
        (*this) = b;
    }

    item_array &operator=(const item_array &b) {
        items = b.items;
        return *this;
    }

    size_t dimension() const { return N; }

    simd::packed_union_array<T,_real_size> items;

    /* Store/load a SIMD vector of Size lanes at element offset n. */
    template<size_t Size> FORCE_INLINE void store_vec(size_t n,simd::v_type<T,Size> val) {
        simd::at<Size>(items,n) = val;
    }
    template<size_t Size> FORCE_INLINE auto vec(size_t n) const {
        return simd::at<Size>(items,n);
    }

    T *data() { return items.raw; }
    const T *data() const { return items.raw; }
};
/* Trait bundle describing fixed-size (compile-time dimension N) storage.
   The allocator hooks all return null because nothing is heap-allocated. */
template<size_t N,typename T> struct item_store {
    typedef T item_t;

    // stored element count for dimension d, including SIMD padding
    template<typename U=T> static size_t v_dimension(size_t d) {
        return simd::padded_size<U>(d);
    }

    static const size_t required_d = N;

    template<typename U> using init_array = fixed::init_array<U,N>;
    template<typename U> using smaller_init_array = fixed::init_array<U,N-1>;
    template<typename RealItems,typename U=T> using type = item_array<N,RealItems,U>;

    // fixed storage never allocates, so no allocators are ever provided
    static constexpr v_array_allocator *def_allocator = nullptr;
    static geom_allocator *new_allocator(size_t d,size_t items_per_block) {
        return nullptr;
    }
    template<typename> static v_array_allocator *allocator_for(geom_allocator*) {
        return nullptr;
    }
};
}
// Maps an N-dimensional fixed store to its (N-1)-dimensional counterpart.
template<size_t N,typename T> struct smaller_store<fixed::item_store<N,T> > {
    static_assert(N > 1,"it can't get any smaller");

    typedef fixed::item_store<N-1,T> type;
};
#endif
| 29.105634 | 94 | 0.564965 | [
"geometry"
] |
d527f9613f639e61d4a146d338e3720002a0abbe | 6,957 | cpp | C++ | test/control_points_container.cpp | KIT-MRT/uniform_bspline | 158f026f72849088351dc7b31f33ff5b6684965d | [
"BSL-1.0"
] | 2 | 2020-12-23T00:13:21.000Z | 2021-12-28T09:22:33.000Z | test/control_points_container.cpp | KIT-MRT/uniform_bspline | 158f026f72849088351dc7b31f33ff5b6684965d | [
"BSL-1.0"
] | null | null | null | test/control_points_container.cpp | KIT-MRT/uniform_bspline | 158f026f72849088351dc7b31f33ff5b6684965d | [
"BSL-1.0"
] | 2 | 2021-01-16T15:17:57.000Z | 2021-12-28T09:22:34.000Z | #include "control_points_container.hpp"
#include <gtest/gtest.h>
// forEach over a std::vector-backed container must visit every control point
// in storage order and hand out references that alias the container's own
// storage — for both the const and the non-const callback overload.
TEST(ControlPointsContainer, ForEachVector) {
    const std::vector<double> controlPoints{1.0, 4.0, 3.0, 2.0, 5.0};
    {
        const ubs::ControlPointsContainer<double, 4, double, double, std::vector<double>> controlPointsContainer(
            controlPoints);
        const auto& controlPointRef = controlPointsContainer.get();

        int counter = 0;
        controlPointsContainer.forEach([&](const double& val) {
            EXPECT_EQ(val, controlPoints[counter]);
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            EXPECT_EQ(&val, controlPointRef.data() + counter);
            ++counter;
        });

        EXPECT_EQ(counter, (int)controlPoints.size());
    }
    {
        ubs::ControlPointsContainer<double, 4, double, double, std::vector<double>> controlPointsContainer(
            controlPoints);
        const auto& controlPointRef = controlPointsContainer.get();

        int counter = 0;
        controlPointsContainer.forEach([&](double& val) {
            EXPECT_EQ(val, controlPoints[counter]);
            // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
            EXPECT_EQ(&val, controlPointRef.data() + counter);
            ++counter;
        });

        EXPECT_EQ(counter, (int)controlPoints.size());
    }
}
// With an Eigen matrix as storage, forEach must iterate scalar-wise in the
// order of the matrix's data() buffer and expose references into it.
TEST(ControlPointsContainer, ForEachEigenMatrix) {
    const Eigen::MatrixXd controlPoints = Eigen::MatrixXd::Random(10, 20);
    ubs::ControlPointsContainer<double, 4, Eigen::Vector2d, double, Eigen::MatrixXd> controlPointsContainer(
        controlPoints);
    const auto& controlPointRef = controlPointsContainer.get();

    int counter = 0;
    controlPointsContainer.forEach([&](double& val) {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        EXPECT_EQ(val, controlPoints.data()[counter]);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        EXPECT_EQ(&val, controlPointRef.data() + counter);
        ++counter;
    });

    EXPECT_EQ(counter, (int)controlPoints.size());
}
// forEach over a boost::multi_array-backed container must visit all elements
// in flat data() order and expose references into the stored array.
TEST(ControlPointsContainer, ForEachBoostMultiArray) {
    using ControlPointsContainer = ubs::ControlPointsContainer<double,
                                                               2,
                                                               Eigen::Vector3d,
                                                               Eigen::Vector4d,
                                                               ubs::EigenAlignedMultiArray<Eigen::Vector4d, 3>>;

    ControlPointsContainer::ControlPointsType controlPoints(boost::extents[10][12][8]);
    for (int i = 0; i < 10; ++i) {
        for (int j = 0; j < 12; ++j) {
            for (int k = 0; k < 8; ++k) {
                controlPoints[i][j][k].setRandom();
            }
        }
    }

    ControlPointsContainer controlPointsContainer(controlPoints);
    const auto& controlPointRef = controlPointsContainer.get();

    int counter = 0;
    controlPointsContainer.forEach([&](Eigen::Vector4d& val) {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        EXPECT_EQ(val, controlPoints.data()[counter]);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        EXPECT_EQ(&val, controlPointRef.data() + counter);
        ++counter;
    });

    EXPECT_EQ(counter, (int)controlPoints.num_elements());
}
// transform() must map every input control point through the functor and
// write the result into the corresponding position of the output container
// (here also crossing value type: Vector4d -> Vector2d, double -> float).
TEST(ControlPointsContainer, Transform) {
    using ControlPointContainerInput = ubs::ControlPointsContainer<double,
                                                                   2,
                                                                   Eigen::Vector3d,
                                                                   Eigen::Vector4d,
                                                                   ubs::EigenAlignedMultiArray<Eigen::Vector4d, 3>>;
    ControlPointContainerInput::ControlPointsType controlPointsInput(boost::extents[10][12][8]);
    for (int i = 0; i < 10; ++i) {
        for (int j = 0; j < 12; ++j) {
            for (int k = 0; k < 8; ++k) {
                controlPointsInput[i][j][k].setRandom();
            }
        }
    }
    ControlPointContainerInput controlPointContainerInput(controlPointsInput);

    using ControlPointContainerOutput = ubs::ControlPointsContainer<float,
                                                                    2,
                                                                    Eigen::Vector3d,
                                                                    Eigen::Vector2d,
                                                                    ubs::EigenAlignedMultiArray<Eigen::Vector2d, 3>>;
    // Output starts zeroed so any unwritten element would be detectable.
    ControlPointContainerOutput::ControlPointsType controlPointsOutput(boost::extents[10][12][8]);
    std::for_each(
        controlPointsOutput.flat_begin(), controlPointsOutput.flat_end(), [](Eigen::Vector2d& v) { v.setZero(); });
    ControlPointContainerOutput controlPointContainerOutput(controlPointsOutput);

    controlPointContainerInput.transform(controlPointContainerOutput, [&](const Eigen::Vector4d& val) {
        return Eigen::Vector2d{val[0] + val[1], val[2] + val[3]};
    });

    int counter = 0;
    controlPointContainerOutput.forEach([&](const Eigen::Vector2d& v) {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        const Eigen::Vector4d& inputVec = controlPointsInput.data()[counter];
        EXPECT_FLOAT_EQ(inputVec[0] + inputVec[1], v[0]);
        EXPECT_FLOAT_EQ(inputVec[2] + inputVec[3], v[1]);
        ++counter;
    });
}
namespace ubs {
// This test needs to be in the ubs namespace because of the friend declaration.
// isIdxValid() must accept every flat index in [0, num_elements) and reject
// anything outside that range.
TEST(ControlPointsContainer, IsIdxValid) {
    using ControlPointContainerInput = ubs::ControlPointsContainer<double,
                                                                   2,
                                                                   Eigen::Vector3d,
                                                                   Eigen::Vector4d,
                                                                   ubs::EigenAlignedMultiArray<Eigen::Vector4d, 3>>;
    constexpr int SizeX = 10;
    constexpr int SizeY = 12;
    constexpr int SizeZ = 8;

    ControlPointContainerInput::ControlPointsType controlPointsInput(boost::extents[SizeX][SizeY][SizeZ]);
    for (int i = 0; i < SizeX; ++i) {
        for (int j = 0; j < SizeY; ++j) {
            for (int k = 0; k < SizeZ; ++k) {
                controlPointsInput[i][j][k].setRandom();
            }
        }
    }
    ControlPointContainerInput controlPointContainerInput(controlPointsInput);

    for (int i = 0; i < SizeX * SizeY * SizeZ; ++i) {
        EXPECT_TRUE(controlPointContainerInput.isIdxValid(i));
    }
    EXPECT_FALSE(controlPointContainerInput.isIdxValid(-1));
    EXPECT_FALSE(controlPointContainerInput.isIdxValid(SizeX * SizeY * SizeZ));
}
} // namespace ubs
| 43.48125 | 117 | 0.566049 | [
"vector",
"transform"
] |
d528d663e67b4f99681d663229e0e08e6994e254 | 4,779 | hpp | C++ | include/dynamix/domain.hpp | ggerganov/dynamix | 7530d2d6a39a0824410f2535ab5fc95d3821488f | [
"MIT"
] | 580 | 2016-06-26T20:44:17.000Z | 2022-03-30T01:26:51.000Z | include/dynamix/domain.hpp | ggerganov/dynamix | 7530d2d6a39a0824410f2535ab5fc95d3821488f | [
"MIT"
] | 35 | 2016-06-28T11:15:49.000Z | 2022-01-28T14:03:30.000Z | include/dynamix/domain.hpp | ggerganov/dynamix | 7530d2d6a39a0824410f2535ab5fc95d3821488f | [
"MIT"
] | 52 | 2016-06-26T19:49:24.000Z | 2022-01-25T18:18:31.000Z | // DynaMix
// Copyright (c) 2013-2020 Borislav Stanimirov, Zahary Karadjov
//
// Distributed under the MIT Software License
// See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/MIT
//
#pragma once
#include "config.hpp"
#include "mutation_rule_id.hpp"
#include "mixin_type_info.hpp"
#include "feature.hpp"
#include "message.hpp"
#include "mixin_collection.hpp" // for mixin_type_info_vector
#include "internal/assert.hpp"
#include <unordered_map>
#include <memory>
#include <type_traits> // alignment of
#if DYNAMIX_THREAD_SAFE_MUTATIONS
#include <mutex>
#endif
/**
* \file
* Domain related classes and functions.
*/
// The domain collection of mixins and messages
// It serves as a library instance of sorts
namespace dynamix
{
class mutation_rule;
class object_type_mutation;
class domain_allocator;
class type_class;
class object_type_info;
namespace internal
{
struct message_t;
class DYNAMIX_API domain
{
public:
    // contains static local variable which has thread-safe initialization
    // so this function is a bit slower, but it's safe to call globally
    static domain& safe_instance();
    // no static variables, not safe to call globally
    static const domain& instance();

    // Mutation rules are consulted on every object mutation in this domain;
    // the returned id can later be passed to remove_mutation_rule().
    mutation_rule_id add_mutation_rule(std::shared_ptr<mutation_rule> rule);
    mutation_rule_id add_mutation_rule(mutation_rule* rule);
    std::shared_ptr<mutation_rule> remove_mutation_rule(mutation_rule_id id);
    void apply_mutation_rules(object_type_mutation& mutation, const mixin_collection& source_mixins);

    size_t num_registered_mixins() const { return _num_registered_mixins; }

    void register_mixin_type(mixin_type_info& info);
    void unregister_mixin_type(const mixin_type_info& info);

    // feature registration functions for the supported kinds of features
    void register_feature(message_t& m);
    void unregister_feature(const message_t& m);

    // type class registration
    void register_type_class(type_class& t);
    void unregister_type_class(const type_class& t);

    // creates a new type info if needed
    const object_type_info* get_object_type_info(mixin_collection mixins);

    // "id" must refer to a currently registered mixin (asserted)
    const mixin_type_info& mixin_info(mixin_id id) const
    {
        I_DYNAMIX_ASSERT(id != INVALID_MIXIN_ID);
        I_DYNAMIX_ASSERT(id <= _num_registered_mixins);
        I_DYNAMIX_ASSERT(_mixin_type_infos[id]);
        return *_mixin_type_infos[id];
    }

    // "id" must refer to a currently registered message (asserted)
    const message_t& message_data(feature_id id) const
    {
        I_DYNAMIX_ASSERT(id <= _num_registered_messages);
        I_DYNAMIX_ASSERT(_messages[id]);
        return *_messages[id];
    }

    // sets the current domain allocator
    void set_allocator(domain_allocator* allocator);
    domain_allocator* allocator() const { return _allocator; }

    // get mixin id by name string
    mixin_id get_mixin_id_by_name(const char* mixin_name) const;

    // erases all type infos with zero objects
    void garbage_collect_type_infos();

private:
    // singleton: only the instance getters may construct/destroy
    domain();
    ~domain();

    friend class dynamix::object_type_info;
    friend class object_mutator;

    // non-copyable
    domain(const domain&) = delete;
    domain& operator=(const domain&) = delete;

    // sparse list of all mixin infos
    // some elements might be nullptr
    // such elements have been registered from a loadable module (plugin)
    // and then unregistered when it was unloaded
    mixin_type_info* _mixin_type_infos[DYNAMIX_MAX_MIXINS];
    size_t _num_registered_mixins; // max registered mixin

    // sparse list of all message infos
    // some elements might be nullptr
    // such elements have been registered from a loadable module (plugin)
    // and then unregistered when it was unloaded
    message_t* _messages[DYNAMIX_MAX_MESSAGES];
    size_t _num_registered_messages;

    // sparse list of all registered type classes
    // some elements might be nullptr
    // such elements have been registered from a loadable module (plugin)
    // and then unregistered when it was unloaded
    std::vector<type_class*> _type_classes;

    typedef std::unordered_map<available_mixins_bitset, std::unique_ptr<object_type_info>> object_type_info_map;
    object_type_info_map _object_type_infos;

    // mutation rules for this domain
    std::vector<std::shared_ptr<mutation_rule>> _mutation_rules;

#if DYNAMIX_THREAD_SAFE_MUTATIONS
    std::mutex _object_type_infos_mutex;
    std::mutex _mutation_rules_mutex;
#endif

    // allocators
    domain_allocator* _allocator;

    static const domain& _instance; // used for the fast version of the instance getter
};
} // namespace internal
// allocator functions
/// Sets an global allocator for all mixins and datas.
void DYNAMIX_API set_global_allocator(domain_allocator* allocator);
} // namespace dynamix
| 29.68323 | 112 | 0.745135 | [
"vector"
] |
d52b540002182356c6039b1c7b7f4e2114e34347 | 642 | cpp | C++ | Online Judges/HackerEarth/multipleSubtrees.cpp | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 3 | 2018-12-18T13:39:42.000Z | 2021-06-23T18:05:18.000Z | Online Judges/HackerEarth/multipleSubtrees.cpp | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 1 | 2018-11-02T21:32:40.000Z | 2018-11-02T22:47:12.000Z | Online Judges/HackerEarth/multipleSubtrees.cpp | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 6 | 2018-10-27T14:07:52.000Z | 2019-11-14T13:49:29.000Z | #include <bits/stdc++.h>
using namespace std;
const int maxN = 1e5;
vector<int> tree[maxN];
int e[maxN], ans[maxN];
// Propagates precomputed answers from the root down the tree: each child's
// value is its own degree (e) plus the parent's value, minus 2.
void dfs(int u, int prev)
{
  for (int child : tree[u])
  {
    if (child != prev)
    {
      ans[child] = e[child] + ans[u] - 2;
      dfs(child, u);
    }
  }
}
// Reads an n-node tree, runs one DFS from node 0 to precompute every node's
// answer, then serves q queries in O(1) each.
int main()
{
  int n; scanf("%d", &n); int u, v;
  for (int i = 0; i < n - 1; i ++)
  {
    scanf("%d %d", &u, &v); u --, v--;  // convert to 0-based node ids
    tree[u].push_back(v); tree[v].push_back(u);
    e[u] ++, e[v] ++;  // e[x] tracks the degree of node x
  }
  // the root's answer is its degree; dfs fills in all other nodes
  ans[0] = e[0]; dfs(0, -1);
  int q; scanf("%d", &q);
  while (q --)
  {
    scanf("%d", &u); u --;
    printf("%d\n", ans[u]);
  }
  return(0);
}
| 17.351351 | 48 | 0.429907 | [
"vector"
] |
d52dd20ee17afbe9dc89163e698056da03778dab | 1,210 | cpp | C++ | Applications/DataExplorer/DataView/AddLayerToMeshDialog.cpp | ufz/ogs | 97d0249e0c578c3055730f4e9d994b9970885098 | [
"BSD-3-Clause"
] | 111 | 2015-03-20T22:54:17.000Z | 2022-03-30T04:37:21.000Z | Applications/DataExplorer/DataView/AddLayerToMeshDialog.cpp | ufz/ogs | 97d0249e0c578c3055730f4e9d994b9970885098 | [
"BSD-3-Clause"
] | 3,015 | 2015-01-05T21:55:16.000Z | 2021-02-15T01:09:17.000Z | Applications/DataExplorer/DataView/AddLayerToMeshDialog.cpp | ufz/ogs | 97d0249e0c578c3055730f4e9d994b9970885098 | [
"BSD-3-Clause"
] | 250 | 2015-02-10T15:43:57.000Z | 2022-03-30T04:37:20.000Z | /**
* \file
* \author Karsten Rink
* \date 2016-01-18
* \brief Implementation of the AddLayerToMeshDialog class.
*
* \copyright
* Copyright (c) 2012-2021, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*
*/
#include "AddLayerToMeshDialog.h"
#include "Base/OGSError.h"
#include "Base/StrictDoubleValidator.h"
// Builds the UI and restricts the thickness field to decimal values in
// [0, 1000000] with up to 7 decimal places.
AddLayerToMeshDialog::AddLayerToMeshDialog(QDialog* parent) : QDialog(parent)
{
    setupUi(this);

    auto* thickness_validator = new StrictDoubleValidator(0, 1000000, 7, this);
    this->thicknessEdit->setValidator(thickness_validator);
}
// Validates the inputs before accepting: a mesh name must be given and the
// thickness must parse to a value strictly greater than zero; otherwise an
// error box is shown and the dialog stays open.
void AddLayerToMeshDialog::accept()
{
    if (this->nameEdit->text().isEmpty())
    {
        OGSError::box("Please enter a name for the new Mesh.");
    }
    else if (this->thicknessEdit->text().isEmpty() ||
             this->thicknessEdit->text().toDouble() <= 0)
    {
        OGSError::box("Thickness needs to be larger 0");
    }
    else
    {
        this->done(QDialog::Accepted);
    }
}
// Closes the dialog without applying any changes.
void AddLayerToMeshDialog::reject()
{
    this->done(QDialog::Rejected);
}
| 24.693878 | 79 | 0.652066 | [
"mesh"
] |
d52e41bda0e1690fe27ef25455ce24fe35cc0b0b | 8,379 | cpp | C++ | folly/logging/AsyncLogWriter.cpp | Aoikiseki/folly | df3633c731d08bab0173039a050a30853fb47212 | [
"Apache-2.0"
] | 19,046 | 2015-01-01T17:01:10.000Z | 2022-03-31T23:01:43.000Z | folly/logging/AsyncLogWriter.cpp | Aoikiseki/folly | df3633c731d08bab0173039a050a30853fb47212 | [
"Apache-2.0"
] | 1,493 | 2015-01-11T15:47:13.000Z | 2022-03-28T18:13:58.000Z | folly/logging/AsyncLogWriter.cpp | Aoikiseki/folly | df3633c731d08bab0173039a050a30853fb47212 | [
"Apache-2.0"
] | 4,818 | 2015-01-01T12:28:16.000Z | 2022-03-31T16:22:10.000Z | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/logging/AsyncLogWriter.h>
#include <folly/Exception.h>
#include <folly/FileUtil.h>
#include <folly/detail/AtFork.h>
#include <folly/logging/LoggerDB.h>
#include <folly/system/ThreadName.h>
namespace folly {
constexpr size_t AsyncLogWriter::kDefaultMaxBufferSize;
// Registers the fork handlers first, then starts the background I/O thread.
AsyncLogWriter::AsyncLogWriter() {
    folly::detail::AtFork::registerHandler(
        this,
        [this] { return preFork(); },
        [this] { postForkParent(); },
        [this] { postForkChild(); });

    // Start the I/O thread after registering the atfork handler.
    // preFork() may be invoked in another thread as soon as registerHandler()
    // returns. It will check FLAG_IO_THREAD_STARTED to see if the I/O thread is
    // running yet.
    {
        auto data = data_.lock();
        data->flags |= FLAG_IO_THREAD_STARTED;
        data->ioThread = std::thread([this] { ioThread(); });
    }
}
AsyncLogWriter::~AsyncLogWriter() {
    {
        auto data = data_.lock();
        // cleanup() is expected to have been called already (it sets
        // FLAG_DESTROYING); recover with a warning and a debug assert if not.
        if (!(data->flags & FLAG_DESTROYING)) {
            LoggerDB::internalWarning(
                __FILE__, __LINE__, "cleanup() is not called before destroying");
            stopIoThread(data, FLAG_DESTROYING);
            assert(false);
        }
    }

    // Unregister the atfork handler after stopping the I/O thread.
    // preFork(), postForkParent(), and postForkChild() calls can run
    // concurrently with the destructor until unregisterHandler() returns.
    folly::detail::AtFork::unregisterHandler(this);
}
// Stops the I/O thread and synchronously writes out any messages that were
// still queued when it stopped. Must run before destruction.
void AsyncLogWriter::cleanup() {
    std::vector<std::string>* ioQueue;
    size_t numDiscarded;
    {
        // Stop the I/O thread
        auto data = data_.lock();
        stopIoThread(data, FLAG_DESTROYING);

        // stopIoThread() causes the I/O thread to stop as soon as possible,
        // without waiting for all pending messages to be written. Extract any
        // remaining messages to write them below.
        ioQueue = data->getCurrentQueue();
        numDiscarded = data->numDiscarded;
    }

    // If there are still any pending messages, flush them now.
    if (!ioQueue->empty()) {
        performIO(*ioQueue, numDiscarded);
    }
}
// Convenience overload: copies the StringPiece into an owned std::string.
void AsyncLogWriter::writeMessage(StringPiece buffer, uint32_t flags) {
    return writeMessage(buffer.str(), flags);
}
// Queues a message for the I/O thread. When the buffered bytes have reached
// maxBufferBytes the message is dropped and counted, unless NEVER_DISCARD
// is set in flags.
void AsyncLogWriter::writeMessage(std::string&& buffer, uint32_t flags) {
    auto data = data_.lock();
    if ((data->currentBufferSize >= data->maxBufferBytes) &&
        !(flags & NEVER_DISCARD)) {
        ++data->numDiscarded;
        return;
    }

    data->currentBufferSize += buffer.size();
    auto* queue = data->getCurrentQueue();
    queue->emplace_back(std::move(buffer));
    messageReady_.notify_one();
}
// Blocks until everything queued before this call has been written.
void AsyncLogWriter::flush() {
    auto data = data_.lock();
    auto start = data->ioThreadCounter;

    // Wait until ioThreadCounter increments by at least two.
    // Waiting for a single increment is not sufficient, as this happens after
    // the I/O thread has swapped the queues, which is before it has actually
    // done the I/O.
    while (data->ioThreadCounter < start + 2) {
        // Enqueue an empty string and wake the I/O thread.
        // The empty string ensures that the I/O thread will break out of its wait
        // loop and increment the ioThreadCounter, even if there is no other work
        // to do.
        data->getCurrentQueue()->emplace_back();
        messageReady_.notify_one();

        // Wait for notification from the I/O thread that it has done work.
        ioCV_.wait(data.as_lock());
    }
}
// Sets the cap (in bytes) on buffered, not-yet-written messages.
void AsyncLogWriter::setMaxBufferSize(size_t size) {
    auto data = data_.lock();
    data->maxBufferBytes = size;
}
// Returns the current buffered-message byte cap.
size_t AsyncLogWriter::getMaxBufferSize() const {
    auto data = data_.lock();
    return data->maxBufferBytes;
}
// Body of the background writer thread: repeatedly swaps the pending queue
// out from under producers and hands it to performIO().
void AsyncLogWriter::ioThread() {
    folly::setThreadName("log_writer");

    while (true) {
        // With the lock held, grab a pointer to the current queue, then increment
        // the ioThreadCounter index so that other threads will write into the
        // other queue as we process this one.
        std::vector<std::string>* ioQueue;
        size_t numDiscarded;
        {
            auto data = data_.lock();
            ioQueue = data->getCurrentQueue();
            while (ioQueue->empty() && !(data->flags & FLAG_STOP)) {
                // Wait for a message or one of the above flags to be set.
                messageReady_.wait(data.as_lock());
            }

            if (data->flags & FLAG_STOP) {
                // We have been asked to stop.  We exit immediately in this case
                // without writing out any pending messages.  If we are stopping due
                // to a fork() the I/O thread will be restarted after the fork (as
                // long as we are not also being destroyed).  If we are stopping due
                // to the destructor, any remaining messages will be written out
                // inside the destructor.
                data->flags |= FLAG_IO_THREAD_STOPPED;
                data.unlock();
                ioCV_.notify_all();
                return;
            }

            ++data->ioThreadCounter;
            numDiscarded = data->numDiscarded;
            data->numDiscarded = 0;
            data->currentBufferSize = 0;
        }
        ioCV_.notify_all();

        // Write the log messages now that we have released the lock
        performIO(*ioQueue, numDiscarded);

        // clear() empties the vector, but the allocated capacity remains so we can
        // just reuse it without having to re-allocate in most cases.
        ioQueue->clear();
    }
}
// AtFork pre-handler: quiesce the I/O thread and hold the data lock across
// the fork() so no other thread can be mid-mutation when it happens.
bool AsyncLogWriter::preFork() {
    // Stop the I/O thread.
    //
    // It would perhaps be better to not stop the I/O thread in the parent
    // process.  However, this leaves us in a slightly tricky situation in the
    // child process where data_->ioThread has been initialized and does not
    // really point to a valid thread.  While we could store it in a union and
    // replace it without ever calling its destructor, in practice this still has
    // some tricky corner cases to deal with.

    // Grab the data lock to ensure no other thread is holding it
    // while we fork.
    lockedData_ = data_.lock();

    // If the I/O thread has been started, stop it now
    if (lockedData_->flags & FLAG_IO_THREAD_STARTED) {
        stopIoThread(lockedData_, 0);
    }

    return true;
}
// AtFork post-handler (parent side): release the lock and resume writing.
void AsyncLogWriter::postForkParent() {
    // Restart the I/O thread
    restartThread();
}
// AtFork post-handler (child side): drop inherited pending messages so they
// are only written once (by the parent), then resume.
void AsyncLogWriter::postForkChild() {
    // Clear any messages in the queue.  We only want them to be written once,
    // and we let the parent process handle writing them.
    lockedData_->queues[0].clear();
    lockedData_->queues[1].clear();

    // Restart the I/O thread
    restartThread();
}
// Signals the I/O thread to stop (ORing extraFlags into the state), waits
// for it to acknowledge, and joins it exactly once.
void AsyncLogWriter::stopIoThread(
    folly::Synchronized<Data, std::mutex>::LockedPtr& data,
    uint32_t extraFlags) {
    data->flags |= (FLAG_STOP | extraFlags);
    messageReady_.notify_one();
    ioCV_.wait(data.as_lock(), [&] {
        return bool(data->flags & FLAG_IO_THREAD_STOPPED);
    });

    // Check FLAG_IO_THREAD_JOINED before calling join().
    // preFork() and the destructor may both run concurrently in separate
    // threads, and only one should try to join the thread.
    if ((data->flags & FLAG_IO_THREAD_JOINED) == 0) {
        data->ioThread.join();
        data->flags |= FLAG_IO_THREAD_JOINED;
    }
}
// Releases the fork-time lock and, unless we never started or are being
// destroyed, spawns a fresh I/O thread.
void AsyncLogWriter::restartThread() {
    // Move lockedData_ into a local member variable so it will be released
    // when we return.
    folly::Synchronized<Data, std::mutex>::LockedPtr data =
        std::move(lockedData_);

    if (!(data->flags & FLAG_IO_THREAD_STARTED)) {
        // Do not start the I/O thread if the constructor has not finished yet
        return;
    }
    if (data->flags & FLAG_DESTROYING) {
        // Do not restart the I/O thread if we were being destroyed.
        // If there are more pending messages that need to be flushed the
        // destructor's stopIoThread() call will handle flushing the messages in
        // this case.
        return;
    }

    data->flags &= ~(FLAG_STOP | FLAG_IO_THREAD_JOINED | FLAG_IO_THREAD_STOPPED);
    data->ioThread = std::thread([this] { ioThread(); });
}
} // namespace folly
| 32.476744 | 79 | 0.685046 | [
"vector"
] |
d52f31a10275f2ea964bd7f475f3fa4d09fd381e | 6,450 | cpp | C++ | crypt/src/statistics/CryptStatistics.cpp | SoftMatterMechanics/ApicalStressFibers | 17d343c09a246a50f9e3a3cbfc399ca6bef353ce | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-10T16:12:13.000Z | 2020-09-10T16:12:13.000Z | crypt/src/statistics/CryptStatistics.cpp | SoftMatterMechanics/ApicalStressFibers | 17d343c09a246a50f9e3a3cbfc399ca6bef353ce | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | crypt/src/statistics/CryptStatistics.cpp | SoftMatterMechanics/ApicalStressFibers | 17d343c09a246a50f9e3a3cbfc399ca6bef353ce | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-10T16:12:21.000Z | 2020-09-10T16:12:21.000Z | /*
Copyright (c) 2005-2020, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CryptStatistics.hpp"
#include "RandomNumberGenerator.hpp"
/**
* This global function is to allow the list of cells in to be compared in
* terms of their y-value and std::list.sort() to be called
*/
// Orders (cell, y-coordinate) pairs by ascending y so std::list::sort can
// arrange a crypt section from bottom to top.
bool CellsHeightComparison(const std::pair<CellPtr, double> lhs, const std::pair<CellPtr, double> rhs)
{
    return lhs.second < rhs.second;
}
// Simply forwards the crypt population reference to the base class.
CryptStatistics::CryptStatistics(MeshBasedCellPopulation<2>& rCrypt)
    : AbstractCryptStatistics(rCrypt)
{
}
/*
 * Collects the cells lying close to the straight line from (xBottom, 0) to
 * (xTop, yTop), returned sorted by ascending height. Unspecified x values
 * (DBL_MAX) are chosen uniformly at random across the crypt width.
 */
std::vector<CellPtr> CryptStatistics::GetCryptSection(double yTop, double xBottom, double xTop, bool periodic)
{
    double crypt_width = mrCrypt.rGetMesh().GetWidth(0);

    // Fill in the default values - in a sequential manner
    if (xBottom == DBL_MAX)
    {
        xBottom = RandomNumberGenerator::Instance()->ranf()*crypt_width;
    }

    if (xTop == DBL_MAX)
    {
        xTop = RandomNumberGenerator::Instance()->ranf()*crypt_width;
    }

    assert(yTop>0.0);
    std::list<std::pair<CellPtr, double> > cells_list; // the second entry is the y value (needed for sorting)

    if (fabs(xTop-xBottom)<0.5*crypt_width)
    {
        // The periodic version isn't needed, ignore even if periodic was set to true
        periodic = false;
    }

    // Loop over cells and add to the store if they are within a cell's radius of the specified line
    for (AbstractCellPopulation<2>::Iterator cell_iter = mrCrypt.Begin();
         cell_iter != mrCrypt.End();
         ++cell_iter)
    {
        if (periodic)
        {
            if (CellIsInSectionPeriodic(xBottom, xTop, yTop, mrCrypt.GetLocationOfCellCentre(*cell_iter)))
            {
                // Set up a pair, equal to (cell,y_val) and insert
                std::pair<CellPtr, double> pair(*cell_iter, mrCrypt.GetLocationOfCellCentre(*cell_iter)[1]);
                cells_list.push_back(pair);
            }
        }
        else
        {
            if (CellIsInSection(xBottom, xTop, yTop, mrCrypt.GetLocationOfCellCentre(*cell_iter)))
            {
                // Set up a pair, equal to (cell,y_val) and insert
                std::pair<CellPtr, double> pair(*cell_iter, mrCrypt.GetLocationOfCellCentre(*cell_iter)[1]);
                cells_list.push_back(pair);
            }
        }
    }

    // Sort the list by height (see CellsHeightComparison)
    cells_list.sort(CellsHeightComparison);

    // Copy to a vector
    std::vector<CellPtr> ordered_cells;
    for (std::list<std::pair<CellPtr, double> >::iterator iter = cells_list.begin();
         iter!=cells_list.end();
         ++iter)
    {
        ordered_cells.push_back(iter->first);
    }

    return ordered_cells;
}
// Convenience wrapper: GetCryptSection() with periodicity enabled.
std::vector<CellPtr> CryptStatistics::GetCryptSectionPeriodic(double yTop, double xBottom, double xTop)
{
    return GetCryptSection(yTop, xBottom, xTop, true);
}
/*
 * Returns true if the cell centre lies within widthOfSection of the section
 * line through (xBottom, 0) and (xTop, yTop), measured via the mesh's
 * GetVectorFromAtoB().
 */
bool CryptStatistics::CellIsInSection(double xBottom, double xTop, double yTop, const c_vector<double,2>& rCellPosition, double widthOfSection)
{
    // Foot of the perpendicular from the cell centre onto the section line
    c_vector<double,2> intercept;

    if (xBottom == xTop)
    {
        // Vertical line: the closest point shares the cell's y coordinate
        intercept[0] = xTop;
        intercept[1] = rCellPosition[1];
    }
    else
    {
        double m = (yTop)/(xTop-xBottom); // gradient of line
        intercept[0] = (m*m*xBottom + rCellPosition[0] + m*rCellPosition[1])/(1+m*m);
        intercept[1] = m*(intercept[0] - xBottom);
    }

    c_vector<double,2> vec_from_A_to_B = mrCrypt.rGetMesh().GetVectorFromAtoB(intercept, rCellPosition);
    double dist = norm_2(vec_from_A_to_B);

    return (dist <= widthOfSection);
}
/*
 * Periodic variant: checks the cell against the two images of the section
 * line (shifted by one crypt width), accepting the cell if it is within
 * widthOfSection of either one. Note the strict "<" here, versus "<=" in
 * CellIsInSection().
 */
bool CryptStatistics::CellIsInSectionPeriodic(double xBottom, double xTop, double yTop, const c_vector<double,2>& rCellPosition, double widthOfSection)
{
    bool is_in_section = false;

    c_vector<double,2> intercept;
    double crypt_width = mrCrypt.rGetMesh().GetWidth(0u);

    double m; // gradient of line
    double offset;

    // Shift by one period in the direction that makes the line wrap
    if (xBottom < xTop)
    {
        offset = -crypt_width;
    }
    else
    {
        offset = crypt_width;
    }

    m = (yTop)/(xTop-xBottom+offset); // gradient of line

    // 1st line
    intercept[0] = (m*m*xBottom + rCellPosition[0] + m*rCellPosition[1])/(1+m*m);
    intercept[1] = m*(intercept[0] - xBottom);

    c_vector<double,2> vec_from_A_to_B = mrCrypt.rGetMesh().GetVectorFromAtoB(intercept, rCellPosition);
    double dist = norm_2(vec_from_A_to_B);

    if (dist < widthOfSection)
    {
        is_in_section = true;
    }

    // 2nd line
    intercept[0] = (m*m*(xBottom-offset) + rCellPosition[0] + m*rCellPosition[1])/(1+m*m);
    intercept[1] = m*(intercept[0] - (xBottom-offset));

    vec_from_A_to_B = mrCrypt.rGetMesh().GetVectorFromAtoB(intercept, rCellPosition);
    dist = norm_2(vec_from_A_to_B);

    if (dist < widthOfSection)
    {
        is_in_section = true;
    }

    return is_in_section;
}
| 33.947368 | 151 | 0.686202 | [
"vector"
] |
d53146e14bf50243b2d715361ad4ad4735d65281 | 64,557 | cpp | C++ | src/sksl/codegen/SkSLGLSLCodeGenerator.cpp | twinsunllc/skia | 8318cc9928ef12577f249f49250dd94ee2bc1d28 | [
"BSD-3-Clause"
] | null | null | null | src/sksl/codegen/SkSLGLSLCodeGenerator.cpp | twinsunllc/skia | 8318cc9928ef12577f249f49250dd94ee2bc1d28 | [
"BSD-3-Clause"
] | null | null | null | src/sksl/codegen/SkSLGLSLCodeGenerator.cpp | twinsunllc/skia | 8318cc9928ef12577f249f49250dd94ee2bc1d28 | [
"BSD-3-Clause"
] | null | null | null | /*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/sksl/codegen/SkSLGLSLCodeGenerator.h"
#include "include/core/SkSpan.h"
#include "include/core/SkTypes.h"
#include "include/private/SkSLDefines.h"
#include "include/private/SkSLLayout.h"
#include "include/private/SkSLModifiers.h"
#include "include/private/SkSLProgramElement.h"
#include "include/private/SkSLProgramKind.h"
#include "include/private/SkSLStatement.h"
#include "include/private/SkSLString.h"
#include "include/private/SkTArray.h"
#include "include/sksl/SkSLErrorReporter.h"
#include "include/sksl/SkSLPosition.h"
#include "src/sksl/SkSLBuiltinTypes.h"
#include "src/sksl/SkSLCompiler.h"
#include "src/sksl/SkSLGLSL.h"
#include "src/sksl/SkSLLexer.h"
#include "src/sksl/SkSLOutputStream.h"
#include "src/sksl/SkSLProgramSettings.h"
#include "src/sksl/SkSLUtil.h"
#include "src/sksl/ir/SkSLBinaryExpression.h"
#include "src/sksl/ir/SkSLBlock.h"
#include "src/sksl/ir/SkSLConstructor.h"
#include "src/sksl/ir/SkSLConstructorArrayCast.h"
#include "src/sksl/ir/SkSLConstructorDiagonalMatrix.h"
#include "src/sksl/ir/SkSLDoStatement.h"
#include "src/sksl/ir/SkSLExpression.h"
#include "src/sksl/ir/SkSLExpressionStatement.h"
#include "src/sksl/ir/SkSLExtension.h"
#include "src/sksl/ir/SkSLFieldAccess.h"
#include "src/sksl/ir/SkSLForStatement.h"
#include "src/sksl/ir/SkSLFunctionCall.h"
#include "src/sksl/ir/SkSLFunctionDeclaration.h"
#include "src/sksl/ir/SkSLFunctionDefinition.h"
#include "src/sksl/ir/SkSLFunctionPrototype.h"
#include "src/sksl/ir/SkSLIfStatement.h"
#include "src/sksl/ir/SkSLIndexExpression.h"
#include "src/sksl/ir/SkSLInterfaceBlock.h"
#include "src/sksl/ir/SkSLLiteral.h"
#include "src/sksl/ir/SkSLModifiersDeclaration.h"
#include "src/sksl/ir/SkSLPostfixExpression.h"
#include "src/sksl/ir/SkSLPrefixExpression.h"
#include "src/sksl/ir/SkSLProgram.h"
#include "src/sksl/ir/SkSLReturnStatement.h"
#include "src/sksl/ir/SkSLSetting.h"
#include "src/sksl/ir/SkSLStructDefinition.h"
#include "src/sksl/ir/SkSLSwitchCase.h"
#include "src/sksl/ir/SkSLSwitchStatement.h"
#include "src/sksl/ir/SkSLSwizzle.h"
#include "src/sksl/ir/SkSLTernaryExpression.h"
#include "src/sksl/ir/SkSLType.h"
#include "src/sksl/ir/SkSLVarDeclarations.h"
#include "src/sksl/ir/SkSLVariable.h"
#include "src/sksl/ir/SkSLVariableReference.h"
#include "src/sksl/spirv.h"
#include <memory>
#include <type_traits>
#include <vector>
namespace SkSL {
// Emits `s` to the current output stream. When the cursor is at the start of
// a line, the current indentation (one indent unit per level) is emitted
// first. Empty strings are ignored entirely so they never trigger indentation.
void GLSLCodeGenerator::write(std::string_view s) {
    if (s.empty()) {
        return;
    }
    if (fAtLineStart) {
        int levels = fIndentation;
        while (levels-- > 0) {
            fOut->writeText(" ");
        }
    }
    fOut->write(s.data(), s.length());
    fAtLineStart = false;
}
// Emits `s` followed by a newline; the next write() will re-indent.
void GLSLCodeGenerator::writeLine(std::string_view s) {
    this->write(s);
    fOut->writeText("\n");
    fAtLineStart = true;
}
// Terminates the current line with a newline if anything has been written on
// it; a no-op when the cursor is already at the start of a line.
void GLSLCodeGenerator::finishLine() {
    if (fAtLineStart) {
        return;
    }
    this->writeLine();
}
// Emits a `#extension` directive into the dedicated extension stream, marked
// with either the "require" or the "enable" behavior.
void GLSLCodeGenerator::writeExtension(std::string_view name, bool require) {
    const char* behavior = require ? " : require\n" : " : enable\n";
    fExtensions.writeText("#extension ");
    fExtensions.write(name.data(), name.length());
    fExtensions.writeText(behavior);
}
// Whether the target dialect uses precision qualifiers (delegates to caps).
bool GLSLCodeGenerator::usesPrecisionModifiers() const {
    return this->caps().usesPrecisionModifiers();
}
// Returns the GLSL name of an SkSL type, with array dimensions, e.g.
// `float[2]`. SkSL-only types (half, short, ushort and their vectors) are
// mapped to the closest GLSL equivalent.
std::string GLSLCodeGenerator::getTypeName(const Type& raw) {
    const Type& type = raw.resolve();
    switch (type.typeKind()) {
        case Type::TypeKind::kVector: {
            // half/float vectors both lower to `vecN`; integer, unsigned and
            // bool vectors use the GLSL i/u/b prefixes.
            const Type& component = type.componentType();
            std::string result;
            if (component.matches(*fContext.fTypes.fFloat) ||
                component.matches(*fContext.fTypes.fHalf)) {
                result = "vec";
            }
            else if (component.isSigned()) {
                result = "ivec";
            }
            else if (component.isUnsigned()) {
                result = "uvec";
            }
            else if (component.matches(*fContext.fTypes.fBool)) {
                result = "bvec";
            }
            else {
                SK_ABORT("unsupported vector type");
            }
            result += std::to_string(type.columns());
            return result;
        }
        case Type::TypeKind::kMatrix: {
            std::string result;
            const Type& component = type.componentType();
            if (component.matches(*fContext.fTypes.fFloat) ||
                component.matches(*fContext.fTypes.fHalf)) {
                result = "mat";
            }
            else {
                SK_ABORT("unsupported matrix type");
            }
            result += std::to_string(type.columns());
            // Non-square matrices are spelled matCxR.
            if (type.columns() != type.rows()) {
                result += "x";
                result += std::to_string(type.rows());
            }
            return result;
        }
        case Type::TypeKind::kArray: {
            std::string baseTypeName = this->getTypeName(type.componentType());
            return String::printf("%s[%d]", baseTypeName.c_str(), type.columns());
        }
        case Type::TypeKind::kScalar: {
            if (type.matches(*fContext.fTypes.fHalf)) {
                return "float";
            }
            else if (type.matches(*fContext.fTypes.fShort)) {
                return "int";
            }
            else if (type.matches(*fContext.fTypes.fUShort)) {
                return "uint";
            }
            else {
                return std::string(type.name());
            }
            break;
        }
        default:
            // All other kinds (structs, samplers, ...) use their SkSL name.
            return std::string(type.name());
    }
}
void GLSLCodeGenerator::writeStructDefinition(const StructDefinition& s) {
const Type& type = s.type();
this->write("struct ");
this->write(type.name());
this->writeLine(" {");
fIndentation++;
for (const auto& f : type.fields()) {
this->writeModifiers(f.fModifiers, false);
this->writeTypePrecision(*f.fType);
const Type& baseType = f.fType->isArray() ? f.fType->componentType() : *f.fType;
this->writeType(baseType);
this->write(" ");
this->write(f.fName);
if (f.fType->isArray()) {
this->write("[" + std::to_string(f.fType->columns()) + "]");
}
this->writeLine(";");
}
fIndentation--;
this->writeLine("};");
}
// Emits the GLSL spelling of `type` (see getTypeName).
void GLSLCodeGenerator::writeType(const Type& type) {
    this->write(this->getTypeName(type));
}
// Central expression dispatcher: routes each IR expression kind to its
// dedicated writer. `parentPrecedence` lets sub-writers decide whether they
// must parenthesize themselves.
void GLSLCodeGenerator::writeExpression(const Expression& expr, Precedence parentPrecedence) {
    switch (expr.kind()) {
        case Expression::Kind::kBinary:
            this->writeBinaryExpression(expr.as<BinaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorDiagonalMatrix:
            this->writeConstructorDiagonalMatrix(expr.as<ConstructorDiagonalMatrix>(),
                                                 parentPrecedence);
            break;
        case Expression::Kind::kConstructorArrayCast:
            // Array casts are SkSL-only; emit just the inner argument.
            this->writeExpression(*expr.as<ConstructorArrayCast>().argument(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorArray:
        case Expression::Kind::kConstructorCompound:
        case Expression::Kind::kConstructorMatrixResize:
        case Expression::Kind::kConstructorSplat:
        case Expression::Kind::kConstructorStruct:
            this->writeAnyConstructor(expr.asAnyConstructor(), parentPrecedence);
            break;
        case Expression::Kind::kConstructorScalarCast:
        case Expression::Kind::kConstructorCompoundCast:
            this->writeCastConstructor(expr.asAnyConstructor(), parentPrecedence);
            break;
        case Expression::Kind::kFieldAccess:
            this->writeFieldAccess(expr.as<FieldAccess>());
            break;
        case Expression::Kind::kFunctionCall:
            this->writeFunctionCall(expr.as<FunctionCall>());
            break;
        case Expression::Kind::kLiteral:
            this->writeLiteral(expr.as<Literal>());
            break;
        case Expression::Kind::kPrefix:
            this->writePrefixExpression(expr.as<PrefixExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kPostfix:
            this->writePostfixExpression(expr.as<PostfixExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kSetting:
            this->writeSetting(expr.as<Setting>());
            break;
        case Expression::Kind::kSwizzle:
            this->writeSwizzle(expr.as<Swizzle>());
            break;
        case Expression::Kind::kVariableReference:
            this->writeVariableReference(expr.as<VariableReference>());
            break;
        case Expression::Kind::kTernary:
            this->writeTernaryExpression(expr.as<TernaryExpression>(), parentPrecedence);
            break;
        case Expression::Kind::kIndex:
            this->writeIndexExpression(expr.as<IndexExpression>());
            break;
        default:
            SkDEBUGFAILF("unsupported expression: %s", expr.description().c_str());
            break;
    }
}
// True when `expr` is a direct call to the abs() intrinsic.
static bool is_abs(Expression& expr) {
    if (!expr.is<FunctionCall>()) {
        return false;
    }
    return expr.as<FunctionCall>().function().intrinsicKind() == k_abs_IntrinsicKind;
}
// turns min(abs(x), y) into ((tmpVar1 = abs(x)) < (tmpVar2 = y) ? tmpVar1 : tmpVar2) to avoid a
// Tegra3 compiler bug.
void GLSLCodeGenerator::writeMinAbsHack(Expression& absExpr, Expression& otherExpr) {
    SkASSERT(!this->caps().canUseMinAndAbsTogether());
    // Fresh uniquely-numbered temporaries; their declarations are accumulated
    // in fFunctionHeader and emitted at the top of the enclosing function.
    std::string tmpVar1 = "minAbsHackVar" + std::to_string(fVarCount++);
    std::string tmpVar2 = "minAbsHackVar" + std::to_string(fVarCount++);
    this->fFunctionHeader += std::string(" ") + this->getTypePrecision(absExpr.type()) +
                             this->getTypeName(absExpr.type()) + " " + tmpVar1 + ";\n";
    this->fFunctionHeader += std::string(" ") + this->getTypePrecision(otherExpr.type()) +
                             this->getTypeName(otherExpr.type()) + " " + tmpVar2 + ";\n";
    this->write("((" + tmpVar1 + " = ");
    this->writeExpression(absExpr, Precedence::kTopLevel);
    this->write(") < (" + tmpVar2 + " = ");
    this->writeExpression(otherExpr, Precedence::kAssignment);
    this->write(") ? " + tmpVar1 + " : " + tmpVar2 + ")");
}
// Emits `(1.0 / sqrt(x))` for targets without the inversesqrt() builtin
// (pre-GLSL-1.30; see the k_inversesqrt case in writeFunctionCall).
void GLSLCodeGenerator::writeInverseSqrtHack(const Expression& x) {
    this->write("(1.0 / sqrt(");
    this->writeExpression(x, Precedence::kTopLevel);
    this->write("))");
}
// Emits a call to a hand-written determinant helper for targets without the
// determinant() builtin (see the k_determinant case in writeFunctionCall).
// Each helper is emitted into fExtraFunctions at most once and then reused.
// Only 2x2/3x3/4x4 float/half matrices are supported.
void GLSLCodeGenerator::writeDeterminantHack(const Expression& mat) {
    std::string name;
    const Type& type = mat.type();
    if (type.matches(*fContext.fTypes.fFloat2x2) || type.matches(*fContext.fTypes.fHalf2x2)) {
        name = "_determinant2";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            fExtraFunctions.writeText((
                "float " + name + "(mat2 m) {"
                " return m[0][0] * m[1][1] - m[0][1] * m[1][0];"
                "}"
            ).c_str());
        }
    }
    else if (type.matches(*fContext.fTypes.fFloat3x3) || type.matches(*fContext.fTypes.fHalf3x3)) {
        name = "_determinant3";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            fExtraFunctions.writeText((
                "float " + name + "(mat3 m) {"
                " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2];"
                " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2];"
                " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2];"
                " float b01 = a22 * a11 - a12 * a21;"
                " float b11 = -a22 * a10 + a12 * a20;"
                " float b21 = a21 * a10 - a11 * a20;"
                " return a00 * b01 + a01 * b11 + a02 * b21;"
                "}"
            ).c_str());
        }
    }
    else if (type.matches(*fContext.fTypes.fFloat4x4) || type.matches(*fContext.fTypes.fHalf4x4)) {
        name = "_determinant4";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            // Bug fix: the helper returns a scalar determinant, so it must be
            // declared as returning `float`, not `mat4` — GLSL performs no
            // implicit float->mat4 conversion and would reject the shader.
            fExtraFunctions.writeText((
                "float " + name + "(mat4 m) {"
                " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3];"
                " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3];"
                " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3];"
                " float a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3];"
                " float b00 = a00 * a11 - a01 * a10;"
                " float b01 = a00 * a12 - a02 * a10;"
                " float b02 = a00 * a13 - a03 * a10;"
                " float b03 = a01 * a12 - a02 * a11;"
                " float b04 = a01 * a13 - a03 * a11;"
                " float b05 = a02 * a13 - a03 * a12;"
                " float b06 = a20 * a31 - a21 * a30;"
                " float b07 = a20 * a32 - a22 * a30;"
                " float b08 = a20 * a33 - a23 * a30;"
                " float b09 = a21 * a32 - a22 * a31;"
                " float b10 = a21 * a33 - a23 * a31;"
                " float b11 = a22 * a33 - a23 * a32;"
                " return b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;"
                "}"
            ).c_str());
        }
    }
    else {
        SkASSERT(false);
    }
    this->write(name + "(");
    this->writeExpression(mat, Precedence::kTopLevel);
    this->write(")");
}
// Emits a call to a hand-written matrix-inverse helper for targets without
// the inverse() builtin (pre-GLSL-1.40; see the k_inverse case in
// writeFunctionCall). Each helper is emitted into fExtraFunctions at most
// once. Only 2x2/3x3/4x4 float/half matrices are supported; the formulas are
// the standard adjugate-over-determinant expansions.
void GLSLCodeGenerator::writeInverseHack(const Expression& mat) {
    std::string name;
    const Type& type = mat.type();
    if (type.matches(*fContext.fTypes.fFloat2x2) || type.matches(*fContext.fTypes.fHalf2x2)) {
        name = "_inverse2";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            fExtraFunctions.writeText((
                "mat2 " + name + "(mat2 m) {"
                " return mat2(m[1][1], -m[0][1], -m[1][0], m[0][0]) / "
                "(m[0][0] * m[1][1] - m[0][1] * m[1][0]);"
                "}"
            ).c_str());
        }
    }
    else if (type.matches(*fContext.fTypes.fFloat3x3) || type.matches(*fContext.fTypes.fHalf3x3)) {
        name = "_inverse3";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            fExtraFunctions.writeText((
                "mat3 " + name + "(mat3 m) {"
                " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2];"
                " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2];"
                " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2];"
                " float b01 = a22 * a11 - a12 * a21;"
                " float b11 = -a22 * a10 + a12 * a20;"
                " float b21 = a21 * a10 - a11 * a20;"
                " float det = a00 * b01 + a01 * b11 + a02 * b21;"
                " return mat3(b01, (-a22 * a01 + a02 * a21), (a12 * a01 - a02 * a11),"
                " b11, (a22 * a00 - a02 * a20), (-a12 * a00 + a02 * a10),"
                " b21, (-a21 * a00 + a01 * a20), (a11 * a00 - a01 * a10)) / det;"
                "}"
            ).c_str());
        }
    }
    else if (type.matches(*fContext.fTypes.fFloat4x4) || type.matches(*fContext.fTypes.fHalf4x4)) {
        name = "_inverse4";
        if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
            fWrittenIntrinsics.insert(name);
            fExtraFunctions.writeText((
                "mat4 " + name + "(mat4 m) {"
                " float a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3];"
                " float a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3];"
                " float a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3];"
                " float a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3];"
                " float b00 = a00 * a11 - a01 * a10;"
                " float b01 = a00 * a12 - a02 * a10;"
                " float b02 = a00 * a13 - a03 * a10;"
                " float b03 = a01 * a12 - a02 * a11;"
                " float b04 = a01 * a13 - a03 * a11;"
                " float b05 = a02 * a13 - a03 * a12;"
                " float b06 = a20 * a31 - a21 * a30;"
                " float b07 = a20 * a32 - a22 * a30;"
                " float b08 = a20 * a33 - a23 * a30;"
                " float b09 = a21 * a32 - a22 * a31;"
                " float b10 = a21 * a33 - a23 * a31;"
                " float b11 = a22 * a33 - a23 * a32;"
                " float det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - "
                " b04 * b07 + b05 * b06;"
                " return mat4("
                " a11 * b11 - a12 * b10 + a13 * b09,"
                " a02 * b10 - a01 * b11 - a03 * b09,"
                " a31 * b05 - a32 * b04 + a33 * b03,"
                " a22 * b04 - a21 * b05 - a23 * b03,"
                " a12 * b08 - a10 * b11 - a13 * b07,"
                " a00 * b11 - a02 * b08 + a03 * b07,"
                " a32 * b02 - a30 * b05 - a33 * b01,"
                " a20 * b05 - a22 * b02 + a23 * b01,"
                " a10 * b10 - a11 * b08 + a13 * b06,"
                " a01 * b08 - a00 * b10 - a03 * b06,"
                " a30 * b04 - a31 * b02 + a33 * b00,"
                " a21 * b02 - a20 * b04 - a23 * b00,"
                " a11 * b07 - a10 * b09 - a12 * b06,"
                " a00 * b09 - a01 * b07 + a02 * b06,"
                " a31 * b01 - a30 * b03 - a32 * b00,"
                " a20 * b03 - a21 * b01 + a22 * b00) / det;"
                "}"
            ).c_str());
        }
    }
    else {
        SkASSERT(false);
    }
    this->write(name + "(");
    this->writeExpression(mat, Precedence::kTopLevel);
    this->write(")");
}
// Emits a call to a generated transpose helper for targets without the
// transpose() builtin (pre-GLSL-1.30). A helper named "transposeCR" is
// generated per distinct matrix shape; it builds the RxC result by indexing
// the source column-major in row-then-column order.
void GLSLCodeGenerator::writeTransposeHack(const Expression& mat) {
    const Type& type = mat.type();
    std::string name = "transpose" + std::to_string(type.columns()) + std::to_string(type.rows());
    if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
        fWrittenIntrinsics.insert(name);
        std::string typeName = this->getTypeName(type);
        const Type& base = type.componentType();
        // The result type swaps rows and columns relative to the input.
        std::string transposed = this->getTypeName(base.toCompound(fContext,
                                                                   type.rows(),
                                                                   type.columns()));
        fExtraFunctions.writeText((transposed + " " + name + "(" + typeName + " m) {\nreturn " +
                                   transposed + "(").c_str());
        const char* separator = "";
        for (int row = 0; row < type.rows(); ++row) {
            for (int column = 0; column < type.columns(); ++column) {
                fExtraFunctions.writeText(separator);
                fExtraFunctions.writeText(("m[" + std::to_string(column) + "][" +
                                           std::to_string(row) + "]").c_str());
                separator = ", ";
            }
        }
        fExtraFunctions.writeText("); }");
    }
    this->write(name + "(");
    this->writeExpression(mat, Precedence::kTopLevel);
    this->write(")");
}
// Emits a function call. Most calls are written verbatim, but many intrinsics
// need driver-bug workarounds or dialect-specific renames; each case below
// either rewrites the call entirely (and returns early), or adjusts the
// emitted name (`nameWritten`) / closing text (`closingParen`) and falls
// through to the generic argument-writing code at the bottom.
void GLSLCodeGenerator::writeFunctionCall(const FunctionCall& c) {
    const FunctionDeclaration& function = c.function();
    const ExpressionArray& arguments = c.arguments();
    // Set when the call is a texture sample that should receive an implicit
    // LOD-bias argument under the fSharpenTextures setting.
    bool isTextureFunctionWithBias = false;
    bool nameWritten = false;
    const char* closingParen = ")";
    switch (c.function().intrinsicKind()) {
        case k_abs_IntrinsicKind: {
            if (!this->caps().emulateAbsIntFunction())
                break;
            SkASSERT(arguments.size() == 1);
            if (!arguments[0]->type().matches(*fContext.fTypes.fInt)) {
                break;
            }
            // abs(int) on Intel OSX is incorrect, so emulate it:
            std::string name = "_absemulation";
            this->write(name);
            nameWritten = true;
            if (fWrittenIntrinsics.find(name) == fWrittenIntrinsics.end()) {
                fWrittenIntrinsics.insert(name);
                fExtraFunctions.writeText((
                    "int " + name + "(int x) {\n"
                    " return x * sign(x);\n"
                    "}\n"
                ).c_str());
            }
            break;
        }
        case k_atan_IntrinsicKind:
            // Workaround: some drivers miscompile atan(x, -y); rewrite the
            // negation as a multiply.
            if (this->caps().mustForceNegatedAtanParamToFloat() &&
                arguments.size() == 2 &&
                arguments[1]->kind() == Expression::Kind::kPrefix) {
                const PrefixExpression& p = (PrefixExpression&) *arguments[1];
                if (p.getOperator().kind() == Token::Kind::TK_MINUS) {
                    this->write("atan(");
                    this->writeExpression(*arguments[0], Precedence::kSequence);
                    this->write(", -1.0 * ");
                    this->writeExpression(*p.operand(), Precedence::kMultiplicative);
                    this->write(")");
                    return;
                }
            }
            break;
        case k_ldexp_IntrinsicKind:
            // Same negated-parameter workaround as atan, for ldexp.
            if (this->caps().mustForceNegatedLdexpParamToMultiply() &&
                arguments.size() == 2 &&
                arguments[1]->is<PrefixExpression>()) {
                const PrefixExpression& p = arguments[1]->as<PrefixExpression>();
                if (p.getOperator().kind() == Token::Kind::TK_MINUS) {
                    this->write("ldexp(");
                    this->writeExpression(*arguments[0], Precedence::kSequence);
                    this->write(", ");
                    this->writeExpression(*p.operand(), Precedence::kMultiplicative);
                    this->write(" * -1)");
                    return;
                }
            }
            break;
        case k_dFdy_IntrinsicKind:
            // Flipping Y also negates the Y derivatives.
            closingParen = "))";
            this->write("(" SKSL_RTFLIP_NAME ".y * dFdy");
            nameWritten = true;
            [[fallthrough]];
        case k_dFdx_IntrinsicKind:
        case k_fwidth_IntrinsicKind:
            // Derivatives may require an extension; emit it once on demand.
            if (!fFoundDerivatives &&
                this->caps().shaderDerivativeExtensionString()) {
                this->writeExtension(this->caps().shaderDerivativeExtensionString());
                fFoundDerivatives = true;
            }
            break;
        case k_determinant_IntrinsicKind:
            if (!this->caps().builtinDeterminantSupport()) {
                SkASSERT(arguments.size() == 1);
                this->writeDeterminantHack(*arguments[0]);
                return;
            }
            break;
        case k_fma_IntrinsicKind:
            // Expand fma(a, b, c) to ((a) * (b) + (c)) when unsupported.
            if (!this->caps().builtinFMASupport()) {
                SkASSERT(arguments.size() == 3);
                this->write("((");
                this->writeExpression(*arguments[0], Precedence::kSequence);
                this->write(") * (");
                this->writeExpression(*arguments[1], Precedence::kSequence);
                this->write(") + (");
                this->writeExpression(*arguments[2], Precedence::kSequence);
                this->write("))");
                return;
            }
            break;
        case k_fract_IntrinsicKind:
            // Emulate fract() for drivers where it misbehaves on negatives.
            if (!this->caps().canUseFractForNegativeValues()) {
                SkASSERT(arguments.size() == 1);
                this->write("(0.5 - sign(");
                this->writeExpression(*arguments[0], Precedence::kSequence);
                this->write(") * (0.5 - fract(abs(");
                this->writeExpression(*arguments[0], Precedence::kSequence);
                this->write("))))");
                return;
            }
            break;
        case k_inverse_IntrinsicKind:
            if (this->caps().generation() < SkSL::GLSLGeneration::k140) {
                SkASSERT(arguments.size() == 1);
                this->writeInverseHack(*arguments[0]);
                return;
            }
            break;
        case k_inversesqrt_IntrinsicKind:
            if (this->caps().generation() < SkSL::GLSLGeneration::k130) {
                SkASSERT(arguments.size() == 1);
                this->writeInverseSqrtHack(*arguments[0]);
                return;
            }
            break;
        case k_min_IntrinsicKind:
            if (!this->caps().canUseMinAndAbsTogether()) {
                SkASSERT(arguments.size() == 2);
                if (is_abs(*arguments[0])) {
                    this->writeMinAbsHack(*arguments[0], *arguments[1]);
                    return;
                }
                if (is_abs(*arguments[1])) {
                    // note that this violates the GLSL left-to-right evaluation semantics.
                    // I doubt it will ever end up mattering, but it's worth calling out.
                    this->writeMinAbsHack(*arguments[1], *arguments[0]);
                    return;
                }
            }
            break;
        case k_pow_IntrinsicKind:
            if (!this->caps().removePowWithConstantExponent()) {
                break;
            }
            // pow(x, y) on some NVIDIA drivers causes crashes if y is a
            // constant. It's hard to tell what constitutes "constant" here
            // so just replace in all cases.
            // Change pow(x, y) into exp2(y * log2(x))
            this->write("exp2(");
            this->writeExpression(*arguments[1], Precedence::kMultiplicative);
            this->write(" * log2(");
            this->writeExpression(*arguments[0], Precedence::kSequence);
            this->write("))");
            return;
        case k_saturate_IntrinsicKind:
            // saturate() is SkSL-only; lower to clamp(x, 0, 1).
            SkASSERT(arguments.size() == 1);
            this->write("clamp(");
            this->writeExpression(*arguments[0], Precedence::kSequence);
            this->write(", 0.0, 1.0)");
            return;
        case k_sample_IntrinsicKind: {
            // Pre-1.30 GLSL spells sampling as textureDIM / textureDIMProj;
            // derive the dimension suffix and projection-ness from the
            // sampler and coordinate types.
            const char* dim = "";
            bool proj = false;
            const Type& arg0Type = arguments[0]->type();
            const Type& arg1Type = arguments[1]->type();
            switch (arg0Type.dimensions()) {
                case SpvDim1D:
                    dim = "1D";
                    isTextureFunctionWithBias = true;
                    if (arg1Type.matches(*fContext.fTypes.fFloat)) {
                        proj = false;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat2));
                        proj = true;
                    }
                    break;
                case SpvDim2D:
                    dim = "2D";
                    if (!arg0Type.matches(*fContext.fTypes.fSamplerExternalOES)) {
                        isTextureFunctionWithBias = true;
                    }
                    if (arg1Type.matches(*fContext.fTypes.fFloat2)) {
                        proj = false;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat3));
                        proj = true;
                    }
                    break;
                case SpvDim3D:
                    dim = "3D";
                    isTextureFunctionWithBias = true;
                    if (arg1Type.matches(*fContext.fTypes.fFloat3)) {
                        proj = false;
                    } else {
                        SkASSERT(arg1Type.matches(*fContext.fTypes.fFloat4));
                        proj = true;
                    }
                    break;
                case SpvDimCube:
                    dim = "Cube";
                    isTextureFunctionWithBias = true;
                    proj = false;
                    break;
                case SpvDimRect:
                    dim = "2DRect";
                    proj = false;
                    break;
                case SpvDimBuffer:
                    SkASSERT(false); // doesn't exist
                    dim = "Buffer";
                    proj = false;
                    break;
                case SpvDimSubpassData:
                    SkASSERT(false); // doesn't exist
                    dim = "SubpassData";
                    proj = false;
                    break;
            }
            this->write("texture");
            if (this->caps().generation() < SkSL::GLSLGeneration::k130) {
                this->write(dim);
            }
            if (proj) {
                this->write("Proj");
            }
            nameWritten = true;
            break;
        }
        case k_transpose_IntrinsicKind:
            if (this->caps().generation() < SkSL::GLSLGeneration::k130) {
                SkASSERT(arguments.size() == 1);
                this->writeTransposeHack(*arguments[0]);
                return;
            }
            break;
        default:
            break;
    }
    // Generic path: name (unless a workaround already wrote it), then the
    // comma-separated arguments, then the optional sharpen-textures bias.
    if (!nameWritten) {
        this->write(function.mangledName());
    }
    this->write("(");
    const char* separator = "";
    for (const auto& arg : arguments) {
        this->write(separator);
        separator = ", ";
        this->writeExpression(*arg, Precedence::kSequence);
    }
    if (fProgram.fConfig->fSettings.fSharpenTextures && isTextureFunctionWithBias) {
        this->write(", -0.5");
    }
    this->write(closingParen);
}
// Emits a diagonal-matrix constructor such as `mat4x2(x)`, working around a
// known 4x2 driver bug; all other shapes use the generic constructor path.
void GLSLCodeGenerator::writeConstructorDiagonalMatrix(const ConstructorDiagonalMatrix& c,
                                                       Precedence parentPrecedence) {
    if (c.type().columns() == 4 && c.type().rows() == 2) {
        // Due to a longstanding bug in glslang and Mesa, several GPU drivers generate diagonal 4x2
        // matrices incorrectly. (skia:12003, https://github.com/KhronosGroup/glslang/pull/2646)
        // We can work around this issue by multiplying a scalar by the identity matrix.
        // In practice, this doesn't come up naturally in real code and we don't know every affected
        // driver, so we just apply this workaround everywhere.
        this->write("(");
        this->writeType(c.type());
        this->write("(1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0) * ");
        this->writeExpression(*c.argument(), Precedence::kMultiplicative);
        this->write(")");
        return;
    }
    this->writeAnyConstructor(c, parentPrecedence);
}
void GLSLCodeGenerator::writeCastConstructor(const AnyConstructor& c, Precedence parentPrecedence) {
const auto arguments = c.argumentSpan();
SkASSERT(arguments.size() == 1);
const Expression& argument = *arguments.front();
if ((this->getTypeName(c.type()) == this->getTypeName(argument.type()) ||
(argument.type().matches(*fContext.fTypes.fFloatLiteral)))) {
// In cases like half(float), they're different types as far as SkSL is concerned but
// the same type as far as GLSL is concerned. We avoid a redundant float(float) by just
// writing out the inner expression here.
this->writeExpression(argument, parentPrecedence);
return;
}
// This cast should be emitted as-is.
return this->writeAnyConstructor(c, parentPrecedence);
}
// Generic constructor emission: `TypeName(arg1, arg2, ...)`.
void GLSLCodeGenerator::writeAnyConstructor(const AnyConstructor& c, Precedence parentPrecedence) {
    this->writeType(c.type());
    this->write("(");
    bool firstArg = true;
    for (const auto& arg : c.argumentSpan()) {
        if (!firstArg) {
            this->write(", ");
        }
        firstArg = false;
        this->writeExpression(*arg, Precedence::kSequence);
    }
    this->write(")");
}
// Emits a reference to the fragment coordinate. On devices where gl_FragCoord
// is unusable, a per-function resolved value is reconstructed from the
// sk_FragCoord_Workaround varying; otherwise a Y-flipped copy of gl_FragCoord
// is declared once per function. Both setups are appended to fFunctionHeader.
void GLSLCodeGenerator::writeFragCoord() {
    if (!this->caps().canUseFragCoord()) {
        if (!fSetupFragCoordWorkaround) {
            const char* precision = usesPrecisionModifiers() ? "highp " : "";
            fFunctionHeader += precision;
            fFunctionHeader += " float sk_FragCoord_InvW = 1. / sk_FragCoord_Workaround.w;\n";
            fFunctionHeader += precision;
            fFunctionHeader += " vec4 sk_FragCoord_Resolved = "
                "vec4(sk_FragCoord_Workaround.xyz * sk_FragCoord_InvW, sk_FragCoord_InvW);\n";
            // Ensure that we get exact .5 values for x and y.
            fFunctionHeader += " sk_FragCoord_Resolved.xy = floor(sk_FragCoord_Resolved.xy) + "
                "vec2(.5);\n";
            fSetupFragCoordWorkaround = true;
        }
        this->write("sk_FragCoord_Resolved");
        return;
    }
    if (!fSetupFragPosition) {
        fFunctionHeader += usesPrecisionModifiers() ? "highp " : "";
        fFunctionHeader += " vec4 sk_FragCoord = vec4("
            "gl_FragCoord.x, "
            SKSL_RTFLIP_NAME ".x + " SKSL_RTFLIP_NAME ".y * gl_FragCoord.y, "
            "gl_FragCoord.z, "
            "gl_FragCoord.w);\n";
        fSetupFragPosition = true;
    }
    this->write("sk_FragCoord");
}
// Emits a variable reference, mapping SkSL builtins (sk_FragColor,
// sk_FragCoord, sk_Clockwise, ...) to their GLSL equivalents; ordinary
// variables are written by name.
void GLSLCodeGenerator::writeVariableReference(const VariableReference& ref) {
    switch (ref.variable()->modifiers().fLayout.fBuiltin) {
        case SK_FRAGCOLOR_BUILTIN:
            if (this->caps().mustDeclareFragmentShaderOutput()) {
                this->write("sk_FragColor");
            } else {
                this->write("gl_FragColor");
            }
            break;
        case SK_SECONDARYFRAGCOLOR_BUILTIN:
            this->write("gl_SecondaryFragColorEXT");
            break;
        case SK_FRAGCOORD_BUILTIN:
            this->writeFragCoord();
            break;
        case SK_CLOCKWISE_BUILTIN:
            // Facing flips along with the Y axis; compute a corrected value
            // once per function via fFunctionHeader.
            if (!fSetupClockwise) {
                fFunctionHeader +=
                    " bool sk_Clockwise = gl_FrontFacing;\n"
                    " if (" SKSL_RTFLIP_NAME ".y < 0.0) {\n"
                    " sk_Clockwise = !sk_Clockwise;\n"
                    " }\n";
                fSetupClockwise = true;
            }
            this->write("sk_Clockwise");
            break;
        case SK_VERTEXID_BUILTIN:
            this->write("gl_VertexID");
            break;
        case SK_INSTANCEID_BUILTIN:
            this->write("gl_InstanceID");
            break;
        case SK_LASTFRAGCOLOR_BUILTIN:
            if (this->caps().fbFetchSupport()) {
                this->write(this->caps().fbFetchColorName());
            } else {
                fContext.fErrors->error(ref.fPosition,
                        "sk_LastFragColor requires framebuffer fetch support");
            }
            break;
        default:
            this->write(ref.variable()->name());
            break;
    }
}
// Emits `base[index]`. The index is at top-level precedence since the
// brackets already delimit it.
void GLSLCodeGenerator::writeIndexExpression(const IndexExpression& expr) {
    this->writeExpression(*expr.base(), Precedence::kPostfix);
    this->write("[");
    this->writeExpression(*expr.index(), Precedence::kTopLevel);
    this->write("]");
}
// True when the field access targets the sk_Position builtin.
// Marked `static` for internal linkage, consistent with is_abs() above —
// this is a file-local helper and should not be exported.
static bool is_sk_position(const FieldAccess& f) {
    return f.base()->type().fields()[f.fieldIndex()].fName == "sk_Position";
}
// Emits a field access. For anonymous-interface-block members only the field
// name is written (no `base.` prefix); the sk_Position/sk_PointSize builtins
// map to their gl_ equivalents.
void GLSLCodeGenerator::writeFieldAccess(const FieldAccess& f) {
    if (f.ownerKind() == FieldAccess::OwnerKind::kDefault) {
        this->writeExpression(*f.base(), Precedence::kPostfix);
        this->write(".");
    }
    const Type& baseType = f.base()->type();
    std::string_view name = baseType.fields()[f.fieldIndex()].fName;
    if (name == "sk_Position") {
        this->write("gl_Position");
    } else if (name == "sk_PointSize") {
        this->write("gl_PointSize");
    } else {
        // Reuse the already-fetched name rather than re-indexing the fields.
        this->write(name);
    }
}
// Emits a swizzle such as `.xyz`, mapping component indices 0-3 to x/y/z/w.
void GLSLCodeGenerator::writeSwizzle(const Swizzle& swizzle) {
    this->writeExpression(*swizzle.base(), Precedence::kPostfix);
    this->write(".");
    for (int component : swizzle.components()) {
        SkASSERT(component >= 0 && component <= 3);
        static constexpr char kLetters[4] = {'x', 'y', 'z', 'w'};
        this->write(std::string_view(&kLetters[component], 1));
    }
}
// Rewrites `matA == matB` as a comma-expression that first assigns both sides
// to fresh temporaries, then compares the temporaries — a workaround for
// drivers that miscompile direct matrix comparisons (see
// rewriteMatrixComparisons() in writeBinaryExpression). Temporary
// declarations are accumulated in fFunctionHeader.
void GLSLCodeGenerator::writeMatrixComparisonWorkaround(const BinaryExpression& b) {
    const Expression& left = *b.left();
    const Expression& right = *b.right();
    Operator op = b.getOperator();
    SkASSERT(op.kind() == Token::Kind::TK_EQEQ || op.kind() == Token::Kind::TK_NEQ);
    SkASSERT(left.type().isMatrix());
    SkASSERT(right.type().isMatrix());
    std::string tempMatrix1 = "_tempMatrix" + std::to_string(fVarCount++);
    std::string tempMatrix2 = "_tempMatrix" + std::to_string(fVarCount++);
    this->fFunctionHeader += std::string(" ") + this->getTypePrecision(left.type()) +
                             this->getTypeName(left.type()) + " " + tempMatrix1 + ";\n " +
                             this->getTypePrecision(right.type()) +
                             this->getTypeName(right.type()) + " " + tempMatrix2 + ";\n";
    this->write("((" + tempMatrix1 + " = ");
    this->writeExpression(left, Precedence::kAssignment);
    this->write("), (" + tempMatrix2 + " = ");
    this->writeExpression(right, Precedence::kAssignment);
    this->write("), (" + tempMatrix1);
    this->write(op.operatorName());
    this->write(tempMatrix2 + "))");
}
// Emits a binary expression, applying driver workarounds first: short-circuit
// operators may be unfolded to ternaries, matrix comparisons may be rewritten
// via temporaries, and vertex-shader sk_Position assignments may be mirrored
// into sk_FragCoord_Workaround when gl_FragCoord is unusable.
void GLSLCodeGenerator::writeBinaryExpression(const BinaryExpression& b,
                                              Precedence parentPrecedence) {
    const Expression& left = *b.left();
    const Expression& right = *b.right();
    Operator op = b.getOperator();
    if (this->caps().unfoldShortCircuitAsTernary() &&
        (op.kind() == Token::Kind::TK_LOGICALAND || op.kind() == Token::Kind::TK_LOGICALOR)) {
        this->writeShortCircuitWorkaroundExpression(b, parentPrecedence);
        return;
    }
    if (this->caps().rewriteMatrixComparisons() &&
        left.type().isMatrix() && right.type().isMatrix() &&
        (op.kind() == Token::Kind::TK_EQEQ || op.kind() == Token::Kind::TK_NEQ)) {
        this->writeMatrixComparisonWorkaround(b);
        return;
    }
    Precedence precedence = op.getBinaryPrecedence();
    // Parenthesize when this operator binds no tighter than its context.
    if (precedence >= parentPrecedence) {
        this->write("(");
    }
    bool positionWorkaround = fProgram.fConfig->fKind == ProgramKind::kVertex &&
                              op.isAssignment() &&
                              left.is<FieldAccess>() &&
                              is_sk_position(left.as<FieldAccess>()) &&
                              !right.containsRTAdjust() &&
                              !this->caps().canUseFragCoord();
    if (positionWorkaround) {
        this->write("sk_FragCoord_Workaround = (");
    }
    this->writeExpression(left, precedence);
    this->write(op.operatorName());
    this->writeExpression(right, precedence);
    if (positionWorkaround) {
        this->write(")");
    }
    if (precedence >= parentPrecedence) {
        this->write(")");
    }
}
// Emits `&&` / `||` as an equivalent ternary for drivers that miscompile
// short-circuit operators; short-circuit semantics are preserved because the
// right operand appears only in the conditionally-evaluated branch.
void GLSLCodeGenerator::writeShortCircuitWorkaroundExpression(const BinaryExpression& b,
                                                              Precedence parentPrecedence) {
    if (Precedence::kTernary >= parentPrecedence) {
        this->write("(");
    }
    // Transform:
    // a && b => a ? b : false
    // a || b => a ? true : b
    this->writeExpression(*b.left(), Precedence::kTernary);
    this->write(" ? ");
    if (b.getOperator().kind() == Token::Kind::TK_LOGICALAND) {
        this->writeExpression(*b.right(), Precedence::kTernary);
    } else {
        Literal boolTrue(Position(), /*value=*/1, fContext.fTypes.fBool.get());
        this->writeLiteral(boolTrue);
    }
    this->write(" : ");
    if (b.getOperator().kind() == Token::Kind::TK_LOGICALAND) {
        Literal boolFalse(Position(), /*value=*/0, fContext.fTypes.fBool.get());
        this->writeLiteral(boolFalse);
    } else {
        this->writeExpression(*b.right(), Precedence::kTernary);
    }
    if (Precedence::kTernary >= parentPrecedence) {
        this->write(")");
    }
}
// Emits `test ? ifTrue : ifFalse`, parenthesized whenever the surrounding
// context binds at least as tightly as a ternary.
void GLSLCodeGenerator::writeTernaryExpression(const TernaryExpression& t,
                                               Precedence parentPrecedence) {
    const bool needsParens = Precedence::kTernary >= parentPrecedence;
    if (needsParens) {
        this->write("(");
    }
    this->writeExpression(*t.test(), Precedence::kTernary);
    this->write(" ? ");
    this->writeExpression(*t.ifTrue(), Precedence::kTernary);
    this->write(" : ");
    this->writeExpression(*t.ifFalse(), Precedence::kTernary);
    if (needsParens) {
        this->write(")");
    }
}
// Emits a prefix expression such as `-x` or `!flag`, parenthesized when the
// surrounding context binds at least as tightly as a prefix operator.
void GLSLCodeGenerator::writePrefixExpression(const PrefixExpression& p,
                                              Precedence parentPrecedence) {
    const bool needsParens = Precedence::kPrefix >= parentPrecedence;
    if (needsParens) {
        this->write("(");
    }
    this->write(p.getOperator().tightOperatorName());
    this->writeExpression(*p.operand(), Precedence::kPrefix);
    if (needsParens) {
        this->write(")");
    }
}
// Emits a postfix expression such as `i++`, parenthesized when the
// surrounding context binds at least as tightly as a postfix operator.
void GLSLCodeGenerator::writePostfixExpression(const PostfixExpression& p,
                                               Precedence parentPrecedence) {
    const bool needsParens = Precedence::kPostfix >= parentPrecedence;
    if (needsParens) {
        this->write("(");
    }
    this->writeExpression(*p.operand(), Precedence::kPostfix);
    this->write(p.getOperator().tightOperatorName());
    if (needsParens) {
        this->write(")");
    }
}
// Emits a literal. Unsigned literals get a `u` suffix and are masked to their
// bit width; floats, signed ints and bools are written directly.
void GLSLCodeGenerator::writeLiteral(const Literal& l) {
    const Type& type = l.type();
    if (type.isFloat()) {
        this->write(skstd::to_string(l.floatValue()));
        return;
    }
    if (type.isInteger()) {
        if (type.matches(*fContext.fTypes.fUInt)) {
            this->write(std::to_string(l.intValue() & 0xffffffff) + "u");
        } else if (type.matches(*fContext.fTypes.fUShort)) {
            this->write(std::to_string(l.intValue() & 0xffff) + "u");
        } else {
            this->write(std::to_string(l.intValue()));
        }
        return;
    }
    SkASSERT(type.isBoolean());
    this->write(l.boolValue() ? "true" : "false");
}
// Settings are constant-folded before code generation; reaching this writer
// with a live Setting node indicates a compiler bug.
void GLSLCodeGenerator::writeSetting(const Setting& s) {
    SK_ABORT("internal error; setting was not folded to a constant during compilation\n");
}
// Emits a function signature (no trailing `;` or body): precision + return
// type, mangled name, and the parameter list. Array parameters are written
// with the dimension after the parameter name.
void GLSLCodeGenerator::writeFunctionDeclaration(const FunctionDeclaration& f) {
    this->writeTypePrecision(f.returnType());
    this->writeType(f.returnType());
    this->write(" " + f.mangledName() + "(");
    const char* separator = "";
    for (const auto& param : f.parameters()) {
        // This is a workaround for our test files. They use the runtime effect signature, so main
        // takes a coords parameter. The IR generator tags those with a builtin ID (sk_FragCoord),
        // and we omit them from the declaration here, so the function is valid GLSL.
        if (f.isMain() && param->modifiers().fLayout.fBuiltin != -1) {
            continue;
        }
        this->write(separator);
        separator = ", ";
        this->writeModifiers(param->modifiers(), false);
        // Peel off the array dimension (if any) so it can be emitted after
        // the parameter name, e.g. `float x[4]`.
        std::vector<int> sizes;
        const Type* type = &param->type();
        if (type->isArray()) {
            sizes.push_back(type->columns());
            type = &type->componentType();
        }
        this->writeTypePrecision(*type);
        this->writeType(*type);
        this->write(" " + std::string(param->name()));
        for (int s : sizes) {
            this->write("[" + std::to_string(s) + "]");
        }
    }
    this->write(")");
}
// Emits a full function definition. The body is first rendered into a local
// buffer so that any text accumulated in fFunctionHeader while writing the
// body (e.g. workaround declarations) can be emitted ahead of the body text.
void GLSLCodeGenerator::writeFunction(const FunctionDefinition& f) {
    fSetupFragPosition = false;
    fSetupFragCoordWorkaround = false;
    this->writeFunctionDeclaration(f.declaration());
    this->writeLine(" {");
    fIndentation++;
    fFunctionHeader.clear();
    // Redirect output into a temporary buffer while generating the body.
    OutputStream* oldOut = fOut;
    StringStream buffer;
    fOut = &buffer;
    for (const std::unique_ptr<Statement>& stmt : f.body()->as<Block>().children()) {
        if (!stmt->isEmpty()) {
            this->writeStatement(*stmt);
            this->finishLine();
        }
    }
    fIndentation--;
    this->writeLine("}");
    // Restore the real output and flush: header first, then the buffered body.
    fOut = oldOut;
    this->write(fFunctionHeader);
    this->write(buffer.str());
}
// Emits a forward declaration: the function signature followed by `;`.
void GLSLCodeGenerator::writeFunctionPrototype(const FunctionPrototype& f) {
    this->writeFunctionDeclaration(f.declaration());
    this->writeLine(";");
}
// Emits layout and storage qualifiers for a declaration. `globalContext` is
// true for file-scope declarations, where pre-GLSL-1.30 targets need the
// legacy attribute/varying keywords instead of in/out.
void GLSLCodeGenerator::writeModifiers(const Modifiers& modifiers,
                                       bool globalContext) {
    std::string layout = modifiers.fLayout.description();
    if (layout.size()) {
        this->write(layout + " ");
    }
    // For GLSL 4.1 and below, qualifier-order matters! These are written out in Modifier-bit order.
    if (modifiers.fFlags & Modifiers::kFlat_Flag) {
        this->write("flat ");
    }
    if (modifiers.fFlags & Modifiers::kNoPerspective_Flag) {
        this->write("noperspective ");
    }
    if (modifiers.fFlags & Modifiers::kConst_Flag) {
        this->write("const ");
    }
    if (modifiers.fFlags & Modifiers::kUniform_Flag) {
        this->write("uniform ");
    }
    if ((modifiers.fFlags & Modifiers::kIn_Flag) &&
        (modifiers.fFlags & Modifiers::kOut_Flag)) {
        this->write("inout ");
    } else if (modifiers.fFlags & Modifiers::kIn_Flag) {
        // Pre-1.30 GLSL spells global inputs "attribute" (vertex) / "varying"
        // (fragment) rather than "in".
        if (globalContext &&
            this->caps().generation() < SkSL::GLSLGeneration::k130) {
            this->write(fProgram.fConfig->fKind == ProgramKind::kVertex ? "attribute "
                                                                        : "varying ");
        } else {
            this->write("in ");
        }
    } else if (modifiers.fFlags & Modifiers::kOut_Flag) {
        // Pre-1.30 GLSL spells global outputs "varying" rather than "out".
        if (globalContext &&
            this->caps().generation() < SkSL::GLSLGeneration::k130) {
            this->write("varying ");
        } else {
            this->write("out ");
        }
    }
}
// Emits an interface block declaration (e.g. a uniform block). sk_PerVertex
// is built into GLSL and must not be redeclared, so it is skipped.
void GLSLCodeGenerator::writeInterfaceBlock(const InterfaceBlock& intf) {
    if (intf.typeName() == "sk_PerVertex") {
        return;
    }
    this->writeModifiers(intf.variable().modifiers(), true);
    this->writeLine(std::string(intf.typeName()) + " {");
    fIndentation++;
    // For an arrayed block, the fields live on the element type.
    const Type* structType = &intf.variable().type();
    if (structType->isArray()) {
        structType = &structType->componentType();
    }
    for (const auto& f : structType->fields()) {
        this->writeModifiers(f.fModifiers, false);
        this->writeTypePrecision(*f.fType);
        this->writeType(*f.fType);
        this->writeLine(" " + std::string(f.fName) + ";");
    }
    fIndentation--;
    this->write("}");
    // Optional instance name and array size: `} name[N]`.
    if (intf.instanceName().size()) {
        this->write(" ");
        this->write(intf.instanceName());
        if (intf.arraySize() > 0) {
            this->write("[");
            this->write(std::to_string(intf.arraySize()));
            this->write("]");
        }
    }
    this->writeLine(";");
}
// Emits the initializer expression of a variable declaration. The `var`
// parameter is unused by the GLSL backend.
void GLSLCodeGenerator::writeVarInitializer(const Variable& var, const Expression& value) {
    this->writeExpression(value, Precedence::kTopLevel);
}
// Returns the GLSL precision qualifier ("highp " / "mediump " / "") to
// prepend for `type`, or "" when the target does not use precision modifiers.
const char* GLSLCodeGenerator::getTypePrecision(const Type& type) {
    if (usesPrecisionModifiers()) {
        switch (type.typeKind()) {
            case Type::TypeKind::kScalar:
                // short/ushort default to mediump, unless high precision is
                // forced or the driver mishandles mediump ints.
                if (type.matches(*fContext.fTypes.fShort) ||
                    type.matches(*fContext.fTypes.fUShort)) {
                    if (fProgram.fConfig->fSettings.fForceHighPrecision ||
                        this->caps().incompleteShortIntPrecision()) {
                        return "highp ";
                    }
                    return "mediump ";
                }
                if (type.matches(*fContext.fTypes.fHalf)) {
                    return fProgram.fConfig->fSettings.fForceHighPrecision ? "highp " : "mediump ";
                }
                // Full-width float/int/uint are always highp.
                if (type.matches(*fContext.fTypes.fFloat) || type.matches(*fContext.fTypes.fInt) ||
                    type.matches(*fContext.fTypes.fUInt)) {
                    return "highp ";
                }
                return "";
            // Compound types take the precision of their component type.
            case Type::TypeKind::kVector: // fall through
            case Type::TypeKind::kMatrix:
            case Type::TypeKind::kArray:
                return this->getTypePrecision(type.componentType());
            default:
                break;
        }
    }
    return "";
}
// Writes the (possibly empty) precision qualifier for `type`.
void GLSLCodeGenerator::writeTypePrecision(const Type& type) {
    this->write(this->getTypePrecision(type));
}
// Emits a variable declaration: modifiers, precision, type, name, optional
// array size and initializer. Also records (once) whether external or rect
// samplers were seen, which drives extension and precision emission in
// generateCode().
void GLSLCodeGenerator::writeVarDeclaration(const VarDeclaration& var, bool global) {
    this->writeModifiers(var.var().modifiers(), global);
    this->writeTypePrecision(var.baseType());
    this->writeType(var.baseType());
    this->write(" ");
    this->write(var.var().name());
    if (var.arraySize() > 0) {
        this->write("[");
        this->write(std::to_string(var.arraySize()));
        this->write("]");
    }
    if (var.value()) {
        this->write(" = ");
        this->writeVarInitializer(var.var(), *var.value());
    }
    // The first external-sampler declaration triggers the required
    // GL_OES_EGL_image_external extension directive(s).
    if (!fFoundExternalSamplerDecl &&
        var.var().type().matches(*fContext.fTypes.fSamplerExternalOES)) {
        if (this->caps().externalTextureExtensionString()) {
            this->writeExtension(this->caps().externalTextureExtensionString());
        }
        if (this->caps().secondExternalTextureExtensionString()) {
            this->writeExtension(this->caps().secondExternalTextureExtensionString());
        }
        fFoundExternalSamplerDecl = true;
    }
    if (!fFoundRectSamplerDecl && var.var().type().matches(*fContext.fTypes.fSampler2DRect)) {
        fFoundRectSamplerDecl = true;
    }
    this->write(";");
}
// Dispatches a statement to the appropriate writer based on its kind.
void GLSLCodeGenerator::writeStatement(const Statement& s) {
    switch (s.kind()) {
        case Statement::Kind::kBlock:
            this->writeBlock(s.as<Block>());
            break;
        case Statement::Kind::kExpression:
            this->writeExpressionStatement(s.as<ExpressionStatement>());
            break;
        case Statement::Kind::kReturn:
            this->writeReturnStatement(s.as<ReturnStatement>());
            break;
        case Statement::Kind::kVarDeclaration:
            this->writeVarDeclaration(s.as<VarDeclaration>(), false);
            break;
        case Statement::Kind::kIf:
            this->writeIfStatement(s.as<IfStatement>());
            break;
        case Statement::Kind::kFor:
            this->writeForStatement(s.as<ForStatement>());
            break;
        case Statement::Kind::kDo:
            this->writeDoStatement(s.as<DoStatement>());
            break;
        case Statement::Kind::kSwitch:
            this->writeSwitchStatement(s.as<SwitchStatement>());
            break;
        case Statement::Kind::kBreak:
            this->write("break;");
            break;
        case Statement::Kind::kContinue:
            this->write("continue;");
            break;
        case Statement::Kind::kDiscard:
            this->write("discard;");
            break;
        // Inline markers and no-ops both compile to an empty statement.
        case Statement::Kind::kInlineMarker:
        case Statement::Kind::kNop:
            this->write(";");
            break;
        default:
            SkDEBUGFAILF("unsupported statement: %s", s.description().c_str());
            break;
    }
}
// Emits a statement block, wrapping it in braces when it forms a scope.
void GLSLCodeGenerator::writeBlock(const Block& b) {
    // Write scope markers if this block is a scope, or if the block is empty (since we need to emit
    // something here to make the code valid).
    bool isScope = b.isScope() || b.isEmpty();
    if (isScope) {
        this->writeLine("{");
        fIndentation++;
    }
    for (const std::unique_ptr<Statement>& stmt : b.children()) {
        if (!stmt->isEmpty()) {
            this->writeStatement(*stmt);
            this->finishLine();
        }
    }
    if (isScope) {
        fIndentation--;
        this->write("}");
    }
}
// Emits `if (<test>) <stmt>` with an optional ` else <stmt>` clause.
void GLSLCodeGenerator::writeIfStatement(const IfStatement& stmt) {
    this->write("if (");
    this->writeExpression(*stmt.test(), Precedence::kTopLevel);
    this->write(") ");
    this->writeStatement(*stmt.ifTrue());
    if (stmt.ifFalse()) {
        this->write(" else ");
        this->writeStatement(*stmt.ifFalse());
    }
}
// Emits a for loop, degrading to `while` when only a test clause is present,
// and applying the `&& true` driver workaround to the condition if required.
void GLSLCodeGenerator::writeForStatement(const ForStatement& f) {
    // Emit loops of the form 'for(;test;)' as 'while(test)', which is probably how they started
    if (!f.initializer() && f.test() && !f.next()) {
        this->write("while (");
        this->writeExpression(*f.test(), Precedence::kTopLevel);
        this->write(") ");
        this->writeStatement(*f.statement());
        return;
    }
    this->write("for (");
    if (f.initializer() && !f.initializer()->isEmpty()) {
        this->writeStatement(*f.initializer());
    } else {
        this->write("; ");
    }
    if (f.test()) {
        // Some drivers miscompile loop conditions unless they are rewritten
        // as `(test && true)`.
        if (this->caps().addAndTrueToLoopCondition()) {
            std::unique_ptr<Expression> and_true(new BinaryExpression(
                    Position(), f.test()->clone(), Token::Kind::TK_LOGICALAND,
                    Literal::MakeBool(fContext, Position(), /*value=*/true),
                    fContext.fTypes.fBool.get()));
            this->writeExpression(*and_true, Precedence::kTopLevel);
        } else {
            this->writeExpression(*f.test(), Precedence::kTopLevel);
        }
    }
    this->write("; ");
    if (f.next()) {
        this->writeExpression(*f.next(), Precedence::kTopLevel);
    }
    this->write(") ");
    this->writeStatement(*f.statement());
}
// Emits a do-while loop, either directly or (for drivers that miscompile
// do-while) rewritten as an equivalent while(true) loop with a first-pass
// sentinel variable.
void GLSLCodeGenerator::writeDoStatement(const DoStatement& d) {
    if (!this->caps().rewriteDoWhileLoops()) {
        this->write("do ");
        this->writeStatement(*d.statement());
        this->write(" while (");
        this->writeExpression(*d.test(), Precedence::kTopLevel);
        this->write(");");
        return;
    }
    // Otherwise, do the do while loop workaround, to rewrite loops of the form:
    //     do {
    //         CODE;
    //     } while (CONDITION)
    //
    // to loops of the form
    //     bool temp = false;
    //     while (true) {
    //         if (temp) {
    //             if (!CONDITION) {
    //                 break;
    //             }
    //         }
    //         temp = true;
    //         CODE;
    //     }
    std::string tmpVar = "_tmpLoopSeenOnce" + std::to_string(fVarCount++);
    this->write("bool ");
    this->write(tmpVar);
    this->writeLine(" = false;");
    this->writeLine("while (true) {");
    fIndentation++;
    this->write("if (");
    this->write(tmpVar);
    this->writeLine(") {");
    fIndentation++;
    // kPrefix precedence ensures the condition is parenthesized under `!`
    // whenever it binds more loosely than a prefix operator.
    this->write("if (!");
    this->writeExpression(*d.test(), Precedence::kPrefix);
    this->writeLine(") {");
    fIndentation++;
    this->writeLine("break;");
    fIndentation--;
    this->writeLine("}");
    fIndentation--;
    this->writeLine("}");
    this->write(tmpVar);
    this->writeLine(" = true;");
    this->writeStatement(*d.statement());
    this->finishLine();
    fIndentation--;
    this->write("}");
}
// Emits an expression statement. Pure expressions (no side effects) compile
// to nothing, since evaluating them cannot affect the program.
void GLSLCodeGenerator::writeExpressionStatement(const ExpressionStatement& s) {
    if (!s.expression()->hasSideEffects()) {
        return;
    }
    this->writeExpression(*s.expression(), Precedence::kTopLevel);
    this->write(";");
}
// Emits a switch statement. Drivers flagged with rewriteSwitchStatements()
// get an equivalent single-iteration for-loop of chained if-blocks with an
// explicit fallthrough flag; everyone else gets a native switch.
void GLSLCodeGenerator::writeSwitchStatement(const SwitchStatement& s) {
    if (this->caps().rewriteSwitchStatements()) {
        std::string fallthroughVar = "_tmpSwitchFallthrough" + std::to_string(fVarCount++);
        std::string valueVar = "_tmpSwitchValue" + std::to_string(fVarCount++);
        std::string loopVar = "_tmpSwitchLoop" + std::to_string(fVarCount++);
        // `int value = <expr>, fallthrough = 0;`
        this->write("int ");
        this->write(valueVar);
        this->write(" = ");
        this->writeExpression(*s.value(), Precedence::kAssignment);
        this->write(", ");
        this->write(fallthroughVar);
        this->writeLine(" = 0;");
        // A one-iteration for-loop, so `break` statements inside case blocks
        // still exit the whole construct.
        this->write("for (int ");
        this->write(loopVar);
        this->write(" = 0; ");
        this->write(loopVar);
        this->write(" < 1; ");
        this->write(loopVar);
        this->writeLine("++) {");
        fIndentation++;
        bool firstCase = true;
        for (const std::unique_ptr<Statement>& stmt : s.cases()) {
            const SwitchCase& c = stmt->as<SwitchCase>();
            if (!c.isDefault()) {
                // `if ((value == N))` for the first case;
                // `if ((fallthrough > 0) || (value == N))` thereafter.
                this->write("if ((");
                if (firstCase) {
                    firstCase = false;
                } else {
                    this->write(fallthroughVar);
                    this->write(" > 0) || (");
                }
                this->write(valueVar);
                this->write(" == ");
                this->write(std::to_string(c.value()));
                this->writeLine(")) {");
                fIndentation++;
                // We write the entire case-block statement here, and then set `switchFallthrough`
                // to 1. If the case-block had a break statement in it, we break out of the outer
                // for-loop entirely, meaning the `switchFallthrough` assignment never occurs, nor
                // does any code after it inside the switch. We've forbidden `continue` statements
                // inside switch case-blocks entirely, so we don't need to consider their effect on
                // control flow; see the Finalizer in FunctionDefinition::Convert.
                this->writeStatement(*c.statement());
                this->finishLine();
                this->write(fallthroughVar);
                this->write(" = 1;");
                this->writeLine();
                fIndentation--;
                this->writeLine("}");
            } else {
                // This is the default case. Since it's always last, we can just dump in the code.
                this->writeStatement(*c.statement());
                this->finishLine();
            }
        }
        fIndentation--;
        this->writeLine("}");
        return;
    }
    this->write("switch (");
    this->writeExpression(*s.value(), Precedence::kTopLevel);
    this->writeLine(") {");
    fIndentation++;
    // If a switch contains only a `default` case and nothing else, this confuses some drivers and
    // can lead to a crash. Adding a real case before the default seems to work around the bug,
    // and doesn't change the meaning of the switch. (skia:12465)
    if (s.cases().size() == 1 && s.cases().front()->as<SwitchCase>().isDefault()) {
        this->writeLine("case 0:");
    }
    for (const std::unique_ptr<Statement>& stmt : s.cases()) {
        const SwitchCase& c = stmt->as<SwitchCase>();
        if (c.isDefault()) {
            this->writeLine("default:");
        } else {
            this->write("case ");
            this->write(std::to_string(c.value()));
            this->writeLine(":");
        }
        if (!c.statement()->isEmpty()) {
            fIndentation++;
            this->writeStatement(*c.statement());
            this->finishLine();
            fIndentation--;
        }
    }
    fIndentation--;
    this->finishLine();
    this->write("}");
}
// Emits `return;` or `return <expr>;`.
void GLSLCodeGenerator::writeReturnStatement(const ReturnStatement& r) {
    if (!r.expression()) {
        this->write("return;");
        return;
    }
    this->write("return ");
    this->writeExpression(*r.expression(), Precedence::kTopLevel);
    this->write(";");
}
// Emits the `#version ...` declaration at the top of the shader, when the
// target's caps provide one.
void GLSLCodeGenerator::writeHeader() {
    if (!this->caps().versionDeclString()) {
        return;
    }
    this->write(this->caps().versionDeclString());
    this->finishLine();
}
// Dispatches a top-level program element to the appropriate writer.
void GLSLCodeGenerator::writeProgramElement(const ProgramElement& e) {
    switch (e.kind()) {
        case ProgramElement::Kind::kExtension:
            this->writeExtension(e.as<Extension>().name());
            break;
        case ProgramElement::Kind::kGlobalVar: {
            const VarDeclaration& decl =
                    e.as<GlobalVarDeclaration>().declaration()->as<VarDeclaration>();
            int builtin = decl.var().modifiers().fLayout.fBuiltin;
            if (builtin == -1) {
                // normal var
                this->writeVarDeclaration(decl, true);
                this->finishLine();
            } else if (builtin == SK_FRAGCOLOR_BUILTIN &&
                       this->caps().mustDeclareFragmentShaderOutput()) {
                // sk_FragColor needs an explicit declaration on targets where
                // the built-in output is not implicitly available.
                if (fProgram.fConfig->fSettings.fFragColorIsInOut) {
                    this->write("inout ");
                } else {
                    this->write("out ");
                }
                if (usesPrecisionModifiers()) {
                    this->write("mediump ");
                }
                this->writeLine("vec4 sk_FragColor;");
            }
            break;
        }
        case ProgramElement::Kind::kInterfaceBlock:
            this->writeInterfaceBlock(e.as<InterfaceBlock>());
            break;
        case ProgramElement::Kind::kFunction:
            this->writeFunction(e.as<FunctionDefinition>());
            break;
        case ProgramElement::Kind::kFunctionPrototype:
            this->writeFunctionPrototype(e.as<FunctionPrototype>());
            break;
        case ProgramElement::Kind::kModifiers: {
            const Modifiers& modifiers = e.as<ModifiersDeclaration>().modifiers();
            this->writeModifiers(modifiers, true);
            this->writeLine(";");
            break;
        }
        case ProgramElement::Kind::kStructDefinition:
            this->writeStructDefinition(e.as<StructDefinition>());
            break;
        default:
            SkDEBUGFAILF("unsupported program element %s\n", e.description().c_str());
            break;
    }
}
// Declares the RT-flip uniform in the globals section when the program uses
// it (for flipping Y coordinates on origin-mismatched render targets).
void GLSLCodeGenerator::writeInputVars() {
    if (!fProgram.fInputs.fUseFlipRTUniform) {
        return;
    }
    fGlobals.writeText("uniform ");
    if (usesPrecisionModifiers()) {
        fGlobals.writeText("highp ");
    }
    fGlobals.writeText("vec2 " SKSL_RTFLIP_NAME ";\n");
}
// Top-level driver: assembles the final shader text in this order — version
// header, extensions, input vars, globals, sk_FragCoord workaround decls,
// default precision statements, helper functions, then the buffered body.
// Returns true when no errors were reported.
bool GLSLCodeGenerator::generateCode() {
    this->writeHeader();
    // Buffer the body so that extensions/globals discovered while writing it
    // can be emitted first.
    OutputStream* rawOut = fOut;
    StringStream body;
    fOut = &body;
    // Write all the program elements except for functions.
    for (const ProgramElement* e : fProgram.elements()) {
        if (!e->is<FunctionDefinition>()) {
            this->writeProgramElement(*e);
        }
    }
    // Write the functions last.
    // Why don't we write things in their original order? Because the Inliner likes to move function
    // bodies around. After inlining, code can inadvertently move upwards, above ProgramElements
    // that the code relies on.
    for (const ProgramElement* e : fProgram.elements()) {
        if (e->is<FunctionDefinition>()) {
            this->writeProgramElement(*e);
        }
    }
    fOut = rawOut;
    write_stringstream(fExtensions, *rawOut);
    this->writeInputVars();
    write_stringstream(fGlobals, *rawOut);
    // When gl_FragCoord is unusable, declare the varying that carries the
    // equivalent value between the vertex and fragment stages.
    if (!this->caps().canUseFragCoord()) {
        Layout layout;
        switch (fProgram.fConfig->fKind) {
            case ProgramKind::kVertex: {
                Modifiers modifiers(layout, Modifiers::kOut_Flag);
                this->writeModifiers(modifiers, true);
                if (this->usesPrecisionModifiers()) {
                    this->write("highp ");
                }
                this->write("vec4 sk_FragCoord_Workaround;\n");
                break;
            }
            case ProgramKind::kFragment: {
                Modifiers modifiers(layout, Modifiers::kIn_Flag);
                this->writeModifiers(modifiers, true);
                if (this->usesPrecisionModifiers()) {
                    this->write("highp ");
                }
                this->write("vec4 sk_FragCoord_Workaround;\n");
                break;
            }
            default:
                break;
        }
    }
    // ES targets require default precision statements for floats/samplers.
    if (this->usesPrecisionModifiers()) {
        const char* precision =
                fProgram.fConfig->fSettings.fForceHighPrecision ? "highp" : "mediump";
        this->write(String::printf("precision %s float;\n", precision));
        this->write(String::printf("precision %s sampler2D;\n", precision));
        if (fFoundExternalSamplerDecl && !this->caps().noDefaultPrecisionForExternalSamplers()) {
            this->write(String::printf("precision %s samplerExternalOES;\n", precision));
        }
        if (fFoundRectSamplerDecl) {
            this->write(String::printf("precision %s sampler2DRect;\n", precision));
        }
    }
    write_stringstream(fExtraFunctions, *rawOut);
    write_stringstream(body, *rawOut);
    return fContext.fErrors->errorCount() == 0;
}
} // namespace SkSL
| 39.030834 | 100 | 0.539601 | [
"vector",
"transform",
"3d"
] |
d53581c7389e2b541d772ab1b80aa9685b590d4e | 2,849 | cc | C++ | content/browser/worker_host/worker_script_fetcher_unittest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | content/browser/worker_host/worker_script_fetcher_unittest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86 | 2015-10-21T13:02:42.000Z | 2022-03-14T07:50:50.000Z | content/browser/worker_host/worker_script_fetcher_unittest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/worker_host/worker_script_fetcher.h"
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/mojom/worker/worker_main_script_load_params.mojom.h"
#include "url/gurl.h"
namespace content {
namespace {
// Builds a WorkerMainScriptLoadParams for testing: marks the response as
// served by a service worker when `url_list_via_service_worker` is non-empty,
// and attaches one RedirectInfo per entry of `redirect_infos`.
blink::mojom::WorkerMainScriptLoadParamsPtr CreateParams(
    const std::vector<GURL>& url_list_via_service_worker,
    const std::vector<GURL>& redirect_infos) {
  blink::mojom::WorkerMainScriptLoadParamsPtr main_script_load_params =
      blink::mojom::WorkerMainScriptLoadParams::New();
  main_script_load_params->response_head =
      network::mojom::URLResponseHead::New();
  if (!url_list_via_service_worker.empty()) {
    main_script_load_params->response_head->was_fetched_via_service_worker =
        true;
    main_script_load_params->response_head->url_list_via_service_worker =
        url_list_via_service_worker;
  }
  for (const GURL& url : redirect_infos) {
    net::RedirectInfo redirect_info;
    redirect_info.new_url = url;
    main_script_load_params->redirect_infos.push_back(redirect_info);
  }
  return main_script_load_params;
}
} // namespace
// Verifies WorkerScriptFetcher::DetermineFinalResponseUrl: the last entry of
// the service-worker URL list wins when present; otherwise the last redirect
// wins; with neither, the initial request URL is the final response URL.
TEST(WorkerScriptFetcherTest, DetermineFinalResponseUrl) {
  struct TestCase {
    GURL initial_request_url;
    std::vector<GURL> url_list_via_service_worker;
    std::vector<GURL> redirect_infos;
    GURL expected_final_response_url;
  };
  static const std::vector<TestCase> kTestCases = {
      // No service worker, no redirects: the initial URL is final.
      {
          GURL("https://initial.com"),
          {},
          {},
          GURL("https://initial.com"),
      },
      // Service worker URL list only: its last entry is final.
      {
          GURL("https://initial.com"),
          {GURL("https://url_list_1.com"), GURL("https://url_list_2.com")},
          {},
          GURL("https://url_list_2.com"),
      },
      // Redirects only: the last redirect is final.
      {
          GURL("https://initial.com"),
          {},
          {GURL("https://redirect_1.com"), GURL("https://redirect_2.com")},
          GURL("https://redirect_2.com"),
      },
      // Both present: the service worker URL list takes precedence.
      {
          GURL("https://initial.com"),
          {GURL("https://url_list_1.com"), GURL("https://url_list_2.com")},
          {GURL("https://redirect_1.com"), GURL("https://redirect_2.com")},
          GURL("https://url_list_2.com"),
      },
  };
  for (const auto& test_case : kTestCases) {
    blink::mojom::WorkerMainScriptLoadParamsPtr main_script_load_params =
        CreateParams(test_case.url_list_via_service_worker,
                     test_case.redirect_infos);
    GURL final_response_url = WorkerScriptFetcher::DetermineFinalResponseUrl(
        test_case.initial_request_url, main_script_load_params.get());
    EXPECT_EQ(final_response_url, test_case.expected_final_response_url);
  }
}
| 30.967391 | 87 | 0.679537 | [
"vector"
] |
d53ca3e6d56b28ff172bb95de7e89595130814be | 489 | cc | C++ | iridium/files/patch-chrome_browser_devtools_devtools__eye__dropper.cc | behemoth3663/ports_local | ad57042ae62c907f9340ee696f468fdfeb562a8b | [
"BSD-3-Clause"
] | 1 | 2022-02-08T02:24:08.000Z | 2022-02-08T02:24:08.000Z | iridium/files/patch-chrome_browser_devtools_devtools__eye__dropper.cc | behemoth3663/ports_local | ad57042ae62c907f9340ee696f468fdfeb562a8b | [
"BSD-3-Clause"
] | null | null | null | iridium/files/patch-chrome_browser_devtools_devtools__eye__dropper.cc | behemoth3663/ports_local | ad57042ae62c907f9340ee696f468fdfeb562a8b | [
"BSD-3-Clause"
] | null | null | null | --- chrome/browser/devtools/devtools_eye_dropper.cc.orig 2020-02-03 21:53:29 UTC
+++ chrome/browser/devtools/devtools_eye_dropper.cc
@@ -164,7 +164,7 @@ void DevToolsEyeDropper::UpdateCursor() {
// magnified projection only with centered hotspot.
// Mac Retina requires cursor to be > 120px in order to render smoothly.
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_BSD)
const float kCursorSize = 63;
const float kDiameter = 63;
const float kHotspotOffset = 32;
| 40.75 | 80 | 0.742331 | [
"render"
] |
d53e975d648e66aa3aecbb7b98b206f7f499106b | 15,777 | cpp | C++ | be/src/runtime/mem_tracker.cpp | Lchangliang/incubator-doris | d056f5873b9ddfd11e32dc97cb31f0cdf2ae3676 | [
"Apache-2.0"
] | null | null | null | be/src/runtime/mem_tracker.cpp | Lchangliang/incubator-doris | d056f5873b9ddfd11e32dc97cb31f0cdf2ae3676 | [
"Apache-2.0"
] | null | null | null | be/src/runtime/mem_tracker.cpp | Lchangliang/incubator-doris | d056f5873b9ddfd11e32dc97cb31f0cdf2ae3676 | [
"Apache-2.0"
] | null | null | null | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// This file is copied from
// https://github.com/apache/impala/blob/branch-2.9.0/be/src/runtime/mem-tracker.cpp
// and modified by Doris
#include "runtime/mem_tracker.h"
#include <fmt/format.h>
#include <memory>
#include "exec/exec_node.h"
#include "gutil/once.h"
#include "runtime/exec_env.h"
#include "runtime/runtime_state.h"
#include "service/backend_options.h"
#include "util/pretty_printer.h"
#include "util/string_util.h"
#include "util/uid_util.h"
namespace doris {
const std::string MemTracker::COUNTER_NAME = "PeakMemoryUsage";
// The ancestor for all trackers. Every tracker is visible from the process down.
// All manually created trackers should specify the process tracker as the parent.
static std::shared_ptr<MemTracker> process_tracker;
static MemTracker* raw_process_tracker;
static GoogleOnceType process_tracker_once = GOOGLE_ONCE_INIT;
// Builds the root "Process" tracker (limit -1, which appears to mean "no
// limit" — see create_tracker_impl) and caches a raw pointer beside the
// shared_ptr for cheap access. Invoked exactly once via GoogleOnceInit.
void MemTracker::create_process_tracker() {
    process_tracker.reset(
            new MemTracker(-1, "Process", nullptr, MemTrackerLevel::OVERVIEW, nullptr));
    process_tracker->init();
    raw_process_tracker = process_tracker.get();
}
// Returns the process-wide root tracker, creating it exactly once
// (thread-safe via GoogleOnceInit).
std::shared_ptr<MemTracker> MemTracker::get_process_tracker() {
    GoogleOnceInit(&process_tracker_once, &MemTracker::create_process_tracker);
    return process_tracker;
}
// Raw-pointer variant of get_process_tracker(); avoids a shared_ptr copy on
// hot paths. The pointee is kept alive by the static shared_ptr.
MemTracker* MemTracker::get_raw_process_tracker() {
    GoogleOnceInit(&process_tracker_once, &MemTracker::create_process_tracker);
    return raw_process_tracker;
}
static TrackersMap _temporary_mem_trackers;
// Returns (creating on first use) a named temporary tracker from the shared
// concurrent map. try_emplace_l inserts atomically only when the label is
// absent, so concurrent callers for the same label get the same tracker.
std::shared_ptr<MemTracker> MemTracker::get_temporary_mem_tracker(const std::string& label) {
    // First time this label registered, make a new object, otherwise do nothing.
    // Avoid using locks to resolve erase conflicts.
    _temporary_mem_trackers.try_emplace_l(
            label, [](std::shared_ptr<MemTracker>) {},
            MemTracker::create_tracker(-1, fmt::format("[Temporary]-{}", label), nullptr,
                                       MemTrackerLevel::OVERVIEW));
    return _temporary_mem_trackers[label];
}
// Collects the process tracker and all descendants (up to the configured
// mem_tracker_level) into `trackers` via a breadth-first walk. Children are
// copied out under each node's lock so iteration is safe without holding it.
void MemTracker::list_process_trackers(std::vector<std::shared_ptr<MemTracker>>* trackers) {
    trackers->clear();
    std::deque<std::shared_ptr<MemTracker>> to_process;
    to_process.push_front(get_process_tracker());
    while (!to_process.empty()) {
        std::shared_ptr<MemTracker> t = to_process.back();
        to_process.pop_back();
        trackers->push_back(t);
        std::list<std::weak_ptr<MemTracker>> children;
        {
            lock_guard<SpinLock> l(t->_child_trackers_lock);
            children = t->_child_trackers;
        }
        for (const auto& child_weak : children) {
            // Children are held as weak_ptr; skip those already destroyed.
            std::shared_ptr<MemTracker> child = child_weak.lock();
            if (child && static_cast<decltype(config::mem_tracker_level)>(child->_level) <=
                                 config::mem_tracker_level) {
                to_process.emplace_back(std::move(child));
            }
        }
    }
}
// Factory for a regular (non-virtual) tracker: builds it via
// create_tracker_impl and links it into its parent chain with init().
std::shared_ptr<MemTracker> MemTracker::create_tracker(int64_t byte_limit, const std::string& label,
                                                       const std::shared_ptr<MemTracker>& parent,
                                                       MemTrackerLevel level,
                                                       RuntimeProfile* profile) {
    std::shared_ptr<MemTracker> tracker =
            MemTracker::create_tracker_impl(byte_limit, label, parent, level, profile);
    tracker->init();
    return tracker;
}
// Factory for a "virtual" tracker: consume/release on it do not propagate to
// ancestors (see init_virtual). The label is prefixed with "[Virtual]-".
std::shared_ptr<MemTracker> MemTracker::create_virtual_tracker(
        int64_t byte_limit, const std::string& label, const std::shared_ptr<MemTracker>& parent,
        MemTrackerLevel level) {
    std::shared_ptr<MemTracker> tracker = MemTracker::create_tracker_impl(
            byte_limit, "[Virtual]-" + label, parent, level, nullptr);
    tracker->init_virtual();
    return tracker;
}
// Shared construction logic: resolves the parent (falling back to the current
// thread's tracker), inherits label suffix/limit/level from the task parent
// where applicable, and registers the new tracker as a child of the parent.
std::shared_ptr<MemTracker> MemTracker::create_tracker_impl(
        int64_t byte_limit, const std::string& label, const std::shared_ptr<MemTracker>& parent,
        MemTrackerLevel level, RuntimeProfile* profile) {
    std::shared_ptr<MemTracker> reset_parent =
            parent ? parent : tls_ctx()->_thread_mem_tracker_mgr->mem_tracker();
    DCHECK(reset_parent);
    std::string reset_label;
    MemTracker* task_parent_tracker = reset_parent->parent_task_mem_tracker();
    if (task_parent_tracker) {
        // Append the task id suffix from the task tracker's label.
        // NOTE(review): split(...)[1] assumes a task label always contains '#' — verify.
        reset_label = fmt::format("{}#{}", label, split(task_parent_tracker->label(), "#")[1]);
    } else {
        reset_label = label;
    }
    // -1 means "inherit the parent's limit".
    if (byte_limit == -1) byte_limit = reset_parent->limit();
    std::shared_ptr<MemTracker> tracker(
            new MemTracker(byte_limit, reset_label, reset_parent,
                           level > reset_parent->_level ? level : reset_parent->_level, profile));
    // Do not check limit exceed when add_child_tracker, otherwise it will cause deadlock when log_usage is called.
    STOP_CHECK_LIMIT_THREAD_LOCAL_MEM_TRACKER();
    reset_parent->add_child_tracker(tracker);
    return tracker;
}
// Convenience constructor: parentless tracker at VERBOSE level, no profile.
MemTracker::MemTracker(int64_t byte_limit, const std::string& label)
        : MemTracker(byte_limit, label, std::shared_ptr<MemTracker>(), MemTrackerLevel::VERBOSE,
                     nullptr) {}
// Main constructor. The consumption high-water-mark counter is either owned
// standalone or shared with the supplied RuntimeProfile.
MemTracker::MemTracker(int64_t byte_limit, const std::string& label,
                       const std::shared_ptr<MemTracker>& parent, MemTrackerLevel level,
                       RuntimeProfile* profile)
        : _limit(byte_limit),
          _label(label),
          // Not 100% sure the id is unique. This is generated because it is faster than converting to int after hash.
          _id((GetCurrentTimeMicros() % 1000000) * 100 + _label.length()),
          _parent(parent),
          _level(level) {
    if (profile == nullptr) {
        _consumption = std::make_shared<RuntimeProfile::HighWaterMarkCounter>(TUnit::BYTES);
    } else {
        _consumption = profile->AddSharedHighWaterMarkCounter(COUNTER_NAME, TUnit::BYTES);
    }
}
// Caches the ancestor chain (self first) into _all_trackers/_limit_trackers
// so consume/release can walk it without chasing parent pointers. The walk
// stops at the first virtual ancestor, which acts as a propagation barrier.
void MemTracker::init() {
    DCHECK_GE(_limit, -1);
    MemTracker* tracker = this;
    while (tracker != nullptr) {
        _all_trackers.push_back(tracker);
        if (tracker->has_limit()) _limit_trackers.push_back(tracker);
        // This means that it terminates when recursively consume/release from the current tracker up to the virtual tracker.
        if (tracker->_virtual == true) {
            break;
        }
        tracker = tracker->_parent.get();
    }
    DCHECK_GT(_all_trackers.size(), 0);
    DCHECK_EQ(_all_trackers[0], this);
}
// Virtual-tracker initialization: the chain contains only this tracker, so
// consume/release never propagate to ancestors.
void MemTracker::init_virtual() {
    DCHECK_GE(_limit, -1);
    _all_trackers.push_back(this);
    if (this->has_limit()) _limit_trackers.push_back(this);
    _virtual = true;
}
// Destructor: flushes any untracked delta, optionally runs the leak check,
// and unlinks this tracker from its parent's child list.
MemTracker::~MemTracker() {
    consume(_untracked_mem.exchange(0)); // before memory_leak_check
    // TCMalloc hook will be triggered during destructor memtracker, may cause crash.
    if (_label == "Process") STOP_THREAD_LOCAL_MEM_TRACKER(false);
    if (!_virtual && config::memory_leak_detection) MemTracker::memory_leak_check(this);
    if (!_virtual && parent()) {
        // Do not call release on the parent tracker to avoid repeated releases.
        // Ensure that all consume/release are triggered by TCMalloc new/delete hook.
        lock_guard<SpinLock> l(_parent->_child_trackers_lock);
        if (_child_tracker_it != _parent->_child_trackers.end()) {
            _parent->_child_trackers.erase(_child_tracker_it);
            _child_tracker_it = _parent->_child_trackers.end();
        }
    }
    DCHECK_EQ(_untracked_mem, 0);
}
// Moves `bytes` of accounted consumption from this tracker to `dst`, which
// must share an ancestor. Only the trackers below the common ancestor are
// adjusted, so the ancestor's totals are unchanged.
void MemTracker::transfer_to_relative(MemTracker* dst, int64_t bytes) {
    if (id() == dst->id()) return;
    DCHECK_EQ(_all_trackers.back(), dst->_all_trackers.back()) << "Must have same ancestor";
    DCHECK(!dst->has_limit());
    // Find the common ancestor and update trackers between 'this'/'dst' and
    // the common ancestor. This logic handles all cases, including the
    // two trackers being the same or being ancestors of each other because
    // 'all_trackers_' includes the current tracker.
    int ancestor_idx = _all_trackers.size() - 1;
    int dst_ancestor_idx = dst->_all_trackers.size() - 1;
    while (ancestor_idx > 0 && dst_ancestor_idx > 0 &&
           _all_trackers[ancestor_idx - 1] == dst->_all_trackers[dst_ancestor_idx - 1]) {
        DCHECK(!dst->_all_trackers[dst_ancestor_idx - 1]->has_limit());
        --ancestor_idx;
        --dst_ancestor_idx;
    }
    MemTracker* common_ancestor = _all_trackers[ancestor_idx];
    release_local(bytes, common_ancestor);
    dst->consume_local(bytes, common_ancestor);
}
// Calling this on the query tracker results in output like:
//
// Query(4a4c81fedaed337d:4acadfda00000000) Limit=10.00 GB Total=508.28 MB Peak=508.45 MB
// Fragment 4a4c81fedaed337d:4acadfda00000000: Total=8.00 KB Peak=8.00 KB
// EXCHANGE_NODE (id=4): Total=0 Peak=0
// DataStreamRecvr: Total=0 Peak=0
// Block Manager: Limit=6.68 GB Total=394.00 MB Peak=394.00 MB
// Fragment 4a4c81fedaed337d:4acadfda00000006: Total=233.72 MB Peak=242.24 MB
// AGGREGATION_NODE (id=1): Total=139.21 MB Peak=139.84 MB
// HDFS_SCAN_NODE (id=0): Total=93.94 MB Peak=102.24 MB
// DataStreamSender (dst_id=2): Total=45.99 KB Peak=85.99 KB
// Fragment 4a4c81fedaed337d:4acadfda00000003: Total=274.55 MB Peak=274.62 MB
// AGGREGATION_NODE (id=3): Total=274.50 MB Peak=274.50 MB
// EXCHANGE_NODE (id=2): Total=0 Peak=0
// DataStreamRecvr: Total=45.91 KB Peak=684.07 KB
// DataStreamSender (dst_id=4): Total=680.00 B Peak=680.00 B
//
// If 'reservation_metrics_' are set, we ge a more granular breakdown:
// TrackerName: Limit=5.00 MB Reservation=5.00 MB OtherMemory=1.04 MB
// Total=6.04 MB Peak=6.45 MB
//
// Formats this tracker's usage (and recursively its children's, up to
// max_recursive_depth) into a human-readable string. Returns "" for
// zero-consumption trackers below INSTANCE detail level. If
// `logged_consumption` is non-null it receives the current consumption.
std::string MemTracker::log_usage(int max_recursive_depth, int64_t* logged_consumption) {
    // Make sure the consumption is up to date.
    int64_t curr_consumption = consumption();
    int64_t peak_consumption = _consumption->value();
    if (logged_consumption != nullptr) *logged_consumption = curr_consumption;
    if (_level > MemTrackerLevel::INSTANCE && curr_consumption == 0) return "";
    std::string detail =
            "MemTracker log_usage Label: {}, Limit: {}, Total: {}, Peak: {}, Exceeded: {}";
    detail = fmt::format(detail, _label, PrettyPrinter::print(_limit, TUnit::BYTES),
                         PrettyPrinter::print(curr_consumption, TUnit::BYTES),
                         PrettyPrinter::print(peak_consumption, TUnit::BYTES),
                         limit_exceeded() ? "true" : "false");
    // This call does not need the children, so return early.
    if (max_recursive_depth == 0) return detail;
    // Recurse and get information about the children
    int64_t child_consumption;
    std::string child_trackers_usage;
    std::list<std::weak_ptr<MemTracker>> children;
    {
        // Snapshot the child list under the lock, then format without it.
        lock_guard<SpinLock> l(_child_trackers_lock);
        children = _child_trackers;
    }
    child_trackers_usage = log_usage(max_recursive_depth - 1, children, &child_consumption);
    if (!child_trackers_usage.empty()) detail += "\n" + child_trackers_usage;
    return detail;
}
// Formats usage for a list of (weak) trackers, skipping any that have been
// destroyed. Sets *logged_consumption to the sum of the listed trackers'
// consumption and returns the newline-joined usage strings.
std::string MemTracker::log_usage(int max_recursive_depth,
                                  const std::list<std::weak_ptr<MemTracker>>& trackers,
                                  int64_t* logged_consumption) {
    *logged_consumption = 0;
    std::vector<std::string> usage_strings;
    for (const auto& tracker_weak : trackers) {
        std::shared_ptr<MemTracker> tracker = tracker_weak.lock();
        if (tracker) {
            int64_t tracker_consumption;
            std::string usage_string =
                    tracker->log_usage(max_recursive_depth, &tracker_consumption);
            if (!usage_string.empty()) usage_strings.push_back(usage_string);
            *logged_consumption += tracker_consumption;
        }
    }
    return join(usage_strings, "\n");
}
// Builds a MemoryLimitExceeded status describing why this tracker's limit was
// hit, logs a detailed diagnostic (optionally including a process-wide tracker
// dump) to the BE log, and returns the status.
// Note: the returned Status is constructed *before* the tracker dumps are
// appended below, so those dumps appear only in the WARNING log, not in the
// status message — this is intentional per the comment below.
Status MemTracker::mem_limit_exceeded(RuntimeState* state, const std::string& details,
                                      int64_t failed_allocation_size, Status failed_alloc) {
    STOP_CHECK_LIMIT_THREAD_LOCAL_MEM_TRACKER();
    MemTracker* process_tracker = MemTracker::get_raw_process_tracker();
    std::string detail =
            "Memory exceed limit. fragment={}, details={}, on backend={}. Memory left in process "
            "limit={}.";
    detail = fmt::format(detail, state != nullptr ? print_id(state->fragment_instance_id()) : "",
                         details, BackendOptions::get_localhost(),
                         PrettyPrinter::print(process_tracker->spare_capacity(), TUnit::BYTES));
    // NOTE(review): '!failed_alloc' relies on Status's boolean conversion to
    // select the "we have a concrete failed-allocation status" branch —
    // confirm the conversion's polarity matches this intent.
    if (!failed_alloc) {
        detail += " failed alloc=<{}>. current tracker={}.";
        detail = fmt::format(detail, failed_alloc.to_string(), _label);
    } else {
        detail += " current tracker <label={}, used={}, limit={}, failed alloc size={}>.";
        detail = fmt::format(detail, _label, _consumption->current_value(), _limit,
                             PrettyPrinter::print(failed_allocation_size, TUnit::BYTES));
    }
    detail += " If this is a query, can change the limit by session variable exec_mem_limit.";
    Status status = Status::MemoryLimitExceeded(detail);
    if (state != nullptr) state->log_error(detail);
    // only print the tracker log_usage in be log.
    if (process_tracker->spare_capacity() < failed_allocation_size) {
        // Dumping the process MemTracker is expensive. Limiting the recursive depth to two
        // levels limits the level of detail to a one-line summary for each query MemTracker.
        detail += "\n" + process_tracker->log_usage(2);
    }
    if (parent_task_mem_tracker() != nullptr) {
        detail += "\n" + parent_task_mem_tracker()->log_usage();
    }
    LOG(WARNING) << detail;
    return status;
}
// Runs the registered GC callbacks until consumption drops under
// 'max_consumption' (or callbacks run out). Returns true if consumption is
// still above the cap afterwards (i.e. GC failed to free enough), and also
// returns true immediately for a negative cap. Serialized by '_gc_lock'.
bool MemTracker::gc_memory(int64_t max_consumption) {
    if (max_consumption < 0) return true;
    lock_guard<std::mutex> l(_gc_lock);
    int64_t pre_gc_consumption = consumption();
    // Check if someone gc'd before us
    if (pre_gc_consumption < max_consumption) return false;
    int64_t curr_consumption = pre_gc_consumption;
    // Free some extra memory to avoid frequent GC. NOTE(review): an earlier
    // comment called this "4M", but the constant below is 4 * 1024^3 = 4 GiB —
    // confirm which value is intended.
    const int64_t EXTRA_BYTES_TO_FREE = 4L * 1024L * 1024L * 1024L;
    // Try to free up some memory
    for (int i = 0; i < _gc_functions.size(); ++i) {
        // Try to free up the amount we are over plus some extra so that we don't have to
        // immediately GC again. Don't free all the memory since that can be unnecessarily
        // expensive.
        int64_t bytes_to_free = curr_consumption - max_consumption + EXTRA_BYTES_TO_FREE;
        _gc_functions[i](bytes_to_free);
        curr_consumption = consumption();
        // NOTE(review): this stops while the headroom below the cap is still
        // *small* (gap <= EXTRA_BYTES_TO_FREE); '>=' would stop once enough
        // headroom has been freed — verify the intended direction.
        if (max_consumption - curr_consumption <= EXTRA_BYTES_TO_FREE) break;
    }
    return curr_consumption > max_consumption;
}
} // namespace doris
| 44.567797 | 125 | 0.672815 | [
"object",
"vector"
] |
d54014766464504e831cf5020fc514abb4062beb | 5,409 | hh | C++ | dune/multiscale/common/df_io.hh | wwu-numerik/DUNE-Multiscale | db7c4520c87d61bccdc05b05c54e7e50bdfd8d14 | [
"BSD-2-Clause"
] | 3 | 2018-09-17T12:00:02.000Z | 2022-03-01T08:54:32.000Z | dune/multiscale/common/df_io.hh | wwu-numerik/DUNE-Multiscale | db7c4520c87d61bccdc05b05c54e7e50bdfd8d14 | [
"BSD-2-Clause"
] | null | null | null | dune/multiscale/common/df_io.hh | wwu-numerik/DUNE-Multiscale | db7c4520c87d61bccdc05b05c54e7e50bdfd8d14 | [
"BSD-2-Clause"
] | 1 | 2018-09-17T12:00:04.000Z | 2018-09-17T12:00:04.000Z | // dune-multiscale
// Copyright Holders: Patrick Henning, Rene Milk
// License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
/**
* \file discretefunctionwriter.hh
* \brief write a bunch of discrete functions to one file and retrieve 'em
**/
#ifndef DISCRETEFUNCTIONWRITER_HEADERGUARD
#define DISCRETEFUNCTIONWRITER_HEADERGUARD
#include <fstream>
#include <vector>
#include <cassert>
#include <memory>
#include <unordered_map>
#include <dune/multiscale/common/traits.hh>
#include <dune/multiscale/msfem/msfem_traits.hh>
#include <dune/common/deprecated.hh>
#include <dune/common/exceptions.hh>
#include <dune/xt/common/configuration.hh>
#include <dune/xt/common/filesystem.hh>
#include <dune/xt/common/ranges.hh>
#include <dune/stuff/aliases.hh>
#include <dune/xt/common/memory.hh>
#include <dune/xt/common/type_traits.hh>
#include <boost/filesystem/path.hpp>
namespace Dune {
namespace Multiscale {
//! Central typedef bundle for the discrete-function IO backends below.
struct IOTraits
{
  // The local-grid discrete function type that all backends store/load.
  typedef MsFEMTraits::LocalGridDiscreteFunctionType DiscreteFunctionType;
  // Shared ownership handle used when passing functions in and out.
  typedef std::shared_ptr<DiscreteFunctionType> DiscreteFunction_ptr;
  typedef typename DiscreteFunctionType::SpaceType DiscreteFunctionSpaceType;
  // Container used by MemoryBackend to keep appended functions.
  typedef std::vector<DiscreteFunction_ptr> Vector;
  typedef typename DiscreteFunctionSpaceType::GridViewType GridViewType;
};
//! On-disk storage backend for discrete functions.
//! NOTE: append()/read() currently throw Dune::NotImplemented after creating
//! the target directory — only the directory bookkeeping is functional.
class DiskBackend : public boost::noncopyable
{
  // Would scan 'dir_' and load previously written functions; currently only
  // ensures the directory exists.
  void load_disk_functions()
  {
    Dune::XT::Common::test_create_directory(dir_.string());
    // if functions present, load em
  }

public:
  /**
   * \brief DiscreteFunctionWriter
   * \param filename will open fstream at config["global.datadir"]/filename
   *        filename may include additional path components
   * \throws Dune::IOError if config["global.datadir"]/filename cannot be opened
   */
  DiskBackend(const Dune::XT::Common::Configuration& config, const std::string filename = "nonsense_default_for_map")
    : dir_(boost::filesystem::path(config.get("global.datadir", "data")) / filename)
    , index_(0)
  {
  }

  // Reserves the next sequential slot on disk; writing itself is unimplemented.
  void append(const IOTraits::DiscreteFunction_ptr& /*df*/)
  {
    const std::string fn = (dir_ / Dune::XT::Common::to_string(index_++)).string();
    Dune::XT::Common::test_create_directory(fn);
    DUNE_THROW(NotImplemented, "");
  }

  // Would read the function stored at 'index'; unimplemented.
  void read(const unsigned long index, IOTraits::DiscreteFunction_ptr& /*df*/)
  {
    const std::string fn = (dir_ / Dune::XT::Common::to_string(index)).string();
    DUNE_THROW(NotImplemented, "");
  }

private:
  const boost::filesystem::path dir_; // base directory for this writer
  unsigned int index_;                // next sequential slot number
};
/**
* \brief simple discrete function to disk writer
* this class isn't type safe in the sense that different appends may append
* non-convertible discrete function implementations
*/
//! In-memory storage backend: keeps appended discrete functions in a vector
//! and hands them back by index. Not type safe across appends (see file-level
//! comment above).
class MemoryBackend : public boost::noncopyable
{
public:
  /**
   * \brief DiscreteFunctionWriter
   * \param filename will open fstream at config["global.datadir"]/filename
   *        filename may include additional path components
   * \throws Dune::IOError if config["global.datadir"]/filename cannot be opened
   */
  MemoryBackend(IOTraits::GridViewType& grid_view, const std::string /*filename*/ = "nonsense_default_for_map")
    : space_(MsFEMTraits::SpaceChooserType::make_space(grid_view))
  {
  }

  // Stores (shares ownership of) the given function at the next index.
  void append(const IOTraits::DiscreteFunction_ptr& df)
  {
    functions_.push_back(df);
  }

  // Retrieves the function stored at 'index' into 'df'.
  // \throws Dune::InvalidStateException for an out-of-bounds index.
  void read(const unsigned long index, IOTraits::DiscreteFunction_ptr& df)
  {
    if (index < functions_.size()) {
      df = functions_.at(index);
    } else
      DUNE_THROW(InvalidStateException, "requesting function at oob index " << index);
    assert(df != nullptr);
  }

  // The discrete function space built over the grid view passed at construction.
  IOTraits::DiscreteFunctionSpaceType& space()
  {
    return space_;
  }

private:
  IOTraits::DiscreteFunctionSpaceType space_;
  IOTraits::Vector functions_; // appended functions, addressed by index
};
//! Process-wide registry of IO backends, keyed by name (disk) or space id
//! (memory). Backends are created lazily on first request and cached.
class DiscreteFunctionIO : public boost::noncopyable
{
  typedef DiscreteFunctionIO ThisType;
  DiscreteFunctionIO() = default;

private:
  // Meyers singleton holding the backend caches.
  static ThisType& instance()
  {
    static ThisType s_this;
    return s_this;
  }

  // RAII token whose destruction clears all cached backends.
  struct ClearGuard
  {
    ~ClearGuard()
    {
      DiscreteFunctionIO::clear();
    }
  };

  //! Look up the backend for 'key', creating it from 'ctor_args' on first use.
  //! The whole find-or-emplace sequence runs under 'mutex_': the previous
  //! version checked the map before taking the lock, so two threads could both
  //! miss and both emplace, firing the assert below (check-then-act race).
  template <class IOMapType, class... Args>
  typename IOMapType::mapped_type& get(IOMapType& map, typename IOMapType::key_type key, Args&&... ctor_args)
  {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = map.find(key);
    if (it != map.end())
      return it->second;
    auto ptr = std::make_shared<typename IOMapType::mapped_type::element_type>(ctor_args...);
    auto ret = Dune::XT::Common::map_emplace(map, key, std::move(ptr));
    assert(ret.second);
    return ret.first->second;
  }

  DiskBackend& get_disk(const Dune::XT::Common::Configuration& config, std::string filename);
  MemoryBackend& get_memory(std::string filename, IOTraits::GridViewType& grid_view);

  //! this needs to be called before global de-init or else dune fem fails
  static void clear();

public:
  static MemoryBackend& memory(std::string filename, IOTraits::GridViewType& grid_view);
  static DiskBackend& disk(const XT::Common::Configuration& config, std::string filename);
  static ClearGuard clear_guard()
  {
    return ClearGuard();
  }

private:
  std::unordered_map<size_t, std::shared_ptr<MemoryBackend>> memory_;
  std::unordered_map<std::string, std::shared_ptr<DiskBackend>> disk_;
  std::mutex mutex_; // guards both maps above
}; // class DiscreteFunctionIO
} // namespace Multiscale {
} // namespace Dune {
#endif // ifndef DISCRETEFUNCTIONWRITER_HEADERGUARD
| 28.771277 | 117 | 0.725458 | [
"vector"
] |
d5418cbeefed4757053d704e60d6f211a7382ed1 | 2,170 | cpp | C++ | Shark/src/Shark/Render/Image.cpp | morlyno/Shark | 27afc267d76e0b69128606479c061685ba8576f8 | [
"Apache-2.0"
] | 1 | 2021-01-12T18:50:14.000Z | 2021-01-12T18:50:14.000Z | Shark/src/Shark/Render/Image.cpp | morlyno/Shark | 27afc267d76e0b69128606479c061685ba8576f8 | [
"Apache-2.0"
] | null | null | null | Shark/src/Shark/Render/Image.cpp | morlyno/Shark | 27afc267d76e0b69128606479c061685ba8576f8 | [
"Apache-2.0"
] | null | null | null | #include "skpch.h"
#include "Image.h"
#include "Shark/Render/RendererAPI.h"
#include "Platform/DirectX11/DirectXImage.h"
namespace Shark {
std::string ImageFormatToString(ImageFormat format)
{
switch (format)
{
case ImageFormat::None: return "None";
case ImageFormat::RGBA8: return "RGBA8";
case ImageFormat::R32_SINT: return "R32_SINT";
case ImageFormat::Depth32: return "Depth32";
case ImageFormat::SwapChain: return "(SwapChain) [SOON DEPRECATED]";
}
SK_CORE_ASSERT(false);
return "Unkonw";
}
std::string ImageTypeToString(ImageType usage)
{
switch (usage)
{
case ImageType::Default: return "Default";
case ImageType::Immutable: return "Immutable";
case ImageType::Dynamic: return "Dynamic";
case ImageType::Staging: return "Staging";
}
SK_CORE_ASSERT(false);
return "Unkonw";
}
std::string ImageUsageToString(uint32_t flags)
{
if (flags == ImageUsageNone)
return "None";
std::string str;
if (flags & ImageUsageTexture)
str.empty() ? str = "ShaderResource" : str += " | ShaderResource";
if (flags & ImageUsageFrameBuffer)
str.empty() ? str = "FrameBuffer" : str += " | FrameBuffer";
if (flags & ImageUsageDethStencil)
str.empty() ? str = "DethStencil" : str += " | DethStencil";
return str;
}
	// Creates an image with the given specification and no initial pixel data.
	// Delegates to the (specs, data) overload with a null data pointer.
	Ref<Image2D> Image2D::Create(const ImageSpecification& specs)
	{
		return Create(specs, nullptr);
	}
Ref<Image2D> Image2D::Create(const ImageSpecification& specs, void* data)
{
switch (RendererAPI::GetAPI())
{
case RendererAPI::API::None: SK_CORE_ASSERT(false, "No RendererAPI specified"); return nullptr;
case RendererAPI::API::DirectX11: return Ref<DirectXImage2D>::Create(data, specs);
}
SK_CORE_ASSERT(false, "Unkonw RendererAPI");
return nullptr;
}
Ref<Image2D> Image2D::Create(const std::filesystem::path& filepath, const ImageSpecification& specs)
{
switch (RendererAPI::GetAPI())
{
case RendererAPI::API::None: SK_CORE_ASSERT(false, "No RendererAPI specified"); return nullptr;
case RendererAPI::API::DirectX11: return Ref<DirectXImage2D>::Create(filepath, specs);
}
SK_CORE_ASSERT(false, "Unkonw RendererAPI");
return nullptr;
}
}
| 25.833333 | 101 | 0.704608 | [
"render"
] |
d542557b21d58d3545ff15804794be5a2e30108d | 33,032 | cpp | C++ | test/framework/integration_framework/integration_test_framework.cpp | kuvaldini/iroha | f6a739743f8c5d80bc341c1e286b56fe484e5daa | [
"Apache-2.0"
] | 1 | 2021-05-18T09:02:45.000Z | 2021-05-18T09:02:45.000Z | test/framework/integration_framework/integration_test_framework.cpp | kuvaldini/iroha | f6a739743f8c5d80bc341c1e286b56fe484e5daa | [
"Apache-2.0"
] | 6 | 2021-05-20T15:07:14.000Z | 2021-05-21T08:50:11.000Z | test/framework/integration_framework/integration_test_framework.cpp | kuvaldini/iroha | f6a739743f8c5d80bc341c1e286b56fe484e5daa | [
"Apache-2.0"
] | 1 | 2021-05-13T09:20:23.000Z | 2021-05-13T09:20:23.000Z | /**
* Copyright Soramitsu Co., Ltd. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#include "framework/integration_framework/integration_test_framework.hpp"
#include <boost/assert.hpp>
#include <boost/thread/barrier.hpp>
#include <limits>
#include <memory>
#include <rxcpp/operators/rx-filter.hpp>
#include <rxcpp/operators/rx-take.hpp>
#include "ametsuchi/storage.hpp"
#include "backend/protobuf/block.hpp"
#include "backend/protobuf/common_objects/proto_common_objects_factory.hpp"
#include "backend/protobuf/proto_transport_factory.hpp"
#include "backend/protobuf/queries/proto_query.hpp"
#include "backend/protobuf/query_responses/proto_query_response.hpp"
#include "backend/protobuf/transaction.hpp"
#include "backend/protobuf/transaction_responses/proto_tx_response.hpp"
#include "builders/protobuf/transaction.hpp"
#include "builders/protobuf/transaction_sequence_builder.hpp"
#include "consensus/yac/transport/impl/network_impl.hpp"
#include "cryptography/blob.hpp"
#include "cryptography/default_hash_provider.hpp"
#include "datetime/time.hpp"
#include "endpoint.grpc.pb.h"
#include "framework/common_constants.hpp"
#include "framework/integration_framework/fake_peer/behaviour/honest.hpp"
#include "framework/integration_framework/fake_peer/fake_peer.hpp"
#include "framework/integration_framework/iroha_instance.hpp"
#include "framework/integration_framework/port_guard.hpp"
#include "framework/integration_framework/test_irohad.hpp"
#include "framework/result_fixture.hpp"
#include "framework/result_gtest_checkers.hpp"
#include "framework/test_client_factory.hpp"
#include "framework/test_logger.hpp"
#include "interfaces/iroha_internal/transaction_batch_factory_impl.hpp"
#include "interfaces/iroha_internal/transaction_batch_parser_impl.hpp"
#include "interfaces/permissions.hpp"
#include "logger/logger.hpp"
#include "logger/logger_manager.hpp"
#include "main/subscription.hpp"
#include "module/irohad/ametsuchi/tx_presence_cache_stub.hpp"
#include "module/irohad/common/validators_config.hpp"
#include "module/shared_model/builders/protobuf/block.hpp"
#include "module/shared_model/builders/protobuf/proposal.hpp"
#include "module/shared_model/validators/always_valid_validators.hpp"
#include "multi_sig_transactions/mst_processor.hpp"
#include "multi_sig_transactions/transport/mst_transport_grpc.hpp"
#include "network/consensus_gate.hpp"
#include "network/impl/async_grpc_client.hpp"
#include "network/impl/channel_factory.hpp"
#include "network/impl/client_factory.hpp"
#include "network/peer_communication_service.hpp"
#include "ordering/impl/on_demand_os_client_grpc.hpp"
#include "simulator/verified_proposal_creator_common.hpp"
#include "synchronizer/synchronizer_common.hpp"
#include "torii/command_client.hpp"
#include "torii/query_client.hpp"
#include "torii/status_bus.hpp"
#include "validators/default_validator.hpp"
#include "validators/protobuf/proto_proposal_validator.hpp"
using namespace shared_model::crypto;
using namespace std::literals::string_literals;
using namespace common_constants;
using shared_model::interface::types::PublicKeyHexStringView;
using AlwaysValidProtoCommonObjectsFactory =
shared_model::proto::ProtoCommonObjectsFactory<
shared_model::validation::AlwaysValidFieldValidator>;
using ProtoTransactionFactory = shared_model::proto::ProtoTransportFactory<
shared_model::interface::Transaction,
shared_model::proto::Transaction>;
using AbstractTransactionValidator =
shared_model::validation::AbstractValidator<
shared_model::interface::Transaction>;
using AlwaysValidInterfaceTransactionValidator =
shared_model::validation::AlwaysValidModelValidator<
shared_model::interface::Transaction>;
using AlwaysValidProtoTransactionValidator =
shared_model::validation::AlwaysValidModelValidator<
iroha::protocol::Transaction>;
using AlwaysValidProtoProposalValidator =
shared_model::validation::AlwaysValidModelValidator<
shared_model::interface::Proposal>;
using AlwaysMissingTxPresenceCache = iroha::ametsuchi::TxPresenceCacheStub<
iroha::ametsuchi::tx_cache_status_responses::Missing>;
using FakePeer = integration_framework::fake_peer::FakePeer;
using iroha::network::makeTransportClientFactory;
namespace {
  // Loopback address the ITF binds all its test endpoints to.
  std::string kLocalHost = "127.0.0.1";
  // Base port numbers; PortGuard picks free ports starting from these.
  constexpr size_t kDefaultToriiPort = 11501;
  constexpr size_t kDefaultInternalPort = 50541;
  // gRPC channel settings shared by all test clients.
  static const std::shared_ptr<iroha::network::GrpcChannelParams>
      kChannelParams = iroha::network::getDefaultTestChannelParams();

  // Joins an IP and a port into the "ip:port" form used for peer addresses.
  std::string format_address(std::string ip,
                             integration_framework::PortGuard::PortType port) {
    ip.append(":");
    ip.append(std::to_string(port));
    return ip;
  }
}  // namespace
namespace integration_framework {
template <typename T>
class IntegrationTestFramework::CheckerQueue {
public:
CheckerQueue(std::chrono::milliseconds timeout) : timeout_(timeout) {}
void push(T obj) {
std::lock_guard<std::mutex> lock(queue_mutex_);
queue_.push(std::move(obj));
cv_.notify_one();
}
boost::optional<T> try_pop() {
std::unique_lock<std::mutex> lock(queue_mutex_);
if (queue_.empty()) {
if (not cv_.wait_for(
lock, timeout_, [this] { return not queue_.empty(); })) {
return boost::none;
}
}
T obj(std::move(queue_.front()));
queue_.pop();
return obj;
}
private:
std::chrono::milliseconds timeout_;
std::queue<T> queue_;
std::mutex queue_mutex_;
std::condition_variable cv_;
};
  /// Builds the whole test harness: checker queues, port allocation, torii
  /// command/query clients, always-valid model factories (so malformed test
  /// data is not rejected before reaching the code under test), YAC transport,
  /// and the IrohaInstance itself. Iroha is configured here but not started —
  /// see subscribeQueuesAndRun().
  IntegrationTestFramework::IntegrationTestFramework(
      size_t maximum_proposal_size,
      const boost::optional<std::string> &dbname,
      iroha::StartupWsvDataPolicy startup_wsv_data_policy,
      bool cleanup_on_exit,
      bool mst_support,
      const boost::optional<std::string> block_store_path,
      milliseconds proposal_waiting,
      milliseconds block_waiting,
      milliseconds tx_response_waiting,
      logger::LoggerManagerTreePtr log_manager)
      : log_(log_manager->getLogger()),
        log_manager_(std::move(log_manager)),
        proposal_queue_(
            std::make_unique<CheckerQueue<
                std::shared_ptr<const shared_model::interface::Proposal>>>(
                proposal_waiting)),
        verified_proposal_queue_(
            std::make_unique<CheckerQueue<VerifiedProposalType>>(
                proposal_waiting)),
        block_queue_(std::make_unique<CheckerQueue<BlockType>>(block_waiting)),
        port_guard_(std::make_unique<PortGuard>()),
        torii_port_(port_guard_->getPort(kDefaultToriiPort)),
        command_client_(std::make_unique<torii::CommandSyncClient>(
            iroha::network::createInsecureClient<
                torii::CommandSyncClient::Service>(
                kLocalHost, torii_port_, *kChannelParams),
            log_manager_->getChild("CommandClient")->getLogger())),
        query_client_(std::make_unique<torii_utils::QuerySyncClient>(
            iroha::network::createInsecureClient<
                torii_utils::QuerySyncClient::Service>(
                kLocalHost, torii_port_, *kChannelParams))),
        async_call_(std::make_shared<AsyncCall>(
            log_manager_->getChild("AsyncCall")->getLogger())),
        tx_response_waiting(tx_response_waiting),
        maximum_proposal_size_(maximum_proposal_size),
        common_objects_factory_(
            std::make_shared<AlwaysValidProtoCommonObjectsFactory>(
                iroha::test::kTestsValidatorsConfig)),
        transaction_factory_(std::make_shared<ProtoTransactionFactory>(
            std::make_unique<AlwaysValidInterfaceTransactionValidator>(),
            std::make_unique<AlwaysValidProtoTransactionValidator>())),
        batch_parser_(std::make_shared<
                      shared_model::interface::TransactionBatchParserImpl>()),
        batch_validator_(
            std::make_shared<shared_model::validation::DefaultBatchValidator>(
                iroha::test::kTestsValidatorsConfig)),
        transaction_batch_factory_(
            std::make_shared<
                shared_model::interface::TransactionBatchFactoryImpl>(
                batch_validator_)),
        proposal_factory_([] {
          std::shared_ptr<shared_model::validation::AbstractValidator<
              iroha::protocol::Transaction>>
              proto_transaction_validator =
                  std::make_shared<AlwaysValidProtoTransactionValidator>();
          std::unique_ptr<shared_model::validation::AbstractValidator<
              shared_model::interface::Proposal>>
              proposal_validator =
                  std::make_unique<AlwaysValidProtoProposalValidator>();
          std::unique_ptr<shared_model::validation::AbstractValidator<
              iroha::protocol::Proposal>>
              proto_proposal_validator = std::make_unique<
                  shared_model::validation::ProtoProposalValidator>(
                  std::move(proto_transaction_validator));
          return std::make_shared<shared_model::proto::ProtoTransportFactory<
              shared_model::interface::Proposal,
              shared_model::proto::Proposal>>(
              std::move(proposal_validator),
              std::move(proto_proposal_validator));
        }()),
        tx_presence_cache_(std::make_shared<AlwaysMissingTxPresenceCache>()),
        client_factory_(
            iroha::network::getTestInsecureClientFactory(kChannelParams)),
        yac_transport_(std::make_shared<iroha::consensus::yac::NetworkImpl>(
            async_call_,
            makeTransportClientFactory<iroha::consensus::yac::NetworkImpl>(
                client_factory_),
            log_manager_->getChild("ConsensusTransport")->getLogger())),
        cleanup_on_exit_(cleanup_on_exit) {
    // 1 h proposal_timeout results in non-deterministic behavior due to thread
    // scheduling and network
    config_.proposal_delay = 3600000;
    // 100 ms is small delay to avoid unnecessary messages due to eternal voting
    // and to allow scheduler to switch threads
    config_.vote_delay = 100;
    // amount of minutes in a day
    config_.mst_expiration_time = 24 * 60;
    config_.max_round_delay_ms = 0;
    config_.stale_stream_max_rounds = 2;
    config_.max_proposal_size = 10;
    config_.mst_support = mst_support;
    config_.block_store_path = block_store_path;
    config_.torii_port = torii_port_;
    config_.internal_port = port_guard_->getPort(kDefaultInternalPort);
    // The instance is created with the finished config but not yet started.
    iroha_instance_ =
        std::make_shared<IrohaInstance>(config_,
                                        kLocalHost,
                                        log_manager_->getChild("Irohad"),
                                        log_,
                                        startup_wsv_data_policy,
                                        dbname);
  }
  /// Shuts down the harness: optionally wipes the instance's on-disk state,
  /// stops every fake-peer gRPC server, and always terminates the running
  /// irohad instance to avoid hanging the test process.
  IntegrationTestFramework::~IntegrationTestFramework() {
    if (cleanup_on_exit_) {
      iroha_instance_->terminateAndCleanup();
    }
    for (auto &server : fake_peers_servers_) {
      server->shutdown(std::chrono::system_clock::now());
    }
    // the code below should be executed anyway in order to prevent app hang
    if (iroha_instance_ and iroha_instance_->getIrohaInstance()) {
      iroha_instance_->getIrohaInstance()->terminate(
          std::chrono::system_clock::now());
    }
  }
  /// Creates and registers one fake peer on a fresh internal port.
  /// @param key optional keypair for the peer (a new one is generated inside
  ///            FakePeer when not provided — TODO confirm against FakePeer).
  /// Requires initPipeline() to have run first (this_peer_ must be set).
  std::shared_ptr<FakePeer> IntegrationTestFramework::addFakePeer(
      const boost::optional<Keypair> &key) {
    BOOST_ASSERT_MSG(this_peer_, "Need to set the ITF peer key first!");
    const auto port = port_guard_->getPort(kDefaultInternalPort);
    auto fake_peer = std::make_shared<FakePeer>(
        kLocalHost,
        port,
        key,
        this_peer_,
        common_objects_factory_,
        transaction_factory_,
        batch_parser_,
        transaction_batch_factory_,
        proposal_factory_,
        tx_presence_cache_,
        log_manager_->getChild("FakePeer")
            ->getChild("at " + format_address(kLocalHost, port)));
    fake_peer->initialize();
    fake_peers_.emplace_back(fake_peer);
    log_->debug("Added a fake peer at {} with {}.",
                fake_peer->getAddress(),
                fake_peer->getKeypair().publicKey());
    return fake_peer;
  }
std::vector<std::shared_ptr<fake_peer::FakePeer>>
IntegrationTestFramework::addFakePeers(size_t amount) {
std::vector<std::shared_ptr<fake_peer::FakePeer>> fake_peers;
std::generate_n(std::back_inserter(fake_peers), amount, [this] {
auto fake_peer = addFakePeer({});
fake_peer->setBehaviour(std::make_shared<fake_peer::HonestBehaviour>());
return fake_peer;
});
return fake_peers;
}
  /// Builds the default genesis block signed by @p key: an admin account with
  /// every role permission, a default role/domain/asset, this peer, and every
  /// fake peer registered so far.
  shared_model::proto::Block IntegrationTestFramework::defaultBlock(
      const shared_model::crypto::Keypair &key) const {
    // Grant the admin role every known permission bit.
    shared_model::interface::RolePermissionSet all_perms{};
    for (size_t i = 0; i < all_perms.size(); ++i) {
      auto perm = static_cast<shared_model::interface::permissions::Role>(i);
      all_perms.set(perm);
    }
    auto genesis_tx_builder =
        shared_model::proto::TransactionBuilder()
            .creatorAccountId(kAdminId)
            .createdTime(iroha::time::now())
            .addPeer(getAddress(), PublicKeyHexStringView{key.publicKey()})
            .createRole(kAdminRole, all_perms)
            .createRole(kDefaultRole, {})
            .createDomain(kDomain, kDefaultRole)
            .createAccount(
                kAdminName, kDomain, PublicKeyHexStringView{key.publicKey()})
            .detachRole(kAdminId, kDefaultRole)
            .appendRole(kAdminId, kAdminRole)
            .createAsset(kAssetName, kDomain, 1)
            .quorum(1);
    // add fake peers
    for (const auto &fake_peer : fake_peers_) {
      genesis_tx_builder = genesis_tx_builder.addPeer(
          fake_peer->getAddress(),
          PublicKeyHexStringView{fake_peer->getKeypair().publicKey()});
    };
    auto genesis_tx =
        genesis_tx_builder.build().signAndAddSignature(key).finish();
    // Height 1 with an empty previous hash marks the chain's first block.
    auto genesis_block =
        shared_model::proto::BlockBuilder()
            .transactions(
                std::vector<shared_model::proto::Transaction>{genesis_tx})
            .height(1)
            .prevHash(DefaultHashProvider::makeHash(Blob("")))
            .createdTime(iroha::time::now())
            .build()
            .signAndAddSignature(key)
            .finish();
    return genesis_block;
  }
shared_model::proto::Block IntegrationTestFramework::defaultBlock() const {
BOOST_ASSERT_MSG(my_key_, "Need to set the ITF peer key first!");
return defaultBlock(*my_key_);
}
IntegrationTestFramework &IntegrationTestFramework::setGenesisBlock(
const shared_model::interface::Block &block) {
iroha_instance_->makeGenesis(clone(block));
iroha_instance_->init();
return *this;
}
  /// Full default start-up: init the pipeline with @p keypair, install the
  /// default genesis block, then subscribe queues and start the instance.
  IntegrationTestFramework &IntegrationTestFramework::setInitialState(
      const Keypair &keypair) {
    initPipeline(keypair);
    setGenesisBlock(defaultBlock(keypair));
    log_->info("added genesis block");
    subscribeQueuesAndRun();
    return *this;
  }
  /// Forwards MST gossip propagation settings (emit period and batch amount
  /// per emission) to the iroha instance.
  IntegrationTestFramework &IntegrationTestFramework::setMstGossipParams(
      std::chrono::milliseconds mst_gossip_emitting_period,
      uint32_t mst_gossip_amount_per_once) {
    iroha_instance_->setMstGossipParams(mst_gossip_emitting_period,
                                        mst_gossip_amount_per_once);
    return *this;
  }
  /// Start-up with a caller-provided genesis @p block instead of the default.
  IntegrationTestFramework &IntegrationTestFramework::setInitialState(
      const Keypair &keypair, const shared_model::interface::Block &block) {
    initPipeline(keypair);
    setGenesisBlock(block);
    log_->info("added genesis block");
    subscribeQueuesAndRun();
    return *this;
  }
  /// Restarts on top of existing storage: no genesis block is installed, the
  /// instance is initialized from whatever state the database already holds.
  IntegrationTestFramework &IntegrationTestFramework::recoverState(
      const Keypair &keypair) {
    initPipeline(keypair);
    iroha_instance_->init();
    subscribeQueuesAndRun();
    return *this;
  }
  /// Stores @p keypair as this peer's identity, builds the corresponding
  /// Peer model object, and initializes the instance's pipeline with it.
  void IntegrationTestFramework::initPipeline(
      const shared_model::crypto::Keypair &keypair) {
    log_->info("init state");
    my_key_ = keypair;
    // createPeer returns a Result; the test asserts success via expected::val.
    this_peer_ =
        framework::expected::val(
            common_objects_factory_->createPeer(
                getAddress(), PublicKeyHexStringView{keypair.publicKey()}))
            .value()
            .value;
    iroha_instance_->initPipeline(keypair, maximum_proposal_size_);
    log_->info("created pipeline");
  }
  /// Wires the running instance's event streams into the checker queues
  /// (proposals, verified proposals, committed blocks, per-tx statuses),
  /// starts every registered fake peer, and finally starts irohad itself.
  /// Subscriptions must be in place before run() so no early event is lost.
  void IntegrationTestFramework::subscribeQueuesAndRun() {
    // subscribing for components
    rxcpp::observable<iroha::network::OrderingEvent> requested_proposals =
        iroha_instance_->getIrohaInstance()
            ->getPeerCommunicationService()
            ->onProposal();
    // Only events that actually carry a proposal reach the queue.
    rxcpp::observable<iroha::network::OrderingEvent> received_proposals =
        requested_proposals.filter(
            [](const auto &event) { return event.proposal; });
    received_proposals.subscribe([this](const auto &event) {
      proposal_queue_->push(getProposalUnsafe(event));
      log_->info("proposal");
    });
    // Verified proposals arrive through the subscription engine; weak
    // captures keep the callback safe if the framework is destroyed first.
    verified_proposal_subscription_ = iroha::SubscriberCreator<
        bool,
        iroha::simulator::VerifiedProposalCreatorEvent>::
        template create<iroha::EventTypes::kOnVerifiedProposal>(
            static_cast<iroha::SubscriptionEngineHandlers>(
                decltype(iroha::getSubscription())::element_type::Dispatcher::
                    kExecuteInPool),
            [verified_proposal_queue(
                 iroha::utils::make_weak(verified_proposal_queue_)),
             log(iroha::utils::make_weak(log_))](
                auto, auto verified_proposal_and_errors) {
              auto maybe_verified_proposal_queue =
                  verified_proposal_queue.lock();
              auto maybe_log = log.lock();
              if (maybe_verified_proposal_queue and maybe_log
                  and verified_proposal_and_errors.verified_proposal_result) {
                maybe_verified_proposal_queue->push(
                    iroha::simulator::getVerifiedProposalUnsafe(
                        verified_proposal_and_errors));
                maybe_log->info("verified proposal");
              }
            });
    iroha_instance_->getIrohaInstance()->getStorage()->on_commit().subscribe(
        [this](auto committed_block) {
          block_queue_->push(committed_block);
          log_->info("block commit");
        });
    // Statuses are demultiplexed per transaction hash; a queue is created
    // lazily on the first status observed for each hash.
    iroha_instance_->getIrohaInstance()->getStatusBus()->statuses().subscribe(
        [this](auto response) {
          const auto hash = response->transactionHash().hex();
          auto it = responses_queues_.find(hash);
          if (it == responses_queues_.end()) {
            it = responses_queues_
                     .emplace(hash,
                              std::make_unique<CheckerQueue<TxResponseType>>(
                                  tx_response_waiting))
                     .first;
          }
          it->second->push(response);
          log_->info("response added to status queue: {}",
                     response->toString());
        });
    if (fake_peers_.size() > 0) {
      log_->info("starting fake iroha peers");
      for (auto &fake_peer : fake_peers_) {
        fake_peers_servers_.push_back(fake_peer->run());
      }
    }
    // start instance
    log_->info("starting main iroha instance");
    iroha_instance_->run();
  }
  /// The Peer model object describing this framework's own peer.
  std::shared_ptr<shared_model::interface::Peer>
  IntegrationTestFramework::getThisPeer() const {
    return this_peer_;
  }

  /// This peer's internal "ip:port" address string.
  std::string IntegrationTestFramework::getAddress() const {
    return format_address(kLocalHost, config_.internal_port);
  }

  /// Stream of MST state updates from the instance's MST processor.
  rxcpp::observable<std::shared_ptr<iroha::MstState>>
  IntegrationTestFramework::getMstStateUpdateObservable() {
    return iroha_instance_->getIrohaInstance()
        ->getMstProcessor()
        ->onStateUpdate();
  }

  /// Stream of batches that gathered enough signatures via MST.
  rxcpp::observable<iroha::BatchPtr>
  IntegrationTestFramework::getMstPreparedBatchesObservable() {
    return iroha_instance_->getIrohaInstance()
        ->getMstProcessor()
        ->onPreparedBatches();
  }

  /// Stream of batches that expired before completing MST.
  rxcpp::observable<iroha::BatchPtr>
  IntegrationTestFramework::getMstExpiredBatchesObservable() {
    return iroha_instance_->getIrohaInstance()
        ->getMstProcessor()
        ->onExpiredBatches();
  }

  /// Stream of consensus (YAC) outcomes.
  rxcpp::observable<iroha::consensus::GateObject>
  IntegrationTestFramework::getYacOnCommitObservable() {
    return iroha_instance_->getIrohaInstance()->getConsensusGate()->onOutcome();
  }

  /// Block query interface of the instance's storage.
  std::shared_ptr<iroha::ametsuchi::BlockQuery>
  IntegrationTestFramework::getBlockQuery() {
    return getIrohaInstance().getIrohaInstance()->getStorage()->getBlockQuery();
  }
  /// Synchronously queries torii for the status of the transaction with
  /// @p hash and passes the response to @p validation.
  IntegrationTestFramework &IntegrationTestFramework::getTxStatus(
      const shared_model::crypto::Hash &hash,
      std::function<void(const shared_model::proto::TransactionResponse &)>
          validation) {
    iroha::protocol::TxStatusRequest request;
    request.set_tx_hash(hash.hex());
    iroha::protocol::ToriiResponse response;
    command_client_->Status(request, response);
    validation(shared_model::proto::TransactionResponse(std::move(response)));
    return *this;
  }
IntegrationTestFramework &IntegrationTestFramework::sendTxWithoutValidation(
const shared_model::proto::Transaction &tx) {
log_->info("sending transaction");
log_->debug("{}", tx);
command_client_->Torii(tx.getTransport());
return *this;
}
  /// Sends @p tx and runs @p validation on its first (stateless) status.
  /// Two barriers pin the interleaving: bar1 guarantees the stateless status
  /// has reached the status bus before we query it, and bar2 holds the bus
  /// subscriber until validation has seen that status, so no later
  /// (stateful/committed) status can race past it.
  IntegrationTestFramework &IntegrationTestFramework::sendTx(
      const shared_model::proto::Transaction &tx,
      std::function<void(const shared_model::proto::TransactionResponse &)>
          validation) {
    // Required for StatusBus synchronization
    boost::barrier bar1(2);
    auto bar2 = std::make_shared<boost::barrier>(2);
    rxcpp::observable<
        std::shared_ptr<shared_model::interface::TransactionResponse>>
        statuses =
            iroha_instance_->getIrohaInstance()->getStatusBus()->statuses();
    rxcpp::observable<
        std::shared_ptr<shared_model::interface::TransactionResponse>>
        filtered_statuses = statuses.filter(
            [&](auto s) { return s->transactionHash() == tx.hash(); });
    rxcpp::observable<
        std::shared_ptr<shared_model::interface::TransactionResponse>>
        first_status = filtered_statuses.take(1);
    // bar2 is captured weakly so the subscriber cannot block forever if this
    // function has already returned and released its bar2 reference.
    first_status.subscribe(
        [&bar1, b2 = std::weak_ptr<boost::barrier>(bar2)](auto s) {
          bar1.wait();
          if (auto lock = b2.lock()) {
            lock->wait();
          }
        });
    sendTxWithoutValidation(tx);
    // make sure that the first (stateless) status has come
    bar1.wait();
    // fetch status of transaction
    getTxStatus(tx.hash(), [&validation, &bar2](auto &status) {
      // make sure that the following statuses (stateful/committed)
      // haven't reached the bus yet
      bar2->wait();
      // check validation function
      validation(status);
    });
    return *this;
  }
  /// Sends @p tx with a default check that only logs a stateless error, if any.
  IntegrationTestFramework &IntegrationTestFramework::sendTx(
      const shared_model::proto::Transaction &tx) {
    sendTx(tx, [this](const auto &status) {
      if (!status.statelessErrorOrCommandName().empty()) {
        log_->debug("Got error while sending transaction: "
                    + status.statelessErrorOrCommandName());
      }
    });
    return *this;
  }
// Sends the transaction and waits until it reaches a block, without running
// any checks on that block.
IntegrationTestFramework &IntegrationTestFramework::sendTxAwait(
    const shared_model::proto::Transaction &tx) {
  return sendTxAwait(tx, [](const auto &) {});
}
// Sends the transaction, skips the proposal and verified-proposal stages,
// and runs 'check' against the resulting block.
IntegrationTestFramework &IntegrationTestFramework::sendTxAwait(
    const shared_model::proto::Transaction &tx,
    std::function<void(const BlockType &)> check) {
  sendTx(tx).skipProposal().skipVerifiedProposal().checkBlock(check);
  return *this;
}
// Sends a whole transaction sequence via ListTorii, collects one stateless
// status (valid or failed) per transaction from the status bus, then passes
// the collected statuses to 'validation'. Blocks until all expected stateless
// statuses have been observed (condition variable signalled on completion of
// the take(N) subscription).
IntegrationTestFramework &IntegrationTestFramework::sendTxSequence(
    const shared_model::interface::TransactionSequence &tx_sequence,
    std::function<void(std::vector<shared_model::proto::TransactionResponse>
                           &)> validation) {
  log_->info("send transactions");
  const auto &transactions = tx_sequence.transactions();
  std::mutex m;
  std::condition_variable cv;
  bool processed = false;
  // subscribe on status bus and save all stateless statuses into a vector
  std::vector<shared_model::proto::TransactionResponse> observed_statuses;
  rxcpp::observable<
      std::shared_ptr<shared_model::interface::TransactionResponse>>
      statuses =
          iroha_instance_->getIrohaInstance()->getStatusBus()->statuses();
  rxcpp::observable<
      std::shared_ptr<shared_model::interface::TransactionResponse>>
      filtered_statuses = statuses.filter([&transactions](auto s) {
        // filter statuses for transactions from sequence
        auto it = std::find_if(
            transactions.begin(), transactions.end(), [&s](const auto tx) {
              // check if status is either stateless valid or failed
              bool is_stateless_status = iroha::visit_in_place(
                  s->get(),
                  [](const shared_model::interface::StatelessFailedTxResponse
                         &stateless_failed_response) { return true; },
                  [](const shared_model::interface::StatelessValidTxResponse
                         &stateless_valid_response) { return true; },
                  [](const auto &other_responses) { return false; });
              return is_stateless_status
                  and s->transactionHash() == tx->hash();
            });
        return it != transactions.end();
      });
  // Exactly one stateless status is expected per transaction.
  rxcpp::observable<
      std::shared_ptr<shared_model::interface::TransactionResponse>>
      first_statuses = filtered_statuses.take(transactions.size());
  first_statuses.subscribe(
      [&observed_statuses](auto s) {
        observed_statuses.push_back(
            *std::static_pointer_cast<
                shared_model::proto::TransactionResponse>(s));
      },
      // on_completed: all statuses collected, wake the waiting thread below.
      [&cv, &m, &processed] {
        std::lock_guard<std::mutex> lock(m);
        processed = true;
        cv.notify_all();
      });
  // put all transactions to the TxList and send them to iroha
  iroha::protocol::TxList tx_list;
  for (const auto &tx : transactions) {
    auto proto_tx =
        std::static_pointer_cast<shared_model::proto::Transaction>(tx)
            ->getTransport();
    *tx_list.add_transactions() = proto_tx;
  }
  command_client_->ListTorii(tx_list);
  // Wait until the subscription above has seen every expected status.
  std::unique_lock<std::mutex> lk(m);
  cv.wait(lk, [&] { return processed; });
  validation(observed_statuses);
  return *this;
}
// Sends a transaction sequence and waits until it reaches a block, running
// 'check' against that block.
IntegrationTestFramework &IntegrationTestFramework::sendTxSequenceAwait(
    const shared_model::interface::TransactionSequence &tx_sequence,
    std::function<void(const BlockType &)> check) {
  sendTxSequence(tx_sequence)
      .skipProposal()
      .skipVerifiedProposal()
      .checkBlock(check);
  return *this;
}
// Sends a query synchronously and runs 'validation' against the wrapped
// proto response.
IntegrationTestFramework &IntegrationTestFramework::sendQuery(
    const shared_model::proto::Query &qry,
    std::function<void(const shared_model::proto::QueryResponse &)>
        validation) {
  log_->info("send query");
  log_->debug("{}", qry);
  // Find() fills 'response' in place; it is then moved into the
  // shared_model wrapper handed to the validator.
  iroha::protocol::QueryResponse response;
  query_client_->Find(qry.getTransport(), response);
  shared_model::proto::QueryResponse query_response{std::move(response)};
  validation(query_response);
  return *this;
}
// Convenience overload: sends the query and discards the response.
IntegrationTestFramework &IntegrationTestFramework::sendQuery(
    const shared_model::proto::Query &qry) {
  sendQuery(qry, [](const auto &) {});
  return *this;
}
// Delivers transaction batches directly to this peer's on-demand ordering
// service transport, bypassing Torii. A throwaway gRPC client is created for
// the single onBatches() call.
IntegrationTestFramework &IntegrationTestFramework::sendBatches(
    const std::vector<TransactionBatchSPtr> &batches) {
  auto on_demand_os_transport =
      iroha::ordering::transport::OnDemandOsClientGrpcFactory(
          async_call_,
          proposal_factory_,
          [] { return std::chrono::system_clock::now(); },
          // the proposal waiting timeout is only used when waiting a response
          // for a proposal request, which our client does not do
          std::chrono::milliseconds(0),
          log_manager_->getChild("OrderingClientTransport")->getLogger(),
          makeTransportClientFactory<
              iroha::ordering::transport::OnDemandOsClientGrpcFactory>(
              client_factory_))
          .create(*this_peer_)
          // assumeValue() aborts the test if client creation failed.
          .assumeValue();
  on_demand_os_transport->onBatches(batches);
  return *this;
}
// Requests the proposal for 'round' from this peer's on-demand ordering
// service, waiting up to 'timeout' for the response. Returns boost::none if
// no proposal is available.
boost::optional<std::shared_ptr<const shared_model::interface::Proposal>>
IntegrationTestFramework::requestProposal(
    const iroha::consensus::Round &round, std::chrono::milliseconds timeout) {
  auto on_demand_os_transport =
      iroha::ordering::transport::OnDemandOsClientGrpcFactory(
          async_call_,
          proposal_factory_,
          [] { return std::chrono::system_clock::now(); },
          timeout,
          log_manager_->getChild("OrderingClientTransport")->getLogger(),
          makeTransportClientFactory<
              iroha::ordering::transport::OnDemandOsClientGrpcFactory>(
              client_factory_))
          .create(*this_peer_)
          // assumeValue() aborts the test if client creation failed.
          .assumeValue();
  return on_demand_os_transport->onRequestProposal(round);
}
// Sends a multisignature state to this peer over the MST gRPC transport,
// attributed to the peer identified by 'src_key'.
IntegrationTestFramework &IntegrationTestFramework::sendMstState(
    PublicKeyHexStringView src_key, const iroha::MstState &mst_state) {
  auto client = makeTransportClientFactory<iroha::network::MstTransportGrpc>(
                    client_factory_)
                    ->createClient(*this_peer_)
                    // assumeValue() aborts the test on client-creation failure.
                    .assumeValue();
  iroha::network::sendStateAsync(mst_state, src_key, *client, *async_call_);
  return *this;
}
// Sends a batch of YAC consensus votes to this peer.
IntegrationTestFramework &IntegrationTestFramework::sendYacState(
    const std::vector<iroha::consensus::yac::VoteMessage> &yac_state) {
  yac_transport_->sendState(*this_peer_, yac_state);
  return *this;
}
// Pops the next proposal from the proposal queue and runs 'validation' on it.
// Throws std::runtime_error if no proposal is queued.
IntegrationTestFramework &IntegrationTestFramework::checkProposal(
    std::function<void(
        const std::shared_ptr<const shared_model::interface::Proposal> &)>
        validation) {
  log_->info("check proposal");
  // fetch first proposal from proposal queue
  auto opt_proposal = proposal_queue_->try_pop();
  if (not opt_proposal) {
    throw std::runtime_error("missed proposal");
  }
  validation(*opt_proposal);
  return *this;
}
// Consumes the next proposal without inspecting it.
IntegrationTestFramework &IntegrationTestFramework::skipProposal() {
  checkProposal([](const auto &) {});
  return *this;
}
// Pops the next verified proposal (post stateful validation) and runs
// 'validation' on it. Throws std::runtime_error if none is queued.
IntegrationTestFramework &IntegrationTestFramework::checkVerifiedProposal(
    std::function<void(
        const std::shared_ptr<const shared_model::interface::Proposal> &)>
        validation) {
  log_->info("check verified proposal");
  // fetch first proposal from proposal queue
  auto opt_verified_proposal_and_errors = verified_proposal_queue_->try_pop();
  if (not opt_verified_proposal_and_errors) {
    throw std::runtime_error("missed verified proposal");
  }
  // Only the proposal part is validated; rejection reasons are ignored here.
  validation(opt_verified_proposal_and_errors.value()->verified_proposal);
  return *this;
}
// Consumes the next verified proposal without inspecting it.
IntegrationTestFramework &IntegrationTestFramework::skipVerifiedProposal() {
  checkVerifiedProposal([](const auto &) {});
  return *this;
}
// Pops the next committed block from the block queue and runs 'validation'
// on it. Throws std::runtime_error if no block is queued.
IntegrationTestFramework &IntegrationTestFramework::checkBlock(
    std::function<void(const BlockType &)> validation) {
  // fetch first from block queue
  log_->info("check block");
  auto opt_block = block_queue_->try_pop();
  if (not opt_block) {
    throw std::runtime_error("missed block");
  }
  validation(*opt_block);
  return *this;
}
// Consumes the next block without inspecting it.
IntegrationTestFramework &IntegrationTestFramework::skipBlock() {
  checkBlock([](const auto &) {});
  return *this;
}
// Pops the next queued status response for the transaction identified by
// 'tx_hash' and runs 'validation' on it. Throws std::runtime_error if no
// status is queued for that hash.
IntegrationTestFramework &IntegrationTestFramework::checkStatus(
    const shared_model::interface::types::HashType &tx_hash,
    std::function<void(const shared_model::proto::TransactionResponse &)>
        validation) {
  // fetch first response associated with the tx from related queue
  boost::optional<TxResponseType> opt_response;
  // Per-transaction queues are keyed by the hex form of the hash.
  const auto it = responses_queues_.find(tx_hash.hex());
  if (it != responses_queues_.end()) {
    opt_response = it->second->try_pop();
  }
  if (not opt_response) {
    throw std::runtime_error("missed status");
  }
  validation(static_cast<const shared_model::proto::TransactionResponse &>(
      *opt_response.value()));
  return *this;
}
// Returns the configured internal (peer-to-peer) port of this instance.
size_t IntegrationTestFramework::internalPort() const {
  return config_.internal_port;
}
// Shuts down the framework's Iroha instance and cleans up its resources.
void IntegrationTestFramework::done() {
  log_->info("done");
  iroha_instance_->terminateAndCleanup();
}
// Accessor for the wrapped Iroha instance.
IrohaInstance &IntegrationTestFramework::getIrohaInstance() {
  return *iroha_instance_;
}
// Default logger subtree used by the integration framework.
logger::LoggerManagerTreePtr getDefaultItfLogManager() {
  return getTestLoggerManager()->getChild("IntegrationFramework");
}
} // namespace integration_framework
| 39.183867 | 80 | 0.677555 | [
"vector"
] |
d5429cd6ee7499b57c6b4cd7c201a9978ed1a097 | 1,763 | cpp | C++ | C++/problems/0118_minimum_cost_tree_from_leaf_values.cpp | oxone-999/algorithms | 52dc527111e7422923a0e25684d8f4837e81a09b | [
"MIT"
] | 6 | 2019-03-20T22:23:26.000Z | 2020-08-28T03:10:27.000Z | C++/problems/0118_minimum_cost_tree_from_leaf_values.cpp | oxone-999/algorithms | 52dc527111e7422923a0e25684d8f4837e81a09b | [
"MIT"
] | 15 | 2019-10-13T20:53:53.000Z | 2022-03-31T02:01:35.000Z | C++/problems/0118_minimum_cost_tree_from_leaf_values.cpp | oxone-999/algorithms | 52dc527111e7422923a0e25684d8f4837e81a09b | [
"MIT"
] | 3 | 2019-03-11T10:57:46.000Z | 2020-02-26T21:13:21.000Z | // Problem Statement
// Given an array arr of positive integers, consider all binary trees such that:
// Each node has either 0 or 2 children;
// The values of arr correspond to the values of each leaf in an in-order traversal of the tree.
// (Recall that a node is a leaf if and only if it has 0 children.)
// The value of each non-leaf node is equal to the product of the largest leaf value in its left
// and right subtree respectively.
// Among all possible binary trees considered, return the smallest possible sum of the values of
// each non-leaf node. It is guaranteed this sum fits into a 32-bit integer.
#include <bits/stdc++.h>
using namespace std;
void printVec(const vector<int> &vec){
for(auto val : vec){
cout<<val<<" ";
}
cout<<endl;
}
// LeetCode 1130 "Minimum Cost Tree From Leaf Values".
// Interval DP with memoization: dp(i, j) returns
//   { minimal sum of non-leaf node values for arr[i..j],
//     largest leaf value in arr[i..j] }.
// A root over [i..j] split at k costs max(left) * max(right) plus the costs
// of the two subtrees; the best split is taken per interval.
class Solution {
private:
    // Memoized results keyed by the inclusive interval (i, j).
    // Cleared on every public call so a Solution instance can be reused;
    // the original kept stale entries across calls with different inputs.
    std::map<std::pair<int, int>, std::pair<int, int>> memo;

    // Returns {min non-leaf sum, max leaf} for arr[i..j] (i <= j).
    std::pair<int, int> dp(const std::vector<int> &arr, int i, int j) {
        const std::pair<int, int> key = std::make_pair(i, j);
        auto found = memo.find(key);
        if (found != memo.end()) {
            return found->second;
        }
        if (i == j) {
            // A single leaf forms no internal node: cost 0.
            return memo[key] = {0, arr[i]};
        }
        std::pair<int, int> best = {INT_MAX, INT_MIN};
        for (int k = i; k < j; ++k) {
            const auto left = dp(arr, i, k);
            const auto right = dp(arr, k + 1, j);
            // Root value is the product of the two subtree maxima.
            const std::pair<int, int> candidate = {
                left.second * right.second + left.first + right.first,
                std::max(left.second, right.second)};
            // Lexicographic min: primarily minimizes cost.
            best = std::min(best, candidate);
        }
        return memo[key] = best;
    }

public:
    // Returns the minimal possible sum of non-leaf node values; 0 for an
    // empty or single-leaf input. (The original underflowed size()-1 on an
    // empty vector and never reset the memo between calls.)
    int mctFromLeafValues(std::vector<int> &arr) {
        if (arr.empty()) {
            return 0;
        }
        memo.clear();
        return dp(arr, 0, static_cast<int>(arr.size()) - 1).first;
    }
};
int main(){
vector<int> leafs = {6,2,4,5};
Solution sol;
cout<<sol.mctFromLeafValues(leafs)<<endl;
return 0;
}
| 28.901639 | 120 | 0.618832 | [
"vector"
] |
d543a394c2da0f77ff2cac6e3df392979bc5688a | 7,757 | cc | C++ | garnet/bin/appmgr/system_objects_directory.cc | yanyushr/fuchsia | 98e70672a81a206d235503e398f37b7b65581f79 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T10:50:57.000Z | 2019-10-09T10:50:57.000Z | garnet/bin/appmgr/system_objects_directory.cc | bootingman/fuchsia2 | 04012f0aa1edd1d4108a2ac647a65e59730fc4c2 | [
"BSD-3-Clause"
] | null | null | null | garnet/bin/appmgr/system_objects_directory.cc | bootingman/fuchsia2 | 04012f0aa1edd1d4108a2ac647a65e59730fc4c2 | [
"BSD-3-Clause"
] | null | null | null | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "system_objects_directory.h"
#include <fs/pseudo-file.h>
#include <src/lib/fxl/strings/string_printf.h>
#include <algorithm>
#include "debug_info_retriever.h"
using fxl::StringPrintf;
namespace {
// Maps a Zircon object type to a short lowercase name used as a property
// key under "handle_count". Unrecognized types map to "unknown".
const char* obj_type_get_name(zx_obj_type_t type) {
  switch (type) {
    case ZX_OBJ_TYPE_NONE:
      return "none";
    case ZX_OBJ_TYPE_PROCESS:
      return "process";
    case ZX_OBJ_TYPE_THREAD:
      return "thread";
    case ZX_OBJ_TYPE_VMO:
      return "vmo";
    case ZX_OBJ_TYPE_CHANNEL:
      return "channel";
    case ZX_OBJ_TYPE_EVENT:
      return "event";
    case ZX_OBJ_TYPE_PORT:
      return "port";
    case ZX_OBJ_TYPE_INTERRUPT:
      return "interrupt";
    case ZX_OBJ_TYPE_PCI_DEVICE:
      return "pci_device";
    case ZX_OBJ_TYPE_LOG:
      return "log";
    case ZX_OBJ_TYPE_SOCKET:
      return "socket";
    case ZX_OBJ_TYPE_RESOURCE:
      return "resource";
    case ZX_OBJ_TYPE_EVENTPAIR:
      return "eventpair";
    case ZX_OBJ_TYPE_JOB:
      return "job";
    case ZX_OBJ_TYPE_VMAR:
      return "vmar";
    case ZX_OBJ_TYPE_FIFO:
      return "fifo";
    case ZX_OBJ_TYPE_GUEST:
      return "guest";
    case ZX_OBJ_TYPE_VCPU:
      return "vcpu";
    case ZX_OBJ_TYPE_TIMER:
      return "timer";
    case ZX_OBJ_TYPE_IOMMU:
      return "iommu";
    case ZX_OBJ_TYPE_BTI:
      return "bti";
    case ZX_OBJ_TYPE_PROFILE:
      return "profile";
    default:
      return "unknown";
  }
}
}  // namespace
namespace component {
// Exposes inspection data for 'process' under "system_objects": a "threads"
// child, a "memory" child, and a lazily-built "handle_count" child whose
// metrics are refreshed from the kernel on each enumeration.
SystemObjectsDirectory::SystemObjectsDirectory(zx::process process)
    : ExposedObject("system_objects"),
      process_(std::move(process)),
      threads_(std::make_unique<ThreadsDirectory>(&process_)),
      memory_(std::make_unique<MemoryDirectory>(&process_)) {
  add_child(threads_.get());
  add_child(memory_.get());
  object_dir().set_children_callback(
      [this](component::Object::ObjectVector* out_children) {
        zx_info_process_handle_stats_t process_handle_stats;
        // On query failure the "handle_count" child is simply omitted.
        if (GetProcessHandleStats(&process_handle_stats) != ZX_OK)
          return;
        auto handle_count_dir = component::ObjectDir::Make("handle_count");
        // One metric per kernel object type, named by obj_type_get_name().
        for (zx_obj_type_t obj_type = ZX_OBJ_TYPE_NONE;
             obj_type < ZX_OBJ_TYPE_UPPER_BOUND; ++obj_type) {
          handle_count_dir.set_metric(
              obj_type_get_name(obj_type),
              component::UIntMetric(
                  process_handle_stats.handle_count[obj_type]));
        }
        out_children->push_back(handle_count_dir.object());
      });
}
// Queries per-object-type handle counts for the tracked process. Returns
// ZX_OK and fills 'process_handle_stats' on success; logs and forwards the
// kernel error otherwise.
zx_status_t SystemObjectsDirectory::GetProcessHandleStats(
    zx_info_process_handle_stats_t* process_handle_stats) {
  zx_status_t status =
      process_.get_info(ZX_INFO_PROCESS_HANDLE_STATS, process_handle_stats,
                        sizeof(zx_info_process_handle_stats), nullptr, nullptr);
  if (status != ZX_OK) {
    FXL_LOG(ERROR) << "zx_object_get_info failed, status: " << status;
    return status;
  }
  return ZX_OK;
}
// Exposes per-thread inspection data under "threads": a static
// "all_thread_stacks" child with a lazily-computed combined stack dump, plus
// one lazily-enumerated child per thread (koid, name, total_runtime and an
// on-demand per-thread stack dump).
SystemObjectsDirectory::ThreadsDirectory::ThreadsDirectory(
    const zx::process* process)
    : ExposedObject("threads"), process_(process) {
  auto all_dir = component::ObjectDir::Make("all_thread_stacks");
  // Stack dumps are produced only when the property is read.
  all_dir.set_prop("stacks", [this]() -> std::string {
    return StringPrintf("\n%s", DebugInfoRetriever::GetInfo(process_).data());
  });
  object_dir().set_child(all_dir.object());
  object_dir().set_children_callback(
      [this](component::Object::ObjectVector* out_children) {
        fbl::Vector<ThreadInfo> threads;
        threads.reserve(kMaxThreads);
        GetThreads(&threads);
        for (const auto& thread : threads) {
          auto koid_string = StringPrintf("%lu", thread.koid);
          auto thread_obj = component::ObjectDir::Make(koid_string);
          thread_obj.set_prop("koid", koid_string);
          thread_obj.set_prop("name", thread.name.data());
          zx_handle_t handle = thread.thread.get();
          zx_info_thread_stats_t thread_stats;
          // Falls back to 0 when thread stats cannot be read.
          thread_obj.set_metric(
              "total_runtime",
              UIntMetric(GetThreadStats(handle, &thread_stats) == ZX_OK
                             ? thread_stats.total_runtime
                             : 0u));
          out_children->push_back(thread_obj.object());
          auto koid = thread.koid;
          auto stack_obj = component::ObjectDir::Make("stack");
          // Captures the koid by value; the dump is computed on read.
          stack_obj.set_prop("dump", [this, koid]() -> std::string {
            zx_koid_t koids[] = {koid};
            return StringPrintf(
                "\n%s", DebugInfoRetriever::GetInfo(process_, koids, 1).data());
          });
          thread_obj.set_child(stack_obj.object());
        }
      });
}
// Enumerates up to kMaxThreads threads of |process_| and appends their
// {koid, name, handle} to |out|. Returns early (possibly leaving |out|
// partially filled) if any kernel query fails, matching the original
// behavior. The original also repeated the ZX_PROP_NAME query a second
// time after the checked call; that redundant call is removed here.
void SystemObjectsDirectory::ThreadsDirectory::GetThreads(
    fbl::Vector<ThreadInfo>* out) {
  zx_koid_t thread_ids[kMaxThreads];
  size_t num_ids;
  if (process_->get_info(ZX_INFO_PROCESS_THREADS, thread_ids,
                         sizeof(zx_koid_t) * kMaxThreads, &num_ids,
                         nullptr) != ZX_OK) {
    return;
  }
  for (size_t i = 0; i < num_ids; i++) {
    zx::thread t;
    char name[ZX_MAX_NAME_LEN];
    if (process_->get_child(thread_ids[i], ZX_RIGHT_SAME_RIGHTS, &t) != ZX_OK) {
      return;
    }
    // Fills 'name' (NUL-terminated) on success.
    if (t.get_property(ZX_PROP_NAME, &name, ZX_MAX_NAME_LEN) != ZX_OK) {
      return;
    }
    out->push_back({thread_ids[i], name, std::move(t)});
  }
}
// Queries ZX_INFO_THREAD_STATS for 'thread'. Returns ZX_OK and fills
// 'thread_stats' on success; logs and forwards the kernel error otherwise.
zx_status_t SystemObjectsDirectory::ThreadsDirectory::GetThreadStats(
    zx_handle_t thread, zx_info_thread_stats_t* thread_stats) {
  zx_status_t status =
      zx_object_get_info(thread, ZX_INFO_THREAD_STATS, thread_stats,
                         sizeof(zx_info_thread_stats_t), nullptr, nullptr);
  if (status != ZX_OK) {
    FXL_LOG(ERROR) << "zx_object_get_info failed, status: " << status
                   << " thread: " << thread;
    return status;
  }
  return ZX_OK;
}
// Exposes process memory metrics under "memory". Each metric re-queries
// ZX_INFO_TASK_STATS when read; on failure the previous metric value is
// left unchanged.
SystemObjectsDirectory::MemoryDirectory::MemoryDirectory(
    const zx::process* process)
    : ExposedObject("memory"), process_(process) {
  object_dir().set_metric(
      "mapped_bytes", component::CallbackMetric([this](component::Metric* out) {
        zx_info_task_stats_t task_stats;
        if (GetTaskStats(&task_stats) != ZX_OK)
          return;
        out->SetUInt(task_stats.mem_mapped_bytes);
      }));
  object_dir().set_metric(
      "private_bytes",
      component::CallbackMetric([this](component::Metric* out) {
        zx_info_task_stats_t task_stats;
        if (GetTaskStats(&task_stats) != ZX_OK)
          return;
        out->SetUInt(task_stats.mem_private_bytes);
      }));
  object_dir().set_metric(
      "shared_bytes", component::CallbackMetric([this](component::Metric* out) {
        zx_info_task_stats_t task_stats;
        if (GetTaskStats(&task_stats) != ZX_OK)
          return;
        out->SetUInt(task_stats.mem_shared_bytes);
      }));
  object_dir().set_metric(
      "scaled_shared_bytes",
      component::CallbackMetric([this](component::Metric* out) {
        zx_info_task_stats_t task_stats;
        if (GetTaskStats(&task_stats) != ZX_OK)
          return;
        out->SetUInt(task_stats.mem_scaled_shared_bytes);
      }));
}
// Queries ZX_INFO_TASK_STATS for the tracked process. Returns ZX_OK and
// fills 'task_stats' on success; logs and forwards the kernel error
// otherwise.
zx_status_t SystemObjectsDirectory::MemoryDirectory::GetTaskStats(
    zx_info_task_stats_t* task_stats) {
  zx_status_t status =
      process_->get_info(ZX_INFO_TASK_STATS, task_stats,
                         sizeof(zx_info_task_stats_t), nullptr, nullptr);
  if (status != ZX_OK) {
    FXL_LOG(ERROR) << "zx_object_get_info failed, status: " << status;
    return status;
  }
  return ZX_OK;
}
} // namespace component
| 32.320833 | 80 | 0.654763 | [
"object",
"vector"
] |
d54415ef5afa17fbe032830624f5acaf1482a8e1 | 16,910 | cpp | C++ | Engine/source/materials/processedCustomMaterial.cpp | fr1tz/terminal-overload | 85f0689a40022e5eb7e54dcb6ddfb5ddd82a0a60 | [
"CC-BY-4.0"
] | 46 | 2015-01-05T17:34:43.000Z | 2022-01-04T04:03:09.000Z | Engine/source/materials/processedCustomMaterial.cpp | fr1tz/terminal-overload | 85f0689a40022e5eb7e54dcb6ddfb5ddd82a0a60 | [
"CC-BY-4.0"
] | 10 | 2015-01-20T23:14:46.000Z | 2019-04-05T22:04:15.000Z | Engine/source/materials/processedCustomMaterial.cpp | fr1tz/terminal-overload | 85f0689a40022e5eb7e54dcb6ddfb5ddd82a0a60 | [
"CC-BY-4.0"
] | 9 | 2015-08-08T18:46:06.000Z | 2021-02-01T13:53:20.000Z | // Copyright information can be found in the file named COPYING
// located in the root directory of this distribution.
#include "platform/platform.h"
#include "materials/processedCustomMaterial.h"
#include "gfx/sim/cubemapData.h"
#include "materials/sceneData.h"
#include "shaderGen/shaderGenVars.h"
#include "scene/sceneRenderState.h"
#include "materials/customMaterialDefinition.h"
#include "materials/shaderData.h"
#include "materials/materialManager.h"
#include "materials/matTextureTarget.h"
#include "materials/materialFeatureTypes.h"
#include "materials/materialParameters.h"
#include "gfx/sim/gfxStateBlockData.h"
#include "core/util/safeDelete.h"
#include "gfx/genericConstBuffer.h"
#include "console/simFieldDictionary.h"
#include "console/propertyParsing.h"
#include "gfx/util/screenspace.h"
#include "scene/reflectionManager.h"
// Wraps a CustomMaterial for processing. 'mat' must actually be a
// CustomMaterial (asserted in debug builds); state is initialized lazily by
// _setStageData().
ProcessedCustomMaterial::ProcessedCustomMaterial(Material &mat)
{
   mMaterial = &mat;
   AssertFatal(dynamic_cast<CustomMaterial*>(mMaterial), "Incompatible Material type!");
   mCustomMaterial = static_cast<CustomMaterial*>(mMaterial);
   mHasSetStageData = false;
   mHasGlow = false;
   mHasAccumulation = false;
   mMaxStages = 0;
   mMaxTex = 0;
}
// Empty destructor; no explicit cleanup is performed here.
ProcessedCustomMaterial::~ProcessedCustomMaterial()
{
}
// One-time setup of pass-0 texture state from the CustomMaterial definition.
// Each texture slot's filename is matched against the special "$..."/"#..."
// tokens to pick a texture type; plain names are loaded as static textures.
// Also gathers conditioner shader macros from named texture targets and
// copies the glow flag.
void ProcessedCustomMaterial::_setStageData()
{
   // Only do this once
   if ( mHasSetStageData )
      return;
   mHasSetStageData = true;
   ShaderRenderPassData* rpd = _getRPD(0);
   mConditionerMacros.clear();
   // Loop through all the possible textures, set the right flags, and load them if needed
   for(U32 i=0; i<CustomMaterial::MAX_TEX_PER_PASS; i++ )
   {
      rpd->mTexType[i] = Material::NoTexture;   // Set none as the default in case none of the cases below catch it.
      String filename = mCustomMaterial->mTexFilename[i];
      if(filename.isEmpty())
         continue;
      // mMaxTex tracks the highest used slot + 1 in every matched case below.
      if(filename.equal(String("$dynamiclight"), String::NoCase))
      {
         rpd->mTexType[i] = Material::DynamicLight;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$dynamiclightmask"), String::NoCase))
      {
         rpd->mTexType[i] = Material::DynamicLightMask;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$lightmap"), String::NoCase))
      {
         rpd->mTexType[i] = Material::Lightmap;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$cubemap"), String::NoCase))
      {
         // Requires cubemap data on the material; logged as an error if absent.
         if( mCustomMaterial->mCubemapData )
         {
            rpd->mTexType[i] = Material::Cube;
            rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
            mMaxTex = i+1;
         }
         else
         {
            mCustomMaterial->logError( "Could not find CubemapData - %s", mCustomMaterial->mCubemapName.c_str());
         }
         continue;
      }
      if(filename.equal(String("$dynamicCubemap"), String::NoCase))
      {
         rpd->mTexType[i] = Material::SGCube;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$backbuff"), String::NoCase))
      {
         rpd->mTexType[i] = Material::BackBuff;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$reflectbuff"), String::NoCase))
      {
         rpd->mTexType[i] = Material::ReflectBuff;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      if(filename.equal(String("$miscbuff"), String::NoCase))
      {
         rpd->mTexType[i] = Material::Misc;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      // Check for a RenderTexTargetBin assignment
      if (filename.substr( 0, 1 ).equal("#"))
      {
         String texTargetBufferName = filename.substr(1, filename.length() - 1);
         NamedTexTarget *texTarget = NamedTexTarget::find( texTargetBufferName );
         rpd->mTexSlot[i].texTarget = texTarget;
         // Get the conditioner macros.
         if ( texTarget )
            texTarget->getShaderMacros( &mConditionerMacros );
         rpd->mTexType[i] = Material::TexTarget;
         rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
         mMaxTex = i+1;
         continue;
      }
      // Anything else is treated as a static texture file on disk.
      rpd->mTexSlot[i].texObject = _createTexture( filename, &GFXDefaultStaticDiffuseProfile );
      if ( !rpd->mTexSlot[i].texObject )
      {
         mMaterial->logError("Failed to load texture %s", _getTexturePath(filename).c_str());
         continue;
      }
      rpd->mTexType[i] = Material::Standard;
      rpd->mSamplerNames[i] = mCustomMaterial->mSamplerNames[i];
      mMaxTex = i+1;
   }
   // We only get one cubemap
   if( mCustomMaterial->mCubemapData )
   {
      mCustomMaterial->mCubemapData->createMap();
      rpd->mCubeMap = mMaterial->mCubemapData->mCubemap; // BTRTODO ?
      if ( !rpd->mCubeMap )
         mMaterial->logError("Failed to load cubemap");
   }
   // If this has a output target defined, it may be writing
   // to a tex target bin with a conditioner, so search for
   // one and add its macros.
   if ( mCustomMaterial->mOutputTarget.isNotEmpty() )
   {
      NamedTexTarget *texTarget = NamedTexTarget::find( mCustomMaterial->mOutputTarget );
      if ( texTarget )
         texTarget->getShaderMacros( &mConditionerMacros );
   }
   // Copy the glow state over.
   mHasGlow = mCustomMaterial->mGlow[0];
}
// Builds the single render pass for this custom material: loads stage data,
// state blocks, the shader (with conditioner + user macros), material
// parameters, and resolves the sampler constant handles declared by the
// material. Returns false if the shader could not be created; returns true
// immediately (no passes) when the material has no shader data.
bool ProcessedCustomMaterial::init( const FeatureSet &features,
                                    const GFXVertexFormat *vertexFormat,
                                    const MatFeaturesDelegate &featuresDelegate )
{
   // If we don't have a shader data... we have nothing to do.
   if ( !mCustomMaterial->mShaderData )
      return true;
   // Custom materials only do one pass at the moment... so
   // add one for the stage data to fill in.
   ShaderRenderPassData *rpd = new ShaderRenderPassData();
   mPasses.push_back( rpd );
   _setStageData();
   _initPassStateBlocks();
   mStateHint.clear();
   // Note: We don't use the vertex format in a custom
   // material at all right now.
   //
   // Maybe we can add some required semantics and
   // validate that the format fits the shader?
   // Build a composite list of shader macros from
   // the conditioner and the user defined lists.
   Vector<GFXShaderMacro> macros;
   macros.merge( mConditionerMacros );
   macros.merge( mUserMacros );
   // Ask the shader data to give us a shader instance.
   rpd->shader = mCustomMaterial->mShaderData->getShader( macros );
   if ( !rpd->shader )
   {
      // Shader creation failed: roll back the pass we added above.
      delete rpd;
      mPasses.clear();
      return false;
   }
   rpd->shaderHandles.init( rpd->shader, mCustomMaterial );
   _initMaterialParameters();
   mDefaultParameters = allocMaterialParameters();
   setMaterialParameters( mDefaultParameters, 0 );
   mStateHint.init( this );
   // Resolve one shader sampler handle per used texture slot; sampler names
   // are normalized to start with '$'.
   for(int i = 0; i < mMaxTex; i++)
   {
      ShaderConstHandles *handles = _getShaderConstHandles( mPasses.size()-1 );
      AssertFatal(handles,"");
      if(rpd->mSamplerNames[i].isEmpty())
         continue;
      String samplerName = rpd->mSamplerNames[i].startsWith("$") ? rpd->mSamplerNames[i] : String("$") + rpd->mSamplerNames[i];
      GFXShaderConstHandle *handle = rpd->shader->getShaderConstHandle( samplerName );
      AssertFatal(handle,"");
      handles->mTexHandlesSC[i] = handle;
   }
   return true;
}
// Extends the base pass state block with the custom material's explicit
// state block data, if one was assigned.
void ProcessedCustomMaterial::_initPassStateBlock( RenderPassData *rpd, GFXStateBlockDesc &result )
{
   Parent::_initPassStateBlock( rpd, result );
   if (mCustomMaterial->getStateBlockData())
      result.addDesc(mCustomMaterial->getStateBlockData()->getState());
}
// Creates the render state blocks for the single pass; _setStageData() must
// have run first (asserted in debug builds).
void ProcessedCustomMaterial::_initPassStateBlocks()
{
   AssertFatal(mHasSetStageData, "State data must be set before initializing state block!");
   ShaderRenderPassData* rpd = _getRPD(0);
   _initRenderStateStateBlocks( rpd );
}
// Returns true when the material has cubemap data assigned. The 'pass'
// argument is unused for custom materials.
bool ProcessedCustomMaterial::_hasCubemap(U32 pass)
{
   // If the material doesn't have a cubemap, we don't.
   return mMaterial->mCubemapData != NULL;
}
// Activates render state, shader, textures and shader constants for the
// given pass. Returns false when 'pass' is out of range (end of passes).
bool ProcessedCustomMaterial::setupPass( SceneRenderState *state, const SceneData& sgData, U32 pass )
{
   PROFILE_SCOPE( ProcessedCustomMaterial_SetupPass );
   // Make sure we have a pass.
   if ( pass >= mPasses.size() )
      return false;
   ShaderRenderPassData* rpd = _getRPD( pass );
   U32 currState = _getRenderStateIndex( state, sgData, pass );
   GFX->setStateBlock(rpd->mRenderStates[currState]);
   // activate shader
   if ( rpd->shader )
      GFX->setShader( rpd->shader );
   else
      // No custom shader: fall back to the fixed generic shaders.
      GFX->setupGenericShaders();
   // Set our textures
   setTextureStages( state, sgData, pass );
   GFXShaderConstBuffer* shaderConsts = _getShaderConstBuffer(pass);
   GFX->setShaderConstBuffer(shaderConsts);
   // Set our shader constants.
   _setTextureTransforms(pass);
   _setShaderConstants(state, sgData, pass);
   // Let the light manager contribute its per-pass light constants.
   LightManager* lm = state ? LIGHTMGR : NULL;
   if (lm)
      lm->setLightInfo(this, NULL, sgData, state, pass, shaderConsts);
   shaderConsts->setSafe(rpd->shaderHandles.mAccumTimeSC, MATMGR->getTotalTime());
   return true;
}
// Binds each used texture slot for the pass. The light manager gets first
// refusal via setTextureStage(); otherwise the slot is bound according to
// its Material tex-type resolved in _setStageData(). Slots whose sampler
// handle is invalid are skipped.
void ProcessedCustomMaterial::setTextureStages( SceneRenderState *state, const SceneData &sgData, U32 pass )
{
   LightManager* lm = state ? LIGHTMGR : NULL;
   ShaderRenderPassData* rpd = _getRPD(pass);
   ShaderConstHandles* handles = _getShaderConstHandles(pass);
   GFXShaderConstBuffer* shaderConsts = _getShaderConstBuffer(pass);
   const NamedTexTarget *texTarget;
   GFXTextureObject *texObject;
   for( U32 i=0; i<mMaxTex; i++ )
   {
      U32 currTexFlag = rpd->mTexType[i];
      if ( !lm || !lm->setTextureStage(sgData, currTexFlag, i, shaderConsts, handles ) )
      {
         GFXShaderConstHandle* handle = handles->mTexHandlesSC[i];
         if ( !handle->isValid() )
            continue;
         S32 samplerRegister = handle->getSamplerRegister();
         switch( currTexFlag )
         {
         case 0:
         default:
            break;
         // All static texture flavors bind the texture loaded at stage setup.
         case Material::Mask:
         case Material::Standard:
         case Material::Bump:
         case Material::Detail:
            {
               GFX->setTexture( samplerRegister, rpd->mTexSlot[i].texObject );
               break;
            }
         case Material::Lightmap:
            {
               GFX->setTexture( samplerRegister, sgData.lightmap );
               break;
            }
         case Material::Cube:
            {
               GFX->setCubeTexture( samplerRegister, rpd->mCubeMap );
               break;
            }
         case Material::SGCube:
            {
               GFX->setCubeTexture( samplerRegister, sgData.cubemap );
               break;
            }
         case Material::BackBuff:
            {
               GFX->setTexture( samplerRegister, sgData.backBuffTex );
               //if ( sgData.reflectTex )
               //   GFX->setTexture( samplerRegister, sgData.reflectTex );
               //else
               //{
               //   GFXTextureObject *refractTex = REFLECTMGR->getRefractTex( true );
               //   GFX->setTexture( samplerRegister, refractTex );
               //}
               break;
            }
         case Material::ReflectBuff:
            {
               GFX->setTexture( samplerRegister, sgData.reflectTex );
               break;
            }
         case Material::Misc:
            {
               GFX->setTexture( samplerRegister, sgData.miscTex );
               break;
            }
         case Material::TexTarget:
            {
               texTarget = rpd->mTexSlot[i].texTarget;
               if ( !texTarget )
               {
                  GFX->setTexture( samplerRegister, NULL );
                  break;
               }
               texObject = texTarget->getTexture();
               // If no texture is available then map the default 2x2
               // black texture to it. This at least will ensure that
               // we get consistant behavior across GPUs and platforms.
               if ( !texObject )
                  texObject = GFXTexHandle::ZERO;
               // Also publish the render-target params constant when the
               // shader declares it.
               if ( handles->mRTParamsSC[i]->isValid() && texObject )
               {
                  const Point3I &targetSz = texObject->getSize();
                  const RectI &targetVp = texTarget->getViewport();
                  Point4F rtParams;
                  ScreenSpace::RenderTargetParameters(targetSz, targetVp, rtParams);
                  shaderConsts->set(handles->mRTParamsSC[i], rtParams);
               }
               GFX->setTexture( samplerRegister, texObject );
               break;
            }
         }
      }
   }
}
// Parses 'value' into type T (via the property-parsing helpers) and stores
// it into 'param' under 'handle'; logs an error on a parse failure.
template <typename T>
void ProcessedCustomMaterial::setMaterialParameter(MaterialParameters* param,
                                                   MaterialParameterHandle* handle,
                                                   const String& value)
{
   T typedValue;
   if (PropertyInfo::default_scan(value, typedValue))
   {
      param->set(handle, typedValue);
   } else {
      Con::errorf("Error setting %s, parse error: %s", handle->getName().c_str(), value.c_str());
   }
}
// Parses a whitespace-separated list of floats from 'value' into a MatrixF
// sized according to 'matrixType' (2x2 or 3x3; anything else asserts).
// Fix: dSscanf's "%g" conversions require float pointers; the original
// passed the F32 elements by value, which is undefined behavior and could
// never populate the matrix.
// NOTE(review): the parsed matrix is still never applied to 'param' —
// presumably this should end with param->set(handle, result); confirm the
// intended MaterialParameters semantics before wiring that up.
void ProcessedCustomMaterial::setMatrixParameter(MaterialParameters* param,
                                 MaterialParameterHandle* handle,
                                 const String& value, GFXShaderConstType matrixType)
{
   MatrixF result(true);
   F32* m = result;
   switch (matrixType)
   {
   case GFXSCT_Float2x2 :
      dSscanf(value.c_str(),"%g %g %g %g",
         &m[result.idx(0,0)], &m[result.idx(0,1)],
         &m[result.idx(1,0)], &m[result.idx(1,1)]);
      break;
   case GFXSCT_Float3x3 :
      dSscanf(value.c_str(),"%g %g %g %g %g %g %g %g %g",
         &m[result.idx(0,0)], &m[result.idx(0,1)], &m[result.idx(0,2)],
         &m[result.idx(1,0)], &m[result.idx(1,1)], &m[result.idx(1,2)],
         &m[result.idx(2,0)], &m[result.idx(2,1)], &m[result.idx(2,2)]);
      break;
   default:
      AssertFatal(false, "Invalid type!");
      break;
   }
}
// BTRTODO: Support arrays!?
// Allocates the material parameter block, then seeds it from any dynamic
// fields on the material whose names match shader constants (the constant's
// leading '$' is stripped before the lookup). Sampler constants are ignored.
// Note: the final line of this function was corrupted by extraction residue
// in the source dump; only the closing brace is restored — no code change.
// BTRTODO: Support arrays!?
MaterialParameters* ProcessedCustomMaterial::allocMaterialParameters()
{
   MaterialParameters* ret = Parent::allocMaterialParameters();
   // See if any of the dynamic fields match up with shader constants we have.
   SimFieldDictionary* fields = mMaterial->getFieldDictionary();
   if (!fields || fields->getNumFields() == 0)
      return ret;
   const Vector<GFXShaderConstDesc>& consts = ret->getShaderConstDesc();
   for (U32 i = 0; i < consts.size(); i++)
   {
      // strip the dollar sign from the front.
      String stripped(consts[i].name);
      stripped.erase(0, 1);
      SimFieldDictionary::Entry* field = fields->findDynamicField(stripped);
      if (field)
      {
         MaterialParameterHandle* handle = getMaterialParameterHandle(consts[i].name);
         // Dispatch on the constant's type to parse the field string.
         switch (consts[i].constType)
         {
            case GFXSCT_Float :
               setMaterialParameter<F32>(ret, handle, field->value);
               break;
            case GFXSCT_Float2:
               setMaterialParameter<Point2F>(ret, handle, field->value);
               break;
            case GFXSCT_Float3:
               setMaterialParameter<Point3F>(ret, handle, field->value);
               break;
            case GFXSCT_Float4:
               setMaterialParameter<Point4F>(ret, handle, field->value);
               break;
            case GFXSCT_Float2x2:
            case GFXSCT_Float3x3:
               setMatrixParameter(ret, handle, field->value, consts[i].constType);
               break;
            case GFXSCT_Float4x4:
               setMaterialParameter<MatrixF>(ret, handle, field->value);
               break;
            case GFXSCT_Int:
               setMaterialParameter<S32>(ret, handle, field->value);
               break;
            case GFXSCT_Int2:
               setMaterialParameter<Point2I>(ret, handle, field->value);
               break;
            case GFXSCT_Int3:
               setMaterialParameter<Point3I>(ret, handle, field->value);
               break;
            case GFXSCT_Int4:
               setMaterialParameter<Point4I>(ret, handle, field->value);
               break;
            // Do we want to ignore these?
            case GFXSCT_Sampler:
            case GFXSCT_SamplerCube:
            default:
               break;
         }
      }
   }
   return ret;
}
"vector"
] |
d5493b661bc32b1c9c3e3abd485f1e443ebe5874 | 21,754 | cpp | C++ | src/coreclr/vm/callhelpers.cpp | mklement0/runtime | 664e232c51a5223ca41a6db5a02e824815c483ec | [
"MIT"
] | 28 | 2021-02-08T01:22:52.000Z | 2022-01-19T08:01:48.000Z | src/coreclr/vm/callhelpers.cpp | mklement0/runtime | 664e232c51a5223ca41a6db5a02e824815c483ec | [
"MIT"
] | 2 | 2021-04-05T17:42:27.000Z | 2021-04-06T02:59:53.000Z | src/coreclr/vm/callhelpers.cpp | mklement0/runtime | 664e232c51a5223ca41a6db5a02e824815c483ec | [
"MIT"
] | 1 | 2020-12-11T07:16:37.000Z | 2020-12-11T07:16:37.000Z | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*
* CallHelpers.CPP: helpers to call managed code
*
*/
#include "common.h"
#include "dbginterface.h"
// To include declaration of "AppDomainTransitionExceptionFilter"
#include "excep.h"
// To include declaration of "SignatureNative"
#include "runtimehandles.h"
#include "invokeutil.h"
#include "argdestination.h"
#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
// Allow system module for Appx
// Debug-only check used by the multicore-JIT background thread: the only
// managed code that thread is permitted to call must live in the system
// module (see the caller in CallDescrWorkerWithHandler).
void AssertMulticoreJitAllowedModule(PCODE pTarget)
{
    MethodDesc* pTargetMD = Entry2MethodDesc(pTarget, NULL);
    _ASSERTE(pTargetMD->GetModule_NoLogging()->IsSystem());
}
#endif
// For X86, INSTALL_COMPLUS_EXCEPTION_HANDLER grants us sufficient protection to call into
// managed code.
//
// But on 64-bit, the personality routine will not pop frames or trackers as exceptions unwind
// out of managed code. Instead, we rely on explicit cleanup like CLRException::HandlerState::CleanupTry
// or UMThunkUnwindFrameChainHandler.
//
// So most callers should call through CallDescrWorkerWithHandler (or a wrapper like MethodDesc::Call)
// and get the platform-appropriate exception handling. A few places try to optimize by calling direct
// to managed methods (see ArrayInitializeWorker or FastCallFinalize). This sort of thing is
// dangerous. You have to worry about marking yourself as a legal managed caller and you have to
// worry about how exceptions will be handled on a FEATURE_EH_FUNCLETS plan. It is generally only suitable
// for X86.
//*******************************************************************************
// Transition to managed code described by 'pCallDescrData', bracketed by the
// platform-appropriate exception-handling setup (see the comment above this
// function).  'fCriticalCall' selects the "critical" flavor of the
// EE-to-managed transition marker.
void CallDescrWorkerWithHandler(
    CallDescrData * pCallDescrData,
    BOOL fCriticalCall)
{
#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
    // For multicore JITting, background thread should not call managed code, except when calling system code (e.g. throwing managed exception)
    if (GetThread()->HasThreadStateNC(Thread::TSNC_CallingManagedCodeDisabled))
    {
        AssertMulticoreJitAllowedModule(pCallDescrData->pTarget);
    }
#endif

#if defined(HOST_OSX) && defined(HOST_ARM64)
    // NOTE(review): presumably flips the JIT pages from writable to
    // executable (W^X) for the duration of this call -- confirm against
    // PAL_JITWriteEnable's definition.
    auto jitWriteEnableHolder = PAL_JITWriteEnable(false);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)

    // Establish the EH bracket around the managed call, then perform the
    // actual register/stack setup and jump in CallDescrWorker.
    BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
    CallDescrWorker(pCallDescrData);
    END_CALL_TO_MANAGED();
}
#if !defined(HOST_64BIT) && defined(_DEBUG)
//*******************************************************************************
// assembly code, in i386/asmhelpers.asm
// Debug-build C++ shim over the assembly helper CallDescrWorkerInternal: it
// validates the thread state and preserves the dangerous-object-ref table
// across the managed call.
void CallDescrWorker(CallDescrData * pCallDescrData)
{
    //
    // This function must not have a contract ... it's caller has pushed an FS:0 frame (COMPlusFrameHandler) that must
    // be the first handler on the stack. The contract causes, at a minimum, a C++ exception handler to be pushed to
    // handle the destruction of the contract object. If there is an exception in the managed code called from here,
    // and that exception is handled in that same block of managed code, then the COMPlusFrameHandler will actually
    // unwind the C++ handler before branching to the catch clause in managed code. That essentially causes an
    // out-of-order destruction of the contract object, resulting in very odd crashes later.
    //
#if 0
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    } CONTRACTL_END;
#endif // 0
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    _ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");

    TRIGGERSGC_NOSTOMP(); // Can't stomp object refs because they are args to the function

    // Save a copy of dangerousObjRefs in table.
    Thread* curThread;
    DWORD_PTR ObjRefTable[OBJREF_TABSIZE];

    curThread = GetThread();
    _ASSERTE(curThread != NULL);

    static_assert_no_msg(sizeof(curThread->dangerousObjRefs) == sizeof(ObjRefTable));
    memcpy(ObjRefTable, curThread->dangerousObjRefs, sizeof(ObjRefTable));

#ifndef FEATURE_INTERPRETER
    // When the interpreter is used, this may be called from preemptive code.
    _ASSERTE(curThread->PreemptiveGCDisabled()); // Jitted code expects to be in cooperative mode
#endif

    // If the current thread owns spinlock or unbreakable lock, it cannot call managed code.
    _ASSERTE(!curThread->HasUnbreakableLock() &&
             (curThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);

#ifdef TARGET_ARM
    _ASSERTE(IsThumbCode(pCallDescrData->pTarget));
#endif

    CallDescrWorkerInternal(pCallDescrData);

    // Restore dangerousObjRefs when we return back to EE after call
    memcpy(curThread->dangerousObjRefs, ObjRefTable, sizeof(ObjRefTable));

    TRIGGERSGC();

    ENABLESTRESSHEAP();
}
#endif // !defined(HOST_64BIT) && defined(_DEBUG)
// Dispatch 'pCallDescrData' to managed code inside a PAL SEH try whose filter
// ('AppDomainTransitionExceptionFilter', fed a 'NotifyOfCHFFilterWrapperParam')
// appears to exist to deliver catch-handler-found notifications; the filter
// always continues the search, so no exception is ever consumed here.
void DispatchCallDebuggerWrapper(
    CallDescrData * pCallDescrData,
    BOOL fCriticalCall
)
{
    // Use static contracts b/c we have SEH.
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    // Bundle the arguments so the PAL_TRY body (which may run in a separate
    // scope) can reach them through a single parameter pointer.
    struct Param : NotifyOfCHFFilterWrapperParam
    {
        CallDescrData * pCallDescrData;
        BOOL fCriticalCall;
    } param;
    param.pFrame = NULL;
    param.pCallDescrData = pCallDescrData;
    param.fCriticalCall = fCriticalCall;

    PAL_TRY(Param *, pParam, &param)
    {
        CallDescrWorkerWithHandler(
            pParam->pCallDescrData,
            pParam->fCriticalCall);
    }
    PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
    {
        // Should never reach here b/c handler should always continue search.
        _ASSERTE(!"Unreachable");
    }
    PAL_ENDTRY
}
// Helper for VM->managed calls with simple signatures.
// Call the managed code at 'pTargetAddress' with the pre-marshaled argument
// slots in 'pSrc' ('numStackSlotsToCopy' of them beyond any register slots),
// and return the first pointer-sized chunk of the return value.
// 'dwDispatchCallSimpleFlags' selects the critical-call transition and/or the
// debugger catch-handler-found notification wrapper.
void * DispatchCallSimple(
    SIZE_T *pSrc,
    DWORD numStackSlotsToCopy,
    PCODE pTargetAddress,
    DWORD dwDispatchCallSimpleFlags)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#ifdef DEBUGGING_SUPPORTED
    // Let an attached debugger trace/step into the upcoming managed call.
    if (CORDebuggerTraceCall())
        g_pDebugInterface->TraceCall((const BYTE *)pTargetAddress);
#endif // DEBUGGING_SUPPORTED

    CallDescrData callDescrData;

#ifdef CALLDESCR_ARGREGS
    // The first NUM_ARGUMENT_REGISTERS slots of 'pSrc' hold the register
    // arguments; the remaining slots are copied to the outgoing stack area.
    callDescrData.pSrc = pSrc + NUM_ARGUMENT_REGISTERS;
    callDescrData.numStackSlots = numStackSlotsToCopy;
    callDescrData.pArgumentRegisters = (ArgumentRegisters *)pSrc;
#else
    callDescrData.pSrc = pSrc;
    callDescrData.numStackSlots = numStackSlotsToCopy;
#endif
#ifdef CALLDESCR_RETBUFFARGREG
    UINT64 retBuffArgPlaceholder = 0;
    callDescrData.pRetBuffArg = &retBuffArgPlaceholder;
#endif
#ifdef CALLDESCR_FPARGREGS
    callDescrData.pFloatArgumentRegisters = NULL;
#endif
#ifdef CALLDESCR_REGTYPEMAP
    callDescrData.dwRegTypeMap = 0;
#endif
    callDescrData.fpReturnSize = 0;
    callDescrData.pTarget = pTargetAddress;

    if ((dwDispatchCallSimpleFlags & DispatchCallSimple_CatchHandlerFoundNotification) != 0)
    {
        // Route through the wrapper that installs the catch-handler-found
        // notification filter for the debugger.
        DispatchCallDebuggerWrapper(
            &callDescrData,
            dwDispatchCallSimpleFlags & DispatchCallSimple_CriticalCall);
    }
    else
    {
        CallDescrWorkerWithHandler(&callDescrData, dwDispatchCallSimpleFlags & DispatchCallSimple_CriticalCall);
    }

    // Only the first pointer-sized piece of the return value is surfaced.
    return *(void **)(&callDescrData.returnValue);
}
#ifdef CALLDESCR_REGTYPEMAP
//*******************************************************************************
// Record in 'pMap' the element type 'typ' of the argument located at
// 'argOffset', provided that argument is passed in one of the first
// NUM_ARGUMENT_REGISTERS registers.  CallDescrWorkerInternal consumes this
// map to decide how to load each register argument (general-purpose vs.
// floating point); the entries run from the LSB upward so the consumer can
// keep the whole map in a register and shift right one byte per argument.
void FillInRegTypeMap(int argOffset, CorElementType typ, BYTE * pMap)
{
    CONTRACTL
    {
        WRAPPER(THROWS);
        WRAPPER(GC_TRIGGERS);
        MODE_ANY;
        PRECONDITION(CheckPointer(pMap, NULL_NOT_OK));
    }
    CONTRACTL_END;

    const int argIndex = TransitionBlock::GetArgumentIndexFromOffset(argOffset);

    // Arguments beyond the register set live on the stack and get no entry.
    if (argIndex >= NUM_ARGUMENT_REGISTERS)
    {
        return;
    }

    pMap[argIndex] = typ;
}
#endif // CALLDESCR_REGTYPEMAP
//*******************************************************************************
// Invoke the managed method bound to this call site.  'pArguments' is an
// array of ARG_SLOTs in signature order -- 'this' first if present, then the
// return-buffer pointer if the signature has one -- and, when 'pReturnValue'
// is non-null, up to 'cbReturnValue' bytes of the result are copied into it.
// A fake transition frame is built with _alloca and control transfers via
// CallDescrWorkerWithHandler (or CallDescrWorkerInternal for the interpreter
// preemptive path).
#ifdef FEATURE_INTERPRETER
void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *pReturnValue, int cbReturnValue, bool transitionToPreemptive)
#else
void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *pReturnValue, int cbReturnValue)
#endif
{
    //
    // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
    //
    // This method needs to have a GC_TRIGGERS contract because it
    // calls managed code. However, IT MAY NOT TRIGGER A GC ITSELF
    // because the argument array is not protected and may contain gc
    // refs.
    //
    // WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
    //
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        INJECT_FAULT(COMPlusThrowOM(););
        MODE_COOPERATIVE;
        PRECONDITION(GetAppDomain()->CheckCanExecuteManagedCode(m_pMD));
        PRECONDITION(m_pMD->CheckActivated()); // EnsureActive will trigger, so we must already be activated
    }
    CONTRACTL_END;

    _ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");

    // If we're invoking an CoreLib method, lift the restriction on type load limits. Calls into CoreLib are
    // typically calls into specific and controlled helper methods for security checks and other linktime tasks.
    //
    // @todo: In an ideal world, we would require each of those sites to do the override rather than disabling
    // the assert broadly here. However, by limiting the override to CoreLib methods, we should still be able
    // to effectively enforce the more general rule about loader recursion.
    MAYBE_OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED, m_pMD->GetModule()->IsSystem());

    LPBYTE pTransitionBlock;
    UINT   nStackBytes;
    UINT   fpReturnSize;
#ifdef CALLDESCR_REGTYPEMAP
    UINT64 dwRegTypeMap;
#endif
#ifdef CALLDESCR_FPARGREGS
    FloatArgumentRegisters *pFloatArgumentRegisters = NULL;
#endif
    void* pvRetBuff = NULL;

    {
        //
        // the incoming argument array is not gc-protected, so we
        // may not trigger a GC before we actually call managed code
        //
        GCX_FORBID();

        // Record this call if required
        g_IBCLogger.LogMethodDescAccess(m_pMD);

        //
        // All types must already be loaded. This macro also sets up a FAULT_FORBID region which is
        // also required for critical calls since we cannot inject any failure points between the
        // caller of MethodDesc::CallDescr and the actual transition to managed code.
        //
        ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

#ifdef FEATURE_INTERPRETER
        _ASSERTE(isCallConv(m_methodSig.GetCallingConvention(), IMAGE_CEE_CS_CALLCONV_DEFAULT)
                 || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_C))
                 || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_VARARG))
                 || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_NATIVEVARARG))
                 || isCallConv(m_methodSig.GetCallingConvention(), CorCallingConvention(IMAGE_CEE_CS_CALLCONV_STDCALL)));
#else
        _ASSERTE(isCallConv(m_methodSig.GetCallingConvention(), IMAGE_CEE_CS_CALLCONV_DEFAULT));
        _ASSERTE(!(m_methodSig.GetCallingConventionInfo() & CORINFO_CALLCONV_PARAMTYPE));
#endif

#ifdef DEBUGGING_SUPPORTED
        if (CORDebuggerTraceCall())
        {
            g_pDebugInterface->TraceCall((const BYTE *)m_pCallTarget);
        }
#endif // DEBUGGING_SUPPORTED

#ifdef _DEBUG
        {
#ifdef UNIX_AMD64_ABI
            // Validate that the return value is not too big for the buffer passed
            if (m_pMD->GetMethodTable()->IsRegPassedStruct())
            {
                TypeHandle thReturnValueType;
                if (m_methodSig.GetReturnTypeNormalized(&thReturnValueType) == ELEMENT_TYPE_VALUETYPE)
                {
                    _ASSERTE((DWORD)cbReturnValue >= thReturnValueType.GetSize());
                }
            }
#endif // UNIX_AMD64_ABI
            // The metasig should be reset
            _ASSERTE(m_methodSig.GetArgNum() == 0);
            // Check to see that any value type args have been loaded and restored.
            // This is because we may be calling a FramedMethodFrame which will use the sig
            // to trace the args, but if any are unloaded we will be stuck if a GC occurs.
            _ASSERTE(m_pMD->IsRestored_NoLogging());
            CorElementType argType;
            while ((argType = m_methodSig.NextArg()) != ELEMENT_TYPE_END)
            {
                if (argType == ELEMENT_TYPE_VALUETYPE)
                {
                    TypeHandle th = m_methodSig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes);
                    CONSISTENCY_CHECK(th.CheckFullyLoaded());
                    CONSISTENCY_CHECK(th.IsRestored_NoLogging());
                }
            }
            m_methodSig.Reset();
        }
#endif // _DEBUG

        // 'arg' walks the incoming ARG_SLOT array as slots are consumed.
        DWORD arg = 0;

        nStackBytes = m_argIt.SizeOfFrameArgumentArray();

        // Create a fake FramedMethodFrame on the stack.
        // Note that SizeOfFrameArgumentArray does overflow checks with sufficient margin to prevent overflows here
        DWORD dwAllocaSize = TransitionBlock::GetNegSpaceSize() + sizeof(TransitionBlock) + nStackBytes;
        LPBYTE pAlloc = (LPBYTE)_alloca(dwAllocaSize);

        pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize();

#ifdef CALLDESCR_REGTYPEMAP
        dwRegTypeMap = 0;
        BYTE* pMap = (BYTE*)&dwRegTypeMap;
#endif // CALLDESCR_REGTYPEMAP

        if (m_argIt.HasThis())
        {
            *((LPVOID*)(pTransitionBlock + m_argIt.GetThisOffset())) = ArgSlotToPtr(pArguments[arg++]);
        }

        if (m_argIt.HasRetBuffArg())
        {
            *((LPVOID*)(pTransitionBlock + m_argIt.GetRetBuffArgOffset())) = ArgSlotToPtr(pArguments[arg++]);
        }
#ifdef FEATURE_HFA
#ifdef FEATURE_INTERPRETER
        // Something is necessary for HFA's, but what's below (in the FEATURE_INTERPRETER ifdef)
        // doesn't seem to do the proper test. It fires,
        // incorrectly, for a one-word struct that *doesn't* have a ret buff. So we'll try this, instead:
        // We're here because it doesn't have a ret buff. If it would, except that the struct being returned
        // is an HFA, *then* assume the invoker made this slot a ret buff pointer.
        // It's an HFA if the return type is a struct, but it has a non-zero FP return size.
        // (If it were an HFA, but had a ret buff because it was varargs, then we wouldn't be here.
        // Also this test won't work for float enums.
        else if (m_methodSig.GetReturnType() == ELEMENT_TYPE_VALUETYPE
                 && m_argIt.GetFPReturnSize() > 0)
#else // FEATURE_INTERPRETER
        else if (ELEMENT_TYPE_VALUETYPE == m_methodSig.GetReturnTypeNormalized())
#endif // FEATURE_INTERPRETER
        {
            pvRetBuff = ArgSlotToPtr(pArguments[arg++]);
        }
#endif // FEATURE_HFA

#ifdef FEATURE_INTERPRETER
        if (m_argIt.IsVarArg())
        {
            *((LPVOID*)(pTransitionBlock + m_argIt.GetVASigCookieOffset())) = ArgSlotToPtr(pArguments[arg++]);
        }

        if (m_argIt.HasParamType())
        {
            *((LPVOID*)(pTransitionBlock + m_argIt.GetParamTypeArgOffset())) = ArgSlotToPtr(pArguments[arg++]);
        }
#endif

        // Copy each remaining argument into its slot in the transition block.
        int ofs;
        for (; TransitionBlock::InvalidOffset != (ofs = m_argIt.GetNextOffset()); arg++)
        {
#ifdef CALLDESCR_REGTYPEMAP
            FillInRegTypeMap(ofs, m_argIt.GetArgType(), pMap);
#endif

#ifdef CALLDESCR_FPARGREGS
            // Under CALLDESCR_FPARGREGS -ve offsets indicate arguments in floating point registers. If we
            // have at least one such argument we point the call worker at the floating point area of the
            // frame (we leave it null otherwise since the worker can perform a useful optimization if it
            // knows no floating point registers need to be set up).
            if (TransitionBlock::HasFloatRegister(ofs, m_argIt.GetArgLocDescForStructInRegs()) &&
                (pFloatArgumentRegisters == NULL))
            {
                pFloatArgumentRegisters = (FloatArgumentRegisters*)(pTransitionBlock +
                                                                    TransitionBlock::GetOffsetOfFloatArgumentRegisters());
            }
#endif

            ArgDestination argDest(pTransitionBlock, ofs, m_argIt.GetArgLocDescForStructInRegs());

            UINT32 stackSize = m_argIt.GetArgSize();
            // We need to pass in a pointer, but be careful of the ARG_SLOT calling convention. We might already have a pointer in the ARG_SLOT.
            PVOID pSrc = stackSize > sizeof(ARG_SLOT) ? (LPVOID)ArgSlotToPtr(pArguments[arg]) : (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);

#if defined(UNIX_AMD64_ABI)
            if (argDest.IsStructPassedInRegs())
            {
                TypeHandle th;
                m_argIt.GetArgType(&th);

                argDest.CopyStructToRegisters(pSrc, th.AsMethodTable()->GetNumInstanceFieldBytes(), 0);
            }
            else
#endif // UNIX_AMD64_ABI
            {
                PVOID pDest = argDest.GetDestinationAddress();

                switch (stackSize)
                {
                    case 1:
                    case 2:
                    case 4:
                        *((INT32*)pDest) = (INT32)pArguments[arg];
                        break;

                    case 8:
                        *((INT64*)pDest) = pArguments[arg];
                        break;

                    default:
                        // The ARG_SLOT contains a pointer to the value-type
#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
                        if (m_argIt.IsArgPassedByRef())
                        {
                            *(PVOID*)pDest = pSrc;
                        }
                        else
#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
                        if (stackSize > sizeof(ARG_SLOT))
                        {
                            CopyMemory(pDest, ArgSlotToPtr(pArguments[arg]), stackSize);
                        }
                        else
                        {
                            CopyMemory(pDest, (LPVOID) (&pArguments[arg]), stackSize);
                        }
                        break;
                }
            }
        }

        fpReturnSize = m_argIt.GetFPReturnSize();

    } // END GCX_FORBID & ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE

    CallDescrData callDescrData;

    callDescrData.pSrc = pTransitionBlock + sizeof(TransitionBlock);
    _ASSERTE((nStackBytes % TARGET_POINTER_SIZE) == 0);
    callDescrData.numStackSlots = nStackBytes / TARGET_POINTER_SIZE;
#ifdef CALLDESCR_ARGREGS
    callDescrData.pArgumentRegisters = (ArgumentRegisters*)(pTransitionBlock + TransitionBlock::GetOffsetOfArgumentRegisters());
#endif
#ifdef CALLDESCR_RETBUFFARGREG
    callDescrData.pRetBuffArg = (UINT64*)(pTransitionBlock + TransitionBlock::GetOffsetOfRetBuffArgReg());
#endif
#ifdef CALLDESCR_FPARGREGS
    callDescrData.pFloatArgumentRegisters = pFloatArgumentRegisters;
#endif
#ifdef CALLDESCR_REGTYPEMAP
    callDescrData.dwRegTypeMap = dwRegTypeMap;
#endif
    callDescrData.fpReturnSize = fpReturnSize;
    callDescrData.pTarget = m_pCallTarget;

#ifdef FEATURE_INTERPRETER
    if (transitionToPreemptive)
    {
        // Interpreter path: run the call in preemptive mode and capture the
        // Win32 last-error for the interpreter's bookkeeping.
        GCPreemp transitionIfILStub(transitionToPreemptive);
        DWORD* pLastError = &GetThread()->m_dwLastErrorInterp;
        CallDescrWorkerInternal(&callDescrData);
        *pLastError = GetLastError();
    }
    else
#endif // FEATURE_INTERPRETER
    {
        CallDescrWorkerWithHandler(&callDescrData);
    }

    if (pvRetBuff != NULL)
    {
        memcpyNoGCRefs(pvRetBuff, &callDescrData.returnValue, sizeof(callDescrData.returnValue));
    }

    if (pReturnValue != NULL)
    {
        _ASSERTE((DWORD)cbReturnValue <= sizeof(callDescrData.returnValue));
        memcpyNoGCRefs(pReturnValue, &callDescrData.returnValue, cbReturnValue);

#if !defined(HOST_64BIT) && BIGENDIAN
        {
            GCX_FORBID();

            // On 32-bit big-endian hosts a sub-64-bit return lands in the
            // high half of the slot; shift it down to the canonical position.
            if (!m_methodSig.Is64BitReturn())
            {
                pReturnValue[0] >>= 32;
            }
        }
#endif // !defined(HOST_64BIT) && BIGENDIAN
    }
}
// Invoke the parameterless instance constructor of the specified 'ref''s
// class, throwing a MissingMethodException when the class does not declare
// one.  'ref' is GC-protected for the duration of the managed call.
void CallDefaultConstructor(OBJECTREF ref)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    MethodTable *pMT = ref->GetMethodTable();

    PREFIX_ASSUME(pMT != NULL);

    if (!pMT->HasDefaultConstructor())
    {
        SString ctorMethodName(SString::Utf8, COR_CTOR_METHOD_NAME);
        COMPlusThrowNonLocalized(kMissingMethodException, ctorMethodName.GetUnicode());
    }

    GCPROTECT_BEGIN (ref);

    MethodDesc *pMD = pMT->GetDefaultConstructor();

    PREPARE_NONVIRTUAL_CALLSITE_USING_METHODDESC(pMD);
    DECLARE_ARGHOLDER_ARRAY(CtorArgs, 1);
    CtorArgs[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(ref);

    // Call the ctor...
    CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
    CALL_MANAGED_METHOD_NORET(CtorArgs);

    GCPROTECT_END ();
}
| 35.779605 | 166 | 0.658775 | [
"object"
] |
d54b325b9276e2e6cebe8dba1859c2887fe20c46 | 24,096 | cpp | C++ | groups/bsl/bslstl/bslstl_treenode.t.cpp | adambde/bde | a2efe118da642be42b25e81ca986a0fe56078305 | [
"Apache-2.0"
] | 26 | 2015-05-07T04:22:06.000Z | 2022-01-26T09:10:12.000Z | groups/bsl/bslstl/bslstl_treenode.t.cpp | adambde/bde | a2efe118da642be42b25e81ca986a0fe56078305 | [
"Apache-2.0"
] | 3 | 2015-05-07T21:06:36.000Z | 2015-08-28T20:02:18.000Z | groups/bsl/bslstl/bslstl_treenode.t.cpp | adambde/bde | a2efe118da642be42b25e81ca986a0fe56078305 | [
"Apache-2.0"
] | 12 | 2015-05-06T08:41:07.000Z | 2021-11-09T12:52:19.000Z | // bslstl_treenode.t.cpp -*-C++-*-
#include <bslstl_treenode.h>
#include <bslstl_allocatortraits.h>
#include <bslstl_allocator.h>
#include <bslstl_string.h>
#include <bslalg_rbtreeanchor.h>
#include <bslalg_rbtreenode.h>
#include <bslalg_rbtreeutil.h>
#include <bslma_allocator.h>
#include <bslma_testallocator.h>
#include <bslma_defaultallocatorguard.h>
#include <bsls_assert.h>
#include <bsls_bsltestutil.h>
#include <bsls_util.h>
#include <algorithm>
#include <functional>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
using namespace BloombergLP;
using namespace std;
using namespace bslstl;
//=============================================================================
// TEST PLAN
//-----------------------------------------------------------------------------
// Overview
// --------
//
// Global Concerns:
//: o Pointer/reference parameters are declared 'const'.
//: o No memory is ever allocated.
//: o Precondition violations are detected in appropriate build modes.
//-----------------------------------------------------------------------------
// MANIPULATORS
// [ 2] VALUE_TYPE& value();
// [ 2] const VALUE_TYPE& value() const;
// ----------------------------------------------------------------------------
// [ 1] BREATHING TEST
// [ 4] USAGE EXAMPLE
// [ 3] CONCERN: 'value' can be constructed with 'allocator_traits'.
//=============================================================================
// STANDARD BDE ASSERT TEST MACRO
//-----------------------------------------------------------------------------
// NOTE: THIS IS A LOW-LEVEL COMPONENT AND MAY NOT USE ANY C++ LIBRARY
// FUNCTIONS, INCLUDING IOSTREAMS.
static int testStatus = 0;

// When the predicate 'b' is true, report the failed expression 's' observed
// at line 'i' on 'stdout' and bump the global failure count, saturating so
// that 'testStatus' never exceeds 101.
static void aSsErT(bool b, const char *s, int i) {
    if (!b) {
        return;
    }
    printf("Error " __FILE__ "(%d): %s (failed)\n", i, s);
    if (testStatus >= 0 && testStatus <= 100) {
        ++testStatus;
    }
}
//=============================================================================
// STANDARD BDE TEST DRIVER MACROS
//-----------------------------------------------------------------------------
#define ASSERT BSLS_BSLTESTUTIL_ASSERT
#define LOOP_ASSERT BSLS_BSLTESTUTIL_LOOP_ASSERT
#define LOOP0_ASSERT BSLS_BSLTESTUTIL_LOOP0_ASSERT
#define LOOP1_ASSERT BSLS_BSLTESTUTIL_LOOP1_ASSERT
#define LOOP2_ASSERT BSLS_BSLTESTUTIL_LOOP2_ASSERT
#define LOOP3_ASSERT BSLS_BSLTESTUTIL_LOOP3_ASSERT
#define LOOP4_ASSERT BSLS_BSLTESTUTIL_LOOP4_ASSERT
#define LOOP5_ASSERT BSLS_BSLTESTUTIL_LOOP5_ASSERT
#define LOOP6_ASSERT BSLS_BSLTESTUTIL_LOOP6_ASSERT
#define ASSERTV BSLS_BSLTESTUTIL_ASSERTV
#define Q BSLS_BSLTESTUTIL_Q // Quote identifier literally.
#define P BSLS_BSLTESTUTIL_P // Print identifier and value.
#define P_ BSLS_BSLTESTUTIL_P_ // P(X) without '\n'.
#define T_ BSLS_BSLTESTUTIL_T_ // Print a tab (w/o newline).
#define L_ BSLS_BSLTESTUTIL_L_ // current Line number
// ============================================================================
// NEGATIVE-TEST MACRO ABBREVIATIONS
// ----------------------------------------------------------------------------
#define ASSERT_SAFE_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_PASS(EXPR)
#define ASSERT_SAFE_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_FAIL(EXPR)
#define ASSERT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_PASS(EXPR)
#define ASSERT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_FAIL(EXPR)
#define ASSERT_OPT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_PASS(EXPR)
#define ASSERT_OPT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_FAIL(EXPR)
//=============================================================================
// GLOBAL HELPER FUNCTIONS FOR TESTING
//-----------------------------------------------------------------------------
typedef bslalg::RbTreeNode Node;
// Fundamental-type-specific print functions.
namespace bsl {

// Print the specified 'bsl::string' 's' to 'stdout', enclosed in double
// quotes, and flush the stream so output interleaves correctly with other
// diagnostics.
inline void debugprint(const bsl::string &s) {
    printf("\"%s\"", s.c_str()); fflush(stdout);
}

}  // close namespace bsl
//=============================================================================
// GLOBAL TYPEDEFS/CONSTANTS FOR TESTING
//-----------------------------------------------------------------------------
#ifdef BSLS_PLATFORM_CPU_32_BIT
#define SUFFICIENTLY_LONG_STRING "123456789012345678901234567890123"
#else // 64_BIT
#define SUFFICIENTLY_LONG_STRING "12345678901234567890123456789012" \
"123456789012345678901234567890123"
#endif
BSLMF_ASSERT(sizeof SUFFICIENTLY_LONG_STRING > sizeof(bsl::string));
class TestType1 {
    // Instrumented type that records, in a class-wide flag, whether any
    // object of this type has been constructed since the last 'reset'.

    // CLASS DATA
    static bool s_wasConstructed;  // 'true' once a constructor has run

  public:
    // CLASS METHODS
    static bool isConstructed() { return s_wasConstructed; }
        // Return 'true' if an object has been constructed since 'reset'.

    static void reset() { s_wasConstructed = false; }
        // Clear the construction flag.

    // CREATORS
    TestType1() { s_wasConstructed = true; }
        // Create an object, recording the construction.
};

bool TestType1::s_wasConstructed = false;
//=============================================================================
// USAGE EXAMPLE
//-----------------------------------------------------------------------------
///Usage
///-----
// In this section we show intended usage of this component.
//
///Example 1: Allocating and Deallocating 'TreeNode' Objects.
/// - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// In the following example we define a factory class for allocating and
// destroying 'TreeNode' objects.
//
// First, we define the interface for the class 'NodeFactory':
//..
template <class VALUE, class ALLOCATOR>
class NodeFactory {
    // Factory for allocating, constructing, destroying, and deallocating
    // 'TreeNode<VALUE>' objects using (a rebound copy of) the parameterized
    // 'ALLOCATOR'.
//..
// The parameterized 'ALLOCATOR' is intended to allocate objects of the
// parameterized 'VALUE', so to use it to allocate objects of 'TreeNode<VALUE>'
// we must rebind it to the tree node type.  Note that in general, we use
// 'allocator_traits' to perform actions using an allocator (including the
// rebind below):
//..
    // PRIVATE TYPES
    typedef typename bsl::allocator_traits<ALLOCATOR>::template
                             rebind_traits<TreeNode<VALUE> > AllocatorTraits;
    typedef typename AllocatorTraits::allocator_type         NodeAllocator;

    // DATA
    NodeAllocator d_allocator;  // rebound tree-node allocator

    // NOT IMPLEMENTED
    NodeFactory(const NodeFactory&);
    NodeFactory& operator=(const NodeFactory&);

  public:
    // CREATORS
    NodeFactory(const ALLOCATOR& allocator);
        // Create a tree node-factory that will use the specified
        // 'allocator' to supply memory.

    // MANIPULATORS
    TreeNode<VALUE> *createNode(const VALUE& value);
        // Create a new 'TreeNode' object holding the specified 'value'.

    void deleteNode(bslalg::RbTreeNode *node);
        // Destroy and deallocate the specified 'node'.  The behavior is
        // undefined unless 'node' is the address of a
        // 'TreeNode<VALUE>' object.
};
//..
// Now, we implement the 'NodeFactory' type:
//..
template <class VALUE, class ALLOCATOR>
inline
NodeFactory<VALUE, ALLOCATOR>::NodeFactory(const ALLOCATOR& allocator)
    // Create a factory whose (rebound) node allocator is copy-initialized
    // from the specified 'allocator'.
: d_allocator(allocator)
{
}
//..
// We implement the 'createNode' function by using the rebound
// 'allocator_traits' for our allocator to in-place copy-construct the
// supplied 'value' into the 'value' data member of our 'result' node
// object. Note that 'TreeNode' is a POD-like type, without a constructor, so
// we do not need to call its constructor here:
//..
template <class VALUE, class ALLOCATOR>
inline
TreeNode<VALUE> *
NodeFactory<VALUE, ALLOCATOR>::createNode(const VALUE& value)
    // Allocate a new 'TreeNode<VALUE>' and copy-construct the specified
    // 'value' into its 'value' data member.  Note that 'TreeNode' itself is
    // POD-like, so no constructor call is needed for the node proper.
{
    TreeNode<VALUE> *node = AllocatorTraits::allocate(d_allocator, 1);
    VALUE *valueAddress = bsls::Util::addressOf(node->value());
    AllocatorTraits::construct(d_allocator, valueAddress, value);
    return node;
}
//..
// Finally, we define the function 'deleteNode', for destroying 'TreeNode'
// objects. Again, we use the rebound 'allocator_traits' for our tree node
// type, this time to destroy the 'd_value' date member of node, and then to
// deallocate its footprint. Note that 'TreeNode' is a POD-like type,
// so we do not need to call its destructor here:
//..
template <class VALUE, class ALLOCATOR>
inline
void NodeFactory<VALUE, ALLOCATOR>::deleteNode(bslalg::RbTreeNode *node)
    // Destroy the 'value' member of the specified 'node' and release the
    // node's footprint.  The behavior is undefined unless 'node' addresses a
    // 'TreeNode<VALUE>' object.  Note that 'TreeNode' is POD-like, so its
    // own destructor need not be invoked.
{
    TreeNode<VALUE> *target = static_cast<TreeNode<VALUE> *>(node);
    AllocatorTraits::destroy(d_allocator,
                             bsls::Util::addressOf(target->value()));
    AllocatorTraits::deallocate(d_allocator, target, 1);
}
//..
//
///Example 2: Creating a Simple Tree of 'TreeNode' Objects.
/// - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// In the following example we create a container-type 'Set' for
// holding a set of values of a parameterized 'VALUE'.
//
// First, we define a comparator for 'VALUE' of 'TreeNode<VALUE>' objects.
// This type is designed to be supplied to functions in 'bslalg::RbTreeUtil'.
// Note that, for simplicity, this type uses 'operator<' to compare values,
// rather than a client defined comparator type.
//..
template <class VALUE>
class Comparator {
    // Stateless comparator between 'VALUE' objects and tree nodes (accessed
    // through their 'bslalg::RbTreeNode' base class), suitable for supplying
    // to the functions of 'bslalg::RbTreeUtil'.

  public:
    // CREATORS
    Comparator() {}
        // Create a node-value comparator.

    // ACCESSORS
    bool operator()(const VALUE&              lhs,
                    const bslalg::RbTreeNode& rhs) const;
    bool operator()(const bslalg::RbTreeNode& lhs,
                    const VALUE&              rhs) const;
        // Return 'true' if the specified 'lhs' is less than (ordered
        // before) the specified 'rhs', and 'false' otherwise.  The
        // behavior is undefined unless the supplied 'bslalg::RbTreeNode'
        // object is of the derived 'TreeNode<VALUE>' type.
};
//..
// Then, we implement the comparison methods of 'Comparator'. Note that the
// supplied 'RbTreeNode' objects must be 'static_cast' to
// 'TreeNode<VALUE>' to access their value:
//..
template <class VALUE>
inline
bool Comparator<VALUE>::operator()(const VALUE&              lhs,
                                   const bslalg::RbTreeNode& rhs) const
    // Return 'true' if 'lhs' orders before the value held by the tree node
    // 'rhs'; 'rhs' must actually be a 'TreeNode<VALUE>'.
{
    const TreeNode<VALUE>& rhsNode = static_cast<const TreeNode<VALUE>&>(rhs);
    return lhs < rhsNode.value();
}
template <class VALUE>
inline
bool Comparator<VALUE>::operator()(const bslalg::RbTreeNode& lhs,
                                   const VALUE&              rhs) const
    // Return 'true' if the value held by the tree node 'lhs' orders before
    // 'rhs'; 'lhs' must actually be a 'TreeNode<VALUE>'.
{
    const TreeNode<VALUE>& lhsNode = static_cast<const TreeNode<VALUE>&>(lhs);
    return lhsNode.value() < rhs;
}
//..
// Now, having defined the requisite helper types, we define the public
// interface for 'Set'. Note that for the purposes of illustrating the use of
// 'TreeNode' a number of simplifications have been made. For example, this
// implementation provides only 'insert', 'remove', 'isMember', and
// 'numMembers' operations:
//..
template <class VALUE,
          class ALLOCATOR = bsl::allocator<VALUE> >
class Set {
    // Simplified set container of unique 'VALUE' elements, backed by a
    // red-black tree of 'TreeNode<VALUE>' objects managed through
    // 'bslalg::RbTreeUtil'.

    // PRIVATE TYPES
    typedef Comparator<VALUE>             ValueComparator;
    typedef NodeFactory<VALUE, ALLOCATOR> Factory;

    // DATA
    bslalg::RbTreeAnchor d_tree;     // tree of node objects
    Factory              d_factory;  // allocator for node objects

    // NOT IMPLEMENTED
    Set(const Set&);
    Set& operator=(const Set&);

  public:
    // CREATORS
    Set(const ALLOCATOR& allocator = ALLOCATOR());
        // Create an empty set.  Optionally specify a 'allocator' used to
        // supply memory.  If 'allocator' is not specified, a default
        // constructed 'ALLOCATOR' object is used.

    ~Set();
        // Destroy this set.

    // MANIPULATORS
    void insert(const VALUE& value);
        // Insert the specified value into this set.

    bool remove(const VALUE& value);
        // If 'value' is a member of this set, then remove it and return
        // 'true', and return 'false' otherwise.

    // ACCESSORS
    bool isElement(const VALUE& value) const;
        // Return 'true' if the specified 'value' is a member of this set,
        // and 'false' otherwise.

    int numElements() const;
        // Return the number of elements in this set.
};
//..
// Now, we define the implementation of 'Set':
//..
// CREATORS
template <class VALUE, class ALLOCATOR>
inline
Set<VALUE, ALLOCATOR>::Set(const ALLOCATOR& allocator)
    // Create an empty set whose node factory uses the specified 'allocator'.
: d_tree()
, d_factory(allocator)
{
}
template <class VALUE, class ALLOCATOR>
inline
Set<VALUE, ALLOCATOR>::~Set()
    // Destroy this set, deleting every node via the node factory.
{
    bslalg::RbTreeUtil::deleteTree(&d_tree, &d_factory);
}
// MANIPULATORS
template <class VALUE, class ALLOCATOR>
void Set<VALUE, ALLOCATOR>::insert(const VALUE& value)
    // Insert the specified 'value' into this set unless an equivalent
    // element is already present, in which case this method has no effect.
{
    ValueComparator     cmp;
    int                 cmpResult;
    bslalg::RbTreeNode *insertionParent =
        bslalg::RbTreeUtil::findUniqueInsertLocation(&cmpResult,
                                                     &d_tree,
                                                     cmp,
                                                     value);
    if (0 == cmpResult) {
        return;                       // 'value' is already a member
    }
    bslalg::RbTreeUtil::insertAt(&d_tree,
                                 insertionParent,
                                 cmpResult < 0,
                                 d_factory.createNode(value));
}
template <class VALUE, class ALLOCATOR>
bool Set<VALUE, ALLOCATOR>::remove(const VALUE& value)
    // If the specified 'value' is a member of this set, unlink and delete
    // its node and return 'true'; otherwise return 'false'.
{
    bslalg::RbTreeNode *found =
        bslalg::RbTreeUtil::find(d_tree, ValueComparator(), value);
    if (0 == found) {
        return false;                                             // RETURN
    }
    bslalg::RbTreeUtil::remove(&d_tree, found);
    d_factory.deleteNode(found);
    return true;
}
// ACCESSORS
template <class VALUE, class ALLOCATOR>
inline
bool Set<VALUE, ALLOCATOR>::isElement(const VALUE& value) const
    // Return 'true' if the specified 'value' is a member of this set, and
    // 'false' otherwise.
{
    return 0 != bslalg::RbTreeUtil::find(d_tree, ValueComparator(), value);
}
template <class VALUE, class ALLOCATOR>
inline
int Set<VALUE, ALLOCATOR>::numElements() const
    // Return the number of nodes (i.e., elements) in the underlying tree.
{
    return d_tree.numNodes();
}
//..
// Notice that the definition and implementation of 'Set' never directly
// uses the 'TreeNode' type, but instead use it indirectly through
// 'Comparator', and 'NodeFactory', and uses it via its base-class
// 'bslalg::RbTreeNode'.
//=============================================================================
// MAIN PROGRAM
//-----------------------------------------------------------------------------
int main(int argc, char *argv[])
{
int test = argc > 1 ? atoi(argv[1]) : 0;
bool verbose = argc > 2;
bool veryVerbose = argc > 3;
bool veryVeryVerbose = argc > 4;
bool veryVeryVeryVerbose = argc > 5;
(void) veryVerbose;
(void) veryVeryVerbose;
(void) veryVeryVeryVerbose;
printf("TEST " __FILE__ " CASE %d\n", test);
switch (test) { case 0:
case 4: {
// --------------------------------------------------------------------
// USAGE EXAMPLE
//
// Concerns:
//: 1 The usage example provided in the component header file compiles,
//: links, and runs as shown.
//
// Plan:
//: 1 Incorporate usage example from header into test driver, remove
//: leading comment characters, and replace 'assert' with 'ASSERT'.
//: (C-1)
//
// Testing:
// USAGE EXAMPLE
// --------------------------------------------------------------------
if (verbose) printf("\nUSAGE EXAMPLE"
"\n=============\n");
// Finally, we test our 'Set'.
//..
bslma::TestAllocator defaultAllocator("defaultAllocator");
bslma::DefaultAllocatorGuard defaultGuard(&defaultAllocator);
bslma::TestAllocator objectAllocator("objectAllocator");
Set<int> set(&objectAllocator);
ASSERT(0 == defaultAllocator.numBytesInUse());
ASSERT(0 == objectAllocator.numBytesInUse());
ASSERT(0 == set.numElements());
set.insert(1);
ASSERT(set.isElement(1));
ASSERT(1 == set.numElements());
set.insert(1);
ASSERT(set.isElement(1));
ASSERT(1 == set.numElements());
set.insert(2);
ASSERT(set.isElement(1));
ASSERT(set.isElement(2));
ASSERT(2 == set.numElements());
ASSERT(0 == defaultAllocator.numBytesInUse());
ASSERT(0 < objectAllocator.numBytesInUse());
//..
} break;
case 3: {
// --------------------------------------------------------------------
// CONCERN: 'value' CAN BE CONSTRUCTED WITH 'allocator_traits'.
//
// Concerns:
//: 1 'value' can be constructed with 'allocator_traits::construct'.
//
// Plan:
//: 1 Create a class of which its construction can be verified. Use
//: 'allocator_traits::construct' to construct the class and verify
//: that the class's constructor is called.
//
// Testing:
// CONCERN: 'value' can be constructed with 'allocator_traits'.
// --------------------------------------------------------------------
bslma::TestAllocator da("default");
bslma::TestAllocator oa("object");
if (verbose) printf(
"\nTesting construction of 'value' using allocator_traits.\n");
{
typedef TestType1 Type;
typedef TreeNode<Type> Obj;
typedef bsl::allocator<Obj> Alloc;
typedef bsl::allocator_traits<Alloc> AllocTraits;
Alloc allocator(&oa);
ASSERTV(!Type::isConstructed());
Obj *xPtr = AllocTraits::allocate(allocator, 1);
Obj& mX = *xPtr; const Obj& X = mX;
(void) X;
AllocTraits::construct(allocator,
bsls::Util::addressOf(mX.value()));
ASSERTV(Type::isConstructed());
ASSERTV(0 == da.numBlocksInUse());
ASSERTV(1 == oa.numBlocksInUse());
mX.value().~Type();
AllocTraits::deallocate(allocator, &mX, 1);
}
} break;
case 2: {
// --------------------------------------------------------------------
// PRIMARY MANIPULATORS AND BASIC ACCESSORS
//
// Concerns:
//: 1 Manipulators can set value.
//:
//: 2 Accessor return value set by manipulator.
//:
//: 3 Accessor is declared const.
//
// Plan:
//: 1 Create a 'TreeNode' with 'VALUE_TYPE' as 'int' and set 'value'
//: distinct numbers. Verify the values are set with the accessor.
//:
//: 2 Create a 'TreeNode' with a type that has a constructor that can
//: be verified if it has been invoked. Verify that the constructor
//: is invoked when 'allocator_traits::construct' is used.
//
// Testing:
// VALUE_TYPE& value();
// const VALUE_TYPE& value() const;
// --------------------------------------------------------------------
bslma::TestAllocator da("default");
bslma::TestAllocator oa("object");
bslma::DefaultAllocatorGuard defaultGuard(&da);
if (verbose) printf("\nTesting manipulator and accessor for 'int'.\n");
{
typedef int Type;
typedef TreeNode<Type> Obj;
typedef bsl::allocator<Obj> Alloc;
typedef bsl::allocator_traits<Alloc> AllocTraits;
Alloc allocator(&oa);
Obj *xPtr = AllocTraits::allocate(allocator, 1);
Obj& mX = *xPtr; const Obj& X = mX;
mX.value() = 0;
ASSERTV(X.value(), 0 == X.value());
mX.value() = 1;
ASSERTV(X.value(), 1 == X.value());
mX.value() = INT_MAX;
ASSERTV(X.value(), INT_MAX == X.value());
ASSERTV(0 == da.numBlocksTotal());
ASSERTV(1 == oa.numBlocksInUse());
mX.value().~Type();
AllocTraits::deallocate(allocator, &mX, 1);
ASSERTV(0 == oa.numBlocksInUse());
}
if (verbose) printf(
"\nTesting manipulator and accessor for 'string'.\n");
{
typedef bsl::string Type;
typedef TreeNode<Type> Obj;
typedef bsl::allocator<Obj> Alloc;
typedef bsl::allocator_traits<Alloc> AllocTraits;
Alloc allocator(&oa);
Obj *xPtr = AllocTraits::allocate(allocator, 1);
Obj& mX = *xPtr; const Obj& X = mX;
AllocTraits::construct(allocator,
bsls::Util::addressOf(mX.value()));
const char D[] = "";
const char A[] = "a_" SUFFICIENTLY_LONG_STRING;
bslma::TestAllocator scratch("scratch");
const bsl::string B("ABC", &scratch);
Type value(&scratch);
mX.value() = D;
value = X.value();
ASSERTV(value, D == value);
ASSERTV(1 == oa.numBlocksInUse());
mX.value() = A;
value = X.value();
ASSERTV(value, A == value);
ASSERTV(2 == oa.numBlocksInUse());
mX.value() = B;
value = X.value();
ASSERTV(value, B == value);
ASSERTV(2 == oa.numBlocksInUse());
ASSERTV(0 == da.numBlocksTotal());
mX.value().~Type();
AllocTraits::deallocate(allocator, &mX, 1);
ASSERTV(0 == oa.numBlocksInUse());
}
} break;
case 1: {
// --------------------------------------------------------------------
// BREATHING TEST:
// Developers' Sandbox.
//
// Plan:
// Perform and ad-hoc test of the primary modifiers and accessors.
//
// Testing:
// This "test" *exercises* basic functionality, but *tests* nothing.
// --------------------------------------------------------------------
if (verbose) printf("\nBREATHING TEST"
"\n==============\n");
bslma::TestAllocator da("default");
bslma::DefaultAllocatorGuard defaultGuard(&da);
typedef TreeNode<int> Obj;
Obj *xPtr = static_cast<Obj *>(da.allocate(sizeof(Obj)));
Obj& mX = *xPtr; const Obj& X = mX;
mX.value() = 0;
ASSERTV(X.value(), 0 == X.value());
mX.value() = 1;
ASSERTV(X.value(), 1 == X.value());
da.deallocate(&mX);
ASSERTV(0 == da.numBytesInUse());
} break;
default: {
fprintf(stderr, "WARNING: CASE `%d' NOT FOUND.\n", test);
testStatus = -1;
}
}
if (testStatus > 0) {
fprintf(stderr, "Error, non-zero test status = %d.\n", testStatus);
}
return testStatus;
}
// ----------------------------------------------------------------------------
// Copyright 2013 Bloomberg Finance L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------- END-OF-FILE ----------------------------------
| 35.857143 | 79 | 0.538762 | [
"object"
] |
d55009fe9f701d23cc0f4dbc3cdc9295252f9716 | 9,675 | cc | C++ | App/relumaxpool_slalom.cc | LukeZheZhu/slalom | 96ff15977b7058b96d2a00a51c6aabbe729cc6d5 | [
"MIT"
] | 128 | 2018-06-11T06:07:21.000Z | 2022-03-30T19:33:29.000Z | App/relumaxpool_slalom.cc | LukeZheZhu/slalom | 96ff15977b7058b96d2a00a51c6aabbe729cc6d5 | [
"MIT"
] | 41 | 2018-09-03T15:33:35.000Z | 2022-02-09T23:40:03.000Z | App/relumaxpool_slalom.cc | LukeZheZhu/slalom | 96ff15977b7058b96d2a00a51c6aabbe729cc6d5 | [
"MIT"
] | 46 | 2018-11-23T09:11:20.000Z | 2022-03-21T08:38:39.000Z | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include <dlfcn.h>
namespace tensorflow {
Eigen::PaddingType BrainPadding2EigenPadding(Padding padding) {
switch (padding) {
case Padding::VALID:
return Eigen::PADDING_VALID;
case Padding::SAME:
return Eigen::PADDING_SAME;
}
return Eigen::PADDING_SAME; // Prevent compiler warning about missing return
}
// A helper class to manage sizes and shapes for pooling operations.
struct PoolParameters {
// Updates context->status if there is an invalid input.
PoolParameters(OpKernelContext* context, const std::vector<int32>& ksize,
const std::vector<int32>& stride, Padding padding,
TensorFormat data_format, const TensorShape& tensor_in_shape);
// Returns the shape of the output for "forward" pooling operations.
TensorShape forward_output_shape();
int depth;
int tensor_in_cols;
int tensor_in_rows;
int tensor_in_batch;
int window_rows;
int window_cols;
int depth_window;
int row_stride;
int col_stride;
int depth_stride;
int64 out_height;
int64 out_width;
int out_depth;
int64 pad_rows;
int64 pad_cols;
int pad_depth;
TensorFormat data_format;
};
PoolParameters::PoolParameters(OpKernelContext* context,
const std::vector<int32>& ksize,
const std::vector<int32>& stride,
Padding padding, TensorFormat data_format,
const TensorShape& tensor_in_shape) {
// For maxpooling, tensor_in should have 2 spatial dimensions.
// Note: the total number of dimensions could be 4 for NHWC, NCHW,
// or 5 for NCHW_VECT_C.
OP_REQUIRES(context,
GetTensorSpatialDims(tensor_in_shape.dims(), data_format) == 2,
errors::InvalidArgument(
"tensor_in_shape must have 2 spatial dimensions. ",
tensor_in_shape.dims(), " ", data_format));
this->data_format = data_format;
depth = GetTensorDim(tensor_in_shape, data_format, 'C') *
(data_format == FORMAT_NCHW_VECT_C ? 4 : 1);
tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, 'W');
tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, 'H');
tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N');
window_rows = GetTensorDim(ksize, data_format, 'H');
window_cols = GetTensorDim(ksize, data_format, 'W');
depth_window = GetTensorDim(ksize, data_format, 'C');
row_stride = GetTensorDim(stride, data_format, 'H');
col_stride = GetTensorDim(stride, data_format, 'W');
depth_stride = GetTensorDim(stride, data_format, 'C');
// We only support 2D pooling across width/height and depthwise
// pooling, not a combination.
OP_REQUIRES(context,
(depth_window == 1 || (window_rows == 1 && window_cols == 1)),
errors::Unimplemented(
"MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height."));
if (depth_window == 1) {
OP_REQUIRES_OK(
context, GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride,
padding, &out_height, &pad_rows));
OP_REQUIRES_OK(
context, GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride,
padding, &out_width, &pad_cols));
pad_depth = 0;
out_depth = depth;
}
}
TensorShape PoolParameters::forward_output_shape() {
if (depth_window == 1) {
// Spatial pooling
return ShapeFromFormat(data_format, tensor_in_batch, out_height, out_width,
depth);
} else {
// Depthwise pooling
return TensorShape(
{tensor_in_batch, tensor_in_rows, tensor_in_cols, out_depth});
}
}
template <typename Device, typename T>
class ReluMaxPoolSlalomOp : public OpKernel {
public:
explicit ReluMaxPoolSlalomOp(OpKernelConstruction* context) : OpKernel(context) {
string data_format;
auto status = context->GetAttr("data_format", &data_format);
if (status.ok()) {
OP_REQUIRES(context, FormatFromString(data_format, &data_format_),
errors::InvalidArgument("Invalid data format"));
OP_REQUIRES(
context, data_format_ == FORMAT_NHWC,
errors::InvalidArgument("Default MaxPoolingOp only supports NHWC."));
} else {
data_format_ = FORMAT_NHWC;
}
OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_));
OP_REQUIRES(context, ksize_.size() == 4,
errors::InvalidArgument("Sliding window ksize field must "
"specify 4 dimensions"));
OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_));
OP_REQUIRES(context, stride_.size() == 4,
errors::InvalidArgument("Sliding window stride field must "
"specify 4 dimensions"));
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1,
errors::Unimplemented(
"Pooling is not yet supported on the batch dimension."));
#ifdef USE_SGX
OP_REQUIRES_OK(context, context->GetAttr("eid_low", &eid_low_));
OP_REQUIRES_OK(context, context->GetAttr("eid_high", &eid_high_));
lib_ = dlopen("App/enclave_bridge.so", RTLD_NOW);
#else
lib_ = dlopen("lib/sgxdnn.so", RTLD_NOW);
#endif
}
void Compute(OpKernelContext* context) override {
const Tensor& tensor_in = context->input(0);
const Tensor& blind = context->input(1);
//std::cout << "in relumaxpool!" << std::endl;
//std::cout << "input: " << tensor_in.DebugString() << std::endl;
//std::cout << "blind: " << blind.DebugString() << std::endl;
PoolParameters params{context, ksize_, stride_,
padding_, FORMAT_NHWC, tensor_in.shape()};
if (!context->status().ok()) {
return;
}
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
0, params.forward_output_shape(), &output));
SpatialMaxPool(context, output, tensor_in, blind, params, padding_);
}
private:
void SpatialMaxPool(OpKernelContext* context, Tensor* output,
const Tensor& input, const Tensor& blind,
const PoolParameters& params,
const Padding& padding) {
Eigen::PaddingType pt = BrainPadding2EigenPadding(padding);
auto dim_in_ = input.tensor<T, 4>().dimensions();
long int dim_in[4] = {dim_in_[0], dim_in_[1], dim_in_[2], dim_in_[3]};
auto dim_out_ = output->tensor<T, 4>().dimensions();
long int dim_out[4] = {dim_out_[0], dim_out_[1], dim_out_[2], dim_out_[3]};
#ifdef USE_SGX
unsigned long int eid_ = (eid_high_ << 32) + eid_low_;
typedef void (*maxpoolrelu_ecall)(unsigned long int eid, float* in, float* out, float* blind,
long int dim_in[4], long int dim_out[4],
int window_rows, int window_cols, int row_stride, int col_stride, bool same_padding);
dlerror();
maxpoolrelu_ecall mpr = (maxpoolrelu_ecall) dlsym(lib_, "slalom_maxpoolrelu");
const char *dlsym_error = dlerror();
OP_REQUIRES(context, !dlsym_error, errors::Unknown("loading of maxpoolrelu failed: ", dlsym_error));
mpr(eid_, (float*) input.flat<T>().data(), (float*) output->flat<T>().data(), (float*) blind.flat<T>().data(),
dim_in, dim_out, params.window_rows, params.window_cols, params.row_stride, params.col_stride, (pt == Eigen::PaddingType::PADDING_SAME));
#else
typedef void (*maxpoolrelu_ecall)(float* in, float* out, float* blind,
long int dim_in[4], long int dim_out[4],
int window_rows, int window_cols, int row_stride, int col_stride, bool same_padding);
dlerror();
maxpoolrelu_ecall mpr = (maxpoolrelu_ecall) dlsym(lib_, "slalom_maxpoolrelu");
const char *dlsym_error = dlerror();
OP_REQUIRES(context, !dlsym_error, errors::Unknown("loading of maxpoolrelu failed: ", dlsym_error));
mpr((float*) input.flat<T>().data(), (float*) output->flat<T>().data(), (float*) blind.flat<T>().data(),
dim_in, dim_out, params.window_rows, params.window_cols, params.row_stride, params.col_stride, (pt == Eigen::PaddingType::PADDING_SAME));
#endif
}
std::vector<int32> ksize_;
std::vector<int32> stride_;
Padding padding_;
TensorFormat data_format_;
void* lib_;
#ifdef USE_SGX
int64 eid_low_;
int64 eid_high_;
#endif
};
typedef Eigen::ThreadPoolDevice CPUDevice;
REGISTER_KERNEL_BUILDER(Name("ReluMaxPoolSlalom").Device(DEVICE_CPU), ReluMaxPoolSlalomOp<CPUDevice, float>);
REGISTER_OP("ReluMaxPoolSlalom")
.Attr(
"T: {half, bfloat16, float, double, int32, int64, uint8, int16, int8, "
"uint16, qint8} = DT_FLOAT")
.Attr("ksize: list(int) >= 4")
.Attr("strides: list(int) >= 4")
.Attr(GetPaddingAttrString())
.Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'")
#ifdef USE_SGX
.Attr("eid_low: int")
.Attr("eid_high: int")
#endif
.Input("inp: float")
.Input("blind: float")
.Output("output: T")
.SetShapeFn(shape_inference::MaxPoolShape);
} // namespace tensorflow
#undef EIGEN_USE_THREADS
| 37.068966 | 145 | 0.658088 | [
"shape",
"vector"
] |
d5508762878b342671699fb0c3bb591432340702 | 7,107 | hpp | C++ | ThirdParty-mod/java2cpp/android/app/KeyguardManager.hpp | kakashidinho/HQEngine | 8125b290afa7c62db6cc6eac14e964d8138c7fd0 | [
"MIT"
] | 1 | 2019-04-03T01:53:28.000Z | 2019-04-03T01:53:28.000Z | ThirdParty-mod/java2cpp/android/app/KeyguardManager.hpp | kakashidinho/HQEngine | 8125b290afa7c62db6cc6eac14e964d8138c7fd0 | [
"MIT"
] | null | null | null | ThirdParty-mod/java2cpp/android/app/KeyguardManager.hpp | kakashidinho/HQEngine | 8125b290afa7c62db6cc6eac14e964d8138c7fd0 | [
"MIT"
] | null | null | null | /*================================================================================
code generated by: java2cpp
author: Zoran Angelov, mailto://baldzar@gmail.com
class: android.app.KeyguardManager
================================================================================*/
#ifndef J2CPP_INCLUDE_IMPLEMENTATION
#ifndef J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_DECL
#define J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_DECL
namespace j2cpp { namespace java { namespace lang { class Object; } } }
namespace j2cpp { namespace java { namespace lang { class String; } } }
namespace j2cpp { namespace android { namespace app { namespace KeyguardManager_ { class OnKeyguardExitResult; } } } }
namespace j2cpp { namespace android { namespace app { namespace KeyguardManager_ { class KeyguardLock; } } } }
#include <android/app/KeyguardManager.hpp>
#include <java/lang/Object.hpp>
#include <java/lang/String.hpp>
namespace j2cpp {
namespace android { namespace app {
class KeyguardManager;
namespace KeyguardManager_ {
class OnKeyguardExitResult;
class OnKeyguardExitResult
: public object<OnKeyguardExitResult>
{
public:
J2CPP_DECLARE_CLASS
J2CPP_DECLARE_METHOD(0)
explicit OnKeyguardExitResult(jobject jobj)
: object<OnKeyguardExitResult>(jobj)
{
}
operator local_ref<java::lang::Object>() const;
void onKeyguardExitResult(jboolean);
}; //class OnKeyguardExitResult
class KeyguardLock;
class KeyguardLock
: public object<KeyguardLock>
{
public:
J2CPP_DECLARE_CLASS
J2CPP_DECLARE_METHOD(0)
J2CPP_DECLARE_METHOD(1)
J2CPP_DECLARE_METHOD(2)
J2CPP_DECLARE_FIELD(0)
explicit KeyguardLock(jobject jobj)
: object<KeyguardLock>(jobj)
{
}
operator local_ref<java::lang::Object>() const;
void disableKeyguard();
void reenableKeyguard();
}; //class KeyguardLock
} //namespace KeyguardManager_
class KeyguardManager
: public object<KeyguardManager>
{
public:
J2CPP_DECLARE_CLASS
J2CPP_DECLARE_METHOD(0)
J2CPP_DECLARE_METHOD(1)
J2CPP_DECLARE_METHOD(2)
J2CPP_DECLARE_METHOD(3)
typedef KeyguardManager_::OnKeyguardExitResult OnKeyguardExitResult;
typedef KeyguardManager_::KeyguardLock KeyguardLock;
explicit KeyguardManager(jobject jobj)
: object<KeyguardManager>(jobj)
{
}
operator local_ref<java::lang::Object>() const;
local_ref< android::app::KeyguardManager_::KeyguardLock > newKeyguardLock(local_ref< java::lang::String > const&);
jboolean inKeyguardRestrictedInputMode();
void exitKeyguardSecurely(local_ref< android::app::KeyguardManager_::OnKeyguardExitResult > const&);
}; //class KeyguardManager
} //namespace app
} //namespace android
} //namespace j2cpp
#endif //J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_DECL
#else //J2CPP_INCLUDE_IMPLEMENTATION
#ifndef J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_IMPL
#define J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_IMPL
namespace j2cpp {
android::app::KeyguardManager_::OnKeyguardExitResult::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
void android::app::KeyguardManager_::OnKeyguardExitResult::onKeyguardExitResult(jboolean a0)
{
return call_method<
android::app::KeyguardManager_::OnKeyguardExitResult::J2CPP_CLASS_NAME,
android::app::KeyguardManager_::OnKeyguardExitResult::J2CPP_METHOD_NAME(0),
android::app::KeyguardManager_::OnKeyguardExitResult::J2CPP_METHOD_SIGNATURE(0),
void
>(get_jobject(), a0);
}
J2CPP_DEFINE_CLASS(android::app::KeyguardManager_::OnKeyguardExitResult,"android/app/KeyguardManager$OnKeyguardExitResult")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager_::OnKeyguardExitResult,0,"onKeyguardExitResult","(Z)V")
android::app::KeyguardManager_::KeyguardLock::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
void android::app::KeyguardManager_::KeyguardLock::disableKeyguard()
{
return call_method<
android::app::KeyguardManager_::KeyguardLock::J2CPP_CLASS_NAME,
android::app::KeyguardManager_::KeyguardLock::J2CPP_METHOD_NAME(1),
android::app::KeyguardManager_::KeyguardLock::J2CPP_METHOD_SIGNATURE(1),
void
>(get_jobject());
}
void android::app::KeyguardManager_::KeyguardLock::reenableKeyguard()
{
return call_method<
android::app::KeyguardManager_::KeyguardLock::J2CPP_CLASS_NAME,
android::app::KeyguardManager_::KeyguardLock::J2CPP_METHOD_NAME(2),
android::app::KeyguardManager_::KeyguardLock::J2CPP_METHOD_SIGNATURE(2),
void
>(get_jobject());
}
J2CPP_DEFINE_CLASS(android::app::KeyguardManager_::KeyguardLock,"android/app/KeyguardManager$KeyguardLock")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager_::KeyguardLock,0,"<init>","(Landroid/app/KeyguardManager;)V")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager_::KeyguardLock,1,"disableKeyguard","()V")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager_::KeyguardLock,2,"reenableKeyguard","()V")
J2CPP_DEFINE_FIELD(android::app::KeyguardManager_::KeyguardLock,0,"this$0","Landroid/app/KeyguardManager;")
android::app::KeyguardManager::operator local_ref<java::lang::Object>() const
{
return local_ref<java::lang::Object>(get_jobject());
}
local_ref< android::app::KeyguardManager_::KeyguardLock > android::app::KeyguardManager::newKeyguardLock(local_ref< java::lang::String > const &a0)
{
return call_method<
android::app::KeyguardManager::J2CPP_CLASS_NAME,
android::app::KeyguardManager::J2CPP_METHOD_NAME(1),
android::app::KeyguardManager::J2CPP_METHOD_SIGNATURE(1),
local_ref< android::app::KeyguardManager_::KeyguardLock >
>(get_jobject(), a0);
}
jboolean android::app::KeyguardManager::inKeyguardRestrictedInputMode()
{
return call_method<
android::app::KeyguardManager::J2CPP_CLASS_NAME,
android::app::KeyguardManager::J2CPP_METHOD_NAME(2),
android::app::KeyguardManager::J2CPP_METHOD_SIGNATURE(2),
jboolean
>(get_jobject());
}
void android::app::KeyguardManager::exitKeyguardSecurely(local_ref< android::app::KeyguardManager_::OnKeyguardExitResult > const &a0)
{
return call_method<
android::app::KeyguardManager::J2CPP_CLASS_NAME,
android::app::KeyguardManager::J2CPP_METHOD_NAME(3),
android::app::KeyguardManager::J2CPP_METHOD_SIGNATURE(3),
void
>(get_jobject(), a0);
}
J2CPP_DEFINE_CLASS(android::app::KeyguardManager,"android/app/KeyguardManager")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager,0,"<init>","()V")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager,1,"newKeyguardLock","(Ljava/lang/String;)Landroid/app/KeyguardManager$KeyguardLock;")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager,2,"inKeyguardRestrictedInputMode","()Z")
J2CPP_DEFINE_METHOD(android::app::KeyguardManager,3,"exitKeyguardSecurely","(Landroid/app/KeyguardManager$OnKeyguardExitResult;)V")
} //namespace j2cpp
#endif //J2CPP_ANDROID_APP_KEYGUARDMANAGER_HPP_IMPL
#endif //J2CPP_INCLUDE_IMPLEMENTATION
| 30.9 | 148 | 0.738286 | [
"object"
] |
d555f2836c45042d559d8b9d9b3c874a1c590ed7 | 87,156 | cc | C++ | gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | gpu/command_buffer/service/gles2_cmd_decoder_unittest_framebuffers.cc | sunjc53yy/chromium | 049b380040949089c2a6e447b0cd0ac3c4ece38e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate_mock.h"
#include "gpu/command_buffer/service/async_pixel_transfer_manager.h"
#include "gpu/command_buffer/service/async_pixel_transfer_manager_mock.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/gl_surface_mock.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_unittest.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/program_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_mock.h"
#include "ui/gl/gl_surface_stub.h"
#if !defined(GL_DEPTH24_STENCIL8)
#define GL_DEPTH24_STENCIL8 0x88F0
#endif
using ::gfx::MockGLInterface;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::MatcherCast;
using ::testing::Mock;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArrayArgument;
using ::testing::SetArgumentPointee;
using ::testing::SetArgPointee;
using ::testing::StrEq;
using ::testing::StrictMock;
namespace gpu {
namespace gles2 {
using namespace cmds;
class GLES2DecoderTestWithExtensionsOnGLES2 : public GLES2DecoderTest {
public:
GLES2DecoderTestWithExtensionsOnGLES2() {}
void SetUp() override {}
void Init(const char* extensions) {
InitState init;
init.extensions = extensions;
init.gl_version = "opengl es 2.0";
init.has_alpha = true;
init.has_depth = true;
init.request_alpha = true;
init.request_depth = true;
InitDecoder(init);
}
};
TEST_P(GLES2DecoderTest, CheckFramebufferStatusWithNoBoundTarget) {
EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(_)).Times(0);
CheckFramebufferStatus::Result* result =
static_cast<CheckFramebufferStatus::Result*>(shared_memory_address_);
*result = 0;
CheckFramebufferStatus cmd;
cmd.Init(GL_FRAMEBUFFER, shared_memory_id_, shared_memory_offset_);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE), *result);
}
TEST_P(GLES2DecoderWithShaderTest, BindAndDeleteFramebuffer) {
SetupTexture();
AddExpectationsForSimulatedAttrib0(kNumVertices, 0);
SetupExpectationsForApplyingDefaultDirtyState();
DoBindFramebuffer(
GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
DoDeleteFramebuffer(client_framebuffer_id_,
kServiceFramebufferId,
true,
GL_FRAMEBUFFER,
0,
true,
GL_FRAMEBUFFER,
0);
EXPECT_CALL(*gl_, DrawArrays(GL_TRIANGLES, 0, kNumVertices))
.Times(1)
.RetiresOnSaturation();
DrawArrays cmd;
cmd.Init(GL_TRIANGLES, 0, kNumVertices);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
TEST_P(GLES2DecoderTest, FramebufferRenderbufferWithNoBoundTarget) {
EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(_, _, _, _)).Times(0);
FramebufferRenderbuffer cmd;
cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER,
client_renderbuffer_id_);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(GLES2DecoderTest, FramebufferTexture2DWithNoBoundTarget) {
EXPECT_CALL(*gl_, FramebufferTexture2DEXT(_, _, _, _, _)).Times(0);
FramebufferTexture2D cmd;
cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D,
client_texture_id_);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithNoBoundTarget) {
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetFramebufferAttachmentParameterivEXT(_, _, _, _))
.Times(0);
GetFramebufferAttachmentParameteriv cmd;
cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
shared_memory_id_,
shared_memory_offset_);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithRenderbuffer) {
DoBindFramebuffer(
GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_,
FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER,
kServiceRenderbufferId))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
GetFramebufferAttachmentParameteriv::Result* result =
static_cast<GetFramebufferAttachmentParameteriv::Result*>(
shared_memory_address_);
result->size = 0;
const GLint* result_value = result->GetData();
FramebufferRenderbuffer fbrb_cmd;
GetFramebufferAttachmentParameteriv cmd;
fbrb_cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER,
client_renderbuffer_id_);
cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
shared_memory_id_,
shared_memory_offset_);
EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_EQ(static_cast<GLuint>(*result_value), client_renderbuffer_id_);
}
TEST_P(GLES2DecoderTest, GetFramebufferAttachmentParameterivWithTexture) {
DoBindFramebuffer(
GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_,
FramebufferTexture2DEXT(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D,
kServiceTextureId,
0))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
GetFramebufferAttachmentParameteriv::Result* result =
static_cast<GetFramebufferAttachmentParameteriv::Result*>(
shared_memory_address_);
result->SetNumResults(0);
const GLint* result_value = result->GetData();
FramebufferTexture2D fbtex_cmd;
GetFramebufferAttachmentParameteriv cmd;
fbtex_cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D,
client_texture_id_);
cmd.Init(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME,
shared_memory_id_,
shared_memory_offset_);
EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
EXPECT_EQ(static_cast<GLuint>(*result_value), client_texture_id_);
}
TEST_P(GLES2DecoderWithShaderTest,
GetRenderbufferParameterivRebindRenderbuffer) {
SetupTexture();
DoBindRenderbuffer(
GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
GetRenderbufferParameteriv cmd;
cmd.Init(GL_RENDERBUFFER,
GL_RENDERBUFFER_RED_SIZE,
shared_memory_id_,
shared_memory_offset_);
RestoreRenderbufferBindings();
EnsureRenderbufferBound(true);
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_,
GetRenderbufferParameterivEXT(
GL_RENDERBUFFER, GL_RENDERBUFFER_RED_SIZE, _));
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
TEST_P(GLES2DecoderTest, GetRenderbufferParameterivWithNoBoundTarget) {
EXPECT_CALL(*gl_, GetError())
.WillOnce(Return(GL_NO_ERROR))
.WillOnce(Return(GL_NO_ERROR))
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetRenderbufferParameterivEXT(_, _, _)).Times(0);
GetRenderbufferParameteriv cmd;
cmd.Init(GL_RENDERBUFFER,
GL_RENDERBUFFER_WIDTH,
shared_memory_id_,
shared_memory_offset_);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
TEST_P(GLES2DecoderWithShaderTest, RenderbufferStorageRebindRenderbuffer) {
SetupTexture();
DoBindRenderbuffer(
GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
RestoreRenderbufferBindings();
EnsureRenderbufferBound(true);
DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
}
TEST_P(GLES2DecoderTest, RenderbufferStorageWithNoBoundTarget) {
EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _)).Times(0);
RenderbufferStorage cmd;
cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 3, 4);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
namespace {
// A class to emulate glReadPixels
class ReadPixelsEmulator {
public:
// pack_alignment is the alignment you want ReadPixels to use
// when copying. The actual data passed in pixels should be contiguous.
ReadPixelsEmulator(GLsizei width,
GLsizei height,
GLint bytes_per_pixel,
const void* src_pixels,
const void* expected_pixels,
GLint pack_alignment)
: width_(width),
height_(height),
pack_alignment_(pack_alignment),
bytes_per_pixel_(bytes_per_pixel),
src_pixels_(reinterpret_cast<const int8*>(src_pixels)),
expected_pixels_(reinterpret_cast<const int8*>(expected_pixels)) {}
void ReadPixels(GLint x,
GLint y,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
void* pixels) const {
DCHECK_GE(x, 0);
DCHECK_GE(y, 0);
DCHECK_LE(x + width, width_);
DCHECK_LE(y + height, height_);
for (GLint yy = 0; yy < height; ++yy) {
const int8* src = GetPixelAddress(src_pixels_, x, y + yy);
const void* dst = ComputePackAlignmentAddress(0, yy, width, pixels);
memcpy(const_cast<void*>(dst), src, width * bytes_per_pixel_);
}
}
bool CompareRowSegment(GLint x,
GLint y,
GLsizei width,
const void* data) const {
DCHECK(x + width <= width_ || width == 0);
return memcmp(data,
GetPixelAddress(expected_pixels_, x, y),
width * bytes_per_pixel_) == 0;
}
// Helper to compute address of pixel in pack aligned data.
const void* ComputePackAlignmentAddress(GLint x,
GLint y,
GLsizei width,
const void* address) const {
GLint unpadded_row_size = ComputeImageDataSize(width, 1);
GLint two_rows_size = ComputeImageDataSize(width, 2);
GLsizei padded_row_size = two_rows_size - unpadded_row_size;
GLint offset = y * padded_row_size + x * bytes_per_pixel_;
return static_cast<const int8*>(address) + offset;
}
GLint ComputeImageDataSize(GLint width, GLint height) const {
GLint row_size = width * bytes_per_pixel_;
if (height > 1) {
GLint temp = row_size + pack_alignment_ - 1;
GLint padded_row_size = (temp / pack_alignment_) * pack_alignment_;
GLint size_of_all_but_last_row = (height - 1) * padded_row_size;
return size_of_all_but_last_row + row_size;
} else {
return height * row_size;
}
}
private:
const int8* GetPixelAddress(const int8* base, GLint x, GLint y) const {
return base + (width_ * y + x) * bytes_per_pixel_;
}
GLsizei width_;
GLsizei height_;
GLint pack_alignment_;
GLint bytes_per_pixel_;
const int8* src_pixels_;
const int8* expected_pixels_;
};
} // anonymous namespace
// Runs one ReadPixels command whose rectangle may extend outside the 5x3
// framebuffer and verifies that:
//  - the driver-level glReadPixels is only issued row-by-row for the
//    clipped (in-range) region,
//  - destination bytes outside the framebuffer are zero-filled,
//  - in-range destination bytes match the source pixels,
//  - inter-row padding bytes keep their initial value.
// |init| creates the FBO; callers pass true only on the first invocation.
void GLES2DecoderTest::CheckReadPixelsOutOfRange(GLint in_read_x,
                                                 GLint in_read_y,
                                                 GLsizei in_read_width,
                                                 GLsizei in_read_height,
                                                 bool init) {
  const GLsizei kWidth = 5;
  const GLsizei kHeight = 3;
  const GLint kBytesPerPixel = 3;
  const GLint kPackAlignment = 4;
  const GLenum kFormat = GL_RGB;
  static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
      12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
      29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
      31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
  };
  ClearSharedMemory();
  // We need to setup an FBO so we can know the max size that ReadPixels will
  // access
  if (init) {
    DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
    DoTexImage2D(GL_TEXTURE_2D,
                 0,
                 kFormat,
                 kWidth,
                 kHeight,
                 0,
                 kFormat,
                 GL_UNSIGNED_BYTE,
                 kSharedMemoryId,
                 kSharedMemoryOffset);
    DoBindFramebuffer(
        GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
    DoFramebufferTexture2D(GL_FRAMEBUFFER,
                           GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D,
                           client_texture_id_,
                           kServiceTextureId,
                           0,
                           GL_NO_ERROR);
    EXPECT_CALL(*gl_, CheckFramebufferStatusEXT(GL_FRAMEBUFFER))
        .WillOnce(Return(GL_FRAMEBUFFER_COMPLETE))
        .RetiresOnSaturation();
  }
  ReadPixelsEmulator emu(
      kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  // Pixel data lands immediately after the Result struct in shared memory.
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  void* dest = &result[1];
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  // ReadPixels will be called for valid size only even though the command
  // is requesting a larger size.
  GLint read_x = std::max(0, in_read_x);
  GLint read_y = std::max(0, in_read_y);
  GLint read_end_x = std::max(0, std::min(kWidth, in_read_x + in_read_width));
  GLint read_end_y = std::max(0, std::min(kHeight, in_read_y + in_read_height));
  GLint read_width = read_end_x - read_x;
  GLint read_height = read_end_y - read_y;
  if (read_width > 0 && read_height > 0) {
    // The decoder is expected to read the clipped region one row at a time.
    for (GLint yy = read_y; yy < read_end_y; ++yy) {
      EXPECT_CALL(
          *gl_,
          ReadPixels(read_x, yy, read_width, 1, kFormat, GL_UNSIGNED_BYTE, _))
          .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels))
          .RetiresOnSaturation();
    }
  }
  ReadPixels cmd;
  cmd.Init(in_read_x,
           in_read_y,
           in_read_width,
           in_read_height,
           kFormat,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  GLint unpadded_row_size = emu.ComputeImageDataSize(in_read_width, 1);
  scoped_ptr<int8[]> zero(new int8[unpadded_row_size]);
  scoped_ptr<int8[]> pack(new int8[kPackAlignment]);
  memset(zero.get(), 0, unpadded_row_size);
  memset(pack.get(), kInitialMemoryValue, kPackAlignment);
  for (GLint yy = 0; yy < in_read_height; ++yy) {
    const int8* row = static_cast<const int8*>(
        emu.ComputePackAlignmentAddress(0, yy, in_read_width, dest));
    GLint y = in_read_y + yy;
    if (y < 0 || y >= kHeight) {
      // Rows entirely outside the framebuffer must be all zeroes.
      EXPECT_EQ(0, memcmp(zero.get(), row, unpadded_row_size));
    } else {
      // check off left.
      GLint num_left_pixels = std::max(-in_read_x, 0);
      GLint num_left_bytes = num_left_pixels * kBytesPerPixel;
      EXPECT_EQ(0, memcmp(zero.get(), row, num_left_bytes));
      // check off right.
      GLint num_right_pixels = std::max(in_read_x + in_read_width - kWidth, 0);
      GLint num_right_bytes = num_right_pixels * kBytesPerPixel;
      EXPECT_EQ(0,
                memcmp(zero.get(),
                       row + unpadded_row_size - num_right_bytes,
                       num_right_bytes));
      // check middle.
      GLint x = std::max(in_read_x, 0);
      GLint num_middle_pixels =
          std::max(in_read_width - num_left_pixels - num_right_pixels, 0);
      EXPECT_TRUE(
          emu.CompareRowSegment(x, y, num_middle_pixels, row + num_left_bytes));
    }
    // check padding
    if (yy != in_read_height - 1) {
      // NOTE(review): this checks (alignment - 1 - size % alignment) bytes,
      // one fewer than the full pad would be for a non-aligned row size --
      // presumably deliberate to skip the final pad byte; confirm.
      GLint num_padding_bytes =
          (kPackAlignment - 1) - (unpadded_row_size % kPackAlignment);
      EXPECT_EQ(0,
                memcmp(pack.get(), row + unpadded_row_size, num_padding_bytes));
    }
  }
}
// Happy-path ReadPixels: a full 5x3 RGB read must invoke glReadPixels once
// and every destination row (at its pack-aligned offset) must match the
// source pixels.
TEST_P(GLES2DecoderTest, ReadPixels) {
  const GLsizei kWidth = 5;
  const GLsizei kHeight = 3;
  const GLint kBytesPerPixel = 3;
  const GLint kPackAlignment = 4;
  static const int8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
      12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 18, 19, 13,
      29, 28, 23, 22, 21, 22, 21, 29, 28, 23, 22, 21, 22, 21, 28,
      31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32, 37, 32, 34,
  };
  // Large surface so the request is never clipped.
  surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
  ReadPixelsEmulator emu(
      kWidth, kHeight, kBytesPerPixel, kSrcPixels, kSrcPixels, kPackAlignment);
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  void* dest = &result[1];
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
      .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
  ReadPixels cmd;
  cmd.Init(0,
           0,
           kWidth,
           kHeight,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  for (GLint yy = 0; yy < kHeight; ++yy) {
    EXPECT_TRUE(emu.CompareRowSegment(
        0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
  }
}
// When the backbuffer has no alpha channel, an RGBA read must report every
// alpha byte as 255 regardless of what the driver returned; kExpectedPixels
// is kSrcPixels (RGB triples) with alpha forced to 255.
TEST_P(GLES2DecoderRGBBackbufferTest, ReadPixelsNoAlphaBackbuffer) {
  const GLsizei kWidth = 3;
  const GLsizei kHeight = 3;
  const GLint kBytesPerPixel = 4;
  const GLint kPackAlignment = 4;
  static const uint8 kExpectedPixels[kWidth * kHeight * kBytesPerPixel] = {
      12, 13, 14, 255, 19, 18, 19, 255, 13, 14, 18, 255,
      29, 28, 23, 255, 21, 22, 21, 255, 28, 23, 22, 255,
      31, 34, 39, 255, 32, 37, 32, 255, 34, 39, 37, 255,
  };
  static const uint8 kSrcPixels[kWidth * kHeight * kBytesPerPixel] = {
      12, 13, 14, 18, 19, 18, 19, 12, 13, 14, 18, 19, 29, 28, 23, 22, 21, 22,
      21, 29, 28, 23, 22, 21, 31, 34, 39, 37, 32, 37, 32, 31, 34, 39, 37, 32,
  };
  surface_->SetSize(gfx::Size(INT_MAX, INT_MAX));
  ReadPixelsEmulator emu(kWidth,
                         kHeight,
                         kBytesPerPixel,
                         kSrcPixels,
                         kExpectedPixels,
                         kPackAlignment);
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  void* dest = &result[1];
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              ReadPixels(0, 0, kWidth, kHeight, GL_RGBA, GL_UNSIGNED_BYTE, _))
      .WillOnce(Invoke(&emu, &ReadPixelsEmulator::ReadPixels));
  ReadPixels cmd;
  cmd.Init(0,
           0,
           kWidth,
           kHeight,
           GL_RGBA,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  for (GLint yy = 0; yy < kHeight; ++yy) {
    EXPECT_TRUE(emu.CompareRowSegment(
        0, yy, kWidth, emu.ComputePackAlignmentAddress(0, yy, kWidth, dest)));
  }
}
// Drives CheckReadPixelsOutOfRange with a table of rectangles that spill
// past the 5x3 framebuffer in every possible direction.
TEST_P(GLES2DecoderTest, ReadPixelsOutOfRange) {
  // Each entry is {x, y, width, height}.
  static GLint tests[][4] = {
      {-2, -1, 9, 5},  // out of range on all sides
      {2, 1, 9, 5},    // out of range on right, bottom
      {-7, -4, 9, 5},  // out of range on left, top
      {0, -5, 9, 5},   // completely off top
      {0, 3, 9, 5},    // completely off bottom
      {-9, 0, 9, 5},   // completely off left
      {5, 0, 9, 5},    // completely off right
  };
  // The FBO only needs to be created on the first iteration.
  for (size_t i = 0; i < arraysize(tests); ++i) {
    const GLint* rect = tests[i];
    CheckReadPixelsOutOfRange(rect[0], rect[1], rect[2], rect[3], i == 0);
  }
}
// Invalid ReadPixels arguments: bad dimensions/type must set a GL error
// without calling the driver; bad shared-memory ids/offsets must fail the
// command itself (non-kNoError).
TEST_P(GLES2DecoderTest, ReadPixelsInvalidArgs) {
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  // The driver must never be reached for any of the cases below.
  EXPECT_CALL(*gl_, ReadPixels(_, _, _, _, _, _, _)).Times(0);
  ReadPixels cmd;
  // Negative width -> GL_INVALID_VALUE.
  cmd.Init(0,
           0,
           -1,
           1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
  // Negative height -> GL_INVALID_VALUE.
  cmd.Init(0,
           0,
           1,
           -1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
  // GL_INT is not a valid read type here -> GL_INVALID_ENUM.
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGB,
           GL_INT,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
  // Bad pixels shared-memory id -> command error.
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           kInvalidSharedMemoryId,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
  // Bad pixels shared-memory offset -> command error.
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           kInvalidSharedMemoryOffset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
  // Bad result shared-memory id -> command error.
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           kInvalidSharedMemoryId,
           result_shm_offset,
           false);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
  // Bad result shared-memory offset -> command error.
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           kInvalidSharedMemoryOffset,
           false);
  EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
// Async ReadPixels (pixel-pack-buffer path): if BufferData fails while
// setting up the async transfer, the decoder must fall back to a synchronous
// glReadPixels and still succeed.
TEST_P(GLES2DecoderManualInitTest, ReadPixelsAsyncError) {
  InitState init;
  init.extensions = "GL_ARB_sync";
  init.gl_version = "opengl es 3.0";
  init.has_alpha = true;
  init.request_alpha = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  const GLsizei kWidth = 4;
  const GLsizei kHeight = 4;
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  EXPECT_CALL(*gl_, GetError())
      // first error check must pass to get to the test
      .WillOnce(Return(GL_NO_ERROR))
      // second check is after BufferData, simulate fail here
      .WillOnce(Return(GL_INVALID_OPERATION))
      // third error check is fall-through call to sync ReadPixels
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  // The synchronous fallback read.
  EXPECT_CALL(*gl_,
              ReadPixels(0, 0, kWidth, kHeight, GL_RGB, GL_UNSIGNED_BYTE, _))
      .Times(1);
  // Async setup: buffer created, bound (then unbound) and allocated.
  EXPECT_CALL(*gl_, GenBuffersARB(1, _)).Times(1);
  EXPECT_CALL(*gl_, BindBuffer(GL_PIXEL_PACK_BUFFER_ARB, _)).Times(2);
  EXPECT_CALL(*gl_,
              BufferData(GL_PIXEL_PACK_BUFFER_ARB, _, NULL, GL_STREAM_READ))
      .Times(1);
  ReadPixels cmd;
  cmd.Init(0,
           0,
           kWidth,
           kHeight,
           GL_RGB,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           true);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
// Check that if a renderbuffer is attached and GL returns
// GL_FRAMEBUFFER_COMPLETE that the buffer is cleared and state is restored.
// The InSequence block pins the order: user clear state is applied first,
// then the attachment is made.
TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearColor) {
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  ClearColor color_cmd;
  ColorMask color_mask_cmd;
  Enable enable_cmd;
  FramebufferRenderbuffer cmd;
  color_cmd.Init(0.1f, 0.2f, 0.3f, 0.4f);
  color_mask_cmd.Init(0, 1, 0, 1);
  enable_cmd.Init(GL_SCISSOR_TEST);
  cmd.Init(GL_FRAMEBUFFER,
           GL_COLOR_ATTACHMENT0,
           GL_RENDERBUFFER,
           client_renderbuffer_id_);
  InSequence sequence;
  EXPECT_CALL(*gl_, ClearColor(0.1f, 0.2f, 0.3f, 0.4f))
      .Times(1)
      .RetiresOnSaturation();
  SetupExpectationsForEnableDisable(GL_SCISSOR_TEST, true);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_COLOR_ATTACHMENT0,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(color_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(color_mask_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(enable_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
// Same as FramebufferRenderbufferClearColor but for a depth attachment:
// user depth-clear state is applied before the attachment call.
TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepth) {
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  ClearDepthf depth_cmd;
  DepthMask depth_mask_cmd;
  FramebufferRenderbuffer cmd;
  depth_cmd.Init(0.5f);
  depth_mask_cmd.Init(false);
  cmd.Init(GL_FRAMEBUFFER,
           GL_DEPTH_ATTACHMENT,
           GL_RENDERBUFFER,
           client_renderbuffer_id_);
  InSequence sequence;
  EXPECT_CALL(*gl_, ClearDepth(0.5f)).Times(1).RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_DEPTH_ATTACHMENT,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(depth_mask_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
// Same as FramebufferRenderbufferClearColor but for a stencil attachment:
// user stencil-clear state is applied before the attachment call.
TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearStencil) {
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  ClearStencil stencil_cmd;
  StencilMaskSeparate stencil_mask_separate_cmd;
  FramebufferRenderbuffer cmd;
  stencil_cmd.Init(123);
  stencil_mask_separate_cmd.Init(GL_BACK, 0x1234u);
  cmd.Init(GL_FRAMEBUFFER,
           GL_STENCIL_ATTACHMENT,
           GL_RENDERBUFFER,
           client_renderbuffer_id_);
  InSequence sequence;
  EXPECT_CALL(*gl_, ClearStencil(123)).Times(1).RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_STENCIL_ATTACHMENT,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_mask_separate_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
#if 0 // Turn this test on once we allow GL_DEPTH_STENCIL_ATTACHMENT
// Disabled variant of the Clear{Depth,Stencil} tests for a combined
// depth-stencil attachment; compiled out until the decoder accepts
// GL_DEPTH_STENCIL_ATTACHMENT.
TEST_P(GLES2DecoderTest, FramebufferRenderbufferClearDepthStencil) {
  DoBindFramebuffer(GL_FRAMEBUFFER, client_framebuffer_id_,
                    kServiceFramebufferId);
  ClearDepthf depth_cmd;
  ClearStencil stencil_cmd;
  FramebufferRenderbuffer cmd;
  depth_cmd.Init(0.5f);
  stencil_cmd.Init(123);
  cmd.Init(
      GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
      client_renderbuffer_id_);
  InSequence sequence;
  EXPECT_CALL(*gl_, ClearDepth(0.5f))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, ClearStencil(123))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, FramebufferRenderbufferEXT(
      GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER,
      kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(depth_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(stencil_cmd));
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
#endif
// When the client requested alpha and the context has it, GetIntegerv on
// GL_ALPHA_BITS must report the real driver value (8).
TEST_P(GLES2DecoderManualInitTest, ActualAlphaMatchesRequestedAlpha) {
  InitState init;
  init.gl_version = "3.0";
  init.has_alpha = true;
  init.request_alpha = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(8, result->GetData()[0]);
}
// When the context has alpha but the client did NOT request it, GetIntegerv
// on GL_ALPHA_BITS must report 0 even though the driver reports 8.
TEST_P(GLES2DecoderManualInitTest, ActualAlphaDoesNotMatchRequestedAlpha) {
  InitState init;
  init.gl_version = "3.0";
  init.has_alpha = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_ALPHA_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_ALPHA_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_ALPHA_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
}
// When the client requested depth and the context has it, GetIntegerv on
// GL_DEPTH_BITS must report the real driver value (24).
TEST_P(GLES2DecoderManualInitTest, ActualDepthMatchesRequestedDepth) {
  InitState init;
  init.gl_version = "3.0";
  init.has_depth = true;
  init.request_depth = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(24, result->GetData()[0]);
}
// When the context has depth but the client did NOT request it, GetIntegerv
// on GL_DEPTH_BITS must report 0 even though the driver reports 24.
TEST_P(GLES2DecoderManualInitTest, ActualDepthDoesNotMatchRequestedDepth) {
  InitState init;
  init.gl_version = "3.0";
  init.has_depth = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
}
// When the client requested stencil and the context has it, GetIntegerv on
// GL_STENCIL_BITS must report the real driver value (8).
TEST_P(GLES2DecoderManualInitTest, ActualStencilMatchesRequestedStencil) {
  InitState init;
  init.gl_version = "3.0";
  init.has_stencil = true;
  init.request_stencil = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(8, result->GetData()[0]);
}
// When the context has stencil but the client did NOT request it, GetIntegerv
// on GL_STENCIL_BITS must report 0 even though the driver reports 8.
TEST_P(GLES2DecoderManualInitTest, ActualStencilDoesNotMatchRequestedStencil) {
  InitState init;
  init.gl_version = "3.0";
  init.has_stencil = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
}
// With GL_OES_packed_depth_stencil and both depth and stencil requested,
// GL_STENCIL_BITS and GL_DEPTH_BITS must report the driver values (8 / 24).
TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilReportsCorrectValues) {
  InitState init;
  init.extensions = "GL_OES_packed_depth_stencil";
  init.gl_version = "opengl es 2.0";
  init.has_depth = true;
  init.has_stencil = true;
  init.request_depth = true;
  init.request_stencil = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(8, result->GetData()[0]);
  result->size = 0;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(24, result->GetData()[0]);
}
// With GL_OES_packed_depth_stencil but stencil NOT requested, GL_STENCIL_BITS
// must report 0 (masked) while GL_DEPTH_BITS still reports 24.
TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilNoRequestedStencil) {
  InitState init;
  init.extensions = "GL_OES_packed_depth_stencil";
  init.gl_version = "opengl es 2.0";
  init.has_depth = true;
  init.has_stencil = true;
  init.request_depth = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
  result->size = 0;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(24, result->GetData()[0]);
}
// A DEPTH24_STENCIL8 renderbuffer attached only to GL_DEPTH_ATTACHMENT:
// GL_DEPTH_BITS reflects the driver (24) but GL_STENCIL_BITS is masked to 0
// because stencil is not attached.
TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferDepth) {
  InitState init;
  init.extensions = "GL_OES_packed_depth_stencil";
  init.gl_version = "opengl es 2.0";
  init.bind_generates_resource = true;
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EnsureRenderbufferBound(false);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(
      *gl_,
      RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
      .Times(1)
      .RetiresOnSaturation();
  RenderbufferStorage cmd;
  cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_DEPTH_ATTACHMENT,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  FramebufferRenderbuffer fbrb_cmd;
  fbrb_cmd.Init(GL_FRAMEBUFFER,
                GL_DEPTH_ATTACHMENT,
                GL_RENDERBUFFER,
                client_renderbuffer_id_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
  result->size = 0;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(24, result->GetData()[0]);
}
// A DEPTH24_STENCIL8 renderbuffer attached only to GL_STENCIL_ATTACHMENT:
// GL_STENCIL_BITS reflects the driver (8) but GL_DEPTH_BITS is masked to 0
// because depth is not attached.
TEST_P(GLES2DecoderManualInitTest, PackedDepthStencilRenderbufferStencil) {
  InitState init;
  init.extensions = "GL_OES_packed_depth_stencil";
  init.gl_version = "opengl es 2.0";
  init.bind_generates_resource = true;
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EnsureRenderbufferBound(false);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR)) // for RenderbufferStorage
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for FramebufferRenderbuffer
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR)) // for GetIntegerv
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(
      *gl_,
      RenderbufferStorageEXT(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50))
      .Times(1)
      .RetiresOnSaturation();
  RenderbufferStorage cmd;
  cmd.Init(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 100, 50);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_STENCIL_ATTACHMENT,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  FramebufferRenderbuffer fbrb_cmd;
  fbrb_cmd.Init(GL_FRAMEBUFFER,
                GL_STENCIL_ATTACHMENT,
                GL_RENDERBUFFER,
                client_renderbuffer_id_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(fbrb_cmd));
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  result->size = 0;
  GetIntegerv cmd2;
  cmd2.Init(GL_STENCIL_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_STENCIL_BITS, _))
      .WillOnce(SetArgumentPointee<1>(8))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_STENCIL_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(8, result->GetData()[0]);
  result->size = 0;
  cmd2.Init(GL_DEPTH_BITS, shared_memory_id_, shared_memory_offset_);
  EXPECT_CALL(*gl_, GetIntegerv(GL_DEPTH_BITS, _))
      .WillOnce(SetArgumentPointee<1>(24))
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd2));
  EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DEPTH_BITS),
            result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_EQ(0, result->GetData()[0]);
}
// A driver GL_OUT_OF_MEMORY raised during FramebufferRenderbuffer must be
// surfaced to the client.
TEST_P(GLES2DecoderTest, FramebufferRenderbufferGLError) {
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_OUT_OF_MEMORY))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              FramebufferRenderbufferEXT(GL_FRAMEBUFFER,
                                         GL_COLOR_ATTACHMENT0,
                                         GL_RENDERBUFFER,
                                         kServiceRenderbufferId))
      .Times(1)
      .RetiresOnSaturation();
  FramebufferRenderbuffer cmd;
  cmd.Init(GL_FRAMEBUFFER,
           GL_COLOR_ATTACHMENT0,
           GL_RENDERBUFFER,
           client_renderbuffer_id_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
}
// A driver GL_OUT_OF_MEMORY raised during FramebufferTexture2D must be
// surfaced to the client.
TEST_P(GLES2DecoderTest, FramebufferTexture2DGLError) {
  const GLsizei kWidth = 5;
  const GLsizei kHeight = 3;
  const GLenum kFormat = GL_RGB;
  DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
  DoTexImage2D(GL_TEXTURE_2D,
               0,
               kFormat,
               kWidth,
               kHeight,
               0,
               kFormat,
               GL_UNSIGNED_BYTE,
               0,
               0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_OUT_OF_MEMORY))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              FramebufferTexture2DEXT(GL_FRAMEBUFFER,
                                      GL_COLOR_ATTACHMENT0,
                                      GL_TEXTURE_2D,
                                      kServiceTextureId,
                                      0))
      .Times(1)
      .RetiresOnSaturation();
  FramebufferTexture2D fbtex_cmd;
  fbtex_cmd.Init(GL_FRAMEBUFFER,
                 GL_COLOR_ATTACHMENT0,
                 GL_TEXTURE_2D,
                 client_texture_id_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(fbtex_cmd));
  EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
}
// Verifies that a driver GL_OUT_OF_MEMORY raised during
// glRenderbufferStorage is reported back through the decoder. Note the
// client-side GL_RGBA4 internal format is translated to GL_RGBA for the
// underlying desktop GL call.
TEST_P(GLES2DecoderTest, RenderbufferStorageGLError) {
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  // No rebind expected: the renderbuffer binding is already current.
  EnsureRenderbufferBound(false);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_OUT_OF_MEMORY))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, RenderbufferStorageEXT(GL_RENDERBUFFER, GL_RGBA, 100, 50))
      .Times(1)
      .RetiresOnSaturation();
  RenderbufferStorage cmd;
  cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 100, 50);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
}
// Verifies that RenderbufferStorage with a width or height exceeding the
// max renderbuffer size is rejected client-side with GL_INVALID_VALUE and
// never reaches the driver.
TEST_P(GLES2DecoderTest, RenderbufferStorageBadArgs) {
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  // The real GL function must not be called for invalid dimensions.
  EXPECT_CALL(*gl_, RenderbufferStorageEXT(_, _, _, _))
      .Times(0)
      .RetiresOnSaturation();
  RenderbufferStorage cmd;
  // Width too large.
  cmd.Init(GL_RENDERBUFFER, GL_RGBA4, TestHelper::kMaxRenderbufferSize + 1, 1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
  // Height too large.
  cmd.Init(GL_RENDERBUFFER, GL_RGBA4, 1, TestHelper::kMaxRenderbufferSize + 1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
// Verifies that a driver GL_OUT_OF_MEMORY raised during multisample
// renderbuffer storage allocation is reported back through the decoder.
// Uses manual init to enable GL_EXT_framebuffer_multisample.
TEST_P(GLES2DecoderManualInitTest,
       RenderbufferStorageMultisampleCHROMIUMGLError) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  EnsureRenderbufferBound(false);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_OUT_OF_MEMORY))
      .RetiresOnSaturation();
  // GL_RGBA4 is translated to GL_RGBA for the desktop GL backend.
  EXPECT_CALL(
      *gl_,
      RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, 1, GL_RGBA, 100, 50))
      .Times(1)
      .RetiresOnSaturation();
  RenderbufferStorageMultisampleCHROMIUM cmd;
  cmd.Init(GL_RENDERBUFFER, 1, GL_RGBA4, 100, 50);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
}
// Verifies that RenderbufferStorageMultisampleCHROMIUM rejects out-of-range
// sample counts and dimensions with GL_INVALID_VALUE without calling GL.
TEST_P(GLES2DecoderManualInitTest,
       RenderbufferStorageMultisampleCHROMIUMBadArgs) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  // None of the invalid variants below may reach the driver.
  EXPECT_CALL(*gl_, RenderbufferStorageMultisampleEXT(_, _, _, _, _))
      .Times(0)
      .RetiresOnSaturation();
  RenderbufferStorageMultisampleCHROMIUM cmd;
  // Sample count too large.
  cmd.Init(GL_RENDERBUFFER,
           TestHelper::kMaxSamples + 1,
           GL_RGBA4,
           TestHelper::kMaxRenderbufferSize,
           1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
  // Width too large.
  cmd.Init(GL_RENDERBUFFER,
           TestHelper::kMaxSamples,
           GL_RGBA4,
           TestHelper::kMaxRenderbufferSize + 1,
           1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
  // Height too large.
  cmd.Init(GL_RENDERBUFFER,
           TestHelper::kMaxSamples,
           GL_RGBA4,
           1,
           TestHelper::kMaxRenderbufferSize + 1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
}
// Happy-path test: with the renderbuffer already bound, multisample storage
// allocation succeeds at the maximum supported samples/size without a rebind.
TEST_P(GLES2DecoderManualInitTest, RenderbufferStorageMultisampleCHROMIUM) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  // Enforce ordering: any (non-)rebind must happen before the storage call.
  InSequence sequence;
  EnsureRenderbufferBound(false);
  DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
                                           TestHelper::kMaxSamples,
                                           GL_RGBA4,
                                           GL_RGBA,
                                           TestHelper::kMaxRenderbufferSize,
                                           1);
}
// Same as the happy-path test above, but after the underlying renderbuffer
// binding has been clobbered (RestoreRenderbufferBindings): the decoder must
// rebind the renderbuffer before issuing the storage call.
TEST_P(GLES2DecoderManualInitTest,
       RenderbufferStorageMultisampleCHROMIUMRebindRenderbuffer) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  // Simulate an external party changing the real GL binding.
  RestoreRenderbufferBindings();
  InSequence sequence;
  // Expect a rebind before the storage call.
  EnsureRenderbufferBound(true);
  DoRenderbufferStorageMultisampleCHROMIUM(GL_RENDERBUFFER,
                                           TestHelper::kMaxSamples,
                                           GL_RGBA4,
                                           GL_RGBA,
                                           TestHelper::kMaxRenderbufferSize,
                                           1);
}
// Verifies that the RenderbufferStorageMultisampleEXT command is rejected
// with GL_INVALID_OPERATION when only GL_EXT_framebuffer_multisample is
// exposed (that extension is served via the CHROMIUM command instead).
TEST_P(GLES2DecoderManualInitTest,
       RenderbufferStorageMultisampleEXTNotSupported) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  InSequence sequence;
  // GL_EXT_framebuffer_multisample uses RenderbufferStorageMultisampleCHROMIUM.
  RenderbufferStorageMultisampleEXT cmd;
  cmd.Init(GL_RENDERBUFFER,
           TestHelper::kMaxSamples,
           GL_RGBA4,
           TestHelper::kMaxRenderbufferSize,
           1);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
// Test fixture for the multisampled-render-to-texture extensions
// (GL_EXT_multisampled_render_to_texture / GL_IMG_...). Provides shared
// helpers used by the TEST_Ps below; the concrete extension string is chosen
// per-test via Init().
class GLES2DecoderMultisampledRenderToTextureTest
    : public GLES2DecoderTestWithExtensionsOnGLES2 {
 public:
  // The CHROMIUM multisample command must be rejected when a
  // render-to-texture extension is active — the two paths are exclusive.
  void TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM() {
    DoBindRenderbuffer(
        GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
    RenderbufferStorageMultisampleCHROMIUM cmd;
    cmd.Init(GL_RENDERBUFFER,
             TestHelper::kMaxSamples,
             GL_RGBA4,
             TestHelper::kMaxRenderbufferSize,
             1);
    EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
    EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
  }

  // Runs RenderbufferStorageMultisampleEXT and checks that the decoder
  // dispatches to the IMG or EXT entry point depending on |extension|.
  // If |rb_rebind| is true, the GL renderbuffer binding is clobbered first
  // and the decoder is expected to rebind before the storage call.
  void TestRenderbufferStorageMultisampleEXT(const char* extension,
                                             bool rb_rebind) {
    DoBindRenderbuffer(
        GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
    InSequence sequence;
    if (rb_rebind) {
      RestoreRenderbufferBindings();
      EnsureRenderbufferBound(true);
    } else {
      EnsureRenderbufferBound(false);
    }
    EXPECT_CALL(*gl_, GetError())
        .WillOnce(Return(GL_NO_ERROR))
        .RetiresOnSaturation();
    // The IMG variant of the extension routes to the IMG GL entry point;
    // everything else goes through the EXT entry point. In both cases
    // GL_RGBA4 is translated to GL_RGBA.
    if (strstr(extension, "GL_IMG_multisampled_render_to_texture")) {
      EXPECT_CALL(
          *gl_,
          RenderbufferStorageMultisampleIMG(GL_RENDERBUFFER,
                                            TestHelper::kMaxSamples,
                                            GL_RGBA,
                                            TestHelper::kMaxRenderbufferSize,
                                            1))
          .Times(1)
          .RetiresOnSaturation();
    } else {
      EXPECT_CALL(
          *gl_,
          RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER,
                                            TestHelper::kMaxSamples,
                                            GL_RGBA,
                                            TestHelper::kMaxRenderbufferSize,
                                            1))
          .Times(1)
          .RetiresOnSaturation();
    }
    EXPECT_CALL(*gl_, GetError())
        .WillOnce(Return(GL_NO_ERROR))
        .RetiresOnSaturation();
    RenderbufferStorageMultisampleEXT cmd;
    cmd.Init(GL_RENDERBUFFER,
             TestHelper::kMaxSamples,
             GL_RGBA4,
             TestHelper::kMaxRenderbufferSize,
             1);
    EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
    EXPECT_EQ(GL_NO_ERROR, GetGLError());
  }
};
// Instantiate the fixture for both values of the bool test parameter.
INSTANTIATE_TEST_CASE_P(Service,
                        GLES2DecoderMultisampledRenderToTextureTest,
                        ::testing::Bool());
// EXT variant: CHROMIUM multisample command must be rejected.
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_EXT) {
  Init("GL_EXT_multisampled_render_to_texture");
  TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
}
// IMG variant: CHROMIUM multisample command must be rejected.
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       NotCompatibleWithRenderbufferStorageMultisampleCHROMIUM_IMG) {
  Init("GL_IMG_multisampled_render_to_texture");
  TestNotCompatibleWithRenderbufferStorageMultisampleCHROMIUM();
}
// EXT variant, binding already current (no rebind expected).
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       RenderbufferStorageMultisampleEXT_EXT) {
  Init("GL_EXT_multisampled_render_to_texture");
  TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
                                        false);
}
// IMG variant, binding already current (no rebind expected).
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       RenderbufferStorageMultisampleEXT_IMG) {
  Init("GL_IMG_multisampled_render_to_texture");
  TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
                                        false);
}
// EXT variant after the GL binding was clobbered (rebind expected).
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       RenderbufferStorageMultisampleEXT_EXT_RebindRenderbuffer) {
  Init("GL_EXT_multisampled_render_to_texture");
  TestRenderbufferStorageMultisampleEXT("GL_EXT_multisampled_render_to_texture",
                                        true);
}
// IMG variant after the GL binding was clobbered (rebind expected).
TEST_P(GLES2DecoderMultisampledRenderToTextureTest,
       RenderbufferStorageMultisampleEXT_IMG_RebindRenderbuffer) {
  Init("GL_IMG_multisampled_render_to_texture");
  TestRenderbufferStorageMultisampleEXT("GL_IMG_multisampled_render_to_texture",
                                        true);
}
// Verifies that a driver GL_OUT_OF_MEMORY raised during glReadPixels is
// reported back through the decoder.
TEST_P(GLES2DecoderTest, ReadPixelsGLError) {
  GLenum kFormat = GL_RGBA;
  GLint x = 0;
  GLint y = 0;
  GLsizei width = 2;
  GLsizei height = 4;
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  // Result struct and pixel destination share one shm block; pixels follow
  // the result header.
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_OUT_OF_MEMORY))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_,
              ReadPixels(x, y, width, height, kFormat, GL_UNSIGNED_BYTE, _))
      .Times(1)
      .RetiresOnSaturation();
  ReadPixels cmd;
  cmd.Init(x,
           y,
           width,
           height,
           kFormat,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_OUT_OF_MEMORY, GetGLError());
}
// Verifies that an FBO with an uncleared (uninitialized) texture attachment
// is lazily cleared by the decoder before a user-issued glClear runs.
TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnClear) {
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render to" texture. The data pointer is null, so level 0 is
  // allocated but left uncleared.
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // Setup "render from" texture.
  SetupTexture();
  // Expect the decoder's internal clear of the dirty attachment ...
  SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER,       // target
                                          GL_COLOR_BUFFER_BIT,  // clear bits
                                          0,
                                          0,
                                          0,
                                          0,       // color
                                          0,       // stencil
                                          1.0f,    // depth
                                          false);  // scissor test
  // ... followed by state restoration before the user's Clear executes.
  SetupExpectationsForApplyingDirtyState(false,    // Framebuffer is RGB
                                         false,    // Framebuffer has depth
                                         false,    // Framebuffer has stencil
                                         0x1111,   // color bits
                                         false,    // depth mask
                                         false,    // depth enabled
                                         0,        // front stencil mask
                                         0,        // back stencil mask
                                         false);   // stencil enabled
  EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
  Clear cmd;
  cmd.Init(GL_COLOR_BUFFER_BIT);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Verifies that an FBO with an uncleared texture attachment is lazily
// cleared by the decoder before ReadPixels reads from it.
TEST_P(GLES2DecoderWithShaderTest, UnClearedAttachmentsGetClearedOnReadPixels) {
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render to" texture (level 0 allocated with null data, so it is
  // tracked as uncleared).
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // Setup "render from" texture.
  SetupTexture();
  // Expect the decoder to clear the dirty attachment before reading.
  SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER,       // target
                                          GL_COLOR_BUFFER_BIT,  // clear bits
                                          0,
                                          0,
                                          0,
                                          0,       // color
                                          0,       // stencil
                                          1.0f,    // depth
                                          false);  // scissor test
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
      .Times(1)
      .RetiresOnSaturation();
  typedef ReadPixels::Result Result;
  Result* result = GetSharedMemoryAs<Result*>();
  // Result header and pixel data share one shm block.
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(*result);
  ReadPixels cmd;
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGBA,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Like the previous test, but the uncleared texture is attached to the
// READ framebuffer (separate read/draw bindings via
// GL_EXT_framebuffer_multisample). The decoder must clear it through a
// temporary draw-framebuffer bind and restore bindings and scissor state.
TEST_P(GLES2DecoderManualInitTest,
       UnClearedAttachmentsGetClearedOnReadPixelsAndDrawBufferGetsRestored) {
  InitState init;
  init.extensions = "GL_EXT_framebuffer_multisample";
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render from" texture.
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_READ_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_READ_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // Enable GL_SCISSOR_TEST to make sure we disable it in the clear,
  // then re-enable after.
  DoEnableDisable(GL_SCISSOR_TEST, true);
  SetupExpectationsForFramebufferClearingMulti(
      kServiceFramebufferId,  // read framebuffer service id
      0,                      // backbuffer service id
      GL_READ_FRAMEBUFFER,    // target
      GL_COLOR_BUFFER_BIT,    // clear bits
      0,
      0,
      0,
      0,      // color
      0,      // stencil
      1.0f,   // depth
      true);  // scissor test
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, ReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, _))
      .Times(1)
      .RetiresOnSaturation();
  typedef ReadPixels::Result Result;
  // Result header and pixel data share one shm block.
  uint32 result_shm_id = kSharedMemoryId;
  uint32 result_shm_offset = kSharedMemoryOffset;
  uint32 pixels_shm_id = kSharedMemoryId;
  uint32 pixels_shm_offset = kSharedMemoryOffset + sizeof(Result);
  ReadPixels cmd;
  cmd.Init(0,
           0,
           1,
           1,
           GL_RGBA,
           GL_UNSIGNED_BYTE,
           pixels_shm_id,
           pixels_shm_offset,
           result_shm_id,
           result_shm_offset,
           false);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Verifies that CopyTexImage2D from an incomplete FBO (0x0 renderbuffer
// attachment) fails with GL_INVALID_FRAMEBUFFER_OPERATION and never calls GL.
TEST_P(GLES2DecoderWithShaderTest, CopyTexImageWithInCompleteFBOFails) {
  GLenum target = GL_TEXTURE_2D;
  GLint level = 0;
  GLenum internal_format = GL_RGBA;
  GLsizei width = 2;
  GLsizei height = 4;
  SetupTexture();
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  // Zero-sized storage makes the framebuffer incomplete.
  DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 0, 0, GL_NO_ERROR);
  DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
                            GL_COLOR_ATTACHMENT0,
                            GL_RENDERBUFFER,
                            client_renderbuffer_id_,
                            kServiceRenderbufferId,
                            GL_NO_ERROR);
  // The copy must be rejected before reaching the driver.
  EXPECT_CALL(*gl_, CopyTexImage2D(_, _, _, _, _, _, _, _))
      .Times(0)
      .RetiresOnSaturation();
  CopyTexImage2D cmd;
  cmd.Init(target, level, internal_format, 0, 0, width, height);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(GL_INVALID_FRAMEBUFFER_OPERATION, GetGLError());
}
// Shared body for the two TEST_Ps below: checks that changing or deleting a
// renderbuffer attached to an FBO invalidates the framebuffer manager's
// cached completeness. With |bound_fbo| false, the FBO is unbound before the
// mutations, and a deletion is then not expected to dirty its cached state.
void GLES2DecoderWithShaderTest::CheckRenderbufferChangesMarkFBOAsNotComplete(
    bool bound_fbo) {
  FramebufferManager* framebuffer_manager = group().framebuffer_manager();
  SetupTexture();
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
  DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
                            GL_COLOR_ATTACHMENT0,
                            GL_RENDERBUFFER,
                            client_renderbuffer_id_,
                            kServiceRenderbufferId,
                            GL_NO_ERROR);
  if (!bound_fbo) {
    DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
  }
  Framebuffer* framebuffer =
      framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
  ASSERT_TRUE(framebuffer != NULL);
  // Seed the cached state as complete so invalidation is observable.
  framebuffer_manager->MarkAsComplete(framebuffer);
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  // Test that renderbufferStorage marks fbo as not complete.
  DoRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, GL_RGBA, 1, 1, GL_NO_ERROR);
  EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
  framebuffer_manager->MarkAsComplete(framebuffer);
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  // Test deleting renderbuffer marks fbo as not complete.
  DoDeleteRenderbuffer(client_renderbuffer_id_, kServiceRenderbufferId);
  if (bound_fbo) {
    EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
  } else {
    EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  }
  // Cleanup
  DoDeleteFramebuffer(client_framebuffer_id_,
                      kServiceFramebufferId,
                      bound_fbo,
                      GL_FRAMEBUFFER,
                      0,
                      bound_fbo,
                      GL_FRAMEBUFFER,
                      0);
}
// Bound-FBO variant of the renderbuffer-change completeness check.
TEST_P(GLES2DecoderWithShaderTest,
       RenderbufferChangesMarkFBOAsNotCompleteBoundFBO) {
  CheckRenderbufferChangesMarkFBOAsNotComplete(true);
}
// Unbound-FBO variant of the renderbuffer-change completeness check.
TEST_P(GLES2DecoderWithShaderTest,
       RenderbufferChangesMarkFBOAsNotCompleteUnboundFBO) {
  CheckRenderbufferChangesMarkFBOAsNotComplete(false);
}
// Shared body for the two TEST_Ps below: checks that mutating (TexImage2D,
// CopyTexImage2D) or deleting a texture attached to an FBO invalidates the
// framebuffer manager's cached completeness. With |bound_fbo| false the FBO
// is unbound first, and a texture deletion then leaves the cache untouched.
void GLES2DecoderWithShaderTest::CheckTextureChangesMarkFBOAsNotComplete(
    bool bound_fbo) {
  FramebufferManager* framebuffer_manager = group().framebuffer_manager();
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  SetupTexture();
  // Setup "render to" texture.
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // Attach a depth renderbuffer as well so the FBO has two attachments.
  DoBindRenderbuffer(
      GL_RENDERBUFFER, client_renderbuffer_id_, kServiceRenderbufferId);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoRenderbufferStorage(GL_RENDERBUFFER,
                        GL_DEPTH_COMPONENT16,
                        GL_DEPTH_COMPONENT,
                        1,
                        1,
                        GL_NO_ERROR);
  DoFramebufferRenderbuffer(GL_FRAMEBUFFER,
                            GL_DEPTH_ATTACHMENT,
                            GL_RENDERBUFFER,
                            client_renderbuffer_id_,
                            kServiceRenderbufferId,
                            GL_NO_ERROR);
  if (!bound_fbo) {
    DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
  }
  Framebuffer* framebuffer =
      framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
  ASSERT_TRUE(framebuffer != NULL);
  // Seed the cached state as complete so invalidation is observable.
  framebuffer_manager->MarkAsComplete(framebuffer);
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  // Test TexImage2D marks fbo as not complete.
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGB, 1, 1, 0, GL_RGB, GL_UNSIGNED_BYTE, 0, 0);
  EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
  framebuffer_manager->MarkAsComplete(framebuffer);
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  // Test CopyImage2D marks fbo as not complete.
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, CopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1, 0))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  CopyTexImage2D cmd;
  cmd.Init(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, 1, 1);
  // Unbind fbo and bind again after CopyTexImage2D to avoid feedback loops.
  if (bound_fbo) {
    DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);
  }
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  if (bound_fbo) {
    DoBindFramebuffer(
        GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  }
  EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
  // Test deleting texture marks fbo as not complete.
  framebuffer_manager->MarkAsComplete(framebuffer);
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  DoDeleteTexture(kFBOClientTextureId, kFBOServiceTextureId);
  if (bound_fbo) {
    EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
  } else {
    EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  }
  // Cleanup
  DoDeleteFramebuffer(client_framebuffer_id_,
                      kServiceFramebufferId,
                      bound_fbo,
                      GL_FRAMEBUFFER,
                      0,
                      bound_fbo,
                      GL_FRAMEBUFFER,
                      0);
}
// Bound-FBO variant of the texture-change completeness check.
TEST_P(GLES2DecoderWithShaderTest, TextureChangesMarkFBOAsNotCompleteBoundFBO) {
  CheckTextureChangesMarkFBOAsNotComplete(true);
}
// Unbound-FBO variant of the texture-change completeness check.
TEST_P(GLES2DecoderWithShaderTest,
       TextureChangesMarkFBOAsNotCompleteUnboundFBO) {
  CheckTextureChangesMarkFBOAsNotComplete(false);
}
// Verifies that switching the decoder to a new surface binds that surface's
// backing framebuffer object (service id 7 here).
TEST_P(GLES2DecoderTest, CanChangeSurface) {
  scoped_refptr<GLSurfaceMock> other_surface(new GLSurfaceMock);
  EXPECT_CALL(*other_surface.get(), GetBackingFrameBufferObject())
      .WillOnce(Return(7));
  EXPECT_CALL(*gl_, BindFramebufferEXT(GL_FRAMEBUFFER_EXT, 7));
  decoder_->SetSurface(other_surface);
}
// Verifies that DrawBuffersEXT with GL_COLOR_ATTACHMENT0 succeeds on a
// user-bound framebuffer and is forwarded to glDrawBuffersARB.
// (Note: "Succceeds" typo is baked into the registered test name.)
TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateSuccceeds) {
  const GLsizei count = 1;
  const GLenum bufs[] = {GL_COLOR_ATTACHMENT0};
  DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
  cmd.Init(count, bufs);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Verifies that DrawBuffersEXT with an attachment other than
// GL_COLOR_ATTACHMENT0 fails with GL_INVALID_OPERATION (no GL call expected).
TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateFails) {
  const GLsizei count = 1;
  const GLenum bufs[] = {GL_COLOR_ATTACHMENT1_EXT};
  DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
  cmd.Init(count, bufs);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
  EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
// Verifies that GL_BACK is only a valid draw buffer when no user framebuffer
// is bound: it fails while the FBO is bound and succeeds after unbinding.
TEST_P(GLES2DecoderTest, DrawBuffersEXTImmediateBackbuffer) {
  const GLsizei count = 1;
  const GLenum bufs[] = {GL_BACK};
  DrawBuffersEXTImmediate& cmd = *GetImmediateAs<DrawBuffersEXTImmediate>();
  cmd.Init(count, bufs);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  // GL_BACK is invalid for a user FBO.
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
  EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
  DoBindFramebuffer(GL_FRAMEBUFFER, 0, 0);  // unbind
  // With the default backbuffer bound, GL_BACK is accepted and forwarded.
  EXPECT_CALL(*gl_, DrawBuffersARB(count, _)).Times(1).RetiresOnSaturation();
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(bufs)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Verifies that on an OpenGL ES 3.0 context the driver's
// DiscardFramebufferEXT function pointer is wired to glInvalidateFramebuffer
// rather than to glDiscardFramebufferEXT.
TEST_P(GLES2DecoderManualInitTest, InvalidateFramebufferBinding) {
  InitState init;
  init.gl_version = "opengl es 3.0";
  InitDecoder(init);
  // EXPECT_EQ can't be used to compare function pointers
  EXPECT_TRUE(
      gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") ==
      gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
  EXPECT_TRUE(
      gfx::MockGLInterface::GetGLProcAddress("glInvalidateFramebuffer") !=
      gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT"));
}
// Verifies that DiscardFramebufferEXT forwards to the driver and marks the
// previously-cleared framebuffer attachment as uncleared again.
TEST_P(GLES2DecoderManualInitTest, DiscardFramebufferEXT) {
  InitState init;
  init.extensions = "GL_EXT_discard_framebuffer";
  init.gl_version = "opengl es 2.0";
  InitDecoder(init);
  // EXPECT_EQ can't be used to compare function pointers
  EXPECT_TRUE(
      gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
      gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
  const GLenum target = GL_FRAMEBUFFER;
  const GLsizei count = 1;
  const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
  // SetupTexture() provides a texture with data, so the attachment starts
  // out cleared.
  SetupTexture();
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         client_texture_id_,
                         kServiceTextureId,
                         0,
                         GL_NO_ERROR);
  FramebufferManager* framebuffer_manager = group().framebuffer_manager();
  Framebuffer* framebuffer =
      framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
  EXPECT_TRUE(framebuffer->IsCleared());
  EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
      .Times(1)
      .RetiresOnSaturation();
  DiscardFramebufferEXTImmediate& cmd =
      *GetImmediateAs<DiscardFramebufferEXTImmediate>();
  cmd.Init(target, count, attachments);
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  // Discarding invalidates the attachment contents.
  EXPECT_FALSE(framebuffer->IsCleared());
}
// Verifies that DiscardFramebufferEXT fails with GL_INVALID_OPERATION when
// the extension is not enabled (default decoder init), without calling GL.
TEST_P(GLES2DecoderTest, DiscardFramebufferEXTUnsupported) {
  const GLenum target = GL_FRAMEBUFFER;
  const GLsizei count = 1;
  const GLenum attachments[] = {GL_COLOR_EXT};
  DiscardFramebufferEXTImmediate& cmd =
      *GetImmediateAs<DiscardFramebufferEXTImmediate>();
  cmd.Init(target, count, attachments);
  // Should not result into a call into GL.
  EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(attachments)));
  EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
}
// End-to-end check: after a Clear makes the FBO cleared and cached-complete,
// discarding GL_COLOR_ATTACHMENT0 must mark the attachment uncleared and the
// framebuffer incomplete in the framebuffer manager.
TEST_P(GLES2DecoderManualInitTest,
       DiscardedAttachmentsEXTMarksFramebufferIncomplete) {
  InitState init;
  init.extensions = "GL_EXT_discard_framebuffer";
  init.gl_version = "opengl es 2.0";
  init.has_alpha = true;
  init.bind_generates_resource = true;
  InitDecoder(init);
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render to" texture (null data, so initially uncleared).
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // Setup "render from" texture.
  SetupTexture();
  // The decoder first performs its lazy clear of the dirty attachment ...
  SetupExpectationsForFramebufferClearing(GL_FRAMEBUFFER,       // target
                                          GL_COLOR_BUFFER_BIT,  // clear bits
                                          0,
                                          0,
                                          0,
                                          0,       // color
                                          0,       // stencil
                                          1.0f,    // depth
                                          false);  // scissor test
  // ... then restores state before the user's Clear executes.
  SetupExpectationsForApplyingDirtyState(false,    // Framebuffer is RGB
                                         false,    // Framebuffer has depth
                                         false,    // Framebuffer has stencil
                                         0x1111,   // color bits
                                         false,    // depth mask
                                         false,    // depth enabled
                                         0,        // front stencil mask
                                         0,        // back stencil mask
                                         false);   // stencil enabled
  EXPECT_CALL(*gl_, Clear(GL_COLOR_BUFFER_BIT)).Times(1).RetiresOnSaturation();
  Clear clear_cmd;
  clear_cmd.Init(GL_COLOR_BUFFER_BIT);
  EXPECT_EQ(error::kNoError, ExecuteCmd(clear_cmd));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  // Check that framebuffer is cleared and complete.
  FramebufferManager* framebuffer_manager = group().framebuffer_manager();
  Framebuffer* framebuffer =
      framebuffer_manager->GetFramebuffer(client_framebuffer_id_);
  EXPECT_TRUE(framebuffer->IsCleared());
  EXPECT_TRUE(framebuffer_manager->IsComplete(framebuffer));
  // Check that Discard GL_COLOR_ATTACHMENT0, sets the attachment as uncleared
  // and the framebuffer as incomplete.
  EXPECT_TRUE(
      gfx::MockGLInterface::GetGLProcAddress("glDiscardFramebufferEXT") ==
      gfx::g_driver_gl.fn.glDiscardFramebufferEXTFn);
  const GLenum target = GL_FRAMEBUFFER;
  const GLsizei count = 1;
  const GLenum attachments[] = {GL_COLOR_ATTACHMENT0};
  DiscardFramebufferEXTImmediate& discard_cmd =
      *GetImmediateAs<DiscardFramebufferEXTImmediate>();
  discard_cmd.Init(target, count, attachments);
  EXPECT_CALL(*gl_, DiscardFramebufferEXT(target, count, _))
      .Times(1)
      .RetiresOnSaturation();
  EXPECT_EQ(error::kNoError,
            ExecuteImmediateCmd(discard_cmd, sizeof(attachments)));
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  EXPECT_FALSE(framebuffer->IsCleared());
  EXPECT_FALSE(framebuffer_manager->IsComplete(framebuffer));
}
// Verifies that with GL_OES_read_format available, queries for
// GL_IMPLEMENTATION_COLOR_READ_FORMAT/TYPE are forwarded to the driver's
// glGetIntegerv (one call each).
TEST_P(GLES2DecoderManualInitTest, ReadFormatExtension) {
  InitState init;
  init.extensions = "GL_OES_read_format";
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  // Additional GetError calls issued around the forwarded GetIntegerv pairs.
  EXPECT_CALL(*gl_, GetError()).Times(6).RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  GetIntegerv cmd;
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render to" texture so a user FBO is the read source.
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // READ_FORMAT query is forwarded to the driver.
  result->size = 0;
  EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
  cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
           shared_memory_id_,
           shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(1, result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  // READ_TYPE query is forwarded to the driver.
  result->size = 0;
  EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(1).RetiresOnSaturation();
  cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
           shared_memory_id_,
           shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(1, result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// Verifies that without GL_OES_read_format the decoder answers
// GL_IMPLEMENTATION_COLOR_READ_FORMAT/TYPE queries itself: no GetIntegerv
// call reaches the driver, yet a single result is still returned.
TEST_P(GLES2DecoderManualInitTest, NoReadFormatExtension) {
  InitState init;
  init.gl_version = "2.1";
  init.bind_generates_resource = true;
  InitDecoder(init);
  EXPECT_CALL(*gl_, GetError())
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .WillOnce(Return(GL_NO_ERROR))
      .RetiresOnSaturation();
  typedef GetIntegerv::Result Result;
  Result* result = static_cast<Result*>(shared_memory_address_);
  GetIntegerv cmd;
  const GLuint kFBOClientTextureId = 4100;
  const GLuint kFBOServiceTextureId = 4101;
  // Register a texture id.
  EXPECT_CALL(*gl_, GenTextures(_, _))
      .WillOnce(SetArgumentPointee<1>(kFBOServiceTextureId))
      .RetiresOnSaturation();
  GenHelper<GenTexturesImmediate>(kFBOClientTextureId);
  // Setup "render to" texture so a user FBO is the read source.
  DoBindTexture(GL_TEXTURE_2D, kFBOClientTextureId, kFBOServiceTextureId);
  DoTexImage2D(
      GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0, 0);
  DoBindFramebuffer(
      GL_FRAMEBUFFER, client_framebuffer_id_, kServiceFramebufferId);
  DoFramebufferTexture2D(GL_FRAMEBUFFER,
                         GL_COLOR_ATTACHMENT0,
                         GL_TEXTURE_2D,
                         kFBOClientTextureId,
                         kFBOServiceTextureId,
                         0,
                         GL_NO_ERROR);
  // READ_FORMAT: answered locally, no driver call (Times(0)).
  result->size = 0;
  EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
  cmd.Init(GL_IMPLEMENTATION_COLOR_READ_FORMAT,
           shared_memory_id_,
           shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(1, result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
  // READ_TYPE: answered locally, no driver call.
  result->size = 0;
  EXPECT_CALL(*gl_, GetIntegerv(_, _)).Times(0).RetiresOnSaturation();
  cmd.Init(GL_IMPLEMENTATION_COLOR_READ_TYPE,
           shared_memory_id_,
           shared_memory_offset_);
  EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
  EXPECT_EQ(1, result->GetNumResults());
  EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
// TODO(gman): PixelStorei
// TODO(gman): SwapBuffers
} // namespace gles2
} // namespace gpu
| 36.254576 | 80 | 0.662869 | [
"render"
] |
d5580a4f6db9229f65355c95e65e2fcacd259bbf | 21,842 | cpp | C++ | tests/src/runtimeApi/memory/hipMemcpyWithStreamMultiThread.cpp | parmance/HIP | 96ee9d1397f02ac4b4badd9243994728f6a89fe5 | [
"MIT"
] | 1,935 | 2017-05-28T04:52:18.000Z | 2022-03-30T23:50:43.000Z | tests/src/runtimeApi/memory/hipMemcpyWithStreamMultiThread.cpp | JCLYHY23/HIP | 6a09344dba91a1a9816cb6bcdcc6d8bc6ea564c3 | [
"MIT"
] | 1,310 | 2017-05-30T22:16:09.000Z | 2022-03-31T08:25:58.000Z | tests/src/runtimeApi/memory/hipMemcpyWithStreamMultiThread.cpp | JCLYHY23/HIP | 6a09344dba91a1a9816cb6bcdcc6d8bc6ea564c3 | [
"MIT"
] | 495 | 2017-06-01T01:26:27.000Z | 2022-03-28T16:36:51.000Z | /*
Copyright (c) 2020 - 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/*
* Different test for checking functionality of
* hipError_t hipMemcpyWithStream(void* dst, const void* src, size_t sizeBytes,
* hipMemcpyKind kind, hipStream_t stream);
*/
/* HIT_START
* BUILD: %t %s ../../test_common.cpp NVCC_OPTIONS -std=c++11
* TEST: %t
* HIT_END
*/
#include <vector>
#include <thread>
#include <chrono>
#include "test_common.h"
#define LEN 64
#define SIZE LEN << 2
#define THREADS 2
#define MAX_THREADS 16
#define test_passed(test_name) printf("%s %s PASSED!%s\n", \
KGRN, #test_name, KNRM);
#define test_failed(test_name) printf("%s %s FAILED!%s\n", \
KRED, #test_name, KNRM);
// Identifies which sub-test each worker thread runs; main() iterates the
// values in order from TestwithOnestream up to (excluding) END_OF_LIST.
enum class ops
{ TestwithOnestream,            // one stream, one kernel launch
  TestwithTwoStream,            // two independent streams
  TestOnMultiGPUwithOneStream,  // one stream per GPU
  TestkindDtoH,                 // hipMemcpyDeviceToHost path
  TestkindDtoD,                 // hipMemcpyDeviceToDevice across GPUs
  TestkindHtoH,                 // hipMemcpyHostToHost path
  TestkindDefault,              // hipMemcpyDefault (direction inferred)
#ifndef __HIP_PLATFORM_NVIDIA__
  TestkindDefaultForDtoD,       // hipMemcpyDefault for device-to-device (AMD only)
#endif
  TestDtoDonSameDevice,         // device-to-device copy within one GPU
  END_OF_LIST                   // sentinel, not a test
};
// Exercises hipMemcpyWithStream concurrently from many host threads.
// Each private member runs one scenario; TestwithMultiThreaded() fans the
// selected scenario out to one thread per host hardware thread.
class HipMemcpyWithStreamMultiThreadtests {
  // Test hipMemcpyWithStream with one stream and launch a kernel in
  // that stream, then verify the data.
  void TestwithOnestream(void);
  // Test hipMemcpyWithStream with two streams and launch kernels in
  // both streams, then verify the data.
  void TestwithTwoStream(void);
  // Test hipMemcpyWithStream with one stream for each gpu and launch
  // kernels in each, then verify the data.
  void TestOnMultiGPUwithOneStream(void);
  // Test hipMemcpyWithStream to copy data from
  // device to host (hipMemcpyDeviceToHost).
  void TestkindDtoH(void);
  // Test hipMemcpyWithStream with hipMemcpyDeviceToDevice on MultiGPU.
  void TestkindDtoD(void);
  // Test hipMemcpyWithStream with hipMemcpyHostToHost.
  void TestkindHtoH(void);
  // Test hipMemcpyWithStream with hipMemcpyDefault.
  void TestkindDefault(void);
  // Test hipMemcpyWithStream with hipMemcpyDefault for
  // the device to device transfer case.
  void TestkindDefaultForDtoD(void);
  // Test hipMemcpyWithStream with hipMemcpyDeviceToDevice on same device.
  void TestDtoDonSameDevice(void);
 public:
  // Run the selected test on multiple threads concurrently.
  void TestwithMultiThreaded(ops op);
};
// RAII wrapper over std::thread that joins (when joinable) on destruction,
// so a worker can never be leaked or trigger std::terminate in ~thread().
struct joinable_thread : std::thread {
  template <class... Args>
  explicit joinable_thread(Args&&... args)
      : std::thread(std::forward<Args>(args)...) {}  // NOLINT
  joinable_thread(joinable_thread&& rhs) = default;
  joinable_thread& operator=(joinable_thread&& rhs) = default;
  ~joinable_thread() {
    if (joinable()) {
      join();
    }
  }
};
// Copies A and B host->device on a single stream, runs vectorADD on that
// stream, then synchronizes and validates C = A + B on the host.
void HipMemcpyWithStreamMultiThreadtests::TestwithOnestream(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario
  int *A_d, *B_d, *C_d;
  int *A_h, *B_h, *C_h;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HipTest::initArrays(&A_d, &B_d, &C_d, &A_h, &B_h, &C_h, N, false);
  hipStream_t stream;
  HIPCHECK(hipStreamCreate(&stream));
  HIPCHECK(hipMemcpyWithStream(A_d, A_h, Nbytes,
                              hipMemcpyHostToDevice, stream));
  HIPCHECK(hipMemcpyWithStream(B_d, B_h, Nbytes,
                              hipMemcpyHostToDevice, stream));
  hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                     0, stream, static_cast<const int*>(A_d),
                     static_cast<const int*>(B_d), C_d, N);
  // Synchronize before the blocking result readback so the kernel is done.
  HIPCHECK(hipStreamSynchronize(stream));
  HIPCHECK(hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
  HipTest::checkVectorADD(A_h, B_h, C_h, N);
  HipTest::freeArrays(A_d, B_d, C_d, A_h, B_h, C_h, false);
  HIPCHECK(hipStreamDestroy(stream));
}
// Runs two independent copy+kernel pipelines, one per stream, on the current
// device, then validates both result buffers.
void HipMemcpyWithStreamMultiThreadtests::TestwithTwoStream(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario
  int noOfstreams = 2;
  int *A_d[noOfstreams], *B_d[noOfstreams], *C_d[noOfstreams];
  int *A_h[noOfstreams], *B_h[noOfstreams], *C_h[noOfstreams];
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  for (int i=0; i < noOfstreams; ++i) {
    HipTest::initArrays(&A_d[i], &B_d[i], &C_d[i],
                        &A_h[i], &B_h[i], &C_h[i], N, false);
  }
  hipStream_t stream[noOfstreams];
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipStreamCreate(&stream[i]));
  }
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipMemcpyWithStream(A_d[i], A_h[i], Nbytes,
                                hipMemcpyHostToDevice, stream[i]));
    HIPCHECK(hipMemcpyWithStream(B_d[i], B_h[i], Nbytes,
                                hipMemcpyHostToDevice, stream[i]));
  }
  for (int i=0; i < noOfstreams; ++i) {
    hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                       0, stream[i], static_cast<const int*>(A_d[i]),
                       static_cast<const int*>(B_d[i]), C_d[i], N);
  }
  // Per-stream sync before each blocking readback and validation.
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipStreamSynchronize(stream[i]));
    HIPCHECK(hipMemcpy(C_h[i], C_d[i], Nbytes, hipMemcpyDeviceToHost));
    HipTest::checkVectorADD(A_h[i], B_h[i], C_h[i], N);
  }
  for (int i=0; i < noOfstreams; ++i) {
    HipTest::freeArrays(A_d[i], B_d[i], C_d[i], A_h[i], B_h[i], C_h[i], false);
    HIPCHECK(hipStreamDestroy(stream[i]));
  }
}
// Device-to-device copy within a single GPU (device 0): set 0 is uploaded
// from the host, set 1 is cloned from set 0 with hipMemcpyDeviceToDevice;
// both sets run vectorADD and both results must match A_h[0] + B_h[0].
void HipMemcpyWithStreamMultiThreadtests::TestDtoDonSameDevice(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario
  int noOfstreams = 2;
  int *A_d[noOfstreams], *B_d[noOfstreams], *C_d[noOfstreams];
  int *A_h[noOfstreams], *B_h[noOfstreams], *C_h[noOfstreams];
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HipTest::initArrays(&A_d[0], &B_d[0], &C_d[0],
                      &A_h[0], &B_h[0], &C_h[0], N, false);
  hipStream_t stream[noOfstreams];
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipSetDevice(0));
    HIPCHECK(hipStreamCreate(&stream[i]));
  }
  // Second buffer set lives on the same device; only C_h[1] is needed on
  // the host for validation.
  HIPCHECK(hipSetDevice(0));
  HIPCHECK(hipMalloc(&A_d[1], Nbytes));
  HIPCHECK(hipMalloc(&B_d[1], Nbytes));
  HIPCHECK(hipMalloc(&C_d[1], Nbytes));
  C_h[1] = reinterpret_cast<int*>(malloc(Nbytes));
  HIPASSERT(C_h[1] != NULL);
  HIPCHECK(hipMemcpyWithStream(A_d[0], A_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  HIPCHECK(hipMemcpyWithStream(B_d[0], B_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  HIPCHECK(hipMemcpyWithStream(A_d[1], A_d[0], Nbytes,
                              hipMemcpyDeviceToDevice, stream[1]));
  HIPCHECK(hipMemcpyWithStream(B_d[1], B_d[0], Nbytes,
                              hipMemcpyDeviceToDevice, stream[1]));
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipSetDevice(0));
    hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                       0, stream[i], static_cast<const int*>(A_d[i]),
                       static_cast<const int*>(B_d[i]), C_d[i], N);
  }
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipSetDevice(0));
    HIPCHECK(hipStreamSynchronize(stream[i]));
    HIPCHECK(hipMemcpy(C_h[i], C_d[i], Nbytes, hipMemcpyDeviceToHost));
    HipTest::checkVectorADD(A_h[0], B_h[0], C_h[i], N);
  }
  HipTest::freeArrays(A_d[0], B_d[0], C_d[0], A_h[0], B_h[0], C_h[0], false);
  if (A_d[1]) {
    HIPCHECK(hipFree(A_d[1]));
  }
  if (B_d[1]) {
    HIPCHECK(hipFree(B_d[1]));
  }
  if (C_d[1]) {
    HIPCHECK(hipFree(C_d[1]));
  }
  if (C_h[1]) {
    free(C_h[1]);
  }
  for (int i=0; i < noOfstreams; ++i) {
    HIPCHECK(hipStreamDestroy(stream[i]));
  }
}
// One stream per GPU: each device gets its own buffers, copies, kernel and
// validation. Skipped (returns early) on single-GPU machines.
void HipMemcpyWithStreamMultiThreadtests::TestOnMultiGPUwithOneStream(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HIPCHECK(hipGetDeviceCount(&numDevices));
  // On a single GPU machine there is nothing to test; return.
  if (numDevices <= 1) {
    return;
  }
  int *A_d[numDevices], *B_d[numDevices], *C_d[numDevices];
  int *A_h[numDevices], *B_h[numDevices], *C_h[numDevices];
  hipStream_t stream[numDevices];
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipStreamCreate(&stream[i]));
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HipTest::initArrays(&A_d[i], &B_d[i], &C_d[i],
                        &A_h[i], &B_h[i], &C_h[i], N, false);
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipMemcpyWithStream(A_d[i], A_h[i], Nbytes,
                                hipMemcpyHostToDevice, stream[i]));
    HIPCHECK(hipMemcpyWithStream(B_d[i], B_h[i], Nbytes,
                                hipMemcpyHostToDevice, stream[i]));
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                       0, stream[i], static_cast<const int*>(A_d[i]),
                       static_cast<const int*>(B_d[i]), C_d[i], N);
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipStreamSynchronize(stream[i]));
    HIPCHECK(hipMemcpy(C_h[i], C_d[i], Nbytes, hipMemcpyDeviceToHost));
    HipTest::checkVectorADD(A_h[i], B_h[i], C_h[i], N);
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HipTest::freeArrays(A_d[i], B_d[i], C_d[i], A_h[i], B_h[i], C_h[i], false);
    HIPCHECK(hipStreamDestroy(stream[i]));
  }
}
// Same pipeline as TestwithOnestream, but the result readback itself also
// goes through hipMemcpyWithStream with kind hipMemcpyDeviceToHost.
void HipMemcpyWithStreamMultiThreadtests::TestkindDtoH(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario
  int *A_d, *B_d, *C_d;
  int *A_h, *B_h, *C_h;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HipTest::initArrays(&A_d, &B_d, &C_d, &A_h, &B_h, &C_h, N, false);
  hipStream_t stream;
  HIPCHECK(hipStreamCreate(&stream));
  HIPCHECK(hipMemcpyWithStream(A_d, A_h, Nbytes,
                              hipMemcpyHostToDevice, stream));
  HIPCHECK(hipMemcpyWithStream(B_d, B_h, Nbytes,
                              hipMemcpyHostToDevice, stream));
  hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                     0, stream, static_cast<const int*>(A_d),
                     static_cast<const int*>(B_d), C_d, N);
  HIPCHECK(hipStreamSynchronize(stream));
  // Device-to-host readback under test.
  HIPCHECK(hipMemcpyWithStream(C_h, C_d, Nbytes,
                              hipMemcpyDeviceToHost, stream));
  HipTest::checkVectorADD(A_h, B_h, C_h, N);
  HipTest::freeArrays(A_d, B_d, C_d, A_h, B_h, C_h, false);
  HIPCHECK(hipStreamDestroy(stream));
}
// Uploads input to GPU 0, then fans it out to every other GPU with
// hipMemcpyDeviceToDevice; each GPU runs vectorADD and every result is
// validated against GPU 0's host input. Skipped on single-GPU machines.
void HipMemcpyWithStreamMultiThreadtests::TestkindDtoD(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HIPCHECK(hipGetDeviceCount(&numDevices));
  // On a single GPU machine there is nothing to test; return.
  if (numDevices <= 1) {
    return;
  }
  int *A_d[numDevices], *B_d[numDevices], *C_d[numDevices];
  int *A_h[numDevices], *B_h[numDevices], *C_h[numDevices];
  hipStream_t stream[numDevices];
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipStreamCreate(&stream[i]));
  }
  // Initialize and create the host and device elements for the first device.
  HIPCHECK(hipSetDevice(0));
  HipTest::initArrays(&A_d[0], &B_d[0], &C_d[0],
                      &A_h[0], &B_h[0], &C_h[0], N, false);
  for (int i=1; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i))  // NOTE(review): missing ';' — relies on HIPCHECK expanding to a complete statement; confirm
    HIPCHECK(hipMalloc(&A_d[i], Nbytes));
    HIPCHECK(hipMalloc(&B_d[i], Nbytes));
    HIPCHECK(hipMalloc(&C_d[i], Nbytes));
    C_h[i] = reinterpret_cast<int*>(malloc(Nbytes));
    HIPASSERT(C_h[i] != NULL);
  }
  HIPCHECK(hipSetDevice(0));
  HIPCHECK(hipMemcpyWithStream(A_d[0], A_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  HIPCHECK(hipMemcpyWithStream(B_d[0], B_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  // Copy device data from the 1st GPU to the rest of the GPUs
  // (numDevices in the setup). GPUs are numbered 0,1,2..n.
  for (int i=1; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipMemcpyWithStream(A_d[i], A_d[0], Nbytes,
                                hipMemcpyDeviceToDevice, stream[i]));
    HIPCHECK(hipMemcpyWithStream(B_d[i], B_d[0], Nbytes,
                                hipMemcpyDeviceToDevice, stream[i]));
  }
  // Launch the kernel on every GPU present in the setup, including the 1st.
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                       0, stream[i], static_cast<const int*>(A_d[i]),
                       static_cast<const int*>(B_d[i]), C_d[i], N);
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipStreamSynchronize(stream[i]));
    HIPCHECK(hipMemcpy(C_h[i], C_d[i], Nbytes, hipMemcpyDeviceToHost));
    HipTest::checkVectorADD(A_h[0], B_h[0], C_h[i], N);
  }
  HipTest::freeArrays(A_d[0], B_d[0], C_d[0], A_h[0], B_h[0], C_h[0], false);
  HIPCHECK(hipStreamDestroy(stream[0]));
  for (int i=1; i < numDevices; ++i) {
    if (A_d[i]) {
      HIPCHECK(hipFree(A_d[i]));
    }
    if (B_d[i]) {
      HIPCHECK(hipFree(B_d[i]));
    }
    if (C_d[i]) {
      HIPCHECK(hipFree(C_d[i]));
    }
    if (C_h[i]) {
      free(C_h[i]);
    }
    HIPCHECK(hipStreamDestroy(stream[i]));
  }
}
// Single-stream pipeline where every copy uses hipMemcpyDefault, letting the
// runtime infer the transfer direction from the pointer kinds.
void HipMemcpyWithStreamMultiThreadtests::TestkindDefault(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario
  int *A_d, *B_d, *C_d;
  int *A_h, *B_h, *C_h;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HipTest::initArrays(&A_d, &B_d, &C_d, &A_h, &B_h, &C_h, N, false);
  hipStream_t stream;
  HIPCHECK(hipStreamCreate(&stream));
  HIPCHECK(hipMemcpyWithStream(A_d, A_h, Nbytes, hipMemcpyDefault, stream));
  HIPCHECK(hipMemcpyWithStream(B_d, B_h, Nbytes, hipMemcpyDefault, stream));
  hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                     0, stream, static_cast<const int*>(A_d),
                     static_cast<const int*>(B_d), C_d, N);
  HIPCHECK(hipStreamSynchronize(stream));
  HIPCHECK(hipMemcpyWithStream(C_h, C_d, Nbytes, hipMemcpyDefault, stream));
  HipTest::checkVectorADD(A_h, B_h, C_h, N);
  HipTest::freeArrays(A_d, B_d, C_d, A_h, B_h, C_h, false);
  HIPCHECK(hipStreamDestroy(stream));
}
// Like TestkindDtoD, but the device-to-device fan-out copies use
// hipMemcpyDefault so the runtime must infer the D2D direction itself.
// Skipped on single-GPU machines; compiled out on the NVIDIA platform.
void HipMemcpyWithStreamMultiThreadtests::TestkindDefaultForDtoD(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  HIPCHECK(hipGetDeviceCount(&numDevices));
  // Test case will not run on a single GPU setup.
  if (numDevices <= 1) {
    return;
  }
  int *A_d[numDevices], *B_d[numDevices], *C_d[numDevices];
  int *A_h[numDevices], *B_h[numDevices], *C_h[numDevices];
  // Initialize and create the host and device elements for the first device.
  HIPCHECK(hipSetDevice(0));
  HipTest::initArrays(&A_d[0], &B_d[0], &C_d[0],
                      &A_h[0], &B_h[0], &C_h[0], N, false);
  for (int i=1; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipMalloc(&A_d[i], Nbytes));
    HIPCHECK(hipMalloc(&B_d[i], Nbytes));
    HIPCHECK(hipMalloc(&C_d[i], Nbytes));
    C_h[i] = reinterpret_cast<int*>(malloc(Nbytes));
    HIPASSERT(C_h[i] != NULL);
  }
  hipStream_t stream[numDevices];
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));
    HIPCHECK(hipStreamCreate(&stream[i]));
  }
  HIPCHECK(hipMemcpyWithStream(A_d[0], A_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  HIPCHECK(hipMemcpyWithStream(B_d[0], B_h[0], Nbytes,
                              hipMemcpyHostToDevice, stream[0]));
  // Copy device data from the 1st GPU to the rest of the GPUs
  // using the hipMemcpyDefault kind (numDevices in the setup).
  // GPUs are numbered 0,1,2..n.
  for (int i=1; i < numDevices; ++i) {
    HIPCHECK(hipMemcpyWithStream(A_d[i], A_d[0], Nbytes,
                                hipMemcpyDefault, stream[i]));
    HIPCHECK(hipMemcpyWithStream(B_d[i], B_d[0], Nbytes,
                                hipMemcpyDefault, stream[i]));
  }
  for (int i=0; i < numDevices; ++i) {
    hipLaunchKernelGGL(HipTest::vectorADD, dim3(blocks), dim3(threadsPerBlock),
                       0, stream[i], static_cast<const int*>(A_d[i]),
                       static_cast<const int*>(B_d[i]), C_d[i], N);
  }
  for (int i=0; i < numDevices; ++i) {
    HIPCHECK(hipSetDevice(i));  // hipMemcpy will be on this device
    HIPCHECK(hipStreamSynchronize(stream[i]));
    HIPCHECK(hipMemcpy(C_h[i], C_d[i], Nbytes, hipMemcpyDeviceToHost));
    // Output of each GPU is validated against the input of the 1st GPU.
    HipTest::checkVectorADD(A_h[0], B_h[0], C_h[i], N);
  }
  HipTest::freeArrays(A_d[0], B_d[0], C_d[0], A_h[0], B_h[0], C_h[0], false);
  HIPCHECK(hipStreamDestroy(stream[0]));
  for (int i=1; i < numDevices; ++i) {
    if (A_d[i]) {
      HIPCHECK(hipFree(A_d[i]));
    }
    if (B_d[i]) {
      HIPCHECK(hipFree(B_d[i]));
    }
    if (C_d[i]) {
      HIPCHECK(hipFree(C_d[i]));
    }
    if (C_h[i]) {
      free(C_h[i]);
    }
    HIPCHECK(hipStreamDestroy(stream[i]));
  }
}
// Host-to-host copy through hipMemcpyWithStream: fills A_h, copies it to
// B_h with kind hipMemcpyHostToHost, and checks the two buffers match.
void HipMemcpyWithStreamMultiThreadtests::TestkindHtoH(void) {
  size_t Nbytes = N * sizeof(int);
  int numDevices = 0;  // NOTE(review): unused in this scenario (as is 'blocks')
  int *A_h, *B_h;
  unsigned blocks = HipTest::setNumBlocks(blocksPerCU, threadsPerBlock, N);
  // Allocate memory to A_h and B_h
  A_h = static_cast<int*>(malloc(Nbytes));
  HIPASSERT(A_h != NULL);
  B_h = static_cast<int*>(malloc(Nbytes));
  HIPASSERT(B_h != NULL);
  for (size_t i = 0; i < N; ++i) {
    if (A_h) {
      // NOTE(review): float literal assigned to int — truncates to 3 + i;
      // presumably intentional (just needs distinct values), confirm.
      (A_h)[i] = 3.146f + i;  // Pi
    }
  }
  hipStream_t stream;
  HIPCHECK(hipStreamCreate(&stream));
  HIPCHECK(hipMemcpyWithStream(B_h, A_h, Nbytes, hipMemcpyHostToHost, stream));
  HIPCHECK(hipStreamSynchronize(stream));
  for (size_t i = 0; i < N; i++) {
    HIPASSERT(A_h[i] == B_h[i]);
  }
  if (A_h) {
    free(A_h);
  }
  if (B_h) {
    free(B_h);
  }
  HIPCHECK(hipStreamDestroy(stream));
}
// Spawns one thread per host hardware thread, each running the scenario
// selected by 'op'. joinable_thread joins in its destructor, so all workers
// have finished by the time 'threads' goes out of scope.
void HipMemcpyWithStreamMultiThreadtests::TestwithMultiThreaded(ops op) {
  size_t thread_count = getHostThreadCount();
  if (thread_count == 0) {
    failed("Thread Count is 0");
  }
  std::vector<joinable_thread> threads;
  for (uint32_t i = 0; i < thread_count; i++) {
    // [&] captures 'op' and 'this' by reference; safe because every thread
    // is joined before this function returns.
    threads.emplace_back(std::thread{[&] {
      switch ( op ) {
        case ops::TestwithOnestream:
          TestwithOnestream();
          break;
        case ops::TestwithTwoStream:
          TestwithTwoStream();
          break;
        case ops::TestkindDtoH:
          TestkindDtoH();
          break;
        case ops::TestkindHtoH:
          TestkindHtoH();
          break;
        case ops::TestkindDtoD:
          TestkindDtoD();
          break;
        case ops::TestOnMultiGPUwithOneStream:
          TestOnMultiGPUwithOneStream();
          break;
        case ops::TestkindDefault:
          TestkindDefault();
          break;
#ifndef __HIP_PLATFORM_NVIDIA__
        case ops::TestkindDefaultForDtoD:
          TestkindDefaultForDtoD();
          break;
#endif
        case ops::TestDtoDonSameDevice:
          TestDtoDonSameDevice();
          break;
        default:{}
      }
    }});
  }
}
// Drives every ops scenario through the multithreaded harness and reports a
// PASSED line per scenario. Failures inside the scenarios abort via
// HIPCHECK/HIPASSERT rather than through this switch.
int main() {
  HipMemcpyWithStreamMultiThreadtests tests;
  for (int op = static_cast<int>(ops::TestwithOnestream);
      op < static_cast<int>(ops::END_OF_LIST); ++op) {
    tests.TestwithMultiThreaded(static_cast<ops>(op));
    switch ( static_cast<ops>(op) ) {
      case ops::TestwithOnestream:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestwithOnestream);
        break;
      case ops::TestwithTwoStream:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestwithTwoStream);
        break;
      case ops::TestkindDtoH:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestkindDtoH);
        break;
      case ops::TestkindHtoH:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestkindHtoH);
        break;
      case ops::TestkindDtoD:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestkindDtoD);
        break;
      case ops::TestOnMultiGPUwithOneStream:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestOnMultiGPUwithOneStream);
        break;
      case ops::TestkindDefault:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestkindDefault);
        break;
#ifndef __HIP_PLATFORM_NVIDIA__
      case ops::TestkindDefaultForDtoD:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestkindDefaultForDtoD);
        break;
#endif
      case ops::TestDtoDonSameDevice:
        test_passed(HipMemcpyWithStreamMultiThreadtests
                    ::TestDtoDonSameDevice);
        break;
      default: { test_failed("No Operation to done with API"); }
    }
  }
}
| 32.648729 | 79 | 0.645957 | [
"vector"
] |
f65aabd996087bb3494735294ea7ffe4e00ee357 | 398 | cpp | C++ | Source/UtyMapUnreal/UtyMapUnreal.cpp | lonnibesancon/UtyMapUnreal | 2639173e82208cff5e405b8105fd6277e5212246 | [
"Apache-2.0"
] | 2 | 2020-11-28T12:57:33.000Z | 2021-07-19T20:53:28.000Z | Source/UtyMapUnreal/UtyMapUnreal.cpp | lonnibesancon/UtyMapUnreal | 2639173e82208cff5e405b8105fd6277e5212246 | [
"Apache-2.0"
] | null | null | null | Source/UtyMapUnreal/UtyMapUnreal.cpp | lonnibesancon/UtyMapUnreal | 2639173e82208cff5e405b8105fd6277e5212246 | [
"Apache-2.0"
] | 1 | 2020-09-17T07:02:55.000Z | 2020-09-17T07:02:55.000Z | #include "UtyMapUnreal.h"
// Registers UtyMapUnreal as the primary game module with the default
// (no custom startup/shutdown) module implementation.
IMPLEMENT_PRIMARY_GAME_MODULE( FDefaultGameModuleImpl, UtyMapUnreal, "UtyMapUnreal" );
// General log
DEFINE_LOG_CATEGORY(UtyLog);
// Logging during game startup
DEFINE_LOG_CATEGORY(UtyInit);
// Logging during map and mesh processing
DEFINE_LOG_CATEGORY(UtyProcessing);
// Logging for critical errors that must always be addressed
DEFINE_LOG_CATEGORY(UtyCriticalErrors);
| 24.875 | 86 | 0.829146 | [
"mesh"
] |
f65f36456b4374e23d4ab2b6e5782da127469fc2 | 1,986 | cpp | C++ | src/cfd_fee.cpp | cryptogarageinc/cg-cfd | 8e251221634fd1e2e6d9132adaa5adc386cd2893 | [
"MIT"
] | null | null | null | src/cfd_fee.cpp | cryptogarageinc/cg-cfd | 8e251221634fd1e2e6d9132adaa5adc386cd2893 | [
"MIT"
] | 2 | 2019-11-11T07:14:44.000Z | 2019-11-15T00:54:02.000Z | src/cfd_fee.cpp | cryptogarageinc/cg-cfd | 8e251221634fd1e2e6d9132adaa5adc386cd2893 | [
"MIT"
] | null | null | null | // Copyright 2019 CryptoGarage
/**
* @file cfd_fee.cpp
*
* @brief Fee計算の関連クラスの実装ファイル
*/
#include <algorithm>
#include <string>
#include <vector>
#include "cfd/cfd_fee.h"
#include "cfdcore/cfdcore_amount.h"
#include "cfdcore/cfdcore_transaction_common.h"
namespace cfd {
using cfd::core::AbstractTransaction;
using cfd::core::Amount;
// -----------------------------------------------------------------------------
// File-local constants
// -----------------------------------------------------------------------------
//! KB size
static constexpr const uint64_t kKiloByteSize = 1000;
// -----------------------------------------------------------------------------
// FeeCalculator
// -----------------------------------------------------------------------------
/**
 * Compute a transaction fee from its serialized sizes and a fee rate.
 * Uses vsize when it is non-zero and differs from size (witness present),
 * otherwise size. 'rate' is satoshi per 1000 bytes (kKiloByteSize); the
 * result is clamped up to the relay minimum fee.
 */
Amount FeeCalculator::CalculateFee(  // Fee calculation
    uint32_t size, uint32_t vsize, uint64_t rate) {
  int64_t satoshi;
  uint32_t use_size = ((vsize != 0) && (size != vsize)) ? vsize : size;
  satoshi = use_size * rate / kKiloByteSize;
  if (satoshi < kRelayMinimumFee) {
    satoshi = kRelayMinimumFee;
  }
  return Amount::CreateBySatoshiAmount(satoshi);
}
// Default-constructs with the relay minimum fee as the base rate.
FeeCalculator::FeeCalculator()
    : FeeCalculator(FeeCalculator::kRelayMinimumFee) {}
// Constructs with an explicit base rate (satoshi per 1000 bytes).
FeeCalculator::FeeCalculator(uint64_t baserate) : baserate_(baserate) {}
// uint32_t overload; forwards to the size_t implementation.
Amount FeeCalculator::GetFee(uint32_t size) const {
  return GetFee(static_cast<size_t>(size));
}
// Fee for 'size' bytes at baserate_ satoshi per 1000 bytes.
Amount FeeCalculator::GetFee(size_t size) const {
  int64_t byte_size = static_cast<int64_t>(size);
  int64_t fee = baserate_ * byte_size / 1000;
  // Integer division may truncate a small-but-nonzero fee to 0; charge at
  // least 1 satoshi when both the size and the rate are nonzero.
  if ((fee == 0) && (byte_size != 0) && (baserate_ != 0)) {
    fee = 1;
  }
  return Amount::CreateBySatoshiAmount(fee);
}
// Fee for spending a single UTXO: worst-case input size is the fixed TxIn
// minimum plus the unlocking-script maximum, converted to vsize together
// with the maximum witness size, then priced via GetFee(size_t).
// NOTE(review): assumes uscript_size_max excludes the fixed TxIn overhead.
Amount FeeCalculator::GetFee(const Utxo& utxo) const {
  uint32_t minimum_txin = static_cast<uint32_t>(TxIn::kMinimumTxInSize);
  uint32_t nowit_size = minimum_txin + utxo.uscript_size_max;
  uint32_t vsize =
      AbstractTransaction::GetVsizeFromSize(nowit_size, utxo.witness_size_max);
  return GetFee(vsize);
}
} // namespace cfd
| 29.641791 | 80 | 0.608258 | [
"vector"
] |
f665aa8fe90a16666247c69131f81143b6ae83fc | 1,254 | cpp | C++ | Codeforces/1463/a.cpp | eyangch/competitive-programming | 59839efcec72cb792e61b7d316f83ad54f16a166 | [
"MIT"
] | 14 | 2019-08-14T00:43:10.000Z | 2021-12-16T05:43:31.000Z | Codeforces/1463/a.cpp | eyangch/competitive-programming | 59839efcec72cb792e61b7d316f83ad54f16a166 | [
"MIT"
] | null | null | null | Codeforces/1463/a.cpp | eyangch/competitive-programming | 59839efcec72cb792e61b7d316f83ad54f16a166 | [
"MIT"
] | 6 | 2020-12-30T03:30:17.000Z | 2022-03-11T03:40:02.000Z | #include <bits/stdc++.h>
#define f first
#define s second
#define endl "\n"
using namespace std;
typedef long long ll;
typedef vector<int> vi;
typedef pair<int, int> pii;
// Streams a pair as "first second" (space separated, no trailing newline).
template <typename T1, typename T2>
ostream &operator <<(ostream &os, pair<T1, T2> p){
    return os << p.first << ' ' << p.second;
}
// Streams a vector as "e0, e1, ..., " — note the trailing ", ".
template <typename T>
ostream &operator <<(ostream &os, vector<T> &v){
    for(const T &elem : v){
        os << elem << ", ";
    }
    return os;
}
// Streams a set (by value, matching the original signature) in sorted
// order as "e0, e1, ..., " — note the trailing ", ".
template <typename T>
ostream &operator <<(ostream &os, set<T> s){
    for(const T &elem : s){
        os << elem << ", ";
    }
    return os;
}
// Streams a map (by value, matching the original signature) one
// "key value" entry per line, relying on the pair streamer above.
template <typename T1, typename T2>
ostream &operator <<(ostream &os, map<T1, T2> m){
    for(pair<T1, T2> entry : m){
        os << entry << endl;
    }
    return os;
}
// Fast cin/cout setup; when a non-empty name is given, also redirects
// stdin/stdout to "<name>.in" / "<name>.out" (USACO-style file I/O).
void setIO(string s){
    ios_base::sync_with_stdio(false); cin.tie(NULL);
    if(s == "") return;
    freopen((s+".in").c_str(), "r", stdin);
    freopen((s+".out").c_str(), "w", stdout);
}
// For each test case with candies a, b, c: answer YES iff the total is a
// multiple of 9 and every pile has at least total/9 candies.
signed main(){
    setIO("");
    int T; cin >> T;
    while(T--){
        int a, b, c; cin >> a >> b >> c;
        int total = a + b + c;
        bool ok = (total % 9 == 0);
        if(ok){
            int each = total / 9;
            ok = (a >= each) && (b >= each) && (c >= each);
        }
        cout << (ok ? "YES" : "NO") << endl;
    }
    return 0;
}
| 26.125 | 101 | 0.516746 | [
"vector"
] |
f667ba6ba69d9eae671217fb9a2ba6bba6f3992f | 6,636 | hpp | C++ | a3/src/DGP/HyperplaneN.hpp | meetps/CS-749 | f1ddc4ed003b3a9f222f2a724d53076ddec697a6 | [
"MIT"
] | 2 | 2022-01-04T00:10:13.000Z | 2022-03-16T23:54:38.000Z | a3/src/DGP/HyperplaneN.hpp | meetps/CS-749 | f1ddc4ed003b3a9f222f2a724d53076ddec697a6 | [
"MIT"
] | null | null | null | a3/src/DGP/HyperplaneN.hpp | meetps/CS-749 | f1ddc4ed003b3a9f222f2a724d53076ddec697a6 | [
"MIT"
] | null | null | null | //============================================================================
//
// DGP: Digital Geometry Processing toolkit
// Copyright (C) 2016, Siddhartha Chaudhuri
//
// This software is covered by a BSD license. Portions derived from other
// works are covered by their respective licenses. For full licensing
// information see the LICENSE.txt file.
//
//============================================================================
#ifndef __DGP_HyperplaneN_hpp__
#define __DGP_HyperplaneN_hpp__
#include "Common.hpp"
#include "Math.hpp"
#include "MatrixMN.hpp"
#include "RayIntersectableN.hpp"
#include "VectorN.hpp"
namespace DGP {
// Forward declarations
template <long N, typename T> class HyperplaneN;
namespace Internal {
/**
* <b>[Internal]</b> Base class for hyperplanes ((N - 1)-flats) in N-dimensional space, where N is any <b>positive</b>
* (non-zero) integer and T is a field.
*
* @note This class is <b>INTERNAL</b>! Don't use it directly.
*/
template <long N, typename T>
class /* DGP_DLL_LOCAL */ HyperplaneNBase : public RayIntersectableN<N, T>
{
  public:
    typedef HyperplaneN<N, T> HyperplaneT;  ///< N-dimensional hyperplane.
    typedef VectorN<N, T> VectorT;  ///< N-dimensional vector.

    /** Default constructor: the degenerate "hyperplane" with zero normal through the origin. */
    HyperplaneNBase() : normal(VectorT::zero()), dist(0) {}

    /** Construct a hyperplane from a point on it, and the direction vector of the hyperplane (need not be a unit vector). */
    static HyperplaneT fromPointAndNormal(VectorT const & point_, VectorT const & normal_)
    {
      HyperplaneT hyperplane;
      hyperplane.normal = normal_.unit();
      // dist is the signed distance of the plane from the origin along the unit normal.
      hyperplane.dist = hyperplane.normal.dot(point_);

      return hyperplane;
    }

    /** Construct a hyperplane from N points on it. */
    static HyperplaneT fromNPoints(std::vector<VectorT> const & points)
    {
      alwaysAssertM(points.size() >= N,
                    format("HyperplaneN: Too few points specified (provided %ld points, hyperplane requires %ld points)",
                           (long)points.size(), N));

      // Solve A * n = (-1, ..., -1) where the rows of A are the points: any such n
      // satisfies points[i].dot(n) = -1, i.e. all points share the same plane offset.
      MatrixMN<N, N, T> a;
      for (long i = 0; i < N; ++i)
        a.setRow(i, points[i]);

      try
      {
        a.invert();
      }
      catch (...)
      {
        // Singular matrix: the points do not determine a unique hyperplane.
        throw Error("HyperplaneN: Points are degenerate");
      }

      HyperplaneT hyperplane;
      hyperplane.normal = (a * VectorT(-1)).unit();
      hyperplane.dist = hyperplane.normal.dot(points[0]);

      return hyperplane;
    }

    /** Get a point on the hyperplane. */
    VectorT getPoint() const { return dist * normal; }

    /** Get the unit normal vector of the hyperplane. */
    VectorT const & getNormal() const { return normal; }

    /**
     * Get the coefficients {a_i} of the hyperplane equation a_0 * x_0 + a_1 * x_1 + ... + a_{N - 1} * x_{N - 1} + a_N * 1 = 0.
     */
    VectorN<N + 1, T> getEquation() const
    {
      VectorN<N + 1, T> coeffs;
      for (long i = 0; i < N; ++i)
        coeffs[i] = normal[i];

      coeffs[N] = -dist;

      return coeffs;
    }

    /** Flip the hyperplane so that the normal points the other way. */
    void flip()
    {
      normal = -normal;
      dist = -dist;
    }

    /** Get the (unsigned) distance of a given point from the hyperplane. */
    T distance(VectorT const & p) const
    {
      return std::abs(signedDistance(p));
    }

    /**
     * Get the signed distance of a given point from the hyperplane. This is positive if the point is on the side of the
     * hyperplane containing the normal, and negative if the point is on the other side.
     */
    T signedDistance(VectorT const & p) const
    {
      return p.dot(normal) - dist;
    }

    /** Get the square of the distance of the hyperplane from a given point. */
    T squaredDistance(VectorT const & p) const
    {
      return Math::square(signedDistance(p));
    }

    /** Get the point on the hyperplane closest to a given point. */
    VectorT closestPoint(VectorT const & p) const
    {
      // Project p onto the plane along the normal direction.
      return p - signedDistance(p) * normal;
    }

    /**
     * Check if the positive half space (the side of the hyperplane containing the normal) contains a given point. Returns true
     * if the point lies on the hyperplane.
     */
    bool positiveHalfSpaceContains(VectorT const & p) const
    {
      return signedDistance(p) >= 0;
    }

    /**
     * Check if the negative half space (the side of the hyperplane <b>not</b> containing the normal) contains a given point.
     * Returns true if the point lies on the hyperplane.
     */
    bool negativeHalfSpaceContains(VectorT const & p) const
    {
      return signedDistance(p) <= 0;
    }

    /** Reflect a point in the hyperplane. */
    VectorT reflect(VectorT const & p) const
    {
      return p - 2 * signedDistance(p) * normal;
    }

    bool rayIntersects(RayN<N, T> const & ray, T max_time = -1) const
    {
      return rayIntersectionTime(ray, max_time) >= 0;
    }

    T rayIntersectionTime(RayN<N, T> const & ray, T max_time = -1) const
    {
      // Solve (origin + t * direction).dot(normal) = dist for t.
      T numer = dist - normal.dot(ray.getOrigin());
      T denom = normal.dot(ray.getDirection());

      // Ray (nearly) parallel to the hyperplane: no intersection.
      if (std::abs(denom) < Math::eps(numer, denom))
        return -1;
      else
      {
        T t = numer / denom;
        return (max_time < 0 || t <= max_time) ? t : -1;
      }
    }

    RayIntersectionN<N, T> rayIntersection(RayN<N, T> const & ray, T max_time = -1) const
    {
      T t = rayIntersectionTime(ray, max_time);
      if (t >= 0)
        return RayIntersectionN<N, T>(t, &normal);
      else
        return RayIntersectionN<N, T>(-1);
    }

    /** Get a textual description of the hyperplane. */
    std::string toString() const
    {
      std::ostringstream oss;
      oss << "[N: " << normal.toString() << ", D: " << dist << ']';
      return oss.str();
    }

  protected:
    VectorT normal;  ///< The unit normal vector of the hyperplane.
    T dist;  ///< The signed distance of the hyperplane from the origin.

}; // class HyperplaneNBase
} // namespace Internal
/** A hyperplane ((N - 1)-flat) in N-dimensional space, where N is any <b>positive</b> (non-zero) integer and T is a field. */
template <long N, typename T>
class /* DGP_API */ HyperplaneN : public Internal::HyperplaneNBase<N, T>
{
  public:
    /** Default constructor. All state (normal, offset) is initialized by the base class. */
    HyperplaneN() {}

}; // class HyperplaneN
/** Pipe a textual representation of a hyperplane to a <code>std::ostream</code>. */
template <long N, typename T>
std::ostream &
operator<<(std::ostream & os, HyperplaneN<N, T> const & plane)
{
  os << plane.toString();
  return os;
}
} // namespace DGP
#include "Hyperplane3.hpp"
#endif
| 29.493333 | 127 | 0.602773 | [
"geometry",
"vector"
] |
f6680540224ffe2cb302bcd4d5790d5e9da6bb88 | 5,492 | cpp | C++ | engine/modules/procedural/editor/procedural_geometry_panel.cpp | dulingzhi/echo | de7ee416c49e1494a008e0a818155d6d0f6d5a7a | [
"MIT"
] | null | null | null | engine/modules/procedural/editor/procedural_geometry_panel.cpp | dulingzhi/echo | de7ee416c49e1494a008e0a818155d6d0f6d5a7a | [
"MIT"
] | null | null | null | engine/modules/procedural/editor/procedural_geometry_panel.cpp | dulingzhi/echo | de7ee416c49e1494a008e0a818155d6d0f6d5a7a | [
"MIT"
] | null | null | null | #include "procedural_geometry_panel.h"
#include "engine/core/editor/editor.h"
#include "engine/core/editor/qt/QWidgets.h"
#include "engine/core/base/class_method_bind.h"
#include "engine/core/util/PathUtil.h"
#include "engine/core/util/StringUtil.h"
#include "engine/core/io/MemoryReader.h"
#include "engine/core/util/Buffer.h"
#include "engine/core/io/IO.h"
#include "engine/core/render/base/image/image.h"
#include "engine/core/main/Engine.h"
#include "engine/core/render/base/atla/texture_atlas.h"
#include "engine/core/log/Log.h"
#include "painter/qgraphics_scene_ex.h"
namespace Echo
{
#ifdef ECHO_EDITOR_MODE
// Constructor: builds the editor panel for a ProceduralGeometry object.
// Loads the .ui layout, wires up toolbar/menu signals and creates the
// QGraphicsScene that the procedural node graph is painted into.
ProceduralGeometryPanel::ProceduralGeometryPanel(Object* obj)
{
	m_proceduralGeometry = ECHO_DOWN_CAST<ProceduralGeometry*>(obj);

	m_ui = qobject_cast<QDockWidget*>(EditorApi.qLoadUi("engine/modules/procedural/editor/procedural_geometry_panel.ui"));

	// Keep the first splitter pane fixed; let the graph view absorb resizes.
	QSplitter* splitter = m_ui->findChild<QSplitter*>("m_splitter");
	if (splitter)
	{
		splitter->setStretchFactor(0, 0);
		splitter->setStretchFactor(1, 1);
	}

	// Tool button icons
	m_ui->findChild<QToolButton*>("m_play")->setIcon(QIcon((Engine::instance()->getRootPath() + "engine/modules/procedural/editor/icon/play.png").c_str()));

	// connect signal slots
	EditorApi.qConnectWidget(m_ui->findChild<QWidget*>("m_graphicsView"), QSIGNAL(customContextMenuRequested(const QPoint&)), this, createMethodBind(&ProceduralGeometryPanel::onRightClickGraphicsView));
	EditorApi.qConnectWidget(m_ui->findChild<QWidget*>("m_play"), QSIGNAL(clicked()), this, createMethodBind(&ProceduralGeometryPanel::onPlay));
	EditorApi.qConnectAction(m_ui->findChild<QAction*>("m_actionDeleteNodes"), QSIGNAL(triggered()), this, createMethodBind(&ProceduralGeometryPanel::onDeletePGNodes));

	// create QGraphicsScene
	m_graphicsView = m_ui->findChild<QGraphicsView*>("m_graphicsView");
	m_graphicsScene = new QGraphicsSceneEx;
	m_graphicsView->setScene(m_graphicsScene);

	// background
	m_backgroundGridSmall.set(m_graphicsView, m_graphicsScene);
	m_backgroundGridBig.set(m_graphicsView, m_graphicsScene);
}
// Destructor: releases image resources, detaches the panel from the editor and
// frees the loaded UI. Order matters: the UI widget must outlive its detach.
ProceduralGeometryPanel::~ProceduralGeometryPanel()
{
	clearImageItemAndBorder();

	EditorApi.removeCenterPanel(m_ui);
	delete m_ui; m_ui = nullptr;
}
// Per-frame tick: simply refreshes the whole panel display.
void ProceduralGeometryPanel::update()
{
	refreshUiDisplay();
}
void ProceduralGeometryPanel::onRightClickGraphicsView()
{
if (!m_menuNew)
{
m_menuNew = EchoNew(QMenu(m_ui));
Echo::StringArray pgNodeClasses;
Echo::Class::getChildClasses(pgNodeClasses, "PGNode", true);
for (String& className : pgNodeClasses)
{
QAction* newAction = new QAction;
newAction->setText(Echo::StringUtil::Replace(className, "PG", "").c_str());
newAction->setData(className.c_str());
m_menuNew->addAction(newAction);
EditorApi.qConnectAction(newAction, QSIGNAL(triggered()), this, createMethodBind(&ProceduralGeometryPanel::onNewPGNode));
}
}
QPoint localPos = m_graphicsView->mapFromGlobal(QCursor::pos());
QPointF scenePos = m_graphicsView->mapToScene(localPos);
m_newPGNodePosition = Echo::Vector2(scenePos.x(), scenePos.y());
m_menuNew->exec(QCursor::pos());
}
// Slot: instantiate the PGNode subclass named in the triggering action's data
// and attach it to the procedural geometry's root node at the position stored
// by the last right-click.
// Fix: guard against a null result from Class::create (unknown class name)
// before dereferencing it, and against a missing sender/geometry/root.
void ProceduralGeometryPanel::onNewPGNode()
{
	QAction* action = qobject_cast<QAction*>(EditorApi.qSender());
	if (!action || !m_proceduralGeometry)
		return;

	Echo::PGNode* root = m_proceduralGeometry->getPGNode();
	if (!root)
		return;

	Echo::String className = action->data().toString().toStdString().c_str();
	Echo::PGNode* pgNode = Echo::Class::create<PGNode*>(className);
	if (pgNode)	// Class::create may fail for unregistered class names
	{
		pgNode->setPosition(m_newPGNodePosition);
		root->addChild(pgNode);
	}
}
// Slot: delete the currently selected PG nodes.
// Fix: removed the dead placeholder local (`int a = 10;`) that had no effect.
void ProceduralGeometryPanel::onDeletePGNodes()
{
	// TODO: remove selected nodes and their painters once selection support exists.
}
// Slot: "play" toolbar button — run the procedural geometry generation.
void ProceduralGeometryPanel::onPlay()
{
	if (!m_proceduralGeometry)
		return;

	m_proceduralGeometry->play();
}
// Repaint the whole panel: background grid first, node painters on top.
void ProceduralGeometryPanel::refreshUiDisplay()
{
	drawBackground();
	drawNodes();
}
// Apply the dark editor theme to the graph view and refresh both grid layers
// (fine 15px grid and coarse 150px grid).
void ProceduralGeometryPanel::drawBackground()
{
	auto& style = m_backgroundStyle;
	style.m_backgroundColor.setRGBA(77, 77, 77, 255);
	style.m_fineGridColor.setRGBA(84, 84, 84, 255);
	style.m_coarseGridColor.setRGBA(64, 64, 64, 255);

	EditorApi.qGraphicsViewSetBackgroundBrush(m_ui->findChild<QGraphicsView*>("m_graphicsView"), style.m_backgroundColor);

	m_backgroundGridSmall.update(15, style.m_fineGridColor);
	m_backgroundGridBig.update(150, style.m_coarseGridColor);
}
void ProceduralGeometryPanel::drawNodes()
{
const vector<PGNode*>::type& pgNodes = m_proceduralGeometry->getPGNode()->children();
while (m_pgNodePainters.size() > pgNodes.size())
{
EchoSafeDelete(m_pgNodePainters.back(), PGNodePainter);
m_pgNodePainters.pop_back();
}
if (m_pgNodePainters.size() < pgNodes.size())
{
for (size_t i = m_pgNodePainters.size(); i < pgNodes.size(); ++i)
m_pgNodePainters.emplace_back(EchoNew(Procedural::PGNodePainter(m_graphicsView, m_graphicsScene, pgNodes[i])));
}
for (size_t i = 0; i < pgNodes.size(); i++)
{
if (!m_pgNodePainters[i] || m_pgNodePainters[i]->m_pgNode != pgNodes[i])
{
EchoSafeDelete(m_pgNodePainters[i], PGNodePainter);
m_pgNodePainters[i] = EchoNew(Procedural::PGNodePainter(m_graphicsView, m_graphicsScene, pgNodes[i]));
}
}
for (size_t i = 0; i < pgNodes.size(); i++)
{
m_pgNodePainters[i]->update();
}
}
// Intentionally empty stub: image-preview cleanup is not implemented yet.
// Called from the destructor, so it must stay safe to call at any time.
void ProceduralGeometryPanel::clearImageItemAndBorder()
{
}
// Intentionally empty stub: image-preview rendering is not implemented yet.
void ProceduralGeometryPanel::refreshImageDisplay()
{
}
#endif
}
| 30.342541 | 200 | 0.739075 | [
"render",
"object",
"vector"
] |
f66943a7018e9f1f08805fd2aeb6f47392872dbb | 191,828 | cpp | C++ | lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | DalavanCloud/clang | 937984b3a98c5c2a3eef78d0b1d6573287ae97a6 | [
"Apache-2.0"
] | 1 | 2019-02-10T03:01:34.000Z | 2019-02-10T03:01:34.000Z | lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | DalavanCloud/clang | 937984b3a98c5c2a3eef78d0b1d6573287ae97a6 | [
"Apache-2.0"
] | null | null | null | lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | DalavanCloud/clang | 937984b3a98c5c2a3eef78d0b1d6573287ae97a6 | [
"Apache-2.0"
] | null | null | null | //===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
using namespace CodeGen;
namespace {
/// Entry points of the NVPTX OpenMP device runtime that this code generator may
/// emit calls to. Each enumerator's comment gives the C signature of the
/// corresponding runtime function.
enum OpenMPRTLFunctionNVPTX {
  /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
  /// Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function, int16_t
  /// IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
  /// int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2,
  /// Call to __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
  /// global_tid, kmp_critical_name *lck)
  OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple,
  /// Call to __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc,
  /// kmp_int32 global_tid, kmp_critical_name *lck)
  OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple,
  /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait,
  /// Call to void __kmpc_data_sharing_init_stack();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
  /// Call to void __kmpc_data_sharing_init_stack_spmd();
  OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
  /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
  /// int16_t UseSharedMemory);
  OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
  /// Call to void __kmpc_data_sharing_pop_stack(void *a);
  OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
  /// Call to void __kmpc_begin_sharing_variables(void ***args,
  /// size_t n_args);
  OMPRTL_NVPTX__kmpc_begin_sharing_variables,
  /// Call to void __kmpc_end_sharing_variables();
  OMPRTL_NVPTX__kmpc_end_sharing_variables,
  /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
  OMPRTL_NVPTX__kmpc_get_shared_variables,
  /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_parallel_level,
  /// Call to int8_t __kmpc_is_spmd_exec_mode();
  OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
  /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
  /// const void *buf, size_t size, int16_t is_shared, const void **res);
  OMPRTL_NVPTX__kmpc_get_team_static_memory,
  /// Call to void __kmpc_restore_team_static_memory(int16_t
  /// isSPMDExecutionMode, int16_t is_shared);
  OMPRTL_NVPTX__kmpc_restore_team_static_memory,
  /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL__kmpc_barrier_simple_spmd,
};
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
/// Emits \p EnterCallee(\p EnterArgs) before the region and
/// \p ExitCallee(\p ExitArgs) after it; when \p Conditional is set, the region
/// body only runs if the enter call returned non-zero.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::FunctionCallee EnterCallee = nullptr;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::FunctionCallee ExitCallee = nullptr;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional = false;
  llvm::BasicBlock *ContBlock = nullptr; // join block used in conditional mode

public:
  NVPTXActionTy(llvm::FunctionCallee EnterCallee,
                ArrayRef<llvm::Value *> EnterArgs,
                llvm::FunctionCallee ExitCallee,
                ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  // Emit the enter runtime call; in conditional mode branch on its result.
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  // Close the conditional region opened by Enter (only valid in conditional mode).
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  // Emit the exit runtime call.
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};
/// A class to track the execution mode when codegening directives within
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by containing directives such as 'parallel'
/// to emit optimized code. The previous mode (and, for SPMD, the previous
/// full-runtime flag) is restored on destruction.
class ExecutionRuntimeModesRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
      CGOpenMPRuntimeNVPTX::EM_Unknown;
  CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
  bool SavedRuntimeMode = false;
  bool *RuntimeMode = nullptr; // null when constructed for Non-SPMD mode

public:
  /// Constructor for Non-SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
      : ExecMode(ExecMode) {
    SavedExecMode = ExecMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
  }
  /// Constructor for SPMD mode.
  ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
                            bool &RuntimeMode, bool FullRuntimeMode)
      : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
    SavedExecMode = ExecMode;
    SavedRuntimeMode = RuntimeMode;
    ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
    RuntimeMode = FullRuntimeMode;
  }
  ~ExecutionRuntimeModesRAII() {
    ExecMode = SavedExecMode;
    if (RuntimeMode)
      *RuntimeMode = SavedRuntimeMode;
  }
};
/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  /// Number of threads per warp on all known NVPTX devices.
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  /// Bitmask extracting the lane id from a thread id.
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 128,

  /// Maximal size of the shared memory buffer.
  SharedMemorySize = 128,
};
/// Strip parentheses, implicit casts, array subscripts and OpenMP array
/// sections from \p RefExpr and return the canonical declaration of the
/// underlying variable or member.
static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
  RefExpr = RefExpr->IgnoreParens();
  if (const auto *Subscript = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
    // Peel nested subscripts down to the base lvalue.
    const Expr *Inner = Subscript->getBase()->IgnoreParenImpCasts();
    while (const auto *NextSubscript = dyn_cast<ArraySubscriptExpr>(Inner))
      Inner = NextSubscript->getBase()->IgnoreParenImpCasts();
    RefExpr = Inner;
  } else if (const auto *Section = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
    // Peel nested sections first, then any remaining subscripts.
    const Expr *Inner = Section->getBase()->IgnoreParenImpCasts();
    while (const auto *NextSection = dyn_cast<OMPArraySectionExpr>(Inner))
      Inner = NextSection->getBase()->IgnoreParenImpCasts();
    while (const auto *NextSubscript = dyn_cast<ArraySubscriptExpr>(Inner))
      Inner = NextSubscript->getBase()->IgnoreParenImpCasts();
    RefExpr = Inner;
  }
  RefExpr = RefExpr->IgnoreParenImpCasts();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(RefExpr))
    return cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
  const auto *ME = cast<MemberExpr>(RefExpr);
  return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
return P1.first > P2.first;
}
/// Build an implicit record type with one field per globalized variable.
/// Variables from \p EscapedDecls (escaping a parallel region) become
/// [WarpSize]-sized array fields aligned to at least GlobalMemoryAlignment;
/// variables from \p EscapedDeclsForTeams become single fields with their
/// natural alignment. The decl-to-field mapping is returned through
/// \p MappedDeclsFields. Returns null when there is nothing to globalize.
static RecordDecl *buildRecordForGlobalizedVars(
    ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
    ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields) {
  if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
    return nullptr;
  // Collect (alignment, decl) pairs so fields can be emitted in decreasing
  // alignment order (minimizes padding).
  SmallVector<VarsDataTy, 4> GlobalizedVars;
  for (const ValueDecl *D : EscapedDecls)
    GlobalizedVars.emplace_back(
        CharUnits::fromQuantity(std::max(
            C.getDeclAlign(D).getQuantity(),
            static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
        D);
  for (const ValueDecl *D : EscapedDeclsForTeams)
    GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
  std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
                   stable_sort_comparator);
  // Build struct _globalized_locals_ty {
  //         /*  globalized vars  */[WarSize] align (max(decl_align,
  //         GlobalMemoryAlignment))
  //         /*  globalized vars  */ for EscapedDeclsForTeams
  //       };
  RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
  GlobalizedRD->startDefinition();
  llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
      EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
  for (const auto &Pair : GlobalizedVars) {
    const ValueDecl *VD = Pair.second;
    QualType Type = VD->getType();
    // Store references as pointers inside the record.
    if (Type->isLValueReferenceType())
      Type = C.getPointerType(Type.getNonReferenceType());
    else
      Type = Type.getNonReferenceType();
    SourceLocation Loc = VD->getLocation();
    FieldDecl *Field;
    if (SingleEscaped.count(VD)) {
      // Teams-level variable: single field, original attributes preserved.
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (VD->hasAttrs()) {
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          Field->addAttr(*I);
      }
    } else {
      // Parallel-level variable: one slot per warp lane, over-aligned for
      // coalesced global-memory access.
      llvm::APInt ArraySize(32, WarpSize);
      Type = C.getConstantArrayType(Type, ArraySize, ArrayType::Normal, 0);
      Field = FieldDecl::Create(
          C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
          C.getTrivialTypeSourceInfo(Type, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
                                     static_cast<CharUnits::QuantityType>(
                                         GlobalMemoryAlignment)));
      Field->addAttr(AlignedAttr::CreateImplicit(
          C, AlignedAttr::GNU_aligned, /*IsAlignmentExpr=*/true,
          IntegerLiteral::Create(C, Align,
                                 C.getIntTypeForBitwidth(32, /*Signed=*/0),
                                 SourceLocation())));
    }
    GlobalizedRD->addDecl(Field);
    MappedDeclsFields.try_emplace(VD, Field);
  }
  GlobalizedRD->completeDefinition();
  return GlobalizedRD;
}
/// Get the list of variables that can escape their declaration context.
/// Walks a statement tree and records every variable whose address may outlive
/// its scope (captured by reference, address-taken, array-decayed, ...) so the
/// caller can globalize its storage. Results are retrieved through
/// getEscapedDecls()/getGlobalizedRecord() and friends after the visit.
class CheckVarsEscapingDeclContext final
    : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
  CodeGenFunction &CGF;
  // Variables that must be globalized (insertion order preserved).
  llvm::SetVector<const ValueDecl *> EscapedDecls;
  // Escaped variables with variably modified (VLA-dependent) types.
  llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
  // Escaped locals that are actually parameters captured by value.
  llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
  RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  // True while visiting a subexpression whose lvalues escape (e.g. under '&').
  bool AllEscaped = false;
  // True while analyzing the parallel part of a combined distribute+parallel
  // construct; relaxes the capture checks in markAsEscaped.
  bool IsForCombinedParallelRegion = false;

  // Record \p VD as escaped unless it is a declare-target variable or a
  // by-reference capture that need not be globalized.
  void markAsEscaped(const ValueDecl *VD) {
    // Do not globalize declare target variables.
    if (!isa<VarDecl>(VD) ||
        OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
      return;
    VD = cast<ValueDecl>(VD->getCanonicalDecl());
    // Variables captured by value must be globalized.
    if (auto *CSI = CGF.CapturedStmtInfo) {
      if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
        // Check if need to capture the variable that was already captured by
        // value in the outer region.
        if (!IsForCombinedParallelRegion) {
          if (!FD->hasAttrs())
            return;
          const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
          if (!Attr)
            return;
          if (((Attr->getCaptureKind() != OMPC_map) &&
               !isOpenMPPrivate(
                   static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
              ((Attr->getCaptureKind() == OMPC_map) &&
               !FD->getType()->isAnyPointerType()))
            return;
        }
        if (!FD->getType()->isReferenceType()) {
          assert(!VD->getType()->isVariablyModifiedType() &&
                 "Parameter captured by value with variably modified type");
          EscapedParameters.insert(VD);
        } else if (!IsForCombinedParallelRegion) {
          return;
        }
      }
    }
    if ((!CGF.CapturedStmtInfo ||
         (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
        VD->getType()->isReferenceType())
      // Do not globalize variables with reference type.
      return;
    if (VD->getType()->isVariablyModifiedType())
      EscapedVariableLengthDecls.insert(VD);
    else
      EscapedDecls.insert(VD);
  }

  // Mark reference-typed decls as escaped and analyze a VarDecl's initializer.
  void VisitValueDecl(const ValueDecl *VD) {
    if (VD->getType()->isLValueReferenceType())
      markAsEscaped(VD);
    if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
      if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = VD->getType()->isLValueReferenceType();
        Visit(VarD->getInit());
        AllEscaped = SavedAllEscaped;
      }
    }
  }

  // Analyze captures of an OpenMP captured statement. When
  // \p IsCombinedParallelRegion is set, only captures privatized by a
  // firstprivate/lastprivate clause of the combined construct are treated as
  // escaping from the combined region.
  void VisitOpenMPCapturedStmt(const CapturedStmt *S,
                               ArrayRef<OMPClause *> Clauses,
                               bool IsCombinedParallelRegion) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
        if (IsCombinedParallelRegion) {
          // Check if the variable is privatized in the combined construct and
          // those private copies must be shared in the inner parallel
          // directive.
          IsForCombinedParallelRegion = false;
          for (const OMPClause *C : Clauses) {
            if (!isOpenMPPrivate(C->getClauseKind()) ||
                C->getClauseKind() == OMPC_reduction ||
                C->getClauseKind() == OMPC_linear ||
                C->getClauseKind() == OMPC_private)
              continue;
            ArrayRef<const Expr *> Vars;
            if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
              Vars = PC->getVarRefs();
            else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
              Vars = PC->getVarRefs();
            else
              llvm_unreachable("Unexpected clause.");
            for (const auto *E : Vars) {
              const Decl *D =
                  cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
              if (D == VD->getCanonicalDecl()) {
                IsForCombinedParallelRegion = true;
                break;
              }
            }
            if (IsForCombinedParallelRegion)
              break;
          }
        }
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
        IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
      }
    }
  }

  // Build the globalization record from the collected escaped decls; teams
  // regions use single fields, parallel regions use per-lane arrays.
  void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
    assert(!GlobalizedRD &&
           "Record for globalized variables is built already.");
    ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
    if (IsInTTDRegion)
      EscapedDeclsForTeams = EscapedDecls.getArrayRef();
    else
      EscapedDeclsForParallel = EscapedDecls.getArrayRef();
    GlobalizedRD = ::buildRecordForGlobalizedVars(
        CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
        MappedDeclsFields);
  }

public:
  // \p TeamsReductions pre-seeds the escaped set with reduction variables.
  CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
                               ArrayRef<const ValueDecl *> TeamsReductions)
      : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
  }
  virtual ~CheckVarsEscapingDeclContext() = default;
  void VisitDeclStmt(const DeclStmt *S) {
    if (!S)
      return;
    for (const Decl *D : S->decls())
      if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
        VisitValueDecl(VD);
  }
  void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
    if (!D)
      return;
    if (!D->hasAssociatedStmt())
      return;
    if (const auto *S =
            dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
      // Do not analyze directives that do not actually require capturing,
      // like `omp for` or `omp simd` directives.
      llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
      getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
      if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
        VisitStmt(S->getCapturedStmt());
        return;
      }
      VisitOpenMPCapturedStmt(
          S, D->clauses(),
          CaptureRegions.back() == OMPD_parallel &&
              isOpenMPDistributeDirective(D->getDirectiveKind()));
    }
  }
  // By-reference captures of a plain captured statement escape.
  void VisitCapturedStmt(const CapturedStmt *S) {
    if (!S)
      return;
    for (const CapturedStmt::Capture &C : S->captures()) {
      if (C.capturesVariable() && !C.capturesVariableByCopy()) {
        const ValueDecl *VD = C.getCapturedVar();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD))
          VisitValueDecl(VD);
      }
    }
  }
  // By-reference lambda captures escape.
  void VisitLambdaExpr(const LambdaExpr *E) {
    if (!E)
      return;
    for (const LambdaCapture &C : E->captures()) {
      if (C.capturesVariable()) {
        if (C.getCaptureKind() == LCK_ByRef) {
          const ValueDecl *VD = C.getCapturedVar();
          markAsEscaped(VD);
          if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
            VisitValueDecl(VD);
        }
      }
    }
  }
  // __block-style by-reference block captures escape.
  void VisitBlockExpr(const BlockExpr *E) {
    if (!E)
      return;
    for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
      if (C.isByRef()) {
        const VarDecl *VD = C.getVariable();
        markAsEscaped(VD);
        if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
          VisitValueDecl(VD);
      }
    }
  }
  // Lvalue call arguments may have their address taken by the callee.
  void VisitCallExpr(const CallExpr *E) {
    if (!E)
      return;
    for (const Expr *Arg : E->arguments()) {
      if (!Arg)
        continue;
      if (Arg->isLValue()) {
        const bool SavedAllEscaped = AllEscaped;
        AllEscaped = true;
        Visit(Arg);
        AllEscaped = SavedAllEscaped;
      } else {
        Visit(Arg);
      }
    }
    Visit(E->getCallee());
  }
  // A decl reference escapes only when reached in an AllEscaped context.
  void VisitDeclRefExpr(const DeclRefExpr *E) {
    if (!E)
      return;
    const ValueDecl *VD = E->getDecl();
    if (AllEscaped)
      markAsEscaped(VD);
    if (isa<OMPCapturedExprDecl>(VD))
      VisitValueDecl(VD);
    else if (const auto *VarD = dyn_cast<VarDecl>(VD))
      if (VarD->isInitCapture())
        VisitValueDecl(VD);
  }
  // Taking an address ('&') makes the operand escape.
  void VisitUnaryOperator(const UnaryOperator *E) {
    if (!E)
      return;
    if (E->getOpcode() == UO_AddrOf) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  // Array-to-pointer decay exposes the array's address.
  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
    if (!E)
      return;
    if (E->getCastKind() == CK_ArrayToPointerDecay) {
      const bool SavedAllEscaped = AllEscaped;
      AllEscaped = true;
      Visit(E->getSubExpr());
      AllEscaped = SavedAllEscaped;
    } else {
      Visit(E->getSubExpr());
    }
  }
  // Generic expression: rvalue contexts reset the AllEscaped flag.
  void VisitExpr(const Expr *E) {
    if (!E)
      return;
    bool SavedAllEscaped = AllEscaped;
    if (!E->isLValue())
      AllEscaped = false;
    for (const Stmt *Child : E->children())
      if (Child)
        Visit(Child);
    AllEscaped = SavedAllEscaped;
  }
  // Generic statement: just recurse into children.
  void VisitStmt(const Stmt *S) {
    if (!S)
      return;
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  /// Returns the record that handles all the escaped local variables and used
  /// instead of their original storage.
  const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
    if (!GlobalizedRD)
      buildRecordForGlobalizedVars(IsInTTDRegion);
    return GlobalizedRD;
  }

  /// Returns the field in the globalized record for the escaped variable.
  const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
    assert(GlobalizedRD &&
           "Record for globalized variables must be generated already.");
    auto I = MappedDeclsFields.find(VD);
    if (I == MappedDeclsFields.end())
      return nullptr;
    return I->getSecond();
  }

  /// Returns the list of the escaped local variables/parameters.
  ArrayRef<const ValueDecl *> getEscapedDecls() const {
    return EscapedDecls.getArrayRef();
  }

  /// Checks if the escaped local variable is actually a parameter passed by
  /// value.
  const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
    return EscapedParameters;
  }

  /// Returns the list of the escaped variables with the variably modified
  /// types.
  ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
    return EscapedVariableLengthDecls.getArrayRef();
  }
};
} // anonymous namespace
/// Get the GPU warp size by emitting a read of the warpsize special register.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  llvm::Function *F = llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize);
  return CGF.EmitRuntimeCall(F, "nvptx_warp_size");
}
/// Get the id of the current thread on the GPU (tid.x special register).
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  llvm::Function *F = llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x);
  return CGF.EmitRuntimeCall(F, "nvptx_tid");
}
/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  // warp_id = tid >> log2(WarpSize)
  return CGF.Builder.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits,
                                "nvptx_warp_id");
}
/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  // lane_id = tid & (WarpSize - 1)
  return CGF.Builder.CreateAnd(getNVPTXThreadID(CGF),
                               CGF.Builder.getInt32(LaneIDMask),
                               "nvptx_lane_id");
}
/// Get the maximum number of threads in a block of the GPU (ntid.x register).
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  llvm::Function *F = llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x);
  return CGF.EmitRuntimeCall(F, "nvptx_num_threads");
}
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSPMDExecutionMode = false) {
  if (IsInSPMDExecutionMode)
    return getNVPTXNumThreads(CGF);
  // Generic mode: the last warp is reserved, so subtract it.
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                          "thread_limit");
}
/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
  // master_tid = (NumThreads - 1) & ~(WarpSize - 1), i.e. the last thread id
  // rounded down to the first lane of its (the last) warp.
  llvm::Value *WarpMask =
      Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
  llvm::Value *LastTid = Bld.CreateNUWSub(NumThreads, Bld.getInt32(1));
  return Bld.CreateAnd(LastTid, Bld.CreateNot(WarpMask), "master_tid");
}
// Set up the state for the per-kernel worker function: a nullary function
// whose llvm::Function is created immediately via createWorkerFunction.
CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM, SourceLocation Loc)
    : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
      Loc(Loc) {
  createWorkerFunction(CGM);
}
void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments. The "_worker" name is a
  // placeholder; presumably it is renamed when the kernel is emitted — TODO
  // confirm at the call site.
  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      /*placeholder=*/"_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
  // The worker never calls itself, which enables better optimization.
  WorkerFn->setDoesNotRecurse();
}
// Returns the execution mode (SPMD / non-SPMD / unknown) currently tracked for
// the target region being emitted.
CGOpenMPRuntimeNVPTX::ExecutionMode
CGOpenMPRuntimeNVPTX::getExecutionMode() const {
  return CurrentExecutionMode;
}
/// Select the data-sharing mode: CUDA mode when -fopenmp-cuda-mode is active,
/// the generic (runtime-backed) scheme otherwise.
static CGOpenMPRuntimeNVPTX::DataSharingMode
getDataSharingMode(CodeGenModule &CGM) {
  if (CGM.getLangOpts().OpenMPCUDAMode)
    return CGOpenMPRuntimeNVPTX::CUDA;
  return CGOpenMPRuntimeNVPTX::Generic;
}
/// Checks if the expression is constant or does not have non-trivial function
/// calls.
static bool isTrivial(ASTContext &Ctx, const Expr * E) {
  // Anything with (possible) side effects is never trivial.
  if (E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true))
    return false;
  // Constant expressions and expressions with only trivial calls are trivial.
  return E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
         !E->hasNonTrivialCall(Ctx);
}
/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
/// iff there is only one that is not evaluatable at the compile time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
  if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
    const Stmt *Child = nullptr;
    for (const Stmt *S : C->body()) {
      // Skip trivial expressions (constant / no non-trivial calls).
      if (const auto *E = dyn_cast<Expr>(S)) {
        if (isTrivial(Ctx, E))
          continue;
      }
      // Some of the statements can be ignored.
      if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
          isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
        continue;
      // Analyze declarations.
      if (const auto *DS = dyn_cast<DeclStmt>(S)) {
        // A DeclStmt is ignorable when every declaration in it is a
        // non-runtime entity or a trivially-initialized variable.
        if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
              if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
                  isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
                  isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
                  isa<UsingDirectiveDecl>(D) ||
                  isa<OMPDeclareReductionDecl>(D) ||
                  isa<OMPThreadPrivateDecl>(D))
                return true;
              const auto *VD = dyn_cast<VarDecl>(D);
              if (!VD)
                return false;
              return VD->isConstexpr() ||
                     ((VD->getType().isTrivialType(Ctx) ||
                       VD->getType()->isReferenceType()) &&
                      (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
            }))
          continue;
      }
      // Found multiple children - cannot get the one child only.
      if (Child)
        return Body;
      Child = S;
    }
    if (Child)
      return Child;
  }
  // Not a compound statement (or no single significant child): return as-is.
  return Body;
}
/// Check if the parallel directive has an 'if' clause with non-constant or
/// false condition. Also, check if the number of threads is strictly specified
/// and run those directives in non-SPMD mode.
static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  if (D.hasClausesOfKind<OMPNumThreadsClause>())
    return true;
  for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
    // Only 'if' clauses that apply to the parallel region are relevant.
    OpenMPDirectiveKind Modifier = C->getNameModifier();
    if (Modifier != OMPD_parallel && Modifier != OMPD_unknown)
      continue;
    bool CondValue;
    if (!C->getCondition()->EvaluateAsBooleanCondition(CondValue, Ctx))
      return true; // condition is not a compile-time constant
    if (!CondValue)
      return true; // condition is constant false
  }
  return false;
}
/// Check for inner (nested) SPMD construct, if any. For a 'target' or
/// 'target teams' directive \p D, returns true when the single significant
/// nested construct is a parallel region that can run in SPMD mode (i.e. is
/// not restricted by if/num_threads clauses).
static bool hasNestedSPMDDirective(ASTContext &Ctx,
                                   const OMPExecutableDirective &D) {
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  // Reduce the captured body to its single significant child, if any.
  const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
  if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      // 'target' + directly nested parallel construct supports SPMD.
      if (isOpenMPParallelDirective(DKind) &&
          !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
        return true;
      if (DKind == OMPD_teams) {
        // Look one level deeper: 'target' + 'teams' + parallel construct.
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Ctx, Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              !hasParallelIfNumThreadsClause(Ctx, *NND))
            return true;
        }
      }
      return false;
    case OMPD_target_teams:
      // 'target teams' supports SPMD iff it directly nests an unrestricted
      // parallel construct.
      return isOpenMPParallelDirective(DKind) &&
             !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
    // Any other directive kind for \p D is a caller bug: this analysis is
    // only invoked for 'target' / 'target teams'.
    case OMPD_target_simd:
    case OMPD_target_parallel:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive.");
    }
  }
  return false;
}
/// Returns true if target directive \p D can be emitted in SPMD
/// (all-threads-execute) mode instead of the generic master-worker scheme.
static bool supportsSPMDExecutionMode(ASTContext &Ctx,
                                      const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    // SPMD is possible only if a suitable parallel construct is nested.
    return hasNestedSPMDDirective(Ctx, D);
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // Combined parallel constructs are SPMD unless if/num_threads clauses
    // force the generic scheme.
    return !hasParallelIfNumThreadsClause(Ctx, D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute:
  case OMPD_target_teams_distribute_simd:
    return false;
  // Every non-target-entry directive below should never be queried here.
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
/// Check if the directive is loops based and has schedule clause at all or has
/// static scheduling.
static bool hasStaticScheduling(const OMPExecutableDirective &D) {
assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
isOpenMPLoopDirective(D.getDirectiveKind()) &&
"Expected loop-based directive.");
return !D.hasClausesOfKind<OMPOrderedClause>() &&
(!D.hasClausesOfKind<OMPScheduleClause>() ||
llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
[](const OMPScheduleClause *C) {
return C->getScheduleKind() == OMPC_SCHEDULE_static;
}));
}
/// Check for inner (nested) lightweight runtime construct, if any. For an
/// SPMD directive \p D, returns true when the nested region reduces to a
/// worksharing loop with static scheduling, which allows emitting the
/// lightweight (reduced) runtime instead of the full one.
static bool hasNestedLightweightDirective(ASTContext &Ctx,
                                          const OMPExecutableDirective &D) {
  assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
  const auto *CS = D.getInnermostCapturedStmt();
  const auto *Body =
      CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  // Reduce the captured body to its single significant child, if any.
  const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
  if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
    OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
    switch (D.getDirectiveKind()) {
    case OMPD_target:
      // 'target' + combined parallel worksharing loop with static schedule.
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_parallel) {
        // 'target' + 'parallel' + static worksharing loop.
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Ctx, Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      } else if (DKind == OMPD_teams) {
        // 'target' + 'teams' (+ optionally 'parallel') + static loop.
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Ctx, Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPParallelDirective(DKind) &&
              isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
          if (DKind == OMPD_parallel) {
            // 'target' + 'teams' + 'parallel' + static worksharing loop.
            Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true);
            if (!Body)
              return false;
            ChildStmt = getSingleCompoundChild(Ctx, Body);
            if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
              DKind = NND->getDirectiveKind();
              if (isOpenMPWorksharingDirective(DKind) &&
                  isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
                return true;
            }
          }
        }
      }
      return false;
    case OMPD_target_teams:
      // 'target teams' + combined parallel static loop.
      if (isOpenMPParallelDirective(DKind) &&
          isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
          hasStaticScheduling(*NestedDir))
        return true;
      if (DKind == OMPD_parallel) {
        // 'target teams' + 'parallel' + static worksharing loop.
        Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
        if (!Body)
          return false;
        ChildStmt = getSingleCompoundChild(Ctx, Body);
        if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
          DKind = NND->getDirectiveKind();
          if (isOpenMPWorksharingDirective(DKind) &&
              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
            return true;
        }
      }
      return false;
    case OMPD_target_parallel:
      // 'target parallel' only needs a directly nested static loop.
      return isOpenMPWorksharingDirective(DKind) &&
             isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
    // Any other directive kind for \p D violates the SPMD precondition.
    case OMPD_target_teams_distribute:
    case OMPD_target_simd:
    case OMPD_target_parallel_for:
    case OMPD_target_parallel_for_simd:
    case OMPD_target_teams_distribute_simd:
    case OMPD_target_teams_distribute_parallel_for:
    case OMPD_target_teams_distribute_parallel_for_simd:
    case OMPD_parallel:
    case OMPD_for:
    case OMPD_parallel_for:
    case OMPD_parallel_sections:
    case OMPD_for_simd:
    case OMPD_parallel_for_simd:
    case OMPD_cancel:
    case OMPD_cancellation_point:
    case OMPD_ordered:
    case OMPD_threadprivate:
    case OMPD_task:
    case OMPD_simd:
    case OMPD_sections:
    case OMPD_section:
    case OMPD_single:
    case OMPD_master:
    case OMPD_critical:
    case OMPD_taskyield:
    case OMPD_barrier:
    case OMPD_taskwait:
    case OMPD_taskgroup:
    case OMPD_atomic:
    case OMPD_flush:
    case OMPD_teams:
    case OMPD_target_data:
    case OMPD_target_exit_data:
    case OMPD_target_enter_data:
    case OMPD_distribute:
    case OMPD_distribute_simd:
    case OMPD_distribute_parallel_for:
    case OMPD_distribute_parallel_for_simd:
    case OMPD_teams_distribute:
    case OMPD_teams_distribute_simd:
    case OMPD_teams_distribute_parallel_for:
    case OMPD_teams_distribute_parallel_for_simd:
    case OMPD_target_update:
    case OMPD_declare_simd:
    case OMPD_declare_target:
    case OMPD_end_declare_target:
    case OMPD_declare_reduction:
    case OMPD_declare_mapper:
    case OMPD_taskloop:
    case OMPD_taskloop_simd:
    case OMPD_requires:
    case OMPD_unknown:
      llvm_unreachable("Unexpected directive.");
    }
  }
  return false;
}
/// Checks if the construct supports lightweight runtime. It must be SPMD
/// construct + inner loop-based construct with static scheduling.
static bool supportsLightweightRuntime(ASTContext &Ctx,
                                       const OMPExecutableDirective &D) {
  // Only SPMD-capable directives may use the lightweight runtime at all.
  if (!supportsSPMDExecutionMode(Ctx, D))
    return false;
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
  case OMPD_target_parallel:
    // Inspect the nested construct for a static worksharing loop.
    return hasNestedLightweightDirective(Ctx, D);
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
  case OMPD_target_teams_distribute_parallel_for:
  case OMPD_target_teams_distribute_parallel_for_simd:
    // (Last|First)-privates must be shared in parallel region.
    return hasStaticScheduling(D);
  case OMPD_target_simd:
  case OMPD_target_teams_distribute:
  case OMPD_target_teams_distribute_simd:
    return false;
  // Every directive below is not a target entry point and must not reach
  // this query.
  case OMPD_parallel:
  case OMPD_for:
  case OMPD_parallel_for:
  case OMPD_parallel_sections:
  case OMPD_for_simd:
  case OMPD_parallel_for_simd:
  case OMPD_cancel:
  case OMPD_cancellation_point:
  case OMPD_ordered:
  case OMPD_threadprivate:
  case OMPD_task:
  case OMPD_simd:
  case OMPD_sections:
  case OMPD_section:
  case OMPD_single:
  case OMPD_master:
  case OMPD_critical:
  case OMPD_taskyield:
  case OMPD_barrier:
  case OMPD_taskwait:
  case OMPD_taskgroup:
  case OMPD_atomic:
  case OMPD_flush:
  case OMPD_teams:
  case OMPD_target_data:
  case OMPD_target_exit_data:
  case OMPD_target_enter_data:
  case OMPD_distribute:
  case OMPD_distribute_simd:
  case OMPD_distribute_parallel_for:
  case OMPD_distribute_parallel_for_simd:
  case OMPD_teams_distribute:
  case OMPD_teams_distribute_simd:
  case OMPD_teams_distribute_parallel_for:
  case OMPD_teams_distribute_parallel_for_simd:
  case OMPD_target_update:
  case OMPD_declare_simd:
  case OMPD_declare_target:
  case OMPD_end_declare_target:
  case OMPD_declare_reduction:
  case OMPD_declare_mapper:
  case OMPD_taskloop:
  case OMPD_taskloop_simd:
  case OMPD_requires:
  case OMPD_unknown:
    break;
  }
  llvm_unreachable(
      "Unknown programming model for OpenMP directive on NVPTX target.");
}
/// Emit a target region kernel using the generic (non-SPMD) master-worker
/// scheme: a single master thread runs the target region and dispatches
/// parallel work to a separate worker function.
void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  // RAII object restores the previous execution mode when we return.
  ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM, D.getBeginLoc());
  // Reset per-kernel state collected while emitting the region body.
  Work.clear();
  WrapperFunctionsMap.clear();
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.emitNonSPMDEntryHeader(CGF, EST, WST);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      auto &RT =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitNonSPMDEntryFooter(CGF, EST);
    }
  } Action(EST, WST);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    // Shared-memory pointer used for statically globalized variables; created
    // once per module.
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
  // Create the worker function
  emitWorkerFunction(WST);
}
// Setup NVPTX threads for master-worker OpenMP scheme: threads below the
// thread limit branch to the worker loop, the master thread initializes the
// runtime and falls through into the sequential region, and all other
// threads exit.
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");
  // Threads with an ID below the limit act as workers.
  llvm::Value *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.Loc, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);
  CGF.EmitBlock(MasterCheckBB);
  // Only the designated master thread executes the sequential region.
  llvm::Value *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
  CGF.EmitBlock(MasterBB);
  IsInTargetMasterThreadRegion = true;
  // SEQUENTIAL (MASTER) REGION START
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
  // For data sharing, we need to initialize the stack.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_data_sharing_init_stack));
  emitGenericVarsProlog(CGF, WST.Loc);
}
/// Close the master (sequential) region of a non-SPMD kernel: deinitialize
/// the runtime, release worker threads via a barrier, and jump to the exit
/// block.
void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;
  emitGenericVarsEpilog(CGF);
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);
  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);
  CGF.EmitBlock(EST.ExitBB);
  // Reset so a stale block is never reused by a later region.
  EST.ExitBB = nullptr;
}
/// Emit a target region kernel in SPMD mode, where every thread executes the
/// region body; the full runtime is only requested when the lightweight
/// runtime is unsupported or force-disabled.
void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  // RAII object sets SPMD mode + full/lightweight runtime choice and restores
  // the previous values on return.
  ExecutionRuntimeModesRAII ModeRAII(
      CurrentExecutionMode, RequiresFullRuntime,
      CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
          !supportsLightweightRuntime(CGM.getContext(), D));
  EntryFunctionState EST;
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;
  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSPMDEntryHeader(CGF, EST, D);
      // Skip target region initialization.
      RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.clearLocThreadIdInsertPt(CGF);
      RT.emitSPMDEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  IsInTTDRegion = true;
  // Reserve place for the globalized memory.
  GlobalizedRecords.emplace_back();
  if (!KernelStaticGlobalized) {
    // Shared-memory pointer used for statically globalized variables; created
    // once per module.
    KernelStaticGlobalized = new llvm::GlobalVariable(
        CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
        llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
  }
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
  IsInTTDRegion = false;
}
/// Emit the prologue of an SPMD kernel: initialize the (possibly
/// lightweight) runtime for all threads and branch into the execute block.
void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  CGBuilderTy &Bld = CGF.Builder;
  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");
  // Full runtime is requested only when the lightweight runtime is not
  // usable for this region.
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/
                         Bld.getInt16(RequiresFullRuntime ? 1 : 0),
                         /*RequiresDataSharing=*/Bld.getInt16(0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
  if (RequiresFullRuntime) {
    // For data sharing, we need to initialize the stack.
    CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
        OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
  }
  CGF.EmitBranch(ExecuteBB);
  CGF.EmitBlock(ExecuteBB);
  IsInTargetMasterThreadRegion = true;
}
/// Emit the epilogue of an SPMD kernel: deinitialize the runtime (all
/// threads participate) and branch to the exit block.
void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  IsInTargetMasterThreadRegion = false;
  if (!CGF.HaveInsertPoint())
    return;
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");
  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);
  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
                         CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(
          OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
  CGF.EmitBranch(EST.ExitBB);
  CGF.EmitBlock(EST.ExitBB);
  // Reset so a stale block is never reused by a later region.
  EST.ExitBB = nullptr;
}
// Emit a unique global variable recording whether this target region runs in
// 'generic' or 'spmd' mode. The offload library reads it before kernel
// launch: in generic mode the runtime reserves one warp for the master,
// otherwise all warps participate in parallel work.
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     bool Mode) {
  // Encoding expected by the offload library: 0 == SPMD, 1 == generic.
  llvm::Constant *ModeVal = llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1);
  auto *GVMode = new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage, ModeVal, Twine(Name, "_exec_mode"));
  // Nothing in the module references the variable; keep it from being
  // discarded by marking it compiler-used.
  CGM.addCompilerUsedGlobal(GVMode);
}
/// Emit the body of the worker function for a non-SPMD (generic) kernel: a
/// loop in which worker threads wait for and execute parallel work handed
/// off by the master thread.
void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  ASTContext &Ctx = CGM.getContext();
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
                    WST.Loc, WST.Loc);
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}
/// Emit the worker wait-execute loop for the master-worker scheme.
void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers.  The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads.  The activated workers load the variable arguments and
  // execute the parallel work.
  //
  CGBuilderTy &Bld = CGF.Builder;
  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
  CGF.EmitBranch(AwaitBB);
  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work
  syncCTAThreads(CGF);
  // WorkFn receives the outlined function pointer (or null on termination);
  // ExecStatus records whether this worker is active for the region.
  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {WorkFn.getPointer(),
                         /*RequiresOMPRuntime=*/Bld.getInt16(1)};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
  // On termination condition (workid == 0), exit loop.
  llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
  llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);
  // Skip initialization.
  setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
  // Process work items: outlined parallel functions.
  for (llvm::Function *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);
    // Insert call to work function via shared wrapper. The shared
    // wrapper takes two arguments:
    //   - the parallelism level;
    //   - the thread ID;
    emitCall(CGF, WST.Loc, W,
             {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);
    CGF.EmitBlock(CheckNextBB);
  }
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // directive.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false);
  llvm::Value *WorkFnCast =
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID;
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);
  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);
  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);
  // Exit target region.
  CGF.EmitBlock(ExitBB);
  // Skip initialization.
  clearLocThreadIdInsertPt(CGF);
}
/// Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
llvm::FunctionCallee RTLFn = nullptr;
switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
case OMPRTL_NVPTX__kmpc_kernel_init: {
// Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
// RequiresOMPRuntime);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_deinit: {
// Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
llvm::Type *TypeParams[] = {CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
break;
}
case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
// Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
break;
}
case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
// Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
llvm::Type *TypeParams[] = {CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
/// Build void __kmpc_kernel_prepare_parallel(
/// void *outlined_function, int16_t IsOMPRuntimeInitialized);
llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_parallel: {
/// Build bool __kmpc_kernel_parallel(void **outlined_function,
/// int16_t IsOMPRuntimeInitialized);
llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
auto *FnTy =
llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
/// Build void __kmpc_kernel_end_parallel();
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_serialized_parallel: {
// Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
// Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
break;
}
case OMPRTL_NVPTX__kmpc_shuffle_int32: {
// Build int32_t __kmpc_shuffle_int32(int32_t element,
// int16_t lane_offset, int16_t warp_size);
llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
break;
}
case OMPRTL_NVPTX__kmpc_shuffle_int64: {
// Build int64_t __kmpc_shuffle_int64(int64_t element,
// int16_t lane_offset, int16_t warp_size);
llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
break;
}
case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2: {
// Build int32_t kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
// kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
// reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
// lane_id, int16_t lane_offset, int16_t Algorithm Version), void
// (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
CGM.Int16Ty, CGM.Int16Ty};
auto *ShuffleReduceFnTy =
llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
/*isVarArg=*/false);
llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
auto *InterWarpCopyFnTy =
llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
/*isVarArg=*/false);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
CGM.Int32Ty,
CGM.Int32Ty,
CGM.SizeTy,
CGM.VoidPtrTy,
ShuffleReduceFnTy->getPointerTo(),
InterWarpCopyFnTy->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
break;
}
case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
// Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
llvm::Type *TypeParams[] = {CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
break;
}
case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple: {
// Build __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
// global_tid, kmp_critical_name *lck)
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_simple");
break;
}
case OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple: {
// Build __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc, kmp_int32
// global_tid, kmp_critical_name *lck)
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_nvptx_teams_end_reduce_nowait_simple");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
/// Build void __kmpc_data_sharing_init_stack();
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
/// Build void __kmpc_data_sharing_init_stack_spmd();
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
// Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
// int16_t UseSharedMemory);
llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
// Build void __kmpc_data_sharing_pop_stack(void *a);
llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy,
/*Name=*/"__kmpc_data_sharing_pop_stack");
break;
}
case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
/// Build void __kmpc_begin_sharing_variables(void ***args,
/// size_t n_args);
llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
break;
}
case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
/// Build void __kmpc_end_sharing_variables();
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
break;
}
case OMPRTL_NVPTX__kmpc_get_shared_variables: {
/// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
break;
}
case OMPRTL_NVPTX__kmpc_parallel_level: {
// Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
break;
}
case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
// Build int8_t __kmpc_is_spmd_exec_mode();
auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
break;
}
case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
// Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
// const void *buf, size_t size, int16_t is_shared, const void **res);
llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
CGM.Int16Ty, CGM.VoidPtrPtrTy};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
break;
}
case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
// Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
// int16_t is_shared);
llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
break;
}
case OMPRTL__kmpc_barrier: {
// Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
cast<llvm::Function>(RTLFn.getCallee())
->addFnAttr(llvm::Attribute::Convergent);
break;
}
case OMPRTL__kmpc_barrier_simple_spmd: {
// Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
cast<llvm::Function>(RTLFn.getCallee())
->addFnAttr(llvm::Attribute::Convergent);
break;
}
}
return RTLFn;
}
void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  // Only function entries are registered for now; anything else is ignored.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  // Mark the entry point as a kernel by appending the triple
  // {function, "kernel", 1} to the module-level "nvvm.annotations" node.
  llvm::Metadata *Operands[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  llvm::NamedMDNode *Annotations =
      CGM.getModule().getOrInsertNamedMetadata("nvvm.annotations");
  Annotations->addOperand(llvm::MDNode::get(Ctx, Operands));
}
void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Only actual offload entry points need a kernel emitted here.
  if (!IsOffloadEntry)
    return;
  assert(!ParentName.empty() && "Invalid target region parent name!");
  // Choose between the SPMD and the generic (non-SPMD) kernel flavor based
  // on what the directive supports.
  const bool IsSPMDMode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (IsSPMDMode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);
  // Record the chosen execution mode, keyed by the outlined function's name.
  setPropertyExecutionMode(CGM, OutlinedFn->getName(), IsSPMDMode);
}
namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
/// These flags are combined with the LLVM bitmask-enum operators; NOTE(review):
/// with LLVM_MARK_AS_BITMASK_ENUM, operator~ masks the complement to the
/// declared LargestValue bits — confirm against llvm/ADT/BitmaskEnum.h.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};
/// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace
// Returns the ident_t reserved_2 flags that describe the current execution
// mode: SPMD bit and simplified-runtime bit, per the ModeFlagsTy encoding
// declared above.
unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    // SPMD + full runtime: SPMD bit set, simplified-runtime bit cleared.
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    // SPMD + simplified runtime: both bits set.
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    // Non-SPMD always uses the full runtime; both bits cleared.
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  // This runtime specialization is only valid when compiling for the device.
  const bool IsDeviceCompile = CGM.getLangOpts().OpenMPIsDevice;
  if (!IsDeviceCompile)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}
void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              OpenMPProcBindClauseKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel; otherwise defer to the
  // common host-style lowering.
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}
void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel; otherwise defer to the
  // common host-style lowering.
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}
// Intentionally a no-op on the NVPTX device: no runtime call is emitted here
// for num_teams/thread_limit. NOTE(review): presumably these clauses are
// handled when the kernel launch is emitted — confirm against the host-side
// target lowering.
void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}
// Outlines a parallel region for the device. Tracks region-nesting state
// (IsInParallelRegion / IsInTTDRegion / IsInTargetMasterThreadRegion) around
// the base-class outlining, and, for non-SPMD top-level parallels, creates a
// data-sharing wrapper that workers can invoke.
llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  // The action below flips IsInParallelRegion for the duration of the region
  // body so nested parallels know they are already inside a parallel region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;
  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      // Save and set the flag on entry to the region body.
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      // Restore the flag on exit.
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  // Clear the target/teams/distribute and target-master-thread flags while
  // outlining; they must be restored in reverse afterwards.
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  // For a non-SPMD parallel that is not nested in another parallel region,
  // build the wrapper used by worker threads and remember the mapping.
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }
  return OutlinedFun;
}
/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  // Locate the directive carrying the lastprivate clauses: either D itself
  // (a combined teams distribute form) or a single nested directive inside
  // the teams region — which must then be a distribute directive.
  const OMPExecutableDirective *DistDir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    const Stmt *Body =
        D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
            /*IgnoreCaptured=*/true);
    if (const Stmt *Child = getSingleCompoundChild(Ctx, Body)) {
      DistDir = dyn_cast<OMPExecutableDirective>(Child);
      if (DistDir && !isOpenMPDistributeDirective(DistDir->getDirectiveKind()))
        DistDir = nullptr;
    }
  }
  if (!DistDir)
    return;
  // Collect the variables referenced by every lastprivate clause.
  for (const auto *Clause : DistDir->getClausesOfKind<OMPLastprivateClause>())
    for (const Expr *Ref : Clause->getVarRefs())
      Vars.push_back(getPrivateItem(Ref));
}
/// Get list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  // Gather the private copies declared by each reduction clause on D.
  for (const auto *Clause : D.getClausesOfKind<OMPReductionClause>())
    for (const Expr *Priv : Clause->privates())
      Vars.push_back(getPrivateItem(Priv));
}
// Outlines a teams region for the device. Reduction variables (and, in SPMD
// mode, distribute lastprivates) are "globalized": moved into a record the
// runtime allocates, so all threads of the team can see them.
llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();
  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  // Globalize team reductions variable unconditionally in all modes.
  getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    // In SPMD mode distribute lastprivates also need globalization; build the
    // record describing all of them right away.
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields);
    }
  } else if (!LastPrivatesReductions.empty()) {
    // Non-SPMD: stash the captured decl and the variables so the record can
    // be built later when the enclosing target region is emitted.
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }
  // Emit target region as a standalone region.
  // The action registers the globalized record for the emitted function and
  // brackets the body with the generic-vars prolog/epilog.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;
  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        // Record the globalized record and the decl-to-field mapping for the
        // current function before emitting the prolog that uses them.
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().GlobalRecord = GlobalizedRD;
        I->getSecond().MappedParams =
            llvm::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(),
                                     MappedVarData(Pair.getSecond(),
                                                   /*IsOnePerTeam=*/true)));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  // Force inlining of the outlined teams function into the kernel.
  OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
  OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
  OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  return OutlinedFun;
}
// Prolog for "globalized" variables: escaped locals/parameters that must be
// visible to other threads are given storage outside the private stack.
// Three allocation strategies are used, chosen per function:
//   1. Runtime check (unknown mode or WithSPMDCheck): branch on
//      __kmpc_is_spmd_exec_mode() and only push a data-sharing stack frame on
//      the non-SPMD path.
//   2. Target/teams/distribute region (IsInTTDRegion): carve the record out
//      of statically reserved team memory via __kmpc_get_team_static_memory.
//   3. Otherwise: unconditionally push a coalesced data-sharing stack frame.
void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 bool WithSPMDCheck) {
  // Only relevant in generic data-sharing mode or SPMD execution mode; the
  // matching epilog below uses the same guard.
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;
  CGBuilderTy &Bld = CGF.Builder;
  // Nothing to do for functions with no globalized declarations.
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;
  if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
    QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
    QualType SecGlobalRecTy;
    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
    unsigned Alignment =
        CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
    unsigned GlobalRecordSize =
        CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
    GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
    llvm::PointerType *GlobalRecPtrTy =
        CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
    llvm::Value *GlobalRecCastAddr;
    llvm::Value *IsTTD = nullptr;
    if (!IsInTTDRegion &&
        (WithSPMDCheck ||
         getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
      // Strategy 1: emit a runtime SPMD check and allocate only when needed.
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
      llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
      if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
        // A secondary record exists; compute whether we are at parallel
        // level 0 (IsTTD) to select between the two record layouts.
        llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
        llvm::Value *ThreadID = getThreadID(CGF, Loc);
        llvm::Value *PL = CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
            {RTLoc, ThreadID});
        IsTTD = Bld.CreateIsNull(PL);
      }
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(SPMDBB);
      // SPMD path: no allocation; a null record pointer feeds the PHI below.
      Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
                               CharUnits::fromQuantity(Alignment));
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(NonSPMDBB);
      llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
      if (const RecordDecl *SecGlobalizedVarsRecord =
              I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
        SecGlobalRecTy =
            CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
        // Recover pointer to this function's global record. The runtime will
        // handle the specifics of the allocation of the memory.
        // Use actual memory size of the record including the padding
        // for alignment purposes.
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
        unsigned GlobalRecordSize =
            CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
        GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
        // At parallel level 0 the secondary record's size is used instead.
        Size = Bld.CreateSelect(
            IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
      }
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
      llvm::Value *GlobalRecordSizeArg[] = {
          Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      CGF.EmitBlock(ExitBB);
      // Merge the null (SPMD) and allocated (non-SPMD) pointers.
      auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
                                /*NumReservedValues=*/2, "_select_stack");
      Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
      Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
      GlobalRecCastAddr = Phi;
      I->getSecond().GlobalRecordAddr = Phi;
      I->getSecond().IsInSPMDModeFlag = IsSPMD;
    } else if (IsInTTDRegion) {
      // Strategy 2: use the statically reserved team memory. Compute this
      // record's offset past any record already placed in the buffer.
      assert(GlobalizedRecords.back().Records.size() < 2 &&
             "Expected less than 2 globalized records: one for target and one "
             "for teams.");
      unsigned Offset = 0;
      for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
        QualType RDTy = CGM.getContext().getRecordType(RD);
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
        unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
        Offset =
            llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
      }
      unsigned Alignment =
          CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
      Offset = llvm::alignTo(Offset, Alignment);
      GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
      ++GlobalizedRecords.back().RegionCounter;
      if (GlobalizedRecords.back().Records.size() == 1) {
        // First record in this region: materialize the placeholder globals
        // (actual size/shared-ness are filled in later) and fetch the team
        // static memory pointer from the runtime.
        assert(KernelStaticGlobalized &&
               "Kernel static pointer must be initialized already.");
        auto *UseSharedMemory = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$is_shared");
        UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
            /*DestWidth=*/16, /*Signed=*/0);
        llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
            Address(UseSharedMemory,
                    CGM.getContext().getTypeAlignInChars(Int16Ty)),
            /*Volatile=*/false, Int16Ty, Loc);
        auto *StaticGlobalized = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
            llvm::GlobalValue::CommonLinkage, nullptr);
        auto *RecSize = new llvm::GlobalVariable(
            CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$size");
        RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        llvm::Value *Ld = CGF.EmitLoadOfScalar(
            Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
            CGM.getContext().getSizeType(), Loc);
        llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
            KernelStaticGlobalized, CGM.VoidPtrPtrTy);
        llvm::Value *GlobalRecordSizeArg[] = {
            llvm::ConstantInt::get(
                CGM.Int16Ty,
                getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
            StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_get_team_static_memory),
                            GlobalRecordSizeArg);
        GlobalizedRecords.back().Buffer = StaticGlobalized;
        GlobalizedRecords.back().RecSize = RecSize;
        GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
        GlobalizedRecords.back().Loc = Loc;
      }
      assert(KernelStaticGlobalized && "Global address must be set already.");
      // Address of this record inside the static frame, at the computed
      // offset.
      Address FrameAddr = CGF.EmitLoadOfPointer(
          Address(KernelStaticGlobalized, CGM.getPointerAlign()),
          CGM.getContext()
              .getPointerType(CGM.getContext().VoidPtrTy)
              .castAs<PointerType>());
      llvm::Value *GlobalRecValue =
          Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
    } else {
      // Strategy 3: unconditionally push a data-sharing stack frame.
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
      llvm::Value *GlobalRecordSizeArg[] = {
          llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
          CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
    }
    LValue Base =
        CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
    // Emit the "global alloca" which is a GEP from the global declaration
    // record using the pointer returned by the runtime.
    LValue SecBase;
    decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
    if (IsTTD) {
      SecIt = I->getSecond().SecondaryLocalVarData->begin();
      llvm::PointerType *SecGlobalRecPtrTy =
          CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
      SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
          Bld.CreatePointerBitCastOrAddrSpaceCast(
              I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
          SecGlobalRecTy);
    }
    for (auto &Rec : I->getSecond().LocalVarData) {
      bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
      llvm::Value *ParValue;
      if (EscapedParam) {
        // Load the current value of the escaped parameter so it can be
        // stored into its globalized slot below.
        const auto *VD = cast<VarDecl>(Rec.first);
        LValue ParLVal =
            CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
        ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
      }
      LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
      // Emit VarAddr basing on lane-id if required.
      QualType VarTy;
      if (Rec.second.IsOnePerTeam) {
        VarTy = Rec.second.FD->getType();
      } else {
        // Per-lane variable: index the per-warp array by lane id.
        llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
            VarAddr.getAddress().getPointer(),
            {Bld.getInt32(0), getNVPTXLaneID(CGF)});
        VarTy =
            Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
        VarAddr = CGF.MakeAddrLValue(
            Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
            AlignmentSource::Decl);
      }
      Rec.second.PrivateAddr = VarAddr.getAddress();
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        assert(I->getSecond().IsInSPMDModeFlag &&
               "Expected unknown execution mode or required SPMD check.");
        if (IsTTD) {
          assert(SecIt->second.IsOnePerTeam &&
                 "Secondary glob data must be one per team.");
          // At level 0 the secondary record's field address is used.
          LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
          VarAddr.setAddress(
              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(),
                                       VarAddr.getPointer()),
                      VarAddr.getAlignment()));
          Rec.second.PrivateAddr = VarAddr.getAddress();
        }
        // In SPMD mode fall back to a plain local temporary; otherwise use
        // the globalized slot.
        Address GlobalPtr = Rec.second.PrivateAddr;
        Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
        Rec.second.PrivateAddr = Address(
            Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
                             LocalAddr.getPointer(), GlobalPtr.getPointer()),
            LocalAddr.getAlignment());
      }
      if (EscapedParam) {
        // Copy the parameter's value into the globalized slot and remap the
        // declaration to it.
        const auto *VD = cast<VarDecl>(Rec.first);
        CGF.EmitStoreOfScalar(ParValue, VarAddr);
        I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
      }
      if (IsTTD)
        ++SecIt;
    }
  }
  for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    // Round the dynamic size up to a multiple of the declaration alignment.
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);
    // TODO: allow the usage of shared memory to be controlled by
    // the user, for now, default to global.
    llvm::Value *GlobalRecordSizeArg[] = {
        Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
    llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
        GlobalRecordSizeArg);
    llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
    LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress());
    // Remember the raw pointer so the epilog can pop it from the stack.
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
  }
  I->getSecond().MappedParams->apply(CGF);
}
// Epilog matching emitGenericVarsProlog: restores remapped declarations and
// releases whatever storage the prolog acquired, using the same three
// strategies (runtime SPMD check / static team memory / unconditional pop).
void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                                 bool WithSPMDCheck) {
  // Same guard as the prolog.
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;
  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    I->getSecond().MappedParams->restore(CGF);
    if (!CGF.HaveInsertPoint())
      return;
    // Pop variable-length allocations in reverse order of their pushes.
    for (llvm::Value *Addr :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
          Addr);
    }
    if (I->getSecond().GlobalRecordAddr) {
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        // Runtime-check path: only the non-SPMD branch pushed a frame, so
        // only it needs the pop.
        CGBuilderTy &Bld = CGF.Builder;
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
        llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
        Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
        // There is no need to emit line number for unconditional branch.
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(NonSPMDBB);
        CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(
                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
            CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
        CGF.EmitBlock(ExitBB);
      } else if (IsInTTDRegion) {
        // Static-memory path: release the team buffer once the outermost
        // region that used it finishes.
        assert(GlobalizedRecords.back().RegionCounter > 0 &&
               "region counter must be > 0.");
        --GlobalizedRecords.back().RegionCounter;
        // Emit the restore function only in the target region.
        if (GlobalizedRecords.back().RegionCounter == 0) {
          QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
              /*DestWidth=*/16, /*Signed=*/0);
          llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
              Address(GlobalizedRecords.back().UseSharedMemory,
                      CGM.getContext().getTypeAlignInChars(Int16Ty)),
              /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
          llvm::Value *Args[] = {
              llvm::ConstantInt::get(
                  CGM.Int16Ty,
                  getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
              IsInSharedMemory};
          CGF.EmitRuntimeCall(
              createNVPTXRuntimeFunction(
                  OMPRTL_NVPTX__kmpc_restore_team_static_memory),
              Args);
        }
      } else {
        // Unconditional path: pop the frame pushed by the prolog.
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
                            I->getSecond().GlobalRecordAddr);
      }
    }
  }
}
void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         SourceLocation Loc,
                                         llvm::Function *OutlinedFn,
                                         ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;
  // The outlined function expects (&gtid, &bound_tid, captures...); the
  // second argument is a zero-initialized temporary.
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  Address ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> Args;
  Args.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  Args.push_back(ZeroAddr.getPointer());
  Args.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, Args);
}
void CGOpenMPRuntimeNVPTX::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;
  // Dispatch on the kernel's execution mode.
  const bool IsSPMDMode = getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD;
  if (IsSPMDMode)
    emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}
// Emits a parallel region call for non-SPMD (generic) kernels. Three code
// paths are generated, chosen at compile time or — when nesting is unknown —
// at runtime:
//   - SeqGen:        serialized execution bracketed by
//                    __kmpc_serialized_parallel / __kmpc_end_serialized_parallel.
//   - L0ParallelGen: master signals workers via the kernel-prepare/sharing
//                    protocol and CTA barriers.
//   - LNParallelGen: emits a runtime check (SPMD mode or parallel level != 0)
//                    to pick between the two above.
void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
  // Force inline this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
                                           /*DestWidth=*/32, /*Signed=*/1),
                                       ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  // Direct call of the outlined function: (&tid, &bound_tid, captures...).
  auto &&CodeGen = [this, Fn, CapturedVars, Loc, ZeroAddr, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
  };
  // Serialized execution: wrap the direct call in the serialized-parallel
  // runtime begin/end pair.
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };
  // Level-0 parallel from the target master thread: hand the wrapper function
  // and the captured variables to the waiting worker threads.
  auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
                                                  PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Function *WFn = WrapperFunctionsMap[Fn];
    assert(WFn && "Wrapper function does not exist!");
    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
    // Prepare for parallel region. Indicate the outlined function.
    llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
    CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
        Args);
    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
      llvm::Value *DataSharingArgs[] = {
          SharedArgsPtr,
          llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
      CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                              OMPRTL_NVPTX__kmpc_begin_sharing_variables),
                          DataSharingArgs);
      // Store variable address in a list of references to pass to workers.
      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      Address SharedArgListAddress = CGF.EmitLoadOfPointer(
          SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
                          .castAs<PointerType>());
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
        llvm::Value *PtrV;
        // Integers are smuggled through the void* slot via inttoptr;
        // pointers are address-space-cast.
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }
    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);
    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);
    if (!CapturedVars.empty())
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
    // Remember for post-processing in worker loop.
    Work.emplace_back(WFn);
  };
  // Chooses between serialized and level-0 paths; when neither can be
  // decided statically, emits the runtime master/parallelism check.
  auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (IsInParallelRegion) {
      SeqGen(CGF, Action);
    } else if (IsInTargetMasterThreadRegion) {
      L0ParallelGen(CGF, Action);
    } else {
      // Check for master and then parallelism:
      // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
      //   Serialized execution.
      // } else {
      //   Worker call.
      // }
      CGBuilderTy &Bld = CGF.Builder;
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
      llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
      llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(ParallelCheckBB);
      llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
      llvm::Value *ThreadID = getThreadID(CGF, Loc);
      llvm::Value *PL = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
          {RTLoc, ThreadID});
      llvm::Value *Res = Bld.CreateIsNotNull(PL);
      Bld.CreateCondBr(Res, SeqBB, MasterBB);
      CGF.EmitBlock(SeqBB);
      SeqGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(MasterBB);
      L0ParallelGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      // Emit the continuation block for code after the if.
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    }
  };
  if (IfCond) {
    emitOMPIfClause(CGF, IfCond, LNParallelGen, SeqGen);
  } else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(LNParallelGen);
    ThenRCG(CGF);
  }
}
/// Emits a call to an outlined parallel function from within an SPMD-mode
/// target region.
///
/// In SPMD mode every thread of the team already executes the target region,
/// so a parallel region in the target master-thread region is lowered to a
/// direct call of \p OutlinedFn. Outside that region this must be L2+
/// (nested) parallelism, which is executed serialized via the
/// __kmpc_serialized_parallel / __kmpc_end_serialized_parallel runtime calls.
///
/// \param OutlinedFn   Function emitted for the body of the parallel region.
/// \param CapturedVars Captured variables forwarded to the outlined function.
/// \param IfCond       If-clause condition; not inspected here — the SPMD
///                     lowering emits the call unconditionally.
void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&gtid, &zero, CapturedStruct);
  //
  Address ZeroAddr = CGF.CreateMemTemp(CGF.getContext().getIntTypeForBitwidth(
                                           /*DestWidth=*/32, /*Signed=*/1),
                                       ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  // Emits the direct call to the outlined function. ThreadIDAddr is captured
  // by reference because it may be rebound to the real thread id address
  // below, after this lambda is created.
  auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, ZeroAddr,
                    &ThreadIDAddr](CodeGenFunction &CGF,
                                   PrePostActionTy &Action) {
    Action.Enter(CGF);
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
  };
  // Wraps CodeGen in the serialized-parallel runtime entry/exit calls for
  // nested (serialized) execution.
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};
    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };
  if (IsInTargetMasterThreadRegion) {
    // In the worker need to use the real thread id.
    ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
    RegionCodeGenTy RCG(CodeGen);
    RCG(CGF);
  } else {
    // If we are not in the target region, it is definitely L2 parallelism or
    // more, because for SPMD mode we always have L1 parallel level, so we
    // don't need to check for orphaned directives.
    RegionCodeGenTy RCG(SeqGen);
    RCG(CGF);
  }
}
/// Synchronize all threads of the CTA by emitting a simple SPMD barrier,
/// __kmpc_barrier_simple_spmd(nullptr, 0).
void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // The runtime entry does not use its parameters, so default (null/zero)
  // values suffice.
  llvm::Value *NullLocPtr = llvm::ConstantPointerNull::get(
      cast<llvm::PointerType>(getIdentTyPointerTy()));
  llvm::Value *ZeroTid =
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true);
  llvm::Value *BarrierArgs[] = {NullLocPtr, ZeroTid};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd),
      BarrierArgs);
}
/// Emit an OpenMP barrier for directive \p Kind. On this target the
/// check/simple-call flags are ignored and a plain __kmpc_barrier is emitted.
void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           OpenMPDirectiveKind Kind, bool,
                                           bool) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc, Flags);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *BarrierArgs[] = {RTLoc, ThreadID};
  CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier),
                      BarrierArgs);
}
/// Emits code for an OpenMP critical region on the NVPTX device.
///
/// The region is serialized with a team-wide counter loop: each thread
/// iterates the counter from 0 to the team width, executes the region body
/// only on the iteration where the counter equals its own thread id, and
/// synchronizes with the rest of the team before the counter is incremented.
/// After TeamWidth iterations every thread has executed the body exactly
/// once, one at a time.
void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
  // Fetch team-local id of the thread.
  llvm::Value *ThreadID = getNVPTXThreadID(CGF);
  // Get the width of the team.
  llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
  // Initialize the counter variable for the loop.
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
                        /*isInit=*/true);
  // Block checks if loop counter exceeds upper bound.
  CGF.EmitBlock(LoopBB);
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
  // Block tests which single thread should execute region, and which threads
  // should go straight to synchronisation point.
  CGF.EmitBlock(TestBB);
  // Reload the counter: TestBB dominates both BodyBB and SyncBB, so this
  // load is the value used by the increment below.
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpThreadToCounter =
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
  // Block emits the body of the critical region.
  CGF.EmitBlock(BodyBB);
  // Output the critical statement.
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
                                      Hint);
  // After the body surrounded by the critical region, the single executing
  // thread will jump to the synchronisation point.
  // Block waits for all threads in current team to finish then increments the
  // counter variable and returns to the loop.
  CGF.EmitBlock(SyncBB);
  emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false,
                  /*ForceSimpleCall=*/true);
  llvm::Value *IncCounterVal =
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
  CGF.EmitBranch(LoopBB);
  // Block that is reached when all threads in the team complete the region.
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
/// Cast value to the specified type.
///
/// Same-size values are bitcast; integer-to-integer conversions use a
/// sign-aware int cast; any remaining case is routed through a memory
/// temporary (store as \p ValTy, reload as \p CastTy).
///
/// \param Val    Value to convert.
/// \param ValTy  Source (AST-level) type of \p Val.
/// \param CastTy Destination type; must be a sized type.
/// \returns The converted value.
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
                                    QualType ValTy, QualType CastTy,
                                    SourceLocation Loc) {
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
         "Cast type must be sized.");
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
         "Val type must be sized.");
  if (ValTy == CastTy)
    return Val;
  // Only convert the destination type after the trivial-identity check above;
  // the conversion is not needed on that path.
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
      CGF.getContext().getTypeSizeInChars(CastTy))
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
                                     CastTy->hasSignedIntegerRepresentation());
  // Fallback: spill to a temporary of the destination type and reload.
  Address CastItem = CGF.CreateMemTemp(CastTy);
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
}
/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp: __kmpc_shuffle_int32 for elements of
/// up to 4 bytes and __kmpc_shuffle_int64 for larger ones (up to 8 bytes).
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 llvm::Value *Elem,
                                                 QualType ElemType,
                                                 llvm::Value *Offset,
                                                 SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  CGBuilderTy &Bld = CGF.Builder;
  CGOpenMPRuntimeNVPTX &RT =
      *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
  CharUnits ElemSize = CGF.getContext().getTypeSizeInChars(ElemType);
  assert(ElemSize.getQuantity() <= 8 &&
         "Unsupported bitwidth in shuffle instruction.");
  // Pick the 32-bit shuffle for elements of up to 4 bytes, otherwise the
  // 64-bit one.
  const bool UseInt32 = ElemSize.getQuantity() <= 4;
  OpenMPRTLFunctionNVPTX ShuffleRTLFn =
      UseInt32 ? OMPRTL_NVPTX__kmpc_shuffle_int32
               : OMPRTL_NVPTX__kmpc_shuffle_int64;
  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  QualType IntCastTy =
      CGF.getContext().getIntTypeForBitwidth(UseInt32 ? 32 : 64, /*Signed=*/1);
  llvm::Value *CastedElem =
      castValueToType(CGF, Elem, ElemType, IntCastTy, Loc);
  llvm::Value *WarpSize =
      Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
  llvm::Value *Shuffled =
      CGF.EmitRuntimeCall(RT.createNVPTXRuntimeFunction(ShuffleRTLFn),
                          {CastedElem, Offset, WarpSize});
  // Convert the shuffled integer back to the original element type.
  return castValueToType(CGF, Shuffled, IntCastTy, ElemType, Loc);
}
/// Copy an element of type \p ElemType from \p SrcAddr to \p DestAddr by
/// shuffling it between warp lanes in progressively smaller integer chunks
/// (8, then 4, 2 and 1 bytes), so arbitrarily sized elements can be moved
/// with the fixed-width shuffle runtime calls.
///
/// \param SrcAddr  Address of the local source element.
/// \param DestAddr Address where the shuffled value is stored.
/// \param ElemType Type of the element being transferred.
/// \param Offset   Lane offset passed through to the shuffle runtime call.
static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
                            Address DestAddr, QualType ElemType,
                            llvm::Value *Offset, SourceLocation Loc) {
  CGBuilderTy &Bld = CGF.Builder;
  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  // Create the loop over the big sized data.
  // ptr = (void*)Elem;
  // ptrEnd = (void*) Elem + 1;
  // Step = 8;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int64_t)*ptr);
  // Step = 4;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int32_t)*ptr);
  // ...
  Address ElemPtr = DestAddr;
  Address Ptr = SrcAddr;
  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
      Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
    // Skip chunk sizes larger than what remains to be copied.
    if (Size < CharUnits::fromQuantity(IntSize))
      continue;
    QualType IntType = CGF.getContext().getIntTypeForBitwidth(
        CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
        /*Signed=*/1);
    llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
    Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
    ElemPtr =
        Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
    if (Size.getQuantity() / IntSize > 1) {
      // More than one chunk of this size remains: emit a loop with PHI nodes
      // tracking the source and destination cursors across iterations.
      llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
      llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
      CGF.EmitBlock(PreCondBB);
      llvm::PHINode *PhiSrc =
          Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
      PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
      llvm::PHINode *PhiDest =
          Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
      PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
      Ptr = Address(PhiSrc, Ptr.getAlignment());
      ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
      // Loop while at least IntSize bytes remain before the end pointer.
      llvm::Value *PtrDiff = Bld.CreatePtrDiff(
          PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
                                   Ptr.getPointer(), CGF.VoidPtrTy));
      Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
                       ThenBB, ExitBB);
      CGF.EmitBlock(ThenBB);
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
      Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
      Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
      PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
      PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
      CGF.EmitBranch(PreCondBB);
      CGF.EmitBlock(ExitBB);
    } else {
      // Exactly one chunk of this size: shuffle it and advance both cursors.
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
      Ptr = Bld.CreateConstGEP(Ptr, 1);
      ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
    }
    Size = Size % IntSize;
  }
}
namespace {
/// Direction/kind of a Reduce-list copy performed by emitReductionListCopy.
enum CopyAction : unsigned {
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
  // the warp using shuffle instructions.
  RemoteLaneToThread,
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
  ThreadCopy,
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
  ThreadToScratchpad,
  // ScratchpadToThread: Copy from a scratchpad array in global memory
  // containing team-reduced data to a thread's stack.
  ScratchpadToThread,
};
} // namespace
/// Optional parameters for emitReductionListCopy; fields that do not apply
/// to a given CopyAction are passed as null.
struct CopyOptionsTy {
  // Lane offset used when shuffling an element in from a remote lane.
  llvm::Value *RemoteLaneOffset;
  // Element index into the scratchpad array.
  llvm::Value *ScratchpadIndex;
  // Width of the scratchpad array; used to advance the scratchpad base
  // pointer from one element to the next.
  llvm::Value *ScratchpadWidth;
};
/// Emit instructions to copy a Reduce list, which contains partially
/// aggregated values, in the specified direction.
///
/// \param Action   Direction/kind of the copy (see CopyAction).
/// \param ReductionArrayTy Type of the Reduce list array (not referenced in
///                 this routine).
/// \param Privates One private reduction expression per Reduce-list element;
///                 supplies the element types.
/// \param SrcBase  Base address of the source list (or scratchpad base, for
///                 ScratchpadToThread).
/// \param DestBase Base address of the destination list (or scratchpad base,
///                 for ThreadToScratchpad).
/// \param CopyOptions Remote lane offset and scratchpad index/width; fields
///                 that do not apply to \p Action are null.
static void emitReductionListCopy(
    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &C = CGM.getContext();
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
  // Iterates, element-by-element, through the source Reduce list and
  // make a copy.
  unsigned Idx = 0;
  unsigned Size = Privates.size();
  for (const Expr *Private : Privates) {
    Address SrcElementAddr = Address::invalid();
    Address DestElementAddr = Address::invalid();
    Address DestElementPtrAddr = Address::invalid();
    // Should we shuffle in an element from a remote lane?
    bool ShuffleInElement = false;
    // Set to true to update the pointer in the dest Reduce list to a
    // newly created element.
    bool UpdateDestListPtr = false;
    // Increment the src or dest pointer to the scratchpad, for each
    // new element.
    bool IncrScratchpadSrc = false;
    bool IncrScratchpadDest = false;
    switch (Action) {
    case RemoteLaneToThread: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());
      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      ShuffleInElement = true;
      UpdateDestListPtr = true;
      break;
    }
    case ThreadCopy: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());
      // Step 1.2: Get the address for dest element. The destination
      // element has already been created on the thread's stack.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr = CGF.EmitLoadOfPointer(
          DestElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());
      break;
    }
    case ThreadToScratchpad: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
      SrcElementAddr = CGF.EmitLoadOfPointer(
          SrcElementPtrAddr,
          C.getPointerType(Private->getType())->castAs<PointerType>());
      // Step 1.2: Get the address for dest element:
      // address = base + index * ElementSizeInChars.
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
      llvm::Value *CurrentOffset =
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
      llvm::Value *ScratchPadElemAbsolutePtrVal =
          Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                                C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadDest = true;
      break;
    }
    case ScratchpadToThread: {
      // Step 1.1: Get the address for the src element in the scratchpad.
      // address = base + index * ElementSizeInChars.
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
      llvm::Value *CurrentOffset =
          Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
      llvm::Value *ScratchPadElemAbsolutePtrVal =
          Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                               C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadSrc = true;
      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      UpdateDestListPtr = true;
      break;
    }
    }
    // Regardless of src and dest of copy, we emit the load of src
    // element as this is required in all directions
    SrcElementAddr = Bld.CreateElementBitCast(
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
    DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
                                               SrcElementAddr.getElementType());
    // Now that all active lanes have read the element in the
    // Reduce list, shuffle over the value from the remote lane.
    if (ShuffleInElement) {
      shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
                      RemoteLaneOffset, Private->getExprLoc());
    } else {
      // Plain copy: scalar load/store for scalar types, aggregate copy
      // otherwise.
      if (Private->getType()->isScalarType()) {
        llvm::Value *Elem =
            CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
                                 Private->getType(), Private->getExprLoc());
        // Store the source element value to the dest element address.
        CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
                              Private->getType());
      } else {
        CGF.EmitAggregateCopy(
            CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
            CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
            Private->getType(), AggValueSlot::DoesNotOverlap);
      }
    }
    // Step 3.1: Modify reference in dest Reduce list as needed.
    // Modifying the reference in Reduce list to point to the newly
    // created element. The element is live in the current function
    // scope and that of functions it invokes (i.e., reduce_function).
    // RemoteReduceData[i] = (void*)&RemoteElem
    if (UpdateDestListPtr) {
      CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
                                DestElementAddr.getPointer(), CGF.VoidPtrTy),
                            DestElementPtrAddr, /*Volatile=*/false,
                            C.VoidPtrTy);
    }
    // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
    // address of the next element in scratchpad memory, unless we're currently
    // processing the last one.  Memory alignment is also taken care of here:
    // the new base is rounded up to a multiple of GlobalMemoryAlignment.
    if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
      llvm::Value *ScratchpadBasePtr =
          IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
      llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
      ScratchpadBasePtr = Bld.CreateNUWAdd(
          ScratchpadBasePtr,
          Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
      // Take care of global memory alignment for performance
      ScratchpadBasePtr = Bld.CreateNUWSub(
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateUDiv(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
      ScratchpadBasePtr = Bld.CreateNUWAdd(
          ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateNUWMul(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
      if (IncrScratchpadDest)
        DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
      else /* IncrScratchpadSrc = true */
        SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
    }
    ++Idx;
  }
}
/// This function emits a helper that gathers Reduce lists from the first
/// lane of every active warp to lanes in the first warp.
///
/// void inter_warp_copy_func(void* reduce_data, num_warps)
///   shared smem[warp_size];
///   For all data entries D in reduce_data:
///     sync
///     If (I am the first lane in each warp)
///       Copy my local D to smem[warp_id]
///     sync
///     if (I am the first warp)
///       Copy smem[thread_id] to my local D
///
/// \param Privates One private reduction expression per Reduce-list element.
/// \param ReductionArrayTy Type of the Reduce list array.
/// \returns The generated helper function.
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
                                              ArrayRef<const Expr *> Privates,
                                              QualType ReductionArrayTy,
                                              SourceLocation Loc) {
  ASTContext &C = CGM.getContext();
  llvm::Module &M = CGM.getModule();
  // ReduceList: thread local Reduce list.
  // At the stage of the computation when this function is called, partially
  // aggregated values reside in the first lane of every active warp.
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
  // NumWarps: number of warps active in the parallel region.  This could
  // be smaller than 32 (max warps in a CTA) for partial block reduction.
  ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                C.getIntTypeForBitwidth(32, /* Signed */ true),
                                ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&NumWarpsArg);
  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_inter_warp_copy_func", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
  CGBuilderTy &Bld = CGF.Builder;
  // This array is used as a medium to transfer, one reduce element at a time,
  // the data from the first lane of every warp to lanes in the first warp
  // in order to perform the final step of a reduction in a parallel region
  // (reduction across warps).  The array is placed in NVPTX __shared__ memory
  // for reduced latency, as well as to have a distinct copy for concurrently
  // executing target regions.  The array is declared with common linkage so
  // as to be shared across compilation units.
  StringRef TransferMediumName =
      "__openmp_nvptx_data_transfer_temporary_storage";
  llvm::GlobalVariable *TransferMedium =
      M.getGlobalVariable(TransferMediumName);
  if (!TransferMedium) {
    auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
    unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
    TransferMedium = new llvm::GlobalVariable(
        M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
        llvm::Constant::getNullValue(Ty), TransferMediumName,
        /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
        SharedAddressSpace);
    CGM.addCompilerUsedGlobal(TransferMedium);
  }
  // Get the CUDA thread id of the current OpenMP thread on the GPU.
  llvm::Value *ThreadID = getNVPTXThreadID(CGF);
  // nvptx_lane_id = nvptx_id % warpsize
  llvm::Value *LaneID = getNVPTXLaneID(CGF);
  // nvptx_warp_id = nvptx_id / warpsize
  llvm::Value *WarpID = getNVPTXWarpID(CGF);
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address LocalReduceList(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, Loc),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());
  unsigned Idx = 0;
  for (const Expr *Private : Privates) {
    //
    // Warp master copies reduce element to transfer medium in __shared__
    // memory.
    //
    unsigned RealTySize =
        C.getTypeSizeInChars(Private->getType())
            .alignTo(C.getTypeAlignInChars(Private->getType()))
            .getQuantity();
    // Copy the element through the 32-bit transfer medium in 4-, 2- and
    // 1-byte chunks until the whole (aligned) size is covered.
    for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
      unsigned NumIters = RealTySize / TySize;
      if (NumIters == 0)
        continue;
      QualType CType = C.getIntTypeForBitwidth(
          C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
      llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
      CharUnits Align = CharUnits::fromQuantity(TySize);
      llvm::Value *Cnt = nullptr;
      Address CntAddr = Address::invalid();
      llvm::BasicBlock *PrecondBB = nullptr;
      llvm::BasicBlock *ExitBB = nullptr;
      if (NumIters > 1) {
        // Several chunks of this size: drive the copy with a counter loop.
        CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
        CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
                              /*Volatile=*/false, C.IntTy);
        PrecondBB = CGF.createBasicBlock("precond");
        ExitBB = CGF.createBasicBlock("exit");
        llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
        // There is no need to emit line number for unconditional branch.
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(PrecondBB);
        Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
        llvm::Value *Cmp =
            Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
        Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
        CGF.EmitBlock(BodyBB);
      }
      // kmpc_barrier.
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
      llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
      llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
      // if (lane_id == 0)
      llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
      Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
      CGF.EmitBlock(ThenBB);
      // Reduce element = LocalReduceList[i]
      Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
      llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
          ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      // elemptr = ((CopyType*)(elemptrptr)) + I
      Address ElemPtr = Address(ElemPtrPtr, Align);
      ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
      if (NumIters > 1) {
        ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
                          ElemPtr.getAlignment());
      }
      // Get pointer to location in transfer medium.
      // MediumPtr = &medium[warp_id]
      llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
          TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
      Address MediumPtr(MediumPtrVal, Align);
      // Casting to actual data type.
      // MediumPtr = (CopyType*)MediumPtrAddr;
      MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
      // elem = *elemptr
      // *MediumPtr = elem
      llvm::Value *Elem =
          CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
      // Store the source element value to the dest element address.
      CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
      Bld.CreateBr(MergeBB);
      CGF.EmitBlock(ElseBB);
      Bld.CreateBr(MergeBB);
      CGF.EmitBlock(MergeBB);
      // kmpc_barrier.
      CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
      //
      // Warp 0 copies reduce element from transfer medium.
      //
      llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
      llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
      llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
      Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
      llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
          AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
      // Up to 32 threads in warp 0 are active.
      llvm::Value *IsActiveThread =
          Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
      Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
      CGF.EmitBlock(W0ThenBB);
      // SrcMediumPtr = &medium[tid]
      llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
          TransferMedium,
          {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
      Address SrcMediumPtr(SrcMediumPtrVal, Align);
      // SrcMediumVal = *SrcMediumPtr;
      SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
      // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
      Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
      llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
          TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
      Address TargetElemPtr = Address(TargetElemPtrVal, Align);
      TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
      if (NumIters > 1) {
        TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
                                TargetElemPtr.getAlignment());
      }
      // *TargetElemPtr = SrcMediumVal;
      llvm::Value *SrcMediumValue =
          CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
      CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
                            CType);
      Bld.CreateBr(W0MergeBB);
      CGF.EmitBlock(W0ElseBB);
      Bld.CreateBr(W0MergeBB);
      CGF.EmitBlock(W0MergeBB);
      if (NumIters > 1) {
        // Increment the counter and loop back to the precondition block.
        Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
        CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
        CGF.EmitBranch(PrecondBB);
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(ExitBB);
      }
      RealTySize %= TySize;
    }
    ++Idx;
  }
  CGF.FinishFunction();
  return Fn;
}
/// Emit a helper that reduces data across two OpenMP threads (lanes)
/// in the same warp. It uses shuffle instructions to copy over data from
/// a remote lane's stack. The reduction algorithm performed is specified
/// by the fourth parameter.
///
/// Algorithm Versions.
/// Full Warp Reduce (argument value 0):
/// This algorithm assumes that all 32 lanes are active and gathers
/// data from these 32 lanes, producing a single resultant value.
/// Contiguous Partial Warp Reduce (argument value 1):
/// This algorithm assumes that only a *contiguous* subset of lanes
/// are active. This happens for the last warp in a parallel region
/// when the user specified num_threads is not an integer multiple of
/// 32. This contiguous subset always starts with the zeroth lane.
/// Partial Warp Reduce (argument value 2):
/// This algorithm gathers data from any number of lanes at any position.
/// All reduced values are stored in the lowest possible lane. The set
/// of problems every algorithm addresses is a super set of those
/// addressable by algorithms with a lower version number. Overhead
/// increases as algorithm version increases.
///
/// Terminology
/// Reduce element:
/// Reduce element refers to the individual data field with primitive
/// data types to be combined and reduced across threads.
/// Reduce list:
/// Reduce list refers to a collection of local, thread-private
/// reduce elements.
/// Remote Reduce list:
/// Remote Reduce list refers to a collection of remote (relative to
/// the current thread) reduce elements.
///
/// We distinguish between three states of threads that are important to
/// the implementation of this function.
/// Alive threads:
/// Threads in a warp executing the SIMT instruction, as distinguished from
/// threads that are inactive due to divergent control flow.
/// Active threads:
/// The minimal set of threads that has to be alive upon entry to this
/// function. The computation is correct iff active threads are alive.
/// Some threads are alive but they are not active because they do not
/// contribute to the computation in any useful manner. Turning them off
/// may introduce control flow overheads without any tangible benefits.
/// Effective threads:
/// In order to comply with the argument requirements of the shuffle
/// function, we must keep all lanes holding data alive. But at most
/// half of them perform value aggregation; we refer to this half of
/// threads as effective. The other half is simply handing off their
/// data.
///
/// Procedure
/// Value shuffle:
/// In this step active threads transfer data from higher lane positions
/// in the warp to lower lane positions, creating Remote Reduce list.
/// Value aggregation:
/// In this step, effective threads combine their thread local Reduce list
/// with Remote Reduce list and store the result in the thread local
/// Reduce list.
/// Value copy:
/// In this step, we deal with the assumption made by algorithm 2
/// (i.e. contiguity assumption). When we have an odd number of lanes
/// active, say 2k+1, only k threads will be effective and therefore k
/// new values will be produced. However, the Reduce list owned by the
/// (2k+1)th thread is ignored in the value aggregation. Therefore
/// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
/// that the contiguity assumption still holds.
/// Emit the shuffle-and-reduce helper handed to the NVPTX runtime. For each
/// private reduction variable it pulls a peer lane's copy across the warp
/// (via emitReductionListCopy with a remote lane offset) and, depending on
/// the algorithm-version argument, combines it into the local Reduce list
/// in place.
static llvm::Function *emitShuffleAndReduceFunction(
    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
    QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
  ASTContext &C = CGM.getContext();

  // The helper takes four implicit parameters:
  // Thread local Reduce list used to host the values of data to be reduced.
  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                  C.VoidPtrTy, ImplicitParamDecl::Other);
  // Current lane id; could be logical.
  ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
                              ImplicitParamDecl::Other);
  // Offset of the remote source lane relative to the current lane.
  ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                        C.ShortTy, ImplicitParamDecl::Other);
  // Algorithm version. This is expected to be known at compile time.
  ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.ShortTy, ImplicitParamDecl::Other);
  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&LaneIDArg);
  Args.push_back(&RemoteLaneOffsetArg);
  Args.push_back(&AlgoVerArg);

  // Create the internal-linkage helper function and start emitting into it.
  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setDoesNotRecurse();
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);

  CGBuilderTy &Bld = CGF.Builder;

  // Load the incoming void* Reduce list and view it as the reduction array
  // type so elements can be addressed below.
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address LocalReduceList(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  // Load the three scalar arguments.
  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
      AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
      AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
      AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());

  // Create a local thread-private variable to host the Reduce list
  // from a remote lane.
  Address RemoteReduceList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");

  // This loop iterates through the list of reduce elements and copies,
  // element by element, from a remote lane in the warp to RemoteReduceList,
  // hosted on the thread's stack.
  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
                        LocalReduceList, RemoteReduceList,
                        {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
                         /*ScratchpadIndex=*/nullptr,
                         /*ScratchpadWidth=*/nullptr});

  // The actions to be performed on the Remote Reduce list is dependent
  // on the algorithm version.
  //
  //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
  //  LaneId % 2 == 0 && Offset > 0):
  //    do the reduction value aggregation
  //
  //  The thread local variable Reduce list is mutated in place to host the
  //  reduced data, which is the aggregated value produced from local and
  //  remote lanes.
  //
  //  Note that AlgoVer is expected to be a constant integer known at compile
  //  time.
  //  When AlgoVer==0, the first conjunction evaluates to true, making
  //    the entire predicate true during compile time.
  //  When AlgoVer==1, the second conjunction has only the second part to be
  //    evaluated during runtime.  Other conjunctions evaluate to false
  //    during compile time.
  //  When AlgoVer==2, the third conjunction has only the second part to be
  //    evaluated during runtime.  Other conjunctions evaluate to false
  //    during compile time.
  llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);

  llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
  llvm::Value *CondAlgo1 = Bld.CreateAnd(
      Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));

  // For algorithm 2, the lane is effective only when its id is even
  // (LaneId & 1 == 0) and the offset is strictly positive.
  llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
  llvm::Value *CondAlgo2 = Bld.CreateAnd(
      Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
  CondAlgo2 = Bld.CreateAnd(
      CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));

  llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);

  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);

  CGF.EmitBlock(ThenBB);
  // reduce_function(LocalReduceList, RemoteReduceList)
  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      LocalReduceList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
      CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(ElseBB);
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(MergeBB);

  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
  // Reduce list.
  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
  llvm::Value *CondCopy = Bld.CreateAnd(
      Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));

  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);

  CGF.EmitBlock(CpyThenBB);
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
                        RemoteReduceList, LocalReduceList);
  Bld.CreateBr(CpyMergeBB);

  CGF.EmitBlock(CpyElseBB);
  Bld.CreateBr(CpyMergeBB);

  CGF.EmitBlock(CpyMergeBB);

  CGF.FinishFunction();
  return Fn;
}
///
/// Design of OpenMP reductions on the GPU
///
/// Consider a typical OpenMP program with one or more reduction
/// clauses:
///
/// float foo;
/// double bar;
/// #pragma omp target teams distribute parallel for \
/// reduction(+:foo) reduction(*:bar)
/// for (int i = 0; i < N; i++) {
/// foo += A[i]; bar *= B[i];
/// }
///
/// where 'foo' and 'bar' are reduced across all OpenMP threads in
/// all teams. In our OpenMP implementation on the NVPTX device an
/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
/// within a team are mapped to CUDA threads within a threadblock.
/// Our goal is to efficiently aggregate values across all OpenMP
/// threads such that:
///
/// - the compiler and runtime are logically concise, and
/// - the reduction is performed efficiently in a hierarchical
/// manner as follows: within OpenMP threads in the same warp,
/// across warps in a threadblock, and finally across teams on
/// the NVPTX device.
///
/// Introduction to Decoupling
///
/// We would like to decouple the compiler and the runtime so that the
/// latter is ignorant of the reduction variables (number, data types)
/// and the reduction operators. This allows a simpler interface
/// and implementation while still attaining good performance.
///
/// Pseudocode for the aforementioned OpenMP program generated by the
/// compiler is as follows:
///
/// 1. Create private copies of reduction variables on each OpenMP
/// thread: 'foo_private', 'bar_private'
/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
/// to it and writes the result in 'foo_private' and 'bar_private'
/// respectively.
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
/// and store the result on the team master:
///
/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
/// reduceData, shuffleReduceFn, interWarpCpyFn)
///
/// where:
/// struct ReduceData {
/// double *foo;
/// double *bar;
/// } reduceData
/// reduceData.foo = &foo_private
/// reduceData.bar = &bar_private
///
/// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
/// auxiliary functions generated by the compiler that operate on
/// variables of type 'ReduceData'. They aid the runtime perform
/// algorithmic steps in a data agnostic manner.
///
/// 'shuffleReduceFn' is a pointer to a function that reduces data
/// of type 'ReduceData' across two OpenMP threads (lanes) in the
/// same warp. It takes the following arguments as input:
///
/// a. variable of type 'ReduceData' on the calling lane,
/// b. its lane_id,
/// c. an offset relative to the current lane_id to generate a
/// remote_lane_id. The remote lane contains the second
/// variable of type 'ReduceData' that is to be reduced.
/// d. an algorithm version parameter determining which reduction
/// algorithm to use.
///
/// 'shuffleReduceFn' retrieves data from the remote lane using
/// efficient GPU shuffle intrinsics and reduces, using the
/// algorithm specified by the 4th parameter, the two operands
/// element-wise. The result is written to the first operand.
///
/// Different reduction algorithms are implemented in different
/// runtime functions, all calling 'shuffleReduceFn' to perform
/// the essential reduction step. Therefore, based on the 4th
/// parameter, this function behaves slightly differently to
/// cooperate with the runtime to ensure correctness under
/// different circumstances.
///
/// 'InterWarpCpyFn' is a pointer to a function that transfers
/// reduced variables across warps. It tunnels, through CUDA
/// shared memory, the thread-private data of type 'ReduceData'
/// from lane 0 of each warp to a lane in the first warp.
/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
/// The last team writes the global reduced value to memory.
///
/// ret = __kmpc_nvptx_teams_reduce_nowait(...,
/// reduceData, shuffleReduceFn, interWarpCpyFn,
/// scratchpadCopyFn, loadAndReduceFn)
///
/// 'scratchpadCopyFn' is a helper that stores reduced
/// data from the team master to a scratchpad array in
/// global memory.
///
/// 'loadAndReduceFn' is a helper that loads data from
/// the scratchpad array and reduces it with the input
/// operand.
///
/// These compiler generated functions hide address
/// calculation and alignment information from the runtime.
/// 5. if ret == 1:
/// The team master of the last team stores the reduced
/// result to the globals in memory.
/// foo += reduceData.foo; bar *= reduceData.bar
///
///
/// Warp Reduction Algorithms
///
/// On the warp level, we have three algorithms implemented in the
/// OpenMP runtime depending on the number of active lanes:
///
/// Full Warp Reduction
///
/// The reduce algorithm within a warp where all lanes are active
/// is implemented in the runtime as follows:
///
/// full_warp_reduce(void *reduce_data,
/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
/// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
/// ShuffleReduceFn(reduce_data, 0, offset, 0);
/// }
///
/// The algorithm completes in log(2, WARPSIZE) steps.
///
/// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
/// not used therefore we save instructions by not retrieving lane_id
/// from the corresponding special registers. The 4th parameter, which
/// represents the version of the algorithm being used, is set to 0 to
/// signify full warp reduction.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// #reduce_elem refers to an element in the local lane's data structure
/// #remote_elem is retrieved from a remote lane
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// reduce_elem = reduce_elem REDUCE_OP remote_elem;
///
/// Contiguous Partial Warp Reduction
///
/// This reduce algorithm is used within a warp where only the first
/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
/// number of OpenMP threads in a parallel region is not a multiple of
/// WARPSIZE. The algorithm is implemented in the runtime as follows:
///
/// void
/// contiguous_partial_reduce(void *reduce_data,
/// kmp_ShuffleReductFctPtr ShuffleReduceFn,
/// int size, int lane_id) {
/// int curr_size;
/// int offset;
/// curr_size = size;
///     offset = curr_size/2;
/// while (offset>0) {
/// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
/// curr_size = (curr_size+1)/2;
/// offset = curr_size/2;
/// }
/// }
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id < offset)
/// reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
/// reduce_elem = remote_elem
///
/// This algorithm assumes that the data to be reduced are located in a
/// contiguous subset of lanes starting from the first. When there is
/// an odd number of active lanes, the data in the last lane is not
/// aggregated with any other lane's data but is instead copied over.
///
/// Dispersed Partial Warp Reduction
///
/// This algorithm is used within a warp when any discontiguous subset of
/// lanes are active. It is used to implement the reduction operation
/// across lanes in an OpenMP simd region or in a nested parallel region.
///
/// void
/// dispersed_partial_reduce(void *reduce_data,
/// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
/// int size, remote_id;
/// int logical_lane_id = number_of_active_lanes_before_me() * 2;
/// do {
/// remote_id = next_active_lane_id_right_after_me();
///       # the above function returns 0 if no active lane
/// # is present right after the current lane.
/// size = number_of_active_lanes_in_this_warp();
/// logical_lane_id /= 2;
/// ShuffleReduceFn(reduce_data, logical_lane_id,
/// remote_id-1-threadIdx.x, 2);
/// } while (logical_lane_id % 2 == 0 && size > 1);
/// }
///
/// There is no assumption made about the initial state of the reduction.
/// Any number of lanes (>=1) could be active at any position. The reduction
/// result is returned in the first active lane.
///
/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
///
/// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
/// if (lane_id % 2 == 0 && offset > 0)
/// reduce_elem = reduce_elem REDUCE_OP remote_elem
/// else
/// reduce_elem = remote_elem
///
///
/// Intra-Team Reduction
///
/// This function, as implemented in the runtime call
/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
/// threads in a team. It first reduces within a warp using the
/// aforementioned algorithms. We then proceed to gather all such
/// reduced values at the first warp.
///
/// The runtime makes use of the function 'InterWarpCpyFn', which copies
/// data from each of the "warp master" (zeroth lane of each warp, where
/// warp-reduced data is held) to the zeroth warp. This step reduces (in
/// a mathematical sense) the problem of reduction across warp masters in
/// a block to the problem of warp reduction.
///
///
/// Inter-Team Reduction
///
/// Once a team has reduced its data to a single value, it is stored in
/// a global scratchpad array. Since each team has a distinct slot, this
/// can be done without locking.
///
/// The last team to write to the scratchpad array proceeds to reduce the
/// scratchpad array. One or more workers in the last team use the helper
/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
/// the k'th worker reduces every k'th element.
///
/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
/// reduce across workers and compute a globally reduced value.
///
void CGOpenMPRuntimeNVPTX::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
#ifndef NDEBUG
  // Only used in asserts below; guarded so release builds do not warn about
  // an unused variable.
  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
#endif

  // A "simple" reduction (e.g. inside a serialized region) needs none of the
  // device-specific machinery; delegate to the host-style implementation.
  if (Options.SimpleReduction) {
    assert(!TeamsReduction && !ParallelReduction &&
           "Invalid reduction selection in emitReduction.");
    CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                   ReductionOps, Options);
    return;
  }

  assert((TeamsReduction || ParallelReduction) &&
         "Invalid reduction selection in emitReduction.");

  // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
  // RedList, shuffle_reduce_func, interwarp_copy_func);
  // or
  // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
  llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *ThreadId = getThreadID(CGF, Loc);

  llvm::Value *Res;
  if (ParallelReduction) {
    ASTContext &C = CGM.getContext();
    // 1. Build a list of reduction variables.
    // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
    auto Size = RHSExprs.size();
    for (const Expr *E : Privates) {
      if (E->getType()->isVariablyModifiedType())
        // Reserve place for array size.
        ++Size;
    }
    llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
    QualType ReductionArrayTy =
        C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                               /*IndexTypeQuals=*/0);
    Address ReductionList =
        CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
    auto IPriv = Privates.begin();
    unsigned Idx = 0;
    // Fill the array with pointers to the RHS (private copy) of each
    // reduction variable; VLAs additionally record their element count so
    // helpers can recompute sizes.
    for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
      Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
      CGF.Builder.CreateStore(
          CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
              CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
          Elem);
      if ((*IPriv)->getType()->isVariablyModifiedType()) {
        // Store array size.
        ++Idx;
        Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
        llvm::Value *Size = CGF.Builder.CreateIntCast(
            CGF.getVLASize(
                   CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
                .NumElts,
            CGF.SizeTy, /*isSigned=*/false);
        // The count is smuggled through the void* slot via inttoptr.
        CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                                Elem);
      }
    }

    llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
    llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        ReductionList.getPointer(), CGF.VoidPtrTy);
    // 2. Emit the data-agnostic helper functions the runtime calls back into:
    // the element-wise reducer, the intra-warp shuffle reducer, and the
    // inter-warp copy helper.
    llvm::Function *ReductionFn = emitReductionFunction(
        CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
        Privates, LHSExprs, RHSExprs, ReductionOps);
    llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
        CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
    llvm::Value *InterWarpCopyFn =
        emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);

    llvm::Value *Args[] = {RTLoc,
                           ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn};

    Res = CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                  OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2),
                              Args);
  } else {
    assert(TeamsReduction && "expected teams reduction.");
    // Teams reductions use the simple lock-based runtime entry point.
    std::string Name = getName({"reduction"});
    llvm::Value *Lock = getCriticalRegionLock(Name);
    llvm::Value *Args[] = {RTLoc, ThreadId, Lock};
    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple),
        Args);
  }

  // 5. Build if (res == 1)
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
      Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);

  // 6. Build then branch: where we have reduced values in the master
  //    thread in each team.
  //    __kmpc_end_reduce{_nowait}(<gtid>);
  //    break;
  CGF.EmitBlock(ThenBB);
  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
  // The lambda below combines each private copy into the original variable
  // (LHS op= RHS) and is wrapped so the matching "end reduce" runtime call
  // is emitted afterwards.
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  if (ParallelReduction) {
    llvm::Value *EndArgs[] = {ThreadId};
    RegionCodeGenTy RCG(CodeGen);
    NVPTXActionTy Action(
        nullptr, llvm::None,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
        EndArgs);
    RCG.setAction(Action);
    RCG(CGF);
  } else {
    assert(TeamsReduction && "expected teams reduction.");
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    std::string Name = getName({"reduction"});
    llvm::Value *Lock = getCriticalRegionLock(Name);
    llvm::Value *EndArgs[] = {RTLoc, ThreadId, Lock};
    RegionCodeGenTy RCG(CodeGen);
    NVPTXActionTy Action(
        nullptr, llvm::None,
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple),
        EndArgs);
    RCG.setAction(Action);
    RCG(CGF);
  }
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
const VarDecl *
CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
                                         const VarDecl *NativeParam) const {
  // Only reference-typed parameters are rewritten; everything else is
  // passed through untouched.
  if (!NativeParam->getType()->isReferenceType())
    return NativeParam;

  // Strip qualifiers and unwrap the reference to reach the pointee type.
  QualifierCollector Quals;
  const Type *Stripped = Quals.strip(NativeParam->getType());
  QualType ElemTy = cast<ReferenceType>(Stripped)->getPointeeType();

  // Data captured through a 'map' clause lives in device global memory, so
  // the pointee is qualified with the global address space.
  const auto *CaptureAttr = FD->getAttr<OMPCaptureKindAttr>();
  if (CaptureAttr && CaptureAttr->getCaptureKind() == OMPC_map)
    ElemTy = CGM.getContext().getAddrSpaceQualType(ElemTy,
                                                   LangAS::opencl_global);

  // Rebuild the parameter as a restrict-qualified pointer placed in the
  // NVPTX local address space, re-applying the stripped qualifiers.
  QualType ParamTy = CGM.getContext().getPointerType(ElemTy);
  Quals.addRestrict();
  enum { NVPTX_local_addr = 5 };
  Quals.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
  ParamTy = Quals.apply(CGM.getContext(), ParamTy);

  // Mirror the kind of the original declaration.
  if (isa<ImplicitParamDecl>(NativeParam))
    return ImplicitParamDecl::Create(
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
        NativeParam->getIdentifier(), ParamTy, ImplicitParamDecl::Other);
  return ParmVarDecl::Create(
      CGM.getContext(),
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
      NativeParam->getBeginLoc(), NativeParam->getLocation(),
      NativeParam->getIdentifier(), ParamTy,
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}
// Returns the address a callee should use for a reference parameter that
// translateParameter() rewrote into an address-space-qualified pointer: the
// target parameter's value is loaded, rebased into the native pointee's
// address space, and stored into a fresh temporary of the native type.
Address
CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
                                          const VarDecl *NativeParam,
                                          const VarDecl *TargetParam) const {
  assert(NativeParam != TargetParam &&
         NativeParam->getType()->isReferenceType() &&
         "Native arg must not be the same as target arg.");
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
  QualType NativeParamType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(NativeParamType);
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  // Address space the callers of the native signature expect the pointee in.
  unsigned NativePointeeAddrSpace =
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
  QualType TargetTy = TargetParam->getType();
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
  // First cast to generic.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      /*AddrSpace=*/0));
  // Cast from generic to native address space.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      NativePointeeAddrSpace));
  // Materialize a temporary of the native (reference) type holding the
  // rebased pointer and hand back its address.
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
                        NativeParamType);
  return NativeParamAddr;
}
// Adapts each pointer argument to the address space the outlined function's
// signature expects before delegating to the generic OpenMP call emission.
void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> AdjustedArgs;
  AdjustedArgs.reserve(Args.size());
  auto *FTy = OutlinedFn.getFunctionType();
  for (unsigned Idx = 0, NumArgs = Args.size(); Idx < NumArgs; ++Idx) {
    // Arguments landing in a variadic tail are forwarded unchanged.
    if (FTy->isVarArg() && Idx >= FTy->getNumParams()) {
      AdjustedArgs.append(Args.begin() + Idx, Args.end());
      break;
    }
    llvm::Value *Arg = Args[Idx];
    llvm::Type *ParamTy = FTy->getParamType(Idx);
    // Non-pointer arguments need no address-space adjustment.
    if (!ParamTy->isPointerTy()) {
      AdjustedArgs.emplace_back(Arg);
      continue;
    }
    // Hop through the generic address space first, then cast to the exact
    // parameter type the callee declares.
    llvm::Value *Generic = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        Arg, Arg->getType()->getPointerElementType()->getPointerTo());
    AdjustedArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Generic, ParamTy));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn,
                                            AdjustedArgs);
}
/// Emit function which wraps the outline parallel region
/// and controls the arguments which are passed to this function.
/// The wrapper ensures that the outlined function is called
/// with the correct arguments when data is shared.
llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);

  // Create a function that takes as argument the source thread.
  // The wrapper signature is (uint16 parallel_level, uint32 source_thread).
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                                     /*Id=*/nullptr, Int16QTy,
                                     ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                               /*Id=*/nullptr, Int32QTy,
                               ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);

  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Fn->setDoesNotRecurse();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
                    D.getBeginLoc(), D.getBeginLoc());

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  // A zero-initialized i32 temp passed as the bound-thread-id slot expected
  // by the outlined function.
  Address ZeroAddr = CGF.CreateMemTemp(
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
      /*Name*/ ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
  Args.emplace_back(ZeroAddr.getPointer());

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Use global memory for data sharing.
  // Handle passing of global args to workers.
  Address GlobalArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
      DataSharingArgs);

  // Retrieve the shared variables from the list of references returned
  // by the runtime. Pass the variables to the outlined function.
  Address SharedArgListAddress = Address::invalid();
  if (CS.capture_size() > 0 ||
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    SharedArgListAddress = CGF.EmitLoadOfPointer(
        GlobalArgs, CGF.getContext()
                        .getPointerType(CGF.getContext().getPointerType(
                            CGF.getContext().VoidPtrTy))
                        .castAs<PointerType>());
  }
  unsigned Idx = 0;
  // For loop-bound-sharing directives the first two shared slots hold the
  // lower and upper bounds; forward them before the captures.
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *LB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
    Args.emplace_back(LB);
    ++Idx;
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *UB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
    Args.emplace_back(UB);
    ++Idx;
  }
  if (CS.capture_size() > 0) {
    ASTContext &CGFContext = CGF.getContext();
    // Load each captured variable out of the shared argument list, casting
    // the slot to a pointer of the capture's type first.
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
      QualType ElemTy = CurField->getType();
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
      llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
                                              /*Volatile=*/false,
                                              CGFContext.getPointerType(ElemTy),
                                              CI->getLocation());
      // By-copy captures of non-pointer values are widened to uintptr to
      // match the outlined function's parameter convention.
      if (CI->capturesVariableByCopy() &&
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
                              CI->getLocation());
      }
      Args.emplace_back(Arg);
    }
  }

  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}
// Records which variables escape the given function/captured region and must
// therefore be "globalized" (moved off the private stack), and emits the
// globalization prolog plus a cleanup that emits the matching epilog.
void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
                                              const Decl *D) {
  // Globalization is only required in generic data-sharing mode.
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return;

  assert(D && "Expected function or captured|block decl.");
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
         "Function is registered already.");
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
         "Team is set but not processed.");
  const Stmt *Body = nullptr;
  bool NeedToDelayGlobalization = false;
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    Body = FD->getBody();
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
    Body = BD->getBody();
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
    Body = CD->getBody();
    // For OpenMP captured regions the prolog/epilog are emitted by the
    // region handling itself, so globalization is delayed; in SPMD mode no
    // globalization is needed at all.
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
    if (NeedToDelayGlobalization &&
        getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
      return;
  }
  if (!Body)
    return;
  // Scan the body for variables that escape into parallel regions.
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
  VarChecker.Visit(Body);
  const RecordDecl *GlobalizedVarsRecord =
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
  TeamAndReductions.first = nullptr;
  TeamAndReductions.second.clear();
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
      VarChecker.getEscapedVariableLengthDecls();
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
    return;
  // Register the globalization data for this function.
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
  I->getSecond().MappedParams =
      llvm::make_unique<CodeGenFunction::OMPMapVars>();
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
  I->getSecond().EscapedParameters.insert(
      VarChecker.getEscapedParameters().begin(),
      VarChecker.getEscapedParameters().end());
  I->getSecond().EscapedVariableLengthDecls.append(
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
    Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
  }
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
    // Compute a secondary record assuming a TTD region; used when the same
    // function may also be executed in SPMD-like contexts.
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
    VarChecker.Visit(Body);
    I->getSecond().SecondaryGlobalRecord =
        VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
    I->getSecond().SecondaryLocalVarData.emplace();
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
      const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
      Data.insert(
          std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
    }
  }
  if (!NeedToDelayGlobalization) {
    // Emit the prolog now and schedule the epilog via an EH-stack cleanup so
    // it runs on both normal and exceptional exits.
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
    struct GlobalizationScope final : EHScopeStack::Cleanup {
      GlobalizationScope() = default;

      void Emit(CodeGenFunction &CGF, Flags flags) override {
        static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
      }
    };
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
  }
}
// Returns the globalized address of \p VD if it was registered for this
// function by emitFunctionProlog, either directly or through an
// OMPReferencedVarAttr indirection; otherwise an invalid address.
Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                        const VarDecl *VD) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return Address::invalid();

  VD = VD->getCanonicalDecl();
  auto FnIt = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (FnIt == FunctionGlobalizedDecls.end())
    return Address::invalid();

  // Direct hit: the variable itself was globalized.
  auto &LocalData = FnIt->getSecond().LocalVarData;
  auto DirectIt = LocalData.find(VD);
  if (DirectIt != LocalData.end())
    return DirectIt->second.PrivateAddr;

  // Otherwise, follow any referenced-variable attributes: if the variable
  // aliases a globalized one, hand back that variable's address.
  if (VD->hasAttrs()) {
    for (const auto *Attr : VD->specific_attrs<OMPReferencedVarAttr>()) {
      const auto *RefVD =
          cast<VarDecl>(cast<DeclRefExpr>(Attr->getRef())->getDecl())
              ->getCanonicalDecl();
      auto RefIt = LocalData.find(RefVD);
      if (RefIt != LocalData.end())
        return RefIt->second.PrivateAddr;
    }
  }

  return Address::invalid();
}
// Drops the per-function globalization bookkeeping once codegen for the
// function is complete, then delegates to the base runtime's cleanup.
void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
  FunctionGlobalizedDecls.erase(CGF.CurFn);
  CGOpenMPRuntime::functionFinished(CGF);
}
// Chooses the default dist_schedule for a loop directive. In SPMD mode the
// schedule is static with a chunk equal to the NVPTX thread count (converted
// to the iteration variable's type); otherwise the base runtime decides.
void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPDistScheduleClauseKind &ScheduleKind,
    llvm::Value *&Chunk) const {
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
    // Chunk = number of threads, converted to the IV's type.
    Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        S.getIterationVariable()->getType(), S.getBeginLoc());
    return;
  }
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
      CGF, S, ScheduleKind, Chunk);
}
// Chooses the default schedule for a loop directive on NVPTX: always
// schedule(static, 1), expressed as an unsigned 32-bit integer literal.
void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind,
    const Expr *&ChunkExpr) const {
  ScheduleKind = OMPC_SCHEDULE_static;
  // Chunk size is 1 in this case.
  llvm::APInt ChunkSize(32, 1);
  ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
      SourceLocation());
}
// For a target-based directive, fixes up lambda objects captured by the
// directive: for every by-reference lambda capture (and the captured
// 'this', if any) the current address of the referenced entity is stored
// into the corresponding field of the lambda object so device code sees
// valid pointers.
void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         " Expected target-based directive.");
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
  for (const CapturedStmt::Capture &C : CS->captures()) {
    // Capture variables captured by reference in lambdas for target-based
    // directives.
    if (!C.capturesVariable())
      continue;
    const VarDecl *VD = C.getCapturedVar();
    // Only captured variables whose type is a lambda closure are relevant.
    const auto *RD = VD->getType()
                         .getCanonicalType()
                         .getNonReferenceType()
                         ->getAsCXXRecordDecl();
    if (!RD || !RD->isLambda())
      continue;
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
    LValue VDLVal;
    if (VD->getType().getCanonicalType()->isReferenceType())
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
    else
      VDLVal = CGF.MakeAddrLValue(
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
    // Map from captured variables to the closure's fields.
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = nullptr;
    RD->getCaptureFields(Captures, ThisCapture);
    // Re-store the current 'this' pointer into the closure, if captured.
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
      LValue ThisLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
      llvm::Value *CXXThis = CGF.LoadCXXThis();
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
    }
    // Re-store addresses of all by-reference captures that the directive
    // itself also captures.
    for (const LambdaCapture &LC : RD->captures()) {
      if (LC.getCaptureKind() != LCK_ByRef)
        continue;
      const VarDecl *VD = LC.getCapturedVar();
      if (!CS->capturesVariable(VD))
        continue;
      auto It = Captures.find(VD);
      assert(It != Captures.end() && "Found lambda capture without field.");
      LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
      if (VD->getType().getCanonicalType()->isReferenceType())
        VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
                                               VD->getType().getCanonicalType())
                     .getAddress();
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
    }
  }
}
// Get current CudaArch and ignore any unknown values.
// Determines the target architecture by scanning the enabled target
// features for a name that maps to a known CudaArch; returns UNKNOWN when
// the target lacks the "ptx" feature or no feature name matches.
static CudaArch getCudaArch(CodeGenModule &CGM) {
  if (!CGM.getTarget().hasFeature("ptx"))
    return CudaArch::UNKNOWN;
  llvm::StringMap<bool> Features;
  CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
                                 CGM.getTarget().getTargetOpts().CPU,
                                 CGM.getTarget().getTargetOpts().Features);
  // Return the first enabled feature that names a known architecture.
  for (const auto &Feature : Features) {
    if (Feature.getValue()) {
      CudaArch Arch = StringToCudaArch(Feature.getKey());
      if (Arch != CudaArch::UNKNOWN)
        return Arch;
    }
  }
  return CudaArch::UNKNOWN;
}
/// Check to see if target architecture supports unified addressing which is
/// a restriction for OpenMP requires clause "unified_shared_memory".
/// Emits a module error for any sm_2x..sm_6x architecture; sm_70 and newer
/// (and the listed AMD GFX architectures) are accepted.
void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
    CodeGenModule &CGM, const OMPRequiresDecl *D) const {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      switch (getCudaArch(CGM)) {
      // Architectures below sm_70 do not support unified addressing.
      case CudaArch::SM_20:
      case CudaArch::SM_21:
      case CudaArch::SM_30:
      case CudaArch::SM_32:
      case CudaArch::SM_35:
      case CudaArch::SM_37:
      case CudaArch::SM_50:
      case CudaArch::SM_52:
      case CudaArch::SM_53:
      case CudaArch::SM_60:
      case CudaArch::SM_61:
      case CudaArch::SM_62:
        CGM.Error(Clause->getBeginLoc(),
                  "Target architecture does not support unified addressing");
        return;
      // Supported (or unchecked) architectures: no diagnostic.
      case CudaArch::SM_70:
      case CudaArch::SM_72:
      case CudaArch::SM_75:
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX909:
      case CudaArch::UNKNOWN:
        break;
      case CudaArch::LAST:
        llvm_unreachable("Unexpected Cuda arch.");
      }
    }
  }
}
/// Get number of SMs and number of blocks per SM.
/// Returns {number of SMs, blocks per SM}, taken from the
/// -fopenmp-cuda-* command-line options when BOTH are given, otherwise
/// from per-architecture defaults.
/// NOTE(review): if only one of the two options is set, its value is
/// discarded and both components come from the architecture table —
/// confirm this is the intended behavior.
static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
  std::pair<unsigned, unsigned> Data;
  if (CGM.getLangOpts().OpenMPCUDANumSMs)
    Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
  if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
    Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
  if (Data.first && Data.second)
    return Data;
  // Per-architecture defaults.
  switch (getCudaArch(CGM)) {
  case CudaArch::SM_20:
  case CudaArch::SM_21:
  case CudaArch::SM_30:
  case CudaArch::SM_32:
  case CudaArch::SM_35:
  case CudaArch::SM_37:
  case CudaArch::SM_50:
  case CudaArch::SM_52:
  case CudaArch::SM_53:
    return {16, 16};
  case CudaArch::SM_60:
  case CudaArch::SM_61:
  case CudaArch::SM_62:
    return {56, 32};
  case CudaArch::SM_70:
  case CudaArch::SM_72:
  case CudaArch::SM_75:
    return {84, 32};
  case CudaArch::GFX600:
  case CudaArch::GFX601:
  case CudaArch::GFX700:
  case CudaArch::GFX701:
  case CudaArch::GFX702:
  case CudaArch::GFX703:
  case CudaArch::GFX704:
  case CudaArch::GFX801:
  case CudaArch::GFX802:
  case CudaArch::GFX803:
  case CudaArch::GFX810:
  case CudaArch::GFX900:
  case CudaArch::GFX902:
  case CudaArch::GFX904:
  case CudaArch::GFX906:
  case CudaArch::GFX909:
  case CudaArch::UNKNOWN:
    break;
  case CudaArch::LAST:
    llvm_unreachable("Unexpected Cuda arch.");
  }
  llvm_unreachable("Unexpected NVPTX target without ptx feature.");
}
// Module-level finalization for the NVPTX OpenMP runtime: emits the
// statically allocated globalization buffers collected during codegen
// (shared-memory-backed for small record sets, global-memory-backed
// otherwise), plus a dummy global used to keep debug info alive, then
// delegates to the base runtime.
void CGOpenMPRuntimeNVPTX::clear() {
  // Emit a dummy common global "_$_" so debug info is emitted for the
  // module when debug info generation is enabled.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >=
        codegenoptions::LimitedDebugInfo) {
      ASTContext &C = CGM.getContext();
      auto *VD = VarDecl::Create(
          C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          &C.Idents.get("_$_"), C.IntTy, /*TInfo=*/nullptr, SC_Static);
      auto *Var = cast<llvm::GlobalVariable>(
          CGM.CreateRuntimeVariable(CGM.IntTy, "_$_"));
      Var->setInitializer(llvm::ConstantInt::getNullValue(CGM.IntTy));
      Var->setLinkage(llvm::GlobalVariable::CommonLinkage);
      CGM.addCompilerUsedGlobal(Var);
      DI->EmitGlobalVariable(Var, VD);
    }
  if (!GlobalizedRecords.empty()) {
    ASTContext &C = CGM.getContext();
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
    // Build two implicit union types: one backed by global memory and one
    // backed by shared memory, with one char-array member per record set.
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    RecordDecl *SharedStaticRD = C.buildImplicitRecord(
        "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    SharedStaticRD->startDefinition();
    for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
      if (Records.Records.empty())
        continue;
      // Compute the combined, alignment-padded size of all records in this
      // set.
      unsigned Size = 0;
      unsigned RecAlignment = 0;
      for (const RecordDecl *RD : Records.Records) {
        QualType RDTy = C.getRecordType(RD);
        unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
        RecAlignment = std::max(RecAlignment, Alignment);
        unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
        Size =
            llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
      }
      Size = llvm::alignTo(Size, RecAlignment);
      llvm::APInt ArySize(/*numBits=*/64, Size);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
      // Record sets that fit in SharedMemorySize go to shared memory, the
      // rest to global memory.
      const bool UseSharedMemory = Size <= SharedMemorySize;
      auto *Field =
          FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
                            SourceLocation(), SourceLocation(), nullptr, SubTy,
                            C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
                            /*BW=*/nullptr, /*Mutable=*/false,
                            /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (UseSharedMemory) {
        SharedStaticRD->addDecl(Field);
        SharedRecs.push_back(&Records);
      } else {
        StaticRD->addDecl(Field);
        GlobalRecs.push_back(&Records);
      }
      // Patch the size/flag globals emitted earlier during codegen.
      Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
      Records.UseSharedMemory->setInitializer(
          llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
    }
    // Allocate SharedMemorySize buffer for the shared memory.
    // FIXME: nvlink does not handle weak linkage correctly (object with the
    // different size are reported as erroneous).
    // Restore this code as soon as nvlink is fixed.
    if (!SharedStaticRD->field_empty()) {
      llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
      auto *Field = FieldDecl::Create(
          C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
          C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      SharedStaticRD->addDecl(Field);
    }
    SharedStaticRD->completeDefinition();
    if (!SharedStaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(SharedStaticRD);
      llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
      // Emit the shared-memory buffer (cuda_shared address space) and
      // redirect all small record buffers to it.
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMStaticTy,
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
          llvm::Constant::getNullValue(LLVMStaticTy),
          "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
          llvm::GlobalValue::NotThreadLocal,
          C.getTargetAddressSpace(LangAS::cuda_shared));
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
    StaticRD->completeDefinition();
    if (!StaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(StaticRD);
      std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
      // The global-memory buffer is replicated per SM and per block:
      // [num SMs][blocks per SM][union of record sets].
      llvm::APInt Size1(32, SMsBlockPerSM.second);
      QualType Arr1Ty =
          C.getConstantArrayType(StaticTy, Size1, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::APInt Size2(32, SMsBlockPerSM.first);
      QualType Arr2Ty = C.getConstantArrayType(Arr1Ty, Size2, ArrayType::Normal,
                                               /*IndexTypeQuals=*/0);
      llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMArr2Ty,
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
          llvm::Constant::getNullValue(LLVMArr2Ty),
          "_openmp_static_glob_rd_$_");
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
  }
  CGOpenMPRuntime::clear();
}
| 41.65646 | 80 | 0.67267 | [
"object",
"model"
] |
f66f0424072b0a04a28159234e51987275a4898f | 34,414 | cc | C++ | source/IRTKSimple2/image++/src/irtkGenericImage.cc | gordon-n-stevenson/fetalReconstruction | 6a1e4a15bdf92e86439791d836d1b20ede793293 | [
"Zlib",
"Unlicense",
"Intel",
"MIT"
] | null | null | null | source/IRTKSimple2/image++/src/irtkGenericImage.cc | gordon-n-stevenson/fetalReconstruction | 6a1e4a15bdf92e86439791d836d1b20ede793293 | [
"Zlib",
"Unlicense",
"Intel",
"MIT"
] | null | null | null | source/IRTKSimple2/image++/src/irtkGenericImage.cc | gordon-n-stevenson/fetalReconstruction | 6a1e4a15bdf92e86439791d836d1b20ede793293 | [
"Zlib",
"Unlicense",
"Intel",
"MIT"
] | null | null | null | /*=========================================================================
Library : Image Registration Toolkit (IRTK)
Module : $Id: irtkGenericImage.cc 968 2013-08-15 08:48:21Z kpk09 $
Copyright : Imperial College, Department of Computing
Visual Information Processing (VIP), 2008 onwards
Date : $Date: 2013-08-15 09:48:21 +0100 (Thu, 15 Aug 2013) $
Version : $Revision: 968 $
Changes : $Author: kpk09 $
=========================================================================*/
#define _IMPLEMENTS_GENERICIMAGE_
#include <irtkImage.h>
#include <irtkFileToImage.h>
#include <irtkImageToFile.h>
/// Default constructor: creates an empty image with zero extent in all
/// four dimensions and no voxel storage allocated.
template <class VoxelType> irtkGenericImage<VoxelType>::irtkGenericImage(void) : irtkBaseImage()
{
  // No voxel storage yet.
  _matrix = NULL;

  // Zero-sized in x, y, z and t.
  _attr._x = 0;
  _attr._y = 0;
  _attr._z = 0;
  _attr._t = 0;
}
/// Constructor: allocates an image with the given extent (x, y, z, t).
/// Voxel values are default-initialized by Initialize().
template <class VoxelType> irtkGenericImage<VoxelType>::irtkGenericImage(int x, int y, int z, int t) : irtkBaseImage()
{
  _matrix = NULL;

  // Describe the requested geometry and hand it to Initialize(), which
  // allocates the storage.
  irtkImageAttributes attr;
  attr._x = x;
  attr._y = y;
  attr._z = z;
  attr._t = t;
  this->Initialize(attr);
}
/// Constructor: reads the image from the named file (format detected by
/// the reader selected in Read()).
template <class VoxelType> irtkGenericImage<VoxelType>::irtkGenericImage(char *filename)
{
  _matrix = NULL;          // no storage until the file has been read
  this->Read(filename);    // allocates storage and fills in the voxels
}
/// Constructor: allocates an image with the geometry described by attr.
template <class VoxelType> irtkGenericImage<VoxelType>::irtkGenericImage(const irtkImageAttributes &attr) : irtkBaseImage()
{
  _matrix = NULL;          // no storage yet
  this->Initialize(attr);  // allocate and default-initialize voxels
}
/// Copy constructor: duplicates both the geometry and the voxel data of
/// another image with the same voxel type.
template <class VoxelType> irtkGenericImage<VoxelType>::irtkGenericImage(const irtkGenericImage &image) : irtkBaseImage()
{
  _matrix = NULL;
  this->Initialize(image._attr);

  // Copy the voxel values element by element.
  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] = src[idx];
  }
}
/// Conversion copy constructor: duplicates an image with a different voxel
/// type, casting each voxel value to VoxelType.
template <class VoxelType> template <class VoxelType2> irtkGenericImage<VoxelType>::irtkGenericImage(const irtkGenericImage<VoxelType2> &image)
{
  _matrix = NULL;
  this->Initialize(image.GetImageAttributes());

  // Copy with an explicit per-voxel cast to the destination type.
  VoxelType  *dst = this->GetPointerToVoxels();
  VoxelType2 *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] = static_cast<VoxelType>(src[idx]);
  }
}
/// Destructor: releases the voxel storage (if any) and resets the image
/// extent to zero.
template <class VoxelType> irtkGenericImage<VoxelType>::~irtkGenericImage(void)
{
  if (_matrix != NULL) {
    Deallocate<VoxelType>(_matrix);
    _matrix = NULL;
  }
  _attr._x = _attr._y = _attr._z = _attr._t = 0;
}
// Explicit specializations of NameOfClass(): return a human-readable class
// name identifying the instantiated voxel type (used e.g. for diagnostics).
template <> const char *irtkGenericImage<char>::NameOfClass()
{
  return "irtkGenericImage<char>";
}
template <> const char *irtkGenericImage<unsigned char>::NameOfClass()
{
  return "irtkGenericImage<unsigned char>";
}
template <> const char *irtkGenericImage<short>::NameOfClass()
{
  return "irtkGenericImage<short>";
}
template <> const char *irtkGenericImage<unsigned short>::NameOfClass()
{
  return "irtkGenericImage<unsigned short>";
}
template <> const char *irtkGenericImage<int>::NameOfClass()
{
  return "irtkGenericImage<int>";
}
template <> const char *irtkGenericImage<unsigned int>::NameOfClass()
{
  return "irtkGenericImage<unsigned int>";
}
template <> const char *irtkGenericImage<float>::NameOfClass()
{
  return "irtkGenericImage<float>";
}
template <> const char *irtkGenericImage<double>::NameOfClass()
{
  return "irtkGenericImage<double>";
}
/// (Re-)initializes the image to the geometry described by attr.
/// Storage is reallocated only if the extent changes; afterwards the base
/// class attributes are updated and all voxels are reset to VoxelType().
template <class VoxelType> void irtkGenericImage<VoxelType>::Initialize(const irtkImageAttributes &attr)
{
  // Reallocate only when the extent actually changes.
  if ((_attr._x != attr._x) || (_attr._y != attr._y) || (_attr._z != attr._z) || (_attr._t != attr._t)) {
    // Free old memory
    if (_matrix != NULL) Deallocate<VoxelType>(_matrix);
    // Allocate new memory (none for an empty extent)
    if (attr._x*attr._y*attr._z*attr._t > 0) {
      _matrix = Allocate(_matrix, attr._x, attr._y, attr._z, attr._t);
    } else {
      _matrix = NULL;
    }
  }

  // Initialize base class
  this->irtkBaseImage::Update(attr);

  // Reset every voxel to the default value via operator=(VoxelType).
  *this = VoxelType();
}
/// Clears the image: releases the voxel storage and resets the extent to
/// zero in all four dimensions.
template <class VoxelType> void irtkGenericImage<VoxelType>::Clear()
{
  // Free memory
  if (_matrix != NULL) {
    Deallocate<VoxelType>(_matrix);
    // Reset the pointer so the destructor (or a second Clear) does not
    // deallocate the same storage again (double free).
    _matrix = NULL;
  }

  _attr._x = 0;
  _attr._y = 0;
  _attr._z = 0;
  _attr._t = 0;
}
/// Reads the image from a file. A reader matching the file format is
/// created, the decoded image is converted to this instantiation's voxel
/// type, and (for float/double instantiations) the file's slope/intercept
/// rescaling is applied. For integer instantiations a non-trivial
/// slope/intercept is ignored with a warning.
template <class VoxelType> void irtkGenericImage<VoxelType>::Read(const char *filename)
{
  irtkBaseImage *image;

  // Allocate file reader
  irtkFileToImage *reader = irtkFileToImage::New(filename);

  // Get output
  image = reader->GetOutput();

  // Convert the decoded image to this voxel type via the templated
  // assignment operator.
  switch (reader->GetDataType()) {
    case IRTK_VOXEL_CHAR: { *this = *(dynamic_cast<irtkGenericImage<char> *>(image)); } break;
    case IRTK_VOXEL_UNSIGNED_CHAR: { *this = *(dynamic_cast<irtkGenericImage<unsigned char> *>(image)); } break;
    case IRTK_VOXEL_SHORT: { *this = *(dynamic_cast<irtkGenericImage<short> *>(image)); } break;
    case IRTK_VOXEL_UNSIGNED_SHORT: { *this = *(dynamic_cast<irtkGenericImage<unsigned short> *>(image)); } break;
    case IRTK_VOXEL_INT: { *this = *(dynamic_cast<irtkGenericImage<int> *>(image)); } break;
    case IRTK_VOXEL_UNSIGNED_INT: { *this = *(dynamic_cast<irtkGenericImage<unsigned int> *>(image)); } break;
    case IRTK_VOXEL_FLOAT: { *this = *(dynamic_cast<irtkGenericImage<float> *>(image)); } break;
    case IRTK_VOXEL_DOUBLE: { *this = *(dynamic_cast<irtkGenericImage<double> *>(image)); } break;
    default:
      cout << "irtkGenericImage::GetOutput: Unknown voxel type" << endl;
  }

  // Apply intensity rescaling (value = raw * slope + intercept); this only
  // makes sense for floating-point voxel types.
  if (reader->GetSlope() != 0) {
    switch (this->GetScalarType()) {
      case IRTK_VOXEL_FLOAT: {
        *this *= static_cast<float>(reader->GetSlope());
        *this += static_cast<float>(reader->GetIntercept());
      }
      break;
      case IRTK_VOXEL_DOUBLE: {
        *this *= static_cast<double>(reader->GetSlope());
        *this += static_cast<double>(reader->GetIntercept());
      }
      break;
      default:
        // Integer instantiations cannot represent the rescaled values.
        if ((reader->GetSlope() != 1) || (reader->GetIntercept() != 0)) {
          cerr << this->NameOfClass() << "::Read: Ignore slope and intercept, use irtkGenericImage<float> or " << endl;
          cerr << "irtkGenericImage<double> instead" << endl;
        }
    }
  }

  // Delete reader
  delete reader;

  // Delete image
  delete image;
}
/// Writes the image to the named file; the writer implementation is
/// selected from the file name by irtkImageToFile::New().
template <class VoxelType> void irtkGenericImage<VoxelType>::Write(const char *filename)
{
  irtkImageToFile *writer = irtkImageToFile::New(filename);
  writer->SetInput(this);
  writer->Run();
  delete writer;
}
/// Determines the minimum and maximum voxel value. For an empty image both
/// outputs are the default-constructed VoxelType().
template <class VoxelType> void irtkGenericImage<VoxelType>::GetMinMax(VoxelType *min, VoxelType *max) const
{
  *min = VoxelType();
  *max = VoxelType();

  VoxelType *data = this->GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();

  if (nvox > 0) {
    // Seed with the first voxel, then scan the rest.
    *min = *max = data[0];
    for (int idx = 1; idx < nvox; idx++) {
      if (data[idx] < *min) *min = data[idx];
      if (data[idx] > *max) *max = data[idx];
    }
  }
}
/// Returns the mean voxel value.
///
/// If toggle == 1, only strictly positive voxels contribute and the mean
/// is taken over their count m; otherwise all voxels contribute with the
/// total count n.
///
/// NOTE(review): the accumulator is a float but the return type is
/// VoxelType, so the result is truncated for integer instantiations.
template <class VoxelType> VoxelType irtkGenericImage<VoxelType>::GetAverage(int toggle) const
{
  float average = 0;
  int i, n, m;
  VoxelType *ptr;

  // Initialize pixels
  n = this->GetNumberOfVoxels();
  ptr = this->GetPointerToVoxels();
  m = 0;
  if (n > 0) {
    // First pass: count the strictly positive voxels (m), used as the
    // denominator when toggle == 1.
    for (i = 0; i < n; i++) {
      if(*ptr > 0) {
        m ++;
      }
      ptr ++;
    }
    // Second pass: accumulate the mean incrementally.
    ptr = this->GetPointerToVoxels();
    for (i = 0; i < n; i++) {
      if(toggle == 1){
        if(*ptr > 0) {
          average += (float)((VoxelType)*ptr)/(float)m;
        }
      }
      else{
        average += (float)((VoxelType)*ptr)/(float)n;
      }
      ptr ++;
    }
  }
  return average;
}
/// Returns the standard deviation of the voxel values.
///
/// \param toggle If 1, only strictly positive voxels contribute and the
///               variance is normalized by their count, mirroring
///               GetAverage(); otherwise all voxels are used.
///
/// Fix: the previous implementation had byte-identical code in both
/// branches of the toggle test, so toggle had no effect and the
/// positive-only mean from GetAverage(1) was combined with a sum over all
/// voxels divided by the full voxel count.
template <class VoxelType> VoxelType irtkGenericImage<VoxelType>::GetSD(int toggle) const
{
  int i, n, m;
  VoxelType *ptr;
  double average, std = 0;

  n = this->GetNumberOfVoxels();
  ptr = this->GetPointerToVoxels();
  average = this->GetAverage(toggle);

  // Number of voxels participating in the statistics.
  if (toggle == 1) {
    m = 0;
    for (i = 0; i < n; i++) {
      if (ptr[i] > 0) m++;
    }
  } else {
    m = n;
  }

  if (m > 0) {
    for (i = 0; i < n; i++) {
      if (toggle == 1) {
        // Restrict to strictly positive voxels, as in GetAverage().
        if (ptr[i] > 0)
          std += pow((double)ptr[i] - average, 2) / (double)m;
      } else {
        std += pow((double)ptr[i] - average, 2) / (double)m;
      }
    }
  }
  return sqrt(std);
}
/// Finds, inside an in-plane (2*ds+1)^2 window around point p (given in
/// world coordinates), the voxel with the largest value and returns its
/// position in p (world coordinates). The search stays in the slice that
/// contains p.
///
/// \param p  In: search center (world). Out: position of the maximum.
/// \param ds Half-width of the search window in voxels.
/// \param t  Frame (time index) to search; mirrors GravityCenter().
///
/// Fix: the previous implementation advanced a raw pointer starting at the
/// first voxel of the image instead of addressing the window voxels, so it
/// compared values from the wrong locations; it also ignored the slice and
/// frame indices. No bounds checking is performed: the window must lie
/// inside the image.
template <class VoxelType> void irtkGenericImage<VoxelType>::GetMaxPosition(irtkPoint& p, int ds, int t) const
{
  double i, j, k;
  VoxelType *ptr;
  double x, y, z;

  this->WorldToImage(p);
  VoxelType max = 0;
  x = round(p._x); y = round(p._y); z = round(p._z);
  k = round(p._z);
  for (j = round(p._y) - ds; j < round(p._y) + ds + 1; j++) {
    for (i = round(p._x) - ds; i < round(p._x) + ds + 1; i++) {
      // Address the voxel inside the search window directly (same scheme
      // as GravityCenter()).
      ptr = this->GetPointerToVoxels(i, j, k, t);
      if (max < *ptr) {
        max = *ptr;
        x = i; y = j; z = k;
      }
    }
  }
  p._x = x; p._y = y; p._z = z;
  this->ImageToWorld(p);
}
/// Computes the intensity-weighted center of gravity inside a cubic
/// (2*ds+1)^3 window around point p (world coordinates) in frame t, and
/// returns it in p (world coordinates). No bounds checking is performed.
template <class VoxelType> void irtkGenericImage<VoxelType>::GravityCenter(irtkPoint& p, int ds, int t) const
{
  double i, j, k;
  VoxelType *ptr;
  double x, y, z;
  double si, sj, sk;
  double sweight;

  this->WorldToImage(p);

  si = 0; sj = 0; sk = 0; sweight = 0;
  x = round(p._x); y = round(p._y); z = round(p._z);

  // Accumulate intensity-weighted coordinate sums over the window.
  for (k = round(p._z) - ds; k < round(p._z) + ds + 1; k++) {
    for (j = round(p._y) - ds; j < round(p._y) + ds + 1; j++) {
      for (i = round(p._x) - ds; i < round(p._x) + ds + 1; i++) {
        ptr = this->GetPointerToVoxels(i, j, k, t);
        si += *ptr * i;
        sj += *ptr * j;
        sk += *ptr * k;
        sweight += *ptr;
      }
    }
  }

  // Normalize by the total intensity to obtain the centroid.
  x = si / sweight; y = sj / sweight; z = sk / sweight;
  p._x = x; p._y = y; p._z = z;
  this->ImageToWorld(p);
}
/// Determines the min/max over all voxels whose value is strictly greater
/// than pad. If no voxel exceeds pad, both outputs are VoxelType().
template <class VoxelType> void irtkGenericImage<VoxelType>::GetMinMaxPad(VoxelType *min, VoxelType *max, VoxelType pad) const
{
  *min = VoxelType();
  *max = VoxelType();

  VoxelType *data = this->GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();

  bool seeded = false;
  for (int idx = 0; idx < nvox; idx++) {
    if (data[idx] > pad) {
      if (!seeded) {
        // First voxel above the padding value seeds both bounds.
        seeded = true;
        *min = *max = data[idx];
      } else {
        if (data[idx] < *min) *min = data[idx];
        if (data[idx] > *max) *max = data[idx];
      }
    }
  }
}
/// Linearly rescales the voxel values so that they span [min, max].
///
/// Fix: the previous implementation divided by (max_val - min_val) without
/// a guard, producing a division by zero for a constant image. A constant
/// image is now mapped to the requested lower bound.
template <class VoxelType> void irtkGenericImage<VoxelType>::PutMinMax(VoxelType min, VoxelType max)
{
  int i, n;
  VoxelType *ptr, min_val, max_val;

  // Get lower and upper bound
  this->GetMinMax(&min_val, &max_val);

  n = this->GetNumberOfVoxels();
  ptr = this->GetPointerToVoxels();

  // A constant image cannot be rescaled; avoid dividing by zero.
  if (max_val == min_val) {
    for (i = 0; i < n; i++) {
      ptr[i] = min;
    }
    return;
  }

  for (i = 0; i < n; i++) {
    ptr[i] = VoxelType(((ptr[i] - min_val) / double(max_val - min_val)) *
                       (max - min) + min);
  }
}
/// Clamps the voxel values to the [q0, q1] quantiles of the intensity
/// distribution. q0/q1 must lie in [0, 1]; (0, 1) is a no-op and values
/// outside the range raise an irtkException.
template <class VoxelType> void irtkGenericImage<VoxelType>::Saturate( double q0, double q1 )
{
  if ((q0==0) && (q1==1))
    return;

  if ((q0<0) || (q1>1)) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::Saturate: Parameter out of range 0.0 - 1.0\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  VoxelType *data = this->GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();

  // Sort a copy of the intensities to locate the quantile values.
  vector<VoxelType> sorted_vals(data, data + nvox);
  sort(sorted_vals.begin(), sorted_vals.end());
  VoxelType lo = sorted_vals[round((sorted_vals.size() - 1) * q0)];
  VoxelType hi = sorted_vals[round((sorted_vals.size() - 1) * q1)];

  // Clamp every voxel into [lo, hi].
  for (int idx = 0; idx < nvox; idx++) {
    if (data[idx] < lo) data[idx] = lo;
    if (data[idx] > hi) data[idx] = hi;
  }
}
/// Extracts slice k of frame m as a new 2D image (z and t extent 1),
/// adjusting the origin so the slice keeps its world position.
/// Throws irtkException for out-of-range k or m.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::GetRegion(int k, int m) const
{
  int i, j;
  double x1, y1, z1, t1, x2, y2, z2, t2;

  if ((k < 0) || (k >= _attr._z) || (m < 0) || (m >= _attr._t)) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::GetRegion: Parameter out of range\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  // Initialize: same in-plane geometry, single slice and frame.
  irtkImageAttributes attr = this->_attr;
  attr._z = 1;
  attr._t = 1;
  attr._xorigin = 0;
  attr._yorigin = 0;
  attr._zorigin = 0;
  irtkGenericImage<VoxelType> image(attr);

  // Calculate position of first voxel in roi in original image
  x1 = 0;
  y1 = 0;
  z1 = k;
  this->ImageToWorld(x1, y1, z1);
  t1 = this->ImageToTime(m);

  // Calculate position of first voxel in roi in new image
  x2 = 0;
  y2 = 0;
  z2 = 0;
  t2 = 0;
  image.ImageToWorld(x2, y2, z2);
  t2 = image.ImageToTime(0);

  // Shift origin of new image accordingly
  image.PutOrigin(x1 - x2, y1 - y2, z1 - z2, t1 - t2);

  // Copy region
  for (j = 0; j < _attr._y; j++) {
    for (i = 0; i < _attr._x; i++) {
      image._matrix[0][0][j][i] = _matrix[m][k][j][i];
    }
  }
  return image;
}
/// Extracts time frame l as a new 3D image (t extent 1).
/// Throws irtkException when l is outside [0, _t).
///
/// Fix: the out-of-range diagnostic previously named "GetRegion" instead
/// of "GetFrame".
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::GetFrame(int l) const
{
  int i, j, k;

  if ((l < 0) || (l >= _attr._t)) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::GetFrame: Parameter out of range\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  // Initialize: same spatial geometry, single frame.
  irtkImageAttributes attr = this->_attr;
  attr._t = 1;
  irtkGenericImage<VoxelType> image(attr);

  // Copy the voxel data of frame l.
  for (k = 0; k < _attr._z; k++) {
    for (j = 0; j < _attr._y; j++) {
      for (i = 0; i < _attr._x; i++) {
        image._matrix[0][k][j][i] = _matrix[l][k][j][i];
      }
    }
  }

  return image;
}
/// Extracts the half-open spatial region [i1,i2) x [j1,j2) x [k1,k2)
/// across all frames as a new image, adjusting the origin so the region
/// keeps its world position. Throws irtkException for an invalid region.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::GetRegion(int i1, int j1, int k1, int i2, int j2, int k2) const
{
  int i, j, k, l;
  double x1, y1, z1, x2, y2, z2;

  if ((i1 < 0) || (i1 >= i2) ||
      (j1 < 0) || (j1 >= j2) ||
      (k1 < 0) || (k1 >= k2) ||
      (i2 > _attr._x) || (j2 > _attr._y) || (k2 > _attr._z)) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::GetRegion: Parameter out of range\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  // Initialize: region extent, origin recomputed below.
  irtkImageAttributes attr = this->_attr;
  attr._x = i2 - i1;
  attr._y = j2 - j1;
  attr._z = k2 - k1;
  attr._xorigin = 0;
  attr._yorigin = 0;
  attr._zorigin = 0;
  irtkGenericImage<VoxelType> image(attr);

  // Calculate position of first voxel in roi in original image
  x1 = i1;
  y1 = j1;
  z1 = k1;
  this->ImageToWorld(x1, y1, z1);

  // Calculate position of first voxel in roi in new image
  x2 = 0;
  y2 = 0;
  z2 = 0;
  image.ImageToWorld(x2, y2, z2);

  // Shift origin of new image accordingly
  image.PutOrigin(x1 - x2, y1 - y2, z1 - z2);

  // Copy region
  for (l = 0; l < _attr._t; l++) {
    for (k = k1; k < k2; k++) {
      for (j = j1; j < j2; j++) {
        for (i = i1; i < i2; i++) {
          image._matrix[l][k-k1][j-j1][i-i1] = _matrix[l][k][j][i];
        }
      }
    }
  }
  return image;
}
/// Extracts the half-open 4D region [i1,i2) x [j1,j2) x [k1,k2) x [l1,l2)
/// as a new image, adjusting the origin so the region keeps its world
/// position. Throws irtkException for an invalid region.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::GetRegion(int i1, int j1, int k1, int l1, int i2, int j2, int k2, int l2) const
{
  int i, j, k, l;
  double x1, y1, z1, x2, y2, z2;

  if ((i1 < 0) || (i1 >= i2) ||
      (j1 < 0) || (j1 >= j2) ||
      (k1 < 0) || (k1 >= k2) ||
      (l1 < 0) || (l1 >= l2) ||
      (i2 > _attr._x) || (j2 > _attr._y) || (k2 > _attr._z) || (l2 > _attr._t)) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::GetRegion: Parameter out of range\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  // Initialize: region extent, origin recomputed below.
  irtkImageAttributes attr = this->_attr;
  attr._x = i2 - i1;
  attr._y = j2 - j1;
  attr._z = k2 - k1;
  attr._t = l2 - l1;
  attr._xorigin = 0;
  attr._yorigin = 0;
  attr._zorigin = 0;
  irtkGenericImage<VoxelType> image(attr);

  // Calculate position of first voxel in roi in original image
  x1 = i1;
  y1 = j1;
  z1 = k1;
  this->ImageToWorld(x1, y1, z1);

  // Calculate position of first voxel in roi in new image
  x2 = 0;
  y2 = 0;
  z2 = 0;
  image.ImageToWorld(x2, y2, z2);

  // Shift origin of new image accordingly
  image.PutOrigin(x1 - x2, y1 - y2, z1 - z2);

  // Copy region
  for (l = l1; l < l2; l++) {
    for (k = k1; k < k2; k++) {
      for (j = j1; j < j2; j++) {
        for (i = i1; i < i2; i++) {
          image._matrix[l-l1][k-k1][j-j1][i-i1] = _matrix[l][k][j][i];
        }
      }
    }
  }
  return image;
}
/// Assignment: re-initializes the geometry from image and copies all
/// voxel data. Self-assignment is a no-op.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator=(const irtkGenericImage<VoxelType> &image)
{
  if (this == &image) return *this;

  this->Initialize(image._attr);

  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] = src[idx];
  }
  return *this;
}
/// Conversion assignment: re-initializes from an image with a different
/// voxel type, casting every voxel value to VoxelType.
template <class VoxelType> template <class VoxelType2> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator=(const irtkGenericImage<VoxelType2> &image)
{
  this->Initialize(image.GetImageAttributes());

  VoxelType  *dst = this->GetPointerToVoxels();
  VoxelType2 *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] = static_cast<VoxelType>(src[idx]);
  }
  return *this;
}
/// In-place voxel-wise addition. Throws irtkException (and prints both
/// attribute sets) if the image geometries do not match.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator+=(const irtkGenericImage<VoxelType> &image)
{
  if (!(this->GetImageAttributes() == image.GetImageAttributes())) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::operator+=: Size mismatch in images\n";
    cerr << msg.str();
    this->GetImageAttributes().Print();
    image.GetImageAttributes().Print();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] += src[idx];
  }
  return *this;
}
/// Voxel-wise addition; returns a new image, operands unchanged.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator+(const irtkGenericImage<VoxelType> &image)
{
  irtkGenericImage<VoxelType> result(*this);
  result += image;
  return result;
}
/// In-place voxel-wise subtraction. Throws irtkException if the image
/// geometries do not match.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator-=(const irtkGenericImage<VoxelType> &image)
{
  if (!(this->GetImageAttributes() == image.GetImageAttributes())) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::operator-=: Size mismatch in images\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] -= src[idx];
  }
  return *this;
}
/// Voxel-wise subtraction; returns a new image, operands unchanged.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator-(const irtkGenericImage<VoxelType> &image)
{
  irtkGenericImage<VoxelType> result(*this);
  result -= image;
  return result;
}
/// In-place voxel-wise multiplication. Throws irtkException if the image
/// geometries do not match.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator*=(const irtkGenericImage<VoxelType> &image)
{
  if (!(this->GetImageAttributes() == image.GetImageAttributes())) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::operator*=: Size mismatch in images\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    dst[idx] *= src[idx];
  }
  return *this;
}
/// Voxel-wise multiplication; returns a new image, operands unchanged.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator*(const irtkGenericImage<VoxelType> &image)
{
  irtkGenericImage<VoxelType> result(*this);
  result *= image;
  return result;
}
/// In-place voxel-wise division. Voxels of the divisor image equal to
/// VoxelType() yield VoxelType() instead of dividing. Throws irtkException
/// if the image geometries do not match.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator/=(const irtkGenericImage<VoxelType> &image)
{
  if (!(this->GetImageAttributes() == image.GetImageAttributes())) {
    stringstream msg;
    msg << "irtkGenericImage<VoxelType>::operator/=: Size mismatch in images\n";
    cerr << msg.str();
    throw irtkException( msg.str(),
                         __FILE__,
                         __LINE__ );
  }

  VoxelType *dst = this->GetPointerToVoxels();
  VoxelType *src = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    if (src[idx] != VoxelType()) {
      dst[idx] /= src[idx];
    } else {
      // Division by the default value maps to the default value.
      dst[idx] = VoxelType();
    }
  }
  return *this;
}
/// Voxel-wise division; returns a new image, operands unchanged.
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator/(const irtkGenericImage<VoxelType> &image)
{
  irtkGenericImage<VoxelType> result(*this);
  result /= image;
  return result;
}
/// Equality: true iff the image attributes match and every voxel value is
/// equal.
template <class VoxelType> bool irtkGenericImage<VoxelType>::operator==(const irtkGenericImage<VoxelType> &image)
{
  if (!(this->GetImageAttributes() == image.GetImageAttributes()))
    return false;

  VoxelType *lhs = this->GetPointerToVoxels();
  VoxelType *rhs = image.GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    if (lhs[idx] != rhs[idx])
      return false;
  }
  return true;
}
/// Fills every voxel with the given value.
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator=(VoxelType pixel)
{
  VoxelType *data = this->GetPointerToVoxels();
  const int nvox = this->GetNumberOfVoxels();
  for (int idx = 0; idx < nvox; idx++) {
    data[idx] = pixel;
  }
  return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator+=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
ptr[i] += pixel;
}
return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator+(VoxelType pixel)
{
irtkGenericImage<VoxelType> tmp(*this); tmp += pixel; return tmp;
}
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator-=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
ptr[i] -= pixel;
}
return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator-(VoxelType pixel)
{
irtkGenericImage<VoxelType> tmp(*this); tmp -= pixel; return tmp;
}
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator*=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
ptr[i] *= pixel;
}
return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator*(VoxelType pixel)
{
irtkGenericImage<VoxelType> tmp(*this); tmp *= pixel; return tmp;
}
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator/=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
if (pixel != VoxelType()) {
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
ptr[i] /= pixel;
}
} else {
cerr << "irtkGenericImage<VoxelType>::operator/=: Division by zero" << endl;
}
return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator/(VoxelType pixel)
{
irtkGenericImage<VoxelType> tmp(*this); tmp /= pixel; return tmp;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator>(VoxelType pixel)
{
int i, n;
VoxelType *ptr1, *ptr2;
irtkGenericImage<VoxelType> image(*this);
n = this->GetNumberOfVoxels();
ptr1 = this->GetPointerToVoxels();
ptr2 = image.GetPointerToVoxels();
for (i = 0; i < n; i++) {
if (ptr1[i] > pixel) {
ptr2[i] = pixel;
} else {
ptr2[i] = ptr1[i];
}
}
return image;
}
template <class VoxelType> irtkGenericImage<VoxelType> &irtkGenericImage<VoxelType>::operator>=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
if (ptr[i] > pixel) {
ptr[i] = pixel;
}
}
return *this;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator!=(VoxelType pixel)
{
int i, n;
VoxelType *ptr1, *ptr2;
irtkGenericImage<VoxelType> image(*this);
n = this->GetNumberOfVoxels();
ptr1 = this->GetPointerToVoxels();
ptr2 = image.GetPointerToVoxels();
for (i = 0; i < n; i++) {
if (ptr1[i] != pixel) {
ptr2[i] = 1;
}
else {
ptr2[i] = 0;
}
}
return image;
}
template <class VoxelType> irtkGenericImage<VoxelType> irtkGenericImage<VoxelType>::operator<(VoxelType pixel)
{
int i, n;
VoxelType *ptr1, *ptr2;
irtkGenericImage<VoxelType> image(*this);
n = this->GetNumberOfVoxels();
ptr1 = this->GetPointerToVoxels();
ptr2 = image.GetPointerToVoxels();
for (i = 0; i < n; i++) {
if (ptr1[i] < pixel) {
ptr2[i] = pixel;
} else {
ptr2[i] = ptr1[i];
}
}
return image;
}
template <class VoxelType> irtkGenericImage<VoxelType>& irtkGenericImage<VoxelType>::operator<=(VoxelType pixel)
{
int i, n;
VoxelType *ptr;
n = this->GetNumberOfVoxels();
ptr = this->GetPointerToVoxels();
for (i = 0; i < n; i++) {
if (ptr[i] < pixel) {
ptr[i] = pixel;
}
}
return *this;
}
template <class VoxelType> void irtkGenericImage<VoxelType>::ReflectX()
{
int x, y, z, t;
for (t = 0; t < _attr._t; t++) {
for (z = 0; z < _attr._z; z++) {
for (y = 0; y < _attr._y; y++) {
for (x = 0; x < _attr._x / 2; x++) {
swap(_matrix[t][z][y][x], _matrix[t][z][y][_attr._x-(x+1)]);
}
}
}
}
}
template <class VoxelType> void irtkGenericImage<VoxelType>::ReflectY()
{
int x, y, z, t;
for (t = 0; t < _attr._t; t++) {
for (z = 0; z < _attr._z; z++) {
for (y = 0; y < _attr._y / 2; y++) {
for (x = 0; x < _attr._x; x++) {
swap(_matrix[t][z][y][x], _matrix[t][z][_attr._y-(y+1)][x]);
}
}
}
}
}
template <class VoxelType> void irtkGenericImage<VoxelType>::ReflectZ()
{
int x, y, z, t;
for (t = 0; t < _attr._t; t++) {
for (z = 0; z < _attr._z / 2; z++) {
for (y = 0; y < _attr._y; y++) {
for (x = 0; x < _attr._x; x++) {
swap(_matrix[t][z][y][x], _matrix[t][_attr._z-(z+1)][y][x]);
}
}
}
}
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipXY(int modifyOrigin)
{
int i, j, k, m;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._y, _attr._x, _attr._z, _attr._t);
for (m = 0; m < _attr._t; m++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[m][k][i][j] = _matrix[m][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._x, _attr._y);
// Swap voxel dimensions
swap(_attr._dx, _attr._dy);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._xorigin, _attr._yorigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipXZ(int modifyOrigin)
{
int i, j, k, l;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._z, _attr._y, _attr._x, _attr._t);
for (l = 0; l < _attr._t; l++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[l][i][j][k] = _matrix[l][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._x, _attr._z);
// Swap voxel dimensions
swap(_attr._dx, _attr._dz);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._xorigin, _attr._zorigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipYZ(int modifyOrigin)
{
int i, j, k, l;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._x, _attr._z, _attr._y, _attr._t);
for (l = 0; l < _attr._t; l++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[l][j][k][i] = _matrix[l][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._y, _attr._z);
// Swap voxel dimensions
swap(_attr._dy, _attr._dz);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._yorigin, _attr._zorigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipXT(int modifyOrigin)
{
int i, j, k, m;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._t, _attr._y, _attr._z, _attr._x);
for (m = 0; m < _attr._t; m++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[i][k][j][m] = _matrix[m][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._x, _attr._t);
// Swap voxel dimensions
swap(_attr._dx, _attr._dt);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._xorigin, _attr._torigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipYT(int modifyOrigin)
{
int i, j, k, m;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._x, _attr._t, _attr._z, _attr._y);
for (m = 0; m < _attr._t; m++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[j][k][m][i] = _matrix[m][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._y, _attr._t);
// Swap voxel dimensions
swap(_attr._dy, _attr._dt);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._yorigin, _attr._torigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
template <class VoxelType> void irtkGenericImage<VoxelType>::FlipZT(int modifyOrigin)
{
int i, j, k, m;
VoxelType ****matrix = NULL;
// Allocate memory
matrix = Allocate(matrix, _attr._x, _attr._y, _attr._t, _attr._z);
for (m = 0; m < _attr._t; m++) {
for (k = 0; k < _attr._z; k++) {
for (j = 0; j < _attr._y; j++) {
for (i = 0; i < _attr._x; i++) {
matrix[k][m][j][i] = _matrix[m][k][j][i];
}
}
}
}
// Swap pointers
swap(matrix, _matrix);
// Deallocate memory
matrix = Deallocate<VoxelType>(matrix);
// Swap image dimensions
swap(_attr._z, _attr._t);
// Swap voxel dimensions
swap(_attr._dz, _attr._dt);
if (modifyOrigin > 0){
// Swap origin coordinates
swap(_attr._zorigin, _attr._torigin);
}
// Update transformation matrix
this->UpdateMatrix();
}
#ifdef HAS_VTK
template <> int irtkGenericImage<char>::ImageToVTKScalarType()
{
return VTK_CHAR;
}
template <> int irtkGenericImage<unsigned char>::ImageToVTKScalarType()
{
return VTK_UNSIGNED_CHAR;
}
template <> int irtkGenericImage<short>::ImageToVTKScalarType()
{
return VTK_SHORT;
}
template <> int irtkGenericImage<unsigned short>::ImageToVTKScalarType()
{
return VTK_UNSIGNED_SHORT;
}
template <> int irtkGenericImage<int>::ImageToVTKScalarType()
{
return VTK_INT;
}
template <> int irtkGenericImage<unsigned int>::ImageToVTKScalarType()
{
return VTK_UNSIGNED_INT;
}
template <> int irtkGenericImage<float>::ImageToVTKScalarType()
{
return VTK_FLOAT;
}
template <> int irtkGenericImage<double>::ImageToVTKScalarType()
{
return VTK_DOUBLE;
}
template <class Type> void irtkGenericImage<Type>::ImageToVTK(vtkStructuredPoints *vtk)
{
int i, n;
double x, y, z;
Type *ptr1, *ptr2;
// Calculate the VTK origin of an IRTK image
x = 0;
y = 0;
z = 0;
this->ImageToWorld(x, y, z);
// Allocate the VTK image
vtk->SetOrigin(x, y, z);
vtk->SetDimensions(_attr._x, _attr._y, _attr._z);
vtk->SetSpacing(_attr._dx, _attr._dy, _attr._dz);
vtk->SetScalarType(this->ImageToVTKScalarType());
vtk->AllocateScalars();
// Initialize the VTK image
n = this->GetNumberOfVoxels();
ptr1 = this->GetPointerToVoxels();
ptr2 = (Type *)vtk->GetScalarPointer();
for (i = 0; i < this->GetNumberOfVoxels(); i++) {
*ptr2 = *ptr1;
ptr1++;
ptr2++;
}
}
template <class Type> void irtkGenericImage<Type>::VTKToImage(vtkStructuredPoints *)
{}
#endif
#include <irtkTemplate.h>
| 24.722701 | 165 | 0.604144 | [
"vector"
] |
f6701a1663c7f5774c6978d603b48799113daa55 | 7,900 | inl | C++ | include/mobreg/include/Misha/FourierS2.inl | josefgraus/self_similiarity | c032daa3009f60fdc8a52c437a07c6e3ba2efe4b | [
"MIT"
] | 1 | 2021-02-25T09:35:14.000Z | 2021-02-25T09:35:14.000Z | include/mobreg/include/Misha/FourierS2.inl | josefgraus/self_similiarity | c032daa3009f60fdc8a52c437a07c6e3ba2efe4b | [
"MIT"
] | null | null | null | include/mobreg/include/Misha/FourierS2.inl | josefgraus/self_similiarity | c032daa3009f60fdc8a52c437a07c6e3ba2efe4b | [
"MIT"
] | 2 | 2020-09-22T13:02:45.000Z | 2020-10-08T00:21:36.000Z | /*
Copyright (c) 2018, Michael Kazhdan, Alex Baden, and Keenan Crane
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <FST_semi_memo_fftw.h>
#include <cospmls.h>
#include "fftw3.h"
#include <math.h>
//////////////////
// FourierKeyS2 //
//////////////////
template<class Real> FourierKeyS2<Real>::FourierKeyS2(void){
bw = 0;
values = NullPointer< Complex< Real > >();
}
template<class Real> FourierKeyS2<Real>::~FourierKeyS2(void){
if( values ) FreePointer( values );
values = NullPointer< Complex< Real > >();
bw=0;
}
template<class Real> int FourierKeyS2<Real>::read(const char* fileName)
{
FILE* fp=fopen(fileName,"rb");
if(!fp){return 0;}
int r=read(fp);
fclose(fp);
return r;
}
template<class Real> int FourierKeyS2<Real>::write(const char* fileName) const
{
FILE* fp=fopen(fileName,"wb");
if(!fp){return 0;}
int w=write(fp);
fclose(fp);
return w;
}
template<class Real> int FourierKeyS2<Real>::read(FILE* fp){
int b,r;
r=int(fread(&b,sizeof(int),1,fp));
if(!r){return 0;}
resize(b);
r=int(fread(values,sizeof(Complex<Real>),((bw*bw+bw)>>1),fp));
if(r==((bw*bw+bw)>>1)){return 1;}
else{return 0;}
}
template<class Real> int FourierKeyS2<Real>::write(FILE* fp) const {
int w;
w=int(fwrite(&bw,sizeof(int),1,fp));
if(!w){return 0;}
w=int(fwrite(values,sizeof(Complex<Real>),((bw*bw+bw)>>1),fp));
if(w==((bw*bw+bw)>>1)){return 1;}
else{return 0;}
}
template<class Real> int FourierKeyS2<Real>::bandWidth( void ) const{return bw;}
template<class Real> int FourierKeyS2<Real>::resolution( void ) const {return bw*2;}
template<class Real> int FourierKeyS2<Real>::resize( int resolution , bool clr )
{
int b=resolution>>1;
if( b<0 ) return 0;
else if( b!=bw )
{
if( values ) FreePointer( values );
values = NullPointer< Complex< Real > >();
bw=0;
if(b)
{
values = AllocPointer< Complex< Real > >( (b*b+b)>>1 );
if( !values ) return 0;
else bw=b;
}
}
if(clr) clear();
return 1;
}
template<class Real> void FourierKeyS2<Real>::clear(void){if(bw){memset(values,0,sizeof(Complex<Real>)*((bw*bw+bw)>>1));}}
template<class Real> Complex<Real>& FourierKeyS2<Real>::operator() ( int i , int j ) { return values[(i-j)+(j*bw)-(j*j-j)/2]; }
template<class Real> Complex<Real> FourierKeyS2<Real>::operator() ( int i , int j ) const { return values[(i-j)+(j*bw)-(j*j-j)/2]; }
template<class Real> Real FourierKeyS2<Real>::squareNorm( void ) const { return Dot(*this,*this); }
template<class Real> Real FourierKeyS2<Real>::SquareDifference( const FourierKeyS2& g1,const FourierKeyS2& g2){ return g1.squareNorm() + g2.squareNorm() - 2*Dot(g1,g2); }
template<class Real> Real FourierKeyS2<Real>::Dot( const FourierKeyS2& g1,const FourierKeyS2& g2 )
{
Real d = Real(0);
int idx=0;
if(g1.bw != g2.bw) fprintf( stderr , "Could not compare arrays of different sizes: %d != %d\n" , g1.bw , g2.bw ) , exit(0);
for( int i=0 ; i<g1.bw ; i++ ) d+=g1.values[i].r * g2.values[i].r;
for( int i=g1.bw ; i<((g1.bw*g1.bw+g1.bw)>>1) ; i++ ) d += ( g1.values[i]*g2.values[i].conjugate() ).r * 2;
return d;
}
template< class Real > int FourierKeyS2< Real >::Entries( int bw ) { return (bw*bw+bw)>>1; }
/////////////////////////////////////
// HarmonicTransform::ScratchSpace //
/////////////////////////////////////
template<class Real>
HarmonicTransform<Real>::ScratchSpace::ScratchSpace(void){
bw=0;
workSpace = resultSpace = transposeResultSpace = NullPointer< Real >();
table=transposeTable=NULL;
}
template<class Real>
HarmonicTransform<Real>::ScratchSpace::~ScratchSpace(void){resize(0);}
template<class Real>
void HarmonicTransform<Real>::ScratchSpace::resize(const int& b){
if(b!=bw)
{
int size=b*2;
if(workSpace) {delete[] workSpace;}
if(resultSpace) {delete[] resultSpace;}
if(transposeResultSpace) {delete[] transposeResultSpace;}
if(table) {delete[] table;}
if(transposeTable) {delete[] transposeTable;}
bw=0;
workSpace = NullPointer< Real >();
resultSpace = NullPointer< Real >() ;
transposeResultSpace = NullPointer< Real >();
table = NullPointer< Real* >();
transposeTable = NullPointer< Real* >();
if( b>0 )
{
bw=b;
workSpace = AllocPointer< Real >( 4*bw*bw+36*bw );
resultSpace = AllocPointer< Real >( Spharmonic_TableSize(bw) );
transposeResultSpace = AllocPointer< Real >( Spharmonic_TableSize(bw) );
table = Spharmonic_Pml_Table(bw,resultSpace,workSpace);
transposeTable =Transpose_Spharmonic_Pml_Table(table,bw,transposeResultSpace,workSpace);
}
}
}
///////////////////////
// HarmonicTransform //
///////////////////////
template<class Real>
void HarmonicTransform<Real>::resize(const int& resolution){scratch.resize(resolution>>1);}
template<>
int HarmonicTransform<double>::ForwardFourier(SphericalGrid<double>& g,FourierKeyS2<double>& key){
int sz,bw;
sz=g.resolution();
bw=sz>>1;
if(key.resolution()!=sz){key.resize(sz);}
scratch.resize(bw);
FST_semi_memo_fftw( g[0] , (fftw_complex*)&key(0,0) , sz , scratch.table , GetAddress( scratch.workSpace ) );
return 1;
}
template<>
int HarmonicTransform<float>::ForwardFourier(SphericalGrid<float>& g,FourierKeyS2<float>& key){
int sz,bw;
sz=g.resolution();
bw=sz>>1;
if(key.resolution()!=sz){key.resize(sz);}
scratch.resize(bw);
FST_semi_memo_fftw(g[0],(fftwf_complex*)&key(0,0),sz,scratch.table,scratch.workSpace);
return 1;
}
template<class Real>
int HarmonicTransform<Real>::ForwardFourier(SphericalGrid<Real>&,FourierKeyS2<Real>&){
fprintf(stderr,"Harmonic Transform only supported for floats and doubles\n");
return 0;
}
template<>
int HarmonicTransform<double>::InverseFourier(FourierKeyS2<double>& key,SphericalGrid<double>& g){
if(key.resolution()!=g.resolution()){g.resize(key.resolution());}
int bw=key.bandWidth(),sz=g.resolution();
scratch.resize(bw);
InvFST_semi_memo_fftw((fftw_complex*)&key(0,0),g[0],sz,scratch.transposeTable,scratch.workSpace);
return 1;
}
template<>
int HarmonicTransform<float>::InverseFourier(FourierKeyS2<float>& key,SphericalGrid<float>& g){
if(key.resolution()!=g.resolution()){g.resize(key.resolution());}
int bw=key.bandWidth(),sz=g.resolution();
scratch.resize(bw);
InvFST_semi_memo_fftw((fftwf_complex*)&key(0,0),g[0],sz,scratch.transposeTable,scratch.workSpace);
return 1;
}
template<class Real>
int HarmonicTransform<Real>::InverseFourier(FourierKeyS2<Real>&,SphericalGrid<Real>&){
fprintf(stderr,"Harmonic Transform only supported for floats and doubles\n");
return 0;
} | 37.619048 | 170 | 0.704304 | [
"transform"
] |
f6755d028a7516d54165d97877b30cd9114fd9ce | 1,618 | cpp | C++ | codeforces/H - Missing Number/Accepted.cpp | kzvd4729/Problem-Solving | 13b105e725a4c2f8db7fecc5d7a8f932b9fef4ab | [
"MIT"
] | 1 | 2022-02-11T16:55:36.000Z | 2022-02-11T16:55:36.000Z | codeforces/H - Missing Number/Accepted.cpp | kzvd4729/Problem-Solving | 13b105e725a4c2f8db7fecc5d7a8f932b9fef4ab | [
"MIT"
] | null | null | null | codeforces/H - Missing Number/Accepted.cpp | kzvd4729/Problem-Solving | 13b105e725a4c2f8db7fecc5d7a8f932b9fef4ab | [
"MIT"
] | null | null | null | /****************************************************************************************
* @author: kzvd4729 created: Dec/11/2019 20:21
* solution_verdict: Accepted language: GNU C++14
* run_time: 46 ms memory_used: 0 KB
* problem: https://codeforces.com/gym/102215/problem/H
****************************************************************************************/
#include<bits/stdc++.h>
#define long long long
using namespace std;
const int N=1e3,inf=1e9;
int cnt[2],aa[N+2];
int cal(int n,int ans,int d)
{
//if(d==0)cout<<"ans= "<<ans<<endl;
int one=0;
for(int i=0;i<=n;i++)
{
int f=0;
for(int j=0;j<=d;j++)
{
if((i&(1<<j))!=(ans&(1<<j)))f=1;
}
if(!f)one+=(bool)(i&(1<<(d+1)));
}
return one;
}
int main()
{
ios_base::sync_with_stdio(0);cin.tie(0);
int n;cin>>n;
int m=31-__builtin_clz(n);
//cout<<m<<endl;
vector<int>u,v;int lt=0,rt=n;
for(int i=1;i<=n;i++)u.push_back(i);
int ans=0,d=-1;int ask=0;
for(int j=0;j<=m;j++)
{
//if(u.size()==0)assert(0);
cnt[0]=0,cnt[1]=0;
for(auto x:u)
{
ask++;if(ask>2*n+19)assert(0);
cout<<"? "<<x<<" "<<j<<endl;
int c;cin>>c;cnt[c]++;aa[x]=c;
}
int on=cal(n,ans,d);//cout<<"*"<<on<<endl;
d++;
int tr=0;if(on>cnt[1])tr=1;
ans|=(tr<<j);
v.clear();
for(auto x:u)
if(aa[x]==tr)v.push_back(x);
u=v;
}
cout<<"! "<<ans<<endl;
return 0;
} | 28.385965 | 111 | 0.401731 | [
"vector"
] |
f676d3b3f0f67d600bf5793a71b6bca69bb5a962 | 11,679 | cpp | C++ | test/dcu/level3/mm_bsr_s_dcu_test.cpp | xupinjie/AlphaSparse | 06bf06a57f9112c2f940741841485243d8073c7c | [
"MIT"
] | 18 | 2022-02-22T15:10:18.000Z | 2022-03-29T07:54:57.000Z | test/dcu/level3/mm_bsr_s_dcu_test.cpp | xupinjie/AlphaSparse | 06bf06a57f9112c2f940741841485243d8073c7c | [
"MIT"
] | null | null | null | test/dcu/level3/mm_bsr_s_dcu_test.cpp | xupinjie/AlphaSparse | 06bf06a57f9112c2f940741841485243d8073c7c | [
"MIT"
] | 2 | 2022-02-23T09:25:57.000Z | 2022-02-25T08:01:03.000Z | /**
* @brief ict dcu mm bsr test
* @author HPCRC, ICT
*/
#include <hip/hip_runtime_api.h>
#include <rocsparse.h>
#include <stdio.h>
#include <stdlib.h>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace std;
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#include <alpha_spblas_dcu.h>
const char *file;
bool check;
alphasparse_operation_t transA, transB;
rocsparse_operation roctransA, roctransB;
struct alpha_matrix_descr descr;
alphasparse_layout_t layout;
rocsparse_direction roclayout;
ALPHA_INT columns;
// bsr format
ALPHA_INT A_rowsb, A_colsb, nnzb, bs = 2;
ALPHA_INT *bsr_row_ptr, *bsr_row_ptr_end, *bsr_col_index;
float *bsr_values;
// parms for kernel
float *matB, *matC_ict, *matC_roc;
ALPHA_INT C_rows, C_cols, C_k;
ALPHA_INT ldb, ldc;
const float alpha = 2.f;
const float beta = 3.f;
const ALPHA_INT warm_up = 5;
const ALPHA_INT trials = 10;
const int batch_size = 1;
static void roc_mm_dcu()
{
// rocSPARSE handle
rocsparse_handle handle;
rocsparse_create_handle(&handle);
hipDeviceProp_t devProp;
int device_id = 0;
hipGetDevice(&device_id);
hipGetDeviceProperties(&devProp, device_id);
// std::cout << "Device: " << devProp.name << std::endl;
rocsparse_int m = C_rows / bs;
rocsparse_int n = C_cols;
rocsparse_int k = C_k / bs;
rocsparse_int nnz = nnzb * bs * bs;
rocsparse_int nnb = nnzb;
// Generate problem
std::vector<rocsparse_int> hAptr(m + 1);
std::vector<rocsparse_int> hAcol(nnb);
std::vector<float> hAval(nnz);
for (int i = 0; i < m; i++)
hAptr[i] = bsr_row_ptr[i];
hAptr[m] = bsr_row_ptr_end[m - 1];
for (int i = 0; i < nnb; i++) {
hAcol[i] = bsr_col_index[i];
}
for (int i = 0; i < nnz; i++) {
hAval[i] = bsr_values[i];
}
// Offload data to device
rocsparse_int *dAptr = NULL;
rocsparse_int *dAcol = NULL;
float *dAval = NULL;
float *dmatB = NULL;
float *dmatC = NULL;
hipMalloc((void **)&dAptr, sizeof(rocsparse_int) * (m + 1));
hipMalloc((void **)&dAcol, sizeof(rocsparse_int) * nnb);
hipMalloc((void **)&dAval, sizeof(float) * nnz);
hipMalloc((void **)&dmatB, sizeof(float) * C_k * ldb);
hipMalloc((void **)&dmatC, sizeof(float) * C_cols * ldc);
hipMemcpy(dAptr, hAptr.data(), sizeof(rocsparse_int) * (m + 1), hipMemcpyHostToDevice);
hipMemcpy(dAcol, hAcol.data(), sizeof(rocsparse_int) * nnb, hipMemcpyHostToDevice);
hipMemcpy(dAval, hAval.data(), sizeof(float) * nnz, hipMemcpyHostToDevice);
hipMemcpy(dmatB, matB, sizeof(float) * C_k * ldb, hipMemcpyHostToDevice);
hipMemcpy(dmatC, matC_roc, sizeof(float) * C_cols * ldc, hipMemcpyHostToDevice);
float halpha = alpha;
float hbeta = beta;
// Matrix descriptor
rocsparse_mat_descr descrA;
rocsparse_create_mat_descr(&descrA);
// Warm up
for (int i = 0; i < warm_up; ++i) {
// Call rocsparse bsrmm
rocsparse_sbsrmm(handle, roclayout, roctransA, roctransB, m, n, k, nnb, &halpha, descrA, dAval, dAptr, dAcol, bs, dmatB, ldb, &hbeta, dmatC, ldc);
}
// Device synchronization
hipDeviceSynchronize();
// Start time measurement
double time = get_time_us();
// cout << "m:" << m << " n:" << n << " k:" << k << " nnz:" << nnz << endl;
// cout << "ldb:" << ldb << " ldc" << ldc << endl;
// CSR matrix vector multiplication
for (int i = 0; i < trials; ++i) {
for (int i = 0; i < batch_size; ++i) {
// Call rocsparse bsrmm
rocsparse_status x = rocsparse_sbsrmm(
handle, roclayout, roctransA, roctransB, m, n, k, nnb, &halpha, descrA, dAval, dAptr, dAcol, bs, dmatB, ldb, &hbeta, dmatC, ldc);
if (x) {
cout << "status num: \n"
<< x << endl;
exit(-1);
}
}
// Device synchronization
hipDeviceSynchronize();
}
time = (get_time_us() - time) / (trials * batch_size * 1e3);
std::cout << time << std::endl;
hipMemcpy(matC_roc, dmatC, sizeof(float) * C_cols * ldc, hipMemcpyDeviceToHost);
// Clear up on device
hipFree(dAptr);
hipFree(dAcol);
hipFree(dAval);
hipFree(dmatB);
hipFree(dmatC);
rocsparse_destroy_mat_descr(descrA);
rocsparse_destroy_handle(handle);
}
static void alpha_mm_dcu()
{
// rocSPARSE handle
alphasparse_dcu_handle_t handle;
init_handle(&handle);
alphasparse_dcu_get_handle(&handle);
hipDeviceProp_t devProp;
int device_id = 0;
hipGetDevice(&device_id);
hipGetDeviceProperties(&devProp, device_id);
// std::cout << "Device: " << devProp.name << std::endl;
// Generate problem
ALPHA_INT m = C_rows / bs;
ALPHA_INT n = C_cols;
ALPHA_INT k = C_k / bs;
ALPHA_INT nnz = nnzb * bs * bs;
ALPHA_INT nnb = nnzb;
ALPHA_INT *hAptr = (ALPHA_INT *)alpha_malloc(sizeof(ALPHA_INT) * (m + 1));
ALPHA_INT *hAcol = (ALPHA_INT *)alpha_malloc(sizeof(ALPHA_INT) * nnb);
float *hAval = (float *)alpha_malloc(sizeof(float) * nnz);
for (int i = 0; i < m; i++)
hAptr[i] = bsr_row_ptr[i];
hAptr[m] = bsr_row_ptr_end[m - 1];
for (int i = 0; i < nnz; i++) {
hAval[i] = bsr_values[i];
}
for (int i = 0; i < nnb; i++) {
hAcol[i] = bsr_col_index[i];
}
// Offload data to device
ALPHA_INT *dAptr = NULL;
ALPHA_INT *dAcol = NULL;
float *dAval = NULL;
float *dmatB = NULL;
float *dmatC = NULL;
PRINT_IF_HIP_ERROR(hipMalloc((void **)&dAptr, sizeof(ALPHA_INT) * (m + 1)));
PRINT_IF_HIP_ERROR(hipMalloc((void **)&dAcol, sizeof(ALPHA_INT) * nnb));
PRINT_IF_HIP_ERROR(hipMalloc((void **)&dAval, sizeof(float) * nnz));
PRINT_IF_HIP_ERROR(hipMalloc((void **)&dmatB, sizeof(float) * C_k * ldb));
PRINT_IF_HIP_ERROR(hipMalloc((void **)&dmatC, sizeof(float) * C_cols * ldc));
PRINT_IF_HIP_ERROR(hipMemcpy(dAptr, hAptr, sizeof(ALPHA_INT) * (m + 1), hipMemcpyHostToDevice));
PRINT_IF_HIP_ERROR(
hipMemcpy(dAcol, hAcol, sizeof(ALPHA_INT) * nnb, hipMemcpyHostToDevice));
PRINT_IF_HIP_ERROR(
hipMemcpy(dAval, hAval, sizeof(float) * nnz, hipMemcpyHostToDevice));
PRINT_IF_HIP_ERROR(
hipMemcpy(dmatB, matB, sizeof(float) * C_k * ldb, hipMemcpyHostToDevice));
PRINT_IF_HIP_ERROR(hipMemcpy(dmatC, matC_ict, sizeof(float) * C_cols * ldc, hipMemcpyHostToDevice));
float halpha = alpha;
float hbeta = beta;
// Matrix descriptor
alpha_dcu_matrix_descr_t descrA;
alphasparse_dcu_create_mat_descr(&descrA);
// Warm up
for (int i = 0; i < warm_up; ++i) {
// Call alphasparse_dcu bsrmm
alphasparse_dcu_s_bsrmm(handle, layout, transA, transB, m, n, k, nnb, &halpha, descrA, dAval, dAptr, dAcol, bs, dmatB, ldb, &hbeta, dmatC, ldc);
}
// Device synchronization
hipDeviceSynchronize();
// Start time measurement
double time = get_time_us();
// CSR matrix vector multiplication
for (int i = 0; i < trials; ++i) {
for (int i = 0; i < batch_size; ++i) {
// Call alphasparse_dcu bsrmm
alphasparse_dcu_s_bsrmm(handle, layout, transA, transB, m, n, k, nnb, &halpha, descrA, dAval, dAptr, dAcol, bs, dmatB, ldb, &hbeta, dmatC, ldc);
}
// Device synchronization
hipDeviceSynchronize();
}
time = (get_time_us() - time) / (trials * batch_size * 1e3);
std::cout << time << ",";
hipMemcpy(matC_ict, dmatC, sizeof(float) * C_cols * ldc, hipMemcpyDeviceToHost);
// Clear up on device
hipFree(dAptr);
hipFree(dAcol);
hipFree(dAval);
hipFree(dmatB);
hipFree(dmatC);
alphasparse_dcu_destroy_mat_descr(descrA);
alphasparse_dcu_destory_handle(handle);
}
int main(int argc, const char *argv[])
{
// args
args_help(argc, argv);
file = args_get_data_file(argc, argv);
check = args_get_if_check(argc, argv);
transA = alpha_args_get_transA(argc, argv);
transB = alpha_args_get_transB(argc, argv);
descr = alpha_args_get_matrix_descrA(argc, argv);
alphasparse_index_base_t bsr_index;
alphasparse_matrix_t coo, bsr;
ALPHA_INT *coo_row_index, *coo_col_index;
float *coo_values;
// read coo
alpha_read_coo(file, &A_rowsb, &A_colsb, &nnzb, &coo_row_index, &coo_col_index, &coo_values);
columns = args_get_columns(argc, argv, A_colsb); // 默认C是方阵
// 创建coo格式稀疏矩阵
alpha_call_exit(alphasparse_s_create_coo(&coo, ALPHA_SPARSE_INDEX_BASE_ZERO, A_rowsb, A_colsb, nnzb, coo_row_index, coo_col_index, coo_values),
"alphasparse_s_create_coo");
// 将稀疏矩阵从coo格式转换成bsr格式
alpha_call_exit(alphasparse_convert_bsr(
coo, bs, layout, ALPHA_SPARSE_OPERATION_NON_TRANSPOSE, &bsr),
"alphasparse_convert_bsr");
// 获取bsr格式里的数据
alpha_call_exit(
alphasparse_s_export_bsr(bsr, &bsr_index, &layout, &A_rowsb, &A_colsb, &bs, &bsr_row_ptr, &bsr_row_ptr_end, &bsr_col_index, &bsr_values),
"alphasparse_s_export_bsr");
nnzb = bsr_row_ptr_end[A_rowsb - 1];
if (layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
roclayout = rocsparse_direction_row;
else
roclayout = rocsparse_direction_column;
if (transA == ALPHA_SPARSE_OPERATION_NON_TRANSPOSE) {
if (transB == ALPHA_SPARSE_OPERATION_NON_TRANSPOSE) {
C_rows = A_rowsb * bs;
C_cols = columns;
C_k = A_colsb * bs;
ldb = A_colsb * bs;
ldc = A_rowsb * bs;
} else // transB, conjB, B转置就用方阵测
{
C_rows = A_rowsb * bs;
C_cols = A_colsb * bs;
C_k = A_colsb * bs;
columns = ldb = A_colsb * bs;
ldc = A_rowsb * bs;
}
} else // transA, conjA
{
if (transB == ALPHA_SPARSE_OPERATION_NON_TRANSPOSE) {
C_rows = A_colsb * bs;
C_cols = columns;
C_k = A_rowsb * bs;
ldb = C_cols;
ldc = C_rows;
} else // transB, conjB, B转置就用方阵测
{
C_rows = A_rowsb * bs;
C_cols = A_colsb * bs;
C_k = A_rowsb * bs;
columns = ldb = C_cols;
ldc = C_rows;
}
}
if (transA == ALPHA_SPARSE_OPERATION_NON_TRANSPOSE)
roctransA = rocsparse_operation_none;
else if (transA == ALPHA_SPARSE_OPERATION_TRANSPOSE)
roctransA = rocsparse_operation_transpose;
else
roctransA = rocsparse_operation_conjugate_transpose;
if (transB == ALPHA_SPARSE_OPERATION_NON_TRANSPOSE)
roctransB = rocsparse_operation_none;
else if (transB == ALPHA_SPARSE_OPERATION_TRANSPOSE)
roctransB = rocsparse_operation_transpose;
else
roctransB = rocsparse_operation_conjugate_transpose;
// init B C
matB = (float *)alpha_malloc(C_k * ldb * sizeof(float));
matC_ict = (float *)alpha_malloc(C_cols * ldc * sizeof(float));
matC_roc = (float *)alpha_malloc(C_cols * ldc * sizeof(float));
alpha_fill_random_s(matB, 0, C_k * ldb);
alpha_fill_random_s(matC_ict, 1, C_cols * ldc);
alpha_fill_random_s(matC_roc, 1, C_cols * ldc);
alpha_mm_dcu();
if (check) {
roc_mm_dcu();
check_s((float *)matC_roc, C_cols * ldc, (float *)matC_ict, C_cols * ldc);
}
alpha_free(matB);
alpha_free(matC_ict);
alpha_free(matC_roc);
alpha_free(coo_row_index);
alpha_free(coo_col_index);
alpha_free(coo_values);
return 0;
}
#ifdef __cplusplus
}
#endif /*__cplusplus */
| 31.227273 | 156 | 0.623855 | [
"vector"
] |
f678284de4b133ea289275c9b3a41d89073b051a | 34,897 | cpp | C++ | roxie/udplib/udptrs.cpp | rpastrana/HPCC-Platform | 489454bd326ed6a39228c81f5d837c276724c022 | [
"Apache-2.0"
] | null | null | null | roxie/udplib/udptrs.cpp | rpastrana/HPCC-Platform | 489454bd326ed6a39228c81f5d837c276724c022 | [
"Apache-2.0"
] | null | null | null | roxie/udplib/udptrs.cpp | rpastrana/HPCC-Platform | 489454bd326ed6a39228c81f5d837c276724c022 | [
"Apache-2.0"
] | null | null | null | /*##############################################################################
HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
############################################################################## */
#include "udplib.hpp"
#include "udpsha.hpp"
#include "udptrs.hpp"
#include "udpipmap.hpp"
#include "jsocket.hpp"
#include "jlog.hpp"
#include "roxie.hpp"
#ifdef _WIN32
#include <winsock.h>
#else
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#endif
#include <math.h>
#include <atomic>
// Tunables for the UDP sender side; configured externally before start-up.
unsigned udpOutQsPriority = 0;            // 0 = round-robin between queues; >0 = weighted priority scheduling
unsigned udpMaxRetryTimedoutReqs = 0;     // 0 means off (keep retrying forever)
unsigned udpRequestToSendTimeout = 0;     // value in milliseconds - 0 means calculate from query timeouts
unsigned udpRequestToSendAckTimeout = 10; // value in milliseconds
bool udpSnifferEnabled = true;            // whether to multicast busy/idle sniff messages around data sends
using roxiemem::DataBuffer;
// MORE - why use DataBuffers on output side?? We could use zeroCopy techniques if we had a dedicated memory area.
// But using them on this side means we guarantee that the packets fit into databuffers on the other side... But so would matching their size
/*
*
* There are 3 threads running to manage the data transfer from slave back to server:
* send_resend_flow
* - checks periodically that nothing is waiting for a "request to send" that timed out
* send_receive_flow
* - waits on socket receiving "ok_to_send" packets from servers
* - updates state of relevant receivers
* - pushes permission tokens to a queue
* send_data
* - waits on queue of permission tokens
* - broadcasts "busy"
* - writes data to server
* - broadcasts "no longer busy"
* - sends "completed" or "completed but I want to send more" flow message to server
*
* Queueing up data packets is done by the slave worker threads.
* *
*
* Data races to watch for
* 1. Two slave threads add data at same time - only one should sent rts (use atomic_inc for the count)
* 2. We check for timeout and resend rts or fail just as permission comes in
* - resend rts is harmless ?
* - fail is acceptable
* 3. After sending data, we need to decide whether to set state to 'pending' (and send rts) or empty. If we read count, decide it's zero
* and then (before we set state) someone adds data (and sends rts), we must not set state to empty. CAS to set state empty only if
* it's sending_data perhaps?
* 4. While sending data, someone adds new data. They need to send rts and set state to pending whether empty or sending_data
* 5. Do we need sending_data state? Is it the same as empty, really? Is empty the same as 'count==0' ? Do we even need state?
* - send rts whenever incrementing count from zero
* - resend rts if count is non-zero and timed out
* - resend rts if we send data but there is some remaining
*/
// One entry per destination (server) node. Owns the per-destination flow
// control state, the per-priority output queues, and the two connected UDP
// sockets (flow-control and data) used to push result packets to that node.
// Instances live in the sender's IpMapOf table and are shared between the
// worker threads that queue data and the sender threads that drain it.
class UdpReceiverEntry : public IUdpReceiverEntry
{
private:
    queue_t *output_queue = nullptr;      // array of numQueues queues (one per priority level)
    bool initialized = false;
    const bool isLocal = false;           // true if the destination is this machine
    ISocket *send_flow_socket = nullptr;  // connected socket for flow-control messages
    ISocket *data_socket = nullptr;       // connected socket for data packets
    const unsigned numQueues;
    int current_q = 0;
    int currentQNumPkts = 0;   // Current Queue Number of Consecutive Processed Packets.
    int *maxPktsPerQ = nullptr;  // to minimise power function re-calc for every packet

    // Send a single flow-control message (request_to_send etc.) to this
    // receiver. Failures are logged and swallowed - flow control is
    // best-effort; the resend thread retries if no acknowledgement arrives.
    void sendRequest(flowType::flowCmd cmd, unsigned packets)
    {
        UdpRequestToSendMsg msg = { cmd, static_cast<unsigned short>(packets), sourceIP };
        try
        {
            if (udpTraceLevel > 3)
            {
                StringBuffer s;
                DBGLOG("UdpSender: sending flowType::%s msg to node=%s", flowType::name(cmd), ip.getIpText(s).str());
            }
            send_flow_socket->write(&msg, sizeof(UdpRequestToSendMsg));
        }
        catch(IException *e)
        {
            StringBuffer s;
            DBGLOG("UdpSender: sendRequest write failed - %s", e->errorMessage(s).str());
            e->Release();
        }
        catch (...)
        {
            DBGLOG("UdpSender: sendRequest write failed - unknown error");
        }
    }

    const IpAddress sourceIP;  // our own address, stamped into every flow message

public:
    const IpAddress ip;        // destination node's address
    unsigned timeouts = 0;     // Number of consecutive timeouts
    unsigned requestExpiryTime = 0;  // msTick deadline for outstanding flow request; 0 = none outstanding

    // Predicate matching a queued DataBuffer against a UdpPacketHeader key
    // on (ruid, msgId); used by dataQueued/removeData.
    static bool comparePacket(const void *pkData, const void *key)
    {
        UdpPacketHeader *dataHdr = (UdpPacketHeader*) ((DataBuffer*)pkData)->data;
        UdpPacketHeader *keyHdr = (UdpPacketHeader*) key;
        return ( (dataHdr->ruid == keyHdr->ruid) && (dataHdr->msgId == keyHdr->msgId) );
    }

    std::atomic<unsigned> packetsQueued = { 0 };  // total packets across all priority queues

    // Called after a burst of data has been sent - either asks permission to
    // send more, or tells the receiver we are done.
    void sendDone(unsigned packets)
    {
        bool dataRemaining = packetsQueued.load(std::memory_order_relaxed);
        // If dataRemaining says 0, but someone adds a row in this window, the request_to_send will be sent BEFORE the send_completed
        // So long as receiver handles that, are we good?
        if (dataRemaining)
        {
            requestExpiryTime = msTick() + udpRequestToSendAckTimeout;
            sendRequest(flowType::request_to_send_more, packets);
        }
        else
        {
            requestExpiryTime = 0;
            sendRequest(flowType::send_completed, packets);
        }
        timeouts = 0;
    }

    // Ask the receiver for permission to send; arms the ack-timeout timer.
    void requestToSend()
    {
        requestExpiryTime = msTick() + udpRequestToSendAckTimeout;
        sendRequest(flowType::request_to_send, 0);
    }

    // Receiver acknowledged our request - extend the deadline to the (longer)
    // permission timeout while we wait for ok_to_send.
    void requestAcknowledged()
    {
        if (requestExpiryTime)
            requestExpiryTime = msTick() + udpRequestToSendTimeout;
    }

    // MORE - consider where/if we need critsecs in here!
    // Drain up to permit.max_data packets from the queues and write them to
    // the data socket, optionally rate-limited by the token bucket.
    // Returns the total payload bytes sent.
    unsigned sendData(const UdpPermitToSendMsg &permit, TokenBucket *bucket)
    {
        requestExpiryTime = 0;
        unsigned maxPackets = permit.max_data;
        std::vector<DataBuffer *> toSend;
        unsigned totalSent = 0;
        while (toSend.size() < maxPackets && packetsQueued.load(std::memory_order_relaxed))
        {
            DataBuffer *buffer = popQueuedData();
            if (buffer) // Aborted slave queries leave NULL records on queue
            {
                UdpPacketHeader *header = (UdpPacketHeader*) buffer->data;
                toSend.push_back(buffer);
                totalSent += header->length;
#if defined(__linux__) || defined(__APPLE__)
                if (isLocal && (totalSent > 100000)) // Avoids sending too fast to local node, for reasons lost in the mists of time
                    break;
#endif
            }
        }
        for (DataBuffer *buffer: toSend)
        {
            UdpPacketHeader *header = (UdpPacketHeader*) buffer->data;
            unsigned length = header->length;
            if (bucket)
            {
                MTIME_SECTION(queryActiveTimer(), "bucket_wait");
                bucket->wait((length / 1024)+1);
            }
            try
            {
                data_socket->write(buffer->data, length);
            }
            catch(IException *e)
            {
                StringBuffer s;
                DBGLOG("UdpSender: write exception - write(%p, %u) - %s", buffer->data, length, e->errorMessage(s).str());
                e->Release();
            }
            catch(...)
            {
                DBGLOG("UdpSender: write exception - unknown exception");
            }
            ::Release(buffer);
        }
        sendDone(toSend.size());
        return totalSent;
    }

    bool dataQueued(const UdpPacketHeader &key)
    {
        // Used when a retry packet is received, to determine whether the query is in fact completed
        // but just stuck in transit queues
        if (packetsQueued.load(std::memory_order_relaxed))
        {
            for (unsigned i = 0; i < numQueues; i++)
            {
                if (output_queue[i].dataQueued(&key, &comparePacket))
                    return true;
            }
        }
        return false;
    }

    bool removeData(void *key, PKT_CMP_FUN pkCmpFn)
    {
        // Used after receiving an abort, to avoid sending data that is no longer required
        bool anyRemoved = false;
        if (packetsQueued.load(std::memory_order_relaxed))
        {
            // NOTE - removeData replaces entries by null (so value of packetsQueued is not affected)
            for (unsigned i = 0; i < numQueues; i++)
            {
                if (output_queue[i].removeData(key, pkCmpFn))
                    anyRemoved = true;
            }
        }
        return anyRemoved;
    }

    void abort()
    {
        // Called if too many timeouts on a request to send
        if (udpTraceLevel > 3)
        {
            StringBuffer s;
            DBGLOG("UdpSender: abort sending queued data to node=%s", ip.getIpText(s).str());
        }
        timeouts = 0;
        requestExpiryTime = 0;
        removeData(nullptr, nullptr);
    }

    // Queue a packet on the given priority queue; the first packet queued
    // (transition 0 -> 1) triggers a request_to_send to the receiver.
    inline void pushData(unsigned queue, DataBuffer *buffer)
    {
        output_queue[queue].pushOwn(buffer);
        if (!packetsQueued++)
            requestToSend();
    }

    // Pop the next packet honouring queue priorities: with udpOutQsPriority
    // set, higher queues get up to maxPktsPerQ consecutive packets before
    // yielding; otherwise plain round-robin. Caller guarantees data exists.
    DataBuffer *popQueuedData()
    {
        DataBuffer *buffer;
        while (1)
        {
            for (unsigned i = 0; i < numQueues; i++)
            {
                if (udpOutQsPriority)
                {
                    if (output_queue[current_q].empty())
                    {
                        if (udpTraceLevel >= 5)
                            DBGLOG("UdpSender: ---------- Empty Q %d", current_q);
                        currentQNumPkts = 0;
                        current_q = (current_q + 1) % numQueues;
                    }
                    else
                    {
                        buffer = output_queue[current_q].pop();
                        currentQNumPkts++;
                        if (udpTraceLevel >= 5)
                            DBGLOG("UdpSender: ---------- Packet from Q %d", current_q);
                        if (currentQNumPkts >= maxPktsPerQ[current_q])
                        {
                            currentQNumPkts = 0;
                            current_q = (current_q + 1) % numQueues;
                        }
                        packetsQueued--;
                        return buffer;
                    }
                }
                else
                {
                    current_q = (current_q + 1) % numQueues;
                    if (!output_queue[current_q].empty())
                    {
                        packetsQueued--;
                        return output_queue[current_q].pop();
                    }
                }
            }
            // If we get here, it suggests we were told to get a buffer but no queue has one
            // Should never happen
            MilliSleep(10);
            DBGLOG("UdpSender: ------------- this code should never execute --------------- ");
        }
    }

    // Connect the flow and data sockets and allocate the priority queues.
    // A null ip creates an inert placeholder entry (no sockets, no queues).
    UdpReceiverEntry(const IpAddress &_ip, const IpAddress &_sourceIP, unsigned _numQueues, unsigned _queueSize, unsigned _sendFlowPort, unsigned _dataPort)
        : ip (_ip), sourceIP(_sourceIP), numQueues(_numQueues), isLocal(_ip.isLocal())
    {
        assert(!initialized);
        assert(numQueues > 0);
        if (!ip.isNull())
        {
            try
            {
                SocketEndpoint sendFlowEp(_sendFlowPort, ip);
                SocketEndpoint dataEp(_dataPort, ip);
                send_flow_socket = ISocket::udp_connect(sendFlowEp);
                data_socket = ISocket::udp_connect(dataEp);
                if (isLocal)
                {
                    data_socket->set_send_buffer_size(udpLocalWriteSocketSize);
                    if (udpTraceLevel > 0)
                        DBGLOG("UdpSender: sendbuffer set for local socket (size=%d)", udpLocalWriteSocketSize);
                }
            }
            catch(IException *e)
            {
                StringBuffer error, ipstr;
                DBGLOG("UdpSender: udp_connect failed %s %s", ip.getIpText(ipstr).str(), e->errorMessage(error).str());
                throw;
            }
            catch(...)
            {
                StringBuffer ipstr;
                DBGLOG("UdpSender: udp_connect failed %s %s", ip.getIpText(ipstr).str(), "Unknown error");
                throw;
            }
            output_queue = new queue_t[numQueues];
            maxPktsPerQ = new int[numQueues];
            for (unsigned j = 0; j < numQueues; j++)
            {
                output_queue[j].set_queue_size(_queueSize);
                // Queue j may deliver udpOutQsPriority^(numQueues-j-1) consecutive packets
                maxPktsPerQ[j] = (int) pow((double)udpOutQsPriority, (double)numQueues - j - 1);
            }
            initialized = true;
            if (udpTraceLevel > 0)
            {
                StringBuffer ipStr;
                DBGLOG("UdpSender: added entry for ip=%s to receivers table - send_flow_port=%d", ip.getIpText(ipStr).str(), _sendFlowPort);
            }
        }
    }

    ~UdpReceiverEntry()
    {
        if (send_flow_socket) send_flow_socket->Release();
        if (data_socket) data_socket->Release();
        if (output_queue) delete [] output_queue;
        if (maxPktsPerQ) delete [] maxPktsPerQ;
    }
};
// Coordinates the three sender-side threads (resend timer, flow-message
// receiver, data writer) and the per-destination UdpReceiverEntry table.
class CSendManager : implements ISendManager, public CInterface
{
    // Thread whose start() does not return until the thread body has begun
    // running, so derived classes can rely on 'running' being observed.
    class StartedThread : public Thread
    {
    private:
        Semaphore started;
        virtual int run()
        {
            started.signal();   // release the starter before doing real work
            return doRun();
        }
    protected:
        bool running;
    public:
        StartedThread(const char *name) : Thread(name)
        {
            running = false;
        }
        ~StartedThread()
        {
            running = false;
            join();
        }
        virtual void start()
        {
            running = true;
            Thread::start();
            started.wait();
        }
        virtual int doRun() = 0;
    };

    class send_resend_flow : public StartedThread
    {
        // Check if any senders have timed out
        CSendManager &parent;
        Semaphore terminated;
        virtual int doRun() override
        {
            if (udpTraceLevel > 0)
                DBGLOG("UdpSender: send_resend_flow started");
            unsigned timeout = udpRequestToSendTimeout;
            while (running)
            {
                if (terminated.wait(timeout) || !running)
                    break;
                unsigned now = msTick();
                timeout = udpRequestToSendTimeout;
                for (auto&& dest: parent.receiversTable)
                {
                    unsigned expireTime = dest.requestExpiryTime;
                    if (expireTime)
                    {
                        // NOTE(review): 'expireTime < now' is not wrap-safe if msTick wraps - confirm
                        if (expireTime < now)
                        {
                            dest.timeouts++;
                            {
                                StringBuffer s;
                                EXCLOG(MCoperatorError,"ERROR: UdpSender: timed out %i times (max=%i) waiting ok_to_send msg from node=%s",
                                       dest.timeouts, udpMaxRetryTimedoutReqs, dest.ip.getIpText(s).str());
                            }
                            // 0 (zero) value of udpMaxRetryTimedoutReqs means NO limit on retries
                            if (udpMaxRetryTimedoutReqs && (dest.timeouts >= udpMaxRetryTimedoutReqs))
                                dest.abort();
                            else
                                dest.requestToSend();
                        }
                        else if (expireTime-now < timeout)
                            timeout = expireTime-now;  // wake up at the earliest pending deadline
                    }
                }
            }
            return 0;
        }
    public:
        send_resend_flow(CSendManager &_parent)
            : StartedThread("UdpLib::send_resend_flow"), parent(_parent)
        {
            start();
        }
        ~send_resend_flow()
        {
            running = false;
            terminated.signal();
            join();
        }
    };

    // Listens on the client flow port for ok_to_send / request_received
    // messages from receivers and dispatches them.
    class send_receive_flow : public StartedThread
    {
        CSendManager &parent;
        int receive_port;
        Owned<ISocket> flow_socket;
    public:
        send_receive_flow(CSendManager &_parent, int r_port) : StartedThread("UdpLib::send_receive_flow"), parent(_parent)
        {
            receive_port = r_port;
            if (check_max_socket_read_buffer(udpFlowSocketsSize) < 0)
                throw MakeStringException(ROXIE_UDP_ERROR, "System Socket max read buffer is less than %i", udpFlowSocketsSize);
            flow_socket.setown(ISocket::udp_create(receive_port));
            flow_socket->set_receive_buffer_size(udpFlowSocketsSize);
            size32_t actualSize = flow_socket->get_receive_buffer_size();
            DBGLOG("UdpSender: rcv_flow_socket created port=%d sockbuffsize=%d actualsize=%d", receive_port, udpFlowSocketsSize, actualSize);
            start();
        }
        ~send_receive_flow()
        {
            running = false;
            if (flow_socket)
                flow_socket->close();   // unblocks the read in doRun
            join();
        }
        virtual int doRun()
        {
            if (udpTraceLevel > 0)
                DBGLOG("UdpSender: send_receive_flow started");
#ifdef __linux__
            setLinuxThreadPriority(2);
#endif
            while(running)
            {
                UdpPermitToSendMsg f = { flowType::ok_to_send, 0, { } };
                while (running)
                {
                    try
                    {
                        unsigned int res;
                        flow_socket->read(&f, sizeof(f), sizeof(f), res, 5);
                        assert(res==sizeof(f));
                        switch (f.cmd)
                        {
                        case flowType::ok_to_send:
                            if (udpTraceLevel > 1)
                            {
                                StringBuffer s;
                                DBGLOG("UdpSender: received ok_to_send msg max %d packets from node=%s", f.max_data, f.destNode.getTraceText(s).str());
                            }
                            parent.data->ok_to_send(f);   // hand the permit to the data thread
                            break;
                        case flowType::request_received:
                            if (udpTraceLevel > 1)
                            {
                                StringBuffer s;
                                DBGLOG("UdpSender: received request_received msg from node=%s", f.destNode.getTraceText(s).str());
                            }
                            parent.receiversTable[f.destNode.getNodeAddress()].requestAcknowledged();
                            break;
                        default:
                            DBGLOG("UdpSender: received unknown flow message type=%d", f.cmd);
                        }
                    }
                    catch (IException *e)
                    {
                        // timeouts are expected (5ms read timeout); only log real errors
                        if (running && e->errorCode() != JSOCKERR_timeout_expired)
                        {
                            StringBuffer s;
                            DBGLOG("UdpSender: send_receive_flow::read failed port=%i %s", receive_port, e->errorMessage(s).str());
                        }
                        e->Release();
                    }
                    catch (...)
                    {
                        if (running)
                            DBGLOG("UdpSender: send_receive_flow::unknown exception");
                        MilliSleep(0);
                    }
                }
            }
            return 0;
        }
    };

    // Pops permits from a queue and writes the corresponding queued data to
    // the destination, multicasting busy/idle sniff messages around the send.
    class send_data : public StartedThread
    {
        CSendManager &parent;
        ISocket *sniffer_socket;
        SocketEndpoint ep;                 // multicast endpoint for sniff messages
        simple_queue<UdpPermitToSendMsg> send_queue;
        Linked<TokenBucket> bucket;        // optional rate limiter

        // Multicast a busy/idle sniff message; connects lazily on first use.
        void send_sniff(sniffType::sniffCmd busy)
        {
            sniff_msg msg = { busy, parent.myIP };
            try
            {
                if (!sniffer_socket)
                {
                    sniffer_socket = ISocket::multicast_connect(ep, multicastTTL);
                    if (udpTraceLevel > 1)
                    {
                        StringBuffer url;
                        DBGLOG("UdpSender: multicast_connect ok to %s", ep.getUrlStr(url).str());
                    }
                }
                sniffer_socket->write(&msg, sizeof(msg));
                if (udpTraceLevel > 1)
                    DBGLOG("UdpSender: sent busy=%d multicast msg", busy);
            }
            catch(IException *e)
            {
                StringBuffer s;
                StringBuffer url;
                DBGLOG("UdpSender: multicast_connect or write failed ep=%s - %s", ep.getUrlStr(url).str(), e->errorMessage(s).str());
                e->Release();
            }
            catch(...)
            {
                StringBuffer url;
                DBGLOG("UdpSender: multicast_connect or write unknown exception - ep=%s", ep.getUrlStr(url).str());
                if (sniffer_socket)
                {
                    sniffer_socket->Release();
                    sniffer_socket = NULL;   // force reconnect next time
                }
            }
        }
    public:
        send_data(CSendManager &_parent, int s_port, const IpAddress &snif_ip, TokenBucket *_bucket)
            : StartedThread("UdpLib::send_data"), parent(_parent), bucket(_bucket), ep(s_port, snif_ip), send_queue(100) // MORE - send q size should be configurable and/or related to size of cluster?
        {
            sniffer_socket = NULL;
            if (check_max_socket_write_buffer(udpLocalWriteSocketSize) < 0)
                throw MakeStringException(ROXIE_UDP_ERROR, "System Socket max write buffer is less than %i", udpLocalWriteSocketSize);
            start();
        }
        ~send_data()
        {
            running = false;
            UdpPermitToSendMsg dummy;
            send_queue.push(dummy);   // wake the thread so it can observe !running
            join();
            if (sniffer_socket)
                sniffer_socket->Release();
        }
        // Queue a permit for processing; drops it (logged) if the queue stays
        // full for 15 units - the receiver will re-grant on timeout.
        bool ok_to_send(const UdpPermitToSendMsg &msg)
        {
            if (send_queue.push(msg, 15))
                return true;
            else
            {
                StringBuffer s;
                DBGLOG("UdpSender: push() failed - ignored ok_to_send msg - node=%s, maxData=%u", msg.destNode.getTraceText(s).str(), msg.max_data);
                return false;
            }
        }
        virtual int doRun()
        {
            if (udpTraceLevel > 0)
                DBGLOG("UdpSender: send_data started");
#ifdef __linux__
            setLinuxThreadPriority(1); // MORE - windows? Is this even a good idea? Must not send faster than receiver can pull off the socket
#endif
            UdpPermitToSendMsg permit;
            while (running)
            {
                send_queue.pop(permit);
                if (!running)
                    return 0;
                if (udpSnifferEnabled)
                    send_sniff(sniffType::busy);
                UdpReceiverEntry &receiverInfo = parent.receiversTable[permit.destNode.getNodeAddress()];
                unsigned payload = receiverInfo.sendData(permit, bucket);
                if (udpSnifferEnabled)
                    send_sniff(sniffType::idle);
                if (udpTraceLevel > 1)
                {
                    StringBuffer s;
                    DBGLOG("UdpSender: sent %u bytes to node=%s", payload, permit.destNode.getTraceText(s).str());
                }
            }
            if (udpTraceLevel > 0)
                DBGLOG("UdpSender: send_data stopped");
            return 0;
        }
    };

    friend class send_resend_flow;
    friend class send_receive_flow;
    friend class send_data;

    unsigned numQueues;
    IpMapOf<UdpReceiverEntry> receiversTable;   // lazily creates an entry per destination IP
    send_resend_flow *resend_flow;
    send_receive_flow *receive_flow;
    send_data *data;
    Linked<TokenBucket> bucket;
    IpAddress myIP;
    std::atomic<unsigned> msgSeq{0};

    // Next non-zero message sequence number (0 is reserved, so skip it on wrap).
    inline unsigned getNextMessageSequence()
    {
        unsigned res;
        do
        {
            res = ++msgSeq;
        } while (unlikely(!res));
        return res;
    }

public:
    IMPLEMENT_IINTERFACE;

    CSendManager(int server_flow_port, int data_port, int client_flow_port, int sniffer_port, const IpAddress &sniffer_multicast_ip, int q_size, int _numQueues, const IpAddress &_myIP, TokenBucket *_bucket)
        : bucket(_bucket),
          myIP(_myIP),
          receiversTable([_myIP, _numQueues, q_size, server_flow_port, data_port](const IpAddress &ip) { return new UdpReceiverEntry(ip, _myIP, _numQueues, q_size, server_flow_port, data_port);})
    {
#ifndef _WIN32
        setpriority(PRIO_PROCESS, 0, -3);   // boost sender process priority slightly
#endif
        numQueues = _numQueues;
        data = new send_data(*this, sniffer_port, sniffer_multicast_ip, bucket);
        resend_flow = new send_resend_flow(*this);
        receive_flow = new send_receive_flow(*this, client_flow_port);
    }

    ~CSendManager()
    {
        delete resend_flow;
        delete receive_flow;
        delete data;
    }

    // Interface ISendManager
    virtual void writeOwn(IUdpReceiverEntry &receiver, DataBuffer *buffer, unsigned len, unsigned queue) override
    {
        // NOTE: takes ownership of the DataBuffer
        assert(queue < numQueues);
        static_cast<UdpReceiverEntry &>(receiver).pushData(queue, buffer);
    }

    virtual IMessagePacker *createMessagePacker(ruid_t ruid, unsigned sequence, const void *messageHeader, unsigned headerSize, const ServerIdentifier &destNode, int queue) override
    {
        const IpAddress &dest = destNode.getNodeAddress();
        return ::createMessagePacker(ruid, sequence, messageHeader, headerSize, *this, receiversTable[dest], myIP, getNextMessageSequence(), queue);
    }

    virtual bool dataQueued(ruid_t ruid, unsigned msgId, const ServerIdentifier &destNode) override
    {
        const IpAddress &dest = destNode.getNodeAddress();
        UdpPacketHeader pkHdr;
        pkHdr.ruid = ruid;
        pkHdr.msgId = msgId;
        return receiversTable[dest].dataQueued(pkHdr);
    }

    virtual bool abortData(ruid_t ruid, unsigned msgId, const ServerIdentifier &destNode)
    {
        const IpAddress &dest = destNode.getNodeAddress();
        UdpPacketHeader pkHdr;
        pkHdr.ruid = ruid;
        pkHdr.msgId = msgId;
        return receiversTable[dest].removeData((void*) &pkHdr, &UdpReceiverEntry::comparePacket);
    }

    virtual bool allDone()
    {
        // Used for some timing tests only
        for (auto&& dest: receiversTable)
        {
            if (dest.packetsQueued.load(std::memory_order_relaxed))
                return false;
        }
        return true;
    }
};
// Factory for the sender-side manager. Requires the local node address to
// have been configured (myNode) before being called.
ISendManager *createSendManager(int server_flow_port, int data_port, int client_flow_port, int sniffer_port, const IpAddress &sniffer_multicast_ip, int queue_size_pr_server, int queues_pr_server, TokenBucket *rateLimiter)
{
    const IpAddress &localAddress = myNode.getNodeAddress();
    assertex(!localAddress.isNull());
    return new CSendManager(server_flow_port, data_port, client_flow_port, sniffer_port,
                            sniffer_multicast_ip, queue_size_pr_server, queues_pr_server,
                            localAddress, rateLimiter);
}
// Packs a logical result message into a sequence of fixed-size UDP packets
// (DataBuffers) and hands each completed packet to the ISendManager. Rows
// that fit in one packet are written in place; oversized rows go through a
// temporary heap buffer and are split across packets.
class CMessagePacker : implements IMessagePacker, public CInterface
{
    ISendManager &parent;
    IUdpReceiverEntry &receiver;
    UdpPacketHeader package_header;   // template header copied into every packet
    DataBuffer *part_buffer;          // packet currently being filled (may be null)
    unsigned data_buffer_size;        // usable payload bytes in part_buffer
    unsigned data_used;               // payload bytes written so far into part_buffer
    void *mem_buffer;                 // overflow buffer for rows larger than one packet
    unsigned mem_buffer_size;
    unsigned totalSize;               // total payload bytes put so far
    bool packed_request;              // true if last getBuffer() returned in-packet space
    MemoryBuffer metaInfo;            // trailing metadata, appended on final flush
    bool last_message_done;
    int queue_number;

public:
    IMPLEMENT_IINTERFACE;

    // Writes the message header (preceded by its 16-bit length) into the
    // first packet; assumes it fits in a single packet.
    CMessagePacker(ruid_t ruid, unsigned msgId, const void *messageHeader, unsigned headerSize, ISendManager &_parent, IUdpReceiverEntry &_receiver, const IpAddress & _sourceNode, unsigned _msgSeq, unsigned _queue)
        : parent(_parent), receiver(_receiver)
    {
        queue_number = _queue;
        package_header.length = 0;          // filled in with proper value later
        package_header.metalength = 0;
        package_header.ruid = ruid;
        package_header.msgId = msgId;
        package_header.pktSeq = 0;
        package_header.node.setIp(_sourceNode);
        package_header.msgSeq = _msgSeq;
        packed_request = false;
        part_buffer = bufferManager->allocate();
        data_buffer_size = DATA_PAYLOAD - sizeof(UdpPacketHeader);
        assertex(data_buffer_size >= headerSize + sizeof(unsigned short));
        *(unsigned short *) (&part_buffer->data[sizeof(UdpPacketHeader)]) = headerSize;
        memcpy(&part_buffer->data[sizeof(UdpPacketHeader)+sizeof(unsigned short)], messageHeader, headerSize);
        data_used = headerSize + sizeof(unsigned short);
        mem_buffer = 0;
        mem_buffer_size = 0;
        last_message_done = false;
        totalSize = 0;
    }

    ~CMessagePacker()
    {
        if (part_buffer)
            part_buffer->Release();
        if (mem_buffer) free (mem_buffer);
    }

    // Reserve space for a row of up to 'len' bytes. Variable-size rows get an
    // extra hidden RecordLengthType prefix. Returns either in-packet space
    // (packed_request=true) or the overflow heap buffer for oversized rows.
    virtual void *getBuffer(unsigned len, bool variable) override
    {
        if (variable)
            len += sizeof(RecordLengthType);
        if (DATA_PAYLOAD - sizeof(UdpPacketHeader) < len)
        {
            // Won't fit in one, so allocate temp location
            if (mem_buffer_size < len)
            {
                free(mem_buffer);
                mem_buffer = checked_malloc(len, ROXIE_MEMORY_ERROR);
                mem_buffer_size = len;
            }
            packed_request = false;
            if (variable)
                return ((char *) mem_buffer) + sizeof(RecordLengthType);
            else
                return mem_buffer;
        }
        if (part_buffer && ((data_buffer_size - data_used) < len))
            flush(false); // Note that we never span records that are small enough to fit - this can result in significant wastage if record just over DATA_PAYLOAD/2
        if (!part_buffer)
        {
            part_buffer = bufferManager->allocate();
            data_buffer_size = DATA_PAYLOAD - sizeof(UdpPacketHeader);
        }
        packed_request = true;
        if (variable)
            return &part_buffer->data[data_used + sizeof(UdpPacketHeader) + sizeof(RecordLengthType)];
        else
            return &part_buffer->data[data_used + sizeof(UdpPacketHeader)];
    }

    // Commit 'len' bytes previously obtained from getBuffer. In-packet data
    // just advances the cursor; overflow data is copied in and split across
    // as many packets as needed.
    virtual void putBuffer(const void *buf, unsigned len, bool variable) override
    {
        if (variable)
        {
            assertex(len < MAX_RECORD_LENGTH);
            buf = ((char *) buf) - sizeof(RecordLengthType);
            *(RecordLengthType *) buf = len;   // fill in the hidden length prefix
            len += sizeof(RecordLengthType);
        }
        totalSize += len;
        if (packed_request)
        {
            assert(len <= (data_buffer_size - data_used));
            data_used += len;
        }
        else
        {
            while (len)
            {
                if (!part_buffer)
                {
                    part_buffer = bufferManager->allocate();
                    data_buffer_size = DATA_PAYLOAD - sizeof(UdpPacketHeader);
                    data_used = 0;
                }
                unsigned chunkLen = data_buffer_size - data_used;
                if (chunkLen > len)
                    chunkLen = len;
                memcpy(&part_buffer->data[sizeof(UdpPacketHeader)+data_used], buf, chunkLen);
                data_used += chunkLen;
                len -= chunkLen;
                buf = &(((char*)buf)[chunkLen]);
                if (len)
                    flush(false);   // packet full - send it and continue in a fresh one
            }
        }
    }

    virtual void sendMetaInfo(const void *buf, unsigned len) override {
        metaInfo.append(len, buf);
    }

    virtual void flush() override { flush(true); }

    virtual unsigned size() const override
    {
        return totalSize;
    }

private:
    // flush(false): send the current packet because it is full.
    // flush(true): final flush - append the accumulated metadata (splitting
    // across packets if needed) and mark the last packet UDP_PACKET_COMPLETE.
    void flush(bool last_msg)
    {
        if (!last_message_done && last_msg)
        {
            last_message_done = true;
            if (!part_buffer)
                part_buffer = bufferManager->allocate();
            const char *metaData = metaInfo.toByteArray();
            unsigned metaLength = metaInfo.length();
            unsigned maxMetaLength = DATA_PAYLOAD - (sizeof(UdpPacketHeader) + data_used);
            while (metaLength > maxMetaLength)
            {
                memcpy(&part_buffer->data[sizeof(UdpPacketHeader)+data_used], metaData, maxMetaLength);
                put_package(part_buffer, data_used, maxMetaLength);
                metaLength -= maxMetaLength;
                metaData += maxMetaLength;
                data_used = 0;
                maxMetaLength = DATA_PAYLOAD - sizeof(UdpPacketHeader);
                part_buffer = bufferManager->allocate();
            }
            memcpy(&part_buffer->data[sizeof(UdpPacketHeader)+data_used], metaData, metaLength);
            package_header.pktSeq |= UDP_PACKET_COMPLETE;
            put_package(part_buffer, data_used, metaLength);
        }
        else if (part_buffer)
        {
            // Just flush current - used when no room for current row
            if (data_used)
                put_package(part_buffer, data_used, 0); // buffer released in put_package
            else
                part_buffer->Release(); // If NO data in buffer, release buffer back to pool
        }
        part_buffer = 0;
        data_buffer_size = 0;
        data_used = 0;
    }

    // Stamp the header into the packet and hand ownership to the send manager.
    void put_package(DataBuffer *dataBuff, unsigned datalength, unsigned metalength)
    {
        package_header.length = datalength + metalength + sizeof(UdpPacketHeader);
        package_header.metalength = metalength;
        memcpy(dataBuff->data, &package_header, sizeof(package_header));
        parent.writeOwn(receiver, dataBuff, package_header.length, queue_number);
        package_header.pktSeq++;
    }
};
// Exported factory: wraps construction of the concrete packer implementation.
extern UDPLIB_API IMessagePacker *createMessagePacker(ruid_t ruid, unsigned msgId, const void *messageHeader, unsigned headerSize, ISendManager &_parent, IUdpReceiverEntry &_receiver, const IpAddress & _sourceNode, unsigned _msgSeq, unsigned _queue)
{
    IMessagePacker *packer = new CMessagePacker(ruid, msgId, messageHeader, headerSize,
                                                _parent, _receiver, _sourceNode, _msgSeq, _queue);
    return packer;
}
| 36.351042 | 249 | 0.55211 | [
"vector"
] |
f67a6a927c709da20412d8b7ce3a03e964dff7c3 | 15,045 | cpp | C++ | TAO/orbsvcs/orbsvcs/Notify/EventChannelFactory.cpp | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 36 | 2015-01-10T07:27:33.000Z | 2022-03-07T03:32:08.000Z | TAO/orbsvcs/orbsvcs/Notify/EventChannelFactory.cpp | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 2 | 2018-08-13T07:30:51.000Z | 2019-02-25T03:04:31.000Z | TAO/orbsvcs/orbsvcs/Notify/EventChannelFactory.cpp | cflowe/ACE | 5ff60b41adbe1772372d1a43bcc1f2726ff8f810 | [
"DOC"
] | 38 | 2015-01-08T14:12:06.000Z | 2022-01-19T08:33:00.000Z | // $Id: EventChannelFactory.cpp 97014 2013-04-12 22:47:02Z mitza $
#include "orbsvcs/Notify/EventChannelFactory.h"
#include "orbsvcs/Notify/Properties.h"
#include "orbsvcs/Notify/Factory.h"
#include "orbsvcs/Notify/Builder.h"
#include "orbsvcs/Notify/Topology_Saver.h"
#include "orbsvcs/Notify/Topology_Loader.h"
#include "orbsvcs/Notify/Save_Persist_Worker_T.h"
#include "orbsvcs/Notify/Reconnect_Worker_T.h"
#include "orbsvcs/Notify/Event_Persistence_Strategy.h"
#include "orbsvcs/Notify/Routing_Slip_Persistence_Manager.h"
#include "orbsvcs/Notify/EventChannel.h"
#include "orbsvcs/Notify/Container_T.h"
#include "orbsvcs/Notify/Find_Worker_T.h"
#include "orbsvcs/Notify/Seq_Worker_T.h"
#include "orbsvcs/Notify/POA_Helper.h"
#include "orbsvcs/Notify/Validate_Worker_T.h"
#include "orbsvcs/Notify/Validate_Client_Task.h"
#include "orbsvcs/Notify/FilterFactory.h"
#include "ace/Dynamic_Service.h"
#include "tao/debug.h"
//#define DEBUG_LEVEL 9
#ifndef DEBUG_LEVEL
# define DEBUG_LEVEL TAO_debug_level
#endif //DEBUG_LEVEL
TAO_BEGIN_VERSIONED_NAMESPACE_DECL
// Include this here since this is the only file that
// requires Topology_Factory.
namespace TAO_Notify
{
  // virtual
  // Out-of-line destructor for the abstract Topology_Factory service
  // interface; defined here because this is the only translation unit that
  // requires Topology_Factory (see note above).
  Topology_Factory::~Topology_Factory ()
  {
  }
}
// Worker that resolves a channel id to its CORBA object reference within a
// container, raising ChannelNotFound when absent.
typedef TAO_Notify_Find_Worker_T<TAO_Notify_EventChannel
                                 , CosNotifyChannelAdmin::EventChannel
                                 , CosNotifyChannelAdmin::EventChannel_ptr
                                 , CosNotifyChannelAdmin::ChannelNotFound>
TAO_Notify_EventChannel_Find_Worker;

// Worker that enumerates all channel ids in a container into a sequence.
typedef TAO_Notify_Seq_Worker_T<TAO_Notify_EventChannel> TAO_Notify_EventChannel_Seq_Worker;
// Construct with topology persistence disabled until init() wires it up.
TAO_Notify_EventChannelFactory::TAO_Notify_EventChannelFactory (void)
  : topology_save_seq_ (0)
  , topology_factory_(0)
  , reconnect_registry_(*this)
  , loading_topology_ (false)
{
}
TAO_Notify_EventChannelFactory::~TAO_Notify_EventChannelFactory ()
{
  // No-op: teardown is performed explicitly via destroy()/shutdown().
}
void
TAO_Notify_EventChannelFactory::destroy (void)
{
if (this->shutdown () == 1)
return;
TAO_Notify_Properties* properties = TAO_Notify_PROPERTIES::instance();
// Reset references to CORBA objects.
properties->orb (CORBA::ORB::_nil ());
properties->default_poa (PortableServer::POA::_nil ());
ec_container_.reset( 0 );
}
// Bring the factory to a usable state: create the channel container,
// bootstrap a (persistent) object POA, locate the optional topology factory
// service, reload any persisted topology/events, and optionally start the
// client-validation task. Throws CORBA system exceptions on failure.
void
TAO_Notify_EventChannelFactory::init (PortableServer::POA_ptr poa)
{
  this->poa_ = PortableServer::POA::_duplicate (poa);

  ACE_ASSERT (this->ec_container_.get() == 0);

  // Init ec_container_
  TAO_Notify_EventChannel_Container* ecc = 0;
  ACE_NEW_THROW_EX (ecc,
                    TAO_Notify_EventChannel_Container (),
                    CORBA::INTERNAL ());
  this->ec_container_.reset( ecc );

  this->ec_container().init ();

  TAO_Notify_POA_Helper* object_poa = 0;

  // Bootstrap initial Object POA
  ACE_NEW_THROW_EX (object_poa,
                    TAO_Notify_POA_Helper (),
                    CORBA::NO_MEMORY ());

  // Guard against leaks until ownership is transferred via adopt_poa below.
  ACE_Auto_Ptr<TAO_Notify_POA_Helper> auto_object_poa (object_poa);

  ACE_CString poa_name = object_poa->get_unique_id ();
#if defined (CORBA_E_MICRO)
  object_poa->init (poa, poa_name.c_str ());
#else
  object_poa->init_persistent (poa, poa_name.c_str ());
#endif /* CORBA_E_MICRO */

  this->adopt_poa (auto_object_poa.release ());

  // Note topology factory is configured separately from the "builder" mediated
  // objects since it is independant of the "style" of Notification Service.
  this->topology_factory_ =
    ACE_Dynamic_Service <TAO_Notify::Topology_Factory>::instance ("Topology_Factory");

  this->load_topology ();
  this->load_event_persistence ();

  if (TAO_Notify_PROPERTIES::instance()->validate_client() == true)
  {
    // Periodic task that pings clients so dead connections are reaped.
    TAO_Notify_validate_client_Task* validate_client_task = 0;
    ACE_NEW_THROW_EX (validate_client_task,
                      TAO_Notify_validate_client_Task (TAO_Notify_PROPERTIES::instance()->validate_client_delay (),
                                                       TAO_Notify_PROPERTIES::instance()->validate_client_interval (),
                                                       this),
                      CORBA::INTERNAL ());
    this->validate_client_task_.reset (validate_client_task);
  }
}
// CORBA reference counting: delegate to the internal refcount helper.
void
TAO_Notify_EventChannelFactory::_add_ref (void)
{
  this->_incr_refcnt ();
}
// CORBA reference counting: delegate to the internal refcount helper.
void
TAO_Notify_EventChannelFactory::_remove_ref (void)
{
  this->_decr_refcnt ();
}
// Final release: deletes this instance. Presumably invoked when the
// reference count drops to zero - TODO confirm against TAO_Notify_Object.
void
TAO_Notify_EventChannelFactory::release (void)
{
  delete this;
  //@@ inform factory
}
// Detach a channel from the container and mark ourselves changed so the
// removal is captured by the next topology save.
void
TAO_Notify_EventChannelFactory::remove (TAO_Notify_EventChannel* event_channel)
{
  this->ec_container().remove (event_channel);
  this->self_change ();
}
// Stop the validator and shut down all channels. Returns 1 if the base
// class reports we were already shut down (nothing further done), else 0.
int
TAO_Notify_EventChannelFactory::shutdown (void)
{
  this->stop_validator();

  if (TAO_Notify_Object::shutdown () == 1)
    return 1;

  this->ec_container().shutdown ();

  return 0;
}
// Create a channel; the supplied name is ignored here and the call simply
// delegates to create_channel.
CosNotifyChannelAdmin::EventChannel_ptr
TAO_Notify_EventChannelFactory::create_named_channel (
  const CosNotification::QoSProperties& initial_qos,
  const CosNotification::AdminProperties& initial_admin,
  CosNotifyChannelAdmin::ChannelID_out id,
  const char*)
{
  return this->create_channel (initial_qos, initial_admin, id);
}
// Build a new event channel via the configured builder, record the topology
// change, and return the new channel reference (id returned via out param).
::CosNotifyChannelAdmin::EventChannel_ptr TAO_Notify_EventChannelFactory::create_channel (
  const CosNotification::QoSProperties & initial_qos,
  const CosNotification::AdminProperties & initial_admin,
  CosNotifyChannelAdmin::ChannelID_out id
  )
{
  CosNotifyChannelAdmin::EventChannel_var ec =
    TAO_Notify_PROPERTIES::instance()->builder()->build_event_channel (this
                                                                      , initial_qos
                                                                      , initial_admin
                                                                      , id);

  // Mark the factory dirty so the new channel is persisted on the next save.
  this->self_change ();

  return ec._retn ();
}
// Return the ids of every channel currently hosted by this factory.
CosNotifyChannelAdmin::ChannelIDSeq*
TAO_Notify_EventChannelFactory::get_all_channels (void)
{
  TAO_Notify_EventChannel_Seq_Worker worker;
  return worker.create (this->ec_container ());
}
// Resolve a channel id to its object reference; the worker raises
// CosNotifyChannelAdmin::ChannelNotFound when the id is unknown.
CosNotifyChannelAdmin::EventChannel_ptr
TAO_Notify_EventChannelFactory::get_event_channel (CosNotifyChannelAdmin::ChannelID id)
{
  TAO_Notify_EventChannel_Find_Worker worker;
  return worker.resolve (id, this->ec_container ());
}
// Programmatically install a topology factory. Normally the factory is
// located via the service configurator in init(); this setter exists mainly
// for testing/debugging (note the debug message below).
void
TAO_Notify_EventChannelFactory::set_topology_factory(TAO_Notify::Topology_Factory* f)
{
  ORBSVCS_DEBUG ((LM_DEBUG,
    ACE_TEXT ("(%P,%t) Debug Topology_Factory installed in EventChannelFactory.\n")
    ));
  // If the above meessage appears when you don't expect it
  // use svc.conf to install the topology factory rather
  // than calling this method.
  this->topology_factory_ = f;
}
// Restore the saved topology from the persistence store, if a topology
// factory is installed.  loading_topology_ is raised for the duration
// so change_to_parent() suppresses re-saving while we are reloading.
void
TAO_Notify_EventChannelFactory::load_topology (void)
{
this->loading_topology_ = true;
if (this->topology_factory_ != 0)
{
// create_loader will open and load the persistence file for validation
auto_ptr<TAO_Notify::Topology_Loader> tl(this->topology_factory_->create_loader());
if (tl.get () != 0)
{
tl->load (this);
}
}
else
{
if (TAO_debug_level > 0)
ORBSVCS_DEBUG ((LM_DEBUG, ACE_TEXT ("(%P|%t) Topology persistence disabled.\n")));
}
this->loading_topology_ = false;
}
// Run the validation worker over every child event channel.
void
TAO_Notify_EventChannelFactory::validate ()
{
TAO_Notify::Validate_Worker<TAO_Notify_EventChannel> wrk;
this->ec_container().collection()->for_each(&wrk);
}
// Ask the client-validation task to stop, if one was ever installed
// (validate_client_task_ may legitimately be null).
void
TAO_Notify_EventChannelFactory::stop_validator ()
{
if (this->validate_client_task_.get () != 0)
{
this->validate_client_task_->shutdown ();
}
}
// The factory itself is always part of the persistent topology.
bool
TAO_Notify_EventChannelFactory::is_persistent () const
{
return true;
}
// Write this factory and its children to the topology store.  Our own
// change flags are cleared up front; 'changed' tells the saver whether
// this object's attributes changed, and begin_object's return value
// ('want_all_children') forces a full save of every child regardless of
// each child's individual change flag.
void
TAO_Notify_EventChannelFactory::save_persistent (TAO_Notify::Topology_Saver& saver)
{
bool changed = this->self_changed_;
this->self_changed_ = false;
this->children_changed_ = false;
TAO_Notify::NVPList attrs; // ECF has no attributes
bool want_all_children =
saver.begin_object(0, "channel_factory", attrs, changed);
// for each deleted child
// delete_child // if the child has persistence.
TAO_Notify::Save_Persist_Worker<TAO_Notify_EventChannel> wrk(saver, want_all_children);
this->ec_container().collection()->for_each(&wrk);
if (want_all_children || this->reconnect_registry_.is_changed ())
{
this->reconnect_registry_.save_persistent(saver);
}
saver.end_object(0, "channel_factory");
}
// Reload in-flight persistent events from the event-persistence store.
// Each successfully reloaded routing slip is parked in
// routing_slip_restart_set_ until reconnect() reactivates it.  Event
// persistence without topology persistence is rejected as a
// configuration error (see the ORBSVCS_ERROR branch below).
void
TAO_Notify_EventChannelFactory::load_event_persistence (void)
{
TAO_Notify::Event_Persistence_Strategy * strategy =
ACE_Dynamic_Service <TAO_Notify::Event_Persistence_Strategy>::instance ("Event_Persistence");
if (strategy != 0)
{
if (this->topology_factory_ != 0)
{
TAO_Notify::Event_Persistence_Factory * factory = strategy->get_factory ();
if (factory != 0)
{
// Walk the chain of persistence managers, one per stored event.
for (
TAO_Notify::Routing_Slip_Persistence_Manager * rspm = factory->first_reload_manager();
rspm != 0;
rspm = rspm->load_next ())
{
TAO_Notify::Routing_Slip_Ptr routing_slip = TAO_Notify::Routing_Slip::create (*this, rspm);
if (!routing_slip.null ())
{
this->routing_slip_restart_set_.insert (routing_slip);
}
else
{
//@@todo: tell the rspm it's an orphan, but we can't during reload
// we need collect these and come back later to remove them
ORBSVCS_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%P|%t) Reload persistent event failed.\n")
));
}
}
}
}
else
{
ORBSVCS_ERROR ((LM_ERROR,
ACE_TEXT ("(%P|%t) Notify Service: Configuration error. Event Persistence requires Topology Persistence.\n")
));
throw CORBA::PERSIST_STORE();
}
}
}
// Persist the topology in response to a change somewhere in the tree.
// Returns true whenever persistence is enabled and we are not in the
// middle of a reload -- even if the actual write was skipped because
// another thread's save (detected via topology_save_seq_) already
// covered this change.
bool
TAO_Notify_EventChannelFactory::change_to_parent (void)
{
bool saving = false;
if (! this->loading_topology_)
{
// A null pointer means that saving of topology is disabled.
if (this->topology_factory_ != 0)
{
saving = true;
// seq is used to check save-in-progress
// if it changes while we're waiting for the lock
// then our change may have already been saved, so
// just return. Caller will signal change again if necessary.
short seq = this->topology_save_seq_;
ACE_GUARD_THROW_EX (TAO_SYNCH_MUTEX, ace_mon, this->topology_save_lock_, CORBA::INTERNAL ());
if (seq == this->topology_save_seq_)
{
auto_ptr<TAO_Notify::Topology_Saver> saver(this->topology_factory_->create_saver());
if (saver.get() != 0)
{
this->save_persistent(*saver);
saver->close ();
}
this->topology_save_seq_ += 1;
}
}
}
return saving;
}
// Topology reload callback: materialize one persisted child.  "channel"
// children are rebuilt through the global builder and get their saved
// attributes re-applied; the reconnection registry maps to our member.
// Anything else returns 'this' so unknown child types are skipped.
TAO_Notify::Topology_Object*
TAO_Notify_EventChannelFactory::load_child (const ACE_CString& type,
CORBA::Long id,
const TAO_Notify::
NVPList& attrs)
{
// ignore anything but our valid children (ie channel)
TAO_Notify::Topology_Object * result = this;
if (type == "channel")
{
if (DEBUG_LEVEL) ORBSVCS_DEBUG ((LM_DEBUG,
ACE_TEXT ("(%P|%t) EventChannelFactory reload channel %d\n")
, static_cast<int> (id)
));
TAO_Notify_Builder* bld = TAO_Notify_PROPERTIES::instance()->builder();
TAO_Notify_EventChannel * ec = bld->build_event_channel(
this ,
id);
ec->load_attrs (attrs);
result = ec;
}
else if (type == TAO_Notify::REGISTRY_TYPE)
{
result = & this->reconnect_registry_;
}
return result;
}
// After (re)activation: propagate the reconnect to all child channels,
// announce it to registered clients, then reactivate the routing slips
// collected by load_event_persistence().
void
TAO_Notify_EventChannelFactory::reconnect (void)
{
// Reconnect all children first
TAO_Notify::Reconnect_Worker<TAO_Notify_EventChannel> wrk;
this->ec_container().collection()->for_each(&wrk);
// Then send reconnection announcement to registered clients
ACE_ASSERT (!CORBA::is_nil (this->channel_factory_.in ()));
this->reconnect_registry_.send_reconnect (this->channel_factory_.in ());
// reactivate events in-progress
Routing_Slip_Set::CONST_ITERATOR iter (this->routing_slip_restart_set_);
TAO_Notify::Routing_Slip_Ptr * routing_slip;
for (iter.first(); iter.next(routing_slip); iter.advance())
{
(*routing_slip)->reconnect();
}
this->routing_slip_restart_set_.reset ();
}
// Register a client callback to be told when this factory reconnects.
NotifyExt::ReconnectionRegistry::ReconnectionID
TAO_Notify_EventChannelFactory::register_callback (
NotifyExt::ReconnectionCallback_ptr reconnection)
{
return this->reconnect_registry_.register_callback (
reconnection);
}
// Remove a previously registered reconnection callback.
void
TAO_Notify_EventChannelFactory::unregister_callback (
NotifyExt::ReconnectionRegistry::ReconnectionID id)
{
this->reconnect_registry_.unregister_callback (
id);
}
// Liveness probe; this implementation always reports alive.
CORBA::Boolean
TAO_Notify_EventChannelFactory::is_alive (void)
{
return CORBA::Boolean (1);
}
// Trigger a topology save by flagging this object as changed.
void
TAO_Notify_EventChannelFactory::save_topology (void)
{
this->self_change ();
}
TAO_Notify_ProxyConsumer *
TAO_Notify_EventChannelFactory::find_proxy_consumer (TAO_Notify::IdVec & id_path, size_t position)
{
  // Resolve the id path, starting at 'position', to a proxy consumer.
  const size_t depth = id_path.size ();
  // EventChannelFactory only: a leading id naming the factory itself is
  // silently consumed.
  if (position < depth && id_path[position] == this->id ())
  {
    ++position;
  }
  if (position >= depth)
  {
    return 0;
  }
  // The next id selects a child event channel; the remainder of the
  // path is resolved recursively inside that channel.
  TAO_Notify_EventChannel_Find_Worker finder;
  TAO_Notify_EventChannel * channel = finder.find (id_path[position], this->ec_container ());
  ++position;
  if (channel == 0)
  {
    return 0;
  }
  return channel->find_proxy_consumer (id_path, position);
}
TAO_Notify_ProxySupplier *
TAO_Notify_EventChannelFactory::find_proxy_supplier (TAO_Notify::IdVec & id_path, size_t position)
{
  // Resolve the id path, starting at 'position', to a proxy supplier.
  const size_t depth = id_path.size ();
  // EventChannelFactory only: a leading id naming the factory itself is
  // silently consumed.
  if (position < depth && id_path[position] == this->id ())
  {
    ++position;
  }
  if (position >= depth)
  {
    return 0;
  }
  // The next id selects a child event channel; the remainder of the
  // path is resolved recursively inside that channel.
  TAO_Notify_EventChannel_Find_Worker finder;
  TAO_Notify_EventChannel * channel = finder.find (id_path[position], this->ec_container ());
  ++position;
  if (channel == 0)
  {
    return 0;
  }
  return channel->find_proxy_supplier (id_path, position);
}
// Activate this servant (see activate()), remember the narrowed object
// reference, and run the reconnection protocol.  Exceptions from
// reconnect() are deliberately swallowed so activation itself succeeds.
// NOTE(review): _retn() at the end relinquishes channel_factory_'s
// reference, leaving the member nil after this call -- confirm callers
// never rely on it afterwards (reconnect() asserts it is non-nil).
CosNotifyChannelAdmin::EventChannelFactory_ptr
TAO_Notify_EventChannelFactory::activate_self (void)
{
CORBA::Object_var obj = this->activate (this);
this->channel_factory_
= CosNotifyChannelAdmin::EventChannelFactory::_narrow (obj.in());
try
{
if (DEBUG_LEVEL > 9)
{
ORBSVCS_DEBUG ((LM_DEBUG, ACE_TEXT ("(%P|%t) TAO_Notify_EventChannelFactory::activate_self\n") ));
}
this->reconnect ();
}
catch (const CORBA::Exception&)
{
// ignore for now
}
return this->channel_factory_._retn();
}
// Topology id accessor (Topology_Object interface).
TAO_Notify_Object::ID
TAO_Notify_EventChannelFactory::get_id () const
{
return id();
}
// Access the child-channel container; only valid once the container
// has been created (asserted below).
TAO_Notify_EventChannelFactory::TAO_Notify_EventChannel_Container&
TAO_Notify_EventChannelFactory::ec_container()
{
ACE_ASSERT( this->ec_container_.get() != 0 );
return *ec_container_;
}
TAO_END_VERSIONED_NAMESPACE_DECL
| 27.861111 | 117 | 0.69784 | [
"object"
] |
f67c20635064adfd9211767ca00029267db87948 | 894 | cpp | C++ | clang-tools-extra/test/clang-tidy/infrastructure/duplicate-fixes-of-alias-checkers.cpp | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 2,338 | 2018-06-19T17:34:51.000Z | 2022-03-31T11:00:37.000Z | clang-tools-extra/test/clang-tidy/infrastructure/duplicate-fixes-of-alias-checkers.cpp | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 3,740 | 2019-01-23T15:36:48.000Z | 2022-03-31T22:01:13.000Z | clang-tools-extra/test/clang-tidy/infrastructure/duplicate-fixes-of-alias-checkers.cpp | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 500 | 2019-01-23T07:49:22.000Z | 2022-03-30T02:59:37.000Z | // RUN: %check_clang_tidy %s cppcoreguidelines-pro-type-member-init,hicpp-member-init,modernize-use-emplace,hicpp-use-emplace %t
// Minimal std::vector stand-in: provides just the push_back/emplace_back
// signatures the use-emplace checkers inspect, keeping the test
// self-contained.
namespace std {
template <typename T>
class vector {
public:
void push_back(const T &) {}
void push_back(T &&) {}
template <typename... Args>
void emplace_back(Args &&... args){};
};
} // namespace std
// _num2 is deliberately left uninitialized: the aliased checkers
// (cppcoreguidelines-pro-type-member-init and hicpp-member-init) must
// report it once with a single shared fix-it, not duplicate the fix.
class Foo {
public:
Foo() : _num1(0)
// CHECK-MESSAGES: warning: constructor does not initialize these fields: _num2 [cppcoreguidelines-pro-type-member-init,hicpp-member-init]
{
_num1 = 10;
}
int use_the_members() const {
return _num1 + _num2;
}
private:
int _num1;
int _num2;
// CHECK-FIXES: _num2{};
};
// Same aliasing scenario for modernize-use-emplace / hicpp-use-emplace:
// one diagnostic, one fix-it.
int should_use_emplace(std::vector<Foo> &v) {
v.push_back(Foo());
// CHECK-FIXES: v.emplace_back();
// CHECK-MESSAGES: warning: use emplace_back instead of push_back [hicpp-use-emplace,modernize-use-emplace]
}
| 22.35 | 140 | 0.689038 | [
"vector"
] |
f67c45884124ace0df0a512d4abda9fec6f6ca89 | 7,716 | cpp | C++ | src/caffe/layers/lpq_loss_layer.cpp | AyaLotfy/flownet2 | e3e3dd043d9a65bc8727429938a0d88539f906fd | [
"FSFAP"
] | 1,081 | 2017-04-25T11:46:20.000Z | 2022-03-29T03:24:45.000Z | src/caffe/layers/lpq_loss_layer.cpp | AyaLotfy/flownet2 | e3e3dd043d9a65bc8727429938a0d88539f906fd | [
"FSFAP"
] | 220 | 2017-04-28T04:47:30.000Z | 2021-03-27T09:49:43.000Z | src/caffe/layers/lpq_loss_layer.cpp | AyaLotfy/flownet2 | e3e3dd043d9a65bc8727429938a0d88539f906fd | [
"FSFAP"
] | 361 | 2017-04-26T02:16:49.000Z | 2022-02-21T04:21:09.000Z | #include <iomanip>
#include <sstream>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/lpq_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Assemble the internal sub-layer pipeline used to evaluate the loss:
//   (1) optional EltwiseLayer: diff = bottom[0] - bottom[1]
//   (2) PowerLayer:            (diff + p_epsilon)^p, elementwise
//   (3) 1x1 ConvolutionLayer:  sum over channels (weights 1, or 1/C
//                              when l2_prescale_by_channels is set)
//   (4) PowerLayer:            (sum + q_epsilon)^q
// The p/q exponents follow an iteration schedule parsed from the proto
// into 'schedule_'; the first schedule step configures the layers here
// and is popped at the end.
template <typename Dtype>
void LpqLossLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
LossLayer<Dtype>::LayerSetUp(bottom, top);
/// P/Q parameters schedule
{
const google::protobuf::RepeatedField<unsigned int>& start_iters =
this->layer_param_.lpq_loss_param().pq_episode_starts_at_iter();
const google::protobuf::RepeatedField<float>& ps =
this->layer_param_.lpq_loss_param().p();
const google::protobuf::RepeatedField<float>& qs =
this->layer_param_.lpq_loss_param().q();
/// Special case: If one of each p and q is given, it's okay to not
/// specify a start frame
if (start_iters.size() == 0 and
ps.size() == 1 and
qs.size() == 1)
{
schedule_.push(new ScheduleStep_(0, ps.Get(0), qs.Get(0)));
LOG(INFO) << "Lpq loss layer: Constant parameters"
<< " p = " << schedule_.front()->p
<< ", q = " << schedule_.front()->q;
}
else
{
/// Ensure that all three vectors are the same size...
if (start_iters.size() != ps.size() or ps.size() != qs.size()) {
LOG(FATAL) << "Incompatible sizes: ("
<< "pq_episode_starts_at_iter -> "
<< start_iters.size() << ", "
<< "p -> " << ps.size() << ", "
<< "q -> " << qs.size() << ")";
}
/// ...and are not empty
if (start_iters.size() < 1) {
LOG(FATAL) << "Lpq schedule parameters must not be empty";
}
/// The start frames must also be ordered and not contain duplicates
if (start_iters.size() > 1) {
for (unsigned int i = 0; i < start_iters.size()-1; ++i) {
if (start_iters.Get(i) >= start_iters.Get(i+1)) {
std::ostringstream oss;
for (unsigned int j = 0; j < start_iters.size(); ++j) {
oss << " " << start_iters.Get(j);
}
LOG(FATAL) << "pq_episode_starts_at_frame is not ordered "
<< "or contains duplicates:" << oss.str();
}
}
}
/// Also it is probably a mistake if the first start frame is NOT zero
if (start_iters.Get(0) != 0) {
LOG(FATAL) << "First entry in pq_episode_starts_at_frame is not 0, "
<< "this is probably a mistake.";
}
/// Make schedule
for (unsigned int i = 0; i < start_iters.size(); ++i) {
schedule_.push(new ScheduleStep_(start_iters.Get(i),
ps.Get(i),
qs.Get(i)));
LOG(INFO) << "Lpq loss layer: Starting at iteration "
<< std::setw(7) << std::setfill(' ')
<< schedule_.back()->start_iter
<< ": p = " << schedule_.back()->p
<< ", q = " << schedule_.back()->q;
}
}
}
// With a single bottom the "difference" is the input itself; with two
// bottoms an eltwise layer computes bottom[0] - bottom[1].
if(bottom.size() == 1) {
diff_top_vec_.clear();
diff_top_vec_.push_back(bottom[0]);
} else if(bottom.size() == 2) {
// Set up eltwise layer to compute elementwise difference
diff_top_vec_.clear();
diff_top_vec_.push_back(&diff_);
LayerParameter diff_param;
diff_param.mutable_eltwise_param()->add_coeff(1.);
diff_param.mutable_eltwise_param()->add_coeff(-1.);
diff_param.mutable_eltwise_param()->set_operation(
EltwiseParameter_EltwiseOp_SUM);
diff_layer_.reset(new EltwiseLayer<Dtype>(diff_param));
diff_layer_->SetUp(bottom, diff_top_vec_);
} else {
LOG(FATAL) << "LpqLossLayer needs one or two input blobs.";
}
/// Set up power layer to compute elementwise p-power
p_top_vec_.clear();
p_top_vec_.push_back(&p_output_);
LayerParameter p_param;
p_param.mutable_power_param()->set_power(schedule_.front()->p);
p_param.mutable_power_param()->set_shift(
this->layer_param_.lpq_loss_param().p_epsilon());
p_layer_.reset(new PowerLayer<Dtype>(p_param));
p_layer_->SetUp(diff_top_vec_, p_top_vec_);
/// Set up convolutional layer to sum all channels
sum_top_vec_.clear();
sum_top_vec_.push_back(&sum_output_);
LayerParameter sum_param;
sum_param.mutable_convolution_param()->set_num_output(1);
sum_param.mutable_convolution_param()->add_kernel_size(1);
sum_param.mutable_convolution_param()->mutable_weight_filler()
->set_type("constant");
if(this->layer_param_.lpq_loss_param().l2_prescale_by_channels()) {
// Constant weights 1/C turn the channel sum into a channel mean.
sum_param.mutable_convolution_param()->mutable_weight_filler()
->set_value(Dtype(1)/
Dtype(bottom[0]->channels()));
} else {
sum_param.mutable_convolution_param()->mutable_weight_filler()
->set_value(Dtype(1));
}
sum_layer_.reset(new ConvolutionLayer<Dtype>(sum_param));
sum_layer_->SetUp(p_top_vec_, sum_top_vec_);
/// Set up power layer to compute elementwise q-power
q_top_vec_.clear();
q_top_vec_.push_back(&q_output_);
LayerParameter q_param;
q_param.mutable_power_param()->set_power(schedule_.front()->q);
q_param.mutable_power_param()->set_shift(
this->layer_param_.lpq_loss_param().q_epsilon());
q_layer_.reset(new PowerLayer<Dtype>(q_param));
q_layer_->SetUp(sum_top_vec_, q_top_vec_);
/// Discard first parameter schedule step
schedule_.pop();
}
// Shape the scalar loss output and every intermediate blob.  The helper
// blobs sign_/mask_/ones_ follow bottom[0]'s shape; ones_ is filled with
// 1s -- note only count/channels elements are written, presumably
// because it is consumed per-channel by the GPU path (TODO confirm).
template <typename Dtype>
void LpqLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
vector<int> loss_shape(0); // Loss layers output a scalar; 0 axes.
top[0]->Reshape(loss_shape);
// The eltwise diff layer only exists when two bottoms were supplied.
if(bottom.size() > 1) {
diff_layer_->Reshape(bottom, diff_top_vec_);
}
sign_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
mask_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
// plateau_l2_.ReshapeLike(sum_output_);
ones_.Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
caffe_set(ones_.count()/ones_.channels(), Dtype(1), ones_.mutable_cpu_data());
// Propagate shapes through the internal pipeline.
p_layer_->Reshape(diff_top_vec_, p_top_vec_);
sum_layer_->Reshape(p_top_vec_, sum_top_vec_);
q_layer_->Reshape(sum_top_vec_, q_top_vec_);
//caffe_set(sign_.count()/sign_.channels(), Dtype(1), sign_.mutable_cpu_data());
}
// CPU forward pass intentionally unimplemented (NOT_IMPLEMENTED aborts);
// only the GPU implementation of this layer exists.
template <typename Dtype>
void LpqLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
NOT_IMPLEMENTED;
}
// CPU backward pass likewise unimplemented.
template <typename Dtype>
void LpqLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
NOT_IMPLEMENTED;
}
#ifdef CPU_ONLY
STUB_GPU(LpqLossLayer);
#endif
INSTANTIATE_CLASS(LpqLossLayer);
REGISTER_LAYER_CLASS(LpqLoss);
} // namespace caffe
| 38.969697 | 85 | 0.570503 | [
"vector"
] |
f67ff59cd028b520ffe96eb5507a07f22b1128e6 | 15,614 | cpp | C++ | src/qt/tokenitemmodel.cpp | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 18 | 2021-02-11T16:36:38.000Z | 2021-12-15T11:33:14.000Z | src/qt/tokenitemmodel.cpp | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 10 | 2021-01-17T05:57:32.000Z | 2022-03-03T12:49:32.000Z | src/qt/tokenitemmodel.cpp | GoStartupsLtd/hydra-core | 293c20204be9eb04e491420aa4c94b6c2adf6757 | [
"MIT"
] | 3 | 2021-08-23T05:29:30.000Z | 2022-03-25T20:18:00.000Z | #include <qt/tokenitemmodel.h>
#include <qt/token.h>
#include <qt/walletmodel.h>
#include <interfaces/wallet.h>
#include <validation.h>
#include <qt/bitcoinunits.h>
#include <interfaces/node.h>
#include <interfaces/handler.h>
#include <algorithm>
#include <QDateTime>
#include <QFont>
#include <QDebug>
#include <QThread>
class TokenItemEntry
{
public:
TokenItemEntry()
{}
TokenItemEntry(const interfaces::TokenInfo &tokenInfo)
{
hash = tokenInfo.hash;
createTime.setTime_t(tokenInfo.time);
contractAddress = QString::fromStdString(tokenInfo.contract_address);
tokenName = QString::fromStdString(tokenInfo.token_name);
tokenSymbol = QString::fromStdString(tokenInfo.token_symbol);
decimals = tokenInfo.decimals;
senderAddress = QString::fromStdString(tokenInfo.sender_address);
}
TokenItemEntry( const TokenItemEntry &obj)
{
hash = obj.hash;
createTime = obj.createTime;
contractAddress = obj.contractAddress;
tokenName = obj.tokenName;
tokenSymbol = obj.tokenSymbol;
decimals = obj.decimals;
senderAddress = obj.senderAddress;
balance = obj.balance;
}
~TokenItemEntry()
{}
uint256 hash;
QDateTime createTime;
QString contractAddress;
QString tokenName;
QString tokenSymbol;
quint8 decimals;
QString senderAddress;
int256_t balance;
};
// Background worker living on TokenItemModel's private QThread; its
// slots are invoked by the model through queued connections so wallet
// and chain queries never block the UI thread.
class TokenTxWorker : public QObject
{
Q_OBJECT
public:
WalletModel *walletModel;
// True until the first updateTokenTx() call; widens the initial
// look-back window of the event-log scan.
bool first;
Token tokenAbi;
TokenTxWorker(WalletModel *_walletModel):
walletModel(_walletModel), first(true) {}
private Q_SLOTS:
// Scan the chain's transfer-event log for the token identified by
// 'hash' and record any found transactions into the wallet, then
// persist the advanced scan position.
void updateTokenTx(const QString &hash)
{
// Initialize variables
uint256 tokenHash = uint256S(hash.toStdString());
int64_t fromBlock = 0;
int64_t toBlock = -1;
interfaces::TokenInfo tokenInfo;
uint256 blockHash;
bool found = false;
// First scan reaches back a full coinbase maturity; later scans only
// re-check the most recent 10 blocks.
int64_t backInPast = first ? COINBASE_MATURITY : 10;
first = false;
// Get current height and block hash
toBlock = walletModel->node().getNumBlocks();
blockHash = walletModel->node().getBlockHash(toBlock);
if(toBlock > -1)
{
// Find the token tx in the wallet
tokenInfo = walletModel->wallet().getToken(tokenHash);
found = tokenInfo.hash == tokenHash;
if(found)
{
// Get the start location for search the event log
if(tokenInfo.block_number < toBlock)
{
// If the stored block hash still matches the chain we resume where
// the previous scan stopped; otherwise a reorg happened and we
// rewind by 'backInPast' blocks.
if(walletModel->node().getBlockHash(tokenInfo.block_number) == tokenInfo.block_hash)
{
fromBlock = tokenInfo.block_number;
}
else
{
fromBlock = tokenInfo.block_number - backInPast;
}
}
else
{
fromBlock = toBlock - backInPast;
}
if(fromBlock < 0)
fromBlock = 0;
tokenInfo.block_hash = blockHash;
tokenInfo.block_number = toBlock;
}
}
if(found)
{
// List the events and update the token tx
std::vector<TokenEvent> tokenEvents;
tokenAbi.setAddress(tokenInfo.contract_address);
tokenAbi.setSender(tokenInfo.sender_address);
tokenAbi.transferEvents(tokenEvents, fromBlock, toBlock);
for(size_t i = 0; i < tokenEvents.size(); i++)
{
TokenEvent event = tokenEvents[i];
interfaces::TokenTx tokenTx;
tokenTx.contract_address = event.address;
tokenTx.sender_address = event.sender;
tokenTx.receiver_address = event.receiver;
tokenTx.value = event.value;
tokenTx.tx_hash = event.transactionHash;
tokenTx.block_hash = event.blockHash;
tokenTx.block_number = event.blockNumber;
walletModel->wallet().addTokenTxEntry(tokenTx, false);
}
// Store the updated scan position (block_hash/block_number above).
walletModel->wallet().addTokenEntry(tokenInfo);
}
}
// Drop stale token-transaction entries from the wallet.
void cleanTokenTxEntries()
{
if(walletModel) walletModel->wallet().cleanTokenTxEntries();
}
// Query the token contract for senderAddress' current balance and
// report it via the balanceChanged signal (nothing is emitted when the
// contract call fails).
void updateBalance(QString hash, QString contractAddress, QString senderAddress)
{
tokenAbi.setAddress(contractAddress.toStdString());
tokenAbi.setSender(senderAddress.toStdString());
std::string strBalance;
if(tokenAbi.balanceOf(strBalance))
{
QString balance = QString::fromStdString(strBalance);
Q_EMIT balanceChanged(hash, balance);
}
}
Q_SIGNALS:
// Signal that balance in token changed
void balanceChanged(QString hash, QString balance);
};
#include "tokenitemmodel.moc"
// Strict weak ordering on the token hash; the heterogeneous overloads
// let qLowerBound/qUpperBound compare cached entries directly against a
// bare uint256 key.
struct TokenItemEntryLessThan
{
bool operator()(const TokenItemEntry &a, const TokenItemEntry &b) const
{
return a.hash < b.hash;
}
bool operator()(const TokenItemEntry &a, const uint256 &b) const
{
return a.hash < b;
}
bool operator()(const uint256 &a, const TokenItemEntry &b) const
{
return a < b.hash;
}
};
// Private cache behind TokenItemModel: keeps TokenItemEntry records
// sorted by hash so rows can be located by binary search when the
// wallet reports changes.
class TokenItemPriv
{
public:
QList<TokenItemEntry> cachedTokenItem;
TokenItemModel *parent;
TokenItemPriv(TokenItemModel *_parent):
parent(_parent) {}
// Rebuild the cache from the wallet's token list and queue an
// asynchronous balance refresh for every entry.
void refreshTokenItem(interfaces::Wallet& wallet)
{
cachedTokenItem.clear();
{
for(interfaces::TokenInfo token : wallet.getTokens())
{
TokenItemEntry tokenItem(token);
if(parent)
{
parent->updateBalance(tokenItem);
}
cachedTokenItem.append(tokenItem);
}
}
std::sort(cachedTokenItem.begin(), cachedTokenItem.end(), TokenItemEntryLessThan());
}
// Apply a CT_NEW / CT_UPDATED / CT_DELETED notification to the cache,
// emitting the matching begin/end model signals on 'parent'.
void updateEntry(const TokenItemEntry &_item, int status)
{
// Find address / label in model
TokenItemEntry item;
QList<TokenItemEntry>::iterator lower = qLowerBound(
cachedTokenItem.begin(), cachedTokenItem.end(), _item, TokenItemEntryLessThan());
QList<TokenItemEntry>::iterator upper = qUpperBound(
cachedTokenItem.begin(), cachedTokenItem.end(), _item, TokenItemEntryLessThan());
int lowerIndex = (lower - cachedTokenItem.begin());
int upperIndex = (upper - cachedTokenItem.begin());
bool inModel = (lower != upper);
item = _item;
if(inModel)
{
// Keep the balance we already fetched; the notification payload does
// not carry one.
item.balance = cachedTokenItem[lowerIndex].balance;
}
switch(status)
{
case CT_NEW:
if(inModel)
{
qWarning() << "TokenItemPriv::updateEntry: Warning: Got CT_NEW, but entry is already in model";
break;
}
parent->beginInsertRows(QModelIndex(), lowerIndex, lowerIndex);
cachedTokenItem.insert(lowerIndex, item);
parent->endInsertRows();
break;
case CT_UPDATED:
if(!inModel)
{
qWarning() << "TokenItemPriv::updateEntry: Warning: Got CT_UPDATED, but entry is not in model";
break;
}
cachedTokenItem[lowerIndex] = item;
parent->emitDataChanged(lowerIndex);
break;
case CT_DELETED:
if(!inModel)
{
qWarning() << "TokenItemPriv::updateEntry: Warning: Got CT_DELETED, but entry is not in model";
break;
}
parent->beginRemoveRows(QModelIndex(), lowerIndex, upperIndex-1);
cachedTokenItem.erase(lower, upper);
parent->endRemoveRows();
break;
}
}
// Store a freshly fetched balance.  Returns the row index when the
// value actually changed, -1 when nothing needed updating.
int updateBalance(QString hash, QString balance)
{
uint256 updated;
updated.SetHex(hash.toStdString());
int256_t val(balance.toStdString());
for(int i = 0; i < cachedTokenItem.size(); i++)
{
TokenItemEntry item = cachedTokenItem[i];
if(item.hash == updated && item.balance != val)
{
item.balance = val;
cachedTokenItem[i] = item;
return i;
}
}
return -1;
}
// Number of cached rows.
int size()
{
return cachedTokenItem.size();
}
// Pointer to the cached entry at 'idx', or null when out of range.
TokenItemEntry *index(int idx)
{
if(idx >= 0 && idx < cachedTokenItem.size())
{
return &cachedTokenItem[idx];
}
else
{
return 0;
}
}
};
// Build the model: populate the cache from the wallet, start the
// background worker thread for balance/transaction updates, and hook
// up the core wallet notification.
TokenItemModel::TokenItemModel(WalletModel *parent):
QAbstractItemModel(parent),
walletModel(parent),
priv(0),
worker(0),
tokenTxCleaned(false)
{
columns << tr("Token Name") << tr("Token Symbol") << tr("Balance");
priv = new TokenItemPriv(this);
priv->refreshTokenItem(walletModel->wallet());
worker = new TokenTxWorker(walletModel);
worker->tokenAbi.setModel(walletModel);
worker->moveToThread(&(t));
connect(worker, SIGNAL(balanceChanged(QString,QString)), this, SLOT(balanceChanged(QString,QString)));
t.start();
subscribeToCoreSignals();
}
// Tear down: detach from core signals, stop the worker thread, free
// the cache.  NOTE(review): 'worker' itself is never deleted here and
// QThread does not delete workers automatically -- confirm whether
// this leak is intentional (process-lifetime object).
TokenItemModel::~TokenItemModel()
{
unsubscribeFromCoreSignals();
t.quit();
t.wait();
if(priv)
{
delete priv;
priv = 0;
}
}
// Flat (non-hierarchical) QAbstractItemModel plumbing: every row maps
// to one cached TokenItemEntry and no index has children.

// Create an index whose internal pointer refers to the cached entry
// for 'row'; invalid rows yield an invalid index.
QModelIndex TokenItemModel::index(int row, int column, const QModelIndex &parent) const
{
    Q_UNUSED(parent);
    TokenItemEntry *entry = priv->index(row);
    return entry ? createIndex(row, column, entry) : QModelIndex();
}

// The model is a flat list, so no index ever has a parent.
QModelIndex TokenItemModel::parent(const QModelIndex &child) const
{
    Q_UNUSED(child);
    return QModelIndex();
}

// One row per cached token entry.
int TokenItemModel::rowCount(const QModelIndex &parent) const
{
    Q_UNUSED(parent);
    return priv->size();
}

// Fixed column set: name / symbol / balance.
int TokenItemModel::columnCount(const QModelIndex &parent) const
{
    Q_UNUSED(parent);
    return columns.length();
}
// Qt model accessor: serve the display strings for the three visible
// columns plus the custom roles (hash, contract address, metadata,
// formatted and raw balance) consumed by the views.
QVariant TokenItemModel::data(const QModelIndex &index, int role) const
{
if(!index.isValid())
return QVariant();
TokenItemEntry *rec = static_cast<TokenItemEntry*>(index.internalPointer());
switch (role) {
case Qt::DisplayRole:
switch(index.column())
{
case Name:
return rec->tokenName;
case Symbol:
return rec->tokenSymbol;
case Balance:
// Balances are scaled by the token's own decimals for display.
return BitcoinUnits::formatToken(rec->decimals, rec->balance, false, BitcoinUnits::separatorAlways);
default:
break;
}
break;
case TokenItemModel::HashRole:
return QString::fromStdString(rec->hash.ToString());
break;
case TokenItemModel::AddressRole:
return rec->contractAddress;
break;
case TokenItemModel::NameRole:
return rec->tokenName;
break;
case TokenItemModel::SymbolRole:
return rec->tokenSymbol;
break;
case TokenItemModel::DecimalsRole:
return rec->decimals;
break;
case TokenItemModel::SenderRole:
return rec->senderAddress;
break;
case TokenItemModel::BalanceRole:
return BitcoinUnits::formatToken(rec->decimals, rec->balance, false, BitcoinUnits::separatorAlways);
break;
case TokenItemModel::RawBalanceRole:
// Unscaled integer balance as a decimal string.
return QString::fromStdString(rec->balance.str());
break;
default:
break;
}
return QVariant();
}
// Slot (queued from NotifyTokenChanged): fold one wallet-side token
// change into the cache.  When the token still exists in the wallet we
// rebuild its entry and refresh its balance; otherwise only the hash
// is filled in so updateEntry() can locate and remove the cached row.
void TokenItemModel::updateToken(const QString &hash, int status, bool showToken)
{
// Find token in wallet
uint256 updated;
updated.SetHex(hash.toStdString());
interfaces::TokenInfo token =walletModel->wallet().getToken(updated);
showToken &= token.hash == updated;
TokenItemEntry tokenEntry;
if(showToken)
{
tokenEntry = TokenItemEntry(token);
updateBalance(tokenEntry);
}
else
{
tokenEntry.hash = updated;
}
priv->updateEntry(tokenEntry, status);
}
// Periodic refresh (caller not visible here -- presumably a timer in
// the client model; TODO confirm): re-query every cached token's
// balance and, when event logging is enabled, rescan each token's
// transaction history on the worker thread.  Wallet token-tx cleanup
// runs only once per session.
void TokenItemModel::checkTokenBalanceChanged()
{
if(!priv)
return;
// Update token balance
for(int i = 0; i < priv->cachedTokenItem.size(); i++)
{
TokenItemEntry tokenEntry = priv->cachedTokenItem[i];
updateBalance(tokenEntry);
}
// Update token transactions
if(fLogEvents)
{
// Search for token transactions
for(int i = 0; i < priv->cachedTokenItem.size(); i++)
{
TokenItemEntry tokenEntry = priv->cachedTokenItem[i];
QString hash = QString::fromStdString(tokenEntry.hash.ToString());
QMetaObject::invokeMethod(worker, "updateTokenTx", Qt::QueuedConnection,
Q_ARG(QString, hash));
}
// Clean token transactions
if(!tokenTxCleaned)
{
tokenTxCleaned = true;
QMetaObject::invokeMethod(worker, "cleanTokenTxEntries", Qt::QueuedConnection);
}
}
}
// Re-emit dataChanged for every column of row 'idx'.
void TokenItemModel::emitDataChanged(int idx)
{
Q_EMIT dataChanged(index(idx, 0, QModelIndex()), index(idx, columns.length()-1, QModelIndex()));
}
// Carries a token-change notification from the wallet's notification
// handler onto the model's thread via a queued meta-call.
struct TokenNotification
{
public:
TokenNotification() {}
TokenNotification(uint256 _hash, ChangeType _status, bool _showToken):
hash(_hash), status(_status), showToken(_showToken) {}
void invoke(QObject *tim)
{
QString strHash = QString::fromStdString(hash.GetHex());
qDebug() << "NotifyTokenChanged: " + strHash + " status= " + QString::number(status);
QMetaObject::invokeMethod(tim, "updateToken", Qt::QueuedConnection,
Q_ARG(QString, strHash),
Q_ARG(int, status),
Q_ARG(bool, showToken));
}
private:
uint256 hash;
ChangeType status;
bool showToken;
};
// Core signal handler registered in subscribeToCoreSignals().
static void NotifyTokenChanged(TokenItemModel *tim, const uint256 &hash, ChangeType status)
{
TokenNotification notification(hash, status, true);
notification.invoke(tim);
}
// Attach/detach the wallet's token-changed notification, which is
// delivered through NotifyTokenChanged() above.
void TokenItemModel::subscribeToCoreSignals()
{
// Connect signals to wallet
m_handler_token_changed = walletModel->wallet().handleTokenChanged(boost::bind(NotifyTokenChanged, this, _1, _2));
}
void TokenItemModel::unsubscribeFromCoreSignals()
{
// Disconnect signals from wallet
m_handler_token_changed->disconnect();
}
// Worker callback: store a freshly fetched balance and repaint the row
// only when the value actually changed (updateBalance returns -1
// otherwise).
void TokenItemModel::balanceChanged(QString hash, QString balance)
{
int index = priv->updateBalance(hash, balance);
if(index > -1)
{
emitDataChanged(index);
}
}
// Queue an asynchronous balance fetch for 'entry' on the worker thread.
void TokenItemModel::updateBalance(const TokenItemEntry &entry)
{
QString hash = QString::fromStdString(entry.hash.ToString());
QMetaObject::invokeMethod(worker, "updateBalance", Qt::QueuedConnection,
Q_ARG(QString, hash), Q_ARG(QString, entry.contractAddress), Q_ARG(QString, entry.senderAddress));
}
| 29.516068 | 129 | 0.58012 | [
"vector",
"model"
] |
f680989433eb2203ac23807a25b22a6348c0bcbe | 9,164 | cpp | C++ | Components/Overlay/src/OgreOverlayElementCommands.cpp | resttime/ogre-next | 7435e60bd6df422d2fb4c742a493c3f37ef9a7a9 | [
"MIT"
] | 701 | 2019-09-08T15:56:41.000Z | 2022-03-31T05:51:26.000Z | Components/Overlay/src/OgreOverlayElementCommands.cpp | resttime/ogre-next | 7435e60bd6df422d2fb4c742a493c3f37ef9a7a9 | [
"MIT"
] | 204 | 2019-09-01T23:02:32.000Z | 2022-03-28T14:58:39.000Z | Components/Overlay/src/OgreOverlayElementCommands.cpp | resttime/ogre-next | 7435e60bd6df422d2fb4c742a493c3f37ef9a7a9 | [
"MIT"
] | 188 | 2019-09-05T05:14:46.000Z | 2022-03-22T21:51:39.000Z | /*
-----------------------------------------------------------------------------
This source file is part of OGRE-Next
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreOverlayElementCommands.h"
#include "OgreOverlayElement.h"
#include "OgreStringConverter.h"
#include "OgreLogManager.h"
namespace Ogre {
namespace v1 {
namespace OverlayElementCommands {
//-----------------------------------------------------------------------
String CmdLeft::doGet(const void* target) const
{
return StringConverter::toString(
static_cast<const OverlayElement*>(target)->getLeft() );
}
void CmdLeft::doSet(void* target, const String& val)
{
Real r = StringConverter::parseReal(val);
static_cast<OverlayElement*>(target)->setLeft(r);
}
//-----------------------------------------------------------------------
String CmdTop::doGet(const void* target) const
{
return StringConverter::toString(
static_cast<const OverlayElement*>(target)->getTop() );
}
void CmdTop::doSet(void* target, const String& val)
{
Real r = StringConverter::parseReal(val);
static_cast<OverlayElement*>(target)->setTop(r);
}
//-----------------------------------------------------------------------
String CmdWidth::doGet(const void* target) const
{
return StringConverter::toString(
static_cast<const OverlayElement*>(target)->getWidth() );
}
void CmdWidth::doSet(void* target, const String& val)
{
Real r = StringConverter::parseReal(val);
static_cast<OverlayElement*>(target)->setWidth(r);
}
//-----------------------------------------------------------------------
String CmdHeight::doGet(const void* target) const
{
return StringConverter::toString(
static_cast<const OverlayElement*>(target)->getHeight() );
}
void CmdHeight::doSet(void* target, const String& val)
{
Real r = StringConverter::parseReal(val);
static_cast<OverlayElement*>(target)->setHeight(r);
}
//-----------------------------------------------------------------------
String CmdMaterial::doGet(const void* target) const
{
return static_cast<const OverlayElement*>(target)->getMaterialName();
}
void CmdMaterial::doSet(void* target, const String& val)
{
if (val != "")
{
static_cast<OverlayElement*>(target)->setMaterialName(val);
}
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
String CmdCaption::doGet(const void* target) const
{
return static_cast<const OverlayElement*>(target)->getCaption();
}
void CmdCaption::doSet(void* target, const String& val)
{
static_cast<OverlayElement*>(target)->setCaption(val);
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
String CmdMetricsMode::doGet(const void* target) const
{
GuiMetricsMode gmm =
static_cast<const OverlayElement*>(target)->getMetricsMode();
switch (gmm)
{
case GMM_PIXELS :
return "pixels";
case GMM_RELATIVE_ASPECT_ADJUSTED :
return "relative_aspect_adjusted";
default :
return "relative";
}
}
void CmdMetricsMode::doSet(void* target, const String& val)
{
if (val == "pixels")
{
static_cast<OverlayElement*>(target)->setMetricsMode(GMM_PIXELS);
}
else if (val == "relative_aspect_adjusted")
{
static_cast<OverlayElement*>(target)->setMetricsMode(GMM_RELATIVE_ASPECT_ADJUSTED);
}
else
{
static_cast<OverlayElement*>(target)->setMetricsMode(GMM_RELATIVE);
}
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
String CmdHorizontalAlign::doGet(const void* target) const
{
GuiHorizontalAlignment gha =
static_cast<const OverlayElement*>(target)->getHorizontalAlignment();
switch(gha)
{
case GHA_LEFT:
return "left";
case GHA_RIGHT:
return "right";
case GHA_CENTER:
return "center";
}
// To keep compiler happy
return "center";
}
void CmdHorizontalAlign::doSet(void* target, const String& val)
{
if (val == "left")
{
static_cast<OverlayElement*>(target)->setHorizontalAlignment(GHA_LEFT);
}
else if (val == "right")
{
static_cast<OverlayElement*>(target)->setHorizontalAlignment(GHA_RIGHT);
}
else
{
static_cast<OverlayElement*>(target)->setHorizontalAlignment(GHA_CENTER);
}
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
String CmdVerticalAlign::doGet(const void* target) const
{
GuiVerticalAlignment gva =
static_cast<const OverlayElement*>(target)->getVerticalAlignment();
switch(gva)
{
case GVA_TOP:
return "top";
case GVA_BOTTOM:
return "bottom";
case GVA_CENTER:
return "center";
}
// To keep compiler happy
return "center";
}
void CmdVerticalAlign::doSet(void* target, const String& val)
{
if (val == "top")
{
static_cast<OverlayElement*>(target)->setVerticalAlignment(GVA_TOP);
}
else if (val == "bottom")
{
static_cast<OverlayElement*>(target)->setVerticalAlignment(GVA_BOTTOM);
}
else
{
static_cast<OverlayElement*>(target)->setVerticalAlignment(GVA_CENTER);
}
}
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
//-----------------------------------------------------------------------
String CmdVisible::doGet(const void* target) const
{
return StringConverter::toString(static_cast<const OverlayElement*>(target)->isVisible());
}
void CmdVisible::doSet(void* target, const String& val)
{
if (val == "true")
{
static_cast<OverlayElement*>(target)->show();
}
else if (val == "false")
{
static_cast<OverlayElement*>(target)->hide();
}
}
//-----------------------------------------------------------------------
}
}
}
| 39.670996 | 102 | 0.454168 | [
"object"
] |
f689a4a750800c74f9e70e6ffa01c51a37f086e0 | 2,467 | cpp | C++ | WalEngine/src/editor/LayerMaterialPanel.cpp | QuincyKing/WalEngine | 8f30c4ad200615641777f17c5b0b1dbd15ba6e03 | [
"MIT"
] | 1 | 2018-11-09T09:56:52.000Z | 2018-11-09T09:56:52.000Z | WalEngine/src/editor/LayerMaterialPanel.cpp | QuincyKing/WalnutEngine | 8f30c4ad200615641777f17c5b0b1dbd15ba6e03 | [
"MIT"
] | 1 | 2019-04-13T15:46:33.000Z | 2019-04-13T15:46:33.000Z | WalEngine/src/editor/LayerMaterialPanel.cpp | QuincyKing/WalnutEngine | 8f30c4ad200615641777f17c5b0b1dbd15ba6e03 | [
"MIT"
] | null | null | null | #include "LayerMaterialPanel.h"
#include <limits.h>
#include "../render/RenderEngine.h"
//init variances
float LayerMaterialPanel::x = 1.0f;
float LayerMaterialPanel::y = 1.0f;
float LayerMaterialPanel::metal = 0.0f;
float LayerMaterialPanel::delectricIOR = 1.68f;
float LayerMaterialPanel::coatSmoothness = 1.0f;
float LayerMaterialPanel::coatIOR = 2.5f;
float LayerMaterialPanel::coatThickness = 0.25f;
ImVec4 LayerMaterialPanel::coatExtinction = ImColor(0.0f, 0.0f, 0.0f);
void LayerMaterialPanel::show()
{
const float drag_speed = 0.05f;
const float one = 1.0f;
const float zero = 0.0f;
const float iorh = 3.0f;
const float tile = 30;
ImGui::Text("Base Layer:");
ImGui::Text("Tiling:");
ImGui::DragScalar("x", ImGuiDataType_Float, &x, drag_speed, &one, &tile);
ImGui::DragScalar("y", ImGuiDataType_Float, &y, drag_speed, &one, &tile);
ImGui::DragScalar("metallic", ImGuiDataType_Float, &metal, drag_speed, &zero, &one);
ImGui::DragScalar("DelectricIOR", ImGuiDataType_Float, &delectricIOR, drag_speed, &one, &iorh);
ImGui::Text("Coat Layer:");
ImGui::DragScalar("Coat Smoothness", ImGuiDataType_Float, &coatSmoothness, drag_speed, &zero, &one);
ImGui::DragScalar("Coat IOR", ImGuiDataType_Float, &coatIOR, drag_speed, &one, &iorh);
ImGui::DragScalar("Coat Thickness", ImGuiDataType_Float, &coatThickness, drag_speed, &zero, &iorh);
static bool alpha_preview = false;
static bool alpha_half_preview = false;
static bool drag_and_drop = true;
static bool options_menu = true;
static bool hdr = false;
int misc_flags = (hdr ? ImGuiColorEditFlags_HDR : 0) | (drag_and_drop ? 0 : ImGuiColorEditFlags_NoDragDrop) | (alpha_half_preview ? ImGuiColorEditFlags_AlphaPreviewHalf : (alpha_preview ? ImGuiColorEditFlags_AlphaPreview : 0)) | (options_menu ? 0 : ImGuiColorEditFlags_NoOptions);
ImGui::ColorEdit3("Coat Extinction", (float*)&coatExtinction, ImGuiColorEditFlags_Float | misc_flags);
update_variances();
}
void LayerMaterialPanel::update_variances()
{
RenderEngine::Data.set_vec3("ST", glm::vec3(x, y, 0.0));
RenderEngine::Data.set_float("metallic", metal);
RenderEngine::Data.set_float("DelectricIOR", delectricIOR);
RenderEngine::Data.set_float("CoatPerceptualRoughness", coatSmoothness);
RenderEngine::Data.set_float("CoatIOR", coatIOR);
RenderEngine::Data.set_float("CoatThickness", coatThickness);
RenderEngine::Data.set_vec3("CoatExtinction", glm::vec3(coatExtinction.x, coatExtinction.y, coatExtinction.z));
} | 45.685185 | 281 | 0.755979 | [
"render"
] |
f68cbb8fbc9ad254e32e0b2dc95a962fd16faed0 | 26,417 | cpp | C++ | game/graphics/opengl_renderer/background/Tie3.cpp | Hat-Kid/jak-project | 0e2320ca9584118316313e41e646b179a1083feb | [
"ISC"
] | null | null | null | game/graphics/opengl_renderer/background/Tie3.cpp | Hat-Kid/jak-project | 0e2320ca9584118316313e41e646b179a1083feb | [
"ISC"
] | null | null | null | game/graphics/opengl_renderer/background/Tie3.cpp | Hat-Kid/jak-project | 0e2320ca9584118316313e41e646b179a1083feb | [
"ISC"
] | null | null | null | #include "Tie3.h"
#include "third-party/imgui/imgui.h"
Tie3::Tie3(const std::string& name, BucketId my_id, int level_id)
: BucketRenderer(name, my_id), m_level_id(level_id) {
// regardless of how many we use some fixed max
// we won't actually interp or upload to gpu the unused ones, but we need a fixed maximum so
// indexing works properly.
m_color_result.resize(TIME_OF_DAY_COLOR_COUNT);
}
Tie3::~Tie3() {
discard_tree_cache();
}
void Tie3::update_load(const LevelData* loader_data) {
const tfrag3::Level* lev_data = loader_data->level.get();
m_wind_vectors.clear();
// We changed level!
discard_tree_cache();
for (int geo = 0; geo < 4; ++geo) {
m_trees[geo].resize(lev_data->tie_trees[geo].size());
}
size_t vis_temp_len = 0;
size_t max_draws = 0;
size_t max_num_grps = 0;
u16 max_wind_idx = 0;
size_t time_of_day_count = 0;
size_t max_inds = 0;
for (u32 l_geo = 0; l_geo < tfrag3::TIE_GEOS; l_geo++) {
for (u32 l_tree = 0; l_tree < lev_data->tie_trees[l_geo].size(); l_tree++) {
size_t wind_idx_buffer_len = 0;
size_t num_grps = 0;
const auto& tree = lev_data->tie_trees[l_geo][l_tree];
max_draws = std::max(tree.static_draws.size(), max_draws);
for (auto& draw : tree.static_draws) {
num_grps += draw.vis_groups.size();
}
max_num_grps = std::max(max_num_grps, num_grps);
for (auto& draw : tree.instanced_wind_draws) {
wind_idx_buffer_len += draw.vertex_index_stream.size();
}
for (auto& inst : tree.wind_instance_info) {
max_wind_idx = std::max(max_wind_idx, inst.wind_idx);
}
time_of_day_count = std::max(tree.colors.size(), time_of_day_count);
max_inds = std::max(tree.unpacked.indices.size(), max_inds);
u32 verts = tree.packed_vertices.color_indices.size();
auto& lod_tree = m_trees.at(l_geo);
glGenVertexArrays(1, &lod_tree[l_tree].vao);
glBindVertexArray(lod_tree[l_tree].vao);
lod_tree[l_tree].vertex_buffer = loader_data->tie_data[l_geo][l_tree].vertex_buffer;
lod_tree[l_tree].vert_count = verts;
lod_tree[l_tree].draws = &tree.static_draws;
lod_tree[l_tree].colors = &tree.colors;
lod_tree[l_tree].vis = &tree.bvh;
lod_tree[l_tree].index_data = tree.unpacked.indices.data();
lod_tree[l_tree].instance_info = &tree.wind_instance_info;
lod_tree[l_tree].wind_draws = &tree.instanced_wind_draws;
vis_temp_len = std::max(vis_temp_len, tree.bvh.vis_nodes.size());
lod_tree[l_tree].tod_cache = swizzle_time_of_day(tree.colors);
glBindBuffer(GL_ARRAY_BUFFER, lod_tree[l_tree].vertex_buffer);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glVertexAttribPointer(0, // location 0 in the shader
3, // 3 values per vert
GL_FLOAT, // floats
GL_FALSE, // normalized
sizeof(tfrag3::PreloadedVertex), // stride
(void*)offsetof(tfrag3::PreloadedVertex, x) // offset (0)
);
glVertexAttribPointer(1, // location 1 in the shader
3, // 3 values per vert
GL_FLOAT, // floats
GL_FALSE, // normalized
sizeof(tfrag3::PreloadedVertex), // stride
(void*)offsetof(tfrag3::PreloadedVertex, s) // offset (0)
);
glVertexAttribIPointer(2, // location 2 in the shader
1, // 1 values per vert
GL_UNSIGNED_SHORT, // u16
sizeof(tfrag3::PreloadedVertex), // stride
(void*)offsetof(tfrag3::PreloadedVertex, color_index) // offset (0)
);
glGenBuffers(1, &lod_tree[l_tree].single_draw_index_buffer);
glGenBuffers(1, &lod_tree[l_tree].index_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, lod_tree[l_tree].index_buffer);
// todo: move to loader, this will probably be quite slow.
glBufferData(GL_ELEMENT_ARRAY_BUFFER, tree.unpacked.indices.size() * sizeof(u32),
tree.unpacked.indices.data(), GL_STATIC_DRAW);
if (wind_idx_buffer_len > 0) {
lod_tree[l_tree].wind_matrix_cache.resize(tree.wind_instance_info.size());
lod_tree[l_tree].has_wind = true;
lod_tree[l_tree].wind_vertex_index_buffer =
loader_data->tie_data[l_geo][l_tree].wind_indices;
u32 off = 0;
for (auto& draw : tree.instanced_wind_draws) {
lod_tree[l_tree].wind_vertex_index_offsets.push_back(off);
off += draw.vertex_index_stream.size();
}
}
glActiveTexture(GL_TEXTURE10);
glGenTextures(1, &lod_tree[l_tree].time_of_day_texture);
glBindTexture(GL_TEXTURE_1D, lod_tree[l_tree].time_of_day_texture);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, TIME_OF_DAY_COLOR_COUNT, 0, GL_RGBA,
GL_UNSIGNED_INT_8_8_8_8, nullptr);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindVertexArray(0);
}
}
m_cache.vis_temp.resize(vis_temp_len);
m_cache.multidraw_offset_per_stripdraw.resize(max_draws);
m_cache.multidraw_count_buffer.resize(max_num_grps);
m_cache.multidraw_index_offset_buffer.resize(max_num_grps);
m_wind_vectors.resize(4 * max_wind_idx + 4); // 4x u32's per wind.
m_cache.draw_idx_temp.resize(max_draws);
m_cache.index_temp.resize(max_inds);
ASSERT(time_of_day_count <= TIME_OF_DAY_COLOR_COUNT);
}
/*!
* Set up all OpenGL and temporary buffers for a given level name.
* The level name should be the 3 character short name.
*/
bool Tie3::setup_for_level(const std::string& level, SharedRenderState* render_state) {
// make sure we have the level data.
Timer tfrag3_setup_timer;
auto lev_data = render_state->loader->get_tfrag3_level(level);
if (!lev_data || (m_has_level && lev_data->load_id != m_load_id)) {
m_has_level = false;
m_textures = nullptr;
m_level_name = "";
discard_tree_cache();
return false;
}
m_textures = &lev_data->textures;
m_load_id = lev_data->load_id;
if (m_level_name != level) {
update_load(lev_data);
m_has_level = true;
m_level_name = level;
} else {
m_has_level = true;
}
if (tfrag3_setup_timer.getMs() > 5) {
fmt::print("TIE setup: {:.1f}ms\n", tfrag3_setup_timer.getMs());
}
return m_has_level;
}
void vector_min_in_place(math::Vector4f& v, float val) {
for (int i = 0; i < 4; i++) {
if (v[i] > val) {
v[i] = val;
}
}
}
math::Vector4f vector_max(const math::Vector4f& v, float val) {
math::Vector4f result;
for (int i = 0; i < 4; i++) {
result[i] = std::max(val, v[i]);
}
return result;
}
void do_wind_math(u16 wind_idx,
float* wind_vector_data,
const Tie3::WindWork& wind_work,
float stiffness,
std::array<math::Vector4f, 4>& mat) {
float* my_vector = wind_vector_data + (4 * wind_idx);
const auto& work_vector = wind_work.wind_array[(wind_work.wind_time + wind_idx) & 63];
constexpr float cx = 0.5;
constexpr float cy = 100.0;
constexpr float cz = 0.0166;
constexpr float cw = -1.0;
// ld s1, 8(s5) # load wind vector 1
// pextlw s1, r0, s1 # convert to 2x 64 bits, by shifting left
// qmtc2.i vf18, s1 # put in vf
float vf18_x = my_vector[2];
float vf18_z = my_vector[3];
// ld s2, 0(s5) # load wind vector 0
// pextlw s3, r0, s2 # convert to 2x 64 bits, by shifting left
// qmtc2.i vf17, s3 # put in vf
float vf17_x = my_vector[0];
float vf17_z = my_vector[1];
// lqc2 vf16, 12(s3) # load wind vector
math::Vector4f vf16 = work_vector;
// vmula.xyzw acc, vf16, vf1 # acc = vf16
// vmsubax.xyzw acc, vf18, vf19 # acc = vf16 - vf18 * wind_const.x
// vmsuby.xyzw vf16, vf17, vf19
//# vf16 -= (vf18 * wind_const.x) + (vf17 * wind_const.y)
vf16.x() -= cx * vf18_x + cy * vf17_x;
vf16.z() -= cx * vf18_z + cy * vf17_z;
// vmulaz.xyzw acc, vf16, vf19 # acc = vf16 * wind_const.z
// vmadd.xyzw vf18, vf1, vf18
//# vf18 += vf16 * wind_const.z
math::Vector4f vf18(vf18_x, 0.f, vf18_z, 0.f);
vf18 += vf16 * cz;
// vmulaz.xyzw acc, vf18, vf19 # acc = vf18 * wind_const.z
// vmadd.xyzw vf17, vf17, vf1
//# vf17 += vf18 * wind_const.z
math::Vector4f vf17(vf17_x, 0.f, vf17_z, 0.f);
vf17 += vf18 * cz;
// vitof12.xyzw vf11, vf11 # normal convert
// vitof12.xyzw vf12, vf12 # normal convert
// vminiw.xyzw vf17, vf17, vf0
vector_min_in_place(vf17, 1.f);
// qmfc2.i s3, vf18
// ppacw s3, r0, s3
// vmaxw.xyzw vf27, vf17, vf19
auto vf27 = vector_max(vf17, cw);
// vmulw.xyzw vf27, vf27, vf15
vf27 *= stiffness;
// vmulax.yw acc, vf0, vf0
// vmulay.xz acc, vf27, vf10
// vmadd.xyzw vf10, vf1, vf10
mat[0].x() += vf27.x() * mat[0].y();
mat[0].z() += vf27.z() * mat[0].y();
// qmfc2.i s2, vf27
if (!wind_work.paused) {
my_vector[0] = vf27.x();
my_vector[1] = vf27.z();
my_vector[2] = vf18.x();
my_vector[3] = vf18.z();
}
// vmulax.yw acc, vf0, vf0
// vmulay.xz acc, vf27, vf11
// vmadd.xyzw vf11, vf1, vf11
mat[1].x() += vf27.x() * mat[1].y();
mat[1].z() += vf27.z() * mat[1].y();
// ppacw s2, r0, s2
// vmulax.yw acc, vf0, vf0
// vmulay.xz acc, vf27, vf12
// vmadd.xyzw vf12, vf1, vf12
mat[2].x() += vf27.x() * mat[2].y();
mat[2].z() += vf27.z() * mat[2].y();
//
// if not paused
// sd s3, 8(s5)
// sd s2, 0(s5)
}
void Tie3::discard_tree_cache() {
for (int geo = 0; geo < 4; ++geo) {
for (auto& tree : m_trees[geo]) {
glBindTexture(GL_TEXTURE_1D, tree.time_of_day_texture);
glDeleteTextures(1, &tree.time_of_day_texture);
glDeleteBuffers(1, &tree.index_buffer);
glDeleteBuffers(1, &tree.single_draw_index_buffer);
glDeleteVertexArrays(1, &tree.vao);
}
m_trees[geo].clear();
}
}
void Tie3::render(DmaFollower& dma, SharedRenderState* render_state, ScopedProfilerNode& prof) {
if (!m_enabled) {
while (dma.current_tag_offset() != render_state->next_bucket) {
dma.read_and_advance();
}
return;
}
if (m_override_level && m_pending_user_level) {
m_has_level = setup_for_level(*m_pending_user_level, render_state);
m_pending_user_level = {};
}
auto data0 = dma.read_and_advance();
ASSERT(data0.vif1() == 0);
ASSERT(data0.vif0() == 0);
ASSERT(data0.size_bytes == 0);
if (dma.current_tag().kind == DmaTag::Kind::CALL) {
// renderer didn't run, let's just get out of here.
for (int i = 0; i < 4; i++) {
dma.read_and_advance();
}
ASSERT(dma.current_tag_offset() == render_state->next_bucket);
return;
}
auto gs_test = dma.read_and_advance();
ASSERT(gs_test.size_bytes == 32);
auto tie_consts = dma.read_and_advance();
ASSERT(tie_consts.size_bytes == 9 * 16);
auto mscalf = dma.read_and_advance();
ASSERT(mscalf.size_bytes == 0);
auto row = dma.read_and_advance();
ASSERT(row.size_bytes == 32);
auto next = dma.read_and_advance();
ASSERT(next.size_bytes == 0);
auto pc_port_data = dma.read_and_advance();
ASSERT(pc_port_data.size_bytes == sizeof(TfragPcPortData));
memcpy(&m_pc_port_data, pc_port_data.data, sizeof(TfragPcPortData));
m_pc_port_data.level_name[11] = '\0';
auto wind_data = dma.read_and_advance();
ASSERT(wind_data.size_bytes == sizeof(WindWork));
memcpy(&m_wind_data, wind_data.data, sizeof(WindWork));
while (dma.current_tag_offset() != render_state->next_bucket) {
dma.read_and_advance();
}
TfragRenderSettings settings;
settings.hvdf_offset = m_pc_port_data.hvdf_off;
settings.fog = m_pc_port_data.fog;
memcpy(settings.math_camera.data(), m_pc_port_data.camera[0].data(), 64);
settings.tree_idx = 0;
if (render_state->occlusion_vis[m_level_id].valid) {
settings.occlusion_culling = render_state->occlusion_vis[m_level_id].data;
}
update_render_state_from_pc_settings(render_state, m_pc_port_data);
for (int i = 0; i < 4; i++) {
settings.planes[i] = m_pc_port_data.planes[i];
}
if (false) {
// for (int i = 0; i < 8; i++) {
// settings.time_of_day_weights[i] = m_time_of_days[i];
// }
} else {
for (int i = 0; i < 8; i++) {
settings.time_of_day_weights[i] =
2 * (0xff & m_pc_port_data.itimes[i / 2].data()[2 * (i % 2)]) / 127.f;
}
}
if (!m_override_level) {
m_has_level = setup_for_level(m_pc_port_data.level_name, render_state);
}
render_all_trees(lod(), settings, render_state, prof);
}
void Tie3::render_all_trees(int geom,
const TfragRenderSettings& settings,
SharedRenderState* render_state,
ScopedProfilerNode& prof) {
Timer all_tree_timer;
if (m_override_level && m_pending_user_level) {
m_has_level = setup_for_level(*m_pending_user_level, render_state);
m_pending_user_level = {};
}
for (u32 i = 0; i < m_trees[geom].size(); i++) {
render_tree(i, geom, settings, render_state, prof);
}
m_all_tree_time.add(all_tree_timer.getSeconds());
}
void Tie3::render_tree_wind(int idx,
int geom,
const TfragRenderSettings& settings,
SharedRenderState* render_state,
ScopedProfilerNode& prof) {
auto& tree = m_trees.at(geom).at(idx);
if (tree.wind_draws->empty()) {
return;
}
// note: this isn't the most efficient because we might compute wind matrices for invisible
// instances. TODO: add vis ids to the instance info to avoid this
memset(tree.wind_matrix_cache.data(), 0, sizeof(float) * 16 * tree.wind_matrix_cache.size());
auto& cam_bad = settings.math_camera;
std::array<math::Vector4f, 4> cam;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
cam[i][j] = cam_bad.data()[i * 4 + j];
}
}
for (size_t inst_id = 0; inst_id < tree.instance_info->size(); inst_id++) {
auto& info = tree.instance_info->operator[](inst_id);
auto& out = tree.wind_matrix_cache[inst_id];
// auto& mat = tree.instance_info->operator[](inst_id).matrix;
auto mat = info.matrix;
ASSERT(info.wind_idx * 4 <= m_wind_vectors.size());
do_wind_math(info.wind_idx, m_wind_vectors.data(), m_wind_data,
info.stiffness * m_wind_multiplier, mat);
// vmulax.xyzw acc, vf20, vf10
// vmadday.xyzw acc, vf21, vf10
// vmaddz.xyzw vf10, vf22, vf10
out[0] = cam[0] * mat[0].x() + cam[1] * mat[0].y() + cam[2] * mat[0].z();
// vmulax.xyzw acc, vf20, vf11
// vmadday.xyzw acc, vf21, vf11
// vmaddz.xyzw vf11, vf22, vf11
out[1] = cam[0] * mat[1].x() + cam[1] * mat[1].y() + cam[2] * mat[1].z();
// vmulax.xyzw acc, vf20, vf12
// vmadday.xyzw acc, vf21, vf12
// vmaddz.xyzw vf12, vf22, vf12
out[2] = cam[0] * mat[2].x() + cam[1] * mat[2].y() + cam[2] * mat[2].z();
// vmulax.xyzw acc, vf20, vf13
// vmadday.xyzw acc, vf21, vf13
// vmaddaz.xyzw acc, vf22, vf13
// vmaddw.xyzw vf13, vf23, vf0
out[3] = cam[0] * mat[3].x() + cam[1] * mat[3].y() + cam[2] * mat[3].z() + cam[3];
}
int last_texture = -1;
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, tree.wind_vertex_index_buffer);
for (size_t draw_idx = 0; draw_idx < tree.wind_draws->size(); draw_idx++) {
const auto& draw = tree.wind_draws->operator[](draw_idx);
if ((int)draw.tree_tex_id != last_texture) {
glBindTexture(GL_TEXTURE_2D, m_textures->at(draw.tree_tex_id));
last_texture = draw.tree_tex_id;
}
auto double_draw = setup_tfrag_shader(render_state, draw.mode, ShaderId::TFRAG3);
int off = 0;
for (auto& grp : draw.instance_groups) {
if (!m_debug_all_visible && !m_cache.vis_temp.at(grp.vis_idx)) {
off += grp.num;
continue; // invisible, skip.
}
glUniformMatrix4fv(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3].id(), "camera"), 1, GL_FALSE,
tree.wind_matrix_cache.at(grp.instance_idx)[0].data());
prof.add_draw_call();
prof.add_tri(grp.num);
tree.perf.draws++;
tree.perf.wind_draws++;
glDrawElements(GL_TRIANGLE_STRIP, grp.num, GL_UNSIGNED_INT,
(void*)((off + tree.wind_vertex_index_offsets.at(draw_idx)) * sizeof(u32)));
off += grp.num;
switch (double_draw.kind) {
case DoubleDrawKind::NONE:
break;
case DoubleDrawKind::AFAIL_NO_DEPTH_WRITE:
tree.perf.draws++;
tree.perf.wind_draws++;
prof.add_draw_call();
prof.add_tri(grp.num);
glUniform1f(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3].id(), "alpha_min"),
-10.f);
glUniform1f(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3].id(), "alpha_max"),
double_draw.aref_second);
glDepthMask(GL_FALSE);
glDrawElements(GL_TRIANGLE_STRIP, draw.vertex_index_stream.size(), GL_UNSIGNED_INT,
(void*)0);
break;
default:
ASSERT(false);
}
}
}
}
void Tie3::render_tree(int idx,
int geom,
const TfragRenderSettings& settings,
SharedRenderState* render_state,
ScopedProfilerNode& prof) {
// reset perf
Timer tree_timer;
auto& tree = m_trees.at(geom).at(idx);
tree.perf.draws = 0;
tree.perf.wind_draws = 0;
// don't render if we haven't loaded
if (!m_has_level) {
return;
}
// update time of day
if (m_color_result.size() < tree.colors->size()) {
m_color_result.resize(tree.colors->size());
}
Timer interp_timer;
if (m_use_fast_time_of_day) {
interp_time_of_day_fast(settings.time_of_day_weights, tree.tod_cache, m_color_result.data());
} else {
interp_time_of_day_slow(settings.time_of_day_weights, *tree.colors, m_color_result.data());
}
tree.perf.tod_time.add(interp_timer.getSeconds());
Timer setup_timer;
glActiveTexture(GL_TEXTURE10);
glBindTexture(GL_TEXTURE_1D, tree.time_of_day_texture);
glTexSubImage1D(GL_TEXTURE_1D, 0, 0, tree.colors->size(), GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV,
m_color_result.data());
// setup OpenGL shader
first_tfrag_draw_setup(settings, render_state, ShaderId::TFRAG3);
glBindVertexArray(tree.vao);
glBindBuffer(GL_ARRAY_BUFFER, tree.vertex_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,
render_state->no_multidraw ? tree.single_draw_index_buffer : tree.index_buffer);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_PRIMITIVE_RESTART);
glPrimitiveRestartIndex(UINT32_MAX);
tree.perf.tod_time.add(setup_timer.getSeconds());
int last_texture = -1;
if (!m_debug_all_visible) {
// need culling data
Timer cull_timer;
cull_check_all_slow(settings.planes, tree.vis->vis_nodes, settings.occlusion_culling,
m_cache.vis_temp.data());
tree.perf.cull_time.add(cull_timer.getSeconds());
} else {
// no culling.
tree.perf.cull_time.add(0);
}
u32 num_tris;
if (render_state->no_multidraw) {
Timer index_timer;
u32 idx_buffer_size;
if (m_debug_all_visible) {
idx_buffer_size =
make_all_visible_index_list(m_cache.draw_idx_temp.data(), m_cache.index_temp.data(),
*tree.draws, tree.index_data, &num_tris);
} else {
idx_buffer_size = make_index_list_from_vis_string(
m_cache.draw_idx_temp.data(), m_cache.index_temp.data(), *tree.draws, m_cache.vis_temp,
tree.index_data, &num_tris);
}
glBufferData(GL_ELEMENT_ARRAY_BUFFER, idx_buffer_size * sizeof(u32), m_cache.index_temp.data(),
GL_STREAM_DRAW);
tree.perf.index_time.add(index_timer.getSeconds());
} else {
if (m_debug_all_visible) {
Timer index_timer;
num_tris = make_all_visible_multidraws(
m_cache.multidraw_offset_per_stripdraw.data(), m_cache.multidraw_count_buffer.data(),
m_cache.multidraw_index_offset_buffer.data(), *tree.draws);
tree.perf.index_time.add(index_timer.getSeconds());
} else {
Timer index_timer;
num_tris = make_multidraws_from_vis_string(
m_cache.multidraw_offset_per_stripdraw.data(), m_cache.multidraw_count_buffer.data(),
m_cache.multidraw_index_offset_buffer.data(), *tree.draws, m_cache.vis_temp);
tree.perf.index_time.add(index_timer.getSeconds());
}
}
Timer draw_timer;
prof.add_tri(num_tris);
for (size_t draw_idx = 0; draw_idx < tree.draws->size(); draw_idx++) {
const auto& draw = tree.draws->operator[](draw_idx);
const auto& multidraw_indices = m_cache.multidraw_offset_per_stripdraw[draw_idx];
const auto& singledraw_indices = m_cache.draw_idx_temp[draw_idx];
if (render_state->no_multidraw) {
if (singledraw_indices.second == 0) {
continue;
}
} else {
if (multidraw_indices.second == 0) {
continue;
}
}
if ((int)draw.tree_tex_id != last_texture) {
glBindTexture(GL_TEXTURE_2D, m_textures->at(draw.tree_tex_id));
last_texture = draw.tree_tex_id;
}
auto double_draw = setup_tfrag_shader(render_state, draw.mode, ShaderId::TFRAG3);
prof.add_draw_call();
tree.perf.draws++;
if (render_state->no_multidraw) {
glDrawElements(GL_TRIANGLE_STRIP, singledraw_indices.second, GL_UNSIGNED_INT,
(void*)(singledraw_indices.first * sizeof(u32)));
} else {
glMultiDrawElements(GL_TRIANGLE_STRIP,
&m_cache.multidraw_count_buffer[multidraw_indices.first], GL_UNSIGNED_INT,
&m_cache.multidraw_index_offset_buffer[multidraw_indices.first],
multidraw_indices.second);
}
switch (double_draw.kind) {
case DoubleDrawKind::NONE:
break;
case DoubleDrawKind::AFAIL_NO_DEPTH_WRITE:
tree.perf.draws++;
prof.add_draw_call();
glUniform1f(glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3].id(), "alpha_min"),
-10.f);
glUniform1f(glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3].id(), "alpha_max"),
double_draw.aref_second);
glDepthMask(GL_FALSE);
if (render_state->no_multidraw) {
glDrawElements(GL_TRIANGLE_STRIP, singledraw_indices.second, GL_UNSIGNED_INT,
(void*)(singledraw_indices.first * sizeof(u32)));
} else {
glMultiDrawElements(
GL_TRIANGLE_STRIP, &m_cache.multidraw_count_buffer[multidraw_indices.first],
GL_UNSIGNED_INT, &m_cache.multidraw_index_offset_buffer[multidraw_indices.first],
multidraw_indices.second);
}
break;
default:
ASSERT(false);
}
if (m_debug_wireframe && !render_state->no_multidraw) {
render_state->shaders[ShaderId::TFRAG3_NO_TEX].activate();
glUniformMatrix4fv(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3_NO_TEX].id(), "camera"), 1,
GL_FALSE, settings.math_camera.data());
glUniform4f(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3_NO_TEX].id(), "hvdf_offset"),
settings.hvdf_offset[0], settings.hvdf_offset[1], settings.hvdf_offset[2],
settings.hvdf_offset[3]);
glUniform1f(
glGetUniformLocation(render_state->shaders[ShaderId::TFRAG3_NO_TEX].id(), "fog_constant"),
settings.fog.x());
glDisable(GL_BLEND);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glMultiDrawElements(GL_TRIANGLE_STRIP,
&m_cache.multidraw_count_buffer[multidraw_indices.first], GL_UNSIGNED_INT,
&m_cache.multidraw_index_offset_buffer[multidraw_indices.first],
multidraw_indices.second);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
prof.add_draw_call();
render_state->shaders[ShaderId::TFRAG3].activate();
}
}
if (!m_hide_wind) {
auto wind_prof = prof.make_scoped_child("wind");
render_tree_wind(idx, geom, settings, render_state, wind_prof);
}
glBindVertexArray(0);
tree.perf.draw_time.add(draw_timer.getSeconds());
tree.perf.tree_time.add(tree_timer.getSeconds());
}
void Tie3::draw_debug_window() {
ImGui::InputText("Custom Level", m_user_level, sizeof(m_user_level));
if (ImGui::Button("Go!")) {
m_pending_user_level = m_user_level;
}
ImGui::Checkbox("Override level", &m_override_level);
ImGui::Checkbox("Fast ToD", &m_use_fast_time_of_day);
ImGui::Checkbox("Wireframe", &m_debug_wireframe);
ImGui::SameLine();
ImGui::Checkbox("All Visible", &m_debug_all_visible);
ImGui::Checkbox("Hide Wind", &m_hide_wind);
ImGui::SliderFloat("Wind Multiplier", &m_wind_multiplier, 0., 40.f);
ImGui::Separator();
for (u32 i = 0; i < m_trees[lod()].size(); i++) {
auto& perf = m_trees[lod()][i].perf;
ImGui::Text("Tree: %d", i);
ImGui::Text("time of days: %d", (int)m_trees[lod()][i].colors->size());
ImGui::Text("draw: %d", perf.draws);
ImGui::Text("wind draw: %d", perf.wind_draws);
ImGui::Text("total: %.2f", perf.tree_time.get());
ImGui::Text("cull: %.2f index: %.2f tod: %.2f setup: %.2f draw: %.2f",
perf.cull_time.get() * 1000.f, perf.index_time.get() * 1000.f,
perf.tod_time.get() * 1000.f, perf.setup_time.get() * 1000.f,
perf.draw_time.get() * 1000.f);
ImGui::Separator();
}
ImGui::Text("All trees: %.2f", 1000.f * m_all_tree_time.get());
}
| 36.387052 | 100 | 0.626907 | [
"render",
"vector"
] |
f68d78aa246927d3073a0e4a60a86f047c625756 | 11,391 | cc | C++ | test/syscalls/linux/udp_bind.cc | Exhorder6/gvisor | add8bca5ba53b37096bc653900cb278e11681461 | [
"Apache-2.0"
] | 1 | 2021-05-04T06:49:23.000Z | 2021-05-04T06:49:23.000Z | test/syscalls/linux/udp_bind.cc | Exhorder6/gvisor | add8bca5ba53b37096bc653900cb278e11681461 | [
"Apache-2.0"
] | 3 | 2021-10-12T21:46:14.000Z | 2022-03-31T01:53:21.000Z | test/syscalls/linux/udp_bind.cc | Exhorder6/gvisor | add8bca5ba53b37096bc653900cb278e11681461 | [
"Apache-2.0"
] | 1 | 2021-05-04T06:49:18.000Z | 2021-05-04T06:49:18.000Z | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "gtest/gtest.h"
#include "test/syscalls/linux/socket_test_util.h"
#include "test/util/file_descriptor.h"
#include "test/util/test_util.h"
namespace gvisor {
namespace testing {
namespace {
// Common prefix shared by sockaddr_in and sockaddr_in6: both layouts begin
// with the address family followed by the 16-bit port, so a pointer to either
// can be reinterpreted as this struct to read or write the port without
// branching on the family.
struct sockaddr_in_common {
  sa_family_t sin_family;  // AF_INET or AF_INET6.
  in_port_t sin_port;      // Port in network byte order.
};
// Parameters describing one sendto() scenario: how the sending socket is set
// up (domain, optional bind, optional connect), how the receiving socket is
// bound, where sendto() targets, and the expected outcome.
struct SendtoTestParam {
  // Human readable description of test parameter.
  std::string description;
  // Test is broken in gVisor, skip.
  bool skip_on_gvisor;
  // Domain (address family, e.g. AF_INET/AF_INET6) for the socket that will
  // do the sending.
  int send_domain;
  // Address to bind for the socket that will do the sending.
  struct sockaddr_storage send_addr;
  socklen_t send_addr_len;  // 0 for unbound.
  // Address to connect to for the socket that will do the sending.
  struct sockaddr_storage connect_addr;
  socklen_t connect_addr_len;  // 0 for no connection.
  // Domain (address family) for the socket that will do the receiving.
  int recv_domain;
  // Address to bind for the socket that will do the receiving.
  struct sockaddr_storage recv_addr;
  socklen_t recv_addr_len;
  // Address to send to. The port field is overwritten at test time with the
  // receiver's actual (kernel-assigned) bound port.
  struct sockaddr_storage sendto_addr;
  socklen_t sendto_addr_len;
  // Expected errno for the sendto call.
  std::vector<int> sendto_errnos;  // empty on success.
};
// Value-parameterized fixture; each case registered by the
// INSTANTIATE_TEST_SUITE_P below supplies one SendtoTestParam.
class SendtoTest : public ::testing::TestWithParam<SendtoTestParam> {
 protected:
  SendtoTest() {
    // gUnit uses printf, so so will we.
    printf("Testing with %s\n", GetParam().description.c_str());
  }
};
// Exercises one SendtoTestParam: creates sender and receiver sockets,
// binds/connects them as requested, sends a datagram to the receiver's
// actual port, and then either verifies the expected sendto() errno or that
// the datagram arrives carrying the sender's bound source port.
//
// FIX: this block had HTML-entity corruption ("&para;" mojibake) where
// `&param` belongs; the address-of expressions are restored below.
TEST_P(SendtoTest, Sendto) {
  auto param = GetParam();
  SKIP_IF(param.skip_on_gvisor && IsRunningOnGvisor());
  const FileDescriptor s1 =
      ASSERT_NO_ERRNO_AND_VALUE(Socket(param.send_domain, SOCK_DGRAM, 0));
  const FileDescriptor s2 =
      ASSERT_NO_ERRNO_AND_VALUE(Socket(param.recv_domain, SOCK_DGRAM, 0));
  if (param.send_addr_len > 0) {
    ASSERT_THAT(
        bind(s1.get(), AsSockAddr(&param.send_addr), param.send_addr_len),
        SyscallSucceeds());
  }
  if (param.connect_addr_len > 0) {
    ASSERT_THAT(connect(s1.get(), AsSockAddr(&param.connect_addr),
                        param.connect_addr_len),
                SyscallSucceeds());
  }
  ASSERT_THAT(bind(s2.get(), AsSockAddr(&param.recv_addr), param.recv_addr_len),
              SyscallSucceeds());
  // The receiver bound to an ephemeral port; ask the kernel which one.
  struct sockaddr_storage real_recv_addr = {};
  socklen_t real_recv_addr_len = param.recv_addr_len;
  ASSERT_THAT(
      getsockname(s2.get(), AsSockAddr(&real_recv_addr), &real_recv_addr_len),
      SyscallSucceeds());
  ASSERT_EQ(real_recv_addr_len, param.recv_addr_len);
  // sockaddr_in and sockaddr_in6 share the family/port prefix, so the port
  // can be read and written through sockaddr_in_common for either family.
  int recv_port =
      reinterpret_cast<sockaddr_in_common*>(&real_recv_addr)->sin_port;
  struct sockaddr_storage sendto_addr = param.sendto_addr;
  reinterpret_cast<sockaddr_in_common*>(&sendto_addr)->sin_port = recv_port;
  char buf[20] = {};
  if (!param.sendto_errnos.empty()) {
    // Failure case: sendto() must fail with one of the allowed errnos.
    ASSERT_THAT(
        RetryEINTR(sendto)(s1.get(), buf, sizeof(buf), 0,
                           AsSockAddr(&sendto_addr), param.sendto_addr_len),
        SyscallFailsWithErrno(ElementOf(param.sendto_errnos)));
    return;
  }
  ASSERT_THAT(
      RetryEINTR(sendto)(s1.get(), buf, sizeof(buf), 0,
                         AsSockAddr(&sendto_addr), param.sendto_addr_len),
      SyscallSucceedsWithValue(sizeof(buf)));
  struct sockaddr_storage got_addr = {};
  socklen_t got_addr_len = sizeof(sockaddr_storage);
  ASSERT_THAT(RetryEINTR(recvfrom)(s2.get(), buf, sizeof(buf), 0,
                                   AsSockAddr(&got_addr), &got_addr_len),
              SyscallSucceedsWithValue(sizeof(buf)));
  ASSERT_GT(got_addr_len, sizeof(sockaddr_in_common));
  int got_port = reinterpret_cast<sockaddr_in_common*>(&got_addr)->sin_port;
  // The source port seen by the receiver must match the sender's bound port.
  struct sockaddr_storage sender_addr = {};
  socklen_t sender_addr_len = sizeof(sockaddr_storage);
  ASSERT_THAT(getsockname(s1.get(), AsSockAddr(&sender_addr), &sender_addr_len),
              SyscallSucceeds());
  ASSERT_GT(sender_addr_len, sizeof(sockaddr_in_common));
  int sender_port =
      reinterpret_cast<sockaddr_in_common*>(&sender_addr)->sin_port;
  EXPECT_EQ(got_port, sender_port);
}
// Fills |addr| with the IPv4 loopback address (127.0.0.1) and |port| (stored
// verbatim, no byte-order conversion) and returns sizeof(sockaddr_in).
socklen_t Ipv4Addr(sockaddr_storage* addr, int port = 0) {
  sockaddr_in* const sin = reinterpret_cast<sockaddr_in*>(addr);
  sin->sin_family = AF_INET;
  sin->sin_port = port;
  inet_pton(AF_INET, "127.0.0.1", &sin->sin_addr.s_addr);
  return sizeof(*sin);
}
// Fills |addr| with the IPv6 loopback address (::1) and |port| (stored
// verbatim, no byte-order conversion) and returns sizeof(sockaddr_in6).
socklen_t Ipv6Addr(sockaddr_storage* addr, int port = 0) {
  sockaddr_in6* const sin6 = reinterpret_cast<sockaddr_in6*>(addr);
  sin6->sin6_family = AF_INET6;
  sin6->sin6_port = port;
  inet_pton(AF_INET6, "::1", &sin6->sin6_addr.s6_addr);
  return sizeof(*sin6);
}
// Fills |addr| with the IPv4-mapped IPv6 loopback (::ffff:127.0.0.1) and
// |port| (stored verbatim) and returns sizeof(sockaddr_in6).
socklen_t Ipv4MappedIpv6Addr(sockaddr_storage* addr, int port = 0) {
  sockaddr_in6* const sin6 = reinterpret_cast<sockaddr_in6*>(addr);
  sin6->sin6_family = AF_INET6;
  sin6->sin6_port = port;
  inet_pton(AF_INET6, "::ffff:127.0.0.1", &sin6->sin6_addr.s6_addr);
  return sizeof(*sin6);
}
// Instantiates SendtoTest with every sender/receiver family combination
// under test. Each immediately-invoked lambda builds one SendtoTestParam;
// cases with a non-empty |sendto_errnos| are expected to fail at sendto().
//
// FIX: this block had HTML-entity corruption ("&para;" mojibake) where
// `&param` belongs; all address-of expressions are restored below.
INSTANTIATE_TEST_SUITE_P(
    UdpBindTest, SendtoTest,
    ::testing::Values(
        []() {
          SendtoTestParam param = {};
          param.description = "IPv4 mapped IPv6 sendto IPv4 mapped IPv6";
          param.send_domain = AF_INET6;
          param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv6 sendto IPv6";
          param.send_domain = AF_INET6;
          param.send_addr_len = Ipv6Addr(&param.send_addr);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv4 sendto IPv4";
          param.send_domain = AF_INET;
          param.send_addr_len = Ipv4Addr(&param.send_addr);
          param.recv_domain = AF_INET;
          param.recv_addr_len = Ipv4Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv4 mapped IPv6 sendto IPv4";
          param.send_domain = AF_INET6;
          param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
          param.recv_domain = AF_INET;
          param.recv_addr_len = Ipv4Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv4 sendto IPv4 mapped IPv6";
          param.send_domain = AF_INET;
          param.send_addr_len = Ipv4Addr(&param.send_addr);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "unbound IPv6 sendto IPv4 mapped IPv6";
          param.send_domain = AF_INET6;
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "unbound IPv6 sendto IPv4";
          param.send_domain = AF_INET6;
          param.recv_domain = AF_INET;
          param.recv_addr_len = Ipv4Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv6 sendto IPv4";
          param.send_domain = AF_INET6;
          param.send_addr_len = Ipv6Addr(&param.send_addr);
          param.recv_domain = AF_INET;
          param.recv_addr_len = Ipv4Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          param.sendto_errnos = {ENETUNREACH};
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "IPv4 mapped IPv6 sendto IPv6";
          param.send_domain = AF_INET6;
          param.send_addr_len = Ipv4MappedIpv6Addr(&param.send_addr);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
          param.sendto_errnos = {EAFNOSUPPORT};
          // The errno returned changed in Linux commit c8e6ad0829a723.
          param.sendto_errnos = {EINVAL, EAFNOSUPPORT};
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "connected IPv4 mapped IPv6 sendto IPv6";
          param.send_domain = AF_INET6;
          param.connect_addr_len =
              Ipv4MappedIpv6Addr(&param.connect_addr, 5000);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv6Addr(&param.sendto_addr);
          // The errno returned changed in Linux commit c8e6ad0829a723.
          param.sendto_errnos = {EINVAL, EAFNOSUPPORT};
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "connected IPv6 sendto IPv4 mapped IPv6";
          // TODO(igudger): Determine if this inconsistent behavior is worth
          // implementing.
          param.skip_on_gvisor = true;
          param.send_domain = AF_INET6;
          param.connect_addr_len = Ipv6Addr(&param.connect_addr, 5000);
          param.recv_domain = AF_INET6;
          param.recv_addr_len = Ipv4MappedIpv6Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }(),
        []() {
          SendtoTestParam param = {};
          param.description = "connected IPv6 sendto IPv4";
          // TODO(igudger): Determine if this inconsistent behavior is worth
          // implementing.
          param.skip_on_gvisor = true;
          param.send_domain = AF_INET6;
          param.connect_addr_len = Ipv6Addr(&param.connect_addr, 5000);
          param.recv_domain = AF_INET;
          param.recv_addr_len = Ipv4Addr(&param.recv_addr);
          param.sendto_addr_len = Ipv4MappedIpv6Addr(&param.sendto_addr);
          return param;
        }()));
} // namespace
} // namespace testing
} // namespace gvisor
| 36.509615 | 80 | 0.656132 | [
"vector"
] |
f69169e93e938151c7b2a8131f89b097533ceb6b | 4,554 | cc | C++ | content/browser/media/media_internals.cc | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-02-03T05:19:48.000Z | 2021-11-15T15:07:21.000Z | content/browser/media/media_internals.cc | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | content/browser/media/media_internals.cc | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/media/media_internals.h"
#include "base/memory/scoped_ptr.h"
#include "base/string16.h"
#include "base/stringprintf.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/web_ui.h"
#include "media/base/media_log.h"
#include "media/base/media_log_event.h"
namespace content {
// Returns the process-wide MediaInternals singleton.
MediaInternals* MediaInternals::GetInstance() {
  return Singleton<MediaInternals>::get();
}
MediaInternals::~MediaInternals() {}
// Removes the bookkeeping entry for an audio stream that has gone away and
// notifies observers of the deletion.
void MediaInternals::OnDeleteAudioStream(void* host, int stream_id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  // Streams are keyed by the owning host pointer plus the per-host stream id.
  std::string stream = base::StringPrintf("audio_streams.%p:%d",
                                          host, stream_id);
  DeleteItem(stream);
}
// Records whether the given audio stream is currently playing and pushes the
// change to observers.
void MediaInternals::OnSetAudioStreamPlaying(
    void* host, int stream_id, bool playing) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  UpdateAudioStream(host, stream_id,
                    "playing", new base::FundamentalValue(playing));
}
// Records a free-form status string for the given audio stream and pushes
// the change to observers.
void MediaInternals::OnSetAudioStreamStatus(
    void* host, int stream_id, const std::string& status) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  UpdateAudioStream(host, stream_id,
                    "status", new base::StringValue(status));
}
// Records the current volume of the given audio stream and pushes the
// change to observers.
void MediaInternals::OnSetAudioStreamVolume(
    void* host, int stream_id, double volume) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  UpdateAudioStream(host, stream_id,
                    "volume", new base::FundamentalValue(volume));
}
// Fans out a batch of MediaLogEvents from a renderer process to all
// registered observers, one "media.onMediaEvent" update per event.
void MediaInternals::OnMediaEvents(
    int render_process_id, const std::vector<media::MediaLogEvent>& events) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  // Notify observers that |event| has occured.
  for (std::vector<media::MediaLogEvent>::const_iterator event = events.begin();
       event != events.end(); ++event) {
    base::DictionaryValue dict;
    dict.SetInteger("renderer", render_process_id);
    dict.SetInteger("player", event->id);
    dict.SetString("type", media::MediaLog::EventTypeToString(event->type));
    dict.SetDouble("time", event->time.ToDoubleT());
    dict.Set("params", event->params.DeepCopy());
    SendUpdate("media.onMediaEvent", &dict);
  }
}
// Registers |callback| to receive serialized JavaScript updates. Must be
// balanced by a later RemoveUpdateCallback().
void MediaInternals::AddUpdateCallback(const UpdateCallback& callback) {
  update_callbacks_.push_back(callback);
}
void MediaInternals::RemoveUpdateCallback(const UpdateCallback& callback) {
for (size_t i = 0; i < update_callbacks_.size(); ++i) {
if (update_callbacks_[i].Equals(callback)) {
update_callbacks_.erase(update_callbacks_.begin() + i);
return;
}
}
NOTREACHED();
}
// Sends the entire accumulated state dictionary to observers, e.g. when a
// new chrome://media-internals page attaches.
void MediaInternals::SendEverything() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  SendUpdate("media.onReceiveEverything", &data_);
}
// Private constructor; instances are only created via the Singleton in
// GetInstance().
MediaInternals::MediaInternals() {
}
// Helper: updates one |property| of the audio stream identified by
// (host, stream_id) and notifies observers via "media.addAudioStream".
void MediaInternals::UpdateAudioStream(void* host,
                                       int stream_id,
                                       const std::string& property,
                                       base::Value* value) {
  std::string stream = base::StringPrintf("audio_streams.%p:%d",
                                          host, stream_id);
  UpdateItem("media.addAudioStream", stream, property, value);
}
// Drops |item| from the state dictionary and tells observers via
// "media.onItemDeleted".
void MediaInternals::DeleteItem(const std::string& item) {
  data_.Remove(item, NULL);
  scoped_ptr<base::Value> value(new base::StringValue(item));
  SendUpdate("media.onItemDeleted", value.get());
}
// Sets |property| on the dictionary entry |id| (creating the entry on first
// use) and broadcasts the updated item via |update_fn|. |value| is stored
// in data_ (ownership presumably transfers to the dictionary per the
// base::Value API of this era — confirm against base/values.h).
void MediaInternals::UpdateItem(
    const std::string& update_fn, const std::string& id,
    const std::string& property, base::Value* value) {
  base::DictionaryValue* item_properties;
  if (!data_.GetDictionary(id, &item_properties)) {
    // First update for this id: create and register its property dictionary.
    item_properties = new base::DictionaryValue();
    data_.Set(id, item_properties);
    item_properties->SetString("id", id);
  }
  item_properties->Set(property, value);
  SendUpdate(update_fn, item_properties);
}
// Serializes a JavaScript call of |function| with |value| as its single
// argument and dispatches the resulting string to every registered observer.
void MediaInternals::SendUpdate(const std::string& function,
                                base::Value* value) {
  // Only bother serializing the update to JSON if someone is watching.
  if (update_callbacks_.empty())
    return;
  std::vector<const base::Value*> args;
  args.push_back(value);
  string16 update = WebUI::GetJavascriptCall(function, args);
  for (size_t i = 0; i < update_callbacks_.size(); i++)
    update_callbacks_[i].Run(update);
}
} // namespace content
| 34.5 | 80 | 0.688186 | [
"vector"
] |
f69a28b71b3bfd167346dfe91e31ed717519399a | 8,431 | cc | C++ | tensorflow/compiler/jit/flags.cc | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 4 | 2021-02-01T01:01:11.000Z | 2021-07-21T15:22:20.000Z | tensorflow/compiler/jit/flags.cc | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 8 | 2019-07-08T10:09:18.000Z | 2019-09-26T20:55:43.000Z | tensorflow/compiler/jit/flags.cc | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 1 | 2019-08-19T06:53:43.000Z | 2019-08-19T06:53:43.000Z | /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <mutex> // NOLINT
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/xla/parse_flags_from_env.h"
#include "tensorflow/core/util/command_line_flags.h"
namespace tensorflow {
namespace {
BuildXlaOpsPassFlags* build_ops_flags;
MarkForCompilationPassFlags* mark_for_compilation_flags;
XlaDeviceFlags* device_flags;
XlaOpsCommonFlags* ops_flags;
IntroduceFloatingPointJitterPassFlags* jitter_flags;
std::vector<Flag>* flag_list;
std::once_flag flags_init;
bool SetterForXlaAutoJitFlag(const string& value) {
int32 opt_level;
// We need to use the mark_for_compilation_flags directly here instead of
// going via GetMarkForCompilationPassFlags() to avoid infinite recursion. The
// latter will try to setup and parse flags, which would bring us back to this
// setter.
if (absl::SimpleAtoi(value, &opt_level)) {
mark_for_compilation_flags->xla_auto_jit_flag
.optimization_level_single_gpu = opt_level;
mark_for_compilation_flags->xla_auto_jit_flag.optimization_level_general =
opt_level;
return true;
}
absl::string_view value_sv(value);
if (!absl::ConsumePrefix(&value_sv, "single-gpu(") ||
!absl::ConsumeSuffix(&value_sv, ")") ||
!absl::SimpleAtoi(value_sv, &opt_level)) {
return false;
}
mark_for_compilation_flags->xla_auto_jit_flag.optimization_level_single_gpu =
opt_level;
return true;
}
// Appends the mark-for-compilation (autoclustering) flag definitions to
// |flag_list|. Shared by AllocateAndParseFlags() and the public
// AppendMarkForCompilationPassFlags().
void AppendMarkForCompilationPassFlagsInternal(std::vector<Flag>* flag_list) {
  std::vector<Flag> new_flags = {
      Flag("tf_xla_auto_jit", SetterForXlaAutoJitFlag, "0",
           "Control compilation of operators into XLA computations on CPU and "
           "GPU devices. 0 = use ConfigProto setting; -1 = off; 1 = on for "
           "things very likely to be improved; 2 = on for everything. "
           "If set to single-gpu(<N>) then this resolves to <N> for single-GPU "
           "graphs (graphs that have at least one node placed on a GPU and no "
           "more than one GPU is in use through the entire graph) and 0 "
           "otherwise. Experimental."),
      Flag("tf_xla_min_cluster_size",
           &mark_for_compilation_flags->tf_xla_min_cluster_size,
           "Minimum number of operators in an XLA compilation. Ignored for "
           "operators placed on an XLA device or operators explicitly marked "
           "for compilation."),
      Flag("tf_xla_max_cluster_size",
           &mark_for_compilation_flags->tf_xla_max_cluster_size,
           "Maximum number of operators in an XLA compilation."),
      Flag("tf_xla_clustering_debug",
           &mark_for_compilation_flags->tf_xla_clustering_debug,
           "Dump graphs during XLA compilation."),
      Flag("tf_xla_cpu_global_jit",
           &mark_for_compilation_flags->tf_xla_cpu_global_jit,
           "Enables global JIT compilation for CPU via SessionOptions."),
      Flag("tf_xla_clustering_fuel",
           &mark_for_compilation_flags->tf_xla_clustering_fuel,
           "Places an artificial limit on the number of ops marked as "
           "eligible for clustering."),
      Flag("tf_xla_disable_deadness_safety_checks_for_debugging",
           &mark_for_compilation_flags
                ->tf_xla_disable_deadness_safety_checks_for_debugging,
           "Disable deadness related safety checks when clustering (this is "
           "unsound)."),
      Flag("tf_xla_disable_resource_variable_safety_checks_for_debugging",
           &mark_for_compilation_flags
                ->tf_xla_disable_resource_variable_safety_checks_for_debugging,
           "Disable resource variables related safety checks when clustering "
           "(this is unsound).")};
  flag_list->insert(flag_list->end(), new_flags.begin(), new_flags.end());
}
// One-time initializer (invoked via std::call_once on flags_init): allocates
// all flag structs, fills in their defaults, registers the command-line flag
// descriptors, and then parses overrides from the TF_XLA_FLAGS environment
// variable.
void AllocateAndParseFlags() {
  // Defaults for BuildXlaOpsPass.
  build_ops_flags = new BuildXlaOpsPassFlags;
  build_ops_flags->tf_xla_enable_lazy_compilation = true;
  build_ops_flags->tf_xla_print_cluster_outputs = false;
  build_ops_flags->tf_xla_disable_constant_folding = false;
  // Defaults for MarkForCompilationPass (autoclustering).
  mark_for_compilation_flags = new MarkForCompilationPassFlags;
  mark_for_compilation_flags->xla_auto_jit_flag.optimization_level_single_gpu =
      0;
  mark_for_compilation_flags->xla_auto_jit_flag.optimization_level_general = 0;
  mark_for_compilation_flags->tf_xla_min_cluster_size = 4;
  mark_for_compilation_flags->tf_xla_max_cluster_size =
      std::numeric_limits<int32>::max();
  mark_for_compilation_flags->tf_xla_clustering_debug = false;
  mark_for_compilation_flags->tf_xla_cpu_global_jit = false;
  mark_for_compilation_flags->tf_xla_clustering_fuel =
      std::numeric_limits<int64>::max();
  mark_for_compilation_flags
      ->tf_xla_disable_deadness_safety_checks_for_debugging = false;
  mark_for_compilation_flags
      ->tf_xla_disable_resource_variable_safety_checks_for_debugging = false;
  // Defaults for XLA devices and common op flags.
  device_flags = new XlaDeviceFlags;
  device_flags->tf_xla_compile_on_demand = false;
  ops_flags = new XlaOpsCommonFlags;
  ops_flags->tf_xla_always_defer_compilation = false;
  // Defaults for the floating-point jitter debugging pass.
  jitter_flags = new IntroduceFloatingPointJitterPassFlags;
  jitter_flags->jitter_amount = 1e-5;
  // Custom setter: the flag value is a comma-separated list of tensor names.
  auto setter_for_jitter_tensor_names = [](string sequence) {
    jitter_flags->tensor_names = absl::StrSplit(sequence, ',');
    return true;
  };
  // Flag descriptors for everything above, plus the shared autoclustering
  // flags appended below.
  flag_list = new std::vector<Flag>(
      {Flag("tf_xla_enable_lazy_compilation",
            &build_ops_flags->tf_xla_enable_lazy_compilation, ""),
       Flag("tf_xla_print_cluster_outputs",
            &build_ops_flags->tf_xla_print_cluster_outputs,
            "If true then insert Print nodes to print out values produced by "
            "XLA clusters."),
       Flag("tf_xla_compile_on_demand", &device_flags->tf_xla_compile_on_demand,
            "Switch a device into 'on-demand' mode, where instead of "
            "autoclustering ops are compiled one by one just-in-time."),
       Flag("tf_xla_always_defer_compilation",
            &ops_flags->tf_xla_always_defer_compilation, ""),
       Flag("tf_introduce_floating_point_jitter_to_tensors",
            setter_for_jitter_tensor_names, "",
            "The Tensors to add the jitter to. The tensors are named in the "
            "TensorId format of <node name>:<output idx>."),
       Flag("tf_introduce_floating_point_jitter_amount",
            &jitter_flags->jitter_amount,
            "The amount of jitter to introduce. This amount is added to each "
            "element in the tensors named in `tensor_names.")});
  AppendMarkForCompilationPassFlagsInternal(flag_list);
  xla::ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", *flag_list);
}
} // namespace
// Programmatic equivalent of passing --tf_xla_auto_jit=<value>; ensures the
// flag objects exist before delegating to the setter.
bool SetXlaAutoJitFlagFromFlagString(const string& value) {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return SetterForXlaAutoJitFlag(value);
}
// Returns the BuildXlaOpsPass flags, parsing TF_XLA_FLAGS on first use.
BuildXlaOpsPassFlags* GetBuildXlaOpsPassFlags() {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return build_ops_flags;
}
// Returns the MarkForCompilationPass flags, parsing TF_XLA_FLAGS on first use.
MarkForCompilationPassFlags* GetMarkForCompilationPassFlags() {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return mark_for_compilation_flags;
}
// Returns the XLA device flags, parsing TF_XLA_FLAGS on first use.
XlaDeviceFlags* GetXlaDeviceFlags() {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return device_flags;
}
// Returns the common XLA op flags, parsing TF_XLA_FLAGS on first use.
const XlaOpsCommonFlags& GetXlaOpsCommonFlags() {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return *ops_flags;
}
// Returns the floating-point-jitter pass flags, parsing TF_XLA_FLAGS on
// first use.
const IntroduceFloatingPointJitterPassFlags&
GetIntroduceFloatingPointJitterPassFlags() {
  std::call_once(flags_init, &AllocateAndParseFlags);
  return *jitter_flags;
}
// Public entry point: appends the autoclustering flag descriptors to
// |flag_list| after making sure the flag objects exist.
void AppendMarkForCompilationPassFlags(std::vector<Flag>* flag_list) {
  std::call_once(flags_init, &AllocateAndParseFlags);
  AppendMarkForCompilationPassFlagsInternal(flag_list);
}
} // namespace tensorflow
| 40.927184 | 80 | 0.7355 | [
"vector"
] |
f69ac7cd4ef640d6b220f65e55f76f44b960a34a | 8,388 | cpp | C++ | 3rd/byzauth/libbpka/test/testprotocolparams.cpp | ShoufuLuo/csaw | 0d030d5ab93e61b62dff10b27a15c83fcfce3ff3 | [
"Apache-2.0"
] | null | null | null | 3rd/byzauth/libbpka/test/testprotocolparams.cpp | ShoufuLuo/csaw | 0d030d5ab93e61b62dff10b27a15c83fcfce3ff3 | [
"Apache-2.0"
] | null | null | null | 3rd/byzauth/libbpka/test/testprotocolparams.cpp | ShoufuLuo/csaw | 0d030d5ab93e61b62dff10b27a15c83fcfce3ff3 | [
"Apache-2.0"
] | null | null | null | #include "messageprocessor.h"
#include "byzantineauthenticationadapter.h"
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <cstring>
#include <sys/stat.h>
#include <sys/types.h>
#include <zlib.h>
#define MAX_NAME 1024
#define KEYBITS 128
/**
* Program takes in an email trace to simulate the progress of
* byzantine fault tolerant authentication on the work load
* tries various combinations of input variables: tgs expiry payload
*
* Format of the email trace file:
* timeofmail sizeofmail mesgid fromwho towho ccwho ... \n
*
* Logic
* Compute all senders and receivers and make an adapter for each.
* Load all the messages into sequence by time.
* Execute the work load in a synchronous manner using the
* receive call at end of every call
*/
int linebufsize = 64*1024;
char * linebuf = new char [linebufsize];
// One email from the trace file.
struct MessageTrace
{
    std::string from;             // normalized (lowercased) sender address
    std::vector<std::string> to;  // normalized recipient addresses (to + cc)
    unsigned date;                // send time from the trace; ordering key
    unsigned size;                // message size in bytes
};
std::set<std::string> peers;
std::map<unsigned, MessageTrace> message_trace;
std::map<std::string, std::vector<std::string> > trustgroups;
std::map<std::string, ByzantineAuthenticationAdapter *> peer_map;
void transmit_outstanding_messages();
// Returns a lowercased copy of |who|. Used to normalize email addresses so
// that lookups in the peer/trust-group maps are case-insensitive.
//
// FIX: the previous implementation strcpy'd into a fixed char[MAX_NAME]
// buffer, overflowing the stack for any name of MAX_NAME or more characters.
// It also passed (possibly negative) plain char to isupper/tolower, which is
// undefined behavior; cast through unsigned char.
std::string trim(std::string who)
{
    for( std::string::size_type i = 0; i < who.size(); i++ )
        if( isupper( static_cast<unsigned char>(who[i]) ) )
            who[i] = static_cast<char>( tolower( static_cast<unsigned char>(who[i]) ) );
    return who;
}
int get_compr_size(const std::string& mesg)
{
uLongf destLen = (mesg.size() + 12) + (mesg.size()/4);
Bytef *dest = new Bytef[destLen];
const Bytef *source = (Bytef *) mesg.data();
uLong sourceLen = mesg.size();
int ret = 0;
if( Z_OK != (ret = compress(dest, &destLen, source, sourceLen)))
{
std::cerr << "Error in compress " << ret << std::endl;
abort() ;
}
delete [] dest;
return destLen*3/2; // for base 64 encoding
}
// Parses one whitespace-separated email-trace file into the global
// message_trace map (keyed by timestamp) and the global peers set.
// Expected line format: timeofmail sizeofmail mesgid fromwho towho ccwho...
// Returns false if the file cannot be opened.
//
// FIXES: the FILE* was never fclose'd (descriptor leak, one per trace file);
// three local counter maps (woutcount/outcount/incount) were populated but
// never read, so they have been removed.
bool load_trace(char * filename)
{
    FILE * fp = fopen(filename, "r");
    if(!fp)
        return false;
    const char * tb = " \t\n" ;
    while(fgets(linebuf, linebufsize, fp))
    {
        char * p = 0;
        MessageTrace curr;
        for( int tn = 0; 1; tn++)
        {
            // strtok takes the buffer on the first call, NULL afterwards.
            p = strtok( tn ? 0 : linebuf, tb ) ;
            if(!p)
                break;
            switch(tn)
            {
                case 0: curr.date = atoi(p);
                        break;
                case 1: curr.size = atoi(p);
                        break;
                case 2: break; // message id: unused
                case 3: curr.from = trim(p);
                        peers.insert(curr.from);
                        break;
                default:
                        // Fourth token onwards: recipients (to + cc).
                        curr.to.push_back(trim(p));
                        peers.insert(trim(p));
                        break;
            }
        }
        message_trace.insert(std::make_pair(curr.date, curr));
    }
    fclose(fp);  // was leaked previously
    return true;
}
/**
 * Entry point: loads the email traces and trust-group file, prunes peers
 * that have no usable trust group, then sweeps the (tgs, expiry, payload)
 * parameter space, replaying the whole trace once per combination via run().
 *
 * FIXES:
 *  - the payload loop condition was `payload = 10000` (assignment, always
 *    true) instead of a comparison, so the loop never terminated;
 *  - run() was used before any declaration; a local prototype is added;
 *  - the trust-group FILE* was unchecked and never fclose'd;
 *  - sprintf replaced by bounded snprintf.
 */
int main(int argc, char * argv[])
{
    // run() is defined later in this translation unit.
    void run( char * filename, int tgs, int expiry, int payload );
    if( argc != 7 )
    {
        std::cout << "Usage " << argv[0] << " <bsfile> <msgtrace> <keysz> " << std::endl;
        return 1;
    }
    // load all the traces (argv[2]..argv[6])
    for( int i = 2; i < argc; i++ )
        load_trace( argv[i] ) ;
    // load trusted group information from argv[1]
    FILE * bfp = fopen( argv[1] , "r" ) ;
    if( !bfp )
    {
        std::cerr << "cannot open trust-group file " << argv[1] << std::endl;
        return 1;
    }
    while( fgets( linebuf, linebufsize, bfp ) )
    {
        char * p = strtok( linebuf, " \t\n" ) ;
        if(p && peers.find(p) != peers.end())
        {
            std::string self = p;
            int count = 0;
            // Keep at most atoi(argv[3]) trusted peers that also appear in the
            // trace (argv[3] doubles as the group-size bound here -- presumably
            // intentional given the usage string; TODO confirm).
            while( (p = strtok( 0, " \t\n" )) && peers.find(p) != peers.end() && count < atoi(argv[3]) )
            {
                count++;
                trustgroups[self].push_back(std::string(p));
            }
            // Groups smaller than 4 are too small to be useful; drop them.
            if(count < 4)
                trustgroups.erase(self);
        }
    }
    fclose( bfp ) ;  // was leaked previously
    std::cout << "loaded trust groups" << std::endl ;
    // erase peers who have no trust groups; save the successor iterator
    // before erasing since std::set::erase invalidates the erased position
    for( std::set<std::string>::iterator i = peers.begin() ; i != peers.end() ; )
    {
        std::set<std::string>::iterator j = i;
        j++;
        if( trustgroups.find(*i) == trustgroups.end() )
            peers.erase(*i);
        i = j;
    }
    std::cout << "trimmed messages in trace " << peers.size() << " peers left " << std::endl ;
    for( int tgs = 4; tgs < 129; tgs *= 2 )
        for( int expiry = 3; expiry < 95; expiry *= 3 ) // days
            for( int payload = 10; payload <= 10000; payload *= 4 ) // was `payload = 10000`: infinite loop
            {
                system( "rm -rf peerdata/*" ) ;
                char filename[1024];
                snprintf( filename, sizeof(filename), "tgs%d.expiry%d.payload%d.data", tgs, expiry, payload ) ;
                run( filename, tgs, expiry, payload ) ;
            }
    return 0;
}
void run( char * filename, int tgs, int expiry, int payload)
{
// initialize all adapters
mkdir( "./peerdata", 0770 ) ;
for( std::set<std::string>::iterator i = peers.begin() ; i != peers.end() ; i++ )
{
ByzantineAuthenticationAdapter * adapter =
ByzantineAuthenticationAdapter::New( tgs, 86400*expiry, payload) ;
std::string filename = *i + ".xml";
filename = "./peerdata/" + filename;
peerid_t me = *i;
if(!adapter->Init(atoi(argv[6]), me, trustgroups[*i], filename.c_str()))
return false;
peer_map.insert( std::make_pair( *i, adapter ) ) ;
}
std::cout << std::endl;
std::cout << "tgs = " << tgs << "\t expiry = " << expiry << "\tpayload = " << std::endl ;
std::cout << "initialize all adapters" << std::endl ;
// run the trace
FILE * statfp = fopen( filename, "w" ) ;
int done = 0, max = message_trace.size();
int percent, prevpercent = 0;
for( std::map<unsigned, MessageTrace>::iterator i = message_trace.begin(); i != message_trace.end() ; i++ )
{
done++;
percent = int( 100.0*done/max ) ;
if( (prevpercent-percent) > 4 )
{
std::cout << percent << " % done " << std::endl;
prevpercent = percent;
}
std::cout << done << " of " << max << " done\r" << std::flush ;
if( peer_map.find(i->second.from) == peer_map.end() )
continue;
// the sender
ByzantineAuthenticationAdapter * pfrom = peer_map[i->second.from];
pfrom->LocalTime (i->second.date) ;
// gather message
std::string mesg;
std::vector<std::string> outgoingtrimmed;
for( std::vector<std::string>::iterator j = i->second.to.begin(); j != i->second.to.end(); j++ )
if( peer_map.find(*j) != peer_map.end() )
outgoingtrimmed.push_back(*j) ;
pfrom->SendMulti( outgoingtrimmed, mesg ) ; // many send
int overhead = get_compr_size(mesg);
fprintf( statfp, "SENDER=%s TIME=%lu NUMDEST=%lu OVERHEAD=%d BASEOVERHEAD=%lu SIZE=%lu\n" ,
i->second.from.c_str(),
(unsigned long) i->second.date,
(unsigned long) outgoingtrimmed.size(),
overhead,
(unsigned long)mesg.size(),
(unsigned long)i->second.size ) ;
// send to each receiver
for( std::vector<std::string>::iterator j = i->second.to.begin(); j != i->second.to.end(); j++ )
{
std::string pubkey;
bool unknown;
int trust_value;
if( peer_map.find(*j) == peer_map.end() )
continue;
// already doing inside
pfrom->GetAuthenticatedKey(*j, pubkey, trust_value, unknown);
ByzantineAuthenticationAdapter * pto = peer_map[*j];
pto->LocalTime(i->second.date) ;
pto->Recv( i->second.from, mesg ) ;
}
}
// de init all adapters
for( std::map<std::string, ByzantineAuthenticationAdapter *>::iterator i = peer_map.begin();
i != peer_map.end(); i++ )
{
i->second->DeInit();
delete i->second;
}
fclose(statfp);
| 26.544304 | 111 | 0.528851 | [
"vector"
] |
f69b8559e9235797639384ae3bb9cd886e55b9af | 14,390 | hpp | C++ | ndn-cxx/interest.hpp | Zhiyi-Zhang/bls-over-ndn-cxx | ad84f30b6750d2344e2a83c8f2e8461d682e56b1 | [
"OpenSSL"
] | null | null | null | ndn-cxx/interest.hpp | Zhiyi-Zhang/bls-over-ndn-cxx | ad84f30b6750d2344e2a83c8f2e8461d682e56b1 | [
"OpenSSL"
] | null | null | null | ndn-cxx/interest.hpp | Zhiyi-Zhang/bls-over-ndn-cxx | ad84f30b6750d2344e2a83c8f2e8461d682e56b1 | [
"OpenSSL"
] | null | null | null | /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2013-2020 Regents of the University of California.
*
* This file is part of ndn-cxx library (NDN C++ library with eXperimental eXtensions).
*
* ndn-cxx library is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later version.
*
* ndn-cxx library is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
*
* You should have received copies of the GNU General Public License and GNU Lesser
* General Public License along with ndn-cxx, e.g., in COPYING.md file. If not, see
* <http://www.gnu.org/licenses/>.
*
* See AUTHORS.md for complete list of ndn-cxx authors and contributors.
*/
#ifndef NDN_INTEREST_HPP
#define NDN_INTEREST_HPP
#include "ndn-cxx/delegation-list.hpp"
#include "ndn-cxx/detail/packet-base.hpp"
#include "ndn-cxx/name.hpp"
#include "ndn-cxx/util/string-helper.hpp"
#include "ndn-cxx/util/time.hpp"
#include <array>
#include <boost/endian/conversion.hpp>
#include <boost/logic/tribool.hpp>
namespace ndn {
class Data;
/** @var const unspecified_duration_type DEFAULT_INTEREST_LIFETIME;
* @brief default value for InterestLifetime
*/
const time::milliseconds DEFAULT_INTEREST_LIFETIME = 4_s;
/** @brief Represents an Interest packet.
*/
class Interest : public PacketBase, public std::enable_shared_from_this<Interest>
{
public:
class Error : public tlv::Error
{
public:
using tlv::Error::Error;
};
  /** @brief 4-octet value carried in the Nonce element of an Interest.
   *
   *  Thin wrapper over std::array<uint8_t, 4> adding convenience
   *  constructors plus equality comparison and hex stream output.
   */
  class Nonce final : public std::array<uint8_t, 4>
  {
    using Base = std::array<uint8_t, 4>;
  public:
    Nonce() = default;
    // implicit conversion from uint32_t
    Nonce(uint32_t n) noexcept
    {
      // Store in big-endian order so the in-memory octets are independent of
      // host endianness.
      boost::endian::native_to_big_inplace(n);
      std::memcpy(data(), &n, sizeof(n));
    }
    /// Construct from four explicit octets, stored in the given order.
    Nonce(uint8_t n1, uint8_t n2, uint8_t n3, uint8_t n4) noexcept
    {
      data()[0] = n1;
      data()[1] = n2;
      data()[2] = n3;
      data()[3] = n4;
    }
  private: // non-member operators
    // NOTE: the following "hidden friend" operators are available via
    // argument-dependent lookup only and must be defined inline.
    friend bool
    operator==(const Nonce& lhs, const Nonce& rhs) noexcept
    {
      return static_cast<const Base&>(lhs) == static_cast<const Base&>(rhs);
    }
    friend bool
    operator!=(const Nonce& lhs, const Nonce& rhs) noexcept
    {
      return static_cast<const Base&>(lhs) != static_cast<const Base&>(rhs);
    }
    /// Writes the four octets as hexadecimal via printHex().
    friend std::ostream&
    operator<<(std::ostream& os, const Nonce& nonce)
    {
      printHex(os, nonce.data(), nonce.size(), false);
      return os;
    }
  };
/** @brief Construct an Interest with given @p name and @p lifetime.
*
* @throw std::invalid_argument @p name is invalid or @p lifetime is negative
* @warning In certain contexts that use `Interest::shared_from_this()`, Interest must be created
* using `make_shared`. Otherwise, `shared_from_this()` will trigger undefined behavior.
*/
explicit
Interest(const Name& name = Name(), time::milliseconds lifetime = DEFAULT_INTEREST_LIFETIME);
/** @brief Construct an Interest by decoding from @p wire.
*
* @warning In certain contexts that use `Interest::shared_from_this()`, Interest must be created
* using `make_shared`. Otherwise, `shared_from_this()` will trigger undefined behavior.
*/
explicit
Interest(const Block& wire);
/** @brief Prepend wire encoding to @p encoder according to NDN Packet Format v0.3.
*/
template<encoding::Tag TAG>
size_t
wireEncode(EncodingImpl<TAG>& encoder) const;
/** @brief Encode into a Block according to NDN Packet Format v0.3.
*/
const Block&
wireEncode() const;
/** @brief Decode from @p wire according to NDN Packet Format v0.3.
*/
void
wireDecode(const Block& wire);
/** @brief Check if this instance has cached wire encoding.
*/
bool
hasWire() const
{
return m_wire.hasWire();
}
/** @brief Return a URI-like string that represents the Interest.
*
* The string always starts with `getName().toUri()`. After the name, if any of the
* Interest's CanBePrefix, MustBeFresh, Nonce, InterestLifetime, or HopLimit fields
* are present, their textual representation is appended as a query string.
* Example: "/test/name?MustBeFresh&Nonce=123456"
*/
std::string
toUri() const;
public: // matching
/** @brief Check if Interest can be satisfied by @p data.
*
* This method considers Name, CanBePrefix, and MustBeFresh. However, MustBeFresh processing
* is limited to rejecting Data with zero/omitted FreshnessPeriod.
*/
bool
matchesData(const Data& data) const;
/** @brief Check if this Interest matches @p other
*
* Two Interests match if both have the same Name, CanBePrefix, and MustBeFresh.
*/
bool
matchesInterest(const Interest& other) const;
public: // element access
const Name&
getName() const noexcept
{
return m_name;
}
/** @brief Set the Interest's name.
* @throw std::invalid_argument @p name is invalid
*/
Interest&
setName(const Name& name);
/** @brief Declare the default CanBePrefix setting of the application.
*
* As part of transitioning to NDN Packet Format v0.3, the default setting for CanBePrefix
* will be changed from "true" to "false". Application developers are advised to review all
* Interests expressed by their application and decide what CanBePrefix setting is appropriate
* for each Interest, to avoid breaking changes when the transition occurs. Application may
* either set CanBePrefix on a per-Interest basis, or declare a default CanBePrefix setting for
* all Interests expressed by the application using this function. If an application neither
* declares a default nor sets CanBePrefix on every Interest, Interest::wireEncode will print a
* one-time warning message.
*
* @note This function should not be used in libraries or in ndn-cxx unit tests.
* @sa https://redmine.named-data.net/projects/nfd/wiki/Packet03Transition
*/
static void
setDefaultCanBePrefix(bool canBePrefix)
{
s_defaultCanBePrefix = canBePrefix;
}
/** @brief Check whether the CanBePrefix element is present.
*/
bool
getCanBePrefix() const noexcept
{
return m_canBePrefix;
}
/** @brief Add or remove CanBePrefix element.
* @param canBePrefix whether CanBePrefix element should be present.
*/
Interest&
setCanBePrefix(bool canBePrefix)
{
m_canBePrefix = canBePrefix;
m_wire.reset();
m_isCanBePrefixSet = true;
return *this;
}
/** @brief Check whether the MustBeFresh element is present.
*/
bool
getMustBeFresh() const noexcept
{
return m_mustBeFresh;
}
/** @brief Add or remove MustBeFresh element.
* @param mustBeFresh whether MustBeFresh element should be present.
*/
Interest&
setMustBeFresh(bool mustBeFresh)
{
m_mustBeFresh = mustBeFresh;
m_wire.reset();
return *this;
}
const DelegationList&
getForwardingHint() const noexcept
{
return m_forwardingHint;
}
Interest&
setForwardingHint(const DelegationList& value);
/** @brief Modify ForwardingHint in-place.
* @tparam Modifier a unary function that accepts DelegationList&
*
* This is equivalent to, but more efficient (avoids copying) than:
* @code
* auto fh = interest.getForwardingHint();
* modifier(fh);
* interest.setForwardingHint(fh);
* @endcode
*/
template<typename Modifier>
Interest&
modifyForwardingHint(const Modifier& modifier)
{
modifier(m_forwardingHint);
m_wire.reset();
return *this;
}
/** @brief Check if the Nonce element is present.
*/
bool
hasNonce() const noexcept
{
return m_nonce.has_value();
}
/** @brief Get nonce value.
*
* If nonce was not present, it is added and assigned a random value.
*/
Nonce
getNonce() const;
/** @brief Set the Interest's nonce.
*
* Use `setNonce(nullopt)` to remove any nonce from the Interest.
*/
Interest&
setNonce(optional<Nonce> nonce);
/** @brief Change nonce value.
*
* If the Nonce element is present, the new nonce value will differ from the old value.
* If the Nonce element is not present, this method does nothing.
*/
void
refreshNonce();
time::milliseconds
getInterestLifetime() const noexcept
{
return m_interestLifetime;
}
/** @brief Set the Interest's lifetime.
* @throw std::invalid_argument @p lifetime is negative
*/
Interest&
setInterestLifetime(time::milliseconds lifetime);
optional<uint8_t>
getHopLimit() const noexcept
{
return m_hopLimit;
}
/** @brief Set the Interest's hop limit.
*
* Use `setHopLimit(nullopt)` to remove any hop limit from the Interest.
*/
Interest&
setHopLimit(optional<uint8_t> hopLimit);
bool
hasApplicationParameters() const noexcept
{
return !m_parameters.empty();
}
Block
getApplicationParameters() const
{
if (m_parameters.empty())
return {};
else
return m_parameters.front();
}
/** @brief Set ApplicationParameters from a Block.
* @return a reference to this Interest
*
* If the block is default-constructed, this will set a zero-length ApplicationParameters
* element. Else, if the block's TLV-TYPE is ApplicationParameters, it will be used directly
* as this Interest's ApplicationParameters element. Else, the block will be nested into an
* ApplicationParameters element.
*
* This function will also recompute the value of the ParametersSha256DigestComponent in the
* Interest's name. If the name does not contain a ParametersSha256DigestComponent, one will
* be appended to it.
*/
Interest&
setApplicationParameters(const Block& parameters);
/** @brief Set ApplicationParameters by copying from a raw buffer.
* @param value points to a buffer from which the TLV-VALUE of the parameters will be copied;
* may be nullptr if @p length is zero
* @param length size of the buffer
* @return a reference to this Interest
*
* This function will also recompute the value of the ParametersSha256DigestComponent in the
* Interest's name. If the name does not contain a ParametersSha256DigestComponent, one will
* be appended to it.
*/
Interest&
setApplicationParameters(const uint8_t* value, size_t length);
/** @brief Set ApplicationParameters from a shared buffer.
* @param value buffer containing the TLV-VALUE of the parameters; must not be nullptr
* @return a reference to this Interest
*
* This function will also recompute the value of the ParametersSha256DigestComponent in the
* Interest's name. If the name does not contain a ParametersSha256DigestComponent, one will
* be appended to it.
*/
Interest&
setApplicationParameters(ConstBufferPtr value);
/** @brief Remove the ApplicationParameters element from this Interest.
* @post hasApplicationParameters() == false
*
* This function will also remove any ParametersSha256DigestComponents from the Interest's name.
*/
Interest&
unsetApplicationParameters();
public: // ParametersSha256DigestComponent support
static bool
getAutoCheckParametersDigest()
{
return s_autoCheckParametersDigest;
}
static void
setAutoCheckParametersDigest(bool b)
{
s_autoCheckParametersDigest = b;
}
/** @brief Check if the ParametersSha256DigestComponent in the name is valid.
*
* Returns true if there is a single ParametersSha256DigestComponent in the name and the digest
* value is correct, or if there is no ParametersSha256DigestComponent in the name and the
* Interest does not contain any parameters.
* Returns false otherwise.
*/
bool
isParametersDigestValid() const;
private:
void
setApplicationParametersInternal(Block parameters);
NDN_CXX_NODISCARD shared_ptr<Buffer>
computeParametersDigest() const;
/** @brief Append a ParametersSha256DigestComponent to the Interest's name
* or update the digest value in the existing component.
*
* @pre The name is assumed to be valid, i.e., it must not contain more than one
* ParametersSha256DigestComponent.
* @pre hasApplicationParameters() == true
*/
void
addOrReplaceParametersDigestComponent();
/** @brief Return the index of the ParametersSha256DigestComponent in @p name.
*
* @retval pos The name contains exactly one ParametersSha256DigestComponent at index `pos`.
* @retval -1 The name contains zero ParametersSha256DigestComponents.
* @retval -2 The name contains more than one ParametersSha256DigestComponents.
*/
static ssize_t
findParametersDigestComponent(const Name& name);
#ifdef NDN_CXX_HAVE_TESTS
public:
/// If true, not setting CanBePrefix results in an error in wireEncode().
static bool s_errorIfCanBePrefixUnset;
#endif // NDN_CXX_HAVE_TESTS
private:
static boost::logic::tribool s_defaultCanBePrefix;
static bool s_autoCheckParametersDigest;
Name m_name;
DelegationList m_forwardingHint;
mutable optional<Nonce> m_nonce;
time::milliseconds m_interestLifetime;
optional<uint8_t> m_hopLimit;
mutable bool m_isCanBePrefixSet = false;
bool m_canBePrefix = true;
bool m_mustBeFresh = false;
// Stores the "Interest parameters", i.e., all maybe-unrecognized non-critical TLV
// elements that appear at the end of the Interest, starting from ApplicationParameters.
// If the Interest does not contain any ApplicationParameters TLV, this vector will
// be empty. Conversely, if this vector is not empty, the first element will always
// be an ApplicationParameters block. All blocks in this vector are covered by the
// digest in the ParametersSha256DigestComponent.
std::vector<Block> m_parameters;
mutable Block m_wire;
};
NDN_CXX_DECLARE_WIRE_ENCODE_INSTANTIATIONS(Interest);
std::ostream&
operator<<(std::ostream& os, const Interest& interest);
} // namespace ndn
#endif // NDN_INTEREST_HPP
| 30.422833 | 100 | 0.709659 | [
"vector"
] |
f69dd60ad960c16484bf9c7fa1ed850831d6c25c | 1,018 | cpp | C++ | cpp_algs/local_minima.cpp | vitalir2/AlgorithmsCpp | f9a1b7a0b51c6f122ff600008d2c0ef72a26502f | [
"MIT"
] | null | null | null | cpp_algs/local_minima.cpp | vitalir2/AlgorithmsCpp | f9a1b7a0b51c6f122ff600008d2c0ef72a26502f | [
"MIT"
] | null | null | null | cpp_algs/local_minima.cpp | vitalir2/AlgorithmsCpp | f9a1b7a0b51c6f122ff600008d2c0ef72a26502f | [
"MIT"
] | null | null | null | #include <algorithm>
#include <iostream>
#include <vector>
#include "utils/utils.h"
#include "utils/container_out.h"
// O(2log n)
int localMin(const std::vector<int>& a) {
size_t l = 0;
size_t r = a.size()-1;
while (l < r) {
int mid = (l + r) / 2;
int left_n = std::max(mid-1, 0);
int right_n = std::min(mid+1, static_cast<int>(a.size() - 1));
if ((a[left_n] > a[mid] || left_n == mid)
&& (a[mid] < a[right_n] || mid == right_n)) {
return mid;
} else {
int min = std::min(a[left_n], a[right_n]);
if (min == a[left_n]) {
r = mid;
} else {
l = mid;
}
}
}
return -1; // no local min
}
int main() {
size_t n = 0;
std::cin >> n;
std::vector<int> a(n);
utils::generateRandomVector(a, n);
utils::uniqueAndShuffle(a);
int min_id = localMin(a);
if (min_id != -1) {
std::cout << a[min_id] << " is local minima of the array\n";
} else {
std::cout << "There is no local minima in the array\n";
}
return 0;
}
| 21.208333 | 66 | 0.534381 | [
"vector"
] |
f69fe8df8201e8bb9b33e76d4525ecbdc4081d26 | 800 | cpp | C++ | src/qsort-vector.cpp | conao3/n-version-programming | 8cf4d2a25b796187a2910b8677666ad0a49cdfce | [
"MIT"
] | null | null | null | src/qsort-vector.cpp | conao3/n-version-programming | 8cf4d2a25b796187a2910b8677666ad0a49cdfce | [
"MIT"
] | null | null | null | src/qsort-vector.cpp | conao3/n-version-programming | 8cf4d2a25b796187a2910b8677666ad0a49cdfce | [
"MIT"
] | null | null | null | #include <cstdio>
#include <vector>
#include <iostream>
using namespace std;
// https://ja.wikipedia.org/wiki/%E3%82%AF%E3%82%A4%E3%83%83%E3%82%AF%E3%82%BD%E3%83%BC%E3%83%88
template <class RandomAccessIterator>
void quicksort(RandomAccessIterator left, RandomAccessIterator right) {
if (distance(left, right) > 0) {
RandomAccessIterator i = left, j = right-1;
int pivot = *i;
while (1) {
while (*i < pivot) i++;
while (pivot < *j) j--;
if (distance(i, j) < 1) break;
iter_swap(i, j);
i++, j--;
}
quicksort(left, i);
quicksort(j + 1, right);
}
}
int main() {
int elm;
vector<int> lst;
while (cin >> elm) lst.push_back(elm);
quicksort(lst.begin(), lst.end());
for (auto elm: lst) printf("%d ", elm); printf("\n");
return 0;
}
| 22.857143 | 96 | 0.60125 | [
"vector"
] |
f6a43b36f699b08c3fd6183efdb4406d0ffd7acc | 32,465 | cpp | C++ | packages/arb-avm-cpp/avm/src/machinestate/machinestate.cpp | cy92830/arbitrum | b8366005a697000dda1f57a78a7bdb2313db8fe2 | [
"Apache-2.0"
] | null | null | null | packages/arb-avm-cpp/avm/src/machinestate/machinestate.cpp | cy92830/arbitrum | b8366005a697000dda1f57a78a7bdb2313db8fe2 | [
"Apache-2.0"
] | null | null | null | packages/arb-avm-cpp/avm/src/machinestate/machinestate.cpp | cy92830/arbitrum | b8366005a697000dda1f57a78a7bdb2313db8fe2 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2019-2021, Offchain Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <avm/machinestate/machinestate.hpp>
#include <avm/machine.hpp>
#include <avm/machinestate/machineoperation.hpp>
#include <avm_values/exceptions.hpp>
#include <avm_values/vmValueParser.hpp>
#include <utility>
#include <ethash/keccak.hpp>
#include <iostream>
namespace {
// Sentinel meaning "effectively unlimited" ArbGas. Fresh machines start with
// this allowance, and error transitions reset to it when gas underflows.
uint256_t max_arb_gas_remaining = std::numeric_limits<uint256_t>::max();
}  // namespace
// Build a per-assertion execution context from an execution config, moving
// the (possibly large) inbox message and sideload collections in.
// inbox_messages_consumed starts at zero; it is advanced as messages are read.
AssertionContext::AssertionContext(MachineExecutionConfig config)
    : inbox_messages(std::move(config.inbox_messages)),
      sideloads(std::move(config.sideloads)),
      stop_on_sideload(config.stop_on_sideload),
      max_gas(config.max_gas),
      go_over_gas(config.go_over_gas),
      inbox_messages_consumed(0) {}
// Snapshot the hashable summary of a live machine: each large component
// (static value, register, both stacks) is reduced to its hash so the keys
// object is cheap to store and compare.
MachineStateKeys::MachineStateKeys(const MachineState& machine)
    : output(machine.output),
      pc(machine.pc, machine.loadCurrentInstruction()),
      static_hash(hash_value(machine.static_val)),
      register_hash(hash_value(machine.registerVal)),
      datastack_hash(machine.stack.hash()),
      auxstack_hash(machine.auxstack.hash()),
      arb_gas_remaining(machine.arb_gas_remaining),
      state(machine.state),
      err_pc(machine.errpc) {}
// Compute the canonical machine-state hash. Halted machines hash to the
// fixed sentinel 0 and errored machines to 1; otherwise the hash is
// keccak256 over seven big-endian 32-byte words in this exact order:
// pc hash, datastack hash, auxstack hash, register hash, static hash,
// remaining ArbGas, error-handler codepoint hash. The word order is part of
// the on-chain protocol and must not change.
uint256_t MachineStateKeys::machineHash() const {
    if (state == Status::Halted)
        return 0;
    if (state == Status::Error)
        return 1;
    std::array<unsigned char, 32 * 7> data{};
    auto oit = data.begin();
    {
        auto val = ::hash(pc);
        oit = to_big_endian(val, oit);
    }
    { oit = to_big_endian(datastack_hash, oit); }
    { oit = to_big_endian(auxstack_hash, oit); }
    { oit = to_big_endian(register_hash, oit); }
    { oit = to_big_endian(static_hash, oit); }
    { oit = to_big_endian(arb_gas_remaining, oit); }
    {
        auto val = ::hash_value(err_pc);
        oit = to_big_endian(val, oit);
    }
    // All seven 32-byte words must have been written exactly.
    assert(oit == data.end());
    auto hash_val = ethash::keccak256(data.data(), data.size());
    return intx::be::load<uint256_t>(hash_val);
}
// Record that an inbox message has been consumed by folding it into the
// machine's fully-processed-inbox accumulator.
void MachineState::addProcessedMessage(const MachineMessage& message) {
    output.fully_processed_inbox.addMessage(message);
}
// Record an outgoing send: bump the send counter, fold the payload hash
// into the running send accumulator, and stash the payload (tagged with the
// current inbox position) for the caller to collect.
void MachineState::addProcessedSend(std::vector<uint8_t> data) {
    auto payload_hash = ::hash(data);
    output.send_count = output.send_count + 1;
    output.send_acc = ::hash(output.send_acc, payload_hash);
    context.sends.push_back(MachineEmission<std::vector<uint8_t>>{
        std::move(data), output.fully_processed_inbox});
}
// Record an emitted log value: bump the log counter, fold the value's hash
// into the running log accumulator, and stash the value (tagged with the
// current inbox position) for the caller to collect.
void MachineState::addProcessedLog(Value log_val) {
    auto value_hash = hash_value(log_val);
    output.log_count = output.log_count + 1;
    output.log_acc = ::hash(output.log_acc, value_hash);
    context.logs.push_back(MachineEmission<Value>{
        std::move(log_val), output.fully_processed_inbox});
}
// Default machine: empty state with the maximum ArbGas allowance.
MachineState::MachineState() : arb_gas_remaining(max_arb_gas_remaining) {}
// Machine positioned at the initial codepoint of the given code, with the
// given static value and the maximum ArbGas allowance.
MachineState::MachineState(std::shared_ptr<CoreCode> code_, Value static_val_)
    : pc(code_->initialCodePointRef()),
      code(std::move(code_)),
      static_val(std::move(static_val_)),
      arb_gas_remaining(max_arb_gas_remaining) {}
// Fully-specified machine, used when restoring a machine from persisted
// state: every component of the state is supplied by the caller.
MachineState::MachineState(MachineOutput output_,
                           CodePointRef pc_,
                           std::shared_ptr<Code> code_,
                           ValueLoader value_loader_,
                           Value register_val_,
                           Value static_val_,
                           Datastack stack_,
                           Datastack auxstack_,
                           uint256_t arb_gas_remaining_,
                           Status state_,
                           CodePointStub errpc_)
    : output(output_),
      pc(pc_),
      code(std::move(code_)),
      value_loader(std::move(value_loader_)),
      registerVal(std::move(register_val_)),
      static_val(std::move(static_val_)),
      stack(std::move(stack_)),
      auxstack(std::move(auxstack_)),
      arb_gas_remaining(arb_gas_remaining_),
      state(state_),
      errpc(errpc_) {}
// Load an AVM executable from disk and wrap it in a fresh machine.
// The executable's code becomes segment 0 of a new CoreCode instance and its
// static value seeds the machine's static register.
MachineState MachineState::loadFromFile(
    const std::string& executable_filename) {
    auto loaded = loadExecutable(executable_filename);
    auto core_code = std::make_shared<CoreCode>(0);
    core_code->addSegment(std::move(loaded.code));
    return MachineState{std::move(core_code), std::move(loaded.static_val)};
}
// Total size of the machine's value graph: the static value, the register,
// and everything held on the data and aux stacks.
uint256_t MachineState::getMachineSize() const {
    return getSize(static_val) + getSize(registerVal) +
           stack.getTotalValueSize() + auxstack.getTotalValueSize();
}
namespace {
// Serialize the state components common to every one-step proof.
// The field order is part of the proof wire format consumed by the on-chain
// verifier and must not change: next codepoint hash, datastack preimage,
// auxstack preimage, register, static value, remaining ArbGas, and the hash
// of the error-handler codepoint.
void marshalState(std::vector<unsigned char>& buf,
                  const Code& code,
                  uint256_t next_codepoint_hash,
                  HashPreImage stackPreImage,
                  HashPreImage auxStackPreImage,
                  const Value& registerVal,
                  const Value& staticVal,
                  uint256_t arb_gas_remaining,
                  CodePointStub errpc) {
    marshal_uint256_t(next_codepoint_hash, buf);
    stackPreImage.marshal(buf);
    auxStackPreImage.marshal(buf);
    // Values are marshalled at nesting level 0 (fully expanded root).
    ::marshalForProof(registerVal, 0, buf, code);
    ::marshalForProof(staticVal, 0, buf, code);
    marshal_uint256_t(arb_gas_remaining, buf);
    marshal_uint256_t(::hash(errpc), buf);
}
}  // namespace
// Serialize the full machine state (via the shared marshalState helper)
// into a flat byte vector.
std::vector<unsigned char> MachineState::marshalState() const {
    std::vector<unsigned char> serialized;
    auto stack_preimage = stack.getHashPreImage();
    auto aux_preimage = auxstack.getHashPreImage();
    ::marshalState(serialized, *code, ::hash(loadCurrentInstruction()),
                   stack_preimage, aux_preimage, registerVal, static_val,
                   arb_gas_remaining, errpc);
    return serialized;
}
// Append a 32-byte header describing up to four variable-length proof
// sections. Byte 0 is the fixed offset 1; bytes 1-4 are cumulative offsets
// in 32-byte words after each section; bytes 5-31 are zero padding.
void insertSizes(std::vector<unsigned char>& buf,
                 uint32_t sz1,
                 uint32_t sz2,
                 uint32_t sz3,
                 uint32_t sz4) {
    uint32_t offset = 1;
    buf.push_back(static_cast<uint8_t>(offset));
    for (uint32_t section_size : {sz1, sz2, sz3, sz4}) {
        offset += section_size / 32;
        buf.push_back(static_cast<uint8_t>(offset));
    }
    // Pad the header out to a full 32 bytes.
    buf.insert(buf.end(), 27, 0);
}
// Build the buffer proof for writing a wordSize-byte big-endian value v at
// offset loc of buffer. If the write stays inside one 32-byte word
// ("aligned"), a single access proof plus one normalization proof suffices;
// if it straddles a word boundary, proofs for both touched words and both
// intermediate buffers are emitted.
void makeSetBufferProof(std::vector<unsigned char>& buf,
                        uint64_t loc,
                        const Buffer& buffer,
                        uint256_t v,
                        int wordSize) {
    Buffer nbuffer = buffer;
    // nbuffer1 snapshots the buffer after writing only the bytes that fall
    // in the first 32-byte word (relevant in the straddling case).
    Buffer nbuffer1 = nbuffer;
    bool aligned = true;
    for (int i = 0; i < wordSize; i++) {
        if ((loc + i) % 32 == 0 && i > 0) {
            // The write crossed into the next 32-byte word mid-value.
            nbuffer1 = nbuffer;
            aligned = false;
        }
        // Write byte i of v, most-significant byte first.
        nbuffer = nbuffer.set(
            loc + i,
            static_cast<uint8_t>((v >> ((wordSize - 1 - i) * 8)) & 0xff));
    }
    auto proof1 = buffer.makeProof(loc);
    if (aligned) {
        auto nproof1 = nbuffer.makeNormalizationProof();
        insertSizes(buf, proof1.size(), nproof1.size(), 0, 0);
        buf.insert(buf.end(), proof1.begin(), proof1.end());
        buf.insert(buf.end(), nproof1.begin(), nproof1.end());
    } else {
        // Straddling write: prove the first word's write, then the second
        // word's write starting from the intermediate buffer.
        auto nproof1 = nbuffer1.makeNormalizationProof();
        auto proof2 = nbuffer1.makeProof(loc + (wordSize - 1));
        auto nproof2 = nbuffer.makeNormalizationProof();
        insertSizes(buf, proof1.size(), nproof1.size(), proof2.size(),
                    nproof2.size());
        buf.insert(buf.end(), proof1.begin(), proof1.end());
        buf.insert(buf.end(), nproof1.begin(), nproof1.end());
        buf.insert(buf.end(), proof2.begin(), proof2.end());
        buf.insert(buf.end(), nproof2.begin(), nproof2.end());
    }
}
// Append the buffer-related portion of a one-step proof for the current
// instruction. Only SEND and the GET_BUFFER*/SET_BUFFER* opcodes need one.
// When an expected operand has the wrong type (so the op will error), an
// empty size header is emitted where the verifier requires one.
void MachineState::marshalBufferProof(OneStepProof& proof) const {
    auto& op = loadCurrentOperation();
    auto opcode = op.opcode;
    if ((opcode < OpCode::GET_BUFFER8 || opcode > OpCode::SET_BUFFER256) &&
        opcode != OpCode::SEND) {
        return;
    }
    if (opcode == OpCode::SEND) {
        // With an immediate, the immediate occupies the first stack slot, so
        // the buffer operand shifts down by one.
        auto buffer = op.immediate ? get_if<Buffer>(&stack[0])
                                   : get_if<Buffer>(&stack[1]);
        if (!buffer) {
            return;
        }
        // Also need the size operand.
        auto size = op.immediate ? get_if<uint256_t>(&*op.immediate)
                                 : get_if<uint256_t>(&stack[0]);
        if (!size) {
            return;
        }
        auto loc = static_cast<uint64_t>(*size);
        if (loc > send_size_limit) {
            // Oversized send will error; no proof data needed.
            return;
        } else if (loc < buffer->data_length()) {
            // Loc must be at or past the last nonzero index in the buffer;
            // prove the byte at loc to show the tail is nonzero.
            auto buf_proof = buffer->makeProof(loc);
            insertSizes(proof.buffer_proof, buf_proof.size(), 0, 0, 0);
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof.begin(), buf_proof.end());
        } else {
            // The whole send payload fits in the flat data; emit it padded
            // with zeros out to loc.
            auto data = buffer->toFlatVector();
            proof.standard_proof.insert(proof.standard_proof.end(),
                                        data.begin(), data.end());
            std::fill_n(std::back_inserter(proof.standard_proof),
                        loc - data.size(), 0);
        }
        return;
    }
    if (opcode == OpCode::GET_BUFFER8 || opcode == OpCode::GET_BUFFER64 ||
        opcode == OpCode::GET_BUFFER256) {
        // Find the buffer operand.
        auto buffer = op.immediate ? get_if<Buffer>(&stack[0])
                                   : get_if<Buffer>(&stack[1]);
        if (!buffer) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        // Also need the offset operand.
        auto offset = op.immediate ? get_if<uint256_t>(&*op.immediate)
                                   : get_if<uint256_t>(&stack[0]);
        if (!offset) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        if (*offset > std::numeric_limits<uint64_t>::max()) {
            // Offset out of addressable range; the op will error.
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        auto loc = static_cast<uint64_t>(*offset);
        if (opcode == OpCode::GET_BUFFER8) {
            auto buf_proof = buffer->makeProof(loc);
            insertSizes(proof.buffer_proof, buf_proof.size(), 0, 0, 0);
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof.begin(), buf_proof.end());
        } else if (opcode == OpCode::GET_BUFFER64) {
            // Prove both the first and last byte of the 8-byte read; the
            // read may straddle a 32-byte word boundary.
            auto buf_proof1 = buffer->makeProof(loc);
            auto buf_proof2 = buffer->makeProof(loc + 7);
            insertSizes(proof.buffer_proof, buf_proof1.size(), 0,
                        buf_proof2.size(), 0);
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof1.begin(), buf_proof1.end());
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof2.begin(), buf_proof2.end());
        } else if (opcode == OpCode::GET_BUFFER256) {
            // Same as GET_BUFFER64 but for a 32-byte read.
            auto buf_proof1 = buffer->makeProof(loc);
            auto buf_proof2 = buffer->makeProof(loc + 31);
            insertSizes(proof.buffer_proof, buf_proof1.size(), 0,
                        buf_proof2.size(), 0);
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof1.begin(), buf_proof1.end());
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof2.begin(), buf_proof2.end());
        }
    } else {
        // SET_BUFFER* takes (offset, value, buffer); with an immediate the
        // offset comes from the immediate and the rest shift up one slot.
        auto buffer = op.immediate ? get_if<Buffer>(&stack[1])
                                   : get_if<Buffer>(&stack[2]);
        if (!buffer) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        // Also need the offset operand.
        auto offset = op.immediate ? get_if<uint256_t>(&*op.immediate)
                                   : get_if<uint256_t>(&stack[0]);
        if (!offset) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        if (*offset > std::numeric_limits<uint64_t>::max()) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        auto val = op.immediate ? get_if<uint256_t>(&stack[0])
                                : get_if<uint256_t>(&stack[1]);
        if (!val) {
            insertSizes(proof.buffer_proof, 0, 0, 0, 0);
            return;
        }
        auto loc = static_cast<uint64_t>(*offset);
        if (opcode == OpCode::SET_BUFFER8) {
            // Single-byte write never straddles a word: one access proof
            // plus the normalization proof of the written buffer.
            Buffer nbuffer = buffer->set(loc, static_cast<uint8_t>(*val));
            auto buf_proof1 = buffer->makeProof(loc);
            auto buf_nproof1 = nbuffer.makeNormalizationProof();
            insertSizes(proof.buffer_proof, buf_proof1.size(),
                        buf_nproof1.size(), 0, 0);
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_proof1.begin(), buf_proof1.end());
            proof.buffer_proof.insert(proof.buffer_proof.end(),
                                      buf_nproof1.begin(), buf_nproof1.end());
        } else if (opcode == OpCode::SET_BUFFER64) {
            makeSetBufferProof(proof.buffer_proof, loc, *buffer, *val, 8);
        } else if (opcode == OpCode::SET_BUFFER256) {
            makeSetBufferProof(proof.buffer_proof, loc, *buffer, *val, 32);
        }
    }
}
// Build the full one-step proof for the current instruction: opcode byte,
// operand counts, popped stack/auxstack values, optional immediate, the
// marshalled machine state, an immediate flag byte, and (when the stacks
// don't underflow) any buffer proof. Layout is protocol-defined.
OneStepProof MachineState::marshalForProof() const {
    auto currentInstruction = loadCurrentInstruction();
    auto& current_op = currentInstruction.op;
    auto opcode = current_op.opcode;
    // Unknown opcodes are proven with the ERROR instruction's pop counts.
    std::vector<size_t> stackPops = [&]() {
        auto it = InstructionStackPops.find(opcode);
        if (it == InstructionStackPops.end()) {
            return InstructionStackPops.at(OpCode::ERROR);
        }
        return it->second;
    }();
    std::vector<size_t> auxStackPops = [&]() {
        auto it = InstructionAuxStackPops.find(opcode);
        if (it == InstructionAuxStackPops.end()) {
            return InstructionAuxStackPops.at(OpCode::ERROR);
        }
        return it->second;
    }();
    size_t immediateMarshalLevel = 0;
    if (current_op.immediate && !stackPops.empty()) {
        // The immediate stands in for the first stack operand; marshal it at
        // that operand's expansion level instead of popping it.
        immediateMarshalLevel = stackPops[0];
        stackPops.erase(stackPops.cbegin());
    }
    OneStepProof proof;
    auto stackProof = stack.marshalForProof(stackPops, *code);
    auto auxStackProof = auxstack.marshalForProof(auxStackPops, *code);
    bool underflowed = stackProof.count < stackPops.size() ||
                       auxStackProof.count < auxStackPops.size();
    proof.standard_proof.push_back(static_cast<uint8_t>(current_op.opcode));
    // Operand count includes the immediate when present.
    proof.standard_proof.push_back(
        stackProof.count +
        static_cast<uint8_t>(static_cast<bool>(current_op.immediate)));
    proof.standard_proof.push_back(auxStackProof.count);
    proof.standard_proof.insert(proof.standard_proof.cend(),
                                stackProof.data.begin(), stackProof.data.end());
    if (current_op.immediate) {
        ::marshalForProof(*current_op.immediate, immediateMarshalLevel,
                          proof.standard_proof, *code);
    }
    proof.standard_proof.insert(proof.standard_proof.cend(),
                                auxStackProof.data.begin(),
                                auxStackProof.data.end());
    ::marshalState(proof.standard_proof, *code, currentInstruction.nextHash,
                   stackProof.bottom, auxStackProof.bottom, registerVal,
                   static_val, arb_gas_remaining, errpc);
    proof.standard_proof.push_back(current_op.immediate ? 1 : 0);
    if (!underflowed) {
        // Don't need a buffer proof if we're underflowing
        marshalBufferProof(proof);
    }
    return proof;
}
// Report why the machine cannot make progress: terminal error/halt states
// block permanently, and an INBOX instruction blocks until new messages
// arrive. Any other situation is runnable.
BlockReason MachineState::isBlocked(bool newMessages) const {
    if (state == Status::Error) {
        return ErrorBlocked();
    }
    if (state == Status::Halted) {
        return HaltBlocked();
    }
    if (loadCurrentOperation().opcode == OpCode::INBOX && !newMessages) {
        return InboxBlocked();
    }
    return NotBlocked();
}
// Load the codepoint at the current pc, refreshing the cached code segment
// (a mutable member, hence usable from this const method) only when the pc
// has moved into a different segment.
CodePoint MachineState::loadCurrentInstruction() const {
    if (!loaded_segment || loaded_segment->segmentID() != pc.segment) {
        loaded_segment = std::make_optional(code->loadCodeSegment(pc.segment));
    }
    return loaded_segment->loadCodePoint(pc.pc);
}
// Load just the operation at the current pc, using the same cached-segment
// strategy as loadCurrentInstruction. The returned reference is valid until
// the cached segment is replaced.
const Operation& MachineState::loadCurrentOperation() const {
    if (!loaded_segment || loaded_segment->segmentID() != pc.segment) {
        loaded_segment = std::make_optional(code->loadCodeSegment(pc.segment));
    }
    return loaded_segment->loadOperation(pc.pc);
}
// ArbGas cost of the instruction the machine would execute next.
uint256_t MachineState::nextGasCost() {
    return gasCost(loadCurrentOperation());
}
// ArbGas cost of executing op in the current machine state: the opcode's
// base cost from the gas table, plus a data-dependent surcharge for
// ECPAIRING (whose cost scales with the pairing input on the stack).
uint256_t MachineState::gasCost(const Operation& op) {
    auto cost = instructionGasCosts()[static_cast<size_t>(op.opcode)];
    if (op.opcode == OpCode::ECPAIRING) {
        cost += machineoperation::ec_pairing_variable_gas_cost(*this);
    }
    return cost;
}
// Execute a single instruction. Handles immediate pushing, gas accounting
// (including the transition to the error state with gas reset to max when
// gas runs out), invalid opcodes, stack underflow cleanup, and the jump to
// the error handler. Returns why the machine is blocked, or NotBlocked if
// the step was taken. Order of the gas/state updates here is part of the
// machine's deterministic semantics — do not reorder.
BlockReason MachineState::runOne() {
    if (state == Status::Error) {
        return ErrorBlocked();
    }
    if (state == Status::Halted) {
        return HaltBlocked();
    }
    auto& op = loadCurrentOperation();
    static const auto error_gas_cost =
        instructionGasCosts()[static_cast<size_t>(OpCode::ERROR)];
    // Always push the immediate to the stack if we're not blocked
    if (op.immediate) {
        auto imm = *op.immediate;
        stack.push(std::move(imm));
    }
    // save stack size for stack cleanup in case of error
    uint64_t start_stack_size = stack.stacksize();
    uint64_t start_auxstack_size = auxstack.stacksize();
    bool is_valid_instruction =
        instructionValidity()[static_cast<size_t>(op.opcode)];
    uint64_t stack_arg_count = stackArgCount()[static_cast<size_t>(op.opcode)];
    uint64_t auxstack_arg_count =
        auxstackArgCount()[static_cast<size_t>(op.opcode)];
    // We're only blocked if we can't execute at all
    BlockReason blockReason = [&]() -> BlockReason {
        if (stack_arg_count > stack.stacksize() ||
            auxstack_arg_count > auxstack.stacksize()) {
            // Not enough operands: execute as an error transition, charging
            // the ERROR instruction's gas (resetting to max on underflow).
            state = Status::Error;
            if (arb_gas_remaining < error_gas_cost) {
                arb_gas_remaining = max_arb_gas_remaining;
            } else {
                arb_gas_remaining -= error_gas_cost;
            }
            output.arb_gas_used += error_gas_cost;
            return NotBlocked();
        }
        uint256_t gas_cost =
            is_valid_instruction ? gasCost(op) : error_gas_cost;
        if (arb_gas_remaining < gas_cost) {
            // If there's insufficient gas remaining, execute by transitioning
            // to the error state with remaining gas set to max
            output.arb_gas_used += error_gas_cost;
            arb_gas_remaining = max_arb_gas_remaining;
            state = Status::Error;
            return NotBlocked();
        }
        arb_gas_remaining -= gas_cost;
        output.arb_gas_used += gas_cost;
        if (!is_valid_instruction) {
            // The opcode is invalid, execute by transitioning to the error
            // state
            state = Status::Error;
            return NotBlocked();
        }
        BlockReason blockReason = NotBlocked();
        try {
            blockReason = runOp(op.opcode);
        } catch (const stack_too_small&) {
            // Charge an error instruction instead
            // NOTE(review): only the gas refund is visible here; the
            // error-instruction charge implied by this comment happens
            // elsewhere if at all — confirm against upstream.
            arb_gas_remaining += gas_cost;
            output.arb_gas_used -= gas_cost;
        } catch (const avm_exception&) {
            state = Status::Error;
        }
        if (!std::holds_alternative<NotBlocked>(blockReason)) {
            // Get rid of the immediate and reset the gas if the machine was
            // actually blocked
            arb_gas_remaining += gas_cost;
            output.arb_gas_used -= gas_cost;
            if (op.immediate) {
                stack.popClear();
            }
            return blockReason;
        }
        return NotBlocked();
    }();
    if (std::holds_alternative<NotBlocked>(blockReason)) {
        output.total_steps += 1;
    }
    if (state == Status::Error) {
        // if state is Error, clean up stack
        // Clear stack to base for instruction
        while (stack.stacksize() > 0 &&
               start_stack_size - stack.stacksize() < stack_arg_count) {
            stack.popClear();
        }
        while (auxstack.stacksize() > 0 &&
               start_auxstack_size - auxstack.stacksize() <
                   auxstack_arg_count) {
            auxstack.popClear();
        }
    }
    // If we're in the error state, jump to the error handler if one is set
    if (state == Status::Error && !errpc.is_error()) {
        pc = errpc.pc;
        state = Status::Extensive;
    }
    context.first_instruction = false;
    return blockReason;
}
// Dispatch a single opcode to its implementation in machineoperation.
// Most operations mutate *this in place and never block; BREAKPOINT, INBOX,
// and SIDELOAD return their own BlockReason and may block. An unhandled
// opcode value puts the machine into the error state. Exceptions thrown by
// the operations (stack_too_small, avm_exception) propagate to the caller,
// which converts them into error handling.
BlockReason MachineState::runOp(OpCode opcode) {
    switch (opcode) {
        /**************************/
        /* Arithmetic Operations */
        /**************************/
        case OpCode::ADD:
            machineoperation::add(*this);
            break;
        case OpCode::MUL:
            machineoperation::mul(*this);
            break;
        case OpCode::SUB:
            machineoperation::sub(*this);
            break;
        case OpCode::DIV:
            machineoperation::div(*this);
            break;
        case OpCode::SDIV:
            machineoperation::sdiv(*this);
            break;
        case OpCode::MOD:
            machineoperation::mod(*this);
            break;
        case OpCode::SMOD:
            machineoperation::smod(*this);
            break;
        case OpCode::ADDMOD:
            machineoperation::addmod(*this);
            break;
        case OpCode::MULMOD:
            machineoperation::mulmod(*this);
            break;
        case OpCode::EXP:
            machineoperation::exp(*this);
            break;
        case OpCode::SIGNEXTEND:
            machineoperation::signExtend(*this);
            break;
        /******************************************/
        /* Comparison & Bitwise Logic Operations */
        /******************************************/
        case OpCode::LT:
            machineoperation::lt(*this);
            break;
        case OpCode::GT:
            machineoperation::gt(*this);
            break;
        case OpCode::SLT:
            machineoperation::slt(*this);
            break;
        case OpCode::SGT:
            machineoperation::sgt(*this);
            break;
        case OpCode::EQ:
            machineoperation::eq(*this);
            break;
        case OpCode::ISZERO:
            machineoperation::iszero(*this);
            break;
        case OpCode::BITWISE_AND:
            machineoperation::bitwiseAnd(*this);
            break;
        case OpCode::BITWISE_OR:
            machineoperation::bitwiseOr(*this);
            break;
        case OpCode::BITWISE_XOR:
            machineoperation::bitwiseXor(*this);
            break;
        case OpCode::BITWISE_NOT:
            machineoperation::bitwiseNot(*this);
            break;
        case OpCode::BYTE:
            machineoperation::byte(*this);
            break;
        case OpCode::SHL:
            machineoperation::shl(*this);
            break;
        case OpCode::SHR:
            machineoperation::shr(*this);
            break;
        case OpCode::SAR:
            machineoperation::sar(*this);
            break;
        /***********************/
        /* Hashing Operations */
        /***********************/
        case OpCode::HASH:
            machineoperation::hashOp(*this);
            break;
        case OpCode::TYPE:
            machineoperation::typeOp(*this);
            break;
        case OpCode::ETHHASH2:
            machineoperation::ethhash2Op(*this);
            break;
        case OpCode::KECCAKF:
            machineoperation::keccakF(*this);
            break;
        case OpCode::SHA256F:
            machineoperation::sha256F(*this);
            break;
        /***********************************************/
        /* Stack, Memory, Storage and Flow Operations */
        /***********************************************/
        case OpCode::POP:
            machineoperation::pop(*this);
            break;
        case OpCode::SPUSH:
            machineoperation::spush(*this);
            break;
        case OpCode::RPUSH:
            machineoperation::rpush(*this);
            break;
        case OpCode::RSET:
            machineoperation::rset(*this);
            break;
        case OpCode::JUMP:
            machineoperation::jump(*this);
            break;
        case OpCode::CJUMP:
            machineoperation::cjump(*this);
            break;
        case OpCode::STACKEMPTY:
            machineoperation::stackEmpty(*this);
            break;
        case OpCode::PCPUSH:
            machineoperation::pcPush(*this);
            break;
        case OpCode::AUXPUSH:
            machineoperation::auxPush(*this);
            break;
        case OpCode::AUXPOP:
            machineoperation::auxPop(*this);
            break;
        case OpCode::AUXSTACKEMPTY:
            machineoperation::auxStackEmpty(*this);
            break;
        case OpCode::NOP:
            // NOP has no operation helper; just advance the program counter.
            ++pc;
            break;
        case OpCode::ERRPUSH:
            machineoperation::errPush(*this);
            break;
        case OpCode::ERRSET:
            machineoperation::errSet(*this);
            break;
        /****************************************/
        /* Duplication and Exchange Operations */
        /****************************************/
        case OpCode::DUP0:
            machineoperation::dup0(*this);
            break;
        case OpCode::DUP1:
            machineoperation::dup1(*this);
            break;
        case OpCode::DUP2:
            machineoperation::dup2(*this);
            break;
        case OpCode::SWAP1:
            machineoperation::swap1(*this);
            break;
        case OpCode::SWAP2:
            machineoperation::swap2(*this);
            break;
        /*********************/
        /* Tuple Operations */
        /*********************/
        case OpCode::TGET:
            machineoperation::tget(*this);
            break;
        case OpCode::TSET:
            machineoperation::tset(*this);
            break;
        case OpCode::TLEN:
            machineoperation::tlen(*this);
            break;
        case OpCode::XGET:
            machineoperation::xget(*this);
            break;
        case OpCode::XSET:
            machineoperation::xset(*this);
            break;
        /***********************/
        /* Logging Operations */
        /***********************/
        case OpCode::BREAKPOINT:
            // May block; returns its own BlockReason.
            return machineoperation::breakpoint(*this);
        case OpCode::LOG:
            machineoperation::log(*this);
            break;
        case OpCode::DEBUG_PRINT:
            machineoperation::debug(*this);
            break;
        /**********************/
        /* System Operations */
        /**********************/
        case OpCode::SEND: {
            machineoperation::send(*this);
            break;
        }
        case OpCode::INBOX:
            // May block waiting on inbox messages.
            return machineoperation::inboxOp(*this);
        case OpCode::ERROR:
            state = Status::Error;
            break;
        case OpCode::HALT:
            state = Status::Halted;
            break;
        case OpCode::SET_GAS:
            machineoperation::setgas(*this);
            break;
        case OpCode::PUSH_GAS:
            machineoperation::pushgas(*this);
            break;
        case OpCode::ERR_CODE_POINT:
            machineoperation::errcodept(*this);
            break;
        case OpCode::PUSH_INSN:
            machineoperation::pushinsn(*this);
            break;
        case OpCode::PUSH_INSN_IMM:
            machineoperation::pushinsnimm(*this);
            break;
        case OpCode::SIDELOAD:
            // May block; returns directly (the old trailing `break;` here
            // was unreachable and has been removed).
            return machineoperation::sideload(*this);
        case OpCode::NEW_BUFFER:
            machineoperation::newbuffer(*this);
            break;
        case OpCode::GET_BUFFER8:
            machineoperation::getbuffer8(*this);
            break;
        case OpCode::GET_BUFFER64:
            machineoperation::getbuffer64(*this);
            break;
        case OpCode::GET_BUFFER256:
            machineoperation::getbuffer256(*this);
            break;
        case OpCode::SET_BUFFER8:
            machineoperation::setbuffer8(*this);
            break;
        case OpCode::SET_BUFFER64:
            machineoperation::setbuffer64(*this);
            break;
        case OpCode::SET_BUFFER256:
            machineoperation::setbuffer256(*this);
            break;
        /*****************/
        /* Precompiles */
        /*****************/
        case OpCode::ECRECOVER:
            machineoperation::ec_recover(*this);
            break;
        case OpCode::ECADD:
            machineoperation::ec_add(*this);
            break;
        case OpCode::ECMUL:
            machineoperation::ec_mul(*this);
            break;
        case OpCode::ECPAIRING:
            machineoperation::ec_pairing(*this);
            break;
        default: {
            // Use find() rather than at(): an opcode reaching this branch is
            // by definition unhandled, so it may well be missing from the
            // name table too, and at() would then throw std::out_of_range
            // while we are trying to report the problem.
            auto name_it = InstructionNames.find(opcode);
            if (name_it != InstructionNames.end()) {
                std::cerr << "Unhandled opcode <" << name_it->second << ">";
            } else {
                std::cerr << "Unhandled opcode";
            }
            // Restore std::dec afterwards so the hex flag does not leak
            // into subsequent output on std::cerr.
            std::cerr << std::hex << static_cast<int>(opcode) << std::dec
                      << std::endl;
            state = Status::Error;
            break;
        }
    }
    return NotBlocked{};
}
// Stream a human-readable debug dump of the machine state: the overall
// machine hash, status, program counter, data stack contents, the current
// and error-handler code points, and the hashes of each hashed component
// (stacks, register, static value).
std::ostream& operator<<(std::ostream& os, const MachineState& val) {
    os << "hash " << intx::to_string(val.hash(), 16) << "\n";
    os << "status " << static_cast<int>(val.state) << "\n";
    os << "pc " << val.pc << "\n";
    os << "data stack: " << val.stack << "\n";
    auto current_code_point = val.code->loadCodePoint(val.pc);
    os << "operation " << current_code_point.op << "\n";
    os << "codePointHash " << intx::to_string(hash(current_code_point), 16)
       << "\n";
    os << "stackHash " << intx::to_string(val.stack.hash(), 16) << "\n";
    os << "auxStackHash " << intx::to_string(val.auxstack.hash(), 16) << "\n";
    os << "registerHash " << intx::to_string(hash_value(val.registerVal), 16)
       << "\n";
    os << "staticHash " << intx::to_string(hash_value(val.static_val), 16)
       << "\n";
    os << "arb_gas_remaining " << val.arb_gas_remaining << "\n";
    os << "err handler " << val.errpc.pc << "\n";
    auto err_code_point = val.code->loadCodePoint(val.errpc.pc);
    os << "errHandlerHash " << intx::to_string(hash(err_code_point), 16)
       << "\n";
    return os;
}
// Number of inbox messages this machine has fully consumed so far.
uint256_t MachineState::getTotalMessagesRead() const {
    const auto& inbox = output.fully_processed_inbox;
    return inbox.count;
}
// Field-by-field equality over every tracked piece of machine output.
bool MachineOutput::operator==(const MachineOutput& other) const {
    // Guard-clause style: bail out on the first mismatching field.
    if (!(fully_processed_inbox == other.fully_processed_inbox)) {
        return false;
    }
    if (!(total_steps == other.total_steps)) {
        return false;
    }
    if (!(arb_gas_used == other.arb_gas_used)) {
        return false;
    }
    if (!(send_acc == other.send_acc)) {
        return false;
    }
    if (!(log_acc == other.log_acc)) {
        return false;
    }
    if (!(send_count == other.send_count)) {
        return false;
    }
    if (!(log_count == other.log_count)) {
        return false;
    }
    if (!(l1_block_number == other.l1_block_number)) {
        return false;
    }
    if (!(l2_block_number == other.l2_block_number)) {
        return false;
    }
    if (!(last_inbox_timestamp == other.last_inbox_timestamp)) {
        return false;
    }
    return last_sideload == other.last_sideload;
}
// Complement of MachineOutput::operator==.
bool MachineOutput::operator!=(const MachineOutput& other) const {
    return !operator==(other);
}
// Inbox states match when both the message count and the running
// accumulator agree.
bool InboxState::operator==(const InboxState& other) const {
    if (!(count == other.count)) {
        return false;
    }
    return accumulator == other.accumulator;
}
// Complement of InboxState::operator==.
bool InboxState::operator!=(const InboxState& other) const {
    return !operator==(other);
}
| 35.597588 | 80 | 0.560758 | [
"vector"
] |
f6a4e39e511d2e452f374e2b4080078479963b72 | 45,562 | cpp | C++ | lib/Semantics/tools.cpp | anchu-rajendran/f18 | 6e53f40e984516f89ae21f1dcd3daa2d26d05c5b | [
"Apache-2.0"
] | null | null | null | lib/Semantics/tools.cpp | anchu-rajendran/f18 | 6e53f40e984516f89ae21f1dcd3daa2d26d05c5b | [
"Apache-2.0"
] | null | null | null | lib/Semantics/tools.cpp | anchu-rajendran/f18 | 6e53f40e984516f89ae21f1dcd3daa2d26d05c5b | [
"Apache-2.0"
] | null | null | null | //===-- lib/Semantics/tools.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "flang/Parser/tools.h"
#include "flang/Common/Fortran.h"
#include "flang/Common/indirection.h"
#include "flang/Parser/dump-parse-tree.h"
#include "flang/Parser/message.h"
#include "flang/Parser/parse-tree.h"
#include "flang/Semantics/scope.h"
#include "flang/Semantics/semantics.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "flang/Semantics/type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <set>
#include <variant>
namespace Fortran::semantics {
// Search upward from `start` (inclusive) for the first scope satisfying
// `predicate`; returns nullptr once the global scope has been examined
// without a match.
static const Scope *FindScopeContaining(
    const Scope &start, std::function<bool(const Scope &)> predicate) {
  const Scope *scope{&start};
  while (true) {
    if (predicate(*scope)) {
      return scope;
    }
    if (scope->IsGlobal()) {
      return nullptr;
    }
    scope = &scope->parent();
  }
}
// Innermost module scope enclosing `start`, if any.
const Scope *FindModuleContaining(const Scope &start) {
  const auto isModule{[](const Scope &scope) { return scope.IsModule(); }};
  return FindScopeContaining(start, isModule);
}
// The COMMON block of which `object` is a member, or nullptr.
const Symbol *FindCommonBlockContaining(const Symbol &object) {
  const auto *details{object.detailsIf<ObjectEntityDetails>()};
  return details ? details->commonBlock() : nullptr;
}
// Innermost enclosing program unit: main program, module, subprogram, or
// block data.
const Scope *FindProgramUnitContaining(const Scope &start) {
  return FindScopeContaining(start, [](const Scope &scope) {
    const auto kind{scope.kind()};
    return kind == Scope::Kind::Module || kind == Scope::Kind::MainProgram ||
        kind == Scope::Kind::Subprogram || kind == Scope::Kind::BlockData;
  });
}
// Convenience overload: the program unit enclosing a symbol's owning scope.
const Scope *FindProgramUnitContaining(const Symbol &symbol) {
  return FindProgramUnitContaining(symbol.owner());
}
// Innermost enclosing pure procedure, or nullptr.
const Scope *FindPureProcedureContaining(const Scope &start) {
  // N.B. We only need to examine the innermost containing program unit
  // because an internal subprogram of a pure subprogram must also
  // be pure (C1592).
  if (const Scope * scope{FindProgramUnitContaining(start)}) {
    if (IsPureProcedure(*scope)) {
      return scope;
    }
  }
  return nullptr;
}
// Classify an assignment as defined (user-provided) vs intrinsic:
// Yes when the ranks conflict or the type categories cannot be assigned
// intrinsically; Maybe for same-derived-type assignment, which may resolve
// either way; No when it must be intrinsic (or the RHS is untyped/in error).
Tristate IsDefinedAssignment(
    const std::optional<evaluate::DynamicType> &lhsType, int lhsRank,
    const std::optional<evaluate::DynamicType> &rhsType, int rhsRank) {
  if (!lhsType || !rhsType) {
    return Tristate::No; // error or rhs is untyped
  }
  TypeCategory lhsCat{lhsType->category()};
  TypeCategory rhsCat{rhsType->category()};
  if (rhsRank > 0 && lhsRank != rhsRank) {
    return Tristate::Yes;
  } else if (lhsCat != TypeCategory::Derived) {
    // Intrinsic assignment covers same-category and numeric-to-numeric
    // combinations; anything else requires a defined assignment.
    return ToTristate(lhsCat != rhsCat &&
        (!IsNumericTypeCategory(lhsCat) || !IsNumericTypeCategory(rhsCat)));
  } else {
    const auto *lhsDerived{evaluate::GetDerivedTypeSpec(lhsType)};
    const auto *rhsDerived{evaluate::GetDerivedTypeSpec(rhsType)};
    if (lhsDerived && rhsDerived && *lhsDerived == *rhsDerived) {
      return Tristate::Maybe; // TYPE(t) = TYPE(t) can be defined or
                              // intrinsic
    } else {
      return Tristate::Yes;
    }
  }
}
// True when relational operator `opr` applied to conformable operands of
// these types is the intrinsic relational operation.
bool IsIntrinsicRelational(common::RelationalOperator opr,
    const evaluate::DynamicType &type0, int rank0,
    const evaluate::DynamicType &type1, int rank1) {
  if (!evaluate::AreConformable(rank0, rank1)) {
    return false;
  } else {
    auto cat0{type0.category()};
    auto cat1{type1.category()};
    if (IsNumericTypeCategory(cat0) && IsNumericTypeCategory(cat1)) {
      // numeric types: EQ/NE always ok, others ok for non-complex
      return opr == common::RelationalOperator::EQ ||
          opr == common::RelationalOperator::NE ||
          (cat0 != TypeCategory::Complex && cat1 != TypeCategory::Complex);
    } else {
      // not both numeric: only Character is ok
      return cat0 == TypeCategory::Character && cat1 == TypeCategory::Character;
    }
  }
}
// True when the operand type is intrinsically numeric.
bool IsIntrinsicNumeric(const evaluate::DynamicType &type0) {
  return IsNumericTypeCategory(type0.category());
}
// Binary form: both operands numeric and the ranks conformable.
bool IsIntrinsicNumeric(const evaluate::DynamicType &type0, int rank0,
    const evaluate::DynamicType &type1, int rank1) {
  return evaluate::AreConformable(rank0, rank1) &&
      IsNumericTypeCategory(type0.category()) &&
      IsNumericTypeCategory(type1.category());
}
// True when the operand type is LOGICAL.
bool IsIntrinsicLogical(const evaluate::DynamicType &type0) {
  return type0.category() == TypeCategory::Logical;
}
// Binary form: both operands LOGICAL and the ranks conformable.
bool IsIntrinsicLogical(const evaluate::DynamicType &type0, int rank0,
    const evaluate::DynamicType &type1, int rank1) {
  return evaluate::AreConformable(rank0, rank1) &&
      type0.category() == TypeCategory::Logical &&
      type1.category() == TypeCategory::Logical;
}
// True when concatenation is intrinsic: conformable CHARACTER operands of
// the same kind.
bool IsIntrinsicConcat(const evaluate::DynamicType &type0, int rank0,
    const evaluate::DynamicType &type1, int rank1) {
  return evaluate::AreConformable(rank0, rank1) &&
      type0.category() == TypeCategory::Character &&
      type1.category() == TypeCategory::Character &&
      type0.kind() == type1.kind();
}
// True if `symbol` (after USE/host resolution) names a defined operator:
// either a generic interface for one, or a type-bound defined operator.
bool IsGenericDefinedOp(const Symbol &symbol) {
  const Symbol &ultimate{symbol.GetUltimate()};
  if (const auto *generic{ultimate.detailsIf<GenericDetails>()}) {
    return generic->kind().IsDefinedOperator();
  } else if (const auto *misc{ultimate.detailsIf<MiscDetails>()}) {
    return misc->kind() == MiscDetails::Kind::TypeBoundDefinedOp;
  } else {
    return false;
  }
}
// True if `object` appears in the member list of COMMON block `block`.
bool IsCommonBlockContaining(const Symbol &block, const Symbol &object) {
  const auto &members{block.get<CommonBlockDetails>().objects()};
  return std::find(members.begin(), members.end(), object) != members.end();
}
// True if `symbol` is visible in `scope` by USE association: its ultimate
// symbol's program unit is a module other than the one containing `scope`.
bool IsUseAssociated(const Symbol &symbol, const Scope &scope) {
  const Scope *owner{FindProgramUnitContaining(symbol.GetUltimate().owner())};
  return owner && owner->kind() == Scope::Kind::Module &&
      owner != FindProgramUnitContaining(scope);
}
// True when `maybeAncestor` properly contains `maybeDescendent`; the
// global scope is never treated as a descendent.
bool DoesScopeContain(
    const Scope *maybeAncestor, const Scope &maybeDescendent) {
  return maybeAncestor && !maybeDescendent.IsGlobal() &&
      FindScopeContaining(maybeDescendent.parent(),
          [&](const Scope &scope) { return &scope == maybeAncestor; });
}
// Convenience overload testing the symbol's owning scope.
bool DoesScopeContain(const Scope *maybeAncestor, const Symbol &symbol) {
  return DoesScopeContain(maybeAncestor, symbol.owner());
}
// True if `symbol` is visible in `scope` by host association: the program
// unit declaring the symbol contains the program unit of `scope`.
bool IsHostAssociated(const Symbol &symbol, const Scope &scope) {
  const Scope *subprogram{FindProgramUnitContaining(scope)};
  return subprogram &&
      DoesScopeContain(FindProgramUnitContaining(symbol), *subprogram);
}
// True if `symbol` is a dummy argument (data object or procedure).
bool IsDummy(const Symbol &symbol) {
  if (const auto *details{symbol.detailsIf<ObjectEntityDetails>()}) {
    return details->isDummy();
  } else if (const auto *details{symbol.detailsIf<ProcEntityDetails>()}) {
    return details->isDummy();
  } else {
    return false;
  }
}
// True if `symbol` is a statement function.
bool IsStmtFunction(const Symbol &symbol) {
  const auto *subprogram{symbol.detailsIf<SubprogramDetails>()};
  return subprogram && subprogram->stmtFunction();
}
// True if `symbol` is declared inside a statement function's scope.
bool IsInStmtFunction(const Symbol &symbol) {
  if (const Symbol * function{symbol.owner().symbol()}) {
    return IsStmtFunction(*function);
  }
  return false;
}
// True if `symbol` is a dummy argument of a statement function.
bool IsStmtFunctionDummy(const Symbol &symbol) {
  return IsDummy(symbol) && IsInStmtFunction(symbol);
}
// True if `symbol` is the result of a statement function.
bool IsStmtFunctionResult(const Symbol &symbol) {
  return IsFunctionResult(symbol) && IsInStmtFunction(symbol);
}
// True if `symbol` is a dummy argument with the POINTER attribute.
bool IsPointerDummy(const Symbol &symbol) {
  return IsPointer(symbol) && IsDummy(symbol);
}
// variable-name: a named data object that is not a named constant.
// Construct associations are resolved to their underlying variable first;
// an association with an expression is not a variable name.
bool IsVariableName(const Symbol &symbol) {
  if (const Symbol * root{GetAssociationRoot(symbol)}) {
    return root->has<ObjectEntityDetails>() && !IsNamedConstant(*root);
  } else {
    return false;
  }
}
// proc-name: the ultimate symbol is a procedure entity.
bool IsProcName(const Symbol &symbol) {
  return symbol.GetUltimate().has<ProcEntityDetails>();
}
// True if `symbol` is a function (as opposed to a subroutine), examined
// through subprograms, procedure entities, bindings, and USE association.
bool IsFunction(const Symbol &symbol) {
  return std::visit(
      common::visitors{
          [](const SubprogramDetails &x) { return x.isFunction(); },
          [&](const SubprogramNameDetails &) {
            return symbol.test(Symbol::Flag::Function);
          },
          [](const ProcEntityDetails &x) {
            // A procedure entity is a function if its interface carries a
            // result type or its interface symbol is itself a function.
            const auto &ifc{x.interface()};
            return ifc.type() || (ifc.symbol() && IsFunction(*ifc.symbol()));
          },
          [](const ProcBindingDetails &x) { return IsFunction(x.symbol()); },
          [](const UseDetails &x) { return IsFunction(x.symbol()); },
          [](const auto &) { return false; },
      },
      symbol.details());
}
// True if `symbol` is a pure procedure: attributed PURE, or ELEMENTAL
// without IMPURE. Procedure components and bindings delegate to their
// interface/bound procedure.
bool IsPureProcedure(const Symbol &symbol) {
  if (const auto *procDetails{symbol.detailsIf<ProcEntityDetails>()}) {
    if (const Symbol * procInterface{procDetails->interface().symbol()}) {
      // procedure component with a pure interface
      return IsPureProcedure(*procInterface);
    }
  } else if (const auto *details{symbol.detailsIf<ProcBindingDetails>()}) {
    return IsPureProcedure(details->symbol());
  } else if (!IsProcedure(symbol)) {
    return false;
  }
  return symbol.attrs().test(Attr::PURE) ||
      (symbol.attrs().test(Attr::ELEMENTAL) &&
          !symbol.attrs().test(Attr::IMPURE));
}
// True if the scope's defining symbol is a pure procedure.
bool IsPureProcedure(const Scope &scope) {
  if (const Symbol * symbol{scope.GetSymbol()}) {
    return IsPureProcedure(*symbol);
  } else {
    return false;
  }
}
// True if `symbol` is a BIND(C) procedure; procedure components delegate
// to their interface.
bool IsBindCProcedure(const Symbol &symbol) {
  if (const auto *procDetails{symbol.detailsIf<ProcEntityDetails>()}) {
    if (const Symbol * procInterface{procDetails->interface().symbol()}) {
      // procedure component with a BIND(C) interface
      return IsBindCProcedure(*procInterface);
    }
  }
  return symbol.attrs().test(Attr::BIND_C) && IsProcedure(symbol);
}
// True if the scope's defining symbol is a BIND(C) procedure.
bool IsBindCProcedure(const Scope &scope) {
  if (const Symbol * symbol{scope.GetSymbol()}) {
    return IsBindCProcedure(*symbol);
  } else {
    return false;
  }
}
// True if `symbol` denotes a procedure of any kind: subprogram, procedure
// entity, generic, type binding, or a USE association to one of these.
bool IsProcedure(const Symbol &symbol) {
  return std::visit(
      common::visitors{
          [](const SubprogramDetails &) { return true; },
          [](const SubprogramNameDetails &) { return true; },
          [](const ProcEntityDetails &) { return true; },
          [](const GenericDetails &) { return true; },
          [](const ProcBindingDetails &) { return true; },
          [](const UseDetails &x) { return IsProcedure(x.symbol()); },
          // TODO: FinalProcDetails?
          [](const auto &) { return false; },
      },
      symbol.details());
}
// True if `symbol` is a procedure pointer.
bool IsProcedurePointer(const Symbol &symbol) {
  return symbol.has<ProcEntityDetails>() && IsPointer(symbol);
}
// Depth-first search of a derived type scope for a POINTER component,
// descending into components of derived type; the `visited` set guards
// against infinite recursion among mutually referencing derived types.
static const Symbol *FindPointerComponent(
    const Scope &scope, std::set<const Scope *> &visited) {
  if (!scope.IsDerivedType()) {
    return nullptr;
  }
  if (!visited.insert(&scope).second) {
    return nullptr;
  }
  // If there's a top-level pointer component, return it for clearer error
  // messaging.
  for (const auto &pair : scope) {
    const Symbol &symbol{*pair.second};
    if (IsPointer(symbol)) {
      return &symbol;
    }
  }
  for (const auto &pair : scope) {
    const Symbol &symbol{*pair.second};
    if (const auto *details{symbol.detailsIf<ObjectEntityDetails>()}) {
      if (const DeclTypeSpec * type{details->type()}) {
        if (const DerivedTypeSpec * derived{type->AsDerived()}) {
          if (const Scope * nested{derived->scope()}) {
            if (const Symbol *
                    pointer{FindPointerComponent(*nested, visited)}) {
              return pointer;
            }
          }
        }
      }
    }
  }
  return nullptr;
}
// Public entry points over scope / derived spec / declared type / symbol.
const Symbol *FindPointerComponent(const Scope &scope) {
  std::set<const Scope *> visited;
  return FindPointerComponent(scope, visited);
}
const Symbol *FindPointerComponent(const DerivedTypeSpec &derived) {
  if (const Scope * scope{derived.scope()}) {
    return FindPointerComponent(*scope);
  } else {
    return nullptr;
  }
}
const Symbol *FindPointerComponent(const DeclTypeSpec &type) {
  if (const DerivedTypeSpec * derived{type.AsDerived()}) {
    return FindPointerComponent(*derived);
  } else {
    return nullptr;
  }
}
const Symbol *FindPointerComponent(const DeclTypeSpec *type) {
  return type ? FindPointerComponent(*type) : nullptr;
}
// A pointer symbol counts as its own "pointer component".
const Symbol *FindPointerComponent(const Symbol &symbol) {
  return IsPointer(symbol) ? &symbol : FindPointerComponent(symbol.GetType());
}
// C1594 specifies several ways by which an object might be globally visible.
// Returns the object itself when it is visible outside the current scope
// (USE/host association, a pointer dummy in a pure procedure, or an
// INTENT(IN) dummy), the containing COMMON block if any, else nullptr.
const Symbol *FindExternallyVisibleObject(
    const Symbol &object, const Scope &scope) {
  // TODO: Storage association with any object for which this predicate holds,
  // once EQUIVALENCE is supported.
  if (IsUseAssociated(object, scope) || IsHostAssociated(object, scope) ||
      (IsPureProcedure(scope) && IsPointerDummy(object)) ||
      (IsIntentIn(object) && IsDummy(object))) {
    return &object;
  } else if (const Symbol * block{FindCommonBlockContaining(object)}) {
    return block;
  } else {
    return nullptr;
  }
}
// True if the expression is typed and of the given type category.
bool ExprHasTypeCategory(
    const SomeExpr &expr, const common::TypeCategory &type) {
  auto dynamicType{expr.GetType()};
  return dynamicType && dynamicType->category() == type;
}
// True if the expression has a non-derived type whose kind is the default
// kind for its category under the current compilation's settings.
bool ExprTypeKindIsDefault(
    const SomeExpr &expr, const SemanticsContext &context) {
  auto dynamicType{expr.GetType()};
  return dynamicType &&
      dynamicType->category() != common::TypeCategory::Derived &&
      dynamicType->kind() == context.GetDefaultKind(dynamicType->category());
}
// If an analyzed expr or assignment is missing, dump the node and die.
template<typename T> static void CheckMissingAnalysis(bool absent, const T &x) {
  if (absent) {
    std::string buf;
    llvm::raw_string_ostream ss{buf};
    ss << "node has not been analyzed:\n";
    parser::DumpTree(ss, x);
    common::die(ss.str().c_str());
  }
}
// Accessors for the typed expression/assignment that semantic analysis
// attaches to parse-tree nodes; fatal if analysis never ran on the node,
// nullptr if analysis ran but produced no value.
const SomeExpr *GetExprHelper::Get(const parser::Expr &x) {
  CheckMissingAnalysis(!x.typedExpr, x);
  return common::GetPtrFromOptional(x.typedExpr->v);
}
const SomeExpr *GetExprHelper::Get(const parser::Variable &x) {
  CheckMissingAnalysis(!x.typedExpr, x);
  return common::GetPtrFromOptional(x.typedExpr->v);
}
const evaluate::Assignment *GetAssignment(const parser::AssignmentStmt &x) {
  CheckMissingAnalysis(!x.typedAssignment, x);
  return common::GetPtrFromOptional(x.typedAssignment->v);
}
const evaluate::Assignment *GetAssignment(
    const parser::PointerAssignmentStmt &x) {
  CheckMissingAnalysis(!x.typedAssignment, x);
  return common::GetPtrFromOptional(x.typedAssignment->v);
}
// The explicit interface symbol of a procedure entity or binding, if any.
const Symbol *FindInterface(const Symbol &symbol) {
  return std::visit(
      common::visitors{
          [](const ProcEntityDetails &details) {
            return details.interface().symbol();
          },
          [](const ProcBindingDetails &details) { return &details.symbol(); },
          [](const auto &) -> const Symbol * { return nullptr; },
      },
      symbol.details());
}
// Resolve `symbol` to the subprogram it designates, following procedure
// entities/bindings and USE/host association; nullptr when it is not a
// procedure.
const Symbol *FindSubprogram(const Symbol &symbol) {
  return std::visit(
      common::visitors{
          [&](const ProcEntityDetails &details) -> const Symbol * {
            if (const Symbol * interface{details.interface().symbol()}) {
              return FindSubprogram(*interface);
            } else {
              return &symbol;
            }
          },
          [](const ProcBindingDetails &details) {
            return FindSubprogram(details.symbol());
          },
          [&](const SubprogramDetails &) { return &symbol; },
          [](const UseDetails &details) {
            return FindSubprogram(details.symbol());
          },
          [](const HostAssocDetails &details) {
            return FindSubprogram(details.symbol());
          },
          [](const auto &) -> const Symbol * { return nullptr; },
      },
      symbol.details());
}
// The result symbol of the function that `symbol` resolves to, or nullptr
// if it does not resolve to a function.
const Symbol *FindFunctionResult(const Symbol &symbol) {
  if (const Symbol * subp{FindSubprogram(symbol)}) {
    if (const auto &subpDetails{subp->detailsIf<SubprogramDetails>()}) {
      if (subpDetails->isFunction()) {
        return &subpDetails->result();
      }
    }
  }
  return nullptr;
}
// For a type-bound procedure binding, find the same-named component in the
// parent type's scope (the binding it overrides), if any.
const Symbol *FindOverriddenBinding(const Symbol &symbol) {
  if (symbol.has<ProcBindingDetails>()) {
    if (const DeclTypeSpec * parentType{FindParentTypeSpec(symbol.owner())}) {
      if (const DerivedTypeSpec * parentDerived{parentType->AsDerived()}) {
        if (const Scope * parentScope{parentDerived->typeSymbol().scope()}) {
          return parentScope->FindComponent(symbol.name());
        }
      }
    }
  }
  return nullptr;
}
// Overloads locating the declared type of an extended derived type's
// parent component; nullptr when there is no parent.
const DeclTypeSpec *FindParentTypeSpec(const DerivedTypeSpec &derived) {
  return FindParentTypeSpec(derived.typeSymbol());
}
const DeclTypeSpec *FindParentTypeSpec(const DeclTypeSpec &decl) {
  if (const DerivedTypeSpec * derived{decl.AsDerived()}) {
    return FindParentTypeSpec(*derived);
  } else {
    return nullptr;
  }
}
const DeclTypeSpec *FindParentTypeSpec(const Scope &scope) {
  if (scope.kind() == Scope::Kind::DerivedType) {
    if (const auto *symbol{scope.symbol()}) {
      return FindParentTypeSpec(*symbol);
    }
  }
  return nullptr;
}
const DeclTypeSpec *FindParentTypeSpec(const Symbol &symbol) {
  if (const Scope * scope{symbol.scope()}) {
    if (const auto *details{symbol.detailsIf<DerivedTypeDetails>()}) {
      if (const Symbol * parent{details->GetParentComponent(*scope)}) {
        return parent->GetType();
      }
    }
  }
  return nullptr;
}
// When a construct association maps to a variable, and that variable
// is not an array with a vector-valued subscript, return the base
// Symbol of that variable, else nullptr. Descends into other construct
// associations when one associations maps to another.
static const Symbol *GetAssociatedVariable(const AssocEntityDetails &details) {
  if (const MaybeExpr & expr{details.expr()}) {
    if (evaluate::IsVariable(*expr) && !evaluate::HasVectorSubscript(*expr)) {
      if (const Symbol * varSymbol{evaluate::GetFirstSymbol(*expr)}) {
        return GetAssociationRoot(*varSymbol);
      }
    }
  }
  return nullptr;
}
// Return the Symbol of the variable of a construct association, if it exists
// Return nullptr if the name is associated with an expression
const Symbol *GetAssociationRoot(const Symbol &symbol) {
  const Symbol &ultimate{symbol.GetUltimate()};
  if (const auto *details{ultimate.detailsIf<AssocEntityDetails>()}) {
    // We have a construct association
    return GetAssociatedVariable(*details);
  } else {
    return &ultimate;
  }
}
// True if the derived type is extensible: not a C interop builtin, not
// BIND(C), and not a SEQUENCE type.
bool IsExtensibleType(const DerivedTypeSpec *derived) {
  return derived && !IsIsoCType(derived) &&
      !derived->typeSymbol().attrs().test(Attr::BIND_C) &&
      !derived->typeSymbol().get<DerivedTypeDetails>().sequence();
}
// True if the derived type is the type "__builtin_<name>" declared in the
// compiler's __fortran_builtins module.
bool IsBuiltinDerivedType(const DerivedTypeSpec *derived, const char *name) {
  if (!derived) {
    return false;
  } else {
    const auto &symbol{derived->typeSymbol()};
    return symbol.owner().IsModule() &&
        symbol.owner().GetName().value() == "__fortran_builtins" &&
        symbol.name() == "__builtin_"s + name;
  }
}
// True for the built-in C_PTR or C_FUNPTR types.
bool IsIsoCType(const DerivedTypeSpec *derived) {
  return IsBuiltinDerivedType(derived, "c_ptr") ||
      IsBuiltinDerivedType(derived, "c_funptr");
}
// True for the built-in TEAM_TYPE.
bool IsTeamType(const DerivedTypeSpec *derived) {
  return IsBuiltinDerivedType(derived, "team_type");
}
// True for the built-in EVENT_TYPE or LOCK_TYPE.
bool IsEventTypeOrLockType(const DerivedTypeSpec *derivedTypeSpec) {
  return IsBuiltinDerivedType(derivedTypeSpec, "event_type") ||
      IsBuiltinDerivedType(derivedTypeSpec, "lock_type");
}
// True if the symbol (via its association root) is of EVENT_TYPE/LOCK_TYPE
// or has such a type among its potential components.
bool IsOrContainsEventOrLockComponent(const Symbol &symbol) {
  if (const Symbol * root{GetAssociationRoot(symbol)}) {
    if (const auto *details{root->detailsIf<ObjectEntityDetails>()}) {
      if (const DeclTypeSpec * type{details->type()}) {
        if (const DerivedTypeSpec * derived{type->AsDerived()}) {
          return IsEventTypeOrLockType(derived) ||
              FindEventOrLockPotentialComponent(*derived);
        }
      }
    }
  }
  return false;
}
// True if the object has the SAVE attribute, explicitly or implicitly:
// module and block-data variables, SAVE-attributed symbols, statically
// initialized objects/procedure pointers, and members of SAVEd COMMON
// blocks. Derived type components and named constants are never "saved".
bool IsSaved(const Symbol &symbol) {
  auto scopeKind{symbol.owner().kind()};
  if (scopeKind == Scope::Kind::Module || scopeKind == Scope::Kind::BlockData) {
    return true;
  } else if (scopeKind == Scope::Kind::DerivedType) {
    return false; // this is a component
  } else if (IsNamedConstant(symbol)) {
    return false;
  } else if (symbol.attrs().test(Attr::SAVE)) {
    return true;
  } else {
    // An object or procedure pointer with an initializer is implicitly saved.
    if (const auto *object{symbol.detailsIf<ObjectEntityDetails>()}) {
      if (object->init()) {
        return true;
      }
    } else if (IsProcedurePointer(symbol)) {
      if (symbol.get<ProcEntityDetails>().init()) {
        return true;
      }
    }
    if (const Symbol * block{FindCommonBlockContaining(symbol)}) {
      if (block->attrs().test(Attr::SAVE)) {
        return true;
      }
    }
    return false;
  }
}
// Check this symbol suitable as a type-bound procedure - C769
// Dummy arguments and procedure pointers are never eligible; otherwise the
// symbol must be a module subprogram, an interface body, or a non-intrinsic
// procedure entity with an explicit interface.
bool CanBeTypeBoundProc(const Symbol *symbol) {
  if (!symbol || IsDummy(*symbol) || IsProcedurePointer(*symbol)) {
    return false;
  } else if (symbol->has<SubprogramNameDetails>()) {
    return symbol->owner().kind() == Scope::Kind::Module;
  } else if (auto *details{symbol->detailsIf<SubprogramDetails>()}) {
    return symbol->owner().kind() == Scope::Kind::Module ||
        details->isInterface();
  } else if (const auto *proc{symbol->detailsIf<ProcEntityDetails>()}) {
    return !symbol->attrs().test(Attr::INTRINSIC) &&
        proc->HasExplicitInterface();
  } else {
    return false;
  }
}
// True if the symbol is considered initialized: appears in a DATA
// statement, is ALLOCATABLE, has an explicit initializer, or (for a
// non-pointer object) its derived type has default initialization.
bool IsInitialized(const Symbol &symbol) {
  if (symbol.test(Symbol::Flag::InDataStmt)) {
    return true;
  } else if (IsNamedConstant(symbol)) {
    return false;
  } else if (const auto *object{symbol.detailsIf<ObjectEntityDetails>()}) {
    if (IsAllocatable(symbol) || object->init()) {
      return true;
    }
    if (!IsPointer(symbol) && object->type()) {
      if (const auto *derived{object->type()->AsDerived()}) {
        if (derived->HasDefaultInitialization()) {
          return true;
        }
      }
    }
  } else if (const auto *proc{symbol.detailsIf<ProcEntityDetails>()}) {
    return proc->init().has_value();
  }
  return false;
}
bool IsFinalizable(const Symbol &symbol) {
if (const DeclTypeSpec * type{symbol.GetType()}) {
if (const DerivedTypeSpec * derived{type->AsDerived()}) {
return IsFinalizable(*derived);
}
}
return false;
}
bool IsFinalizable(const DerivedTypeSpec &derived) {
ScopeComponentIterator components{derived};
return std::find_if(components.begin(), components.end(),
[](const Symbol &x) { return x.has<FinalProcDetails>(); }) !=
components.end();
}
// TODO The following function returns true for all types with FINAL procedures
// This is because we don't yet fill in the data for FinalProcDetails
bool HasImpureFinal(const DerivedTypeSpec &derived) {
ScopeComponentIterator components{derived};
return std::find_if(
components.begin(), components.end(), [](const Symbol &x) {
return x.has<FinalProcDetails>() && !x.attrs().test(Attr::PURE);
}) != components.end();
}
// A coarray has corank greater than zero.
bool IsCoarray(const Symbol &symbol) { return symbol.Corank() > 0; }
// True if the symbol's type is CHARACTER with an assumed (LEN=*) length.
bool IsAssumedLengthCharacter(const Symbol &symbol) {
  if (const DeclTypeSpec * type{symbol.GetType()}) {
    return type->category() == DeclTypeSpec::Character &&
        type->characterTypeSpec().length().isAssumed();
  } else {
    return false;
  }
}
// C722 and C723: For a function to be assumed length, it must be external and
// of CHARACTER type
bool IsAssumedLengthExternalCharacterFunction(const Symbol &symbol) {
  return IsAssumedLengthCharacter(symbol) &&
      ((symbol.has<SubprogramDetails>() && symbol.owner().IsGlobal()) ||
          (symbol.test(Symbol::Flag::Function) &&
              symbol.attrs().test(Attr::EXTERNAL)));
}
// If `symbol` (via its association root) is an externally visible object
// while `scope` lies within a pure procedure, return the visible symbol;
// otherwise nullptr.
const Symbol *IsExternalInPureContext(
    const Symbol &symbol, const Scope &scope) {
  if (const auto *pureProc{FindPureProcedureContaining(scope)}) {
    if (const Symbol * root{GetAssociationRoot(symbol)}) {
      if (const Symbol *
              visible{FindExternallyVisibleObject(*root, *pureProc)}) {
        return visible;
      }
    }
  }
  return nullptr;
}
// Iterator to the first polymorphic potential component of the derived
// type; the end iterator (false when tested) when none exists.
PotentialComponentIterator::const_iterator FindPolymorphicPotentialComponent(
    const DerivedTypeSpec &derived) {
  PotentialComponentIterator potentials{derived};
  return std::find_if(
      potentials.begin(), potentials.end(), [](const Symbol &component) {
        if (const auto *details{component.detailsIf<ObjectEntityDetails>()}) {
          const DeclTypeSpec *type{details->type()};
          return type && type->IsPolymorphic();
        }
        return false;
      });
}
// True if the symbol (via its association root) has a polymorphic type or
// a polymorphic potential component.
bool IsOrContainsPolymorphicComponent(const Symbol &symbol) {
  if (const Symbol * root{GetAssociationRoot(symbol)}) {
    if (const auto *details{root->detailsIf<ObjectEntityDetails>()}) {
      if (const DeclTypeSpec * type{details->type()}) {
        if (type->IsPolymorphic()) {
          return true;
        }
        if (const DerivedTypeSpec * derived{type->AsDerived()}) {
          return (bool)FindPolymorphicPotentialComponent(*derived);
        }
      }
    }
  }
  return false;
}
// True if `symbol` is PROTECTED and is not reached in `currentScope`
// through host association.
bool InProtectedContext(const Symbol &symbol, const Scope &currentScope) {
  return IsProtected(symbol) && !IsHostAssociated(symbol, currentScope);
}
// C1101 and C1158
// TODO Need to check for a coindexed object (why? C1103?)
// Symbol-level check: returns a diagnostic text (with %s for the symbol
// name) explaining why the symbol may not be modified in `scope`, or
// std::nullopt when it is modifiable.
std::optional<parser::MessageFixedText> WhyNotModifiable(
    const Symbol &symbol, const Scope &scope) {
  const Symbol *root{GetAssociationRoot(symbol)};
  if (!root) {
    return "'%s' is construct associated with an expression"_en_US;
  } else if (InProtectedContext(*root, scope)) {
    return "'%s' is protected in this scope"_en_US;
  } else if (IsExternalInPureContext(*root, scope)) {
    return "'%s' is externally visible and referenced in a pure"
           " procedure"_en_US;
  } else if (IsOrContainsEventOrLockComponent(*root)) {
    return "'%s' is an entity with either an EVENT_TYPE or LOCK_TYPE"_en_US;
  } else if (IsIntentIn(*root)) {
    return "'%s' is an INTENT(IN) dummy argument"_en_US;
  } else if (!IsVariableName(*root)) {
    return "'%s' is not a variable"_en_US;
  } else {
    return std::nullopt;
  }
}
// Expression-level check: rejects non-variables and (unless permitted)
// vector subscripts, then delegates to the symbol-level check on the
// expression's base symbol. Returns a complete message or std::nullopt.
std::optional<parser::Message> WhyNotModifiable(parser::CharBlock at,
    const SomeExpr &expr, const Scope &scope, bool vectorSubscriptIsOk) {
  if (!evaluate::IsVariable(expr)) {
    return parser::Message{at, "Expression is not a variable"_en_US};
  } else if (auto dataRef{evaluate::ExtractDataRef(expr)}) {
    if (!vectorSubscriptIsOk && evaluate::HasVectorSubscript(expr)) {
      return parser::Message{at, "Variable has a vector subscript"_en_US};
    }
    const Symbol &symbol{dataRef->GetFirstSymbol()};
    if (auto maybeWhy{WhyNotModifiable(symbol, scope)}) {
      return parser::Message{symbol.name(),
          parser::MessageFormattedText{std::move(*maybeWhy), symbol.name()}};
    }
  } else {
    // reference to function returning POINTER
  }
  return std::nullopt;
}
// Visitor over action-statement parse-tree alternatives that decides whether
// a statement is an "image control statement".  Most image control statements
// are recognized purely by type; ALLOCATE/DEALLOCATE/CALL need inspection of
// their operands (they qualify only when a coarray is involved).
class ImageControlStmtHelper {
  // Statement types that are image control statements unconditionally.
  using ImageControlStmts = std::variant<parser::ChangeTeamConstruct,
      parser::CriticalConstruct, parser::EventPostStmt, parser::EventWaitStmt,
      parser::FormTeamStmt, parser::LockStmt, parser::StopStmt,
      parser::SyncAllStmt, parser::SyncImagesStmt, parser::SyncMemoryStmt,
      parser::SyncTeamStmt, parser::UnlockStmt>;
public:
  // Generic fallback: true iff the statement's type is in ImageControlStmts.
  template<typename T> bool operator()(const T &) {
    return common::HasMember<T, ImageControlStmts>;
  }
  // Look through parse-tree indirections.
  template<typename T> bool operator()(const common::Indirection<T> &x) {
    return (*this)(x.value());
  }
  // ALLOCATE qualifies only when at least one allocation names a coarray.
  bool operator()(const parser::AllocateStmt &stmt) {
    const auto &allocationList{std::get<std::list<parser::Allocation>>(stmt.t)};
    for (const auto &allocation : allocationList) {
      const auto &allocateObject{
          std::get<parser::AllocateObject>(allocation.t)};
      if (IsCoarrayObject(allocateObject)) {
        return true;
      }
    }
    return false;
  }
  // DEALLOCATE qualifies only when at least one object is a coarray.
  bool operator()(const parser::DeallocateStmt &stmt) {
    const auto &allocateObjectList{
        std::get<std::list<parser::AllocateObject>>(stmt.t)};
    for (const auto &allocateObject : allocateObjectList) {
      if (IsCoarrayObject(allocateObject)) {
        return true;
      }
    }
    return false;
  }
  // CALL qualifies only for MOVE_ALLOC whose first argument is (or contains)
  // a coarray.
  bool operator()(const parser::CallStmt &stmt) {
    const auto &procedureDesignator{
        std::get<parser::ProcedureDesignator>(stmt.v.t)};
    if (auto *name{std::get_if<parser::Name>(&procedureDesignator.u)}) {
      // TODO: also ensure that the procedure is, in fact, an intrinsic
      if (name->source == "move_alloc") {
        const auto &args{std::get<std::list<parser::ActualArgSpec>>(stmt.v.t)};
        if (!args.empty()) {
          const parser::ActualArg &actualArg{
              std::get<parser::ActualArg>(args.front().t)};
          if (const auto *argExpr{
                  std::get_if<common::Indirection<parser::Expr>>(
                      &actualArg.u)}) {
            return HasCoarray(argExpr->value());
          }
        }
      }
    }
    return false;
  }
  // Dispatch a wrapped action statement to the handlers above.
  bool operator()(const parser::Statement<parser::ActionStmt> &stmt) {
    return std::visit(*this, stmt.statement.u);
  }
private:
  // True when the allocate-object's terminal name resolves to a coarray.
  bool IsCoarrayObject(const parser::AllocateObject &allocateObject) {
    const parser::Name &name{GetLastName(allocateObject)};
    return name.symbol && IsCoarray(*name.symbol);
  }
};
// Entry point: is this executable construct an image control statement?
bool IsImageControlStmt(const parser::ExecutableConstruct &construct) {
  return std::visit(ImageControlStmtHelper{}, construct.u);
}
// For an action statement that is an image control statement only because a
// coarray is involved (ALLOCATE / DEALLOCATE / MOVE_ALLOC), return the
// explanatory message text; std::nullopt for everything else.
std::optional<parser::MessageFixedText> GetImageControlStmtCoarrayMsg(
    const parser::ExecutableConstruct &construct) {
  if (const auto *actionStmt{
          std::get_if<parser::Statement<parser::ActionStmt>>(&construct.u)}) {
    return std::visit(
        common::visitors{
            [](const common::Indirection<parser::AllocateStmt> &)
                -> std::optional<parser::MessageFixedText> {
              return "ALLOCATE of a coarray is an image control"
                     " statement"_en_US;
            },
            [](const common::Indirection<parser::DeallocateStmt> &)
                -> std::optional<parser::MessageFixedText> {
              return "DEALLOCATE of a coarray is an image control"
                     " statement"_en_US;
            },
            [](const common::Indirection<parser::CallStmt> &)
                -> std::optional<parser::MessageFixedText> {
              return "MOVE_ALLOC of a coarray is an image control"
                     " statement "_en_US;
            },
            [](const auto &) -> std::optional<parser::MessageFixedText> {
              return std::nullopt;
            },
        },
        actionStmt->statement.u);
  }
  return std::nullopt;
}
// Best-effort source location of an image control statement: the construct's
// opening statement for CHANGE TEAM / CRITICAL, the statement itself for an
// action statement, and an empty CharBlock otherwise.
parser::CharBlock GetImageControlStmtLocation(
    const parser::ExecutableConstruct &executableConstruct) {
  return std::visit(
      common::visitors{
          [](const common::Indirection<parser::ChangeTeamConstruct>
                  &construct) {
            return std::get<parser::Statement<parser::ChangeTeamStmt>>(
                construct.value().t)
                .source;
          },
          [](const common::Indirection<parser::CriticalConstruct> &construct) {
            return std::get<parser::Statement<parser::CriticalStmt>>(
                construct.value().t)
                .source;
          },
          [](const parser::Statement<parser::ActionStmt> &actionStmt) {
            return actionStmt.source;
          },
          [](const auto &) { return parser::CharBlock{}; },
      },
      executableConstruct.u);
}
// True when any symbol referenced by the analyzed expression has an
// association root that is a coarray.
bool HasCoarray(const parser::Expr &expression) {
  if (const auto *expr{GetExpr(expression)}) {
    for (const Symbol &symbol : evaluate::CollectSymbols(*expr)) {
      if (const Symbol * root{GetAssociationRoot(symbol)}) {
        if (IsCoarray(*root)) {
          return true;
        }
      }
    }
  }
  return false;
}
// True when the symbol has a declared type and that type is polymorphic.
bool IsPolymorphic(const Symbol &symbol) {
  if (const DeclTypeSpec * type{symbol.GetType()}) {
    return type->IsPolymorphic();
  }
  return false;
}
// True when the symbol is both ALLOCATABLE and polymorphic.
bool IsPolymorphicAllocatable(const Symbol &symbol) {
  return IsAllocatable(symbol) && IsPolymorphic(symbol);
}
// Checks whether a derived-type component may be accessed from 'scope'.
// Returns a diagnostic when the component is PRIVATE and 'scope' is outside
// the defining module; std::nullopt when access is allowed.
std::optional<parser::MessageFormattedText> CheckAccessibleComponent(
    const Scope &scope, const Symbol &symbol) {
  CHECK(symbol.owner().IsDerivedType()); // symbol must be a component
  if (symbol.attrs().test(Attr::PRIVATE)) {
    if (const Scope * moduleScope{FindModuleContaining(symbol.owner())}) {
      if (!moduleScope->Contains(scope)) {
        return parser::MessageFormattedText{
            "PRIVATE component '%s' is only accessible within module '%s'"_err_en_US,
            symbol.name(), moduleScope->GetName().value()};
      }
    }
  }
  return std::nullopt;
}
// Returns the derived type's parameter names in traversal order: the parent
// type's parameters (recursively) first, then this type's own parameters in
// declaration order.
// NOTE: restores '&paramNames', which had been corrupted to the HTML entity
// sequence '¶mNames' ('&para;' mojibake) and did not compile.
std::list<SourceName> OrderParameterNames(const Symbol &typeSymbol) {
  std::list<SourceName> result;
  if (const DerivedTypeSpec * spec{typeSymbol.GetParentTypeSpec()}) {
    result = OrderParameterNames(spec->typeSymbol());
  }
  const auto &paramNames{typeSymbol.get<DerivedTypeDetails>().paramNames()};
  result.insert(result.end(), paramNames.begin(), paramNames.end());
  return result;
}
// Same ordering as OrderParameterNames(), but yields the parameter symbols
// themselves.  ('&paramDecls' likewise restored from '¶mDecls' mojibake.)
SymbolVector OrderParameterDeclarations(const Symbol &typeSymbol) {
  SymbolVector result;
  if (const DerivedTypeSpec * spec{typeSymbol.GetParentTypeSpec()}) {
    result = OrderParameterDeclarations(spec->typeSymbol());
  }
  const auto &paramDecls{typeSymbol.get<DerivedTypeDetails>().paramDecls()};
  result.insert(result.end(), paramDecls.begin(), paramDecls.end());
  return result;
}
// Folds the type parameters of 'spec' and returns the matching instantiation
// of the parameterized derived type in 'scope', creating and instantiating a
// new one if no instantiation with these parameter values exists yet.
const DeclTypeSpec &FindOrInstantiateDerivedType(Scope &scope,
    DerivedTypeSpec &&spec, SemanticsContext &semanticsContext,
    DeclTypeSpec::Category category) {
  spec.CookParameters(semanticsContext.foldingContext());
  spec.EvaluateParameters(semanticsContext.foldingContext());
  if (const DeclTypeSpec *
          type{scope.FindInstantiatedDerivedType(spec, category)}) {
    return *type;
  }
  // Create a new instantiation of this parameterized derived type
  // for this particular distinct set of actual parameter values.
  DeclTypeSpec &type{scope.MakeDerivedType(category, std::move(spec))};
  type.derivedTypeSpec().Instantiate(scope, semanticsContext);
  return type;
}
// ComponentIterator implementation
// Constructs an iterator positioned at the first component of 'derived' that
// is relevant for this traversal kind (or at end if there is none).
template<ComponentKind componentKind>
typename ComponentIterator<componentKind>::const_iterator
ComponentIterator<componentKind>::const_iterator::Create(
    const DerivedTypeSpec &derived) {
  const_iterator it{};
  it.componentPath_.emplace_back(derived);
  it.Increment(); // cue up first relevant component, if any
  return it;
}
// Decides whether the traversal should descend into 'component'.  Returns
// the component's derived type when descent is wanted and safe, nullptr
// otherwise (non-derived component, kind-specific exclusion, or a type
// already on the traversal path).
template<ComponentKind componentKind>
const DerivedTypeSpec *
ComponentIterator<componentKind>::const_iterator::PlanComponentTraversal(
    const Symbol &component) const {
  if (const auto *details{component.detailsIf<ObjectEntityDetails>()}) {
    if (const DeclTypeSpec * type{details->type()}) {
      if (const auto *derived{type->AsDerived()}) {
        bool traverse{false};
        if constexpr (componentKind == ComponentKind::Ordered) {
          // Order Component (only visit parents)
          traverse = component.test(Symbol::Flag::ParentComp);
        } else if constexpr (componentKind == ComponentKind::Direct) {
          traverse = !IsAllocatableOrPointer(component);
        } else if constexpr (componentKind == ComponentKind::Ultimate) {
          traverse = !IsAllocatableOrPointer(component);
        } else if constexpr (componentKind == ComponentKind::Potential) {
          traverse = !IsPointer(component);
        } else if constexpr (componentKind == ComponentKind::Scope) {
          traverse = !IsAllocatableOrPointer(component);
        }
        if (traverse) {
          const Symbol &newTypeSymbol{derived->typeSymbol()};
          // Avoid infinite loop if the type is already part of the types
          // being visited. It is possible to have "loops in type" because
          // C744 does not forbid to use not yet declared type for
          // ALLOCATABLE or POINTER components.
          for (const auto &node : componentPath_) {
            if (&newTypeSymbol == &node.GetTypeSymbol()) {
              return nullptr;
            }
          }
          return derived;
        }
      }
    } // intrinsic & unlimited polymorphic not traversable
  }
  return nullptr;
}
// Should 'component' itself be yielded BEFORE descending into it?
// NOTE(review): there is deliberately no branch for ComponentKind::Scope —
// Increment() handles Scope in a separate constexpr branch, so this function
// is never instantiated for it; any new kind added here must get a branch or
// the function falls off the end.
template<ComponentKind componentKind>
static bool StopAtComponentPre(const Symbol &component) {
  if constexpr (componentKind == ComponentKind::Ordered) {
    // Parent components need to be iterated upon after their
    // sub-components in structure constructor analysis.
    return !component.test(Symbol::Flag::ParentComp);
  } else if constexpr (componentKind == ComponentKind::Direct) {
    return true;
  } else if constexpr (componentKind == ComponentKind::Ultimate) {
    // Ultimate components: procedures, ALLOCATABLE/POINTER, and
    // intrinsic-typed objects terminate the recursion here.
    return component.has<ProcEntityDetails>() ||
        IsAllocatableOrPointer(component) ||
        (component.get<ObjectEntityDetails>().type() &&
            component.get<ObjectEntityDetails>().type()->AsIntrinsic());
  } else if constexpr (componentKind == ComponentKind::Potential) {
    return !IsPointer(component);
  }
}
// Should 'component' be yielded AFTER its sub-components have been visited?
// Only parent components in Ordered traversals are visited post-order.
template<ComponentKind componentKind>
static bool StopAtComponentPost(const Symbol &component) {
  return componentKind == ComponentKind::Ordered &&
      component.test(Symbol::Flag::ParentComp);
}
// Advances to the next component to visit.  componentPath_ acts as an
// explicit depth-first traversal stack; each node tracks the current
// component, whether its type has been descended into, and whether the
// component itself has been yielded.  An empty path means end().
template<ComponentKind componentKind>
void ComponentIterator<componentKind>::const_iterator::Increment() {
  while (!componentPath_.empty()) {
    ComponentPathNode &deepest{componentPath_.back()};
    if (deepest.component()) {
      if (!deepest.descended()) {
        // First chance to descend into the current component's type.
        deepest.set_descended(true);
        if (const DerivedTypeSpec *
                derived{PlanComponentTraversal(*deepest.component())}) {
          componentPath_.emplace_back(*derived);
          continue;
        }
      } else if (!deepest.visited()) {
        deepest.set_visited(true);
        return; // this is the next component to visit, after descending
      }
    }
    // Move on to the next component name of the current type.
    auto &nameIterator{deepest.nameIterator()};
    if (nameIterator == deepest.nameEnd()) {
      componentPath_.pop_back(); // this type is exhausted; pop to its parent
    } else if constexpr (componentKind == ComponentKind::Scope) {
      // Scope traversals iterate the symbol table directly and always yield
      // pre-order, without the StopAtComponent* filters.
      deepest.set_component(*nameIterator++->second);
      deepest.set_descended(false);
      deepest.set_visited(true);
      return; // this is the next component to visit, before descending
    } else {
      const Scope &scope{deepest.GetScope()};
      auto scopeIter{scope.find(*nameIterator++)};
      if (scopeIter != scope.cend()) {
        const Symbol &component{*scopeIter->second};
        deepest.set_component(component);
        deepest.set_descended(false);
        if (StopAtComponentPre<componentKind>(component)) {
          deepest.set_visited(true);
          return; // this is the next component to visit, before descending
        } else {
          deepest.set_visited(!StopAtComponentPost<componentKind>(component));
        }
      }
    }
  }
}
// Renders the current traversal path as a designator suffix like "%a%b%c"
// for use in diagnostic messages.
template<ComponentKind componentKind>
std::string
ComponentIterator<componentKind>::const_iterator::BuildResultDesignatorName()
    const {
  std::string designator{""};
  for (const auto &node : componentPath_) {
    designator += "%" + DEREF(node.component()).name().ToString();
  }
  return designator;
}
// Explicit instantiations for every supported traversal kind.
template class ComponentIterator<ComponentKind::Ordered>;
template class ComponentIterator<ComponentKind::Direct>;
template class ComponentIterator<ComponentKind::Ultimate>;
template class ComponentIterator<ComponentKind::Potential>;
template class ComponentIterator<ComponentKind::Scope>;
// The Find*Component helpers below each scan a component traversal of
// 'derived' for the first component satisfying a particular predicate.
// The returned iterator is contextually convertible to bool (found / end).
UltimateComponentIterator::const_iterator FindCoarrayUltimateComponent(
    const DerivedTypeSpec &derived) {
  UltimateComponentIterator ultimates{derived};
  return std::find_if(ultimates.begin(), ultimates.end(), IsCoarray);
}
UltimateComponentIterator::const_iterator FindPointerUltimateComponent(
    const DerivedTypeSpec &derived) {
  UltimateComponentIterator ultimates{derived};
  return std::find_if(ultimates.begin(), ultimates.end(), IsPointer);
}
// Potential component whose derived type is EVENT_TYPE or LOCK_TYPE.
PotentialComponentIterator::const_iterator FindEventOrLockPotentialComponent(
    const DerivedTypeSpec &derived) {
  PotentialComponentIterator potentials{derived};
  return std::find_if(
      potentials.begin(), potentials.end(), [](const Symbol &component) {
        if (const auto *details{component.detailsIf<ObjectEntityDetails>()}) {
          const DeclTypeSpec *type{details->type()};
          return type && IsEventTypeOrLockType(type->AsDerived());
        }
        return false;
      });
}
UltimateComponentIterator::const_iterator FindAllocatableUltimateComponent(
    const DerivedTypeSpec &derived) {
  UltimateComponentIterator ultimates{derived};
  return std::find_if(ultimates.begin(), ultimates.end(), IsAllocatable);
}
UltimateComponentIterator::const_iterator
FindPolymorphicAllocatableUltimateComponent(const DerivedTypeSpec &derived) {
  UltimateComponentIterator ultimates{derived};
  return std::find_if(
      ultimates.begin(), ultimates.end(), IsPolymorphicAllocatable);
}
// Polymorphic ALLOCATABLE ultimate component that is not also a coarray.
UltimateComponentIterator::const_iterator
FindPolymorphicAllocatableNonCoarrayUltimateComponent(
    const DerivedTypeSpec &derived) {
  UltimateComponentIterator ultimates{derived};
  return std::find_if(ultimates.begin(), ultimates.end(), [](const Symbol &x) {
    return IsPolymorphicAllocatable(x) && !IsCoarray(x);
  });
}
// Returns the first ultimate component of 'derived' satisfying 'predicate',
// or nullptr when none does.
const Symbol *FindUltimateComponent(const DerivedTypeSpec &derived,
    const std::function<bool(const Symbol &)> &predicate) {
  UltimateComponentIterator ultimates{derived};
  if (auto it{std::find_if(ultimates.begin(), ultimates.end(),
          [&predicate](const Symbol &component) -> bool {
            return predicate(component);
          })}) {
    return &*it;
  }
  return nullptr;
}
// Symbol-level overload: the symbol itself satisfying the predicate wins;
// otherwise, if it is an object of derived type, search its ultimate
// components.
const Symbol *FindUltimateComponent(const Symbol &symbol,
    const std::function<bool(const Symbol &)> &predicate) {
  if (predicate(symbol)) {
    return &symbol;
  } else if (const auto *object{symbol.detailsIf<ObjectEntityDetails>()}) {
    if (const auto *type{object->type()}) {
      if (const auto *derived{type->AsDerived()}) {
        return FindUltimateComponent(*derived, predicate);
      }
    }
  }
  return nullptr;
}
// Returns the first immediate (non-inherited) component of 'type' satisfying
// 'predicate'; when none matches, recurs into the parent component's type so
// inherited components are considered last.
const Symbol *FindImmediateComponent(const DerivedTypeSpec &type,
    const std::function<bool(const Symbol &)> &predicate) {
  if (const Scope * scope{type.scope()}) {
    const Symbol *parent{nullptr};
    for (const auto &pair : *scope) {
      const Symbol *symbol{&*pair.second};
      if (predicate(*symbol)) {
        return symbol;
      }
      if (symbol->test(Symbol::Flag::ParentComp)) {
        parent = symbol; // remember the parent component for the fallback
      }
    }
    if (parent) {
      if (const auto *object{parent->detailsIf<ObjectEntityDetails>()}) {
        if (const auto *type{object->type()}) {
          if (const auto *derived{type->AsDerived()}) {
            return FindImmediateComponent(*derived, predicate);
          }
        }
      }
    }
  }
  return nullptr;
}
// True when the symbol is a function result, whether it is a data object or
// a procedure (e.g. a function returning a procedure pointer).
bool IsFunctionResult(const Symbol &symbol) {
  return (symbol.has<ObjectEntityDetails>() &&
             symbol.get<ObjectEntityDetails>().isFuncResult()) ||
      (symbol.has<ProcEntityDetails>() &&
          symbol.get<ProcEntityDetails>().isFuncResult());
}
// True when the symbol is a function result whose name matches that of its
// enclosing function (i.e. no RESULT clause renamed it).
bool IsFunctionResultWithSameNameAsFunction(const Symbol &symbol) {
  if (IsFunctionResult(symbol)) {
    if (const Symbol * function{symbol.owner().symbol()}) {
      return symbol.name() == function->name();
    }
  }
  return false;
}
// LabelEnforce walks statements that transfer control to labels and reports
// any branch targeting a label outside the set collected for the enclosing
// construct (labels_), i.e. control flow escaping the construct.
void LabelEnforce::Post(const parser::GotoStmt &gotoStmt) {
  checkLabelUse(gotoStmt.v);
}
void LabelEnforce::Post(const parser::ComputedGotoStmt &computedGotoStmt) {
  for (auto &i : std::get<std::list<parser::Label>>(computedGotoStmt.t)) {
    checkLabelUse(i);
  }
}
void LabelEnforce::Post(const parser::ArithmeticIfStmt &arithmeticIfStmt) {
  // Arithmetic IF has three label alternatives (negative / zero / positive).
  checkLabelUse(std::get<1>(arithmeticIfStmt.t));
  checkLabelUse(std::get<2>(arithmeticIfStmt.t));
  checkLabelUse(std::get<3>(arithmeticIfStmt.t));
}
void LabelEnforce::Post(const parser::AssignStmt &assignStmt) {
  checkLabelUse(std::get<parser::Label>(assignStmt.t));
}
void LabelEnforce::Post(const parser::AssignedGotoStmt &assignedGotoStmt) {
  for (auto &i : std::get<std::list<parser::Label>>(assignedGotoStmt.t)) {
    checkLabelUse(i);
  }
}
void LabelEnforce::Post(const parser::AltReturnSpec &altReturnSpec) {
  checkLabelUse(altReturnSpec.v);
}
// I/O specifiers that name labels (ERR=, END=, EOR=).
void LabelEnforce::Post(const parser::ErrLabel &errLabel) {
  checkLabelUse(errLabel.v);
}
void LabelEnforce::Post(const parser::EndLabel &endLabel) {
  checkLabelUse(endLabel.v);
}
void LabelEnforce::Post(const parser::EorLabel &eorLabel) {
  checkLabelUse(eorLabel.v);
}
// Reports an error when 'labelUsed' is not among the construct's own labels.
void LabelEnforce::checkLabelUse(const parser::Label &labelUsed) {
  if (labels_.find(labelUsed) == labels_.end()) {
    SayWithConstruct(context_, currentStatementSourcePosition_,
        parser::MessageFormattedText{
            "Control flow escapes from %s"_err_en_US, construct_},
        constructSourcePosition_);
  }
}
parser::MessageFormattedText LabelEnforce::GetEnclosingConstructMsg() {
  return {"Enclosing %s statement"_en_US, construct_};
}
// Emits the error at the offending statement and attaches a note pointing
// at the enclosing construct.
void LabelEnforce::SayWithConstruct(SemanticsContext &context,
    parser::CharBlock stmtLocation, parser::MessageFormattedText &&message,
    parser::CharBlock constructLocation) {
  context.Say(stmtLocation, message)
      .Attach(constructLocation, GetEnclosingConstructMsg());
}
}
| 34.569044 | 85 | 0.673456 | [
"object",
"vector"
] |
f6a55a5264bbcc2a09fe0b1c09eca5708b6c2e9d | 655 | cpp | C++ | Codeforces/Codefest18/A.cpp | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | 2 | 2018-12-11T14:37:24.000Z | 2022-01-23T18:11:54.000Z | Codeforces/Codefest18/A.cpp | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null | Codeforces/Codefest18/A.cpp | Mindjolt2406/Competitive-Programming | d000d98bf7005ee4fb809bcea2f110e4c4793b80 | [
"MIT"
] | null | null | null | #include<bits/stdc++.h>
#define mt make_tuple
#define mp make_pair
#define pu push_back
#define INF 1000000001
#define MOD 1000000007
#define ll long long int
#define ld long double
#define vi vector<int>
#define vll vector<long long int>
#define sc(n) scanf("%d",&n);
#define scll(n) scanf("%lld",&n);
#define scld(n) scanf("%Lf",&n);
#define scr(s) {char temp[1000000];scanf("%s",temp);s = temp;}
using namespace std;
// Reads n and prints the number of binary digits of n (its bit length).
// For any n >= 1 the trailing-zero count is strictly smaller than the bit
// length, so the "count == count2" adjustment below can only trigger for
// n == 0 — an input on which the original code spun forever (0 % 2 == 0).
static int bitLength(int n)
{
    int bits = 0;
    while(n)
    {
        bits++;
        n /= 2;
    }
    return bits;
}
int main()
{
    int n;
    scanf("%d", &n);
    int k = n;
    int count = bitLength(n);
    // Count trailing zero bits; the k != 0 guard fixes the infinite loop on
    // n == 0 while leaving every other input's behavior unchanged.
    int count2 = 0;
    while(k != 0 && k % 2 == 0)
    {
        k /= 2;
        count2++;
    }
    if(count == count2) count--;
    printf("%d\n", count);
    return 0;
}
| 16.794872 | 62 | 0.615267 | [
"vector"
] |
f6a7ff42b59a363cee60d5db90fbbd423ee57675 | 5,492 | cpp | C++ | tests/test_innerproduct.cpp | dongxiao92/ncnn | 2532d937bc6390df6d9bd0a78c47c15857c8967b | [
"BSD-3-Clause"
] | null | null | null | tests/test_innerproduct.cpp | dongxiao92/ncnn | 2532d937bc6390df6d9bd0a78c47c15857c8967b | [
"BSD-3-Clause"
] | null | null | null | tests/test_innerproduct.cpp | dongxiao92/ncnn | 2532d937bc6390df6d9bd0a78c47c15857c8967b | [
"BSD-3-Clause"
] | null | null | null | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "layer/innerproduct.h"
#include "testutil.h"
// Builds an fp32 InnerProduct layer with random weights and a random
// activation, then runs it through test_layer on input 'a'.
// NOTE: the RAND()/RandomFloat()/RandomMat() calls must stay in this exact
// order — they consume a shared seeded RNG sequence, so reordering changes
// the generated test data.
// Returns test_layer's result (0 on success).
static int test_innerproduct(const ncnn::Mat& a, int outch, int bias)
{
    ncnn::ParamDict pd;
    pd.set(0, outch); // num_output
    pd.set(1, bias);  // bias_term
    pd.set(2, outch * a.w * a.h * a.c);
    int activation_type = RAND() % 5; // 0 1 2 3 4
    ncnn::Mat activation_params(2);
    activation_params[0] = RandomFloat(-1, 0); // alpha
    activation_params[1] = RandomFloat(0, 1);  // beta
    pd.set(9, activation_type);
    pd.set(10, activation_params);
    // weights[0] = weight data, weights[1] = bias data (when bias_term set)
    std::vector<ncnn::Mat> weights(bias ? 2 : 1);
    weights[0] = RandomMat(outch * a.w * a.h * a.c);
    if (bias)
        weights[1] = RandomMat(outch);
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = true;
    opt.use_int8_inference = false;
    int ret = test_layer<ncnn::InnerProduct>("InnerProduct", pd, weights, opt, a);
    if (ret != 0)
    {
        fprintf(stderr, "test_innerproduct failed a.dims=%d a=(%d %d %d) outch=%d bias=%d act=%d actparams=[%f,%f]\n", a.dims, a.w, a.h, a.c, outch, bias, activation_type, activation_params[0], activation_params[1]);
    }
    return ret;
}
// fp32 test matrix over 3-D inputs, exercising packing-relevant channel
// counts (1/2/8/15/16/5...).  Short-circuit || stops at the first failure.
static int test_innerproduct_0()
{
    return 0
           || test_innerproduct(RandomMat(1, 3, 1), 1, 1)
           || test_innerproduct(RandomMat(3, 2, 2), 2, 1)
           || test_innerproduct(RandomMat(9, 3, 8), 7, 1)
           || test_innerproduct(RandomMat(2, 2, 8), 8, 1)
           || test_innerproduct(RandomMat(4, 3, 15), 8, 1)
           || test_innerproduct(RandomMat(6, 2, 16), 16, 1)
           || test_innerproduct(RandomMat(6, 2, 16), 7, 1)
           || test_innerproduct(RandomMat(6, 2, 5), 16, 1);
}
// Same coverage for 2-D inputs.
static int test_innerproduct_1()
{
    return 0
           || test_innerproduct(RandomMat(1, 1), 1, 1)
           || test_innerproduct(RandomMat(3, 2), 2, 1)
           || test_innerproduct(RandomMat(9, 8), 7, 1)
           || test_innerproduct(RandomMat(2, 8), 8, 1)
           || test_innerproduct(RandomMat(4, 15), 8, 1)
           || test_innerproduct(RandomMat(6, 16), 16, 1)
           || test_innerproduct(RandomMat(6, 16), 7, 1)
           || test_innerproduct(RandomMat(6, 5), 16, 1);
}
// Same coverage for 1-D inputs, plus a few extra size combinations.
static int test_innerproduct_2()
{
    return 0
           || test_innerproduct(RandomMat(1), 1, 1)
           || test_innerproduct(RandomMat(2), 2, 1)
           || test_innerproduct(RandomMat(8), 7, 1)
           || test_innerproduct(RandomMat(8), 8, 1)
           || test_innerproduct(RandomMat(15), 8, 1)
           || test_innerproduct(RandomMat(16), 16, 1)
           || test_innerproduct(RandomMat(16), 7, 1)
           || test_innerproduct(RandomMat(5), 16, 1)
           || test_innerproduct(RandomMat(32), 16, 1)
           || test_innerproduct(RandomMat(12), 16, 1)
           || test_innerproduct(RandomMat(16), 12, 1)
           || test_innerproduct(RandomMat(24), 32, 1);
}
// Builds an int8-quantized InnerProduct layer with random weights and scale
// blobs and runs it through test_layer on input 'a'.  int8_scale_term (pd 8)
// selects the quantized inference path.
// NOTE: keep the RandomMat() calls in this order — they consume a shared
// seeded RNG sequence, so reordering changes the generated test data.
// The extra weight blobs appear to hold the per-layer quantization scales
// (weight scales then input scale) — verify against the InnerProduct loader.
static int test_innerproduct_int8(const ncnn::Mat& a, int outch, int bias)
{
    ncnn::ParamDict pd;
    pd.set(0, outch); // num_output
    pd.set(1, bias);  // bias_term
    pd.set(2, outch * a.w * a.h * a.c);
    pd.set(8, 1); // int8_scale_term
    std::vector<ncnn::Mat> weights(bias ? 4 : 3);
    weights[0] = RandomMat(outch * a.w * a.h * a.c);
    if (bias)
    {
        weights[1] = RandomMat(outch);
        weights[2] = RandomMat(outch);
        weights[3] = RandomMat(1);
    }
    else
    {
        weights[1] = RandomMat(outch);
        weights[2] = RandomMat(1);
    }
    ncnn::Option opt;
    opt.num_threads = 1;
    opt.use_vulkan_compute = false;
    opt.use_int8_inference = true;
    int ret = test_layer<ncnn::InnerProduct>("InnerProduct", pd, weights, opt, a);
    if (ret != 0)
    {
        fprintf(stderr, "test_innerproduct_int8 failed a.dims=%d a=(%d %d %d) outch=%d bias=%d\n", a.dims, a.w, a.h, a.c, outch, bias);
    }
    // Bug fix: this previously returned 0 unconditionally, so int8 failures
    // were printed to stderr but never propagated to the test driver.
    return ret;
}
// int8 test matrix over 3-D inputs with varied channel counts.
static int test_innerproduct_3()
{
    return 0
           || test_innerproduct_int8(RandomMat(1, 3, 1), 1, 1)
           || test_innerproduct_int8(RandomMat(3, 2, 2), 2, 1)
           || test_innerproduct_int8(RandomMat(5, 3, 3), 3, 1)
           || test_innerproduct_int8(RandomMat(7, 2, 3), 12, 1)
           || test_innerproduct_int8(RandomMat(9, 3, 4), 4, 1)
           || test_innerproduct_int8(RandomMat(2, 2, 7), 7, 1)
           || test_innerproduct_int8(RandomMat(4, 3, 8), 3, 1)
           || test_innerproduct_int8(RandomMat(6, 2, 8), 8, 1)
           || test_innerproduct_int8(RandomMat(8, 3, 15), 15, 1)
           || test_innerproduct_int8(RandomMat(7, 2, 16), 4, 1)
           || test_innerproduct_int8(RandomMat(6, 3, 16), 16, 1);
}
// Fixed seed keeps the randomized test data reproducible across runs.
int main()
{
    SRAND(7767517);
    return 0
           || test_innerproduct_0()
           || test_innerproduct_1()
           || test_innerproduct_2()
           || test_innerproduct_3();
}
"vector"
] |
f6a8949a7cbf5f4ee9a91a0d4294fbd63cf97952 | 314 | hpp | C++ | src/mesh.hpp | bioglaze/naturedemo | 29c5c0907ffaa746bcddb4e4794e16e4d3a7943c | [
"MIT"
] | null | null | null | src/mesh.hpp | bioglaze/naturedemo | 29c5c0907ffaa746bcddb4e4794e16e4d3a7943c | [
"MIT"
] | null | null | null | src/mesh.hpp | bioglaze/naturedemo | 29c5c0907ffaa746bcddb4e4794e16e4d3a7943c | [
"MIT"
] | null | null | null | #pragma once
// Lightweight mesh handle.  'index' presumably refers to an entry in
// engine-owned mesh storage; -1 means "no mesh" — confirm against the
// implementation.
struct aeMesh
{
    int index = -1;
};
// Creates a built-in plane mesh and returns its handle.
aeMesh aeCreatePlane();
// Accessors for the vertex streams backing 'mesh'.
const struct VertexBuffer& GetIndices( const aeMesh& mesh );
const struct VertexBuffer& GetPositions( const aeMesh& mesh );
const struct VertexBuffer& GetUVs( const aeMesh& mesh );
// Loads a mesh from an already-read .a3d file.
aeMesh aeLoadMeshFile( const struct aeFile& a3dFile );
"mesh"
] |
f6a8ae7cd3406dd182d03d8ed705cceaf5e7e72a | 3,672 | hpp | C++ | depends/include/boost/process/detail/traits/cmd_or_exe.hpp | watchdog1023/Eurodollar | a28879c2bc78cad97ec5a2161e609299467feb04 | [
"MIT"
] | 12 | 2017-11-01T03:34:06.000Z | 2020-12-30T08:38:49.000Z | depends/include/boost/process/detail/traits/cmd_or_exe.hpp | watchdog1023/Eurodollar | a28879c2bc78cad97ec5a2161e609299467feb04 | [
"MIT"
] | 23 | 2018-03-28T15:16:14.000Z | 2020-02-05T09:42:02.000Z | depends/include/boost/process/detail/traits/cmd_or_exe.hpp | watchdog1023/Eurodollar | a28879c2bc78cad97ec5a2161e609299467feb04 | [
"MIT"
] | 17 | 2017-10-27T06:17:54.000Z | 2020-07-21T13:36:32.000Z | // Copyright (c) 2016 Klemens D. Morgenstern
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_PROCESS_DETAIL_TRAITS_CMD_OR_EXE_HPP_
#define BOOST_PROCESS_DETAIL_TRAITS_CMD_OR_EXE_HPP_
#include <string>
#include <vector>
#include <type_traits>
#include <initializer_list>
#include <boost/filesystem/path.hpp>
#include <boost/process/detail/traits/decl.hpp>
namespace boost { namespace process { namespace detail {
// Tag marking an initializer as a command-line / executable argument,
// parameterized on the character type (char vs wchar_t).
template<typename Char>
struct cmd_or_exe_tag {};
// Forward declaration of the shell launcher marker type.
struct shell_;
// Map every string-like initializer — C strings, string literals/arrays,
// std::basic_string, and vectors/initializer_lists of those — onto the
// cmd_or_exe tag of the matching character type.
template<> struct initializer_tag<const char* > { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<const wchar_t* > { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<char* > { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<wchar_t* > { typedef cmd_or_exe_tag<wchar_t> type;};
template<std::size_t Size> struct initializer_tag<const char [Size]> { typedef cmd_or_exe_tag<char> type;};
template<std::size_t Size> struct initializer_tag<const wchar_t [Size]> { typedef cmd_or_exe_tag<wchar_t> type;};
template<std::size_t Size> struct initializer_tag<const char (&)[Size]> { typedef cmd_or_exe_tag<char> type;};
template<std::size_t Size> struct initializer_tag<const wchar_t (&)[Size]> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::basic_string<char >> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::basic_string<wchar_t >> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::vector<std::basic_string<char >>> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::vector<std::basic_string<wchar_t >>> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::initializer_list<std::basic_string<char >>> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::initializer_list<std::basic_string<wchar_t >>> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::vector<char *>> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::vector<wchar_t *>> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::initializer_list<char *>> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::initializer_list<wchar_t *>> { typedef cmd_or_exe_tag<wchar_t> type;};
template<> struct initializer_tag<std::initializer_list<const char *>> { typedef cmd_or_exe_tag<char> type;};
template<> struct initializer_tag<std::initializer_list<const wchar_t *>> { typedef cmd_or_exe_tag<wchar_t> type;};
// shell_ and filesystem::path use the platform's native path character type.
template<> struct initializer_tag<shell_>
{
    typedef cmd_or_exe_tag<typename boost::filesystem::path::value_type> type;
};
template<> struct initializer_tag<boost::filesystem::path>
{
    typedef cmd_or_exe_tag<typename boost::filesystem::path::value_type> type;
};
// Setter helpers (defined elsewhere) also carry the cmd_or_exe tag so they
// are routed to the same initializer builder.
template <typename Char>
struct exe_setter_;
template <typename Char, bool Append = false>
struct arg_setter_;
template <typename Char, bool Append>
struct initializer_tag<arg_setter_<Char, Append>> { typedef cmd_or_exe_tag<Char> type;};
template<typename Char> struct initializer_tag<exe_setter_<Char>> { typedef cmd_or_exe_tag<Char> type;};
// Builders for the two character widths are specialized elsewhere.
template<>
struct initializer_builder<cmd_or_exe_tag<char>>;
template<>
struct initializer_builder<cmd_or_exe_tag<wchar_t>>;
}}}
#endif /* BOOST_PROCESS_DETAIL_STRING_TRAITS_HPP_ */
| 42.697674 | 129 | 0.751362 | [
"vector"
] |
f6a8d77cc5d990c9f0fe23385cd5c65d069e52a7 | 23,024 | cc | C++ | chrome/services/sharing/nearby/platform/webrtc.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2020-09-02T03:05:41.000Z | 2022-03-30T04:40:55.000Z | chrome/services/sharing/nearby/platform/webrtc.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 45 | 2020-09-02T03:21:37.000Z | 2022-03-31T22:19:45.000Z | chrome/services/sharing/nearby/platform/webrtc.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8 | 2020-07-22T18:49:18.000Z | 2022-02-08T10:27:16.000Z | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/services/sharing/nearby/platform/webrtc.h"
#include "ash/constants/ash_features.h"
#include "base/task/thread_pool.h"
#include "chrome/services/sharing/webrtc/ipc_network_manager.h"
#include "chrome/services/sharing/webrtc/ipc_packet_socket_factory.h"
#include "chrome/services/sharing/webrtc/mdns_responder_adapter.h"
#include "chrome/services/sharing/webrtc/p2p_port_allocator.h"
#include "chromeos/services/nearby/public/mojom/webrtc_signaling_messenger.mojom-shared.h"
#include "jingle/glue/thread_wrapper.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "mojo/public/cpp/bindings/self_owned_receiver.h"
#include "third_party/nearby/src/cpp/platform/public/count_down_latch.h"
#include "third_party/nearby/src/cpp/platform/public/future.h"
#include "third_party/nearby/src/cpp/platform/public/logging.h"
#include "third_party/webrtc/api/jsep.h"
#include "third_party/webrtc/api/peer_connection_interface.h"
#include "third_party/webrtc_overrides/task_queue_factory.h"
#include "unicode/locid.h"
namespace location {
namespace nearby {
namespace chrome {
namespace {
// The following constants are RTCConfiguration defaults designed to help with
// battery life for persistent connections like Phone Hub. These values were
// chosen by doing battery drain tests on an Android phone with a persistent
// Phone Hub connection upgraded to WebRtc. The goal here is prevent chatty
// KeepAlive pings at the WebRtc layer from waking up the Phone's kernel too
// frequently and causing battery drain.
//
// NOTE: Nearby Connections also has its own KeepAlive interval and timeout that
// are different from these core WebRtc values. They operate at a different
// layer and don't directly affect the values chosen for WebRtc. However, both
// values need to be greater than the defaults for battery saving to happen and
// the most frequent ping ultimately determines the worst case number of wake
// ups.
//
// See: b/183505430 for more context.
constexpr base::TimeDelta kIceConnectionReceivingTimeout = base::Minutes(10);
constexpr base::TimeDelta kIceCheckIntervalStrongConnectivity =
    base::Seconds(25);
constexpr base::TimeDelta kStableWritableConnectionPingInterval =
    base::Seconds(30);

// Traffic annotation attached to every packet socket created for Nearby
// WebRTC connections. Declared constexpr (DefineNetworkTrafficAnnotation is
// a constexpr factory) so the tag is immutable and has constant
// initialization rather than being a mutable global.
constexpr net::NetworkTrafficAnnotationTag kTrafficAnnotation =
    net::DefineNetworkTrafficAnnotation("nearby_webrtc_connection", R"(
        semantics {
          sender: "Chrome Nearby Share via WebRTC"
          description:
            "Chrome Nearby Share allows users to send data securely between "
            "devices. WebRTC allows Chrome to establish a secure session with "
            "another Nearby instance running on a different device and to "
            "transmit and receive data that users want to share across "
            "devices."
          trigger:
            "User uses the Nearby Share feature and selects a peer device to"
            " send the data to."
          data:
            "Text and media encrypted via AES-256-CBC. Protocol-level messages "
            "for the various subprotocols employed by WebRTC (including ICE, "
            "DTLS, RTCP, etc.) are encrypted via DTLS-SRTP. Note that ICE "
            "connectivity checks may leak the user's IP address(es), subject "
            "to the restrictions/guidance in "
            "https://datatracker.ietf.org/doc/draft-ietf-rtcweb-ip-handling."
          destination: OTHER
          destination_other:
            "A peer Nearby device that receives this data"
        }
        policy {
          cookies_allowed: NO
          setting:
            "This feature is only enabled for signed-in users who enable "
            "Nearby Share"
          chrome_policy {
            BrowserSignin {
              policy_options {mode: MANDATORY}
              BrowserSignin: 0
            }
          }
        }
    )");
// Returns the ISO country code for the locale currently set as the
// user's device language.
//
// Returns by non-const value: a const-qualified return value prevents move
// semantics at call sites with no benefit (clang-tidy
// readability-const-return-type).
std::string GetCurrentCountryCode() {
  return std::string(icu::Locale::getDefault().getCountry());
}
// Adapts WebRTC's AsyncResolverFactory interface to the sandbox-friendly
// IpcPacketSocketFactory, so DNS resolution requests made by WebRTC are
// serviced through the proxied socket factory.
class ProxyAsyncResolverFactory final : public webrtc::AsyncResolverFactory {
 public:
  // |socket_factory| must be non-null and must outlive this factory.
  explicit ProxyAsyncResolverFactory(
      sharing::IpcPacketSocketFactory* socket_factory)
      : socket_factory_(socket_factory) {
    DCHECK(socket_factory_);
  }

  // webrtc::AsyncResolverFactory:
  rtc::AsyncResolverInterface* Create() override {
    return socket_factory_->CreateAsyncResolver();
  }

 private:
  sharing::IpcPacketSocketFactory* socket_factory_;  // Not owned.
};
// This object only exists to forward incoming mojo messages. It will be created
// as a SelfOwnedReceiver on a separate sequence and will be cleaned up when the
// connection goes down. This is necessary to keep it pumping messages while the
// the main WebRtc thread is blocked on a future.
class IncomingMessageListener
    : public sharing::mojom::IncomingMessagesListener {
 public:
  // Both callbacks must be valid (DCHECKed); they are invoked directly on
  // whatever sequence the mojo receiver dispatches on.
  explicit IncomingMessageListener(
      api::WebRtcSignalingMessenger::OnSignalingMessageCallback
          signaling_message_callback,
      api::WebRtcSignalingMessenger::OnSignalingCompleteCallback
          signaling_complete_callback)
      : signaling_message_callback_(std::move(signaling_message_callback)),
        signaling_complete_callback_(std::move(signaling_complete_callback)) {
    DCHECK(signaling_message_callback_);
    DCHECK(signaling_complete_callback_);
  }

  ~IncomingMessageListener() override = default;

  // mojom::IncomingMessagesListener:
  // Forwards one raw signaling message, wrapped as a Nearby ByteArray.
  void OnMessage(const std::string& message) override {
    signaling_message_callback_(ByteArray(message));
  }

  // mojom::IncomingMessagesListener:
  // Signals the end of the message stream; |success| is false on failure.
  void OnComplete(bool success) override {
    signaling_complete_callback_(success);
  }

 private:
  api::WebRtcSignalingMessenger::OnSignalingMessageCallback
      signaling_message_callback_;
  api::WebRtcSignalingMessenger::OnSignalingCompleteCallback
      signaling_complete_callback_;
};
// Used as a messenger in sending and receiving WebRTC messages between devices.
// The messages sent and received are considered untrusted since they
// originate in an untrusted sandboxed process on device.
class WebRtcSignalingMessengerImpl : public api::WebRtcSignalingMessenger {
 public:
  // |messenger| is the shared mojo remote to the browser-side signaling
  // implementation; |self_id| identifies this device on the signaling
  // channel. A dedicated sequenced task runner (MayBlock) is created for
  // binding the incoming-message receiver off the WebRtc thread.
  WebRtcSignalingMessengerImpl(
      const std::string& self_id,
      const connections::LocationHint& location_hint,
      const mojo::SharedRemote<sharing::mojom::WebRtcSignalingMessenger>&
          messenger)
      : self_id_(self_id),
        location_hint_(location_hint),
        messenger_(messenger),
        task_runner_(
            base::ThreadPool::CreateSequencedTaskRunner({base::MayBlock()})) {}

  // Ensures any active receive session is torn down with the object.
  ~WebRtcSignalingMessengerImpl() override { StopReceivingMessages(); }

  WebRtcSignalingMessengerImpl(const WebRtcSignalingMessengerImpl& other) =
      delete;
  WebRtcSignalingMessengerImpl& operator=(
      const WebRtcSignalingMessengerImpl& other) = delete;

  // Converts the Nearby Connections LocationHint proto into its mojo
  // equivalent. An UNKNOWN format falls back to the device's current
  // ICU-locale country code in ISO 3166-1 alpha-2 format.
  sharing::mojom::LocationHintPtr CreateLocationHint() {
    sharing::mojom::LocationHintPtr location_hint_ptr =
        sharing::mojom::LocationHint::New();
    location_hint_ptr->location = location_hint_.location();
    switch (location_hint_.format()) {
      case location::nearby::connections::LocationStandard_Format::
          LocationStandard_Format_E164_CALLING:
        location_hint_ptr->format =
            sharing::mojom::LocationStandardFormat::E164_CALLING;
        break;
      case location::nearby::connections::LocationStandard_Format::
          LocationStandard_Format_ISO_3166_1_ALPHA_2:
        location_hint_ptr->format =
            sharing::mojom::LocationStandardFormat::ISO_3166_1_ALPHA_2;
        break;
      case location::nearby::connections::LocationStandard_Format::
          LocationStandard_Format_UNKNOWN:
        // Here we default to the current default country code before sending.
        location_hint_ptr->location = GetCurrentCountryCode();
        location_hint_ptr->format =
            sharing::mojom::LocationStandardFormat::ISO_3166_1_ALPHA_2;
        break;
    }
    return location_hint_ptr;
  }

  // api::WebRtcSignalingMessenger:
  // Synchronous mojo send; returns false if the mojo call fails or the
  // browser side reports failure.
  bool SendMessage(absl::string_view peer_id,
                   const ByteArray& message) override {
    bool success = false;
    if (!messenger_->SendMessage(self_id_, std::string(peer_id),
                                 CreateLocationHint(), std::string(message),
                                 &success)) {
      return false;
    }
    return success;
  }

  // Binds |pending_receiver| to a SelfOwnedReceiver on |task_runner_| so the
  // listener keeps pumping messages even while the WebRtc thread blocks.
  void BindIncomingReceiver(
      mojo::PendingReceiver<sharing::mojom::IncomingMessagesListener>
          pending_receiver,
      api::WebRtcSignalingMessenger::OnSignalingMessageCallback
          message_callback,
      api::WebRtcSignalingMessenger::OnSignalingCompleteCallback
          complete_callback) {
    mojo::MakeSelfOwnedReceiver(
        std::make_unique<IncomingMessageListener>(std::move(message_callback),
                                                  std::move(complete_callback)),
        std::move(pending_receiver), task_runner_);
  }

  // api::WebRtcSignalingMessenger:
  // Starts the receive stream. On success, stores the session remote (bound
  // later, see pending_session_remote_ comment) and posts the receiver bind
  // to |task_runner_|.
  bool StartReceivingMessages(
      OnSignalingMessageCallback message_callback,
      OnSignalingCompleteCallback complete_callback) override {
    bool success = false;
    mojo::PendingRemote<sharing::mojom::IncomingMessagesListener>
        pending_remote;
    mojo::PendingReceiver<sharing::mojom::IncomingMessagesListener>
        pending_receiver = pending_remote.InitWithNewPipeAndPassReceiver();

    // NOTE: this is a Sync mojo call that waits until Fast-Path ready is
    // received on the Instant Messaging (Tachyon) stream before returning.
    if (!messenger_->StartReceivingMessages(self_id_, CreateLocationHint(),
                                            std::move(pending_remote), &success,
                                            &pending_session_remote_) ||
        !success) {
      receiving_messages_ = false;
      return false;
    }

    // Do the pending_receiver Bind call on the task runner itself so it can
    // receive messages while the WebRtc thread is waiting. Any incoming
    // messages will be queued until the Bind happens.
    task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&WebRtcSignalingMessengerImpl::BindIncomingReceiver,
                       weak_ptr_factory_.GetWeakPtr(),
                       std::move(pending_receiver), std::move(message_callback),
                       std::move(complete_callback)));

    receiving_messages_ = true;
    return true;
  }

  // api::WebRtcSignalingMessenger:
  // Idempotent: does nothing unless StartReceivingMessages() succeeded since
  // the last stop.
  void StopReceivingMessages() override {
    if (receiving_messages_) {
      receiving_messages_ = false;
      if (pending_session_remote_) {
        mojo::Remote<sharing::mojom::ReceiveMessagesSession> session(
            std::move(pending_session_remote_));
        // This is a one-way message so it is safe to bind, send, and forget.
        // When the Remote goes out of scope it will close the pipe and cause
        // the other side to clean up the ReceiveMessagesExpress instance.
        // If the receiver/pipe is already down, this does nothing.
        session->StopReceivingMessages();
      }
    }
  }

 private:
  bool receiving_messages_ = false;
  std::string self_id_;
  connections::LocationHint location_hint_;

  // This is received and stored on a successful StartReceiveMessages(). We
  // choose to not bind right away because multiple threads end up
  // creating/calling/destroying WebRtcSignalingMessengerImpl by the design
  // of NearbyConnections. We need to ensure the thread that
  // binds/calls/destroys the remote is the same sequence, so we do all three at
  // once in StopReceivingMessages(). If the other side of the pipe is already
  // down, binding, calling, and destorying will be a no-op.
  mojo::PendingRemote<sharing::mojom::ReceiveMessagesSession>
      pending_session_remote_;
  mojo::SharedRemote<sharing::mojom::WebRtcSignalingMessenger> messenger_;
  scoped_refptr<base::SequencedTaskRunner> task_runner_;
  base::WeakPtrFactory<WebRtcSignalingMessengerImpl> weak_ptr_factory_{this};
};
} // namespace
// All mojo remotes are required to be bound (DCHECKed). The three chrome
// base::Threads declared here back the WebRTC network/signaling/worker
// threads; they are not started until InitPeerConnectionFactory() runs.
// |task_runner| is where ICE-server fetches are dispatched.
WebRtcMedium::WebRtcMedium(
    const mojo::SharedRemote<network::mojom::P2PSocketManager>& socket_manager,
    const mojo::SharedRemote<
        location::nearby::connections::mojom::MdnsResponderFactory>&
        mdns_responder_factory,
    const mojo::SharedRemote<sharing::mojom::IceConfigFetcher>&
        ice_config_fetcher,
    const mojo::SharedRemote<sharing::mojom::WebRtcSignalingMessenger>&
        webrtc_signaling_messenger,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : chrome_network_thread_(/*name=*/"WebRtc Network Thread"),
      chrome_signaling_thread_(/*name=*/"WebRtc Signaling Thread"),
      chrome_worker_thread_(/*name=*/"WebRtc Worker Thread"),
      p2p_socket_manager_(socket_manager),
      mdns_responder_factory_(mdns_responder_factory),
      ice_config_fetcher_(ice_config_fetcher),
      webrtc_signaling_messenger_(webrtc_signaling_messenger),
      task_runner_(std::move(task_runner)) {
  DCHECK(p2p_socket_manager_.is_bound());
  DCHECK(mdns_responder_factory.is_bound());
  DCHECK(ice_config_fetcher_.is_bound());
  DCHECK(webrtc_signaling_messenger_.is_bound());
}
// Tears down the peer connection factory and the three WebRTC threads.
// Ordering is deliberate: factory first (under the lock), then network
// resources on the network thread, then the remaining threads.
WebRtcMedium::~WebRtcMedium() {
  VLOG(1) << "WebRtcMedium destructor is running";
  // In case initialization was pending on another thread we block waiting for
  // the lock before we clear peer_connection_factory_.
  base::AutoLock peer_connection_factory_auto_lock(
      peer_connection_factory_lock_);
  peer_connection_factory_ = nullptr;

  if (chrome_network_thread_.IsRunning()) {
    // The network manager needs to free its resources on the thread they were
    // created, which is the network thread.
    if (network_manager_ || p2p_socket_manager_) {
      chrome_network_thread_.task_runner()->PostTask(
          FROM_HERE, base::BindOnce(&WebRtcMedium::ShutdownNetworkManager,
                                    weak_ptr_factory_.GetWeakPtr()));
    }
    // Stopping the thread will wait until all tasks have been
    // processed before returning. We wait for the above task to finish before
    // letting the the function continue to ensure network_manager_ is cleaned
    // up if it needed to be.
    chrome_network_thread_.Stop();
    DCHECK(!network_manager_);
    DCHECK(!socket_factory_);
  }

  // Stop is called in thread destructor, but we want to ensure all threads are
  // down before the destructor is complete and we release the lock.
  chrome_signaling_thread_.Stop();
  chrome_worker_thread_.Stop();
  VLOG(1) << "WebRtcMedium destructor is done shutting down threads.";
}
// Reports the device's current country code using the shared ICU-locale
// based lookup defined above.
const std::string WebRtcMedium::GetDefaultCountryCode() {
  std::string country_code = GetCurrentCountryCode();
  return country_code;
}
// Asynchronously creates a peer connection: hops to |task_runner_| to fetch
// ICE servers first; |callback| is eventually invoked (via
// OnIceServersFetched) with the resulting connection.
// |observer| must outlive the peer connection.
void WebRtcMedium::CreatePeerConnection(
    webrtc::PeerConnectionObserver* observer,
    PeerConnectionCallback callback) {
  task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&WebRtcMedium::FetchIceServers,
                                weak_ptr_factory_.GetWeakPtr(), observer,
                                std::move(callback)));
}
// Requests the ICE server list over mojo; the response continues in
// OnIceServersFetched() where the peer connection is actually built.
void WebRtcMedium::FetchIceServers(webrtc::PeerConnectionObserver* observer,
                                   PeerConnectionCallback callback) {
  ice_config_fetcher_->GetIceServers(base::BindOnce(
      &WebRtcMedium::OnIceServersFetched, weak_ptr_factory_.GetWeakPtr(),
      observer, std::move(callback)));
}
// Wraps the calling chrome thread in a JingleThreadWrapper (with synchronous
// sends allowed) and publishes the resulting rtc::Thread* through
// |thread_to_set|. Must run on the thread being wrapped.
void WebRtcMedium::InitWebRTCThread(rtc::Thread** thread_to_set) {
  jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
  jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
  *thread_to_set = jingle_glue::JingleThreadWrapper::current();
}
// One-time setup of the WebRTC peer connection factory. Starts the three
// chrome threads, initializes each as a jingle-wrapped rtc::Thread, and
// blocks on a latch until all three are ready before building the factory.
// Caller holds peer_connection_factory_lock_ (see OnIceServersFetched).
void WebRtcMedium::InitPeerConnectionFactory() {
  DCHECK(!chrome_network_thread_.IsRunning());
  DCHECK(!chrome_signaling_thread_.IsRunning());
  DCHECK(!chrome_worker_thread_.IsRunning());
  DCHECK(!rtc_network_thread_);
  DCHECK(!rtc_signaling_thread_);
  DCHECK(!rtc_worker_thread_);

  jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
  jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);

  // We need to create three dedicated threads for WebRTC. We post tasks to the
  // threads and to ensure the message loop and jingle wrapper is setup for each
  // thread. Unretained(this) is used because we will wait on this thread for
  // the tasks to complete before exiting.
  CountDownLatch latch(3);
  auto decrement_latch = base::BindRepeating(
      [](CountDownLatch* latch) { latch->CountDown(); }, &latch);

  chrome_network_thread_.Start();
  chrome_network_thread_.task_runner()->PostTask(
      FROM_HERE, base::BindOnce(&WebRtcMedium::InitNetworkThread,
                                base::Unretained(this), decrement_latch));
  chrome_worker_thread_.Start();
  chrome_worker_thread_.task_runner()->PostTask(
      FROM_HERE, base::BindOnce(&WebRtcMedium::InitWorkerThread,
                                base::Unretained(this), decrement_latch));
  chrome_signaling_thread_.Start();
  chrome_signaling_thread_.task_runner()->PostTask(
      FROM_HERE, base::BindOnce(&WebRtcMedium::InitSignalingThread,
                                base::Unretained(this), decrement_latch));

  // Wait for all threads to be initialized
  latch.Await();
  DCHECK(rtc_network_thread_);
  DCHECK(rtc_signaling_thread_);
  DCHECK(rtc_worker_thread_);

  webrtc::PeerConnectionFactoryDependencies factory_dependencies;
  factory_dependencies.task_queue_factory = CreateWebRtcTaskQueueFactory();
  factory_dependencies.network_thread = rtc_network_thread_;
  factory_dependencies.worker_thread = rtc_worker_thread_;
  factory_dependencies.signaling_thread = rtc_signaling_thread_;

  peer_connection_factory_ = webrtc::CreateModularPeerConnectionFactory(
      std::move(factory_dependencies));
}
// Runs on chrome_network_thread_. Wraps it as the rtc network thread, wires
// up the mDNS responder, network manager and packet socket factory, then
// signals |complete_callback| (a latch decrement from
// InitPeerConnectionFactory).
void WebRtcMedium::InitNetworkThread(base::OnceClosure complete_callback) {
  DCHECK(chrome_network_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!rtc_network_thread_);
  DCHECK(!network_manager_);
  DCHECK(!socket_factory_);
  InitWebRTCThread(&rtc_network_thread_);

  // Get a connection to the mdns responder from the factory interface
  mojo::PendingRemote<network::mojom::MdnsResponder> pending_remote;
  mojo::PendingReceiver<network::mojom::MdnsResponder> pending_receiver(
      pending_remote.InitWithNewPipeAndPassReceiver());
  mojo::Remote<network::mojom::MdnsResponder> mdns_responder{
      std::move(pending_remote)};
  // We don't need to wait for this call to finish (it doesn't have a callback
  // anyways). The mojo pipe will queue up calls and dispatch as soon as the
  // the other side is available.
  mdns_responder_factory_->CreateMdnsResponder(std::move(pending_receiver));

  network_manager_ = std::make_unique<sharing::IpcNetworkManager>(
      p2p_socket_manager_, std::make_unique<sharing::MdnsResponderAdapter>(
                               std::move(mdns_responder)));
  socket_factory_ = std::make_unique<sharing::IpcPacketSocketFactory>(
      p2p_socket_manager_, kTrafficAnnotation);

  // NOTE: IpcNetworkManager::Initialize() does not override the empty default
  // implementation so this doesn't actually do anything right now. However
  // the contract of rtc::NetworkManagerBase states that it should be called
  // before using and explicitly on the network thread (which right now is the
  // current thread). Previously this was handled by P2PPortAllocator.
  network_manager_->Initialize();

  std::move(complete_callback).Run();
}
// Releases the network manager and socket factory. Must run on
// chrome_network_thread_, the thread they were created on (see destructor).
void WebRtcMedium::ShutdownNetworkManager() {
  DCHECK(chrome_network_thread_.task_runner()->BelongsToCurrentThread());
  network_manager_.reset();
  socket_factory_.reset();
}
// Runs on chrome_signaling_thread_: wraps it as the rtc signaling thread and
// signals completion back to InitPeerConnectionFactory's latch.
void WebRtcMedium::InitSignalingThread(base::OnceClosure complete_callback) {
  DCHECK(chrome_signaling_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!rtc_signaling_thread_);
  InitWebRTCThread(&rtc_signaling_thread_);
  std::move(complete_callback).Run();
}
// Runs on chrome_worker_thread_: wraps it as the rtc worker thread and
// signals completion back to InitPeerConnectionFactory's latch.
void WebRtcMedium::InitWorkerThread(base::OnceClosure complete_callback) {
  DCHECK(chrome_worker_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!rtc_worker_thread_);
  InitWebRTCThread(&rtc_worker_thread_);
  std::move(complete_callback).Run();
}
// Continuation of CreatePeerConnection(): lazily initializes the shared
// peer connection factory (under its lock), translates |ice_servers| into
// the RTCConfiguration, applies battery-saving keep-alive intervals when
// the feature is enabled, builds the port allocator / resolver factory, and
// finally hands the new peer connection to |callback|.
void WebRtcMedium::OnIceServersFetched(
    webrtc::PeerConnectionObserver* observer,
    PeerConnectionCallback callback,
    std::vector<sharing::mojom::IceServerPtr> ice_servers) {
  base::AutoLock peer_connection_factory_auto_lock(
      peer_connection_factory_lock_);
  if (!peer_connection_factory_) {
    InitPeerConnectionFactory();
  }

  webrtc::PeerConnectionInterface::RTCConfiguration rtc_config;
  // Add |ice_servers| into the rtc_config.servers.
  for (const auto& ice_server : ice_servers) {
    webrtc::PeerConnectionInterface::IceServer ice_turn_server;
    for (const auto& url : ice_server->urls)
      ice_turn_server.urls.push_back(url.spec());
    if (ice_server->username)
      ice_turn_server.username = *ice_server->username;
    if (ice_server->credential)
      ice_turn_server.password = *ice_server->credential;
    rtc_config.servers.push_back(ice_turn_server);
  }

  // This prevents WebRTC from being chatty with keep alive messages which was
  // causing battery drain for Phone Hub's persistent connection.
  // Ideally these options should be configurable per connection, but right now
  // we have a single share factory for all peer connections.
  if (ash::features::IsNearbyKeepAliveFixEnabled()) {
    rtc_config.ice_connection_receiving_timeout =
        kIceConnectionReceivingTimeout.InMilliseconds();
    rtc_config.ice_check_interval_strong_connectivity =
        kIceCheckIntervalStrongConnectivity.InMilliseconds();
    rtc_config.stable_writable_connection_ping_interval_ms =
        kStableWritableConnectionPingInterval.InMilliseconds();
  }

  webrtc::PeerConnectionDependencies dependencies(observer);
  sharing::P2PPortAllocator::Config port_config;
  port_config.enable_multiple_routes = true;
  port_config.enable_nonproxied_udp = true;
  dependencies.allocator = std::make_unique<sharing::P2PPortAllocator>(
      network_manager_.get(), socket_factory_.get(), port_config);
  dependencies.async_resolver_factory =
      std::make_unique<ProxyAsyncResolverFactory>(socket_factory_.get());

  rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection =
      peer_connection_factory_->CreatePeerConnection(rtc_config,
                                                     std::move(dependencies));
  callback(std::move(peer_connection));
}
// Creates a fresh signaling messenger for |self_id|; every messenger shares
// the same mojo remote to the browser-side signaling implementation.
std::unique_ptr<api::WebRtcSignalingMessenger>
WebRtcMedium::GetSignalingMessenger(
    absl::string_view self_id,
    const connections::LocationHint& location_hint) {
  std::string id(self_id);
  return std::make_unique<WebRtcSignalingMessengerImpl>(
      std::move(id), location_hint, webrtc_signaling_messenger_);
}
} // namespace chrome
} // namespace nearby
} // namespace location
| 41.861818 | 90 | 0.732366 | [
"object",
"vector"
] |
f6a946741c421565202d6b97ac322d945a3d0e59 | 199,669 | cpp | C++ | zsLib/eventing/tool/cpp/zsLib_eventing_tool_GenerateStructC.cpp | timothy003/zsLib-eventing | 9d4abb9779508aabdaf9867c938b82e9040abd0f | [
"BSD-2-Clause"
] | null | null | null | zsLib/eventing/tool/cpp/zsLib_eventing_tool_GenerateStructC.cpp | timothy003/zsLib-eventing | 9d4abb9779508aabdaf9867c938b82e9040abd0f | [
"BSD-2-Clause"
] | 2 | 2018-08-23T13:46:14.000Z | 2018-08-23T18:35:11.000Z | zsLib/eventing/tool/cpp/zsLib_eventing_tool_GenerateStructC.cpp | timothy003/zsLib-eventing | 9d4abb9779508aabdaf9867c938b82e9040abd0f | [
"BSD-2-Clause"
] | 2 | 2019-07-25T16:58:48.000Z | 2020-09-09T01:23:03.000Z | /*
Copyright (c) 2016, Robin Raymond
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
*/
#include <zsLib/eventing/tool/internal/zsLib_eventing_tool_GenerateStructC.h>
#include <zsLib/eventing/tool/internal/zsLib_eventing_tool_GenerateHelper.h>
#include <zsLib/eventing/tool/internal/zsLib_eventing_tool_GenerateTypesHeader.h>
#include <zsLib/eventing/tool/internal/zsLib_eventing_tool_GenerateStructHeader.h>
#include <zsLib/eventing/tool/internal/zsLib_eventing_tool_Helper.h>
#include <zsLib/eventing/tool/OutputStream.h>
#include <sstream>
#define ZS_WRAPPER_COMPILER_DIRECTIVE_EXCLUSIZE "EXCLUSIVE"
namespace zsLib { namespace eventing { namespace tool { ZS_DECLARE_SUBSYSTEM(zslib_eventing_tool) } } }
namespace zsLib
{
namespace eventing
{
ZS_DECLARE_TYPEDEF_PTR(IIDLTypes::Project, Project);
namespace tool
{
ZS_DECLARE_TYPEDEF_PTR(eventing::tool::internal::Helper, UseHelper);
ZS_DECLARE_TYPEDEF_PTR(eventing::IHasher, UseHasher);
typedef std::set<String> HashSet;
namespace internal
{
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// (helpers)
//
//---------------------------------------------------------------------
// Appends an "#include <headerFile>" line to |ss|, emitting each distinct
// header exactly once; repeated requests for the same header are no-ops.
static void doInclude(
                      const String &headerFile,
                      std::stringstream &ss,
                      GenerateStructC::StringSet &alreadyIncluded
                      ) noexcept
{
  bool firstTime = alreadyIncluded.insert(headerFile).second;
  if (!firstTime) return;
  ss << "#include " << headerFile << "\n";
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// GenerateStructC::HelperFile
//
//---------------------------------------------------------------------
// Default-constructs an empty helper-file accumulator; the include/boxing
// state is filled in as code generation proceeds.
GenerateStructC::HelperFile::HelperFile() noexcept
{
}

//---------------------------------------------------------------------
GenerateStructC::HelperFile::~HelperFile() noexcept
{
}
//---------------------------------------------------------------------
// Adds a C-header include to the generated helper header (once only).
void GenerateStructC::HelperFile::headerIncludeC(const String &headerFile) noexcept
{
  doInclude(headerFile, headerCIncludeSS_, headerCAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C++-header include to the generated helper header (once only).
void GenerateStructC::HelperFile::headerIncludeCpp(const String &headerFile) noexcept
{
  doInclude(headerFile, headerCppIncludeSS_, headerCppAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C-header include to the generated helper source (once only).
void GenerateStructC::HelperFile::includeC(const String &headerFile) noexcept
{
  doInclude(headerFile, cIncludeSS_, cAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C++-header include to the generated helper source (once only).
void GenerateStructC::HelperFile::includeCpp(const String &headerFile) noexcept
{
  doInclude(headerFile, cppIncludeSS_, cppAlreadyIncluded_);
}
//---------------------------------------------------------------------
// Returns true when a boxed variant was previously registered for the
// given fully-qualified name path.
bool GenerateStructC::HelperFile::hasBoxing(const String &namePathStr) noexcept
{
  return boxings_.count(namePathStr) > 0;
}
//---------------------------------------------------------------------
// Declares a custom thrower for |rejectionType| in the header stream,
// emitting at most one declaration per distinct rejection type.
void GenerateStructC::HelperFile::specialThrow(TypePtr rejectionType) noexcept
{
  if (!rejectionType) return;

  bool firstTime = alreadyThrows_.insert(rejectionType).second;
  if (!firstTime) return;

  headerThrowersSS_ << "  void customThrow_set_Exception(exception_handle_t handle, const " << GenerateStructHeader::getWrapperTypeString(false, rejectionType) << " &error) noexcept;\n";
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// GenerateStructC::StructFile
//
//---------------------------------------------------------------------
// Default-constructs an empty per-struct output accumulator.
GenerateStructC::StructFile::StructFile() noexcept
{
}

//---------------------------------------------------------------------
GenerateStructC::StructFile::~StructFile() noexcept
{
}

//---------------------------------------------------------------------
// Adds a C-header include to this struct's generated header (once only).
void GenerateStructC::StructFile::headerIncludeC(const String &headerFile) noexcept
{
  doInclude(headerFile, headerCIncludeSS_, headerCAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C++-header include to this struct's generated header (once only).
void GenerateStructC::StructFile::headerIncludeCpp(const String &headerFile) noexcept
{
  doInclude(headerFile, headerCppIncludeSS_, headerCppAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C-header include to this struct's generated source (once only).
void GenerateStructC::StructFile::includeC(const String &headerFile) noexcept
{
  doInclude(headerFile, cIncludeSS_, cAlreadyIncluded_);
}

//---------------------------------------------------------------------
// Adds a C++-header include to this struct's generated source (once only).
void GenerateStructC::StructFile::includeCpp(const String &headerFile) noexcept
{
  doInclude(headerFile, cppIncludeSS_, cppAlreadyIncluded_);
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// GenerateStructC
//
//-------------------------------------------------------------------
// Constructs the generator; delegates to the no-op IDLCompiler base
// constructor (the base's normal setup is not needed here).
GenerateStructC::GenerateStructC() noexcept : IDLCompiler(Noop{})
{
}

//-------------------------------------------------------------------
// Factory method returning a shared instance of the generator.
GenerateStructCPtr GenerateStructC::create() noexcept
{
  return make_shared<GenerateStructC>();
}
//---------------------------------------------------------------------
// Produces an identifier-safe name for a predefined basic type.
// Implicitly-signed typedefs (char, short, int, long, long long) are first
// canonicalized onto their explicitly-signed forms so both spellings yield
// the same identifier; spaces in the rendered name become underscores.
String GenerateStructC::fixBasicType(IEventingTypes::PredefinedTypedefs type) noexcept
{
  switch (type) {
    case PredefinedTypedef_char:      type = PredefinedTypedef_schar; break;
    case PredefinedTypedef_short:     type = PredefinedTypedef_sshort; break;
    case PredefinedTypedef_int:       type = PredefinedTypedef_sint; break;
    case PredefinedTypedef_long:      type = PredefinedTypedef_slong; break;
    case PredefinedTypedef_longlong:  type = PredefinedTypedef_slonglong; break;
    default:                          break;  // every other typedef passes through unchanged
  }

  String result = GenerateHelper::getBasicTypeString(type);
  result.replaceAll(" ", "_");
  return result;
}
//---------------------------------------------------------------------
// Maps a predefined basic type to the C typedef name used by the generated
// wrapper API (e.g. bool -> "bool_t", string variants -> "string_t").
// Signed/unsigned aliases collapse onto a single typedef; returns an empty
// string only if the enum value is outside the handled set.
String GenerateStructC::fixCType(IEventingTypes::PredefinedTypedefs type) noexcept
{
  switch (type)
  {
    case PredefinedTypedef_void:      return "void";

    case PredefinedTypedef_bool:      return "bool_t";

    case PredefinedTypedef_uchar:     return "uchar_t";
    case PredefinedTypedef_char:
    case PredefinedTypedef_schar:     return "schar_t";
    case PredefinedTypedef_ushort:    return "ushort_t";
    case PredefinedTypedef_short:
    case PredefinedTypedef_sshort:    return "sshort_t";
    case PredefinedTypedef_uint:      return "uint_t";
    case PredefinedTypedef_int:
    case PredefinedTypedef_sint:      return "sint_t";
    case PredefinedTypedef_ulong:     return "ulong_t";
    case PredefinedTypedef_long:
    case PredefinedTypedef_slong:     return "slong_t";
    case PredefinedTypedef_ulonglong: return "ullong_t";
    case PredefinedTypedef_longlong:
    case PredefinedTypedef_slonglong: return "sllong_t";
    case PredefinedTypedef_uint8:     return "uint8_t";
    case PredefinedTypedef_int8:
    case PredefinedTypedef_sint8:     return "int8_t";
    case PredefinedTypedef_uint16:    return "uint16_t";
    case PredefinedTypedef_int16:
    case PredefinedTypedef_sint16:    return "int16_t";
    case PredefinedTypedef_uint32:    return "uint32_t";
    case PredefinedTypedef_int32:
    case PredefinedTypedef_sint32:    return "int32_t";
    case PredefinedTypedef_uint64:    return "uint64_t";
    case PredefinedTypedef_int64:
    case PredefinedTypedef_sint64:    return "int64_t";

    // Fixed-width aliases map onto the matching stdint typedef.
    case PredefinedTypedef_byte:      return "uint8_t";
    case PredefinedTypedef_word:      return "uint16_t";
    case PredefinedTypedef_dword:     return "uint32_t";
    case PredefinedTypedef_qword:     return "uint64_t";

    case PredefinedTypedef_float:     return "float_t";
    case PredefinedTypedef_double:    return "double_t";
    case PredefinedTypedef_ldouble:   return "ldouble_t";
    case PredefinedTypedef_float32:   return "float32_t";
    case PredefinedTypedef_float64:   return "float64_t";

    case PredefinedTypedef_pointer:   return "raw_pointer_t";

    case PredefinedTypedef_binary:    return "binary_t";
    case PredefinedTypedef_size:      return "binary_size_t";

    // All string flavours share a single generated string handle type.
    case PredefinedTypedef_string:
    case PredefinedTypedef_astring:
    case PredefinedTypedef_wstring:   return "string_t";
  }
  return String();
}
//---------------------------------------------------------------------
// Renders the C handle-type name for any IDL type: basic types use their
// predefined C typedef; all other types use the flattened name plus "_t".
// A null type yields an empty string.
String GenerateStructC::fixCType(TypePtr type) noexcept
{
  if (!type) return String();

  if (auto basicType = type->toBasicType()) {
    return fixCType(basicType->mBaseType);
  }

  return fixType(type) + "_t";
}
//---------------------------------------------------------------------
// Renders the C type name, using a "box_" prefixed variant for optional
// basic and enum types (which are value types and need boxing); every other
// type — optional or not — uses the plain C type name.
String GenerateStructC::fixCType(
                                 bool isOptional,
                                 TypePtr type
                                 ) noexcept
{
  // Non-optional requests (and null types) resolve exactly like fixCType().
  if ((!isOptional) || (!type)) return fixCType(type);

  if (auto basicType = type->toBasicType()) {
    return String("box_") + fixCType(basicType->mBaseType);
  }
  if (auto enumObj = type->toEnumType()) {
    return String("box_") + fixCType(enumObj);
  }

  return fixCType(type);
}
//---------------------------------------------------------------------
// Flattens an IDL type into a C-identifier-safe name: basic types use their
// C typedef, templated structs append each template argument's flattened
// name joined with underscores, and everything else uses its full path name
// with "::" separators converted to "_".
String GenerateStructC::fixType(TypePtr type) noexcept
{
  if (!type) return String();

  {
    auto basicType = type->toBasicType();
    if (basicType) {
      return fixCType(basicType->mBaseType);
    }
  }

  {
    auto templateType = type->toTemplatedStructType();
    if (templateType) {
      // Start from the owning struct's flattened name, then append each
      // template argument (skipping any that render empty).
      auto result = fixType(templateType->getParentStruct());
      for (auto iter = templateType->mTemplateArguments.begin(); iter != templateType->mTemplateArguments.end(); ++iter) {
        auto typeArgument = (*iter);
        String temp = fixType(typeArgument);
        if (temp.hasData()) {
          if (result.hasData()) {
            result += "_";
          }
          result += temp;
        }
      }
      return result;
    }
  }

  // Fallback: fully-qualified path name, stripped of the leading "::" and
  // with scope separators flattened to underscores.
  auto result = type->getPathName();
  if ("::" == result.substr(0, 2)) {
    result = result.substr(2);
  }
  result.replaceAll("::", "_");
  return result;
}
//---------------------------------------------------------------------
String GenerateStructC::getApiImplementationDefine(ContextPtr context) noexcept
{
  // Implementation macro name, prefixed with the upper-cased project
  // name when one is available; otherwise the generic fallback.
  String fallback = "WRAPPER_C_GENERATED_IMPLEMENTATION";
  if (!context) return fallback;
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return fallback;
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_WRAPPER_C_GENERATED_IMPLEMENTATION";
}
//---------------------------------------------------------------------
String GenerateStructC::getApiCastRequiredDefine(ContextPtr context) noexcept
{
  // Cast-required macro name, prefixed with the upper-cased project
  // name when one is available; otherwise the generic fallback.
  String fallback = "WRAPPER_C_GENERATED_REQUIRES_CAST";
  if (!context) return fallback;
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return fallback;
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_WRAPPER_C_GENERATED_REQUIRES_CAST";
}
//---------------------------------------------------------------------
String GenerateStructC::getApiExportDefine(ContextPtr context) noexcept
{
  // Export-API macro name, prefixed with the upper-cased project name
  // when one is available; otherwise the generic fallback.
  String fallback = "WRAPPER_C_EXPORT_API";
  if (!context) return fallback;
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return fallback;
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_WRAPPER_C_EXPORT_API";
}
//---------------------------------------------------------------------
String GenerateStructC::getApiExportCastedDefine(ContextPtr context) noexcept
{
  // Casted-export macro name, prefixed with the upper-cased project
  // name when one is available; otherwise the generic fallback.
  String fallback = "WRAPPER_C_CASTED_EXPORT_API";
  if (!context) return fallback;
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return fallback;
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_WRAPPER_C_CASTED_EXPORT_API";
}
//---------------------------------------------------------------------
String GenerateStructC::getApiCallingDefine(ContextPtr context) noexcept
{
  // Calling-convention macro name, prefixed with the upper-cased
  // project name when one is available; otherwise the generic fallback.
  String fallback = "WRAPPER_C_CALLING_CONVENTION";
  if (!context) return fallback;
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return fallback;
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_WRAPPER_C_CALLING_CONVENTION";
}
//---------------------------------------------------------------------
String GenerateStructC::getApiGuardDefine(
  ContextPtr context,
  bool endGuard
) noexcept
{
  // Begin/end C++ guard macro name, prefixed with the upper-cased
  // project name when one is available; otherwise the generic fallback.
  const char *suffix = (endGuard ? "WRAPPER_C_PLUS_PLUS_END_GUARD" : "WRAPPER_C_PLUS_PLUS_BEGIN_GUARD");
  if (!context) return String(suffix);
  auto project = context->getProject();
  if ((!project) || (project->mName.isEmpty())) return String(suffix);
  String prefix = project->mName;
  prefix.toUpper();
  prefix.replaceAll(".", "_");
  return prefix + "_" + suffix;
}
//---------------------------------------------------------------------
// Returns the name of the conversion routine that turns a wrapper C++
// value of the given type into its C handle representation, or an empty
// string when the value passes through with no helper (e.g. plain
// non-optional basic types).
String GenerateStructC::getToHandleMethod(
bool isOptional,
TypePtr type
) noexcept
{
if (!type) return String();
{
// Basic types: boxed (optional) values and string/binary payloads need
// a helper; all other basic types convert without one.
auto basicType = type->toBasicType();
if (basicType) {
if (isOptional) {
return "wrapper::box_" + fixBasicType(basicType->mBaseType) + "_wrapperToHandle";
}
String cTypeStr = fixCType(basicType);
if ("string_t" == cTypeStr) return "wrapper::string_t_wrapperToHandle";
if ("binary_t" == cTypeStr) return "wrapper::binary_t_wrapperToHandle";
return String();
}
}
{
// Enums: optional values are boxed; otherwise a plain cast to the
// enum's underlying base type suffices.
auto enumType = type->toEnumType();
if (enumType) {
if (isOptional) {
return "box_" + fixCType(enumType) + "_wrapperToHandle";
}
return String("static_cast<") + fixCType(enumType->mBaseType) + ">";
}
}
{
// Built-in wrapper types (zs::* specials, std containers) each have a
// dedicated helper keyed by their canonical path name.
if (GenerateHelper::isBuiltInType(type)) {
auto structObj = type->toStruct();
if (!structObj) {
// Templated built-ins (list/set/map/PromiseWith) resolve through
// their parent struct to obtain the special path name.
auto templatedStructObj = type->toTemplatedStructType();
if (templatedStructObj) {
auto parentObj = templatedStructObj->getParent();
if (parentObj) structObj = parentObj->toStruct();
}
}
if (!structObj) return String();
String specialName = structObj->getPathName();
if ("::zs::Any" == specialName) return "wrapper::zs_Any_wrapperToHandle";
if ("::zs::Promise" == specialName) return "wrapper::zs_Promise_wrapperToHandle";
if ("::zs::PromiseWith" == specialName) return "wrapper::zs_Promise_wrapperToHandle";
if ("::zs::PromiseRejectionReason" == specialName) return String();
// check exceptions
{
auto exceptionList = GenerateHelper::getAllExceptions(nullptr);
for (auto iter = exceptionList.begin(); iter != exceptionList.end(); ++iter) {
auto e = (*iter);
if (("::zs::exceptions::" + e) == specialName) return "wrapper::exception_" + e + "_wrapperToHandle";
}
}
if ("::zs::Time" == specialName) return "wrapper::zs_Time_wrapperToHandle";
if ("::zs::Milliseconds" == specialName) return "wrapper::zs_Milliseconds_wrapperToHandle";
if ("::zs::Microseconds" == specialName) return "wrapper::zs_Microseconds_wrapperToHandle";
if ("::zs::Nanoseconds" == specialName) return "wrapper::zs_Nanoseconds_wrapperToHandle";
if ("::zs::Seconds" == specialName) return "wrapper::zs_Seconds_wrapperToHandle";
if ("::zs::Minutes" == specialName) return "wrapper::zs_Minutes_wrapperToHandle";
if ("::zs::Hours" == specialName) return "wrapper::zs_Hours_wrapperToHandle";
if ("::zs::Days" == specialName) return "wrapper::zs_Days_wrapperToHandle";
if ("::std::set" == specialName) return String("wrapper::") + fixType(type) + "_wrapperToHandle";
if ("::std::list" == specialName) return String("wrapper::") + fixType(type) + "_wrapperToHandle";
if ("::std::map" == specialName) return String("wrapper::") + fixType(type) + "_wrapperToHandle";
}
}
{
// Ordinary user-declared structs use their generated per-struct helper.
auto structObj = type->toStruct();
if (structObj) {
return String("wrapper::") + fixType(structObj) + "_wrapperToHandle";
}
}
return String();
}
//---------------------------------------------------------------------
String GenerateStructC::getFromHandleMethod(
  bool isOptional,
  TypePtr type
) noexcept
{
  // Returns the conversion routine that turns a C handle back into a
  // wrapper C++ value of the given type (empty string when no helper
  // applies).
  //
  // Fix: guard against a null type before dereferencing it; the sibling
  // getToHandleMethod() tolerates null and this routine now does too.
  if (!type) return String();
  {
    auto enumType = type->toEnumType();
    if (enumType) {
      if (!isOptional) {
        // Non-optional enums convert with a plain cast to the wrapper enum.
        return String("static_cast<wrapper") + enumType->getPathName() + ">";
      }
    }
  }
  // Every other converter mirrors the "to handle" helper name with its
  // suffix swapped.
  auto result = getToHandleMethod(isOptional, type);
  result.replaceAll("_wrapperToHandle", "_wrapperFromHandle");
  return result;
}
//---------------------------------------------------------------------
void GenerateStructC::includeType(
  HelperFile &helperFile,
  TypePtr type
) noexcept
{
  // Add the generated C header for a struct type to the helper file's
  // include list; basic, enum and built-in types need no include.
  if (!type) return;
  if (type->toBasicType()) return;
  if (type->toEnumType()) return;
  if (GenerateHelper::isBuiltInType(type)) return;

  if (auto templatedType = type->toTemplatedStructType()) {
    // A templated struct pulls in the headers of all of its arguments.
    for (auto &subType : templatedType->mTemplateArguments) {
      includeType(helperFile, subType);
    }
    return;
  }

  if (auto structType = type->toStruct()) {
    String fileName = "\"c_" + fixType(type) + ".h\"";
    helperFile.includeC(fileName);
  }
}
//---------------------------------------------------------------------
void GenerateStructC::includeType(
  StructFile &structFile,
  TypePtr type
) noexcept
{
  // Add the generated C header for a struct type to the struct file's
  // include list; basic, enum and built-in types need no include.
  if (!type) return;
  if (type->toBasicType()) return;
  if (type->toEnumType()) return;
  if (GenerateHelper::isBuiltInType(type)) return;

  if (auto templatedType = type->toTemplatedStructType()) {
    // A templated struct pulls in the headers of all of its arguments.
    for (auto &subType : templatedType->mTemplateArguments) {
      includeType(structFile, subType);
    }
    return;
  }

  if (auto structType = type->toStruct()) {
    String fileName = "\"c_" + fixType(type) + ".h\"";
    structFile.includeC(fileName);
  }
}
//---------------------------------------------------------------------
void GenerateStructC::calculateRelations(
  NamespacePtr namespaceObj,
  NamePathStructSetMap &ioDerivesInfo
) noexcept
{
  // Walk the namespace tree and collect derive relationships for every
  // struct encountered along the way.
  if (!namespaceObj) return;
  for (auto &namespaceMapping : namespaceObj->mNamespaces) {
    calculateRelations(namespaceMapping.second, ioDerivesInfo);
  }
  for (auto &structMapping : namespaceObj->mStructs) {
    calculateRelations(structMapping.second, ioDerivesInfo);
  }
}
//---------------------------------------------------------------------
// Records, for this struct, the full set of "is a" relationships in both
// directions: every ancestor struct maps to this struct's path, and this
// struct maps to every ancestor's path (including itself, since the
// worklist starts seeded with the struct itself). Recurses into nested
// structs afterwards.
void GenerateStructC::calculateRelations(
StructPtr structObj,
NamePathStructSetMap &ioDerivesInfo
) noexcept
{
if (!structObj) return;
String currentNamePath = structObj->getPathName();
StructSet allParents;
allParents.insert(structObj);
// Worklist traversal over the transitive "is a" ancestry; the first
// popped entry is structObj itself, which yields the self mapping.
while (allParents.size() > 0)
{
auto top = allParents.begin();
StructPtr parentStructObj = (*top);
allParents.erase(top);
if (structObj != parentStructObj) {
// ancestor -> this struct's path
insertInto(parentStructObj, currentNamePath, ioDerivesInfo);
}
// this struct -> ancestor's path (or its own on the first iteration)
insertInto(structObj, parentStructObj->getPathName(), ioDerivesInfo);
for (auto iter = parentStructObj->mIsARelationships.begin(); iter != parentStructObj->mIsARelationships.end(); ++iter)
{
auto foundObj = (*iter).second;
if (!foundObj) continue;
auto foundStructObj = foundObj->toStruct();
if (!foundStructObj) continue;
allParents.insert(foundStructObj);
}
}
// Nested structs get the same treatment.
for (auto iter = structObj->mStructs.begin(); iter != structObj->mStructs.end(); ++iter)
{
auto foundStruct = (*iter).second;
calculateRelations(foundStruct, ioDerivesInfo);
}
}
//---------------------------------------------------------------------
void GenerateStructC::calculateBoxings(
  NamespacePtr namespaceObj,
  StringSet &ioBoxings
) noexcept
{
  // Walk the namespace tree gathering the set of types that require a
  // boxed representation.
  if (!namespaceObj) return;
  for (auto &namespaceMapping : namespaceObj->mNamespaces) {
    calculateBoxings(namespaceMapping.second, ioBoxings);
  }
  for (auto &structMapping : namespaceObj->mStructs) {
    calculateBoxings(structMapping.second, ioBoxings);
  }
}
//---------------------------------------------------------------------
void GenerateStructC::calculateBoxings(
  StructPtr structObj,
  StringSet &ioBoxings
) noexcept
{
  // Gather boxed types needed by a struct: recurse into nested structs
  // and template instantiations first, then (for non-generic structs
  // only) inspect every method and property.
  if (!structObj) return;
  for (auto &structMapping : structObj->mStructs) {
    calculateBoxings(structMapping.second, ioBoxings);
  }
  for (auto &templateMapping : structObj->mTemplatedStructs) {
    calculateBoxings(templateMapping.second, ioBoxings);
  }
  // Generic (uninstantiated) structs contribute nothing directly.
  if (structObj->mGenerics.size() > 0) return;
  for (auto &method : structObj->mMethods) {
    calculateBoxings(method, ioBoxings);
  }
  for (auto &property : structObj->mProperties) {
    calculateBoxings(false, property, ioBoxings);
  }
}
//---------------------------------------------------------------------
// Intentionally a no-op: templated struct types themselves contribute no
// boxed types here (their members are visited via the owning struct).
// The ZS_MAYBE_USED annotations suppress unused-parameter warnings.
void GenerateStructC::calculateBoxings(
TemplatedStructTypePtr templatedStructObj,
ZS_MAYBE_USED() StringSet &ioBoxings
) noexcept
{
ZS_MAYBE_USED(ioBoxings);
if (!templatedStructObj) return;
}
//---------------------------------------------------------------------
void GenerateStructC::calculateBoxings(
  MethodPtr method,
  StringSet &ioBoxings,
  TemplatedStructTypePtr templatedStruct
) noexcept
{
  // Record boxed types needed by a method's result and its arguments.
  if (!method) return;
  // Event-handler arguments are treated as optional (boxed) payloads.
  const bool isEventHandler = method->hasModifier(Modifier_Method_EventHandler);
  calculateBoxingType(method->hasModifier(Modifier_Optional), method->mResult, ioBoxings, templatedStruct);
  for (auto &argument : method->mArguments) {
    calculateBoxings(isEventHandler, argument, ioBoxings, templatedStruct);
  }
}
//---------------------------------------------------------------------
void GenerateStructC::calculateBoxings(
  bool fromEventMethod,
  PropertyPtr property,
  StringSet &ioBoxings,
  TemplatedStructTypePtr templatedStruct
) noexcept
{
  // A property's type needs boxing when the property is optional or it
  // is delivered through an event method.
  if (!property) return;
  const bool treatAsOptional = fromEventMethod || property->hasModifier(Modifier_Optional);
  calculateBoxingType(treatAsOptional, property->mType, ioBoxings, templatedStruct);
}
//---------------------------------------------------------------------
// Records (in ioBoxings) the path name of a type that needs a boxed
// representation because it appears in an optional position. When the
// type is a generic template parameter, it is first resolved to the
// concrete argument the enclosing templated struct supplies for it.
void GenerateStructC::calculateBoxingType(
bool isOptional,
TypePtr type,
StringSet &ioBoxings,
TemplatedStructTypePtr templatedStruct
) noexcept
{
if (!isOptional) return;
if (!type) return;
{
auto genericType = type->toGenericType();
if (genericType) {
if (!templatedStruct) return;
auto parentStruct = templatedStruct->getParentStruct();
if (!parentStruct) return;
// Find the positional index of this generic in the parent struct's
// generic parameter list (index ends at the matching position, or at
// the list size when no match is found).
String name = genericType->getMappingName();
size_t index {};
for (auto iter = parentStruct->mGenerics.begin(); iter != parentStruct->mGenerics.end(); ++iter, ++index)
{
auto structGeneric = (*iter);
if (!structGeneric) continue;
if (name == structGeneric->getMappingName()) break;
}
// Walk the template arguments, decrementing index each step; the
// argument reached when index hits zero is the concrete substitution.
TypePtr checkType;
for (auto iter = templatedStruct->mTemplateArguments.begin(); iter != templatedStruct->mTemplateArguments.end(); ++iter, --index)
{
auto foundType = (*iter);
if (0 != index) continue;
checkType = foundType;
break;
}
if (!checkType) return;
// Still generic after substitution means nothing concrete to box.
if (checkType->toGenericType()) return;
type = checkType;
}
}
// Key basic types by their predefined typedef name; everything else by
// its fully qualified path name.
String pathName;
{
auto basicType = type->toBasicType();
if (basicType) {
pathName = fixBasicType(basicType->mBaseType);
} else {
pathName = type->getPathName();
}
}
ioBoxings.insert(pathName);
}
//---------------------------------------------------------------------
void GenerateStructC::insertInto(
  StructPtr structObj,
  const NamePath &namePath,
  NamePathStructSetMap &ioDerivesInfo
) noexcept
{
  // Registers structObj in the set keyed by namePath.
  if (!structObj) return;
  // Fix: the original performed a find, then either copied a freshly
  // built set into the map or inserted into the found one. operator[]
  // default-constructs the empty set on first use, collapsing both
  // branches into a single lookup with identical results.
  ioDerivesInfo[namePath].insert(structObj);
}
//---------------------------------------------------------------------
void GenerateStructC::appendStream(
  std::stringstream &output,
  std::stringstream &source,
  bool appendEol
) noexcept
{
  // Appends source's contents to output, optionally preceded by a blank
  // line; an empty source is skipped entirely (no separator emitted).
  //
  // Fix: test emptiness on the std::string that stringstream::str()
  // already returns instead of converting it into the project String
  // type first — same behavior, one fewer conversion.
  const std::string contents = source.str();
  if (contents.empty()) return;
  if (appendEol) {
    output << "\n";
  }
  output << contents;
}
//---------------------------------------------------------------------
// Seeds every output stream of the helper file (C header, C source,
// C++ header and C++ source sections) with its boilerplate, then emits
// the helper conversions for callbacks, exceptions, boxing, strings,
// binaries, time/duration types, containers and promise specials, and
// finally walks the global namespace for struct-specific helpers.
void GenerateStructC::prepareHelperFile(HelperFile &helperFile) noexcept
{
// C header preamble: generated-by banner plus the shared types header.
{
auto &ss = helperFile.headerCIncludeSS_;
ss << "/* " ZS_EVENTING_GENERATED_BY " */\n\n";
ss << "#pragma once\n\n";
ss << "#include \"types.h\"\n";
ss << "\n";
}
// Open the extern "C" begin guard for the C function declarations.
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << getApiGuardDefine(helperFile.global_) << "\n";
ss << "\n";
}
// C source preamble: includes needed by the generated implementations.
{
auto &ss = helperFile.cIncludeSS_;
ss << "/* " ZS_EVENTING_GENERATED_BY " */\n\n";
ss << "\n";
ss << "#include <zsLib/date.h>\n";
ss << "#include \"c_helpers.h\"\n";
ss << "#include <zsLib/types.h>\n";
ss << "#include <zsLib/eventing/types.h>\n";
ss << "#include <zsLib/SafeInt.h>\n";
ss << "\n";
}
{
auto &ss = helperFile.cFunctionsSS_;
ss << "using namespace wrapper;\n\n";
ss << "using namespace date;\n\n";
}
// C++ header: close the guard opened above, then open the wrapper
// namespace for the C++-only helper declarations.
{
auto &ss = helperFile.headerCppFunctionsSS_;
ss << "\n";
ss << getApiGuardDefine(helperFile.global_, true) << "\n";
ss << "\n";
ss << "#ifdef __cplusplus\n";
ss << "namespace wrapper\n";
ss << "{\n";
}
{
auto &ss = helperFile.cppFunctionsSS_;
ss << "namespace wrapper\n";
ss << "{\n";
}
// Emit the fixed set of helper groups in a stable order.
prepareHelperCallback(helperFile);
prepareHelperExceptions(helperFile);
prepareHelperBoxing(helperFile);
prepareHelperString(helperFile);
prepareHelperBinary(helperFile);
prepareHelperDuration(helperFile, "Time");
prepareHelperDuration(helperFile, "Days");
prepareHelperDuration(helperFile, "Hours");
prepareHelperDuration(helperFile, "Seconds");
prepareHelperDuration(helperFile, "Minutes");
prepareHelperDuration(helperFile, "Milliseconds");
prepareHelperDuration(helperFile, "Microseconds");
prepareHelperDuration(helperFile, "Nanoseconds");
prepareHelperList(helperFile, "list");
prepareHelperList(helperFile, "set");
prepareHelperList(helperFile, "map");
prepareHelperSpecial(helperFile, "Any");
prepareHelperSpecial(helperFile, "Promise");
preparePromiseWithValue(helperFile);
preparePromiseWithRejectionReason(helperFile);
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << "\n";
}
// Finally emit helpers for every struct reachable from the global scope.
prepareHelperNamespace(helperFile, helperFile.global_);
}
//---------------------------------------------------------------------
// Emits the callback/event plumbing: the C-visible callback function
// typedef and accessors for callback events (C header + C source), plus
// the C++-side IWrapperCallbackEvent::fireEvent implementation that
// forwards events to the installed C callback.
//
// Fix: the generated definition of callback_event_wrapperInstanceId was
// the only one not preceded by the dashed separator comment; the
// missing "ss << dash;" is added so the generated C file is uniform.
void GenerateStructC::prepareHelperCallback(HelperFile &helperFile) noexcept
{
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
// C header declarations.
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << "\n";
ss << "/* void wrapperCallbackFunction(callback_event_t handle); */\n";
ss << "typedef void (" << getApiCallingDefine(helperFile.global_) << " *wrapperCallbackFunction)(callback_event_t);\n";
ss << "\n";
ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " callback_wrapperInstall(wrapperCallbackFunction function);\n";
ss << "\n";
ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " callback_wrapperObserverDestroy(event_observer_t handle);\n";
ss << "\n";
ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " callback_event_wrapperDestroy(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " callback_event_wrapperInstanceId(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " event_observer_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_observer(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_namespace_actual(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_class_actual(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_method_actual(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " generic_handle_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_source(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_source_instance_id(callback_event_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " generic_handle_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_data(callback_event_t handle, int argumentIndex);\n";
ss << "\n";
}
// C source definitions; handles are reinterpret_cast'd heap-allocated
// shared_ptr wrappers, with 0 treated as "no handle" throughout.
{
auto &ss = helperFile.cFunctionsSS_;
ss << dash;
ss << "static wrapperCallbackFunction &callback_get_singleton()\n";
ss << "{\n";
ss << "  static wrapperCallbackFunction function {};\n";
ss << "  return function;\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "void " << getApiCallingDefine(helperFile.global_) << " callback_wrapperInstall(wrapperCallbackFunction function)\n";
ss << "{\n";
ss << "  callback_get_singleton() = function;\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "void " << getApiCallingDefine(helperFile.global_) << " callback_wrapperObserverDestroy(event_observer_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperObserverPtr * IWrapperObserverPtrRawPtr;\n";
ss << "  if (0 == handle) return;\n";
ss << "  (*reinterpret_cast<IWrapperObserverPtrRawPtr>(handle))->observerCancel();\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "void " << getApiCallingDefine(helperFile.global_) << " callback_event_wrapperDestroy(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return;\n";
ss << "  delete reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle);\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " callback_event_wrapperInstanceId(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<instance_id_t>((*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle)).get());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "event_observer_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_observer(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return static_cast<event_observer_t>(NULL);\n";
ss << "  return (*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getObserver();\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_namespace_actual(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<const_char_star_t>((*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getNamespace());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_class_actual(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<const_char_star_t>((*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getClass());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "const_char_star_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_method_actual(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<const_char_star_t>((*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getMethod());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "generic_handle_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_source(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return (*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getSource();\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_source_instance_id(callback_event_t handle)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return (*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getInstanceId();\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "generic_handle_t " << getApiCallingDefine(helperFile.global_) << " callback_event_get_data(callback_event_t handle, int argumentIndex)\n";
ss << "{\n";
ss << "  typedef IWrapperCallbackEventPtr * IWrapperCallbackEventPtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return (*reinterpret_cast<IWrapperCallbackEventPtrRawPtr>(handle))->getEventData(argumentIndex);\n";
ss << "}\n";
ss << "\n";
}
// C++ source: fireEvent hands a new heap-allocated shared_ptr wrapper
// to the installed C callback (which owns destroying it).
{
auto &ss = helperFile.cppFunctionsSS_;
ss << dash2;
ss << "  void IWrapperCallbackEvent::fireEvent(IWrapperCallbackEventPtr event)\n";
ss << "  {\n";
ss << "    if (!event) return;\n";
ss << "    auto singleton = callback_get_singleton();\n";
ss << "    if (!singleton) return;\n";
ss << "    uintptr_t handle = reinterpret_cast<uintptr_t>(new IWrapperCallbackEventPtr(event));\n";
ss << "    singleton(handle);\n";
ss << "  }\n";
ss << "\n";
}
}
//---------------------------------------------------------------------
// Emits the shared exception plumbing: the C API for creating,
// destroying and inspecting an exception handle, the C++-side setter
// that stores a zsLib::Exception into a handle, and then per-exception
// helpers for each known exception type.
void GenerateStructC::prepareHelperExceptions(HelperFile &helperFile) noexcept
{
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
// C header declarations for the exception-handle API.
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << getApiExportDefine(helperFile.global_) << " exception_handle_t " << getApiCallingDefine(helperFile.global_) << " exception_wrapperCreate_exception();\n";
ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " exception_wrapperDestroy(exception_handle_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " exception_wrapperInstanceId(exception_handle_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " bool " << getApiCallingDefine(helperFile.global_) << " exception_hasException(exception_handle_t handle);\n";
ss << getApiExportDefine(helperFile.global_) << " const_char_star_t " << getApiCallingDefine(helperFile.global_) << " exception_what_actual(exception_handle_t handle);\n";
ss << "\n";
}
// Open the Throwers struct declaration (per-exception members are
// appended elsewhere before the struct is closed).
{
auto &ss = helperFile.headerThrowersSS_;
ss << "\n";
ss << "  struct Throwers\n";
ss << "  {\n";
ss << "    static Throwers &singleton() noexcept;\n";
ss << "\n";
}
{
auto &ss = helperFile.headerCppFunctionsSS_;
ss << "    void exception_set_Exception(exception_handle_t handle, shared_ptr<::zsLib::Exception> exception) noexcept;\n";
ss << "\n";
}
// C source definitions; an exception handle is a heap-allocated
// shared_ptr<zsLib::Exception> accessed via reinterpret_cast, with 0
// treated as "no handle".
{
auto &ss = helperFile.cFunctionsSS_;
ss << dash;
ss << "exception_handle_t " << getApiCallingDefine(helperFile.global_) << " exception_wrapperCreate_exception()\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  return reinterpret_cast<exception_handle_t>(new ExceptionTypePtr());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "void " << getApiCallingDefine(helperFile.global_) << " exception_wrapperDestroy(exception_handle_t handle)\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  if (0 == handle) return;\n";
ss << "  delete reinterpret_cast<ExceptionTypePtrRawPtr>(handle);\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " exception_wrapperInstanceId(exception_handle_t handle)\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<instance_id_t>((*reinterpret_cast<ExceptionTypePtrRawPtr>(handle)).get());\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "bool " << getApiCallingDefine(helperFile.global_) << " exception_hasException(exception_handle_t handle)\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  if (0 == handle) return false;\n";
ss << "  return ((bool)(*reinterpret_cast<ExceptionTypePtrRawPtr>(handle)));\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "const_char_star_t " << getApiCallingDefine(helperFile.global_) << " exception_what_actual(exception_handle_t handle)\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<const_char_star_t>((*reinterpret_cast<ExceptionTypePtrRawPtr>(handle))->what());\n";
ss << "}\n";
ss << "\n";
}
// C++ source: the setter used by throwers to populate a handle.
{
auto &ss = helperFile.cppFunctionsSS_;
ss << dash2;
ss << "  void exception_set_Exception(exception_handle_t handle, shared_ptr<::zsLib::Exception> exception) noexcept\n";
ss << "  {\n";
ss << "    typedef ::zsLib::Exception ExceptionType;\n";
ss << "    typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "    typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "    if (0 == handle) return;\n";
ss << "    auto &ptr = (*reinterpret_cast<ExceptionTypePtrRawPtr>(handle));\n";
ss << "    ptr = exception;\n";
ss << "  }\n";
ss << "\n";
}
// exceptions
{
auto exceptionList = GenerateHelper::getAllExceptions(nullptr);
for (auto iter = exceptionList.begin(); iter != exceptionList.end(); ++iter) {
String e = *iter;
prepareHelperExceptions(helperFile, e);
}
}
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << "\n";
}
{
auto &ss = helperFile.headerCppFunctionsSS_;
ss << "\n";
}
}
//---------------------------------------------------------------------
// Emits the per-exception helpers for one named exception type: a C
// "exception_is_<Name>" type-check and a C++ "_wrapperToHandle"
// converter. Skipped silently when the exception type is not declared
// under ::zs::exceptions in the model. Note the base "Exception" type
// lives directly under ::zsLib rather than ::zsLib::Exceptions, hence
// the conditional namespace fragment.
void GenerateStructC::prepareHelperExceptions(
HelperFile &helperFile,
const String &exceptionName
) noexcept
{
auto context = helperFile.global_->toContext()->findType("::zs::exceptions::" + exceptionName);
if (!context) return;
auto contextStruct = context->toStruct();
if (!contextStruct) return;
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
{
auto &ss = helperFile.headerCFunctionsSS_;
ss << getApiExportDefine(helperFile.global_) << " bool " << getApiCallingDefine(helperFile.global_) << " exception_is_" << exceptionName << "(exception_handle_t handle);\n";
}
// C source: runtime type check via dynamic_pointer_cast on the stored
// shared_ptr<zsLib::Exception>.
{
auto &ss = helperFile.cFunctionsSS_;
ss << dash;
ss << "bool " << getApiCallingDefine(helperFile.global_) << " exception_is_" << exceptionName << "(exception_handle_t handle)\n";
ss << "{\n";
ss << "  typedef ::zsLib::Exception ExceptionType;\n";
ss << "  typedef shared_ptr<ExceptionType> ExceptionTypePtr;\n";
ss << "  typedef ExceptionTypePtr * ExceptionTypePtrRawPtr;\n";
ss << "  if (0 == handle) return false;\n";
ss << "  return ((bool)std::dynamic_pointer_cast<::zsLib::" << ("Exception" == exceptionName ? "" : "Exceptions::" ) << exceptionName << ">(*reinterpret_cast<ExceptionTypePtrRawPtr>(handle)));\n";
ss << "}\n";
ss << "\n";
}
{
auto &ss = helperFile.headerCppFunctionsSS_;
ss << "    exception_handle_t exception_" << exceptionName << "_wrapperToHandle(const ::zsLib::" << (exceptionName == "Exception" ? "" : "Exceptions::") << exceptionName << " &value) noexcept;\n";
}
// C++ source: converter copies the exception into a fresh handle.
{
auto &ss = helperFile.cppFunctionsSS_;
ss << dash2;
ss << "  exception_handle_t exception_" << exceptionName << "_wrapperToHandle(const ::zsLib::" << (exceptionName == "Exception" ? "" : "Exceptions::") << exceptionName << " &value) noexcept\n";
ss << "  {\n";
ss << "    typedef ::zsLib::" << (exceptionName == "Exception" ? "" : "Exceptions::") << exceptionName << " ExceptionType;\n";
ss << "    auto handle = exception_wrapperCreate_exception();\n";
ss << "    exception_set_Exception(handle, make_shared<ExceptionType>(value));\n";
ss << "    return handle;\n";
ss << "  }\n";
ss << "\n";
}
}
//---------------------------------------------------------------------
void GenerateStructC::prepareHelperBoxing(HelperFile &helperFile) noexcept
{
  // Emit boxing helpers for every boxable predefined type, in a fixed
  // order. Deliberately absent from the table (as in the hand-written
  // call list this replaces): byte, word, dword, qword, float32,
  // float64, pointer and size.
  static const IEventingTypes::PredefinedTypedefs boxableTypes[] = {
    IEventingTypes::PredefinedTypedef_bool,
    IEventingTypes::PredefinedTypedef_uchar,
    IEventingTypes::PredefinedTypedef_schar,
    IEventingTypes::PredefinedTypedef_ushort,
    IEventingTypes::PredefinedTypedef_sshort,
    IEventingTypes::PredefinedTypedef_uint,
    IEventingTypes::PredefinedTypedef_sint,
    IEventingTypes::PredefinedTypedef_ulong,
    IEventingTypes::PredefinedTypedef_slong,
    IEventingTypes::PredefinedTypedef_ulonglong,
    IEventingTypes::PredefinedTypedef_slonglong,
    IEventingTypes::PredefinedTypedef_uint8,
    IEventingTypes::PredefinedTypedef_sint8,
    IEventingTypes::PredefinedTypedef_uint16,
    IEventingTypes::PredefinedTypedef_sint16,
    IEventingTypes::PredefinedTypedef_uint32,
    IEventingTypes::PredefinedTypedef_sint32,
    IEventingTypes::PredefinedTypedef_uint64,
    IEventingTypes::PredefinedTypedef_sint64,
    IEventingTypes::PredefinedTypedef_float,
    IEventingTypes::PredefinedTypedef_double,
    IEventingTypes::PredefinedTypedef_ldouble,
    IEventingTypes::PredefinedTypedef_binary,
    IEventingTypes::PredefinedTypedef_string,
  };
  for (auto predefinedType : boxableTypes) {
    prepareHelperBoxing(helperFile, predefinedType);
  }
}
//---------------------------------------------------------------------
// Emits the complete boxing support for one basic type: the C-callable API
// (create / createWithValue / destroy / instanceId / has_value / get_value /
// set_value) plus the C++ conversion helpers between Optional<T> and the
// opaque C handle. On the C side a boxed value is an opaque handle that
// points at a heap-allocated "BasicTypePtr" (a shared_ptr for most types);
// an empty pointer means "no value". Output is appended to the helper
// file's header / C / C++ stringstreams.
void GenerateStructC::prepareHelperBoxing(
  HelperFile &helperFile,
  const IEventingTypes::PredefinedTypedefs basicType
) noexcept
{
  // binary and string marshal through dedicated wrapperTo/FromHandle
  // helpers instead of plain value assignment (see branches below)
  bool isBinary = IEventingTypes::PredefinedTypedef_binary == basicType;
  bool isString = IEventingTypes::PredefinedTypedef_string == basicType;
  String cTypeStr = fixCType(basicType);
  String boxedTypeStr = "box_" + cTypeStr;
  auto pathStr = fixBasicType(basicType);
  // only emit boxing support for types actually boxed somewhere in the IDL
  if (!helperFile.hasBoxing(pathStr)) return;
  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));
  {
    {
      // header: C boxing API declarations
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(helperFile.global_) << " " << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "();\n";
      ss << getApiExportDefine(helperFile.global_) << " " << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "WithValue(" << cTypeStr << " value);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperDestroy(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperInstanceId(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << "bool " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_has_value(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << cTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_get_value(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << "void " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_set_value(" << boxedTypeStr << " handle, " << cTypeStr << " value);\n";
      ss << "\n";
    }
    {
      // header: C++ Optional<T> <-> handle conversion declarations
      auto &ss = helperFile.headerCppFunctionsSS_;
      ss << " " << boxedTypeStr << " box_" << fixBasicType(basicType) << "_wrapperToHandle(Optional< " << GenerateHelper::getBasicTypeString(basicType) << " > value) noexcept;\n";
      ss << " Optional< " << GenerateHelper::getBasicTypeString(basicType) << " > box_" << fixBasicType(basicType) << "_wrapperFromHandle(" << boxedTypeStr << " handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    // binary is already a shared_ptr type (SecureByteBlockPtr), so it is
    // not wrapped in a second shared_ptr; everything else is
    String sharedPtrStr = "typedef shared_ptr< BasicType > BasicTypePtr;";
    if (isBinary) {
      sharedPtrStr = "typedef BasicType BasicTypePtr;";
    }
    {
      // C implementation bodies
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "()\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " return reinterpret_cast<CBoxType>(new BasicTypePtr());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "WithValue(" << cTypeStr << " value)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " auto ptr = new BasicTypePtr();\n";
      if (isBinary) {
        ss << " (*ptr) = wrapper::binary_t_wrapperFromHandle(value);\n";
      } else if (isString) {
        ss << " (*ptr) = make_shared<BasicType>();\n";
        ss << " (*(*ptr)) = wrapper::string_t_wrapperFromHandle(value);\n";
      } else {
        ss << " (*ptr) = make_shared<BasicType>();\n";
        ss << " (*((*ptr).get())) = value;\n";
      }
      ss << " return reinterpret_cast<CBoxType>(ptr);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperDestroy(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<BasicTypePtrRawPtr>(handle);\n";
      ss << "}\n";
      ss << "\n";
      // NOTE(review): no "ss << dash;" before _wrapperInstanceId, unlike the
      // other emitted functions — matches prepareHelperEnumBoxing, but looks
      // like a shared copy-paste omission; confirm intended generated output
      ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_wrapperInstanceId(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>((*reinterpret_cast<BasicTypePtrRawPtr>(handle)).get());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "bool " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_has_value(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return false;\n";
      ss << " BasicTypePtr ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      ss << " return ((bool)ptr);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << cTypeStr << " " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_get_value(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << cTypeStr << " CType;\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return CType();\n";
      ss << " BasicTypePtr ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      // getters for binary/string hand back a new wrapper handle; plain
      // value types return the value directly (default on empty box)
      if (isBinary) {
        ss << " return wrapper::binary_t_wrapperToHandle(ptr);\n";
      } else if (isString) {
        ss << " return wrapper::string_t_wrapperToHandle(*ptr);\n";
      } else {
        ss << " if (!ptr) return CType();\n";
        ss << " return *ptr;\n";
      }
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " box_" << cTypeStr << "_set_value(" << boxedTypeStr << " handle, " << cTypeStr << " value)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " BasicTypePtr &ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      if (isBinary) {
        ss << " ptr = wrapper::binary_t_wrapperFromHandle(value);\n";
      } else if (isString) {
        ss << " ptr = make_shared<String>(wrapper::string_t_wrapperFromHandle(value));\n";
      } else {
        ss << " if (!ptr) ptr = make_shared<BasicType>();\n";
        ss << " (*(ptr.get())) = value;\n";
      }
      ss << "}\n";
      ss << "\n";
    }
    {
      // C++ implementation: Optional<T> <-> handle conversions built on the
      // C API above (empty Optional maps to an empty box and vice versa)
      auto &ss = helperFile.cppFunctionsSS_;
      ss << dash2;
      ss << " " << boxedTypeStr << " box_" << fixBasicType(basicType) << "_wrapperToHandle(Optional< " << GenerateHelper::getBasicTypeString(basicType) << " > value) noexcept\n";
      ss << " {\n";
      ss << " if (!value.hasValue()) return box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "();\n";
      if (isBinary) {
        ss << " return box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "WithValue(binary_t_wrapperToHandle(value.value()));\n";
      } else if (isString) {
        ss << " return box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "WithValue(string_t_wrapperToHandle(value.value()));\n";
      } else {
        ss << " return box_" << cTypeStr << "_wrapperCreate_" << cTypeStr << "WithValue(value.value());\n";
      }
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " Optional< " << GenerateHelper::getBasicTypeString(basicType) << " > box_" << fixBasicType(basicType) << "_wrapperFromHandle(" << boxedTypeStr << " handle) noexcept\n";
      ss << " {\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << GenerateHelper::getBasicTypeString(basicType) << " BasicType;\n";
      ss << " " << sharedPtrStr << "\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return Optional< " << GenerateHelper::getBasicTypeString(basicType) << " >();\n";
      ss << " if (!box_" << cTypeStr << "_has_value(handle)) return Optional< " << GenerateHelper::getBasicTypeString(basicType) << " >();\n";
      if (isBinary) {
        ss << " return Optional< " << GenerateHelper::getBasicTypeString(basicType) << " >(binary_t_wrapperFromHandle(box_binary_t_get_value(handle)));\n";
      } else if (isString) {
        ss << " return Optional< " << GenerateHelper::getBasicTypeString(basicType) << " >(string_t_wrapperFromHandle(box_string_t_get_value(handle)));\n";
      } else {
        ss << " return Optional< " << GenerateHelper::getBasicTypeString(basicType) << " >(box_" << cTypeStr <<"_get_value(handle));\n";
      }
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Recursively walks a namespace and prepares helper output for every
// nested namespace, struct, and enum it contains. A null namespace is
// silently ignored.
void GenerateStructC::prepareHelperNamespace(
  HelperFile &helperFile,
  NamespacePtr namespaceObj
) noexcept
{
  if (!namespaceObj) return;

  // descend into child namespaces first, then handle local declarations
  for (auto &entry : namespaceObj->mNamespaces) {
    prepareHelperNamespace(helperFile, entry.second);
  }
  for (auto &entry : namespaceObj->mStructs) {
    prepareHelperStruct(helperFile, entry.second);
  }
  for (auto &entry : namespaceObj->mEnums) {
    prepareHelperEnum(helperFile, entry.second);
  }
}
//---------------------------------------------------------------------
// Recursively prepares helper output for a struct: nested structs are
// processed depth-first, then any enums declared inside the struct.
// A null struct is silently ignored.
void GenerateStructC::prepareHelperStruct(
  HelperFile &helperFile,
  StructPtr structObj
) noexcept
{
  if (!structObj) return;

  for (auto &entry : structObj->mStructs) {
    prepareHelperStruct(helperFile, entry.second);
  }
  for (auto &entry : structObj->mEnums) {
    prepareHelperEnum(helperFile, entry.second);
  }
}
//---------------------------------------------------------------------
// Prepares helper output for an enum. Boxing support is currently the
// only helper an enum needs; a null enum is silently ignored.
void GenerateStructC::prepareHelperEnum(
  HelperFile &helperFile,
  EnumTypePtr enumObj
) noexcept
{
  if (enumObj) {
    prepareHelperEnumBoxing(helperFile, enumObj);
  }
}
//---------------------------------------------------------------------
// Emits boxing support for one enum type: a C API (create / createWithValue /
// destroy / instanceId / has_value / get_value / set_value) over an opaque
// handle holding a heap-allocated shared_ptr to the wrapper enum, plus C++
// conversions between Optional<enum> and that handle. Values cross the C
// boundary as the enum's underlying C integer type via static_cast.
void GenerateStructC::prepareHelperEnumBoxing(
  HelperFile &helperFile,
  EnumTypePtr enumObj
) noexcept
{
  if (!enumObj) return;
  // only emit boxing support when the IDL actually boxes this enum somewhere
  if (!helperFile.hasBoxing(enumObj->getPathName())) return;
  String cTypeStr = fixCType(enumObj);
  String boxedTypeStr = "box_" + cTypeStr;
  String wrapperTypeStr = "wrapper" + enumObj->getPathName();
  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));
  {
    {
      // header: C boxing API declarations
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(helperFile.global_) << " " << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "();\n";
      ss << getApiExportDefine(helperFile.global_) << " " << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "WithValue(" << cTypeStr << " value);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperDestroy(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperInstanceId(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << "bool " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_has_value(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << cTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_get_value(" << boxedTypeStr << " handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " " << "void " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_set_value(" << boxedTypeStr << " handle, " << cTypeStr << " value);\n";
      ss << "\n";
    }
    {
      // header: C++ Optional<wrapper enum> <-> handle conversion declarations
      auto &ss = helperFile.headerCppFunctionsSS_;
      ss << " " << boxedTypeStr << " " << boxedTypeStr << "_wrapperToHandle(Optional< " << wrapperTypeStr << " > value) noexcept;\n";
      ss << " Optional< " << wrapperTypeStr << " > " << boxedTypeStr << "_wrapperFromHandle(" << boxedTypeStr << " handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    {
      // C implementation bodies
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "()\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " return reinterpret_cast<CBoxType>(new BasicTypePtr());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << boxedTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "WithValue(" << cTypeStr << " value)\n";
      ss << "{\n";
      ss << " typedef " << boxedTypeStr << " CBoxType;\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " auto ptr = new BasicTypePtr();\n";
      ss << " (*ptr) = make_shared<BasicType>();\n";
      ss << " (*((*ptr).get())) = static_cast<BasicType>(value);\n";
      ss << " return reinterpret_cast<CBoxType>(ptr);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperDestroy(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<BasicTypePtrRawPtr>(handle);\n";
      ss << "}\n";
      ss << "\n";
      // NOTE(review): no "ss << dash;" before _wrapperInstanceId — same
      // omission as in prepareHelperBoxing; confirm intended generated output
      ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_wrapperInstanceId(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>((*reinterpret_cast<BasicTypePtrRawPtr>(handle)).get());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "bool " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_has_value(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return false;\n";
      ss << " BasicTypePtr ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      ss << " return ((bool)ptr);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << cTypeStr << " " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_get_value(" << boxedTypeStr << " handle)\n";
      ss << "{\n";
      ss << " typedef " << cTypeStr << " CType;\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return " << cTypeStr << "();\n";
      ss << " BasicTypePtr ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      ss << " if (!ptr) return CType();\n";
      ss << " return static_cast<CType>(*ptr);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " " << boxedTypeStr << "_set_value(" << boxedTypeStr << " handle, " << cTypeStr << " value)\n";
      ss << "{\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " BasicTypePtr &ptr = (*reinterpret_cast<BasicTypePtrRawPtr>(handle));\n";
      ss << " if (!ptr) ptr = make_shared<BasicType>();\n";
      ss << " (*(ptr.get())) = static_cast<BasicType>(value);\n";
      ss << "}\n";
      ss << "\n";
    }
    {
      // C++ implementation: Optional<wrapper enum> <-> handle conversions
      // built on the C API above
      auto &ss = helperFile.cppFunctionsSS_;
      ss << dash2;
      ss << " " << boxedTypeStr << " " << boxedTypeStr << "_wrapperToHandle(Optional< " << wrapperTypeStr << " > value) noexcept\n";
      ss << " {\n";
      ss << " typedef " << cTypeStr << " CType;\n";
      ss << " if (!value.hasValue()) return " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "();\n";
      ss << " return " << boxedTypeStr << "_wrapperCreate_" << boxedTypeStr << "WithValue(static_cast<CType>(value.value()));\n";
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " Optional< " << wrapperTypeStr << " > " << boxedTypeStr << "_wrapperFromHandle(" << boxedTypeStr << " handle) noexcept\n";
      ss << " {\n";
      ss << " typedef " << wrapperTypeStr << " BasicType;\n";
      ss << " typedef shared_ptr< BasicType > BasicTypePtr;\n";
      ss << " typedef BasicTypePtr * BasicTypePtrRawPtr;\n";
      ss << " if (0 == handle) return Optional< BasicType >();\n";
      ss << " if (!" << boxedTypeStr << "_has_value(handle)) return Optional< BasicType >();\n";
      ss << " return Optional< BasicType >(static_cast<BasicType>(" << boxedTypeStr << "_get_value(handle)));\n";
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits the string_t support API: on the C side a string_t handle is an
// opaque pointer to a heap-allocated ::zsLib::String; the C++ side converts
// between std::string / ::zsLib::String and that handle. get_value_actual
// returns the internal c_str() pointer, which is only valid while the
// handle (and its current value) stays alive.
void GenerateStructC::prepareHelperString(HelperFile &helperFile) noexcept
{
  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));
  {
    {
      // header: C string API declarations
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(helperFile.global_) << " string_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperCreate_string();\n";
      ss << getApiExportDefine(helperFile.global_) << " string_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperCreate_stringWithValue(const char *value);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperDestroy(string_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperInstanceId(string_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " const_char_star_t " << getApiCallingDefine(helperFile.global_) << " string_t_get_value_actual(string_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " string_t_set_value(string_t handle, const char *value);\n";
      ss << "\n";
    }
    {
      // header: C++ conversion declarations
      auto &ss = helperFile.headerCppFunctionsSS_;
      ss << " string_t string_t_wrapperToHandle(const ::std::string &value) noexcept;\n";
      ss << " ::zsLib::String string_t_wrapperFromHandle(string_t handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    {
      // C implementation bodies
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      // NOTE(review): this is the only emitted .c DEFINITION that carries the
      // export define prefix — every sibling (see the binary/duration/boxing
      // emitters) omits it in the implementation; looks like a copy-paste
      // leftover from the header line above — confirm intended output
      ss << getApiExportDefine(helperFile.global_) << " string_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperCreate_string()\n";
      ss << "{\n";
      ss << " return reinterpret_cast<string_t>(new ::zsLib::String());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "string_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperCreate_stringWithValue(const char *value)\n";
      ss << "{\n";
      ss << " return reinterpret_cast<string_t>(new ::zsLib::String(value));\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperDestroy(string_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<::zsLib::String *>(handle);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " string_t_wrapperInstanceId(string_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>(reinterpret_cast<::zsLib::String *>(handle));\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "const_char_star_t " << getApiCallingDefine(helperFile.global_) << " string_t_get_value_actual(string_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<const_char_star_t>((*reinterpret_cast<::zsLib::String *>(handle)).c_str());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " string_t_set_value(string_t handle, const char *value)\n";
      ss << "{\n";
      ss << " if (0 == handle) return;\n";
      ss << " (*reinterpret_cast<::zsLib::String *>(handle)) = ::zsLib::String(value);\n";
      ss << "}\n";
      ss << "\n";
    }
    {
      // C++ implementation: std::string / zsLib::String <-> handle
      auto &ss = helperFile.cppFunctionsSS_;
      ss << dash2;
      ss << " string_t string_t_wrapperToHandle(const ::std::string &value) noexcept\n";
      ss << " {\n";
      ss << " return reinterpret_cast<string_t>(new ::zsLib::String(value));\n";
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " ::zsLib::String string_t_wrapperFromHandle(string_t handle) noexcept\n";
      ss << " {\n";
      ss << " if (0 == handle) return ::zsLib::String();\n";
      ss << " return (*reinterpret_cast<::zsLib::String *>(handle));\n";
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits the binary_t support API: a binary_t handle is an opaque pointer to
// a heap-allocated SecureByteBlockPtr (shared_ptr), so an allocated handle
// may still hold "no data". get_value/get_size expose the underlying buffer
// and its byte length; set_value replaces the buffer (NULL/0 clears it).
void GenerateStructC::prepareHelperBinary(HelperFile &helperFile) noexcept
{
  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));
  {
    {
      // header: C binary API declarations
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(helperFile.global_) << " binary_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperCreate_binary_t();\n";
      ss << getApiExportDefine(helperFile.global_) << " binary_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperCreate_binary_tWithValue(const uint8_t *value, binary_size_t size);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperDestroy(binary_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " instance_id_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperInstanceId(binary_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " const uint8_t * " << getApiCallingDefine(helperFile.global_) << " binary_t_get_value(binary_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " binary_size_t " << getApiCallingDefine(helperFile.global_) << " binary_t_get_size(binary_t handle);\n";
      ss << getApiExportDefine(helperFile.global_) << " void " << getApiCallingDefine(helperFile.global_) << " binary_t_set_value(binary_t handle, const uint8_t *value, binary_size_t size);\n";
      ss << "\n";
    }
    {
      // header: C++ conversion declarations
      auto &ss = helperFile.headerCppFunctionsSS_;
      ss << " binary_t binary_t_wrapperToHandle(SecureByteBlockPtr value) noexcept;\n";
      ss << " SecureByteBlockPtr binary_t_wrapperFromHandle(binary_t handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    {
      // C implementation bodies
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << "binary_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperCreate_binary_t()\n";
      ss << "{\n";
      ss << " return reinterpret_cast<binary_t>(new SecureByteBlockPtr());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "binary_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperCreate_binary_tWithValue(const uint8_t *value, binary_size_t size)\n";
      ss << "{\n";
      ss << " if ((NULL == value) || (0 == size)) return reinterpret_cast<binary_t>(new SecureByteBlockPtr());\n";
      ss << " return reinterpret_cast<binary_t>(new SecureByteBlockPtr(make_shared<SecureByteBlock>(value, SafeInt<SecureByteBlock::size_type>(size))));\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperDestroy(binary_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<SecureByteBlockPtr *>(handle);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "instance_id_t " << getApiCallingDefine(helperFile.global_) << " binary_t_wrapperInstanceId(binary_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>((*reinterpret_cast<SecureByteBlockPtr *>(handle)).get());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "const uint8_t * " << getApiCallingDefine(helperFile.global_) << " binary_t_get_value(binary_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return static_cast<const uint8_t *>(NULL);\n";
      ss << " SecureByteBlockPtr ptr = (*reinterpret_cast<SecureByteBlockPtr *>(handle));\n";
      ss << " if (!ptr) return static_cast<const uint8_t *>(NULL);\n";
      ss << " return ptr->BytePtr();\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "binary_size_t " << getApiCallingDefine(helperFile.global_) << " binary_t_get_size(binary_t handle)\n";
      ss << "{\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " SecureByteBlockPtr ptr = (*reinterpret_cast<SecureByteBlockPtr *>(handle));\n";
      ss << " if (!ptr) return 0;\n";
      ss << " return SafeInt<size_t>(ptr->SizeInBytes());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(helperFile.global_) << " binary_t_set_value(binary_t handle, const uint8_t *value, binary_size_t size)\n";
      ss << "{\n";
      ss << " if (0 == handle) return;\n";
      ss << " if ((NULL == value) || (0 == size)) { (*reinterpret_cast<SecureByteBlockPtr *>(handle)) = SecureByteBlockPtr(); return; }\n";
      ss << " (*reinterpret_cast<SecureByteBlockPtr *>(handle)) = make_shared<SecureByteBlock>(value, SafeInt<SecureByteBlock::size_type>(size));\n";
      ss << "}\n";
      ss << "\n";
    }
    {
      // C++ implementation: SecureByteBlockPtr <-> handle
      auto &ss = helperFile.cppFunctionsSS_;
      ss << dash2;
      ss << " binary_t binary_t_wrapperToHandle(SecureByteBlockPtr value) noexcept\n";
      ss << " {\n";
      ss << " return reinterpret_cast<binary_t>(new SecureByteBlockPtr(value));\n";
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " SecureByteBlockPtr binary_t_wrapperFromHandle(binary_t handle) noexcept\n";
      ss << " {\n";
      ss << " if (0 == handle) return SecureByteBlockPtr();\n";
      ss << " return (*reinterpret_cast<SecureByteBlockPtr *>(handle));\n";
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits wrapper support for one ::zsLib duration-like type (e.g. "Time",
// "Milliseconds"): a C API over an opaque handle to a heap-allocated
// ::zsLib::<durationType>, plus C++ conversions to/from the handle. Plain
// durations cross the boundary as their raw int64 tick count; "Time" is
// special-cased and exchanged as 100ns intervals since 1601-01-01 (the
// Windows FILETIME epoch — see the sys_days(jan / 1 / 1601) base below).
void GenerateStructC::prepareHelperDuration(
  HelperFile &helperFile,
  const String &durationType
) noexcept
{
  bool isTime = "Time" == durationType;
  // look up the IDL struct that models this duration; nothing to emit if
  // the IDL never declared it
  auto durationContext = helperFile.global_->toContext()->findType("::zs::" + durationType);
  if (!durationContext) return;
  auto durationStruct = durationContext->toStruct();
  if (!durationStruct) return;
  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));
  String zsDurationType = "::zsLib::" + durationType;
  {
    {
      // header: C duration API declarations
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(durationStruct) << " " << fixCType(durationStruct) << " " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperCreate_" << durationType << "();\n";
      ss << getApiExportDefine(durationStruct) << " " << fixCType(durationStruct) << " " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperCreate_" << durationType << "WithValue(int64_t value);\n";
      ss << getApiExportDefine(durationStruct) << " void " << getApiCallingDefine(durationStruct) <<" zs_" << durationType << "_wrapperDestroy(" << fixCType(durationStruct) << " handle);\n";
      ss << getApiExportDefine(durationStruct) << " instance_id_t " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperInstanceId(" << fixCType(durationStruct) << " handle);\n";
      ss << getApiExportDefine(durationStruct) << " " << "int64_t " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_get_value(" << fixCType(durationStruct) << " handle);\n";
      ss << getApiExportDefine(durationStruct) << " " << "void " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_set_value(" << fixCType(durationStruct) << " handle, int64_t value);\n";
      ss << "\n";
    }
    {
      // header: C++ conversion declarations
      auto &ss = helperFile.headerCppFunctionsSS_;
      ss << " " << fixCType(durationStruct) << " zs_" << durationType << "_wrapperToHandle(" << zsDurationType << " value) noexcept;\n";
      ss << " " << zsDurationType << " zs_" << durationType << "_wrapperFromHandle(" << fixCType(durationStruct) << " handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    {
      // C implementation bodies
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << fixCType(durationStruct) << " " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperCreate_" << durationType << "()\n";
      ss << "{\n";
      ss << " typedef " << fixCType(durationStruct) << " CType;\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " return reinterpret_cast<CType>(new DurationType());\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << fixCType(durationStruct) << " " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperCreate_" << durationType << "WithValue(int64_t value)\n";
      ss << "{\n";
      if (isTime) {
        // Time has no rep constructor from raw ticks; reuse set_value's
        // epoch-based conversion instead
        ss << " auto result = zs_" << durationType << "_wrapperCreate_" << durationType << "();\n";
        ss << " zs_" << durationType << "_set_value(result, value);\n";
        ss << " return result;\n";
      } else {
        ss << " typedef " << fixCType(durationStruct) << " CType;\n";
        ss << " typedef " << zsDurationType << " DurationType;\n";
        ss << " typedef DurationType::rep DurationTypeRep;\n";
        ss << " return reinterpret_cast<CType>(new DurationType(static_cast<DurationTypeRep>(SafeInt<DurationTypeRep>(value))));\n";
      }
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperDestroy(" << fixCType(durationStruct) << " handle)\n";
      ss << "{\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<DurationTypeRawPtr>(handle);\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "instance_id_t " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_wrapperInstanceId(" << fixCType(durationStruct) << " handle)\n";
      ss << "{\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>(reinterpret_cast<DurationTypeRawPtr>(handle));\n";
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "int64_t " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_get_value(" << fixCType(durationStruct) << " handle)\n";
      ss << "{\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      if (isTime) {
        // Time -> count of 100ns intervals since 1601-01-01
        ss << " auto t = sys_days(jan / 1 / 1601);\n";
        ss << " auto diff = (*reinterpret_cast<DurationTypeRawPtr>(handle)) - t;\n";
        ss << " auto nano = ::zsLib::toNanoseconds(diff);\n";
        ss << " return SafeInt<int64_t>(nano.count() / static_cast<::zsLib::Nanoseconds::rep>(100));\n";
      } else {
        ss << " return SafeInt<int64_t>(reinterpret_cast<DurationTypeRawPtr>(handle)->count());\n";
      }
      ss << "}\n";
      ss << "\n";
      ss << dash;
      ss << "void " << getApiCallingDefine(durationStruct) << " zs_" << durationType << "_set_value(" << fixCType(durationStruct) << " handle, int64_t value)\n";
      ss << "{\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " typedef DurationType::rep DurationTypeRep;\n";
      ss << " if (0 == handle) return;\n";
      if (isTime) {
        // inverse of get_value: add value*100ns to the 1601-01-01 epoch
        ss << " ::zsLib::Time t = sys_days(jan / 1 / 1601);\n";
        ss << " auto nano = std::chrono::duration_cast<::zsLib::Time::duration>(zsLib::Nanoseconds(static_cast<::zsLib::Nanoseconds::rep>(value) * static_cast<::zsLib::Nanoseconds::rep>(100)));\n";
        ss << " (*reinterpret_cast<DurationTypeRawPtr>(handle)) = ::zsLib::Time(t + nano);\n";
      } else {
        ss << " (*reinterpret_cast<DurationTypeRawPtr>(handle)) = DurationType(SafeInt<DurationTypeRep>(value));\n";
      }
      ss << "}\n";
      ss << "\n";
    }
    {
      // C++ implementation: zsLib duration <-> handle (default-valued
      // durations map to the null handle and back)
      auto &ss = helperFile.cppFunctionsSS_;
      ss << dash2;
      ss << " " << fixCType(durationStruct) << " zs_" << durationType << "_wrapperToHandle(" << zsDurationType << " value) noexcept\n";
      ss << " {\n";
      ss << " typedef " << fixCType(durationStruct) << " CType;\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " if (DurationType() == value) return 0;\n";
      ss << " return reinterpret_cast<CType>(new DurationType(value));\n";
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " " << zsDurationType << " zs_" << durationType << "_wrapperFromHandle(" << fixCType(durationStruct) << " handle) noexcept\n";
      ss << " {\n";
      ss << " typedef " << zsDurationType << " DurationType;\n";
      ss << " typedef DurationType * DurationTypeRawPtr;\n";
      ss << " if (0 == handle) return DurationType();\n";
      ss << " return (*reinterpret_cast<DurationTypeRawPtr>(handle));\n";
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits the C-API glue for the "::std::list" / "::std::set" /
// "::std::map" wrapper specializations (listOrSetStr selects which).
// For every templated specialization found this generates:
//  - C header declarations (create / destroy / instanceId / insert /
//    iterator begin, next, isEnd, key, value),
//  - the matching C implementations,
//  - C++ helper conversions between opaque handles and wrapper smart
//    pointers.
void GenerateStructC::prepareHelperList(
                                        HelperFile &helperFile,
                                        const String &listOrSetStr
                                        ) noexcept
{
  bool isMap = ("map" == listOrSetStr);
  bool isList = ("list" == listOrSetStr);

  auto context = helperFile.global_->toContext()->findType("::std::" + listOrSetStr);
  if (!context) return;

  auto structType = context->toStruct();
  if (!structType) return;

  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));

  for (auto iter = structType->mTemplatedStructs.begin(); iter != structType->mTemplatedStructs.end(); ++iter) {
    auto templatedStructType = (*iter).second;

    // For "map" the first template argument is the key and the second is
    // the mapped value; for "list"/"set" the single argument is the value.
    TypePtr keyType;
    TypePtr listType;
    auto iterArg = templatedStructType->mTemplateArguments.begin();
    if (iterArg != templatedStructType->mTemplateArguments.end()) {
      listType = (*iterArg);
      if (isMap) {
        ++iterArg;
        if (iterArg != templatedStructType->mTemplateArguments.end()) {
          keyType = listType;
          listType = (*iterArg);
        }
      }
    }

    includeType(helperFile, listType);

    {
      // C header declarations for this specialization.
      {
        auto &ss = helperFile.headerCFunctionsSS_;
        ss << getApiExportDefine(structType) << " " << fixCType(templatedStructType) << " " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperCreate_" << structType->getMappingName() << "();\n";
        ss << getApiExportDefine(structType) << " void " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperDestroy(" << fixCType(templatedStructType) << " handle);\n";
        ss << getApiExportDefine(structType) << " instance_id_t " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperInstanceId(" << fixCType(templatedStructType) << " handle);\n";
        if (isMap) {
          ss << getApiExportDefine(structType) << " " << "void " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_insert(" << fixCType(templatedStructType) << " handle, " << fixCType(keyType) << " key, " << fixCType(listType) << " value);\n";
        }
        else {
          ss << getApiExportDefine(structType) << " " << "void " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_insert(" << fixCType(templatedStructType) << " handle, " << fixCType(listType) << " value);\n";
        }
        ss << getApiExportDefine(structType) << " iterator_handle_t " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperIterBegin(" << fixCType(templatedStructType) << " handle);\n";
        ss << getApiExportDefine(structType) << " void " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperIterNext(iterator_handle_t iterHandle);\n";
        ss << getApiExportDefine(structType) << " bool " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperIterIsEnd(" << fixCType(templatedStructType) << " handle, iterator_handle_t iterHandle);\n";
        if (isMap) {
          ss << getApiExportDefine(structType) << " " << fixCType(keyType) << " " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperIterKey(iterator_handle_t iterHandle);\n";
        }
        ss << getApiExportDefine(structType) << " " << fixCType(listType) << " " << getApiCallingDefine(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperIterValue(iterator_handle_t iterHandle);\n";
        ss << "\n";
      }
      // C++ helper declarations (handle <-> wrapper conversions).
      {
        auto &ss = helperFile.headerCppFunctionsSS_;
        ss << " " << GenerateStructHeader::getWrapperTypeString(false, templatedStructType) << " " << fixType(templatedStructType) << "_wrapperFromHandle(" << fixCType(templatedStructType) << " handle) noexcept;\n";
        ss << " " << fixCType(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperToHandle(" << GenerateStructHeader::getWrapperTypeString(false, templatedStructType) << " value) noexcept;\n";
        ss << "\n";
      }
    }

    // Common typedef preambles injected at the top of each generated body.
    std::stringstream typedefsSS;
    std::stringstream typedefsWithIterSS;
    std::stringstream typedefsSS2;
    {
      auto &ss = typedefsSS;
      ss << " typedef " << fixCType(templatedStructType) << " CType;\n";
      if (isMap) {
        ss << " typedef " << GenerateStructHeader::getWrapperTypeString(false, keyType) << " WrapperKeyType;\n";
      }
      ss << " typedef " << GenerateStructHeader::getWrapperTypeString(false, listType) << " WrapperType;\n";
      ss << " typedef ::std::" << listOrSetStr << "<" << (isMap ? "WrapperKeyType, " : "") << "WrapperType> WrapperTypeList;\n";
      ss << " typedef shared_ptr<WrapperTypeList> WrapperTypeListPtr;\n";
      ss << " typedef WrapperTypeListPtr * WrapperTypeListPtrRawPtr;\n";
    }
    {
      auto &ss = typedefsWithIterSS;
      ss << typedefsSS.str();
      ss << " typedef WrapperTypeList::iterator WrapperTypeListIterator;\n";
      ss << " typedef WrapperTypeListIterator * WrapperTypeListIteratorRawPtr;\n";
    }
    {
      // NOTE(review): re-indents the typedef block for the deeper C++ helper
      // scope; the literal spacing here looks collapsed in this copy —
      // confirm the exact space counts against upstream.
      String tmp = typedefsSS.str();
      tmp.replaceAll(" ", " ");
      typedefsSS2 << tmp;
    }

    {
      // C implementations.
      {
        auto &ss = helperFile.cFunctionsSS_;

        ss << dash;
        ss << fixCType(templatedStructType) << " " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperCreate_" << structType->getMappingName() << "()\n";
        ss << "{\n";
        ss << typedefsSS.str();
        ss << " return reinterpret_cast<CType>(new WrapperTypeListPtr(make_shared<WrapperTypeList>()));\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "void " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperDestroy(" << fixCType(templatedStructType) << " handle)\n";
        ss << "{\n";
        ss << typedefsSS.str();
        ss << " if (0 == handle) return;\n";
        ss << " delete reinterpret_cast<WrapperTypeListPtrRawPtr>(handle);\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "instance_id_t " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperInstanceId(" << fixCType(templatedStructType) << " handle)\n";
        ss << "{\n";
        ss << typedefsSS.str();
        ss << " if (0 == handle) return 0;\n";
        ss << " return reinterpret_cast<instance_id_t>((*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle)).get());\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        if (isMap) {
          ss << "void " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_insert(" << fixCType(templatedStructType) << " handle, " << fixCType(keyType) << " key, " << fixCType(listType) << " value)\n";
        } else {
          ss << "void " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_insert(" << fixCType(templatedStructType) << " handle, " << fixCType(listType) << " value)\n";
        }
        ss << "{\n";
        ss << typedefsSS.str();
        ss << " if (0 == handle) return;\n";

        // Decide how the C key / value converts into the wrapper type:
        // enums are static_cast, non-string basic types pass straight
        // through, everything else round-trips via _wrapperFromHandle.
        String keyTypeStr;
        String listTypeStr;
        if (isMap) {
          if (keyType->toEnumType()) {
            keyTypeStr = "static_cast<" + GenerateStructHeader::getWrapperTypeString(false, keyType) + ">(key)";
          } else if ((keyType->toBasicType()) &&
                     ("string_t" != fixCType(keyType))) {
            keyTypeStr = "key";
          } else {
            keyTypeStr = fixType(keyType) + "_wrapperFromHandle(key)";
          }
        }
        if (listType->toEnumType()) {
          listTypeStr = "static_cast<" + GenerateStructHeader::getWrapperTypeString(false, listType) + ">(value)";
        } else if ((listType->toBasicType()) &&
                   ("string_t" != fixCType(listType))) {
          listTypeStr = "value";
        } else {
          listTypeStr = fixType(listType) + "_wrapperFromHandle(value)";
        }
        if (isMap) {
          ss << " (*(*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle)))[" << keyTypeStr << "] = " << listTypeStr << ";\n";
        } else if (isList) {
          ss << " (*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle))->push_back(" << listTypeStr << ");\n";
        } else {
          // set: unordered insert.
          ss << " (*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle))->insert(" << listTypeStr << ");\n";
        }
        ss << "}\n";
        ss << "\n";

        ss << dash;
        // FIX: return type must match the header declaration above
        // (iterator_handle_t, not uintptr_t).
        ss << "iterator_handle_t " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperIterBegin(" << fixCType(templatedStructType) << " handle)\n";
        ss << "{\n";
        ss << typedefsWithIterSS.str();
        ss << " if (0 == handle) return 0;\n";
        ss << " return reinterpret_cast<iterator_handle_t>(new WrapperTypeListIterator((*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle))->begin()));\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "void " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperIterNext(iterator_handle_t iterHandle)\n";
        ss << "{\n";
        ss << typedefsWithIterSS.str();
        ss << " if (0 == iterHandle) return;\n";
        ss << " ++(*reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle));\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "bool " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperIterIsEnd(" << fixCType(templatedStructType) << " handle, iterator_handle_t iterHandle)\n";
        ss << "{\n";
        ss << typedefsWithIterSS.str();
        ss << " if (0 == handle) return true;\n";
        ss << " if (0 == iterHandle) return true;\n";
        ss << " auto iterRawPtr = reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle);\n";
        // NOTE: the generated iterator frees itself once the end is reached;
        // callers must not use the iterator handle after isEnd returns true.
        ss << " bool isEnd = (*iterRawPtr) == (*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle))->end();\n";
        ss << " if (isEnd) delete iterRawPtr;\n";
        ss << " return isEnd;\n";
        ss << "}\n";
        ss << "\n";

        if (isMap) {
          ss << dash;
          // FIX: include the calling-convention define so this definition
          // matches the header declaration (all sibling functions emit it).
          ss << fixCType(keyType) << " " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperIterKey(iterator_handle_t iterHandle)\n";
          ss << "{\n";
          ss << typedefsWithIterSS.str();
          ss << " if (0 == iterHandle) return " << fixCType(keyType) << "();\n";
          if (((keyType->toBasicType()) ||
               (keyType->toEnumType())) &&
              ("string_t" != fixCType(keyType))) {
            ss << " return (*(*reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle))).first;\n";
          } else {
            ss << " return " << fixType(keyType) << "_wrapperToHandle((*(*reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle))).first);\n";
          }
          ss << "}\n";
          ss << "\n";
        }

        ss << dash;
        ss << fixCType(listType) << " " << getApiCallingDefine(structType) << " " << fixType(templatedStructType) << "_wrapperIterValue(iterator_handle_t iterHandle)\n";
        ss << "{\n";
        ss << typedefsWithIterSS.str();
        ss << " if (0 == iterHandle) return " << fixCType(listType) << "();\n";
        if (((listType->toBasicType()) ||
             (listType->toEnumType())) &&
            ("string_t" != fixCType(listType))) {
          ss << " return (*(*reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle)))" << (isMap ? ".second" : "") << ";\n";
        } else {
          ss << " return " << fixType(listType) << "_wrapperToHandle((*(*reinterpret_cast<WrapperTypeListIteratorRawPtr>(iterHandle)))" << (isMap ? ".second" : "") << ");\n";
        }
        ss << "}\n";
        ss << "\n";
      }
      // C++ helper implementations (handle <-> wrapper conversions).
      {
        auto &ss = helperFile.cppFunctionsSS_;
        ss << dash2;
        ss << " " << GenerateStructHeader::getWrapperTypeString(false, templatedStructType) << " " << fixType(templatedStructType) << "_wrapperFromHandle(" << fixCType(templatedStructType) << " handle) noexcept\n";
        ss << " {\n";
        ss << typedefsSS2.str();
        ss << " if (0 == handle) return WrapperTypeListPtr();\n";
        ss << " return (*reinterpret_cast<WrapperTypeListPtrRawPtr>(handle));\n";
        ss << " }\n";
        ss << "\n";
        ss << dash2;
        ss << " " << fixCType(templatedStructType) << " " << fixType(templatedStructType) << "_wrapperToHandle(" << GenerateStructHeader::getWrapperTypeString(false, templatedStructType) << " value) noexcept\n";
        ss << " {\n";
        ss << typedefsSS2.str();
        ss << " if (!value) return 0;\n";
        ss << " return reinterpret_cast<CType>(new WrapperTypeListPtr(value));\n";
        ss << " }\n";
        ss << "\n";
      }
    }
  }
}
//---------------------------------------------------------------------
// Emits the C-API glue for a built-in "::zs::<specialName>" struct
// (e.g. "Promise"): destroy / instanceId accessors plus — for Promise —
// observation of settlement events and state queries (isSettled /
// isResolved / isRejected), together with the C++ handle conversion
// helpers and the PromiseCallback bridge class.
// (Removed unused local `zsSpecialType`; it was never referenced.)
void GenerateStructC::prepareHelperSpecial(
                                           HelperFile &helperFile,
                                           const String &specialName
                                           ) noexcept
{
  bool isPromise = "Promise" == specialName;

  auto context = helperFile.global_->toContext()->findType("::zs::" + specialName);
  if (!context) return;

  auto contextStruct = context->toStruct();
  if (!contextStruct) return;

  auto dash = GenerateHelper::getDashedComment(String());
  auto dash2 = GenerateHelper::getDashedComment(String(" "));

  {
    // Header declarations.
    {
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(contextStruct) << " void " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperDestroy(" << fixCType(contextStruct) << " handle);\n";
      ss << getApiExportDefine(contextStruct) << " instance_id_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperInstanceId(" << fixCType(contextStruct) << " handle);\n";
      if (isPromise) {
        ss << getApiExportDefine(contextStruct) << " event_observer_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperObserveEvents(" << fixCType(contextStruct) << " handle);\n";
        ss << getApiExportDefine(contextStruct) << " uint64_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_get_id(" << fixCType(contextStruct) << " handle);\n";
        ss << getApiExportDefine(contextStruct) << " bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isSettled(" << fixCType(contextStruct) << " handle);\n";
        ss << getApiExportDefine(contextStruct) << " bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isResolved(" << fixCType(contextStruct) << " handle);\n";
        ss << getApiExportDefine(contextStruct) << " bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isRejected(" << fixCType(contextStruct) << " handle);\n";
      }
      ss << "\n";
    }
    {
      auto &ss = helperFile.headerCppFunctionsSS_;
      if (isPromise) {
        ss << " event_observer_t zs_" << specialName << "_wrapperObserveEvents(" << specialName << "Ptr value) noexcept;\n";
      }
      ss << " " << fixCType(contextStruct) << " zs_" << specialName << "_wrapperToHandle(" << specialName << "Ptr value) noexcept;\n";
      ss << " " << specialName << "Ptr zs_" << specialName << "_wrapperFromHandle(" << fixCType(contextStruct) << " handle) noexcept;\n";
      ss << "\n";
    }
  }
  {
    // C implementations.
    {
      auto &ss = helperFile.cFunctionsSS_;

      ss << dash;
      ss << "void " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperDestroy(" << fixCType(contextStruct) << " handle)\n";
      ss << "{\n";
      ss << " typedef " << specialName << "Ptr WrapperType;\n";
      ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
      ss << " if (0 == handle) return;\n";
      ss << " delete reinterpret_cast<WrapperTypeRawPtr>(handle);\n";
      ss << "}\n";
      ss << "\n";

      ss << dash;
      ss << "instance_id_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperInstanceId(" << fixCType(contextStruct) << " handle)\n";
      ss << "{\n";
      ss << " typedef " << specialName << "Ptr WrapperType;\n";
      ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " return reinterpret_cast<instance_id_t>((*reinterpret_cast<WrapperTypeRawPtr>(handle)).get());\n";
      ss << "}\n";
      ss << "\n";

      if (isPromise) {
        ss << dash;
        ss << "event_observer_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_wrapperObserveEvents(" << fixCType(contextStruct) << " handle)\n";
        ss << "{\n";
        ss << " typedef " << specialName << "Ptr WrapperType;\n";
        ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
        ss << " if (0 == handle) return 0;\n";
        ss << " return wrapper::zs_" << specialName << "_wrapperObserveEvents((*reinterpret_cast<WrapperTypeRawPtr>(handle)));\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "uint64_t " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_get_id(" << fixCType(contextStruct) << " handle)\n";
        ss << "{\n";
        ss << " typedef " << specialName << "Ptr WrapperType;\n";
        ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
        ss << " if (0 == handle) return 0;\n";
        ss << " return SafeInt<uint64_t>((*reinterpret_cast<WrapperTypeRawPtr>(handle))->getID());\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isSettled(" << fixCType(contextStruct) << " handle)\n";
        ss << "{\n";
        ss << " typedef " << specialName << "Ptr WrapperType;\n";
        ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
        ss << " if (0 == handle) return false;\n";
        ss << " return (*reinterpret_cast<WrapperTypeRawPtr>(handle))->isSettled();\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isResolved(" << fixCType(contextStruct) << " handle)\n";
        ss << "{\n";
        ss << " typedef " << specialName << "Ptr WrapperType;\n";
        ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
        ss << " if (0 == handle) return false;\n";
        ss << " return (*reinterpret_cast<WrapperTypeRawPtr>(handle))->isResolved();\n";
        ss << "}\n";
        ss << "\n";

        ss << dash;
        ss << "bool " << getApiCallingDefine(contextStruct) << " zs_" << specialName << "_isRejected(" << fixCType(contextStruct) << " handle)\n";
        ss << "{\n";
        ss << " typedef " << specialName << "Ptr WrapperType;\n";
        ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
        ss << " if (0 == handle) return false;\n";
        ss << " return (*reinterpret_cast<WrapperTypeRawPtr>(handle))->isRejected();\n";
        ss << "}\n";
        ss << "\n";
      }
    }
    {
      auto &ss = helperFile.cppFunctionsSS_;
      if (isPromise) {
        // PromiseCallback: bridges zsLib promise settlement into the
        // wrapper event-observer mechanism (emitted verbatim).
        ss << dash2;
        ss << dash2;
        ss << dash2;
        ss << dash2;
        ss << " //\n";
        ss << " // PromiseCallback\n";
        ss << " //\n";
        ss << "\n";
        ss << " ZS_DECLARE_CLASS_PTR(PromiseCallback);\n";
        ss << "\n";
        ss << " class PromiseCallback : public IWrapperObserver,\n";
        ss << " public IWrapperCallbackEvent,\n";
        ss << " public ::zsLib::IPromiseSettledDelegate\n";
        ss << " {\n";
        ss << " public:\n";
        ss << " PromiseCallback(PromisePtr promise) noexcept : promise_(promise) {}\n";
        ss << "\n";
        ss << " static IWrapperObserverPtr *create(PromisePtr promise) noexcept\n";
        ss << " {\n";
        ss << " if (!promise) return static_cast<IWrapperObserverPtr *>(NULL);\n";
        ss << "\n";
        ss << " auto pThis = make_shared<PromiseCallback>(promise);\n";
        ss << " pThis->thisObserverRaw_ = new IWrapperObserverPtr(pThis);\n";
        ss << " pThis->thisWeak_ = pThis;\n";
        ss << " promise->then(pThis);\n";
        ss << " promise->background();\n";
        ss << " return pThis->thisObserverRaw_;\n";
        ss << " }\n";
        ss << "\n";
        ss << " /* IWrapperObserver */\n";
        ss << "\n";
        ss << " virtual event_observer_t getObserver() noexcept\n";
        ss << " {\n";
        ss << " ::zsLib::AutoLock lock(lock_);\n";
        ss << " if (NULL == thisObserverRaw_) return 0;\n";
        ss << " return reinterpret_cast<event_observer_t>(thisObserverRaw_);\n";
        ss << " }\n";
        ss << "\n";
        ss << " virtual void observerCancel() noexcept\n";
        ss << " {\n";
        ss << " IWrapperObserverPtr pThis;\n";
        ss << " {\n";
        ss << " ::zsLib::AutoLock lock(lock_);\n";
        ss << " if (NULL == thisObserverRaw_) return;\n";
        ss << " pThis = *thisObserverRaw_;\n";
        ss << " delete thisObserverRaw_;\n";
        ss << " thisObserverRaw_ = NULL;\n";
        ss << " promise_.reset();\n";
        ss << " }\n";
        ss << " }\n";
        ss << "\n";
        ss << " /* IWrapperCallbackEvent */\n";
        ss << "\n";
        ss << " /* (duplicate) virtual event_observer_t getObserver() noexcept = 0; */;\n";
        ss << " virtual const char *getNamespace() noexcept {return \"::zs\";}\n";
        ss << " virtual const char *getClass() noexcept {return \"Promise\";}\n";
        ss << " virtual const char *getMethod() noexcept {return \"onSettled\";}\n";
        ss << " virtual generic_handle_t getSource() noexcept {return zs_Promise_wrapperToHandle(promise_);}\n";
        ss << " virtual instance_id_t getInstanceId() noexcept {return reinterpret_cast<instance_id_t>(promise_.get());}\n";
        ss << " virtual generic_handle_t getEventData(int argumentIndex) noexcept { ((void)argumentIndex); return 0;}\n";
        ss << "\n";
        ss << " virtual void onPromiseSettled(PromisePtr promise)\n";
        ss << " {\n";
        ss << " {\n";
        ss << " ::zsLib::AutoLock lock(lock_);\n";
        ss << " if (!promise_) return;\n";
        ss << " }\n";
        ss << "\n";
        ss << " IWrapperCallbackEvent::fireEvent(thisWeak_.lock());\n";
        ss << " }\n";
        ss << "\n";
        ss << " private:\n";
        ss << " ::zsLib::Lock lock_;\n";
        ss << " IWrapperObserverPtr *thisObserverRaw_ {};\n";
        ss << " PromiseCallbackWeakPtr thisWeak_;\n";
        ss << " PromisePtr promise_;\n";
        ss << " };\n";
        ss << "\n";
        ss << dash2;
        ss << " event_observer_t zs_" << specialName << "_wrapperObserveEvents(" << specialName << "Ptr value) noexcept\n";
        ss << " {\n";
        ss << " return reinterpret_cast<event_observer_t>(PromiseCallback::create(value));\n";
        ss << " }\n";
        ss << "\n";
      }

      // Handle <-> smart pointer conversions.
      ss << dash2;
      ss << " " << fixCType(contextStruct) << " zs_" << specialName << "_wrapperToHandle(" << specialName << "Ptr value) noexcept\n";
      ss << " {\n";
      ss << " typedef " << fixCType(contextStruct) << " CType;\n";
      ss << " typedef " << specialName << "Ptr WrapperType;\n";
      ss << " if (!value) return 0;\n";
      ss << " return reinterpret_cast<CType>(new WrapperType(value));\n";
      ss << " }\n";
      ss << "\n";
      ss << dash2;
      ss << " " << specialName << "Ptr zs_" << specialName << "_wrapperFromHandle(" << fixCType(contextStruct) << " handle) noexcept\n";
      ss << " {\n";
      ss << " typedef " << specialName << "Ptr WrapperType;\n";
      ss << " typedef WrapperType * WrapperTypeRawPtr;\n";
      ss << " if (0 == handle) return WrapperType();\n";
      ss << " return (*reinterpret_cast<WrapperTypeRawPtr>(handle));\n";
      ss << " }\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits a zs_PromiseWith_resolveValue_<type>() accessor for every
// "::zs::PromiseWith" template specialization; each accessor extracts
// the resolved value (stored in a zsLib::AnyHolder) from a settled
// promise handle and converts it to its C handle form.
// (Removed unused local `dash2`; only `dash` is emitted here.)
void GenerateStructC::preparePromiseWithValue(HelperFile &helperFile) noexcept
{
  auto context = helperFile.global_->toContext()->findType("::zs::PromiseWith");
  if (!context) return;

  auto contextStruct = context->toStruct();
  if (!contextStruct) return;

  auto dash = GenerateHelper::getDashedComment(String());

  {
    auto &ss = helperFile.headerCFunctionsSS_;
    ss << "\n";
  }

  for (auto iter = contextStruct->mTemplatedStructs.begin(); iter != contextStruct->mTemplatedStructs.end(); ++iter)
  {
    auto templatedStructType = (*iter).second;
    if (!templatedStructType) continue;

    // The resolve value type is the first (only) template argument.
    TypePtr promiseType;
    auto iterArg = templatedStructType->mTemplateArguments.begin();
    if (iterArg != templatedStructType->mTemplateArguments.end()) {
      promiseType = (*iterArg);
    }

    includeType(helperFile, promiseType);

    {
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(contextStruct) << " " << fixCType(promiseType) << " " << getApiCallingDefine(contextStruct) << " zs_PromiseWith_resolveValue_" << fixType(promiseType) << "(zs_Promise_t handle);\n";
    }
    {
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << fixCType(promiseType) << " " << getApiCallingDefine(contextStruct) << " zs_PromiseWith_resolveValue_" << fixType(promiseType) << "(zs_Promise_t handle)\n";
      ss << "{\n";
      ss << " typedef ::zsLib::AnyHolder< " << GenerateStructHeader::getWrapperTypeString(false, promiseType) << " > AnyHolderWrapper;\n";
      ss << " typedef PromisePtr * PromisePtrRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " PromisePtr promise = (*reinterpret_cast<PromisePtrRawPtr>(handle));\n";
      ss << " if (!promise) return 0;\n";
      ss << " auto holder = promise->value<AnyHolderWrapper>();\n";
      ss << " if (!holder) return 0;\n";
      ss << " return " << fixType(promiseType) << "_wrapperToHandle(holder->value_);\n";
      ss << "}\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Emits a zs_PromiseWith_rejectReason_<type>() accessor for every
// "::zs::PromiseRejectionReason" template specialization; each accessor
// extracts the rejection reason (stored in a zsLib::AnyHolder) from a
// settled promise handle and converts it to its C handle form.
// FIX: the generated body previously called promise->value<...>(),
// which fetches the *resolved* value; rejected promises carry their
// payload in reason<...>() instead.
// (Removed unused local `dash2`; only `dash` is emitted here.)
void GenerateStructC::preparePromiseWithRejectionReason(HelperFile &helperFile) noexcept
{
  auto context = helperFile.global_->toContext()->findType("::zs::PromiseRejectionReason");
  if (!context) return;

  auto contextStruct = context->toStruct();
  if (!contextStruct) return;

  auto dash = GenerateHelper::getDashedComment(String());

  {
    auto &ss = helperFile.headerCFunctionsSS_;
    ss << "\n";
  }

  for (auto iter = contextStruct->mTemplatedStructs.begin(); iter != contextStruct->mTemplatedStructs.end(); ++iter)
  {
    auto templatedStructType = (*iter).second;
    if (!templatedStructType) continue;

    // The rejection reason type is the first (only) template argument.
    TypePtr promiseType;
    auto iterArg = templatedStructType->mTemplateArguments.begin();
    if (iterArg != templatedStructType->mTemplateArguments.end()) {
      promiseType = (*iterArg);
    }

    includeType(helperFile, promiseType);

    {
      auto &ss = helperFile.headerCFunctionsSS_;
      ss << getApiExportDefine(contextStruct) << " " << fixCType(promiseType) << " " << getApiCallingDefine(contextStruct) << " zs_PromiseWith_rejectReason_" << fixType(promiseType) << "(zs_Promise_t handle);\n";
    }
    {
      auto &ss = helperFile.cFunctionsSS_;
      ss << dash;
      ss << fixCType(promiseType) << " " << getApiCallingDefine(contextStruct) << " zs_PromiseWith_rejectReason_" << fixType(promiseType) << "(zs_Promise_t handle)\n";
      ss << "{\n";
      ss << " typedef ::zsLib::AnyHolder< " << GenerateStructHeader::getWrapperTypeString(false, promiseType) << " > AnyHolderWrapper;\n";
      ss << " typedef PromisePtr * PromisePtrRawPtr;\n";
      ss << " if (0 == handle) return 0;\n";
      ss << " PromisePtr promise = (*reinterpret_cast<PromisePtrRawPtr>(handle));\n";
      ss << " if (!promise) return 0;\n";
      ss << " auto holder = promise->reason<AnyHolderWrapper>();\n";
      ss << " if (!holder) return 0;\n";
      ss << " return " << fixType(promiseType) << "_wrapperToHandle(holder->value_);\n";
      ss << "}\n";
      ss << "\n";
    }
  }
}
//---------------------------------------------------------------------
// Closes the sections left open by the preparation passes and writes
// the accumulated helper header and implementation files to disk.
void GenerateStructC::finalizeHelperFile(HelperFile &helperFile) noexcept
{
  // Flush the collected thrower declarations, then terminate the open
  // class / namespace / __cplusplus sections in the header C++ stream.
  helperFile.headerCppFunctionsSS_
    << helperFile.headerThrowersSS_.str()
    << " };\n"
    << "\n"
    << "} /* namespace wrapper */\n"
    << "#endif /* __cplusplus */\n";

  // Terminate the open wrapper namespace in the implementation stream.
  helperFile.cppFunctionsSS_
    << "\n"
    << "} /* namespace wrapper */\n";

  // Header file: C includes, C declarations, C++ includes, C++ declarations.
  std::stringstream headerOut;
  appendStream(headerOut, helperFile.headerCIncludeSS_);
  appendStream(headerOut, helperFile.headerCFunctionsSS_);
  appendStream(headerOut, helperFile.headerCppIncludeSS_);
  appendStream(headerOut, helperFile.headerCppFunctionsSS_);
  writeBinary(helperFile.headerFileName_, UseHelper::convertToBuffer(headerOut.str()));

  // Implementation file, assembled in the same order.
  std::stringstream cppOut;
  appendStream(cppOut, helperFile.cIncludeSS_);
  appendStream(cppOut, helperFile.cFunctionsSS_);
  appendStream(cppOut, helperFile.cppIncludeSS_);
  appendStream(cppOut, helperFile.cppFunctionsSS_);
  writeBinary(helperFile.cppFileName_, UseHelper::convertToBuffer(cppOut.str()));
}
//---------------------------------------------------------------------
// Walks a namespace depth-first: nested namespaces are processed before
// this namespace's own structs, each struct producing its own C files.
void GenerateStructC::processNamespace(
                                       HelperFile &helperFile,
                                       NamespacePtr namespaceObj
                                       ) noexcept
{
  if (!namespaceObj) return;

  for (auto &namespaceEntry : namespaceObj->mNamespaces) {
    processNamespace(helperFile, namespaceEntry.second);
  }

  for (auto &structEntry : namespaceObj->mStructs) {
    processStruct(helperFile, structEntry.second);
  }
}
//---------------------------------------------------------------------
// Builds and writes the per-struct C wrapper file pair ("c_<type>.h" /
// "c_<type>.cpp") for one concrete struct: prepares the include-guard /
// override-ifdef boilerplate for both files, delegates the API emission
// to the four-argument processStruct overload, closes the boilerplate,
// and writes both files next to the helper file.
void GenerateStructC::processStruct(
                                    HelperFile &helperFile,
                                    StructPtr structObj
                                    ) noexcept
{
  if (!structObj) return;
  // Built-in and generic (template) structs never get their own C files.
  if (GenerateHelper::isBuiltInType(structObj)) return;
  if (structObj->mGenerics.size() > 0) return;

  String cppFileName = "c_" + fixType(structObj) + ".cpp";
  String headerFileName = "c_" + fixType(structObj) + ".h";

  StructFile structFile;
  // Output paths are resolved relative to the helper file's location.
  structFile.cppFileName_ = UseHelper::fixRelativeFilePath(helperFile.cppFileName_, cppFileName);
  structFile.headerFileName_ = UseHelper::fixRelativeFilePath(helperFile.headerFileName_, headerFileName);

  // Override scheme: "special" structs include the hand-written override
  // from wrapper/override/c/ unless C_USE_GENERATED_<NAME> is defined
  // (#ifndef below); ordinary structs use the generated code unless
  // C_USE_CUSTOM_<NAME> is defined (#ifdef below).
  String ifdefName = (structObj->hasModifier(Modifier_Special) ? "C_USE_GENERATED_" : "C_USE_CUSTOM_") + GenerateStructHeader::getStructInitName(structObj);
  ifdefName.toUpper();

  {
    // Header prologue: generated-by banner, override #if, guard, types.
    auto &ss = structFile.headerCIncludeSS_;
    ss << "/* " ZS_EVENTING_GENERATED_BY " */\n\n";
    ss << "\n";
    ss << "#" << (structObj->hasModifier(Modifier_Special) ? "ifndef" : "ifdef") << " " << ifdefName << "\n";
    ss << "#include <wrapper/override/c/" << headerFileName << ">\n";
    ss << "#else /* " << ifdefName << " */\n";
    ss << "#pragma once\n\n";
    ss << "#include \"types.h\"\n";
    ss << "\n";
  }
  {
    auto &ss = structFile.headerCFunctionsSS_;
    ss << getApiGuardDefine(helperFile.global_) << "\n";
    ss << "\n";
  }
  {
    // Implementation prologue mirrors the header's override #if scheme.
    auto &ss = structFile.cIncludeSS_;
    ss << "/* " ZS_EVENTING_GENERATED_BY " */\n\n";
    ss << "\n";
    ss << "#" << (structObj->hasModifier(Modifier_Special) ? "ifndef" : "ifdef") << " " << ifdefName << "\n";
    ss << "#include <wrapper/override/c/" << cppFileName << ">\n";
    ss << "#else /* " << ifdefName << " */\n";
    ss << "\n";
    ss << "#include \"c_helpers.h\"\n";
    ss << "#include <zsLib/types.h>\n";
    ss << "#include <zsLib/eventing/types.h>\n";
    ss << "#include <zsLib/SafeInt.h>\n";
    ss << "\n";
  }
  {
    auto &ss = structFile.cFunctionsSS_;
    ss << "using namespace wrapper;\n\n";
  }
  {
    auto &ss = structFile.headerCppIncludeSS_;
    ss << "\n";
    ss << getApiGuardDefine(helperFile.global_, true) << "\n";
    ss << "\n";
    ss << "#ifdef __cplusplus\n";
  }
  {
    // Open "namespace wrapper" in both C++ sections; closed after emission.
    auto &ss = structFile.headerCppFunctionsSS_;
    ss << "\n";
    ss << "namespace wrapper\n";
    ss << "{\n";
  }
  {
    auto &ss = structFile.cppFunctionsSS_;
    ss << "namespace wrapper\n";
    ss << "{\n";
  }

  structFile.includeC("\"c_" + fixType(structObj) + ".h\"");
  structFile.includeC("\"../" + fixType(structObj) + ".h\"");

  // Emit the struct's full API surface (methods, properties, events).
  processStruct(helperFile, structFile, structObj, structObj);

  {
    // Header epilogue: close namespace, __cplusplus and override #if.
    auto &ss = structFile.headerCppFunctionsSS_;
    ss << "\n";
    ss << "} /* namespace wrapper */\n";
    ss << "#endif /* __cplusplus */\n";
    ss << "\n";
    ss << "#endif /*" << (structObj->hasModifier(Modifier_Special) ? " ifndef" : "") << " " << ifdefName << " */\n";
  }
  {
    auto &ss = structFile.cppFunctionsSS_;
    ss << "\n";
    ss << "} /* namespace wrapper */\n";
    ss << "\n";
    ss << "#endif /*" << (structObj->hasModifier(Modifier_Special) ? " ifndef" : "") << " " << ifdefName << " */\n";
  }

  {
    // Assemble and write the header: C includes, C decls, C++ includes, C++ decls.
    std::stringstream ss;
    appendStream(ss, structFile.headerCIncludeSS_);
    appendStream(ss, structFile.headerCFunctionsSS_);
    appendStream(ss, structFile.headerCppIncludeSS_);
    appendStream(ss, structFile.headerCppFunctionsSS_);
    writeBinary(structFile.headerFileName_, UseHelper::convertToBuffer(ss.str()));
  }
  {
    // Assemble and write the implementation in the same order.
    std::stringstream ss;
    appendStream(ss, structFile.cIncludeSS_);
    appendStream(ss, structFile.cFunctionsSS_);
    appendStream(ss, structFile.cppIncludeSS_);
    appendStream(ss, structFile.cppFunctionsSS_);
    writeBinary(structFile.cppFileName_, UseHelper::convertToBuffer(ss.str()));
  }
}
//---------------------------------------------------------------------
// Emits the API surface of structObj into structFile on behalf of
// rootStructObj. Called with rootStructObj == structObj for the struct
// itself, then recursively for each "is a" base so inherited methods
// and properties are flattened into the root struct's C API.
// FIX: the emitted section banner was missing a space before the
// closing "*/" (generated "/* Foo*/").
void GenerateStructC::processStruct(
                                    HelperFile &helperFile,
                                    StructFile &structFile,
                                    StructPtr rootStructObj,
                                    StructPtr structObj
                                    ) noexcept
{
  if (!structObj) return;

  if (rootStructObj == structObj) {
    // Top-level invocation only: nested structs get their own files.
    for (auto iter = structObj->mStructs.begin(); iter != structObj->mStructs.end(); ++iter) {
      auto subStructObj = (*iter).second;
      processStruct(helperFile, subStructObj);
    }
  }

  // Flatten each inherited ("is a") type into this struct's C API first.
  for (auto iter = structObj->mIsARelationships.begin(); iter != structObj->mIsARelationships.end(); ++iter) {
    auto relatedTypeObj = (*iter).second;
    if (!relatedTypeObj) continue;
    processStruct(helperFile, structFile, rootStructObj, relatedTypeObj->toStruct());
  }

  {
    // Section banner in the generated C header.
    auto &ss = structFile.headerCFunctionsSS_;
    ss << "\n";
    ss << "/* " << fixType(structObj) << " */\n";
    ss << "\n";
  }

  processMethods(helperFile, structFile, rootStructObj, structObj);
  processProperties(helperFile, structFile, rootStructObj, structObj);

  if (rootStructObj == structObj) {
    // Event handlers only apply to the root struct itself.
    processEventHandlers(helperFile, structFile, structObj);
  }
}
//---------------------------------------------------------------------
// Emits one C-callable wrapper function per declared method of structObj
// (event handlers are handled separately by processEventHandlers()): a
// declaration into the header C stream and a definition into the C source
// stream. Constructors become "wrapperCreate_<name>" functions; instance
// methods take a leading "wrapperThisHandle"; throwing methods take a
// leading exception handle and translate C++ exceptions onto it. When
// structObj is the root struct this also emits cast helpers to related
// struct handles, an optional default constructor, the handle lifetime
// helpers (wrapperClone / wrapperDestroy / wrapperInstanceId / optional
// wrapperDispose), and the C++ wrapperToHandle / wrapperFromHandle
// conversion helpers.
void GenerateStructC::processMethods(
HelperFile &helperFile,
StructFile &structFile,
StructPtr rootStructObj,
StructPtr structObj
) noexcept
{
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
// Root struct members are exported plainly; inherited (base) members use
// the "casted" export macro, which a build option can turn off.
auto exportStr = (rootStructObj == structObj ? getApiExportDefine(helperFile.global_) : getApiExportCastedDefine(helperFile.global_));
// NOTE(review): foundConstructor is set below but never read in this
// function — confirm whether it is vestigial.
bool foundConstructor = false;
// Snippets are accumulated locally and appended to the file streams at the
// end so the root-only helper functions (emitted mid-function) come first.
std::stringstream headerCSS;
std::stringstream headerCppSS;
std::stringstream cSS;
std::stringstream cppSS;
bool disposable = structObj->hasModifier(Modifier_Struct_Disposable);
// Walk every declared method.
for (auto iter = structObj->mMethods.begin(); iter != structObj->mMethods.end(); ++iter) {
auto method = (*iter);
if (!method) continue;
if (method->hasModifier(Modifier_Method_EventHandler)) continue;
includeType(structFile, method->mResult);
bool isConstructor = method->hasModifier(Modifier_Method_Ctor);
bool isStatic = method->hasModifier(Modifier_Static);
bool hasThis = ((!isStatic) && (!isConstructor));
if (isConstructor) foundConstructor = true;
// Inherited (base) struct members: only instance methods are re-emitted
// for the root handle; statics and constructors belong to the base alone.
if (rootStructObj != structObj) {
if ((isStatic) || (isConstructor)) continue;
}
if (method->hasModifier(Modifier_Method_Delete)) continue;
String name = method->mName;
if (method->hasModifier(Modifier_AltName)) {name = method->getModifierValue(Modifier_AltName);}
// Constructors return the struct's own handle type.
String resultCTypeStr = (isConstructor ? fixCType(structObj) : fixCType(method->hasModifier(Modifier_Optional), method->mResult));
bool hasResult = resultCTypeStr != "void";
{
auto &ss = headerCSS;
ss << exportStr << " " << resultCTypeStr << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_" << (isConstructor ? "wrapperCreate_" : "") << name << "(";
}
{
auto &ss = cSS;
ss << dash;
ss << resultCTypeStr << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_" << (isConstructor ? "wrapperCreate_" : "") << name << "(";
}
// Assemble the C parameter list (shared between declaration and
// definition): exception handle first for throwing methods, then the
// "this" handle for instance methods, then the declared arguments.
std::stringstream argSS;
size_t totalArgs = method->mArguments.size();
if (hasThis) ++totalArgs;
if (method->mThrows.size() > 0) ++totalArgs;
if (totalArgs > 1) argSS << "\n  ";
bool first = true;
if (method->mThrows.size() > 0) {
argSS << "exception_handle_t wrapperExceptionHandle";
first = false;
}
if (hasThis) {
if (!first) argSS << ",\n  ";
argSS << fixCType(structObj) << " " << "wrapperThisHandle";
first = false;
}
for (auto iterArg = method->mArguments.begin(); iterArg != method->mArguments.end(); ++iterArg) {
auto argPropertyObj = (*iterArg);
includeType(structFile, argPropertyObj->mType);
if (!first) argSS << ",\n  ";
first = false;
argSS << fixCType(argPropertyObj->mType) << " " << argPropertyObj->mName;
}
argSS << ")";
{
auto &ss = headerCSS;
ss << argSS.str() << ";\n";
}
// Definition body of the wrapper function.
{
auto &ss = cSS;
ss << argSS.str() << "\n";
ss << "{\n";
String indentStr = "  ";
// Throwing methods wrap the call in try/catch; a default-initialized
// result placeholder is declared so catch blocks can fall through.
if (method->mThrows.size() > 0) {
indentStr += "  ";
if (hasResult) {
ss << "  " << resultCTypeStr << " wrapperResult {};\n";
}
ss << "  try {\n";
}
ss << indentStr;
// Constructors create the wrapper object then call its generated init.
if (isConstructor) {
ss << "auto wrapperThis = wrapper" << structObj->getPathName() << "::wrapper_create();\n";
ss << indentStr << "wrapperThis->wrapper_init_" << GenerateStructHeader::getStructInitName(structObj) << "(";
} else {
if (hasThis) {
ss << "auto wrapperThis = " << getFromHandleMethod(false, structObj) << "(wrapperThisHandle);\n";
ss << indentStr << "if (!wrapperThis) return";
if ("void" != resultCTypeStr) {
ss << " " << resultCTypeStr << "()";
}
ss << ";\n";
ss << indentStr;
}
if (hasResult) {
ss << (method->mThrows.size() > 0 ? "wrapperResult = " : "return ") << getToHandleMethod(method->hasModifier(Modifier_Optional), method->mResult) << "(";
}
if (hasThis) {
ss << "wrapperThis->" << method->getMappingName() << "(";
} else {
ss << "wrapper" << structObj->getPathName() << "::" << method->getMappingName() << "(";
}
}
// Forward each C argument through its from-handle converter.
first = true;
for (auto iterNamedArgs = method->mArguments.begin(); iterNamedArgs != method->mArguments.end(); ++iterNamedArgs) {
auto propertyObj = (*iterNamedArgs);
if (!first) ss << ", ";
first = false;
ss << getFromHandleMethod(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << "(" << propertyObj->getMappingName() << ")";
}
if (isConstructor) {
ss << ");\n";
ss << indentStr << "return " << getToHandleMethod(method->hasModifier(Modifier_Optional), structObj) << "(wrapperThis);\n";
} else {
if (hasResult) {
ss << ")";
}
ss << ");\n";
}
// One catch clause per declared throw type, recording the exception on
// the caller-supplied exception handle; non-default exception types go
// through the custom-throw registry.
if (method->mThrows.size() > 0) {
for (auto iterThrow = method->mThrows.begin(); iterThrow != method->mThrows.end(); ++iterThrow) {
auto throwType = (*iterThrow);
includeType(structFile, throwType);
ss << "  } catch (const " << GenerateStructHeader::getWrapperTypeString(false, throwType) << " &e) {\n";
if (GenerateHelper::isDefaultExceptionType(throwType)) {
ss << "    wrapper::exception_set_Exception(wrapperExceptionHandle, make_shared<::zsLib::" << ("Exception" == throwType->getMappingName() ? "" : "Exceptions::") << throwType->getMappingName() << ">(e));\n";
} else {
ss << "    wrapper::Throwers::singleton().customThrow_set_Exception(wrapperExceptionHandle, e);\n";
helperFile.specialThrow(throwType);
}
}
ss << "  }\n";
if (hasResult) {
ss << "  return wrapperResult;\n";
}
}
ss << "}\n";
ss << "\n";
}
}
// Everything below is emitted once, for the root struct only (and only
// when the struct has instance members).
bool onlyStatic = GenerateHelper::hasOnlyStaticMethods(structObj) || structObj->hasModifier(Modifier_Static);
if (rootStructObj == structObj) {
if (!onlyStatic) {
// Cast helpers: convert this struct's handle to each related struct's
// handle via dynamic_pointer_cast on the underlying wrapper pointer.
{
auto found = helperFile.derives_.find(structObj->getPathName());
if (found != helperFile.derives_.end()) {
auto &structSet = (*found).second;
bool foundRelated = false;
for (auto iterSet = structSet.begin(); iterSet != structSet.end(); ++iterSet) {
auto relatedStruct = (*iterSet);
if (!relatedStruct) continue;
if (relatedStruct == structObj) continue;
foundRelated = true;
includeType(structFile, relatedStruct);
structFile.includeC("\"../" + fixType(relatedStruct) + ".h\"");
{
auto &ss = structFile.headerCFunctionsSS_;
ss << exportStr << " " << fixCType(relatedStruct) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperCastAs_" << fixType(relatedStruct) << "(" << fixCType(structObj) << " handle);\n";
}
{
auto &ss = structFile.cFunctionsSS_;
ss << dash;
ss << fixCType(relatedStruct) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperCastAs_" << fixType(relatedStruct) << "(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef wrapper" << relatedStruct->getPathName() << " RelatedWrapperType;\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  auto originalType = *reinterpret_cast<WrapperTypePtrRawPtr>(handle);\n";
ss << "  auto castType = std::dynamic_pointer_cast<RelatedWrapperType>(originalType);\n";
ss << "  if (!castType) return 0;\n";
ss << "  return " << getToHandleMethod(false, relatedStruct) << "(castType);\n";
ss << "}\n";
ss << "\n";
}
}
if (foundRelated) {
auto &ss = structFile.headerCFunctionsSS_;
ss << "\n";
}
}
}
// Emit "wrapperCreate_<name>" when the struct has no declared ctor but
// still needs a default constructor.
if (GenerateHelper::needsDefaultConstructor(structObj)) {
{
auto &ss = structFile.headerCFunctionsSS_;
ss << exportStr << " " << fixCType(structObj) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperCreate_" << structObj->getMappingName() << "();\n";
}
{
auto &ss = structFile.cFunctionsSS_;
ss << dash;
ss << fixCType(structObj) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperCreate_" << structObj->getMappingName() << "()\n";
ss << "{\n";
ss << "  typedef " << fixCType(structObj) << " CType;\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  auto result = wrapper" << structObj->getPathName() << "::wrapper_create();\n";
ss << "  result->wrapper_init_" << GenerateStructHeader::getStructInitName(structObj) << "();\n";
ss << "  return reinterpret_cast<CType>(new WrapperTypePtr(result));\n";
ss << "}\n";
ss << "\n";
}
}
// Handle lifetime helpers: a handle is a heap-allocated shared_ptr cast
// to uintptr_t; clone duplicates the shared_ptr, destroy deletes it,
// instanceId exposes the raw object address, dispose (when the struct is
// disposable) invokes the wrapper's dispose hook.
{
auto &ss = structFile.headerCFunctionsSS_;
ss << exportStr << " " << fixCType(structObj) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperClone(" << fixCType(structObj) << " handle);\n";
ss << exportStr << " void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperDestroy(" << fixCType(structObj) << " handle);\n";
ss << exportStr << " instance_id_t " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperInstanceId(" << fixCType(structObj) << " handle);\n";
if (disposable) {
ss << exportStr << " void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperDispose(" << fixCType(structObj) << " handle);\n";
}
}
{
auto &ss = structFile.cFunctionsSS_;
ss << dash;
ss << fixCType(structObj) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperClone(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<" << fixCType(structObj) << ">(new WrapperTypePtr(*reinterpret_cast<WrapperTypePtrRawPtr>(handle)));\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperDestroy(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return;\n";
ss << "  delete reinterpret_cast<WrapperTypePtrRawPtr>(handle);\n";
ss << "}\n";
ss << "\n";
ss << dash;
ss << "instance_id_t " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperInstanceId(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
ss << "  return reinterpret_cast<instance_id_t>((*reinterpret_cast<WrapperTypePtrRawPtr>(handle)).get());\n";
ss << "}\n";
ss << "\n";
if (disposable) {
ss << dash;
ss << "void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_wrapperDispose(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return;\n";
ss << "  (*reinterpret_cast<WrapperTypePtrRawPtr>(handle))->wrapper_dispose();\n";
ss << "}\n";
ss << "\n";
}
}
}
// C++-side converters between wrapper smart pointers and opaque handles,
// used internally by the generated glue.
{
auto &ss = structFile.headerCppFunctionsSS_;
ss << "  " << fixCType(structObj) << " " << fixType(rootStructObj) << "_wrapperToHandle(" << GenerateStructHeader::getWrapperTypeString(false, structObj) << " value);\n";
ss << "  " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " " << fixType(rootStructObj) << "_wrapperFromHandle(" << fixCType(structObj) << " handle);\n";
}
{
auto &ss = structFile.cppFunctionsSS_;
ss << dash2;
ss << "  " << fixCType(structObj) << " " << fixType(rootStructObj) << "_wrapperToHandle(" << GenerateStructHeader::getWrapperTypeString(false, structObj) << " value)\n";
ss << "  {\n";
ss << "    typedef " << fixCType(structObj) << " CType;\n";
ss << "    typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "    typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "    if (!value) return 0;\n";
ss << "    return reinterpret_cast<CType>(new WrapperTypePtr(value));\n";
ss << "  }\n";
ss << "\n";
ss << dash2;
ss << "  " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " " << fixType(rootStructObj) << "_wrapperFromHandle(" << fixCType(structObj) << " handle)\n";
ss << "  {\n";
ss << "    typedef " << GenerateStructHeader::getWrapperTypeString(false, structObj) << " WrapperTypePtr;\n";
ss << "    typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "    if (0 == handle) return WrapperTypePtr();\n";
ss << "    return (*reinterpret_cast<WrapperTypePtrRawPtr>(handle));\n";
ss << "  }\n";
ss << "\n";
}
}
// Flush the locally accumulated snippets to the per-file output streams.
structFile.headerCFunctionsSS_ << headerCSS.str();
structFile.headerCppFunctionsSS_ << headerCppSS.str();
structFile.cFunctionsSS_ << cSS.str();
structFile.cppFunctionsSS_ << cppSS.str();
}
//---------------------------------------------------------------------
// Emits C getter/setter wrapper functions for every property of structObj.
// Non-dictionary structs always route access through generated
// get_<name>()/set_<name>() accessor methods; dictionary structs use direct
// member access unless explicit getter/setter modifiers are present.
// Static-only structs contribute properties only when they are the root.
void GenerateStructC::processProperties(
HelperFile &helperFile,
StructFile &structFile,
StructPtr rootStructObj,
StructPtr structObj
) noexcept
{
bool onlyStatic = GenerateHelper::hasOnlyStaticMethods(structObj) || structObj->hasModifier(Modifier_Static);
if (onlyStatic) {
if (rootStructObj != structObj) return;
}
// Inherited (base) members use the "casted" export macro; see processMethods.
auto exportStr = (rootStructObj == structObj ? getApiExportDefine(helperFile.global_) : getApiExportCastedDefine(helperFile.global_));
bool isDictionary = structObj->hasModifier(Modifier_Struct_Dictionary);
auto dash = GenerateHelper::getDashedComment(String());
for (auto iter = structObj->mProperties.begin(); iter != structObj->mProperties.end(); ++iter) {
auto propertyObj = (*iter);
if (!propertyObj) continue;
bool isStatic = propertyObj->hasModifier(Modifier_Static);
bool hasGetter = propertyObj->hasModifier(Modifier_Property_Getter);
bool hasSetter = propertyObj->hasModifier(Modifier_Property_Setter);
// Non-dictionary properties default to accessor-style get AND set.
if (!isDictionary) {
if ((!hasGetter) && (!hasSetter)) {
hasGetter = hasSetter = true;
}
}
includeType(structFile, propertyObj->mType);
// hasGet/hasSet control WHICH wrapper functions are emitted;
// hasGetter/hasSetter control whether the wrapper calls an accessor
// method or touches the member directly.
bool hasGet = true;
bool hasSet = true;
if ((hasGetter) && (!hasSetter)) hasSet = false;
if ((hasSetter) && (!hasGetter)) hasGet = false;
// Static properties always go through get_/set_ accessor functions.
if (isStatic) {
if (hasGet) hasGetter = true;
if (hasSet) hasSetter = true;
}
// Header declarations (static properties take no "this" handle).
{
auto &ss = structFile.headerCFunctionsSS_;
if (hasGet) {
ss << exportStr << " " << fixCType(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_get_" << propertyObj->getMappingName() << "(" << (isStatic ? String("") : String(fixCType(structObj) + " wrapperThisHandle")) << ");\n";
}
if (hasSet) {
ss << exportStr << " void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_set_" << propertyObj->getMappingName() << "(" << (isStatic ? String("") : String(fixCType(structObj) + " wrapperThisHandle, ")) << fixCType(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " value);\n";
}
}
// Definitions: convert handle -> wrapper pointer, then either call the
// accessor or read/write the member, converting the value to/from its
// C handle representation.
{
auto &ss = structFile.cFunctionsSS_;
if (hasGet) {
ss << dash;
ss << fixCType(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_get_" << propertyObj->getMappingName() << "(" << (isStatic ? String("") : String(fixCType(structObj) + " wrapperThisHandle")) << ")\n";
ss << "{\n";
if (!isStatic) {
ss << "  auto wrapperThis = " << getFromHandleMethod(false, structObj) << "(wrapperThisHandle);\n";
ss << "  return " << getToHandleMethod(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << "(wrapperThis->" << (hasGetter ? "get_" : "") << propertyObj->getMappingName() << (hasGetter ? "()" : "") << ");\n";
} else {
ss << "  return " << getToHandleMethod(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << "(wrapper" << structObj->getPathName() << "::get_" << propertyObj->getMappingName() << "());\n";
}
ss << "}\n";
ss << "\n";
}
if (hasSet) {
ss << dash;
ss << "void " << getApiCallingDefine(structObj) << " " << fixType(rootStructObj) << "_set_" << propertyObj->getMappingName() << "(" << (isStatic ? String("") : String(fixCType(structObj) + " wrapperThisHandle, ")) << fixCType(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " value)\n";
ss << "{\n";
if (!isStatic) {
ss << "  auto wrapperThis = " << getFromHandleMethod(false, structObj) << "(wrapperThisHandle);\n";
ss << "  wrapperThis->" << (hasSetter ? "set_" : "") << propertyObj->getMappingName() << (hasSetter ? "(" : " = ") << getFromHandleMethod(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << "(value)" << (hasSetter ? ")" : "") << ";\n";
} else {
ss << "  wrapper" << structObj->getPathName() << "::set_" << propertyObj->getMappingName() << "(" << getFromHandleMethod(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << "(value));\n";
}
ss << "}\n";
ss << "\n";
}
}
}
}
//---------------------------------------------------------------------
// Emits, for each event-handler method of structObj, the observer-side glue:
// a "WrapperEvent_<method>" payload struct (when the event carries
// arguments) whose getEventData() converts each stored argument to a
// generic handle, plus an override of the event method that packages the
// arguments and fires the event via wrapper::IWrapperCallbackEvent.
// The shared observer scaffolding is opened by processEventHandlersStart()
// (on the first event found) and closed by processEventHandlersEnd().
void GenerateStructC::processEventHandlers(
HelperFile &helperFile,
StructFile &structFile,
StructPtr structObj
) noexcept
{
if (!structObj) return;
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
bool foundEvent = false;
for (auto iter = structObj->mMethods.begin(); iter != structObj->mMethods.end(); ++iter)
{
auto method = (*iter);
if (!method->hasModifier(Modifier_Method_EventHandler)) continue;
// Lazily open the observer struct scaffolding on the first event.
if (!foundEvent) {
foundEvent = true;
processEventHandlersStart(helperFile, structFile, structObj);
}
// Header: per-event payload struct (only if there are arguments) and
// the virtual event-method override declaration.
{
auto &ss = structFile.headerCppFunctionsSS_;
if (method->mArguments.size() > 0) {
ss << "\n";
ss << "    struct WrapperEvent_" << method->getMappingName() << " : public WrapperEvent\n";
ss << "    {\n";
ss << "      virtual const char *getMethod() {return \"" << method->getMappingName() << "\";}\n";
ss << "      virtual generic_handle_t getEventData(int argumentIndex);\n";
ss << "\n";
size_t index = 1;
for (auto iterArgs = method->mArguments.begin(); iterArgs != method->mArguments.end(); ++iterArgs, ++index) {
auto propertyObj = (*iterArgs);
ss << "      " << GenerateStructHeader::getWrapperTypeString(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " param" << index << "_;\n";
}
ss << "    };\n";
ss << "\n";
}
ss << "    virtual void " << method->getMappingName() << "(";
bool first = true;
for (auto iterArgs = method->mArguments.begin(); iterArgs != method->mArguments.end(); ++iterArgs) {
auto propertyObj = (*iterArgs);
if (!first) {ss << ", ";}
first = false;
ss << GenerateStructHeader::getWrapperTypeString(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " " << propertyObj->getMappingName();
}
ss << ");\n";
}
// Source: getEventData() maps a zero-based argument index to the stored
// parameter, converted to a generic handle.
{
auto &ss = structFile.cppFunctionsSS_;
if (method->mArguments.size() > 0) {
ss << dash2;
ss << "    generic_handle_t " << fixType(structObj) << "_WrapperObserver::WrapperEvent_" << method->getMappingName() << "::getEventData(int argumentIndex)\n";
ss << "    {\n";
size_t index = 1;
for (auto iterArgs = method->mArguments.begin(); iterArgs != method->mArguments.end(); ++iterArgs, ++index) {
auto propertyObj = (*iterArgs);
includeType(structFile, propertyObj->mType);
ss << "      if (" << (index-1) << " == argumentIndex) ";
// Plain (non-binary, non-string) basic types are routed through the
// boxed to-handle conversion — presumably so a plain value fits a
// generic_handle_t; confirm against getToHandleMethod's semantics.
bool isSimple = false;
{
auto basicType = propertyObj->mType->toBasicType();
if (basicType) {
String basicTypeStr = fixCType(basicType->mBaseType);
if (("binary_t" != basicTypeStr) && ("string_t" != basicTypeStr)) isSimple = true;
}
}
ss << "return " << getToHandleMethod(propertyObj->hasModifier(Modifier_Optional) || isSimple, propertyObj->mType) << "(param" << index << "_);\n";
}
ss << "      return 0;\n";
ss << "    }\n";
ss << "\n";
}
// Source: event-method override — with arguments it fills a typed
// payload struct; without, it reuses the base WrapperEvent carrying
// only the method name. Either way the event is fired asynchronously
// through wrapper::IWrapperCallbackEvent::fireEvent.
ss << dash2;
ss << "    void " << fixType(structObj) << "_WrapperObserver::" << method->getMappingName() << "(";
bool first = true;
for (auto iterArgs = method->mArguments.begin(); iterArgs != method->mArguments.end(); ++iterArgs) {
auto propertyObj = (*iterArgs);
if (!first) { ss << ", "; }
first = false;
ss << GenerateStructHeader::getWrapperTypeString(propertyObj->hasModifier(Modifier_Optional), propertyObj->mType) << " " << propertyObj->getMappingName();
}
ss << ")\n";
ss << "    {\n";
if (method->mArguments.size() > 0) {
ss << "      auto wrapperEvent = make_shared<" << fixType(structObj) << "_WrapperObserver::WrapperEvent_" << method->getMappingName() << ">();\n";
ss << "      wrapperEvent->observer_ = thisWeak_.lock();\n";
size_t index = 1;
for (auto iterArgs = method->mArguments.begin(); iterArgs != method->mArguments.end(); ++iterArgs, ++index) {
auto propertyObj = (*iterArgs);
first = false;
ss << "      wrapperEvent->param" << index << "_ = " << propertyObj->mName << ";\n";
}
ss << "      wrapper::IWrapperCallbackEvent::fireEvent(wrapperEvent);\n";
} else {
ss << "      auto wrapperEvent = make_shared<" << fixType(structObj) << "_WrapperObserver::WrapperEvent>();\n";
ss << "      wrapperEvent->observer_ = thisWeak_.lock();\n";
ss << "      wrapperEvent->method_ = \"" << method->getMappingName() << "\";\n";
ss << "      wrapper::IWrapperCallbackEvent::fireEvent(wrapperEvent);\n";
}
ss << "    }\n";
ss << "\n";
}
}
// Close the observer struct only if at least one event was emitted.
if (!foundEvent) return;
processEventHandlersEnd(helperFile, structFile, structObj);
}
//---------------------------------------------------------------------
// Emits the shared scaffolding required before any per-event glue for
// structObj:
//  - registers the struct's own header as a C++ include,
//  - declares/defines the C "wrapperObserveEvents" entry point that
//    unwraps a handle and installs an observer on it,
//  - opens the "<type>_WrapperObserver" helper struct (IWrapperObserver
//    implementation, base WrapperEvent payload) and emits its
//    wrapperObserverCreate / getObserver / observerCancel definitions.
// processEventHandlersEnd() later emits the data members and closing brace.
void GenerateStructC::processEventHandlersStart(
ZS_MAYBE_USED() HelperFile &helperFile,
StructFile &structFile,
StructPtr structObj
) noexcept
{
ZS_MAYBE_USED(helperFile);
structFile.headerIncludeCpp("\"../" + fixType(structObj) + ".h\"");
auto dash = GenerateHelper::getDashedComment(String());
auto dash2 = GenerateHelper::getDashedComment(String(" "));
// C header: declaration of the observe-events entry point.
{
auto &ss = structFile.headerCFunctionsSS_;
ss << "\n";
ss << getApiExportDefine(structObj) << " event_observer_t " << getApiCallingDefine(structObj) << " " << fixType(structObj) << "_wrapperObserveEvents(" << fixCType(structObj) << " handle);\n";
}
// C source: definition — unwrap the handle, validate, create an observer.
{
auto &ss = structFile.cFunctionsSS_;
ss << dash;
ss << "event_observer_t " << getApiCallingDefine(structObj) << " " << fixType(structObj) << "_wrapperObserveEvents(" << fixCType(structObj) << " handle)\n";
ss << "{\n";
ss << "  typedef wrapper" << structObj->getPathName() << " WrapperType;\n";
ss << "  typedef shared_ptr<WrapperType> WrapperTypePtr;\n";
ss << "  typedef WrapperTypePtr * WrapperTypePtrRawPtr;\n";
ss << "  if (0 == handle) return 0;\n";
// FIX: the original string lacked the trailing "\n", which fused this
// statement and the following "if (!pWrapper)" check onto a single line
// of generated C source (still compilable, but inconsistent with every
// other emitted statement).
ss << "  auto pWrapper = (*reinterpret_cast<WrapperTypePtrRawPtr>(handle));\n";
ss << "  if (!pWrapper) return 0;\n";
ss << "  return reinterpret_cast<event_observer_t>(" << fixType(structObj) << "_WrapperObserver::wrapperObserverCreate(pWrapper)" << ");\n";
ss << "}\n";
ss << "\n";
}
// C++ header: open the observer helper struct; the base WrapperEvent
// payload carries the observer, namespace/class/method identification and
// a default (argument-less) getEventData.
{
auto &ss = structFile.headerCppFunctionsSS_;
ss << "\n";
ss << "    ZS_DECLARE_STRUCT_PTR(" << fixType(structObj) << "_WrapperObserver);\n";
ss << "\n";
ss << "    struct " << fixType(structObj) << "_WrapperObserver :\n";
ss << "      public wrapper" << structObj->getPathName() << "::WrapperObserver, \n";
ss << "      public IWrapperObserver\n";
ss << "    {\n";
ss << "      static IWrapperObserverPtr *wrapperObserverCreate(" << GenerateStructHeader::getWrapperTypeString(false, structObj) << " value);\n";
ss << "\n";
ss << "      /* IWrapperObserver */\n";
ss << "\n";
ss << "      virtual event_observer_t getObserver();\n";
ss << "      virtual void observerCancel();\n";
ss << "\n";
ss << "      /* WrapperEvent */\n";
ss << "\n";
ss << "      struct WrapperEvent : public IWrapperCallbackEvent\n";
ss << "      {\n";
ss << "        virtual event_observer_t getObserver() {return observer_->getObserver();}\n";
ss << "        virtual const char *getNamespace() {return \"" << structObj->getPath() << "\";}\n";
ss << "        virtual const char *getClass() {return \"" << structObj->getMappingName() << "\";}\n";
ss << "        virtual const char *getMethod() {return method_.c_str();}\n";
ss << "        virtual generic_handle_t getSource() {return reinterpret_cast<generic_handle_t>(new " << GenerateStructHeader::getWrapperTypeString(false, structObj) << "(observer_->source_.lock()));}\n";
ss << "        virtual instance_id_t getInstanceId() {auto source = observer_->source_.lock(); if (!source) return static_cast<instance_id_t>(0); return reinterpret_cast<instance_id_t>(source.get());}\n";
ss << "        virtual generic_handle_t getEventData(int argumentIndex) { ((void)argumentIndex); return 0;}\n";
ss << "\n";
ss << "        " << fixType(structObj) << "_WrapperObserverPtr observer_;\n";
ss << "        ::zsLib::String method_;\n";
ss << "      };\n";
ss << "\n";
ss << "      /* wrapper" << structObj->getPathName() << "::WrapperObserver */\n";
ss << "\n";
}
// C++ source: observer lifecycle — create (installs itself on the source
// object), getObserver (raw pointer as opaque id under lock) and
// observerCancel (uninstall and release under lock).
{
auto &ss = structFile.cppFunctionsSS_;
ss << dash2;
ss << "    IWrapperObserverPtr * " << fixType(structObj) << "_WrapperObserver::wrapperObserverCreate(" << GenerateStructHeader::getWrapperTypeString(false, structObj) << " value)\n";
ss << "    {\n";
ss << "      auto pThis = make_shared<" << fixType(structObj) << "_WrapperObserver>();\n";
ss << "      pThis->thisObserverRaw_ = new IWrapperObserverPtr(pThis);\n";
ss << "      pThis->thisWeak_ = pThis;\n";
ss << "      pThis->source_ = value;\n";
ss << "      if (value) value->wrapper_installObserver(pThis);\n";
ss << "      return pThis->thisObserverRaw_;\n";
ss << "    }\n";
ss << "\n";
ss << dash2;
ss << "    event_observer_t " << fixType(structObj) << "_WrapperObserver::getObserver()\n";
ss << "    {\n";
ss << "      ::zsLib::AutoLock lock(lock_);\n";
ss << "      if (NULL == thisObserverRaw_) return 0;\n";
ss << "      return reinterpret_cast<event_observer_t>(thisObserverRaw_);\n";
ss << "    }\n";
ss << "\n";
ss << dash2;
ss << "    void " << fixType(structObj) << "_WrapperObserver::observerCancel()\n";
ss << "    {\n";
ss << "      IWrapperObserverPtr pObserverThis;\n";
ss << "      {\n";
ss << "        ::zsLib::AutoLock lock(lock_);\n";
ss << "        if (NULL == thisObserverRaw_) return;\n";
ss << "        auto pThis = thisWeak_.lock();\n";
ss << "        pObserverThis = *thisObserverRaw_;\n";
ss << "        auto value = source_.lock();\n";
ss << "        if (value) value->wrapper_uninstallObserver(pThis);\n";
ss << "        auto temp = thisObserverRaw_;\n";
ss << "        thisObserverRaw_ = NULL;\n";
ss << "        delete temp;\n";
ss << "      }\n";
ss << "    }\n";
ss << "\n";
}
}
//---------------------------------------------------------------------
// Emits the trailing data members (lock, weak self pointer, raw observer
// pointer, weak source pointer) and the closing brace of the
// "<type>_WrapperObserver" helper struct opened by
// processEventHandlersStart(). The helper-file argument is unused.
void GenerateStructC::processEventHandlersEnd(
ZS_MAYBE_USED() HelperFile &helperFile,
StructFile &structFile,
StructPtr structObj
) noexcept
{
ZS_MAYBE_USED(helperFile);

auto &headerSS = structFile.headerCppFunctionsSS_;
headerSS << "\n"
         << "      /* data */\n"
         << "\n"
         << "      ::zsLib::Lock lock_;\n"
         << "      " << fixType(structObj) << "_WrapperObserverWeakPtr thisWeak_;\n"
         << "      IWrapperObserverPtr *thisObserverRaw_;\n"
         << "      wrapper" << structObj->getPathName() << "WeakPtr source_;\n"
         << "    };\n"
         << "    \n";
}
//---------------------------------------------------------------------
// Generates the project-wide C "types" header as an in-memory buffer:
// C-linkage guards, export/import and calling-convention macros, the fixed
// set of primitive and boxed typedefs, opaque handle typedefs for every
// wrapped namespace/struct/enum, template specials, and (C++ builds only)
// the IWrapperObserver / IWrapperCallbackEvent interfaces shared by all
// generated glue. Returns an empty pointer when the project (or its global
// namespace) is missing.
SecureByteBlockPtr GenerateStructC::generateTypesHeader(ProjectPtr project) noexcept(false)
{
if (!project) return SecureByteBlockPtr();
if (!project->mGlobal) return SecureByteBlockPtr();
std::stringstream ss;
ss << "/* " ZS_EVENTING_GENERATED_BY " */\n\n";
ss << "#pragma once\n\n";
ss << "\n";
ss << "#include <stdint.h>\n\n";
ss << "\n";
// C-linkage guard macros (plain C also needs stdbool for bool_t below).
ss << "#ifdef __cplusplus\n";
ss << "#define " << getApiGuardDefine(project) << " extern \"C\" {\n";
ss << "#define " << getApiGuardDefine(project, true) << " }\n";
ss << "#else /* __cplusplus */\n";
ss << "#include <stdbool.h>\n";
ss << "#define " << getApiGuardDefine(project) << "\n";
ss << "#define " << getApiGuardDefine(project, true) << "\n";
ss << "#endif /* __cplusplus */\n";
ss << "\n";
// Export macro: dllexport when building the wrapper, dllimport (or GCC
// default visibility) when consuming it.
ss << "#ifndef " << getApiExportDefine(project) << "\n";
ss << "#ifdef " << getApiImplementationDefine(project) << "\n";
ss << "#ifdef _WIN32\n";
ss << "#define " << getApiExportDefine(project) << " __declspec(dllexport)\n";
ss << "#else /* _WIN32 */\n";
ss << "#define " << getApiExportDefine(project) << " __attribute__((visibility(\"default\")))\n";
ss << "#endif /* _WIN32 */\n";
ss << "#else /* "<< getApiImplementationDefine(project) << " */\n";
ss << "#ifdef _WIN32\n";
ss << "#define " << getApiExportDefine(project) << " __declspec(dllimport)\n";
ss << "#else /* _WIN32 */\n";
ss << "#define " << getApiExportDefine(project) << " __attribute__((visibility(\"default\")))\n";
ss << "#endif /* _WIN32 */\n";
ss << "#endif /* " << getApiImplementationDefine(project) << " */\n";
ss << "#endif /* ndef " << getApiExportDefine(project) << " */\n";
ss << "\n";
// Casted-export macro: lets a build suppress exporting base-class
// wrappers, forcing callers to cast handles to the base handle type.
ss << "#ifndef " << getApiExportCastedDefine(project) << "\n";
ss << "/* By defining " << getApiCastRequiredDefine(project) << " the wrapper will not export\n";
ss << "   any base class methods and instead will expect the caller to cast the C object handle\n";
ss << "   type to the base C object type to access base object methods and properties. */\n";
ss << "#ifdef " << getApiCastRequiredDefine(project) << "\n";
ss << "#define " << getApiExportCastedDefine(project) << "\n";
ss << "#else /* " << getApiCastRequiredDefine(project) << " */\n";
ss << "#define " << getApiExportCastedDefine(project) << " " << getApiExportDefine(project) << "\n";
ss << "#endif /* " << getApiCastRequiredDefine(project) << " */\n";
ss << "#endif /* ndef " << getApiExportCastedDefine(project) << " */\n";
ss << "\n";
// Calling-convention macro (cdecl everywhere).
ss << "#ifndef " << getApiCallingDefine(project) << "\n";
ss << "#ifdef _WIN32\n";
ss << "#define " << getApiCallingDefine(project) << " __cdecl\n";
ss << "#else /* _WIN32 */\n";
ss << "#define " << getApiCallingDefine(project) << " __attribute__((cdecl))\n";
ss << "#endif /* _WIN32 */\n";
ss << "#endif /* ndef " << getApiCallingDefine(project) << " */\n";
ss << "\n";
ss << getApiGuardDefine(project) << "\n";
ss << "\n";
// Primitive typedefs used by all generated wrapper signatures.
ss << "typedef bool bool_t;\n";
ss << "typedef signed char schar_t;\n";
ss << "typedef unsigned char uchar_t;\n";
ss << "typedef signed short sshort_t;\n";
ss << "typedef unsigned short ushort_t;\n";
ss << "typedef signed int sint_t;\n";
ss << "typedef unsigned int uint_t;\n";
ss << "typedef signed long slong_t;\n";
ss << "typedef unsigned long ulong_t;\n";
ss << "typedef signed long long sllong_t;\n";
ss << "typedef unsigned long long ullong_t;\n";
ss << "typedef float float_t;\n";
ss << "typedef double double_t;\n";
ss << "typedef float float32_t;\n";
ss << "typedef double float64_t;\n";
ss << "typedef long double ldouble_t;\n";
ss << "typedef uintptr_t raw_pointer_t;\n";
ss << "typedef uintptr_t binary_t;\n";
ss << "typedef uint64_t binary_size_t;\n";
ss << "typedef uintptr_t string_t;\n";
ss << "typedef uintptr_t const_char_star_t;\n";
ss << "\n";
// Boxed (optional/nullable) variants — each an opaque handle.
ss << "typedef uintptr_t box_bool_t;\n";
ss << "typedef uintptr_t box_schar_t;\n";
ss << "typedef uintptr_t box_uchar_t;\n";
ss << "typedef uintptr_t box_sshort_t;\n";
ss << "typedef uintptr_t box_ushort_t;\n";
ss << "typedef uintptr_t box_sint_t;\n";
ss << "typedef uintptr_t box_uint_t;\n";
ss << "typedef uintptr_t box_slong_t;\n";
ss << "typedef uintptr_t box_ulong_t;\n";
ss << "typedef uintptr_t box_sllong_t;\n";
ss << "typedef uintptr_t box_ullong_t;\n";
ss << "typedef uintptr_t box_float_t;\n";
ss << "typedef uintptr_t box_double_t;\n";
ss << "typedef uintptr_t box_float32_t;\n";
ss << "typedef uintptr_t box_float64_t;\n";
ss << "typedef uintptr_t box_ldouble_t;\n";
ss << "typedef uintptr_t box_int8_t;\n";
ss << "typedef uintptr_t box_uint8_t;\n";
ss << "typedef uintptr_t box_int16_t;\n";
ss << "typedef uintptr_t box_uint16_t;\n";
ss << "typedef uintptr_t box_int32_t;\n";
ss << "typedef uintptr_t box_uint32_t;\n";
ss << "typedef uintptr_t box_int64_t;\n";
ss << "typedef uintptr_t box_uint64_t;\n";
ss << "typedef uintptr_t box_raw_pointer_t;\n";
ss << "typedef uintptr_t box_binary_t;\n";
ss << "typedef uintptr_t box_binary_size_t;\n";
ss << "typedef uintptr_t box_string_t;\n";
ss << "\n";
// Opaque handles used by the wrapper infrastructure itself.
ss << "typedef uintptr_t instance_id_t;\n";
ss << "typedef uintptr_t event_observer_t;\n";
ss << "typedef uintptr_t callback_event_t;\n";
ss << "typedef uintptr_t generic_handle_t;\n";
ss << "typedef uintptr_t iterator_handle_t;\n";
ss << "typedef uintptr_t exception_handle_t;\n";
ss << "\n";
// Per-namespace/struct/enum handle typedefs, then template specials.
processTypesNamespace(ss, project->mGlobal);
ss << "\n";
processTypesTemplatesAndSpecials(ss, project);
ss << "\n";
ss << getApiGuardDefine(project, true) << "\n";
ss << "\n";
// C++-only section: observer/event interfaces shared by generated glue.
ss << "#ifdef __cplusplus\n";
ss << "#include \"../types.h\"\n";
ss << "\n";
ss << "namespace wrapper\n";
ss << "{\n";
ss << "  struct IWrapperObserver;\n";
ss << "  typedef shared_ptr<IWrapperObserver> IWrapperObserverPtr;\n";
ss << "\n";
ss << "  struct IWrapperObserver\n";
ss << "  {\n";
ss << "    virtual event_observer_t getObserver() = 0;\n";
ss << "    virtual void observerCancel() = 0;\n";
ss << "  };\n";
ss << "\n";
ss << "  struct IWrapperCallbackEvent;\n";
ss << "  typedef shared_ptr<IWrapperCallbackEvent> IWrapperCallbackEventPtr;\n";
ss << "\n";
ss << "  struct IWrapperCallbackEvent\n";
ss << "  {\n";
ss << "    static void fireEvent(IWrapperCallbackEventPtr event);\n";
ss << "\n";
ss << "    virtual event_observer_t getObserver() = 0;\n";
ss << "    virtual const char *getNamespace() = 0;\n";
ss << "    virtual const char *getClass() = 0;\n";
ss << "    virtual const char *getMethod() = 0;\n";
ss << "    virtual generic_handle_t getSource() = 0;\n";
ss << "    virtual instance_id_t getInstanceId() = 0;\n";
ss << "    virtual generic_handle_t getEventData(int argumentIndex) = 0;\n";
ss << "  };\n";
ss << "\n";
ss << "} /* namespace wrapper */\n";
ss << "\n";
ss << "#endif /* __cplusplus */\n";
return UseHelper::convertToBuffer(ss.str());
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesNamespace(
std::stringstream &ss,
NamespacePtr namespaceObj
) noexcept
{
if (!namespaceObj) return;
if (namespaceObj->hasModifier(Modifier_Special)) return;
for (auto iter = namespaceObj->mNamespaces.begin(); iter != namespaceObj->mNamespaces.end(); ++iter)
{
auto subNamespaceObj = (*iter).second;
processTypesNamespace(ss, subNamespaceObj);
}
processTypesEnum(ss, namespaceObj);
for (auto iter = namespaceObj->mStructs.begin(); iter != namespaceObj->mStructs.end(); ++iter)
{
auto structObj = (*iter).second;
processTypesStruct(ss, structObj);
}
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesStruct(
std::stringstream &ss,
StructPtr structObj
) noexcept
{
if (!structObj) return;
if (GenerateHelper::isBuiltInType(structObj)) return;
if (structObj->mGenerics.size() > 0) return;
ss << "typedef uintptr_t " << fixCType(structObj) << ";\n";
processTypesEnum(ss, structObj);
for (auto iter = structObj->mStructs.begin(); iter != structObj->mStructs.end(); ++iter) {
auto subStructObj = (*iter).second;
processTypesStruct(ss, subStructObj);
}
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesEnum(
std::stringstream &ss,
ContextPtr context
) noexcept
{
auto namespaceObj = context->toNamespace();
auto structObj = context->toStruct();
if ((!namespaceObj) && (!structObj)) return;
auto &enums = namespaceObj ? (namespaceObj->mEnums) : (structObj->mEnums);
for (auto iter = enums.begin(); iter != enums.end(); ++iter)
{
auto enumObj = (*iter).second;
ss << "typedef " << fixCType(enumObj->mBaseType) << " " << fixCType(enumObj) << ";\n";
ss << "typedef uintptr_t box_" << fixCType(enumObj) << ";\n";
}
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesTemplatesAndSpecials(
std::stringstream &ss,
ProjectPtr project
) noexcept
{
if (!project) return;
ContextPtr context = project;
processTypesTemplate(ss, context->findType("::std::list"));
ss << "\n";
processTypesTemplate(ss, context->findType("::std::map"));
ss << "\n";
processTypesTemplate(ss, context->findType("::std::set"));
ss << "\n";
processTypesTemplate(ss, context->findType("::zs::PromiseWith"));
ss << "\n";
processTypesSpecialStruct(ss, context->findType("::zs::Any"));
processTypesSpecialStruct(ss, context->findType("::zs::Promise"));
{
auto exceptionList = GenerateHelper::getAllExceptions("::zs::exceptions::");
for (auto iter = exceptionList.begin(); iter != exceptionList.end(); ++iter) {
auto e = (*iter);
processTypesSpecialStruct(ss, context->findType(e));
}
}
ss << "\n";
processTypesSpecialStruct(ss, context->findType("::zs::Time"));
processTypesSpecialStruct(ss, context->findType("::zs::Milliseconds"));
processTypesSpecialStruct(ss, context->findType("::zs::Microseconds"));
processTypesSpecialStruct(ss, context->findType("::zs::Nanoseconds"));
processTypesSpecialStruct(ss, context->findType("::zs::Seconds"));
processTypesSpecialStruct(ss, context->findType("::zs::Minutes"));
processTypesSpecialStruct(ss, context->findType("::zs::Hours"));
processTypesSpecialStruct(ss, context->findType("::zs::Days"));
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesTemplate(
std::stringstream &ss,
ContextPtr structContextObj
) noexcept
{
if (!structContextObj) return;
auto structObj = structContextObj->toStruct();
if (!structObj) return;
if (structObj->mGenerics.size() < 1) return;
for (auto iter = structObj->mTemplatedStructs.begin(); iter != structObj->mTemplatedStructs.end(); ++iter) {
auto templatedStruct = (*iter).second;
ss << "typedef uintptr_t " << fixCType(templatedStruct) << ";\n";
}
}
//---------------------------------------------------------------------
void GenerateStructC::processTypesSpecialStruct(
std::stringstream &ss,
ContextPtr structContextObj
) noexcept
{
if (!structContextObj) return;
auto structObj = structContextObj->toStruct();
if (!structObj) return;
if (!structObj->hasModifier(Modifier_Special)) return;
ss << "typedef uintptr_t " << fixCType(structObj) << ";\n";
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//
// GenerateStructHeader::IIDLCompilerTarget
//
//---------------------------------------------------------------------
String GenerateStructC::targetKeyword() noexcept
{
return String("c");
}
//---------------------------------------------------------------------
String GenerateStructC::targetKeywordHelp() noexcept
{
return String("Generate C wrapper");
}
//---------------------------------------------------------------------
void GenerateStructC::targetOutput(
const String &inPathStr,
const ICompilerTypes::Config &config
) noexcept(false)
{
typedef std::stack<NamespacePtr> NamespaceStack;
String pathStr(UseHelper::fixRelativeFilePath(inPathStr, String("wrapper")));
try {
UseHelper::mkdir(pathStr);
} catch (const StdError &e) {
ZS_THROW_CUSTOM_PROPERTIES_1(Failure, ZS_EVENTING_TOOL_SYSTEM_ERROR, "Failed to create path \"" + pathStr + "\": " + " error=" + string(e.result()) + ", reason=" + e.message());
}
pathStr += "/";
pathStr = UseHelper::fixRelativeFilePath(pathStr, String("generated"));
try {
UseHelper::mkdir(pathStr);
} catch (const StdError &e) {
ZS_THROW_CUSTOM_PROPERTIES_1(Failure, ZS_EVENTING_TOOL_SYSTEM_ERROR, "Failed to create path \"" + pathStr + "\": " + " error=" + string(e.result()) + ", reason=" + e.message());
}
pathStr += "/";
pathStr = UseHelper::fixRelativeFilePath(pathStr, String("c"));
try {
UseHelper::mkdir(pathStr);
} catch (const StdError &e) {
ZS_THROW_CUSTOM_PROPERTIES_1(Failure, ZS_EVENTING_TOOL_SYSTEM_ERROR, "Failed to create path \"" + pathStr + "\": " + " error=" + string(e.result()) + ", reason=" + e.message());
}
pathStr += "/";
const ProjectPtr &project = config.mProject;
if (!project) return;
if (!project->mGlobal) return;
writeBinary(UseHelper::fixRelativeFilePath(pathStr, String("types.h")), generateTypesHeader(project));
HelperFile helperFile;
helperFile.global_ = project->mGlobal;
calculateRelations(project->mGlobal, helperFile.derives_);
calculateBoxings(project->mGlobal, helperFile.boxings_);
helperFile.cppFileName_ = UseHelper::fixRelativeFilePath(pathStr, String("c_helpers.cpp"));
helperFile.headerFileName_ = UseHelper::fixRelativeFilePath(pathStr, String("c_helpers.h"));
prepareHelperFile(helperFile);
processNamespace(helperFile, helperFile.global_);
finalizeHelperFile(helperFile);
}
} // namespace internal
} // namespace tool
} // namespace eventing
} // namespace zsLib
| 50.396012 | 337 | 0.495896 | [
"object"
] |
f6ac5d7360a8dd85baf2bfd3509f592159fd5db1 | 1,724 | cpp | C++ | aws-cpp-sdk-cloudsearchdomain/source/model/SearchStatus.cpp | capeanalytics/aws-sdk-cpp | e88f75add5a9433601b6d46fe738e493da56ac3b | [
"Apache-2.0"
] | null | null | null | aws-cpp-sdk-cloudsearchdomain/source/model/SearchStatus.cpp | capeanalytics/aws-sdk-cpp | e88f75add5a9433601b6d46fe738e493da56ac3b | [
"Apache-2.0"
] | null | null | null | aws-cpp-sdk-cloudsearchdomain/source/model/SearchStatus.cpp | capeanalytics/aws-sdk-cpp | e88f75add5a9433601b6d46fe738e493da56ac3b | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/cloudsearchdomain/model/SearchStatus.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace CloudSearchDomain
{
namespace Model
{
SearchStatus::SearchStatus() :
m_timems(0),
m_timemsHasBeenSet(false),
m_ridHasBeenSet(false)
{
}
SearchStatus::SearchStatus(const JsonValue& jsonValue) :
m_timems(0),
m_timemsHasBeenSet(false),
m_ridHasBeenSet(false)
{
*this = jsonValue;
}
SearchStatus& SearchStatus::operator =(const JsonValue& jsonValue)
{
if(jsonValue.ValueExists("timems"))
{
m_timems = jsonValue.GetInt64("timems");
m_timemsHasBeenSet = true;
}
if(jsonValue.ValueExists("rid"))
{
m_rid = jsonValue.GetString("rid");
m_ridHasBeenSet = true;
}
return *this;
}
JsonValue SearchStatus::Jsonize() const
{
JsonValue payload;
if(m_timemsHasBeenSet)
{
payload.WithInt64("timems", m_timems);
}
if(m_ridHasBeenSet)
{
payload.WithString("rid", m_rid);
}
return payload;
}
} // namespace Model
} // namespace CloudSearchDomain
} // namespace Aws | 20.046512 | 78 | 0.716937 | [
"model"
] |
f6b2e53df6d586069a4f2c5018c517379bc146f6 | 3,556 | cc | C++ | internal/ceres/householder_vector_test.cc | ut-amrl/ceres-solver | dfce1e128d90b7bd5251c65483e1e3a0dea3c3bd | [
"Apache-2.0"
] | null | null | null | internal/ceres/householder_vector_test.cc | ut-amrl/ceres-solver | dfce1e128d90b7bd5251c65483e1e3a0dea3c3bd | [
"Apache-2.0"
] | null | null | null | internal/ceres/householder_vector_test.cc | ut-amrl/ceres-solver | dfce1e128d90b7bd5251c65483e1e3a0dea3c3bd | [
"Apache-2.0"
] | null | null | null | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Michael Vitus)
#include "ceres/internal/householder_vector.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
namespace ceres::internal {
static void HouseholderTestHelper(const Vector& x) {
const double kTolerance = 1e-14;
// Check to ensure that H * x = ||x|| * [0 ... 0 1]'.
Vector v(x.rows());
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
ComputeHouseholderVector<Vector, double, Eigen::Dynamic>(x, &v, &beta);
Vector result = x - beta * v * (v.transpose() * x);
Vector expected_result(x.rows());
expected_result.setZero();
expected_result(x.rows() - 1) = 1;
expected_result *= x.norm();
for (int i = 0; i < x.rows(); ++i) {
EXPECT_NEAR(expected_result[i], result[i], kTolerance);
}
}
TEST(HouseholderVector, ZeroPositive) {
Vector x(3);
x << 0.0, 0.0, 0.25;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, ZeroNegative) {
Vector x(3);
x << 0.0, 0.0, -0.25;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, NearZeroPositive) {
Vector x(3);
x << 1e-18, 1e-18, 0.25;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, NearZeroNegative) {
Vector x(3);
x << 1e-18, 1e-18, -0.25;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, NonZeroNegative) {
Vector x(3);
x << 1.0, 0.0, -3.0;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, NonZeroPositive) {
Vector x(3);
x << 1.0, 1.0, 1.0;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, NonZeroPositive_Size4) {
Vector x(4);
x << 1.0, 1.0, 0.0, 2.0;
HouseholderTestHelper(x);
}
TEST(HouseholderVector, LastElementZero) {
Vector x(4);
x << 1.0, 1.0, 0.0, 0.0;
HouseholderTestHelper(x);
}
} // namespace ceres::internal
| 29.882353 | 78 | 0.715973 | [
"vector"
] |
f6b3a6e6118cc6b0c74183a848cbe136edb753f7 | 57,528 | cpp | C++ | src/act_info.cpp | OznOg/xania | 4effa29e1c88c7290400a8cf8a67e03ad71c851a | [
"BSD-2-Clause"
] | null | null | null | src/act_info.cpp | OznOg/xania | 4effa29e1c88c7290400a8cf8a67e03ad71c851a | [
"BSD-2-Clause"
] | null | null | null | src/act_info.cpp | OznOg/xania | 4effa29e1c88c7290400a8cf8a67e03ad71c851a | [
"BSD-2-Clause"
] | null | null | null | /*************************************************************************/
/* Xania (M)ulti(U)ser(D)ungeon server source code */
/* (C) 1995-2000 Xania Development Team */
/* See the header to file: merc.h for original code copyrights */
/* */
/* act_info.c: standard information functions */
/* */
/*************************************************************************/
#include "AFFECT_DATA.hpp"
#include "AffectFlag.hpp"
#include "Area.hpp"
#include "AreaList.hpp"
#include "ArmourClass.hpp"
#include "Char.hpp"
#include "CharActFlag.hpp"
#include "Classes.hpp"
#include "Columner.hpp"
#include "CommFlag.hpp"
#include "ContainerFlag.hpp"
#include "Descriptor.hpp"
#include "DescriptorList.hpp"
#include "Exit.hpp"
#include "ExitFlag.hpp"
#include "Help.hpp"
#include "Logging.hpp"
#include "Materials.hpp"
#include "Object.hpp"
#include "ObjectExtraFlag.hpp"
#include "ObjectIndex.hpp"
#include "ObjectType.hpp"
#include "ObjectWearFlag.hpp"
#include "PlayerActFlag.hpp"
#include "PracticeTabulator.hpp"
#include "Races.hpp"
#include "RoomFlag.hpp"
#include "SkillNumbers.hpp"
#include "SkillTables.hpp"
#include "TimeInfoData.hpp"
#include "ToleranceFlag.hpp"
#include "Wear.hpp"
#include "WeatherData.hpp"
#include "act_comm.hpp"
#include "comm.hpp"
#include "common/BitOps.hpp"
#include "db.h"
#include "fight.hpp"
#include "handler.hpp"
#include "interp.h"
#include "lookup.h"
#include "save.hpp"
#include "skills.hpp"
#include "string_utils.hpp"
#include <fmt/chrono.h>
#include <fmt/format.h>
#include <magic_enum.hpp>
#include <range/v3/algorithm/find_if.hpp>
#include <range/v3/iterator/operations.hpp>
using namespace std::literals;
namespace {
std::string wear_string_for(const Object *obj, const Wear wear_location) {
constexpr std::array<std::string_view, WearFilter::wearable_count()> where_name = {
"used as light", "worn on finger", "worn on finger",
"worn around neck", "worn around neck", "worn on body",
"worn on head", "worn on legs", "worn on feet",
"worn on hands", "worn on arms", "worn as shield",
"worn about body", "worn about waist", "worn around wrist",
"worn around wrist", "wielded", "held",
"worn on ears"};
return fmt::format("<{}>", obj->wear_string.empty() ? where_name[to_int(wear_location)] : obj->wear_string);
}
}
/* for do_count */
// Peak simultaneous connection count seen this boot; maintained for the
// "count" command (do_count lives elsewhere in this file).
size_t max_on = 0;
/*
 * Local functions.
 */
// Forward declarations for helpers defined below in this translation unit.
std::string format_obj_to_char(const Object *obj, const Char *ch, bool fShort);
void show_char_to_char_0(const Char *victim, const Char *ch);
void show_char_to_char_1(Char *victim, Char *ch);
void show_char_to_char(const GenericList<Char *> &list, const Char *ch);
bool check_blind(const Char *ch);
void set_prompt(Char *ch, std::string_view prompt);
std::string format_obj_to_char(const Object *obj, const Char *ch, bool fShort) {
std::string buf;
std::string desc = fShort ? obj->short_descr : obj->description;
if (desc.empty()) {
desc = "This object has no description. Please inform the IMP.";
bug("Object {} has no description", obj->objIndex->vnum);
}
if (obj->is_unique())
buf += "(U) ";
if (obj->is_invisible())
buf += "(|cInvis|w) ";
if (ch->is_aff_detect_evil() && obj->is_evil())
buf += "(|rRed Aura|w) ";
if (ch->is_aff_detect_magic() && obj->is_magic())
buf += "(|gMagical|w) ";
if (obj->is_glowing())
buf += "(|WGlowing|w) ";
if (obj->is_humming())
buf += "(|yHumming|w) ";
buf += desc;
return buf;
}
/*
 * Show a list to a character.
 * Can coalesce duplicated items.
 */
void show_list_to_char(const GenericList<Object *> &list, const Char *ch, bool fShort, bool fShowNothing) {
    // Nothing to do for a character without an attached connection.
    if (!ch->desc)
        return;
    // One output row: a formatted description plus how many identical copies were seen.
    struct DescAndCount {
        std::string desc;
        int count{1};
    };
    std::vector<DescAndCount> to_show;
    // NPCs, and players with "combine" set, get duplicates coalesced with a count prefix.
    const bool show_counts = ch->is_npc() || check_enum_bit(ch->comm, CommFlag::Combine);
    // Format the list of objects.
    // Only loose (not worn) objects that the viewer can see are listed.
    for (auto *obj : list) {
        if (obj->wear_loc == Wear::None && can_see_obj(ch, obj)) {
            auto desc = format_obj_to_char(obj, ch, fShort);
            auto combined_same = false;
            if (show_counts) {
                // Look for duplicates, case sensitive.
                if (auto existing = ranges::find_if(to_show, [&](const auto &x) { return x.desc == desc; });
                    existing != to_show.end()) {
                    existing->count++;
                    combined_same = true;
                }
            }
            // Couldn't combine, or didn't want to.
            if (!combined_same)
                to_show.emplace_back(DescAndCount{std::move(desc)});
        }
    }
    // Output the formatted list.
    // With counts on, a count column ("( 2) ") or matching indent precedes each row.
    std::string buffer;
    auto indent = "     "sv;
    for (const auto &[name, count] : to_show) {
        if (show_counts)
            buffer += count > 1 ? fmt::format("({:2}) ", count) : indent;
        buffer += name + "\n\r";
    }
    if (fShowNothing && to_show.empty()) {
        if (show_counts)
            buffer += indent;
        buffer += "Nothing.\n\r";
    }
    ch->page_to(buffer);
}
// Build and send the one-line view of victim as seen by ch: status tags,
// then either the mob's long description (when standing in its default
// position) or a "<name> is <doing something>" sentence.
void show_char_to_char_0(const Char *victim, const Char *ch) {
    std::string buf;
    if (victim->is_aff_invisible())
        buf += "(|WInvis|w) ";
    if (victim->is_pc() && check_enum_bit(victim->act, PlayerActFlag::PlrWizInvis))
        buf += "(|RWizi|w) ";
    if (victim->is_pc() && check_enum_bit(victim->act, PlayerActFlag::PlrProwl))
        buf += "(|RProwl|w) ";
    if (victim->is_aff_hide())
        buf += "(|WHide|w) ";
    if (victim->is_aff_charm())
        buf += "(|yCharmed|w) ";
    if (victim->is_aff_pass_door())
        buf += "(|bTranslucent|w) ";
    if (victim->is_aff_faerie_fire())
        buf += "(|PPink Aura|w) ";
    if (victim->is_aff_octarine_fire())
        buf += "(|GOctarine Aura|w) ";
    if (victim->is_evil() && ch->is_aff_detect_evil())
        buf += "(|rRed Aura|w) ";
    if (victim->is_aff_sanctuary())
        buf += "(|WWhite Aura|w) ";
    if (victim->is_pc() && check_enum_bit(victim->act, PlayerActFlag::PlrKiller))
        buf += "(|RKILLER|w) ";
    if (victim->is_pc() && check_enum_bit(victim->act, PlayerActFlag::PlrThief))
        buf += "(|RTHIEF|w) ";
    // A blessed viewer perceives undead creatures.
    if (ch->is_affected_by(gsn_bless)) {
        if (check_enum_bit(victim->act, CharActFlag::Undead)) {
            buf += "(|bUndead|w) ";
        }
    }
    // In its default position with a long description: show that verbatim.
    if (victim->position == victim->start_pos && !victim->long_descr.empty()) {
        buf += victim->long_descr;
        ch->send_to(buf);
        return;
    }
    buf += ch->describe(*victim);
    if (victim->is_pc() && !check_enum_bit(ch->comm, CommFlag::Brief))
        buf += victim->pcdata->title;
    buf += " is ";
    buf += victim->position.present_progressive_verb();
    switch (victim->position) {
    case Position::Type::Standing:
        if (victim->riding != nullptr) {
            buf += fmt::format(", riding {}.", victim->riding->name);
        } else {
            buf += ".";
        }
        break;
    case Position::Type::Fighting:
        if (victim->fighting == nullptr)
            buf += " thin air??";
        else if (victim->fighting == ch)
            buf += " |RYOU!|w";
        else if (victim->in_room == victim->fighting->in_room) {
            buf += fmt::format(" {}.", ch->describe(*victim->fighting));
        } else
            buf += " someone who left??";
        break;
    default:;
    }
    buf += "\n\r";
    // Capitalise the first character of the line. Bug fix: go via
    // unsigned char — passing a negative char value (possible for
    // high-bit characters) to toupper() is undefined behaviour.
    buf[0] = static_cast<char>(toupper(static_cast<unsigned char>(buf[0])));
    ch->send_to(buf);
}
// The detailed "look <person>" view: broadcasts the looking act, then shows
// the victim's description, fight condition, visible equipment and
// (with autopeek and a successful peek roll) their inventory.
void show_char_to_char_1(Char *victim, Char *ch) {
    // Tell the room (and the target) that ch is looking, if they can see ch.
    if (can_see(victim, ch)) {
        if (ch == victim)
            act("$n looks at $r.", ch);
        else {
            act("$n looks at you.", ch, nullptr, victim, To::Vict);
            act("$n looks at $N.", ch, nullptr, victim, To::NotVict);
        }
    }
    if (victim->description[0] != '\0') {
        ch->send_to(victim->description);
    } else {
        act("You see nothing special about $M.", ch, nullptr, victim, To::Char);
    }
    ch->send_to(describe_fight_condition(*victim));
    // List visible equipment; the header is emitted only once a first
    // visible item is found.
    bool found = false;
    for (const auto &wear : WearFilter::wearable()) {
        if (const auto *obj = get_eq_char(victim, wear); obj && can_see_obj(ch, obj)) {
            if (!found) {
                ch->send_line("");
                act("$N is using:", ch, nullptr, victim, To::Char);
                found = true;
            }
            ch->send_line("{:<20}{}", wear_string_for(obj, wear), format_obj_to_char(obj, ch, true));
        }
    }
    // With autopeek set, a successful peek skill roll also reveals the inventory.
    if (victim != ch && ch->is_pc() && number_percent() < ch->get_skill(gsn_peek)
        && check_enum_bit(ch->act, PlayerActFlag::PlrAutoPeek)) {
        ch->send_line("\n\rYou peek at the inventory:");
        check_improve(ch, gsn_peek, true, 4);
        show_list_to_char(victim->carrying, ch, true, true);
    }
}
// "peek <target>": a skill-based glance at another character's inventory.
void do_peek(Char *ch, ArgParser args) {
    if (!ch->desc)
        return;
    if (ch->is_pos_stunned_or_dying()) {
        ch->send_line("You can't see anything but stars!");
        return;
    }
    if (ch->is_pos_sleeping()) {
        ch->send_line("You can't see anything, you're sleeping!");
        return;
    }
    if (!check_blind(ch))
        return;
    // Players without holy light can't peek in a dark room.
    if (ch->is_pc() && !check_enum_bit(ch->act, PlayerActFlag::PlrHolyLight) && room_is_dark(ch->in_room)) {
        ch->send_line("It is pitch black ... ");
        show_char_to_char(ch->in_room->people, ch);
        return;
    }
    auto *target = get_char_room(ch, args.shift());
    if (!target) {
        ch->send_line("They aren't here.");
        return;
    }
    // Skill roll: only players peek, and never at themselves.
    if (target != ch && ch->is_pc() && number_percent() < ch->get_skill(gsn_peek)) {
        ch->send_line("\n\rYou peek at their inventory:");
        check_improve(ch, gsn_peek, true, 4);
        show_list_to_char(target->carrying, ch, true, true);
    }
}
// Show every other character in the list to ch, one line each, honouring
// wizinvis levels; invisible infravision-detected watchers in a dark room
// appear only as glowing eyes.
void show_char_to_char(const GenericList<Char *> &list, const Char *ch) {
    for (auto *other : list) {
        if (other == ch)
            continue;
        // Immortals wizinvis'd above the viewer's trust level stay hidden entirely.
        const bool hidden_imm = other->is_pc() && check_enum_bit(other->act, PlayerActFlag::PlrWizInvis)
                                && ch->get_trust() < other->invis_level;
        if (hidden_imm)
            continue;
        if (!can_see(ch, other)) {
            if (room_is_dark(ch->in_room) && other->is_aff_infrared())
                ch->send_line("You see |Rglowing red|w eyes watching |RYOU!|w");
            continue;
        }
        show_char_to_char_0(other, ch);
    }
}
// Returns true when the character can see; otherwise tells them they are
// blind and returns false. Holy light overrides the blindness affect.
bool check_blind(const Char *ch) {
    const bool blinded = ch->is_aff_blind() && !ch->has_holylight();
    if (blinded)
        ch->send_line("You can't see a thing!");
    return !blinded;
}
/* changes your scroll */
// "scroll"            — report the current page size (0 stored means maximum).
// "scroll 0"          — set paging to the maximum page size.
// "scroll <10..52>"   — set the page size; ch->lines stores two less than
//                       the visible page (the prompt rows), hence the +2/-2.
void do_scroll(Char *ch, ArgParser args) {
    // Pager limited to 52 due to memory complaints relating to
    // buffer code ...short term fix :) --Faramir
    static constexpr int MaxScrollLength = 52;
    static constexpr int MinScrollLength = 10;
    if (args.empty()) {
        if (ch->lines == 0) {
            ch->send_line("|cPaging is set to maximum.|w");
            ch->lines = MaxScrollLength;
        } else {
            ch->send_line("|cYou currently display {} lines per page.|w", ch->lines + 2);
        }
        return;
    }
    auto maybe_num = args.try_shift_number();
    if (!maybe_num) {
        ch->send_line("|cYou must provide a number.|w");
        return;
    }
    auto lines = *maybe_num;
    if (lines == 0) {
        ch->send_line("|cPaging at maximum.|w");
        ch->lines = MaxScrollLength;
        return;
    }
    if (lines < MinScrollLength || lines > MaxScrollLength) {
        ch->send_line("|cYou must provide a reasonable number (between {} and {}).|w", MinScrollLength,
                      MaxScrollLength);
        return;
    }
    // Bug fix: the confirmation previously began with a stray '"' character
    // ("\"|cScroll set...") unlike every other message in this command.
    ch->send_line("|cScroll set to {} lines.|w", lines);
    ch->lines = lines - 2;
}
/* RT Commands to replace news, motd, imotd, etc from ROM */
// Each of these commands simply displays the corresponding help entry.
void do_motd(Char *ch) { do_help(ch, "motd"); }
void do_imotd(Char *ch) { do_help(ch, "imotd"); }
void do_rules(Char *ch) { do_help(ch, "rules"); }
void do_story(Char *ch) { do_help(ch, "story"); }
void do_changes(Char *ch) { do_help(ch, "changes"); }
void do_wizlist(Char *ch) { do_help(ch, "wizlist"); }
/* RT this following section holds all the auto commands from ROM, as well as
   replacements for config */
namespace {
// Tiny wrapper so a bool can be fmt-formatted as a coloured ON/OFF word
// (see the fmt::formatter<OnOff> specialisation that follows).
struct OnOff {
    bool b;
};
}
// fmt formatter rendering an OnOff flag as "|RON|w" or "|ROFF|w".
template <>
struct fmt::formatter<OnOff> {
    // No format-spec options are accepted; parsing consumes nothing.
    constexpr auto parse(format_parse_context &ctx) { return ctx.begin(); }
    template <typename FormatContext>
    auto format(const OnOff &onoff, FormatContext &ctx) {
        // fmt::runtime: the format string is chosen at run time, so skip
        // compile-time format-string checking.
        return fmt::format_to(ctx.out(), fmt::runtime(onoff.b ? "|RON|w" : "|ROFF|w"));
    }
};
// "autolist": report the state of every toggleable player preference.
void do_autolist(Char *ch) {
    /* lists most player flags */
    if (ch->is_npc())
        return;
    // Tabular ON/OFF section (formatted via the OnOff fmt wrapper).
    ch->send_line("   action     status");
    ch->send_line("---------------------");
    ch->send_line("ANSI colour    {}", OnOff{ch->pcdata->colour});
    ch->send_line("autoaffect     {}", OnOff{check_enum_bit(ch->comm, CommFlag::Affect)});
    ch->send_line("autoassist     {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoAssist)});
    ch->send_line("autoexit       {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoExit)});
    ch->send_line("autogold       {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoGold)});
    ch->send_line("autoloot       {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoLoot)});
    ch->send_line("autopeek       {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoPeek)});
    ch->send_line("autosac        {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoSac)});
    ch->send_line("autosplit      {}", OnOff{check_enum_bit(ch->act, PlayerActFlag::PlrAutoSplit)});
    ch->send_line("prompt         {}", OnOff{check_enum_bit(ch->comm, CommFlag::Prompt)});
    ch->send_line("combine items  {}", OnOff{check_enum_bit(ch->comm, CommFlag::Combine)});
    // Prose section for the remaining preferences.
    if (!check_enum_bit(ch->act, PlayerActFlag::PlrCanLoot))
        ch->send_line("Your corpse is safe from thieves.");
    else
        ch->send_line("Your corpse may be looted.");
    if (check_enum_bit(ch->act, PlayerActFlag::PlrNoSummon))
        ch->send_line("You cannot be summoned.");
    else
        ch->send_line("You can be summoned.");
    if (check_enum_bit(ch->act, PlayerActFlag::PlrNoFollow))
        ch->send_line("You do not welcome followers.");
    else
        ch->send_line("You accept followers.");
    if (check_enum_bit(ch->comm, CommFlag::Brief))
        ch->send_line("Only brief descriptions are being shown.");
    else
        ch->send_line("Full descriptions are being shown.");
    if (check_enum_bit(ch->comm, CommFlag::Compact))
        ch->send_line("Compact mode is set.");
    else
        ch->send_line("Compact mode is not set.");
    if (check_enum_bit(ch->comm, CommFlag::ShowAfk))
        ch->send_line("Messages sent to you will be shown when afk.");
    else
        ch->send_line("Messages sent to you will not be shown when afk.");
    if (check_enum_bit(ch->comm, CommFlag::ShowDefence))
        ch->send_line("Shield blocks, parries, and dodges are being shown.");
    else
        ch->send_line("Shield blocks, parries, and dodges are not being shown.");
}
// Toggle whether active affects are listed in the score display.
void do_autoaffect(Char *ch) {
    if (ch->is_npc())
        return;
    if (!check_enum_bit(ch->comm, CommFlag::Affect)) {
        set_enum_bit(ch->comm, CommFlag::Affect);
        ch->send_line("Affects will now be shown in score.");
    } else {
        clear_enum_bit(ch->comm, CommFlag::Affect);
        ch->send_line("Autoaffect removed.");
    }
}
// Toggle automatic assistance of group members in combat.
void do_autoassist(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoAssist);
    if (enabled) {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoAssist);
        ch->send_line("Autoassist removed.");
    } else {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoAssist);
        ch->send_line("You will now assist when needed.");
    }
}
// Toggle automatic display of obvious exits when looking at a room.
void do_autoexit(Char *ch) {
    if (ch->is_npc())
        return;
    const bool showing = check_enum_bit(ch->act, PlayerActFlag::PlrAutoExit);
    if (!showing) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoExit);
        ch->send_line("Exits will now be displayed.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoExit);
        ch->send_line("Exits will no longer be displayed.");
    }
}
// Toggle automatic looting of gold.
void do_autogold(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoGold);
    if (!enabled) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoGold);
        ch->send_line("Automatic gold looting set.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoGold);
        ch->send_line("Autogold removed.");
    }
}
// Toggle automatic looting of corpses.
void do_autoloot(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoLoot);
    if (!enabled) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoLoot);
        ch->send_line("Automatic corpse looting set.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoLoot);
        ch->send_line("Autolooting removed.");
    }
}
// Toggle automatic peeking at a character's inventory when looking at them.
void do_autopeek(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoPeek);
    if (!enabled) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoPeek);
        ch->send_line("Automatic peeking set.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoPeek);
        ch->send_line("Autopeeking removed.");
    }
}
// Toggle automatic sacrificing of corpses.
void do_autosac(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoSac);
    if (!enabled) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoSac);
        ch->send_line("Automatic corpse sacrificing set.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoSac);
        ch->send_line("Autosacrificing removed.");
    }
}
// Toggle automatic splitting of gold with the group.
void do_autosplit(Char *ch) {
    if (ch->is_npc())
        return;
    const bool enabled = check_enum_bit(ch->act, PlayerActFlag::PlrAutoSplit);
    if (!enabled) {
        set_enum_bit(ch->act, PlayerActFlag::PlrAutoSplit);
        ch->send_line("Automatic gold splitting set.");
    } else {
        clear_enum_bit(ch->act, PlayerActFlag::PlrAutoSplit);
        ch->send_line("Autosplitting removed.");
    }
}
// Toggle between brief and full room descriptions.
void do_brief(Char *ch) {
    const bool brief_now = check_enum_bit(ch->comm, CommFlag::Brief);
    if (brief_now) {
        clear_enum_bit(ch->comm, CommFlag::Brief);
        ch->send_line("Full descriptions activated.");
    } else {
        set_enum_bit(ch->comm, CommFlag::Brief);
        ch->send_line("Short descriptions activated.");
    }
}
// Toggle ANSI colour output for the player.
void do_colour(Char *ch) {
    if (ch->is_npc())
        return;
    auto &colour = ch->pcdata->colour;
    if (!colour) {
        colour = true;
        ch->send_line("You feel more |RC|GO|BL|rO|gU|bR|cF|YU|PL|W!.|w");
    } else {
        ch->send_line("You feel less COLOURFUL.");
        colour = false;
    }
}
// Toggle whether messages sent to the character are echoed while AFK.
void do_showafk(Char *ch) {
    const bool showing = check_enum_bit(ch->comm, CommFlag::ShowAfk);
    if (showing) {
        clear_enum_bit(ch->comm, CommFlag::ShowAfk);
        ch->send_line("Messages sent to you will now not be shown when afk.");
    } else {
        set_enum_bit(ch->comm, CommFlag::ShowAfk);
        ch->send_line("Messages sent to you will now be shown when afk.");
    }
}
// Toggle display of shield blocks, parries and dodges during combat.
void do_showdefence(Char *ch) {
    const bool showing = check_enum_bit(ch->comm, CommFlag::ShowDefence);
    if (showing) {
        clear_enum_bit(ch->comm, CommFlag::ShowDefence);
        ch->send_line("Shield blocks, parries and dodges will not be shown during combat.");
    } else {
        set_enum_bit(ch->comm, CommFlag::ShowDefence);
        ch->send_line("Shield blocks, parries and dodges will be shown during combat.");
    }
}
// Toggle compact output mode.
void do_compact(Char *ch) {
    const bool compact_now = check_enum_bit(ch->comm, CommFlag::Compact);
    if (compact_now) {
        clear_enum_bit(ch->comm, CommFlag::Compact);
        ch->send_line("Compact mode removed.");
    } else {
        set_enum_bit(ch->comm, CommFlag::Compact);
        ch->send_line("Compact mode set.");
    }
}
// "prompt off"/"prompt on" toggle prompt display; any other argument is
// installed as a custom prompt string (and turns prompts on).
void do_prompt(Char *ch, std::string_view argument) {
    /* PCFN 24-05-97 Oh dear - it seems that you can't set prompt while switched
       into a MOB. Let's change that.... */
    // Operate on the controlling player even when switched into a mob.
    if (ch = ch->player(); !ch)
        return;
    if (matches(argument, "off")) {
        clear_enum_bit(ch->comm, CommFlag::Prompt);
        ch->send_line("You will no longer see prompts.");
    } else if (matches(argument, "on")) {
        set_enum_bit(ch->comm, CommFlag::Prompt);
        ch->send_line("You will now see prompts.");
    } else {
        // Install the given text (tildes stripped) as the custom prompt.
        set_prompt(ch, smash_tilde(argument));
        ch->send_line("Ok - prompt set.");
        set_enum_bit(ch->comm, CommFlag::Prompt);
    }
}
// Toggle combining of duplicate items in inventory listings.
void do_combine(Char *ch) {
    const bool combining = check_enum_bit(ch->comm, CommFlag::Combine);
    if (combining) {
        clear_enum_bit(ch->comm, CommFlag::Combine);
        ch->send_line("Long inventory selected.");
    } else {
        set_enum_bit(ch->comm, CommFlag::Combine);
        ch->send_line("Combined inventory selected.");
    }
}
// Toggle whether other players may loot this player's corpse.
void do_noloot(Char *ch) {
    if (ch->is_npc())
        return;
    const bool lootable = check_enum_bit(ch->act, PlayerActFlag::PlrCanLoot);
    if (lootable) {
        clear_enum_bit(ch->act, PlayerActFlag::PlrCanLoot);
        ch->send_line("Your corpse is now safe from thieves.");
    } else {
        set_enum_bit(ch->act, PlayerActFlag::PlrCanLoot);
        ch->send_line("Your corpse may now be looted.");
    }
}
// Toggle whether the player accepts followers. Turning following off also
// sheds any existing followers immediately.
void do_nofollow(Char *ch) {
    if (ch->is_npc())
        return;
    const bool refusing = check_enum_bit(ch->act, PlayerActFlag::PlrNoFollow);
    if (refusing) {
        ch->send_line("You now accept followers.");
        clear_enum_bit(ch->act, PlayerActFlag::PlrNoFollow);
    } else {
        ch->send_line("You no longer accept followers.");
        set_enum_bit(ch->act, PlayerActFlag::PlrNoFollow);
        die_follower(ch);
    }
}
// Toggle summon immunity. NPCs track this in their tolerance flags, players
// as an act flag; the messages are identical either way.
void do_nosummon(Char *ch) {
    if (ch->is_npc()) {
        const bool immune = check_enum_bit(ch->imm_flags, ToleranceFlag::Summon);
        if (immune) {
            ch->send_line("You are no longer immune to summon.");
            clear_enum_bit(ch->imm_flags, ToleranceFlag::Summon);
        } else {
            ch->send_line("You are now immune to summoning.");
            set_enum_bit(ch->imm_flags, ToleranceFlag::Summon);
        }
        return;
    }
    const bool immune = check_enum_bit(ch->act, PlayerActFlag::PlrNoSummon);
    if (immune) {
        ch->send_line("You are no longer immune to summon.");
        clear_enum_bit(ch->act, PlayerActFlag::PlrNoSummon);
    } else {
        ch->send_line("You are now immune to summoning.");
        set_enum_bit(ch->act, PlayerActFlag::PlrNoSummon);
    }
}
// Describe an object to the character via the lore skill. A failed check
// shows only the plain description; success (or being an NPC) additionally
// casts "identify" on the object, with casting lag for mortals.
// NOTE(review): the first branch is the FAILURE case (number_percent() above
// the skill value), yet it still shows the description — confirm intended.
void do_lore(Char *ch, Object *obj, std::string_view description) {
    if (ch->is_pc() && number_percent() > ch->get_skill(skill_lookup("lore"))) {
        // Failed the roll: description only, and a chance to improve from failure.
        ch->send_line(description);
        check_improve(ch, gsn_lore, false, 1);
    } else {
        const auto identify = skill_lookup("identify");
        if (ch->is_mortal())
            ch->wait_state(skill_table[identify].beats); // lag as if casting identify
        ch->send_line(description);
        check_improve(ch, gsn_lore, true, 1);
        auto spell_target = SpellTarget(obj);
        (*skill_table[identify].spell_fun)(identify, ch->level, ch, spell_target);
    }
}
// Helpers shared by the look/examine commands.
namespace {
// Show the room to the character: coloured name, full description (unless the
// viewer has brief mode on and force_full is false), auto-exits if enabled,
// then the room's objects and occupants.
void room_look(const Char &ch, bool force_full) {
    ch.send_line("|R{}|w\n\r", ch.in_room->name);
    if (force_full || !ch.is_comm_brief()) {
        ch.send_to("  {}", ch.in_room->description);
    }
    if (ch.should_autoexit()) {
        ch.send_line("");
        do_exits(&ch, "auto");
    }
    show_list_to_char(ch.in_room->contents, &ch, false, false);
    show_char_to_char(ch.in_room->people, &ch);
}
// Look inside a container-like object. Drink containers report their liquid
// and rough fullness; containers and corpses list their contents unless
// closed; anything else is rejected.
void look_in_object(const Char &ch, const Object &obj) {
    switch (obj.type) {
    default: ch.send_line("That is not a container."); break;
    case ObjectType::Drink: {
        // value[1] is the amount remaining, value[2] the liquid index
        // (value[0] appears to be the capacity, used for the fullness ratio).
        if (obj.value[1] <= 0) {
            ch.send_line("It is empty.");
            break;
        }
        const auto *liquid = Liquid::get_by_index(obj.value[2]);
        if (!liquid) {
            bug("{} attempted to look in a drink containing an unknown liquid: {} {} -> {}", ch.name,
                obj.objIndex->vnum, obj.short_descr, obj.value[2]);
            return;
        }
        const auto &liq_color = liquid->color;
        // Report <1/4 full as "less than", up to 3/4 as "about", else "more than".
        ch.send_line("It's {} full of a{} {} liquid.",
                     obj.value[1] < obj.value[0] / 4 ? "less than"
                     : obj.value[1] < 3 * obj.value[0] / 4 ? "about" : "more than",
                     is_vowel(liq_color[0]) ? "n" : "", liq_color);
        break;
    }
    case ObjectType::Container:
    case ObjectType::Npccorpse:
    case ObjectType::Pccorpse:
        if (check_enum_bit(obj.value[1], ContainerFlag::Closed)) {
            ch.send_line("It is closed.");
            break;
        }
        act("$p contains:", &ch, &obj, nullptr, To::Char);
        show_list_to_char(obj.contains, &ch, true, true);
        break;
    }
}
// Find the extra description whose keyword list matches "name", if any.
std::optional<std::string_view> get_extra_descr(auto name, const auto &extra_descs) {
    if (auto it = ranges::find_if(extra_descs, [&name](const auto &ed) { return is_name(name, ed.keyword); });
        it != extra_descs.end())
        return it->description;
    return std::nullopt;
}
// Look for an extra description on the object instance first, then fall back
// to the object's prototype (objIndex).
std::optional<std::string_view> try_get_obj_descr(const Object &obj, std::string_view name) {
    if (auto opt_desc = get_extra_descr(name, obj.extra_descr))
        return opt_desc;
    return get_extra_descr(name, obj.objIndex->extra_descr);
}
// Try to handle "look <obj>" / "look 2.<obj>": first the character's
// inventory (shown via lore), then the room. Returns true when the command
// was handled, including the "You only see N here" case; false lets the
// caller try other interpretations.
// NOTE(review): in the room loop an object matching both an extra
// description AND its name bumps `count` twice (the inventory loop uses
// else-if and does not) — confirm whether that asymmetry is intended.
bool handled_as_look_at_object(Char &ch, std::string_view first_arg) {
    auto &&[number, obj_desc] = number_argument(first_arg);
    int count = 0;
    for (auto *obj : ch.carrying) {
        if (!ch.can_see(*obj))
            continue;
        if (const auto opt_desc = try_get_obj_descr(*obj, obj_desc)) {
            if (++count == number) {
                do_lore(&ch, obj, *opt_desc);
                return true;
            } else
                continue;
        } else if (is_name(obj_desc, obj->name)) {
            if (++count == number) {
                do_lore(&ch, obj, obj->description);
                return true;
            }
        }
    }
    for (auto *obj : ch.in_room->contents) {
        if (!ch.can_see(*obj))
            continue;
        if (const auto desc = try_get_obj_descr(*obj, obj_desc)) {
            if (++count == number) {
                ch.send_to(*desc);
                return true;
            }
        }
        if (is_name(obj_desc, obj->name))
            if (++count == number) {
                ch.send_line("{}", obj->description);
                return true;
            }
    }
    // Fewer matches than the requested ordinal: report how many we did see.
    if (count > 0 && count != number) {
        if (count == 1)
            ch.send_line("You only see one {} here.", obj_desc);
        else
            ch.send_line("You only see {} {}s here.", count, obj_desc);
        return true;
    }
    return false;
}
// Describe what lies in a direction: the exit's description (if any), plus
// open/closed state for doors that have a keyword.
void look_direction(const Char &ch, Direction direction) {
    const auto &exit = ch.in_room->exits[direction];
    if (!exit) {
        ch.send_line("Nothing special there.");
        return;
    }
    if (!exit->description.empty())
        ch.send_to(exit->description);
    else
        ch.send_line("Nothing special there.");
    if (!exit->keyword.empty()) {
        if (check_enum_bit(exit->exit_info, ExitFlag::Closed)) {
            act("The $d is closed.", &ch, nullptr, exit->keyword, To::Char);
        } else if (check_enum_bit(exit->exit_info, ExitFlag::IsDoor)) {
            act("The $d is open.", &ch, nullptr, exit->keyword, To::Char);
        }
    }
}
// Common preconditions for any look: attached descriptor, conscious, awake,
// not blind, and enough light. Tells the character why when it fails.
bool check_look(Char *ch) {
    if (ch->desc == nullptr)
        return false;
    if (ch->is_pos_stunned_or_dying()) {
        ch->send_line("You can't see anything but stars!");
        return false;
    }
    if (ch->is_pos_sleeping()) {
        ch->send_line("You can't see anything, you're sleeping!");
        return false;
    }
    if (!check_blind(ch))
        return false;
    if (!ch->has_holylight() && room_is_dark(ch->in_room)) {
        ch->send_line("It is pitch black ... ");
        // Even in darkness you can still sense who else is present.
        show_char_to_char(ch->in_room->people, ch);
        return false;
    }
    return true;
}
} // namespace
// An automatic "look" (e.g. after movement): show the room honouring the
// character's brief setting.
void look_auto(Char *ch) {
    if (check_look(ch))
        room_look(*ch, false);
}
// The "look" command dispatcher. Tries, in priority order: the room itself
// ("look" / "look auto"), inside a container ("look in <obj>"), a person in
// the room, an object (inventory then room), a room extra description, and
// finally a direction.
void do_look(Char *ch, ArgParser args) {
    if (!check_look(ch))
        return;
    auto first_arg = args.shift();
    // A normal look, or a look auto to describe the room?
    if (first_arg.empty() || matches(first_arg, "auto")) {
        // An explicit bare "look" forces the full description even in brief mode.
        room_look(*ch, first_arg.empty());
        return;
    }
    // Look in something?
    if (matches_start(first_arg, "in")) {
        if (args.empty()) {
            ch->send_line("Look in what?");
            return;
        }
        if (auto *obj = get_obj_here(ch, args.shift()))
            look_in_object(*ch, *obj);
        else
            ch->send_line("You do not see that here.");
        return;
    }
    // Look at a person?
    if (auto *victim = get_char_room(ch, first_arg)) {
        show_char_to_char_1(victim, ch);
        return;
    }
    // Look at an object?
    if (handled_as_look_at_object(*ch, first_arg))
        return;
    // Look at something in the extra description of the room?
    if (const auto desc = get_extra_descr(first_arg, ch->in_room->extra_descr)) {
        ch->send_to(*desc);
        return;
    }
    // Look in a direction?
    if (auto opt_direction = try_parse_direction(first_arg)) {
        look_direction(*ch, *opt_direction);
        return;
    }
    ch->send_line("You do not see that here.");
}
// "examine <target>": look at the target and, when it is a container-like
// object (drink, container or corpse), also look inside it.
void do_examine(Char *ch, ArgParser args) {
    if (args.empty()) {
        ch->send_line("Examine what?");
        return;
    }
    auto arg = args.shift();
    do_look(ch, ArgParser(arg));
    if (auto *obj = get_obj_here(ch, arg)) {
        switch (obj->type) {
        default: break;
        case ObjectType::Drink:
        case ObjectType::Container:
        case ObjectType::Npccorpse:
        case ObjectType::Pccorpse:
            ch->send_line("When you look inside, you see:");
            // NOTE(review): ArgParser is built from a temporary string here —
            // assumes it copies or fully consumes its input within this call;
            // confirm it does not retain a view into the temporary.
            do_look(ch, ArgParser(fmt::format("in {}", arg)));
            break;
        }
    }
}
/*
* Thanks to Zrin for auto-exit part.
*/
// List the obvious exits of the current room. With the argument "auto" the
// compact one-line "[Exits: ...]" form (used by autoexit) is produced.
// Closed exits and rooms the viewer cannot see are omitted.
void do_exits(const Char *ch, std::string_view arguments) {
    const auto is_compact = matches(arguments, "auto");
    if (!check_blind(ch))
        return;
    std::string buf = is_compact ? "|W[Exits:" : "Obvious exits:\n\r";
    auto found = false;
    for (auto direction : all_directions) {
        if (const auto &exit = ch->in_room->exits[direction]; exit && exit->u1.to_room
            && can_see_room(ch, exit->u1.to_room)
            && !check_enum_bit(exit->exit_info, ExitFlag::Closed)) {
            found = true;
            if (is_compact) {
                buf += fmt::format(" {}", to_string(direction));
            } else {
                // Dark destinations are listed but unnamed unless the viewer
                // has holylight.
                buf += fmt::format("{:<5} - {}\n\r", initial_caps_only(to_string(direction)),
                                   !ch->has_holylight() && room_is_dark(exit->u1.to_room) ? "Too dark to tell"
                                                                                          : exit->u1.to_room->name);
            }
        }
    }
    if (!found)
        buf += is_compact ? " none" : "None.\n\r";
    if (is_compact)
        buf += "]\n\r|w";
    ch->send_to(buf);
}
// Report the character's wealth; players additionally see their experience
// and how much is needed for the next level.
void do_worth(Char *ch) {
    if (ch->is_npc()) {
        ch->send_line("You have {} gold.", (int)ch->gold);
        return;
    }
    const auto exp_to_level = (ch->level + 1) * exp_per_level(ch, ch->pcdata->points) - ch->exp;
    ch->send_line("You have {} gold, and {} experience ({} exp to level).", ch->gold, ch->exp, exp_to_level);
}
namespace {
// Describe how well armoured the character is against one damage class by
// bucketing the negated armour class into the descriptions below; characters
// of level 25+ also see the numeric value.
void describe_armour(Char *ch, const ArmourClass ac_slot, const char *name) {
    static const std::array armour_desc = {
        "hopelessly vulnerable to", "defenseless against", "barely protected from",
        "slightly armoured against", "somewhat armoured against", "armoured against",
        "well-armoured against", "very well-armoured against", "heavily armoured against",
        "superbly armoured against", "almost invulnerable to", "divinely armoured against"};
    // Armour ratings around -400 and beyond is labelled divine.
    static constexpr int ArmourBucketSize = 400 / armour_desc.size();
    const auto armour_class = -(ch->get_armour_class(ac_slot));
    int armour_bucket = armour_class / ArmourBucketSize;
    // Clamp into the table so extreme (or positive) ACs stay in range.
    auto armour_index = std::clamp(armour_bucket, 0, static_cast<int>(armour_desc.size()) - 1);
    if (ch->level < 25)
        ch->send_line("|CYou are|w: |y{} |W{}|w.", armour_desc[armour_index], name);
    else
        ch->send_line("|CYou are|w: |y{} |W{}|w. (|W{}|w)", armour_desc[armour_index], name, armour_class);
}
// Map an alignment in [-1000, 1000] onto a coloured one-word description.
const char *get_align_description(int align) {
    static const std::array align_descriptions = {"|Rsatanic", "|Rdemonic", "|Yevil", "|Ymean", "|Mneutral",
                                                  "|Gkind", "|Ggood", "|Wsaintly", "|Wangelic"};
    return align_descriptions[std::clamp((align + 1000) * 8 / 2000, 0,
                                         static_cast<int>(align_descriptions.size()) - 1)];
}
}
// The "score" sheet: identity, level/age, vitals, armour descriptions,
// attributes, alignment, immortal toggles, nutrition status and (when the
// Affect comm flag is set) active affects.
void do_score(Char *ch) {
    using namespace std::chrono;
    ch->send_line("|CYou are|w: |W{}{}|w.", ch->name, ch->is_npc() ? "" : ch->pcdata->title);
    Columner col2(*ch, 2);
    Columner col3(*ch, 3);
    // Show trust separately only when it differs from level.
    if (ch->level == ch->get_trust())
        col2.stat("Level", ch->level);
    else
        col2.kv("Level", "|W{}|w (trust |W{}|w)", ch->level, ch->get_trust());
    col2.kv("Age", "|W{}|w years (|W{}|w hours)", get_age(ch), duration_cast<hours>(ch->total_played()).count());
    col2.stat("Race", race_table[ch->race].name)
        .stat("Class", ch->is_npc() ? "mobile" : class_table[ch->class_num].name)
        .stat("Sex", ch->sex.name())
        .stat("Position", ch->position.short_description())
        .stat_of("Items", ch->carry_number, can_carry_n(ch))
        .stat_of("Weight", ch->carry_weight, can_carry_w(ch))
        .stat("Gold", ch->gold)
        .flush();
    col2.stat("Wimpy", ch->wimpy);
    // Mortals below hero level see how much experience remains to level.
    if (ch->is_pc() && ch->level < LEVEL_HERO)
        col2.kv("Score", "|W{}|w (|W{}|w to next level)", ch->exp,
                ((ch->level + 1) * exp_per_level(ch, ch->pcdata->points) - ch->exp));
    else
        col2.stat("Score", ch->exp);
    ch->send_line("");
    col3.stat_of("Hit", ch->hit, ch->max_hit)
        .stat_of("Mana", ch->mana, ch->max_mana)
        .stat_of("Move", ch->move, ch->max_move);
    describe_armour(ch, ArmourClass::Pierce, "piercing");
    describe_armour(ch, ArmourClass::Bash, "bashing");
    describe_armour(ch, ArmourClass::Slash, "slashing");
    describe_armour(ch, ArmourClass::Exotic, "magic");
    // Hit/damage rolls are hidden from low-level characters.
    if (ch->level >= 15) {
        col2.stat("Hit roll", ch->get_hitroll());
        col2.stat("Damage roll", ch->get_damroll());
    }
    ch->send_line("");
    col3.stat_eff("Strength", ch->perm_stat[Stat::Str], get_curr_stat(ch, Stat::Str))
        .stat_eff("Intelligence", ch->perm_stat[Stat::Int], get_curr_stat(ch, Stat::Int))
        .stat_eff("Wisdom", ch->perm_stat[Stat::Wis], get_curr_stat(ch, Stat::Wis));
    col2.stat_eff("Dexterity", ch->perm_stat[Stat::Dex], get_curr_stat(ch, Stat::Dex))
        .stat_eff("Constitution", ch->perm_stat[Stat::Con], get_curr_stat(ch, Stat::Con))
        .stat("Practices", ch->practice)
        .stat("Training sessions", ch->train);
    // The numeric alignment value is revealed from level 10.
    if (ch->level >= 10)
        col2.kv("Alignment", "{}|w (|W{}|w).", get_align_description(ch->alignment), ch->alignment);
    else
        col2.stat("Alignment", get_align_description(ch->alignment));
    col2.flush();
    if (ch->is_immortal()) {
        ch->send_line("");
        col3.stat("Holy light", check_enum_bit(ch->act, PlayerActFlag::PlrHolyLight) ? "on" : "off");
        if (check_enum_bit(ch->act, PlayerActFlag::PlrWizInvis))
            col3.stat("Invisible", ch->invis_level);
        if (check_enum_bit(ch->act, PlayerActFlag::PlrProwl))
            col3.stat("Prowl", ch->invis_level);
        col3.flush();
    }
    if (const auto opt_nutrition_msg = ch->describe_nutrition()) {
        ch->send_line(*opt_nutrition_msg);
    }
    if (check_enum_bit(ch->comm, CommFlag::Affect)) {
        ch->send_line("");
        do_affected(ch);
    }
}
// List the skill/spell affects currently on the character. Characters of
// level 20+ also see each affect's detailed effect.
void do_affected(Char *ch) {
    ch->send_line("|CYou are affected by|w:");
    if (ch->affected.empty()) {
        ch->send_line("Nothing.");
        return;
    }
    const bool show_details = ch->level >= 20;
    for (auto &af : ch->affected) {
        ch->send_line("|C{}|w: '{}'{}.", af.is_skill() ? "Skill" : "Spell", skill_table[af.type].name,
                      show_details ? af.describe_char_effect() : "");
    }
}
// Show the in-game time plus the server boot time and current system time.
void do_time(Char *ch) {
    ch->send_line(time_info.describe());
    ch->send_line("Xania started up at {}Z.", formatted_time(boot_time));
    ch->send_line("The system time is {}Z.", formatted_time(current_time));
}
// Describe the current weather; only available outdoors.
void do_weather(Char *ch) {
    if (ch->is_inside()) {
        ch->send_line("You can't see the weather indoors.");
        return;
    }
    ch->send_to(weather_info.describe() + "\n\r");
}
// Display a help topic (defaulting to "summary"), subject to trust level.
void do_help(Char *ch, std::string_view argument) {
    const std::string topic{argument.empty() ? "summary" : argument};
    auto *help = HelpList::singleton().lookup(ch->get_trust(), topic);
    if (!help) {
        ch->send_line("No help on that word.");
        return;
    }
    // Show the keyword header, except for negative-level entries and imotd.
    if (help->level() >= 0 && !matches(topic, "imotd"))
        ch->send_line("|W{}|w", help->keyword());
    ch->page_to(help->text());
}
namespace {
// Three-letter "class" column of a who line; the top immortal levels get
// special labels instead of the class's who_name.
std::string_view who_class_name_of(const Char &wch) {
    switch (wch.level) {
    case MAX_LEVEL - 0: return "|WIMP|w"sv;
    case MAX_LEVEL - 1: return "|YCRE|w"sv;
    case MAX_LEVEL - 2: return "|YSUP|w"sv;
    case MAX_LEVEL - 3: return "|GDEI|w"sv;
    case MAX_LEVEL - 4: return "|GGOD|w"sv;
    case MAX_LEVEL - 5: return "|gIMM|w"sv;
    case MAX_LEVEL - 6: return "|gDEM|w"sv;
    case MAX_LEVEL - 7: return "ANG"sv;
    case MAX_LEVEL - 8: return "AVA"sv;
    }
    return class_table[wch.class_num].who_name;
}
// Race column; non-player races render as blank space.
std::string_view who_race_name_of(const Char &wch) {
    return wch.race < MAX_PC_RACE ? pc_race_table[wch.race].who_name : " "sv;
}
// Clan column (empty for the unclanned).
std::string_view who_clan_name_of(const Char &wch) { return wch.clan() ? wch.clan()->whoname : ""sv; }
// Format one who-list line: level/race/class, clan, killer/thief/AFK tags,
// name and title, plus wizi/prowl annotations visible to immortal viewers.
std::string who_line_for(const Char &to, const Char &wch) {
    return fmt::format(
        "[{:3} {} {}] {}{}{}{}{}{}|w{}{}\n\r", wch.level, who_race_name_of(wch), who_class_name_of(wch),
        who_clan_name_of(wch), check_enum_bit(wch.act, PlayerActFlag::PlrKiller) ? "(|RKILLER|w) " : "",
        check_enum_bit(wch.act, PlayerActFlag::PlrThief) ? "(|RTHIEF|w) " : "",
        check_enum_bit(wch.act, PlayerActFlag::PlrAfk) ? "(|cAFK|w) " : "", wch.name,
        wch.is_pc() ? wch.pcdata->title : "",
        wch.is_wizinvis() && to.is_immortal() ? fmt::format(" |g(Wizi at level {})|w", wch.invis_level) : "",
        wch.is_prowlinvis() && to.is_immortal() ? fmt::format(" |g(Prowl level {})|w", wch.invis_level) : "");
}
}
/* whois command */
// "whois <name>": show the who-list line of every visible online player
// whose name starts with the given prefix.
void do_whois(Char *ch, ArgParser args) {
    if (args.empty()) {
        ch->send_line("You must provide a name.");
        return;
    }
    const auto filter = args.shift();
    std::string output;
    for (auto &d : descriptors().all_visible_to(*ch)) {
        auto *wch = d.person();
        // TODO: can or should this be part of all_visible_to?
        if (!can_see(ch, wch))
            continue;
        if (matches_start(filter, wch->name))
            output += who_line_for(*ch, *wch);
    }
    if (!output.empty())
        ch->page_to(output);
    else
        ch->send_line("No one of that name is playing.");
}
/*
* New 'who' command originally by Alander of Rivers of Mud.
*/
// "who [level-low] [level-high] [class|race|clan|i]...": list visible online
// players, optionally restricted by a level range, any number of classes,
// races or clans, and/or immortals only ("i").
void do_who(Char *ch, ArgParser args) {
    int iClass;
    int iRace;
    int iLevelLower = 0;
    int iLevelUpper = MAX_LEVEL;
    int nNumber;
    int nMatch;
    bool rgfClass[MAX_CLASS]{};
    bool rgfRace[MAX_PC_RACE]{};
    std::unordered_set<const Clan *> rgfClan;
    bool fClassRestrict = false;
    bool fRaceRestrict = false;
    bool fClanRestrict = false;
    bool fImmortalOnly = false;
    /*
     * Parse arguments.
     */
    nNumber = 0;
    for (;;) {
        auto arg = args.shift();
        if (arg.empty())
            break;
        if (is_number(arg)) {
            // Numeric arguments are the level bounds, at most two of them.
            switch (++nNumber) {
            case 1: iLevelLower = parse_number(arg); break;
            case 2: iLevelUpper = parse_number(arg); break;
            default: ch->send_line("Only two level numbers allowed."); return;
            }
        } else {
            /*
             * Look for classes to turn on.
             */
            if (arg[0] == 'i') {
                fImmortalOnly = true;
            } else {
                // Try class, then race, then clan; reject anything unknown.
                iClass = class_lookup(arg);
                if (iClass == -1) {
                    iRace = race_lookup(arg);
                    if (iRace == 0 || iRace >= MAX_PC_RACE) {
                        /* Check if clan exists */
                        const Clan *clan_ptr = nullptr; // TODO this could be much better phrased
                        for (auto &clan : clantable) {
                            if (is_name(arg, clan.name))
                                clan_ptr = &clan;
                        }
                        /* Check for NO match on clans */
                        if (!clan_ptr) {
                            ch->send_line("That's not a valid race, class, or clan.");
                            return;
                        } else
                        /* It DID match! */
                        {
                            fClanRestrict = true;
                            rgfClan.emplace(clan_ptr);
                        }
                    } else {
                        fRaceRestrict = true;
                        rgfRace[iRace] = true;
                    }
                } else {
                    fClassRestrict = true;
                    rgfClass[iClass] = true;
                }
            }
        }
    }
    /*
     * Now show matching chars.
     */
    nMatch = 0;
    std::string output;
    for (auto &d : descriptors().all_visible_to(*ch)) {
        // Check for match against restrictions.
        // Don't use trust as that exposes trusted mortals.
        // added Faramir 13/8/96 because switched imms were visible to all
        if (!can_see(ch, d.person()))
            continue;
        auto *wch = d.person();
        if (wch->level < iLevelLower || wch->level > iLevelUpper || (fImmortalOnly && wch->level < LEVEL_HERO)
            || (fClassRestrict && !rgfClass[wch->class_num]) || (fRaceRestrict && !rgfRace[wch->race]))
            continue;
        if (fClanRestrict) {
            if (!wch->clan() || rgfClan.count(wch->clan()) == 0)
                continue;
        }
        nMatch++;
        output += who_line_for(*ch, *wch);
    }
    output += fmt::format("\n\rPlayers found: {}\n\r", nMatch);
    ch->page_to(output);
}
// Report how many visible characters are online and track today's maximum.
void do_count(Char *ch) {
    auto count = static_cast<size_t>(ranges::distance(descriptors().all_visible_to(*ch)));
    max_on = std::max(count, max_on);
    if (max_on == count)
        ch->send_line("There are {} characters on, the most so far today.", count);
    else
        // Fixed typo: was "the most son today".
        ch->send_line("There are {} characters on, the most so far today was {}.", count, max_on);
}
// List everything the character is carrying.
void do_inventory(Char *ch) {
    ch->send_line("You are carrying:");
    show_list_to_char(ch->carrying, ch, true, true);
}
// List everything the character has equipped, slot by slot. Items the
// character cannot see are shown as "something.".
void do_equipment(Char *ch) {
    ch->send_line("You are using:");
    bool found_any = false;
    for (const auto &wear : WearFilter::wearable()) {
        const auto *obj = get_eq_char(ch, wear);
        if (!obj)
            continue;
        found_any = true;
        ch->send_line("{:<20}{}", wear_string_for(obj, wear),
                      can_see_obj(ch, obj) ? format_obj_to_char(obj, ch, true) : "something.");
    }
    if (!found_any)
        ch->send_line("Nothing.");
}
namespace {
// Find the first worn, visible item of the same type that shares a wear
// location (ignoring the "take" flag) with the given object, or nullptr.
Object *find_comparable(Char *ch, Object *obj_to_compare_to) {
    const auto wear_mask = ~to_int(ObjectWearFlag::Take);
    for (auto *candidate : ch->carrying) {
        if (candidate->wear_loc == Wear::None)
            continue;
        if (!can_see_obj(ch, candidate))
            continue;
        if (obj_to_compare_to->type != candidate->type)
            continue;
        if ((obj_to_compare_to->wear_flags & candidate->wear_flags & wear_mask) != 0)
            return candidate;
    }
    return nullptr;
}
}
// "compare <item> [item]": report which of two carried items looks better.
// With one argument, compares against a currently worn item of the same kind.
// Only armour and weapons are rated; everything else is incomparable.
void do_compare(Char *ch, ArgParser args) {
    if (args.empty()) {
        ch->send_line("Compare what to what?");
        return;
    }
    auto *obj1 = ch->find_in_inventory(args.shift());
    if (!obj1) {
        ch->send_line("You do not have that item.");
        return;
    }
    Object *obj2{};
    if (args.empty()) {
        // No second argument: compare against something being worn.
        obj2 = find_comparable(ch, obj1);
        if (!obj2) {
            ch->send_line("You aren't wearing anything comparable.");
            return;
        }
    } else {
        obj2 = ch->find_in_inventory(args.shift());
        if (!obj2) {
            ch->send_line("You do not have that item.");
            return;
        }
    }
    std::string_view msg;
    int value1 = 0;
    int value2 = 0;
    if (obj1 == obj2) {
        msg = "You compare $p to itself. It looks about the same.";
    } else if (obj1->type != obj2->type) {
        msg = "You can't compare $p and $P.";
    } else {
        switch (obj1->type) {
        default: msg = "You can't compare $p and $P."; break;
        case ObjectType::Armor:
            // Armour rating: sum of its three value slots.
            value1 = obj1->value[0] + obj1->value[1] + obj1->value[2];
            value2 = obj2->value[0] + obj2->value[1] + obj2->value[2];
            break;
        case ObjectType::Weapon:
            // Weapon rating derived from its damage values.
            value1 = (1 + obj1->value[2]) * obj1->value[1];
            value2 = (1 + obj2->value[2]) * obj2->value[1];
            break;
        }
    }
    // An empty msg means both items were rated; pick the verdict by value.
    if (msg.empty()) {
        if (value1 == value2)
            msg = "$p and $P look about the same.";
        else if (value1 > value2)
            msg = "$p looks better than $P.";
        else
            msg = "$p looks worse than $P.";
    }
    act(msg, ch, obj1, obj2, To::Char);
}
// Show the Diku credits via the help system.
void do_credits(Char *ch) {
    do_help(ch, "diku");
}
// "where": without arguments, list other players (and your pet) in the same
// area; with a name, locate the first matching visible, non-hiding,
// non-sneaking character in the area.
void do_where(Char *ch, ArgParser args) {
    if (args.empty()) {
        ch->send_line("|cYou are in {}\n\rPlayers near you:|w", ch->in_room->area->short_name());
        auto found = false;
        for (auto &victim : descriptors().all_visible_to(*ch) | DescriptorFilter::except(*ch)
                 | DescriptorFilter::same_area(*ch) | DescriptorFilter::to_character()) {
            if (victim.is_pc()) {
                found = true;
                ch->send_line("|W{:<28}|w {}", victim.name, victim.in_room->name);
            }
        }
        if (!found)
            ch->send_line("None");
        if (ch->pet && ch->pet->in_room->area == ch->in_room->area) {
            ch->send_line("You sense that your pet is near {}.", ch->pet->in_room->name);
        }
    } else {
        auto found = false;
        auto name = args.shift();
        for (auto *victim : char_list) {
            // Hiding or sneaking characters cannot be located by name.
            if (victim->in_room != nullptr && victim->in_room->area == ch->in_room->area && !victim->is_aff_hide()
                && !victim->is_aff_sneak() && can_see(ch, victim) && victim != ch && is_name(name, victim->name)) {
                found = true;
                ch->send_line("|W{:<28}|w {}", ch->describe(*victim), victim->in_room->name);
                break;
            }
        }
        if (!found)
            act("You didn't find any $T.", ch, nullptr, name, To::Char);
    }
}
// Estimate how dangerous fighting a would-be opponent is, based on the level
// difference. High-level characters also get a full mob stat dump.
void do_consider(Char *ch, ArgParser args) {
    const auto arg = args.shift();
    if (arg.empty()) {
        ch->send_line("Consider killing whom?");
        return;
    }
    auto *victim = get_char_room(ch, arg);
    if (!victim) {
        ch->send_line("They're not here.");
        return;
    }
    if (is_safe(ch, victim)) {
        ch->send_line("Don't even think about it.");
        return;
    }
    const int diff = victim->level - ch->level;
    const char *msg;
    if (diff <= -10)
        msg = "You can kill $N naked and weaponless.";
    else if (diff <= -5)
        msg = "$N is no match for you.";
    else if (diff <= -2)
        msg = "$N looks like an easy kill.";
    else if (diff <= 1)
        msg = "The perfect match!";
    else if (diff <= 4)
        msg = "$N says 'Do you feel lucky, punk?'.";
    else if (diff <= 9)
        msg = "$N laughs at you mercilessly.";
    else
        msg = "|RDeath will thank you for your gift.|w";
    act(msg, ch, nullptr, victim, To::Char);
    if (ch->level >= LEVEL_CONSIDER)
        do_mstat(ch, arg);
}
// Store a player's prompt string. NPCs have no pcdata, so calling this for
// one is a programming error and is just logged.
void set_prompt(Char *ch, std::string_view prompt) {
    if (ch->is_npc()) {
        bug("Set_prompt: NPC.");
        return;
    }
    ch->pcdata->prompt = prompt;
}
// Set the player's title. Tildes are stripped (player-file format) and the
// result is truncated to 45 characters.
void do_title(Char *ch, std::string_view argument) {
    if (ch->is_npc())
        return;
    if (argument.empty()) {
        ch->send_line("Change your title to what?");
        return;
    }
    static constexpr size_t MaxTitleLength = 45;
    auto new_title = smash_tilde(argument);
    if (new_title.length() > MaxTitleLength)
        new_title.resize(MaxTitleLength);
    ch->set_title(new_title);
    ch->send_line("Ok.");
}
// Edit the character's description:
//   "+<text>"  appends a line,
//   "-"        removes the last line,
//   <text>     replaces the whole description,
//   (nothing)  just shows the current description.
void do_description(Char *ch, std::string_view argument) {
    if (auto desc_line = smash_tilde(argument); !desc_line.empty()) {
        if (desc_line.front() == '+') {
            ch->description += fmt::format("{}\n\r", ltrim(desc_line.substr(1)));
        } else if (desc_line == "-") {
            if (ch->description.empty()) {
                ch->send_line("You have no description.");
                return;
            }
            ch->description = remove_last_line(ch->description);
        } else {
            ch->description = desc_line + "\n\r";
        }
        // NOTE(review): the length check runs after the edit has been applied,
        // so an over-long description is reported but not rolled back —
        // confirm this is acceptable.
        if (ch->description.size() >= MAX_STRING_LENGTH - 2) {
            ch->send_line("Description too long.");
            return;
        }
    }
    ch->send_to("Your description is:\n\r{}", ch->description.empty() ? "(None).\n\r" : ch->description);
}
// Announce current hp/mana/move/xp both to the character and to the room.
void do_report(Char *ch) {
    // NOTE(review): send_line presumably appends a line ending already, so
    // the trailing "\n\r" here likely emits a blank line — confirm intended.
    ch->send_line("You say 'I have {}/{} hp {}/{} mana {}/{} mv {} xp.'\n\r", ch->hit, ch->max_hit, ch->mana,
                  ch->max_mana, ch->move, ch->max_move, ch->exp);
    act(fmt::format("$n says 'I have {}/{} hp {}/{} mana {}/{} mv {} xp.'", ch->hit, ch->max_hit, ch->mana,
                    ch->max_mana, ch->move, ch->max_move, ch->exp),
        ch);
}
namespace {
// Locate an NPC in the room that offers practice sessions, if any.
Char *find_prac_mob(Room *room) {
    for (auto *occupant : room->people) {
        const bool is_trainer = occupant->is_npc() && check_enum_bit(occupant->act, CharActFlag::Practice);
        if (is_trainer)
            return occupant;
    }
    return nullptr;
}
}
// "practice [skill]": with no argument, tabulate current skill percentages.
// Otherwise spend one practice session at a practice mob; the gain depends
// on intelligence and the skill's difficulty, capped at the class's adept
// percentage.
void do_practice(Char *ch, ArgParser args) {
    if (ch->is_npc())
        return;
    if (args.empty()) {
        PracticeTabulator::tabulate(ch);
    } else {
        if (!ch->is_pos_awake()) {
            ch->send_line("In your dreams, or what?");
            return;
        }
        Char *mob = find_prac_mob(ch->in_room);
        if (!mob) {
            ch->send_line("You can't do that here.");
            return;
        }
        if (ch->practice <= 0) {
            ch->send_line("You have no practice sessions left.");
            return;
        }
        auto sn = skill_lookup(args.shift());
        if (sn < 0) {
            ch->send_line("You can't practice that.");
            return;
        }
        auto &skill_level = ch->pcdata->learned[sn]; // NOT ch.get_skill()
        // Must meet the skill's level requirement and already know it (>0).
        if (ch->level < get_skill_level(ch, sn) || skill_level < 1) {
            ch->send_line("You can't practice that.");
            return;
        }
        // NOTE(review): ch->is_npc() is always false here (NPCs bailed out at
        // the top), so adept always comes from the class table.
        auto adept = ch->is_npc() ? 100 : class_table[ch->class_num].skill_adept;
        if (skill_level >= adept) {
            ch->send_line("You are already learned at {}.", skill_table[sn].name);
        } else {
            ch->practice--;
            if (get_skill_trains(ch, sn) < 0) {
                skill_level += int_app[get_curr_stat(ch, Stat::Int)].learn / 4;
            } else {
                skill_level += int_app[get_curr_stat(ch, Stat::Int)].learn / get_skill_difficulty(ch, sn);
            }
            if (skill_level < adept) {
                act("You practice $T.", ch, nullptr, skill_table[sn].name, To::Char);
                act("$n practices $T.", ch, nullptr, skill_table[sn].name, To::Room);
            } else {
                // Clamp to adept and announce mastery.
                skill_level = adept;
                act("You are now learned at $T.", ch, nullptr, skill_table[sn].name, To::Char);
                act("$n is now learned at $T.", ch, nullptr, skill_table[sn].name, To::Room);
            }
        }
    }
}
/*
* 'Wimpy' originally by Dionysos.
*/
// Set the hit-point threshold at which the character automatically flees.
// Defaults to a fifth of max hit points; must be non-negative and at most
// half of max hit points.
void do_wimpy(Char *ch, ArgParser args) {
    const auto requested = args.try_shift_number().value_or(ch->max_hit / 5);
    if (requested < 0) {
        ch->send_line("Your courage exceeds your wisdom.");
    } else if (requested > ch->max_hit / 2) {
        ch->send_line("Such cowardice ill becomes you.");
    } else {
        ch->wimpy = requested;
        ch->send_line("Wimpy set to {} hit points.", requested);
    }
}
// "password <old> <new>": change the player's password. A wrong old password
// imposes a wait-state penalty; new passwords must meet the minimum length
// and the resulting hash may not contain '~' (player-file format).
void do_password(Char *ch, ArgParser args) {
    if (ch->is_npc())
        return;
    auto old_pass = std::string(args.shift());
    auto new_pass = std::string(args.shift());
    if (old_pass.empty() || new_pass.empty()) {
        ch->send_line("Syntax: password <old> <new>.");
        return;
    }
    if (!ch->pcdata->pwd.empty()) {
        // Verify the old password against the stored crypt() hash.
        if (auto old_crypt = crypt(old_pass.c_str(), ch->pcdata->pwd.c_str()); old_crypt != ch->pcdata->pwd) {
            ch->wait_state(40);
            ch->send_line("Wrong password. Wait 10 seconds.");
            return;
        }
    }
    // NOTE(review): the message hard-codes "five" — assumed to match
    // MinPasswordLen; confirm they stay in sync.
    if (new_pass.length() < MinPasswordLen) {
        ch->send_line("New password must be at least five characters long.");
        return;
    }
    /*
     * No tilde allowed because of player file format.
     */
    auto new_crypt = crypt(new_pass.c_str(), ch->name.c_str());
    if (matches_inside("~", new_crypt)) {
        ch->send_line("New password not acceptable, try again.");
        return;
    }
    ch->pcdata->pwd = new_crypt;
    save_char_obj(ch);
    ch->send_line("Ok.");
}
/* RT configure command SMASHED */
/* MrG Scan command */
// Scan outward in every direction (up to level/10 rooms, minimum 1) and
// report visible characters. Stops at missing/closed exits, unseen rooms and
// rooms already visited (to avoid cycles in labyrinthine areas).
void do_scan(Char *ch) {
    Room *current_place;
    int count_num_rooms;
    int num_rooms_scan = std::max(1, ch->level / 10);
    bool found_anything = false;
    // Seed with the current room so we never "scan back" into it.
    std::vector<sh_int> found_rooms{ch->in_room->vnum};
    ch->send_line("You can see around you :");
    /* Loop for each point of the compass */
    for (auto direction : all_directions) {
        /* No exits in that direction */
        current_place = ch->in_room;
        /* Loop for the distance see-able */
        for (count_num_rooms = 0; count_num_rooms < num_rooms_scan; count_num_rooms++) {
            const auto &exit = current_place->exits[direction];
            if (!exit)
                break;
            if (current_place = exit->u1.to_room; !current_place || !can_see_room(ch, exit->u1.to_room)
                || check_enum_bit(exit->exit_info, ExitFlag::Closed))
                break;
            // Eliminate cycles in labyrinthine areas.
            if (std::find(found_rooms.begin(), found_rooms.end(), exit->u1.to_room->vnum) != found_rooms.end())
                break;
            found_rooms.push_back(exit->u1.to_room->vnum);
            /* This loop goes through each character in a room and says
               whether or not they are visible */
            for (auto *current_person : current_place->people) {
                if (ch->can_see(*current_person)) {
                    ch->send_to(fmt::format("{} {:<5}: |W{}|w\n\r", count_num_rooms + 1,
                                            initial_caps_only(to_string(direction)), current_person->short_name()));
                    found_anything = true;
                }
            } /* Closes the for_each_char_loop */
        } /* Closes the for_each distance seeable loop */
    } /* closes main loop for each direction */
    if (!found_anything)
        ch->send_line("Nothing of great interest.");
}
/*
* alist to list all areas
*/
// List every loaded area: number, name, vnum range and source filename.
void do_alist(Char *ch) {
    const auto row_format = "{:3} {:29} {:<5}-{:>5} {:12}\n\r"sv;
    auto buffer = fmt::vformat(row_format, fmt::make_format_args("Num", "Area Name", "Lvnum", "Uvnum", "Filename"));
    for (auto &area : AreaList::singleton()) {
        buffer += fmt::vformat(row_format, fmt::make_format_args(area->num(), area->short_name(), area->lowest_vnum(),
                                                                 area->highest_vnum(), area->filename()));
    }
    ch->page_to(buffer);
}
/* do_prefix added 19-05-97 PCFN */
// Set or clear the player's command prefix (added 19-05-97 PCFN). Operates
// on the underlying player character so it works while switched into a mob.
void do_prefix(Char *ch, std::string_view argument) {
    ch = ch->player();
    if (!ch)
        return;
    auto prefix = smash_tilde(argument);
    if (prefix.length() > (MAX_STRING_LENGTH - 1))
        prefix.resize(MAX_STRING_LENGTH - 1);
    if (prefix.empty()) {
        if (ch->pcdata->prefix.empty()) {
            ch->send_line("No prefix to remove.");
        } else {
            ch->send_line("Prefix removed.");
            ch->pcdata->prefix.clear();
        }
        return;
    }
    ch->pcdata->prefix = prefix;
    ch->send_line("Prefix set to \"{}\"", ch->pcdata->prefix);
}
"object",
"vector"
] |
f6b5c768da1699fe4027fbd51765af60f5810804 | 893 | cpp | C++ | atcoder/abc047/D/main.cpp | xirc/cp-algorithm | 89c67cff2f00459c5bb020ab44bff5ae419a1728 | [
"Apache-2.0"
] | 8 | 2020-12-23T07:54:53.000Z | 2021-11-23T02:46:35.000Z | atcoder/abc047/D/main.cpp | xirc/cp-algorithm | 89c67cff2f00459c5bb020ab44bff5ae419a1728 | [
"Apache-2.0"
] | 1 | 2020-11-07T13:22:29.000Z | 2020-12-20T12:54:00.000Z | atcoder/abc047/D/main.cpp | xirc/cp-algorithm | 89c67cff2f00459c5bb020ab44bff5ae419a1728 | [
"Apache-2.0"
] | 1 | 2021-01-16T03:40:10.000Z | 2021-01-16T03:40:10.000Z | #include <bits/stdc++.h>
using namespace std;
using ll = int64_t;
using ff = long double;
int N, T;
vector<int> A;
int solve() {
vector<int> DPmax(N, 0);
DPmax[N-1] = A[N-1];
for (int i = N - 2; i >= 0; --i) {
DPmax[i] = max(A[i], DPmax[i+1]);
}
vector<int> is;
ll max_prof = 0;
for (int i = N - 2; i >= 0; --i) {
ll prof = max(DPmax[i+1] - A[i], 0) * (ll(T) / 2);
if (prof > max_prof) {
is.clear();
is.push_back(i);
max_prof = prof;
} else if (prof == max_prof) {
is.push_back(i);
}
}
assert((int)is.size() > 0);
return is.size();
}
// Read N, T and the N prices, then print the number of optimal choices.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    cin >> N >> T;
    A.assign(N, 0);
    for (auto &value : A) {
        cin >> value;
    }
    cout << solve() << endl;
    return 0;
}
"vector"
] |
f6ba8713c2005cc3343ed3456896fedaea832c0d | 19,598 | cpp | C++ | tests/Cases/Container/TestContainer.cpp | cpv-project/cpv-framework | b0da79c8c57ceecb6b13f4d8658ec4d4c0237668 | [
"MIT"
] | 86 | 2018-04-20T04:40:20.000Z | 2022-02-09T08:36:28.000Z | tests/Cases/Container/TestContainer.cpp | cpv-project/cpv-framework | b0da79c8c57ceecb6b13f4d8658ec4d4c0237668 | [
"MIT"
] | 16 | 2018-04-25T09:34:40.000Z | 2020-10-16T03:55:05.000Z | tests/Cases/Container/TestContainer.cpp | cpv-project/cpv-framework | b0da79c8c57ceecb6b13f4d8658ec4d4c0237668 | [
"MIT"
] | 10 | 2019-10-07T08:06:15.000Z | 2021-07-26T18:46:11.000Z | #include <sstream>
#include <CPVFramework/Container/Container.hpp>
#include <CPVFramework/Container/ServicePatcher.hpp>
#include <CPVFramework/Testing/GTestUtils.hpp>
// Test fixtures: a minimal service interface plus implementations used to
// exercise the container's lifetimes and construction styles.
namespace {
	// Abstract service; name() identifies which implementation was resolved.
	class TestService {
	public:
		virtual std::string name() const = 0;
		virtual ~TestService() = default;
	};
	// Plain implementation with no dependencies.
	class TestImplSimple : public TestService {
	public:
		std::string name() const override { return "ImplSimple"; }
	};
	// Implementation providing the reset()/freeResources() hooks (presumably
	// required by the framework's reusable-object machinery — see the
	// ReusableStorageInstance specialization below).
	class TestImplReusable : public TestService {
	public:
		std::string name() const override { return "ImplReusable"; }
		static void reset() { }
		static void freeResources() { }
	};
	// Implementation whose reported name is supplied at construction time.
	class TestImplCustomName : public TestService {
	public:
		std::string name() const override { return name_; }
		explicit TestImplCustomName(std::string name) : name_(std::move(name)) { }
	private:
		std::string name_;
	};
	// Implementation with constructor dependencies declared via
	// DependencyTypes (presumably consumed by the container for injection).
	// name() renders the injected values, printing "nullptr" for null entries.
	class TestImplInject : public TestService {
	public:
		using DependencyTypes = std::tuple<int, std::string, std::vector<std::unique_ptr<int>>>;
		std::string name() const override {
			std::ostringstream s;
			s << a_ << " " << b_ << " ";
			for (auto& ptr : c_) {
				(ptr == nullptr ? (s << "nullptr") : (s << *ptr)) << " ";
			}
			return s.str();
		}
		explicit TestImplInject(int a, std::string b, std::vector<std::unique_ptr<int>> c) :
			a_(a), b_(std::move(b)), c_(std::move(c)) { }
	private:
		int a_;
		std::string b_;
		std::vector<std::unique_ptr<int>> c_;
	};
}
// cpv::Reusable<TestImplReusable> needs a thread-local storage instance;
// this explicit specialization provides it for this translation unit.
template <>
thread_local cpv::ReusableStorageType<TestImplReusable>
cpv::ReusableStorageInstance<TestImplReusable>;
// A service registered by implementation type with the default (transient)
// lifetime can be resolved and behaves like the implementation.
TEST(Container, addTransientServiceWithImplType) {
    cpv::Container container;
    container.add<TestImplSimple, TestImplSimple>();
    ASSERT_EQ(container.get<TestImplSimple>().name(), "ImplSimple");
}
// A persistent service yields the same instance from every get() call,
// regardless of whether a custom ServiceStorage is supplied.
TEST(Container, addPersistentServiceWithImplType) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    container.add<
        seastar::shared_ptr<TestService>,
        seastar::shared_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::Persistent);
    auto instanceFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceSecond = container.get<seastar::shared_ptr<TestService>>();
    auto instanceThird = container.get<seastar::shared_ptr<TestService>>(storageP);
    ASSERT_TRUE(instanceFirst.get() != nullptr);
    ASSERT_EQ(instanceFirst.get(), instanceSecond.get());
    // Persistent lifetime ignores the external storage: still the same instance.
    ASSERT_EQ(instanceFirst.get(), instanceThird.get());
    ASSERT_EQ(instanceFirst->name(), "ImplSimple");
}
// A storage-persistent service is cached per ServiceStorage: repeated gets
// through the same storage share one instance, while different storages
// (including the container's built-in one) each get their own.
TEST(Container, addStoragePersistentServiceWithImplType) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    cpv::ServiceStorage storageQ;
    container.add<
        seastar::shared_ptr<TestService>,
        seastar::shared_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::StoragePersistent);
    auto instanceBuiltinFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceBuiltinSecond = container.get<seastar::shared_ptr<TestService>>();
    auto instancePFirst = container.get<seastar::shared_ptr<TestService>>(storageP);
    auto instancePSecond = container.get<seastar::shared_ptr<TestService>>(storageP);
    auto instanceQFirst = container.get<seastar::shared_ptr<TestService>>(storageQ);
    auto instanceQSecond = container.get<seastar::shared_ptr<TestService>>(storageQ);
    // Same storage => same instance.
    ASSERT_TRUE(instanceBuiltinFirst.get() != nullptr);
    ASSERT_EQ(instanceBuiltinFirst.get(), instanceBuiltinSecond.get());
    ASSERT_TRUE(instancePFirst.get() != nullptr);
    ASSERT_EQ(instancePFirst.get(), instancePSecond.get());
    ASSERT_TRUE(instanceQFirst.get() != nullptr);
    ASSERT_EQ(instanceQFirst.get(), instanceQSecond.get());
    // Different storages => distinct instances.
    ASSERT_NE(instanceBuiltinFirst.get(), instancePFirst.get());
    ASSERT_NE(instanceBuiltinFirst.get(), instanceQFirst.get());
    ASSERT_NE(instancePFirst.get(), instanceQFirst.get());
    ASSERT_EQ(instanceBuiltinFirst->name(), "ImplSimple");
    ASSERT_EQ(instancePFirst->name(), "ImplSimple");
    ASSERT_EQ(instanceQFirst->name(), "ImplSimple");
}
// Transient unique_ptr services are constructed anew for every get().
TEST(Container, addTransientUniquePtrServiceWithImplType) {
    cpv::Container container;
    container.add<std::unique_ptr<TestService>, std::unique_ptr<TestImplSimple>>();
    auto first = container.get<std::unique_ptr<TestService>>();
    auto second = container.get<std::unique_ptr<TestService>>();
    ASSERT_NE(first.get(), second.get());
    ASSERT_EQ(first->name(), "ImplSimple");
    ASSERT_EQ(second->name(), "ImplSimple");
}
// A persistent shared_ptr service is created once and shared by all gets.
TEST(Container, addPersistentSharedPtrServiceWithImplType) {
    cpv::Container container;
    container.add<
        seastar::shared_ptr<TestService>,
        seastar::shared_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::Persistent);
    auto instanceFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceSecond = container.get<seastar::shared_ptr<TestService>>();
    ASSERT_TRUE(instanceFirst.get() != nullptr);
    ASSERT_EQ(instanceFirst.get(), instanceSecond.get());
    ASSERT_EQ(instanceFirst->name(), "ImplSimple");
}
// Transient Reusable<> services hand out distinct pooled objects per get().
TEST(Container, addTransientReusableServiceWithImplType) {
    cpv::Container container;
    container.add<
        cpv::Reusable<TestService>,
        cpv::Reusable<TestImplReusable>>();
    auto first = container.get<cpv::Reusable<TestService>>();
    auto second = container.get<cpv::Reusable<TestService>>();
    ASSERT_NE(first.get(), second.get());
    ASSERT_EQ(first->name(), "ImplReusable");
    ASSERT_EQ(second->name(), "ImplReusable");
}
// Registering a ready-made instance makes it persistent: every get()
// returns that exact object.
TEST(Container, addPersistentServiceWithInstance) {
    cpv::Container container;
    container.add<seastar::shared_ptr<TestService>>(
        seastar::make_shared<TestImplCustomName>(
            "TestAddPersistentServiceWithInstance"));
    auto first = container.get<seastar::shared_ptr<TestService>>();
    auto second = container.get<seastar::shared_ptr<TestService>>();
    ASSERT_NE(first.get(), nullptr);
    ASSERT_EQ(first.get(), second.get());
    ASSERT_EQ(first->name(), "TestAddPersistentServiceWithInstance");
}
// A two-argument factory receives (container, storage); here it forwards the
// storage when resolving a storage-persistent dependency, so the transient
// TestService resolves to the per-storage TestImplSimple instance.
TEST(Container, addTransientServiceWithFunc2Args) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    container.add<
        seastar::shared_ptr<TestImplSimple>,
        seastar::shared_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::StoragePersistent);
    container.add<seastar::shared_ptr<TestService>>(
        [] (const cpv::Container& container, cpv::ServiceStorage& storage) {
            return container.get<seastar::shared_ptr<TestImplSimple>>(storage);
        });
    auto instanceBuiltinFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceBuiltinSecond = container.get<seastar::shared_ptr<TestImplSimple>>();
    auto instancePFirst = container.get<seastar::shared_ptr<TestService>>(storageP);
    auto instancePSecond = container.get<seastar::shared_ptr<TestImplSimple>>(storageP);
    // Interface and implementation lookups through the same storage agree.
    ASSERT_TRUE(instanceBuiltinFirst.get() != nullptr);
    ASSERT_EQ(instanceBuiltinFirst.get(), instanceBuiltinSecond.get());
    ASSERT_TRUE(instancePFirst.get() != nullptr);
    ASSERT_EQ(instancePFirst.get(), instancePSecond.get());
    // Different storages hold different implementation instances.
    ASSERT_NE(instanceBuiltinFirst.get(), instancePFirst.get());
    ASSERT_EQ(instanceBuiltinFirst->name(), "ImplSimple");
    ASSERT_EQ(instancePFirst->name(), "ImplSimple");
}
// A one-argument factory receives only the container. Registered with a
// persistent lifetime, its result is cached; a direct get of the transient
// implementation type still produces a fresh instance.
TEST(Container, addPersistentServiceWithFunc1Args) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    container.add<
        seastar::shared_ptr<TestImplSimple>,
        seastar::shared_ptr<TestImplSimple>>();
    container.add<seastar::shared_ptr<TestService>>(
        [] (const cpv::Container& container) {
            return container.get<seastar::shared_ptr<TestImplSimple>>();
        }, cpv::ServiceLifetime::Persistent);
    auto instanceFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceSecond = container.get<seastar::shared_ptr<TestService>>();
    auto instanceThird = container.get<seastar::shared_ptr<TestImplSimple>>();
    ASSERT_TRUE(instanceFirst.get() != nullptr);
    ASSERT_EQ(instanceFirst.get(), instanceSecond.get());
    // The transient registration is resolved anew, so it differs.
    ASSERT_NE(instanceFirst.get(), instanceThird.get());
    ASSERT_EQ(instanceFirst->name(), "ImplSimple");
    ASSERT_EQ(instanceThird->name(), "ImplSimple");
}
// A zero-argument factory combined with a storage-persistent lifetime is
// invoked once per storage; each storage then keeps its own instance.
TEST(Container, addStoragePersistentServiceWithFunc0Args) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    cpv::ServiceStorage storageQ;
    container.add<seastar::shared_ptr<TestService>>(
        [] {
            return seastar::make_shared<TestImplSimple>();
        }, cpv::ServiceLifetime::StoragePersistent);
    auto instanceBuiltinFirst = container.get<seastar::shared_ptr<TestService>>();
    auto instanceBuiltinSecond = container.get<seastar::shared_ptr<TestService>>();
    auto instancePFirst = container.get<seastar::shared_ptr<TestService>>(storageP);
    auto instancePSecond = container.get<seastar::shared_ptr<TestService>>(storageP);
    auto instanceQFirst = container.get<seastar::shared_ptr<TestService>>(storageQ);
    auto instanceQSecond = container.get<seastar::shared_ptr<TestService>>(storageQ);
    // Same storage => cached instance.
    ASSERT_TRUE(instanceBuiltinFirst.get() != nullptr);
    ASSERT_EQ(instanceBuiltinFirst.get(), instanceBuiltinSecond.get());
    ASSERT_TRUE(instancePFirst.get() != nullptr);
    ASSERT_EQ(instancePFirst.get(), instancePSecond.get());
    ASSERT_TRUE(instanceQFirst.get() != nullptr);
    ASSERT_EQ(instanceQFirst.get(), instanceQSecond.get());
    // Distinct storages => distinct instances.
    ASSERT_NE(instanceBuiltinFirst.get(), instancePFirst.get());
    ASSERT_NE(instanceBuiltinFirst.get(), instanceQFirst.get());
    ASSERT_NE(instancePFirst.get(), instanceQFirst.get());
    ASSERT_EQ(instanceBuiltinFirst->name(), "ImplSimple");
    ASSERT_EQ(instancePFirst->name(), "ImplSimple");
    ASSERT_EQ(instanceQFirst->name(), "ImplSimple");
}
// Dependencies declared via TestImplInject::DependencyTypes are resolved
// from the container at construction time; the vector dependency collects
// every std::unique_ptr<int> registration in registration order.
TEST(Container, getServiceInstanceWithInjectedDependencies) {
    cpv::Container container;
    // add service first to ensure descriptors are updated to date
    container.add<std::unique_ptr<TestService>, std::unique_ptr<TestImplInject>>();
    container.add<int>(123);
    container.add<std::string>("abc");
    container.add<std::unique_ptr<int>>([] { return std::make_unique<int>(100); });
    container.add<std::unique_ptr<int>>([] { return nullptr; });
    container.add<std::unique_ptr<int>>([] { return std::make_unique<int>(101); });
    auto instanceFirst = container.get<std::unique_ptr<TestService>>();
    auto instanceSecond = container.get<std::unique_ptr<TestService>>();
    ASSERT_TRUE(instanceFirst.get() != nullptr);
    ASSERT_TRUE(instanceSecond.get() != nullptr);
    // Transient lifetime: distinct instances, identical injected values.
    ASSERT_NE(instanceFirst.get(), instanceSecond.get());
    ASSERT_EQ(instanceFirst->name(), "123 abc 100 nullptr 101 ");
    ASSERT_EQ(instanceSecond->name(), "123 abc 100 nullptr 101 ");
}
// getMany() appends one instance per registration, in registration order;
// transient registrations produce fresh objects per call while persistent
// ones keep returning the cached object.
TEST(Container, getManyServiceIntoVector) {
    cpv::Container container;
    container.add<
        seastar::shared_ptr<TestService>,
        seastar::shared_ptr<TestImplSimple>>();
    container.add<seastar::shared_ptr<TestService>>(
        [] {
            return seastar::make_shared<TestImplCustomName>(
                "TestGetManyServiceIntoVector");
        }, cpv::ServiceLifetime::Persistent);
    std::vector<seastar::shared_ptr<TestService>> instancesFirst;
    std::vector<seastar::shared_ptr<TestService>> instancesSecond;
    container.getMany(instancesFirst);
    container.getMany(instancesSecond);
    ASSERT_EQ(instancesFirst.size(), 2U);
    ASSERT_EQ(instancesSecond.size(), 2U);
    ASSERT_TRUE(instancesFirst.at(0).get() != nullptr);
    ASSERT_TRUE(instancesFirst.at(1).get() != nullptr);
    ASSERT_TRUE(instancesSecond.at(0).get() != nullptr);
    // Slot 0 is transient (new each call); slot 1 is persistent (shared).
    ASSERT_NE(instancesFirst.at(0).get(), instancesSecond.at(0).get());
    ASSERT_EQ(instancesFirst.at(1).get(), instancesSecond.at(1).get());
    ASSERT_EQ(instancesFirst.at(0)->name(), "ImplSimple");
    ASSERT_EQ(instancesFirst.at(1)->name(), "TestGetManyServiceIntoVector");
    ASSERT_EQ(instancesSecond.at(0)->name(), "ImplSimple");
}
// getMany() also works with a StackAllocatedVector and an explicit storage:
// the persistent registration (slot 0) is shared across all storages while
// the storage-persistent registration (slot 1) is cached per storage.
TEST(Container, getManyServiceIntoStackAllocatedVectorWithStorage) {
    using VectorType = cpv::StackAllocatedVector<seastar::shared_ptr<TestService>, 16>;
    cpv::Container container;
    cpv::ServiceStorage storageP;
    cpv::ServiceStorage storageQ;
    container.add<
        seastar::shared_ptr<TestService>,
        seastar::shared_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::Persistent);
    container.add<seastar::shared_ptr<TestService>>(
        [] {
            return seastar::make_shared<TestImplCustomName>(
                "TestGetManyServiceIntoVector");
        }, cpv::ServiceLifetime::StoragePersistent);
    VectorType instancesBuiltinFirst;
    VectorType instancesBuiltinSecond;
    VectorType instancesPFirst;
    VectorType instancesPSecond;
    VectorType instancesQFirst;
    VectorType instancesQSecond;
    container.getMany(instancesBuiltinFirst);
    container.getMany(instancesBuiltinSecond);
    container.getMany(instancesPFirst, storageP);
    container.getMany(instancesPSecond, storageP);
    container.getMany(instancesQFirst, storageQ);
    container.getMany(instancesQSecond, storageQ);
    ASSERT_EQ(instancesBuiltinFirst.size(), 2U);
    ASSERT_EQ(instancesBuiltinSecond.size(), 2U);
    ASSERT_EQ(instancesPFirst.size(), 2U);
    ASSERT_EQ(instancesPSecond.size(), 2U);
    ASSERT_EQ(instancesQFirst.size(), 2U);
    ASSERT_EQ(instancesQSecond.size(), 2U);
    // Slot 0 (persistent): the same instance everywhere.
    ASSERT_EQ(instancesBuiltinFirst.at(0).get(), instancesBuiltinSecond.at(0).get());
    ASSERT_EQ(instancesBuiltinFirst.at(0).get(), instancesPFirst.at(0).get());
    ASSERT_EQ(instancesBuiltinFirst.at(0).get(), instancesPSecond.at(0).get());
    ASSERT_EQ(instancesBuiltinFirst.at(0).get(), instancesQFirst.at(0).get());
    ASSERT_EQ(instancesBuiltinFirst.at(0).get(), instancesQSecond.at(0).get());
    // Slot 1 (storage persistent): shared within a storage, distinct across.
    ASSERT_EQ(instancesBuiltinFirst.at(1).get(), instancesBuiltinSecond.at(1).get());
    ASSERT_EQ(instancesPFirst.at(1).get(), instancesPSecond.at(1).get());
    ASSERT_EQ(instancesQFirst.at(1).get(), instancesQSecond.at(1).get());
    ASSERT_NE(instancesBuiltinFirst.at(1).get(), instancesPFirst.at(1).get());
    ASSERT_NE(instancesBuiltinFirst.at(1).get(), instancesQFirst.at(1).get());
    ASSERT_NE(instancesPFirst.at(1).get(), instancesQFirst.at(1).get());
    ASSERT_EQ(instancesBuiltinFirst.at(0)->name(), "ImplSimple");
    ASSERT_EQ(instancesBuiltinFirst.at(1)->name(), "TestGetManyServiceIntoVector");
    ASSERT_EQ(instancesPFirst.at(1)->name(), "TestGetManyServiceIntoVector");
    ASSERT_EQ(instancesQFirst.at(1)->name(), "TestGetManyServiceIntoVector");
}
// getMany() appends rather than replaces, so two calls yield the
// registered sequence 1,2,3 twice.
TEST(Container, getManyServiceIntoVectorMultipleTimes) {
    cpv::Container container;
    for (int value : { 1, 2, 3 }) {
        container.add<int>(value);
    }
    std::vector<int> results;
    container.getMany(results);
    container.getMany(results);
    ASSERT_EQ(results.size(), 6U);
    for (std::size_t i = 0; i < results.size(); ++i) {
        ASSERT_EQ(results.at(i), static_cast<int>(i % 3) + 1);
    }
}
// A std::vector registered as a whole resolves as one service value,
// not as a collection of element registrations.
TEST(Container, getVectorAsSingleService) {
    cpv::Container container;
    container.add(std::vector<int>({ 1, 2, 3 }));
    auto values = container.get<std::vector<int>>();
    ASSERT_EQ(values.size(), 3U);
    for (int expected = 1; expected <= 3; ++expected) {
        ASSERT_EQ(values.at(expected - 1), expected);
    }
}
// getMany() into an optional keeps only the last registered service;
// an unregistered type leaves the optional empty.
TEST(Container, getManyServiceIntoOptional) {
    cpv::Container container;
    for (int value : { 1, 2, 3 }) {
        container.add<int>(value);
    }
    std::optional<int> lastInt;
    container.getMany(lastInt);
    ASSERT_TRUE(lastInt.has_value());
    ASSERT_EQ(*lastInt, 3);
    std::optional<std::string> lastString;
    container.getMany(lastString);
    ASSERT_FALSE(lastString.has_value());
}
// get() throws a descriptive ContainerException for an unregistered type.
TEST(Container, errorWhenGetServiceNotRegistered) {
    cpv::Container container;
    ASSERT_THROWS_CONTAINS(
        cpv::ContainerException,
        container.get<int>(),
        "failed: not registered");
}
// get() is ambiguous when a type has multiple registrations; the caller
// must use getMany() instead, so the container throws.
TEST(Container, errorWhenGetServiceMultipleRegistered) {
    cpv::Container container;
    container.add<int>(1);
    container.add<int>(2);
    ASSERT_THROWS_CONTAINS(
        cpv::ContainerException,
        container.get<int>(),
        "failed: registered multiple times");
}
// A persistent service must be copy constructible (it is handed out from a
// cache); move-only types like unique_ptr are rejected at get() time.
TEST(Container, errorWhenGetPersistentServiceNotCopyConstructible) {
    cpv::Container container;
    container.add<
        std::unique_ptr<TestService>,
        std::unique_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::Persistent);
    ASSERT_THROWS_CONTAINS(
        cpv::ContainerException,
        container.get<std::unique_ptr<TestService>>(),
        "error: lifetime is persistent but not copy constructible");
}
// Same constraint as above for the storage-persistent lifetime.
TEST(Container, errorWhenGetStoragePersistentServiceNotCopyConstructible) {
    cpv::Container container;
    container.add<
        std::unique_ptr<TestService>,
        std::unique_ptr<TestImplSimple>>(
            cpv::ServiceLifetime::StoragePersistent);
    ASSERT_THROWS_CONTAINS(
        cpv::ContainerException,
        container.get<std::unique_ptr<TestService>>(),
        "error: lifetime is storage persistent but not copy constructible");
}
// A patch applied to a transient service runs on every resolution
// (count reaches 2 after two gets) and can transform the produced value.
TEST(Container, patchTransientServiceWithFunc0Args) {
    cpv::Container container;
    container.add<std::unique_ptr<int>>([] {
        return std::make_unique<int>(1);
    });
    auto count = seastar::make_shared<std::size_t>(0);
    cpv::ServicePatcher<std::unique_ptr<int>>::patch(
        container, [count] (std::unique_ptr<int> v) {
            ++(*count);
            *v = -*v;
            return v;
        });
    ASSERT_EQ(*container.get<std::unique_ptr<int>>(), -1);
    ASSERT_EQ(*container.get<std::unique_ptr<int>>(), -1);
    ASSERT_EQ(*count, 2U);
}
// A patch on a persistent service runs only once (the patched result is
// cached). The one-argument form also receives the container so it can
// resolve other services.
TEST(Container, patchPersistentServiceWithFunc1Args) {
    cpv::Container container;
    container.add<int>(100);
    container.add<seastar::shared_ptr<int>>([] {
        return seastar::make_shared<int>(1);
    }, cpv::ServiceLifetime::Persistent);
    auto count = seastar::make_shared<std::size_t>(0);
    cpv::ServicePatcher<seastar::shared_ptr<int>>::patch(
        container, [count] (const cpv::Container& c, seastar::shared_ptr<int> v) {
            ++(*count);
            *v += c.get<int>();
            return v;
        });
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(), 101);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(), 101);
    ASSERT_EQ(*count, 1U);
}
// A patch on a storage-persistent service runs once per storage (count
// reaches 3: builtin, P, Q). The two-argument form receives both the
// container and the active storage; the counting int factory makes each
// storage's patched value distinct (101 / 102 / 103).
TEST(Container, patchStoragePersistentServiceWithFunc2Args) {
    cpv::Container container;
    cpv::ServiceStorage storageP;
    cpv::ServiceStorage storageQ;
    container.add<int>([v=seastar::make_shared<int>(0)] {
        return ++*v;
    }, cpv::ServiceLifetime::StoragePersistent);
    container.add<seastar::shared_ptr<int>>([] {
        return seastar::make_shared<int>(100);
    }, cpv::ServiceLifetime::StoragePersistent);
    auto count = seastar::make_shared<std::size_t>(0);
    cpv::ServicePatcher<seastar::shared_ptr<int>>::patch(
        container, [count] (
            const cpv::Container& c, cpv::ServiceStorage& s, seastar::shared_ptr<int> v) {
            ++(*count);
            *v += c.get<int>(s);
            return v;
        });
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(), 101);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(), 101);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(storageP), 102);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(storageP), 102);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(storageQ), 103);
    ASSERT_EQ(*container.get<seastar::shared_ptr<int>>(storageQ), 103);
    ASSERT_EQ(*count, 3U);
}
// Patching an unregistered type does not implicitly register it;
// a subsequent get() still fails with "not registered".
TEST(Container, patchNotRegisteredService) {
    cpv::Container container;
    cpv::ServicePatcher<int>::patch(container, [] (int) { return 0; });
    ASSERT_THROWS_CONTAINS(
        cpv::ContainerException,
        container.get<int>(),
        "failed: not registered");
}
// Patches applied to dependency types are visible through constructor
// injection: TestImplInject receives the patched string and ints.
TEST(Container, patchDoesNotBreakDIFactory) {
    cpv::Container container;
    container.add<std::unique_ptr<TestService>, std::unique_ptr<TestImplInject>>();
    container.add<int>(123);
    container.add<std::string>("abc");
    container.add<std::unique_ptr<int>>([] { return std::make_unique<int>(100); });
    container.add<std::unique_ptr<int>>([] { return nullptr; });
    container.add<std::unique_ptr<int>>([] { return std::make_unique<int>(101); });
    cpv::ServicePatcher<std::string>::patch(
        container, [] (const std::string& v) { return v + ".patched"; });
    cpv::ServicePatcher<std::unique_ptr<int>>::patch(
        container, [] (std::unique_ptr<int> v) {
            // Null pointers pass through the patch unchanged.
            if (v != nullptr) {
                *v = -*v;
            }
            return v;
        });
    auto instance = container.get<std::unique_ptr<TestService>>();
    ASSERT_TRUE(instance.get() != nullptr);
    ASSERT_EQ(instance->name(), "123 abc.patched -100 nullptr -101 ");
}
| 39.512097 | 90 | 0.750485 | [
"vector"
] |
f6bbd1d2df638ec9c1f76db4d40bbb7f7173d570 | 19,554 | cpp | C++ | Terra/gui/GUI.cpp | leeairw/Terra | 9387c064b727633da34e3c2146a67b7fa9b59c62 | [
"MIT"
] | null | null | null | Terra/gui/GUI.cpp | leeairw/Terra | 9387c064b727633da34e3c2146a67b7fa9b59c62 | [
"MIT"
] | null | null | null | Terra/gui/GUI.cpp | leeairw/Terra | 9387c064b727633da34e3c2146a67b7fa9b59c62 | [
"MIT"
] | null | null | null | #include "./GUI.hpp"
#include "../App.hpp"
#include "../project/Project.hpp"
#include <wx/stdpaths.h>
#include <wx/display.h>
#include <wx/dnd.h>
#include <wx/tglbtn.h>
#include <vector>
#include <pluginterfaces/vst/ivstaudioprocessor.h>
#include "../misc/StrCnv.hpp"
#include "../misc/MathUtil.hpp"
#include "../plugin/PluginScanner.hpp"
#include "./Controls.hpp"
#include "./PluginEditor.hpp"
#include "./Keyboard.hpp"
#include "./UnitData.hpp"
#include "./GraphEditor.hpp"
#include "../resource/ResourceHelper.hpp"
#include "./PianoRoll.hpp"
#include "./PCKeyboardInput.hpp"
#if !defined(_MSC_VER)
#include "./OSXMenuBar.h"
#endif
NS_HWM_BEGIN
// Menu / command identifiers used by the main frame's menu bar and
// the event bindings below.
enum
{
    ID_Play = 1,
    ID_RescanPlugin,
    ID_ForceRescanPlugin,
    ID_Setting,
    ID_File_New,
    ID_File_Open,
    ID_File_Save,
    ID_File_SaveAs,
    ID_View_ShowPianoRoll,
};
//! Panel hosting the transport buttons (rewind / stop / play / forward /
//! loop) and keeping their toggle state in sync with the project's
//! Transporter. Listens for project switches to re-subscribe.
class TransportPanel
:   public wxPanel
,   MyApp::ChangeProjectListener
,   Transporter::ITransportStateListener
{
    //! Resolve the path of an image under the "transport" resource folder.
    static
    String GetImagePath(String filename)
    {
        return GetResourcePath({L"transport", filename});
    }

public:
    TransportPanel(wxWindow *parent)
    :   wxPanel(parent, wxID_ANY, wxDefaultPosition, wxDefaultSize)
    {
        // Sprite sheet with 6 columns (one per button) and 4 rows;
        // presumably the rows are button states — confirm against ImageAsset.
        asset_ = ImageAsset(GetImagePath(L"transport_buttons.png"), 6, 4);

        btn_rewind_ = new ImageButton(this, false, asset_.GetImage(0, 0), asset_.GetImage(0, 1), asset_.GetImage(0, 0), asset_.GetImage(0, 1));
        btn_stop_ = new ImageButton(this, false, asset_.GetImage(1, 0), asset_.GetImage(1, 1), asset_.GetImage(1, 0), asset_.GetImage(1, 1));
        btn_play_ = new ImageButton(this, true, asset_.GetImage(2, 0), asset_.GetImage(2, 1), asset_.GetImage(2, 2), asset_.GetImage(2, 3));
        btn_forward_ = new ImageButton(this, false, asset_.GetImage(3, 0), asset_.GetImage(3, 1), asset_.GetImage(3, 0), asset_.GetImage(3, 1));
        btn_loop_ = new ImageButton(this, true, asset_.GetImage(4, 0), asset_.GetImage(4, 1), asset_.GetImage(4, 2), asset_.GetImage(4, 3));
        //btn_metronome_ = new ImageButton(this, true, asset_.GetImage(5, 0), asset_.GetImage(5, 1), asset_.GetImage(5, 2), asset_.GetImage(5, 3));

        auto hbox = new wxBoxSizer(wxHORIZONTAL);
        hbox->Add(btn_rewind_, wxSizerFlags(0).Border(wxTOP|wxBOTTOM|wxRIGHT, 1));
        hbox->Add(btn_stop_, wxSizerFlags(0).Border(wxTOP|wxBOTTOM|wxRIGHT, 1));
        hbox->Add(btn_play_, wxSizerFlags(0).Border(wxTOP|wxBOTTOM|wxRIGHT, 1));
        hbox->Add(btn_forward_, wxSizerFlags(0).Border(wxTOP|wxBOTTOM|wxRIGHT, 1));
        hbox->Add(btn_loop_, wxSizerFlags(0).Border(wxTOP|wxBOTTOM|wxRIGHT, 1));
        //hbox->Add(btn_metronome_, wxSizerFlags(0));
        hbox->AddStretchSpacer(1);

        SetSizer(hbox);
        SetBackgroundColour(wxColour(0x1B, 0x1B, 0x1B));

        btn_rewind_->Bind(wxEVT_BUTTON, [this](auto &ev) { OnRewind(); });
        btn_stop_->Bind(wxEVT_BUTTON, [this](auto &ev) { OnStop(); });
        btn_play_->Bind(wxEVT_TOGGLEBUTTON, [this](auto &ev) { OnPlay(); });
        btn_forward_->Bind(wxEVT_BUTTON, [this](auto &ev) { OnForward(); });
        btn_loop_->Bind(wxEVT_TOGGLEBUTTON, [this](auto &ev) { OnLoop(); });
        //btn_metronome_->Bind(wxEVT_TOGGLEBUTTON, [this](auto &ev) { OnMetronome(); });

        slr_change_project_.reset(MyApp::GetInstance()->GetChangeProjectListeners(), this);

        // Mirror the current transporter state into the toggle buttons.
        auto pj = Project::GetCurrentProject();
        assert(pj);
        auto &tp = pj->GetTransporter();
        slr_transporter_.reset(tp.GetListeners(), this);

        btn_play_->SetPushed(tp.IsPlaying());
        btn_loop_->SetPushed(tp.IsLoopEnabled());
    }

    ~TransportPanel()
    {}

private:
    ImageAsset asset_;
    // All buttons are owned by wxWidgets once added as children.
    // Initialize to nullptr: btn_metronome_ is currently never constructed
    // (its creation is commented out above), so without the initializer it
    // would be an uninitialized pointer member.
    ImageButton *btn_rewind_ = nullptr;
    ImageButton *btn_stop_ = nullptr;
    ImageButton *btn_play_ = nullptr;
    ImageButton *btn_forward_ = nullptr;
    ImageButton *btn_loop_ = nullptr;
    ImageButton *btn_metronome_ = nullptr;
    ScopedListenerRegister<MyApp::ChangeProjectListener> slr_change_project_;
    ScopedListenerRegister<Transporter::ITransportStateListener> slr_transporter_;

    //! Jump the play position back to the start.
    void OnRewind()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();
        tp.Rewind();
    }

    //! Stop playback.
    void OnStop()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();
        tp.SetStop();
    }

    //! Toggle playback on/off.
    void OnPlay()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();
        tp.SetPlaying(tp.IsPlaying() == false);
    }

    //! Skip the play position forward.
    void OnForward()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();
        tp.FastForward();
    }

    //! Enable/disable loop playback according to the toggle button state.
    void OnLoop()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();
        tp.SetLoopEnabled(btn_loop_->IsPushed());
    }

    //! Metronome support is not implemented yet.
    void OnMetronome()
    {
//        auto pj = Project::GetActiveProject();
//        pj->SetMetronome(btn_metronome_->GetValue());;
    }

    //! Re-subscribe to the transporter of the newly activated project.
    void OnChangeCurrentProject(Project *old_pj, Project *new_pj) override
    {
        if(old_pj) {
            slr_transporter_.reset();
        }

        if(new_pj) {
            slr_transporter_.reset(new_pj->GetTransporter().GetListeners(), this);
        }
    }

    //! Reflect transporter state changes in the play/loop toggle buttons.
    void OnChanged(TransportInfo const &old_state,
                   TransportInfo const &new_state) override
    {
        auto differ = [&](auto const member) {
            return old_state.*member != new_state.*member;
        };

        if(differ(&TransportInfo::playing_)) {
            btn_play_->SetPushed(new_state.playing_);
            btn_play_->Refresh();
        }

        if(differ(&TransportInfo::loop_enabled_)) {
            btn_loop_->SetPushed(new_state.loop_enabled_);
            btn_loop_->Refresh();
        }
    }
};
//! Shows the current transport position as "measure:beat:tick".
//! Updates come from two sources: transporter change callbacks and a
//! polling timer whose rate is raised while playback is running.
class TimeIndicator
:   public wxPanel
,   public MyApp::ChangeProjectListener
,   public Transporter::ITransportStateListener
{
public:
    TimeIndicator(wxWindow *parent, wxPoint pos, wxSize size)
    :   wxPanel(parent, wxID_ANY, pos, size)
    {
        SetDoubleBuffered(true);
        timer_.Bind(wxEVT_TIMER, [this](auto &ev) { OnTimer(); });
        timer_.Start(kIntervalSlow);
        text_ = new wxStaticText(this, wxID_ANY, "", wxDefaultPosition, wxDefaultSize, wxALIGN_CENTRE_HORIZONTAL|wxST_NO_AUTORESIZE);

        // Platform-specific font choice for the time display.
#if defined(_MSC_VER)
        auto font = wxFont(wxFontInfo(22).Family(wxFONTFAMILY_MODERN).FaceName("Tahoma"));
#else
        auto font = wxFont(wxFontInfo(26).Family(wxFONTFAMILY_MODERN).FaceName("Geneva"));
#endif
        text_->SetFont(font);
        text_->SetForegroundColour(wxColour(0xCB, 0xCB, 0xCB));
        UpdateTime(MBT(0, 0, 0));

        auto vbox = new wxBoxSizer(wxVERTICAL);
        vbox->AddStretchSpacer(1);
        vbox->Add(text_, wxSizerFlags(0).Expand());
        vbox->AddStretchSpacer(1);

        SetSizer(vbox);
        Layout();

        slr_change_project_.reset(MyApp::GetInstance()->GetChangeProjectListeners(), this);

        auto pj = Project::GetCurrentProject();
        assert(pj);
        auto &tp = pj->GetTransporter();
        slr_transporter_.reset(tp.GetListeners(), this);

        SetBackgroundColour(wxColour(0x3B, 0x3B, 0x3B));
    }

    ~TimeIndicator()
    {}

private:
    // Poll intervals in milliseconds. These are compile-time constants, so
    // make them static constexpr instead of mutable per-instance members.
    static constexpr UInt32 kIntervalSlow = 200;   // while stopped
    static constexpr UInt32 kIntervalFast = 16;    // while playing
    wxTimer timer_;
    TransportInfo last_info_;      // last state seen by the polling path
    wxStaticText *text_ = nullptr;
    ScopedListenerRegister<MyApp::ChangeProjectListener> slr_change_project_;
    ScopedListenerRegister<Transporter::ITransportStateListener> slr_transporter_;

    //! Re-subscribe to the transporter of the newly activated project.
    void OnChangeCurrentProject(Project *old_pj, Project *new_pj) override
    {
        if(old_pj) {
            slr_transporter_.reset();
        }

        if(new_pj) {
            slr_transporter_.reset(new_pj->GetTransporter().GetListeners(), this);
        }
    }

    //! Render an MBT position into the label (1-based measure/beat).
    void UpdateTime(MBT mbt)
    {
        text_->SetLabel("{:03d}:{:02d}:{:03d}"_format(mbt.measure_ + 1,
                                                      mbt.beat_ + 1,
                                                      mbt.tick_));
        Layout();
    }

    //! Update the display when the play position or meter changed.
    void OnChanged(TransportInfo const &old_state,
                   TransportInfo const &new_state) override
    {
        auto to_tuple = [](TransportInfo const &info) {
            return std::tie(info.play_.begin_, info.meter_);
        };
        // Skip the relayout when nothing relevant changed.
        if(to_tuple(old_state) == to_tuple(new_state)) {
            return;
        }

        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto mbt = pj->TickToMBT(new_state.play_.begin_.tick_);
        UpdateTime(mbt);
    }

    //! Poll the transporter; switch the poll rate to match the play state.
    void OnTimer()
    {
        auto pj = Project::GetCurrentProject();
        if(!pj) { return; }

        auto &tp = pj->GetTransporter();

        if(tp.IsPlaying() && timer_.GetInterval() == kIntervalSlow) {
            timer_.Start(kIntervalFast);
        } else if(tp.IsPlaying() == false && timer_.GetInterval() == kIntervalFast) {
            timer_.Start(kIntervalSlow);
        }

        auto new_info = tp.GetCurrentState();
        OnChanged(last_info_, new_info);
        last_info_ = new_info;
    }
};
//! Top bar of the main window: transport buttons on the left, followed by
//! the time indicator, with the remaining width left empty.
class HeaderPanel
:   public wxPanel
{
public:
    HeaderPanel(wxWindow *parent)
    :   wxPanel(parent, wxID_ANY, wxDefaultPosition, wxDefaultSize)
    {
        transport_panel_ = new TransportPanel(this);
        time_panel_ = new TimeIndicator(this, wxDefaultPosition, wxSize(220, 38));

        auto sizer = new wxBoxSizer(wxHORIZONTAL);
        sizer->Add(transport_panel_, wxSizerFlags(0).Expand());
        sizer->Add(time_panel_, wxSizerFlags(0).Expand());
        sizer->AddStretchSpacer();
        SetSizer(sizer);

        SetBackgroundColour(col_background_);
    }

private:
    wxPanel *transport_panel_ = nullptr;
    wxPanel *time_panel_ = nullptr;
    wxColor col_background_{10, 10, 10};
};
//! Main content panel: header bar on top, then either the graph editor or
//! the piano roll (toggled via the View menu), and a virtual keyboard at
//! the bottom. Singleton — only one MyPanel exists at a time.
class MyPanel
:   public wxPanel
,   public SingleInstance<MyPanel>
,   public PluginScanner::Listener
{
public:
    MyPanel(wxWindow *parent, wxSize size)
    :   wxPanel(parent)
    {
        this->SetBackgroundColour(wxColour(0x09, 0x21, 0x33));

        header_panel_ = new HeaderPanel(this);

        auto pj = Project::GetCurrentProject();
        graph_panel_ = CreateGraphEditorComponent(this, pj->GetGraph()).release();
        graph_panel_->Show();

        // The piano roll starts hidden; SwitchPianoRoll toggles it in.
        pianoroll_ = CreatePianoRollWindow(this);
        pianoroll_->SetSize(size);
        pianoroll_->Hide();

        keyboard_ = CreateVirtualKeyboard(this);

        auto vbox = new wxBoxSizer(wxVERTICAL);
        vbox->Add(header_panel_, wxSizerFlags(0).Expand());
        vbox->Add(graph_panel_, wxSizerFlags(1).Expand());
        vbox->Add(pianoroll_, wxSizerFlags(1).Expand());

        // Keep the keyboard horizontally centered; the huge proportion on
        // the keyboard makes the spacers shrink first.
        auto hbox = new wxBoxSizer(wxHORIZONTAL);
        hbox->AddStretchSpacer(1);
        hbox->Add(keyboard_, wxSizerFlags(100000).Expand());
        hbox->AddStretchSpacer(1);
        vbox->Add(hbox, wxSizerFlags(0).Expand());

        SetSizer(vbox);
        SetClientSize(size);
        graph_panel_->RearrangeNodes();

        IMainFrame::GetInstance()->Bind(wxEVT_COMMAND_MENU_SELECTED, [this](auto &ev) { SwitchPianoRoll(ev); }, ID_View_ShowPianoRoll);
        Bind(wxEVT_PAINT, [this](auto &ev) { OnPaint(ev); });
    }

    ~MyPanel()
    {
    }

private:
    void OnPaint(wxPaintEvent &)
    {
        wxPaintDC pdc(this);
        wxGCDC dc(pdc);
        Draw(dc);
    }

    // Painting is currently a no-op; the background colour set in the
    // constructor is enough.
    void Draw(wxDC &dc)
    {
        //dc.SetBrush(wxBrush(wxColour(0x09, 0x21, 0x33)));
        //dc.DrawRectangle(GetClientRect());
    }

    //! Attaches a plugin description to a UI item via wxClientData.
    class ComponentData : public wxClientData
    {
    public:
        ComponentData(schema::PluginDescription const &desc)
        :   desc_(desc)
        {}

        schema::PluginDescription desc_;
    };

    //! Swap between the graph editor and the piano roll; the keyboard is
    //! only active while the graph editor is visible.
    void SwitchPianoRoll(wxCommandEvent &ev)
    {
        if(ev.IsChecked()) {
            graph_panel_->Hide();
            pianoroll_->Show();
            keyboard_->Disable();
            keyboard_->Hide();
        } else {
            graph_panel_->Show();
            pianoroll_->Hide();
            keyboard_->Enable();
            keyboard_->Show();
        }

        Layout();
    }

    //! Placeholder view-status for the piano roll: no scrolling and a
    //! fixed 1.0 zoom factor in both directions.
    class MainPanelPianoRollViewStatus
    :   public IPianoRollViewStatus
    {
        Int32 GetScrollPosition(wxOrientation ort) const override
        {
            return 0;
        }

        void SetScrollPosition(wxOrientation ort, Int32 pos) override
        {}

        //! Get zoom factor.
        /*! @return the zoom factor for the orientation.
         *  a value greater then 1.0 means zoom-in, less then 1.0 means zoom-out.
         *  the value always greater than 0.0.
         */
        float GetZoomFactor(wxOrientation ort) const override
        {
            return 1.0;
        }

        void SetZoomFactor(wxOrientation ort, float factor, int zooming_pos) override
        {}
    };

    wxWindow *keyboard_ = nullptr;
    wxPanel *header_panel_ = nullptr;
    GraphEditor *graph_panel_ = nullptr;
    wxWindow *pianoroll_ = nullptr;
    MainPanelPianoRollViewStatus pianoroll_view_status_;
};
class MyPanel;
// Base-frame constructor: creates a top-level frame with default geometry;
// the derived MainFrame sets the real size and title.
IMainFrame::IMainFrame()
:   wxFrame(nullptr, wxID_ANY, "", wxDefaultPosition, wxDefaultSize)
{}
//! Concrete main application window: builds the menu bar, hosts MyPanel,
//! and saves/restores the frame geometry via the project schema.
class MainFrame
:   public IMainFrame
,   MyApp::ChangeProjectListener
{
public:
    MainFrame(wxSize initial_size);
    ~MainFrame();

private:
    bool Destroy() override;
    void OnExit();
    void OnAbout(wxCommandEvent& event);
    void OnPlay(wxCommandEvent& event);
    void OnTimer();

    void OnBeforeSaveProject(Project *pj, schema::Project &schema) override;
    void OnAfterLoadProject(Project *pj, schema::Project const &schema) override;

private:
    std::string msg_;       // NOTE(review): never written or read — candidate for removal
    wxTimer timer_;         // drives OnTimer() (currently a no-op)
    MyPanel *my_panel_;     // owned via wx parent/child relationship
    ScopedListenerRegister<MyApp::ChangeProjectListener> slr_change_project_;
};
MainFrame::MainFrame(wxSize initial_size)
:   IMainFrame()
{
    SetClientSize(initial_size);
    SetTitle("Untitled");

    // --- Menu bar -------------------------------------------------------
    wxMenu *menuFile = new wxMenu;
    menuFile->Append(ID_File_New, "&New File\tCTRL-N", "New File");
    menuFile->Append(ID_File_Open, "&Open...\tCTRL-O", "Open File");
    menuFile->Append(ID_File_Save, "&Save\tCTRL-S", "Save File");
    menuFile->Append(ID_File_SaveAs, "&Save As\tCTRL-SHIFT-S", "Save File As");
    menuFile->AppendSeparator();
    menuFile->Append(ID_RescanPlugin, "&Rescan Plugins", "Rescan Plugins");
    menuFile->Append(ID_ForceRescanPlugin, "&Clear and Rescan Plugins", "Clear and Rescan Plugins");
    menuFile->AppendSeparator();
    menuFile->Append(wxID_EXIT);

    wxMenu *menuEdit = new wxMenu;
    menuEdit->Append(ID_Setting, "&Setting\tCTRL-,", "Open Setting Dialog");

    wxMenu *menuView = new wxMenu;
    menuView->AppendCheckItem(ID_View_ShowPianoRoll, "Show &Piano Roll\tCTRL-P", "Show Piano Roll");

    wxMenu *menuPlay = new wxMenu;
    menuPlay->Append(ID_Play, "&Play\tSPACE", "Start playback", wxITEM_CHECK);

    wxMenu *menuHelp = new wxMenu;
    menuHelp->Append(wxID_ABOUT);

    wxMenuBar *menuBar = new wxMenuBar;
    menuBar->Append( menuFile, "&File" );
    menuBar->Append( menuEdit, "&Edit" );
    menuBar->Append( menuView, "&View" );
    menuBar->Append( menuPlay, "&Play" );
    menuBar->Append( menuHelp, "&Help" );
    SetMenuBar( menuBar );

    // --- Event bindings: mostly forwarded to MyApp ----------------------
    Bind(wxEVT_MENU, [this](auto &ev) { OnExit(); }, wxID_EXIT);
    //Bind(wxEVT_CLOSE_WINDOW, [this](auto &ev) { OnExit(); });
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->OnFileNew(); }, ID_File_New);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->OnFileOpen(); }, ID_File_Open);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->OnFileSave(false, false); }, ID_File_Save);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->OnFileSave(true, false); }, ID_File_SaveAs);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->RescanPlugins(); }, ID_RescanPlugin);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->ForceRescanPlugins(); }, ID_ForceRescanPlugin);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [](auto &ev) { MyApp::GetInstance()->ShowSettingDialog(); }, ID_Setting);
    Bind(wxEVT_COMMAND_MENU_SELECTED, [this](auto &ev) { OnPlay(ev); }, ID_Play);
    Bind(wxEVT_MENU, [this](auto &ev) { OnAbout(ev); }, wxID_ABOUT);

    timer_.SetOwner(this);
    Bind(wxEVT_TIMER, [this](auto &ev) { OnTimer(); });
    timer_.Start(1000);

    my_panel_ = new MyPanel(this, GetClientSize());
    slr_change_project_.reset(MyApp::GetInstance()->GetChangeProjectListeners(), this);

    // Route PC-keyboard note input to this frame.
    PCKeyboardInput::GetInstance()->ApplyTo(this);
}
// Teardown happens in Destroy(); nothing to do here.
MainFrame::~MainFrame()
{
}
// Overridden wxWindow teardown: flush application state, then detach and
// destroy the main panel before delegating to the base-class destruction.
bool MainFrame::Destroy()
{
    MyApp::GetInstance()->BeforeExit();
    // Detach first so wxFrame::Destroy() does not try to destroy the panel again.
    RemoveChild(my_panel_);
    my_panel_->Destroy();
    return wxFrame::Destroy();
}
// File->Exit handler: give the user a chance to save, then close the frame.
void MainFrame::OnExit()
{
    auto app = MyApp::GetInstance();
    // OnFileSave(false, true): flags presumably (save_as, confirm/exit mode),
    // mirroring the ctor's Save/SaveAs bindings -- TODO confirm flag meanings.
    auto saved = app->OnFileSave(false, true);
    if(!saved) { return; }  // save was cancelled/failed: abort the exit
    Close(false);           // non-forced close (close handlers may still veto)
}
// Help->About handler.
// NOTE(review): wxMessageBox's signature is (message, caption, style, ...);
// here the app name is the message body and the author credit is the caption,
// which looks reversed -- confirm this is the intended dialog layout.
void MainFrame::OnAbout(wxCommandEvent& event)
{
    wxMessageBox(kAppName,
                 "created by hotwatermorning@gmail.com", wxOK | wxICON_INFORMATION );
}
// Play menu handler: the checkable item's state drives the transport
// (checked = start playback, unchecked = stop).
void MainFrame::OnPlay(wxCommandEvent &ev)
{
    auto &tp = Project::GetCurrentProject()->GetTransporter();
    tp.SetPlaying(ev.IsChecked());
}
// 1-second periodic tick (timer started in the ctor); currently a no-op.
void MainFrame::OnTimer()
{
}
// Project-save hook: persist the frame's current on-screen rectangle
// (position + size) into the project schema, restored by OnAfterLoadProject.
void MainFrame::OnBeforeSaveProject(Project *pj, schema::Project &schema)
{
    auto schema_rect = schema.mutable_frame_rect();
    auto rect = GetScreenRect();
    auto schema_pos = schema_rect->mutable_pos();
    schema_pos->set_x(rect.GetX());
    schema_pos->set_y(rect.GetY());
    auto schema_size = schema_rect->mutable_size();
    schema_size->set_width(rect.GetWidth());
    schema_size->set_height(rect.GetHeight());
}
// Project-load hook: restore the frame rectangle saved in the schema,
// clamped so the window remains reachable on the current display.
void MainFrame::OnAfterLoadProject(Project *pj, schema::Project const &schema)
{
    wxRect rc;
    if(schema.has_frame_rect()) {
        auto const &rect = schema.frame_rect();
        if(rect.has_pos()) {
            auto const &pos = rect.pos();
            rc.SetPosition(wxPoint{pos.x(), pos.y()});
        }
        if(rect.has_size()) {
            auto const &size = rect.size();
            rc.SetSize(wxSize{size.width(), size.height()});
        }
    }
    wxDisplay disp{};
    auto client = disp.GetClientArea();
    // Keep the title area below the menu bar on non-Windows platforms
    // (presumably the macOS/GTK global menu bar -- TODO confirm).
    int menu_height = 0;
#if defined(_MSC_VER)
    // do nothing
#else
    menu_height = GetMenuBarHeight();
#endif
    // constrain: size within min/max, and at least ~100px of the frame on screen
    rc.SetWidth(Clamp<int>(rc.GetWidth(), GetMinWidth(), GetMaxWidth()));
    rc.SetHeight(Clamp<int>(rc.GetHeight(), GetMinHeight(), GetMaxHeight()));
    rc.SetX(Clamp<int>(rc.GetX(), 0, client.GetWidth()-100));
    rc.SetY(Clamp<int>(rc.GetY(), menu_height, client.GetHeight()-100));
    auto origin = GetClientAreaOrigin();
    rc.Offset(origin);
    SetSize(rc);
}
// Factory: hides the concrete MainFrame type behind the IMainFrame interface.
// Caller takes ownership of the returned frame (standard wx top-level window rules).
IMainFrame * CreateMainFrame(wxSize initial_size)
{
    return new MainFrame(initial_size);
}
NS_HWM_END
| 30.50546 | 149 | 0.614401 | [
"vector"
] |
f6beea4a6fb7ab1b0ef6f6d82ae75342456b35cf | 6,859 | cpp | C++ | srcs/common/trackrunbox.cpp | Reflectioner/heif | bdac2fc9c66d8c1e8994eaf81f1a5db116b863ab | [
"BSD-3-Clause"
] | null | null | null | srcs/common/trackrunbox.cpp | Reflectioner/heif | bdac2fc9c66d8c1e8994eaf81f1a5db116b863ab | [
"BSD-3-Clause"
] | null | null | null | srcs/common/trackrunbox.cpp | Reflectioner/heif | bdac2fc9c66d8c1e8994eaf81f1a5db116b863ab | [
"BSD-3-Clause"
] | null | null | null | /* This file is part of Nokia HEIF library
*
* Copyright (c) 2015-2020 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
*
* Contact: heif@nokia.com
*
* This software, including documentation, is protected by copyright controlled by Nokia Corporation and/ or its
* subsidiaries. All rights are reserved.
*
* Copying, including reproducing, storing, adapting or translating, any or all of this material requires the prior
* written consent of Nokia.
*/
#include "trackrunbox.hpp"
#include <stdexcept>
// Construct an empty 'trun' (track run) box.  Field presence is governed by
// tr_flags (TrackRunFlags bit mask); version selects unsigned (v0) vs signed
// (v1) sample composition time offsets.
TrackRunBox::TrackRunBox(uint8_t version, std::uint32_t tr_flags)
    : FullBox("trun", version, tr_flags)
    , mSampleDefaultsSet(false)
    , mSampleDefaults()
    , mSampleCount(0)
    , mDataOffset(0)
    , mFirstSampleFlags()
    , mSampleDetails()
{
}
// Set the number of samples described by this run (mSampleDetails should
// eventually hold one entry per sample).
void TrackRunBox::setSampleCount(const uint32_t sampleCount)
{
    mSampleCount = sampleCount;
}
// Number of samples in this run.
uint32_t TrackRunBox::getSampleCount() const
{
    return mSampleCount;
}
// Store the data offset and mark it present in the box flags so that
// writeBox()/getDataOffset() will serialize/expose it.
void TrackRunBox::setDataOffset(const int32_t dataOffset)
{
    mDataOffset = dataOffset;
    setFlags(getFlags() | TrackRunFlags::DataOffsetPresent);
}
// Return the data offset.
// Throws RuntimeError when the DataOffsetPresent flag is not set, since the
// field is meaningless in that case.
int32_t TrackRunBox::getDataOffset() const
{
    // Guard clause: the offset only exists when its presence flag is set.
    if ((getFlags() & TrackRunFlags::DataOffsetPresent) == 0)
    {
        throw RuntimeError("TrackRunBox::getDataOffset() according to flags DataOffsetPresent not present.");
    }
    return mDataOffset;
}
// Store the first-sample flags and mark them present; note that writeBox()
// treats these as mutually exclusive with per-sample flags.
void TrackRunBox::setFirstSampleFlags(const MOVIEFRAGMENTS::SampleFlags firstSampleFlags)
{
    mFirstSampleFlags = firstSampleFlags;
    setFlags(getFlags() | TrackRunFlags::FirstSampleFlagsPresent);
}
// Return the first-sample flags.
// Throws RuntimeError when the FirstSampleFlagsPresent flag is not set.
MOVIEFRAGMENTS::SampleFlags TrackRunBox::getFirstSampleFlags() const
{
    // Guard clause: reject access when the field was never serialized/set.
    if ((getFlags() & TrackRunFlags::FirstSampleFlagsPresent) == 0)
    {
        throw RuntimeError(
            "TrackRunBox::getFirstSampleFlags() according to flags FirstSampleFlagsPresent not present.");
    }
    return mFirstSampleFlags;
}
// Append per-sample details; callers are responsible for keeping the number
// of entries consistent with mSampleCount.
void TrackRunBox::addSampleDetails(SampleDetails sampleDetails)
{
    mSampleDetails.push_back(sampleDetails);
}
// Read-only view of the per-sample details parsed/accumulated for this run.
const Vector<TrackRunBox::SampleDetails>& TrackRunBox::getSampleDetails() const
{
    return mSampleDetails;
}
// Provide defaults (from tfhd/trex) used by parseBox() for samples whose
// duration/size/flags are not explicitly present in the run.
void TrackRunBox::setSampleDefaults(MOVIEFRAGMENTS::SampleDefaults& sampleDefaults)
{
    mSampleDefaultsSet = true;
    mSampleDefaults    = sampleDefaults;
}
// Serialize the box: each optional field is written only when its presence
// flag is set.  The field order here must exactly mirror parseBox().
void TrackRunBox::writeBox(ISOBMFF::BitStream& bitstr) const
{
    writeFullBoxHeader(bitstr);
    bitstr.write32Bits(mSampleCount);
    if ((getFlags() & TrackRunFlags::DataOffsetPresent) != 0)
    {
        bitstr.write32Bits(static_cast<uint32_t>(mDataOffset));
    }
    if ((getFlags() & TrackRunFlags::FirstSampleFlagsPresent) != 0)
    {
        MOVIEFRAGMENTS::SampleFlags::write(bitstr, mFirstSampleFlags);
    }
    for (uint32_t i = 0; i < mSampleCount; i++)
    {
        if ((getFlags() & TrackRunFlags::SampleDurationPresent) != 0)
        {
            bitstr.write32Bits(mSampleDetails.at(i).version0.sampleDuration);
        }
        if ((getFlags() & TrackRunFlags::SampleSizePresent) != 0)
        {
            bitstr.write32Bits(mSampleDetails.at(i).version0.sampleSize);
        }
        // Per-sample flags are only written when first-sample flags are absent:
        // the two presence flags are mutually exclusive.
        if ((getFlags() & TrackRunFlags::FirstSampleFlagsPresent) == 0)
        {
            if ((getFlags() & TrackRunFlags::SampleFlagsPresent) != 0)
            {
                MOVIEFRAGMENTS::SampleFlags::write(bitstr, mSampleDetails.at(i).version0.sampleFlags);
            }
        }
        if ((getFlags() & TrackRunFlags::SampleCompositionTimeOffsetsPresent) != 0)
        {
            if (getVersion() == 0)
            {
                bitstr.write32Bits(mSampleDetails.at(i).version0.sampleCompositionTimeOffset);
            }
            else
            {
                // Version 1 uses a signed offset, stored through an unsigned write.
                bitstr.write32Bits(static_cast<uint32_t>(mSampleDetails.at(i).version1.sampleCompositionTimeOffset));
            }
        }
    }
    updateSize(bitstr);
}
// Deserialize the box, filling mSampleDetails with one entry per sample.
// Missing optional fields fall back to the defaults installed via
// setSampleDefaults() (or zero if none were set).  Field order mirrors writeBox().
void TrackRunBox::parseBox(ISOBMFF::BitStream& bitstr)
{
    parseFullBoxHeader(bitstr);
    mSampleCount = bitstr.read32Bits();
    // Defensive cap against malformed/hostile files.
    if (mSampleCount > IMPLEMENTATION_ABSOLUTE_MAX_SAMPLE_COUNT)
    {
        throw RuntimeError("Over max sample counts from TrackRunBox::parseBox");
    }
    if ((getFlags() & TrackRunFlags::DataOffsetPresent) != 0)
    {
        mDataOffset = static_cast<int32_t>(bitstr.read32Bits());
    }
    if ((getFlags() & TrackRunFlags::FirstSampleFlagsPresent) != 0)
    {
        mFirstSampleFlags = MOVIEFRAGMENTS::SampleFlags::read(bitstr);
    }
    SampleDetails sampleDetails;
    for (uint32_t i = 0; i < mSampleCount; i++)
    {
        if (mSampleDefaultsSet)
        {
            sampleDetails.version0.sampleDuration          = mSampleDefaults.defaultSampleDuration;
            sampleDetails.version0.sampleSize              = mSampleDefaults.defaultSampleSize;
            sampleDetails.version0.sampleFlags.flagsAsUInt = mSampleDefaults.defaultSampleFlags.flagsAsUInt;
        }
        else
        {
            // these should never be used if right boxes are present.
            sampleDetails.version0.sampleDuration          = 0;
            sampleDetails.version0.sampleSize              = 0;
            sampleDetails.version0.sampleFlags.flagsAsUInt = 0;
        }
        if ((getFlags() & TrackRunFlags::SampleDurationPresent) != 0)
        {
            sampleDetails.version0.sampleDuration = bitstr.read32Bits();
        }
        if ((getFlags() & TrackRunFlags::SampleSizePresent) != 0)
        {
            sampleDetails.version0.sampleSize = bitstr.read32Bits();
        }
        if ((getFlags() & TrackRunFlags::FirstSampleFlagsPresent) != 0)
        {
            // First sample inherits the first-sample flags verbatim.
            sampleDetails.version0.sampleFlags.flagsAsUInt = mFirstSampleFlags.flagsAsUInt;
            // Treat the remaining samples as non-sync samples
            if (i > 0)
            {
                sampleDetails.version0.sampleFlags.flags.sample_is_non_sync_sample = 1;
            }
        }
        else if ((getFlags() & TrackRunFlags::SampleFlagsPresent) != 0)
        {
            sampleDetails.version0.sampleFlags = MOVIEFRAGMENTS::SampleFlags::read(bitstr);
        }
        if ((getFlags() & TrackRunFlags::SampleCompositionTimeOffsetsPresent) != 0)
        {
            if (getVersion() == 0)
            {
                sampleDetails.version0.sampleCompositionTimeOffset = bitstr.read32Bits();
            }
            else
            {
                // Version 1 offsets are signed.
                sampleDetails.version1.sampleCompositionTimeOffset = static_cast<int32_t>(bitstr.read32Bits());
            }
        }
        else
        {
            if (getVersion() == 0)
            {
                sampleDetails.version0.sampleCompositionTimeOffset = 0;
            }
            else
            {
                sampleDetails.version1.sampleCompositionTimeOffset = 0;
            }
        }
        mSampleDetails.push_back(sampleDetails);
    }
}
| 30.896396 | 117 | 0.637702 | [
"vector"
] |
f6bf25ebfec51947e0b56aefff8f884b98e2574c | 25,105 | cpp | C++ | src/chess/game.cpp | utk003/chess-ai | 121ec015f9e2e8c116df320187e040bd7ec9c80c | [
"MIT"
] | 4 | 2020-12-01T18:51:30.000Z | 2021-12-08T19:29:14.000Z | src/chess/game.cpp | utk003/chess-ai | 121ec015f9e2e8c116df320187e040bd7ec9c80c | [
"MIT"
] | 3 | 2021-04-16T16:43:04.000Z | 2021-06-10T14:05:11.000Z | src/chess/game.cpp | utk003/chess-ai | 121ec015f9e2e8c116df320187e040bd7ec9c80c | [
"MIT"
] | 1 | 2021-12-08T19:29:17.000Z | 2021-12-08T19:29:17.000Z | // ------------------------------------------------------------------------------ //
// MIT License //
// //
// Copyright (c) 2020 Utkarsh Priyam //
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy //
// of this software and associated documentation files (the "Software"), to deal //
// in the Software without restriction, including without limitation the rights //
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //
// copies of the Software, and to permit persons to whom the Software is //
// furnished to do so, subject to the following conditions: //
// //
// The above copyright notice and this permission notice shall be included in all //
// copies or substantial portions of the Software. //
// //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //
// SOFTWARE. //
// ------------------------------------------------------------------------------ //
#include "game.h"
#include <string>
#include <iostream>
#include <sstream>
#include <vector>
#include <utility>
#include "piece.h"
#include "../util/thread_util.h"
#include "../graphics/opengl.h"
// Board Class
// Create an l x w board with every square unoccupied (null piece pointer)
// and no pawn-promotion choice pending.
game::Board::Board(int l, int w) {
  _length = l;
  _width = w;
  _pawn_upgrade_type = piece::PieceType::NONE;
  // One slot per square, all initially empty.
  _pieces.assign(static_cast<std::size_t>(_length) * _width, nullptr);
}
// Destructor: the board owns every piece on it and every move on the undo
// stack, so both are freed here.
game::Board::~Board() {
  piece::Piece *piece;
  for (int i = 0; i < _length * _width; ++i) {
    piece = _pieces[i];
    _pieces[i] = nullptr;  // null out before delete so no dangling entry remains
    delete piece;
  }
  _pieces.clear();

  game::Move *move;
  while (!_move_stack.empty()) {
    move = _move_stack.top();
    _move_stack.pop();
    delete move;
  }
}
// Count how many enemy pieces (relative to kingColor) attack square (r, c).
// Checks every attack pattern: rook/queen along ranks and files, bishop/queen
// along diagonals, adjacent enemy king, pawn captures and knight jumps.
// Long-range attacks stop at the first blocking piece of either color.
// Returns 0 (plus a debug assert) for an off-board square.
int game::Board::getPositionThreats(int r, int c, piece::PieceColor kingColor) const {
  if (!isValidPosition(r, c)) {
    DEBUG_ASSERT
    return 0;
  }
  piece::PieceColor enemyColor = !kingColor;
  int x, y, dangerCounter = 0;
  piece::Piece *checkPiece;

  // Check axis attacks (queen/rook)
  std::vector<std::vector<int>> axisCheck = {{1,  0},
                                             {-1, 0},
                                             {0,  1},
                                             {0,  -1}};
  for (std::vector<int> axis: axisCheck) {
    x = r + axis[0];
    y = c + axis[1];
    while (isValidPosition(x, y)) {
      checkPiece = getPiece(x, y);
      if (checkPiece->color() == enemyColor) {
        if (checkPiece->type().isQueen() || checkPiece->type().isRook())
          ++dangerCounter;
        break;  // any enemy piece terminates the ray
      }
      if (checkPiece->color() == kingColor) // Long range attacks can be blocked
        break;
      x += axis[0];
      y += axis[1];
    }
  }

  // Check diagonal attacks (queen/bishop)
  std::vector<std::vector<int>> diagCheck = {{1,  1},
                                             {1,  -1},
                                             {-1, 1},
                                             {-1, -1}};
  for (std::vector<int> diag: diagCheck) {
    x = r + diag[0];
    y = c + diag[1];
    while (isValidPosition(x, y)) {
      checkPiece = getPiece(x, y);
      if (checkPiece->color() == enemyColor) {
        if (checkPiece->type().isQueen() || checkPiece->type().isBishop())
          ++dangerCounter;
        break;
      }
      if (checkPiece->color() == kingColor) // Long range attacks can be blocked
        break;
      x += diag[0];
      y += diag[1];
    }
  }

  // Check king attacks (all 8 neighbors; the center square holds no enemy king)
  for (x = r - 1; x <= r + 1; ++x)
    for (y = c - 1; y <= c + 1; ++y) {
      if (!isValidPosition(x, y))
        continue;
      checkPiece = getPiece(x, y);
      if (checkPiece->color() == enemyColor && checkPiece->type().isKing())
        dangerCounter++;
    }

  std::vector<int> pm1 = {1, -1};

  // Check pawn attacks
  // Assumes white pawns advance toward higher row indices -- TODO confirm orientation.
  x = r + (enemyColor.isWhite() ? -1: 1); // if enemy is white, pawn attacks from below; otherwise from above
  for (int dc: pm1) {
    y = c + dc;
    if (!isValidPosition(x, y))
      continue;
    checkPiece = getPiece(x, y);
    if (checkPiece->color() == enemyColor && checkPiece->type().isPawn())
      dangerCounter++;
  }

  // Check knight attacks (all 8 (±1, ±2)/(±2, ±1) jumps)
  std::vector<std::vector<int>> knightMoves = {{1, 2},
                                               {2, 1}};
  for (std::vector<int> move: knightMoves)
    for (int mr: pm1)
      for (int mc: pm1) {
        x = r + mr * move[0];
        y = c + mc * move[1];
        if (!isValidPosition(x, y))
          continue;
        checkPiece = getPiece(x, y);
        if (checkPiece->color() == enemyColor && checkPiece->type().isKnight())
          dangerCounter++;
      }

  return dangerCounter; // Return total danger count
}
// Locate the king of the given color, scanning row-major from (0, 0).
// Returns {row, col} of the first matching king, or {-1, -1} if absent.
std::pair<int, int> game::Board::getKingPosition(piece::PieceColor color) const {
  for (int row = 0; row < _length; ++row)
    for (int col = 0; col < _width; ++col) {
      piece::Piece *candidate = getPiece(row, col);
      if (candidate->type().isKing() && candidate->color() == color)
        return {row, col};
    }
  return {-1, -1};  // no king of this color on the board
}
// Simulate moving the piece at (r, c) to (toR, toC) and report whether the
// mover's own king would remain safe.  The board is restored to its exact
// prior state before returning (the simulation places a temporary empty
// piece on the vacated square).
bool game::Board::canPieceMove(int r, int c, int toR, int toC) {
  // get piece indices
  int from = locMap(r, c), to = locMap(toR, toC);
  if (from < 0 || to < 0) {
    DEBUG_ASSERT
    return false;
  }

  // get pieces
  piece::PieceColor pieceColor = _pieces[from]->color();
  if (!pieceColor.isColored()) {
    DEBUG_ASSERT  // trying to move an empty square
    return false;
  }

  // save replaced piece (b/c to is overwritten by from)
  piece::Piece *copy = _pieces[to];
  auto *newPiece = new piece::Piece();

  // simulate move
  _pieces[to] = _pieces[from];
  _pieces[from] = newPiece;

  // score threats
  bool isSafe = isKingSafe(pieceColor);

  // undo move
  _pieces[from] = _pieces[to];
  _pieces[to] = copy;

  // free memory
  delete newPiece;

  // return result
  return isSafe; // move allowed iff king is safe post-move
}
void game::Board::getMovesFromSquare(int r, int c, std::vector<game::Move> *moves) {
if (moves == nullptr)
return;
if (isValidPosition(r, c)) {
int r2, c2;
for (r2 = 0; r2 < 8; ++r2)
for (c2 = 0; c2 < 8; ++c2) {
if (r == r2 && c == c2)
continue;
std::vector<Move> poss_moves = Move::getMoves(r, c, r2, c2, this);
for (const Move &move: poss_moves)
if (move.verify(this))
moves->push_back(move);
}
} else DEBUG_ASSERT
}
// Collect all legal moves for both sides: white-piece moves go into *white,
// black-piece moves into *black (either pointer may be null to skip that side).
//
// Fix: scan the board's actual dimensions (_length x _width) instead of a
// hard-coded 8, consistent with the rest of the Board interface.
void game::Board::getPossibleMoves(std::vector<game::Move> *white, std::vector<game::Move> *black) {
  for (int r1 = 0; r1 < _length; ++r1)
    for (int c1 = 0; c1 < _width; ++c1)
      switch (getPiece(r1, c1)->color()) {
        case piece::PieceColor::WHITE:
          getMovesFromSquare(r1, c1, white);
          break;
        case piece::PieceColor::BLACK:
          getMovesFromSquare(r1, c1, black);
          break;
        default:
          continue;  // empty square
      }
}
// Execute `move` on this board, taking ownership of the pointer (it is pushed
// onto the undo stack and freed by undoMove()/~Board).  When `game` is given,
// the active player color and the on-screen board are refreshed.
// Returns true iff the move captured a piece.
bool game::Board::doMove(Move *move, Game *game) {
  if (move == nullptr) {
    DEBUG_ASSERT
    return false;
  }
  _move_stack.push(move);
  _move_count++;
  bool isCaptureMove = move->doMove(this);
  if (game != nullptr) {
    // Whose turn it is follows the parity of the move counter.
    game->_current_player_color = _move_count % 2 == 0 ? piece::PieceColor::WHITE: piece::PieceColor::BLACK;
    game->updateGraphicsBoard(this);
  }
  return isCaptureMove;
}
// Pop and revert the top `depth` moves (recursively), freeing each popped Move.
// Note: _move_count is *incremented* on undo too -- only its parity matters
// (doMove's +1 followed by undo's +1 restores the original turn parity).
// UI/game state is refreshed only once, at the innermost (depth == 1) level.
void game::Board::undoMove(Game *game, const int depth) {
  // depth > 0 is established before the size comparison, so the int-vs-size_t
  // compare is safe here.
  if (depth <= 0 || _move_stack.size() < depth) {
    DEBUG_ASSERT
    return;
  }

  _move_count++;
  Move *move = _move_stack.top();
  move->undoMove(this);
  _move_stack.pop();
  delete move;

  if (depth == 1) {
    if (game != nullptr) {
      game->_current_player_color = _move_count % 2 == 0 ? piece::PieceColor::WHITE: piece::PieceColor::BLACK;
      game->updateGraphicsBoard(this);
      game->resetSelection();
      game->updateGameState();
    }
  } else
    undoMove(game, depth - 1);
}
// Return a heap-allocated copy of the most recent move (caller owns it),
// or nullptr when no move has been played.
game::Move *game::Board::getLastMove() const {
  if (_move_stack.empty())
    return nullptr;
  return new Move(*_move_stack.top());
}
// Deep-copy the board: every piece is cloned and the move counter carried
// over.  The undo stack is NOT duplicated -- only the top move is copied (so
// en-passant detection still works), and the pending promotion choice resets.
// Caller owns the returned board.
game::Board *game::Board::clone() const {
  auto *newBoard = new Board(_length, _width);

  int i, total_count = _length * _width;
  for (i = 0; i < total_count; ++i)
    newBoard->_pieces[i] = _pieces[i]->clone();

  newBoard->_pawn_upgrade_type = piece::PieceType::NONE;
  newBoard->_move_count.store(_move_count.operator int());
  if (!_move_stack.empty())
    newBoard->_move_stack.push(new Move(*_move_stack.top()));

  return newBoard;
}
// Sum piece_scorer over every slot on the board (empty squares included --
// the scorer sees the placeholder piece too).
double game::Board::score(const std::function<double(piece::Piece *)> &piece_scorer) const {
  double score = 0;
  for (const auto &piece: _pieces)
    score += piece_scorer(piece);
  return score;
}
// Position-aware variant: the scorer also receives each square's (row, col),
// enabling positional evaluation terms.
double game::Board::score(const std::function<double(piece::Piece *, int, int)> &piece_scorer) const {
  double score = 0;
  for (int r = 0; r < _length; ++r)
    for (int c = 0; c < _width; ++c)
      score += piece_scorer(getPiece(r, c), r, c);
  return score;
}
namespace game {
// Deserialize a board: first frees all previous state (pieces + undo stack),
// then reads "<length> <width>" followed by one "<index> - <piece>" line per
// square, matching the format emitted by operator<<.
std::istream &operator>>(std::istream &input, Board *&b) {
  // memory management -> clear old board completely
  while (!b->_move_stack.empty()) {
    delete b->_move_stack.top();
    b->_move_stack.pop();
  }
  b->_pawn_upgrade_type = piece::PieceType::NONE;
  for (auto &p: b->_pieces)
    delete p;
  b->_pieces.clear();

  // Load new board in
  input >> b->_length >> b->_width;
  int max_index = b->_length * b->_width;
  b->_pieces.resize(max_index);

  int ind;
  std::string spacer;
  for (int i = 0; i < max_index; ++i) {
    input >> ind >> spacer;  // `spacer` consumes the "-" separator
    if (ind == i)
      input >> b->_pieces[i];
    else DEBUG_ASSERT // -> Malformed input file!!
    getline(input, spacer); // skip to end of line
  }
  return input;
}
// Serialize a board as "<length> <width>" then one "<index> - <piece>" line
// per square; the inverse of operator>>.
std::ostream &operator<<(std::ostream &output, Board *&b) {
  output << b->_length << " " << b->_width << std::endl;
  for (int i = 0; i < b->_length * b->_width; ++i)
    output << i << " - " << b->_pieces[i] << std::endl;
  return output;
}
}
// Serialize this board to a file.  When pad_file_path is true the path is
// wrapped as "game_state/<file_path>.txt".  `do_later` lets callers append
// extra data to the same stream (e.g. player/game metadata).
void game::Board::saveToFile(const std::string &file_path,
                             const std::function<void(std::ofstream &)> &do_later,
                             bool pad_file_path) {
  std::ofstream out_stream(pad_file_path ? "game_state/" + file_path + ".txt": file_path);
  if (out_stream.is_open()) {
    // operator<< takes Board*&, so bind `this` to a local pointer first.
    Board *b = this;
    out_stream << b;
    do_later(out_stream);
  } else DEBUG_ASSERT
}
// Load a board previously written by saveToFile().  `do_later` may consume
// any trailing data appended after the board block.
void game::Board::loadFromFile(const std::string &file_path, const std::function<void(std::ifstream &)> &do_later) {
  std::ifstream in_stream(file_path);
  if (in_stream.is_open()) {
    // operator>> takes Board*&, so bind `this` to a local pointer first.
    Board *b = this;
    in_stream >> b;
    do_later(in_stream);
  } else DEBUG_ASSERT
}
// Move Class
// Expand (r1,c1)->(r2,c2) into candidate Move objects.  A pawn reaching the
// first/last rank fans out into one candidate per promotion piece; everything
// else yields a single move with no promotion.  Candidates are NOT legality
// checked here -- callers run Move::verify().
std::vector<game::Move> game::Move::getMoves(int r1, int c1, int r2, int c2, game::Board *b) {
  std::vector<game::Move> moves;

  piece::Piece *piece = b->getPiece(r1, c1);
  if (piece != nullptr) {
    if (piece->type().isPawn() && (r2 == 0 || r2 == 7)) {
      moves.emplace_back(r1, c1, r2, c2, piece::PieceType::QUEEN);
      moves.emplace_back(r1, c1, r2, c2, piece::PieceType::ROOK);
      moves.emplace_back(r1, c1, r2, c2, piece::PieceType::KNIGHT);
      moves.emplace_back(r1, c1, r2, c2, piece::PieceType::BISHOP);
    } else
      moves.emplace_back(r1, c1, r2, c2, piece::PieceType::NONE);
  } else DEBUG_ASSERT

  return moves;
}
// Construct a move from (r1,c1) to (r2,c2); promotionType is NONE unless this
// is a pawn-promotion move.
game::Move::Move(int r1, int c1, int r2, int c2, piece::PieceType promotionType) {
  _start_row = r1;
  _start_col = c1;
  _end_row = r2;
  _end_col = c2;
  _pawn_promotion_type = promotionType;
}
// Copy ctor: copies only the move coordinates and promotion type.  The undo
// bookkeeping (_other_replaced_pieces/_piece_setting_changes) is deliberately
// left empty -- it belongs to a move that has actually been executed.
game::Move::Move(const Move &m) {
  _start_row = m._start_row;
  _start_col = m._start_col;
  _end_row = m._end_row;
  _end_col = m._end_col;
  _pawn_promotion_type = m._pawn_promotion_type;
}
// Destructor: the move owns every captured/replaced piece it recorded for
// undo, so free them all, then drop the bookkeeping maps.
//
// Cleanup: iterate the map directly instead of re-looking up each key with
// operator[] inside the loop (the old code did two extra lookups per entry).
game::Move::~Move() {
  for (auto &entry : _other_replaced_pieces) {
    delete entry.second;
    entry.second = nullptr;
  }
  _other_replaced_pieces.clear();
  _piece_setting_changes.clear();
}
// Copy assignment: takes over m's coordinates/promotion type and *discards*
// this move's own undo bookkeeping (owned pieces are freed).  As with the
// copy ctor, m's bookkeeping is intentionally not copied.
game::Move &game::Move::operator=(const Move &m) {
  _start_row = m._start_row;
  _start_col = m._start_col;
  _end_row = m._end_row;
  _end_col = m._end_col;
  _pawn_promotion_type = m._pawn_promotion_type;

  for (auto &it : _other_replaced_pieces)
    delete it.second;
  _other_replaced_pieces.clear();
  _piece_setting_changes.clear();

  return *this;
}
// Full legality check for this move on `board`:
//  1) the source square holds a real piece,
//  2) the destination does not hold a same-colored piece,
//  3) the piece's own movement rules accept the pattern, and
//  4) the mover's king is not left in check.
bool game::Move::verify(Board *board) const {
  // Implicit calls to:
  piece::Piece *p1 = board->getPiece(_start_row, _start_col);
  if (p1 == nullptr) {
    DEBUG_ASSERT
    return false;
  }
  piece::Piece *p2 = board->getPiece(_end_row, _end_col);
  if (p2 == nullptr) {
    DEBUG_ASSERT
    return false;
  }

  if (p1->type().isEmpty()) // fail if not moving a piece
    return false;

  if (p1->color() == p2->color()) // fail if moving to same color piece
    return false;

  if (!p1->verifyMove(*this, board)) // fail if invalid move pattern
    return false;

  // succeed iff moving piece does NOT put king in check/checkmate
  return board->canPieceMove(_start_row, _start_col, _end_row, _end_col);
}
// Decide whether this move is a capture.  Landing on an occupied square is
// always a capture; the only capture onto an *empty* square is a pawn moving
// diagonally (en passant), i.e. a pawn that changes file.
bool game::Move::isAttack(Board *board) const {
  const bool target_occupied = !board->getPiece(_end_row, _end_col)->type().isEmpty();
  if (target_occupied)
    return true;

  const bool moving_pawn = board->getPiece(_start_row, _start_col)->type().isPawn();
  return moving_pawn && (_start_col != _end_col);
}
// Record (and take ownership of) a piece displaced at (r, c), so undoMove()
// can restore it.  Null pieces are ignored.
void game::Move::addReplacedPiece(int r, int c, piece::Piece *p) {
  if (p != nullptr)
    _other_replaced_pieces[std::pair<int, int>(r, c)] = p;
}
// Remember the pre-move boolean flag (moved / moved2x) of the piece at (r, c).
// OR-ing means: once recorded as true for a square, it stays true even if the
// square is touched again later in the same move.
void game::Move::addSettingChange(int r, int c, bool oldSetting) {
  std::pair<int, int> coords = {r, c};
  if (!_piece_setting_changes.count(coords))
    _piece_setting_changes[coords] = false;
  _piece_setting_changes[coords] |= oldSetting;
}
// Execute this move on `board`, recording everything undoMove() needs:
// every displaced piece (capture, castling rook, promoted pawn) and every
// moved/moved2x flag that changes.  Handles castling, promotion (waiting for
// the UI to supply a promotion type if none was preset) and en passant.
// Returns true iff a piece was captured.
bool game::Move::doMove(Board *board) {
  std::vector<piece::Piece *> &pieces = getBoard(board);

  // En-passant eligibility lasts one ply: clear moved2x on every pawn,
  // remembering the old value so undo can restore it.
  piece::Piece *p;
  int r, c;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) {
      p = pieces[locMap(board, r, c)];
      if (p->type().isPawn() && ((piece::Pawn *) p)->moved2x()) {
        addSettingChange(r, c, true);
        update_flag((piece::Pawn *) p, false);
      }
    }

  // Basic move: lift the piece, leave an empty placeholder behind it.
  int r1 = _start_row, c1 = _start_col, r2 = _end_row, c2 = _end_col;
  int from = locMap(board, r1, c1), to = locMap(board, r2, c2);

  piece::Piece *removedPiece = pieces[to];
  pieces[to] = pieces[from];
  pieces[from] = new piece::Piece();

  addReplacedPiece(r2, c2, removedPiece);
  // Preserve the captured piece's stateful flag (if any) for undo.
  switch (removedPiece->type()) {
    case piece::PieceType::ROOK:
      addSettingChange(r2, c2, ((piece::Rook *) removedPiece)->moved());
      break;
    case piece::PieceType::KING:
      addSettingChange(r2, c2, ((piece::King *) removedPiece)->moved());
      break;
    case piece::PieceType::PAWN:
      addSettingChange(r2, c2, ((piece::Pawn *) removedPiece)->moved2x());
      break;
    default:
      break;
  }

  piece::Piece *piece = board->getPiece(r2, c2);
  piece::PieceType type = piece->type();

  // Piece Settings: flip moved/moved2x flags on the moving piece.
  switch (type) {
    case piece::PieceType::ROOK:
      if (!((piece::Rook *) piece)->moved()) {
        addSettingChange(r1, c1, false);
        update_flag((piece::Rook *) piece, true);
      }
      break;
    case piece::PieceType::KING:
      if (!((piece::King *) piece)->moved()) {
        addSettingChange(r1, c1, false);
        update_flag((piece::King *) piece, true);
      }
      break;
    case piece::PieceType::PAWN:
      if (abs(r1 - r2) == 2) {  // double-step: becomes an en-passant target
        addSettingChange(r1, c1, false);
        update_flag((piece::Pawn *) piece, true);
      }
      break;
    default:
      break;
  }

  // Other changed pieces: castling rook shuffle, promotion, en passant.
  switch (type) {
    case piece::PieceType::KING:
      if (abs(c1 - c2) == 2) {  // castling: king moved two files
        int rookCol = (c2 > c1) * 7; // if c2 > c1, then king moved right, so rookCol = 7; else, rookCol = 0
        int newC = (c1 + c2) / 2;    // rook lands between king's start and end

        int oldPos = locMap(board, r1, rookCol);
        int newPos = locMap(board, r2, newC);

        // clone rook b/c it gets deleted in undoMove() while restoring pieces -> segfault
        addReplacedPiece(r1, rookCol, pieces[oldPos]->clone());
        addReplacedPiece(r2, newC, pieces[newPos]);

        pieces[newPos] = pieces[oldPos];
        pieces[oldPos] = new piece::Piece();
      }
      break;
    case piece::PieceType::PAWN:
      if (r2 == 7 || r2 == 0) {  // promotion rank reached
        piece::Piece *newPiece = nullptr;
        int timer = 0;
        piece::PieceType upgrade_type = _pawn_promotion_type;
        // Poll the board's pawn_upgrade_type until the UI supplies a choice.
        while (newPiece == nullptr && timer++ < 600) { // Time out after 600 seconds = 10 minutes
          switch (upgrade_type) {
            case piece::PieceType::QUEEN:
            case piece::PieceType::ROOK:
            case piece::PieceType::KNIGHT:
            case piece::PieceType::BISHOP:
              newPiece = upgrade_type.getPieceOfType(piece->color());
              break;
            default:
              thread::sleep(1); // Sleep for 1 second
              upgrade_type = board->pawn_upgrade_type();
              break;
          }
        }
        // timer hit 600 <--> Program time out
        if (newPiece == nullptr) {
          std::cout << "Program timed out" << std::endl;
          FATAL_ASSERT
        }

        addReplacedPiece(r1, c1, piece);  // keep the pawn itself for undo
        pieces[locMap(board, r2, c2)] = newPiece;
      }
      if (abs(c1 - c2) == 1 && removedPiece->type().isEmpty()) {
        // Diagonal pawn move onto an empty square: en passant -- the captured
        // pawn sits beside the start square, not on the destination.
        int captured = locMap(board, r1, c2);
        removedPiece = pieces[captured];
        pieces[captured] = new piece::Piece();
        addReplacedPiece(r1, c2, removedPiece);
      }
      break;
    default:
      break;
  }

  return !removedPiece->type().isEmpty();
}
// Restore a recorded boolean flag on the piece at (r, c): rook/king `moved`,
// pawn `moved2x`.  Other piece types carry no flag and are ignored.
void game::Move::updateSetting(Board *board, int r, int c, bool setting) {
  piece::Piece *piece = getBoard(board)[locMap(board, r, c)];
  switch (piece->type()) {
    case piece::PieceType::ROOK:
      update_flag((piece::Rook *) piece, setting);
      break;
    case piece::PieceType::KING:
      update_flag((piece::King *) piece, setting);
      break;
    case piece::PieceType::PAWN:
      update_flag((piece::Pawn *) piece, setting);
      break;
    default:
      break;
  }
}
// Revert this move on `board` using the bookkeeping captured by doMove():
// move the main piece back, reinstate every replaced piece (captures, the
// castling rook, a promoted pawn) and restore all recorded flags.
void game::Move::undoMove(Board *board) const {
  std::vector<piece::Piece *> &pieces = getBoard(board);

  int r1 = _start_row, c1 = _start_col, r2 = _end_row, c2 = _end_col;
  int i1 = locMap(board, r1, c1), i2 = locMap(board, r2, c2);

  delete pieces[i1]; // Delete old replacement piece!!!
  pieces[i1] = pieces[i2]; // Move back main piece
  pieces[i2] = nullptr; // Get rid of copy of moved piece so we don't segfault when putting back pieces (below)

  // Put back all other pieces (clones, so this Move keeps ownership of its copies)
  int index;
  for (auto &it : _other_replaced_pieces) {
    index = locMap(board, it.first.first, it.first.second);
    delete pieces[index]; // free memory to avoid memory leaks
    pieces[index] = it.second->clone();
  }

  // Fix move states
  for (auto &it : _piece_setting_changes)
    updateSetting(board, it.first.first, it.first.second, it.second);
}
// Human-readable form: "(r1, c1) to (r2, c2) -> <promotion type>".
std::string game::Move::toString() const {
  piece::PieceType t = _pawn_promotion_type;
  std::ostringstream ss;
  ss << "(" << _start_row << ", " << _start_col << ") to (" << _end_row << ", " << _end_col << ") -> " << t;
  return ss.str();
}
// Game Class
// Convenience ctor: delegates to the Board-owning ctor with a fresh board.
game::Game::Game(int length, int width) : Game(new Board(length, width)) {}
// Main ctor: takes ownership of `b`.  Players and graphics start unset
// (installed later via setPlayer()/setGraphics()); white moves first.
game::Game::Game(Board *b) {
  _board = b;
  _graphics = nullptr;

  _started = false;
  _over = false;

  _selected_x = -1;  // -1/-1 means "no square selected"
  _selected_y = -1;

  _white_player = nullptr;
  _black_player = nullptr;

  _is_move_complete = false;
  _is_ready_to_delete = false;

  _current_player_color = piece::PieceColor::WHITE;
  _result = game::GameResult::NONE;

  _moves_since_last_capture = 0;
}
// Destructor: the game owns the board, both players and the graphics object.
// Fix: _graphics was never freed here even though setGraphics() deletes the
// previously-owned instance -- that leaked the final graphics object.
game::Game::~Game() {
  delete _board;
  delete _graphics;
  delete _white_player;
  delete _black_player;
  _white_moves.clear();
  _black_moves.clear();
}
// Install (and own) a player of the given type for the given side, replacing
// and freeing any previous player of that color.  Rejects PieceColor::NONE.
void game::Game::setPlayer(piece::PieceColor color, player::PlayerType type) {
  if (!color.isColored()) {
    DEBUG_ASSERT
    return;
  }

  player::Player *player = type.getPlayerOfType(this, color);
  if (color.isWhite()) {
    delete _white_player;
    _white_player = player;
  } else /* if (color.isBlack()) */ {
    delete _black_player;
    _black_player = player;
  }
}
// Take ownership of a new graphics backend, freeing the previous one.
void game::Game::setGraphics(graphics::OpenGL *graphics) {
  delete _graphics;
  _graphics = graphics;
}
// Push the given board state to the graphics layer; no-op in headless mode.
void game::Game::updateGraphicsBoard(Board *board) {
  if (_graphics != nullptr)
    _graphics->updateGraphics(board);
}
// Start the game loop on a background thread: each iteration asks the
// current player for a move until _over is set.  Requires both players to be
// installed first.  _is_ready_to_delete flips back to true when the loop
// thread exits (see waitForDelete()).
void game::Game::startGame() {
  if (_white_player == nullptr) {
    DEBUG_ASSERT
    return;
  }
  if (_black_player == nullptr) {
    DEBUG_ASSERT
    return;
  }

  _is_ready_to_delete = false;
  _started = true;

  generatePossibleMoveVectors();

  thread::create([&] {
    while (!_over) {
      _is_move_complete = false;
      if (_current_player_color.isWhite())
        _white_player->playNextMove();
      else
        _black_player->playNextMove();
    }
    _is_ready_to_delete = true;  // signal: loop thread has finished
  });
}
// Request termination: stops the game loop and unblocks any player waiting
// on move completion.
void game::Game::endGame() {
  _over = true;
  _is_move_complete = true;
}
// Block until the background game-loop thread (startGame) has exited, so the
// Game can be destroyed safely.
void game::Game::waitForDelete() const {
  thread::wait_for(_is_ready_to_delete);
}
// Deep-copy the game state (board, turn, progress flags, move lists) WITHOUT
// players or graphics -- intended for AI search/simulation.  Caller owns the
// returned game.
game::Game *game::Game::clone() const {
  Game *copy = new Game(_board->clone());
  copy->_current_player_color = _current_player_color;
  // selected x,y are already -1
  // players are already nullptr
  copy->_is_move_complete = false;
  copy->_started = _started;
  copy->_over = _over;
  copy->_moves_since_last_capture = _moves_since_last_capture;
  copy->_result = _result;
  copy->generatePossibleMoveVectors();
  return copy;
}
// UI click handler.  Clicking one's own piece selects it (clicking it again
// deselects); with a selection active, clicking any other square attempts
// the move.  Also clears the pending pawn-promotion choice for the new move.
void game::Game::selectSquare(int x, int y) {
  if (!_started) {
    DEBUG_ASSERT
    return;
  }
  if (!_board->isValidPosition(x, y)) {
    DEBUG_ASSERT
    return;
  }

  // Reset pawn upgrade type for move
  _board->set_pawn_upgrade_type(piece::PieceType::NONE);

  if (_board->getPiece(x, y)->color() == _current_player_color) {
    if (_selected_x == x && _selected_y == y)
      resetSelection();  // toggling the same square clears the selection
    else {
      _selected_x = x;
      _selected_y = y;
    }
  } else if (_selected_x != -1 && _selected_y != -1) {
    bool moveSucceeded = tryMove(Move(_selected_x, _selected_y, x, y, _board->pawn_upgrade_type()));
    if (moveSucceeded)
      resetSelection();
  }
}
// Validate and play `move`.  On success: updates the 50-non-capture-move
// stalemate counter, refreshes the game state (checkmate/stalemate) and
// signals the game loop that the move finished.  Returns false if illegal.
bool game::Game::tryMove(const Move &move) {
  // check if move is valid
  if (!move.verify(_board))
    return false;

  // do move (Board takes ownership of the heap copy)
  bool isCapture = _board->doMove(new Move(move), this);

  // check for 50 move no-capture stalemate
  if (!isCapture)
    _moves_since_last_capture++;
  else
    _moves_since_last_capture = 0;

  if (_moves_since_last_capture >= 50) { // 50 non-capture moves = Stalemate
    _over = true;
    _result = game::GameResult::STALEMATE;
  } else
    updateGameState();

  // move complete
  _is_move_complete = true;
  return true;
}
// Recompute legal moves and settle the game status: no legal moves means
// stalemate (king safe) or a win for the other side (king in check).  If the
// game had been marked over but moves reappear (e.g. after an undo), the
// game loop is restarted.
void game::Game::updateGameState() {
  // check for checkmate/stalemate
  generatePossibleMoveVectors();
  int movesSize = _current_player_color.isWhite() ? _white_moves.size(): _black_moves.size();
  if (movesSize == 0) {
    _over = true;
    if (_board->isKingSafe(_current_player_color))
      _result = game::GameResult::STALEMATE;
    else
      _result = _current_player_color.isWhite() ? game::GameResult::BLACK: game::GameResult::WHITE;
  } else {
    if (_over) {
      // Game was previously finished but is playable again: resume.
      _over = false;
      _result = game::GameResult::NONE;
      startGame();
    }
  }
}
// Rebuild the cached legal-move lists for both sides from the current board.
void game::Game::generatePossibleMoveVectors() {
  _white_moves.clear();
  _black_moves.clear();
  _board->getPossibleMoves(&_white_moves, &_black_moves);
}
// Convenience overload: NONE matches neither color filter, so both sides' moves are returned.
std::vector<game::Move> game::Game::possibleMoves() const { return possibleMoves(piece::PieceColor::NONE); }
// Return the cached legal moves for the requested side; any non-WHITE,
// non-BLACK color (e.g. NONE) yields both sides' moves, white first.
std::vector<game::Move> game::Game::possibleMoves(piece::PieceColor color) const {
  std::vector<Move> moves;
  if (!color.isBlack()) // include white moves
    moves.insert(moves.end(), _white_moves.begin(), _white_moves.end());
  if (!color.isWhite()) // include black moves
    moves.insert(moves.end(), _black_moves.begin(), _black_moves.end());
  return moves;
}
"vector"
] |
f6bfc0fa556dfba7591e5fdb8f7fd9cc90ccdaee | 6,449 | hpp | C++ | lang.cpp/Algorithms/Trees/BinaryHeap.hpp | gahcep/Algorithms | 33e42023b29bbc9bea629df8835e4ab7fea03c89 | [
"MIT"
] | 1 | 2015-04-25T12:02:16.000Z | 2015-04-25T12:02:16.000Z | lang.cpp/Algorithms/Trees/BinaryHeap.hpp | gahcep/Algorithms | 33e42023b29bbc9bea629df8835e4ab7fea03c89 | [
"MIT"
] | null | null | null | lang.cpp/Algorithms/Trees/BinaryHeap.hpp | gahcep/Algorithms | 33e42023b29bbc9bea629df8835e4ab7fea03c89 | [
"MIT"
] | null | null | null | #pragma once
#include <initializer_list>
#include <iostream>
#include <stdexcept>
#include <vector>

#include "../Abstractions/TypePredicates.h"
// Min-heap over an arithmetic element type, backed by a random-access
// container (Cont).  Standard array layout: children of i are 2i+1 / 2i+2.
template <class Cont>
class Heap
{
    // Allow only integral or floating-point types
    static_assert(std::is_arithmetic<typename Cont::value_type>::value, "Given type not allowed");

    // Check that container has random access iterator
    static_assert(HasRandomAccessIterator<Cont>::value,
        "Please provide a valid container type with random access iterator");

public:
    using T = typename Cont::value_type;

    // Default ctor: empty heap.
    Heap() = default;

    // Build a heap from the given values.
    Heap(std::initializer_list<T> values) { make_heap(values); }
    Heap(Cont& values) { make_heap(values); }

    // Copy ctor / copy assign: element-wise copy of the backing store
    // (replaces the old resize + std::copy, which did the same work by hand).
    Heap(const Heap& other) : heap_(other.heap_) {}

    Heap& operator=(const Heap& other)
    {
        if (this == &other) return *this;
        heap_ = other.heap_;
        return *this;
    }

    // Move ctor / move assign: steal the backing store directly.
    // Fix: the old resize-then-swap performed a needless allocation and left
    // the moved-from heap holding default-constructed elements.
    Heap(Heap&& other) noexcept(std::is_nothrow_move_constructible<Cont>::value)
        : heap_(std::move(other.heap_)) {}

    Heap& operator=(Heap&& other) noexcept(std::is_nothrow_move_assignable<Cont>::value)
    {
        if (this == &other) return *this;
        heap_ = std::move(other.heap_);
        return *this;
    }

    // True when no elements are stored.
    inline auto empty() const -> bool { return heap_.size() == 0; }
    // Number of stored elements.
    inline auto size() const -> size_t { return heap_.size(); }

    // Check whether a sequence already satisfies the heap property.
    auto is_heap(std::initializer_list<T> list) -> bool;
    auto is_heap(Cont& list) -> bool;

    // Child queries for the node at index idx.
    auto has_childs(size_t idx) const -> std::pair<bool, bool>;
    auto get_childs(size_t idx) const -> std::pair<T, T>;

    // (Re)build the heap from the given values.
    auto make_heap(std::initializer_list<T> values) -> void;
    auto make_heap(Cont& values) -> void;
    auto make_heap(Cont&& values) -> void;

    auto insert(T key) -> void;
    auto get_min() const -> T;
    auto extract_min() -> T;

    // Navigation helpers; the pair's bool reports whether the child exists.
    auto parent(size_t idx) const -> T;
    auto left(size_t idx) const -> std::pair<bool, T>;
    auto right(size_t idx) const -> std::pair<bool, T>;

    auto print() const -> void;

private:
    // Restore the heap property upward / downward from idx.
    auto sift_up(size_t idx) -> void;
    auto sift_down(size_t idx) -> void;
    auto make_heap() -> void;

    Cont heap_;
};
// Dump the heap contents to stdout in storage order, one element per
// line; prints a notice when the heap is empty.
// Fix: the original range-for `for (auto x& : heap_)` was a syntax
// error (`auto x&` instead of `auto& x`) and did not compile.
template <class Cont>
auto Heap<Cont>::print() const -> void
{
    if (empty())
        std::cout << "Heap is empty" << std::endl;

    for (const auto& x : heap_)
        std::cout << x << " " << std::endl;
}
// Restore the min-heap property by bubbling the element at idx up
// towards the root while it is smaller than its parent.
// Fix: the out-of-range error message wrongly said "sift_down()".
template <class Cont>
auto Heap<Cont>::sift_up(size_t idx) -> void
{
    if (heap_.empty()) return;

    if (idx >= heap_.size())
        throw std::length_error("sift_up(): Index out of range");

    size_t parent;
    while (idx > 0)
    {
        parent = (idx - 1) / 2;

        // Parent already smaller: heap property holds, stop.
        if (heap_[parent] < heap_[idx])
            return;

        std::swap(heap_[parent], heap_[idx]);
        idx = parent;
    }
}
// Restore the min-heap property by pushing the element at idx down:
// swap it with its smallest child until neither child is smaller.
template <class Cont>
auto Heap<Cont>::sift_down(size_t idx) -> void
{
    if (heap_.empty()) return;

    if (idx >= heap_.size())
        throw std::length_error("sift_down(): Index out of range");

    for (;;)
    {
        const size_t child_l = 2 * idx + 1;
        const size_t child_r = child_l + 1;
        size_t smallest = idx;

        if (child_l < heap_.size() && heap_[child_l] < heap_[smallest])
            smallest = child_l;
        if (child_r < heap_.size() && heap_[child_r] < heap_[smallest])
            smallest = child_r;

        // Node is not larger than either child: done.
        if (smallest == idx)
            return;

        std::swap(heap_[idx], heap_[smallest]);
        idx = smallest;
    }
}
// Return (exists, value) for the left child of the node at idx;
// exists is false (and value is T{}) when there is no left child.
template <class Cont>
auto Heap<Cont>::left(size_t idx) const -> std::pair<bool, T>
{
    if (idx >= heap_.size())
        throw std::length_error("left(): Index out of range");

    const size_t child = 2 * idx + 1;
    if (child < heap_.size())
        return std::make_pair(true, heap_[child]);

    return std::make_pair(false, T{});
}
// Return (exists, value) for the right child of the node at idx;
// exists is false (and value is T{}) when there is no right child.
// Fix: the original spelled the pair as std::pair<bool, Cont::value_type>
// without the required `typename` (ill-formed before C++20) and was
// inconsistent with left(), which uses the class alias T.
template <class Cont>
auto Heap<Cont>::right(size_t idx) const -> std::pair<bool, T>
{
    if (idx >= heap_.size())
        throw std::length_error("right(): Index out of range");

    std::pair<bool, T> value = std::make_pair(false, T{});

    size_t right = 2 * idx + 2;
    if (right < heap_.size())
    {
        value.first = true;
        value.second = heap_[right];
    }

    return value;
}
// Return the value of the parent of the node at idx.
// The root (idx == 0) is treated as its own parent.
template <class Cont>
auto Heap<Cont>::parent(size_t idx) const -> T
{
    if (idx >= heap_.size())
        throw std::length_error("parent(): Index out of range");

    return (idx == 0) ? heap_[0] : heap_[(idx - 1) / 2];
}
// Add a new key: append it as the deepest leaf, then restore the
// heap order upwards from that position.
template <class Cont>
auto Heap<Cont>::insert(T key) -> void
{
    const size_t pos = heap_.size();
    heap_.push_back(key);
    sift_up(pos);
}
// Return (without removing) the smallest element, i.e. the root.
// Fix: the original read heap_[0] unconditionally, which is an
// out-of-bounds access on an empty heap; now throws std::length_error,
// matching the error style of the other accessors.
template <class Cont>
auto Heap<Cont>::get_min() const -> T
{
    if (heap_.empty())
        throw std::length_error("get_min(): Heap is empty");

    return heap_[0];
}
// Remove and return the smallest element (the root): save the root,
// move the last leaf into its place, shrink, then sift the new root
// down into position.
// Fixes: `Cont::value_type value` lacked the required `typename`
// (ill-formed before C++20) — use the class alias T instead; and an
// empty heap now throws instead of indexing out of bounds.
template <class Cont>
auto Heap<Cont>::extract_min() -> T
{
    if (heap_.empty())
        throw std::length_error("extract_min(): Heap is empty");

    T value = heap_[0];
    heap_[0] = heap_[heap_.size() - 1];
    heap_.erase(heap_.end() - 1);
    sift_down(0); // no-op when the heap became empty
    return value;
}
// Heapify heap_ in place (Floyd's bottom-up construction): sift down
// every internal node, starting from the last one.
// Fix: for a single-element heap, `size()/2 - 1` wrapped around to
// SIZE_MAX; the loop happened to not execute, but only by accident of
// unsigned wrap-around. Guard explicitly instead.
template <class Cont>
auto Heap<Cont>::make_heap() -> void
{
    if (heap_.size() == 0)
        throw std::length_error("make_heap(): Heap is empty");

    // A single element is already a valid heap; this also avoids the
    // size_t wrap-around in the bound computation below.
    if (heap_.size() == 1)
        return;

    // No need to check the leaves: the last internal node is N/2 - 1.
    size_t bound = heap_.size() / 2 - 1;

    // Loop from (N/2 - 1) down to 0.
    for (size_t i = bound + 1; i-- > 0;)
        sift_down(i);
}
// Replace the heap contents with the listed values and heapify them.
template <class Cont>
auto Heap<Cont>::make_heap(std::initializer_list<T> values) -> void
{
    heap_.assign(values);
    make_heap();
}
// Replace the heap contents with a copy of the given container's
// elements and heapify them.
template <class Cont>
auto Heap<Cont>::make_heap(Cont& values) -> void
{
    heap_.assign(values.begin(), values.end());
    make_heap();
}
// Replace the heap contents by taking ownership of the given
// container's storage (no copy), then heapify.
template <class Cont>
auto Heap<Cont>::make_heap(Cont&& values) -> void
{
    heap_ = std::move(values);
    make_heap();
}
// Check whether the listed values form a valid min-heap.
// Fix: the original `is_heap(std::move(Cont{ values }))` tried to bind
// an rvalue to the non-const lvalue reference parameter of
// is_heap(Cont&), which does not compile. Materialize a named
// container instead.
template <class Cont>
auto Heap<Cont>::is_heap(std::initializer_list<T> values) -> bool
{
    Cont tmp(values);
    return is_heap(tmp);
}
// Check whether the container's elements satisfy the min-heap
// property (every internal node is <= both of its children).
// Fixes: for containers with fewer than two elements,
// `size()/2 - 1` wrapped around to SIZE_MAX and the signed/unsigned
// loop comparison then iterated (practically) forever; such inputs
// are trivially valid heaps, so return early. The loop index is now
// size_t to avoid the signed/unsigned mismatch.
template <class Cont>
auto Heap<Cont>::is_heap(Cont& values) -> bool
{
    if (values.size() < 2) return true;

    size_t left, right, bound = values.size() / 2 - 1;

    for (size_t i = 0; i <= bound; i++)
    {
        left = 2 * i + 1;
        right = left + 1;

        if (left < values.size() && values[left] < values[i])
            return false;

        if (right < values.size() && values[right] < values[i])
            return false;
    }

    return true;
}
// Report which children the node at idx has, as (has_left, has_right).
// Fix: the original forced std::make_pair's template arguments to
// Cont::value_type (also without `typename`) while returning
// std::pair<bool, bool> — ill-formed. Let make_pair deduce bool.
template <class Cont>
auto Heap<Cont>::has_childs(size_t idx) const -> std::pair<bool, bool>
{
    auto idx_left = idx * 2 + 1;
    auto idx_right = idx_left + 1;

    return std::make_pair(idx_left < heap_.size(), idx_right < heap_.size());
}
// Return the values of the two children of the node at idx; a missing
// child is reported as a value-initialized T{}.
// Fix: `std::make_pair<Cont::value_type, ...>` lacked the required
// `typename` (ill-formed before C++20); use deduction with the class
// alias T for the fallback values.
template <class Cont>
auto Heap<Cont>::get_childs(size_t idx) const -> std::pair<T, T>
{
    auto idx_left = idx * 2 + 1;
    auto idx_right = idx_left + 1;

    return std::make_pair(
        idx_left < heap_.size() ? heap_[idx_left] : T{},
        idx_right < heap_.size() ? heap_[idx_right] : T{});
}
| 21.568562 | 110 | 0.647852 | [
"vector"
] |
f6c31ef6dbe9b7878cd8ce52f8a54b3cdb535e9c | 9,291 | cpp | C++ | lonestar/experimental/hsssp/compilerInputs/pageRankPull_gen.cpp | rohankadekodi/compilers_project | 2f9455a5d0c516b9f1766afd1cdac1b86c930ec0 | [
"BSD-3-Clause"
] | null | null | null | lonestar/experimental/hsssp/compilerInputs/pageRankPull_gen.cpp | rohankadekodi/compilers_project | 2f9455a5d0c516b9f1766afd1cdac1b86c930ec0 | [
"BSD-3-Clause"
] | 7 | 2020-02-27T19:24:51.000Z | 2020-04-10T21:04:28.000Z | lonestar/experimental/hsssp/compilerInputs/pageRankPull_gen.cpp | rohankadekodi/compilers_project | 2f9455a5d0c516b9f1766afd1cdac1b86c930ec0 | [
"BSD-3-Clause"
] | 2 | 2021-07-26T14:46:51.000Z | 2021-11-09T11:32:09.000Z | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
#include <iostream>
#include <limits>
#include "galois/Galois.h"
#include "Lonestar/BoilerPlate.h"
#include "galois/gstl.h"
#include "galois/runtime/CompilerHelperFunctions.h"
#include "galois/runtime/Tracer.h"
#include "galois/Dist/DistGraph.h"
#include "galois/DistAccumulator.h"
#ifdef __GALOIS_HET_CUDA__
#include "galois/Cuda/cuda_mtypes.h"
#include "gen_cuda.h"
struct CUDA_Context* cuda_ctx;
#endif
static const char* const name =
"PageRank - Compiler Generated Distributed Heterogeneous";
static const char* const desc = "PageRank Pull version on Distributed Galois.";
static const char* const url = 0;
#ifdef __GALOIS_HET_CUDA__
enum Personality { CPU, GPU_CUDA, GPU_OPENCL };
// Human-readable name for a host personality (used in logs/output).
std::string personality_str(Personality p) {
  if (p == CPU) {
    return "CPU";
  }
  if (p == GPU_CUDA) {
    return "GPU_CUDA";
  }
  if (p == GPU_OPENCL) {
    return "GPU_OPENCL";
  }
  assert(false && "Invalid personality");
  return "";
}
#endif
namespace cll = llvm::cl;
static cll::opt<std::string>
inputFile(cll::Positional, cll::desc("<input file>"), cll::Required);
static cll::opt<float> tolerance("tolerance", cll::desc("tolerance"),
cll::init(0.01));
static cll::opt<bool>
verify("verify", cll::desc("Verify ranks by printing to the output stream"),
cll::init(false));
#ifdef __GALOIS_HET_CUDA__
static cll::opt<int> gpudevice(
"gpu",
cll::desc("Select GPU to run on, default is to choose automatically"),
cll::init(-1));
static cll::opt<Personality>
personality("personality", cll::desc("Personality"),
cll::values(clEnumValN(CPU, "cpu", "Galois CPU"),
clEnumValN(GPU_CUDA, "gpu/cuda", "GPU/CUDA"),
clEnumValN(GPU_OPENCL, "gpu/opencl", "GPU/OpenCL"),
clEnumValEnd),
cll::init(CPU));
static cll::opt<std::string>
personality_set("pset",
cll::desc("String specifying personality for each host. "
"'c'=CPU,'g'=GPU/CUDA and 'o'=GPU/OpenCL"),
cll::init(""));
static cll::opt<unsigned>
scalegpu("scalegpu",
cll::desc("Scale GPU workload w.r.t. CPU, default is "
"proportionally equal workload to CPU and GPU (1)"),
cll::init(1));
static cll::opt<unsigned>
scalecpu("scalecpu",
cll::desc("Scale CPU workload w.r.t. GPU, default is "
"proportionally equal workload to CPU and GPU (1)"),
cll::init(1));
#endif
static const float alpha = (1.0 - 0.85);
// Per-node PageRank state stored in the distributed graph.
struct PR_NodeData {
  // Current PageRank score of this node.
  float value;
  // Degree normalizer for value: incremented once per stored edge that
  // points at this node during InitializeGraph (atomic because many
  // sources update the same destination in parallel).
  // NOTE(review): used as the out-degree in the pull formula, which
  // presumes the stored graph is the transpose — confirm with loader.
  std::atomic<int> nout;
};
typedef DistGraph<PR_NodeData, void> Graph;
typedef typename Graph::GraphNode GNode;
// Galois operator: seeds every node's rank and accumulates the degree
// counters used by the PageRank pull formula.
struct InitializeGraph {
  Graph* graph;

  InitializeGraph(Graph* _graph) : graph(_graph) {}

  // Run the initialization over all nodes of the (local) graph partition.
  void static go(Graph& _graph) {
    galois::do_all(_graph.begin(), _graph.end(), InitializeGraph{&_graph},
                   galois::loopname("Init"));
  }

  // Per-node work: set the initial rank and bump nout of every
  // stored-edge destination.
  void operator()(GNode src) const {
    PR_NodeData& sdata = graph->getData(src);
    sdata.value = 1.0 - alpha; // initial rank = 0.85 (alpha is 0.15 here)
    for (auto nbr = graph->edge_begin(src); nbr != graph->edge_end(src);
         ++nbr) {
      GNode dst = graph->getEdgeDst(nbr);
      PR_NodeData& ddata = graph->getData(dst);
      // Atomic add: multiple sources may target the same dst concurrently.
      galois::atomicAdd(ddata.nout, 1);
    }
  }
};
// Galois operator: pull-style PageRank round plus its convergence loop.
struct PageRank_pull {
  Graph* graph;

  PageRank_pull(Graph* _graph) : graph(_graph) {}

  // Repeat rounds until no node's rank moved by more than 'tolerance';
  // the distributed accumulator reduces the per-host change counts.
  void static go(Graph& _graph) {
    do {
      DGAccumulator_accum.reset();
      galois::do_all(_graph.begin(), _graph.end(), PageRank_pull{&_graph},
                     galois::loopname("pageRank"));
    } while (DGAccumulator_accum.reduce());
  }

  // Number of nodes whose rank changed beyond tolerance this round.
  static galois::DGAccumulator<int> DGAccumulator_accum;

  // Per-node work: pull value/nout contributions from stored-edge
  // neighbors, apply the damping formula, and record a change if the
  // rank moved by more than the tolerance.
  void operator()(GNode src) const {
    PR_NodeData& sdata = graph->getData(src);
    float sum = 0;
    for (auto nbr = graph->edge_begin(src); nbr != graph->edge_end(src);
         ++nbr) {
      GNode dst = graph->getEdgeDst(nbr);
      PR_NodeData& ddata = graph->getData(dst);
      unsigned dnout = ddata.nout;
      if (dnout > 0) { // skip zero-degree neighbors (no contribution)
        sum += ddata.value / dnout;
      }
    }
    float pr_value = sum * (1.0 - alpha) + alpha;
    float diff = std::fabs(pr_value - sdata.value);

    if (diff > tolerance) {
      sdata.value = pr_value;
      DGAccumulator_accum += 1;
    }
  }
};
// Definition of the static convergence accumulator declared in PageRank_pull.
galois::DGAccumulator<int> PageRank_pull::DGAccumulator_accum;
// Entry point: parse options, pick per-host personalities (CPU/GPU),
// partition the input graph across hosts, run initialization and the
// PageRank-pull loop, then optionally print final ranks for verification.
int main(int argc, char** argv) {
  try {
    LonestarStart(argc, argv, name, desc, url);
    auto& net = galois::runtime::getSystemNetworkInterface();
    galois::Timer T_total, T_DistGraph_init, T_init, T_pageRank;

#ifdef __GALOIS_HET_CUDA__
    const unsigned my_host_id = galois::runtime::getHostID();
    int gpu_device = gpudevice;
    // Parse arg string when running on multiple hosts and update/override
    // personality with corresponding value.
    if (personality_set.length() == galois::runtime::NetworkInterface::Num) {
      switch (personality_set.c_str()[my_host_id]) {
      case 'g':
        personality = GPU_CUDA;
        break;
      case 'o':
        assert(0); // OpenCL path not supported here
        personality = GPU_OPENCL;
        break;
      case 'c':
      default:
        personality = CPU;
        break;
      }
#ifdef __GALOIS_SINGLE_HOST_MULTIPLE_GPUS__
      // Auto-assign GPU id: count non-CPU personalities before this host.
      if (gpu_device == -1) {
        gpu_device = 0;
        for (unsigned i = 0; i < my_host_id; ++i) {
          if (personality_set.c_str()[i] != 'c')
            ++gpu_device;
        }
      }
#endif
    }
    // Per-host workload scale factors (CPU vs GPU hosts).
    std::vector<unsigned> scalefactor;
    for (unsigned i = 0; i < personality_set.length(); ++i) {
      if (personality_set.c_str()[i] == 'c')
        scalefactor.push_back(scalecpu);
      else
        scalefactor.push_back(scalegpu);
    }
#endif

    T_total.start();

    // Load and partition the distributed graph.
    T_DistGraph_init.start();
#ifndef __GALOIS_HET_CUDA__
    Graph hg(inputFile, net.ID, net.Num);
#else
    Graph hg(inputFile, net.ID, net.Num, scalefactor);
    if (personality == GPU_CUDA) {
      cuda_ctx = get_CUDA_context(my_host_id);
      if (!init_CUDA_context(cuda_ctx, gpu_device))
        return -1;
      MarshalGraph m = hg.getMarshalGraph(my_host_id);
      load_graph_CUDA(cuda_ctx, m);
    } else if (personality == GPU_OPENCL) {
      // galois::opencl::cl_env.init(cldevice.Value);
    }
#endif
    T_DistGraph_init.stop();

    std::cout << "[" << net.ID << "] InitializeGraph::go called\n";
    T_init.start();
    InitializeGraph::go(hg);
    T_init.stop();

    // Verify
    /*if(verify){
    #ifdef __GALOIS_HET_CUDA__
      if (personality == CPU) {
    #endif
        for(auto ii = hg.begin(); ii != hg.end(); ++ii) {
          galois::runtime::printOutput("% %\n", hg.getGID(*ii),
    hg.getData(*ii).nout);
        }
    #ifdef __GALOIS_HET_CUDA__
      } else if(personality == GPU_CUDA) {
        for(auto ii = hg.begin(); ii != hg.end(); ++ii) {
          galois::runtime::printOutput("% %\n", hg.getGID(*ii),
    get_node_nout_cuda(cuda_ctx, *ii));
        }
      }
    #endif
    }*/

    std::cout << "[" << net.ID << "] PageRank_pull::go called\n";
    T_pageRank.start();
    PageRank_pull::go(hg);
    T_pageRank.stop();

    T_total.stop();

    std::cout << "[" << net.ID << "]"
              << " Total Time : " << T_total.get()
              << " DistGraph : " << T_DistGraph_init.get()
              << " Init : " << T_init.get()
              << " PageRank_pull : " << T_pageRank.get() << "(msec)\n\n";

    // Verify: print GID -> rank for every local node.
    if (verify) {
#ifdef __GALOIS_HET_CUDA__
      if (personality == CPU) {
#endif
        for (auto ii = hg.begin(); ii != hg.end(); ++ii) {
          galois::runtime::printOutput("% %\n", hg.getGID(*ii),
                                       hg.getData(*ii).value);
        }
#ifdef __GALOIS_HET_CUDA__
      } else if (personality == GPU_CUDA) {
        for (auto ii = hg.begin(); ii != hg.end(); ++ii) {
          galois::runtime::printOutput("% %\n", hg.getGID(*ii),
                                       get_node_value_cuda(cuda_ctx, *ii));
        }
      }
#endif
    }

    return 0;
  } catch (const char* c) {
    std::cerr << "Error: " << c << "\n";
    return 1;
  }
}
| 32.148789 | 85 | 0.611775 | [
"vector"
] |
f6c3807741cdbe4a5e0bfde550fa387b77d048e5 | 1,449 | cpp | C++ | 0x14-Convex/esp8266_0x14_Convex/Polygon.cpp | thiagohersan/orgorithms | b202e30d8deb7d898d765e72e5640d675f5d9129 | [
"MIT"
] | 1 | 2021-05-21T18:46:26.000Z | 2021-05-21T18:46:26.000Z | 0x14-Convex/esp8266_0x14_Convex/Polygon.cpp | thiagohersan/orgorithms | b202e30d8deb7d898d765e72e5640d675f5d9129 | [
"MIT"
] | null | null | null | 0x14-Convex/esp8266_0x14_Convex/Polygon.cpp | thiagohersan/orgorithms | b202e30d8deb7d898d765e72e5640d675f5d9129 | [
"MIT"
] | null | null | null | #ifndef _CONVEXHULL_CLASS_
#define _CONVEXHULL_CLASS_
#include "Point.cpp"
#include <algorithm>
#include <vector>
// Point set that can compute its convex hull with Andrew's monotone
// chain algorithm (O(n log n), dominated by the sort).
class Polygon {
private:
  // Input points; sorted in place lexicographically by convexHull().
  std::vector<Point> _polygon;
  // Most recently computed hull; starts as a plain copy of the input.
  std::vector<Point> _convexHull;

public:
  // Build from a flat array of interleaved coordinates
  // {x0, y0, x1, y1, ...}; input_length is the total number of ints.
  Polygon (int* input, int input_length) {
    for (int i = 0; i < input_length / 2; i++) {
      _polygon.push_back(Point(input[2 * i + 0], input[2 * i + 1]));
      _convexHull.push_back(Point(input[2 * i + 0], input[2 * i + 1]));
    }
  }

  // Compute and return the convex hull chain. With fewer than 3 points
  // the unmodified copy of the input is returned.
  std::vector<Point>& convexHull() {
    int n = _polygon.size();
    int head = 0;

    if (n < 3) return _convexHull;

    // Workspace for the two chains; a hull never needs more than 2n slots.
    std::vector<Point> LU;
    for (int p = 0; p < 2 * n; p++) {
      LU.push_back(Point(0, 0));
    }

    std::sort(_polygon.begin(), _polygon.end(), Point::compare);

    // First chain: left-to-right sweep, popping points that fail the
    // Point::ccw turn test.
    for (int i = 0; i < n; i++) {
      while (head >= 2 && !Point::ccw(LU[head - 2], LU[head - 1], _polygon[i])) head--;
      LU[head].x = _polygon[i].x;
      LU[head].y = _polygon[i].y;
      head++;
    }

    // Second chain: right-to-left sweep; t keeps the first chain intact.
    for (int i = n - 2, t = head + 1; i >= 0; i--) {
      while (head >= t && !Point::ccw(LU[head - 2], LU[head - 1], _polygon[i])) head--;
      LU[head].x = _polygon[i].x;
      LU[head].y = _polygon[i].y;
      head++;
    }

    head = (head > 1) ? head : 1;

    // The chain's last point repeats the first, so keep head-1 entries.
    _convexHull.clear();
    for (int i = 0; i < head - 1; i++) {
      _convexHull.push_back(LU[i]);
    }

    return _convexHull;
  }
};
#endif
| 24.982759 | 89 | 0.510007 | [
"vector"
] |
f6c51f66487d9ede2a46934d0aba9a118ef5900a | 21,773 | cpp | C++ | searchlib/src/vespa/searchlib/features/attributefeature.cpp | robbinfan/vespa | a1616e33c5c12ca21efcb18b7b274009fb4b106b | [
"Apache-2.0"
] | null | null | null | searchlib/src/vespa/searchlib/features/attributefeature.cpp | robbinfan/vespa | a1616e33c5c12ca21efcb18b7b274009fb4b106b | [
"Apache-2.0"
] | null | null | null | searchlib/src/vespa/searchlib/features/attributefeature.cpp | robbinfan/vespa | a1616e33c5c12ca21efcb18b7b274009fb4b106b | [
"Apache-2.0"
] | null | null | null | // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "attributefeature.h"
#include "utils.h"
#include "valuefeature.h"
#include "constant_tensor_executor.h"
#include "dense_tensor_attribute_executor.h"
#include "direct_tensor_attribute_executor.h"
#include "tensor_attribute_executor.h"
#include <vespa/searchcommon/common/undefinedvalues.h>
#include <vespa/searchcommon/attribute/attributecontent.h>
#include <vespa/searchlib/tensor/dense_tensor_attribute.h>
#include <vespa/searchlib/tensor/direct_tensor_attribute.h>
#include <vespa/searchlib/fef/indexproperties.h>
#include <vespa/searchlib/attribute/singlenumericattribute.h>
#include <vespa/searchlib/attribute/multinumericattribute.h>
#include <vespa/searchlib/attribute/singleboolattribute.h>
#include <vespa/vespalib/util/issue.h>
#include <vespa/log/log.h>
LOG_SETUP(".features.attributefeature");
using search::attribute::IAttributeVector;
using search::attribute::BasicType;
using search::attribute::CollectionType;
using search::attribute::ConstCharContent;
using search::tensor::DenseTensorAttribute;
using search::tensor::DirectTensorAttribute;
using search::attribute::IntegerContent;
using search::attribute::FloatContent;
using search::tensor::ITensorAttribute;
using search::attribute::WeightedConstCharContent;
using search::attribute::WeightedIntegerContent;
using search::attribute::WeightedFloatContent;
using search::fef::FeatureExecutor;
using search::features::util::ConstCharPtr;
using vespalib::Issue;
using vespalib::eval::ValueType;
using search::fef::FeatureType;
using namespace search::index;
using namespace search::fef::indexproperties;
namespace search::features {
namespace {
// Generic equality helper; a specialization below handles comparing a
// C string against a vespalib::stringref.
template <typename X, typename Y>
bool equals(X a, Y b) {
    return (a == b);
}
// C-string vs stringref comparison.
// NOTE(review): strcmp requires rhs.data() to be null-terminated;
// presumably holds for the refs produced here — confirm before reusing
// with arbitrary substring refs.
template <>
bool equals<ConstCharPtr, vespalib::stringref>(ConstCharPtr lhs, vespalib::stringref rhs) {
    return strcmp(lhs, rhs.data()) == 0;
}
// Whether 'value' equals the attribute "undefined" sentinel for the
// given numeric basic type. Non-numeric types never count as undefined.
template <typename T>
bool
isUndefined(T value, BasicType::Type type)
{
    switch (type) {
    case BasicType::INT8:
        return attribute::isUndefined<int8_t>(static_cast<int8_t>(value));
    case BasicType::INT16:
        return attribute::isUndefined<int16_t>(static_cast<int16_t>(value));
    case BasicType::INT32:
        return attribute::isUndefined<int32_t>(static_cast<int32_t>(value));
    case BasicType::INT64:
        return attribute::isUndefined<int64_t>(static_cast<int64_t>(value));
    case BasicType::FLOAT:
        return attribute::isUndefined<float>(static_cast<float>(value));
    case BasicType::DOUBLE:
        return attribute::isUndefined<double>(static_cast<double>(value));
    default:
        return false;
    }
}
// String values have no "undefined" sentinel.
template <>
bool
isUndefined<vespalib::stringref>(vespalib::stringref, BasicType::Type)
{
    return false;
}
// Convert an attribute value to a feature value, mapping the
// type-specific undefined sentinel onto the feature-level undefined.
template <typename T>
feature_t
considerUndefined(T value, BasicType::Type type)
{
    return isUndefined(value, type)
           ? attribute::getUndefined<feature_t>()
           : util::getAsFeature(value);
}
// C strings are never undefined; convert directly.
template <>
feature_t
considerUndefined<ConstCharPtr>(ConstCharPtr value, BasicType::Type )
{
    return util::getAsFeature(value);
}
/**
 * Implements the executor for fetching values from a single-value
 * numeric attribute vector. The weight/contains/count outputs are
 * constants for this case and are written once at output bind time.
 */
template <typename T>
class SingleAttributeExecutor final : public fef::FeatureExecutor {
private:
    const T & _attribute;
public:
    /**
     * Constructs an executor.
     *
     * @param attribute The attribute vector to use.
     */
    SingleAttributeExecutor(const T & attribute) : _attribute(attribute) { }
    void handle_bind_outputs(vespalib::ArrayRef<fef::NumberOrObject> outputs_in) override {
        fef::FeatureExecutor::handle_bind_outputs(outputs_in);
        auto o = outputs().get_bound();
        o[1].as_number = 0; // weight
        o[2].as_number = 0; // contains
        o[3].as_number = 1; // count
    }
    void execute(uint32_t docId) override;
};
// Executor for single-value bool attributes: exposes the stored value
// (as a float) through a single output.
class BoolAttributeExecutor final : public fef::FeatureExecutor {
private:
    const SingleBoolAttribute & _attribute;
public:
    BoolAttributeExecutor(const SingleBoolAttribute & attribute)
        : _attribute(attribute)
    {}
    void execute(uint32_t docId) override {
        outputs().set_number(0, _attribute.getFloat(docId));
    }
};
/**
 * Implements the executor for fetching the element at a fixed index
 * from a multi-value (array) numeric attribute vector. The
 * weight/contains/count outputs are constants and are written once at
 * output bind time.
 */
template <typename T>
class MultiAttributeExecutor final : public fef::FeatureExecutor {
private:
    const T & _attribute;
    uint32_t _idx; // array index to extract
public:
    MultiAttributeExecutor(const T & attribute, uint32_t idx) : _attribute(attribute), _idx(idx) { }
    void execute(uint32_t docId) override;
    void handle_bind_outputs(vespalib::ArrayRef<fef::NumberOrObject> outputs_in) override {
        fef::FeatureExecutor::handle_bind_outputs(outputs_in);
        auto o = outputs().get_bound();
        o[1].as_number = 0; // weight
        o[2].as_number = 0; // contains
        o[3].as_number = 0; // count
    }
};
// Executor that only produces the element count of a multi-value
// attribute; value/weight/contains are fixed to 0 at bind time.
class CountOnlyAttributeExecutor final : public fef::FeatureExecutor {
private:
    const attribute::IAttributeVector & _attribute;
public:
    CountOnlyAttributeExecutor(const attribute::IAttributeVector & attribute) : _attribute(attribute) { }
    void execute(uint32_t docId) override;
    void handle_bind_outputs(vespalib::ArrayRef<fef::NumberOrObject> outputs_in) override {
        fef::FeatureExecutor::handle_bind_outputs(outputs_in);
        auto o = outputs().get_bound();
        o[0].as_number = 0; // value
        o[1].as_number = 0; // weight
        o[2].as_number = 0; // contains
    }
};
/**
 * Implements the executor for fetching values from a single or array
 * attribute vector via a content buffer (generic fallback used when
 * no fast-path executor matches the concrete attribute type).
 */
template <typename T>
class AttributeExecutor final : public fef::FeatureExecutor {
private:
    const attribute::IAttributeVector * _attribute;
    attribute::BasicType::Type _attrType;
    uint32_t _idx;
    T _buffer; // used when fetching values from the attribute
    feature_t _defaultCount; // 0 for arrays (count varies), 1 for single
public:
    /**
     * Constructs an executor.
     *
     * @param attribute The attribute vector to use.
     * @param idx The index used for an array attribute.
     */
    AttributeExecutor(const attribute::IAttributeVector * attribute, uint32_t idx);
    void execute(uint32_t docId) override;
    void handle_bind_outputs(vespalib::ArrayRef<fef::NumberOrObject> outputs_in) override {
        fef::FeatureExecutor::handle_bind_outputs(outputs_in);
        auto o = outputs().get_bound();
        o[1].as_number = 0; // weight
        o[2].as_number = 0; // contains
        o[3].as_number = _defaultCount; // count
    }
};
/**
 * Implements the executor for fetching the weight of a given key from
 * a weighted set attribute.
 */
template <typename BT, typename T>
class WeightedSetAttributeExecutor : public fef::FeatureExecutor {
private:
    const attribute::IAttributeVector * _attribute;
    attribute::BasicType::Type _attrType;
    BT _buffer; // used when fetching values and weights from the attribute
    T _key;     // the key to find a weight for
    bool _useKey; // when false, only the element count is produced
public:
    /**
     * Constructs an executor.
     *
     * @param attribute The attribute vector to use.
     * @param key The key to find a corresponding weight for.
     * @param useKey Whether we should consider the key.
     */
    WeightedSetAttributeExecutor(const attribute::IAttributeVector * attribute, T key, bool useKey);
    void execute(uint32_t docId) override;
};
// Fast path: read the raw stored value and translate the undefined
// sentinel into the feature-level undefined.
template <typename T>
void
SingleAttributeExecutor<T>::execute(uint32_t docId)
{
    typename T::LoadedValueType v = _attribute.getFast(docId);
    // value
    auto o = outputs().get_bound();
    o[0].as_number = __builtin_expect(attribute::isUndefined(v), false)
                     ? attribute::getUndefined<feature_t>()
                     : util::getAsFeature(v);
}
// Fast path: read the raw array and emit the element at _idx, or 0
// when the document has fewer elements.
template <typename T>
void
MultiAttributeExecutor<T>::execute(uint32_t docId)
{
    const multivalue::Value<typename T::BaseType> * values = nullptr;
    uint32_t numValues = _attribute.getRawValues(docId, values);

    auto o = outputs().get_bound();
    o[0].as_number = __builtin_expect(_idx < numValues, true) ? values[_idx].value() : 0;
}
// Only the count output varies per document; the rest were fixed at bind.
void
CountOnlyAttributeExecutor::execute(uint32_t docId)
{
    auto o = outputs().get_bound();
    o[3].as_number = _attribute.getValueCount(docId); // count
}
// Pre-sizes the content buffer to the attribute's maximum value count
// so execute() never has to reallocate.
template <typename T>
AttributeExecutor<T>::AttributeExecutor(const IAttributeVector * attribute, uint32_t idx) :
    fef::FeatureExecutor(),
    _attribute(attribute),
    _attrType(attribute->getBasicType()),
    _idx(idx),
    _buffer(),
    _defaultCount((attribute->getCollectionType() == CollectionType::ARRAY) ? 0 : 1)
{
    _buffer.allocate(_attribute->getMaxValueCount());
}
// Fill the buffer for the document and emit the element at _idx
// (0 when the document has fewer elements than _idx + 1).
template <typename T>
void
AttributeExecutor<T>::execute(uint32_t docId)
{
    feature_t value = 0.0f;
    _buffer.fill(*_attribute, docId);
    if (_idx < _buffer.size()) {
        value = considerUndefined(_buffer[_idx], _attrType);
    }
    auto o = outputs().get_bound();
    o[0].as_number = value; // value
}
// Stores the lookup key and whether it should be used; when _useKey is
// false only the element count is produced by execute().
template <typename BT, typename T>
WeightedSetAttributeExecutor<BT, T>::WeightedSetAttributeExecutor(const IAttributeVector * attribute, T key, bool useKey) :
    fef::FeatureExecutor(),
    _attribute(attribute),
    _attrType(attribute->getBasicType()),
    _buffer(),
    _key(key),
    _useKey(useKey)
{
}
// With a key: linear scan of the weighted set for the first matching
// entry; emits its value, its weight, and contains=1. Without a key:
// emits only the element count. Unset outputs default to 0.
template <typename BT, typename T>
void
WeightedSetAttributeExecutor<BT, T>::execute(uint32_t docId)
{
    feature_t value = 0.0f;
    feature_t weight = 0.0f;
    feature_t contains = 0.0f;
    feature_t count = 0.0f;
    if (_useKey) {
        _buffer.fill(*_attribute, docId);
        for (uint32_t i = 0; i < _buffer.size(); ++i) {
            if (equals(_buffer[i].getValue(), _key)) {
                value = considerUndefined(_key, _attrType);
                weight = static_cast<feature_t>(_buffer[i].getWeight());
                contains = 1.0f;
                break; // first match wins
            }
        }
    } else {
        count = _attribute->getValueCount(docId);
    }
    outputs().set_number(0, value);    // value
    outputs().set_number(1, weight);   // weight
    outputs().set_number(2, contains); // contains
    outputs().set_number(3, count);    // count
}
// Helper that tries to downcast a generic attribute to a concrete
// single-value numeric attribute; on success it can create the
// matching fast-path SingleAttributeExecutor.
template <typename T>
struct SingleValueExecutorCreator {
    using AttrType = SingleValueNumericAttribute<T>;
    using PtrType = const AttrType *;
    using ExecType = SingleAttributeExecutor<AttrType>;
    SingleValueExecutorCreator() : ptr(nullptr) {}
    // Returns true (and remembers the cast) when the downcast succeeds.
    bool handle(const IAttributeVector *attribute) {
        ptr = dynamic_cast<PtrType>(attribute);
        return ptr != nullptr;
    }
    fef::FeatureExecutor & create(vespalib::Stash &stash) const {
        return stash.create<ExecType>(*ptr);
    }
private:
    PtrType ptr;
};
// Helper that tries to downcast a generic attribute to a concrete
// multi-value numeric attribute; on success it can create the matching
// fast-path MultiAttributeExecutor for a given array index.
template <typename T>
struct MultiValueExecutorCreator {
    using AttrType = MultiValueNumericAttribute<T, multivalue::Value<typename T::BaseType>>;
    using PtrType = const AttrType *;
    using ExecType = MultiAttributeExecutor<AttrType>;
    MultiValueExecutorCreator() : ptr(nullptr) {}
    // Returns true (and remembers the cast) when the downcast succeeds.
    bool handle(const IAttributeVector *attribute) {
        ptr = dynamic_cast<PtrType>(attribute);
        return ptr != nullptr;
    }
    fef::FeatureExecutor & create(vespalib::Stash &stash, uint32_t idx) const {
        return stash.create<ExecType>(*ptr, idx);
    }
private:
    PtrType ptr;
};
// Factory: selects the most specific executor for the attribute's
// collection/basic type. Falls back to a constant-zero executor when
// the attribute is missing, to generic buffer-based executors when no
// concrete downcast matches, and to count-only executors when no
// index/key parameter was supplied for a multi-value attribute.
fef::FeatureExecutor &
createAttributeExecutor(uint32_t numOutputs, const IAttributeVector *attribute, const vespalib::string &attrName, const vespalib::string &extraParam, vespalib::Stash &stash)
{
    if (attribute == nullptr) {
        Issue::report("The attribute vector '%s' was not found in the attribute manager, returning default values.",
                      attrName.c_str());
        std::vector<feature_t> values(numOutputs, 0.0f);
        return stash.create<ValueExecutor>(values);
    }
    CollectionType collectionType = attribute->getCollectionType();
    if (collectionType == CollectionType::WSET) {
        assert(numOutputs == 4);
        bool useKey = !extraParam.empty();
        if (useKey) {
            // Key lookup: dispatch on the set's element type.
            if (attribute->isStringType()) {
                return stash.create<WeightedSetAttributeExecutor<WeightedConstCharContent, vespalib::stringref>>(attribute, extraParam, useKey);
            } else if (attribute->isIntegerType()) {
                return stash.create<WeightedSetAttributeExecutor<WeightedIntegerContent, int64_t>>(attribute, util::strToNum<int64_t>(extraParam), useKey);
            } else { // FLOAT
                return stash.create<WeightedSetAttributeExecutor<WeightedFloatContent, double>>(attribute, util::strToNum<double>(extraParam), useKey);
            }
        } else {
            return stash.create<CountOnlyAttributeExecutor>(*attribute);
        }
    } else { // SINGLE or ARRAY
        BasicType basicType = attribute->getBasicType();
        if (collectionType == CollectionType::SINGLE) {
            if (attribute->isIntegerType()) {
                if (basicType == BasicType::BOOL) {
                    auto boolAttribute = dynamic_cast<const SingleBoolAttribute *>(attribute);
                    assert (boolAttribute && (numOutputs == 1));
                    return stash.create<BoolAttributeExecutor>(*boolAttribute);
                } else {
                    assert(numOutputs == 4);
                    // Try narrow-typed fast paths first, then int64.
                    if (basicType == BasicType::INT8) {
                        SingleValueExecutorCreator<IntegerAttributeTemplate<int8_t>> creator;
                        if (creator.handle(attribute)) return creator.create(stash);
                    } else if (basicType == BasicType::INT32) {
                        SingleValueExecutorCreator<IntegerAttributeTemplate<int32_t>> creator;
                        if (creator.handle(attribute)) return creator.create(stash);
                    }
                    SingleValueExecutorCreator<IntegerAttributeTemplate<int64_t>> creator;
                    if (creator.handle(attribute)) return creator.create(stash);
                }
            } else if (attribute->isFloatingPointType()) {
                assert(numOutputs == 4);
                if (basicType == BasicType::DOUBLE) {
                    SingleValueExecutorCreator<FloatingPointAttributeTemplate<double>> creator;
                    if (creator.handle(attribute)) return creator.create(stash);
                } else {
                    SingleValueExecutorCreator<FloatingPointAttributeTemplate<float>> creator;
                    if (creator.handle(attribute)) return creator.create(stash);
                }
            }
        }
        assert(numOutputs == 4);
        uint32_t idx = 0;
        if (!extraParam.empty()) {
            idx = util::strToNum<uint32_t>(extraParam);
        } else if (attribute->getCollectionType() == CollectionType::ARRAY) {
            // Array without an index parameter: only the count is meaningful.
            return stash.create<CountOnlyAttributeExecutor>(*attribute);
        }
        if (attribute->isStringType()) {
            return stash.create<AttributeExecutor<ConstCharContent>>(attribute, idx);
        } else if (attribute->isIntegerType()) {
            if (basicType == BasicType::INT32) {
                MultiValueExecutorCreator<IntegerAttributeTemplate<int32_t>> creator;
                if (creator.handle(attribute)) return creator.create(stash, idx);
            } else if (basicType == BasicType::INT64) {
                MultiValueExecutorCreator<IntegerAttributeTemplate<int64_t>> creator;
                if (creator.handle(attribute)) return creator.create(stash, idx);
            }
            return stash.create<AttributeExecutor<IntegerContent>>(attribute, idx);
        } else { // FLOAT
            if (basicType == BasicType::DOUBLE) {
                MultiValueExecutorCreator<FloatingPointAttributeTemplate<double>> creator;
                if (creator.handle(attribute)) return creator.create(stash, idx);
            } else {
                MultiValueExecutorCreator<FloatingPointAttributeTemplate<float>> creator;
                if (creator.handle(attribute)) return creator.create(stash, idx);
            }
            return stash.create<AttributeExecutor<FloatContent>>(attribute, idx);
        }
    }
}
// Factory for tensor-typed attributes: validates that the attribute
// exists, really is a single-value tensor attribute, and that its
// tensor type matches the expected one; on any mismatch an
// empty-tensor executor is returned (with a reported issue). Prefers
// the cheapest access path the attribute supports.
fef::FeatureExecutor &
createTensorAttributeExecutor(const IAttributeVector *attribute, const vespalib::string &attrName,
                              const ValueType &tensorType,
                              vespalib::Stash &stash)
{
    if (attribute == nullptr) {
        Issue::report("The attribute vector '%s' was not found in the attribute manager."
                      " Returning empty tensor.", attrName.c_str());
        return ConstantTensorExecutor::createEmpty(tensorType, stash);
    }
    if (attribute->getCollectionType() != attribute::CollectionType::SINGLE ||
        attribute->getBasicType() != attribute::BasicType::TENSOR)
    {
        Issue::report("The attribute vector '%s' is NOT of type tensor."
                      "Returning empty tensor.", attribute->getName().c_str());
        return ConstantTensorExecutor::createEmpty(tensorType, stash);
    }
    const ITensorAttribute *tensorAttribute = attribute->asTensorAttribute();
    if (tensorAttribute == nullptr) {
        Issue::report("The attribute vector '%s' could not be converted to a tensor attribute."
                      " Returning empty tensor.", attribute->getName().c_str());
        return ConstantTensorExecutor::createEmpty(tensorType, stash);
    }
    if (tensorType != tensorAttribute->getTensorType()) {
        Issue::report("The tensor attribute '%s' has tensor type '%s',"
                      " while the feature executor expects type '%s'. Returning empty tensor.",
                      attribute->getName().c_str(),
                      tensorAttribute->getTensorType().to_spec().c_str(),
                      tensorType.to_spec().c_str());
        return ConstantTensorExecutor::createEmpty(tensorType, stash);
    }
    // Prefer direct cell/tensor references over the generic copy path.
    if (tensorAttribute->supports_extract_cells_ref()) {
        return stash.create<DenseTensorAttributeExecutor>(*tensorAttribute);
    }
    if (tensorAttribute->supports_get_tensor_ref()) {
        return stash.create<DirectTensorAttributeExecutor>(*tensorAttribute);
    }
    return stash.create<TensorAttributeExecutor>(*tensorAttribute);
}
// True exactly for fields declared single-value bool in the schema.
bool
isSingleValueBoolField(const fef::FieldInfo & fInfo) {
    if (fInfo.collection() != schema::CollectionType::SINGLE) {
        return false;
    }
    return fInfo.get_data_type() == schema::DataType::BOOL;
}
}
// Registers the blueprint under the feature name "attribute"; the
// tensor type defaults to double (scalar) until setup() resolves it.
AttributeBlueprint::AttributeBlueprint() :
    fef::Blueprint("attribute"),
    _attrName(),
    _attrKey(),
    _extra(),
    _tensorType(ValueType::double_type()),
    _numOutputs(0)
{
}

AttributeBlueprint::~AttributeBlueprint() = default;
// No dump features are produced by this blueprint.
void
AttributeBlueprint::visitDumpFeatures(const fef::IIndexEnvironment &,
                                      fef::IDumpFeatureVisitor &) const
{
}
// Resolve the attribute name (and optional index/key), look up a
// declared tensor type if any, and describe either one output (tensor
// or single-value bool) or the classic four outputs
// (value/weight/contains/count). Returns false on an invalid tensor
// type spec.
bool
AttributeBlueprint::setup(const fef::IIndexEnvironment & env,
                          const fef::ParameterList & params)
{
    // params[0] = attribute name
    // params[1] = index (array attribute) or key (weighted set attribute)
    _attrName = params[0].getValue();
    _attrKey = createAttributeKey(_attrName);
    if (params.size() == 2) {
        _extra = params[1].getValue();
    }
    vespalib::string attrType = type::Attribute::lookup(env.getProperties(), _attrName);
    if (!attrType.empty()) {
        _tensorType = ValueType::from_spec(attrType);
        if (_tensorType.is_error()) {
            LOG(error, "%s: invalid type: '%s'", getName().c_str(), attrType.c_str());
        }
    }
    FeatureType output_type = _tensorType.is_double()
                              ? FeatureType::number()
                              : FeatureType::object(_tensorType);
    describeOutput("value", "The value of a single value attribute, "
                   "the value at the given index of an array attribute, "
                   "the given key of a weighted set attribute, or"
                   "the tensor of a tensor attribute", output_type);
    // NOTE(review): fInfo is dereferenced without a null check;
    // presumably the framework guarantees the field exists — confirm.
    const fef::FieldInfo * fInfo = env.getFieldByName(_attrName);
    if (_tensorType.has_dimensions() || isSingleValueBoolField(*fInfo)) {
        _numOutputs = 1;
    } else {
        describeOutput("weight", "The weight associated with the given key in a weighted set attribute.");
        describeOutput("contains", "1 if the given key is present in a weighted set attribute, 0 otherwise.");
        describeOutput("count", "Returns the number of elements in this array or weighted set attribute.");
        _numOutputs = 4;
    }
    env.hintAttributeAccess(_attrName);
    return !_tensorType.is_error();
}
// Factory method: every use of the feature gets a fresh blueprint instance.
fef::Blueprint::UP
AttributeBlueprint::createInstance() const
{
    fef::Blueprint::UP instance = std::make_unique<AttributeBlueprint>();
    return instance;
}
// Looks up the attribute vector once per query and caches it in the shared
// object store under _attrKey, so createExecutor() can reuse it.
void
AttributeBlueprint::prepareSharedState(const fef::IQueryEnvironment & env, fef::IObjectStore & store) const
{
    lookupAndStoreAttribute(_attrKey, _attrName, env, store);
}
// Creates the per-query feature executor. Tensor-typed attributes get a
// tensor executor; everything else gets the scalar attribute executor.
fef::FeatureExecutor &
AttributeBlueprint::createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const
{
    const IAttributeVector * attribute = lookupAttribute(_attrKey, _attrName, env);
    if (_tensorType.has_dimensions()) {
        return createTensorAttributeExecutor(attribute, _attrName, _tensorType, stash);
    }
    return createAttributeExecutor(_numOutputs, attribute, _attrName, _extra, stash);
}
// Accepted signatures: attribute(name) and attribute(name, key/index),
// for attributes of any normal or tensor data type.
fef::ParameterDescriptions
AttributeBlueprint::getDescriptions() const
{
    auto acceptedTypes = fef::ParameterDataTypeSet::normalOrTensorTypeSet();
    return fef::ParameterDescriptions()
        .desc().attribute(acceptedTypes, fef::ParameterCollection::ANY)
        .desc().attribute(acceptedTypes, fef::ParameterCollection::ANY).string();
}
}
| 37.539655 | 173 | 0.668626 | [
"object",
"vector"
] |
f6c530792a9788ec14c27bc217cd1268f4bd3a45 | 15,547 | cpp | C++ | kernel/dev/usb/device/usb-keyboard.cpp | otaviopace/ananas | 849925915b0888543712a8ca625318cd7bca8dd9 | [
"Zlib"
] | null | null | null | kernel/dev/usb/device/usb-keyboard.cpp | otaviopace/ananas | 849925915b0888543712a8ca625318cd7bca8dd9 | [
"Zlib"
] | null | null | null | kernel/dev/usb/device/usb-keyboard.cpp | otaviopace/ananas | 849925915b0888543712a8ca625318cd7bca8dd9 | [
"Zlib"
] | null | null | null | /*-
* SPDX-License-Identifier: Zlib
*
* Copyright (c) 2009-2018 Rink Springer <rink@rink.nu>
* For conditions of distribution and use, see LICENSE file
*/
#include <ananas/types.h>
#include <ananas/util/array.h>
#include "kernel/dev/kbdmux.h"
#include "kernel/device.h"
#include "kernel/driver.h"
#include "kernel/lib.h"
#include "kernel/mm.h"
#include "kernel/result.h"
#include "../core/config.h"
#include "../core/usb-core.h"
#include "../core/usb-device.h"
#include "../core/usb-transfer.h"
namespace
{
using namespace keyboard_mux::code;
namespace modifier = keyboard_mux::modifier;
using Key = keyboard_mux::Key;
using KeyType = keyboard_mux::Key::Type;
// Modifier byte is defined in HID 1.11 8.3
// These are the bit positions of byte 0 of a boot-protocol keyboard report.
namespace modifierByte
{
    constexpr uint8_t LeftControl = (1 << 0);
    constexpr uint8_t LeftShift = (1 << 1);
    constexpr uint8_t LeftAlt = (1 << 2);
    constexpr uint8_t LeftGui = (1 << 3);
    constexpr uint8_t RightControl = (1 << 4);
    constexpr uint8_t RightShift = (1 << 5);
    constexpr uint8_t RightAlt = (1 << 6);
    constexpr uint8_t RightGui = (1 << 7);
} // namespace modifierByte
// One keymap entry: the key produced without modifiers and the key produced
// while Shift is held.
struct KeyMap {
    Key standard; // unshifted key
    Key shift;    // key with Shift held
};

// As outlined in USB HID Usage Tables 1.12, chapter 10
// Indexed directly by the HID scancode (valid range 0..127); entries marked
// Invalid are scancodes this driver does not translate.
constexpr util::array<KeyMap, 128> keymap{
    /* 00 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 01 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 02 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 03 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 04 */ KeyMap{Key{KeyType::Character, 'a'}, Key{KeyType::Character, 'A'}},
    /* 05 */ KeyMap{Key{KeyType::Character, 'b'}, Key{KeyType::Character, 'B'}},
    /* 06 */ KeyMap{Key{KeyType::Character, 'c'}, Key{KeyType::Character, 'C'}},
    /* 07 */ KeyMap{Key{KeyType::Character, 'd'}, Key{KeyType::Character, 'D'}},
    /* 08 */ KeyMap{Key{KeyType::Character, 'e'}, Key{KeyType::Character, 'E'}},
    /* 09 */ KeyMap{Key{KeyType::Character, 'f'}, Key{KeyType::Character, 'F'}},
    /* 0a */ KeyMap{Key{KeyType::Character, 'g'}, Key{KeyType::Character, 'G'}},
    /* 0b */ KeyMap{Key{KeyType::Character, 'h'}, Key{KeyType::Character, 'H'}},
    /* 0c */ KeyMap{Key{KeyType::Character, 'i'}, Key{KeyType::Character, 'I'}},
    /* 0d */ KeyMap{Key{KeyType::Character, 'j'}, Key{KeyType::Character, 'J'}},
    /* 0e */ KeyMap{Key{KeyType::Character, 'k'}, Key{KeyType::Character, 'K'}},
    /* 0f */ KeyMap{Key{KeyType::Character, 'l'}, Key{KeyType::Character, 'L'}},
    /* 10 */ KeyMap{Key{KeyType::Character, 'm'}, Key{KeyType::Character, 'M'}},
    /* 11 */ KeyMap{Key{KeyType::Character, 'n'}, Key{KeyType::Character, 'N'}},
    /* 12 */ KeyMap{Key{KeyType::Character, 'o'}, Key{KeyType::Character, 'O'}},
    /* 13 */ KeyMap{Key{KeyType::Character, 'p'}, Key{KeyType::Character, 'P'}},
    /* 14 */ KeyMap{Key{KeyType::Character, 'q'}, Key{KeyType::Character, 'Q'}},
    /* 15 */ KeyMap{Key{KeyType::Character, 'r'}, Key{KeyType::Character, 'R'}},
    /* 16 */ KeyMap{Key{KeyType::Character, 's'}, Key{KeyType::Character, 'S'}},
    /* 17 */ KeyMap{Key{KeyType::Character, 't'}, Key{KeyType::Character, 'T'}},
    /* 18 */ KeyMap{Key{KeyType::Character, 'u'}, Key{KeyType::Character, 'U'}},
    /* 19 */ KeyMap{Key{KeyType::Character, 'v'}, Key{KeyType::Character, 'V'}},
    /* 1a */ KeyMap{Key{KeyType::Character, 'w'}, Key{KeyType::Character, 'W'}},
    /* 1b */ KeyMap{Key{KeyType::Character, 'x'}, Key{KeyType::Character, 'X'}},
    /* 1c */ KeyMap{Key{KeyType::Character, 'y'}, Key{KeyType::Character, 'Y'}},
    /* 1d */ KeyMap{Key{KeyType::Character, 'z'}, Key{KeyType::Character, 'Z'}},
    /* 1e */ KeyMap{Key{KeyType::Character, '1'}, Key{KeyType::Character, '!'}},
    /* 1f */ KeyMap{Key{KeyType::Character, '2'}, Key{KeyType::Character, '@'}},
    /* 20 */ KeyMap{Key{KeyType::Character, '3'}, Key{KeyType::Character, '#'}},
    /* 21 */ KeyMap{Key{KeyType::Character, '4'}, Key{KeyType::Character, '$'}},
    /* 22 */ KeyMap{Key{KeyType::Character, '5'}, Key{KeyType::Character, '%'}},
    /* 23 */ KeyMap{Key{KeyType::Character, '6'}, Key{KeyType::Character, '^'}},
    /* 24 */ KeyMap{Key{KeyType::Character, '7'}, Key{KeyType::Character, '&'}},
    /* 25 */ KeyMap{Key{KeyType::Character, '8'}, Key{KeyType::Character, '*'}},
    /* 26 */ KeyMap{Key{KeyType::Character, '9'}, Key{KeyType::Character, '('}},
    /* 27 */ KeyMap{Key{KeyType::Character, '0'}, Key{KeyType::Character, ')'}},
    /* 28 */ KeyMap{Key{KeyType::Character, 0x0d}, Key{KeyType::Character, 0x0d}}, // Enter
    /* 29 */ KeyMap{Key{KeyType::Character, 0x1b}, Key{KeyType::Character, 0x1b}}, // Escape
    /* 2a */ KeyMap{Key{KeyType::Character, 0x08}, Key{KeyType::Character, 0x08}}, // Backspace
    /* 2b */ KeyMap{Key{KeyType::Character, 0x09}, Key{KeyType::Character, 0x09}}, // Tab
    /* 2c */ KeyMap{Key{KeyType::Character, ' '}, Key{KeyType::Character, ' '}},
    /* 2d */ KeyMap{Key{KeyType::Character, '-'}, Key{KeyType::Character, '_'}},
    /* 2e */ KeyMap{Key{KeyType::Character, '='}, Key{KeyType::Character, '+'}},
    /* 2f */ KeyMap{Key{KeyType::Character, '['}, Key{KeyType::Character, '{'}},
    /* 30 */ KeyMap{Key{KeyType::Character, ']'}, Key{KeyType::Character, '}'}},
    /* 31 */ KeyMap{Key{KeyType::Character, '\\'}, Key{KeyType::Character, '|'}},
    /* 32 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 33 */ KeyMap{Key{KeyType::Character, ';'}, Key{KeyType::Character, ':'}},
    /* 34 */ KeyMap{Key{KeyType::Character, '\''}, Key{KeyType::Character, '"'}},
    /* 35 */ KeyMap{Key{KeyType::Character, '`'}, Key{KeyType::Character, '~'}},
    /* 36 */ KeyMap{Key{KeyType::Character, ','}, Key{KeyType::Character, '<'}},
    /* 37 */ KeyMap{Key{KeyType::Character, '.'}, Key{KeyType::Character, '>'}},
    /* 38 */ KeyMap{Key{KeyType::Character, '/'}, Key{KeyType::Character, '?'}},
    /* 39 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 3a */ KeyMap{Key{KeyType::Special, F1}, Key{KeyType::Special, F1}},
    /* 3b */ KeyMap{Key{KeyType::Special, F2}, Key{KeyType::Special, F2}},
    /* 3c */ KeyMap{Key{KeyType::Special, F3}, Key{KeyType::Special, F3}},
    /* 3d */ KeyMap{Key{KeyType::Special, F4}, Key{KeyType::Special, F4}},
    /* 3e */ KeyMap{Key{KeyType::Special, F5}, Key{KeyType::Special, F5}},
    /* 3f */ KeyMap{Key{KeyType::Special, F6}, Key{KeyType::Special, F6}},
    /* 40 */ KeyMap{Key{KeyType::Special, F7}, Key{KeyType::Special, F7}},
    /* 41 */ KeyMap{Key{KeyType::Special, F8}, Key{KeyType::Special, F8}},
    /* 42 */ KeyMap{Key{KeyType::Special, F9}, Key{KeyType::Special, F9}},
    /* 43 */ KeyMap{Key{KeyType::Special, F10}, Key{KeyType::Special, F10}},
    /* 44 */ KeyMap{Key{KeyType::Special, F11}, Key{KeyType::Special, F11}},
    /* 45 */ KeyMap{Key{KeyType::Special, F12}, Key{KeyType::Special, F12}},
    /* 46 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 47 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 48 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 49 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4a */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4b */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4c */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4d */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4e */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 4f */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 50 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 51 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 52 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 53 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    // Keypad block:
    /* 54 */ KeyMap{Key{KeyType::Character, '/'}, Key{KeyType::Character, '/'}},
    /* 55 */ KeyMap{Key{KeyType::Character, '*'}, Key{KeyType::Character, '*'}},
    /* 56 */ KeyMap{Key{KeyType::Character, '-'}, Key{KeyType::Character, '-'}},
    /* 57 */ KeyMap{Key{KeyType::Character, '+'}, Key{KeyType::Character, '+'}},
    /* 58 */ KeyMap{Key{KeyType::Character, 0x0d}, Key{KeyType::Character, 0x0d}},
    /* 59 */ KeyMap{Key{KeyType::Character, '1'}, Key{KeyType::Character, '1'}},
    /* 5a */ KeyMap{Key{KeyType::Character, '2'}, Key{KeyType::Character, '2'}},
    /* 5b */ KeyMap{Key{KeyType::Character, '3'}, Key{KeyType::Character, '3'}},
    /* 5c */ KeyMap{Key{KeyType::Character, '4'}, Key{KeyType::Character, '4'}},
    /* 5d */ KeyMap{Key{KeyType::Character, '5'}, Key{KeyType::Character, '5'}},
    /* 5e */ KeyMap{Key{KeyType::Character, '6'}, Key{KeyType::Character, '6'}},
    /* 5f */ KeyMap{Key{KeyType::Character, '7'}, Key{KeyType::Character, '7'}},
    /* 60 */ KeyMap{Key{KeyType::Character, '8'}, Key{KeyType::Character, '8'}},
    /* 61 */ KeyMap{Key{KeyType::Character, '9'}, Key{KeyType::Character, '9'}},
    /* 62 */ KeyMap{Key{KeyType::Character, '0'}, Key{KeyType::Character, '0'}},
    /* 63 */ KeyMap{Key{KeyType::Character, '.'}, Key{KeyType::Character, '.'}},
    /* 64 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 65 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 66 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 67 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 68 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 69 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6a */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6b */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6c */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6d */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6e */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 6f */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 70 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 71 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 72 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 73 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 74 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 75 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 76 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 77 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 78 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 79 */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7a */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7b */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7c */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7d */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7e */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
    /* 7f */ KeyMap{Key{KeyType::Invalid, 0}, Key{KeyType::Invalid, 0}},
};
// Driver instance for one USB boot-protocol keyboard. Receives interrupt IN
// transfers via the pipe callback and forwards decoded keys to the keyboard
// multiplexer (kbdmux).
class USBKeyboard : public Device, private IDeviceOperations, private usb::IPipeCallback
{
  public:
    using Device::Device;
    virtual ~USBKeyboard() = default;

    IDeviceOperations& GetDeviceOperations() override { return *this; }

    // Device lifecycle: set up / tear down the interrupt pipe.
    Result Attach() override;
    Result Detach() override;

  protected:
    // Invoked by the USB stack when an interrupt transfer completes.
    void OnPipeCallback(usb::Pipe& pipe) override;

  private:
    usb::USBDevice* uk_Device = nullptr; // underlying USB device (resource)
    usb::Pipe* uk_Pipe = nullptr;        // interrupt IN pipe, owned by uk_Device
};
// Claims the USB device resource and opens the interrupt IN pipe that
// delivers boot-protocol keyboard reports, then arms the first transfer.
Result USBKeyboard::Attach()
{
    auto* deviceResource = d_ResourceSet.AllocateResource(Resource::RT_USB_Device, 0);
    uk_Device = static_cast<usb::USBDevice*>(deviceResource);

    // Endpoint 0 of this interface must be an interrupt IN endpoint.
    auto allocResult = uk_Device->AllocatePipe(0, TRANSFER_TYPE_INTERRUPT, EP_DIR_IN, 0, *this, uk_Pipe);
    if (allocResult.IsFailure()) {
        Printf("endpoint 0 not interrupt/in");
        return allocResult;
    }

    // Kick off the first transfer; OnPipeCallback() re-arms the pipe.
    return uk_Pipe->Start();
}
// Releases the interrupt pipe; safe to call when Attach() never ran.
Result USBKeyboard::Detach()
{
    if (uk_Device != nullptr) {
        if (uk_Pipe != nullptr)
            uk_Device->FreePipe(*uk_Pipe);
        uk_Pipe = nullptr;
    }
    return Result::Success();
}
// Decodes a completed boot-protocol keyboard report and forwards each pressed
// key to the keyboard multiplexer, then re-arms the interrupt pipe.
void USBKeyboard::OnPipeCallback(usb::Pipe& pipe)
{
    usb::Transfer& xfer = pipe.p_xfer;
    if (xfer.t_flags & TRANSFER_FLAG_ERROR)
        return;

    // See if there's anything worthwhile to report here. We lazily use the USB boot class as
    // it's much easier to process: HID 1.1 B.1 Protocol 1 (keyboard) lists everything.
    // Report layout: byte 0 = modifier bits, byte 1 = reserved, bytes 2..7 = scancodes.
    for (int n = 2; n < 8; n++) {
        int scancode = xfer.t_data[n];
        // Scancode 0 means "no key in this slot". Valid keymap indices are
        // 0..keymap.size()-1, so the bound must be '>=': the previous '>'
        // comparison let scancode == keymap.size() through and read
        // keymap[keymap.size()] out of bounds.
        if (scancode == 0 || scancode >= static_cast<int>(keymap.size()))
            continue;

        // Translate the HID modifier byte into the kbdmux modifier mask.
        const int modifiers = [](int d) {
            int m = 0;
            if (d & (modifierByte::LeftShift | modifierByte::RightShift))
                m |= modifier::Shift;
            if (d & (modifierByte::LeftControl | modifierByte::RightControl))
                m |= modifier::Control;
            if (d & (modifierByte::LeftAlt | modifierByte::RightAlt))
                m |= modifier::Alt;
            return m;
        }(xfer.t_data[0]);

        // Look up the scancode; Control combinations use the unshifted key.
        const auto& key = [](int scancode, int modifiers) {
            const auto& km = keymap[scancode];
            if (modifiers & modifier::Control)
                return km.standard;
            if (modifiers & modifier::Shift)
                return km.shift;
            return km.standard;
        }(scancode, modifiers);
        if (key.IsValid())
            keyboard_mux::OnKey(key, modifiers);
    }

    /* Reschedule the pipe for future updates */
    uk_Pipe->Start();
}
// Driver matcher: probes usbbus devices and instantiates a USBKeyboard for
// every HID boot-protocol keyboard interface.
struct USBKeyboard_Driver : public Driver {
    USBKeyboard_Driver() : Driver("usbkeyboard") {}

    const char* GetBussesToProbeOn() const override { return "usbbus"; }

    Device* CreateDevice(const CreateDeviceProperties& cdp) override
    {
        auto res = cdp.cdp_ResourceSet.GetResource(Resource::RT_USB_Device, 0);
        if (res == nullptr)
            return nullptr;

        auto usb_dev = static_cast<usb::USBDevice*>(reinterpret_cast<void*>(res->r_Base));
        auto& iface = usb_dev->ud_interface[usb_dev->ud_cur_interface];
        const bool isHid = (iface.if_class == USB_IF_CLASS_HID);
        const bool isBootInterface = (iface.if_subclass == 1);
        const bool isKeyboard = (iface.if_protocol == 1);
        if (!isHid || !isBootInterface || !isKeyboard)
            return nullptr;
        return new USBKeyboard(cdp);
    }
};
// Registers the driver with the kernel driver framework at static-init time.
const RegisterDriver<USBKeyboard_Driver> registerDriver;
} // unnamed namespace
| 53.426117 | 100 | 0.570721 | [
"3d"
] |
f6cc560a7e1b9246e8d35e88ed8865eebbb29f79 | 47,117 | cpp | C++ | Plugins/org.mitk.gui.qt.basicimageprocessing/src/internal/QmitkBasicImageProcessingView.cpp | zhaomengxiao/MITK | a09fd849a4328276806008bfa92487f83a9e2437 | [
"BSD-3-Clause"
] | null | null | null | Plugins/org.mitk.gui.qt.basicimageprocessing/src/internal/QmitkBasicImageProcessingView.cpp | zhaomengxiao/MITK | a09fd849a4328276806008bfa92487f83a9e2437 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T10:19:02.000Z | 2021-12-22T10:19:02.000Z | Plugins/org.mitk.gui.qt.basicimageprocessing/src/internal/QmitkBasicImageProcessingView.cpp | zhaomengxiao/MITK_lancet | a09fd849a4328276806008bfa92487f83a9e2437 | [
"BSD-3-Clause"
] | null | null | null | /*============================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center (DKFZ)
All rights reserved.
Use of this source code is governed by a 3-clause BSD license that can be
found in the LICENSE file.
============================================================================*/
#include "QmitkBasicImageProcessingView.h"
// QT includes (GUI)
#include <qlabel.h>
#include <qspinbox.h>
#include <qpushbutton.h>
#include <qcheckbox.h>
#include <qgroupbox.h>
#include <qradiobutton.h>
#include <qmessagebox.h>
// MITK includes (general)
#include <mitkNodePredicateDataType.h>
#include <mitkNodePredicateDimension.h>
#include <mitkNodePredicateNot.h>
#include <mitkNodePredicateOr.h>
#include <mitkNodePredicateProperty.h>
#include <mitkImageTimeSelector.h>
#include <mitkVectorImageMapper2D.h>
#include <mitkProperties.h>
#include <mitkLevelWindowProperty.h>
#include <mitkImageStatisticsHolder.h>
// Includes for image casting between ITK and MITK
#include <mitkImageCast.h>
#include <mitkITKImageImport.h>
// ITK includes (general)
#include <itkVectorImage.h>
#include <itkImageFileWriter.h>
// Morphological Operations
#include <itkBinaryBallStructuringElement.h>
#include <itkGrayscaleDilateImageFilter.h>
#include <itkGrayscaleErodeImageFilter.h>
#include <itkGrayscaleMorphologicalOpeningImageFilter.h>
#include <itkGrayscaleMorphologicalClosingImageFilter.h>
// Smoothing
#include <itkMedianImageFilter.h>
#include <itkDiscreteGaussianImageFilter.h>
#include <itkTotalVariationDenoisingImageFilter.h>
// Threshold
#include <itkBinaryThresholdImageFilter.h>
// Inversion
#include <itkInvertIntensityImageFilter.h>
// Derivatives
#include <itkGradientMagnitudeRecursiveGaussianImageFilter.h>
#include <itkLaplacianImageFilter.h>
#include <itkSobelEdgeDetectionImageFilter.h>
// Resampling
#include <itkResampleImageFilter.h>
#include <itkNearestNeighborInterpolateImageFunction.h>
#include <itkBSplineInterpolateImageFunction.h>
#include <itkCastImageFilter.h>
#include <itkLinearInterpolateImageFunction.h>
// Image Arithmetics
#include <itkAddImageFilter.h>
#include <itkSubtractImageFilter.h>
#include <itkMultiplyImageFilter.h>
#include <itkDivideImageFilter.h>
// Boolean operations
#include <itkOrImageFilter.h>
#include <itkAndImageFilter.h>
#include <itkXorImageFilter.h>
// Flip Image
#include <itkFlipImageFilter.h>
#include <itkRescaleIntensityImageFilter.h>
#include <itkShiftScaleImageFilter.h>
// Convenient Definitions
typedef itk::Image<short, 3> ImageType;
typedef itk::Image<unsigned char, 3> SegmentationImageType;
typedef itk::Image<double, 3> DoubleImageType;
typedef itk::Image<itk::Vector<float,3>, 3> VectorImageType;
typedef itk::BinaryBallStructuringElement<ImageType::PixelType, 3> BallType;
typedef itk::GrayscaleDilateImageFilter<ImageType, ImageType, BallType> DilationFilterType;
typedef itk::GrayscaleErodeImageFilter<ImageType, ImageType, BallType> ErosionFilterType;
typedef itk::GrayscaleMorphologicalOpeningImageFilter<ImageType, ImageType, BallType> OpeningFilterType;
typedef itk::GrayscaleMorphologicalClosingImageFilter<ImageType, ImageType, BallType> ClosingFilterType;
typedef itk::MedianImageFilter< ImageType, ImageType > MedianFilterType;
typedef itk::DiscreteGaussianImageFilter< ImageType, ImageType> GaussianFilterType;
typedef itk::TotalVariationDenoisingImageFilter<DoubleImageType, DoubleImageType> TotalVariationFilterType;
typedef itk::TotalVariationDenoisingImageFilter<VectorImageType, VectorImageType> VectorTotalVariationFilterType;
typedef itk::BinaryThresholdImageFilter< ImageType, ImageType > ThresholdFilterType;
typedef itk::InvertIntensityImageFilter< ImageType, ImageType > InversionFilterType;
typedef itk::GradientMagnitudeRecursiveGaussianImageFilter< ImageType, ImageType > GradientFilterType;
typedef itk::LaplacianImageFilter< DoubleImageType, DoubleImageType > LaplacianFilterType;
typedef itk::SobelEdgeDetectionImageFilter< DoubleImageType, DoubleImageType > SobelFilterType;
typedef itk::ResampleImageFilter< ImageType, ImageType > ResampleImageFilterType;
typedef itk::ResampleImageFilter< ImageType, ImageType > ResampleImageFilterType2;
typedef itk::CastImageFilter< ImageType, DoubleImageType > ImagePTypeToFloatPTypeCasterType;
typedef itk::AddImageFilter< ImageType, ImageType, ImageType > AddFilterType;
typedef itk::SubtractImageFilter< ImageType, ImageType, ImageType > SubtractFilterType;
typedef itk::MultiplyImageFilter< ImageType, ImageType, ImageType > MultiplyFilterType;
typedef itk::DivideImageFilter< ImageType, ImageType, DoubleImageType > DivideFilterType;
typedef itk::OrImageFilter< ImageType, ImageType > OrImageFilterType;
typedef itk::AndImageFilter< ImageType, ImageType > AndImageFilterType;
typedef itk::XorImageFilter< ImageType, ImageType > XorImageFilterType;
typedef itk::FlipImageFilter< ImageType > FlipImageFilterType;
typedef itk::LinearInterpolateImageFunction< ImageType, double > LinearInterpolatorType;
typedef itk::NearestNeighborInterpolateImageFunction< ImageType, double > NearestInterpolatorType;
const std::string QmitkBasicImageProcessing::VIEW_ID = "org.mitk.views.basicimageprocessing";
// Builds the node-selection predicate used by both image selectors:
// 3D or 4D images that are not render-helper objects.
QmitkBasicImageProcessing::QmitkBasicImageProcessing()
  : QmitkAbstractView()
  , m_Controls(new Ui::QmitkBasicImageProcessingViewControls)
  , m_TimeStepperAdapter(nullptr)
{
  auto imagePredicate = mitk::TNodePredicateDataType<mitk::Image>::New();

  auto helperProperty =
    mitk::NodePredicateProperty::New("helper object", mitk::BoolProperty::New(true));
  auto noHelperPredicate = mitk::NodePredicateNot::New(helperProperty);

  auto is3D = mitk::NodePredicateDimension::New(3);
  auto is4D = mitk::NodePredicateDimension::New(4);
  auto dimPredicate = mitk::NodePredicateOr::New(is3D, is4D);

  m_IsImagePredicate =
    mitk::NodePredicateAnd::New(imagePredicate, noHelperPredicate, dimPredicate);
}
// NOTE(review): m_Controls is allocated with 'new' in the constructor but not
// released here -- presumably it is a smart-pointer member; verify against
// the member declaration in the header.
QmitkBasicImageProcessing::~QmitkBasicImageProcessing()
{
}
// Builds the view's GUI: configures both image selectors, populates the
// operation combo boxes (the insert indices must match the enum values used
// by SelectAction/SelectAction2), and hides the optional parameter widgets.
void QmitkBasicImageProcessing::CreateQtPartControl(QWidget *parent)
{
  m_Controls->setupUi(parent);

  // Selector for the primary image (single- and two-image operations).
  m_Controls->selectedImageWidget->SetDataStorage(this->GetDataStorage());
  m_Controls->selectedImageWidget->SetNodePredicate(m_IsImagePredicate);
  m_Controls->selectedImageWidget->SetSelectionIsOptional(true);
  m_Controls->selectedImageWidget->SetAutoSelectNewNodes(true);
  m_Controls->selectedImageWidget->SetEmptyInfo(QString("Please select a 3D / 4D image"));
  m_Controls->selectedImageWidget->SetPopUpTitel(QString("Select an image"));

  // Selector for the second image (two-image operations only).
  m_Controls->selectedImageWidget_2->SetDataStorage(this->GetDataStorage());
  m_Controls->selectedImageWidget_2->SetNodePredicate(m_IsImagePredicate);
  m_Controls->selectedImageWidget_2->SetSelectionIsOptional(true);
  m_Controls->selectedImageWidget_2->SetAutoSelectNewNodes(true);
  m_Controls->selectedImageWidget_2->SetEmptyInfo(QString("Please select a 3D / 4D image"));
  m_Controls->selectedImageWidget_2->SetPopUpTitel(QString("Select an image"));

  // Single-image mode is shown by default.
  m_Controls->gbTwoImageOps->hide();

  // Single-image operations; item index == action enum value.
  m_Controls->cbWhat1->clear();
  m_Controls->cbWhat1->insertItem(NOACTIONSELECTED, "Please select an operation");
  m_Controls->cbWhat1->insertItem(CATEGORY_DENOISING, "--- Denoising ---");
  m_Controls->cbWhat1->insertItem(GAUSSIAN, "Gaussian");
  m_Controls->cbWhat1->insertItem(MEDIAN, "Median");
  m_Controls->cbWhat1->insertItem(TOTALVARIATION, "Total Variation");
  m_Controls->cbWhat1->insertItem(CATEGORY_MORPHOLOGICAL, "--- Morphological ---");
  m_Controls->cbWhat1->insertItem(DILATION, "Dilation");
  m_Controls->cbWhat1->insertItem(EROSION, "Erosion");
  m_Controls->cbWhat1->insertItem(OPENING, "Opening");
  m_Controls->cbWhat1->insertItem(CLOSING, "Closing");
  m_Controls->cbWhat1->insertItem(CATEGORY_EDGE_DETECTION, "--- Edge Detection ---");
  m_Controls->cbWhat1->insertItem(GRADIENT, "Gradient");
  m_Controls->cbWhat1->insertItem(LAPLACIAN, "Laplacian (2nd Derivative)");
  m_Controls->cbWhat1->insertItem(SOBEL, "Sobel Operator");
  m_Controls->cbWhat1->insertItem(CATEGORY_MISC, "--- Misc ---");
  m_Controls->cbWhat1->insertItem(THRESHOLD, "Threshold");
  m_Controls->cbWhat1->insertItem(INVERSION, "Image Inversion");
  m_Controls->cbWhat1->insertItem(DOWNSAMPLING, "Downsampling");
  m_Controls->cbWhat1->insertItem(FLIPPING, "Flipping");
  m_Controls->cbWhat1->insertItem(RESAMPLING, "Resample to");
  m_Controls->cbWhat1->insertItem(RESCALE, "Rescale values to interval");
  m_Controls->cbWhat1->insertItem(RESCALE2, "Rescale values by scalar");

  // Two-image operations; item index == action enum value.
  m_Controls->cbWhat2->clear();
  m_Controls->cbWhat2->insertItem(TWOIMAGESNOACTIONSELECTED, "Please select an operation");
  m_Controls->cbWhat2->insertItem(CATEGORY_ARITHMETIC, "--- Arithmetric operations ---");
  m_Controls->cbWhat2->insertItem(ADD, "Add to Image 1:");
  m_Controls->cbWhat2->insertItem(SUBTRACT, "Subtract from Image 1:");
  m_Controls->cbWhat2->insertItem(MULTIPLY, "Multiply with Image 1:");
  m_Controls->cbWhat2->insertItem(RESAMPLE_TO, "Resample Image 1 to fit geometry:");
  m_Controls->cbWhat2->insertItem(DIVIDE, "Divide Image 1 by:");
  m_Controls->cbWhat2->insertItem(CATEGORY_BOOLEAN, "--- Boolean operations ---");
  m_Controls->cbWhat2->insertItem(AND, "AND");
  m_Controls->cbWhat2->insertItem(OR, "OR");
  m_Controls->cbWhat2->insertItem(XOR, "XOR");

  // Interpolator choices for the resampling actions.
  m_Controls->cbParam4->clear();
  m_Controls->cbParam4->insertItem(LINEAR, "Linear");
  m_Controls->cbParam4->insertItem(NEAREST, "Nearest neighbor");

  // Optional parameter widgets start hidden; SelectAction() shows them.
  m_Controls->dsbParam1->hide();
  m_Controls->dsbParam2->hide();
  m_Controls->dsbParam3->hide();
  m_Controls->tlParam3->hide();
  m_Controls->tlParam4->hide();
  m_Controls->cbParam4->hide();

  this->CreateConnections();
}
// Wires all UI signals to their handlers, using the compile-time checked
// pointer-to-member connect syntax throughout.
void QmitkBasicImageProcessing::CreateConnections()
{
  // Single-image operation selection and execution.
  connect(m_Controls->cbWhat1, QOverload<int>::of(&QComboBox::activated), this, &QmitkBasicImageProcessing::SelectAction);
  connect(m_Controls->btnDoIt, &QPushButton::clicked, this, &QmitkBasicImageProcessing::StartButtonClicked);
  // Two-image operation selection and execution.
  connect(m_Controls->cbWhat2, QOverload<int>::of(&QComboBox::activated), this, &QmitkBasicImageProcessing::SelectAction2);
  connect(m_Controls->btnDoIt2, &QPushButton::clicked, this, &QmitkBasicImageProcessing::StartButton2Clicked);
  // Mode toggle between the single- and two-image group boxes.
  connect(m_Controls->rBOneImOp, &QRadioButton::clicked, this, &QmitkBasicImageProcessing::ChangeGUI);
  connect(m_Controls->rBTwoImOp, &QRadioButton::clicked, this, &QmitkBasicImageProcessing::ChangeGUI);

  connect(m_Controls->cbParam4, QOverload<int>::of(&QComboBox::activated), this, &QmitkBasicImageProcessing::SelectInterpolator);

  // React to changes in either image selector.
  connect(m_Controls->selectedImageWidget, &QmitkAbstractNodeSelectionWidget::CurrentSelectionChanged,
    this, &QmitkBasicImageProcessing::OnCurrentSelectionChanged);
  connect(m_Controls->selectedImageWidget_2, &QmitkAbstractNodeSelectionWidget::CurrentSelectionChanged,
    this, &QmitkBasicImageProcessing::OnCurrentSelectionChanged);
}
// Hooks the view's time slider up to the render window's time stepper so
// individual time steps of a 4D image can be selected for processing.
void QmitkBasicImageProcessing::InternalGetTimeNavigationController()
{
  auto renwin_part = GetRenderWindowPart();
  if (renwin_part == nullptr)
    return;

  auto tnc = renwin_part->GetTimeNavigationController();
  if (tnc == nullptr)
    return;

  // NOTE(review): a new adapter is created on every call without disposing of
  // the previous one -- presumably it is Qt-parented to the navigator widget;
  // verify that repeated calls do not accumulate duplicate adapters.
  m_TimeStepperAdapter = new QmitkStepperAdapter((QObject*) m_Controls->sliceNavigatorTime, tnc->GetTime(), "sliceNavigatorTimeFromBIP");
}
// Gives keyboard focus to the first control (the single-image radio button)
// when the view is activated.
void QmitkBasicImageProcessing::SetFocus()
{
  m_Controls->rBOneImOp->setFocus();
}
void QmitkBasicImageProcessing::OnCurrentSelectionChanged(const QList<mitk::DataNode::Pointer>& nodes)
{
if (nodes.empty() || nodes.front().IsNull())
{
m_Controls->sliceNavigatorTime->setEnabled(false);
m_Controls->tlTime->setEnabled(false);
m_Controls->tlWhat1->setEnabled(false);
m_Controls->cbWhat1->setEnabled(false);
m_Controls->tlWhat2->setEnabled(false);
m_Controls->cbWhat2->setEnabled(false);
return;
}
auto selectedImage = dynamic_cast<mitk::Image*>(nodes.front()->GetData());
if (nullptr == selectedImage)
{
return;
}
if (selectedImage->GetDimension() > 3)
{
// try to retrieve the TNC (for 4-D Processing )
this->InternalGetTimeNavigationController();
m_Controls->sliceNavigatorTime->setEnabled(true);
m_Controls->tlTime->setEnabled(true);
}
m_Controls->tlWhat1->setEnabled(true);
m_Controls->cbWhat1->setEnabled(true);
m_Controls->tlWhat2->setEnabled(true);
m_Controls->cbWhat2->setEnabled(true);
}
// Shows the group box that matches the selected processing mode and hides
// the other one.
void QmitkBasicImageProcessing::ChangeGUI()
{
  const bool singleImageMode = m_Controls->rBOneImOp->isChecked();
  const bool dualImageMode = m_Controls->rBTwoImOp->isChecked();

  if (singleImageMode)
  {
    m_Controls->gbTwoImageOps->hide();
    m_Controls->gbOneImageOps->show();
  }
  else if (dualImageMode)
  {
    m_Controls->gbOneImageOps->hide();
    m_Controls->gbTwoImageOps->show();
  }
}
void QmitkBasicImageProcessing::ResetParameterPanel()
{
m_Controls->tlParam->setEnabled(false);
m_Controls->tlParam1->setEnabled(false);
m_Controls->tlParam2->setEnabled(false);
m_Controls->tlParam3->setEnabled(false);
m_Controls->tlParam4->setEnabled(false);
m_Controls->sbParam1->setEnabled(false);
m_Controls->sbParam2->setEnabled(false);
m_Controls->dsbParam1->setEnabled(false);
m_Controls->dsbParam2->setEnabled(false);
m_Controls->dsbParam3->setEnabled(false);
m_Controls->cbParam4->setEnabled(false);
m_Controls->sbParam1->setValue(0);
m_Controls->sbParam2->setValue(0);
m_Controls->dsbParam1->setValue(0);
m_Controls->dsbParam2->setValue(0);
m_Controls->dsbParam3->setValue(0);
m_Controls->sbParam1->show();
m_Controls->sbParam2->show();
m_Controls->dsbParam1->hide();
m_Controls->dsbParam2->hide();
m_Controls->dsbParam3->hide();
m_Controls->cbParam4->hide();
m_Controls->tlParam3->hide();
m_Controls->tlParam4->hide();
}
void QmitkBasicImageProcessing::SelectAction(int action)
{
auto selectedImage = m_Controls->selectedImageWidget->GetSelectedNode();
if (selectedImage.IsNull())
{
return;
}
// Prepare GUI
this->ResetParameterPanel();
m_Controls->btnDoIt->setEnabled(false);
m_Controls->cbHideOrig->setEnabled(false);
QString text1 = tr("No Parameters");
QString text2 = text1;
QString text3 = text1;
QString text4 = text1;
if (action != 19)
{
m_Controls->dsbParam1->hide();
m_Controls->dsbParam2->hide();
m_Controls->dsbParam3->hide();
m_Controls->tlParam1->show();
m_Controls->tlParam2->show();
m_Controls->tlParam3->hide();
m_Controls->tlParam4->hide();
m_Controls->sbParam1->show();
m_Controls->sbParam2->show();
m_Controls->cbParam4->hide();
}
switch (action)
{
case 2:
{
m_SelectedAction = GAUSSIAN;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->hide();
m_Controls->dsbParam1->show();
m_Controls->dsbParam1->setEnabled(true);
text1 = tr("&Variance:");
m_Controls->tlParam2->hide();
m_Controls->sbParam2->hide();
m_Controls->dsbParam1->setMinimum( 0 );
m_Controls->dsbParam1->setMaximum( 200 );
m_Controls->dsbParam1->setValue( 2 );
break;
}
case 3:
{
m_SelectedAction = MEDIAN;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("&Radius:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 200 );
m_Controls->sbParam1->setValue( 3 );
break;
}
case 4:
{
m_SelectedAction = TOTALVARIATION;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
m_Controls->tlParam2->setEnabled(true);
m_Controls->sbParam2->setEnabled(true);
text1 = tr("Number Iterations:");
text2 = tr("Regularization\n(Lambda/1000):");
m_Controls->sbParam1->setMinimum( 1 );
m_Controls->sbParam1->setMaximum( 1000 );
m_Controls->sbParam1->setValue( 40 );
m_Controls->sbParam2->setMinimum( 0 );
m_Controls->sbParam2->setMaximum( 100000 );
m_Controls->sbParam2->setValue( 1 );
break;
}
case 6:
{
m_SelectedAction = DILATION;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("&Radius:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 200 );
m_Controls->sbParam1->setValue( 3 );
break;
}
case 7:
{
m_SelectedAction = EROSION;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("&Radius:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 200 );
m_Controls->sbParam1->setValue( 3 );
break;
}
case 8:
{
m_SelectedAction = OPENING;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("&Radius:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 200 );
m_Controls->sbParam1->setValue( 3 );
break;
}
case 9:
{
m_SelectedAction = CLOSING;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("&Radius:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 200 );
m_Controls->sbParam1->setValue( 3 );
break;
}
case 11:
{
m_SelectedAction = GRADIENT;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->hide();
m_Controls->dsbParam1->show();
m_Controls->dsbParam1->setEnabled(true);
text1 = tr("Sigma of Gaussian Kernel:\n(in Image Spacing Units)");
m_Controls->tlParam2->hide();
m_Controls->sbParam2->hide();
m_Controls->dsbParam1->setMinimum( 0 );
m_Controls->dsbParam1->setMaximum( 200 );
m_Controls->dsbParam1->setValue( 2 );
break;
}
case 12:
{
m_SelectedAction = LAPLACIAN;
break;
}
case 13:
{
m_SelectedAction = SOBEL;
break;
}
case 15:
{
m_SelectedAction = THRESHOLD;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
m_Controls->tlParam2->setEnabled(true);
m_Controls->sbParam2->setEnabled(true);
text1 = tr("Lower threshold:");
text2 = tr("Upper threshold:");
m_Controls->sbParam1->setMinimum( -100000 );
m_Controls->sbParam1->setMaximum( 100000 );
m_Controls->sbParam1->setValue( 0 );
m_Controls->sbParam2->setMinimum( -100000 );
m_Controls->sbParam2->setMaximum( 100000 );
m_Controls->sbParam2->setValue( 300 );
break;
}
case 16:
{
m_SelectedAction = INVERSION;
break;
}
case 17:
{
m_SelectedAction = DOWNSAMPLING;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("Downsampling by Factor:");
m_Controls->sbParam1->setMinimum( 1 );
m_Controls->sbParam1->setMaximum( 100 );
m_Controls->sbParam1->setValue( 2 );
break;
}
case 18:
{
m_SelectedAction = FLIPPING;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(true);
text1 = tr("Flip across axis:");
m_Controls->sbParam1->setMinimum( 0 );
m_Controls->sbParam1->setMaximum( 2 );
m_Controls->sbParam1->setValue( 1 );
break;
}
case 19:
{
m_SelectedAction = RESAMPLING;
m_Controls->tlParam1->setEnabled(true);
m_Controls->sbParam1->setEnabled(false);
m_Controls->sbParam1->hide();
m_Controls->dsbParam1->show();
m_Controls->dsbParam1->setEnabled(true);
m_Controls->tlParam2->setEnabled(true);
m_Controls->sbParam2->setEnabled(false);
m_Controls->sbParam2->hide();
m_Controls->dsbParam2->show();
m_Controls->dsbParam2->setEnabled(true);
m_Controls->tlParam3->show();
m_Controls->tlParam3->setEnabled(true);
m_Controls->dsbParam3->show();
m_Controls->dsbParam3->setEnabled(true);
m_Controls->tlParam4->show();
m_Controls->tlParam4->setEnabled(true);
m_Controls->cbParam4->show();
m_Controls->cbParam4->setEnabled(true);
m_Controls->dsbParam1->setMinimum(0.01);
m_Controls->dsbParam1->setMaximum(10.0);
m_Controls->dsbParam1->setSingleStep(0.1);
m_Controls->dsbParam1->setValue(0.3);
m_Controls->dsbParam2->setMinimum(0.01);
m_Controls->dsbParam2->setMaximum(10.0);
m_Controls->dsbParam2->setSingleStep(0.1);
m_Controls->dsbParam2->setValue(0.3);
m_Controls->dsbParam3->setMinimum(0.01);
m_Controls->dsbParam3->setMaximum(10.0);
m_Controls->dsbParam3->setSingleStep(0.1);
m_Controls->dsbParam3->setValue(1.5);
text1 = tr("x-spacing:");
text2 = tr("y-spacing:");
text3 = tr("z-spacing:");
text4 = tr("Interplation:");
break;
}
case 20:
{
m_SelectedAction = RESCALE;
m_Controls->dsbParam1->show();
m_Controls->tlParam1->show();
m_Controls->dsbParam1->setEnabled(true);
m_Controls->tlParam1->setEnabled(true);
m_Controls->dsbParam2->show();
m_Controls->tlParam2->show();
m_Controls->dsbParam2->setEnabled(true);
m_Controls->tlParam2->setEnabled(true);
text1 = tr("Output minimum:");
text2 = tr("Output maximum:");
break;
}
case 21:
{
m_SelectedAction = RESCALE2;
m_Controls->dsbParam1->show();
m_Controls->tlParam1->show();
m_Controls->dsbParam1->setEnabled(true);
m_Controls->tlParam1->setEnabled(true);
text1 = tr("Scaling value:");
break;
}
default:
return;
}
m_Controls->tlParam->setEnabled(true);
m_Controls->tlParam1->setText(text1);
m_Controls->tlParam2->setText(text2);
m_Controls->tlParam3->setText(text3);
m_Controls->tlParam4->setText(text4);
m_Controls->btnDoIt->setEnabled(true);
m_Controls->cbHideOrig->setEnabled(true);
}
void QmitkBasicImageProcessing::StartButtonClicked()
{
auto selectedNode = m_Controls->selectedImageWidget->GetSelectedNode();
if (selectedNode.IsNull())
{
return;
}
this->BusyCursorOn();
mitk::Image::Pointer newImage;
try
{
newImage = dynamic_cast<mitk::Image*>(selectedNode->GetData());
}
catch ( std::exception &e )
{
QString exceptionString = tr("An error occured during image loading:\n");
exceptionString.append( e.what() );
QMessageBox::warning( nullptr, "Basic Image Processing", exceptionString , QMessageBox::Ok, QMessageBox::NoButton );
this->BusyCursorOff();
return;
}
// check if input image is valid, casting does not throw exception when casting from 'nullptr-Object'
if ( (! newImage) || (newImage->IsInitialized() == false) )
{
this->BusyCursorOff();
QMessageBox::warning( nullptr, "Basic Image Processing", tr("Input image is broken or not initialized. Returning."), QMessageBox::Ok, QMessageBox::NoButton );
return;
}
// check if operation is done on 4D a image time step
if(newImage->GetDimension() > 3)
{
auto timeSelector = mitk::ImageTimeSelector::New();
timeSelector->SetInput(newImage);
timeSelector->SetTimeNr( ((QmitkSliderNavigatorWidget*)m_Controls->sliceNavigatorTime)->GetPos() );
timeSelector->Update();
newImage = timeSelector->GetOutput();
}
// check if image or vector image
auto itkImage = ImageType::New();
auto itkVecImage = VectorImageType::New();
int isVectorImage = newImage->GetPixelType().GetNumberOfComponents();
if(isVectorImage > 1)
{
CastToItkImage( newImage, itkVecImage );
}
else
{
CastToItkImage( newImage, itkImage );
}
std::stringstream nameAddition("");
int param1 = m_Controls->sbParam1->value();
int param2 = m_Controls->sbParam2->value();
double dparam1 = m_Controls->dsbParam1->value();
double dparam2 = m_Controls->dsbParam2->value();
double dparam3 = m_Controls->dsbParam3->value();
try
{
switch (m_SelectedAction)
{
case GAUSSIAN:
{
GaussianFilterType::Pointer gaussianFilter = GaussianFilterType::New();
gaussianFilter->SetInput( itkImage );
gaussianFilter->SetVariance( dparam1 );
gaussianFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(gaussianFilter->GetOutput())->Clone();
nameAddition << "_Gaussian_var_" << dparam1;
std::cout << "Gaussian filtering successful." << std::endl;
break;
}
case MEDIAN:
{
MedianFilterType::Pointer medianFilter = MedianFilterType::New();
MedianFilterType::InputSizeType size;
size.Fill(param1);
medianFilter->SetRadius( size );
medianFilter->SetInput(itkImage);
medianFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(medianFilter->GetOutput())->Clone();
nameAddition << "_Median_radius_" << param1;
std::cout << "Median Filtering successful." << std::endl;
break;
}
case TOTALVARIATION:
{
if(isVectorImage > 1)
{
VectorTotalVariationFilterType::Pointer TVFilter
= VectorTotalVariationFilterType::New();
TVFilter->SetInput( itkVecImage.GetPointer() );
TVFilter->SetNumberIterations(param1);
TVFilter->SetLambda(double(param2)/1000.);
TVFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(TVFilter->GetOutput())->Clone();
}
else
{
ImagePTypeToFloatPTypeCasterType::Pointer floatCaster = ImagePTypeToFloatPTypeCasterType::New();
floatCaster->SetInput( itkImage );
floatCaster->Update();
DoubleImageType::Pointer fImage = floatCaster->GetOutput();
TotalVariationFilterType::Pointer TVFilter
= TotalVariationFilterType::New();
TVFilter->SetInput( fImage.GetPointer() );
TVFilter->SetNumberIterations(param1);
TVFilter->SetLambda(double(param2)/1000.);
TVFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(TVFilter->GetOutput())->Clone();
}
nameAddition << "_TV_Iter_" << param1 << "_L_" << param2;
std::cout << "Total Variation Filtering successful." << std::endl;
break;
}
case DILATION:
{
BallType binaryBall;
binaryBall.SetRadius( param1 );
binaryBall.CreateStructuringElement();
DilationFilterType::Pointer dilationFilter = DilationFilterType::New();
dilationFilter->SetInput( itkImage );
dilationFilter->SetKernel( binaryBall );
dilationFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(dilationFilter->GetOutput())->Clone();
nameAddition << "_Dilated_by_" << param1;
std::cout << "Dilation successful." << std::endl;
break;
}
case EROSION:
{
BallType binaryBall;
binaryBall.SetRadius( param1 );
binaryBall.CreateStructuringElement();
ErosionFilterType::Pointer erosionFilter = ErosionFilterType::New();
erosionFilter->SetInput( itkImage );
erosionFilter->SetKernel( binaryBall );
erosionFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(erosionFilter->GetOutput())->Clone();
nameAddition << "_Eroded_by_" << param1;
std::cout << "Erosion successful." << std::endl;
break;
}
case OPENING:
{
BallType binaryBall;
binaryBall.SetRadius( param1 );
binaryBall.CreateStructuringElement();
OpeningFilterType::Pointer openFilter = OpeningFilterType::New();
openFilter->SetInput( itkImage );
openFilter->SetKernel( binaryBall );
openFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(openFilter->GetOutput())->Clone();
nameAddition << "_Opened_by_" << param1;
std::cout << "Opening successful." << std::endl;
break;
}
case CLOSING:
{
BallType binaryBall;
binaryBall.SetRadius( param1 );
binaryBall.CreateStructuringElement();
ClosingFilterType::Pointer closeFilter = ClosingFilterType::New();
closeFilter->SetInput( itkImage );
closeFilter->SetKernel( binaryBall );
closeFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(closeFilter->GetOutput())->Clone();
nameAddition << "_Closed_by_" << param1;
std::cout << "Closing successful." << std::endl;
break;
}
case GRADIENT:
{
GradientFilterType::Pointer gradientFilter = GradientFilterType::New();
gradientFilter->SetInput( itkImage );
gradientFilter->SetSigma( dparam1 );
gradientFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(gradientFilter->GetOutput())->Clone();
nameAddition << "_Gradient_sigma_" << dparam1;
std::cout << "Gradient calculation successful." << std::endl;
break;
}
case LAPLACIAN:
{
// the laplace filter requires a float type image as input, we need to cast the itkImage
// to correct type
ImagePTypeToFloatPTypeCasterType::Pointer caster = ImagePTypeToFloatPTypeCasterType::New();
caster->SetInput( itkImage );
caster->Update();
DoubleImageType::Pointer fImage = caster->GetOutput();
LaplacianFilterType::Pointer laplacianFilter = LaplacianFilterType::New();
laplacianFilter->SetInput( fImage );
laplacianFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(laplacianFilter->GetOutput())->Clone();
nameAddition << "_Second_Derivative";
std::cout << "Laplacian filtering successful." << std::endl;
break;
}
case SOBEL:
{
// the sobel filter requires a float type image as input, we need to cast the itkImage
// to correct type
ImagePTypeToFloatPTypeCasterType::Pointer caster = ImagePTypeToFloatPTypeCasterType::New();
caster->SetInput( itkImage );
caster->Update();
DoubleImageType::Pointer fImage = caster->GetOutput();
SobelFilterType::Pointer sobelFilter = SobelFilterType::New();
sobelFilter->SetInput( fImage );
sobelFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(sobelFilter->GetOutput())->Clone();
nameAddition << "_Sobel";
std::cout << "Edge Detection successful." << std::endl;
break;
}
case THRESHOLD:
{
ThresholdFilterType::Pointer thFilter = ThresholdFilterType::New();
thFilter->SetLowerThreshold(param1 < param2 ? param1 : param2);
thFilter->SetUpperThreshold(param2 > param1 ? param2 : param1);
thFilter->SetInsideValue(1);
thFilter->SetOutsideValue(0);
thFilter->SetInput(itkImage);
thFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(thFilter->GetOutput())->Clone();
nameAddition << "_Threshold";
std::cout << "Thresholding successful." << std::endl;
break;
}
case INVERSION:
{
InversionFilterType::Pointer invFilter = InversionFilterType::New();
mitk::ScalarType min = newImage->GetStatistics()->GetScalarValueMin();
mitk::ScalarType max = newImage->GetStatistics()->GetScalarValueMax();
invFilter->SetMaximum( max + min );
invFilter->SetInput(itkImage);
invFilter->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(invFilter->GetOutput())->Clone();
nameAddition << "_Inverted";
std::cout << "Image inversion successful." << std::endl;
break;
}
case DOWNSAMPLING:
{
ResampleImageFilterType::Pointer downsampler = ResampleImageFilterType::New();
downsampler->SetInput( itkImage );
NearestInterpolatorType::Pointer interpolator = NearestInterpolatorType::New();
downsampler->SetInterpolator( interpolator );
downsampler->SetDefaultPixelValue( 0 );
ResampleImageFilterType::SpacingType spacing = itkImage->GetSpacing();
spacing *= (double) param1;
downsampler->SetOutputSpacing( spacing );
downsampler->SetOutputOrigin( itkImage->GetOrigin() );
downsampler->SetOutputDirection( itkImage->GetDirection() );
ResampleImageFilterType::SizeType size = itkImage->GetLargestPossibleRegion().GetSize();
for ( int i = 0; i < 3; ++i )
{
size[i] /= param1;
}
downsampler->SetSize( size );
downsampler->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(downsampler->GetOutput())->Clone();
nameAddition << "_Downsampled_by_" << param1;
std::cout << "Downsampling successful." << std::endl;
break;
}
case FLIPPING:
{
FlipImageFilterType::Pointer flipper = FlipImageFilterType::New();
flipper->SetInput( itkImage );
itk::FixedArray<bool, 3> flipAxes;
for(int i=0; i<3; ++i)
{
if(i == param1)
{
flipAxes[i] = true;
}
else
{
flipAxes[i] = false;
}
}
flipper->SetFlipAxes(flipAxes);
flipper->UpdateLargestPossibleRegion();
newImage = mitk::ImportItkImage(flipper->GetOutput())->Clone();
std::cout << "Image flipping successful." << std::endl;
break;
}
case RESAMPLING:
{
std::string selectedInterpolator;
ResampleImageFilterType::Pointer resampler = ResampleImageFilterType::New();
switch (m_SelectedInterpolation)
{
case LINEAR:
{
LinearInterpolatorType::Pointer interpolator = LinearInterpolatorType::New();
resampler->SetInterpolator(interpolator);
selectedInterpolator = "Linear";
break;
}
case NEAREST:
{
NearestInterpolatorType::Pointer interpolator = NearestInterpolatorType::New();
resampler->SetInterpolator(interpolator);
selectedInterpolator = "Nearest";
break;
}
default:
{
LinearInterpolatorType::Pointer interpolator = LinearInterpolatorType::New();
resampler->SetInterpolator(interpolator);
selectedInterpolator = "Linear";
break;
}
}
resampler->SetInput( itkImage );
resampler->SetOutputOrigin( itkImage->GetOrigin() );
ImageType::SizeType input_size = itkImage->GetLargestPossibleRegion().GetSize();
ImageType::SpacingType input_spacing = itkImage->GetSpacing();
ImageType::SizeType output_size;
ImageType::SpacingType output_spacing;
output_size[0] = input_size[0] * (input_spacing[0] / dparam1);
output_size[1] = input_size[1] * (input_spacing[1] / dparam2);
output_size[2] = input_size[2] * (input_spacing[2] / dparam3);
output_spacing [0] = dparam1;
output_spacing [1] = dparam2;
output_spacing [2] = dparam3;
resampler->SetSize( output_size );
resampler->SetOutputSpacing( output_spacing );
resampler->SetOutputDirection( itkImage->GetDirection() );
resampler->UpdateLargestPossibleRegion();
ImageType::Pointer resampledImage = resampler->GetOutput();
newImage = mitk::ImportItkImage( resampledImage )->Clone();
nameAddition << "_Resampled_" << selectedInterpolator;
std::cout << "Resampling successful." << std::endl;
break;
}
case RESCALE:
{
DoubleImageType::Pointer floatImage = DoubleImageType::New();
CastToItkImage( newImage, floatImage );
itk::RescaleIntensityImageFilter<DoubleImageType,DoubleImageType>::Pointer filter = itk::RescaleIntensityImageFilter<DoubleImageType,DoubleImageType>::New();
filter->SetInput(0, floatImage);
filter->SetOutputMinimum(dparam1);
filter->SetOutputMaximum(dparam2);
filter->Update();
floatImage = filter->GetOutput();
newImage = mitk::Image::New();
newImage->InitializeByItk(floatImage.GetPointer());
newImage->SetVolume(floatImage->GetBufferPointer());
nameAddition << "_Rescaled";
std::cout << "Rescaling successful." << std::endl;
break;
}
case RESCALE2:
{
DoubleImageType::Pointer floatImage = DoubleImageType::New();
CastToItkImage( newImage, floatImage );
itk::ShiftScaleImageFilter<DoubleImageType,DoubleImageType>::Pointer filter = itk::ShiftScaleImageFilter<DoubleImageType,DoubleImageType>::New();
filter->SetInput(0, floatImage);
filter->SetScale(dparam1);
filter->Update();
floatImage = filter->GetOutput();
newImage = mitk::Image::New();
newImage->InitializeByItk(floatImage.GetPointer());
newImage->SetVolume(floatImage->GetBufferPointer());
nameAddition << "_Rescaled";
std::cout << "Rescaling successful." << std::endl;
break;
}
default:
this->BusyCursorOff();
return;
}
}
catch (...)
{
this->BusyCursorOff();
QMessageBox::warning(nullptr, "Warning", "Problem when applying filter operation. Check your input...");
return;
}
newImage->DisconnectPipeline();
// adjust level/window to new image
mitk::LevelWindow levelwindow;
levelwindow.SetAuto( newImage );
auto levWinProp = mitk::LevelWindowProperty::New();
levWinProp->SetLevelWindow( levelwindow );
// compose new image name
std::string name = selectedNode->GetName();
if (name.find(".nrrd") == name.size() -5 )
{
name = name.substr(0,name.size() -5);
}
name.append( nameAddition.str() );
// create final result MITK data storage node
auto result = mitk::DataNode::New();
result->SetProperty( "levelwindow", levWinProp );
result->SetProperty( "name", mitk::StringProperty::New( name.c_str() ) );
result->SetData( newImage );
// for vector images, a different mapper is needed
if(isVectorImage > 1)
{
auto mapper = mitk::VectorImageMapper2D::New();
result->SetMapper(1,mapper);
}
// add new image to data storage and set as active to ease further processing
GetDataStorage()->Add(result, selectedNode);
if (m_Controls->cbHideOrig->isChecked() == true)
{
selectedNode->SetProperty("visible", mitk::BoolProperty::New(false));
}
// show the results
mitk::RenderingManager::GetInstance()->RequestUpdateAll();
this->BusyCursorOff();
}
void QmitkBasicImageProcessing::SelectAction2(int operation)
{
  // Translate the combo box index into the corresponding two-image
  // arithmetic operation. Unmapped indices (separators etc.) leave the
  // second-image controls untouched.
  if (operation == 2)
    m_SelectedOperation = ADD;
  else if (operation == 3)
    m_SelectedOperation = SUBTRACT;
  else if (operation == 4)
    m_SelectedOperation = MULTIPLY;
  else if (operation == 5)
    m_SelectedOperation = DIVIDE;
  else if (operation == 6)
    m_SelectedOperation = RESAMPLE_TO;
  else if (operation == 8)
    m_SelectedOperation = AND;
  else if (operation == 9)
    m_SelectedOperation = OR;
  else if (operation == 10)
    m_SelectedOperation = XOR;
  else
    return;

  // A valid operation was chosen: allow picking the second image and running.
  m_Controls->selectedImageLabel_2->setEnabled(true);
  m_Controls->selectedImageWidget_2->setEnabled(true);
  m_Controls->btnDoIt2->setEnabled(true);
}
// Executes the selected two-image operation (arithmetic, logical, or
// resample-to-reference) on the two selected nodes and adds the result to the
// data storage as a child of the first node.
// Changes relative to the previous revision: removed the unreachable `return;`
// after itkGenericExceptionMacro (the macro throws), and renamed the
// RESAMPLE_TO locals that shadowed the outer itkImage1/itkImage2.
void QmitkBasicImageProcessing::StartButton2Clicked()
{
  auto selectedNode = m_Controls->selectedImageWidget->GetSelectedNode();
  if (selectedNode.IsNull())
  {
    return;
  }
  auto selectedNode2 = m_Controls->selectedImageWidget_2->GetSelectedNode();
  if (selectedNode2.IsNull())
  {
    return;
  }
  mitk::Image::Pointer newImage1 = dynamic_cast<mitk::Image*>(selectedNode->GetData());
  mitk::Image::Pointer newImage2 = dynamic_cast<mitk::Image*>(selectedNode2->GetData());
  // check if images are valid
  if(newImage1.IsNull() || newImage2.IsNull() || false == newImage1->IsInitialized() || false == newImage2->IsInitialized())
  {
    // Throws; execution does not continue past this macro.
    itkGenericExceptionMacro(<< "At least one of the input images is broken or not initialized.");
  }
  this->BusyCursorOn();
  // check if 4D image and use filter on correct time step
  if(newImage1->GetDimension() > 3)
  {
    auto timeSelector = mitk::ImageTimeSelector::New();
    auto sn_widget = static_cast<QmitkSliderNavigatorWidget*>( m_Controls->sliceNavigatorTime );
    int time = 0;
    if( sn_widget != nullptr )
      time = sn_widget->GetPos();
    // Extract the same time step from both inputs.
    timeSelector->SetInput(newImage1);
    timeSelector->SetTimeNr( time );
    timeSelector->UpdateLargestPossibleRegion();
    newImage1 = timeSelector->GetOutput();
    newImage1->DisconnectPipeline();
    timeSelector->SetInput(newImage2);
    timeSelector->SetTimeNr( time );
    timeSelector->UpdateLargestPossibleRegion();
    newImage2 = timeSelector->GetOutput();
    newImage2->DisconnectPipeline();
  }
  auto itkImage1 = ImageType::New();
  auto itkImage2 = ImageType::New();
  CastToItkImage( newImage1, itkImage1 );
  CastToItkImage( newImage2, itkImage2 );
  std::string nameAddition = "";
  try
  {
    switch (m_SelectedOperation)
    {
      case ADD:
      {
        AddFilterType::Pointer addFilter = AddFilterType::New();
        addFilter->SetInput1( itkImage1 );
        addFilter->SetInput2( itkImage2 );
        addFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(addFilter->GetOutput())->Clone();
        nameAddition = "_Added";
      }
      break;
      case SUBTRACT:
      {
        SubtractFilterType::Pointer subFilter = SubtractFilterType::New();
        subFilter->SetInput1( itkImage1 );
        subFilter->SetInput2( itkImage2 );
        subFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(subFilter->GetOutput())->Clone();
        nameAddition = "_Subtracted";
      }
      break;
      case MULTIPLY:
      {
        MultiplyFilterType::Pointer multFilter = MultiplyFilterType::New();
        multFilter->SetInput1( itkImage1 );
        multFilter->SetInput2( itkImage2 );
        multFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(multFilter->GetOutput())->Clone();
        nameAddition = "_Multiplied";
      }
      break;
      case DIVIDE:
      {
        DivideFilterType::Pointer divFilter = DivideFilterType::New();
        divFilter->SetInput1( itkImage1 );
        divFilter->SetInput2( itkImage2 );
        divFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage<DoubleImageType>(divFilter->GetOutput())->Clone();
        nameAddition = "_Divided";
      }
      break;
      case AND:
      {
        AndImageFilterType::Pointer andFilter = AndImageFilterType::New();
        andFilter->SetInput1( itkImage1 );
        andFilter->SetInput2( itkImage2 );
        andFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(andFilter->GetOutput())->Clone();
        nameAddition = "_AND";
        break;
      }
      case OR:
      {
        OrImageFilterType::Pointer orFilter = OrImageFilterType::New();
        orFilter->SetInput1( itkImage1 );
        orFilter->SetInput2( itkImage2 );
        orFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(orFilter->GetOutput())->Clone();
        nameAddition = "_OR";
        break;
      }
      case XOR:
      {
        XorImageFilterType::Pointer xorFilter = XorImageFilterType::New();
        xorFilter->SetInput1( itkImage1 );
        xorFilter->SetInput2( itkImage2 );
        xorFilter->UpdateLargestPossibleRegion();
        newImage1 = mitk::ImportItkImage(xorFilter->GetOutput())->Clone();
        nameAddition = "_XOR";
        break;
      }
      case RESAMPLE_TO:
      {
        itk::BSplineInterpolateImageFunction<DoubleImageType, double>::Pointer bspl_interpolator
          = itk::BSplineInterpolateImageFunction<DoubleImageType, double>::New();
        bspl_interpolator->SetSplineOrder( 3 );
        itk::NearestNeighborInterpolateImageFunction< DoubleImageType >::Pointer nn_interpolator
          = itk::NearestNeighborInterpolateImageFunction< DoubleImageType>::New();
        // Resampling is done in double precision; formerly these locals
        // shadowed the outer itkImage1/itkImage2.
        DoubleImageType::Pointer itkDoubleImage1 = DoubleImageType::New();
        DoubleImageType::Pointer itkDoubleImage2 = DoubleImageType::New();
        CastToItkImage( newImage1, itkDoubleImage1 );
        CastToItkImage( newImage2, itkDoubleImage2 );
        itk::ResampleImageFilter< DoubleImageType, DoubleImageType >::Pointer resampleFilter = itk::ResampleImageFilter< DoubleImageType, DoubleImageType >::New();
        resampleFilter->SetInput( itkDoubleImage1 );
        resampleFilter->SetReferenceImage( itkDoubleImage2 );
        resampleFilter->SetUseReferenceImage( true );
        // use NN interp with binary images
        // NOTE(review): this only checks that a "binary" property exists,
        // not that its value is true — confirm intended behavior.
        if(selectedNode->GetProperty("binary") )
          resampleFilter->SetInterpolator( nn_interpolator );
        else
          resampleFilter->SetInterpolator( bspl_interpolator );
        resampleFilter->SetDefaultPixelValue( 0 );
        try
        {
          resampleFilter->UpdateLargestPossibleRegion();
        }
        catch( const itk::ExceptionObject &e)
        {
          MITK_WARN << "Updating resampling filter failed. ";
          MITK_WARN << "REASON: " << e.what();
        }
        DoubleImageType::Pointer resampledImage = resampleFilter->GetOutput();
        newImage1 = mitk::ImportItkImage( resampledImage )->Clone();
        nameAddition = "_Resampled";
        break;
      }
      default:
        std::cout << "Something went wrong..." << std::endl;
        this->BusyCursorOff();
        return;
    }
  }
  catch (const itk::ExceptionObject& e )
  {
    this->BusyCursorOff();
    QMessageBox::warning(nullptr, "ITK Exception", e.what() );
    QMessageBox::warning(nullptr, "Warning", tr("Problem when applying arithmetic operation to two images. Check dimensions of input images."));
    return;
  }
  // disconnect pipeline; images will not be reused
  newImage1->DisconnectPipeline();
  itkImage1 = nullptr;
  itkImage2 = nullptr;
  // adjust level/window to new image and compose new image name
  mitk::LevelWindow levelwindow;
  levelwindow.SetAuto( newImage1 );
  auto levWinProp = mitk::LevelWindowProperty::New();
  levWinProp->SetLevelWindow( levelwindow );
  std::string name = selectedNode->GetName();
  if (name.find(".nrrd") == name.size() -5 )
  {
    name = name.substr(0,name.size() -5);
  }
  // create final result MITK data storage node
  auto result = mitk::DataNode::New();
  result->SetProperty( "levelwindow", levWinProp );
  result->SetProperty( "name", mitk::StringProperty::New( (name + nameAddition ).c_str() ));
  result->SetData( newImage1 );
  this->GetDataStorage()->Add(result, selectedNode);
  if (m_Controls->cbHideOrig->isChecked() == true)
  {
    selectedNode->SetProperty("visible", mitk::BoolProperty::New(false));
    selectedNode2->SetProperty("visible", mitk::BoolProperty::New(false));
  }
  // show the newly created image
  mitk::RenderingManager::GetInstance()->RequestUpdateAll();
  this->BusyCursorOff();
}
void QmitkBasicImageProcessing::SelectInterpolator(int interpolator)
{
  // Map the interpolator combo box index onto the internal selection.
  if (interpolator == 0)
  {
    m_SelectedInterpolation = LINEAR;
  }
  else if (interpolator == 1)
  {
    m_SelectedInterpolation = NEAREST;
  }
  // Any other index leaves the previous selection unchanged, matching the
  // original switch (which had no default branch).
}
| 35.005201 | 163 | 0.683002 | [
"geometry",
"object",
"vector",
"3d"
] |
f6d21b909a87bc28b2c00de9c392ae7903412419 | 26,997 | cc | C++ | java_predict_client/src/main/proto/tensorflow/core/ops/state_ops.cc | tobegit3hub/deep_cnn | 8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/core/ops/state_ops.cc | yaochengbupt/tensorflow | 7731cb02b2378d5f69684acb40c985f7b2432f58 | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/core/ops/state_ops.cc | yaochengbupt/tensorflow | 7731cb02b2378d5f69684acb40c985f7b2432f58 | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
// Registers the stateful "Variable" op: a mutable tensor that persists across
// session steps, emitted as a reference so it can be read or assigned.
REGISTER_OP("Variable")
    .Output("ref: Ref(dtype)")
    .Attr("shape: shape")
    .Attr("dtype: type")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    // Stateful: must not be constant-folded or deduplicated across steps.
    .SetIsStateful()
    .SetShapeFn([](InferenceContext* c) {
      PartialTensorShape shape;
      TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
      // Variable has legacy behavior where we cannot tell the difference
      // between a scalar shape attribute and 'unknown shape'. So if the shape
      // is a scalar, we return an unknown shape.
      if (shape.dims() <= 0) {
        return shape_inference::UnknownShape(c);
      }
      // Otherwise the declared shape attribute is the output's shape.
      TensorShapeProto shape_proto;
      shape.AsProto(&shape_proto);
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(shape_proto, &out));
      c->set_output(0, out);
      return Status::OK();
    })
    .Doc(R"doc(
Holds state in the form of a tensor that persists across steps.
Outputs a ref to the tensor state so it may be read or modified.
TODO(zhifengc/mrry): Adds a pointer to a more detail document
about sharing states in tensorflow.
ref: A reference to the variable tensor.
shape: The shape of the variable tensor.
dtype: The type of elements in the variable tensor.
container: If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
)doc");
// Registers "IsVariableInitialized": reports (as a bool scalar) whether the
// referenced variable has been assigned a value yet.
REGISTER_OP("IsVariableInitialized")
    .Input("ref: Ref(dtype)")
    .Output("is_initialized: bool")
    .Attr("dtype: type")
    // The whole point of this op is to inspect possibly-uninitialized input.
    .SetAllowsUninitializedInput()
    // Output is always a scalar regardless of the variable's shape.
    .SetShapeFn(shape_inference::ScalarShape)
    .Doc(R"doc(
Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
ref: Should be from a `Variable` node. May be uninitialized.
dtype: The type of elements in the variable tensor.
)doc");
// Registers "TemporaryVariable": a mutable tensor that lives only within a
// single step and must be paired with a DestroyTemporaryVariable op.
REGISTER_OP("TemporaryVariable")
    .Output("ref: Ref(dtype)")
    .Attr("shape: shape")
    .Attr("dtype: type")
    .Attr("var_name: string = ''")
    .SetIsStateful()
    .SetShapeFn([](InferenceContext* c) {
      // Unlike "Variable" above, the shape attribute is taken at face value
      // (no scalar/unknown-shape legacy special case).
      TensorShapeProto shape_proto;
      TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape_proto));
      ShapeHandle output;
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(shape_proto, &output));
      c->set_output(0, output);
      return Status::OK();
    })
    .Doc(R"doc(
Returns a tensor that may be mutated, but only persists within a single step.
This is an experimental op for internal use only and it is possible to use this
op in unsafe ways.  DO NOT USE unless you fully understand the risks.
It is the caller's responsibility to ensure that 'ref' is eventually passed to a
matching 'DestroyTemporaryVariable' op after all other uses have completed.
Outputs a ref to the tensor state so it may be read or modified.
  E.g.
      var = state_ops._temporary_variable([1, 2], types.float_)
      var_name = var.op.name
      var = state_ops.assign(var, [[4.0, 5.0]])
      var = state_ops.assign_add(var, [[6.0, 7.0]])
      final = state_ops._destroy_temporary_variable(var, var_name=var_name)
ref: A reference to the variable tensor.
shape: The shape of the variable tensor.
dtype: The type of elements in the variable tensor.
var_name: Overrides the name used for the temporary variable resource. Default
value is the name of the 'TemporaryVariable' op (which is guaranteed unique).
)doc");
// Registers "DestroyTemporaryVariable": tears down a TemporaryVariable's
// storage and forwards the final tensor value to its output.
REGISTER_OP("DestroyTemporaryVariable")
    .Input("ref: Ref(T)")
    .Output("value: T")
    .Attr("T: type")
    .Attr("var_name: string")
    // The output is simply the final value of the input, so the shape passes
    // through unchanged.
    .SetShapeFn(shape_inference::UnchangedShape)
    .Doc(R"doc(
Destroys the temporary variable and returns its final value.
Sets output to the value of the Tensor pointed to by 'ref', then destroys
the temporary variable called 'var_name'.
All other uses of 'ref' *must* have executed before this op.
This is typically achieved by chaining the ref through each assign op, or by
using control dependencies.
Outputs the final value of the tensor pointed to by 'ref'.
ref: A reference to the temporary variable tensor.
var_name: Name of the temporary variable, usually the name of the matching
'TemporaryVariable' op.
)doc");
// Registers "Assign": in-place ref = value, returning the updated ref so
// further ops can be chained off the assigned value.
REGISTER_OP("Assign")
    .Input("ref: Ref(T)")
    .Input("value: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: type")
    .Attr("validate_shape: bool = true")
    .Attr("use_locking: bool = true")
    // Assign is how variables get their first value, so the ref input may be
    // uninitialized.
    .SetAllowsUninitializedInput()
    .SetShapeFn([](InferenceContext* c) {
      bool validate_shape;
      TF_RETURN_IF_ERROR(c->GetAttr("validate_shape", &validate_shape));
      if (validate_shape) {
        // Shapes of ref and value must agree; output is the merged shape.
        return shape_inference::MergeBothInputsShapeFn(c);
      }
      // Without validation, ref simply takes on the shape of value.
      c->set_output(0, c->input(1));
      return Status::OK();
    })
    .Doc(R"doc(
Update 'ref' by assigning 'value' to it.
This operation outputs "ref" after the assignment is done.
This makes it easier to chain operations that need to use the reset value.
ref: Should be from a `Variable` node. May be uninitialized.
value: The value to be assigned to the variable.
validate_shape: If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to.  If false,
'ref' will take on the shape of 'value'.
use_locking: If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
output_ref:= Same as "ref".  Returned as a convenience for operations that want
to use the new value after the variable has been reset.
)doc");
// Registers "AssignAdd": in-place ref += value, returning the updated ref so
// further ops can be chained off the new value.
REGISTER_OP("AssignAdd")
    .Input("ref: Ref(T)")
    .Input("value: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("use_locking: bool = false")
    // Shapes of ref and value must agree; output is the merged shape.
    .SetShapeFn(shape_inference::MergeBothInputsShapeFn)
    .Doc(R"doc(
Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
ref: Should be from a `Variable` node.
value: The value to be added to the variable.
use_locking: If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
output_ref:= Same as "ref".  Returned as a convenience for operations that want
to use the new value after the variable has been updated.
)doc");
// Registers "AssignSub": in-place ref -= value, returning the updated ref so
// further ops can be chained off the new value.
REGISTER_OP("AssignSub")
    .Input("ref: Ref(T)")
    .Input("value: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("use_locking: bool = false")
    // Shapes of ref and value must agree; output is the merged shape.
    .SetShapeFn(shape_inference::MergeBothInputsShapeFn)
    .Doc(R"doc(
Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
ref: Should be from a `Variable` node.
value: The value to be subtracted from the variable.
use_locking: If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
output_ref:= Same as "ref".  Returned as a convenience for operations that want
to use the new value after the variable has been updated.
)doc");
namespace {
// Shape function shared by the Scatter* ops: validates that
// updates.shape == indices.shape + ref.shape[1:] and propagates the
// variable's shape unchanged to the output ref.
Status ScatterUpdateShape(InferenceContext* c) {
  ShapeHandle ref_shape = c->input(0);
  // Everything past the first (scattered) dimension of the variable.
  ShapeHandle ref_suffix;
  TF_RETURN_IF_ERROR(c->Subshape(ref_shape, 1, &ref_suffix));
  // The shape `updates` is required to have: indices.shape + ref.shape[1:].
  ShapeHandle expected_updates;
  TF_RETURN_IF_ERROR(c->Concatenate(c->input(1), ref_suffix, &expected_updates));
  // Merge is used purely for validation; the merged result is discarded.
  ShapeHandle merged;
  TF_RETURN_IF_ERROR(c->Merge(c->input(2), expected_updates, &merged));
  c->set_output(0, ref_shape);
  return Status::OK();
}
}  // namespace
// Registers "ScatterUpdate": ref[indices, ...] = updates[...], returning the
// updated ref.  Note the write order for duplicate indices is undefined.
REGISTER_OP("ScatterUpdate")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: type")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = true")
    // Validates updates.shape == indices.shape + ref.shape[1:].
    .SetShapeFn(ScatterUpdateShape)
    .Doc(R"doc(
Applies sparse updates to a variable reference.
This operation computes
    # Scalar indices
    ref[indices, ...] = updates[...]
    # Vector indices (for each i)
    ref[indices[i], ...] = updates[i, ...]
    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
If values in `ref` is to be updated more than once, because there are
duplicate entries in `indices`, the order at which the updates happen
for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterUpdate.png" alt>
</div>
ref: Should be from a `Variable` node.
indices: A tensor of indices into the first dimension of `ref`.
updates: A tensor of updated values to store in `ref`.
output_ref:= Same as `ref`.  Returned as a convenience for operations that want
to use the updated values after the update is done.
use_locking: If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
)doc");
// Registers "ScatterAdd": ref[indices, ...] += updates[...], returning the
// updated ref.  Duplicate indices accumulate (addition is commutative).
REGISTER_OP("ScatterAdd")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    // Validates updates.shape == indices.shape + ref.shape[1:].
    .SetShapeFn(ScatterUpdateShape)
    .Doc(R"doc(
Adds sparse updates to a variable reference.
This operation computes
    # Scalar indices
    ref[indices, ...] += updates[...]
    # Vector indices (for each i)
    ref[indices[i], ...] += updates[i, ...]
    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterAdd.png" alt>
</div>
ref: Should be from a `Variable` node.
indices: A tensor of indices into the first dimension of `ref`.
updates: A tensor of updated values to add to `ref`.
output_ref:= Same as `ref`.  Returned as a convenience for operations that want
to use the updated values after the update is done.
use_locking: If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
)doc");
// Registers "ScatterSub": ref[indices, ...] -= updates[...], returning the
// updated ref.  Duplicate indices accumulate (negated contributions add).
REGISTER_OP("ScatterSub")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    // Validates updates.shape == indices.shape + ref.shape[1:].
    .SetShapeFn(ScatterUpdateShape)
    .Doc(R"doc(
Subtracts sparse updates from a variable reference.
This operation computes
    # Scalar indices
    ref[indices, ...] -= updates[...]
    # Vector indices (for each i)
    ref[indices[i], ...] -= updates[i, ...]
    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their (negated) contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/ScatterSub.png" alt>
</div>
ref: Should be from a `Variable` node.
indices: A tensor of indices into the first dimension of `ref`.
updates: A tensor of updated values to subtract from `ref`.
output_ref:= Same as `ref`.  Returned as a convenience for operations that want
to use the updated values after the update is done.
use_locking: If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
)doc");
// Registers "ScatterMul": ref[indices, ...] *= updates[...], returning the
// updated ref.  Duplicate indices accumulate multiplicatively.
REGISTER_OP("ScatterMul")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    // Validates updates.shape == indices.shape + ref.shape[1:].
    .SetShapeFn(ScatterUpdateShape)
    .Doc(R"doc(
Multiplies sparse updates into a variable reference.
This operation computes
    # Scalar indices
    ref[indices, ...] *= updates[...]
    # Vector indices (for each i)
    ref[indices[i], ...] *= updates[i, ...]
    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions multiply.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
ref: Should be from a `Variable` node.
indices: A tensor of indices into the first dimension of `ref`.
updates: A tensor of updated values to multiply to `ref`.
output_ref:= Same as `ref`.  Returned as a convenience for operations that want
to use the updated values after the update is done.
use_locking: If True, the operation will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
)doc");
// Registers "ScatterDiv": ref[indices, ...] /= updates[...], returning the
// updated ref.  Duplicate indices accumulate as successive divisions.
REGISTER_OP("ScatterDiv")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    // Validates updates.shape == indices.shape + ref.shape[1:].
    .SetShapeFn(ScatterUpdateShape)
    .Doc(R"doc(
Divides a variable reference by sparse updates.
This operation computes
    # Scalar indices
    ref[indices, ...] /= updates[...]
    # Vector indices (for each i)
    ref[indices[i], ...] /= updates[i, ...]
    # High rank indices (for each i, ..., j)
    ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions divide.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
ref: Should be from a `Variable` node.
indices: A tensor of indices into the first dimension of `ref`.
updates: A tensor of values that `ref` is divided by.
output_ref:= Same as `ref`.  Returned as a convenience for operations that want
to use the updated values after the update is done.
use_locking: If True, the operation will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
)doc");
// Registers "ScatterNdUpdate": N-dimensional scatter assignment into a
// variable; the innermost dimension of `indices` selects elements or slices.
// NOTE(review): unlike the Scatter* ops above, no SetShapeFn is supplied, so
// shape inference produces unknown output shapes -- confirm this is intended.
REGISTER_OP("ScatterNdUpdate")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: type")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = true")
    .Doc(R"doc(
Applies sparse `updates` to individual values or slices within a given
variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to update 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1] ,[7]])
    updates = tf.constant([9, 10, 11, 12])
    update = tf.scatter_nd_update(ref, indices, updates)
    with tf.Session() as sess:
      print sess.run(update)
The resulting update to ref would look like this:
    [1, 11, 3, 10, 9, 6, 7, 12]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
ref: A mutable Tensor. Should be from a Variable node.
indices: A Tensor. Must be one of the following types: int32, int64.
  A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of updated
  values to add to ref.
use_locking: An optional bool. Defaults to True. If True, the assignment will
  be protected by a lock; otherwise the behavior is undefined,
  but may exhibit less contention.
output_ref: Same as ref. Returned as a convenience for operations that want to
  use the updated values after the update is done.
)doc");
// Registers "ScatterNdAdd": N-dimensional scatter addition into a variable;
// the innermost dimension of `indices` selects elements or slices.
// NOTE(review): like ScatterNdUpdate, no SetShapeFn is supplied here.
REGISTER_OP("ScatterNdAdd")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    .Doc(R"doc(
Applies sparse addition between `updates` and individual values or slices
within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
elements. In Python, that addition would look like this:
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    add = tf.scatter_nd_add(ref, indices, updates)
    with tf.Session() as sess:
      print sess.run(add)
The resulting update to ref would look like this:
    [1, 13, 3, 14, 14, 6, 7, 20]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
ref: A mutable Tensor. Should be from a Variable node.
indices: A Tensor. Must be one of the following types: int32, int64.
  A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of updated values
  to add to ref.
use_locking: An optional bool. Defaults to False. If True, the assignment will
  be protected by a lock; otherwise the behavior is undefined,
  but may exhibit less contention.
output_ref: Same as ref. Returned as a convenience for operations that want
  to use the updated values after the update is done.
)doc");
// Registers "ScatterNdSub": N-dimensional scatter subtraction from a variable;
// the innermost dimension of `indices` selects elements or slices.
// NOTE(review): like ScatterNdUpdate, no SetShapeFn is supplied here.
REGISTER_OP("ScatterNdSub")
    .Input("ref: Ref(T)")
    .Input("indices: Tindices")
    .Input("updates: T")
    .Output("output_ref: Ref(T)")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .Attr("use_locking: bool = false")
    .Doc(R"doc(
Applies sparse subtraction between `updates` and individual values or slices
within a given variable according to `indices`.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to subtract 4 scattered elements from a rank-1 tensor
with 8 elements. In Python, that subtraction would look like this:
    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    sub = tf.scatter_nd_sub(ref, indices, updates)
    with tf.Session() as sess:
      print sess.run(sub)
The resulting update to ref would look like this:
    [1, -9, 3, -6, -4, 6, 7, -4]
See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to
slices.
ref: A mutable Tensor. Should be from a Variable node.
indices: A Tensor. Must be one of the following types: int32, int64.
  A tensor of indices into ref.
updates: A Tensor. Must have the same type as ref. A tensor of updated values
  to subtract from ref.
use_locking: An optional bool. Defaults to False. If True, the assignment will
  be protected by a lock; otherwise the behavior is undefined,
  but may exhibit less contention.
output_ref: Same as ref. Returned as a convenience for operations that want
  to use the updated values after the update is done.
)doc");
// TODO(simister): Re-enable once these additional ops do not dramatically
// increase binary size.
// REGISTER_OP("ScatterNdMul")
// .Input("ref: Ref(T)")
// .Input("indices: Tindices")
// .Input("updates: T")
// .Output("output_ref: Ref(T)")
// .Attr("T: numbertype")
// .Attr("Tindices: {int32, int64}")
// .Attr("use_locking: bool = false")
// .Doc(
// R"doc(Applies sparse multiplication between `updates` and individual
// values or slices within a given variable according to `indices`.
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
// `indices` must be integer tensor, containing indices into `ref`.
// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
// ```
// For example, say we want to multiply 4 scattered elements with a rank-1
// tensor with 8 elements. In Python, that multiplication would look like this:
// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([9, 10, 11, 12])
// sub = tf.scatter_nd_mul(ref, indices, updates)
// with tf.Session() as sess:
// print sess.run(sub)
// The resulting update to ref would look like this:
// [1, 22, 3, 40, 45, 6, 7, 96]
// See [tf.scatter_nd](#scatter_nd) for more details about how to make updates
// to slices.
// ref: A mutable Tensor. Should be from a Variable node.
// indices: A Tensor. Must be one of the following types: int32, int64. A tensor
// of indices into ref.
// updates: A Tensor. Must have the same type as ref. A tensor of updated values
// to multiply with ref.
// use_locking: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined, but may exhibit
// less contention.
// output_ref: Same as ref. Returned as a convenience for operations that want
// to use the updated values after the update is done.)doc");
// REGISTER_OP("ScatterNdDiv")
// .Input("ref: Ref(T)")
// .Input("indices: Tindices")
// .Input("updates: T")
// .Output("output_ref: Ref(T)")
// .Attr("T: numbertype")
// .Attr("Tindices: {int32, int64}")
// .Attr("use_locking: bool = false")
// .Doc(
// R"doc(Applies sparse division between `updates` and individual
// values or slices within a given variable according to `indices`.
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
// `indices` must be integer tensor, containing indices into `ref`.
// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
// ```
// For example, say we want to divide a rank-1 tensor with 8 elements by 4
// scattered elements. In Python, that division would look like this:
// ref = tf.Variable([10, 20, 30, 40, 50, 60, 70, 80])
// indices = tf.constant([[4], [3], [1], [7]])
// updates = tf.constant([2, 3, 4, 5])
// sub = tf.scatter_nd_div(ref, indices, updates)
// with tf.Session() as sess:
// print sess.run(sub)
// The resulting update to ref would look like this:
// [10, 5, 30, 13, 25, 60, 70, 16]
// See [tf.scatter_nd](#scatter_nd) for more details about how to make updates
// to slices.
// ref: A mutable Tensor. Should be from a Variable node.
// indices: A Tensor. Must be one of the following types: int32, int64. A tensor
// of indices into ref.
// updates: A Tensor. Must have the same type as ref. A tensor of updated values
// to divide ref by.
// use_locking: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined, but may exhibit
// less contention.
// output_ref: Same as ref. Returned as a convenience for operations that want
// to use the updated values after the update is done.)doc");
// Registers "CountUpTo": returns the current value of a scalar integer
// variable and increments it, erroring with OutOfRange once 'limit' is hit.
REGISTER_OP("CountUpTo")
    .Input("ref: Ref(T)")
    .Output("output: T")
    .Attr("limit: int")
    .Attr("T: {int32, int64}")
    // Input must be a scalar (rank 0); the output is the same scalar shape.
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle output;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &output));
      c->set_output(0, output);
      return Status::OK();
    })
    .Doc(R"doc(
Increments 'ref' until it reaches 'limit'.
ref: Should be from a scalar `Variable` node.
limit: If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
output: A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct.
)doc");
} // namespace tensorflow
| 35.948069 | 80 | 0.683372 | [
"shape",
"vector"
] |
f6d430f237f8d3eb9a7b030501aa501e6c5733b0 | 78,524 | cpp | C++ | lgc/patch/PatchEntryPointMutate.cpp | vettoreldaniele/llpc | 41fd5608f7b697c1416b983b89b2f62f0907d4a3 | [
"MIT"
] | null | null | null | lgc/patch/PatchEntryPointMutate.cpp | vettoreldaniele/llpc | 41fd5608f7b697c1416b983b89b2f62f0907d4a3 | [
"MIT"
] | 5 | 2021-12-22T19:28:25.000Z | 2022-01-11T18:40:06.000Z | lgc/patch/PatchEntryPointMutate.cpp | vettoreldaniele/llpc | 41fd5608f7b697c1416b983b89b2f62f0907d4a3 | [
"MIT"
] | null | null | null | /*
***********************************************************************************************************************
*
* Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************************************************************************/
/**
***********************************************************************************************************************
* @file PatchEntryPointMutate.cpp
* @brief The lgc::PatchEntryPointMutate pass determines the final user data layout of shaders.
*
* This consists of
* - removing unused user data
* - unspilling root descriptors if possible (moving from spill table into user data registers)
* - unspilling push constants if we never need a pointer to them
* - putting push constants into registers if no code needs a pointer to it
* - figuring out where to put user data.
*
* The final user data is written into a limited number of sgprs starting with s0. If the user data does not fit in
* there completely, the last i32 is changed to be a pointer to a spill table in memory, that contains the rest of the
* user data.
*
* Root descriptors are dynamic uniform buffer descriptors in Vulkan, that can be changed without modifying a descriptor
* set and rebuilding the pipeline. They get put into the spill table but can be unspilled.
*
* Special care is required for compute libraries. Similar to unlinked shader compilation, we do not know the final
* layout for non-entrypoint shaders. For compute libraries, user data args must be passed to other functions, whose
* implementation is unknown at compile time. Therefore, computation of user data arguments must be independent of any
* instructions or uses. This is important, even for functions that have no calls, as we still need to compute the taken
* arguments in a deterministic layout. For library functions, only a prefix of the user data is known at compile time.
* There can be more user data at runtime, and that needs to be passed on to called functions. Therefore, we
* - always pass all possible user data registers, even if they have no content for the current shader
* - have a spill table pointer in the largest user data sgpr
* - cannot remove unused user data as it might be used by a callee.
***********************************************************************************************************************
*/
#include "lgc/patch/PatchEntryPointMutate.h"
#include "lgc/LgcContext.h"
#include "lgc/patch/ShaderInputs.h"
#include "lgc/state/AbiUnlinked.h"
#include "lgc/state/IntrinsDefs.h"
#include "lgc/state/PalMetadata.h"
#include "lgc/state/PipelineShaders.h"
#include "lgc/state/PipelineState.h"
#include "lgc/state/TargetInfo.h"
#include "lgc/util/AddressExtender.h"
#include "lgc/util/BuilderBase.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "lgc-patch-entry-point-mutate"
using namespace llvm;
using namespace lgc;
namespace llvm {
namespace cl {
// -inreg-esgs-lds-size: Add a dummy "inreg" argument for ES-GS LDS size, this is to keep consistent with PAL's
// GS on-chip behavior. In the future, if PAL allows hardcoded ES-GS LDS size, this option could be deprecated.
// Defaults to enabled; declared here so the option is registered when this pass is linked in.
opt<bool> InRegEsGsLdsSize("inreg-esgs-lds-size", desc("For GS on-chip, add esGsLdsSize in user data"), init(true));
} // namespace cl
} // namespace llvm
// =====================================================================================================================
// Initializes static members.
// The address of ID (not its value) uniquely identifies the legacy pass to the LLVM pass infrastructure.
char LegacyPatchEntryPointMutate::ID = 0;
// =====================================================================================================================
// Pass creator, creates the pass of LLVM patching operations for entry-point mutation
//
// @returns : New LegacyPatchEntryPointMutate instance; ownership passes to the caller (the pass manager).
ModulePass *lgc::createLegacyPatchEntryPointMutate() {
  return new LegacyPatchEntryPointMutate();
}
// =====================================================================================================================
// Default constructor: tessellation/geometry presence flags start cleared; they are set per-pipeline in runImpl().
PatchEntryPointMutate::PatchEntryPointMutate() : m_hasTs(false), m_hasGs(false) {
}
// =====================================================================================================================
// Constructs a UserDataArg, deriving the argument's size in dwords from its IR type.
//
// @param argTy : IR type of the argument
// @param name : Name of the argument
// @param userDataValue : PAL user data value, or its unsigned equivalent
// @param argIndex : Where to store the final argument index, or nullptr
PatchEntryPointMutate::UserDataArg::UserDataArg(llvm::Type *argTy, const llvm::Twine &name, unsigned userDataValue,
                                                unsigned *argIndex)
    : argTy(argTy), name(name.str()), userDataValue(userDataValue), argIndex(argIndex) {
  // Pointers occupy one dword when they live in the 32-bit constant address
  // space and two dwords otherwise; any other type is sized by its bit width.
  const bool isPointer = llvm::isa<llvm::PointerType>(argTy);
  if (!isPointer) {
    argDwordSize = argTy->getPrimitiveSizeInBits() / 32;
  } else {
    const bool is32BitPtr = argTy->getPointerAddressSpace() == ADDR_SPACE_CONST_32BIT;
    argDwordSize = is32BitPtr ? 1 : 2;
  }
}
// =====================================================================================================================
// Convenience overload: accepts a UserDataMapping enum and delegates to the unsigned-value constructor.
PatchEntryPointMutate::UserDataArg::UserDataArg(llvm::Type *argTy, const llvm::Twine &name,
                                                UserDataMapping userDataValue, unsigned *argIndex)
    : UserDataArg(argTy, name, static_cast<unsigned>(userDataValue), argIndex) {
}
// =====================================================================================================================
// Legacy pass-manager wrapper constructor; registers the pass under its static ID.
LegacyPatchEntryPointMutate::LegacyPatchEntryPointMutate() : ModulePass(ID) {
}
// =====================================================================================================================
// Executes this LLVM patching pass on the specified LLVM module.
//
// Legacy pass-manager entry point: fetches the pipeline state and shader analysis
// results, then delegates the actual work to the shared implementation.
//
// @param [in/out] module : LLVM module to be run on
// @returns : True if the module was modified by the transformation and false otherwise
bool LegacyPatchEntryPointMutate::runOnModule(Module &module) {
  PipelineState *pipelineState = getAnalysis<LegacyPipelineStateWrapper>().getPipelineState(&module);
  PipelineShadersResult &pipelineShaders = getAnalysis<LegacyPipelineShaders>().getResult();
  return m_impl.runImpl(module, pipelineShaders, pipelineState);
}
// =====================================================================================================================
// Executes this LLVM patching pass on the specified LLVM module.
//
// New pass-manager entry point: fetches the pipeline state and shader analysis
// results, then delegates the actual work to the shared implementation.
//
// @param [in/out] module : LLVM module to be run on
// @param [in/out] analysisManager : Analysis manager to use for this transformation
// @returns : The preserved analyses (The Analyses that are still valid after this pass)
PreservedAnalyses PatchEntryPointMutate::run(Module &module, ModuleAnalysisManager &analysisManager) {
  PipelineState *pipelineState = analysisManager.getResult<PipelineStateWrapper>(module).getPipelineState();
  PipelineShadersResult &pipelineShaders = analysisManager.getResult<PipelineShaders>(module);
  runImpl(module, pipelineShaders, pipelineState);
  // This pass rewrites entry-point signatures, so conservatively invalidate all analyses.
  return PreservedAnalyses::none();
}
// =====================================================================================================================
// Executes this LLVM patching pass on the specified LLVM module.
//
// Overall flow: gather user data and shader input usage, detect the
// compute-with-calls case, mutate each entry point's signature, then rewrite
// all user data and shader input uses to reference the new entry arguments.
//
// @param [in/out] module : LLVM module to be run on
// @param pipelineShaders : Pipeline shaders analysis result
// @param pipelineState : Pipeline state
// @returns : True if the module was modified by the transformation and false otherwise
bool PatchEntryPointMutate::runImpl(Module &module, PipelineShadersResult &pipelineShaders,
                                    PipelineState *pipelineState) {
  LLVM_DEBUG(dbgs() << "Run the pass Patch-Entry-Point-Mutate\n");
  Patch::init(&module);
  m_pipelineState = pipelineState;
  const unsigned stageMask = m_pipelineState->getShaderStageMask();
  // Cache whether tessellation and geometry stages are present; argument
  // layouts below depend on these flags.
  m_hasTs = (stageMask & (shaderStageToMask(ShaderStageTessControl) | shaderStageToMask(ShaderStageTessEval))) != 0;
  m_hasGs = (stageMask & shaderStageToMask(ShaderStageGeometry)) != 0;
  // Gather user data usage.
  gatherUserDataUsage(&module);
  // Create ShaderInputs object and gather shader input usage.
  ShaderInputs shaderInputs;
  shaderInputs.gatherUsage(module);
  setupComputeWithCalls(&module);
  if (m_pipelineState->isGraphics()) {
    // Process each shader in turn, but not the copy shader.
    for (unsigned shaderStage = 0; shaderStage < ShaderStageNativeStageCount; ++shaderStage) {
      m_entryPoint = pipelineShaders.getEntryPoint(static_cast<ShaderStage>(shaderStage));
      if (m_entryPoint) {
        m_shaderStage = static_cast<ShaderStage>(shaderStage);
        processShader(&shaderInputs);
      }
    }
  } else {
    // Compute pipeline or library: may need to process multiple functions.
    processComputeFuncs(&shaderInputs, module);
  }
  // Fix up user data uses to use entry args.
  fixupUserDataUses(*m_module);
  m_userDataUsage.clear();
  // Fix up shader input uses to use entry args.
  shaderInputs.fixupUses(*m_module, m_pipelineState);
  return true;
}
// =====================================================================================================================
// Set up compute-with-calls flag. It is set for either of these two cases:
// 1. a compute library;
// 2. a compute pipeline that does indirect calls or calls to external functions.
//
// When set, this pass behaves differently, not attempting to omit unused shader inputs, since all shader inputs
// are potentially used in other functions. It also modifies each call to pass the shader inputs between functions.
//
// @param module : IR module
void PatchEntryPointMutate::setupComputeWithCalls(Module *module) {
  m_computeWithCalls = false;
  if (m_pipelineState->isComputeLibrary()) {
    m_computeWithCalls = true;
    return;
  }
  // Compute pipeline: look for used external declarations (anything that is not an intrinsic and not an
  // internal lgc.* function), and for indirect call sites.
  for (Function &func : *module) {
    const bool isExternalDecl = func.isDeclaration() && func.getIntrinsicID() == Intrinsic::not_intrinsic &&
                                !func.getName().startswith(lgcName::InternalCallPrefix);
    if (isExternalDecl && !func.user_empty()) {
      m_computeWithCalls = true;
      return;
    }
    // Scan the body (empty for declarations) for indirect calls.
    for (const BasicBlock &block : func) {
      for (const Instruction &inst : block) {
        auto *callInst = dyn_cast<CallInst>(&inst);
        if (!callInst)
          continue;
        // A direct call to a known function, or inline asm, is not an indirect call.
        Value *callee = callInst->getCalledOperand();
        if (isa<Function>(callee) || callInst->isInlineAsm())
          continue;
        m_computeWithCalls = true;
        return;
      }
    }
  }
}
// =====================================================================================================================
// Gather user data usage in all shaders
//
// Records, per (merged) shader stage, every call site that consumes user data (spill table, push constants,
// root descriptors, descriptor tables, special user data) in the per-stage UserDataUsage struct, so later
// code can decide which items stay in entry SGPRs ("unspilled") and which must come from the spill table.
//
// @param module : IR module
void PatchEntryPointMutate::gatherUserDataUsage(Module *module) {
  // Find lgc.spill.table, lgc.push.constants, lgc.root.descriptor, lgc.descriptor.set functions, and from
  // there all calls to them. Add each call to the applicable list in the UserDataUsage struct for the
  // (merged) shader stage.
  // Find lgc.special.user.data functions, and from there all calls to them. Add each call to the applicable
  // list in the UserDataUsage struct for the (merged) shader stage.
  // Also find lgc.input.import.generic calls in VS, indicating that the vertex buffer table is needed.
  // Also find lgc.output.export.xfb calls anywhere, indicating that the streamout table is needed in the
  // last vertex-processing stage.
  for (Function &func : *module) {
    // The lgc.* query functions of interest are all declarations; skip definitions.
    if (!func.isDeclaration())
      continue;
    if (func.getName().startswith(lgcName::SpillTable)) {
      // Direct uses of the spill table pointer.
      for (User *user : func.users()) {
        CallInst *call = cast<CallInst>(user);
        ShaderStage stage = getShaderStage(call->getFunction());
        assert(stage != ShaderStageCopyShader);
        getUserDataUsage(stage)->spillTable.users.push_back(call);
      }
      continue;
    }
    if (func.getName().startswith(lgcName::PushConst)) {
      for (User *user : func.users()) {
        // For this call to lgc.push.const, attempt to find all loads with a constant dword-aligned offset and
        // push into userDataUsage->pushConstOffsets. If we fail, set userDataUsage->pushConstSpill to indicate that
        // we need to keep the pointer to the push const, derived as an offset into the spill table.
        CallInst *call = cast<CallInst>(user);
        ShaderStage stage = getShaderStage(call->getFunction());
        assert(stage != ShaderStageCopyShader);
        auto userDataUsage = getUserDataUsage(stage);
        userDataUsage->pushConst.users.push_back(call);
        // Worklist of (instruction, dword offset from push-const base). It grows while we chase
        // bitcasts and constant-offset GEPs, so it must be iterated by index, not by iterator.
        SmallVector<std::pair<Instruction *, unsigned>, 4> users;
        users.push_back({call, 0});
        for (unsigned i = 0; i != users.size(); ++i) {
          Instruction *inst = users[i].first;
          for (User *user : inst->users()) {
            unsigned dwordOffset = users[i].second;
            if (auto bitcast = dyn_cast<BitCastInst>(user)) {
              // See through a bitcast.
              users.push_back({bitcast, dwordOffset});
              continue;
            }
            if (isa<LoadInst>(user) && !user->getType()->isAggregateType()) {
              unsigned byteSize = module->getDataLayout().getTypeStoreSize(user->getType());
              if (byteSize % 4 == 0) {
                // This is a scalar or vector load with dword-aligned size. We can attempt to unspill it, but, for
                // a particular dword offset, we only attempt to unspill ones with the same (minimum) size.
                unsigned dwordSize = byteSize / 4;
                userDataUsage->pushConstOffsets.resize(
                    std::max(unsigned(userDataUsage->pushConstOffsets.size()), dwordOffset + 1));
                auto &pushConstOffset = userDataUsage->pushConstOffsets[dwordOffset];
                if (pushConstOffset.dwordSize == 0 || pushConstOffset.dwordSize >= dwordSize) {
                  if (pushConstOffset.dwordSize != 0 && pushConstOffset.dwordSize != dwordSize) {
                    // This load type is smaller than previously seen ones at this offset. Forget the earlier
                    // ones (and mark that some uses of the push const pointer remain).
                    userDataUsage->pushConstSpill = true;
                    pushConstOffset.users.clear();
                  }
                  // Remember this load for possible unspilling.
                  pushConstOffset.dwordSize = dwordSize;
                  userDataUsage->pushConstOffsets[dwordOffset].users.push_back(cast<Instruction>(user));
                  continue;
                }
              }
            } else if (auto gep = dyn_cast<GetElementPtrInst>(user)) {
              // For a gep, calculate the new constant offset.
              APInt gepOffset(64, 0);
              if (gep->accumulateConstantOffset(module->getDataLayout(), gepOffset)) {
                unsigned gepByteOffset = gepOffset.getZExtValue();
                if (gepByteOffset % 4 == 0) {
                  // We still have a constant offset that is 4-aligned. Push it so we look at its users.
                  dwordOffset += gepByteOffset / 4;
                  users.push_back({gep, dwordOffset});
                  continue;
                }
              }
            }
            // We have found some user we can't handle. Mark that we need to keep the push const pointer.
            userDataUsage->pushConstSpill = true;
          }
        }
      }
      continue;
    }
    if (func.getName().startswith(lgcName::RootDescriptor)) {
      for (User *user : func.users()) {
        CallInst *call = cast<CallInst>(user);
        // Operand 0 is the dword offset of the root descriptor within the root user data layout.
        unsigned dwordOffset = cast<ConstantInt>(call->getArgOperand(0))->getZExtValue();
        ShaderStage stage = getShaderStage(call->getFunction());
        assert(stage != ShaderStageCopyShader);
        auto &rootDescriptors = getUserDataUsage(stage)->rootDescriptors;
        rootDescriptors.resize(std::max(rootDescriptors.size(), size_t(dwordOffset + 1)));
        rootDescriptors[dwordOffset].users.push_back(call);
      }
      continue;
    }
    if (func.getName().startswith(lgcName::SpecialUserData)) {
      for (User *user : func.users()) {
        CallInst *call = cast<CallInst>(user);
        ShaderStage stage = getShaderStage(call->getFunction());
        assert(stage != ShaderStageCopyShader);
        auto &specialUserData = getUserDataUsage(stage)->specialUserData;
        // Operand 0 is the UserDataMapping value; the array is indexed relative to GlobalTable.
        unsigned index = cast<ConstantInt>(call->getArgOperand(0))->getZExtValue() -
                         static_cast<unsigned>(UserDataMapping::GlobalTable);
        specialUserData.resize(std::max(specialUserData.size(), size_t(index + 1)));
        specialUserData[index].users.push_back(call);
      }
      continue;
    }
    if (func.getName().startswith(lgcName::DescriptorTableAddr)) {
      for (User *user : func.users()) {
        CallInst *call = cast<CallInst>(user);
        // Operands: 0 = resource node type, 1 = descriptor set, 2 = binding.
        ResourceNodeType resType = ResourceNodeType(cast<ConstantInt>(call->getArgOperand(0))->getZExtValue());
        unsigned set = cast<ConstantInt>(call->getArgOperand(1))->getZExtValue();
        unsigned binding = cast<ConstantInt>(call->getArgOperand(2))->getZExtValue();
        ShaderStage stage = getShaderStage(call->getFunction());
        assert(stage != ShaderStageCopyShader);
        auto &descriptorTable = getUserDataUsage(stage)->descriptorTables;
        if (m_pipelineState->isUnlinked() && m_pipelineState->getUserDataNodes().empty()) {
          // The user data nodes are not available, so we use the set as the
          // index.
          descriptorTable.resize(std::max(descriptorTable.size(), size_t(set + 1)));
          descriptorTable[set].users.push_back(call);
        } else {
          // The user data nodes are available, so we use the offset of the node as the
          // index.
          const ResourceNode *node;
          node = m_pipelineState->findResourceNode(resType, set, binding).first;
          assert(node && "Could not find resource node");
          uint32_t descTableIndex = node - &m_pipelineState->getUserDataNodes().front();
          descriptorTable.resize(std::max(descriptorTable.size(), size_t(descTableIndex + 1)));
          descriptorTable[descTableIndex].users.push_back(call);
        }
      }
    } else if (func.getName().startswith(lgcName::OutputExportXfb) && !func.use_empty()) {
      // Transform feedback is used somewhere: the last vertex-processing stage needs the streamout table.
      // A copy-shader result maps back to geometry, since the copy shader does not exist yet at this point.
      auto lastVertexStage = m_pipelineState->getLastVertexProcessingStage();
      lastVertexStage = lastVertexStage == ShaderStageCopyShader ? ShaderStageGeometry : lastVertexStage;
      getUserDataUsage(lastVertexStage)->usesStreamOutTable = true;
    }
  }
}
// =====================================================================================================================
// Fix up user data uses in all shaders: For unspilled ones, use the entry arg directly; for spilled ones,
// insert a load from the spill table, shared for the function.
// This uses the entryArgIdx fields in UserDataUsage; each one was set as follows:
// 1. addUserDataArgs constructed a UserDataArg for it, giving it a pointer to the applicable entryArgIdx field;
// 2. In determineUnspilledUserDataArgs, where it decides to unspill (i.e. keep in shader entry SGPR), it stores the
//    argument index into that pointed to value;
// 3. In this function, we use the entryArgIdx field to get the argument index. If it is 0, then the item was
//    spilled.
//
// Throughout, user-list entries are nulled out as they are consumed, so that a later function's pass over the
// same per-stage vectors skips already-rewritten call sites.
//
// @param module : IR module
void PatchEntryPointMutate::fixupUserDataUses(Module &module) {
  BuilderBase builder(module.getContext());
  // For each function definition...
  for (Function &func : module) {
    if (func.isDeclaration())
      continue;
    ShaderStage stage = getShaderStage(&func);
    auto userDataUsage = getUserDataUsage(stage);
    // If needed, generate code for the spill table pointer (as pointer to i8) at the start of the function.
    // NOTE(review): the spilled-item paths below dereference spillTable without a null check; this relies on
    // the invariant that any spilled usage in this function also forced a spill table entry arg -- confirm.
    Instruction *spillTable = nullptr;
    AddressExtender addressExtender(&func);
    if (userDataUsage->spillTable.entryArgIdx != 0) {
      builder.SetInsertPoint(addressExtender.getFirstInsertionPt());
      Argument *arg = getFunctionArgument(&func, userDataUsage->spillTable.entryArgIdx);
      // Extend the 32-bit SGPR value to a 64-bit constant-address-space pointer using the PC high half.
      spillTable = addressExtender.extend(arg, builder.getInt32(HighAddrPc),
                                          builder.getInt8Ty()->getPointerTo(ADDR_SPACE_CONST), builder);
    }
    // Handle direct uses of the spill table that were generated in DescBuilder.
    for (Instruction *&call : userDataUsage->spillTable.users) {
      if (call && call->getFunction() == &func) {
        call->replaceAllUsesWith(spillTable);
        call->eraseFromParent();
        call = nullptr;
      }
    }
    // Handle unspilled parts of the push constant.
    for (unsigned dwordOffset = 0; dwordOffset != userDataUsage->pushConstOffsets.size(); ++dwordOffset) {
      UserDataNodeUsage &pushConstOffset = userDataUsage->pushConstOffsets[dwordOffset];
      if (!pushConstOffset.users.empty()) {
        if (pushConstOffset.entryArgIdx) {
          // This offset into the push constant is unspilled. Replace the loads with the entry arg, with a
          // bitcast. (We know that all loads are non-aggregates of the same size, so we can bitcast.)
          Argument *arg = getFunctionArgument(&func, pushConstOffset.entryArgIdx);
          for (Instruction *&load : pushConstOffset.users) {
            if (load && load->getFunction() == &func) {
              builder.SetInsertPoint(load);
              Value *replacement = nullptr;
              if (!isa<PointerType>(load->getType()))
                replacement = builder.CreateBitCast(arg, load->getType());
              else {
                // For a pointer, we need to bitcast to a single int first, then to the pointer.
                replacement = builder.CreateBitCast(arg, builder.getIntNTy(arg->getType()->getPrimitiveSizeInBits()));
                replacement = builder.CreateIntToPtr(replacement, load->getType());
              }
              load->replaceAllUsesWith(replacement);
              load->eraseFromParent();
              load = nullptr;
            }
          }
        } else {
          // This offset into the push constant is spilled. All we need to do is ensure that the push constant
          // pointer (derived as an offset into the spill table) remains.
          userDataUsage->pushConstSpill = true;
        }
      }
    }
    // Handle the push constant pointer, always do that for compute libraries.
    if (!userDataUsage->pushConst.users.empty() || isComputeWithCalls()) {
      // If all uses of the push constant pointer are unspilled, we can just replace the lgc.push.const call
      // with undef, as the address is ultimately not used anywhere.
      Value *replacementVal = nullptr;
      if (userDataUsage->pushConstSpill) {
        // At least one use of the push constant pointer remains.
        const ResourceNode *node = m_pipelineState->findSingleRootResourceNode(ResourceNodeType::PushConst);
        Value *byteOffset = nullptr;
        // Insert right after the spill table pointer computation at the start of the function.
        builder.SetInsertPoint(spillTable->getNextNode());
        if (node) {
          byteOffset = builder.getInt32(node->offsetInDwords * 4);
          // Ensure we mark spill table usage.
          m_pipelineState->getPalMetadata()->setUserDataSpillUsage(node->offsetInDwords);
        } else if (!m_pipelineState->isUnlinked()) {
          // No push-const node in a linked pipeline: the offset is never meaningful.
          byteOffset = UndefValue::get(builder.getInt32Ty());
        } else {
          // Unlinked shader compilation: Use a reloc.
          byteOffset = builder.CreateRelocationConstant(reloc::Pushconst);
        }
        replacementVal = builder.CreateGEP(builder.getInt8Ty(), spillTable, byteOffset);
      }
      for (Instruction *&call : userDataUsage->pushConst.users) {
        if (call && call->getFunction() == &func) {
          Value *thisReplacementVal = replacementVal;
          if (!thisReplacementVal) {
            // No use of the push constant pointer remains. Just replace with undef.
            thisReplacementVal = UndefValue::get(call->getType());
          } else {
            builder.SetInsertPoint(call);
            thisReplacementVal = builder.CreateBitCast(thisReplacementVal, call->getType());
          }
          call->replaceAllUsesWith(thisReplacementVal);
          call->eraseFromParent();
          call = nullptr;
        }
      }
    }
    // Root descriptors ("dynamic descriptors").
    for (unsigned dwordOffset = 0; dwordOffset != userDataUsage->rootDescriptors.size(); ++dwordOffset) {
      auto &rootDescriptor = userDataUsage->rootDescriptors[dwordOffset];
      if (rootDescriptor.users.empty())
        continue;
      if (rootDescriptor.entryArgIdx != 0) {
        // The root descriptor is unspilled, and uses an entry arg.
        Argument *arg = getFunctionArgument(&func, rootDescriptor.entryArgIdx);
        for (Instruction *&call : rootDescriptor.users) {
          if (call && call->getFunction() == &func) {
            call->replaceAllUsesWith(arg);
            call->eraseFromParent();
            call = nullptr;
          }
        }
      } else {
        // The root descriptor is spilled. Ensure we mark spill table usage.
        m_pipelineState->getPalMetadata()->setUserDataSpillUsage(dwordOffset);
        Value *byteOffset = builder.getInt32(dwordOffset * 4);
        // Load the descriptor from the spill table at each use site.
        for (Instruction *&call : rootDescriptor.users) {
          if (call && call->getFunction() == &func) {
            builder.SetInsertPoint(call);
            Value *descPtr = builder.CreateGEP(builder.getInt8Ty(), spillTable, byteOffset);
            descPtr = builder.CreateBitCast(descPtr, call->getType()->getPointerTo(ADDR_SPACE_CONST));
            Value *desc = builder.CreateLoad(call->getType(), descPtr);
            desc->setName("rootDesc" + Twine(dwordOffset));
            call->replaceAllUsesWith(desc);
            call->eraseFromParent();
            call = nullptr;
          }
        }
      }
    }
    // Descriptor tables
    Type *ptrType = builder.getInt8Ty()->getPointerTo(ADDR_SPACE_CONST);
    for (unsigned userDataIdx = 0; userDataIdx != userDataUsage->descriptorTables.size(); ++userDataIdx) {
      auto &descriptorTable = userDataUsage->descriptorTables[userDataIdx];
      // Per-function load from the spill table, created lazily on the first spilled use and then shared.
      Instruction *spillTableLoad = nullptr;
      const bool isDescTableSpilled = descriptorTable.entryArgIdx == 0;
      // Cache of high-half -> extended 64-bit pointer; index 0/1 for unspilled/spilled source values.
      SmallDenseMap<Value *, Value *> addrExtMap[2];
      for (Instruction *&inst : descriptorTable.users) {
        Value *descTableVal = nullptr;
        if (inst && inst->getFunction() == &func) {
          auto call = cast<CallInst>(inst);
          assert(call->getType() == ptrType);
          if (isDescTableSpilled && !spillTableLoad) {
            // The descriptor table is spilled. At the start of the function, create the GEP and load which are then
            // shared by all users.
            std::string namePrefix = "descTable";
            builder.SetInsertPoint(spillTable->getNextNode());
            Value *offset = nullptr;
            if (!m_pipelineState->isUnlinked() || !m_pipelineState->getUserDataNodes().empty()) {
              const ResourceNode *node = &m_pipelineState->getUserDataNodes()[userDataIdx];
              m_pipelineState->getPalMetadata()->setUserDataSpillUsage(node->offsetInDwords);
              offset = builder.getInt32(node->offsetInDwords * 4);
            } else {
              // Shader compilation. Use a relocation to get the descriptor
              // table offset for the descriptor set userDataIdx.
              offset = builder.CreateRelocationConstant(reloc::DescriptorTableOffset + Twine(userDataIdx));
              namePrefix = "descSet";
            }
            Value *addr = builder.CreateGEP(builder.getInt8Ty(), spillTable, offset);
            addr = builder.CreateBitCast(addr, builder.getInt32Ty()->getPointerTo(ADDR_SPACE_CONST));
            spillTableLoad = builder.CreateLoad(builder.getInt32Ty(), addr);
            spillTableLoad->setName(namePrefix + Twine(userDataIdx));
          }
          // The address extension code only depends on descriptorTable (which is constant for the lifetime of the map)
          // and highHalf. Use map with highHalf keys to avoid creating redundant nodes for the extensions.
          Value *highHalf = call->getArgOperand(3);
          auto it = addrExtMap[isDescTableSpilled].find(highHalf);
          if (it != addrExtMap[isDescTableSpilled].end()) {
            descTableVal = it->second;
          } else {
            if (!isDescTableSpilled) {
              // The descriptor set is unspilled, and uses an entry arg.
              descTableVal = getFunctionArgument(&func, descriptorTable.entryArgIdx);
              if (isa<ConstantInt>(highHalf)) {
                // Set builder to insert the 32-to-64 extension code at the start of the function.
                builder.SetInsertPoint(addressExtender.getFirstInsertionPt());
              } else {
                // Set builder to insert the 32-to-64 extension code after the instruction containing the high half.
                Instruction *highHalfInst = cast<Instruction>(highHalf);
                builder.SetInsertPoint(highHalfInst->getNextNode());
              }
            } else {
              // The descriptor table is spilled, the load at the start of the function has been created.
              assert(descriptorTable.entryArgIdx == 0);
              assert(spillTableLoad);
              descTableVal = spillTableLoad;
              // Set builder to insert the 32-to-64 extension code just after the load.
              builder.SetInsertPoint(spillTableLoad->getNextNode());
            }
            // Now we want to extend the loaded 32-bit value to a 64-bit pointer, using either PC or the provided
            // high half.
            descTableVal = addressExtender.extend(descTableVal, highHalf, ptrType, builder);
            addrExtMap[isDescTableSpilled].insert({highHalf, descTableVal});
          }
          // Replace uses of the call and erase it.
          call->replaceAllUsesWith(descTableVal);
          call->eraseFromParent();
          inst = nullptr;
        }
      }
    }
    // Special user data from lgc.special.user.data calls
    for (unsigned idx = 0; idx != userDataUsage->specialUserData.size(); ++idx) {
      auto &specialUserData = userDataUsage->specialUserData[idx];
      if (!specialUserData.users.empty()) {
        Value *arg = nullptr;
        if (specialUserData.entryArgIdx == 0) {
          // This is the case that no arg was created for this value. That can happen, for example when
          // ViewIndex is used but is not enabled in pipeline state. So we need to handle it. We just replace
          // it with UndefValue.
          arg = UndefValue::get(specialUserData.users[0]->getType());
        } else {
          arg = getFunctionArgument(&func, specialUserData.entryArgIdx);
        }
        for (Instruction *&inst : specialUserData.users) {
          if (inst && inst->getFunction() == &func) {
            Value *replacementVal = arg;
            // NOTE(review): dyn_cast result is dereferenced without a null check. Users gathered in
            // gatherUserDataUsage were created via cast<CallInst>, so this should always be a CallInst;
            // cast<> would express that invariant better -- confirm before changing.
            auto call = dyn_cast<CallInst>(inst);
            if (call->arg_size() >= 2) {
              // There is a second operand, used by ShaderInputs::getSpecialUserDataAsPoint to indicate that we
              // need to extend the loaded 32-bit value to a 64-bit pointer, using either PC or the provided
              // high half.
              builder.SetInsertPoint(call);
              Value *highHalf = call->getArgOperand(1);
              replacementVal = addressExtender.extend(replacementVal, highHalf, call->getType(), builder);
            }
            inst->replaceAllUsesWith(replacementVal);
            inst->eraseFromParent();
            inst = nullptr;
          }
        }
      }
    }
  }
}
// =====================================================================================================================
// Process a single graphics shader: rebuild its entry point with the computed user data and hardware
// shader input arguments, then remove the original.
//
// @param shaderInputs : ShaderInputs object representing hardware-provided shader inputs
void PatchEntryPointMutate::processShader(ShaderInputs *shaderInputs) {
  // Work out the argument types and names for the mutated entry point.
  SmallVector<Type *, 8> newArgTys;
  SmallVector<std::string, 8> newArgNames;
  const uint64_t inRegMask = generateEntryPointArgTys(shaderInputs, newArgTys, newArgNames, 0);
  // Create the replacement function; code and attributes are transferred onto it.
  Function *const oldEntryPoint = m_entryPoint;
  Type *const retTy = oldEntryPoint->getFunctionType()->getReturnType();
  Function *const newEntryPoint = addFunctionArgs(oldEntryPoint, retTy, newArgTys, newArgNames, inRegMask);
  setFuncAttrs(newEntryPoint);
  // Drop the original entry point and rewrite call sites to forward the new args.
  const int argOffset = oldEntryPoint->getFunctionType()->getNumParams();
  oldEntryPoint->eraseFromParent();
  processCalls(*newEntryPoint, newArgTys, newArgNames, inRegMask, argOffset);
}
// =====================================================================================================================
// Process all functions in a compute pipeline or library: append the shared ABI args (user data plus
// hardware shader inputs) to every function definition and redirect all uses to the new functions.
//
// @param shaderInputs : ShaderInputs object representing hardware-provided shader inputs
// @param [in/out] module : Module
void PatchEntryPointMutate::processComputeFuncs(ShaderInputs *shaderInputs, Module &module) {
  m_shaderStage = ShaderStageCompute;
  // We no longer support compute shader fixed layout required before PAL interface version 624.
  if (m_pipelineState->getLgcContext()->getPalAbiVersion() < 624)
    report_fatal_error("Compute shader not supported before PAL version 624");
  // Snapshot the definitions up front; the loop below mutates the module's function list.
  SmallVector<Function *, 4> definedFuncs;
  for (Function &func : module) {
    if (!func.isDeclaration())
      definedFuncs.push_back(&func);
  }
  for (Function *oldFunc : definedFuncs) {
    auto *oldFuncTy = oldFunc->getFunctionType();
    // Determine the args appended onto every function.
    SmallVector<Type *, 20> inputTys;
    SmallVector<std::string, 20> inputNames;
    const uint64_t inRegMask = generateEntryPointArgTys(shaderInputs, inputTys, inputNames, oldFuncTy->getNumParams());
    // Create the replacement function; code and attributes are transferred onto it.
    Function *newFunc = addFunctionArgs(oldFunc, oldFuncTy->getReturnType(), inputTys, inputNames, inRegMask, true);
    setFuncAttrs(newFunc);
    // Redirect every use of the old function to a bitcast of the new one.
    SmallVector<Use *, 4> oldUses;
    for (Use &use : oldFunc->uses())
      oldUses.push_back(&use);
    Constant *castNewFunc = ConstantExpr::getBitCast(newFunc, oldFunc->getType());
    for (Use *use : oldUses)
      *use = castNewFunc;
    // Remove the original, then (for compute-with-calls) fix up call sites to forward the new args.
    const int argOffset = oldFuncTy->getNumParams();
    oldFunc->eraseFromParent();
    if (isComputeWithCalls())
      processCalls(*newFunc, inputTys, inputNames, inRegMask, argOffset);
  }
}
// =====================================================================================================================
// Process all real function calls in func and pass the appended ABI args through to them.
//
// This applies to a compute pipeline with non-inlined functions or library calls, or a compute library:
// every non-intrinsic, non-lgc.*, non-inline-asm call must be rewritten to forward the extra args.
//
// @param [in/out] func : Function whose call sites are rewritten
// @param shaderInputTys : Types of the ABI args appended to every function
// @param shaderInputNames : Names of the appended ABI args (unused here; kept for signature symmetry)
// @param inRegMask : Bit N set means appended arg N must carry the "inreg" attribute (SGPR)
// @param argOffset : Index in func's arg list where the appended ABI args start
void PatchEntryPointMutate::processCalls(Function &func, SmallVectorImpl<Type *> &shaderInputTys,
                                         SmallVectorImpl<std::string> &shaderInputNames, uint64_t inRegMask,
                                         unsigned argOffset) {
  IRBuilder<> builder(func.getContext());
  for (BasicBlock &block : func) {
    // Early-increment iteration so the current instruction can be erased safely.
    for (Instruction &inst : make_early_inc_range(block)) {
      auto origCall = dyn_cast<CallInst>(&inst);
      if (!origCall)
        continue;
      // Skip intrinsics, internal lgc.* calls, and inline asm.
      Value *callee = origCall->getCalledOperand();
      auto calleeFunc = dyn_cast<Function>(callee);
      if (calleeFunc) {
        if (calleeFunc->isIntrinsic() || calleeFunc->getName().startswith(lgcName::InternalCallPrefix))
          continue;
      } else if (origCall->isInlineAsm()) {
        continue;
      }
      // Assemble the new argument list: the original call args, then the shared ABI args taken
      // from this function's own appended parameters.
      SmallVector<Type *, 20> newArgTys;
      SmallVector<Value *, 20> newArgs;
      for (unsigned i = 0; i != origCall->arg_size(); ++i) {
        newArgTys.push_back(origCall->getArgOperand(i)->getType());
        newArgs.push_back(origCall->getArgOperand(i));
      }
      for (unsigned i = 0; i != shaderInputTys.size(); ++i) {
        newArgTys.push_back(func.getArg(i + argOffset)->getType());
        newArgs.push_back(func.getArg(i + argOffset));
      }
      // Bitcast the callee to the widened function type. If the callee is already the inverse
      // bitcast, strip it rather than stacking casts. (A plain function declaration callee was
      // never wrapped in a bitcast, so it takes the CreateBitCast path.)
      FunctionType *newCalleeTy = FunctionType::get(origCall->getType(), newArgTys, false);
      builder.SetInsertPoint(origCall);
      Type *newCalleePtrTy = newCalleeTy->getPointerTo(callee->getType()->getPointerAddressSpace());
      auto castOp = dyn_cast<BitCastOperator>(callee);
      Value *newCallee = nullptr;
      if (castOp && castOp->getOperand(0)->getType() == newCalleePtrTy)
        newCallee = castOp->getOperand(0);
      else
        newCallee = builder.CreateBitCast(callee, newCalleePtrTy);
      // Issue the replacement call with the AMDGPU_Gfx calling convention, and keep a direct
      // callee's convention in sync to avoid a caller/callee mismatch.
      CallInst *newCall = builder.CreateCall(newCalleeTy, newCallee, newArgs);
      newCall->setCallingConv(CallingConv::AMDGPU_Gfx);
      if (calleeFunc)
        calleeFunc->setCallingConv(CallingConv::AMDGPU_Gfx);
      // Mark the SGPR-resident appended args as "inreg".
      for (unsigned i = 0; i != shaderInputTys.size(); ++i) {
        if ((inRegMask >> i) & 1)
          newCall->addParamAttr(i + origCall->arg_size(), Attribute::InReg);
      }
      // Replace and erase the original call.
      origCall->replaceAllUsesWith(newCall);
      origCall->eraseFromParent();
    }
  }
}
// =====================================================================================================================
// Set Attributes on new function
//
// Sets backend-consumed function attributes on the rewritten entry point: the fragment shader SPI input
// enables and export flags, and the VGPR/SGPR/wave/unroll limits taken from the shader options.
//
// @param entryPoint : The new entry-point function to receive the attributes
void PatchEntryPointMutate::setFuncAttrs(Function *entryPoint) {
#if LLVM_MAIN_REVISION && LLVM_MAIN_REVISION < 409358
  // Old version of the code
  AttrBuilder builder;
#else
  // New version of the code (also handles unknown version, which we treat as latest)
  AttrBuilder builder(entryPoint->getContext());
#endif
  if (m_shaderStage == ShaderStageFragment) {
    auto &builtInUsage = m_pipelineState->getShaderResourceUsage(ShaderStageFragment)->builtInUsage.fs;
    // Build the SPI_PS_INPUT_ADDR value: each bit enables a hardware-provided interpolant or system
    // value, derived from which built-ins and barycentric modes the fragment shader actually uses.
    SpiPsInputAddr spiPsInputAddr = {};
    spiPsInputAddr.bits.perspSampleEna =
        ((builtInUsage.smooth && builtInUsage.sample) || builtInUsage.baryCoordSmoothSample);
    spiPsInputAddr.bits.perspCenterEna = ((builtInUsage.smooth && builtInUsage.center) || builtInUsage.baryCoordSmooth);
    spiPsInputAddr.bits.perspCentroidEna =
        ((builtInUsage.smooth && builtInUsage.centroid) || builtInUsage.baryCoordSmoothCentroid);
    spiPsInputAddr.bits.perspPullModelEna =
        ((builtInUsage.smooth && builtInUsage.pullMode) || builtInUsage.baryCoordPullModel);
    spiPsInputAddr.bits.linearSampleEna =
        ((builtInUsage.noperspective && builtInUsage.sample) || builtInUsage.baryCoordNoPerspSample);
    spiPsInputAddr.bits.linearCenterEna =
        ((builtInUsage.noperspective && builtInUsage.center) || builtInUsage.baryCoordNoPersp);
    spiPsInputAddr.bits.linearCentroidEna =
        ((builtInUsage.noperspective && builtInUsage.centroid) || builtInUsage.baryCoordNoPerspCentroid);
    // All four fragCoord components are enabled together.
    spiPsInputAddr.bits.posXFloatEna = builtInUsage.fragCoord;
    spiPsInputAddr.bits.posYFloatEna = builtInUsage.fragCoord;
    spiPsInputAddr.bits.posZFloatEna = builtInUsage.fragCoord;
    spiPsInputAddr.bits.posWFloatEna = builtInUsage.fragCoord;
    spiPsInputAddr.bits.frontFaceEna = builtInUsage.frontFacing;
    // The ancillary VGPR carries both sample ID and shading rate.
    spiPsInputAddr.bits.ancillaryEna = builtInUsage.sampleId;
    spiPsInputAddr.bits.ancillaryEna |= builtInUsage.shadingRate;
    spiPsInputAddr.bits.sampleCoverageEna = builtInUsage.sampleMaskIn;
    builder.addAttribute("InitialPSInputAddr", std::to_string(spiPsInputAddr.u32All));
    bool hasDepthExport = builtInUsage.sampleMask || builtInUsage.fragStencilRef || builtInUsage.fragDepth;
    builder.addAttribute("amdgpu-depth-export", hasDepthExport ? "1" : "0");
    // mmSPI_SHADER_COL_FORMAT is used for fully compiled shaders
    unsigned colFormat = m_pipelineState->getPalMetadata()->getRegister(mmSPI_SHADER_COL_FORMAT);
    // getColorExportCount() is used for partially compiled shaders
    unsigned colorExportCount = m_pipelineState->getPalMetadata()->getColorExportCount();
    // A depth export counts as one export, so more than (hasDepthExport ? 1 : 0) implies a color export.
    bool hasColorExport = (colFormat != EXP_FORMAT_ZERO) || (colorExportCount > (hasDepthExport ? 1 : 0));
    builder.addAttribute("amdgpu-color-export", hasColorExport ? "1" : "0");
  }
  // Set VGPR, SGPR, and wave limits
  auto shaderOptions = &m_pipelineState->getShaderOptions(m_shaderStage);
  auto resUsage = m_pipelineState->getShaderResourceUsage(m_shaderStage);
  unsigned vgprLimit = shaderOptions->vgprLimit;
  unsigned sgprLimit = shaderOptions->sgprLimit;
  if (vgprLimit != 0) {
    builder.addAttribute("amdgpu-num-vgpr", std::to_string(vgprLimit));
    resUsage->numVgprsAvailable = std::min(vgprLimit, resUsage->numVgprsAvailable);
  }
  // Clamp to the hardware maximum regardless of whether an option limit was given.
  resUsage->numVgprsAvailable =
      std::min(resUsage->numVgprsAvailable, m_pipelineState->getTargetInfo().getGpuProperty().maxVgprsAvailable);
  if (sgprLimit != 0) {
    builder.addAttribute("amdgpu-num-sgpr", std::to_string(sgprLimit));
    resUsage->numSgprsAvailable = std::min(sgprLimit, resUsage->numSgprsAvailable);
  }
  resUsage->numSgprsAvailable =
      std::min(resUsage->numSgprsAvailable, m_pipelineState->getTargetInfo().getGpuProperty().maxSgprsAvailable);
  if (shaderOptions->maxThreadGroupsPerComputeUnit != 0) {
    // "amdgpu-waves-per-eu" takes a "min,max" pair; the minimum is always 1 here.
    std::string wavesPerEu = std::string("1,") + std::to_string(shaderOptions->maxThreadGroupsPerComputeUnit);
    builder.addAttribute("amdgpu-waves-per-eu", wavesPerEu);
  }
  if (shaderOptions->unrollThreshold != 0)
    builder.addAttribute("amdgpu-unroll-threshold", std::to_string(shaderOptions->unrollThreshold));
  else {
    // use a default unroll threshold of 700
    builder.addAttribute("amdgpu-unroll-threshold", "700");
  }
#if LLVM_MAIN_REVISION && LLVM_MAIN_REVISION < 396807
  // Old version of the code
  AttributeList::AttrIndex attribIdx = AttributeList::AttrIndex(AttributeList::FunctionIndex);
  entryPoint->addAttributes(attribIdx, builder);
#else
  // New version of the code (also handles unknown version, which we treat as
  // latest)
  entryPoint->addFnAttrs(builder);
#endif
  // NOTE: Remove "readnone" attribute for entry-point. If GS is empty, this attribute will allow
  // LLVM optimization to remove sendmsg(GS_DONE). It is unexpected.
  if (entryPoint->hasFnAttribute(Attribute::ReadNone))
    entryPoint->removeFnAttr(Attribute::ReadNone);
}
// =====================================================================================================================
// Generates the type for the new entry-point based on already-collected info.
// This is what decides what SGPRs and VGPRs are passed to the shader at wave dispatch:
//
// * (For a GFX9+ merged shader or NGG primitive shader, the 8 system SGPRs at the start are not accounted for here.)
// * The "user data" SGPRs, up to 32 (GFX9+ non-compute shader) or 16 (compute shader or <=GFX8). Many of the values
// here are pointers, but are passed as a single 32-bit register and then expanded to 64-bit in the shader code:
// - The "global information table", containing various descriptors such as the inter-shader rings
// - The "per-shader table", which is added here but appears to be unused
// - The streamout table if needed
// - Nodes from the root user data layout, including pointers to descriptor sets.
// - Various other system values set up by PAL, such as the vertex buffer table and the vertex base index
// - The spill table pointer if needed. This is typically in the last register (s15 or s31), but not necessarily.
// * The system value SGPRs and VGPRs determined by hardware, some of which are enabled or disabled by bits in SPI
// registers.
//
// In GFX9+ shader merging, shaders have not yet been merged, and this function is called for each
// unmerged shader stage. The code here needs to ensure that it gets the same SGPR user data layout for
// both shaders that are going to be merged (VS-HS, VS-GS if no tessellation, ES-GS).
//
// @param shaderInputs : ShaderInputs object representing hardware-provided shader inputs
// @param [out] argTys : The argument types for the new function type
// @param [out] argNames : The argument names corresponding to the argument types
// @param argOffset : Offset added to each recorded argument index, accounting for arguments that precede
//                    these ones in the final function signature
// @returns inRegMask : "Inreg" bit mask for the arguments, with a bit set to indicate that the corresponding
//                      arg needs to have an "inreg" attribute to put the arg into SGPRs rather than VGPRs
//
uint64_t PatchEntryPointMutate::generateEntryPointArgTys(ShaderInputs *shaderInputs, SmallVectorImpl<Type *> &argTys,
                                                         SmallVectorImpl<std::string> &argNames, unsigned argOffset) {
  uint64_t inRegMask = 0;
  IRBuilder<> builder(*m_context);
  auto intfData = m_pipelineState->getShaderInterfaceData(m_shaderStage);
  auto &entryArgIdxs = intfData->entryArgIdxs;
  // Mark the entry-arg-index struct as filled in, for later consumers of the interface data.
  entryArgIdxs.initialized = true;
  // First we collect the user data args in two vectors:
  // - userDataArgs: global table, per-shader table and streamout table, followed by the nodes from the root user
  //   data layout (excluding vertex buffer and streamout tables). Some of them may need to be spilled due to
  //   running out of entry SGPRs
  // - specialUserDataArgs: special values that go at the end, such as ViewId.
  //
  // The UserDataArg for each arg pushed into these vectors contains:
  // - argTy: The IR type of the arg
  // - argDwordSize: Size of the arg in dwords
  // - userDataValue: The PAL metadata value to be passed to PalMetadata::setUserDataEntry, or Invalid for none
  // - argIndex: Pointer to the location where we will store the actual arg number, or nullptr
  SmallVector<UserDataArg, 8> userDataArgs;
  SmallVector<UserDataArg, 4> specialUserDataArgs;
  // Global internal table
  userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "globalTable", UserDataMapping::GlobalTable));
  // Per-shader table
  // TODO: We need add per shader table per real usage after switch to PAL new interface.
  // if (pResUsage->perShaderTable)
  userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "perShaderTable"));
  addSpecialUserDataArgs(userDataArgs, specialUserDataArgs, builder);
  addUserDataArgs(userDataArgs, builder);
  // Determine which user data args are going to be "unspilled", and put them in unspilledArgs.
  SmallVector<UserDataArg, 8> unspilledArgs;
  determineUnspilledUserDataArgs(userDataArgs, specialUserDataArgs, builder, unspilledArgs);
  // Scan unspilledArgs: for each one:
  // * add it to the arg type array
  // * set user data PAL metadata
  // * store the arg index into the pointer provided to the xxxArgs.push()
  // * if it's special user data, also store the arg index into the specialUserData entry.
  unsigned userDataIdx = 0;
  for (const auto &userDataArg : unspilledArgs) {
    if (userDataArg.argIndex)
      *userDataArg.argIndex = argTys.size() + argOffset;
    unsigned dwordSize = userDataArg.argDwordSize;
    if (userDataArg.userDataValue != static_cast<unsigned>(UserDataMapping::Invalid)) {
      m_pipelineState->getPalMetadata()->setUserDataEntry(m_shaderStage, userDataIdx, userDataArg.userDataValue,
                                                          dwordSize);
      // User data values at or above GlobalTable are "special" ones: also record the entry arg index
      // in this stage's specialUserData usage table.
      if (userDataArg.userDataValue >= static_cast<unsigned>(UserDataMapping::GlobalTable)) {
        unsigned index = userDataArg.userDataValue - static_cast<unsigned>(UserDataMapping::GlobalTable);
        auto &specialUserData = getUserDataUsage(m_shaderStage)->specialUserData;
        if (index < specialUserData.size())
          specialUserData[index].entryArgIdx = argTys.size() + argOffset;
      }
    }
    argTys.push_back(userDataArg.argTy);
    argNames.push_back(userDataArg.name);
    userDataIdx += dwordSize;
  }
  intfData->userDataCount = userDataIdx;
  // Every arg added so far is a user data SGPR arg, so every corresponding bit is set in the "inreg" mask.
  inRegMask = (1ull << argTys.size()) - 1;
  // Push the fixed system (not user data) register args.
  inRegMask |= shaderInputs->getShaderArgTys(m_pipelineState, m_shaderStage, argTys, argNames, argOffset);
  return inRegMask;
}
// =====================================================================================================================
// Add a UserDataArg to the appropriate vector for each special argument (e.g. ViewId) needed in user data SGPRs.
// In here, we need to check whether an argument is needed in two ways:
// 1. Whether a flag is set saying it will be needed after PatchEntryPointMutate
// 2. Whether there is an actual use of the special user data value (lgc.special.user.data call) generated
// before PatchEntryPointMutate, which we check with userDataUsage->isSpecialUserDataUsed().
//
// @param userDataArgs : Vector to add args to when they need to go before user data nodes (just streamout)
// @param specialUserDataArgs : Vector to add args to when they need to go after user data nodes (all the rest)
// @param builder : IRBuilder to get types from
void PatchEntryPointMutate::addSpecialUserDataArgs(SmallVectorImpl<UserDataArg> &userDataArgs,
                                                   SmallVectorImpl<UserDataArg> &specialUserDataArgs,
                                                   IRBuilder<> &builder) {
  // Usage information for this (merged) shader stage, gathered earlier in the pass.
  auto userDataUsage = getUserDataUsage(m_shaderStage);
  auto intfData = m_pipelineState->getShaderInterfaceData(m_shaderStage);
  auto &entryArgIdxs = intfData->entryArgIdxs;
  bool enableNgg = m_pipelineState->isGraphics() ? m_pipelineState->getNggControl()->enableNgg : false;
  if (m_shaderStage == ShaderStageVertex || m_shaderStage == ShaderStageTessControl ||
      m_shaderStage == ShaderStageTessEval || m_shaderStage == ShaderStageGeometry) {
    // Shader stage in the vertex-processing half of a graphics pipeline.
    // We need to ensure that the layout is the same between two shader stages that will be merged on GFX9+,
    // that is, VS-TCS, VS-GS (if no tessellation), TES-GS.
    // NOTE: The user data to emulate gl_ViewIndex is somewhat common. To make it consistent for GFX9
    // merged shader, we place it prior to any other special user data.
    if (m_pipelineState->getInputAssemblyState().enableMultiView) {
      // Pick the per-stage location in entryArgIdxs where the ViewId arg index will be stored.
      unsigned *argIdx = nullptr;
      auto userDataValue = UserDataMapping::ViewId;
      switch (m_shaderStage) {
      case ShaderStageVertex:
        argIdx = &entryArgIdxs.vs.viewIndex;
        break;
      case ShaderStageTessControl:
        argIdx = &entryArgIdxs.tcs.viewIndex;
        break;
      case ShaderStageTessEval:
        argIdx = &entryArgIdxs.tes.viewIndex;
        break;
      case ShaderStageGeometry:
        argIdx = &entryArgIdxs.gs.viewIndex;
        break;
      default:
        llvm_unreachable("Unexpected shader stage");
      }
      specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "viewId", userDataValue, argIdx));
    }
    // NOTE: Add a dummy "inreg" argument for ES-GS LDS size, this is to keep consistent
    // with PAL's GS on-chip behavior (VS is in NGG primitive shader).
    bool wantEsGsLdsSize = false;
    switch (getMergedShaderStage(m_shaderStage)) {
    case ShaderStageVertex:
      wantEsGsLdsSize = enableNgg;
      break;
    case ShaderStageTessControl:
      wantEsGsLdsSize = false;
      break;
    case ShaderStageTessEval:
      wantEsGsLdsSize = enableNgg;
      break;
    case ShaderStageGeometry:
      wantEsGsLdsSize = (m_pipelineState->isGsOnChip() && cl::InRegEsGsLdsSize) || enableNgg;
      break;
    default:
      llvm_unreachable("Unexpected shader stage");
    }
    if (wantEsGsLdsSize) {
      auto userDataValue = UserDataMapping::EsGsLdsSize;
      // For a standalone TCS (which can only happen in unit testing, not in a real pipeline), don't add
      // the PAL metadata for it, for consistency with the old code.
      if (m_shaderStage == ShaderStageVertex && !m_pipelineState->hasShaderStage(ShaderStageVertex))
        userDataValue = UserDataMapping::Invalid;
      specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "esGsLdsSize", userDataValue));
    }
    if (getMergedShaderStage(m_shaderStage) == getMergedShaderStage(ShaderStageVertex)) {
      // This is the VS, or the shader that VS is merged into on GFX9+.
      auto vsIntfData = m_pipelineState->getShaderInterfaceData(ShaderStageVertex);
      auto vsResUsage = m_pipelineState->getShaderResourceUsage(ShaderStageVertex);
      // Detect whether this is an unlinked compile that will need a fetch shader. If so, we need to
      // add the vertex buffer table and base vertex and base instance, even if they appear unused here.
      bool willHaveFetchShader = m_pipelineState->getPalMetadata()->getVertexFetchCount() != 0;
      // Vertex buffer table.
      if (willHaveFetchShader || userDataUsage->isSpecialUserDataUsed(UserDataMapping::VertexBufferTable)) {
        specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "vertexBufferTable",
                                                  UserDataMapping::VertexBufferTable,
                                                  &vsIntfData->entryArgIdxs.vs.vbTablePtr));
      }
      // Base vertex and base instance.
      if (willHaveFetchShader || vsResUsage->builtInUsage.vs.baseVertex || vsResUsage->builtInUsage.vs.baseInstance ||
          userDataUsage->isSpecialUserDataUsed(UserDataMapping::BaseVertex) ||
          userDataUsage->isSpecialUserDataUsed(UserDataMapping::BaseInstance)) {
        specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "baseVertex", UserDataMapping::BaseVertex,
                                                  &vsIntfData->entryArgIdxs.vs.baseVertex));
        specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "baseInstance", UserDataMapping::BaseInstance,
                                                  &vsIntfData->entryArgIdxs.vs.baseInstance));
      }
      // Draw index.
      if (userDataUsage->isSpecialUserDataUsed(UserDataMapping::DrawIndex))
        specialUserDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "drawIndex", UserDataMapping::DrawIndex));
    }
  } else if (m_shaderStage == ShaderStageCompute) {
    // Pass the gl_NumWorkgroups pointer in user data registers.
    // Always enable this, even if unused, if compute library is in use.
    // Unlike all the special user data values above, which go after the user data node args, this goes before.
    // That is to ensure that, with a compute pipeline using a library, library code knows where to find it
    // even if it thinks that the user data layout is a prefix of what the pipeline thinks it is.
    if (isComputeWithCalls() || userDataUsage->isSpecialUserDataUsed(UserDataMapping::Workgroup)) {
      auto numWorkgroupsPtrTy = PointerType::get(FixedVectorType::get(builder.getInt32Ty(), 3), ADDR_SPACE_CONST);
      userDataArgs.push_back(UserDataArg(numWorkgroupsPtrTy, "numWorkgroupsPtr", UserDataMapping::Workgroup, nullptr));
    }
  }
  // Allocate register for stream-out buffer table, to go before the user data node args (unlike all the ones
  // above, which go after the user data node args).
  if (userDataUsage->usesStreamOutTable || userDataUsage->isSpecialUserDataUsed(UserDataMapping::StreamOutTable)) {
    if (enableNgg || !m_pipelineState->getShaderResourceUsage(ShaderStageCopyShader)->inOutUsage.enableXfb) {
      // If no NGG, stream out table will be set to copy shader's user data entry, we should not set it duplicately.
      switch (m_shaderStage) {
      case ShaderStageVertex:
        userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "streamOutTable", UserDataMapping::StreamOutTable,
                                           &intfData->entryArgIdxs.vs.streamOutData.tablePtr));
        break;
      case ShaderStageTessEval:
        userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "streamOutTable", UserDataMapping::StreamOutTable,
                                           &intfData->entryArgIdxs.tes.streamOutData.tablePtr));
        break;
      case ShaderStageGeometry:
        if (m_pipelineState->getTargetInfo().getGfxIpVersion().major <= 10) {
          // Allocate dummy stream-out register for geometry shader
          userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "dummyStreamOut"));
        }
        break;
      default:
        llvm_unreachable("Should never be called!");
        break;
      }
    }
  }
}
// =====================================================================================================================
// Add a UserDataArg to the vector for each user data node needed in user data SGPRs.
//
// @param userDataArgs : Vector to add args to
// @param builder : IRBuilder to get types from
void PatchEntryPointMutate::addUserDataArgs(SmallVectorImpl<UserDataArg> &userDataArgs, IRBuilder<> &builder) {
  auto userDataUsage = getUserDataUsage(m_shaderStage);
  if (m_pipelineState->isUnlinked() && m_pipelineState->getUserDataNodes().empty()) {
    // Shader compilation with no user data layout. Add descriptor sets directly from the user data usage
    // gathered at the start of this pass.
    for (unsigned descSetIdx = 0; descSetIdx != userDataUsage->descriptorTables.size(); ++descSetIdx) {
      auto &descriptorTable = userDataUsage->descriptorTables[descSetIdx];
      if (!descriptorTable.users.empty()) {
        // Set the PAL metadata user data value to indicate that it needs modifying at link time.
        assert(descSetIdx <= static_cast<unsigned>(UserDataMapping::DescriptorSetMax) -
                                 static_cast<unsigned>(UserDataMapping::DescriptorSet0));
        unsigned userDataValue = static_cast<unsigned>(UserDataMapping::DescriptorSet0) + descSetIdx;
        userDataArgs.push_back(UserDataArg(builder.getInt32Ty(), "descTable" + Twine(descSetIdx), userDataValue,
                                           &descriptorTable.entryArgIdx));
      }
    }
    // Add push constants (if used).
    // We add a potential unspilled arg for each separate dword offset of the push const at which there is a load.
    // We already know that loads we have on our pushConstOffsets lists are at dword-aligned offset and dword-aligned
    // size. We need to ensure that all loads are the same size, by removing ones that are bigger than the
    // minimum size.
    for (unsigned dwordOffset = 0, dwordEndOffset = userDataUsage->pushConstOffsets.size();
         dwordOffset != dwordEndOffset; ++dwordOffset) {
      UserDataNodeUsage &pushConstOffset = userDataUsage->pushConstOffsets[dwordOffset];
      if (pushConstOffset.users.empty())
        continue;
      // Check that the load size does not overlap with the next used offset in the push constant.
      bool haveOverlap = false;
      unsigned endOffset =
          std::min(dwordOffset + pushConstOffset.dwordSize, unsigned(userDataUsage->pushConstOffsets.size()));
      for (unsigned followingOffset = dwordOffset + 1; followingOffset != endOffset; ++followingOffset) {
        if (!userDataUsage->pushConstOffsets[followingOffset].users.empty()) {
          haveOverlap = true;
          break;
        }
      }
      if (haveOverlap) {
        // Overlapping loads cannot all be unspilled; fall back to the spill table for this range.
        userDataUsage->pushConstSpill = true;
        continue;
      }
      // Add the arg (part of the push const) that we can potentially unspill.
      assert(dwordOffset + pushConstOffset.dwordSize - 1 <=
             static_cast<unsigned>(UserDataMapping::PushConstMax) - static_cast<unsigned>(UserDataMapping::PushConst0));
      addUserDataArg(userDataArgs, static_cast<unsigned>(UserDataMapping::PushConst0) + dwordOffset,
                     pushConstOffset.dwordSize, "pushConst" + Twine(dwordOffset), &pushConstOffset.entryArgIdx,
                     builder);
    }
    return;
  }
  // We do have user data layout.
  // Add entries from the root user data layout (not vertex buffer or streamout, and not unused ones).
  for (unsigned userDataNodeIdx = 0; userDataNodeIdx != m_pipelineState->getUserDataNodes().size(); ++userDataNodeIdx) {
    const ResourceNode &node = m_pipelineState->getUserDataNodes()[userDataNodeIdx];
    switch (node.type) {
    case ResourceNodeType::IndirectUserDataVaPtr:
    case ResourceNodeType::StreamOutTableVaPtr:
      // Vertex buffer and streamout tables are handled elsewhere (addSpecialUserDataArgs); skip them here.
      break;
    case ResourceNodeType::DescriptorTableVaPtr: {
      // Check if the descriptor set is in use. For compute with calls, enable it anyway.
      UserDataNodeUsage *descSetUsage = nullptr;
      if (userDataUsage->descriptorTables.size() > userDataNodeIdx)
        descSetUsage = &userDataUsage->descriptorTables[userDataNodeIdx];
      if (!isComputeWithCalls() && (!descSetUsage || descSetUsage->users.empty()))
        break;
      unsigned userDataValue = node.offsetInDwords;
      if (m_pipelineState->getShaderOptions(m_shaderStage).updateDescInElf && m_shaderStage == ShaderStageFragment) {
        // Put set number to register first, will update offset after merge ELFs
        // For partial pipeline compile, only fragment shader needs to adjust offset of root descriptor.
        // This is part of the original "partial pipeline compile" scheme, and it uses a magic number for the
        // PAL metadata register value because the code to fix it up in llpcElfWriter.cpp just fixes up any
        // register with the magic value, and hopes it lucks out by not getting a false positive.
        // TODO: Remove all that code once the new "shader/part-pipeline compile" scheme can replace it.
        static const unsigned DescRelocMagic = 0xA5A5A500;
        userDataValue = DescRelocMagic | node.innerTable[0].set;
      }
      // Add the arg (descriptor set pointer) that we can potentially unspill.
      unsigned *argIndex = descSetUsage == nullptr ? nullptr : &descSetUsage->entryArgIdx;
      addUserDataArg(userDataArgs, userDataValue, node.sizeInDwords, "descTable" + Twine(userDataNodeIdx), argIndex,
                     builder);
      break;
    }
    case ResourceNodeType::PushConst: {
      // Always spill for compute libraries.
      if (!isComputeWithCalls()) {
        // We add a potential unspilled arg for each separate dword offset of the push const at which there is a load.
        // We already know that loads we have on our pushConstOffsets lists are at dword-aligned offset and
        // dword-aligned size. We need to ensure that all loads are the same size, by removing ones that are bigger than
        // the minimum size.
        //
        // First cope with the case that the app uses more push const than the size of the resource node. This is
        // a workaround for an incorrect application; according to the Vulkan spec (version 1.2.151, section 14.6.1
        // "Push Constant Interface"):
        //
        //   Each statically used member of a push constant block must be placed at an Offset such that the entire
        //   member is entirely contained within the VkPushConstantRange for each OpEntryPoint that uses it, and
        //   the stageFlags for that range must specify the appropriate VkShaderStageFlagBits for that stage.
        unsigned dwordEndOffset = userDataUsage->pushConstOffsets.size();
        if (dwordEndOffset > node.sizeInDwords) {
          userDataUsage->pushConstSpill = true;
          dwordEndOffset = node.sizeInDwords;
        }
        for (unsigned dwordOffset = 0; dwordOffset != dwordEndOffset; ++dwordOffset) {
          UserDataNodeUsage &pushConstOffset = userDataUsage->pushConstOffsets[dwordOffset];
          if (pushConstOffset.users.empty())
            continue;
          // Check that the load size does not overlap with the next used offset in the push constant.
          bool haveOverlap = false;
          unsigned endOffset =
              std::min(dwordOffset + pushConstOffset.dwordSize, unsigned(userDataUsage->pushConstOffsets.size()));
          for (unsigned followingOffset = dwordOffset + 1; followingOffset != endOffset; ++followingOffset) {
            if (!userDataUsage->pushConstOffsets[followingOffset].users.empty()) {
              haveOverlap = true;
              break;
            }
          }
          if (haveOverlap) {
            userDataUsage->pushConstSpill = true;
            continue;
          }
          // Add the arg (part of the push const) that we can potentially unspill.
          addUserDataArg(userDataArgs, node.offsetInDwords + dwordOffset, pushConstOffset.dwordSize,
                         "pushConst" + Twine(dwordOffset), &pushConstOffset.entryArgIdx, builder);
        }
      } else {
        // Mark push constant for spill for compute library.
        userDataUsage->pushConstSpill = true;
      }
      // Ensure we mark the push constant's part of the spill table as used.
      if (userDataUsage->pushConstSpill)
        userDataUsage->spillUsage = std::min(userDataUsage->spillUsage, node.offsetInDwords);
      break;
    }
    default:
      // All other node types are treated as root descriptors: add an arg for each dword range actually used.
      if (isComputeWithCalls()) {
        // Always spill for compute libraries.
        break;
      }
      for (unsigned dwordOffset = node.offsetInDwords; dwordOffset != node.offsetInDwords + node.sizeInDwords;
           ++dwordOffset) {
        if (userDataUsage->rootDescriptors.size() <= dwordOffset)
          break;
        auto &rootDescUsage = userDataUsage->rootDescriptors[dwordOffset];
        // Skip unused descriptor.
        if (rootDescUsage.users.empty())
          continue;
        // NOTE(review): size taken from the first user's type; presumably all users of one root
        // descriptor load the same size -- confirm against DescBuilder.
        unsigned dwordSize = rootDescUsage.users[0]->getType()->getPrimitiveSizeInBits() / 32;
        // Add the arg (root descriptor) that we can potentially unspill.
        addUserDataArg(userDataArgs, dwordOffset, dwordSize, "rootDesc" + Twine(dwordOffset),
                       &rootDescUsage.entryArgIdx, builder);
      }
      break;
    }
  }
}
// =====================================================================================================================
// Add a single UserDataArg
//
// @param userDataArgs : Vector to add UserDataArg to
// @param userDataValue : PAL metadata user data value, ~0U (UserDataMapping::Invalid) for none
// @param sizeInDwords : Size of argument in dwords
// @param name : Name for the argument
// @param argIndex : Where to store arg index once it is allocated, nullptr for none
// @param builder : IRBuilder (just for getting types)
void PatchEntryPointMutate::addUserDataArg(SmallVectorImpl<UserDataArg> &userDataArgs, unsigned userDataValue,
                                           unsigned sizeInDwords, const Twine &name, unsigned *argIndex,
                                           IRBuilder<> &builder) {
  // A single dword is passed as a plain i32; anything bigger becomes an <N x i32> vector.
  Type *const i32Ty = builder.getInt32Ty();
  Type *const argTy = (sizeInDwords == 1) ? i32Ty : static_cast<Type *>(FixedVectorType::get(i32Ty, sizeInDwords));
  userDataArgs.push_back(UserDataArg(argTy, name, userDataValue, argIndex));
}
// =====================================================================================================================
// Determine which user data args are going to be "unspilled" (passed in shader entry SGPRs rather than loaded
// from spill table)
//
// @param userDataArgs : First array of UserDataArg structs for candidate args
// @param specialUserDataArgs : Second array of UserDataArg structs for candidate args
// @param builder : IRBuilder to get types from
// @param [out] unspilledArgs : Output vector of UserDataArg structs that will be "unspilled". Mostly these are
//                              copied from the input arrays, plus an extra one for the spill table pointer if
//                              needed.
void PatchEntryPointMutate::determineUnspilledUserDataArgs(ArrayRef<UserDataArg> userDataArgs,
                                                           ArrayRef<UserDataArg> specialUserDataArgs,
                                                           IRBuilder<> &builder,
                                                           SmallVectorImpl<UserDataArg> &unspilledArgs) {
  Optional<UserDataArg> spillTableArg;
  auto userDataUsage = getUserDataUsage(m_shaderStage);
  if (!userDataUsage->spillTable.users.empty() || userDataUsage->pushConstSpill ||
      userDataUsage->spillUsage != UINT_MAX) {
    // Spill table is already in use by code added in DescBuilder, or by uses of the push const pointer not
    // all being of the form that can be unspilled.
    spillTableArg = UserDataArg(builder.getInt32Ty(), "spillTable", UserDataMapping::SpillTable,
                                &userDataUsage->spillTable.entryArgIdx);
    // Determine the lowest offset at which the spill table is used, so we can set PAL metadata accordingly.
    // (This only covers uses of the spill table generated by DescBuilder. It excludes the push const and args
    // that are unspill candidates but we decide to spill; those ones are separately set in userDataUsage->spillUsage.)
    SmallVector<Instruction *, 4> spillUsers;
    spillUsers.insert(spillUsers.end(), userDataUsage->spillTable.users.begin(), userDataUsage->spillTable.users.end());
    unsigned minByteOffset = UINT_MAX;
    // Walk uses transitively through bitcasts. A GEP with a constant offset contributes that offset;
    // any other kind of use conservatively forces the minimum offset down to 0.
    for (unsigned i = 0; i != spillUsers.size(); ++i) {
      for (User *user : spillUsers[i]->users()) {
        auto inst = cast<Instruction>(user);
        if (isa<BitCastInst>(inst)) {
          spillUsers.push_back(inst);
          continue;
        }
        if (auto gep = dyn_cast<GetElementPtrInst>(inst)) {
          APInt gepOffset(64, 0);
          if (gep->accumulateConstantOffset(m_module->getDataLayout(), gepOffset)) {
            minByteOffset = std::min(minByteOffset, unsigned(gepOffset.getZExtValue()));
            continue;
          }
        }
        minByteOffset = 0;
        break;
      }
    }
    // In relocatable shader compilation userDataUsage is unknown until linking.
    if (minByteOffset != UINT_MAX && !m_pipelineState->isUnlinked())
      m_pipelineState->getPalMetadata()->setUserDataSpillUsage(std::min(userDataUsage->spillUsage, minByteOffset / 4));
  }
  // In compute-with-calls, we need to ensure that the compute shader and library code agree that s15 is the spill
  // table pointer, even if it is not needed, because library code does not know whether a spill table pointer is
  // needed in the pipeline. Thus we cannot use s15 for anything else. Using the single-arg UserDataArg
  // constructor like this means that the arg is not used, so it will not be set up in PAL metadata.
  if (m_computeWithCalls && !spillTableArg.hasValue())
    spillTableArg = UserDataArg(builder.getInt32Ty(), "spillTable", UserDataMapping::SpillTable,
                                &userDataUsage->spillTable.entryArgIdx);
  // Figure out how many sgprs we have available for userDataArgs.
  // We have s0-s31 (s0-s15 for <=GFX8, or for a compute shader on any chip) for everything, so take off the number
  // of registers used by specialUserDataArgs.
  unsigned userDataEnd = m_shaderStage == ShaderStageCompute
                             ? InterfaceData::MaxCsUserDataCount
                             : m_pipelineState->getTargetInfo().getGpuProperty().maxUserDataCount;
  // FIXME Restricting user data as the backend does not support more sgprs as arguments
  if (isComputeWithCalls() && userDataEnd > 16)
    userDataEnd = 16;
  for (auto &userDataArg : specialUserDataArgs)
    userDataEnd -= userDataArg.argDwordSize;
  // ... and the one used by the spill table if already added.
  if (spillTableArg.hasValue())
    userDataEnd -= 1;
  // See if we need to spill any user data nodes in userDataArgs, copying the unspilled ones across to unspilledArgs.
  unsigned userDataIdx = 0;
  for (const UserDataArg &userDataArg : userDataArgs) {
    unsigned afterUserDataIdx = userDataIdx + userDataArg.argDwordSize;
    if (afterUserDataIdx > userDataEnd) {
      // Spill this node. Allocate the spill table arg.
      if (!spillTableArg.hasValue()) {
        spillTableArg = UserDataArg(builder.getInt32Ty(), "spillTable", UserDataMapping::SpillTable,
                                    &userDataUsage->spillTable.entryArgIdx);
        --userDataEnd;
        if (userDataIdx > userDataEnd) {
          // We over-ran the available SGPRs by filling them up and then realizing we needed a spill table pointer.
          // Remove the last unspilled node (and any padding arg before that), and ensure that spill usage is
          // set correctly so that PAL metadata spill threshold is correct.
          // (Note that this path cannot happen in compute-with-calls, because we pre-reserved a slot for the
          // spill table pointer.)
          userDataIdx -= unspilledArgs.back().argDwordSize;
          userDataUsage->spillUsage = std::min(userDataUsage->spillUsage, unspilledArgs.back().userDataValue);
          unspilledArgs.pop_back();
        }
      } else if (!spillTableArg->argIndex) {
        // This is the compute-with-calls case that we reserved s15 for the spill table pointer above,
        // without setting its PAL metadata or spillTable.entryArgIdx, but now we find we do need to set
        // them.
        spillTableArg = UserDataArg(builder.getInt32Ty(), "spillTable", UserDataMapping::SpillTable,
                                    &userDataUsage->spillTable.entryArgIdx);
      }
      // Ensure that spillUsage includes this offset. (We might be on a compute shader padding node, in which
      // case userDataArg.userDataValue is Invalid, and this call has no effect.)
      userDataUsage->spillUsage = std::min(userDataUsage->spillUsage, userDataArg.userDataValue);
      continue;
    }
    // Keep this node on the unspilled list.
    userDataIdx = afterUserDataIdx;
    unspilledArgs.push_back(userDataArg);
  }
  // For compute-with-calls, add extra padding unspilled args until we get to s15. s15 will then be used for
  // the spill table pointer below, even if we didn't appear to need one.
  if (isComputeWithCalls()) {
    while (userDataIdx < userDataEnd) {
      unspilledArgs.push_back(UserDataArg(builder.getInt32Ty(), Twine()));
      ++userDataIdx;
    }
  }
  // Add the special args and the spill table pointer (if any) to unspilledArgs.
  // (specialUserDataArgs is empty for compute, and thus for compute-with-calls.)
  unspilledArgs.insert(unspilledArgs.end(), specialUserDataArgs.begin(), specialUserDataArgs.end());
  if (spillTableArg.hasValue())
    unspilledArgs.insert(unspilledArgs.end(), *spillTableArg);
}
// =====================================================================================================================
// Get UserDataUsage struct for the merged shader stage that contains the given shader stage
//
// @param stage : Shader stage
PatchEntryPointMutate::UserDataUsage *PatchEntryPointMutate::getUserDataUsage(ShaderStage stage) {
  // User data usage is tracked per *merged* hardware stage, so map the stage first.
  const ShaderStage mergedStage = getMergedShaderStage(stage);
  const size_t requiredSize = static_cast<size_t>(mergedStage) + 1;
  if (m_userDataUsage.size() < requiredSize)
    m_userDataUsage.resize(requiredSize);
  auto &slot = m_userDataUsage[mergedStage];
  if (!slot)
    slot = std::make_unique<UserDataUsage>();
  return slot.get();
}
// =====================================================================================================================
// Get the shader stage that the given shader stage is merged into.
// For GFX9+:
// VS -> TCS (if it exists)
// VS -> GS (if it exists)
// TES -> GS (if it exists)
//
// @param stage : Shader stage
ShaderStage PatchEntryPointMutate::getMergedShaderStage(ShaderStage stage) const {
  // Shader merging only exists on GFX9 and later; below that, every stage stands alone.
  if (m_pipelineState->getTargetInfo().getGfxIpVersion().major < 9)
    return stage;
  // VS merges into TCS when tessellation is present.
  if (stage == ShaderStageVertex && m_pipelineState->hasShaderStage(ShaderStageTessControl))
    return ShaderStageTessControl;
  // VS (no tessellation) or TES merges into GS when a geometry shader is present.
  if ((stage == ShaderStageVertex || stage == ShaderStageTessEval) &&
      m_pipelineState->hasShaderStage(ShaderStageGeometry))
    return ShaderStageGeometry;
  return stage;
}
// =====================================================================================================================
// Returns whether this compile is treated as "compute with calls" (value cached in m_computeWithCalls,
// set elsewhere in this pass).
bool PatchEntryPointMutate::isComputeWithCalls() const {
  return m_computeWithCalls;
}
// =====================================================================================================================
bool PatchEntryPointMutate::UserDataUsage::isSpecialUserDataUsed(UserDataMapping kind) {
  // Special user data entries are indexed relative to GlobalTable.
  const unsigned slot = static_cast<unsigned>(kind) - static_cast<unsigned>(UserDataMapping::GlobalTable);
  if (slot >= specialUserData.size())
    return false;
  return !specialUserData[slot].users.empty();
}
// =====================================================================================================================
// Initializes the pass of LLVM patching operations for entry-point mutation.
// Registers the legacy pass with LLVM's pass infrastructure (not a CFG-only pass, not an analysis).
INITIALIZE_PASS(LegacyPatchEntryPointMutate, DEBUG_TYPE, "Patch LLVM for entry-point mutation", false, false)
| 51.558766 | 120 | 0.654717 | [
"geometry",
"object",
"vector"
] |
f6d5603a5d630b0dd4dac294b0ca4d50d69e2ec9 | 9,543 | hpp | C++ | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lpts_pa_oper.hpp | CiscoDevNet/ydk-cpp | ef7d75970f2ef1154100e0f7b0a2ee823609b481 | [
"ECL-2.0",
"Apache-2.0"
] | 17 | 2016-12-02T05:45:49.000Z | 2022-02-10T19:32:54.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lpts_pa_oper.hpp | CiscoDevNet/ydk-cpp | ef7d75970f2ef1154100e0f7b0a2ee823609b481 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2017-03-27T15:22:38.000Z | 2019-11-05T08:30:16.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_lpts_pa_oper.hpp | CiscoDevNet/ydk-cpp | ef7d75970f2ef1154100e0f7b0a2ee823609b481 | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2016-12-02T05:45:52.000Z | 2019-11-07T08:28:17.000Z | #ifndef _CISCO_IOS_XR_LPTS_PA_OPER_
#define _CISCO_IOS_XR_LPTS_PA_OPER_
#include <memory>
#include <vector>
#include <string>
#include <ydk/types.hpp>
#include <ydk/errors.hpp>
namespace cisco_ios_xr {
namespace Cisco_IOS_XR_lpts_pa_oper {
// Auto-generated YDK binding for the top-level "lpts-pa" container of the
// Cisco-IOS-XR-lpts-pa-oper operational YANG model. Holds two child containers:
// entry_xr and entries.
class LptsPa : public ydk::Entity
{
    public:
        LptsPa();
        ~LptsPa();
        // Standard ydk::Entity overrides implementing the model's data/serialization contract.
        bool has_data() const override;
        bool has_operation() const override;
        std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
        std::string get_segment_path() const override;
        std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
        void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
        void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
        std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
        bool has_leaf_or_child_of_name(const std::string & name) const override;
        std::shared_ptr<ydk::Entity> clone_ptr() const override;
        ydk::augment_capabilities_function get_augment_capabilities_function() const override;
        std::string get_bundle_yang_models_location() const override;
        std::string get_bundle_name() const override;
        std::map<std::pair<std::string, std::string>, std::string> get_namespace_identity_lookup() const override;
        class EntryXr; //type: LptsPa::EntryXr
        class Entries; //type: LptsPa::Entries
        std::shared_ptr<cisco_ios_xr::Cisco_IOS_XR_lpts_pa_oper::LptsPa::EntryXr> entry_xr;
        std::shared_ptr<cisco_ios_xr::Cisco_IOS_XR_lpts_pa_oper::LptsPa::Entries> entries;
}; // LptsPa
// Auto-generated YDK binding for the "entry-xr" container: a flat list (ydk::YList)
// of Entry nodes.
class LptsPa::EntryXr : public ydk::Entity
{
    public:
        EntryXr();
        ~EntryXr();
        // Standard ydk::Entity overrides implementing the model's data/serialization contract.
        bool has_data() const override;
        bool has_operation() const override;
        std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
        std::string get_segment_path() const override;
        std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
        void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
        void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
        std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
        bool has_leaf_or_child_of_name(const std::string & name) const override;
        std::string get_absolute_path() const override;
        class Entry; //type: LptsPa::EntryXr::Entry
        ydk::YList entry;
}; // LptsPa::EntryXr
// Auto-generated YDK binding for a single node of the "entry-xr" list. Leaf types below
// are as declared by the YANG model (see the trailing //type: annotations); the exact
// semantics of each leaf come from the Cisco-IOS-XR-lpts-pa-oper model definition.
class LptsPa::EntryXr::Entry : public ydk::Entity
{
    public:
        Entry();
        ~Entry();
        // Standard ydk::Entity overrides implementing the model's data/serialization contract.
        bool has_data() const override;
        bool has_operation() const override;
        std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
        std::string get_segment_path() const override;
        std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
        void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
        void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
        std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
        bool has_leaf_or_child_of_name(const std::string & name) const override;
        std::string get_absolute_path() const override;
        ydk::YLeaf entry; //type: string
        ydk::YLeaf location; //type: uint32
        ydk::YLeaf client_id; //type: uint32
        ydk::YLeaf vid; //type: uint32
        ydk::YLeaf cookie; //type: uint32
        ydk::YLeaf l3protocol; //type: uint32
        ydk::YLeaf l4protocol; //type: uint32
        ydk::YLeaf smask; //type: uint32
        ydk::YLeaf ifs; //type: uint32
        ydk::YLeaf ptype; //type: uint32
        ydk::YLeaf local_ip; //type: string
        ydk::YLeaf remote_ip; //type: string
        ydk::YLeaf local_len; //type: uint8
        ydk::YLeaf remote_len; //type: uint8
        ydk::YLeaf local_port; //type: uint16
        ydk::YLeaf remote_port; //type: uint16
        ydk::YLeaf packet_misc; //type: uint32
        ydk::YLeaf scope; //type: uint32
        ydk::YLeaf client_flags; //type: uint32
        ydk::YLeaf min_ttl; //type: uint8
        ydk::YLeaf lazy_bindq_delay; //type: uint32
        ydk::YLeaf ptq_delay; //type: uint32
        class Ctime; //type: LptsPa::EntryXr::Entry::Ctime
        class Utime; //type: LptsPa::EntryXr::Entry::Utime
        std::shared_ptr<cisco_ios_xr::Cisco_IOS_XR_lpts_pa_oper::LptsPa::EntryXr::Entry::Ctime> ctime;
        std::shared_ptr<cisco_ios_xr::Cisco_IOS_XR_lpts_pa_oper::LptsPa::EntryXr::Entry::Utime> utime;
}; // LptsPa::EntryXr::Entry
class LptsPa::EntryXr::Entry::Ctime : public ydk::Entity
{
public:
Ctime();
~Ctime();
bool has_data() const override;
bool has_operation() const override;
std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
std::string get_segment_path() const override;
std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
bool has_leaf_or_child_of_name(const std::string & name) const override;
ydk::YLeaf tv_sec; //type: uint32
ydk::YLeaf tv_nsec; //type: uint32
}; // LptsPa::EntryXr::Entry::Ctime
class LptsPa::EntryXr::Entry::Utime : public ydk::Entity
{
public:
Utime();
~Utime();
bool has_data() const override;
bool has_operation() const override;
std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
std::string get_segment_path() const override;
std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
bool has_leaf_or_child_of_name(const std::string & name) const override;
ydk::YLeaf tv_sec; //type: uint32
ydk::YLeaf tv_nsec; //type: uint32
}; // LptsPa::EntryXr::Entry::Utime
class LptsPa::Entries : public ydk::Entity
{
public:
Entries();
~Entries();
bool has_data() const override;
bool has_operation() const override;
std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
std::string get_segment_path() const override;
std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
bool has_leaf_or_child_of_name(const std::string & name) const override;
std::string get_absolute_path() const override;
class Entry; //type: LptsPa::Entries::Entry
ydk::YList entry;
}; // LptsPa::Entries
class LptsPa::Entries::Entry : public ydk::Entity
{
public:
Entry();
~Entry();
bool has_data() const override;
bool has_operation() const override;
std::vector<std::pair<std::string, ydk::LeafData> > get_name_leaf_data() const override;
std::string get_segment_path() const override;
std::shared_ptr<ydk::Entity> get_child_by_name(const std::string & yang_name, const std::string & segment_path) override;
void set_value(const std::string & value_path, const std::string & value, const std::string & name_space, const std::string & name_space_prefix) override;
void set_filter(const std::string & value_path, ydk::YFilter yfliter) override;
std::map<std::string, std::shared_ptr<ydk::Entity>> get_children() const override;
bool has_leaf_or_child_of_name(const std::string & name) const override;
std::string get_absolute_path() const override;
ydk::YLeaf entry; //type: string
ydk::YLeaf flags; //type: uint32
ydk::YLeaf open_flags; //type: uint32
ydk::YLeaf location; //type: uint32
ydk::YLeaf client_id; //type: uint32
ydk::YLeaf times; //type: string
}; // LptsPa::Entries::Entry
}
}
#endif /* _CISCO_IOS_XR_LPTS_PA_OPER_ */
| 44.386047 | 162 | 0.673792 | [
"vector"
] |
f6d5de04ce6714901c1cbb374c0378f87da74073 | 28,491 | cpp | C++ | telegram/TMessagesProj/jni/TgNetWrapper.cpp | SAFE-anwang/SafeWallet-android | ac1ddfc262b34e398b4504c65ac74911b7ca4381 | [
"MIT"
] | null | null | null | telegram/TMessagesProj/jni/TgNetWrapper.cpp | SAFE-anwang/SafeWallet-android | ac1ddfc262b34e398b4504c65ac74911b7ca4381 | [
"MIT"
] | null | null | null | telegram/TMessagesProj/jni/TgNetWrapper.cpp | SAFE-anwang/SafeWallet-android | ac1ddfc262b34e398b4504c65ac74911b7ca4381 | [
"MIT"
] | 2 | 2022-03-26T11:06:16.000Z | 2022-03-26T11:13:21.000Z | #include <jni.h>
#include "tgnet/ApiScheme.h"
#include "tgnet/BuffersStorage.h"
#include "tgnet/NativeByteBuffer.h"
#include "tgnet/ConnectionsManager.h"
#include "tgnet/MTProtoScheme.h"
#include "tgnet/ConnectionSocket.h"
JavaVM *java;
jclass jclass_RequestDelegateInternal;
jmethodID jclass_RequestDelegateInternal_run;
jclass jclass_RequestTimeDelegate;
jmethodID jclass_RequestTimeDelegate_run;
jclass jclass_QuickAckDelegate;
jmethodID jclass_QuickAckDelegate_run;
jclass jclass_WriteToSocketDelegate;
jmethodID jclass_WriteToSocketDelegate_run;
jclass jclass_ConnectionsManager;
jmethodID jclass_ConnectionsManager_onUnparsedMessageReceived;
jmethodID jclass_ConnectionsManager_onUpdate;
jmethodID jclass_ConnectionsManager_onSessionCreated;
jmethodID jclass_ConnectionsManager_onLogout;
jmethodID jclass_ConnectionsManager_onConnectionStateChanged;
jmethodID jclass_ConnectionsManager_onInternalPushReceived;
jmethodID jclass_ConnectionsManager_onUpdateConfig;
jmethodID jclass_ConnectionsManager_onBytesSent;
jmethodID jclass_ConnectionsManager_onBytesReceived;
jmethodID jclass_ConnectionsManager_onRequestNewServerIpAndPort;
jmethodID jclass_ConnectionsManager_onProxyError;
jmethodID jclass_ConnectionsManager_getHostByName;
jmethodID jclass_ConnectionsManager_getInitFlags;
bool check_utf8(const char *data, size_t len);
jlong getFreeBuffer(JNIEnv *env, jclass c, jint length) {
return (jlong) (intptr_t) BuffersStorage::getInstance().getFreeBuffer((uint32_t) length);
}
jint limit(JNIEnv *env, jclass c, jlong address) {
NativeByteBuffer *buffer = (NativeByteBuffer *) (intptr_t) address;
return buffer->limit();
}
jint position(JNIEnv *env, jclass c, jlong address) {
NativeByteBuffer *buffer = (NativeByteBuffer *) (intptr_t) address;
return buffer->position();
}
void reuse(JNIEnv *env, jclass c, jlong address) {
NativeByteBuffer *buffer = (NativeByteBuffer *) (intptr_t) address;
buffer->reuse();
}
jobject getJavaByteBuffer(JNIEnv *env, jclass c, jlong address) {
NativeByteBuffer *buffer = (NativeByteBuffer *) (intptr_t) address;
if (buffer == nullptr) {
return nullptr;
}
return buffer->getJavaByteBuffer();
}
static const char *NativeByteBufferClassPathName = "org/telegram/tgnet/NativeByteBuffer";
static JNINativeMethod NativeByteBufferMethods[] = {
{"native_getFreeBuffer", "(I)J", (void *) getFreeBuffer},
{"native_limit", "(J)I", (void *) limit},
{"native_position", "(J)I", (void *) position},
{"native_reuse", "(J)V", (void *) reuse},
{"native_getJavaByteBuffer", "(J)Ljava/nio/ByteBuffer;", (void *) getJavaByteBuffer}
};
jlong getCurrentTimeMillis(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).getCurrentTimeMillis() + ((jlong) ConnectionsManager::getInstance(instanceNum).getTimeDifference()) * 1000;
}
jint getCurrentTime(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).getCurrentTime();
}
jint getCurrentDatacenterId(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).getCurrentDatacenterId();
}
jint isTestBackend(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).isTestBackend() ? 1 : 0;
}
jint getTimeDifference(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).getTimeDifference();
}
void sendRequest(JNIEnv *env, jclass c, jint instanceNum, jlong object, jobject onComplete, jobject onQuickAck, jobject onWriteToSocket, jint flags, jint datacenterId, jint connetionType, jboolean immediate, jint token) {
TL_api_request *request = new TL_api_request();
request->request = (NativeByteBuffer *) (intptr_t) object;
if (onComplete != nullptr) {
onComplete = env->NewGlobalRef(onComplete);
}
if (onQuickAck != nullptr) {
onQuickAck = env->NewGlobalRef(onQuickAck);
}
if (onWriteToSocket != nullptr) {
onWriteToSocket = env->NewGlobalRef(onWriteToSocket);
}
ConnectionsManager::getInstance(instanceNum).sendRequest(request, ([onComplete, instanceNum](TLObject *response, TL_error *error, int32_t networkType, int64_t responseTime) {
TL_api_response *resp = (TL_api_response *) response;
jlong ptr = 0;
jint errorCode = 0;
jstring errorText = nullptr;
if (resp != nullptr) {
ptr = (jlong) resp->response.get();
} else if (error != nullptr) {
errorCode = error->code;
const char *text = error->text.c_str();
size_t size = error->text.size();
if (check_utf8(text, size)) {
errorText = jniEnv[instanceNum]->NewStringUTF(text);
} else {
errorText = jniEnv[instanceNum]->NewStringUTF("UTF-8 ERROR");
}
}
if (onComplete != nullptr) {
jniEnv[instanceNum]->CallVoidMethod(onComplete, jclass_RequestDelegateInternal_run, ptr, errorCode, errorText, networkType, responseTime);
}
if (errorText != nullptr) {
jniEnv[instanceNum]->DeleteLocalRef(errorText);
}
}), ([onQuickAck, instanceNum] {
if (onQuickAck != nullptr) {
jniEnv[instanceNum]->CallVoidMethod(onQuickAck, jclass_QuickAckDelegate_run);
}
}), ([onWriteToSocket, instanceNum] {
if (onWriteToSocket != nullptr) {
jniEnv[instanceNum]->CallVoidMethod(onWriteToSocket, jclass_WriteToSocketDelegate_run);
}
}), (uint32_t) flags, (uint32_t) datacenterId, (ConnectionType) connetionType, immediate, token, onComplete, onQuickAck, onWriteToSocket);
}
void cancelRequest(JNIEnv *env, jclass c, jint instanceNum, jint token, jboolean notifyServer) {
return ConnectionsManager::getInstance(instanceNum).cancelRequest(token, notifyServer);
}
void cleanUp(JNIEnv *env, jclass c, jint instanceNum, jboolean resetKeys) {
return ConnectionsManager::getInstance(instanceNum).cleanUp(resetKeys, -1);
}
void cancelRequestsForGuid(JNIEnv *env, jclass c, jint instanceNum, jint guid) {
return ConnectionsManager::getInstance(instanceNum).cancelRequestsForGuid(guid);
}
void bindRequestToGuid(JNIEnv *env, jclass c, jint instanceNum, jint requestToken, jint guid) {
return ConnectionsManager::getInstance(instanceNum).bindRequestToGuid(requestToken, guid);
}
void applyDatacenterAddress(JNIEnv *env, jclass c, jint instanceNum, jint datacenterId, jstring ipAddress, jint port) {
const char *valueStr = env->GetStringUTFChars(ipAddress, 0);
ConnectionsManager::getInstance(instanceNum).applyDatacenterAddress((uint32_t) datacenterId, std::string(valueStr), (uint32_t) port);
if (valueStr != 0) {
env->ReleaseStringUTFChars(ipAddress, valueStr);
}
}
void setProxySettings(JNIEnv *env, jclass c, jint instanceNum, jstring address, jint port, jstring username, jstring password, jstring secret) {
const char *addressStr = env->GetStringUTFChars(address, 0);
const char *usernameStr = env->GetStringUTFChars(username, 0);
const char *passwordStr = env->GetStringUTFChars(password, 0);
const char *secretStr = env->GetStringUTFChars(secret, 0);
ConnectionsManager::getInstance(instanceNum).setProxySettings(addressStr, (uint16_t) port, usernameStr, passwordStr, secretStr);
if (addressStr != 0) {
env->ReleaseStringUTFChars(address, addressStr);
}
if (usernameStr != 0) {
env->ReleaseStringUTFChars(username, usernameStr);
}
if (passwordStr != 0) {
env->ReleaseStringUTFChars(password, passwordStr);
}
if (secretStr != 0) {
env->ReleaseStringUTFChars(secret, secretStr);
}
}
jint getConnectionState(JNIEnv *env, jclass c, jint instanceNum) {
return ConnectionsManager::getInstance(instanceNum).getConnectionState();
}
void setUserId(JNIEnv *env, jclass c, jint instanceNum, int64_t id) {
ConnectionsManager::getInstance(instanceNum).setUserId(id);
}
void switchBackend(JNIEnv *env, jclass c, jint instanceNum, jboolean restart) {
ConnectionsManager::getInstance(instanceNum).switchBackend(restart);
}
void pauseNetwork(JNIEnv *env, jclass c, jint instanceNum) {
ConnectionsManager::getInstance(instanceNum).pauseNetwork();
}
void resumeNetwork(JNIEnv *env, jclass c, jint instanceNum, jboolean partial) {
ConnectionsManager::getInstance(instanceNum).resumeNetwork(partial);
}
void updateDcSettings(JNIEnv *env, jclass c, jint instanceNum) {
ConnectionsManager::getInstance(instanceNum).updateDcSettings(0, false);
}
void setIpStrategy(JNIEnv *env, jclass c, jint instanceNum, jbyte value) {
ConnectionsManager::getInstance(instanceNum).setIpStrategy((uint8_t) value);
}
void setNetworkAvailable(JNIEnv *env, jclass c, jint instanceNum, jboolean value, jint networkType, jboolean slow) {
ConnectionsManager::getInstance(instanceNum).setNetworkAvailable(value, networkType, slow);
}
void setPushConnectionEnabled(JNIEnv *env, jclass c, jint instanceNum, jboolean value) {
ConnectionsManager::getInstance(instanceNum).setPushConnectionEnabled(value);
}
void applyDnsConfig(JNIEnv *env, jclass c, jint instanceNum, jlong address, jstring phone, jint date) {
const char *phoneStr = env->GetStringUTFChars(phone, 0);
ConnectionsManager::getInstance(instanceNum).applyDnsConfig((NativeByteBuffer *) (intptr_t) address, phoneStr, date);
if (phoneStr != 0) {
env->ReleaseStringUTFChars(phone, phoneStr);
}
}
jlong checkProxy(JNIEnv *env, jclass c, jint instanceNum, jstring address, jint port, jstring username, jstring password, jstring secret, jobject requestTimeFunc) {
const char *addressStr = env->GetStringUTFChars(address, 0);
const char *usernameStr = env->GetStringUTFChars(username, 0);
const char *passwordStr = env->GetStringUTFChars(password, 0);
const char *secretStr = env->GetStringUTFChars(secret, 0);
if (requestTimeFunc != nullptr) {
requestTimeFunc = env->NewGlobalRef(requestTimeFunc);
}
jlong result = ConnectionsManager::getInstance(instanceNum).checkProxy(addressStr, (uint16_t) port, usernameStr, passwordStr, secretStr, [instanceNum, requestTimeFunc](int64_t time) {
if (requestTimeFunc != nullptr) {
jniEnv[instanceNum]->CallVoidMethod(requestTimeFunc, jclass_RequestTimeDelegate_run, time);
}
}, requestTimeFunc);
if (addressStr != 0) {
env->ReleaseStringUTFChars(address, addressStr);
}
if (usernameStr != 0) {
env->ReleaseStringUTFChars(username, usernameStr);
}
if (passwordStr != 0) {
env->ReleaseStringUTFChars(password, passwordStr);
}
if (secretStr != 0) {
env->ReleaseStringUTFChars(secret, secretStr);
}
return result;
}
class Delegate : public ConnectiosManagerDelegate {
void onUpdate(int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onUpdate, instanceNum);
}
void onSessionCreated(int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onSessionCreated, instanceNum);
}
void onConnectionStateChanged(ConnectionState state, int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onConnectionStateChanged, state, instanceNum);
}
void onUnparsedMessageReceived(int64_t reqMessageId, NativeByteBuffer *buffer, ConnectionType connectionType, int32_t instanceNum) {
if (connectionType == ConnectionTypeGeneric) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onUnparsedMessageReceived, (jlong) (intptr_t) buffer, instanceNum);
}
}
void onLogout(int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onLogout, instanceNum);
}
void onUpdateConfig(TL_config *config, int32_t instanceNum) {
NativeByteBuffer *buffer = BuffersStorage::getInstance().getFreeBuffer(config->getObjectSize());
config->serializeToStream(buffer);
buffer->position(0);
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onUpdateConfig, (jlong) (intptr_t) buffer, instanceNum);
buffer->reuse();
}
void onInternalPushReceived(int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onInternalPushReceived, instanceNum);
}
void onBytesReceived(int32_t amount, int32_t networkType, int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onBytesReceived, amount, networkType, instanceNum);
}
void onBytesSent(int32_t amount, int32_t networkType, int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onBytesSent, amount, networkType, instanceNum);
}
void onRequestNewServerIpAndPort(int32_t second, int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onRequestNewServerIpAndPort, second, instanceNum);
}
void onProxyError(int32_t instanceNum) {
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_onProxyError);
}
void getHostByName(std::string domain, int32_t instanceNum, ConnectionSocket *socket) {
jstring domainName = jniEnv[instanceNum]->NewStringUTF(domain.c_str());
jniEnv[instanceNum]->CallStaticVoidMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_getHostByName, domainName, (jlong) (intptr_t) socket);
jniEnv[instanceNum]->DeleteLocalRef(domainName);
}
int32_t getInitFlags(int32_t instanceNum) {
return (int32_t) jniEnv[instanceNum]->CallStaticIntMethod(jclass_ConnectionsManager, jclass_ConnectionsManager_getInitFlags);
}
};
void onHostNameResolved(JNIEnv *env, jclass c, jstring host, jlong address, jstring ip) {
const char *ipStr = env->GetStringUTFChars(ip, 0);
const char *hostStr = env->GetStringUTFChars(host, 0);
std::string i = std::string(ipStr);
std::string h = std::string(hostStr);
if (ipStr != 0) {
env->ReleaseStringUTFChars(ip, ipStr);
}
if (hostStr != 0) {
env->ReleaseStringUTFChars(host, hostStr);
}
ConnectionSocket *socket = (ConnectionSocket *) (intptr_t) address;
socket->onHostNameResolved(h, i, false);
}
void setLangCode(JNIEnv *env, jclass c, jint instanceNum, jstring langCode) {
const char *langCodeStr = env->GetStringUTFChars(langCode, 0);
ConnectionsManager::getInstance(instanceNum).setLangCode(std::string(langCodeStr));
if (langCodeStr != 0) {
env->ReleaseStringUTFChars(langCode, langCodeStr);
}
}
void setRegId(JNIEnv *env, jclass c, jint instanceNum, jstring regId) {
const char *regIdStr = env->GetStringUTFChars(regId, 0);
ConnectionsManager::getInstance(instanceNum).setRegId(std::string(regIdStr));
if (regIdStr != 0) {
env->ReleaseStringUTFChars(regId, regIdStr);
}
}
void setSystemLangCode(JNIEnv *env, jclass c, jint instanceNum, jstring langCode) {
const char *langCodeStr = env->GetStringUTFChars(langCode, 0);
ConnectionsManager::getInstance(instanceNum).setSystemLangCode(std::string(langCodeStr));
if (langCodeStr != 0) {
env->ReleaseStringUTFChars(langCode, langCodeStr);
}
}
void init(JNIEnv *env, jclass c, jint instanceNum, jint version, jint layer, jint apiId, jstring deviceModel, jstring systemVersion, jstring appVersion, jstring langCode, jstring systemLangCode, jstring configPath, jstring logPath, jstring regId, jstring cFingerprint, jstring installerId, jstring packageId, jint timezoneOffset, jlong userId, jboolean enablePushConnection, jboolean hasNetwork, jint networkType) {
const char *deviceModelStr = env->GetStringUTFChars(deviceModel, 0);
const char *systemVersionStr = env->GetStringUTFChars(systemVersion, 0);
const char *appVersionStr = env->GetStringUTFChars(appVersion, 0);
const char *langCodeStr = env->GetStringUTFChars(langCode, 0);
const char *systemLangCodeStr = env->GetStringUTFChars(systemLangCode, 0);
const char *configPathStr = env->GetStringUTFChars(configPath, 0);
const char *logPathStr = env->GetStringUTFChars(logPath, 0);
const char *regIdStr = env->GetStringUTFChars(regId, 0);
const char *cFingerprintStr = env->GetStringUTFChars(cFingerprint, 0);
const char *installerIdStr = env->GetStringUTFChars(installerId, 0);
const char *packageIdStr = env->GetStringUTFChars(packageId, 0);
ConnectionsManager::getInstance(instanceNum).init((uint32_t) version, layer, apiId, std::string(deviceModelStr), std::string(systemVersionStr), std::string(appVersionStr), std::string(langCodeStr), std::string(systemLangCodeStr), std::string(configPathStr), std::string(logPathStr), std::string(regIdStr), std::string(cFingerprintStr), std::string(installerIdStr), std::string(packageIdStr), timezoneOffset, userId, true, enablePushConnection, hasNetwork, networkType);
if (deviceModelStr != 0) {
env->ReleaseStringUTFChars(deviceModel, deviceModelStr);
}
if (systemVersionStr != 0) {
env->ReleaseStringUTFChars(systemVersion, systemVersionStr);
}
if (appVersionStr != 0) {
env->ReleaseStringUTFChars(appVersion, appVersionStr);
}
if (langCodeStr != 0) {
env->ReleaseStringUTFChars(langCode, langCodeStr);
}
if (systemLangCodeStr != 0) {
env->ReleaseStringUTFChars(systemLangCode, systemLangCodeStr);
}
if (configPathStr != 0) {
env->ReleaseStringUTFChars(configPath, configPathStr);
}
if (logPathStr != 0) {
env->ReleaseStringUTFChars(logPath, logPathStr);
}
if (regIdStr != 0) {
env->ReleaseStringUTFChars(regId, regIdStr);
}
if (cFingerprintStr != 0) {
env->ReleaseStringUTFChars(cFingerprint, cFingerprintStr);
}
if (installerIdStr != 0) {
env->ReleaseStringUTFChars(installerId, installerIdStr);
}
if (packageIdStr != 0) {
env->ReleaseStringUTFChars(packageId, packageIdStr);
}
}
void setJava(JNIEnv *env, jclass c, jboolean useJavaByteBuffers) {
ConnectionsManager::useJavaVM(java, useJavaByteBuffers);
for (int a = 0; a < MAX_ACCOUNT_COUNT; a++) {
ConnectionsManager::getInstance(a).setDelegate(new Delegate());
}
}
static const char *ConnectionsManagerClassPathName = "org/telegram/tgnet/ConnectionsManager";
static JNINativeMethod ConnectionsManagerMethods[] = {
{"native_getCurrentTimeMillis", "(I)J", (void *) getCurrentTimeMillis},
{"native_getCurrentTime", "(I)I", (void *) getCurrentTime},
{"native_getCurrentDatacenterId", "(I)I", (void *) getCurrentDatacenterId},
{"native_isTestBackend", "(I)I", (void *) isTestBackend},
{"native_getTimeDifference", "(I)I", (void *) getTimeDifference},
{"native_sendRequest", "(IJLorg/telegram/tgnet/RequestDelegateInternal;Lorg/telegram/tgnet/QuickAckDelegate;Lorg/telegram/tgnet/WriteToSocketDelegate;IIIZI)V", (void *) sendRequest},
{"native_cancelRequest", "(IIZ)V", (void *) cancelRequest},
{"native_cleanUp", "(IZ)V", (void *) cleanUp},
{"native_cancelRequestsForGuid", "(II)V", (void *) cancelRequestsForGuid},
{"native_bindRequestToGuid", "(III)V", (void *) bindRequestToGuid},
{"native_applyDatacenterAddress", "(IILjava/lang/String;I)V", (void *) applyDatacenterAddress},
{"native_setProxySettings", "(ILjava/lang/String;ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)V", (void *) setProxySettings},
{"native_getConnectionState", "(I)I", (void *) getConnectionState},
{"native_setUserId", "(IJ)V", (void *) setUserId},
{"native_init", "(IIIILjava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;IJZZI)V", (void *) init},
{"native_setLangCode", "(ILjava/lang/String;)V", (void *) setLangCode},
{"native_setRegId", "(ILjava/lang/String;)V", (void *) setRegId},
{"native_setSystemLangCode", "(ILjava/lang/String;)V", (void *) setSystemLangCode},
{"native_switchBackend", "(IZ)V", (void *) switchBackend},
{"native_pauseNetwork", "(I)V", (void *) pauseNetwork},
{"native_resumeNetwork", "(IZ)V", (void *) resumeNetwork},
{"native_updateDcSettings", "(I)V", (void *) updateDcSettings},
{"native_setIpStrategy", "(IB)V", (void *) setIpStrategy},
{"native_setNetworkAvailable", "(IZIZ)V", (void *) setNetworkAvailable},
{"native_setPushConnectionEnabled", "(IZ)V", (void *) setPushConnectionEnabled},
{"native_setJava", "(Z)V", (void *) setJava},
{"native_applyDnsConfig", "(IJLjava/lang/String;I)V", (void *) applyDnsConfig},
{"native_checkProxy", "(ILjava/lang/String;ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lorg/telegram/tgnet/RequestTimeDelegate;)J", (void *) checkProxy},
{"native_onHostNameResolved", "(Ljava/lang/String;JLjava/lang/String;)V", (void *) onHostNameResolved}
};
inline int registerNativeMethods(JNIEnv *env, const char *className, JNINativeMethod *methods, int methodsCount) {
jclass clazz;
clazz = env->FindClass(className);
if (clazz == NULL) {
return JNI_FALSE;
}
if (env->RegisterNatives(clazz, methods, methodsCount) < 0) {
return JNI_FALSE;
}
return JNI_TRUE;
}
extern "C" int registerNativeTgNetFunctions(JavaVM *vm, JNIEnv *env) {
java = vm;
if (!registerNativeMethods(env, NativeByteBufferClassPathName, NativeByteBufferMethods, sizeof(NativeByteBufferMethods) / sizeof(NativeByteBufferMethods[0]))) {
return JNI_FALSE;
}
if (!registerNativeMethods(env, ConnectionsManagerClassPathName, ConnectionsManagerMethods, sizeof(ConnectionsManagerMethods) / sizeof(ConnectionsManagerMethods[0]))) {
return JNI_FALSE;
}
jclass_RequestDelegateInternal = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/tgnet/RequestDelegateInternal"));
if (jclass_RequestDelegateInternal == 0) {
return JNI_FALSE;
}
jclass_RequestDelegateInternal_run = env->GetMethodID(jclass_RequestDelegateInternal, "run", "(JILjava/lang/String;IJ)V");
if (jclass_RequestDelegateInternal_run == 0) {
return JNI_FALSE;
}
jclass_RequestTimeDelegate = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/tgnet/RequestTimeDelegate"));
if (jclass_RequestTimeDelegate == 0) {
return JNI_FALSE;
}
jclass_RequestTimeDelegate_run = env->GetMethodID(jclass_RequestTimeDelegate, "run", "(J)V");
if (jclass_RequestTimeDelegate_run == 0) {
return JNI_FALSE;
}
jclass_QuickAckDelegate = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/tgnet/QuickAckDelegate"));
if (jclass_RequestDelegateInternal == 0) {
return JNI_FALSE;
}
jclass_QuickAckDelegate_run = env->GetMethodID(jclass_QuickAckDelegate, "run", "()V");
if (jclass_QuickAckDelegate_run == 0) {
return JNI_FALSE;
}
jclass_WriteToSocketDelegate = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/tgnet/WriteToSocketDelegate"));
if (jclass_WriteToSocketDelegate == 0) {
return JNI_FALSE;
}
jclass_WriteToSocketDelegate_run = env->GetMethodID(jclass_WriteToSocketDelegate, "run", "()V");
if (jclass_WriteToSocketDelegate_run == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager = (jclass) env->NewGlobalRef(env->FindClass("org/telegram/tgnet/ConnectionsManager"));
if (jclass_ConnectionsManager == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onUnparsedMessageReceived = env->GetStaticMethodID(jclass_ConnectionsManager, "onUnparsedMessageReceived", "(JI)V");
if (jclass_ConnectionsManager_onUnparsedMessageReceived == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onUpdate = env->GetStaticMethodID(jclass_ConnectionsManager, "onUpdate", "(I)V");
if (jclass_ConnectionsManager_onUpdate == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onSessionCreated = env->GetStaticMethodID(jclass_ConnectionsManager, "onSessionCreated", "(I)V");
if (jclass_ConnectionsManager_onSessionCreated == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onLogout = env->GetStaticMethodID(jclass_ConnectionsManager, "onLogout", "(I)V");
if (jclass_ConnectionsManager_onLogout == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onConnectionStateChanged = env->GetStaticMethodID(jclass_ConnectionsManager, "onConnectionStateChanged", "(II)V");
if (jclass_ConnectionsManager_onConnectionStateChanged == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onInternalPushReceived = env->GetStaticMethodID(jclass_ConnectionsManager, "onInternalPushReceived", "(I)V");
if (jclass_ConnectionsManager_onInternalPushReceived == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onUpdateConfig = env->GetStaticMethodID(jclass_ConnectionsManager, "onUpdateConfig", "(JI)V");
if (jclass_ConnectionsManager_onUpdateConfig == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onBytesSent = env->GetStaticMethodID(jclass_ConnectionsManager, "onBytesSent", "(III)V");
if (jclass_ConnectionsManager_onBytesSent == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onBytesReceived = env->GetStaticMethodID(jclass_ConnectionsManager, "onBytesReceived", "(III)V");
if (jclass_ConnectionsManager_onBytesReceived == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onRequestNewServerIpAndPort = env->GetStaticMethodID(jclass_ConnectionsManager, "onRequestNewServerIpAndPort", "(II)V");
if (jclass_ConnectionsManager_onRequestNewServerIpAndPort == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_onProxyError = env->GetStaticMethodID(jclass_ConnectionsManager, "onProxyError", "()V");
if (jclass_ConnectionsManager_onProxyError == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_getHostByName = env->GetStaticMethodID(jclass_ConnectionsManager, "getHostByName", "(Ljava/lang/String;J)V");
if (jclass_ConnectionsManager_getHostByName == 0) {
return JNI_FALSE;
}
jclass_ConnectionsManager_getInitFlags = env->GetStaticMethodID(jclass_ConnectionsManager, "getInitFlags", "()I");
if (jclass_ConnectionsManager_getInitFlags == 0) {
return JNI_FALSE;
}
return JNI_TRUE;
}
//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2018
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Validates that the first |len| bytes of |data| are well-formed UTF-8.
//
// Precondition: the buffer must be NUL-terminated (data[len] == '\0').
// The scan intentionally reads that terminator: the loop only exits
// successfully from the ASCII branch, once the cursor has advanced exactly
// one byte past the payload. Overlong encodings, UTF-16 surrogates
// (U+D800..U+DFFF) and code points above U+10FFFF are all rejected.
bool check_utf8(const char *data, size_t len) {
  const char *data_end = data + len;
  for (;;) {
    unsigned int byte1 = (unsigned char) (*data++);
    if ((byte1 & 0x80) == 0) {
      // ASCII byte (this also consumes the trailing NUL sentinel).
      if (data == data_end + 1) {
        return true;
      }
      continue;
    }
    // A lead byte must be in 0xC0..0xFF; a stray continuation byte fails here.
    if ((byte1 & 0x40) == 0) {
      return false;
    }
    unsigned int byte2 = (unsigned char) (*data++);
    if ((byte2 & 0xc0) != 0x80) {
      return false;  // not a continuation byte
    }
    if ((byte1 & 0x20) == 0) {
      // Two-byte sequence: leads 0xC0/0xC1 would be overlong encodings.
      if ((byte1 & 0x1e) == 0) {
        return false;
      }
      continue;
    }
    unsigned int byte3 = (unsigned char) (*data++);
    if ((byte3 & 0xc0) != 0x80) {
      return false;
    }
    if ((byte1 & 0x10) == 0) {
      // Three-byte sequence: reject overlong forms (x == 0) and UTF-16
      // surrogates U+D800..U+DFFF (x == 0x360).
      int x = (((byte1 & 0x0f) << 6) | (byte2 & 0x20));
      if (x == 0 || x == 0x360) {
        return false;
      }
      continue;
    }
    unsigned int byte4 = (unsigned char) (*data++);
    if ((byte4 & 0xc0) != 0x80) {
      return false;
    }
    // Lead bytes 0xF8..0xFF (5+-byte forms) are invalid.
    if ((byte1 & 0x08) != 0) {
      return false;
    }
    // Four-byte sequence: the code point must lie in U+10000..U+10FFFF.
    int t = (((byte1 & 0x07) << 6) | (byte2 & 0x30));
    if (t <= 0 || t >= 0x110) {
      return false;
    }
  }
}
| 44.938486 | 473 | 0.716016 | [
"object"
] |
f6d7088395f18f70e2b3d0954a082cee91cef16c | 12,321 | cpp | C++ | gm/attributes.cpp | TheRakeshPurohit/skia | 817dd601f85f986a99d102de8dc42ee8638a56f9 | [
"BSD-3-Clause"
] | null | null | null | gm/attributes.cpp | TheRakeshPurohit/skia | 817dd601f85f986a99d102de8dc42ee8638a56f9 | [
"BSD-3-Clause"
] | null | null | null | gm/attributes.cpp | TheRakeshPurohit/skia | 817dd601f85f986a99d102de8dc42ee8638a56f9 | [
"BSD-3-Clause"
] | null | null | null | /*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "gm/gm.h"
#include "include/core/SkPoint.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkCanvasPriv.h"
#include "src/gpu/KeyBuilder.h"
#include "src/gpu/ganesh/GrBuffer.h"
#include "src/gpu/ganesh/GrGeometryProcessor.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrProcessor.h"
#include "src/gpu/ganesh/GrProcessorSet.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/GrShaderVar.h"
#include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
#include "src/gpu/ganesh/ops/GrDrawOp.h"
#include "src/gpu/ganesh/ops/GrOp.h"
#include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"
#include "tools/gpu/ProxyUtils.h"
#include <memory>
#include <vector>
class GrAppliedClip;
class GrGLSLProgramDataManager;
namespace {
// Selects how AttributeTestProcessor declares its vertex attribute layout.
enum class AttrMode {
    // Offsets and stride computed implicitly from the attribute list.
    kAuto,
    // Same layout as kAuto, but offsets and stride are specified explicitly.
    kManual,
    // Explicit layout containing padding and two attributes aliased to offset 0.
    kWacky
};
// Geometry processor whose only purpose is to exercise the three attribute
// declaration styles in AttrMode. The mode is baked into the program key so
// each mode compiles a distinct program.
class AttributeTestProcessor : public GrGeometryProcessor {
public:
    // Arena-allocates a processor for the given attribute mode.
    static GrGeometryProcessor* Make(SkArenaAlloc* arena, AttrMode mode) {
        return arena->make([&](void* ptr) { return new (ptr) AttributeTestProcessor(mode); });
    }
    const char* name() const final { return "AttributeTestProcessor"; }
    void addToKey(const GrShaderCaps&, skgpu::KeyBuilder* b) const final {
        // The attribute layout depends on fMode, so it must contribute to the key.
        b->add32(static_cast<uint32_t>(fMode));
    }
    std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const final;
private:
    AttributeTestProcessor(AttrMode mode)
            : GrGeometryProcessor(kAttributeTestProcessor_ClassID), fMode(mode) {
        switch (fMode) {
            case AttrMode::kAuto:
                fAttributes.emplace_back("pos", kFloat2_GrVertexAttribType, SkSLType::kFloat2);
                fAttributes.emplace_back("color", kUByte4_norm_GrVertexAttribType,
                                         SkSLType::kHalf4);
                this->setVertexAttributesWithImplicitOffsets(fAttributes.data(),
                                                             fAttributes.size());
                break;
            case AttrMode::kManual:
                // Same result as kAuto but with explicitly specified offsets and stride.
                fAttributes.emplace_back("pos", kFloat2_GrVertexAttribType, SkSLType::kFloat2, 0);
                fAttributes.emplace_back("color", kUByte4_norm_GrVertexAttribType,
                                         SkSLType::kHalf4, 8);
                this->setVertexAttributes(fAttributes.data(), fAttributes.size(), 12);
                break;
            case AttrMode::kWacky:
                // 0 thru 7 : float2 aliased to "pos0" and "pos1"
                // 8 thru 11: pad
                // 12 thru 15: unorm4 "color"
                // 16 thru 19: pad
                fAttributes.emplace_back("pos0", kFloat2_GrVertexAttribType, SkSLType::kFloat2, 0);
                fAttributes.emplace_back("pos1", kFloat2_GrVertexAttribType, SkSLType::kFloat2, 0);
                fAttributes.emplace_back("color", kUByte4_norm_GrVertexAttribType,
                                         SkSLType::kHalf4, 12);
                this->setVertexAttributes(fAttributes.data(), fAttributes.size(), 20);
                break;
        }
    }
    const AttrMode fMode;
    std::vector<Attribute> fAttributes;
    using INHERITED = GrGeometryProcessor;
};
// Emits the trivial shaders: position comes from the attribute(s), color is
// passed through, and coverage is solid. In kWacky mode the two aliased
// position attributes are summed (each holds half the final position).
std::unique_ptr<GrGeometryProcessor::ProgramImpl> AttributeTestProcessor::makeProgramImpl(
        const GrShaderCaps&) const {
    class Impl : public ProgramImpl {
    public:
        // No uniforms to upload.
        void setData(const GrGLSLProgramDataManager&,
                     const GrShaderCaps&,
                     const GrGeometryProcessor&) override {}
    private:
        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const AttributeTestProcessor& proc = args.fGeomProc.cast<AttributeTestProcessor>();
            args.fVaryingHandler->emitAttributes(proc);
            if (proc.fMode == AttrMode::kWacky) {
                // "pos0" and "pos1" alias the same bytes; their sum is the position.
                args.fVertBuilder->codeAppend("float2 pos = pos0 + pos1;");
            }
            args.fFragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
            args.fVaryingHandler->addPassThroughAttribute(GrShaderVar("color", SkSLType::kHalf4),
                                                          args.fOutputColor);
            gpArgs->fPositionVar.set(SkSLType::kFloat2, "pos");
            args.fFragBuilder->codeAppendf("const half4 %s = half4(1);", args.fOutputCoverage);
        }
    };
    return std::make_unique<Impl>();
}
// Draw op that renders one gradient quad (triangle strip of 4 vertices) using
// an AttributeTestProcessor in the given AttrMode. In kWacky mode the vertex
// data is padded to the 20-byte layout and positions are halved, since the
// shader adds the two aliased position attributes back together.
class AttributeTestOp : public GrDrawOp {
public:
    DEFINE_OP_CLASS_ID
    static GrOp::Owner Make(GrRecordingContext* context, AttrMode mode, const SkRect& r) {
        return GrOp::Make<AttributeTestOp>(context, mode, r);
    }
private:
    AttributeTestOp(AttrMode mode, SkRect rect) : GrDrawOp(ClassID()), fMode(mode), fRect(rect) {
        this->setBounds(fRect, HasAABloat::kNo, IsHairline::kNo);
    }
    const char* name() const override { return "AttributeTestOp"; }
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*, GrClampType) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    // Builds the program info; shared by the DDL (onPrePrepare) and
    // flush-time (onExecute) paths below.
    GrProgramInfo* createProgramInfo(const GrCaps* caps,
                                     SkArenaAlloc* arena,
                                     const GrSurfaceProxyView& writeView,
                                     bool usesMSAASurface,
                                     GrAppliedClip&& appliedClip,
                                     const GrDstProxyView& dstProxyView,
                                     GrXferBarrierFlags renderPassXferBarriers,
                                     GrLoadOp colorLoadOp) const {
        GrGeometryProcessor* geomProc = AttributeTestProcessor::Make(arena, fMode);
        return sk_gpu_test::CreateProgramInfo(caps,
                                              arena,
                                              writeView,
                                              usesMSAASurface,
                                              std::move(appliedClip),
                                              dstProxyView,
                                              geomProc,
                                              SkBlendMode::kSrcOver,
                                              GrPrimitiveType::kTriangleStrip,
                                              renderPassXferBarriers,
                                              colorLoadOp);
    }
    // Flush-time convenience overload that pulls all inputs off the state.
    GrProgramInfo* createProgramInfo(GrOpFlushState* flushState) const {
        return this->createProgramInfo(&flushState->caps(),
                                       flushState->allocator(),
                                       flushState->writeView(),
                                       flushState->usesMSAASurface(),
                                       flushState->detachAppliedClip(),
                                       flushState->dstProxyView(),
                                       flushState->renderPassBarriers(),
                                       flushState->colorLoadOp());
    }
    void onPrePrepare(GrRecordingContext* context,
                      const GrSurfaceProxyView& writeView,
                      GrAppliedClip* clip,
                      const GrDstProxyView& dstProxyView,
                      GrXferBarrierFlags renderPassXferBarriers,
                      GrLoadOp colorLoadOp) final {
        SkArenaAlloc* arena = context->priv().recordTimeAllocator();
        // DMSAA is not supported on DDL.
        bool usesMSAASurface = writeView.asRenderTargetProxy()->numSamples() > 1;
        // This is equivalent to a GrOpFlushState::detachAppliedClip
        GrAppliedClip appliedClip = clip ? std::move(*clip) : GrAppliedClip::Disabled();
        fProgramInfo = this->createProgramInfo(context->priv().caps(),
                                               arena,
                                               writeView,
                                               usesMSAASurface,
                                               std::move(appliedClip),
                                               dstProxyView,
                                               renderPassXferBarriers,
                                               colorLoadOp);
        context->priv().recordProgramInfo(fProgramInfo);
    }
    // Fills fVertexBuffer with four corner vertices of |rect|, one fixed
    // color per corner. V supplies the per-mode vertex struct layout.
    template <typename V> void makeVB(GrOpFlushState* flushState, const SkRect rect) {
        V v[4];
        v[0].p = {rect.left() , rect.top()   };
        v[1].p = {rect.right(), rect.top()   };
        v[2].p = {rect.left() , rect.bottom()};
        v[3].p = {rect.right(), rect.bottom()};
        v[0].color = SK_ColorRED;
        v[1].color = SK_ColorGREEN;
        v[2].color = SK_ColorYELLOW;
        v[3].color = SK_ColorMAGENTA;
        fVertexBuffer = flushState->resourceProvider()->createBuffer(v,
                                                                     sizeof(v),
                                                                     GrGpuBufferType::kVertex,
                                                                     kStatic_GrAccessPattern);
    }
    void onPrepare(GrOpFlushState* flushState) override {
        if (fMode == AttrMode::kWacky) {
            // 20-byte layout matching the kWacky attribute declaration; the
            // positions are halved because the shader sums pos0 + pos1.
            struct V {
                SkPoint p;
                uint32_t pad0;
                uint32_t color;
                uint32_t pad1;
            };
            SkRect rect {fRect.fLeft/2.f, fRect.fTop/2.f, fRect.fRight/2.f, fRect.fBottom/2.f};
            this->makeVB<V>(flushState, rect);
        } else {
            // 12-byte position + color layout used by kAuto and kManual.
            struct V {
                SkPoint p;
                uint32_t color;
            };
            this->makeVB<V>(flushState, fRect);
        }
    }
    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        if (!fVertexBuffer) {
            return;
        }
        if (!fProgramInfo) {
            fProgramInfo = this->createProgramInfo(flushState);
        }
        flushState->bindPipeline(*fProgramInfo, fRect);
        flushState->bindBuffers(nullptr, nullptr, std::move(fVertexBuffer));
        flushState->draw(4, 0);
    }
    sk_sp<GrBuffer> fVertexBuffer;
    const AttrMode fMode;
    const SkRect fRect;
    // The program info (and both the GrPipeline and GrGeometryProcessor it relies on), when
    // allocated, are allocated in either the ddl-record-time or flush-time arena. It is the
    // arena's job to free up their memory so we just have a bare programInfo pointer here. We
    // don't even store the GrPipeline and GrGeometryProcessor pointers here bc they are
    // guaranteed to have the same lifetime as the program info.
    GrProgramInfo* fProgramInfo = nullptr;
    friend class ::GrOp; // for ctor
    using INHERITED = GrDrawOp;
};
} // namespace
namespace skiagm {
/**
* This is a GPU-backend specific test that exercises explicit and implicit attribute offsets and
* strides.
*/
// GPU-only GM: renders three quads, one per AttrMode, via AttributeTestOp.
class AttributesGM : public GpuGM {
    SkString onShortName() override { return SkString("attributes"); }
    // Tall enough for three 100x100 quads stacked with 10px margins.
    SkISize onISize() override { return {120, 340}; }
    DrawResult onDraw(GrRecordingContext*, SkCanvas*, SkString* errorMsg) override;
};
// Draws one AttributeTestOp quad per attribute mode, stacked vertically.
// Skips (rather than fails) when no GPU surface-draw-context is available.
DrawResult AttributesGM::onDraw(GrRecordingContext* rc, SkCanvas* canvas, SkString* errorMsg) {
    auto sdc = SkCanvasPriv::TopDeviceSurfaceDrawContext(canvas);
    if (!sdc) {
        *errorMsg = kErrorMsg_DrawSkippedGpuOnly;
        return DrawResult::kSkip;
    }
    // Start from a black backdrop, then submit the ops directly.
    sdc->clear(SK_PMColor4fBLACK);
    constexpr AttrMode kModes[] = {AttrMode::kAuto, AttrMode::kManual, AttrMode::kWacky};
    SkRect bounds = SkRect::MakeXYWH(10, 10, 100, 100);
    for (AttrMode mode : kModes) {
        sdc->addDrawOp(AttributeTestOp::Make(rc, mode, bounds));
        bounds.offset(0, 110);
    }
    return DrawResult::kOk;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
DEF_GM( return new AttributesGM(); )
} // namespace skiagm
| 40.396721 | 100 | 0.565295 | [
"vector"
] |
f6dbaf05018fa94c7895b622bb175bf2674efd9b | 4,165 | cpp | C++ | Shader Labb 2/ComputedInstance.cpp | Cousken/3DEngine | dd8a58f1cc08f17b1286c136215913123f234eb4 | [
"MIT"
] | 1 | 2015-04-11T14:41:38.000Z | 2015-04-11T14:41:38.000Z | Shader Labb 2/ComputedInstance.cpp | Cousken/3DEngine | dd8a58f1cc08f17b1286c136215913123f234eb4 | [
"MIT"
] | null | null | null | Shader Labb 2/ComputedInstance.cpp | Cousken/3DEngine | dd8a58f1cc08f17b1286c136215913123f234eb4 | [
"MIT"
] | null | null | null | #include "ComputedInstance.h"
#include "Engine.h"
// NOTE(review): this destructor is empty, but Init() creates D3D10 COM
// objects (myVertexBuffer and myIndexBuffer via CreateBuffer, myVertexLayout
// via CreateInputLayout) that are never Release()d — a resource leak.
// Releasing them here first requires the constructor to initialize
// myIndexBuffer (it is currently left unset); fix both together.
ComputedInstance::~ComputedInstance(void)
{
}
// Creates the input layout for this instance's effect and then the vertex
// and index buffers. Returns false as soon as any step fails; resources
// created before the failure are not rolled back here.
bool ComputedInstance::Init( )
{
	HRESULT hr = S_OK;
	// Define the input layout
	// NOTE(review): a NORMAL element is declared at byte offset 28, but the
	// buffers are filled with VertexPosCol data (position + color) — confirm
	// sizeof(VertexPosCol) actually covers a normal, or drop this element.
	D3D10_INPUT_ELEMENT_DESC layout[] =
	{
		{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0 },
		{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0 },
		{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 28, D3D10_INPUT_PER_VERTEX_DATA, 0 },
	};
	UINT numElements = ARRAYSIZE( layout );
	// Create the input layout, validated against pass 0 of the effect's
	// technique via its input signature.
	D3D10_PASS_DESC PassDesc;
	myModel.GetEffect()->GetTechnique()->GetPassByIndex( 0 )->GetDesc( &PassDesc );
	hr = Engine::GetInstance()->GetDevice()->CreateInputLayout( layout, numElements, PassDesc.pIAInputSignature,
		PassDesc.IAInputSignatureSize, &myVertexLayout );
	if( FAILED( hr ) )
		return false;
	if (InitVertexBuffer() == false)
	{
		return false;
	}
	if (InitiIndexBuffer() == false)
	{
		return false;
	}
	return true;
}
bool ComputedInstance::InitVertexBuffer()
{
HRESULT hr = S_OK;
// Create vertex buffer
D3D10_BUFFER_DESC bd;
bd.Usage = D3D10_USAGE_DEFAULT;
bd.ByteWidth = sizeof( VertexPosCol ) * myModel.GetVertexData().Count();
bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = 0;
bd.MiscFlags = 0;
D3D10_SUBRESOURCE_DATA InitData;
InitData.pSysMem = &myModel.GetVertexData()[0];
hr = Engine::GetInstance()->GetDevice()->CreateBuffer( &bd, &InitData, &myVertexBuffer );
if( FAILED( hr ) )
return false;
return true;
}
// Uploads the model's index list into a default-usage D3D10 index buffer.
// Returns true on success, false if the buffer creation fails.
bool ComputedInstance::InitiIndexBuffer()
{
	HRESULT hr = S_OK;
	// Create index buffer sized to the model's index count.
	D3D10_BUFFER_DESC bd;
	bd.Usage = D3D10_USAGE_DEFAULT;
	bd.ByteWidth = sizeof( unsigned int ) * myModel.GetVertextIndexes().Count();
	bd.BindFlags = D3D10_BIND_INDEX_BUFFER;
	bd.CPUAccessFlags = 0;
	bd.MiscFlags = 0;
	D3D10_SUBRESOURCE_DATA InitData;
	InitData.pSysMem = &myModel.GetVertextIndexes()[0];
	hr = Engine::GetInstance()->GetDevice()->CreateBuffer( &bd, &InitData, &myIndexBuffer );
	if( FAILED( hr ) )
	{
		// Bug fix: this used to "return hr;" — failed HRESULTs are non-zero,
		// so the implicit bool conversion reported success on failure.
		return false;
	}
	return true;
}
// Wraps shared model geometry; GPU resources are created later in Init().
// aModel must outlive this instance (held by reference).
ComputedInstance::ComputedInstance( ComputedModel& aModel )
	: myModel(aModel)
{
	myEffectFile = "";
	myVertexBuffer = 0;
	// Bug fix: myIndexBuffer was left uninitialized, making any
	// "is it created yet?" check (or a future Release in the dtor) unsafe.
	myIndexBuffer = 0;
	myVertexLayout = 0;
}
// Currently a no-op: the entire draw path below is commented out.
// TODO confirm whether rendering moved elsewhere or this is work in progress.
void ComputedInstance::Render( Camera& aCamera )
{
	// Set the input layout
// 	Engine::GetInstance()->GetDevice()->IASetInputLayout( myVertexLayout );
//
// 	// Set vertex buffer
// 	UINT stride = sizeof( VertexPosCol );
// 	UINT offset = 0;
// 	Engine::GetInstance()->GetDevice()->IASetVertexBuffers( 0, 1, &myVertexBuffer, &stride, &offset );
//
// 	// Set index buffer
// 	Engine::GetInstance()->GetDevice()->IASetIndexBuffer( myIndexBuffer, DXGI_FORMAT_R32_UINT, 0 );
//
// 	// Set primitive topology
// 	Engine::GetInstance()->GetDevice()->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
//
// 	myOrientation.SetPosition(myPosition);
// 	myModel.GetEffect()->SetMatrices(myOrientation, aCamera.GetOrientation().Inverse(), aCamera.GetProjection());
//
// 	D3D10_TECHNIQUE_DESC techDesc;
// 	myModel.GetEffect()->GetTechnique()->GetDesc( &techDesc );
// 	for( UINT p = 0; p < techDesc.Passes; ++p )
// 	{
// 		myModel.GetEffect()->GetTechnique()->GetPassByIndex( p )->Apply( 0 );
// 		Engine::GetInstance()->GetDevice()->DrawIndexed( myModel.GetVertextIndexes().Count(), 0, 0 );
// 	}
}
// Adopts the given transform wholesale and caches its translation component
// so myPosition stays in sync with the matrix.
void ComputedInstance::SetOrientation( Matrix44f& aOrientation )
{
	myOrientation = aOrientation;
	myPosition = aOrientation.GetPosition();
}
// Writes the cached position back into the matrix before handing it out, so
// callers always see the up-to-date translation. Note: deliberately
// non-const — it mutates myOrientation and returns a mutable reference.
Matrix44f& ComputedInstance::GetOrientation()
{
	myOrientation.SetPosition(myPosition);
	return myOrientation;
}
// Moves the instance; the matrix itself is re-synced lazily the next time
// GetOrientation() or PerformTransformation() runs.
void ComputedInstance::SetPosition( Vector3f& aPosition )
{
	myPosition = aPosition;
}
// Copies the current world position into the caller-provided out-parameter.
void ComputedInstance::GetPosition( Vector3f& aPosition )
{
	aPosition = myPosition;
}
// Right-multiplies the current matrix by aOrientation. Note: unlike
// PerformTransformation(), the cached position is NOT folded into the
// matrix first, nor re-extracted afterwards.
void ComputedInstance::PerformRotation( Matrix33f& aOrientation )
{
	myOrientation = myOrientation * aOrientation;
}
// Folds the cached position into the matrix, right-multiplies by
// aOrientation, then re-extracts the (possibly changed) position so the
// cache stays consistent.
void ComputedInstance::PerformTransformation( Matrix44f& aOrientation )
{
	myOrientation.SetPosition(myPosition);
	myOrientation *= aOrientation;
	myPosition = myOrientation.GetPosition();
}
| 26.360759 | 142 | 0.722449 | [
"render"
] |
f6e2b654053ddf39143d3eb7c8b71560e7f63bc7 | 90,306 | cc | C++ | chrome/browser/translate/translate_manager_browsertest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-11-16T13:10:29.000Z | 2021-11-16T13:10:29.000Z | chrome/browser/translate/translate_manager_browsertest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/browser/translate/translate_manager_browsertest.cc | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <memory>
#include "base/bind.h"
#include "base/strings/utf_string_conversions.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/scoped_feature_list.h"
#include "build/build_config.h"
#include "chrome/browser/prefs/session_startup_pref.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/search_engines/template_url_service_factory.h"
#include "chrome/browser/translate/chrome_translate_client.h"
#include "chrome/browser/translate/translate_accept_languages_factory.h"
#include "chrome/browser/translate/translate_test_utils.h"
#include "chrome/browser/ui/browser.h"
#include "chrome/browser/ui/tabs/tab_strip_model.h"
#include "chrome/test/base/in_process_browser_test.h"
#include "chrome/test/base/search_test_utils.h"
#include "chrome/test/base/ui_test_utils.h"
#include "components/translate/core/browser/translate_accept_languages.h"
#include "components/translate/core/browser/translate_browser_metrics.h"
#include "components/translate/core/browser/translate_error_details.h"
#include "components/translate/core/browser/translate_manager.h"
#include "components/translate/core/common/language_detection_details.h"
#include "components/translate/core/common/translate_switches.h"
#include "components/translate/core/common/translate_util.h"
#include "content/public/common/content_switches.h"
#include "content/public/test/browser_test.h"
#include "content/public/test/prerender_test_util.h"
#include "net/dns/mock_host_resolver.h"
#include "net/test/embedded_test_server/http_request.h"
#include "net/test/embedded_test_server/http_response.h"
#include "url/gurl.h"
namespace translate {
namespace {
// The constants below are mock implementations of the Google Translate
// element script, served by the test fixture in place of the real one (see
// HandleRequest / kTranslateScriptURL). Each exercises a different success
// or failure path in TranslateManager. They are runtime data: do not edit
// the embedded JavaScript without updating the tests that depend on it.
//
// Happy path: reports the page as "fr" and completes translation at 100%
// (signals an error only when the source language is 'auto').
static const char kTestValidScript[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"fr\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          var error = (sourceLang == 'auto') ? true : false;"
    "          onTranslateProgress(100, true, error);"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Throws while constructing the TranslateService ("error" is undefined),
// simulating an initialization failure of the translate element.
static const char kTestScriptInitializationError[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return error;"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Detects the page as "en" so source and target languages are identical.
static const char kTestScriptIdenticalLanguages[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"en\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          onTranslateProgress(100, true, 0);"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Never becomes available, so waiting on availability times out.
static const char kTestScriptAvailableTimeout[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return false;"
    "        },"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Reports partial progress (33%, not done) and never finishes, so the
// translation itself times out.
static const char kTestScriptTranslateTimeout[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"fr\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          onTranslateProgress(33, false, 0);"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Throws inside translatePage ("error" is undefined), simulating an
// unexpected script error during translation.
static const char kTestScriptUnexpectedScriptError[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"fr\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          return error;"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Requests a dependent script load from an empty (invalid) origin.
static const char kTestScriptBadOrigin[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"fr\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          var url = \"\";"
    "          cr.googleTranslate.onLoadJavascript(url);"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// Requests a dependent script from a valid origin but a bogus path, so the
// follow-up script load fails.
static const char kTestScriptLoadError[] =
    "var google = {};"
    "google.translate = (function() {"
    "  return {"
    "    TranslateService: function() {"
    "      return {"
    "        isAvailable : function() {"
    "          return true;"
    "        },"
    "        restore : function() {"
    "          return;"
    "        },"
    "        getDetectedLanguage : function() {"
    "          return \"fr\";"
    "        },"
    "        translatePage : function(sourceLang, targetLang,"
    "                                 onTranslateProgress) {"
    "          var url = \"https://translate.googleapis.com/INVALID\";"
    "          cr.googleTranslate.onLoadJavascript(url);"
    "        }"
    "      };"
    "    }"
    "  };"
    "})();"
    "cr.googleTranslate.onTranslateElementLoad();";
// UMA histogram names asserted on by the hrefTranslate tests below.
static const char kTranslateHrefHintStatusHistogram[] =
    "Translate.HrefHint.Status";
static const char kTranslateHrefHintPrefsFilterStatusHistogram[] =
    "Translate.HrefHint.PrefsFilterStatus";
class TranslateManagerBrowserTest : public InProcessBrowserTest {
public:
TranslateManagerBrowserTest() {
scoped_feature_list_.InitWithFeatures(
std::vector<base::Feature>(),
{translate::kTranslateSubFrames,
translate::kOverrideLanguagePrefsForHrefTranslate,
translate::kOverrideSitePrefsForHrefTranslate});
error_subscription_ = TranslateManager::RegisterTranslateErrorCallback(
base::BindRepeating(&TranslateManagerBrowserTest::OnTranslateError,
base::Unretained(this)));
}
TranslateManagerBrowserTest(const TranslateManagerBrowserTest&) = delete;
TranslateManagerBrowserTest& operator=(const TranslateManagerBrowserTest&) =
delete;
~TranslateManagerBrowserTest() override = default;
void WaitUntilLanguageDetermined() { language_determined_waiter_->Wait(); }
void WaitUntilPageTranslated() {
translate::CreateTranslateWaiter(
browser()->tab_strip_model()->GetActiveWebContents(),
TranslateWaiter::WaitEvent::kPageTranslated)
->Wait();
}
void ResetObserver() {
language_determined_waiter_ = translate::CreateTranslateWaiter(
browser()->tab_strip_model()->GetActiveWebContents(),
TranslateWaiter::WaitEvent::kLanguageDetermined);
}
std::unique_ptr<net::test_server::HttpResponse> HandleRequest(
const net::test_server::HttpRequest& request) {
if (request.GetURL().path() != "/mock_translate_script.js")
return nullptr;
std::unique_ptr<net::test_server::BasicHttpResponse> http_response(
new net::test_server::BasicHttpResponse);
http_response->set_code(net::HTTP_OK);
http_response->set_content(script_);
http_response->set_content_type("text/javascript");
return std::move(http_response);
}
void OnTranslateError(const TranslateErrorDetails& details) {
error_type_ = details.error;
}
TranslateErrors::Type GetPageTranslatedResult() { return error_type_; }
ChromeTranslateClient* GetChromeTranslateClient() {
return ChromeTranslateClient::FromWebContents(
browser()->tab_strip_model()->GetActiveWebContents());
}
void ClickFrenchHrefTranslateLinkOnGooglePage() {
SetTranslateScript(kTestValidScript);
ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
// Load a page with hrefTranslate tags.
AddTabAtIndex(0,
GURL(embedded_test_server()->GetURL(
"www.google.com", "/href_translate_test.html")),
ui::PAGE_TRANSITION_TYPED);
ResetObserver();
chrome_translate_client = GetChromeTranslateClient();
WaitUntilLanguageDetermined();
// TODO(crbug.com/1258185): Migrate to better mechanism for testing around
// language detection. All pages are detected as "fr".
//
// In the case of href translate, we don't actually care if the current
// page is french, only that it loaded and whether href translate
// updates the current language state or not.
EXPECT_EQ("fr",
chrome_translate_client->GetLanguageState().source_language());
// Navigate to the French page by way of a link on the original page
ResetObserver();
content::WebContents* web_contents =
browser()->tab_strip_model()->GetWebContentsAt(0);
const std::string click_link_js =
"(function() { document.getElementById('test').click(); })();";
ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
// Detect language on the new page
// TODO(crbug.com/1258185): Migrate to better mechanism for testing around
// language detection. All pages are currently detected as "fr" due to the
// override.
WaitUntilLanguageDetermined();
EXPECT_EQ("fr",
chrome_translate_client->GetLanguageState().source_language());
}
protected:
// InProcessBrowserTest members.
void SetUp() override { InProcessBrowserTest::SetUp(); }
void SetUpOnMainThread() override {
ResetObserver();
error_type_ = TranslateErrors::NONE;
host_resolver()->AddRule("www.google.com", "127.0.0.1");
embedded_test_server()->RegisterRequestHandler(base::BindRepeating(
&TranslateManagerBrowserTest::HandleRequest, base::Unretained(this)));
embedded_test_server()->StartAcceptingConnections();
GetChromeTranslateClient()->GetTranslatePrefs()->ResetToDefaults();
}
void SetUpCommandLine(base::CommandLine* command_line) override {
ASSERT_TRUE(embedded_test_server()->InitializeAndListen());
// Enable Experimental web platform features for HrefTranslate tests
command_line->AppendSwitch(
::switches::kEnableExperimentalWebPlatformFeatures);
command_line->AppendSwitchASCII(
switches::kTranslateScriptURL,
embedded_test_server()->GetURL("/mock_translate_script.js").spec());
// TODO(crbug.com/1258185): Migrate to better mechanism for testing around
// language detection.
// All pages will have language detected as "fr". These tests are around
// the manager logic so the language detection behavior should be
// deterministic rather than relying on the page content.
command_line->AppendSwitch(::switches::kOverrideLanguageDetection);
}
void TearDownOnMainThread() override {
language_determined_waiter_.reset();
EXPECT_TRUE(embedded_test_server()->ShutdownAndWaitUntilComplete());
InProcessBrowserTest::TearDownOnMainThread();
}
void SetTranslateScript(const std::string& script) { script_ = script; }
private:
base::test::ScopedFeatureList scoped_feature_list_;
TranslateErrors::Type error_type_;
base::CallbackListSubscription error_subscription_;
std::unique_ptr<TranslateWaiter> language_determined_waiter_;
std::string script_;
};
// Tests that language detection returns a response.
// TODO(crbug.com/1258185): Migrate to better mechanism for testing around
// language detection. Seeding the TFLite model can racy/flaky on browsertests
// so we override the response.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, PageLanguageDetection) {
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Fetch the client only after the tab exists so it is bound to the new
  // WebContents (the previous fetch-before-navigation was a dead store).
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
}
// Tests that the language detection / HTML attribute override works correctly.
// For languages in the always-translate list, the detected language should
// override the HTML attribute. For all other languages, the HTML attribute
// should be used. Flaky on all platforms. https://crbug.com/1148703
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       DISABLED_PageLanguageDetectionConflict) {
  // Open a new tab with a page in French with incorrect HTML language
  // attribute specified. The language attribute should be overridden by the
  // language detection.
  AddTabAtIndex(
      0,
      GURL(embedded_test_server()->GetURL("/french_page_lang_conflict.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Fetch the client only after the tab exists so it is bound to the new
  // WebContents (the previous fetch-before-navigation was a dead store).
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Open a new tab with a page in Korean with incorrect HTML language
  // attribute specified. The language attribute should not be overridden by the
  // language detection.
  AddTabAtIndex(
      0,
      GURL(embedded_test_server()->GetURL("/korean_page_lang_conflict.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("en",
            chrome_translate_client->GetLanguageState().source_language());
}
// Test that the translation was successful.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, PageTranslationSuccess) {
  SetTranslateScript(kTestValidScript);
  base::HistogramTester histograms;
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Fetch the client only after the tab exists so it is bound to the new
  // WebContents (the previous fetch-before-navigation was a dead store).
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  if (chrome_translate_client->GetLanguageState().source_language() != "fr")
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  histograms.ExpectBucketCount("Translate.LanguageDetection.ContentLength", 148,
                               1);
  histograms.ExpectTotalCount("Translate.LanguageDeterminedDuration", 1);
}
// Test that the translation was successful in an about:blank page.
// This is a regression test for https://crbug.com/943685.
// Disabled due to flakiness: https://crbug.com/1202065.
// Regression test for https://crbug.com/943685: translating an about:blank
// pop-up must neither crash the renderer nor fail.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       DISABLED_PageTranslationAboutBlank) {
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Open a pop-up window and leave it at the initial about:blank URL.
  content::WebContentsAddedObserver popup_observer;
  ASSERT_TRUE(
      content::ExecJs(browser()->tab_strip_model()->GetActiveWebContents(),
                      "window.open('about:blank', 'popup')"));
  content::WebContents* popup = popup_observer.GetWebContents();
  // A round-trip to the renderer process helps avoid a race where the
  // browser-side translate structures are not yet ready for the translate call.
  EXPECT_EQ("ping", content::EvalJs(popup, "'ping'"));
  // Translate the about:blank page.
  ChromeTranslateClient* chrome_translate_client =
      ChromeTranslateClient::FromWebContents(popup);
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage("fr", "en", true);
  // Verify that the crash from https://crbug.com/943685 didn't happen.
  EXPECT_EQ("still alive", content::EvalJs(popup, "'still alive'"));
  // Wait for translation to finish and verify it was successful.
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
}
// Test that hrefTranslate is propagating properly.
// Verifies that following an hrefTranslate link from a Google-origin page
// auto-translates to the hinted target ("ja") and records the expected
// histograms, without adding the target to the user's accept languages.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, HrefTranslateSuccess) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            GetChromeTranslateClient()->GetLanguageState().current_language());
  // The target shouldn't be added to accept languages.
  EXPECT_FALSE(TranslateAcceptLanguagesFactory::GetForBrowserContext(
                   browser()->profile())
                   ->IsAcceptLanguage("ja"));
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test that hrefTranslate doesn't auto-translate if the originator of the
// navigation isn't a Google origin. The test page here is served from the
// embedded test server's default (non-Google) host.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       HrefTranslateNotFromGoogle) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a page with hrefTranslate tags.
  AddTabAtIndex(
      0, GURL(embedded_test_server()->GetURL("/href_translate_test.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Re-fetch the client: it is bound to the (new) active tab.
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. All pages will return "fr" as the detected language.
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. Note: this only tests that the source language was
  // whatever the page was before. The real test is that the href translate
  // update did not occur, tested by AutoTranslateTo() below and the histograms.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // No pending auto-translate target, and no href-hint histograms recorded.
  EXPECT_EQ("", chrome_translate_client->GetLanguageState().AutoTranslateTo());
  histograms.ExpectTotalCount(kTranslateHrefHintStatusHistogram, 0);
  histograms.ExpectTotalCount(kTranslateHrefHintPrefsFilterStatusHistogram, 0);
}
// Test that hrefTranslate with an unsupported language doesn't trigger.
// Flaky on Mac: crbug.com/1269389
#if defined(OS_MAC)
#define MAYBE_HrefTranslateUnsupported DISABLED_HrefTranslateUnsupported
#else
#define MAYBE_HrefTranslateUnsupported HrefTranslateUnsupported
#endif
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       MAYBE_HrefTranslateUnsupported) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a page with hrefTranslate tags, served from a Google host so the
  // hint would normally be honored.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page. This
  // link has the hrefTranslate attribute set to "unsupported", so it shouldn't
  // trigger translate.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { "
      "document.getElementById('test-unsupported-language').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. Note: this only tests that the source language was
  // whatever the page was before. The real test is that the href translate
  // update did not occur, tested by AutoTranslateTo() below and the histograms.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  EXPECT_EQ("", chrome_translate_client->GetLanguageState().AutoTranslateTo());
  // The hint was seen but produced neither UI nor an auto-translation.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kNoUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// A link whose hrefTranslate hint conflicts with the landing page's own
// language declaration (a French page claiming to be English) must still be
// auto-translated to the hinted target.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, HrefTranslateConflict) {
  base::HistogramTester histograms;
  ChromeTranslateClient* client = GetChromeTranslateClient();
  client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(true);
  SetTranslateScript(kTestValidScript);
  // Open the Google-hosted page that carries the hrefTranslate links.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. All pages will return "fr" as the detected language.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Follow the conflicting link on the original page.
  ResetObserver();
  content::WebContents* tab =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  ASSERT_TRUE(content::ExecuteScript(
      tab,
      "(function() { document.getElementById('test-conflict').click(); })();"));
  // Detection runs again on the landing page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // The hrefTranslate hint wins: the page auto-translates to English.
  WaitUntilPageTranslated();
  EXPECT_EQ("en", client->GetLanguageState().current_language());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test an href translate link without an href lang for the landing page:
// the hint must still auto-translate the destination once its language is
// detected.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, HrefTranslateNoHrefLang) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a page with hrefTranslate tags.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. All pages will return "fr" as the detected language.
  // Only wait if detection hasn't already completed for this tab.
  if (chrome_translate_client->GetLanguageState().source_language() != "fr")
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Use a link with no hrefLang to navigate to a French page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test-no-hrefLang').click(); "
      "})();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // See that the page was translated automatically
  WaitUntilPageTranslated();
  EXPECT_EQ("en",
            chrome_translate_client->GetLanguageState().current_language());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test an href translate link that's overridden by the auto translate
// settings: an always-translate fr->zh-CN pref takes precedence over the
// link's "ja" hint, so the page lands in Chinese.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       HrefTranslateOverridenByAutoTranslate) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Before browsing: set auto translate from French to Chinese.
  GetChromeTranslateClient()
      ->GetTranslatePrefs()
      ->AddLanguagePairToAlwaysTranslateList("fr", "zh-CN");
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("zh-CN",
            GetChromeTranslateClient()->GetLanguageState().current_language());
  // Auto-translation happened, but to a different target than the hint asked
  // for — that distinct status bucket is recorded.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kAutoTranslatedDifferentTargetLanguage),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test that hrefTranslate doesn't translate if the page language ("fr",
// added via AddToLanguageList with blocked=true) is in the user's language
// blocklist.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       HrefTranslateLanguageBlocked) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block French before navigating.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddToLanguageList("fr",
                                                                     true);
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // The page should not have been automatically translated.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kNoUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kLanguageInBlocklist),
      1);
}
// Test that hrefTranslate doesn't translate if the website (www.google.com,
// the link's host) is in the user's never-prompt site list.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, HrefTranslateSiteBlocked) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block the site before navigating.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddSiteToNeverPromptList(
      "www.google.com");
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // The page should not have been automatically translated.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kNoUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kSiteInBlocklist),
      1);
}
// Test that hrefTranslate doesn't translate if the language is in the user's
// language blocklist and the website is in the user's site blocklist; the
// combined-blocklist histogram bucket is recorded.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       HrefTranslateLanguageAndSiteBlocked) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block both the language and the site before navigating.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddToLanguageList("fr",
                                                                     true);
  GetChromeTranslateClient()->GetTranslatePrefs()->AddSiteToNeverPromptList(
      "www.google.com");
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // The page should not have been automatically translated.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kNoUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kBothLanguageAndSiteInBlocklist),
      1);
}
// Fixture enabling kOverrideLanguagePrefsForHrefTranslate with
// kForceAutoTranslateKey set to "false": the language-blocklist override
// applies to showing the translate UI only, not to auto-translation.
class OverrideLanguagePrefsForUiOnlyHrefTranslateBrowserTest
    : public TranslateManagerBrowserTest {
 public:
  OverrideLanguagePrefsForUiOnlyHrefTranslateBrowserTest() {
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        translate::kOverrideLanguagePrefsForHrefTranslate,
        {{translate::kForceAutoTranslateKey, "false"}});
  }
  ~OverrideLanguagePrefsForUiOnlyHrefTranslateBrowserTest() override = default;
 private:
  // Must outlive the test body so the feature stays enabled throughout.
  base::test::ScopedFeatureList scoped_feature_list_;
};
// Test the case when the hrefTranslate feature is configured to override the
// language blocklist for showing the translate UI but not for auto
// translation. In this case, hrefTranslate won't auto translate if the
// source language is in the user's language blocklist. The translate UI
// should still be shown though.
IN_PROC_BROWSER_TEST_F(OverrideLanguagePrefsForUiOnlyHrefTranslateBrowserTest,
                       HrefTranslateOverrideForTranslateUi) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block French so only the UI-override path can fire.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddToLanguageList("fr",
                                                                     true);
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // The page should not have been automatically translated, since the UI is
  // shown alone.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kLanguageInBlocklist),
      1);
}
// Fixture enabling kOverrideLanguagePrefsForHrefTranslate with
// kForceAutoTranslateKey set to "true": the language-blocklist override
// extends to forcing auto-translation, not just showing the UI.
class OverrideLanguagePrefsForAutoHrefTranslateBrowserTest
    : public TranslateManagerBrowserTest {
 public:
  OverrideLanguagePrefsForAutoHrefTranslateBrowserTest() {
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        translate::kOverrideLanguagePrefsForHrefTranslate,
        {{translate::kForceAutoTranslateKey, "true"}});
  }
  ~OverrideLanguagePrefsForAutoHrefTranslateBrowserTest() override = default;
 private:
  // Must outlive the test body so the feature stays enabled throughout.
  base::test::ScopedFeatureList scoped_feature_list_;
};
// Test that hrefTranslate will auto translate if the target language is on the
// user's language blocklist, but the feature is configured to override the
// language blocklist for auto translation.
IN_PROC_BROWSER_TEST_F(OverrideLanguagePrefsForAutoHrefTranslateBrowserTest,
                       HrefTranslateOverrideForAutoTranslate) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block French; the feature override should win over this pref.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddToLanguageList("fr",
                                                                     true);
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            GetChromeTranslateClient()->GetLanguageState().current_language());
  // The target shouldn't be added to accept languages.
  EXPECT_FALSE(TranslateAcceptLanguagesFactory::GetForBrowserContext(
                   browser()->profile())
                   ->IsAcceptLanguage("ja"));
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kLanguageInBlocklist),
      1);
}
// Fixture enabling kOverrideSitePrefsForHrefTranslate with
// kForceAutoTranslateKey set to "false": the site-blocklist override applies
// to showing the translate UI only, not to auto-translation.
class OverrideSitePrefsForUiOnlyHrefTranslateBrowserTest
    : public TranslateManagerBrowserTest {
 public:
  OverrideSitePrefsForUiOnlyHrefTranslateBrowserTest() {
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        translate::kOverrideSitePrefsForHrefTranslate,
        {{translate::kForceAutoTranslateKey, "false"}});
  }
  ~OverrideSitePrefsForUiOnlyHrefTranslateBrowserTest() override = default;
 private:
  // Must outlive the test body so the feature stays enabled throughout.
  base::test::ScopedFeatureList scoped_feature_list_;
};
// Test the case when the hrefTranslate feature is configured to override the
// site blocklist for showing the translate UI but not for auto translation.
// In this case, hrefTranslate won't auto translate if the website is in the
// user's site blocklist. The translate UI should still be shown though.
IN_PROC_BROWSER_TEST_F(OverrideSitePrefsForUiOnlyHrefTranslateBrowserTest,
                       HrefTranslateOverrideForTranslateUi) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block the link's host so only the UI-override path can fire.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddSiteToNeverPromptList(
      "www.google.com");
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // The page should not have been automatically translated, since the UI is
  // shown alone.
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kSiteInBlocklist),
      1);
}
// Fixture enabling kOverrideSitePrefsForHrefTranslate with
// kForceAutoTranslateKey set to "true": the site-blocklist override extends
// to forcing auto-translation, not just showing the UI.
class OverrideSitePrefsForAutoHrefTranslateBrowserTest
    : public TranslateManagerBrowserTest {
 public:
  OverrideSitePrefsForAutoHrefTranslateBrowserTest() {
    scoped_feature_list_.InitAndEnableFeatureWithParameters(
        translate::kOverrideSitePrefsForHrefTranslate,
        {{translate::kForceAutoTranslateKey, "true"}});
  }
  ~OverrideSitePrefsForAutoHrefTranslateBrowserTest() override = default;
 private:
  // Must outlive the test body so the feature stays enabled throughout.
  base::test::ScopedFeatureList scoped_feature_list_;
};
// Test that hrefTranslate will auto translate if the website is on the user's
// site blocklist, but the feature is configured to override the site blocklist
// for auto translation.
IN_PROC_BROWSER_TEST_F(OverrideSitePrefsForAutoHrefTranslateBrowserTest,
                       HrefTranslateOverrideForAutoTranslate) {
  base::HistogramTester histograms;
  GetChromeTranslateClient()
      ->GetTranslateManager()
      ->SetIgnoreMissingKeyForTesting(true);
  // Block the link's host; the feature override should win over this pref.
  GetChromeTranslateClient()->GetTranslatePrefs()->AddSiteToNeverPromptList(
      "www.google.com");
  ClickFrenchHrefTranslateLinkOnGooglePage();
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            GetChromeTranslateClient()->GetLanguageState().current_language());
  // The target shouldn't be added to accept languages.
  EXPECT_FALSE(TranslateAcceptLanguagesFactory::GetForBrowserContext(
                   browser()->profile())
                   ->IsAcceptLanguage("ja"));
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kSiteInBlocklist),
      1);
}
// Forcing a translation into the page's own language (fr -> fr) must surface
// TRANSLATION_ERROR through the language state.
// Flaky on Linux: crbug.com/1200687
#if defined(OS_LINUX) || defined(OS_CHROMEOS) || BUILDFLAG(IS_CHROMEOS_LACROS)
#define MAYBE_PageTranslationError DISABLED_PageTranslationError
#else
#define MAYBE_PageTranslationError PageTranslationError
#endif
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       MAYBE_PageTranslationError) {
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and let detection run.
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. All pages will return "fr" as the detected language.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  // Request a translation whose target equals the detected source to
  // provoke the error path.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "fr", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::TRANSLATION_ERROR, GetPageTranslatedResult());
}
// Translating with a script whose library fails to initialize must surface
// INITIALIZATION_ERROR through the language state.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationInitializationError) {
  SetTranslateScript(kTestScriptInitializationError);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Kick off a translation; the faulty script should report an init error.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::INITIALIZATION_ERROR, GetPageTranslatedResult());
}
// When the translate library never becomes ready, the manager must report
// TRANSLATION_TIMEOUT.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationTimeoutError) {
  SetTranslateScript(kTestScriptAvailableTimeout);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Kick off a translation; the stuck script should time out.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::TRANSLATION_TIMEOUT, GetPageTranslatedResult());
}
// When the translate script reports that source and target languages are the
// same, the manager must surface IDENTICAL_LANGUAGES.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationIdenticalLanguagesError) {
  SetTranslateScript(kTestScriptIdenticalLanguages);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Request "aa" -> "en"; the scripted response claims identical languages.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage("aa", "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::IDENTICAL_LANGUAGES, GetPageTranslatedResult());
}
// A failure inside the translatePage script execution must surface
// UNEXPECTED_SCRIPT_ERROR.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationUnexpectedScriptError) {
  SetTranslateScript(kTestScriptUnexpectedScriptError);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Kick off a translation; the script throws during execution.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::UNEXPECTED_SCRIPT_ERROR,
            GetPageTranslatedResult());
}
// An invalid securityOrigin in the translate URL must surface BAD_ORIGIN.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationBadOriginError) {
  SetTranslateScript(kTestScriptBadOrigin);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Kick off a translation; the scripted origin check fails.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::BAD_ORIGIN, GetPageTranslatedResult());
}
// A failure while loading the translate script must surface
// SCRIPT_LOAD_ERROR.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PageTranslationScriptLoadError) {
  SetTranslateScript(kTestScriptLoadError);
  ChromeTranslateClient* client = GetChromeTranslateClient();
  // Bring up a French page in a fresh tab and wait for detection.
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  AddTabAtIndex(0, french_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
  // Kick off a translation; the script fails to load.
  TranslateManager* translate_manager = client->GetTranslateManager();
  translate_manager->TranslatePage(
      client->GetLanguageState().source_language(), "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::SCRIPT_LOAD_ERROR, GetPageTranslatedResult());
}
// Test that session restore restores the translate infobar and other translate
// settings. This PRE_ step runs in a first browser session: it opts into
// restoring the last session, visits a French page, and confirms detection,
// so the companion test below can validate the restored state.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       PRE_TranslateSessionRestore) {
  // Restore the last session on the next startup.
  SessionStartupPref pref(SessionStartupPref::LAST);
  SessionStartupPref::SetStartupPref(browser()->profile(), pref);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  ResetObserver();
  GURL french_url = ui_test_utils::GetTestUrl(
      base::FilePath(), base::FilePath(FILE_PATH_LITERAL("french_page.html")));
  ASSERT_TRUE(ui_test_utils::NavigateToURL(browser(), french_url));
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
}
// Second session of the pair above: after restart, the restored tab's
// language state must come back as French.
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest, TranslateSessionRestore) {
  // Make restored tab active to (on some platforms) initiate language
  // detection.
  browser()->tab_strip_model()->ActivateTabAt(
      0, {TabStripModel::GestureType::kOther});
  content::WebContents* restored_web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  ChromeTranslateClient* restored_translate_client =
      ChromeTranslateClient::FromWebContents(restored_web_contents);
  // Detection may already have completed; only wait when the language is
  // still unknown to avoid hanging on an event that already fired.
  if (restored_translate_client->GetLanguageState()
          .current_language()
          .empty()) {
    ResetObserver();
    WaitUntilLanguageDetermined();
  }
  EXPECT_EQ("fr",
            restored_translate_client->GetLanguageState().current_language());
}
// Test that hrefTranslate overrides manual translate: a manually-set
// predefined target ("ru") loses to the link's hrefTranslate hint ("ja").
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       HrefTranslateOverridesManualTranslate) {
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->SetIgnoreMissingKeyForTesting(true);
  // Set target language manually and confirm it stuck.
  manager->SetPredefinedTargetLanguage("ru");
  EXPECT_EQ("ru", chrome_translate_client->GetLanguageState()
                      .GetPredefinedTargetLanguage());
  SetTranslateScript(kTestValidScript);
  // Load the page hosting the hrefTranslate links and detect its language.
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. All pages will return "fr" as the detected language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  // Fixed: this guard previously compared against "de" (stale copy/paste
  // from a German-page test), which never matches the detected "fr" and made
  // the wait unconditional. Compare against "fr" so the wait is skipped only
  // when detection has already completed, matching the sibling tests.
  if (chrome_translate_client->GetLanguageState().source_language() != "fr")
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Href-translate to ja should override manual translate to ru.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            chrome_translate_client->GetLanguageState().current_language());
}
// Test that iframes not translated: a successful main-frame translation must
// record zero frame-count samples and use only the main frame's text for
// language detection.
// TODO(https://crbug.com/1075446) disabled due to flakiness
IN_PROC_BROWSER_TEST_F(TranslateManagerBrowserTest,
                       DISABLED_TranslateIframeNotTranslated) {
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  base::HistogramTester histograms;
  // Open a new tab with a page in French.
  AddTabAtIndex(
      0, GURL(embedded_test_server()->GetURL("/translate/fr_iframe_test.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  // Only wait if detection hasn't already completed for this tab.
  if (chrome_translate_client->GetLanguageState().source_language() != "fr")
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  // No sub-frame translations should have been counted.
  histograms.ExpectTotalCount("Translate.TranslateFrameCount", 0);
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  // Only 54 characters of main frame used for language detection.
  histograms.ExpectBucketCount("Translate.LanguageDetection.ContentLength", 54,
                               1);
}
// Fixture enabling kTranslateSubFrames so tests can exercise translation
// with sub-frame support turned on.
class TranslateManagerWithSubFrameSupportBrowserTest
    : public TranslateManagerBrowserTest {
 public:
  TranslateManagerWithSubFrameSupportBrowserTest(
      const TranslateManagerWithSubFrameSupportBrowserTest&) = delete;
  TranslateManagerWithSubFrameSupportBrowserTest& operator=(
      const TranslateManagerWithSubFrameSupportBrowserTest&) = delete;
 protected:
  TranslateManagerWithSubFrameSupportBrowserTest() {
    scoped_feature_list_.InitAndEnableFeature(translate::kTranslateSubFrames);
  }
 private:
  // Must outlive the test body so the feature stays enabled throughout.
  base::test::ScopedFeatureList scoped_feature_list_;
};
// TODO(crbug/1258234): All subframe translation tests are disabled now that
// CLD3 is no longer used. Re-enable if subframe translation is migrated to the
// new detection mechanism.
// Tests that the CLD (Compact Language Detection) works properly.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageLanguageDetection) {
  // Start on an English page and confirm it is detected as English.
  const GURL english_url(embedded_test_server()->GetURL("/english_page.html"));
  AddTabAtIndex(0, english_url, ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  ChromeTranslateClient* client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("en", client->GetLanguageState().source_language());
  // Then navigate the same tab to a French page and expect "fr".
  ResetObserver();
  const GURL french_url(embedded_test_server()->GetURL("/french_page.html"));
  ASSERT_TRUE(ui_test_utils::NavigateToURL(browser(), french_url));
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr", client->GetLanguageState().source_language());
}
// Tests that the language detection / HTML attribute override works correctly.
// For languages in the always-translate list, the detected language should
// override the HTML attribute. For all other languages, the HTML attribute
// should be used.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageLanguageDetectionConflict) {
  // Open a new tab with a page in French with incorrect HTML language
  // attribute specified. The language attribute should be overridden by the
  // language detection.
  AddTabAtIndex(
      0,
      GURL(embedded_test_server()->GetURL("/french_page_lang_conflict.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  // The page's attribute claims "de"; detection is expected to yield it here.
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Open a new tab with a page in Korean with incorrect HTML language
  // attribute specified. The language attribute should not be overridden by the
  // language detection.
  AddTabAtIndex(
      0,
      GURL(embedded_test_server()->GetURL("/korean_page_lang_conflict.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  // Re-fetch the client: it is bound to the newly opened tab.
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("en",
            chrome_translate_client->GetLanguageState().source_language());
}
// Test that the translation was successful: a French page translated to
// English with a valid translate script reports no error and records the
// language-detection histograms.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationSuccess) {
  base::HistogramTester histograms;
  SetTranslateScript(kTestValidScript);
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  // 148 is presumably the text-content length of french_page.html — confirm
  // against the test data file if the page changes.
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  histograms.ExpectBucketCount("Translate.LanguageDetection.ContentLength", 148,
                               1);
  histograms.ExpectTotalCount("Translate.LanguageDeterminedDuration", 1);
}
// Test that hrefTranslate is propagating properly: following a link with an
// hrefTranslate hint from a Google origin auto-translates the landing page to
// the hinted target language ("ja") without adding it to accept languages.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateSuccess) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a German page (served from www.google.com so the hrefTranslate hint
  // is honored) and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // See that the page was translated automatically to the hinted target.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            chrome_translate_client->GetLanguageState().current_language());
  // The target shouldn't be added to accept languages.
  EXPECT_FALSE(TranslateAcceptLanguagesFactory::GetForBrowserContext(
                   browser()->profile())
                   ->IsAcceptLanguage("ja"));
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test that hrefTranslate doesn't auto-translate if the originator of the
// navigation isn't a Google origin.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateNotFromGoogle) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load the German page from a non-Google origin and detect its language.
  AddTabAtIndex(
      0, GURL(embedded_test_server()->GetURL("/href_translate_test.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  // The landing document is German; every sibling test that loads the same
  // href_translate_test.html expects "de" here. The previous expectation of
  // "fr" was inconsistent with the page contents.
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Since the navigation did not originate from a Google origin, the
  // hrefTranslate hint must not trigger auto-translation and no href-hint
  // histograms are recorded.
  EXPECT_EQ("", chrome_translate_client->GetLanguageState().AutoTranslateTo());
  histograms.ExpectTotalCount(kTranslateHrefHintStatusHistogram, 0);
  histograms.ExpectTotalCount(kTranslateHrefHintPrefsFilterStatusHistogram, 0);
}
// Test that hrefTranslate with an unsupported language doesn't trigger
// auto-translation; the status histogram records "UI not shown, not
// auto-translated" instead.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateUnsupported) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a German page from a Google origin and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page. This
  // link has the hrefTranslate attribute set to "unsupported", so it shouldn't
  // trigger translate.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { "
      "document.getElementById('test-unsupported-language').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // No auto-translate target should be queued.
  EXPECT_EQ("", chrome_translate_client->GetLanguageState().AutoTranslateTo());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kNoUiShownNotAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test an href translate link to a conflicted page: the landing page is
// French but claims to be English; the hrefTranslate hint still triggers
// auto-translation to English.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateConflict) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a German page from a Google origin and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  // The landing document is German; every sibling test that loads the same
  // href_translate_test.html expects "de" here. The previous expectation of
  // "fr" was inconsistent with the page contents.
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page that thinks it's in English by way of a link
  // on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test-conflict').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("en",
            chrome_translate_client->GetLanguageState().current_language());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test an href translate link without an hrefLang attribute for the landing
// page: language detection on the landing page supplies the source language
// and auto-translation still happens.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateNoHrefLang) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Load a German page from a Google origin and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Use a link with no hrefLang to navigate to a French page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test-no-hrefLang').click(); "
      "})();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // See that the page was translated automatically.
  WaitUntilPageTranslated();
  EXPECT_EQ("en",
            chrome_translate_client->GetLanguageState().current_language());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(
          TranslateBrowserMetrics::HrefTranslateStatus::kAutoTranslated),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test an href translate link that's overridden by the auto-translate
// settings: an always-translate pref (fr -> zh-CN) wins over the href hint's
// target language. (Note: "Overriden" in the test name is a historical typo;
// renaming would change the test identity.)
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateOverridenByAutoTranslate) {
  base::HistogramTester histograms;
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  chrome_translate_client->GetTranslateManager()->SetIgnoreMissingKeyForTesting(
      true);
  SetTranslateScript(kTestValidScript);
  // Before browsing: set auto translate from French to Chinese.
  chrome_translate_client->GetTranslatePrefs()
      ->AddLanguagePairToAlwaysTranslateList("fr", "zh-CN");
  // Load a German page from a Google origin and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // See that the page was translated automatically — to the always-translate
  // target (zh-CN), not the href hint's target.
  WaitUntilPageTranslated();
  EXPECT_EQ("zh-CN",
            chrome_translate_client->GetLanguageState().current_language());
  histograms.ExpectUniqueSample(
      kTranslateHrefHintStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslateStatus::
                           kAutoTranslatedDifferentTargetLanguage),
      1);
  histograms.ExpectUniqueSample(
      kTranslateHrefHintPrefsFilterStatusHistogram,
      static_cast<int>(TranslateBrowserMetrics::HrefTranslatePrefsFilterStatus::
                           kNotInBlocklists),
      1);
}
// Test if there was an error during translation.
// NOTE(review): unlike the sibling tests, this one does NOT call
// WaitUntilLanguageDetermined() before TranslatePage(), so source_language()
// is presumably still empty at the call — which is likely what induces
// TRANSLATION_ERROR despite the valid script. Confirm: the upstream variant
// of this test navigates to about:blank instead of french_page.html.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationError) {
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::TRANSLATION_ERROR, GetPageTranslatedResult());
}
// Test if there was an error during translate library initialization: the
// script reports an initialization failure and the error surfaces as
// INITIALIZATION_ERROR.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationInitializationError) {
  SetTranslateScript(kTestScriptInitializationError);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::INITIALIZATION_ERROR, GetPageTranslatedResult());
}
// Tests that when the translate library never becomes ready, the attempt
// times out and surfaces TRANSLATION_TIMEOUT.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationAvailableTimeoutError) {
  SetTranslateScript(kTestScriptAvailableTimeout);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::TRANSLATION_TIMEOUT, GetPageTranslatedResult());
}
// Tests that when the translate operation's status never resolves, the
// attempt times out and surfaces TRANSLATION_TIMEOUT.
// TODO(crbug.com/1064974): consolidate the common test logic here that is
// used between several error type tests from different script inputs.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationTranslateTimeoutError) {
  SetTranslateScript(kTestScriptTranslateTimeout);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::TRANSLATION_TIMEOUT, GetPageTranslatedResult());
}
// Tests the IDENTICAL_LANGUAGES error path: the test script reports that the
// source and target languages are the same.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationIdenticalLanguagesError) {
  SetTranslateScript(kTestScriptIdenticalLanguages);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager. "aa" is an arbitrary source
  // code; the scripted response — not this pair — produces the
  // identical-languages error.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage("aa", "en", true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::IDENTICAL_LANGUAGES, GetPageTranslatedResult());
}
// Test if there was an error during translatePage script execution: the
// scripted failure surfaces as UNEXPECTED_SCRIPT_ERROR.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationUnexpectedScriptError) {
  SetTranslateScript(kTestScriptUnexpectedScriptError);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::UNEXPECTED_SCRIPT_ERROR,
            GetPageTranslatedResult());
}
// Tests the BAD_ORIGIN error path: the scripted response reports an invalid
// securityOrigin for the translate request.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationBadOriginError) {
  SetTranslateScript(kTestScriptBadOrigin);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::BAD_ORIGIN, GetPageTranslatedResult());
}
// Test if there was an error during script load: a failing script fetch
// surfaces as SCRIPT_LOAD_ERROR.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PageTranslationScriptLoadError) {
  SetTranslateScript(kTestScriptLoadError);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(0, GURL(embedded_test_server()->GetURL("/french_page.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_TRUE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::SCRIPT_LOAD_ERROR, GetPageTranslatedResult());
}
// Test that session restore restores the translate infobar and other translate
// settings. The PRE_ prefix makes this body run first in a fresh profile; the
// companion DISABLED_TranslateSessionRestore test then runs against the
// restored session.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_PRE_TranslateSessionRestore) {
  // Configure the profile to restore the last session on startup.
  SessionStartupPref pref(SessionStartupPref::LAST);
  SessionStartupPref::SetStartupPref(browser()->profile(), pref);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  ResetObserver();
  GURL french_url = ui_test_utils::GetTestUrl(
      base::FilePath(), base::FilePath(FILE_PATH_LITERAL("french_page.html")));
  ASSERT_TRUE(ui_test_utils::NavigateToURL(browser(), french_url));
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
}
// Second half of the session-restore pair: verifies that the restored tab's
// language state (set up by DISABLED_PRE_TranslateSessionRestore) survives
// the restart.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_TranslateSessionRestore) {
  // Make restored tab active to (on some platforms) initiate language
  // detection.
  browser()->tab_strip_model()->ActivateTabAt(
      0, {TabStripModel::GestureType::kOther});
  content::WebContents* restored_web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  ChromeTranslateClient* restored_translate_client =
      ChromeTranslateClient::FromWebContents(restored_web_contents);
  // Detection may already have completed before we attach; only wait if the
  // language is still unknown.
  if (restored_translate_client->GetLanguageState()
          .current_language()
          .empty()) {
    ResetObserver();
    WaitUntilLanguageDetermined();
  }
  EXPECT_EQ("fr",
            restored_translate_client->GetLanguageState().current_language());
}
// Test that hrefTranslate overrides manual translate: a manually predefined
// target language ("ru") is superseded by the href hint's target ("ja").
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_HrefTranslateOverridesManualTranslate) {
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->SetIgnoreMissingKeyForTesting(true);
  // Set target language manually.
  manager->SetPredefinedTargetLanguage("ru");
  EXPECT_EQ("ru", chrome_translate_client->GetLanguageState()
                      .GetPredefinedTargetLanguage());
  SetTranslateScript(kTestValidScript);
  // Load a German page from a Google origin and detect its language.
  AddTabAtIndex(0,
                GURL(embedded_test_server()->GetURL(
                    "www.google.com", "/href_translate_test.html")),
                ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("de",
            chrome_translate_client->GetLanguageState().source_language());
  // Navigate to the French page by way of a link on the original page.
  ResetObserver();
  content::WebContents* web_contents =
      browser()->tab_strip_model()->GetWebContentsAt(0);
  const std::string click_link_js =
      "(function() { document.getElementById('test').click(); })();";
  ASSERT_TRUE(content::ExecuteScript(web_contents, click_link_js));
  // Detect language on the new page.
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Href-translate to ja should override manual translate to ru.
  WaitUntilPageTranslated();
  EXPECT_EQ("ja",
            chrome_translate_client->GetLanguageState().current_language());
}
// Test that iframes can be translated: with sub-frame support enabled, all
// three frames of fr_iframe_test.html are translated and sub-frame content
// contributes to language detection.
IN_PROC_BROWSER_TEST_F(TranslateManagerWithSubFrameSupportBrowserTest,
                       DISABLED_TranslateIframe) {
  base::HistogramTester histograms;
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(
      0, GURL(embedded_test_server()->GetURL("/translate/fr_iframe_test.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  // 3 frames are translated.
  histograms.ExpectBucketCount("Translate.TranslateFrameCount", 3, 1);
  histograms.ExpectBucketCount("Translate.TranslateSubframe.SuccessPercentage",
                               100, 1);
  histograms.ExpectTotalCount("Translate.TranslateSubframe.ErrorType", 0);
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  // More than the 54 characters of main frame are used for language detection.
  histograms.ExpectBucketCount("Translate.LanguageDetection.ContentLength", 550,
                               1);
}
class TranslateManagerWithMainFrameLanguageDetectionBrowserTest
: public TranslateManagerBrowserTest {
public:
TranslateManagerWithMainFrameLanguageDetectionBrowserTest(
const TranslateManagerWithMainFrameLanguageDetectionBrowserTest&) =
delete;
TranslateManagerWithMainFrameLanguageDetectionBrowserTest& operator=(
const TranslateManagerWithMainFrameLanguageDetectionBrowserTest&) =
delete;
protected:
TranslateManagerWithMainFrameLanguageDetectionBrowserTest() {
// Enable sub frame translation but with sub frame language
// detection turned off.
scoped_feature_list_.InitAndEnableFeatureWithParameters(
translate::kTranslateSubFrames,
{std::make_pair("detect_language_in_sub_frames", "false")});
}
private:
base::test::ScopedFeatureList scoped_feature_list_;
};
// Test that iframes can be translated while language detection uses only the
// main frame (sub-frame detection disabled by the fixture).
//
// TODO(https://crbug.com/1106620): Disabled due to flake and crashes.
IN_PROC_BROWSER_TEST_F(
    TranslateManagerWithMainFrameLanguageDetectionBrowserTest,
    DISABLED_TranslateIframe) {
  base::HistogramTester histograms;
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  // Open a new tab with a page in French.
  AddTabAtIndex(
      0, GURL(embedded_test_server()->GetURL("/translate/fr_iframe_test.html")),
      ui::PAGE_TRANSITION_TYPED);
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  // Detection may already have completed; only wait if it hasn't.
  if (chrome_translate_client->GetLanguageState().source_language().empty())
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  // Translate the page through TranslateManager.
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  // 3 frames are translated.
  histograms.ExpectBucketCount("Translate.TranslateFrameCount", 3, 1);
  histograms.ExpectBucketCount("Translate.TranslateSubframe.SuccessPercentage",
                               100, 1);
  histograms.ExpectTotalCount("Translate.TranslateSubframe.ErrorType", 0);
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  // But only the 54 characters of main frame are used for language detection.
  histograms.ExpectBucketCount("Translate.LanguageDetection.ContentLength", 54,
                               1);
}
// Parameterized fixture for prerendering tests; the boolean parameter toggles
// the translate::kTranslateSubFrames feature for the run.
class TranslateManagerPrerenderBrowserTest
    : public TranslateManagerBrowserTest,
      public ::testing::WithParamInterface<bool> {
 public:
  // Wires the prerender helper to the active WebContents and sets the
  // sub-frame feature according to the test parameter.
  TranslateManagerPrerenderBrowserTest()
      : prerender_helper_(base::BindRepeating(
            &TranslateManagerPrerenderBrowserTest::web_contents,
            base::Unretained(this))) {
    feature_list_.InitWithFeatureState(translate::kTranslateSubFrames,
                                       GetParam());
  }

  // Returns the tab the prerender helper operates on.
  content::WebContents* web_contents() {
    return browser()->tab_strip_model()->GetActiveWebContents();
  }

 protected:
  content::test::PrerenderTestHelper prerender_helper_;

 private:
  base::test::ScopedFeatureList feature_list_;
};
// Verifies that a prerendering page does not affect translation of the
// primary page, and that translation keeps working once the prerendered page
// is activated. Runs with kTranslateSubFrames both on and off.
IN_PROC_BROWSER_TEST_P(TranslateManagerPrerenderBrowserTest,
                       SkipPrerenderPage) {
  SetTranslateScript(kTestValidScript);
  ChromeTranslateClient* chrome_translate_client = GetChromeTranslateClient();
  base::HistogramTester histograms;
  // Load a French page.
  prerender_helper_.NavigatePrimaryPage(
      embedded_test_server()->GetURL("/french_page.html"));
  ResetObserver();
  // Prerender a German page.
  prerender_helper_.AddPrerender(
      embedded_test_server()->GetURL("/german_page.html"));
  // The prerendering page should not affect the primary page.
  chrome_translate_client = GetChromeTranslateClient();
  if (chrome_translate_client->GetLanguageState().source_language() != "fr")
    WaitUntilLanguageDetermined();
  EXPECT_EQ("fr",
            chrome_translate_client->GetLanguageState().source_language());
  TranslateManager* manager = chrome_translate_client->GetTranslateManager();
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  // Only the primary page's detection should be recorded so far.
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 1);
  histograms.ExpectTotalCount("Translate.LanguageDeterminedDuration", 1);
  // Activate the prerendered page.
  prerender_helper_.NavigatePrimaryPage(
      embedded_test_server()->GetURL("/german_page.html"));
  // Check that the translation service still works well.
  ResetObserver();
  chrome_translate_client = GetChromeTranslateClient();
  // TODO(crbug.com/1258185): Migrate to better mechanism for testing around
  // language detection. Subframe translation is disabled and not under
  // experimentation otherwise, language detection return "fr".
  std::string expected_lang = GetParam() ? "de" : "fr";
  if (chrome_translate_client->GetLanguageState().source_language() !=
      expected_lang)
    WaitUntilLanguageDetermined();
  EXPECT_EQ(expected_lang,
            chrome_translate_client->GetLanguageState().source_language());
  manager->TranslatePage(
      chrome_translate_client->GetLanguageState().source_language(), "en",
      true);
  WaitUntilPageTranslated();
  EXPECT_FALSE(chrome_translate_client->GetLanguageState().translation_error());
  EXPECT_EQ(TranslateErrors::NONE, GetPageTranslatedResult());
  histograms.ExpectTotalCount("Translate.LanguageDetection.ContentLength", 2);
  // Check noisy data was filtered out.
  histograms.ExpectTotalCount("Translate.LanguageDeterminedDuration", 1);
}
// Instantiate the prerender suite for both feature states
// (kTranslateSubFrames enabled and disabled).
INSTANTIATE_TEST_SUITE_P(All,
                         TranslateManagerPrerenderBrowserTest,
                         ::testing::Bool());
} // namespace
} // namespace translate
| 39.009071 | 80 | 0.726718 | [
"vector",
"model"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.