hexsha
stringlengths
40
40
size
int64
19
11.4M
ext
stringclasses
13 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
270
max_stars_repo_name
stringlengths
5
110
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
9
max_stars_count
float64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
270
max_issues_repo_name
stringlengths
5
116
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
9
max_issues_count
float64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
270
max_forks_repo_name
stringlengths
5
116
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
9
max_forks_count
float64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
19
11.4M
avg_line_length
float64
1.93
229k
max_line_length
int64
12
688k
alphanum_fraction
float64
0.07
0.99
matches
listlengths
1
10
bad027f37fac9a796a48c8da594f400b2eb2b70c
11,561
cc
C++
src/unittest/jsproc.cc
RebirthDB/rethinkdb
54a76551512bebfe1ab1071d9b19dec2cd9c40e6
[ "Apache-2.0" ]
41
2019-02-09T09:31:06.000Z
2022-02-23T01:08:55.000Z
src/unittest/jsproc.cc
RethonkDB/rethonkdb
8c9c1ddc71b1b891fdb8aad7ca5891fc036b80ee
[ "Apache-2.0" ]
4
2019-03-24T00:31:37.000Z
2022-03-14T13:23:48.000Z
src/unittest/jsproc.cc
RethonkDB/rethonkdb
8c9c1ddc71b1b891fdb8aad7ca5891fc036b80ee
[ "Apache-2.0" ]
2
2019-02-12T12:43:33.000Z
2021-09-30T16:05:33.000Z
// Copyright 2018-present RebirthDB // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // // This file incorporates work covered by the following copyright: // // Copyright 2010-present, The Linux Foundation, portions copyright Google and // others and used with permission or subject to their respective license // agreements. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "containers/archive/archive.hpp" #include "extproc/extproc_pool.hpp" #include "extproc/extproc_spawner.hpp" #include "extproc/js_runner.hpp" #include "rpc/serialize_macros.hpp" #include "unittest/extproc_test.hpp" #include "unittest/gtest.hpp" #include "rdb_protocol/env.hpp" SPAWNER_TEST(JSProc, DISABLED_EvalTimeout) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string loop_source = "for (var x = 0; x < 4e10; x++) {}"; js_runner_t::req_config_t config; config.timeout_ms = 10; ASSERT_NO_THROW({ js_result_t result = js_runner.eval(loop_source, config); std::string value = boost::get<std::string>(result); ASSERT_EQ(strprintf("JavaScript query `%s` timed out after 0.010 seconds.", loop_source.c_str()), value); }); } SPAWNER_TEST(JSProc, DISABLED_CallTimeout) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string loop_source = "(function () { for (var x = 0; x < 4e10; x++) {}})"; js_runner_t::req_config_t config; config.timeout_ms = 10000; js_result_t result = js_runner.eval(loop_source, config); js_id_t *any_id = boost::get<js_id_t>(&result); ASSERT_TRUE(any_id != nullptr); config.timeout_ms = 10; ASSERT_NO_THROW({ result = js_runner.call(loop_source, std::vector<ql::datum_t>(), config); std::string value = boost::get<std::string>(result); ASSERT_EQ(strprintf("JavaScript query `%s` timed out after 0.010 seconds.", loop_source.c_str()), value); }); } void run_datum_test(const std::string &source_code, ql::datum_t *res_out) { ql::configured_limits_t limits; js_runner_t js_runner(limits); js_runner_t::req_config_t config; config.timeout_ms = 10000; ASSERT_NO_THROW({ js_result_t result = js_runner.eval(source_code, config); ql::datum_t *res_datum = boost::get<ql::datum_t>(&result); ASSERT_TRUE(res_datum != nullptr); *res_out = *res_datum; }); } SPAWNER_TEST(JSProc, LiteralNumber) { ql::datum_t result; run_datum_test("9467923", &result); ASSERT_TRUE(result.has()); ASSERT_TRUE(result.get_type() == 
ql::datum_t::R_NUM); ASSERT_EQ(result.as_int(), 9467923); } SPAWNER_TEST(JSProc, LiteralString) { ql::datum_t result; run_datum_test("\"string data\"", &result); ASSERT_TRUE(result.has()); ASSERT_TRUE(result.get_type() == ql::datum_t::R_STR); ASSERT_EQ(result.as_str(), "string data"); } SPAWNER_TEST(JSProc, EvalAndCall) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function () { return 10337; })"; js_runner_t::req_config_t config; config.timeout_ms = 10000; js_result_t result = js_runner.eval(source_code, config); // Get the id of the function out js_id_t *js_id = boost::get<js_id_t>(&result); ASSERT_TRUE(js_id != nullptr); // Call the function ASSERT_NO_THROW({ result = js_runner.call(source_code, std::vector<ql::datum_t>(), config); // Check results ql::datum_t *res_datum = boost::get<ql::datum_t>(&result); ASSERT_TRUE(res_datum != nullptr); ASSERT_TRUE(res_datum->has()); ASSERT_TRUE(res_datum->get_type() == ql::datum_t::R_NUM); ASSERT_EQ(res_datum->as_int(), 10337); }); } SPAWNER_TEST(JSProc, BrokenFunction) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function () { return 4 / 0; })"; js_runner_t::req_config_t config; config.timeout_ms = 10000; js_result_t result = js_runner.eval(source_code, config); // Get the id of the function out js_id_t *js_id = boost::get<js_id_t>(&result); ASSERT_TRUE(js_id != nullptr); // Call the function ASSERT_NO_THROW({ result = js_runner.call(source_code, std::vector<ql::datum_t>(), config); // Get the error message std::string *error = boost::get<std::string>(&result); ASSERT_TRUE(error != nullptr); }); } SPAWNER_TEST(JSProc, InvalidFunction) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function() {)"; js_runner_t::req_config_t config; config.timeout_ms = 10000; ASSERT_NO_THROW({ js_result_t result = js_runner.eval(source_code, config); // Get the error message std::string 
*error = boost::get<std::string>(&result); ASSERT_TRUE(error != nullptr); }); } SPAWNER_TEST(JSProc, InfiniteRecursionFunction) { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function f(x) { x = x + f(x); return x; })"; js_runner_t::req_config_t config; config.timeout_ms = 60000; js_result_t result = js_runner.eval(source_code, config); // Get the id of the function out js_id_t *js_id = boost::get<js_id_t>(&result); ASSERT_TRUE(js_id != nullptr); // Call the function std::vector<ql::datum_t> args; args.push_back(ql::datum_t(1.0)); ASSERT_NO_THROW({ result = js_runner.call(source_code, args, config); std::string *err_msg = boost::get<std::string>(&result); ASSERT_EQ("RangeError: callstack limit", *err_msg); }); } void run_overalloc_function_test() { ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function f() {" " var res = \"\";" " while (true) {" " res = res + \"blah\";" " }" " return res;" "})"; js_runner_t::req_config_t config; config.timeout_ms = 60000; js_result_t result = js_runner.eval(source_code, config); // Get the id of the function out js_id_t *js_id = boost::get<js_id_t>(&result); ASSERT_TRUE(js_id != nullptr); // Call the function ASSERT_THROW(js_runner.call(source_code, std::vector<ql::datum_t>(), config), extproc_worker_exc_t); } // Disabling this test because it may cause complications depending on the user's system // ^^^ WHAT COMPLICATIONS??? 
TEST(JSProc, DISABLED_OverallocFunction) { extproc_spawner_t extproc_spawner; unittest::run_in_thread_pool(run_overalloc_function_test); } static void passthrough_test_internal(const ql::datum_t &arg) { guarantee(arg.has()); ql::configured_limits_t limits; js_runner_t js_runner(limits); const std::string source_code = "(function f(arg) { return arg; })"; js_runner_t::req_config_t config; config.timeout_ms = 60000; js_result_t result = js_runner.eval(source_code, config); // Get the id of the function out js_id_t *js_id = boost::get<js_id_t>(&result); ASSERT_TRUE(js_id != nullptr); // Call the function ASSERT_NO_THROW({ js_result_t res = js_runner.call(source_code, std::vector<ql::datum_t>(1, arg), config); ql::datum_t *res_datum = boost::get<ql::datum_t>(&res); ASSERT_TRUE(res_datum != nullptr); ASSERT_TRUE(res_datum->has()); ASSERT_EQ(*res_datum, arg); }); } // This test will make sure that conversion of datum_t to and from duktape types works // correctly SPAWNER_TEST(JSProc, Passthrough) { ql::configured_limits_t limits; // Number passthrough_test_internal(ql::datum_t(99.9999)); passthrough_test_internal(ql::datum_t(99.9999)); // String passthrough_test_internal(ql::datum_t("")); passthrough_test_internal(ql::datum_t("string str")); passthrough_test_internal(ql::datum_t(datum_string_t())); passthrough_test_internal(ql::datum_t(datum_string_t("string str"))); // Boolean passthrough_test_internal(ql::datum_t::boolean(true)); passthrough_test_internal(ql::datum_t::boolean(false)); // Array ql::datum_t array_datum; { std::vector<ql::datum_t> array_data; array_datum = ql::datum_t(std::move(array_data), limits); passthrough_test_internal(array_datum); for (size_t i = 0; i < 100; ++i) { array_data.push_back( ql::datum_t(datum_string_t(std::string(i, 'a')))); std::vector<ql::datum_t> copied_data(array_data); array_datum = ql::datum_t(std::move(copied_data), limits); passthrough_test_internal(array_datum); } } // Object ql::datum_t object_datum; { 
std::map<datum_string_t, ql::datum_t> object_data; object_datum = ql::datum_t(std::move(object_data)); passthrough_test_internal(array_datum); for (size_t i = 0; i < 100; ++i) { object_data.insert(std::make_pair(datum_string_t(std::string(i, 'a')), ql::datum_t(static_cast<double>(i)))); std::map<datum_string_t, ql::datum_t> copied_data(object_data); object_datum = ql::datum_t(std::move(copied_data)); passthrough_test_internal(array_datum); } } // Nested structure ql::datum_t nested_datum; { std::vector<ql::datum_t> nested_data; nested_datum = ql::datum_t(std::move(nested_data), limits); passthrough_test_internal(nested_datum); nested_data.push_back(array_datum); std::vector<ql::datum_t> copied_data(nested_data); nested_datum = ql::datum_t(std::move(copied_data), limits); passthrough_test_internal(nested_datum); nested_data.push_back(object_datum); copied_data = nested_data; nested_datum = ql::datum_t(std::move(copied_data), limits); passthrough_test_internal(nested_datum); } }
33.804094
113
0.641813
[ "object", "vector" ]
bad1178b2c16aa119f3febde6fa48132b635d0fc
67,479
cpp
C++
src/CLR/Core/CLR_RT_HeapBlock.cpp
axionbio/sw-fork-nf-interpreter
4befd3bc4acc19d0c289b1b4df44b54c66edf981
[ "MIT" ]
null
null
null
src/CLR/Core/CLR_RT_HeapBlock.cpp
axionbio/sw-fork-nf-interpreter
4befd3bc4acc19d0c289b1b4df44b54c66edf981
[ "MIT" ]
null
null
null
src/CLR/Core/CLR_RT_HeapBlock.cpp
axionbio/sw-fork-nf-interpreter
4befd3bc4acc19d0c289b1b4df44b54c66edf981
[ "MIT" ]
null
null
null
// // Copyright (c) .NET Foundation and Contributors // Portions Copyright (c) Microsoft Corporation. All rights reserved. // See LICENSE file in the project root for full license information. // #include "Core.h" #include <nanoHAL.h> #include <nanoPAL_NativeDouble.h> //////////////////////////////////////////////////////////////////////////////////////////////////// void CLR_RT_HeapBlock::InitializeToZero() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_Memory::ZeroFill(&m_data, this->DataSize() * sizeof(*this) - offsetof(CLR_RT_HeapBlock, m_data)); } //--//--//--//--//--// #if defined(NANOCLR_EMULATED_FLOATINGPOINT) HRESULT CLR_RT_HeapBlock::SetFloatIEEE754(const CLR_UINT32 arg) { CLR_INT32 res; CLR_UINT32 mantissa = (arg & 0x007FFFFF) | 0x00800000; int exponent = (int)((arg >> 23) & 0x000000FF) - 127; exponent -= (23 - CLR_RT_HeapBlock::HB_FloatShift); if (arg == 0) { res = 0; } else if (exponent <= -31) { res = 0; // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else if (exponent >= 31) { res = 0x7FFFFFFF; // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else { if (exponent > 0) { CLR_UINT64 tmpRes; tmpRes = ((CLR_UINT64)mantissa) << exponent; if (0 != (tmpRes >> 31)) { res = 0x7FFFFFFF; // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else { res = (CLR_UINT32)tmpRes; } } else if (exponent < 0) res = mantissa >> (-exponent); else res = mantissa; } if (arg & 0x80000000) res = -res; SetFloat(res); return S_OK; } HRESULT CLR_RT_HeapBlock::SetDoubleIEEE754(const CLR_UINT64 &arg) { CLR_INT64 res; CLR_UINT64 mantissa = (arg & ULONGLONGCONSTANT(0x000FFFFFFFFFFFFF)) | ULONGLONGCONSTANT(0x0010000000000000); int exponent = (int)((arg >> 52) & ULONGLONGCONSTANT(0x00000000000007FF)) - 1023; CLR_UINT64 mask = ULONGLONGCONSTANT(0xFFFFFFFFFFFFFFFF); exponent -= (52 - 
CLR_RT_HeapBlock::HB_DoubleShift); if (arg == 0) { res = 0; } else if (exponent <= -63) { res = 0; // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else if (exponent >= 63) { res = ULONGLONGCONSTANT(0x7FFFFFFFFFFFFFFF); // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else { if (exponent > 0) { mask <<= (63 - exponent); if (0 != (mask & mantissa)) { res = ULONGLONGCONSTANT(0x7FFFFFFFFFFFFFFF); // // Uncomment to produce an overflow exception for emulated floating points // // return CLR_E_OUT_OF_RANGE; } else { res = mantissa << exponent; } } else if (exponent < 0) res = mantissa >> (-exponent); else res = mantissa; } if (arg & ULONGLONGCONSTANT(0x8000000000000000)) res = -res; SetDouble(res); return S_OK; } #endif HRESULT CLR_RT_HeapBlock::EnsureObjectReference(CLR_RT_HeapBlock *&obj) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (this->DataType()) { case DATATYPE_OBJECT: case DATATYPE_BYREF: { obj = Dereference(); FAULT_ON_NULL(obj); #if defined(NANOCLR_APPDOMAINS) if (obj->DataType() == DATATYPE_TRANSPARENT_PROXY) { NANOCLR_CHECK_HRESULT(obj->TransparentProxyValidate()); obj = obj->TransparentProxyDereference(); FAULT_ON_NULL(obj); } #endif switch (obj->DataType()) { case DATATYPE_CLASS: case DATATYPE_VALUETYPE: case DATATYPE_DATETIME: // Special case. case DATATYPE_TIMESPAN: // Special case. NANOCLR_SET_AND_LEAVE(S_OK); default: // the remaining data types aren't to be handled break; } } break; case DATATYPE_DATETIME: // Special case. case DATATYPE_TIMESPAN: // Special case. 
obj = this; NANOCLR_SET_AND_LEAVE(S_OK); default: // the remaining data types aren't to be handled break; } NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); NANOCLR_NOCLEANUP(); } //--// HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_ReflectionDef_Index &reflex) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection = reflex; NANOCLR_NOCLEANUP_NOLABEL(); } HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_Assembly_Index &assm) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection.m_kind = REFLECTION_ASSEMBLY; m_data.reflection.m_levels = 0; m_data.reflection.m_data.m_assm = assm; NANOCLR_NOCLEANUP_NOLABEL(); } HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_TypeSpec_Index &sig) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_TypeDescriptor desc; NANOCLR_CHECK_HRESULT(desc.InitializeFromTypeSpec(sig)); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection = desc.m_reflex; NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_TypeDef_Index &cls) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection.m_kind = REFLECTION_TYPE; m_data.reflection.m_levels = 0; m_data.reflection.m_data.m_type = cls; NANOCLR_NOCLEANUP_NOLABEL(); } HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_FieldDef_Index &fd) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection.m_kind = REFLECTION_FIELD; m_data.reflection.m_levels = 0; m_data.reflection.m_data.m_field = fd; NANOCLR_NOCLEANUP_NOLABEL(); } HRESULT CLR_RT_HeapBlock::SetReflection(const CLR_RT_MethodDef_Index &md) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_MethodDef_Instance inst; if (inst.InitializeFromIndex(md) == false) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } 
m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_REFLECTION, 0, 1); m_data.reflection.m_kind = (inst.m_target->flags & CLR_RECORD_METHODDEF::MD_Constructor) ? REFLECTION_CONSTRUCTOR : REFLECTION_METHOD; m_data.reflection.m_levels = 0; m_data.reflection.m_data.m_method = md; NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::SetObjectCls(const CLR_RT_TypeDef_Index &cls) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_TypeDef_Instance inst; if (inst.InitializeFromIndex(cls) == false) { NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); } m_data.objectHeader.cls = cls; m_data.objectHeader.lock = NULL; NANOCLR_NOCLEANUP(); } //--// HRESULT CLR_RT_HeapBlock::InitializeArrayReference(CLR_RT_HeapBlock &ref, int index) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock_Array *array; if (ref.DataType() != DATATYPE_OBJECT) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } array = ref.DereferenceArray(); FAULT_ON_NULL(array); if (array->DataType() != DATATYPE_SZARRAY) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } if (index < 0 || index >= (CLR_INT32)array->m_numOfElements) { NANOCLR_SET_AND_LEAVE(CLR_E_INDEX_OUT_OF_RANGE); } InitializeArrayReferenceDirect(*array, index); NANOCLR_NOCLEANUP(); } void CLR_RT_HeapBlock::InitializeArrayReferenceDirect(CLR_RT_HeapBlock_Array &array, int index) { NATIVE_PROFILE_CLR_CORE(); m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_ARRAY_BYREF, 0, 1); m_data.arrayReference.array = &array; m_data.arrayReference.index = index; } void CLR_RT_HeapBlock::FixArrayReferenceForValueTypes() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_HeapBlock_Array *array = m_data.arrayReference.array; // // ValueTypes are implemented as pointers to objects, // so getting an array reference to a ValueType has to be treated like assigning a pointer! // // DateTime and TimeSpan are optimized as primitive types, // so getting an array reference to them is like getting a reference to them. 
// switch (array->m_typeOfElement) { case DATATYPE_VALUETYPE: this->SetReference(*(CLR_RT_HeapBlock *)array->GetElement(m_data.arrayReference.index)); break; case DATATYPE_DATETIME: case DATATYPE_TIMESPAN: m_id.raw = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_BYREF, 0, 1); m_data.objectReference.ptr = (CLR_RT_HeapBlock *)array->GetElement(m_data.arrayReference.index); break; } } HRESULT CLR_RT_HeapBlock::LoadFromReference(CLR_RT_HeapBlock &ref) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock tmp; CLR_RT_HeapBlock *obj; CLR_DataType dt = ref.DataType(); if (dt == DATATYPE_ARRAY_BYREF) { CLR_RT_HeapBlock_Array *array = ref.m_data.arrayReference.array; FAULT_ON_NULL(array); CLR_UINT8 *src = array->GetElement(ref.m_data.arrayReference.index); CLR_UINT32 size = array->m_sizeOfElement; if (!array->m_fReference) { CLR_UINT32 second = 0; CLR_UINT32 first; SetDataId(CLR_RT_HEAPBLOCK_RAW_ID(array->m_typeOfElement, 0, 1)); if (size == 4) { first = ((CLR_UINT32 *)src)[0]; } else if (size == 8) { first = ((CLR_UINT32 *)src)[0]; second = ((CLR_UINT32 *)src)[1]; } else if (size == 1) { first = ((CLR_UINT8 *)src)[0]; } else { first = ((CLR_UINT16 *)src)[0]; } ((CLR_UINT32 *)&NumericByRef())[0] = first; ((CLR_UINT32 *)&NumericByRef())[1] = second; NANOCLR_SET_AND_LEAVE(S_OK); } // // It's a pointer to a full CLR_RT_HeapBlock. 
// obj = (CLR_RT_HeapBlock *)src; } else if (dt == DATATYPE_BYREF) { obj = ref.Dereference(); FAULT_ON_NULL(obj); if (obj->DataType() == DATATYPE_VALUETYPE) { tmp.SetObjectReference(obj); obj = &tmp; } } else if (c_CLR_RT_DataTypeLookup[dt].m_flags & CLR_RT_DataTypeLookup::c_Direct) { obj = &ref; if (dt == DATATYPE_OBJECT) { CLR_RT_HeapBlock *objT = ref.Dereference(); if (objT && objT->IsBoxed()) { CLR_RT_TypeDef_Instance inst; if (objT->DataType() != DATATYPE_VALUETYPE) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } if (!inst.InitializeFromIndex(objT->ObjectCls())) { NANOCLR_SET_AND_LEAVE(CLR_E_TYPE_UNAVAILABLE); } if (inst.m_target->dataType != DATATYPE_VALUETYPE) // It's a boxed primitive/enum type. { obj = &objT[1]; } } } } else { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } if (obj->IsAValueType()) { NANOCLR_SET_AND_LEAVE(g_CLR_RT_ExecutionEngine.CloneObject(*this, *obj)); } this->Assign(*obj); NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::StoreToReference(CLR_RT_HeapBlock &ref, int size) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock *obj; CLR_DataType dt = ref.DataType(); if (dt == DATATYPE_ARRAY_BYREF) { CLR_RT_HeapBlock_Array *array = ref.m_data.arrayReference.array; FAULT_ON_NULL(array); CLR_UINT8 *dst = array->GetElement(ref.m_data.arrayReference.index); if (!array->m_fReference) { CLR_INT32 sizeArray = array->m_sizeOfElement; // // Cannot copy NULL reference to a primitive type array. // obj = FixBoxingReference(); FAULT_ON_NULL(obj); if (size == -1) { // size == -1 tells StoreToReference to allow the value 'this' to have more precision than the dest // array. This fixes the following bug. // : conv.u1 // : stobj [mscorlib]System.Byte // The conv.u1 will convert the top of the eval stack to a u1. But since the eval stack is required // to contain at least 4 byte values, this heap block will be promoted to an I4. // stobj ignores the type token (System.Byte) and calls Reassign, which calls StoreToReference. 
if (c_CLR_RT_DataTypeLookup[this->DataType()].m_sizeInBytes < sizeArray) { // Not enough precision here. NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } #if defined(_DEBUG) { CLR_DataType dtElem = (CLR_DataType)array->m_typeOfElement; CLR_RT_HeapBlock blk; blk.Assign(*this); NANOCLR_CHECK_HRESULT(blk.Convert( dtElem, false, (c_CLR_RT_DataTypeLookup[dtElem].m_flags & CLR_RT_DataTypeLookup::c_Signed) == 0)); switch (sizeArray) { case 1: _ASSERTE(blk.NumericByRefConst().u1 == this->NumericByRefConst().u1); break; case 2: _ASSERTE(blk.NumericByRefConst().u2 == this->NumericByRefConst().u2); break; case 4: _ASSERTE(blk.NumericByRefConst().u4 == this->NumericByRefConst().u4); break; case 8: _ASSERTE(blk.NumericByRefConst().u8 == this->NumericByRefConst().u8); break; } } #endif } else if (size == 0) { if (obj->DataType() != array->m_typeOfElement) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } } else { if (size != sizeArray) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } } CLR_UINT32 first = ((CLR_UINT32 *)&obj->NumericByRef())[0]; CLR_UINT32 second = ((CLR_UINT32 *)&obj->NumericByRef())[1]; if (sizeArray == 4) { ((CLR_UINT32 *)dst)[0] = (CLR_UINT32)first; } else if (sizeArray == 8) { ((CLR_UINT32 *)dst)[0] = (CLR_UINT32)first; ((CLR_UINT32 *)dst)[1] = (CLR_UINT32)second; } else if (sizeArray == 1) { ((CLR_UINT8 *)dst)[0] = (CLR_UINT8)first; } else { ((CLR_UINT16 *)dst)[0] = (CLR_UINT16)first; } NANOCLR_SET_AND_LEAVE(S_OK); } else { // // If the source is not null, make sure the types are compatible. 
// if (this->DataType() == DATATYPE_OBJECT && this->Dereference()) { CLR_RT_TypeDescriptor descSrc; CLR_RT_TypeDescriptor descDst; CLR_RT_TypeDescriptor descDstSub; NANOCLR_CHECK_HRESULT(descSrc.InitializeFromObject(*this)); NANOCLR_CHECK_HRESULT(descDst.InitializeFromObject(*array)); descDst.GetElementType(descDstSub); if (CLR_RT_ExecutionEngine::IsInstanceOf(descSrc, descDstSub, false) == false) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } } obj = (CLR_RT_HeapBlock *)dst; NANOCLR_SET_AND_LEAVE(obj->Reassign(*this)); } } else if (dt == DATATYPE_BYREF) { obj = ref.Dereference(); FAULT_ON_NULL(obj); if (obj->DataType() == DATATYPE_VALUETYPE) { NANOCLR_SET_AND_LEAVE(ref.Reassign(*this)); } } else if (c_CLR_RT_DataTypeLookup[dt].m_flags & CLR_RT_DataTypeLookup::c_Direct) { obj = &ref; } else { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } obj->Assign(*this); NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::Reassign(const CLR_RT_HeapBlock &value) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock *obj; CLR_RT_HeapBlock ref; if (this->DataType() == DATATYPE_BYREF) { obj = this->Dereference(); FAULT_ON_NULL(obj); // // Real value types can be passed as references. // if (obj->DataType() == DATATYPE_VALUETYPE) { ref.SetObjectReference(obj); obj = &ref; } NANOCLR_SET_AND_LEAVE(obj->Reassign(value)); } else if (value.DataType() == DATATYPE_BYREF) { obj = value.Dereference(); FAULT_ON_NULL(obj); // // Real value types can be passed as references. 
// if (obj->DataType() == DATATYPE_VALUETYPE) { ref.SetObjectReference(obj); obj = &ref; } NANOCLR_SET_AND_LEAVE(this->Reassign(*obj)); } else if (this->DataType() == DATATYPE_ARRAY_BYREF) { NANOCLR_CHECK_HRESULT(ref.LoadFromReference(*this)); NANOCLR_CHECK_HRESULT(ref.Reassign(value)); NANOCLR_SET_AND_LEAVE(ref.StoreToReference(*this, -1)); } else if (value.DataType() == DATATYPE_ARRAY_BYREF) { _ASSERTE(false); // not tested CLR_RT_HeapBlock valueT; valueT.Assign(value); NANOCLR_CHECK_HRESULT(ref.LoadFromReference(valueT)); NANOCLR_SET_AND_LEAVE(this->Reassign(ref)); } else { bool fDestination = this->IsAValueType(); bool fSource = value.IsAValueType(); if (fSource != fDestination) { // For value type objects we don't care if the source item is boxed because // CopyValueType will take care of the boxing/unboxing if (fDestination != value.IsAReferenceOfThisType(DATATYPE_VALUETYPE)) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } } if (fDestination) { NANOCLR_SET_AND_LEAVE(g_CLR_RT_ExecutionEngine.CopyValueType(this->Dereference(), value.Dereference())); } this->Assign(value); } NANOCLR_NOCLEANUP(); } void CLR_RT_HeapBlock::AssignAndPinReferencedObject(const CLR_RT_HeapBlock &value) { // This is very special case that we have local variable with pinned attribute in metadata. // This code is called only if "fixed" keyword is present in the managed code. Executed on assignment to "fixed" // pointer. First check if there is object referenced by the local var. We unpin it, since the reference is // replaced. if ((m_data.objectReference.ptr != NULL && m_id.type.dataType == DATATYPE_ARRAY_BYREF) || m_id.type.dataType == DATATYPE_BYREF) { // Unpin the object that has been pointed by local variable. m_data.objectReference.ptr->Unpin(); } // Move the data. 
m_data = value.m_data; // Leave the same logic as in AssignAndPreserveType if (DataType() > DATATYPE_LAST_PRIMITIVE_TO_PRESERVE) { m_id.type.dataType = value.m_id.type.dataType; m_id.type.size = value.m_id.type.size; // We take new flags, but preserve "pinned" attribute m_id.type.flags = m_id.type.flags | HB_Pinned; } // Pin the object referenced by local variable. if ((m_data.objectReference.ptr != NULL && m_id.type.dataType == DATATYPE_ARRAY_BYREF) || m_id.type.dataType == DATATYPE_BYREF) { m_data.objectReference.ptr->Pin(); } } HRESULT CLR_RT_HeapBlock::PerformBoxingIfNeeded() { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); // we need to box the optimized value types... bool fBox = (c_CLR_RT_DataTypeLookup[this->DataType()].m_flags & CLR_RT_DataTypeLookup::c_OptimizedValueType) != 0; // ... but also the value types under object types if (!fBox && this->DataType() == DATATYPE_OBJECT) { CLR_RT_HeapBlock *src = this->Dereference(); if (src && src->DataType() == DATATYPE_VALUETYPE && !src->IsBoxed()) { fBox = true; } } if (fBox) { CLR_RT_TypeDescriptor desc; NANOCLR_CHECK_HRESULT(desc.InitializeFromObject(*this)); NANOCLR_CHECK_HRESULT(PerformBoxing(desc.m_handlerCls)); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::PerformBoxing(const CLR_RT_TypeDef_Instance &cls) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock tmp; CLR_RT_HeapBlock *obj = this; CLR_DataType dt = obj->DataType(); // // System.DateTime and System.TimeSpan are real value types, so sometimes they are passed by reference. // if (dt == DATATYPE_BYREF) { obj = obj->Dereference(); FAULT_ON_NULL(obj); dt = obj->DataType(); // // Real value types can be passed as references. 
// if (dt == DATATYPE_VALUETYPE) { tmp.SetObjectReference(obj); obj = &tmp; dt = DATATYPE_OBJECT; } } { CLR_DataType dataType = (CLR_DataType)cls.m_target->dataType; const CLR_RT_DataTypeLookup &dtl = c_CLR_RT_DataTypeLookup[dataType]; if (dtl.m_flags & CLR_RT_DataTypeLookup::c_OptimizedValueType) { CLR_RT_HeapBlock *ptr = g_CLR_RT_ExecutionEngine.ExtractHeapBlocksForClassOrValueTypes(DATATYPE_VALUETYPE, HB_Boxed, cls, 2); FAULT_ON_NULL(ptr); switch (dataType) { case DATATYPE_DATETIME: // Special case. case DATATYPE_TIMESPAN: // Special case. dataType = DATATYPE_I8; break; default: // the remaining data types aren't to be handled break; } ptr[1].SetDataId(CLR_RT_HEAPBLOCK_RAW_ID(dataType, 0, 1)); ptr[1].AssignData(*this); this->SetObjectReference(ptr); } else if (dt == DATATYPE_OBJECT) { CLR_RT_HeapBlock *ptr = this->Dereference(); if (ptr->IsBoxed() || ptr->DataType() != DATATYPE_VALUETYPE) { NANOCLR_SET_AND_LEAVE(S_FALSE); // Don't box twice... } NANOCLR_CHECK_HRESULT(g_CLR_RT_ExecutionEngine.CloneObject(*this, *ptr)); this->Dereference()->Box(); } else { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } } NANOCLR_NOCLEANUP(); } /******************************************************************************************* * Thefunction CLR_RT_HeapBlock::PerformUnboxing is used during processing of unbox.any IL instruction. * Example * unbox.any [mscorlib]System.Int32 * unbox.any takes the value at the top of evaluation stack and performs unboxing into the type * specified after the instruction. In this case the type is [mscorlib]System.Int32. * Function parameters: * 1. this - Heap block at the top of evaluation stack. * 2. cls - Runtime Type Definition of the type specified after instruction. * The functoin takes the object pointed by top of ev. stack. Then it does 3 operatioins: * 1. Dereferences the object * 2. Validates the type of data kept by object corresponds to type in cls. * 3. Moves de-referenced date to top of evaluation stack. 
*******************************************************************************************/ HRESULT CLR_RT_HeapBlock::PerformUnboxing(const CLR_RT_TypeDef_Instance &cls) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_HeapBlock *src; if (this->DataType() != DATATYPE_OBJECT) { NANOCLR_SET_AND_LEAVE(CLR_E_INVALID_CAST); } // Finds the object that keeps the boxed type. src = this->Dereference(); FAULT_ON_NULL(src); // Validates that src keeps something boxed and the boxed value is VALUE type. if (src->IsBoxed() == false || src->DataType() != DATATYPE_VALUETYPE) { NANOCLR_SET_AND_LEAVE(CLR_E_INVALID_CAST); } // Validates the type of data kept by object corresponds to type in cls. // If typedef indexes are the same, then skip and go to assigment of objects. if (src->ObjectCls().m_data != cls.m_data) { // The typedef indexes are different, but src and cls may have identical basic data type. // Need to check it. If identical - the unboxing is allowed. // This "if" compares underlying type in object and cls. Should be equal in order to continue. if (!(src->DataSize() > 1 && (src[1].DataType() == cls.m_target->dataType))) { // No luck. The types in src object and specified by cls are different. Need to throw exceptioin. NANOCLR_SET_AND_LEAVE(CLR_E_INVALID_CAST); } } if (cls.m_target->dataType == DATATYPE_VALUETYPE) { NANOCLR_CHECK_HRESULT(g_CLR_RT_ExecutionEngine.CloneObject(*this, *this)); this->Dereference()->Unbox(); } else // It's a boxed primitive/enum type. { this->Assign(src[1]); this->ChangeDataType(cls.m_target->dataType); } NANOCLR_NOCLEANUP(); } CLR_RT_HeapBlock *CLR_RT_HeapBlock::FixBoxingReference() { NATIVE_PROFILE_CLR_CORE(); // // Not boxed, nothing to do. 
// if (this->DataType() == DATATYPE_OBJECT) { CLR_RT_HeapBlock *src = this->Dereference(); if (!src) return src; if (src->DataType() == DATATYPE_VALUETYPE && src->IsBoxed()) { CLR_RT_TypeDef_Instance inst; if (!inst.InitializeFromIndex(src->ObjectCls())) return NULL; if (inst.m_target->dataType != DATATYPE_VALUETYPE) // It's a boxed primitive/enum type. { return &src[1]; } } } return this; } //--// bool CLR_RT_HeapBlock::IsZero() const { NATIVE_PROFILE_CLR_CORE(); switch (DataType()) { case DATATYPE_OBJECT: return (m_data.objectReference.ptr == NULL); case DATATYPE_I8: case DATATYPE_U8: return (m_data.numeric.u8 == 0); case DATATYPE_R4: return (m_data.numeric.r4 == 0); case DATATYPE_R8: return (m_data.numeric.r8 == 0); default: return (m_data.numeric.u4 == 0); } } //--// void CLR_RT_HeapBlock::Promote() { NATIVE_PROFILE_CLR_CORE(); switch (DataType()) { case DATATYPE_I1: m_id.type.dataType = DATATYPE_I4; m_data.numeric.s4 = (CLR_INT32)m_data.numeric.s1; break; case DATATYPE_I2: m_id.type.dataType = DATATYPE_I4; m_data.numeric.s4 = (CLR_INT32)m_data.numeric.s2; break; case DATATYPE_BOOLEAN: case DATATYPE_U1: m_id.type.dataType = DATATYPE_I4; m_data.numeric.u4 = (CLR_UINT32)m_data.numeric.u1; break; case DATATYPE_CHAR: case DATATYPE_U2: m_id.type.dataType = DATATYPE_I4; m_data.numeric.u4 = (CLR_UINT32)m_data.numeric.u2; break; default: // this data type is not to be promoted break; } } //--// CLR_UINT32 CLR_RT_HeapBlock::GetHashCode(CLR_RT_HeapBlock *ptr, bool fRecurse, CLR_UINT32 crc = 0) { NATIVE_PROFILE_CLR_CORE(); if (!ptr) return 0; switch (ptr->DataType()) { case DATATYPE_OBJECT: crc = GetHashCode(ptr->Dereference(), fRecurse, crc); break; case DATATYPE_STRING: { const char *szText = ptr->StringText(); crc = SUPPORT_ComputeCRC(szText, (int)hal_strlen_s(szText), crc); } break; case DATATYPE_CLASS: case DATATYPE_VALUETYPE: if (fRecurse) { CLR_RT_TypeDef_Instance cls; cls.InitializeFromIndex(ptr->ObjectCls()); int totFields = cls.CrossReference().m_totalFields; if 
(totFields > 0) { while (totFields-- > 0) { crc = GetHashCode(++ptr, false, crc); } } else { crc = SUPPORT_ComputeCRC(&ptr, sizeof(ptr), crc); } } break; case DATATYPE_DELEGATE_HEAD: { CLR_RT_HeapBlock_Delegate *dlg = (CLR_RT_HeapBlock_Delegate *)ptr; const CLR_RT_MethodDef_Index &ftn = dlg->DelegateFtn(); crc = GetHashCode(&dlg->m_object, false, crc); crc = SUPPORT_ComputeCRC(&ftn, sizeof(ftn), crc); } break; case DATATYPE_OBJECT_TO_EVENT: { CLR_RT_ObjectToEvent_Source *evtSrc = (CLR_RT_ObjectToEvent_Source *)ptr; crc = GetHashCode(evtSrc->m_eventPtr, false, crc); crc = GetHashCode(evtSrc->m_objectPtr, false, crc); } break; default: crc = SUPPORT_ComputeCRC(&ptr->DataByRefConst(), ptr->GetAtomicDataUsedBytes(), crc); break; } return crc; } CLR_UINT32 CLR_RT_HeapBlock::GetAtomicDataUsedBytes() const { switch (DataType()) { case DATATYPE_BOOLEAN: // Fall through, hashDataSize = 1 case DATATYPE_U1: // Fall through, hashDataSize = 1 case DATATYPE_CHAR: return 1; case DATATYPE_I2: // Fall through, hashDataSize = 2 case DATATYPE_U2: return 2; break; case DATATYPE_I4: case DATATYPE_U4: case DATATYPE_R4: return 4; break; case DATATYPE_I8: // Fall through, hashDataSize = 8 case DATATYPE_U8: // Fall through, hashDataSize = 8 case DATATYPE_R8: // Fall through, hashDataSize = 8 case DATATYPE_DATETIME: // Fall through, hashDataSize = 8 case DATATYPE_TIMESPAN: return 8; break; // Default full size of CLR_RT_HeapBlock_AtomicData default: return sizeof(CLR_RT_HeapBlock_AtomicData); } // The same as default. This is never reached, but I put it to avoid potential compiler warning. 
return sizeof(CLR_RT_HeapBlock_AtomicData); } bool CLR_RT_HeapBlock::ObjectsEqual( const CLR_RT_HeapBlock &pArgLeft, const CLR_RT_HeapBlock &pArgRight, bool fSameReference) { NATIVE_PROFILE_CLR_CORE(); if (&pArgLeft == &pArgRight) return true; if (pArgLeft.DataType() == pArgRight.DataType()) { switch (pArgLeft.DataType()) { case DATATYPE_VALUETYPE: if (pArgLeft.ObjectCls().m_data == pArgRight.ObjectCls().m_data) { const CLR_RT_HeapBlock *objLeft = &pArgLeft; const CLR_RT_HeapBlock *objRight = &pArgRight; CLR_UINT32 num = pArgLeft.DataSize(); while (--num) { if (ObjectsEqual(*++objLeft, *++objRight, false) == false) return false; } return true; } break; #if defined(NANOCLR_APPDOMAINS) case DATATYPE_TRANSPARENT_PROXY: #endif case DATATYPE_OBJECT: { CLR_RT_HeapBlock *objLeft = pArgLeft.Dereference(); CLR_RT_HeapBlock *objRight = pArgRight.Dereference(); if (objLeft == objRight) return true; if (objLeft && objRight) { if (!fSameReference || (objLeft->DataType() == DATATYPE_REFLECTION)) return ObjectsEqual(*objLeft, *objRight, false); } } break; case DATATYPE_SZARRAY: if (fSameReference == false) { _ASSERTE(false); // can this code path ever be executed? 
CLR_RT_HeapBlock_Array *objLeft = (CLR_RT_HeapBlock_Array *)&pArgLeft; CLR_RT_HeapBlock_Array *objRight = (CLR_RT_HeapBlock_Array *)&pArgRight; if (objLeft->m_numOfElements == objRight->m_numOfElements && objLeft->m_sizeOfElement == objRight->m_sizeOfElement && objLeft->m_typeOfElement == objRight->m_typeOfElement) { if (!objLeft->m_fReference) { if (memcmp( objLeft->GetFirstElement(), objRight->GetFirstElement(), objLeft->m_numOfElements * objLeft->m_sizeOfElement) == 0) { return true; } } } } break; case DATATYPE_REFLECTION: if (pArgLeft.SameHeader(pArgRight)) return true; break; default: if (fSameReference == false) { const CLR_RT_DataTypeLookup &dtl = c_CLR_RT_DataTypeLookup[pArgLeft.DataType()]; if ((dtl.m_flags & CLR_RT_DataTypeLookup::c_Reference) == 0) { CLR_UINT32 size = (dtl.m_sizeInBits + 7) / 8; if (memcmp(&pArgLeft.DataByRefConst(), &pArgRight.DataByRefConst(), size) == 0) { return true; } } } break; } } return false; } //--// static const CLR_RT_HeapBlock *FixReflectionForType(const CLR_RT_HeapBlock &src, CLR_RT_HeapBlock &tmp) { NATIVE_PROFILE_CLR_CORE(); const CLR_RT_ReflectionDef_Index &rd = src.ReflectionDataConst(); if (rd.m_kind == REFLECTION_TYPE) { CLR_RT_TypeDef_Instance inst; CLR_UINT32 levels; if (inst.InitializeFromReflection(rd, &levels) && levels == 0) { tmp.Assign(src); CLR_RT_ReflectionDef_Index &rd2 = tmp.ReflectionData(); rd2.InitializeFromHash(inst.CrossReference().m_hash); return &tmp; } } return &src; } //--// static inline int CompareValues_Numeric(CLR_INT32 left, CLR_INT32 right) { NATIVE_PROFILE_CLR_CORE(); if (left > right) return 1; if (left < right) return -1; /**************/ return 0; } static inline int CompareValues_Numeric(CLR_UINT32 left, CLR_UINT32 right) { NATIVE_PROFILE_CLR_CORE(); if (left > right) return 1; if (left < right) return -1; /**************/ return 0; } static int CompareValues_Numeric(const CLR_INT64 left, const CLR_INT64 right) { NATIVE_PROFILE_CLR_CORE(); if (left > right) return 1; if (left < right) 
return -1; /**************/ return 0; } static int CompareValues_Numeric(const CLR_UINT64 left, const CLR_UINT64 right) { NATIVE_PROFILE_CLR_CORE(); if (left > right) return 1; if (left < right) return -1; /**************/ return 0; } static int CompareValues_Numeric(const CLR_RT_HeapBlock &left, const CLR_RT_HeapBlock &right, bool fSigned, int bytes) { NATIVE_PROFILE_CLR_CORE(); switch (bytes) { case 4: if (fSigned) return CompareValues_Numeric( (CLR_INT32)left.NumericByRefConst().s4, (CLR_INT32)right.NumericByRefConst().s4); else return CompareValues_Numeric( (CLR_UINT32)left.NumericByRefConst().u4, (CLR_UINT32)right.NumericByRefConst().u4); case 8: if (fSigned) return CompareValues_Numeric( (CLR_INT64)left.NumericByRefConst().s8, (CLR_INT64)right.NumericByRefConst().s8); else return CompareValues_Numeric( (CLR_UINT64)left.NumericByRefConst().u8, (CLR_UINT64)right.NumericByRefConst().u8); case 2: if (fSigned) return CompareValues_Numeric( (CLR_INT32)left.NumericByRefConst().s2, (CLR_INT32)right.NumericByRefConst().s2); else return CompareValues_Numeric( (CLR_UINT32)left.NumericByRefConst().u2, (CLR_UINT32)right.NumericByRefConst().u2); case 1: if (fSigned) return CompareValues_Numeric( (CLR_INT32)left.NumericByRefConst().s1, (CLR_INT32)right.NumericByRefConst().s1); else return CompareValues_Numeric( (CLR_UINT32)left.NumericByRefConst().u1, (CLR_UINT32)right.NumericByRefConst().u1); default: return -1; } } static inline int CompareValues_Pointers(const CLR_RT_HeapBlock *left, const CLR_RT_HeapBlock *right) { NATIVE_PROFILE_CLR_CORE(); if (left > right) return 1; if (left < right) return -1; /**************/ return 0; } CLR_INT32 CLR_RT_HeapBlock::Compare_Values(const CLR_RT_HeapBlock &left, const CLR_RT_HeapBlock &right, bool fSigned) { NATIVE_PROFILE_CLR_CORE(); CLR_DataType leftDataType = left.DataType(); CLR_DataType rightDataType = right.DataType(); if (leftDataType == rightDataType) { switch (leftDataType) { #if defined(NANOCLR_APPDOMAINS) case 
DATATYPE_TRANSPARENT_PROXY: #endif case DATATYPE_OBJECT: case DATATYPE_BYREF: { CLR_RT_HeapBlock *leftObj = left.Dereference(); CLR_RT_HeapBlock *rightObj = right.Dereference(); if (!leftObj) { return !rightObj ? 0 : -1; // NULL references always compare smaller than non-NULL ones. } else if (!rightObj) { return 1; // NULL references always compare smaller than non-NULL ones. } return Compare_Values(*leftObj, *rightObj, fSigned); } case DATATYPE_STRING: { CLR_RT_HeapBlock_String *leftStr = (CLR_RT_HeapBlock_String *)&left; CLR_RT_HeapBlock_String *rightStr = (CLR_RT_HeapBlock_String *)&right; return strcmp(leftStr->StringText(), rightStr->StringText()); } case DATATYPE_DELEGATELIST_HEAD: { CLR_RT_HeapBlock_Delegate_List *leftDlg = (CLR_RT_HeapBlock_Delegate_List *)&left; CLR_RT_HeapBlock_Delegate_List *rightDlg = (CLR_RT_HeapBlock_Delegate_List *)&right; CLR_RT_HeapBlock *leftPtr = leftDlg->GetDelegates(); CLR_RT_HeapBlock *rightPtr = rightDlg->GetDelegates(); CLR_UINT32 leftLen = leftDlg->m_length; CLR_UINT32 rightLen = rightDlg->m_length; while (leftLen > 0 && rightLen > 0) { int res = CLR_RT_HeapBlock::Compare_Values(*leftPtr++, *rightPtr++, fSigned); if (res) return res; leftLen--; rightLen--; } if (!leftLen) { return !rightLen ? 0 : -1; // NULL references always compare smaller than non-NULL ones. } else // rightLen != 0 for sure. { return 1; // NULL references always compare smaller than non-NULL ones. 
} } case DATATYPE_DELEGATE_HEAD: { CLR_RT_HeapBlock_Delegate *leftDlg = (CLR_RT_HeapBlock_Delegate *)&left; CLR_RT_HeapBlock_Delegate *rightDlg = (CLR_RT_HeapBlock_Delegate *)&right; CLR_UINT32 leftData = leftDlg->DelegateFtn().m_data; CLR_UINT32 rightData = rightDlg->DelegateFtn().m_data; if (leftData > rightData) return 1; if (leftData < rightData) return -1; return Compare_Values(leftDlg->m_object, rightDlg->m_object, fSigned); } case DATATYPE_CLASS: case DATATYPE_VALUETYPE: case DATATYPE_SZARRAY: case DATATYPE_WEAKCLASS: return CompareValues_Pointers(&left, &right); case DATATYPE_REFLECTION: { const CLR_RT_HeapBlock *ptrLeft; const CLR_RT_HeapBlock *ptrRight; CLR_RT_HeapBlock hbLeft; CLR_RT_HeapBlock hbRight; if (left.ReflectionDataConst().m_kind != right.ReflectionDataConst().m_kind) { ptrLeft = FixReflectionForType(left, hbLeft); ptrRight = FixReflectionForType(right, hbRight); } else { ptrLeft = &left; ptrRight = &right; } return CompareValues_Numeric(*ptrLeft, *ptrRight, false, 8); } //--// #if !defined(NANOCLR_EMULATED_FLOATINGPOINT) case DATATYPE_R4: // deal with special cases: // return 0 if the numbers are unordered (either or both are NaN) // this is post processed in interpreter so '1' will turn into '0' if (__isnand(left.NumericByRefConst().r4) || __isnand(right.NumericByRefConst().r4)) { return 1; } // The infinite values are equal to themselves. 
// this is post processed in interpreter so '0' will turn into '1' else if (__isinfd(left.NumericByRefConst().r4) && __isinfd(right.NumericByRefConst().r4)) { return 0; } // all the rest now else { if (isgreater(left.NumericByRefConst().r4, right.NumericByRefConst().r4)) { return 1; } else if (isless(left.NumericByRefConst().r4, right.NumericByRefConst().r4)) { return -1; } else { return 0; } } case DATATYPE_R8: // deal with special cases: // return 0 if the numbers are unordered (either or both are NaN) // this is post processed in interpreter so '1' will turn into '0' if (__isnand((double)left.NumericByRefConst().r8) || __isnand((double)right.NumericByRefConst().r8)) { return 1; } // The infinite values are equal to themselves. // this is post processed in interpreter so '0' will turn into '1' else if ( __isinfd((double)left.NumericByRefConst().r8) && __isinfd((double)right.NumericByRefConst().r8)) { return 0; } // all the rest now else { if (isgreater((double)left.NumericByRefConst().r8, (double)right.NumericByRefConst().r8)) { return 1; } else if (isless((double)left.NumericByRefConst().r8, (double)right.NumericByRefConst().r8)) { return -1; } else { return 0; } } #else case DATATYPE_R4: case DATATYPE_R8: fSigned = true; #endif case DATATYPE_BOOLEAN: case DATATYPE_I1: case DATATYPE_U1: case DATATYPE_CHAR: case DATATYPE_I2: case DATATYPE_U2: case DATATYPE_I4: case DATATYPE_U4: case DATATYPE_I8: case DATATYPE_U8: case DATATYPE_DATETIME: case DATATYPE_TIMESPAN: return CompareValues_Numeric(left, right, fSigned, c_CLR_RT_DataTypeLookup[leftDataType].m_sizeInBytes); default: // the remaining data types aren't to be handled break; } } else { if (leftDataType == DATATYPE_STRING && rightDataType == DATATYPE_OBJECT) { CLR_RT_HeapBlock *rightObj = right.Dereference(); if (!rightObj) { return 1; // NULL references always compare smaller than non-NULL ones. 
} return Compare_Values(left, *rightObj, fSigned); } if (leftDataType == DATATYPE_OBJECT && rightDataType == DATATYPE_STRING) { CLR_RT_HeapBlock *leftObj = left.Dereference(); if (!leftObj) { return -1; // NULL references always compare smaller than non-NULL ones. } return Compare_Values(*leftObj, right, fSigned); } //--// const CLR_RT_DataTypeLookup &leftDtl = c_CLR_RT_DataTypeLookup[leftDataType]; const CLR_RT_DataTypeLookup &rightDtl = c_CLR_RT_DataTypeLookup[rightDataType]; if ((leftDtl.m_flags & CLR_RT_DataTypeLookup::c_Numeric) && (rightDtl.m_flags & CLR_RT_DataTypeLookup::c_Numeric)) { if (leftDtl.m_sizeInBytes == rightDtl.m_sizeInBytes) { return CompareValues_Numeric(left, right, fSigned, leftDtl.m_sizeInBytes); } else { CLR_Debug::Printf( "\r\n\r\nRUNTIME ERROR: comparing two values of different size: %d vs. %d!!!\r\n\r\n\r\n", leftDataType, rightDataType); #if defined(NANOCLR_PROFILE_NEW) g_CLR_PRF_Profiler.DumpHeap(); #endif } } } return -1; // Not comparable... } //////////////////////////////////////////////////////////////////////////////////////////////////// HRESULT CLR_RT_HeapBlock::NumericAdd(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_I4: m_data.numeric.s4 += right.m_data.numeric.s4; break; case DATATYPE_U4: m_data.numeric.u4 += right.m_data.numeric.u4; break; case DATATYPE_I8: m_data.numeric.s8 += right.m_data.numeric.s8; break; case DATATYPE_U8: m_data.numeric.u8 += right.m_data.numeric.u8; break; case DATATYPE_R4: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT32 orig = (CLR_INT32)m_data.numeric.r4; CLR_INT32 rhs = (CLR_INT32)right.m_data.numeric.r4; #endif m_data.numeric.r4 += right.m_data.numeric.r4; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) if (rhs > 0 && orig > 0 && orig > (CLR_INT32)m_data.numeric.r4) { m_data.numeric.r4 = 0x7FFFFFFF; /*return CLR_E_OUT_OF_RANGE*/ } else if (rhs < 0 && orig < 0 && orig < (CLR_INT32)m_data.numeric.r4) { m_data.numeric.r4 = 
(CLR_INT32)(CLR_UINT32)0x80000000; /*return CLR_E_OUT_OF_RANGE*/ } } #endif break; case DATATYPE_R8: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT64 orig = (CLR_INT64)m_data.numeric.r8; CLR_INT64 rhs = (CLR_INT64)right.m_data.numeric.r8; #endif m_data.numeric.r8 += right.m_data.numeric.r8; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) if (rhs > 0 && orig > 0 && orig > (CLR_INT64)m_data.numeric.r8) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x7FFFFFFFFFFFFFFF); /*return CLR_E_OUT_OF_RANGE*/ } else if (rhs < 0 && orig < 0 && orig < (CLR_INT64)m_data.numeric.r8) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x8000000000000000); /*return CLR_E_OUT_OF_RANGE*/ } } #endif break; // Adding of value to array reference is like advancing the index in array. case DATATYPE_ARRAY_BYREF: { // Retrieve refernced array. Test if it is not NULL CLR_RT_HeapBlock_Array *array = m_data.arrayReference.array; FAULT_ON_NULL(array); // Advance current index. C# on pointer operations multiplies the offset by object size. We need to reverse // it. 
m_data.arrayReference.index += right.m_data.numeric.s4 / array->m_sizeOfElement; } break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericSub(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_U4: m_data.numeric.u4 -= right.m_data.numeric.u4; break; case DATATYPE_I4: m_data.numeric.s4 -= right.m_data.numeric.s4; break; case DATATYPE_U8: m_data.numeric.u8 -= right.m_data.numeric.u8; break; case DATATYPE_I8: m_data.numeric.s8 -= right.m_data.numeric.s8; break; case DATATYPE_R4: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT32 orig = (CLR_INT32)m_data.numeric.r8; CLR_INT32 rhs = (CLR_INT32)right.m_data.numeric.r4; #endif m_data.numeric.r4 -= right.m_data.numeric.r4; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) if (rhs < 0 && orig > 0 && orig > (CLR_INT32)m_data.numeric.r4) { m_data.numeric.r4 = 0x7FFFFFFF; /*return CLR_E_OUT_OF_RANGE*/ } else if (rhs > 0 && orig < 0 && orig < (CLR_INT32)m_data.numeric.r4) { m_data.numeric.r4 = (CLR_INT32)(CLR_UINT32)0x80000000; /*return CLR_E_OUT_OF_RANGE*/ } } #endif break; case DATATYPE_R8: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT64 orig = (CLR_INT64)m_data.numeric.r8; CLR_INT64 rhs = (CLR_INT64)right.m_data.numeric.r8; #endif m_data.numeric.r8 -= right.m_data.numeric.r8; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) if (rhs < 0 && orig > 0 && orig > (CLR_INT64)m_data.numeric.r8) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x7FFFFFFFFFFFFFFF); /*return CLR_E_OUT_OF_RANGE*/ } else if (rhs > 0 && orig < 0 && orig < (CLR_INT64)m_data.numeric.r8) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x8000000000000000); /*return CLR_E_OUT_OF_RANGE*/ } } #endif break; // Substructing of value to array reference is like decreasing the index in array. case DATATYPE_ARRAY_BYREF: { // Retrieve refernced array. 
Test if it is not NULL CLR_RT_HeapBlock_Array *array = m_data.arrayReference.array; FAULT_ON_NULL(array); // Advance current index. C# on pointer operations multiplies the offset by object size. We need to reverse // it. m_data.arrayReference.index -= right.m_data.numeric.s4 / array->m_sizeOfElement; } break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericMul(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_U4: m_data.numeric.u4 = m_data.numeric.u4 * right.m_data.numeric.u4; break; case DATATYPE_I4: m_data.numeric.s4 = m_data.numeric.s4 * right.m_data.numeric.s4; break; case DATATYPE_U8: m_data.numeric.u8 = m_data.numeric.u8 * right.m_data.numeric.u8; break; case DATATYPE_I8: m_data.numeric.s8 = m_data.numeric.s8 * right.m_data.numeric.s8; break; case DATATYPE_R4: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT32 orig = (CLR_INT32)m_data.numeric.r4; CLR_INT32 rhs; #endif m_data.numeric.r4 = m_data.numeric.r4 * right.m_data.numeric.r4; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) rhs = (CLR_INT32)right.m_data.numeric.r4; if (orig != 0 && rhs != 0) { CLR_INT32 ret_value = (CLR_INT32)m_data.numeric.r4; bool isNeg = orig < 0; if (rhs < 0) isNeg = !isNeg; if (!isNeg && (ret_value < 0 || ret_value < orig || ret_value < rhs)) { m_data.numeric.r4 = 0x7FFFFFFF; /* return CLR_E_OUT_OF_RANGE; */ } else if (isNeg && (ret_value > 0 || ret_value > orig || ret_value > rhs)) { m_data.numeric.r4 = (CLR_INT32)(CLR_UINT32)0x80000000; /* return CLR_E_OUT_OF_RANGE; */ } } } #endif break; case DATATYPE_R8: #if defined(NANOCLR_EMULATED_FLOATINGPOINT) { CLR_INT64 orig = (CLR_INT64)m_data.numeric.r8; CLR_INT64 rhs; #endif m_data.numeric.r8 = m_data.numeric.r8 * right.m_data.numeric.r8; #if defined(NANOCLR_EMULATED_FLOATINGPOINT) rhs = (CLR_INT64)right.m_data.numeric.r8; if (orig != 0 && rhs != 0) { CLR_INT64 ret_value = (CLR_INT64)m_data.numeric.r8; bool isNeg 
= orig < 0; if (rhs < 0) isNeg = !isNeg; if (!isNeg && (ret_value < 0 || ret_value < orig || ret_value < rhs)) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x7FFFFFFFFFFFFFFF); /* return CLR_E_OUT_OF_RANGE; */ } else if (isNeg && (ret_value > 0 || ret_value > orig || ret_value > rhs)) { m_data.numeric.r8 = (CLR_INT64)ULONGLONGCONSTANT(0x8000000000000000); /* return CLR_E_OUT_OF_RANGE; */ } } } #endif break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericDiv(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); if (right.IsZero()) NANOCLR_SET_AND_LEAVE(CLR_E_DIVIDE_BY_ZERO); switch (DataType()) { case DATATYPE_U4: case DATATYPE_I4: m_data.numeric.s4 = m_data.numeric.s4 / right.m_data.numeric.s4; break; case DATATYPE_U8: case DATATYPE_I8: m_data.numeric.s8 = m_data.numeric.s8 / right.m_data.numeric.s8; break; case DATATYPE_R4: m_data.numeric.r4 = m_data.numeric.r4 / right.m_data.numeric.r4; break; case DATATYPE_R8: m_data.numeric.r8 = m_data.numeric.r8 / right.m_data.numeric.r8; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericDivUn(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); if (right.IsZero()) NANOCLR_SET_AND_LEAVE(CLR_E_DIVIDE_BY_ZERO); switch (DataType()) { case DATATYPE_I4: case DATATYPE_U4: m_data.numeric.u4 = m_data.numeric.u4 / right.m_data.numeric.u4; break; case DATATYPE_I8: case DATATYPE_U8: m_data.numeric.u8 = m_data.numeric.u8 / right.m_data.numeric.u8; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericRem(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); if (right.IsZero()) NANOCLR_SET_AND_LEAVE(CLR_E_DIVIDE_BY_ZERO); switch (DataType()) { case DATATYPE_U4: m_data.numeric.u4 %= right.m_data.numeric.u4; break; case DATATYPE_I4: m_data.numeric.s4 %= right.m_data.numeric.s4; 
break; case DATATYPE_U8: m_data.numeric.u8 %= right.m_data.numeric.u8; break; case DATATYPE_I8: m_data.numeric.s8 %= right.m_data.numeric.s8; break; #if !defined(NANOCLR_EMULATED_FLOATINGPOINT) case DATATYPE_R4: m_data.numeric.r4 = fmod(m_data.numeric.r4, right.m_data.numeric.r4); break; case DATATYPE_R8: m_data.numeric.r8 = fmod((CLR_DOUBLE_TEMP_CAST)m_data.numeric.r8, (CLR_DOUBLE_TEMP_CAST)right.m_data.numeric.r8); break; #else case DATATYPE_R4: m_data.numeric.r4 %= right.m_data.numeric.r4; break; case DATATYPE_R8: m_data.numeric.r8 %= right.m_data.numeric.r8; break; #endif default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericRemUn(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); if (right.IsZero()) NANOCLR_SET_AND_LEAVE(CLR_E_DIVIDE_BY_ZERO); switch (DataType()) { case DATATYPE_I4: case DATATYPE_U4: m_data.numeric.u4 %= right.m_data.numeric.u4; break; case DATATYPE_I8: case DATATYPE_U8: m_data.numeric.u8 %= right.m_data.numeric.u8; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericShl(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_I4: case DATATYPE_U4: m_data.numeric.u4 <<= right.m_data.numeric.u4; break; case DATATYPE_I8: case DATATYPE_U8: m_data.numeric.u8 <<= right.m_data.numeric.u4; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericShr(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_U4: m_data.numeric.u4 >>= right.m_data.numeric.u4; break; case DATATYPE_I4: m_data.numeric.s4 >>= right.m_data.numeric.u4; break; case DATATYPE_U8: m_data.numeric.u8 >>= right.m_data.numeric.u4; break; case DATATYPE_I8: m_data.numeric.s8 >>= right.m_data.numeric.u4; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } 
NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericShrUn(const CLR_RT_HeapBlock &right) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_I4: case DATATYPE_U4: m_data.numeric.u4 >>= right.m_data.numeric.u4; break; case DATATYPE_I8: case DATATYPE_U8: m_data.numeric.u8 >>= right.m_data.numeric.u4; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } HRESULT CLR_RT_HeapBlock::NumericNeg() { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); switch (DataType()) { case DATATYPE_U4: case DATATYPE_I4: m_data.numeric.s4 = -m_data.numeric.s4; break; case DATATYPE_U8: case DATATYPE_I8: m_data.numeric.s8 = -m_data.numeric.s8; break; case DATATYPE_R4: m_data.numeric.r4 = -m_data.numeric.r4; break; case DATATYPE_R8: m_data.numeric.r8 = -m_data.numeric.r8; break; default: NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } NANOCLR_NOCLEANUP(); } //////////////////////////////////////////////////////////////////////////////////////////////////// CLR_RT_HeapBlock *CLR_RT_HeapBlock::ExtractValueBlock(int offset) { NATIVE_PROFILE_CLR_CORE(); CLR_RT_HeapBlock *ptr = Dereference(); if (ptr) { ptr = &ptr[offset]; } return ptr; } void CLR_RT_HeapBlock::ReadValue(CLR_INT64 &val, int offset) { NATIVE_PROFILE_CLR_CORE(); CLR_RT_HeapBlock *ptr = ExtractValueBlock(offset); if (ptr) { val = ptr->NumericByRefConst().s8; } else { CLR_INT32 val2 = 0; val = val2; } } void CLR_RT_HeapBlock::WriteValue(const CLR_INT64 &val, int offset) { NATIVE_PROFILE_CLR_CORE(); CLR_RT_HeapBlock *ptr = ExtractValueBlock(offset); if (ptr) ptr->NumericByRef().s8 = val; } //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(NANOCLR_APPDOMAINS) void CLR_RT_HeapBlock::SetTransparentProxyReference(CLR_RT_AppDomain *appDomain, CLR_RT_HeapBlock *ptr) { #if defined(_DEBUG) if (ptr) { // Make sure the data points to a MBRO. 
CLR_RT_TypeDef_Instance inst; _ASSERTE(ptr->DataType() == DATATYPE_CLASS); inst.InitializeFromIndex(ptr->ObjectCls()); _ASSERTE((inst.CrossReference().m_flags & CLR_RT_TypeDef_CrossReference::TD_CR_IsMarshalByRefObject) != 0); } #endif m_data.transparentProxy.appDomain = appDomain; m_data.transparentProxy.ptr = ptr; } HRESULT CLR_RT_HeapBlock::TransparentProxyValidate() const { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); CLR_RT_AppDomain *appDomain = TransparentProxyAppDomain(); CLR_RT_HeapBlock *obj = TransparentProxyDereference(); if (appDomain == NULL || !appDomain->IsLoaded()) NANOCLR_SET_AND_LEAVE(CLR_E_APPDOMAIN_EXITED); FAULT_ON_NULL(obj); NANOCLR_NOCLEANUP(); } #endif // NANOCLR_APPDOMAINS //////////////////////////////////////////////////////////////////////////////////////////////////// void CLR_RT_HeapBlock::Relocate__HeapBlock() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_HEAPBLOCK_RELOCATE(this); } void CLR_RT_HeapBlock::Relocate_String() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.string.m_text); #if !defined(NANOCLR_NO_ASSEMBLY_STRINGS) CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.string.m_assm); #endif } void CLR_RT_HeapBlock::Relocate_Obj() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.objectReference.ptr); } void CLR_RT_HeapBlock::Relocate_Cls() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.objectHeader.lock); CLR_RT_GarbageCollector::Heap_Relocate(this + 1, DataSize() - 1); } void CLR_RT_HeapBlock::Relocate_Ref() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.objectReference.ptr); } void CLR_RT_HeapBlock::Relocate_ArrayRef() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void **)&m_data.arrayReference.array); } #if defined(NANOCLR_APPDOMAINS) void CLR_RT_HeapBlock::Relocate_TransparentProxy() { NATIVE_PROFILE_CLR_CORE(); CLR_RT_GarbageCollector::Heap_Relocate((void 
**)&m_data.transparentProxy.ptr); } #endif //--// #if defined(NANOCLR_FILL_MEMORY_WITH_DIRTY_PATTERN) void CLR_RT_HeapBlock::Debug_CheckPointer() const { NATIVE_PROFILE_CLR_CORE(); if (m_id.type.dataType == DATATYPE_OBJECT) { Debug_CheckPointer(Dereference()); } } void CLR_RT_HeapBlock::Debug_CheckPointer(void *ptr) { NATIVE_PROFILE_CLR_CORE(); switch ((size_t)ptr) { case 0xCFCFCFCF: case 0xCBCBCBCB: case 0xABABABAB: case 0xADADADAD: case 0xDFDFDFDF: NANOCLR_STOP(); break; } } void CLR_RT_HeapBlock::Debug_ClearBlock(int data) { NATIVE_PROFILE_CLR_CORE(); CLR_UINT32 size = DataSize(); if (size > 1) { CLR_RT_HeapBlock_Raw *ptr = (CLR_RT_HeapBlock_Raw *)this; CLR_UINT32 raw1 = CLR_RT_HEAPBLOCK_RAW_ID(DATATYPE_OBJECT, 0, 1); CLR_UINT32 raw2; raw2 = data & 0xFF; raw2 = raw2 | (raw2 << 8); raw2 = raw2 | (raw2 << 16); while (--size) { ptr++; ptr->data[0] = raw1; ptr->data[1] = raw2; ptr->data[2] = raw2; } } } #endif
28.861848
120
0.560382
[ "object" ]
bad1f106bbce3a33dcb9a71a73238cd76be144f3
4,762
hpp
C++
Saturn/include/Managers/MemoryManager.hpp
Tackwin/BossRoom
ecad5853e591b9edc54e75448547e20e14964f72
[ "MIT" ]
null
null
null
Saturn/include/Managers/MemoryManager.hpp
Tackwin/BossRoom
ecad5853e591b9edc54e75448547e20e14964f72
[ "MIT" ]
null
null
null
Saturn/include/Managers/MemoryManager.hpp
Tackwin/BossRoom
ecad5853e591b9edc54e75448547e20e14964f72
[ "MIT" ]
null
null
null
#pragma once #include <algorithm> #include <cstdint> #include <utility> #include <type_traits> #include <vector> #include <memory> #include <cmath> // trying the singleton thing // and snake_case sssssssss class MemoryManager { using u08 = std::uint8_t; struct mem_place { u08* location; std::size_t size; }; public: template<typename T> using unique_ptr = std::unique_ptr<T, void(*)(T*)>; template<typename T> struct Allocator { using value_type = std::remove_cv_t<T>; Allocator() = default; template<class U> constexpr Allocator(const Allocator<U>&) noexcept {}; T* allocate(size_t n) { return MemoryManager::I().allocate<T>(n); } void deallocate(T* ptr, size_t n) { MemoryManager::I().deallocate(ptr, n); } template<class U> bool operator==(const Allocator<U>&) const noexcept { return true; } template<class U> bool operator!=(const Allocator<U>&) const noexcept { return false; } }; static MemoryManager& I(); template<typename T, class... Args> static std::shared_ptr<T> make_shared(Args&&... args) { const auto& ptr = MemoryManager::I().allocate<T>(std::forward<Args>(args)...); return std::shared_ptr<T>( ptr, [](T* ptr) { MemoryManager::I().deallocate(ptr); } ); } template<typename T, class... Args> static unique_ptr<T> make_unique(Args&&... 
args) { T* ptr = MemoryManager::I().allocate<T>(std::forward<Args>(args)...); return unique_ptr<T>( ptr, [](T* ptr) { MemoryManager::I().deallocate(ptr); } ); } size_t get_buffer_size() const; size_t get_free_size() const; void initialize_buffer(size_t size); template<typename T> T* allocate(size_t n) { u08* emplacement = nullptr; //testing if the memory place is big enough to hold T for (size_t i = 0u; i < _free_memory.size(); ++i) { mem_place free_place = _free_memory[i]; mem_place free_place_aligned = _free_memory[i]; void* free_place_aligned_location = (void*)free_place_aligned.location; if (!std::align( alignof(T), n * sizeof(T), free_place_aligned_location, free_place_aligned.size )) // if we can't properly align the memory place it's no use continue; free_place_aligned.location = reinterpret_cast<u08*>(free_place_aligned_location); // if it's big enough to hold T then let's use it ! if (sizeof(T) <= free_place_aligned.size) { // we remove the old one _free_memory.erase(std::begin(_free_memory) + i); // so if it isn't _exactly_ the size there's a new place // at the end if (n * sizeof(T) < free_place_aligned.size) { _free_memory.insert( std::begin(_free_memory) + i, { free_place_aligned.location + n * sizeof(T), free_place_aligned.size - n * sizeof(T) } ); } // if the aligned ptr is not the normal one, then there's // a new memory place at the begining if (free_place.location != free_place_aligned.location) { _free_memory.insert( std::begin(_free_memory) + i, { free_place.location, (u32)free_place_aligned.location - (u32)free_place.location } ); } // all the insertion hsould keep the array sorted emplacement = free_place_aligned.location; break; } } if (!emplacement) { throw(std::bad_alloc()); } return reinterpret_cast<T*>(emplacement); } template<typename T, class... Args> T* create(Args&&... 
args) { auto emplacement = MemoryManager::I().allocate<T>(1); return new(emplacement) T(std::forward<Args>(args)...); } template<typename T> void deallocate(T* ptr, size_t n = 1) { bool flag = true; // if the memory place is just following another, we merge them for (size_t i = 0u; i < _free_memory.size(); ++i) { mem_place free_place = _free_memory[i]; if ( (std::size_t)free_place.location > n * sizeof(T) && (free_place.location - n * sizeof(T)) == (u08*)ptr ) { _free_memory.erase(_free_memory.begin() + i); _free_memory.insert( _free_memory.begin() + i, { free_place.location - n * sizeof(T), free_place.size + n * sizeof(T) } ); flag = false; break; } } if (flag) { // else we just push a new memory place _free_memory.push_back({ (u08*)ptr, n * sizeof(T) }); } std::sort( _free_memory.begin(), _free_memory.end(), [](const mem_place& A, const mem_place& B) -> bool { return A.location < B.location; } ); } private: MemoryManager(); ~MemoryManager(); MemoryManager(const MemoryManager&) = delete; MemoryManager& operator=(const MemoryManager&) = delete; size_t _main_size = 0u; u08* _main_buffer = nullptr; std::vector<mem_place> _free_memory; }; // Optional using MM = MemoryManager;
22.046296
71
0.645527
[ "vector" ]
bad85e834604f7eb75b87381b0cf9dd0cdada4cc
341
cpp
C++
src/rasperi_lib/rasperi_model.cpp
kuumies/rasperi
5e854a3c15e788a714b61395fd2064a037154610
[ "MIT" ]
null
null
null
src/rasperi_lib/rasperi_model.cpp
kuumies/rasperi
5e854a3c15e788a714b61395fd2064a037154610
[ "MIT" ]
null
null
null
src/rasperi_lib/rasperi_model.cpp
kuumies/rasperi
5e854a3c15e788a714b61395fd2064a037154610
[ "MIT" ]
null
null
null
/* ---------------------------------------------------------------- * Antti Jumpponen <kuumies@gmail.com> The implementation of kuu::rasperi::Model class. * ---------------------------------------------------------------- */ #include "rasperi_model.h" namespace kuu { namespace rasperi { } // namespace rasperi } // namespace kuu
22.733333
70
0.42522
[ "model" ]
badc8d956e6a5ab6ac1f34855fae102ffc823aa1
5,776
cc
C++
generator/cc/generate_enums.cc
NTNAEEM/hotentot
e578a2185c473301076ece5634113ab663996a3e
[ "MIT" ]
null
null
null
generator/cc/generate_enums.cc
NTNAEEM/hotentot
e578a2185c473301076ece5634113ab663996a3e
[ "MIT" ]
null
null
null
generator/cc/generate_enums.cc
NTNAEEM/hotentot
e578a2185c473301076ece5634113ab663996a3e
[ "MIT" ]
null
null
null
/* The MIT License (MIT) * * Copyright (c) 2015 LabCrypto Org. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <iostream> #include <fstream> #include "cc_generator.h" #include "type_helper.h" #include "templates/templates.h" #include "../hot.h" #include "../service.h" #include "../method.h" #include "../module.h" #include "../argument.h" #include "../struct.h" #include "../declaration.h" #include "../os.h" #include "../string_helper.h" #include "../datetime_helper.h" namespace org { namespace labcrypto { namespace hottentot { namespace generator { namespace cc { void CCGenerator::GenerateEnums ( std::vector< ::org::labcrypto::hottentot::generator::Module*> modules, ::org::labcrypto::hottentot::generator::GenerationConfig &generationConfig, std::map<std::string, std::string> &templates ) { std::string indent = generationConfig.GetIndentString(); /* * Making needed variables and assigning values to them */ std::string enumsFilePath = generationConfig.GetOutDir() + "/enums.h"; /* * Making real values */ std::vector<std::string> totalPackageTokens; std::string namespacesAndEnums = ""; for (uint32_t i = 0; i < modules.size(); i++) { ::org::labcrypto::hottentot::generator::Module *module = modules[i]; std::string ns = "::" + ::org::labcrypto::hottentot::generator::StringHelper::Concat ( ::org::labcrypto::hottentot::generator::StringHelper::Split ( module->GetPackage(), '.' ), "::" ); std::vector<std::string> packageTokens = ::org::labcrypto::hottentot::generator::StringHelper::Split ( module->GetPackage(), '.' 
); totalPackageTokens.insert(totalPackageTokens.end(), packageTokens.begin(), packageTokens.end()); std::string namespacesStart = ""; for (uint32_t i = 0; i < packageTokens.size(); i++) { namespacesStart += "namespace " + ::org::labcrypto::hottentot::generator::StringHelper::MakeLowerCase(packageTokens[i]) + " {\r\n"; } std::string namespacesEnd = ""; for (int32_t i = packageTokens.size() - 1; i >= 0; i--) { namespacesEnd += "} // END OF NAMESPACE " + packageTokens[i] + "\r\n"; } namespacesStart = ::org::labcrypto::hottentot::generator::StringHelper::Trim(namespacesStart); namespacesEnd = ::org::labcrypto::hottentot::generator::StringHelper::Trim(namespacesEnd); std::string enums = ""; for (uint32_t i = 0; i < module->enums_.size(); i++) { enums += indent + "enum " + module->enums_[i]->GetName() + " {\r\n"; std::map<uint16_t, std::string>::iterator finalItemIter = module->enums_[i]->revItems_.end(); --finalItemIter; for (std::map<uint16_t, std::string>::iterator it = module->enums_[i]->revItems_.begin(); it != module->enums_[i]->revItems_.end(); it++) { std::stringstream ss; ss << indent << indent << "k" << module->enums_[i]->GetName() << "___" << it->second << " = " << it->first; if (it != finalItemIter) { ss << ","; } ss << std::endl; enums += ss.str(); } enums += indent + "};\r\n"; } namespacesAndEnums += namespacesStart + "\r\n" + enums + namespacesEnd; } /* * Filling templates with real values */ std::map<std::string, std::string> params; params.insert(std::pair<std::string, std::string>("GENERATION_DATE", ::org::labcrypto::hottentot::generator::DateTimeHelper::GetCurrentDateTime())); params.insert(std::pair<std::string, std::string>("FILENAME", "enums.h")); params.insert(std::pair<std::string, std::string>("HEADER_GUARD", "_" + ::org::labcrypto::hottentot::generator::StringHelper::MakeScreamingSnakeCase ( totalPackageTokens ) + "__ENUMS_H_")); params.insert(std::pair<std::string, std::string>("NAMESPACES_AND_ENUMS", namespacesAndEnums)); 
params.insert(std::pair<std::string, std::string>("INDENT", indent)); std::string enumsTemplate = templates["enums"]; for (std::map<std::string, std::string>::iterator it = params.begin(); it != params.end(); ++it) { enumsTemplate = ::org::labcrypto::hottentot::generator::StringHelper::Replace ( enumsTemplate, "[[[" + it->first + "]]]", it->second ); } /* * Writing final results to files */ std::fstream f; f.open(enumsFilePath.c_str(), std::fstream::out | std::fstream::binary); f << enumsTemplate; f.close(); } } } } } }
38
107
0.61946
[ "vector" ]
badcec92bce128c38525a9289386b1b85fd738a7
3,356
cpp
C++
problems/codeforces/1530/d-secret-santa/code.cpp
brunodccarvalho/competitive
4177c439174fbe749293b9da3445ce7303bd23c2
[ "MIT" ]
7
2020-10-15T22:37:10.000Z
2022-02-26T17:23:49.000Z
problems/codeforces/1530/d-secret-santa/code.cpp
brunodccarvalho/competitive
4177c439174fbe749293b9da3445ce7303bd23c2
[ "MIT" ]
null
null
null
problems/codeforces/1530/d-secret-santa/code.cpp
brunodccarvalho/competitive
4177c439174fbe749293b9da3445ce7303bd23c2
[ "MIT" ]
null
null
null
#include <bits/stdc++.h> #ifdef LOCAL #include "code/formatting.hpp" #else #define debug(...) (void)0 #endif using namespace std; #define long int64_t static_assert(sizeof(int) == 4 && sizeof(long) == 8); mt19937 mt(73); auto stress() { uniform_int_distribution<int> distN(4, 10); int N = distN(mt); uniform_int_distribution<int> distV(0, N - 1); vector<int> a(N); for (int i = 0; i < N; i++) { int j = distV(mt); while (i == j) { j = distV(mt); } a[i] = j; } return a; } int main() { ios::sync_with_stdio(false), cin.tie(nullptr); int T; cin >> T; while (T--) { #ifdef OMEGALUL auto a = stress(); int N = a.size(); debug(N, a); #else int N; cin >> N; vector<int> a(N); for (int i = 0; i < N; i++) { cin >> a[i], a[i]--; } #endif vector<bool> receives_gift(N); vector<int> keep, givers; for (int i = 0; i < N; i++) { if (receives_gift[a[i]]) { givers.push_back(i); } else { keep.push_back(i); receives_gift[a[i]] = true; } } int k = keep.size(); if (k == N - 1) { int i = givers[0]; if (!receives_gift[i]) { for (int j = 0; j < k; j++) { int l = keep[j]; assert(i != l); if (a[l] == a[i]) { keep[j] = i; givers[0] = l; break; } } assert(givers[0] != i); } } vector<bool> is_giver(N); for (int i : givers) { is_giver[i] = true; } vector<int> takers, both; for (int i = 0; i < N; i++) { if (!receives_gift[i]) { if (is_giver[i]) { both.push_back(i); } else { takers.push_back(i); } } } assert(takers.size() + both.size() == givers.size()); debug(keep, givers, takers, both); vector<int> b(N, -1); for (int i : keep) { b[i] = a[i]; } if (takers.empty() && k < N) { assert(k < N - 1); for (int i = 0; i < N - k; i++) { b[givers[i]] = givers[(i + 1) % (N - k)]; } } else { for (int g : givers) { for (int i = 0, B = both.size(); i < B; i++) { int c = both[i]; if (c != g) { if (i + 1 != B) { swap(both[i], both.back()); } both.pop_back(); b[g] = c; break; } } if (b[g] == -1) { assert(g != takers.back()); b[g] = takers.back(); takers.pop_back(); } } assert(both.empty() && takers.empty()); } cout << k 
<< '\n'; for (int i = 0; i < N; i++) { cout << 1 + b[i] << " \n"[i + 1 == N]; } } return 0; }
25.233083
62
0.345054
[ "vector" ]
badd46f7604977dddf7dc9948a0c4bcd9a120d11
5,283
cc
C++
examples/pingpong/client.cc
chengchenwish/evpp
750f87eb7c0f0313c85e280f41ba0f424ad455ce
[ "BSD-3-Clause" ]
3,189
2017-03-04T02:56:39.000Z
2022-03-31T16:06:08.000Z
examples/pingpong/client.cc
chengchenwish/evpp
750f87eb7c0f0313c85e280f41ba0f424ad455ce
[ "BSD-3-Clause" ]
259
2017-03-07T02:01:25.000Z
2022-03-27T09:16:19.000Z
examples/pingpong/client.cc
chengchenwish/evpp
750f87eb7c0f0313c85e280f41ba0f424ad455ce
[ "BSD-3-Clause" ]
990
2017-03-06T03:55:43.000Z
2022-03-24T10:50:18.000Z
// Modified from https://github.com/chenshuo/muduo/blob/master/examples/pingpong/client.cc #include <evpp/tcp_client.h> #include <evpp/event_loop_thread_pool.h> #include <evpp/buffer.h> #include <evpp/tcp_conn.h> class Client; class Session { public: Session(evpp::EventLoop* loop, const std::string& serverAddr/*ip:port*/, const std::string& name, Client* owner) : client_(loop, serverAddr, name), owner_(owner), bytes_read_(0), bytes_written_(0), messages_read_(0) { client_.SetConnectionCallback( std::bind(&Session::OnConnection, this, std::placeholders::_1)); client_.SetMessageCallback( std::bind(&Session::OnMessage, this, std::placeholders::_1, std::placeholders::_2)); } void Start() { client_.Connect(); } void Stop() { client_.Disconnect(); } int64_t bytes_read() const { return bytes_read_; } int64_t messages_read() const { return messages_read_; } private: void OnConnection(const evpp::TCPConnPtr& conn); void OnMessage(const evpp::TCPConnPtr& conn, evpp::Buffer* buf) { LOG_TRACE << "bytes_read=" << bytes_read_ << " bytes_writen=" << bytes_written_; ++messages_read_; bytes_read_ += buf->size(); bytes_written_ += buf->size(); conn->Send(buf); } private: evpp::TCPClient client_; Client* owner_; int64_t bytes_read_; int64_t bytes_written_; int64_t messages_read_; }; class Client { public: Client(evpp::EventLoop* loop, const std::string& serverAddr, // ip:port int blockSize, int sessionCount, int timeout_sec, int threadCount) : loop_(loop), session_count_(sessionCount), timeout_(timeout_sec), connected_count_(0) { loop->RunAfter(evpp::Duration(double(timeout_sec)), std::bind(&Client::HandleTimeout, this)); tpool_.reset(new evpp::EventLoopThreadPool(loop, threadCount)); tpool_->Start(true); for (int i = 0; i < blockSize; ++i) { message_.push_back(static_cast<char>(i % 128)); } for (int i = 0; i < sessionCount; ++i) { char buf[32]; snprintf(buf, sizeof buf, "C%05d", i); Session* session = new Session(tpool_->GetNextLoop(), serverAddr, buf, this); session->Start(); 
sessions_.push_back(session); } } ~Client() { } const std::string& message() const { return message_; } void OnConnect() { if (++connected_count_ == session_count_) { LOG_WARN << "all connected"; } } void OnDisconnect(const evpp::TCPConnPtr& conn) { if (--connected_count_ == 0) { LOG_WARN << "all disconnected"; int64_t totalBytesRead = 0; int64_t totalMessagesRead = 0; for (auto &it : sessions_) { totalBytesRead += it->bytes_read(); totalMessagesRead += it->messages_read(); } LOG_WARN << totalBytesRead << " total bytes read"; LOG_WARN << totalMessagesRead << " total messages read"; LOG_WARN << static_cast<double>(totalBytesRead) / static_cast<double>(totalMessagesRead) << " average message size"; LOG_WARN << static_cast<double>(totalBytesRead) / (timeout_ * 1024 * 1024) << " MiB/s throughput"; loop_->QueueInLoop(std::bind(&Client::Quit, this)); } } private: void Quit() { tpool_->Stop(); loop_->Stop(); for (auto &it : sessions_) { delete it; } sessions_.clear(); while (!tpool_->IsStopped() || !loop_->IsStopped()) { std::this_thread::sleep_for(std::chrono::seconds(1)); } tpool_.reset(); } void HandleTimeout() { LOG_WARN << "stop"; for (auto &it : sessions_) { it->Stop(); } } private: evpp::EventLoop* loop_; std::shared_ptr<evpp::EventLoopThreadPool> tpool_; int session_count_; int timeout_; std::vector<Session*> sessions_; std::string message_; std::atomic<int> connected_count_; }; void Session::OnConnection(const evpp::TCPConnPtr& conn) { if (conn->IsConnected()) { conn->SetTCPNoDelay(true); conn->Send(owner_->message()); owner_->OnConnect(); } else { owner_->OnDisconnect(conn); } } int main(int argc, char* argv[]) { if (argc != 7) { fprintf(stderr, "Usage: client <host_ip> <port> <threads> <blocksize> <sessions> <time_seconds>\n"); return -1; } const char* ip = argv[1]; uint16_t port = static_cast<uint16_t>(atoi(argv[2])); int threadCount = atoi(argv[3]); int blockSize = atoi(argv[4]); int sessionCount = atoi(argv[5]); int timeout = atoi(argv[6]); evpp::EventLoop 
loop; std::string serverAddr = std::string(ip) + ":" + std::to_string(port); Client client(&loop, serverAddr, blockSize, sessionCount, timeout, threadCount); loop.Run(); return 0; } #ifdef WIN32 #include "../winmain-inl.h" #endif
27.515625
108
0.575998
[ "vector" ]
bae0d2decf8f6b393b274a77ce17e724c7d5bc49
2,004
hpp
C++
src/library/include/aseq/io/variant.hpp
mlinderm/aseq
497dfd9b42bc0fa09a09c2db79c063d28c9a5fa0
[ "Apache-2.0" ]
null
null
null
src/library/include/aseq/io/variant.hpp
mlinderm/aseq
497dfd9b42bc0fa09a09c2db79c063d28c9a5fa0
[ "Apache-2.0" ]
null
null
null
src/library/include/aseq/io/variant.hpp
mlinderm/aseq
497dfd9b42bc0fa09a09c2db79c063d28c9a5fa0
[ "Apache-2.0" ]
null
null
null
// // Created by Michael Linderman on 12/13/15. // #pragma once #include <memory> #include <boost/optional.hpp> #include "aseq/io/file_format.hpp" #include "aseq/io/line.hpp" #include "aseq/model/region.hpp" #include "aseq/model/variant_context.hpp" namespace boost { namespace filesystem { class path; } } namespace aseq { namespace io { class VariantHeaderInterface { public: virtual size_t NumSamples() const = 0; virtual const model::Sample& sample(size_t idx) const = 0; virtual void SetSitesOnly() = 0; }; class VariantSourceInterface { public: typedef std::unique_ptr<VariantSourceInterface> FactoryResult; typedef boost::optional<model::VariantContext> NextResult; public: virtual ~VariantSourceInterface() {} virtual FileFormat file_format() const = 0; virtual const VariantHeaderInterface& header() const = 0; virtual bool IsIndexed() const { return false; } virtual void SetRegion(model::Contig contig, model::Pos pos, model::Pos end) { throw util::indexed_access_not_supported(); } virtual NextResult NextVariant() = 0; static FactoryResult MakeVariantSource(std::istream& istream); static FactoryResult MakeVariantSource(const boost::filesystem::path& path); }; class VariantSinkInterface { public: typedef std::unique_ptr<VariantSinkInterface> FactoryResult; public: virtual ~VariantSinkInterface() {} virtual void PushVariant(const model::VariantContext&) = 0; static FactoryResult MakeVariantSink(FileFormat format, std::ostream& ostream); static FactoryResult MakeVariantSink(FileFormat format, const boost::filesystem::path& path); static FactoryResult MakeVariantSink(const VariantSourceInterface& source, std::ostream& ostream, bool sites_only = false); static FactoryResult MakeVariantSink(const VariantSourceInterface& source, const boost::filesystem::path& path, bool sites_only = false); }; } }
27.452055
99
0.717066
[ "model" ]
bae136a1d4f779ca086bbf9f91d034e62035f531
6,298
cpp
C++
deploy/cpp/model_deploy/engine/src/ppinference_engine.cpp
cheneyveron/PaddleX
86f73fc6a66b12c638f642524bfd1cf730e26c4b
[ "Apache-2.0" ]
3,655
2020-03-28T09:19:50.000Z
2022-03-31T13:28:39.000Z
deploy/cpp/model_deploy/engine/src/ppinference_engine.cpp
cheneyveron/PaddleX
86f73fc6a66b12c638f642524bfd1cf730e26c4b
[ "Apache-2.0" ]
829
2020-03-28T04:03:18.000Z
2022-03-31T14:34:30.000Z
deploy/cpp/model_deploy/engine/src/ppinference_engine.cpp
cheneyveron/PaddleX
86f73fc6a66b12c638f642524bfd1cf730e26c4b
[ "Apache-2.0" ]
738
2020-03-28T03:56:46.000Z
2022-03-31T13:11:03.000Z
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "model_deploy/engine/include/ppinference_engine.h" namespace PaddleDeploy { bool Model::PaddleEngineInit(const PaddleEngineConfig& engine_config) { infer_engine_ = std::make_shared<PaddleInferenceEngine>(); InferenceConfig config("paddle"); *(config.paddle_config) = engine_config; return infer_engine_->Init(config); } bool PaddleInferenceEngine::Init(const InferenceConfig& infer_config) { const PaddleEngineConfig& engine_config = *(infer_config.paddle_config); paddle_infer::Config config; if ("" == engine_config.key) { config.SetModel(engine_config.model_filename, engine_config.params_filename); } else { #ifdef PADDLEX_DEPLOY_ENCRYPTION std::string model = decrypt_file(engine_config.model_filename.c_str(), engine_config.key.c_str()); std::string params = decrypt_file(engine_config.params_filename.c_str(), engine_config.key.c_str()); config.SetModelBuffer(model.c_str(), model.size(), params.c_str(), params.size()); #else std::cerr << "Don't open with_encryption on compile" << std::endl; return false; #endif // PADDLEX_DEPLOY_ENCRYPTION } if (engine_config.use_mkl && !engine_config.use_gpu) { config.EnableMKLDNN(); config.SetCpuMathLibraryNumThreads(engine_config.mkl_thread_num); config.SetMkldnnCacheCapacity(10); } if (engine_config.use_gpu) { config.EnableUseGpu(100, engine_config.gpu_id); } else { config.DisableGpu(); } 
config.SwitchUseFeedFetchOps(false); config.SwitchSpecifyInputNames(true); config.SwitchIrOptim(engine_config.use_ir_optim); config.EnableMemoryOptim(); if (engine_config.use_trt && engine_config.use_gpu) { paddle_infer::PrecisionType precision; if (engine_config.precision == 0) { precision = paddle_infer::PrecisionType::kFloat32; } else if (engine_config.precision == 1) { precision = paddle_infer::PrecisionType::kHalf; } else if (engine_config.precision == 2) { precision = paddle_infer::PrecisionType::kInt8; } else { std::cerr << "Can not support the set precision" << std::endl; return false; } config.EnableTensorRtEngine( engine_config.max_workspace_size /* workspace_size*/, engine_config.max_batch_size /* max_batch_size*/, engine_config.min_subgraph_size /* min_subgraph_size*/, precision /* precision*/, engine_config.use_static /* use_static*/, engine_config.use_calib_mode /* use_calib_mode*/); if (engine_config.min_input_shape.size() != 0) { config.SetTRTDynamicShapeInfo(engine_config.min_input_shape, engine_config.max_input_shape, engine_config.optim_input_shape); } } predictor_ = std::move(paddle_infer::CreatePredictor(config)); return true; } bool PaddleInferenceEngine::Infer(const std::vector<DataBlob>& inputs, std::vector<DataBlob>* outputs) { if (inputs.size() == 0) { std::cerr << "empty input image on PaddleInferenceEngine" << std::endl; return true; } auto input_names = predictor_->GetInputNames(); for (int i = 0; i < inputs.size(); i++) { auto in_tensor = predictor_->GetInputHandle(input_names[i]); in_tensor->Reshape(inputs[i].shape); if (inputs[i].dtype == FLOAT32) { float* im_tensor_data; im_tensor_data = (float*)(inputs[i].data.data()); // NOLINT in_tensor->CopyFromCpu(im_tensor_data); } else if (inputs[i].dtype == INT64) { int64_t* im_tensor_data; im_tensor_data = (int64_t*)(inputs[i].data.data()); // NOLINT in_tensor->CopyFromCpu(im_tensor_data); } else if (inputs[i].dtype == INT32) { int* im_tensor_data; im_tensor_data = 
(int*)(inputs[i].data.data()); // NOLINT in_tensor->CopyFromCpu(im_tensor_data); } else if (inputs[i].dtype == INT8) { uint8_t* im_tensor_data; im_tensor_data = (uint8_t*)(inputs[i].data.data()); // NOLINT in_tensor->CopyFromCpu(im_tensor_data); } else { std::cerr << "There's unexpected input dtype: " << inputs[i].dtype << std::endl; return false; } } // predict predictor_->Run(); // get output auto output_names = predictor_->GetOutputNames(); for (const auto output_name : output_names) { auto output_tensor = predictor_->GetOutputHandle(output_name); auto output_tensor_shape = output_tensor->shape(); DataBlob output; output.name = output_name; output.shape.assign(output_tensor_shape.begin(), output_tensor_shape.end()); output.dtype = paddle_infer::DataType(output_tensor->type()); output.lod = output_tensor->lod(); int size = 1; for (const auto &i : output_tensor_shape) { size *= i; } if (output.dtype == 0) { output.data.resize(size * sizeof(float)); output_tensor->CopyToCpu(reinterpret_cast<float *>(output.data.data())); } else if (output.dtype == 1) { output.data.resize(size * sizeof(int64_t)); output_tensor->CopyToCpu(reinterpret_cast<int64_t *>(output.data.data())); } else if (output.dtype == 2) { output.data.resize(size * sizeof(int)); output_tensor->CopyToCpu(reinterpret_cast<int *>(output.data.data())); } else if (output.dtype == 3) { output.data.resize(size * sizeof(uint8_t)); output_tensor->CopyToCpu(reinterpret_cast<uint8_t *>(output.data.data())); } outputs->push_back(std::move(output)); } return true; } } // namespace PaddleDeploy
39.860759
80
0.666084
[ "shape", "vector", "model" ]
bae70c5a653fe09e90f1029a0c285fbdae2df222
19,292
cpp
C++
novatel_gps_driver/test/parser_tests.cpp
siqb/novatel_gps_driver
333ff62ea9fab635ba001c751ed6814c066c9bdf
[ "BSD-3-Clause" ]
null
null
null
novatel_gps_driver/test/parser_tests.cpp
siqb/novatel_gps_driver
333ff62ea9fab635ba001c751ed6814c066c9bdf
[ "BSD-3-Clause" ]
null
null
null
novatel_gps_driver/test/parser_tests.cpp
siqb/novatel_gps_driver
333ff62ea9fab635ba001c751ed6814c066c9bdf
[ "BSD-3-Clause" ]
null
null
null
// ***************************************************************************** // // Copyright (c) 2017, Southwest Research Institute® (SwRI®) // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Southwest Research Institute® (SwRI®) nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL SOUTHWEST RESEARCH INSTITUTE BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // ***************************************************************************** #include <novatel_gps_driver/parsers/bestpos.h> #include <novatel_gps_driver/parsers/gpgsv.h> #include <novatel_gps_driver/novatel_message_extractor.h> #include <novatel_gps_driver/parsers/bestxyz.h> #include <novatel_gps_driver/parsers/heading2.h> #include <novatel_gps_driver/parsers/dual_antenna_heading.h> #include <gtest/gtest.h> #include <novatel_gps_driver/parsers/inspva.h> #include <novatel_gps_driver/parsers/insstdev.h> #include <novatel_gps_driver/parsers/corrimudata.h> #include <novatel_gps_driver/parsers/inscov.h> TEST(ParserTestSuite, testBestposAsciiParsing) { novatel_gps_driver::BestposParser parser; std::string bestpos_str = "#BESTPOSA,ICOM1,0,87.5,FINESTEERING,1956,157432.000,00000800,7145,6938;" "SOL_COMPUTED,SINGLE,29.44391220792,-98.61476921244,261.4344,-26.0000,WGS84,2.1382," "3.1092,4.0429,\"\",0.000,0.000,8,8,8,8,0,06,00,03*ecf2202b\r\n" "#BESTPOSA,COM1,0,83.5,FINESTEERING,1419,336148.000,02000040,6145,2724;SOL_COMPUTED,SINGLE," "51.11636418888,-114.03832502118,1064.9520,-16.2712,WGS84,1.6961,1.3636,3.6449,\"\"," "0.000,0.000,8,8,8,8,0,0,0,06,0,03*f181ad10\r\n" "#BESTPOSA,COM1,0,78.5,FINESTEERING,1419,336208.000,02000040,6145,2724;SOL_COMPUTED," "NARROW_INT,51.11635910984,-114.03833105168,1063.8416,-16.2712,WGS84,0.0135,0.0084," "0.0172,\"AAAA\",1.000,0.000,8,8,8,8,0,01,0,03*072421c0\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(bestpos_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(3, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = 
novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::NovatelPositionPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ(0x800, msg->novatel_msg_header.receiver_status.original_status_code); ASSERT_EQ(true, msg->novatel_msg_header.receiver_status.usb_buffer_overrun); ASSERT_EQ("SOL_COMPUTED", msg->solution_status); ASSERT_DOUBLE_EQ(29.44391220792, msg->lat); ASSERT_DOUBLE_EQ(-98.61476921244, msg->lon); } TEST(ParserTestSuite, testCorrimudataAsciiParsing) { novatel_gps_driver::CorrImuDataParser parser; std::string sentence_str = "#CORRIMUDATAA,COM1,0,77.5,FINESTEERING,1769,237601.000,02000020," "bdba,12597;1769,237601.000000000,-0.000003356,0.000002872,0.000001398,0.000151593," "0.000038348,-0.000078820*e370e1d9\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(sentence_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::NovatelCorrectedImuDataPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ(1769, msg->gps_week_num); ASSERT_DOUBLE_EQ(237601.0, msg->gps_seconds); ASSERT_DOUBLE_EQ(-0.000003356, msg->pitch_rate); ASSERT_DOUBLE_EQ(0.000002872, msg->roll_rate); ASSERT_DOUBLE_EQ(0.000001398, msg->yaw_rate); ASSERT_DOUBLE_EQ(0.000151593, msg->lateral_acceleration); ASSERT_DOUBLE_EQ(0.000038348, msg->longitudinal_acceleration); ASSERT_DOUBLE_EQ(-0.00007882, msg->vertical_acceleration); } TEST(ParserTestSuite, 
testGpgsvParsing) { novatel_gps_driver::GpgsvParser parser; std::string sentence_str = "$GPGSV,3,3,11,12,07,00.,32,13,03,227,36,22,0.,041,*4A\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(sentence_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(1, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(0, novatel_sentences.size()); novatel_gps_driver::NmeaSentence sentence = nmea_sentences.front(); ASSERT_EQ(parser.GetMessageName(), sentence.id); ASSERT_FALSE(sentence.body.empty()); novatel_gps_msgs::GpgsvPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ(3, msg->n_msgs); ASSERT_EQ(3, msg->msg_number); ASSERT_EQ(3, msg->satellites.size()); ASSERT_EQ(11, msg->n_satellites); ASSERT_EQ(12, msg->satellites[0].prn); ASSERT_EQ(7, msg->satellites[0].elevation); ASSERT_EQ(0, msg->satellites[0].azimuth); ASSERT_EQ(32, msg->satellites[0].snr); ASSERT_EQ(13, msg->satellites[1].prn); ASSERT_EQ(3, msg->satellites[1].elevation); ASSERT_EQ(227, msg->satellites[1].azimuth); ASSERT_EQ(36, msg->satellites[1].snr); ASSERT_EQ(22, msg->satellites[2].prn); ASSERT_EQ(0, msg->satellites[2].elevation); ASSERT_EQ(41, msg->satellites[2].azimuth); ASSERT_EQ(-1, msg->satellites[2].snr); } TEST(ParserTestSuite, testInscovAsciiParsing) { novatel_gps_driver::InscovParser parser; std::string sentence_str = "#INSCOVA,COM1,0,66.5,FINESTEERING,1959,336623.000,00000000," "f078,13754;1959,336623.000000000,0.0211295047125775,-0.0018214277429204,-0.0016153828661881," "-0.0018214277429204,0.0174981375607521,0.0049878113409426,-0.0016153828661881," "0.0049878113409426,0.0285474196118174,0.0000332609098342,-0.0000003409117564," 
"0.0000018468158360,-0.0000003409117564,0.0000340563145112,-0.0000136598582783," "0.0000018468158360,-0.0000136598582783,0.1515644215075894,0.0000008850783906," "0.0000000006144488,-0.0000001633832672,0.0000000006144488,0.0000010749675168," "0.0000000004985751,-0.0000001633832672,0.0000000004985751,0.0000009343218169*bc5352ab\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(sentence_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::InscovPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ(1959, msg->week); ASSERT_DOUBLE_EQ(336623.0, msg->seconds); ASSERT_DOUBLE_EQ(0.0211295047125775, msg->position_covariance[0]); ASSERT_DOUBLE_EQ(0.0000009343218169, msg->velocity_covariance[8]); } TEST(ParserTestSuite, testInspvaAsciiParsing) { novatel_gps_driver::InspvaParser parser; std::string sentence_str = "#INSPVAA,COM1,0,31.0,FINESTEERING,1264,144088.000,02040000," "5615,1541;1264,144088.002284950,51.116827527,-114.037738908,401.191547167,354.846489850," "108.429407241,-10.837482850,1.116219952,-3.476059035,7.372686190,INS_ALIGNMENT_COMPLETE*a2913d36\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; 
extractor.ExtractCompleteMessages(sentence_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::InspvaPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ(1264, msg->week); ASSERT_DOUBLE_EQ(144088.002284950, msg->seconds); ASSERT_DOUBLE_EQ(51.116827527, msg->latitude); ASSERT_DOUBLE_EQ(-114.037738908, msg->longitude); ASSERT_DOUBLE_EQ(401.191547167, msg->height); ASSERT_DOUBLE_EQ(354.846489850, msg->north_velocity); ASSERT_DOUBLE_EQ(108.429407241, msg->east_velocity); ASSERT_DOUBLE_EQ(-10.837482850, msg->up_velocity); ASSERT_DOUBLE_EQ(1.116219952, msg->roll); ASSERT_DOUBLE_EQ(-3.476059035, msg->pitch); ASSERT_DOUBLE_EQ(7.372686190, msg->azimuth); ASSERT_EQ("INS_ALIGNMENT_COMPLETE", msg->status); } TEST(ParserTestSuite, testInsstdevAsciiParsing) { novatel_gps_driver::InsstdevParser parser; std::string sentence_str = "#INSSTDEVA,COM1,0,78.0,FINESTEERING,1907,233990.000,02000020," "3e6d,32768;0.4372,0.3139,0.7547,0.0015,0.0015,0.0014,3.7503,3.7534,5.1857,26000005," "0,0,01ffd1bf,0*3deca7d2\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(sentence_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::InsstdevPtr 
msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_FLOAT_EQ(0.4372, msg->latitude_dev); ASSERT_FLOAT_EQ(0.3139, msg->longitude_dev); ASSERT_FLOAT_EQ(0.7547, msg->height_dev); ASSERT_FLOAT_EQ(0.0015, msg->north_velocity_dev); ASSERT_FLOAT_EQ(0.0015, msg->east_velocity_dev); ASSERT_FLOAT_EQ(0.0014, msg->up_velocity_dev); ASSERT_FLOAT_EQ(3.7503, msg->roll_dev); ASSERT_FLOAT_EQ(3.7534, msg->pitch_dev); ASSERT_FLOAT_EQ(5.1857, msg->azimuth_dev); ASSERT_EQ(26000005, msg->extended_solution_status.original_mask); } TEST(ParserTestSuite, testBestxyzAsciiParsing) { novatel_gps_driver::BestxyzParser parser; std::string bestxyz_str = "#BESTXYZA,COM1,0,55.0,FINESTEERING,1419,340033.000,02000040,d821,2724;" "SOL_COMPUTED,NARROW_INT,-1634531.5683,-3664618.0326,4942496.3270,0.0099," "0.0219,0.0115,SOL_COMPUTED,NARROW_INT,0.0011,-0.0049,-0.0001,0.0199,0.0439," "0.0230,\"AAAA\",0.250,1.000,0.000,12,11,11,11,0,01,0,33*1fe2f509\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(bestxyz_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::NovatelXYZPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ("SOL_COMPUTED", msg->solution_status); ASSERT_EQ("NARROW_INT", msg->position_type); ASSERT_DOUBLE_EQ(-1634531.5683, msg->x); ASSERT_DOUBLE_EQ(-3664618.0326, msg->y); ASSERT_DOUBLE_EQ(4942496.3270, msg->z); ASSERT_FLOAT_EQ(0.0099, msg->x_sigma); ASSERT_FLOAT_EQ(0.0219, msg->y_sigma); 
ASSERT_FLOAT_EQ(0.0115, msg->z_sigma); ASSERT_EQ("SOL_COMPUTED", msg->velocity_solution_status); ASSERT_EQ("NARROW_INT", msg->velocity_type); ASSERT_DOUBLE_EQ(0.0011, msg->x_vel); ASSERT_DOUBLE_EQ(-0.0049, msg->y_vel); ASSERT_DOUBLE_EQ(-0.0001, msg->z_vel); ASSERT_FLOAT_EQ(0.0199, msg->x_vel_sigma); ASSERT_FLOAT_EQ(0.0439, msg->y_vel_sigma); ASSERT_FLOAT_EQ(0.0230, msg->z_vel_sigma); ASSERT_EQ("\"AAAA\"", msg->base_station_id); ASSERT_FLOAT_EQ(0.250, msg->velocity_latency); ASSERT_FLOAT_EQ(1.000, msg->diff_age); ASSERT_FLOAT_EQ(0.000, msg->solution_age); ASSERT_EQ(12, msg->num_satellites_tracked); ASSERT_EQ(11, msg->num_satellites_used_in_solution); ASSERT_EQ(11, msg->num_gps_and_glonass_l1_used_in_solution); ASSERT_EQ(11, msg->num_gps_and_glonass_l1_and_l2_used_in_solution); ASSERT_EQ(1, msg->extended_solution_status.original_mask); } TEST(ParserTestSuite, testHeading2AsciiParsing) { novatel_gps_driver::Heading2Parser parser; std::string heading2_str = "#HEADING2A,COM1,0,39.5,FINESTEERING,1622,422892.200,02040000,f9bf,6521;" "SOL_COMPUTED,NARROW_INT,0.927607417,178.347869873,-1.3037414550,0,0.261901051,0.391376048," "\"R222\",\"AAAA\",18,17,17,16,0,01,0,33*8c48d77c\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(heading2_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::NovatelHeading2Ptr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ("SOL_COMPUTED", msg->solution_status); 
ASSERT_EQ("NARROW_INT", msg->position_type); ASSERT_FLOAT_EQ(0.927607417, msg->baseline_length); ASSERT_FLOAT_EQ(178.347869873, msg->heading); ASSERT_FLOAT_EQ(-1.3037414550, msg->pitch); ASSERT_FLOAT_EQ(0.261901051, msg->heading_sigma); ASSERT_FLOAT_EQ(0.391376048, msg->pitch_sigma); ASSERT_EQ("\"R222\"", msg->rover_station_id); ASSERT_EQ("\"AAAA\"", msg->master_station_id); ASSERT_EQ(18, msg->num_satellites_tracked); ASSERT_EQ(17, msg->num_satellites_used_in_solution); ASSERT_EQ(17, msg->num_satellites_above_elevation_mask_angle); ASSERT_EQ(16, msg->num_satellites_above_elevation_mask_angle_l2); ASSERT_EQ(0, msg->solution_source); ASSERT_EQ(1, msg->extended_solution_status.original_mask); } TEST(ParserTestSuite, testDualAntennaHeadingAsciiParsing) { novatel_gps_driver::DualAntennaHeadingParser parser; std::string heading_str = "#DUALANTENNAHEADINGA,UNKNOWN,0,66.5,FINESTEERING,1949,575614.000,02000000,d426,32768;" "SOL_COMPUTED,NARROW_INT,-1.000000000,255.538528442,0.006041416,0.0,0.043859947,0.052394450," "\"J56X\",24,18,18,17,04,01,00,33*1f082ec5\r\n"; std::string extracted_str; novatel_gps_driver::NovatelMessageExtractor extractor; std::vector<novatel_gps_driver::NmeaSentence> nmea_sentences; std::vector<novatel_gps_driver::NovatelSentence> novatel_sentences; std::vector<novatel_gps_driver::BinaryMessage> binary_messages; std::string remaining; extractor.ExtractCompleteMessages(heading_str, nmea_sentences, novatel_sentences, binary_messages, remaining); ASSERT_EQ(0, nmea_sentences.size()); ASSERT_EQ(0, binary_messages.size()); ASSERT_EQ(1, novatel_sentences.size()); novatel_gps_driver::NovatelSentence sentence = novatel_sentences.front(); ASSERT_EQ(parser.GetMessageName() + "A", sentence.id); novatel_gps_msgs::NovatelDualAntennaHeadingPtr msg = parser.ParseAscii(sentence); ASSERT_NE(msg.get(), nullptr); ASSERT_EQ("SOL_COMPUTED", msg->solution_status); ASSERT_EQ("NARROW_INT", msg->position_type); ASSERT_FLOAT_EQ(-1.000000000, msg->baseline_length); 
ASSERT_FLOAT_EQ(255.538528442, msg->heading); ASSERT_FLOAT_EQ(0.006041416, msg->pitch); ASSERT_FLOAT_EQ(0.043859947, msg->heading_sigma); ASSERT_FLOAT_EQ(0.052394450, msg->pitch_sigma); ASSERT_EQ("\"J56X\"", msg->station_id); ASSERT_EQ(24, msg->num_satellites_tracked); ASSERT_EQ(18, msg->num_satellites_used_in_solution); ASSERT_EQ(18, msg->num_satellites_above_elevation_mask_angle); ASSERT_EQ(17, msg->num_satellites_above_elevation_mask_angle_l2); ASSERT_EQ(1, msg->solution_source); ASSERT_EQ(1, msg->extended_solution_status.original_mask); } int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
41.757576
115
0.755287
[ "vector" ]
bae8f9066576b553a40c364404e7e96af0469f45
2,664
cpp
C++
pkgs/apps/facesim/src/Public_Library/Rigid_Bodies/MASS_PROPERTIES_3D.cpp
relokin/parsec
75d63d9bd2368913343be9037e301947ecf78f7f
[ "BSD-3-Clause" ]
2
2017-04-24T22:37:28.000Z
2020-05-26T01:57:37.000Z
pkgs/apps/facesim/src/Public_Library/Rigid_Bodies/MASS_PROPERTIES_3D.cpp
cota/parsec2-aarch64
cdf7da348afd231dbe067266f24dc14d22f5cebf
[ "BSD-3-Clause" ]
null
null
null
pkgs/apps/facesim/src/Public_Library/Rigid_Bodies/MASS_PROPERTIES_3D.cpp
cota/parsec2-aarch64
cdf7da348afd231dbe067266f24dc14d22f5cebf
[ "BSD-3-Clause" ]
null
null
null
//##################################################################### // Copyright 2003-2005, Zhaosheng Bao, Ron Fedkiw, Eran Guendelman, Geoffrey Irving, Joseph Teran. // This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt. //##################################################################### #include "MASS_PROPERTIES_3D.h" #include "../Geometry/TRIANGULATED_SURFACE.h" #include "../Utilities/DEBUG_UTILITIES.h" using namespace PhysBAM; //##################################################################### // Function Compute_Volume_Integrals //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Compute_Volume_Integrals() {NOT_IMPLEMENTED();} //##################################################################### // Function Compute_Thin_Shell_Integrals //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Compute_Thin_Shell_Integrals() {NOT_IMPLEMENTED();} //##################################################################### // Function Compute_Face_Integrals //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Compute_Face_Integrals(const TRIANGLE_3D<T>& triangle) {NOT_IMPLEMENTED();} //##################################################################### // Function Compute_Projection_Integrals //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Compute_Projection_Integrals(const TRIANGLE_3D<T> &triangle) {NOT_IMPLEMENTED();} //##################################################################### // Function Get_Center_Of_Mass_And_Inertia_Tensor //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Get_Center_Of_Mass_And_Inertia_Tensor(VECTOR_3D<T>& 
center_of_mass,SYMMETRIC_MATRIX_3X3<T>& inertia_tensor) const {NOT_IMPLEMENTED();} //##################################################################### // Function Transform_To_Object_Frame //##################################################################### template<class T> void MASS_PROPERTIES_3D<T>:: Transform_To_Object_Frame(VECTOR_3D<T>& center_of_mass,QUATERNION<T>& orientation,DIAGONAL_MATRIX_3X3<T>& moment_of_inertia,SOLIDS_PARTICLE<T,VECTOR_3D<T> >& particles) const {NOT_IMPLEMENTED();} //##################################################################### template class MASS_PROPERTIES_3D<double>; template class MASS_PROPERTIES_3D<float>;
49.333333
174
0.488739
[ "geometry" ]
baee1abdc0d3c0b082665cf3b33a46690bc0f7b4
4,055
cxx
C++
Testing/Code/Filter/MainSplitUnstructuredHexahedronGridCellTest.cxx
Piyusha23/IAFEMesh
e91b34c9eaa9c8ecc4ebb5d16f4c13f330d75c9f
[ "BSD-4-Clause-UC" ]
null
null
null
Testing/Code/Filter/MainSplitUnstructuredHexahedronGridCellTest.cxx
Piyusha23/IAFEMesh
e91b34c9eaa9c8ecc4ebb5d16f4c13f330d75c9f
[ "BSD-4-Clause-UC" ]
null
null
null
Testing/Code/Filter/MainSplitUnstructuredHexahedronGridCellTest.cxx
Piyusha23/IAFEMesh
e91b34c9eaa9c8ecc4ebb5d16f4c13f330d75c9f
[ "BSD-4-Clause-UC" ]
null
null
null
/*========================================================================= Program: MIMX Meshing Toolkit Module: $RCSfile: MainSplitUnstructuredHexahedronGridCellTest.cxx,v $ Language: C++ Date: $Date: 2012/12/07 19:08:59 $ Version: $Revision: 1.1.1.1 $ Musculoskeletal Imaging, Modelling and Experimentation (MIMX) Center for Computer Aided Design The University of Iowa Iowa City, IA 52242 http://www.ccad.uiowa.edu/mimx/ Copyright (c) The University of Iowa. All rights reserved. See MIMXCopyright.txt or http://www.ccad.uiowa.edu/mimx/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notices for more information. =========================================================================*/ #if defined(_MSC_VER) #pragma warning ( disable : 4786 ) #endif #include "vtkMimxSplitUnstructuredHexahedronGridCell.h" #include "vtkDataSetMapper.h" #include "vtkIdList.h" #include "vtkRenderer.h" #include "vtkRenderWindow.h" #include "vtkRenderWindowInteractor.h" #include "vtkUnstructuredGrid.h" #include "vtkUnstructuredGridReader.h" int main(int argc, char * argv []) { if( argc < 2 ) { std::cerr << "MainSplitUnstructuredHexahedronGridCellTest " << " InputVtkUnstructuredGrid" << std::endl; return EXIT_FAILURE; } vtkUnstructuredGridReader *reader = vtkUnstructuredGridReader::New(); reader->SetFileName( argv[1] ); vtkMimxSplitUnstructuredHexahedronGridCell *split1 = vtkMimxSplitUnstructuredHexahedronGridCell::New(); vtkIdList *idlist1 = vtkIdList::New(); idlist1->SetNumberOfIds(2); idlist1->SetId(0,11); idlist1->SetId(1,13); split1->SetInput(reader->GetOutput()); split1->SetIdList(idlist1); split1->Update(); std::cout << "Test 1 - Number of Cells: " << split1->GetOutput()->GetNumberOfCells() << std::endl; if ( split1->GetOutput()->GetNumberOfCells() != 4 ) { std::cout << "Test 1 - Invalid number of cells" << std::endl; return EXIT_FAILURE; } 
vtkMimxSplitUnstructuredHexahedronGridCell *split2 = vtkMimxSplitUnstructuredHexahedronGridCell::New(); split2->SetInput(split1->GetOutput()); vtkIdList *idlist2 = vtkIdList::New(); idlist2->SetNumberOfIds(2); idlist2->SetId(0,13); idlist2->SetId(1,17); split2->SetIdList(idlist2); split2->Update(); std::cout << "Test 2 - Number of Cells: " << split2->GetOutput()->GetNumberOfCells() << std::endl; if ( split2->GetOutput()->GetNumberOfCells() != 5 ) { std::cout << "Test 2 - Invalid number of cells" << std::endl; return EXIT_FAILURE; } vtkMimxSplitUnstructuredHexahedronGridCell *split3 = vtkMimxSplitUnstructuredHexahedronGridCell::New(); split3->SetInput(split2->GetOutput()); vtkIdList *idlist3 = vtkIdList::New(); idlist3->SetNumberOfIds(2); idlist3->SetId(0,13); idlist3->SetId(1,21); split3->SetIdList(idlist3); split3->Update(); std::cout << "Test 3 - Number of Cells: " << split3->GetOutput()->GetNumberOfCells() << std::endl; if ( split3->GetOutput()->GetNumberOfCells() != 6 ) { std::cout << "Test 3 - Invalid number of cells" << std::endl; return EXIT_FAILURE; } /* Turn off the Graphical User Interface testing ************************************************* vtkRenderer* ren = vtkRenderer::New(); vtkDataSetMapper *mapper = vtkDataSetMapper::New(); mapper->SetInput(split1->GetOutput()); vtkActor *actor = vtkActor::New(); actor->SetMapper(mapper); ren->AddActor(actor); vtkRenderWindow* renwin = vtkRenderWindow::New(); renwin->AddRenderer(ren); vtkRenderWindowInteractor* iren = vtkRenderWindowInteractor::New(); iren->SetRenderWindow(renwin); renwin->Render(); iren->Start(); reader->Delete(); split1->Delete(); actor->Delete(); mapper->Delete(); idlist1->Delete(); idlist2->Delete(); ren->Delete(); renwin->Delete(); iren->Delete(); *************************************************/ return EXIT_SUCCESS; }
31.434109
105
0.661159
[ "render" ]
baf165f88af50ae0657f166b614c1a96ddc0dd2e
16,597
hpp
C++
include/NUnit/Framework/Constraints/Constraint.hpp
marksteward/BeatSaber-Quest-Codegen
a76f063f71cef207a9f048ad7613835f554911a7
[ "Unlicense" ]
null
null
null
include/NUnit/Framework/Constraints/Constraint.hpp
marksteward/BeatSaber-Quest-Codegen
a76f063f71cef207a9f048ad7613835f554911a7
[ "Unlicense" ]
null
null
null
include/NUnit/Framework/Constraints/Constraint.hpp
marksteward/BeatSaber-Quest-Codegen
a76f063f71cef207a9f048ad7613835f554911a7
[ "Unlicense" ]
null
null
null
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once // Begin includes #include "extern/beatsaber-hook/shared/utils/typedefs.h" #include <initializer_list> #include "extern/beatsaber-hook/shared/utils/byref.hpp" // Including type: NUnit.Framework.Constraints.IConstraint #include "NUnit/Framework/Constraints/IConstraint.hpp" #include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" #include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp" #include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp" #include "extern/beatsaber-hook/shared/utils/utils.h" // Completed includes // Begin forward declares // Forward declaring namespace: System namespace System { // Forward declaring type: Lazy`1<T> template<typename T> class Lazy_1; } // Forward declaring namespace: NUnit::Framework::Constraints namespace NUnit::Framework::Constraints { // Forward declaring type: ConstraintBuilder class ConstraintBuilder; // Forward declaring type: ConstraintResult class ConstraintResult; } // Completed forward declares // Type namespace: NUnit.Framework.Constraints namespace NUnit::Framework::Constraints { // Size: 0x30 #pragma pack(push, 1) // Autogenerated type: NUnit.Framework.Constraints.Constraint // [TokenAttribute] Offset: FFFFFFFF class Constraint : public ::Il2CppObject/*, public NUnit::Framework::Constraints::IConstraint*/ { public: // private System.Lazy`1<System.String> _displayName // Size: 0x8 // Offset: 0x10 System::Lazy_1<::Il2CppString*>* displayName; // Field size check static_assert(sizeof(System::Lazy_1<::Il2CppString*>*) == 0x8); // [DebuggerBrowsableAttribute] Offset: 0xE9D538 // private System.String <Description>k__BackingField // Size: 0x8 // Offset: 0x18 ::Il2CppString* Description; // Field size check static_assert(sizeof(::Il2CppString*) == 0x8); // [DebuggerBrowsableAttribute] Offset: 0xE9D574 // private System.Object[] 
<Arguments>k__BackingField // Size: 0x8 // Offset: 0x20 ::Array<::Il2CppObject*>* Arguments; // Field size check static_assert(sizeof(::Array<::Il2CppObject*>*) == 0x8); // [DebuggerBrowsableAttribute] Offset: 0xE9D5B0 // private NUnit.Framework.Constraints.ConstraintBuilder <Builder>k__BackingField // Size: 0x8 // Offset: 0x28 NUnit::Framework::Constraints::ConstraintBuilder* Builder; // Field size check static_assert(sizeof(NUnit::Framework::Constraints::ConstraintBuilder*) == 0x8); // Creating value type constructor for type: Constraint Constraint(System::Lazy_1<::Il2CppString*>* displayName_ = {}, ::Il2CppString* Description_ = {}, ::Array<::Il2CppObject*>* Arguments_ = {}, NUnit::Framework::Constraints::ConstraintBuilder* Builder_ = {}) noexcept : displayName{displayName_}, Description{Description_}, Arguments{Arguments_}, Builder{Builder_} {} // Creating interface conversion operator: operator NUnit::Framework::Constraints::IConstraint operator NUnit::Framework::Constraints::IConstraint() noexcept { return *reinterpret_cast<NUnit::Framework::Constraints::IConstraint*>(this); } // Get instance field: private System.Lazy`1<System.String> _displayName System::Lazy_1<::Il2CppString*>* _get__displayName(); // Set instance field: private System.Lazy`1<System.String> _displayName void _set__displayName(System::Lazy_1<::Il2CppString*>* value); // Get instance field: private System.String <Description>k__BackingField ::Il2CppString* _get_$Description$k__BackingField(); // Set instance field: private System.String <Description>k__BackingField void _set_$Description$k__BackingField(::Il2CppString* value); // Get instance field: private System.Object[] <Arguments>k__BackingField ::Array<::Il2CppObject*>* _get_$Arguments$k__BackingField(); // Set instance field: private System.Object[] <Arguments>k__BackingField void _set_$Arguments$k__BackingField(::Array<::Il2CppObject*>* value); // Get instance field: private NUnit.Framework.Constraints.ConstraintBuilder 
<Builder>k__BackingField NUnit::Framework::Constraints::ConstraintBuilder* _get_$Builder$k__BackingField(); // Set instance field: private NUnit.Framework.Constraints.ConstraintBuilder <Builder>k__BackingField void _set_$Builder$k__BackingField(NUnit::Framework::Constraints::ConstraintBuilder* value); // public System.String get_DisplayName() // Offset: 0x1419974 ::Il2CppString* get_DisplayName(); // public System.String get_Description() // Offset: 0x14199CC ::Il2CppString* get_Description(); // protected System.Void set_Description(System.String value) // Offset: 0x14199D4 void set_Description(::Il2CppString* value); // public System.Object[] get_Arguments() // Offset: 0x14199DC ::Array<::Il2CppObject*>* get_Arguments(); // private System.Void set_Arguments(System.Object[] value) // Offset: 0x14199E4 void set_Arguments(::Array<::Il2CppObject*>* value); // public NUnit.Framework.Constraints.ConstraintBuilder get_Builder() // Offset: 0x14199EC NUnit::Framework::Constraints::ConstraintBuilder* get_Builder(); // public System.Void set_Builder(NUnit.Framework.Constraints.ConstraintBuilder value) // Offset: 0x14199F4 void set_Builder(NUnit::Framework::Constraints::ConstraintBuilder* value); // protected System.Void .ctor(params System.Object[] args) // Offset: 0x1419584 template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary> static Constraint* New_ctor(::Array<::Il2CppObject*>* args) { static auto ___internal__logger = ::Logger::get().WithContext("NUnit::Framework::Constraints::Constraint::.ctor"); return THROW_UNLESS((::il2cpp_utils::New<Constraint*, creationType>(args))); } // Creating initializer_list -> params proxy for: System.Void .ctor(params System.Object[] args) template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary> static Constraint* New_ctor(std::initializer_list<::Il2CppObject*> args) { return New_ctor<creationType>(::Array<::Il2CppObject*>::New(args)); } // Creating TArgs -> 
initializer_list proxy for: System.Void .ctor(params System.Object[] args) template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary, class ...TParams> static Constraint* New_ctor(TParams&&... args) { return New_ctor<creationType>({args...}); } // public NUnit.Framework.Constraints.ConstraintResult ApplyTo(System.Object actual) // Offset: 0xFFFFFFFF NUnit::Framework::Constraints::ConstraintResult* ApplyTo(::Il2CppObject* actual); // protected System.String GetStringRepresentation() // Offset: 0x1419A7C ::Il2CppString* GetStringRepresentation(); // static private System.String _displayable(System.Object o) // Offset: 0x1419BD0 static ::Il2CppString* _displayable(::Il2CppObject* o); // private NUnit.Framework.Constraints.IConstraint NUnit.Framework.Constraints.IResolveConstraint.Resolve() // Offset: 0x1419CFC NUnit::Framework::Constraints::IConstraint* NUnit_Framework_Constraints_IResolveConstraint_Resolve(); // private System.String <.ctor>b__1_0() // Offset: 0x1419DD8 ::Il2CppString* $_ctor$b__1_0(); // public override System.String ToString() // Offset: 0x14199FC // Implemented from: System.Object // Base method: System.String Object::ToString() ::Il2CppString* ToString(); }; // NUnit.Framework.Constraints.Constraint #pragma pack(pop) static check_size<sizeof(Constraint), 40 + sizeof(NUnit::Framework::Constraints::ConstraintBuilder*)> __NUnit_Framework_Constraints_ConstraintSizeCheck; static_assert(sizeof(Constraint) == 0x30); } DEFINE_IL2CPP_ARG_TYPE(NUnit::Framework::Constraints::Constraint*, "NUnit.Framework.Constraints", "Constraint"); #include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::get_DisplayName // Il2CppName: get_DisplayName template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* 
(NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::get_DisplayName)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "get_DisplayName", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::get_Description // Il2CppName: get_Description template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::get_Description)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "get_Description", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::set_Description // Il2CppName: set_Description template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (NUnit::Framework::Constraints::Constraint::*)(::Il2CppString*)>(&NUnit::Framework::Constraints::Constraint::set_Description)> { static const MethodInfo* get() { static auto* value = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg; return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "set_Description", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{value}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::get_Arguments // Il2CppName: get_Arguments template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Array<::Il2CppObject*>* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::get_Arguments)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "get_Arguments", 
std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::set_Arguments // Il2CppName: set_Arguments template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (NUnit::Framework::Constraints::Constraint::*)(::Array<::Il2CppObject*>*)>(&NUnit::Framework::Constraints::Constraint::set_Arguments)> { static const MethodInfo* get() { static auto* value = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("System", "Object"), 1)->byval_arg; return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "set_Arguments", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{value}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::get_Builder // Il2CppName: get_Builder template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<NUnit::Framework::Constraints::ConstraintBuilder* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::get_Builder)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "get_Builder", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::set_Builder // Il2CppName: set_Builder template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (NUnit::Framework::Constraints::Constraint::*)(NUnit::Framework::Constraints::ConstraintBuilder*)>(&NUnit::Framework::Constraints::Constraint::set_Builder)> { static const MethodInfo* get() { static auto* value = &::il2cpp_utils::GetClassFromName("NUnit.Framework.Constraints", "ConstraintBuilder")->byval_arg; return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "set_Builder", std::vector<Il2CppClass*>(), ::std::vector<const 
Il2CppType*>{value}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::New_ctor // Il2CppName: .ctor // Cannot get method pointer of value based method overload from template for constructor! // Try using FindMethod instead! // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::ApplyTo // Il2CppName: ApplyTo template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<NUnit::Framework::Constraints::ConstraintResult* (NUnit::Framework::Constraints::Constraint::*)(::Il2CppObject*)>(&NUnit::Framework::Constraints::Constraint::ApplyTo)> { static const MethodInfo* get() { static auto* actual = &::il2cpp_utils::GetClassFromName("System", "Object")->byval_arg; return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "ApplyTo", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{actual}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::GetStringRepresentation // Il2CppName: GetStringRepresentation template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::GetStringRepresentation)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "GetStringRepresentation", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::_displayable // Il2CppName: _displayable template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(::Il2CppObject*)>(&NUnit::Framework::Constraints::Constraint::_displayable)> { static const MethodInfo* get() { static auto* o = &::il2cpp_utils::GetClassFromName("System", "Object")->byval_arg; return 
::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "_displayable", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{o}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::NUnit_Framework_Constraints_IResolveConstraint_Resolve // Il2CppName: NUnit.Framework.Constraints.IResolveConstraint.Resolve template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<NUnit::Framework::Constraints::IConstraint* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::NUnit_Framework_Constraints_IResolveConstraint_Resolve)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "NUnit.Framework.Constraints.IResolveConstraint.Resolve", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::$_ctor$b__1_0 // Il2CppName: <.ctor>b__1_0 template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::$_ctor$b__1_0)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "<.ctor>b__1_0", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: NUnit::Framework::Constraints::Constraint::ToString // Il2CppName: ToString template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (NUnit::Framework::Constraints::Constraint::*)()>(&NUnit::Framework::Constraints::Constraint::ToString)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(NUnit::Framework::Constraints::Constraint*), "ToString", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } };
62.394737
319
0.735253
[ "object", "vector" ]
baf3344a89ef8240d2d2dac67ef8257f5f3bec68
2,911
cpp
C++
android-28/android/content/pm/LauncherApps_Callback.cpp
YJBeetle/QtAndroidAPI
1468b5dc6eafaf7709f0b00ba1a6ec2b70684266
[ "Apache-2.0" ]
12
2020-03-26T02:38:56.000Z
2022-03-14T08:17:26.000Z
android-28/android/content/pm/LauncherApps_Callback.cpp
YJBeetle/QtAndroidAPI
1468b5dc6eafaf7709f0b00ba1a6ec2b70684266
[ "Apache-2.0" ]
1
2021-01-27T06:07:45.000Z
2021-11-13T19:19:43.000Z
android-30/android/content/pm/LauncherApps_Callback.cpp
YJBeetle/QtAndroidAPI
1468b5dc6eafaf7709f0b00ba1a6ec2b70684266
[ "Apache-2.0" ]
3
2021-02-02T12:34:55.000Z
2022-03-08T07:45:57.000Z
#include "../../../JArray.hpp" #include "../../os/Bundle.hpp" #include "../../os/UserHandle.hpp" #include "../../../JString.hpp" #include "./LauncherApps_Callback.hpp" namespace android::content::pm { // Fields // QJniObject forward LauncherApps_Callback::LauncherApps_Callback(QJniObject obj) : JObject(obj) {} // Constructors LauncherApps_Callback::LauncherApps_Callback() : JObject( "android.content.pm.LauncherApps$Callback", "()V" ) {} // Methods void LauncherApps_Callback::onPackageAdded(JString arg0, android::os::UserHandle arg1) const { callMethod<void>( "onPackageAdded", "(Ljava/lang/String;Landroid/os/UserHandle;)V", arg0.object<jstring>(), arg1.object() ); } void LauncherApps_Callback::onPackageChanged(JString arg0, android::os::UserHandle arg1) const { callMethod<void>( "onPackageChanged", "(Ljava/lang/String;Landroid/os/UserHandle;)V", arg0.object<jstring>(), arg1.object() ); } void LauncherApps_Callback::onPackageRemoved(JString arg0, android::os::UserHandle arg1) const { callMethod<void>( "onPackageRemoved", "(Ljava/lang/String;Landroid/os/UserHandle;)V", arg0.object<jstring>(), arg1.object() ); } void LauncherApps_Callback::onPackagesAvailable(JArray arg0, android::os::UserHandle arg1, jboolean arg2) const { callMethod<void>( "onPackagesAvailable", "([Ljava/lang/String;Landroid/os/UserHandle;Z)V", arg0.object<jarray>(), arg1.object(), arg2 ); } void LauncherApps_Callback::onPackagesSuspended(JArray arg0, android::os::UserHandle arg1) const { callMethod<void>( "onPackagesSuspended", "([Ljava/lang/String;Landroid/os/UserHandle;)V", arg0.object<jarray>(), arg1.object() ); } void LauncherApps_Callback::onPackagesSuspended(JArray arg0, android::os::UserHandle arg1, android::os::Bundle arg2) const { callMethod<void>( "onPackagesSuspended", "([Ljava/lang/String;Landroid/os/UserHandle;Landroid/os/Bundle;)V", arg0.object<jarray>(), arg1.object(), arg2.object() ); } void LauncherApps_Callback::onPackagesUnavailable(JArray arg0, android::os::UserHandle arg1, 
jboolean arg2) const { callMethod<void>( "onPackagesUnavailable", "([Ljava/lang/String;Landroid/os/UserHandle;Z)V", arg0.object<jarray>(), arg1.object(), arg2 ); } void LauncherApps_Callback::onPackagesUnsuspended(JArray arg0, android::os::UserHandle arg1) const { callMethod<void>( "onPackagesUnsuspended", "([Ljava/lang/String;Landroid/os/UserHandle;)V", arg0.object<jarray>(), arg1.object() ); } void LauncherApps_Callback::onShortcutsChanged(JString arg0, JObject arg1, android::os::UserHandle arg2) const { callMethod<void>( "onShortcutsChanged", "(Ljava/lang/String;Ljava/util/List;Landroid/os/UserHandle;)V", arg0.object<jstring>(), arg1.object(), arg2.object() ); } } // namespace android::content::pm
26.706422
123
0.702508
[ "object" ]
baf87a100baf2ac3da6d2b0d59ba01fbd189f607
5,291
cpp
C++
ThirdParty/oglplus-develop/tools/reshape_raw_cube.cpp
vif/3D-STG
721402e76a9b9b99b88ba3eb06beb6abb17a9254
[ "MIT" ]
2
2017-06-09T00:28:35.000Z
2017-06-09T00:28:43.000Z
ThirdParty/oglplus-develop/tools/reshape_raw_cube.cpp
vif/3D-STG
721402e76a9b9b99b88ba3eb06beb6abb17a9254
[ "MIT" ]
null
null
null
ThirdParty/oglplus-develop/tools/reshape_raw_cube.cpp
vif/3D-STG
721402e76a9b9b99b88ba3eb06beb6abb17a9254
[ "MIT" ]
8
2017-01-30T22:06:41.000Z
2020-01-14T17:24:36.000Z
/** * .file tools/reshape_raw_cube.hpp * .brief Tool for reshaping raw 3d texture date * * @author Matus Chochlik * * Copyright 2013 Matus Chochlik. Distributed under the Boost * Software License, Version 1.0. (See accompanying file * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #include <iostream> #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <stdexcept> #include <cstring> #include <cassert> bool parse_dim(const char* str, int& w, int& h, int& d) { std::stringstream ss(str); char x; if(!(ss >> w).good()) return false; if(!(ss >> x).good()) return false; if(!(ss >> h).good()) return false; if(!(ss >> x).good()) return false; if(!(ss >> d). eof()) return false; return true; } void parse_and_check_dim( int& arg, const int argc, const char* argv[], int& w, int& h, int& d ) { if(arg+1 >= argc) { std::stringstream msg; msg << "Missing argument after '" << argv[arg] << "', "; msg << "expecting dimensions"; throw std::runtime_error(msg.str()); } else if(!parse_dim(argv[arg+1], w, h, d)) { std::stringstream msg; msg << "Invalid argument " << arg+1 << ", "; msg << "error parsing dimensions: '"; msg << argv[arg+1] << "'"; throw std::runtime_error(msg.str()); } else if(w <= 0) { std::stringstream msg; msg << "Invalid argument " << arg+1 << ", "; msg << "invalid width value '"; msg << w << "' in specified dimensions."; throw std::runtime_error(msg.str()); } else if(h <= 0) { std::stringstream msg; msg << "Invalid argument " << arg+1 << ", "; msg << "invalid height value '"; msg << h << "' in specified dimensions."; throw std::runtime_error(msg.str()); } else if(d <= 0) { std::stringstream msg; msg << "Invalid argument " << arg+1 << ", "; msg << "invalid depth value '"; msg << d << "' in specified dimensions."; throw std::runtime_error(msg.str()); } else { ++arg; } } typedef unsigned char byte; template <typename T> void read_raw_data(std::istream& input, std::vector<T>& data) { if(!input.good()) { throw 
std::runtime_error("Failed reading input"); } if(input.read((char*)data.data(), data.size()).gcount() < data.size()) { throw std::runtime_error("Not enough data on input"); } } template <typename T> void write_raw_data(std::ostream& output, const std::vector<T>& data) { if(!output.good()) { throw std::runtime_error("Failed writing output"); } output.write((const char*)data.data(), data.size()); } struct reshape_info { int off_x, off_y, off_z; int ori_w, ori_h, ori_d; int new_w, new_h, new_d; byte nullbyte; }; byte translate_input( const reshape_info& r, byte value ) { if(value == 0xFF) value = r.nullbyte; return value; } byte sample_input( int x, int y, int z, const reshape_info& r, const std::vector<byte>& input_data ) { if(z < 0 || z >= r.ori_d) return r.nullbyte; if(y < 0 || y >= r.ori_h) return r.nullbyte; if(x < 0 || x >= r.ori_w) return r.nullbyte; int i = z*r.ori_w*r.ori_h + y*r.ori_w + x; return translate_input(r, input_data[i]); } void reshape_raw_cube( const reshape_info& r, const std::vector<byte>& input_data, std::vector<byte>& output_data ) { auto o=output_data.begin(); for(int oz=0; oz<r.new_d; ++oz) { int iz = oz-r.off_z; for(int oy=0; oy<r.new_h; ++oy) { int iy = oy-r.off_y; for(int ox=0; ox<r.new_w; ++ox) { int ix = ox-r.off_x; assert(o != output_data.end()); *o++ = sample_input(ix, iy, iz, r, input_data); } } } assert(o == output_data.end()); } int run(int argc, const char* argv[]) { reshape_info r = { 0, 0, 0, 0, 0, 0, 512, 512, 512 }; const char* input_path = nullptr; const char* output_path = nullptr; for(int arg=1; arg<argc; ++arg) { if(std::strncmp(argv[arg], "-output", 7) == 0) { if(arg+1 >= argc) { throw std::runtime_error("Missing output path."); } output_path = argv[arg+1]; ++arg; } else if(std::strncmp(argv[arg], "-offset", 7) == 0) { parse_and_check_dim(arg, argc, argv, r.off_x, r.off_y, r.off_z); } else if(std::strncmp(argv[arg], "-ori-dim", 8) == 0) { parse_and_check_dim(arg, argc, argv, r.ori_w, r.ori_h, r.ori_d); } else 
if(std::strncmp(argv[arg], "-new-dim", 8) == 0) { parse_and_check_dim(arg, argc, argv, r.new_w, r.new_h, r.new_d); } else { input_path = argv[arg]; } } if(r.ori_w*r.ori_h*r.ori_d == 0) { throw std::runtime_error("Missing input dimensions."); } if(r.off_x*r.off_y*r.off_z == 0) { r.off_x = (r.new_w-r.ori_w)/2; r.off_y = (r.new_h-r.ori_h)/2; r.off_z = (r.new_d-r.ori_d)/2; } std::vector<byte> input_data(r.ori_w*r.ori_h*r.ori_d); if(input_path) { std::ifstream input(input_path); read_raw_data(input, input_data); } else { read_raw_data(std::cin, input_data); } std::vector<byte> output_data(r.new_w*r.new_h*r.new_d); reshape_raw_cube(r, input_data, output_data); if(output_path) { std::ofstream output(output_path); write_raw_data(output, output_data); } else { write_raw_data(std::cout, output_data); } return 0; } int main(int argc, const char* argv[]) { try { return run(argc, argv); } catch(std::exception& error) { std::cerr << "Error: "; std::cerr << error.what(); std::cerr << std::endl; } }
20.507752
71
0.621622
[ "vector", "3d" ]
bafa152def5cf82fe3be098a26152b60e5ed78c6
1,035
cpp
C++
SummonEngine/ComponentSystem/Components/TriggerCollisionComponent.cpp
Raboni/SummonEngine
2a1078d60b604a918b920d098f3f0e60e5ce35bc
[ "MIT" ]
1
2020-03-27T15:22:17.000Z
2020-03-27T15:22:17.000Z
SummonEngine/ComponentSystem/Components/TriggerCollisionComponent.cpp
Raboni/SummonEngine
2a1078d60b604a918b920d098f3f0e60e5ce35bc
[ "MIT" ]
null
null
null
SummonEngine/ComponentSystem/Components/TriggerCollisionComponent.cpp
Raboni/SummonEngine
2a1078d60b604a918b920d098f3f0e60e5ce35bc
[ "MIT" ]
1
2021-01-22T03:49:01.000Z
2021-01-22T03:49:01.000Z
#include "stdafx.h" #include "TriggerCollisionComponent.h" #include "TransformComponent.h" TriggerCollisionComponent::TriggerCollisionComponent(const int aID, const int aObjectID, ComponentManager* aManager) : Component(aID, aObjectID, aManager) { } void TriggerCollisionComponent::InitBox(const CU::Vector3f& aSize, const unsigned int aIdentityFlags, const unsigned int aCollisionFlags, PhysicsMaterial* aMaterial) { TransformComponent* transformComp = GetComponent<TransformComponent>(); if (transformComp != nullptr) { myPhysicsObject.InitBox(transformComp->GetTransform(), aSize, aIdentityFlags, aCollisionFlags, aMaterial); } else { myPhysicsObject.InitBox(CU::Transform(), aSize, aIdentityFlags, aCollisionFlags, aMaterial); } } void TriggerCollisionComponent::OnAdd() { } void TriggerCollisionComponent::OnActivate() { myPhysicsObject.AddToScene(); } void TriggerCollisionComponent::OnDeactivate() { myPhysicsObject.RemoveFromScene(); } void TriggerCollisionComponent::OnRelease() { myPhysicsObject.Release(); }
28.75
165
0.799034
[ "transform" ]
bafa8a532406ccc52a393f8df7a914aa2bc4dd87
9,010
cpp
C++
src/Render.cpp
olesgedz/HumanGL
cec16c66c38e6fad3e4c8813699b2bd7e3b844e1
[ "Apache-2.0" ]
1
2021-01-13T04:09:23.000Z
2021-01-13T04:09:23.000Z
src/Render.cpp
olesgedz/HumanGL
cec16c66c38e6fad3e4c8813699b2bd7e3b844e1
[ "Apache-2.0" ]
null
null
null
src/Render.cpp
olesgedz/HumanGL
cec16c66c38e6fad3e4c8813699b2bd7e3b844e1
[ "Apache-2.0" ]
null
null
null
#include "render.h" #include "glad.h" #include <iostream> #include "glm/glm.hpp" #include "engine.h" void Render::init() { projection = perspective(60.0f * M_PI / 180.0f, 1280.0f / 720.0f, 0.1f, 100.0f); animation_key = "idle"; } void Render::draw_child(Entity* ent, Animator *animator, Scene *scene, Camera *cam, mat4 par_model) { Model *mod = ent->mod; glUseProgram(mod->shader_id); glBindVertexArray(mod->vao); mat4 model = mat4(1.0f); model = translate(model, ent->position); model = rotate(model, ent->angle); mat4 ani_model = mat4(1.0f); model = translate(model, -1 * ent->positionOffset); ani_model = animator->animations[animation_key][ent->ID].GetAnimationMatrix(*ent, Engine::delta_time) * ani_model; model = model * ani_model; model = translate(model, ent->positionOffset); model = model * par_model; par_model = model; model = scale(model, ent->e_scale); unsigned int model_loc = glGetUniformLocation(mod->shader_id, "u_M"); glUniformMatrix4fv(model_loc, 1, GL_FALSE, model.mat); unsigned int view_loc = glGetUniformLocation(mod->shader_id, "u_V"); glUniformMatrix4fv(view_loc, 1, GL_FALSE, cam->view.mat); unsigned int proj_loc = glGetUniformLocation(mod->shader_id, "u_P"); glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection.mat); glUniform1i(glGetUniformLocation(mod->shader_id, "lightNumb"), 3); glUniform3f(glGetUniformLocation(mod->shader_id, "lightPos"), scene->point_lights[0].position.x, scene->point_lights[0].position.y, scene->point_lights[0].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "viewPos"), cam->pos.x, cam->pos.y, cam->pos.z); glUniform3f(glGetUniformLocation(mod->shader_id, "material.diffuse"), ent->color.x, ent->color.y, ent->color.z); glUniform3f(glGetUniformLocation(mod->shader_id, "material.specular"), 0.4f, 0.4f, 0.4f); glUniform1f(glGetUniformLocation(mod->shader_id, "material.shininess"), 16.0f); glUniform3f(glGetUniformLocation(mod->shader_id, "light.ambient"), 0.3f, 0.3f, 0.3f); glUniform3f(glGetUniformLocation(mod->shader_id, 
"light.diffuse"), 0.8f, 0.8f, 0.8f); glUniform3f(glGetUniformLocation(mod->shader_id, "light.specular"), 0.5f, 0.5f, 0.5f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.constant"), 1.0f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.linear"), 0.045f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.quadratic"), 0.0075f); glDrawArrays(GL_TRIANGLES, 0, mod->ind_number); int child_numb = ent->childrens.size(); for (int j = 0; j < child_numb; ++j) draw_child(ent->childrens[j], animator, scene, cam, par_model); } void Render::draw_scene(Animator *animator, Scene *scene, Camera *cam) { int length = scene->ents.size(); for (int i = 0; i < length; ++i) { Entity *ent = scene->ents[i]; Model *mod = ent->mod; glUseProgram(mod->shader_id); glBindVertexArray(mod->vao); mat4 model = mat4(1.0f); model = translate(model, ent->position); model = rotate(model, ent->angle); mat4 ani_model = mat4(1.0f); if (ent->ID == 0) { ani_model = animator->animations[animation_key][0].GetAnimationMatrix(*ent, Engine::delta_time); ani_model = scale(ani_model, vec3(1.0f, 1.0f, 1.0f) * scaler); model = model * ani_model; } model = scale(model, ent->e_scale); unsigned int model_loc = glGetUniformLocation(mod->shader_id, "u_M"); glUniformMatrix4fv(model_loc, 1, GL_FALSE, model.mat); unsigned int view_loc = glGetUniformLocation(mod->shader_id, "u_V"); glUniformMatrix4fv(view_loc, 1, GL_FALSE, cam->view.mat); unsigned int proj_loc = glGetUniformLocation(mod->shader_id, "u_P"); glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection.mat); glUniform1i(glGetUniformLocation(mod->shader_id, "lightNumb"), 3); glUniform3f(glGetUniformLocation(mod->shader_id, "lightPos"), scene->point_lights[0].position.x, scene->point_lights[0].position.y, scene->point_lights[0].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "viewPos"), cam->pos.x, cam->pos.y, cam->pos.z); glUniform3f(glGetUniformLocation(mod->shader_id, "material.diffuse"), ent->color.x, ent->color.y, ent->color.z); 
glUniform3f(glGetUniformLocation(mod->shader_id, "material.specular"), 0.4f, 0.4f, 0.4f); glUniform1f(glGetUniformLocation(mod->shader_id, "material.shininess"), 16.0f); glUniform3f(glGetUniformLocation(mod->shader_id, "light.ambient"), 0.3f, 0.3f, 0.3f); glUniform3f(glGetUniformLocation(mod->shader_id, "light.diffuse"), 0.8f, 0.8f, 0.8f); glUniform3f(glGetUniformLocation(mod->shader_id, "light.specular"), 0.5f, 0.5f, 0.5f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.constant"), 1.0f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.linear"), 0.045f); glUniform1f(glGetUniformLocation(mod->shader_id, "light.quadratic"), 0.0075f); glDrawArrays(GL_TRIANGLES, 0, mod->ind_number); int child_numb = ent->childrens.size(); for (int j = 0; j < child_numb; ++j) draw_child(ent->childrens[j], animator, scene, cam, ani_model); } } void Render::draw_pbr(Scene* scene, Camera* cam) { int length = scene->ents.size(); for (int i = 0; i < length; ++i) { Entity* ent = scene->ents[i]; Model* mod = ent->mod; glUseProgram(mod->shader_id); glBindVertexArray(mod->vao); mat4 model = mat4(1.0f); model = translate(model, ent->position); model = rotate(model, ent->angle); model = scale(model, ent->e_scale); unsigned int model_loc = glGetUniformLocation(mod->shader_id, "u_M"); glUniformMatrix4fv(model_loc, 1, GL_FALSE, model.mat); unsigned int view_loc = glGetUniformLocation(mod->shader_id, "u_V"); glUniformMatrix4fv(view_loc, 1, GL_FALSE, cam->view.mat); unsigned int proj_loc = glGetUniformLocation(mod->shader_id, "u_P"); glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection.mat); glUniform3f(glGetUniformLocation(mod->shader_id, "albedo"), 0.5f, 0.0f, 0.0f); glUniform1f(glGetUniformLocation(mod->shader_id, "ao"), 1.0f); glUniform3f(glGetUniformLocation(mod->shader_id, "camPos"), cam->pos.x, cam->pos.y, cam->pos.z); glUniform1f(glGetUniformLocation(mod->shader_id, "metallic"), 0.9f); glUniform3f(glGetUniformLocation(mod->shader_id, "roughness"), 0.9f, 0.05f, 1.0f); 
glUniform3f(glGetUniformLocation(mod->shader_id, "lightPositions[0]"), scene->point_lights[0].position.x, scene->point_lights[0].position.y, scene->point_lights[0].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightColors[0]"), scene->point_lights[0].color.x, scene->point_lights[0].color.y, scene->point_lights[0].color.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightPositions[1]"), scene->point_lights[1].position.x, scene->point_lights[1].position.y, scene->point_lights[1].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightColors[1]"), scene->point_lights[1].color.x, scene->point_lights[1].color.y, scene->point_lights[1].color.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightPositions[2]"), scene->point_lights[2].position.x, scene->point_lights[2].position.y, scene->point_lights[2].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightColors[2]"), scene->point_lights[2].color.x, scene->point_lights[2].color.y, scene->point_lights[2].color.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightPositions[3]"), scene->point_lights[3].position.x, scene->point_lights[3].position.y, scene->point_lights[3].position.z); glUniform3f(glGetUniformLocation(mod->shader_id, "lightColors[3]"), scene->point_lights[3].color.x, scene->point_lights[3].color.y, scene->point_lights[3].color.z); // for (int j = 0; j < scene->point_lights.size(); ++j) // { // glUniform3f(glGetUniformLocation(mod->shader_id, ("lightPositions[" + std::to_string(j) + " ]").c_str()), scene->point_lights[j].position.x, scene->point_lights[j].position.y, scene->point_lights[j].position.z); // glUniform3f(glGetUniformLocation(mod->shader_id, ("lightColors[" + std::to_string(j) + " ]").c_str()), scene->point_lights[j].color.x, scene->point_lights[j].color.y, scene->point_lights[j].color.z); // } glDrawArrays(GL_TRIANGLES, 0, mod->ind_number); } } void Render::draw_skybox(Skybox *skybox, Camera* cam) { glDepthMask(GL_FALSE); 
glUseProgram(skybox->shader_id); mat3 tmp = mat3(cam->view); mat4 view = mat4(tmp); unsigned int view_loc = glGetUniformLocation(skybox->shader_id, "u_V"); glUniformMatrix4fv(view_loc, 1, GL_FALSE, view.mat); unsigned int proj_loc = glGetUniformLocation(skybox->shader_id, "u_P"); glUniformMatrix4fv(proj_loc, 1, GL_FALSE, projection.mat); glBindVertexArray(skybox->vao); glBindTexture(GL_TEXTURE_CUBE_MAP, skybox->texture); glDrawArrays(GL_TRIANGLES, 0, 36); glDepthMask(GL_TRUE); }
54.939024
216
0.703552
[ "render", "model" ]
bafb549b1a4b93eb126f43af70e1311e4cd23467
529
cpp
C++
test/PersistentUnionFind.test.cpp
yuruhi/library
fecbd92ec6c6997d50bf954c472ac4bfeff74de5
[ "Apache-2.0" ]
null
null
null
test/PersistentUnionFind.test.cpp
yuruhi/library
fecbd92ec6c6997d50bf954c472ac4bfeff74de5
[ "Apache-2.0" ]
6
2021-01-05T07:39:05.000Z
2021-01-05T07:44:31.000Z
test/PersistentUnionFind.test.cpp
yuruhi/library
fecbd92ec6c6997d50bf954c472ac4bfeff74de5
[ "Apache-2.0" ]
null
null
null
#define PROBLEM "https://judge.yosupo.jp/problem/persistent_unionfind" #include "./../DataStructure/PersistentUnionFind.cpp" #include <iostream> using namespace std; int main() { cin.tie(nullptr); ios_base::sync_with_stdio(false); int n, q; cin >> n >> q; vector<PersistentUnionFind> g(q + 1); g[0] = PersistentUnionFind(n); for (int i = 1; i <= q; ++i) { int com, k, u, v; cin >> com >> k >> u >> v; k++; if (com == 0) { g[i] = g[k].unite(u, v).second; } else { cout << g[k].same(u, v) << '\n'; } } }
22.041667
70
0.586011
[ "vector" ]
bafe4828e8238c2395ef784e339440cb3336a873
35,581
cpp
C++
Minecraft/src/scene_minecraft.cpp
Iansa9/GraficasComputacionales
864774919b5a9f8931595cddee5431cbfc02255f
[ "MIT" ]
null
null
null
Minecraft/src/scene_minecraft.cpp
Iansa9/GraficasComputacionales
864774919b5a9f8931595cddee5431cbfc02255f
[ "MIT" ]
null
null
null
Minecraft/src/scene_minecraft.cpp
Iansa9/GraficasComputacionales
864774919b5a9f8931595cddee5431cbfc02255f
[ "MIT" ]
null
null
null
#include "scene_minecraft.h" #include <GL/freeglut.h> #include <algorithm> #include <iostream> #include <forward_list> #include <atomic> #include <mutex> #include <cmath> #include "cgmath.h" #include "mat3.h" #include "mat4.h" #include "perlin_noise.h" #include "time_utils.h" #include "vec2.h" #include "vec3i.h" #include "vec3.h" #include "vec3d.h" #include "vec4.h" #include "utils.h" using cgmath::lookAt; using cgmath::mat3; using cgmath::mat4; using cgmath::radians; using cgmath::rotate_around; using cgmath::rotation_matrix; using cgmath::translation_matrix; using cgmath::vec2; using cgmath::vec3i; using cgmath::vec3; using cgmath::vec3d; using cgmath::vec4; using cgmath::AreEqual; using utils::randf; using std::max; using std::min; using std::atomic; using std::mutex; using std::truncf; float sign(float val) { return val < 0.0f ? -1.0f : val > 0.0f ? 1.0f : 0.0f ; } vec3 sign(const vec3& v) { return vec3(sign(v.x), sign(v.y), sign(v.z)); } float abs_ceil(float x) { return x < 0 ? floor(x) : ceil(x); } scene_minecraft::~scene_minecraft() { glDeleteProgram(shader_program); glDeleteProgram(light_shader_program); glDeleteTextures(1, &textureId); } void scene_minecraft::compile_shaders() { shader_program = generateShaderProgram( { {"shaders/shadow.vert", GL_VERTEX_SHADER}, {"shaders/shadow.frag", GL_FRAGMENT_SHADER} }, { "position_in", "offset_in", "normal_in", "texture_coord_up_in", "texture_coord_side_in", "texture_coord_down_in", "face_in", "texture_offset_in" }); glUseProgram(shader_program); time_location = glGetUniformLocation(shader_program, "u_time"); mvp_matrix_location = glGetUniformLocation(shader_program, "u_mvp_matrix"); normal_matrix_location = glGetUniformLocation(shader_program, "u_normal_matrix"); texture_location = glGetUniformLocation(shader_program, "u_texture1"); light_texture_location = glGetUniformLocation(shader_program, "u_light_texture"); light_position_location = glGetUniformLocation(shader_program, "u_light_position"); light_color_location 
= glGetUniformLocation(shader_program, "u_light_color"); camera_position_location = glGetUniformLocation(shader_program, "u_camera.position"); resolution_location = glGetUniformLocation(shader_program, "u_resolution"); shadow_light_mvp_location = glGetUniformLocation(shader_program, "u_light_mvp_matrix"); GLuint poisson_location = glGetUniformLocation(shader_program, "u_poisson_disk"); glUniform2fv(poisson_location, 64, &(poisson_disk[0][0])); glUniform3f(light_color_location, 1.0f, 1.0f, 1.0f); glUniform2f(resolution_location, 800, 800); glUseProgram(0); } void scene_minecraft::compile_light_shaders() { light_shader_program = generateShaderProgram({ {"shaders/depth.vert", GL_VERTEX_SHADER}, {"shaders/depth.frag", GL_FRAGMENT_SHADER}, }, {"position_in", "offset_in"}); light_mvp_location = glGetUniformLocation(light_shader_program, "u_light_mvp_matrix"); } vec2 scene_minecraft::get_texture_coords(int x, int y) { x--; y--; return vec2(x / 16.0f, y / 23.0f ); } //vector<vec2> scene_minecraft::get_texture_coords(int x, int y) { /*x--; y--; vec2 lower_left = {x / 16.0f, (y / 23.0f)}; vec2 lower_right = {(x + 1.0f) / 16.0f, (y / 23.0f)}; vec2 upper_left = {x / 16.0f, (y + 1.0f) / 23.0f}; vec2 upper_right = {(x + 1.0f) / 16.0f, (y + 1.0f) / 23.0f}; vector<vec2> texture_coords = { lower_left, lower_right, upper_right, upper_left, }; return texture_coords;*/ //} vector<vec3> scene_minecraft::cube_positions() { vec3 back_lower_left = {-0.5f, -0.5f, -0.5f}; vec3 back_lower_right = {0.5f, -0.5f, -0.5f}; vec3 back_upper_right = {0.5f, 0.5f, -0.5f}; vec3 back_upper_left = {-0.5f, 0.5f, -0.5f}; vec3 front_lower_left = {-0.5f, -0.5f, 0.5f}; vec3 front_lower_right = {0.5f, -0.5f, 0.5f}; vec3 front_upper_right = {0.5f, 0.5f, 0.5f}; vec3 front_upper_left = {-0.5f, 0.5f, 0.5f}; vector<vec3> positions = { // front front_lower_left, front_lower_right, front_upper_right, front_upper_left, // left back_lower_left, front_lower_left, front_upper_left, back_upper_left, // right 
front_lower_right, back_lower_right, back_upper_right, front_upper_right, // up front_upper_left, front_upper_right, back_upper_right, back_upper_left, // down back_lower_left, back_lower_right, front_lower_right, front_lower_left, // back back_lower_right, back_lower_left, back_upper_left, back_upper_right, }; return positions; } //vector<vec2> scene_minecraft::cube_texture_coords(int id) { // vector<vec2> texture_coords_up = // get_texture_coords(textures[id].x_up, textures[id].y_up); // vector<vec2> texture_coords_side = // get_texture_coords(textures[id].x_side, textures[id].y_side); // vector<vec2> texture_coords_down = // get_texture_coords(textures[id].x_down, textures[id].y_down); // vector<vec2> texture_coords = { // // front // texture_coords_side[0], // texture_coords_side[1], // texture_coords_side[2], // texture_coords_side[3], // // left // texture_coords_side[0], // texture_coords_side[1], // texture_coords_side[2], // texture_coords_side[3], // // right // texture_coords_side[0], // texture_coords_side[1], // texture_coords_side[2], // texture_coords_side[3], // // up // texture_coords_up[0], // texture_coords_up[1], // texture_coords_up[2], // texture_coords_up[3], // // down // texture_coords_down[0], // texture_coords_down[1], // texture_coords_down[2], // texture_coords_down[3], // // back // texture_coords_side[0], // texture_coords_side[1], // texture_coords_side[2], // texture_coords_side[3], // }; // return texture_coords; //} vector<vec3> scene_minecraft::cube_normal_vectors() { vec3 front = {0.0f, 0.0f, 1.0f}; vec3 back = {0.0f, 0.0f, -1.0f}; vec3 up = {0.0f, 1.0f, 0.0f}; vec3 down = {0.0f, -1.0f, 0.0f}; vec3 left = {-1.0f, 0.0f, 0.0f}; vec3 right = {1.0f, 0.0f, 0.0f}; vector<vec3> normal_vectors = {front, front, front, front, left, left, left, left, right, right, right, right, up, up, up, up, down, down, down, down, back, back, back, back}; return normal_vectors; } bool scene_minecraft::is_block_visible(int i, int j, int k) { if (k == 0) 
{ return true; } int a_start = i == 0 ? 0 : -1; int a_end = i == MAP_SIZE ? 0 : 1; int b_start = j == 0 ? 0 : -1; int b_end = j == MAP_SIZE ? 0 : 1; int c_start = k == 0 ? 0 : -1; int c_end = k == MAP_HEIGHT ? 0 : 1; short currentBlock = blocks[i][j][k]; short block = 0; for (int a = a_start; a <= a_end; a++) { for (int b = b_start; b <= b_end; b++) { for (int c = c_start; c <= c_end; c++) { block = blocks[i + a][j + b][k + c]; if (block == 0) { return true; } if (currentBlock != WATER_TYPE && block == WATER_TYPE) { return true; } } } } return false; } void scene_minecraft::make_near_blocks_visible(int i, int j, int k) { int a_start = i == 0 ? 0 : -1; int a_end = i == MAP_SIZE ? 0 : 1; int b_start = j == 0 ? 0 : -1; int b_end = j == MAP_SIZE ? 0 : 1; int c_start = k == 0 ? 0 : -1; int c_end = k == MAP_HEIGHT ? 0 : 1; int ni, nj, nk; for (int a = a_start; a <= a_end; a++) { for (int b = b_start; b <= b_end; b++) { for (int c = c_start; c <= c_end; c++) { ni = i + a; nj = j + b; nk = k + c; if (blocks[ni][nj][nk] != 0 && coords_to_offset_index[ni][nj][nk] == 0) { add_block(ni, nj, nk, blocks[ni][nj][nk]); } } } } } void scene_minecraft::add_block(int i, int j, int k, int type) { if (coords_to_offset_index[i][j][k] != 0) return; map_mutex.lock(); blocks[i][j][k] = type; texture_up_coords.push_back(get_texture_coords(textures[type - 1].x_up, textures[type - 1].y_up)); texture_side_coords.push_back(get_texture_coords(textures[type - 1].x_side, textures[type - 1].y_side)); texture_down_coords.push_back(get_texture_coords(textures[type - 1].x_down, textures[type - 1].y_down)); coords_to_offset_index[i][j][k] = offsets.size(); //offsets.push_back(vec3(i, k, j)); offsets.push_back(vec3(i + 0.5f, k + 0.5f, j + 0.5f)); map_mutex.unlock(); } void scene_minecraft::generate_tree(int i, int j, int k, int biome) { int leaves = 0; int wood = 0; if (biome == SNOW_BIOME) { leaves = SNOW_LEAVES_TYPE; wood = DARK_WOOD_TYPE; } else if (biome == FOREST_BIOME) { leaves = LEAVES_TYPE; wood 
= WOOD_TYPE; } else if (biome == PLAINS_BIOME) { if (randf() > 0.01) { return; } leaves = LEAVES_TYPE; wood = WOOD_TYPE; } else if (biome == DESERT_BIOME) { wood = CACTUS_TYPE; } else if (biome == AUTUM_FOREST) { wood = DARK_WOOD_TYPE; leaves = AUTUMN_LEAVES_TYPE; } else { return; } int height = int(randf() * 3.0f + 4.0f); for (int h = k; h < k+height; h++) { blocks[i][j][h] = wood; } blocks[i][j][k + height] = leaves; int width = height; if (width % 2 == 0) { width++; } int limit = width / 2; for (int a = -limit; a <= limit; a++) { for (int b = -limit; b <= limit; b++) { for (int h = -limit; h <= limit; h++) { if (i + a > 1000 || i + a < 0 || j+b > 1000 || j + b < 0) { continue; } if (randf() < 0.7f && blocks[i+a][j+b][k+h+height] == 0) { blocks[i+a][j+b][k+h+height] = leaves; } } } } } int scene_minecraft::get_biome(float map_height, float biome_height) { if (map_height > 0.7) return SNOW_BIOME; if (map_height > 0.45) { if (biome_height < 0.3) return PLAINS_BIOME; if (biome_height < 0.5) return FOREST_BIOME; if (biome_height < 0.7) return AUTUM_FOREST; return DESERT_BIOME; } if(map_height > 0.44) return BEACH_BIOME; return OCEAN_BIOME; } void scene_minecraft::generate_map() { const int MAP_PERLIN_SIZE = 10; const int BIOME_PERLIN_SIZE = 5; const float MAP_PERLIN_RESOLUTION = (MAP_SIZE + 0.0f) / MAP_PERLIN_SIZE; const float BIOME_PERLIN_RESOLUTION = (MAP_SIZE + 0.0f) / BIOME_PERLIN_SIZE; perlin map_noise; perlin biome_noise; for (int i = 0; i <= MAP_SIZE; i++) { for (int j = 0; j <= MAP_SIZE; j++) { for (int k = 0; k <= MAP_HEIGHT; k++) { blocks[i][j][k] = 0; } } } map_noise.generate_grid(MAP_PERLIN_SIZE + 1, MAP_PERLIN_SIZE + 1, time(nullptr)); biome_noise.generate_grid(BIOME_PERLIN_SIZE + 1, BIOME_PERLIN_SIZE + 1, time(nullptr)); for (int i = 0; i <= MAP_SIZE; i++) { for (int j = 0; j <= MAP_SIZE; j++) { float map_range = (0.5f*map_noise.height_at(i / MAP_PERLIN_RESOLUTION, j / MAP_PERLIN_RESOLUTION)) + 0.5f; float biome_range = (0.5f*biome_noise.height_at(i / 
BIOME_PERLIN_RESOLUTION, j / BIOME_PERLIN_RESOLUTION)) + 0.5f; int height = int(MAP_HEIGHT * map_range); int biome = get_biome(map_range, biome_range); int grass; int dirt; if (biome == SNOW_BIOME) { grass = SNOW_TYPE; dirt = DIRT_TYPE; } else if (biome == FOREST_BIOME) { grass = GRASS_TYPE; dirt = DIRT_TYPE; } else if (biome == PLAINS_BIOME) { grass = GRASS_TYPE; dirt = DIRT_TYPE; } else if (biome == DESERT_BIOME || biome == BEACH_BIOME || biome == OCEAN_BIOME) { grass = SAND_TYPE; dirt = SAND_TYPE; } else if (biome == AUTUM_FOREST) { grass = DARK_GRASS_TYPE; dirt = DIRT_TYPE; } for (int k = height; k >= 0; k--) { if (blocks[i][j][k + 1] == 0) { blocks[i][j][k] = grass; } else if (blocks[i][j][k + 3] == 0) { blocks[i][j][k] = dirt; } else { blocks[i][j][k] = STONE_TYPE; } } for (int k = ceil(0.44f*MAP_HEIGHT); k > height; k--) { if (blocks[i][j][k] == 0) { blocks[i][j][k] = WATER_TYPE; } } if (randf() < 0.01) { generate_tree(i, j,height + 1, biome); } } } offsets.clear(); offsets.reserve(RENDER_DISTANCE*RENDER_DISTANCE * 3); texture_up_coords.clear(); texture_up_coords.reserve(RENDER_DISTANCE*RENDER_DISTANCE * 3); texture_side_coords.clear(); texture_side_coords.reserve(RENDER_DISTANCE*RENDER_DISTANCE * 3); texture_down_coords.clear(); texture_down_coords.reserve(RENDER_DISTANCE*RENDER_DISTANCE * 3); int start_x = max(int(camera.position.x - RENDER_DISTANCE), 0); int end_x = min(int(camera.position.x + RENDER_DISTANCE), MAP_SIZE); int start_z = max(int(camera.position.z - RENDER_DISTANCE), 0); int end_z = min(int(camera.position.z + RENDER_DISTANCE), MAP_SIZE); for (int k = 0; k <= MAP_HEIGHT; k++) { for (int i = start_x; i <= end_x; i++) { for (int j = start_z; j <= end_z; j++) { coords_to_offset_index[i][j][k] = 0; bool in_range = (vec3(i, k, j) - camera.position).magnitude() < RENDER_DISTANCE; if (in_range && blocks[i][j][k] != 0 && is_block_visible(i, j, k)) { add_block(i,j,k, blocks[i][j][k]); } } } } } void scene_minecraft::init() { init_poisson(); 
vector<vec3> positions = cube_positions(); vector<vec3> normal_vectors = cube_normal_vectors(); generate_map(); //update_map(); vector<float> faces = { // front 1.0f,1.0f,1.0f,1.0f, // left 1.0f,1.0f,1.0f,1.0f, // right 1.0f,1.0f,1.0f,1.0f, // up 0.0f,0.0f,0.0f,0.0f, // down 2.0f,2.0f,2.0f,2.0f, // back 1.0f,1.0f,1.0f,1.0f }; vector<vec2> texture_offsets = { // front {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, // left {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, // right {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, // up {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, // down {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, // back {0.0f, 0.0f}, {1.0f / 16.0f, 0.0f}, {1.0f / 16.0f, 1.0f / 23.0f}, {0.0f, 1.0f / 23.0f}, }; vector<unsigned int> indices = { // front 0,1,2,0,2,3, // left 4,5,6,4,6,7, // right 8,9,10,8,10,11, // up 12,13,14,12,14,15, // down 16,17,18,16,18,19, // back 20,21,22,20,22,23 }; compile_shaders(); compile_light_shaders(); glGenVertexArrays(1, &vao); map_mutex.lock(); indexIBO = generateIBO(vao, indices, GL_STATIC_DRAW); positionsVBO = generateVBO(vao, 0, positions, 3, GL_FLOAT, GL_STATIC_DRAW); offsets_coordsVBO = generateInstancedVBO(vao, 1, offsets, 3, GL_FLOAT, GL_STATIC_DRAW, 1); normal_vectorsVBO = generateVBO(vao, 2, normal_vectors, 3, GL_FLOAT, GL_STATIC_DRAW); texture_coords_up_VBO = generateInstancedVBO(vao, 3, texture_up_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); texture_coords_side_VBO = generateInstancedVBO(vao, 4, texture_side_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); texture_coords_down_VBO = generateInstancedVBO(vao, 5, texture_down_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); texture_face_VBO = generateVBO(vao, 6, faces, 1, GL_FLOAT, GL_STATIC_DRAW); texture_offsets_VBO = generateVBO(vao, 7, texture_offsets, 2, GL_FLOAT, 
GL_STATIC_DRAW); map_mutex.unlock(); textureId = generateTexture("assets/spritesheet2.png"); buffer.create(8192); //glEnable(GL_BLEND); //glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); } void scene_minecraft::awake() { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); double x, y; glfwGetCursorPos(window, &x, &y); mouse_x = float(x); mouse_y = float(y); glClearColor(208.0f / 255.0f, 183.0f / 255.0f, 249.0f / 255.0f, 1.0f); } void scene_minecraft::sleep() { glClearColor(1.0f, 1.0f, 0.5f, 1.0f); glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL); } void scene_minecraft::first_render() { buffer.bind(); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glUseProgram(light_shader_program); glUniformMatrix4fv(light_mvp_location, 1, GL_FALSE, &((light_proyection_matrix * light_view_matrix)[0][0])); glDrawElementsInstanced(GL_TRIANGLES, 36, GL_UNSIGNED_INT, nullptr, offsets.size()); glUseProgram(0); buffer.unbind(); } void scene_minecraft::second_render() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glUseProgram(shader_program); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, textureId); float u_time = time::elapsed_time().count(); glUniform1f(time_location, u_time); glUniform1i(texture_location, 0); glUniform1i(light_texture_location, 1); mat3 normal_matrix = mat3::transpose(mat3::inverse(mat3(0.5f))); glUniformMatrix3fv(normal_matrix_location, 1, GL_FALSE, &(normal_matrix[0][0])); glUniform3fv(camera_position_location, 1, &(camera.position[0])); glUniform3f(light_position_location, 10.0f, 20.0f, 10.0f); mat4 view_matrix = lookAt(camera.position, camera.position + forward, { 0.0f, 1.0f, 0.0f }); glUniformMatrix4fv(mvp_matrix_location, 1, GL_FALSE, &((proyection_matrix*view_matrix)[0][0])); glUniformMatrix4fv(shadow_light_mvp_location, 1, GL_FALSE, &((light_proyection_matrix * light_view_matrix)[0][0])); glActiveTexture(GL_TEXTURE1); buffer.bindDepthMap(); glDrawElementsInstanced(GL_TRIANGLES, 36, GL_UNSIGNED_INT, nullptr, 
offsets.size()); glActiveTexture(GL_TEXTURE1); buffer.unbindDepthMap(); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, 0); glUseProgram(0); } bool scene_minecraft::has_object(const vec3& v) { if (is_outside(v)) { return false; } int x = int(v.x); int y = int(v.y); int z = int(v.z); return blocks[x][z][y] != 0; } bool scene_minecraft::is_outside(const vec3& v) { if (v.x < 0 || MAP_SIZE < v.x || v.y < 0 || MAP_HEIGHT < v.y || v.z < 0 || MAP_SIZE < v.z) { return true; } return false; } vector<vec3> scene_minecraft::voxels(const vec3& ray, const vec3& origin) { vector<vec3> result; if (!is_outside(origin)) { result.push_back(origin); } vec3 t_max, t_delta; vec3 current_voxel = origin; vec3 end = origin + ray; end = vec3(int(end.x), int(end.y), int(end.z)); vec3 step = vec3(sign(ray.x), sign(ray.y), sign(ray.z)); float border; for (int i = 0; i < 3; i++) { if (step[i] > 0.0) { border = floor(origin[i] + 1.0); } else { border = ceil(origin[i] - 1.0); } if (AreEqual(ray[i], 0.0)) { t_max[i] = 100000.0; t_delta[i] = 100000.0; } else { t_max[i] = (border - origin[i]) / ray[i]; t_delta[i] = step[i] / ray[i]; } } while (true) { float t_min = t_max[0]; int t_min_dimension = 0; for (int i = 1; i < 3; i++) { if (t_max[i] < t_min) { t_min = t_max[i]; t_min_dimension = i; } } current_voxel[t_min_dimension] += step[t_min_dimension]; t_max[t_min_dimension] += t_delta[t_min_dimension]; if ((current_voxel - origin).magnitude() > ray.magnitude()) break; result.push_back(current_voxel); } return result; } void scene_minecraft::update_map(std::atomic<bool>& program_is_running) { int start_x, end_x; int start_z, end_z; bool in_range; int i, j, k, type; while (program_is_running) { start_x = max(int(camera.position.x - RENDER_DISTANCE), 0); end_x = min(int(camera.position.x + RENDER_DISTANCE), MAP_SIZE); start_z = max(int(camera.position.z - RENDER_DISTANCE), 0); end_z = min(int(camera.position.z + RENDER_DISTANCE), MAP_SIZE); for (k = 0; k <= MAP_HEIGHT; k++) { for (i = 
start_x; i <= end_x; i++) { for (j = start_z; j <= end_z; j++) { in_range = (vec3(i, 0.0f, j) - vec3(camera.position.x, 0.0f, camera.position.z)).magnitude() < RENDER_DISTANCE; if (in_range && blocks[i][j][k] != 0 && is_block_visible(i, j, k)) { type = blocks[i][j][k] - 1; add_block(i, j, k, blocks[i][j][k]); } } } } } } void scene_minecraft::mainLoop() { handle_movement(); handle_rotation(); handle_gravity(); handle_raycasting(); light_view_matrix = lookAt(light_camera_position, { camera.position.x, 0.0f, camera.position.z }, { 0.0f, 1.0f, 0.0f }); glBindVertexArray(vao); map_mutex.lock(); updateInstancedVBO(vao, offsets_coordsVBO, 1, offsets, 3, GL_FLOAT, GL_STATIC_DRAW, 1); updateInstancedVBO(vao, texture_coords_up_VBO, 3, texture_up_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); updateInstancedVBO(vao, texture_coords_side_VBO, 4, texture_side_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); updateInstancedVBO(vao, texture_coords_down_VBO, 5, texture_down_coords, 2, GL_FLOAT, GL_STATIC_DRAW, 1); map_mutex.unlock(); first_render(); second_render(); glBindVertexArray(0); } void scene_minecraft::resize(int width, int height) { glViewport(0, 0, width, height); glUseProgram(shader_program); proyection_matrix = mat4::transpose({ {1.0f / (((width + 0.0f) / (height + 0.0f)) * tan(radians(60.0f) / 2.0f)), 0.0f, 0.0f, 0.0f}, {0.0f, 1.0f / (tan(radians(60.0f) / 2.0f)), 0.0f, 0.0f}, {0.0f, 0.0f, -(1000.0f + 1.0f) / (1000.0f - 1.0f), -(2.0f * 1000.0f * 1.0f) / (1000.0f - 1.0f)}, {0.0f, 0.0f, -1.0f, 0.0f}, }); glUniform2f(resolution_location, width, height); glUseProgram(0); } void scene_minecraft::handle_gravity() { float delta_time = min(time::delta_time().count(), 0.03f); camera.velocity += vec3(0.0f, -10.0f, 0.0f) * delta_time; short block = 0; vec3i next_block; body new_camera = camera; bool collision; vector<vec3i> collision_blocks; vector<vec3i> ceil_collision_blocks; new_camera.position = camera.position - (camera.dimensions / 2.0f) - vec3(0.0f, 0.75f, 0.0f); next_block = 
vec3i(new_camera.position); collision_blocks.push_back({ next_block.x - 1, next_block.y, next_block.z - 1 }); collision_blocks.push_back({ next_block.x - 1, next_block.y, next_block.z }); collision_blocks.push_back({ next_block.x - 1, next_block.y, next_block.z + 1 }); collision_blocks.push_back({ next_block.x , next_block.y, next_block.z - 1 }); collision_blocks.push_back({ next_block.x , next_block.y, next_block.z + 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y, next_block.z - 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y, next_block.z }); collision_blocks.push_back({ next_block.x + 1, next_block.y, next_block.z + 1 }); collision_blocks.push_back({ next_block.x - 1, next_block.y + 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x - 1, next_block.y + 1, next_block.z }); collision_blocks.push_back({ next_block.x - 1, next_block.y + 1, next_block.z + 1 }); collision_blocks.push_back({ next_block.x , next_block.y + 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x , next_block.y + 1, next_block.z + 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y + 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y + 1, next_block.z }); collision_blocks.push_back({ next_block.x + 1, next_block.y + 1, next_block.z + 1 }); collision_blocks.push_back({ next_block.x - 1, next_block.y - 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x - 1, next_block.y - 1, next_block.z }); collision_blocks.push_back({ next_block.x - 1, next_block.y - 1, next_block.z + 1 }); collision_blocks.push_back({ next_block.x , next_block.y - 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x , next_block.y - 1, next_block.z }); collision_blocks.push_back({ next_block.x , next_block.y - 1, next_block.z + 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y - 1, next_block.z - 1 }); collision_blocks.push_back({ next_block.x + 1, next_block.y - 1, 
next_block.z }); collision_blocks.push_back({ next_block.x + 1, next_block.y - 1, next_block.z + 1 }); ceil_collision_blocks.push_back({ next_block.x - 1, next_block.y + 2, next_block.z - 1 }); ceil_collision_blocks.push_back({ next_block.x - 1, next_block.y + 2, next_block.z }); ceil_collision_blocks.push_back({ next_block.x - 1, next_block.y + 2, next_block.z + 1 }); ceil_collision_blocks.push_back({ next_block.x , next_block.y + 2, next_block.z - 1 }); ceil_collision_blocks.push_back({ next_block.x , next_block.y + 2, next_block.z }); ceil_collision_blocks.push_back({ next_block.x , next_block.y + 2, next_block.z + 1 }); ceil_collision_blocks.push_back({ next_block.x + 1, next_block.y + 2, next_block.z - 1 }); ceil_collision_blocks.push_back({ next_block.x + 1, next_block.y + 2, next_block.z }); ceil_collision_blocks.push_back({ next_block.x + 1, next_block.y + 2, next_block.z + 1 }); for (int i = 0; i < 3; i++) { if (new_camera.velocity[i] == 0.0f) { continue; } new_camera.position = camera.position - (camera.dimensions/2.0f) - vec3(0.0f, 0.75f, 0.0f); new_camera.position[i] = new_camera.position[i] + (camera.velocity[i] * delta_time); for (auto &collision_block : collision_blocks) { block = blocks[collision_block.x][collision_block.z][collision_block.y]; if (block != 0 && block != WATER_TYPE) { collision = body::check_collision(new_camera, body( collision_block, { 1.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f } )); if (collision) { if (i == 1 && up_input){ camera.velocity.y = 5.0f; } else { camera.velocity[i] = 0; } } } } for (auto &collision_block : ceil_collision_blocks) { block = blocks[collision_block.x][collision_block.z][collision_block.y]; if (block != 0 && block != WATER_TYPE) { collision = body::check_collision(new_camera, body( collision_block, { 1.0f, 1.0f, 1.0f }, { 0.0f, 0.0f, 0.0f } )); if (collision) { camera.velocity[i] = 0; } } } } camera.position += camera.velocity * delta_time; light_camera_position.x += camera.velocity.x * delta_time; 
light_camera_position.z += camera.velocity.z * delta_time; } void scene_minecraft::handle_rotation() { float delta_time = min(time::delta_time().count(), 0.03f); yaw_input = delta_x; pitch_input = delta_y; delta_x = 0; delta_y = 0; if (abs(yaw_input) < 0.05f && abs(pitch_input) < 0.05f) { return; } float rotation_speed = radians(delta_time); vec2 rotation = rotation_speed * vec2(yaw_input, pitch_input); const vec3 UP = { 0.0f, 1.0f, 0.0f }; mat4 yaw_rotation_matrix = rotate_around(rotation.x, UP); mat4 pitch_rotation_matrix = rotate_around(rotation.y, right); vec3 new_forward = pitch_rotation_matrix * vec4(forward.x, forward.y, forward.z, 1.0f); vec3 front = vec3::normalize(vec3::cross(UP, right)); float temp_dot = vec3::dot(new_forward, front); float temp_dot2 = vec3::dot(UP, new_forward); if (acosf(vec3::dot(new_forward, front)) > radians(89.0f)) { bool is_looking_up = vec3::dot(UP, new_forward) > 0.0f; float _radians = is_looking_up ? radians(89.0f): radians(-89.0f); forward = yaw_rotation_matrix * rotate_around(_radians, right) * vec4(front.x, front.y, front.z, 1.0f); } else { forward = yaw_rotation_matrix * pitch_rotation_matrix * vec4(forward.x, forward.y, forward.z, 1.0f); } forward = vec3::normalize(forward); right = vec3::normalize(vec3::cross(forward, UP)); upward = vec3::normalize(vec3::cross(right, forward)); } void scene_minecraft::handle_movement() { if (horizontal_input == 0 && vertical_input == 0 ) { camera.velocity.x *= 0.1; camera.velocity.z *= 0.1; return; } float speed = 5.0f; vec2 movement = speed * vec2::normalize(vec2(float(horizontal_input), float(vertical_input))); vec3 front = vec3::normalize(vec3::cross({ 0.0f, 1.0f, 0.0f }, right)); camera.velocity.x = (right * movement.x).x + (front * movement.y).x; camera.velocity.z = (right * movement.x).z + (front * movement.y).z; //camera.position.x += (right * movement.x).x + (front * movement.y).x; //camera.position.z += (right * movement.x).z + (front * movement.y).z; } void 
scene_minecraft::handle_add_block() { vector<vec3> _voxels = voxels(forward * 10.0f, camera.position); vec3 prev_voxel; for (auto &voxel : _voxels) { if (has_object(voxel)) { int x = int(prev_voxel.x); int y = int(prev_voxel.y); int z = int(prev_voxel.z); vec3i current_position = camera.position; if (x != current_position.x || (y != current_position.y || y != current_position.y - 1) || z != current_position.z) { add_block(x, z, y, WOOD_TYPE); } return; } else { prev_voxel = voxel; } } } void scene_minecraft::handle_remove_block() { vector<vec3> _voxels = voxels(forward * 10.0f, camera.position); for (auto &voxel : _voxels) { if (has_object(voxel)) { int x = int(voxel.x); int y = int(voxel.y); int z = int(voxel.z); make_near_blocks_visible(x, z, y); map_mutex.lock(); blocks[x][z][y] = 0; int nx = int(offsets[offsets.size() - 1].x); int ny = int(offsets[offsets.size() - 1].y); int nz = int(offsets[offsets.size() - 1].z); int index = coords_to_offset_index[x][z][y]; coords_to_offset_index[nx][nz][ny] = index; coords_to_offset_index[x][z][y] = 0; offsets[index] = offsets[offsets.size() - 1]; offsets.pop_back(); texture_up_coords[index] = texture_up_coords[texture_up_coords.size() - 1]; texture_up_coords.pop_back(); texture_side_coords[index] = texture_side_coords[texture_side_coords.size() - 1]; texture_side_coords.pop_back(); texture_down_coords[index] = texture_down_coords[texture_down_coords.size() - 1]; texture_down_coords.pop_back(); map_mutex.unlock(); return; } } } void scene_minecraft::handle_raycasting() { if (mouse_input == 0) { return; } if (mouse_input == 1) { handle_remove_block(); } if (mouse_input == 2) { handle_add_block(); } mouse_input = 0; } void scene_minecraft::keysDown(int key) { if (key == GLFW_KEY_C) { compile_shaders(); compile_light_shaders(); } if (key == GLFW_KEY_W) { vertical_input = 1; } if (key == GLFW_KEY_A) { horizontal_input = -1; } if (key == GLFW_KEY_S) { vertical_input = -1; } if (key == GLFW_KEY_D) { horizontal_input = 1; } if (key 
== GLFW_KEY_SPACE) { up_input = 1; } /*if (key == GLFW_KEY_UP) { pitch_input = -1; } if (key == GLFW_KEY_DOWN) { pitch_input = 1; } if (key == GLFW_KEY_RIGHT) { yaw_input = 1; } if (key == GLFW_KEY_LEFT) { yaw_input = -1; }*/ } void scene_minecraft::keysUp(int key) { if (key == GLFW_KEY_W && vertical_input == 1) { vertical_input = 0; } if (key == GLFW_KEY_A && horizontal_input == -1) { horizontal_input = 0; } if (key == GLFW_KEY_S && vertical_input == -1) { vertical_input = 0; } if (key == GLFW_KEY_D && horizontal_input == 1) { horizontal_input = 0; } if (key == GLFW_KEY_SPACE && up_input == 1) { up_input = 0; } if (key == GLFW_KEY_LEFT_CONTROL) { capture_mouse = !capture_mouse; if (capture_mouse) { double x; double y; glfwGetCursorPos(window, &x, &y); mouse_x = float(x); mouse_y = float(y); glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); } else { glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL); } } } void scene_minecraft::passiveMotion(float x, float y) { if (!capture_mouse) { return; } delta_x = (mouse_x - x); delta_y = (mouse_y - y); mouse_x = x; mouse_y = y; } void scene_minecraft::mouseButton(int button, int action) { if (action == GLFW_PRESS) { if (button == GLFW_MOUSE_BUTTON_LEFT) { std::cout << "left clicked!" << std::endl; mouse_input = 1; } if (button == GLFW_MOUSE_BUTTON_RIGHT) { std::cout << "right clicked!" 
<< std::endl; mouse_input = 2; } } } void scene_minecraft::init_poisson() { poisson_disk[0] = vec2(-0.613392f, 0.617481f); poisson_disk[1] = vec2(0.170019f, -0.040254f); poisson_disk[2] = vec2(-0.299417f, 0.791925f); poisson_disk[3] = vec2(0.645680f, 0.493210f); poisson_disk[4] = vec2(-0.651784f, 0.717887f); poisson_disk[5] = vec2(0.421003f, 0.027070f); poisson_disk[6] = vec2(-0.817194f, -0.271096f); poisson_disk[7] = vec2(-0.705374f, -0.668203f); poisson_disk[8] = vec2(0.977050f, -0.108615f); poisson_disk[9] = vec2(0.063326f, 0.142369f); poisson_disk[10] = vec2(0.203528f, 0.214331f); poisson_disk[11] = vec2(-0.667531f, 0.326090f); poisson_disk[12] = vec2(-0.098422f, -0.295755f); poisson_disk[13] = vec2(-0.885922f, 0.215369f); poisson_disk[14] = vec2(0.566637f, 0.605213f); poisson_disk[15] = vec2(0.039766f, -0.396100f); poisson_disk[16] = vec2(0.751946f, 0.453352f); poisson_disk[17] = vec2(0.078707f, -0.715323f); poisson_disk[18] = vec2(-0.075838f, -0.529344f); poisson_disk[19] = vec2(0.724479f, -0.580798f); poisson_disk[20] = vec2(0.222999f, -0.215125f); poisson_disk[21] = vec2(-0.467574f, -0.405438f); poisson_disk[22] = vec2(-0.248268f, -0.814753f); poisson_disk[23] = vec2(0.354411f, -0.887570f); poisson_disk[24] = vec2(0.175817f, 0.382366f); poisson_disk[25] = vec2(0.487472f, -0.063082f); poisson_disk[26] = vec2(-0.084078f, 0.898312f); poisson_disk[27] = vec2(0.488876f, -0.783441f); poisson_disk[28] = vec2(0.470016f, 0.217933f); poisson_disk[29] = vec2(-0.696890f, -0.549791f); poisson_disk[30] = vec2(-0.149693f, 0.605762f); poisson_disk[31] = vec2(0.034211f, 0.979980f); poisson_disk[32] = vec2(0.503098f, -0.308878f); poisson_disk[33] = vec2(-0.016205f, -0.872921f); poisson_disk[34] = vec2(0.385784f, -0.393902f); poisson_disk[35] = vec2(-0.146886f, -0.859249f); poisson_disk[36] = vec2(0.643361f, 0.164098f); poisson_disk[37] = vec2(0.634388f, -0.049471f); poisson_disk[38] = vec2(-0.688894f, 0.007843f); poisson_disk[39] = vec2(0.464034f, -0.188818f); 
poisson_disk[40] = vec2(-0.440840f, 0.137486f); poisson_disk[41] = vec2(0.364483f, 0.511704f); poisson_disk[42] = vec2(0.034028f, 0.325968f); poisson_disk[43] = vec2(0.099094f, -0.308023f); poisson_disk[44] = vec2(0.693960f, -0.366253f); poisson_disk[45] = vec2(0.678884f, -0.204688f); poisson_disk[46] = vec2(0.001801f, 0.780328f); poisson_disk[47] = vec2(0.145177f, -0.898984f); poisson_disk[48] = vec2(0.062655f, -0.611866f); poisson_disk[49] = vec2(0.315226f, -0.604297f); poisson_disk[50] = vec2(-0.780145f, 0.486251f); poisson_disk[51] = vec2(-0.371868f, 0.882138f); poisson_disk[52] = vec2(0.200476f, 0.494430f); poisson_disk[53] = vec2(-0.494552f, -0.711051f); poisson_disk[54] = vec2(0.612476f, 0.705252f); poisson_disk[55] = vec2(-0.578845f, -0.768792f); poisson_disk[56] = vec2(-0.772454f, -0.090976f); poisson_disk[57] = vec2(0.504440f, 0.372295f); poisson_disk[58] = vec2(0.155736f, 0.065157f); poisson_disk[59] = vec2(0.391522f, 0.849605f); poisson_disk[60] = vec2(-0.620106f, -0.328104f); poisson_disk[61] = vec2(0.789239f, -0.419965f); poisson_disk[62] = vec2(-0.545396f, 0.538133f); poisson_disk[63] = vec2(-0.178564f, -0.596057f); }
30.02616
121
0.639302
[ "vector" ]
bafecc67a6c9914acf6708770d1615ba6c1e6987
60,732
cpp
C++
src/hardware/fpga/fpga_core.cpp
simleo/eddl
78e2177d22d78b3298b059adce37a48d93ee648c
[ "MIT" ]
null
null
null
src/hardware/fpga/fpga_core.cpp
simleo/eddl
78e2177d22d78b3298b059adce37a48d93ee648c
[ "MIT" ]
null
null
null
src/hardware/fpga/fpga_core.cpp
simleo/eddl
78e2177d22d78b3298b059adce37a48d93ee648c
[ "MIT" ]
null
null
null
/* * FPGA support for EDDL Library - European Distributed Deep Learning Library. * Version: 0.6 * copyright (c) 2020, Universidad Politécnica de Valencia (UPV), GAP research group * Date: June 2020 * Author: GAP Research Group (UPV), contact: carlherlu@gap.upv.es, jflich@disca.upv.es * All rights reserved */ #ifdef cFPGA #include "eddl/hardware/fpga/xcl2.hpp" #include <vector> #include <math.h> #include <float.h> #include "eddl/tensor/tensor.h" #include "eddl/descriptors/descriptors.h" #include "eddl/hardware/fpga/fpga_hw.h" #include <sys/time.h> #include "eddl/hardware/cpu/cpu_tensor.h" int next_fpga_tensor_id = 1; int num_tensors_created = 0; #define MAX_BUFFER_POOL 10000 cl::Buffer *fpga_ptr_buffer_pool[MAX_BUFFER_POOL]; long fpga_size_buffer_pool[MAX_BUFFER_POOL]; int fpga_inuse_buffer_pool[MAX_BUFFER_POOL]; int fpga_free_buffer_pool[MAX_BUFFER_POOL]; int fpga_num_buffer_pool_slots; cl::Context context; cl::CommandQueue q; cl::CommandQueue com; cl::Program program; // activation kernels (22) cl::Kernel kernel_relu, kernel_d_relu, kernel_thresholded_relu, kernel_d_thresholded_relu, kernel_leaky_relu, kernel_d_leaky_relu; cl::Kernel kernel_elu, kernel_d_elu, kernel_softplus, kernel_d_softplus, kernel_softsign, kernel_d_softsign; cl::Kernel kernel_linear, kernel_d_linear,kernel_sigmoid, kernel_d_sigmoid, kernel_hard_sigmoid, kernel_d_hard_sigmoid; cl::Kernel kernel_exp, kernel_d_exp, kernel_tanh, kernel_d_tanh, kernel_softmax, kernel_d_softmax; // bn kernels (4) cl::Kernel kernel_permute_channels_last, kernel_permute_channels_first; cl::Kernel kernel_permute_batch_last, kernel_permute_batch_first; // comparison kernels (20) cl::Kernel kernel_all, kernel_any, kernel_isfinite, kernel_isinf; cl::Kernel kernel_isnan, kernel_isneginf, kernel_isposinf, kernel_equal2; cl::Kernel kernel_logical_and, kernel_logical_or, kernel_logical_not, kernel_logical_xor; cl::Kernel kernel_allclose, kernel_isclose, kernel_greater, kernel_greater_equal; cl::Kernel kernel_less, 
kernel_less_equal, kernel_equal, kernel_not_equal; cl::Kernel kernel_greater_vector, kernel_greater_equal_vector, kernel_less_vector; cl::Kernel kernel_less_equal_vector, kernel_equal_vector, kernel_not_equal_vector; // core kernels (11) cl::Kernel kernel_transpose, kernel_copy, kernel_fill_, kernel_fill; cl::Kernel kernel_select, kernel_select_back, kernel_set_select, kernel_set_select_back; cl::Kernel kernel_set_select2, kernel_deselect, kernel_concat; cl::Kernel kernel_select_nn, kernel_select_back_nn, kernel_set_select_nn, kernel_set_select_back_nn; // conv kernels (2) cl::Kernel kernel_im2col, kernel_conv2d; // create kernels (3) cl::Kernel kernel_range, kernel_eye, kernel_diag; // da kernels (6) cl::Kernel kernel_single_shift, kernel_single_rotate, kernel_single_scale; cl::Kernel kernel_single_flip, kernel_single_crop; cl::Kernel kernel_crop_scale_random; // generator kernels (4) cl::Kernel kernel_rand_uniform, kernel_signed_uniform, kernel_rand_binary, kernel_rand_normal; // losses kernels (1) cl::Kernel kernel_cent; // metrics kernels (22) cl::Kernel kernel_accuracy, kernel_bin_accuracy; // pool kernels (4) cl::Kernel kernel_mpool2D, kernel_mpool2D_back, kernel_avgpool2D, kernel_avgpool2D_back; // reduction kernels (5) cl::Kernel kernel_reduce, kernel_reduce_op, kernel_reduce_sum2D, kernel_reduction, kernel_reduction_back; // tensor_nn kernels (2) cl::Kernel kernel_repeat_nn, kernel_d_repeat_nn; // math kernels (46) cl::Kernel kernel_abs, kernel_acos, kernel_add, kernel_asin, kernel_atan, kernel_ceil, kernel_clamp; cl::Kernel kernel_cos, kernel_cosh, kernel_mod, kernel_mult, kernel_trunc, kernel_sum_abs; cl::Kernel kernel_floor, kernel_inv, kernel_log, kernel_log2, kernel_log10, kernel_logn; cl::Kernel kernel_normalize, kernel_pow, kernel_powb, kernel_reciprocal, kernel_remainder, kernel_round, kernel_rsqrt; cl::Kernel kernel_sign, kernel_sin, kernel_sinh, kernel_sqr, kernel_sqrt, kernel_tan; cl::Kernel kernel_inc, kernel_el_div, kernel_el_mult, 
kernel_sign2, kernel_sum2D_rowwise, kernel_sum2D_colwise; cl::Kernel kernel_max, kernel_min, kernel_sum, kernel_mult2d; // profiling int num_instances_fpga[_NUM_FPGA_FUNCS]; float mb_memory_needed_fpga; // profiling functions void _profile_fpga_funcname(int i, char *name) { switch(i) { case _FPGA_ALL : strcpy(name, "all"); break; case _FPGA_ANY : strcpy(name, "any"); break; case _FPGA_ISFINITE : strcpy(name, "isfinite"); break; case _FPGA_ISINF : strcpy(name, "isinf"); break; case _FPGA_ISNAN : strcpy(name, "isnan"); break; case _FPGA_ISNEGINF : strcpy(name, "isneginf"); break; case _FPGA_ISPOSINF : strcpy(name, "isposinf"); break; case _FPGA_LOGICAL_AND : strcpy(name, "logical_and"); break; case _FPGA_LOGICAL_OR : strcpy(name, "logical_or"); break; case _FPGA_LOGICAL_NOT : strcpy(name, "logical_not"); break; case _FPGA_LOGICAL_XOR : strcpy(name, "logical_xor"); break; case _FPGA_ALLCLOSE : strcpy(name, "allclose"); break; case _FPGA_ISCLOSE : strcpy(name, "isclose"); break; case _FPGA_GREATER : strcpy(name, "greater"); break; case _FPGA_GREATER_EQUAL : strcpy(name, "greater_equal"); break; case _FPGA_LESS : strcpy(name, "less"); break; case _FPGA_LESS_EQUAL : strcpy(name, "less_equal"); break; case _FPGA_EQUAL : strcpy(name, "equal"); break; case _FPGA_NOT_EQUAL : strcpy(name, "not_equal"); break; case _FPGA_EQUAL2 : strcpy(name, "equal2"); break; case _FPGA_TRANSPOSE : strcpy(name, "transpose"); break; case _FPGA_COPY : strcpy(name, "copy"); break; case _FPGA_FILL_ : strcpy(name, "fill_"); break; case _FPGA_FILL : strcpy(name, "fill"); break; case _FPGA_SELECT : strcpy(name, "select"); break; case _FPGA_SELECT_BACK : strcpy(name, "select_back"); break; case _FPGA_SET_SELECT : strcpy(name, "set_select"); break; case _FPGA_SET_SELECT_BACK : strcpy(name, "set_select_back"); break; case _FPGA_SELECT2 : strcpy(name, "select2"); break; case _FPGA_DESELECT : strcpy(name, "deselect"); break; case _FPGA_CONCAT : strcpy(name, "concat"); break; case _FPGA_RANGE : 
strcpy(name, "range"); break; case _FPGA_EYE : strcpy(name, "eye"); break; case _FPGA_SINGLE_SHIFT : strcpy(name, "single_shift"); break; case _FPGA_SINGLE_ROTATE : strcpy(name, "single_rotate"); break; case _FPGA_SINGLE_SCALE : strcpy(name, "single_scale"); break; case _FPGA_SINGLE_FLIP : strcpy(name, "single_flip"); break; case _FPGA_SINGLE_CROP : strcpy(name, "single_crop"); break; case _FPGA_SINGLE_CROP_SCALE : strcpy(name, "single_crop_scale"); break; case _FPGA_SHIFT : strcpy(name, "shift"); break; case _FPGA_ROTATE : strcpy(name, "rotate"); break; case _FPGA_SCALE : strcpy(name, "scale"); break; case _FPGA_CROP : strcpy(name, "crop"); break; case _FPGA_CROP_SCALE : strcpy(name, "crop_scale"); break; case _FPGA_SHIFT_RANDOM : strcpy(name, "shift_random"); break; case _FPGA_ROTATE_RANDOM : strcpy(name, "rotate_random"); break; case _FPGA_SCALE_RANDOM : strcpy(name, "scale_random"); break; case _FPGA_FLIP_RANDOM : strcpy(name, "flip_random"); break; case _FPGA_CROP_RANDOM : strcpy(name, "crop_random"); break; case _FPGA_CROP_SCALE_RANDOM : strcpy(name, "crop_scale_random"); break; case _FPGA_CUTOUT_RANDOM : strcpy(name, "cutout_random"); break; case _FPGA_RAND_UNIFORM : strcpy(name, "rand_uniform"); break; case _FPGA_RAND_SIGNED_UNIFORM : strcpy(name, "rand_signed_uniform"); break; case _FPGA_BINARY : strcpy(name, "binary"); break; case _FPGA_RAND_NORMAL : strcpy(name, "rand_normal"); break; case _FPGA_ABS : strcpy(name, "abs"); break; case _FPGA_ACOS : strcpy(name, "acos"); break; case _FPGA_ASIN : strcpy(name, "asin"); break; case _FPGA_ATAN : strcpy(name, "atan"); break; case _FPGA_CEIL : strcpy(name, "ceil"); break; case _FPGA_CLAMP : strcpy(name, "clamp"); break; case _FPGA_COS : strcpy(name, "cos"); break; case _FPGA_COSH : strcpy(name, "cosh"); break; case _FPGA_FLOOR : strcpy(name, "floor"); break; case _FPGA_INV : strcpy(name, "inv"); break; case _FPGA_LOG : strcpy(name, "log"); break; case _FPGA_LOG2 : strcpy(name, "log2"); break; case _FPGA_LOG10 : 
strcpy(name, "log10"); break; case _FPGA_LOGN : strcpy(name, "logn"); break; case _FPGA_MOD : strcpy(name, "mod"); break; case _FPGA_MULT : strcpy(name, "mult"); break; case _FPGA_NORMALIZE : strcpy(name, "normalize"); break; case _FPGA_POW : strcpy(name, "pow"); break; case _FPGA_POWB : strcpy(name, "powb"); break; case _FPGA_RECIPROCAL : strcpy(name, "reciprocal"); break; case _FPGA_REMAINDER : strcpy(name, "remainder"); break; case _FPGA_ROUND : strcpy(name, "round"); break; case _FPGA_RSQRT : strcpy(name, "rsqrt"); break; case _FPGA_SIGN : strcpy(name, "sign"); break; case _FPGA_SIN : strcpy(name, "sin"); break; case _FPGA_SINH : strcpy(name, "sinh"); break; case _FPGA_SQR : strcpy(name, "sqr"); break; case _FPGA_SQRT : strcpy(name, "sqrt"); break; case _FPGA_TAN : strcpy(name, "tan"); break; case _FPGA_TRUNC : strcpy(name, "trunc"); break; case _FPGA_ADD : strcpy(name, "add"); break; case _FPGA_INC : strcpy(name, "inc"); break; case _FPGA_MULT2D : strcpy(name, "mult2D"); break; case _FPGA_EL_DIV : strcpy(name, "el_div"); break; case _FPGA_EL_MULT : strcpy(name, "el_mult"); break; case _FPGA_SIGN2 : strcpy(name, "sign2"); break; case _FPGA_SUM2D_ROWWISE : strcpy(name, "sum2D_rowwise"); break; case _FPGA_SUM2D_COLWISE : strcpy(name, "sum2D_colwise"); break; case _FPGA_MAX : strcpy(name, "max"); break; case _FPGA_MIN : strcpy(name, "min"); break; case _FPGA_SUM : strcpy(name, "sum"); break; case _FPGA_SUM_ABS : strcpy(name, "sum_abs"); break; case _FPGA_REDUCE : strcpy(name, "reduce"); break; case _FPGA_REDUCE_OP : strcpy(name, "reduce_op"); break; case _FPGA_REDUCE_SUM2D : strcpy(name, "reduce_sum2D"); break; case _FPGA_REDUCTION : strcpy(name, "reduction"); break; case _FPGA_REDUCTION_BACK : strcpy(name, "reduction_back"); break; case _FPGA_RELU : strcpy(name, "relu"); break; case _FPGA_D_RELU : strcpy(name, "d_relu"); break; case _FPGA_THRESHOLDED_RELU : strcpy(name, "thresholded_relu"); break; case _FPGA_D_THRESHOLDED_RELU : strcpy(name, 
"d_thresholded_relu"); break; case _FPGA_LEAKY_RELU : strcpy(name, "leaky_relu"); break; case _FPGA_D_LEAKY_RELU : strcpy(name, "d_leaky_relu"); break; case _FPGA_ELU : strcpy(name, "elu"); break; case _FPGA_D_ELU : strcpy(name, "d_elu"); break; case _FPGA_SOFTPLUS : strcpy(name, "softplus"); break; case _FPGA_D_SOFTPLUS : strcpy(name, "d_softplus"); break; case _FPGA_SOFTSIGN : strcpy(name, "softsign"); break; case _FPGA_D_SOFTSIGN : strcpy(name, "d_softsign"); break; case _FPGA_LINEAR : strcpy(name, "linear"); break; case _FPGA_D_LINEAR : strcpy(name, "d_linear"); break; case _FPGA_SIGMOID : strcpy(name, "sigmoid"); break; case _FPGA_D_SIGMOID : strcpy(name, "d_sigmoid"); break; case _FPGA_HARD_SIGMOID : strcpy(name, "hard_sigmoid"); break; case _FPGA_D_HARD_SIGMOID : strcpy(name, "d_hard_sigmoid"); break; case _FPGA_EXP : strcpy(name, "exp"); break; case _FPGA_D_EXP : strcpy(name, "d_exp"); break; case _FPGA_TANH : strcpy(name, "tanh"); break; case _FPGA_D_TANH : strcpy(name, "d_tanh"); break; case _FPGA_SOFTMAX : strcpy(name, "softmax"); break; case _FPGA_D_SOFTMAX : strcpy(name, "d_softmax"); break; case _FPGA_PERMUTE_CHANELS_LAST : strcpy(name, "permute_channels_last"); break; case _FPGA_PERMUTE_CHANELS_FIRST : strcpy(name, "permute_channels_first"); break; case _FPGA_PERMUTE_BATCH_LAST : strcpy(name, "permute_batch_last"); break; case _FPGA_PERMUTE_BATCH_FIRST : strcpy(name, "permute_batch_first"); break; case _FPGA_IM2COL : strcpy(name, "im2col"); break; case _FPGA_CONV2D : strcpy(name, "conv2d"); break; case _FPGA_CONV2D_GRAD : strcpy(name, "conv2d_grad"); break; case _FPGA_CONV2D_BACK : strcpy(name, "conv2d_back"); break; case _FPGA_CENT : strcpy(name, "cent"); break; case _FPGA_ACCURACY : strcpy(name, "accuracy"); break; case _FPGA_MPOOL2D : strcpy(name, "mpool2d"); break; case _FPGA_MPOOL2D_BACK : strcpy(name, "mpool2d_back"); break; case _FPGA_AVGPOOL2D : strcpy(name, "avgpool2d"); break; case _FPGA_AVGPOOL2D_BACK : strcpy(name, "avgpool2d_back"); 
break; case _FPGA_REPEAT_NN : strcpy(name, "repeat_nn"); break; case _FPGA_D_REPEAT_NN : strcpy(name, "d_repeat_nn"); break; case _FPGA_SUM_2 : strcpy(name, "sum_2"); break; default : strcpy(name, "?????"); break; } } struct timeval time_ini_fpga[_NUM_FPGA_FUNCS]; unsigned long long acc_time_fpga[_NUM_FPGA_FUNCS]; void _profile_fpga(int f_id, int end) { #ifdef FPGA_DEBUG char func_name[50]; _profile_fpga_funcname(f_id, func_name); if (!end) printf("%s\n", func_name); if (end) printf("\n"); #endif num_instances_fpga[f_id]++; if (!end) gettimeofday(&time_ini_fpga[f_id], NULL); else { timeval t1; gettimeofday(&t1, NULL); acc_time_fpga[f_id] += ((t1.tv_sec - time_ini_fpga[f_id].tv_sec) * 1000000) + (t1.tv_usec - time_ini_fpga[f_id].tv_usec); } } void _profile_fpga_tensor(Tensor *T) { #ifdef FPGA_DEBUG // We read the tensor from FPGA fpga_copy_from_fpga(T, T->ptr); float min = FLT_MAX; float max = FLT_MIN; float sum = 0.f; float avg; for (int i=0; i<T->size; i++) { if (T->ptr[i] > max) max = T->ptr[i]; if (T->ptr[i] < min) min = T->ptr[i]; sum += T->ptr[i]; } avg = sum / (float)T->size; printf(" - Tensor id %d size %d size_fpga %d shape0 %d shape1 %d (cpu_ptr %p). 
Min %8.4f Max %8.4f Avg %8.4f\n", T->fpga_tensor_id, T->size, T->fpga_size, T->shape[0], T->shape[1], T->ptr, min, max, avg); #endif } void _show_profile_fpga() { #ifdef FPGA_DEBUG printf("\n---------------------------------------\nFPGA functions called:\n"); for (int i=0; i<_NUM_FPGA_FUNCS; i++) { if (num_instances_fpga[i] != 0) { char func_name[50]; _profile_fpga_funcname(i, func_name); printf("%-50s: %d instances, %llu us\n", func_name, num_instances_fpga[i], acc_time_fpga[i]); } } printf("Memory: %f MB\n", mb_memory_needed_fpga); printf("---------------------------------------\n"); #endif } void _profile_fpga_add_tensor(int size) { // printf("tensor add: size in MB: %6.4f\n", (float)size / 1024.0 / 1024.0); mb_memory_needed_fpga += (float)size / 1024.0 / 1024.0; num_tensors_created++; // printf("tensor add: size in MB: %6.4f (active tensors %d)\n", (float)size / 1024.0 / 1024.0, num_tensors_created); #ifdef FPGA_DEBUG printf(" (accumulated tensor memory %f MB)\n", mb_memory_needed_fpga); #endif } void _profile_fpga_remove_tensor(int size) { mb_memory_needed_fpga -= (float)size / 1024.0 / 1024.0; num_tensors_created--; } // FPGA initialization and finalization ---------------------- // void fpga_init(){ // initialize only once cl_int err; std::string binaryFile = "eddl.xclbin"; unsigned fileBufSize; std::vector<cl::Device> devices = xcl::get_xil_devices(); cl::Device device = devices[0]; OCL_CHECK(err, context = cl::Context(device, NULL, NULL, NULL, &err)); OCL_CHECK(err, q = cl::CommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE | CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err)); char *fileBuf = xcl::read_binary_file(binaryFile, fileBufSize); cl::Program::Binaries bins{{fileBuf, fileBufSize}}; devices.resize(1); OCL_CHECK(err, program = cl::Program(context, devices, bins, NULL, &err)); #ifdef K_ENABLED_RELU OCL_CHECK(err, kernel_relu = cl::Kernel(program,"k_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef 
K_ENABLED_D_RELU OCL_CHECK(err, kernel_d_relu = cl::Kernel(program,"k_d_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_THRESHOLDED_RELU OCL_CHECK(err, kernel_thresholded_relu = cl::Kernel(program,"k_thresholded_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_THRESHOLDED_RELU OCL_CHECK(err, kernel_d_thresholded_relu = cl::Kernel(program,"k_d_thresholded_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LEAKY_RELU OCL_CHECK(err, kernel_leaky_relu = cl::Kernel(program,"k_leaky_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_LEAKY_RELU OCL_CHECK(err, kernel_d_leaky_relu = cl::Kernel(program,"k_d_leaky_relu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ELU OCL_CHECK(err, kernel_elu = cl::Kernel(program,"k_elu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_ELU OCL_CHECK(err, kernel_d_elu = cl::Kernel(program,"k_d_elu", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SOFTPLUS OCL_CHECK(err, kernel_softplus = cl::Kernel(program,"k_softplus", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_SOFTPLUS OCL_CHECK(err, kernel_d_softplus = cl::Kernel(program,"k_d_softplus", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SOFTSIGN OCL_CHECK(err, kernel_softsign = cl::Kernel(program,"k_softsign", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_SOFTPLUS OCL_CHECK(err, kernel_d_softsign = cl::Kernel(program,"k_d_softsign", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LINEAR OCL_CHECK(err, kernel_linear = cl::Kernel(program,"k_linear", &err)); if (err != CL_SUCCESS) printf("Error 
creating kernel\n"); #endif #ifdef K_ENABLED_D_LINEAR OCL_CHECK(err, kernel_d_linear = cl::Kernel(program,"k_d_linear", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_LINEAR OCL_CHECK(err, kernel_d_softplus = cl::Kernel(program,"k_d_linear", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SIGMOID OCL_CHECK(err, kernel_sigmoid = cl::Kernel(program,"k_sigmoid", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_SIGMOID OCL_CHECK(err, kernel_d_sigmoid = cl::Kernel(program,"k_d_sigmoid", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_HARD_SIGMOID OCL_CHECK(err, kernel_hard_sigmoid = cl::Kernel(program,"k_hard_sigmoid", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_HARD_SIGMOID OCL_CHECK(err, kernel_d_hard_sigmoid = cl::Kernel(program,"k_d_hard_sigmoid", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EXP OCL_CHECK(err, kernel_exp = cl::Kernel(program,"k_exp", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_EXP OCL_CHECK(err, kernel_d_exp = cl::Kernel(program,"k_d_exp", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_TANH OCL_CHECK(err, kernel_tanh = cl::Kernel(program,"k_tanh", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_TANH OCL_CHECK(err, kernel_d_tanh = cl::Kernel(program,"k_d_tanh", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SOFTMAX OCL_CHECK(err, kernel_softmax = cl::Kernel(program,"k_softmax", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_SOFTMAX OCL_CHECK(err, kernel_d_softmax = cl::Kernel(program,"k_d_softmax", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif 
#ifdef K_ENABLED_PERMUTE_CHANNELS_LAST OCL_CHECK(err, kernel_permute_channels_last = cl::Kernel(program,"k_permute_channels_last", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_PERMUTE_CHANNELS_FIRST OCL_CHECK(err, kernel_permute_channels_first = cl::Kernel(program,"k_permute_channels_first", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_PERMUTE_BATCH_LAST OCL_CHECK(err, kernel_permute_batch_last = cl::Kernel(program,"k_permute_batch_last", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_PERMUTE_BATCH_FIRST OCL_CHECK(err, kernel_permute_batch_first = cl::Kernel(program,"k_permute_batch_first", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ALL OCL_CHECK(err, kernel_all = cl::Kernel(program,"k_all", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ANY OCL_CHECK(err, kernel_any = cl::Kernel(program,"k_any", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISFINITE OCL_CHECK(err, kernel_isfinite = cl::Kernel(program,"k_isfinite", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISINF OCL_CHECK(err, kernel_isinf = cl::Kernel(program,"k_isinf", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISNAN OCL_CHECK(err, kernel_isnan = cl::Kernel(program,"k_isnan", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISNEGINF OCL_CHECK(err, kernel_isneginf = cl::Kernel(program,"k_isneginf", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISPOSINF OCL_CHECK(err, kernel_isposinf = cl::Kernel(program,"k_isposinf", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOGICAL_AND OCL_CHECK(err, kernel_logical_and = 
cl::Kernel(program,"k_logical_and", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOGICAL_OR OCL_CHECK(err, kernel_logical_or = cl::Kernel(program,"k_logical_or", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOGICAL_NOT OCL_CHECK(err, kernel_logical_not = cl::Kernel(program,"k_logical_not", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOGICAL_XOR OCL_CHECK(err, kernel_logical_xor = cl::Kernel(program,"k_logical_xor", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ALLCLOSE OCL_CHECK(err, kernel_allclose = cl::Kernel(program,"k_allclose", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ISCLOSE OCL_CHECK(err, kernel_isclose = cl::Kernel(program,"k_isclose", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_GREATER OCL_CHECK(err, kernel_greater = cl::Kernel(program,"k_greater", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_GREATER_EQUAL OCL_CHECK(err, kernel_greater_equal = cl::Kernel(program,"k_greater_equal", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LESS OCL_CHECK(err, kernel_less = cl::Kernel(program,"k_less", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LESS_EQUAL OCL_CHECK(err, kernel_less_equal = cl::Kernel(program,"k_less_equal", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EQUAL OCL_CHECK(err, kernel_equal = cl::Kernel(program,"k_equal", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_NOT_EQUAL OCL_CHECK(err, kernel_not_equal = cl::Kernel(program,"k_not_equal", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_GREATER_VECTOR OCL_CHECK(err, 
kernel_greater_vector = cl::Kernel(program,"k_greater_vector", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_GREATER_EQUAL_VECTOR OCL_CHECK(err, kernel_greater_equal_vector = cl::Kernel(program,"k_greater_equal_vecotr", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LESS_VECTOR OCL_CHECK(err, kernel_less_vector = cl::Kernel(program,"k_less_vector", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LESS_EQUAL_VECTOR OCL_CHECK(err, kernel_less_equal_vector = cl::Kernel(program,"k_less_equal_vector", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EQUAL_VECTOR OCL_CHECK(err, kernel_equal_vector = cl::Kernel(program,"k_equal_vector", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_NOT_EQUAL_VECTOR OCL_CHECK(err, kernel_not_equal_vector = cl::Kernel(program,"k_not_equal_vector", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EQUAL2 OCL_CHECK(err, kernel_equal2 = cl::Kernel(program,"k_equal2", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_TRANSPOSE OCL_CHECK(err, kernel_transpose = cl::Kernel(program,"k_transpose", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_COPY OCL_CHECK(err, kernel_copy = cl::Kernel(program,"k_copy", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_FILL_ OCL_CHECK(err, kernel_fill_ = cl::Kernel(program,"k_fill_", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_FILL OCL_CHECK(err, kernel_fill = cl::Kernel(program,"k_fill", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SELECT OCL_CHECK(err, kernel_select = cl::Kernel(program,"k_select", &err)); if (err != CL_SUCCESS) printf("Error creating 
kernel\n"); #endif #ifdef K_ENABLED_SELECT_BACK OCL_CHECK(err, kernel_select_back = cl::Kernel(program,"k_select_back", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SET_SELECT OCL_CHECK(err, kernel_set_select = cl::Kernel(program,"k_set_select", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SET_SELECT_BACK OCL_CHECK(err, kernel_set_select_back = cl::Kernel(program,"k_set_select_back", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SELECT_NN OCL_CHECK(err, kernel_select_nn = cl::Kernel(program,"k_select_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SELECT_BACK_NN OCL_CHECK(err, kernel_select_back = cl::Kernel(program,"k_select_back_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SET_SELECT_NN OCL_CHECK(err, kernel_set_select_nn = cl::Kernel(program,"k_set_select_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SET_SELECT_BACK_NN OCL_CHECK(err, kernel_set_select_back_nn = cl::Kernel(program,"k_set_select_back_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SET_SELECT2 OCL_CHECK(err, kernel_set_select2 = cl::Kernel(program,"k_set_select2", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_DESELECT OCL_CHECK(err, kernel_deselect = cl::Kernel(program,"k_deselect", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CONCAT OCL_CHECK(err, kernel_concat = cl::Kernel(program,"k_concat", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_IM2COL OCL_CHECK(err, kernel_im2col = cl::Kernel(program,"k_im2col", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CONV2D OCL_CHECK(err, kernel_conv2d = 
cl::Kernel(program,"k_conv2d", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RANGE OCL_CHECK(err, kernel_range = cl::Kernel(program,"k_range", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EYE OCL_CHECK(err, kernel_eye = cl::Kernel(program,"k_eye", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_DIAG OCL_CHECK(err, kernel_diag = cl::Kernel(program,"k_diag", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINGLE_SHIFT OCL_CHECK(err, kernel_single_shift = cl::Kernel(program,"k_single_shift", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINGLE_ROTATE OCL_CHECK(err, kernel_single_rotate = cl::Kernel(program,"k_single_rotate", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINGLE_SCALE OCL_CHECK(err, kernel_single_scale = cl::Kernel(program,"k_single_scale", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINGLE_FLIP OCL_CHECK(err, kernel_single_flip = cl::Kernel(program,"k_single_flip", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINGLE_CROP OCL_CHECK(err, kernel_single_crop = cl::Kernel(program,"k_single_crop", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CROP_SCALE_RANDOM OCL_CHECK(err, kernel_crop_scale_random = cl::Kernel(program,"k_crop_scale_random", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RAND_UNIFORM OCL_CHECK(err, kernel_rand_uniform = cl::Kernel(program,"k_single_rand_uniform", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RAND_SIGNED_UNIFORM OCL_CHECK(err, kernel_signed_uniform = cl::Kernel(program,"k_single_signed_uniform", &err)); if (err != CL_SUCCESS) printf("Error creating 
kernel\n"); #endif #ifdef K_ENABLED_RAND_BINARY OCL_CHECK(err, kernel_rand_binary = cl::Kernel(program,"k_single_rand_binary", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RAND_NORMAL OCL_CHECK(err, kernel_rand_normal = cl::Kernel(program,"k_single_rand_normal", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CENT OCL_CHECK(err, kernel_cent = cl::Kernel(program,"k_cent", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ACCURACY OCL_CHECK(err, kernel_accuracy = cl::Kernel(program,"k_accuracy", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_BIN_ACCURACY OCL_CHECK(err, kernel_bin_accuracy = cl::Kernel(program,"k_bin_accuracy", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MPOOL2D OCL_CHECK(err, kernel_mpool2D = cl::Kernel(program,"k_mpool2D", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MPOOL2D_BACK OCL_CHECK(err, kernel_mpool2D_back = cl::Kernel(program,"k_mpool2D_back", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_AVGPOOL2D OCL_CHECK(err, kernel_avgpool2D = cl::Kernel(program,"k_mpool2D", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_AVGPOOL2D_BACK OCL_CHECK(err, kernel_avgpool2D_back = cl::Kernel(program,"k_avgpool2D_back", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REDUCE OCL_CHECK(err, kernel_reduce = cl::Kernel(program,"k_reduce", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REDUCE_OP OCL_CHECK(err, kernel_reduce_op = cl::Kernel(program,"k_reduce_op", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REDUCE_SUM2D OCL_CHECK(err, kernel_reduce_sum2D = cl::Kernel(program,"k_reduce_sum2d", 
&err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REDUCTION OCL_CHECK(err, kernel_reduction = cl::Kernel(program,"k_reduction", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REDUCTION_BACK OCL_CHECK(err, kernel_reduction_back = cl::Kernel(program,"k_reduction_back", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REPEAT_NN OCL_CHECK(err, kernel_repeat_nn = cl::Kernel(program,"k_repeat_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_D_REPEAT_NN OCL_CHECK(err, kernel_d_repeat_nn = cl::Kernel(program,"k_d_repeat_nn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ABS_ OCL_CHECK(err, kernel_abs = cl::Kernel(program,"k_abs", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ACOS_ OCL_CHECK(err, kernel_acos = cl::Kernel(program,"k_acos", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ADD_ OCL_CHECK(err, kernel_add = cl::Kernel(program,"k_add", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ASIN_ OCL_CHECK(err, kernel_asin = cl::Kernel(program,"k_asin", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ATAN_ OCL_CHECK(err, kernel_atan = cl::Kernel(program,"k_atan", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CEIL_ OCL_CHECK(err, kernel_ceil = cl::Kernel(program,"k_ceil", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_CLAMP_ OCL_CHECK(err, kernel_clamp = cl::Kernel(program,"k_clamp", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_K_COS_ OCL_CHECK(err, kernel_cos = cl::Kernel(program,"k_cos", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif 
#ifdef K_ENABLED_COSH_ OCL_CHECK(err, kernel_cosh = cl::Kernel(program,"k_cosh", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_FLOOR_ OCL_CHECK(err, kernel_floor = cl::Kernel(program,"k_floor", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_INV_ OCL_CHECK(err, kernel_inv = cl::Kernel(program,"k_inv", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOG_ OCL_CHECK(err, kernel_log = cl::Kernel(program,"k_log", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOG2_ OCL_CHECK(err, kernel_log2 = cl::Kernel(program,"k_log2", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOG10_ OCL_CHECK(err, kernel_log10 = cl::Kernel(program,"k_log10", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_LOGN_ OCL_CHECK(err, kernel_logn = cl::Kernel(program,"k_logn", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MOD_ OCL_CHECK(err, kernel_mod = cl::Kernel(program,"k_mod", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MULT_ OCL_CHECK(err, kernel_mult = cl::Kernel(program,"k_mult", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_NORMALIZE_ OCL_CHECK(err, kernel_normalize = cl::Kernel(program,"k_normalize", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_POW_ OCL_CHECK(err, kernel_pow = cl::Kernel(program,"k_pow", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_POWB_ OCL_CHECK(err, kernel_powb = cl::Kernel(program,"k_powb", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RECIPROCAL_ OCL_CHECK(err, kernel_reciprocal = cl::Kernel(program,"k_reciprocal", &err)); if (err != CL_SUCCESS) 
printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_REMAINDER_ OCL_CHECK(err, kernel_remainder = cl::Kernel(program,"k_remainder", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_ROUND_ OCL_CHECK(err, kernel_round = cl::Kernel(program,"k_round", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_RSQRT_ OCL_CHECK(err, kernel_rsqrt = cl::Kernel(program,"k_rsqrt", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SIGN_ OCL_CHECK(err, kernel_sign = cl::Kernel(program,"k_sign", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SIN_ OCL_CHECK(err, kernel_sin = cl::Kernel(program,"k_sin", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SINH_ OCL_CHECK(err, kernel_sinh = cl::Kernel(program,"k_sinh", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SQR_ OCL_CHECK(err, kernel_sqr = cl::Kernel(program,"k_sqr", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SQRT_ OCL_CHECK(err, kernel_sqrt = cl::Kernel(program,"k_sqrt", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_TAN_ OCL_CHECK(err, kernel_tan = cl::Kernel(program,"k_tan", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_TRUNC_ OCL_CHECK(err, kernel_trunc = cl::Kernel(program,"k_trunc", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_INC OCL_CHECK(err, kernel_inc = cl::Kernel(program,"k_inc", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MULT2D OCL_CHECK(err, kernel_mult2d = cl::Kernel(program,"k_mult2d", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EL_DIV OCL_CHECK(err, kernel_el_div = cl::Kernel(program,"k_el_div", &err)); 
if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_EL_MULT OCL_CHECK(err, kernel_el_mult = cl::Kernel(program,"k_el_mult", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SIGN2 OCL_CHECK(err, kernel_sign2 = cl::Kernel(program,"k_sign2", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SUM2D_ROWWISE OCL_CHECK(err, kernel_sum2D_rowwise = cl::Kernel(program,"k_sum2d_rowwise", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SUM2D_COLWISE OCL_CHECK(err, kernel_sum2D_colwise = cl::Kernel(program,"k_sum2d_colwise", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MAX OCL_CHECK(err, kernel_max = cl::Kernel(program,"k_max", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_MIN OCL_CHECK(err, kernel_min = cl::Kernel(program,"k_min", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SUM OCL_CHECK(err, kernel_sum = cl::Kernel(program,"k_sum", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif #ifdef K_ENABLED_SUM_ABS OCL_CHECK(err, kernel_sum_abs = cl::Kernel(program,"k_sum_abs", &err)); if (err != CL_SUCCESS) printf("Error creating kernel\n"); #endif // Initializing buffer pool for (int e=0; e<MAX_BUFFER_POOL; e++) { fpga_ptr_buffer_pool[e] = (cl::Buffer *)nullptr; fpga_size_buffer_pool[e] = 0; fpga_inuse_buffer_pool[e] = 0; fpga_free_buffer_pool[e] = 1; } fpga_num_buffer_pool_slots = 0; // printf("end of fpga_init\n"); } void close_fpga(){ //delete fileBuf; } // ---------------------------------------------- // Tensor creation and delete operations // cl::Buffer *fpga_create_tensor(int device, int size) { cl::Buffer *buffer; cl_int err; #ifdef FPGA_DEBUG printf(" (creating tensor in fpga, size %d)\n", size); #endif _profile_fpga_add_tensor(size*sizeof(float)); // search an 
available slot int e; for (e=0; e<fpga_num_buffer_pool_slots; e++) { if (!fpga_inuse_buffer_pool[e] && !fpga_free_buffer_pool[e] & (fpga_size_buffer_pool[e] == size)) break; } if (e!=fpga_num_buffer_pool_slots) { #ifdef FPGA_DEBUG printf(" reasigning buffer pool entry\n"); #endif fpga_inuse_buffer_pool[e] = 1; return fpga_ptr_buffer_pool[e]; } // create a new buffer pool if (fpga_num_buffer_pool_slots == MAX_BUFFER_POOL) { printf("Error, too many buffer pools\n"); exit(1); } // buffer pool slot creation #ifdef FPGA_DEBUG printf("Creating new buffer pool entry\n"); #endif OCL_CHECK(err,buffer = new cl::Buffer(context,CL_MEM_READ_WRITE, size*sizeof(float), NULL, &err)); e = fpga_num_buffer_pool_slots; fpga_ptr_buffer_pool[e] = buffer; fpga_size_buffer_pool[e] = size; fpga_inuse_buffer_pool[e] = 1; fpga_free_buffer_pool[e] = 0; fpga_num_buffer_pool_slots++; return fpga_ptr_buffer_pool[e]; } void fpga_delete_tensor(int device, cl::Buffer *ptr, int fpga_tensor_id_p, int size) { #ifdef FPGA_DEBUG printf(" (deleting tensor in fpga, id %d)\n", fpga_tensor_id_p); #endif _profile_fpga_remove_tensor(size*sizeof(float)); // we just update the buffer pool // int e; // printf("ptr to delete %p size %d\n", ptr, size); for (e=0; e<fpga_num_buffer_pool_slots; e++) { // printf("slot %d: inuse %d free %d size %d ptr %p\n", e, fpga_inuse_buffer_pool[e], fpga_free_buffer_pool[e], fpga_size_buffer_pool[e], fpga_ptr_buffer_pool[e]); if (fpga_inuse_buffer_pool[e] && !fpga_free_buffer_pool[e] && (fpga_size_buffer_pool[e] == size) && (fpga_ptr_buffer_pool[e] == ptr)) break; } if (e==fpga_num_buffer_pool_slots) { printf("Error, delete tensor function did not find the buffer in the pool\n"); exit(1); } fpga_inuse_buffer_pool[e] = 0; //delete ptr; } // --------------------------------------------------- // Copy operations // /////////////////////////////////////////// void fpga_copy_fpga(Tensor *A, Tensor *B) { #ifdef FPGA_DEBUG printf(" (copy fpga: tensor id %d (size %d, ptr %p) -> tensor id 
%d (size %d, ptr %p))\n", A->fpga_tensor_id, A->size, A->fpga_ptr, B->fpga_tensor_id, B->size, B->fpga_ptr); #endif cl_int err; cl::Event blocking_event; cl::Buffer *bufferA = A->fpga_ptr; cl::Buffer *bufferB = B->fpga_ptr; if (A->size > B->size) {printf("Error, copy_fpga beyond limits\n"); exit(1);} OCL_CHECK(err, err= q.enqueueCopyBuffer(*bufferA, *bufferB, 0, 0, A->size*sizeof(float), NULL, &blocking_event)); q.finish(); #ifdef FPGA_DEBUG printf("copy completed\n"); #endif } void fpga_copy_to_fpga(float *nptr, Tensor *A) { #ifdef FPGA_DEBUG printf(" (copy to fpga: tensor id %d, size %d, from_cpu_ptr %p)\n", A->fpga_tensor_id, A->size, nptr); #endif cl_int err; cl::Event blocking_event; cl::Buffer *buf = A->fpga_ptr; OCL_CHECK(err, err= q.enqueueWriteBuffer(*buf, CL_TRUE, 0, A->size*sizeof(float), nptr, nullptr, &blocking_event)); q.finish(); } /////////////////////////////////////////// void fpga_copy_from_fpga(Tensor *A,float *nptr) { #ifdef FPGA_DEBUG printf(" (copy from fpga: tensor id %d, size %d, to_cpu_ptr %p)\n", A->fpga_tensor_id, A->size, nptr); #endif cl_int err; cl::Event event; OCL_CHECK(err, err= q.enqueueReadBuffer(*(A->fpga_ptr), CL_TRUE, 0, A->size*sizeof(float), nptr, nullptr, &event)); q.finish();; } void fpga_copy_addresses_from_fpga(SelDescriptor *SD, int size, int *nptr) { cl_int err; cl::Event event; cl::Buffer *buf = SD->fpga_ptr; OCL_CHECK(err, err= q.enqueueReadBuffer(*buf, CL_TRUE, 0, size, nptr, nullptr, &event)); q.finish();; } void fpga_destroy_memory(cl::Buffer *fpga_ptrI) { if (fpga_ptrI != (cl::Buffer *)nullptr) delete fpga_ptrI; } cl::Buffer *fpga_create_memory(long int size) { cl::Buffer *buffer; cl_int err; #ifdef FPGA_DEBUG printf(" (creating memory in fpga size %d)\n", size); #endif OCL_CHECK(err,buffer = new cl::Buffer(context,CL_MEM_READ_WRITE, size, NULL, &err)); return buffer; } void fpga_copy_memory_to_fpga(void *ptr_cpu, cl::Buffer *ptr_fpga, long int size) { #ifdef FPGA_DEBUG printf(" (copy memory to fpga: size %d, 
ptr_cpu %p)\n", size, ptr_cpu); #endif cl_int err; cl::Event blocking_event; OCL_CHECK(err, err= q.enqueueWriteBuffer(*ptr_fpga, CL_TRUE, 0, size, ptr_cpu, nullptr, &blocking_event)); q.finish(); } void fpga_copy_memory_from_fpga(cl::Buffer *ptr_fpga, void *ptr_cpu, long int size) { #ifdef FPGA_DEBUG printf(" (copy memory from fpga: size %d, ptr_cpu %p)\n", size, ptr_cpu); #endif cl_int err; cl::Event event; OCL_CHECK(err, err= q.enqueueReadBuffer(*ptr_fpga, CL_TRUE, 0, size, ptr_cpu, nullptr, &event)); q.finish(); } // --------------------------------------------------- // Support functions // ----------------------------------------------------------------- // all // void fpga_cpuemu_transpose(Tensor *A, Tensor *B) { fpga_copy_from_fpga(A, A->ptr); cpu_transpose(A, B); fpga_copy_to_fpga(B->ptr, B); } void fpga_transpose(Tensor * A, Tensor * B) { _profile_fpga(_FPGA_TRANSPOSE, 0); #ifndef K_ENABLED_TRANSPOSE fpga_cpuemu_transpose(A, B); #else cl_int err; cl::Event event; OCL_CHECK(err, err = kernel_transpose.setArg(0, *(A->fpga_ptr))); OCL_CHECK(err, err = kernel_transpose.setArg(1, *(B->fpga_ptr))); OCL_CHECK(err, err = kernel_transpose.setArg(2, (long int)A->size)); OCL_CHECK(err, err = q.enqueueTask(kernel_transpose, NULL, &event)); q.finish(); #endif _profile_fpga(_FPGA_TRANSPOSE, 1); } // ----------------------------------------------------------------- // copy // void fpga_cpuemu_copy(Tensor *A, Tensor *B) { fpga_copy_from_fpga(A, A->ptr); cpu_copy(A, B); fpga_copy_to_fpga(B->ptr, B); } void fpga_copy(Tensor * A, Tensor * B){ _profile_fpga(_FPGA_COPY, 0); #ifndef K_ENABLED_COPY fpga_cpuemu_copy(A, B); #else int Asize = A->size * sizeof(float); if (A->ptr == NULL) A->ptr = (float *)malloc(Asize); fpga_copy_from_fpga(A, A->ptr); fpga_copy_to_fpga(A->ptr, B); #endif _profile_fpga(_FPGA_COPY, 1); } // ----------------------------------------------------------------- // fill_ // void fpga_cpuemu_fill_(Tensor *A, float v) { cpu_fill_(A, v); 
fpga_copy_to_fpga(A->ptr, A); } void fpga_fill_(Tensor *A, float v){ _profile_fpga(_FPGA_FILL_, 0); #ifndef K_ENABLED_FILL_ fpga_cpuemu_fill_(A, v); #else cl_int err; cl::Event event; OCL_CHECK(err, err = kernel_fill_.setArg(0, *(A->fpga_ptr))); OCL_CHECK(err, err = kernel_fill_.setArg(1, v)); OCL_CHECK(err, err = kernel_fill_.setArg(2, (long int)A->size)); OCL_CHECK(err, err = q.enqueueTask(kernel_fill_, NULL, &event)); q.finish(); #endif _profile_fpga(_FPGA_FILL_, 1); } // ----------------------------------------------------------------- // fill // void fpga_cpuemu_fill(Tensor *A, int aini, int aend, Tensor *B, int bini, int bend, int inc) { fpga_copy_from_fpga(A, A->ptr); cpu_fill(A, aini, aend, B, bini, bend, inc); fpga_copy_to_fpga(B->ptr, B); } void fpga_fill(Tensor *A, int aini, int aend, Tensor *B, int bini, int bend, int inc){ _profile_fpga(_FPGA_FILL, 0); #ifndef K_ENABLED_FILL fpga_cpuemu_fill(A, aini, aend, B, bini, bend, inc); #else cl_int err; cl::Event event; OCL_CHECK(err, err = kernel_fill.setArg(0, *(A->fpga_ptr))); OCL_CHECK(err, err = kernel_fill.setArg(1, (int)aini)); OCL_CHECK(err, err = kernel_fill.setArg(2, (int)aend)); OCL_CHECK(err, err = kernel_fill.setArg(3, *(B->fpga_ptr))); OCL_CHECK(err, err = kernel_fill.setArg(4, (int)bini)); OCL_CHECK(err, err = kernel_fill.setArg(5, (int)bend)); OCL_CHECK(err, err = kernel_fill.setArg(6, (int)inc)); OCL_CHECK(err, err = kernel_fill.setArg(7, (int)A->ndim)); OCL_CHECK(err, err = kernel_fill.setArg(8, (long int)A->size)); OCL_CHECK(err, err = kernel_fill.setArg(9, (int)A->shape[0])); OCL_CHECK(err, err = kernel_fill.setArg(10, (int)B->size)); OCL_CHECK(err, err = kernel_fill.setArg(11, (int)B->shape[0])); OCL_CHECK(err, err = q.enqueueTask(kernel_fill, NULL, &event)); q.finish(); #endif _profile_fpga(_FPGA_FILL, 1); } // ----------------------------------------------------------------- // select // void fpga_cpuemu_select(Tensor *A, Tensor *B, SelDescriptor *sd) { int ADDRsize = B->size * 
sizeof(int); fpga_copy_from_fpga(A, A->ptr); fpga_copy_addresses_from_fpga(sd, ADDRsize, sd->cpu_addresses); cpu_select(A, B, sd); fpga_copy_to_fpga(B->ptr, B); } void fpga_select(Tensor *A, Tensor *B, SelDescriptor *sd){ _profile_fpga(_FPGA_SELECT, 0); #ifndef K_ENABLED_SELECT fpga_cpuemu_select(A, B, sd); #else printf("fpga_select not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_select.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_select.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_select.setArg(2, ((int)sd->fpga_addresses))); //TOCHECK // OCL_CHECK(err, err = kernel_select.setArg(3, (long int)A->size)); // // OCL_CHECK(err, err = q.enqueueTask(kernel_select, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_SELECT, 1); } // ----------------------------------------------------------------- // select_back // void fpga_cpuemu_select_back(Tensor *A, Tensor *B, SelDescriptor *sd) { int ADDRsize = B->size * sizeof(int); fpga_copy_from_fpga(A, A->ptr); fpga_copy_addresses_from_fpga(sd, ADDRsize, sd->cpu_addresses); cpu_select_back(A, B, sd); fpga_copy_to_fpga(B->ptr, B); } void fpga_select_back(Tensor *A, Tensor *B, SelDescriptor *sd){ _profile_fpga(_FPGA_SELECT_BACK, 0); #ifndef K_ENABLED_SELECT_BACK fpga_cpuemu_select_back(A, B, sd); #else printf("fpga_select_back not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_select_back.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_select_back.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_select_back.setArg(2, ((int)sd->fpga_addresses))); //TOCHECK // OCL_CHECK(err, err = kernel_select_back.setArg(3, (long int)A->size)); // // OCL_CHECK(err, err = q.enqueueTask(kernel_select_back, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_SELECT_BACK, 1); } // ----------------------------------------------------------------- // set_select // void 
fpga_cpuemu_set_select(Tensor *A, Tensor *B, SelDescriptor *sd) { int ADDRsize = B->size * sizeof(int); fpga_copy_from_fpga(A, A->ptr); fpga_copy_addresses_from_fpga(sd, ADDRsize, sd->cpu_addresses); cpu_set_select(A, B, sd); fpga_copy_to_fpga(B->ptr, B); } void fpga_set_select(Tensor *A, Tensor *B, SelDescriptor *sd){ _profile_fpga(_FPGA_SET_SELECT, 0); #ifndef K_ENABLED_SET_SELECT fpga_cpuemu_set_select(A, B, sd); #else printf("fpga_set_select not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_set_select.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select.setArg(2, ((int)sd->fpga_addresses))); //TOCHECK // OCL_CHECK(err, err = kernel_set_select.setArg(3, (long int)A->size)); // // OCL_CHECK(err, err = q.enqueueTask(kernel_set_select, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_SET_SELECT, 1); } // ----------------------------------------------------------------- // set_select_back // void fpga_cpuemu_set_select_back(Tensor *A, Tensor *B, SelDescriptor *sd) { int ADDRsize = B->size * sizeof(int); fpga_copy_from_fpga(A, A->ptr); fpga_copy_addresses_from_fpga(sd, ADDRsize, sd->cpu_addresses); cpu_set_select_back(A, B, sd); fpga_copy_to_fpga(B->ptr, B); } void fpga_set_select_back(Tensor *A, Tensor *B, SelDescriptor *sd){ _profile_fpga(_FPGA_SET_SELECT_BACK, 0); #ifndef K_ENABLED_SET_SELECT_BACK fpga_cpuemu_set_select_back(A, B, sd); #else printf("fpga_set_select_back not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_set_select_back.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select_back.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select_back.setArg(2, ((int)sd->fpga_addresses))); //TOCHECK // OCL_CHECK(err, err = kernel_set_select_back.setArg(3, (long int)A->size)); // // OCL_CHECK(err, err = 
q.enqueueTask(kernel_set_select_back, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_SET_SELECT_BACK, 1); } // ----------------------------------------------------------------- // select2 // void fpga_cpuemu_select(Tensor * A, Tensor * B, vector<int> sind, int ini, int end,bool mask_zeros) { fpga_copy_from_fpga(A, A->ptr); cpu_select(A, B, sind, ini, end, mask_zeros); fpga_copy_to_fpga(B->ptr, B); } void fpga_select(Tensor * A, Tensor * B, vector<int> sind, int ini, int end,bool mask_zeros){ _profile_fpga(_FPGA_SELECT2, 0); #ifndef K_ENABLED_SELECT fpga_cpuemu_select(A, B, sind, ini, end, mask_zeros); #else printf("fpga_select not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_set_select_back.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select_back.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_set_select_back.setArg(2, (sind))); //TOCHECK // OCL_CHECK(err, err = kernel_set_select_back.setArg(3, (int)ini)); // OCL_CHECK(err, err = kernel_set_select_back.setArg(4, (int)end)); // OCL_CHECK(err, err = kernel_set_select_back.setArg(5, (bool)mask_zeros)); // OCL_CHECK(err, err = kernel_set_select_back.setArg(6, (long int)A->size)); // OCL_CHECK(err, err = kernel_set_select_back.setArg(7, (int)A->shape[0])); // // OCL_CHECK(err, err = q.enqueueTask(kernel_set_select_back, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_SELECT2, 1); } // ----------------------------------------------------------------- // deselect // void fpga_cpuemu_deselect(Tensor * A, Tensor * B, vector<int> sind, int ini, int end, int inc, bool mask_zeros) { fpga_copy_from_fpga(A, A->ptr); cpu_deselect(A, B, sind, ini, end, inc, mask_zeros); fpga_copy_to_fpga(B->ptr, B); } void fpga_deselect(Tensor * A, Tensor * B, vector<int> sind, int ini, int end,int inc,bool mask_zeros){ _profile_fpga(_FPGA_DESELECT, 0); #ifndef K_ENABLED_DESELECT fpga_cpuemu_deselect(A, B, sind, ini, end, inc, mask_zeros); #else 
printf("fpga_deselect not implemented yet\n"); exit(1); // cl_int err; // cl::Event event; // // OCL_CHECK(err, err = kernel_deselect.setArg(0, *(A->fpga_ptr))); // OCL_CHECK(err, err = kernel_deselect.setArg(1, *(B->fpga_ptr))); // OCL_CHECK(err, err = kernel_deselect.setArg(2, (sind))); //TOCHECK // OCL_CHECK(err, err = kernel_deselect.setArg(3, (int)ini)); // OCL_CHECK(err, err = kernel_deselect.setArg(4, (int)end)); // OCL_CHECK(err, err = kernel_deselect.setArg(5, (int)inc)); // OCL_CHECK(err, err = kernel_deselect.setArg(6, (bool)mask_zeros)); // OCL_CHECK(err, err = kernel_deselect.setArg(7, (long int)A->size)); // OCL_CHECK(err, err = kernel_deselect.setArg(8, (int)A->shape[0])); // // OCL_CHECK(err, err = q.enqueueTask(kernel_deselect, NULL, &event)); // q.finish(); #endif _profile_fpga(_FPGA_DESELECT, 1); } // ----------------------------------------------------------------- // concat // void fpga_cpuemu_concat(Tensor *A, vector<Tensor*> t, unsigned int axis, bool derivative) { int Asize = A->size * sizeof(float); if (A->ptr == NULL) A->ptr = (float *)malloc(Asize); for (unsigned int i = 0; i < t.size(); i++) { int Tsize = t[i]->size * sizeof(float); if (t[i]->ptr == NULL) t[i]->ptr = (float *)malloc(Tsize); fpga_copy_from_fpga(t[i], t[i]->ptr); } cpu_concat(A, t, axis, derivative); fpga_copy_to_fpga(A->ptr, A); } void fpga_concat(Tensor *A, vector<Tensor*> t, unsigned int axis, bool derivative){ _profile_fpga(_FPGA_CONCAT, 0); #ifndef K_ENABLED_CONCAT fpga_cpuemu_concat(A, t, axis, derivative); #else printf("fpga_concat not implemented yet\n"); exit(1); #endif _profile_fpga(_FPGA_CONCAT, 1); } #endif
42.648876
207
0.656738
[ "shape", "vector" ]
24023b1b042c3a1b52e3d8506901b9df2f707c0d
4,789
cc
C++
Alignment/CommonAlignmentMonitor/plugins/AlignmentMonitorGeneric.cc
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
Alignment/CommonAlignmentMonitor/plugins/AlignmentMonitorGeneric.cc
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
Alignment/CommonAlignmentMonitor/plugins/AlignmentMonitorGeneric.cc
pasmuss/cmssw
566f40c323beef46134485a45ea53349f59ae534
[ "Apache-2.0" ]
null
null
null
// AlignmentMonitorGeneric: books and fills per-alignable hit-residual
// histograms (split by track charge and by x/y) plus a set of global track
// parameter histograms, for use in the CMSSW tracker alignment monitoring.
#include "TrackingTools/TrackFitters/interface/TrajectoryStateCombiner.h"
#include "Alignment/CommonAlignment/interface/AlignableObjectId.h"
#include "Alignment/CommonAlignmentMonitor/plugins/AlignmentMonitorGeneric.h"
#include <DataFormats/GeometrySurface/interface/LocalError.h>
#include "Geometry/CommonDetUnit/interface/TrackerGeomDet.h"

#include "TObject.h"
#include <TString.h>

AlignmentMonitorGeneric::AlignmentMonitorGeneric(const edm::ParameterSet& cfg):
  AlignmentMonitorBase(cfg, "AlignmentMonitorGeneric")
{
}

// Book one residual histogram per (alignable, residual kind) and six global
// track-parameter histograms. Called once per iteration by the base class.
void AlignmentMonitorGeneric::book()
{
  std::vector<std::string> residNames; // names of residual histograms

  // Four residual kinds: x/y residual, positively/negatively charged tracks.
  residNames.push_back("x hit residuals pos track");
  residNames.push_back("x hit residuals neg track");
  residNames.push_back("y hit residuals pos track");
  residNames.push_back("y hit residuals neg track");

  auto alignableObjectId = AlignableObjectId::commonObjectIdProvider(pTracker(), pMuon());

  const std::vector<Alignable*>& alignables = pStore()->alignables();

  unsigned int nAlignable = alignables.size();
  unsigned int nResidName = residNames.size();

  for (unsigned int i = 0; i < nAlignable; ++i)
  {
    const Alignable* ali = alignables[i];

    // One vector of histogram pointers per alignable, keyed by the alignable
    // itself; slots start as null and are filled below.
    Hist1Ds& hists = m_resHists[ali];

    hists.resize(nResidName, 0);

    align::ID id = ali->id();
    align::StructureType type = ali->alignableObjectId();

    for (unsigned int n = 0; n < nResidName; ++n)
    {
      const std::string& name = residNames[n];

      // Histogram name: "<residual name>_<structure>_<id>" with spaces removed.
      TString histName(name.c_str());
      histName += Form("_%s_%d", alignableObjectId.idToString(type), id);
      histName.ReplaceAll(" ", "");

      TString histTitle(name.c_str());
      histTitle += Form(" for %s with ID %d (subdet %d)", alignableObjectId.idToString(type), id, DetId(id).subdetId());

      // Residuals are filled normalized (residual / error), hence the fixed
      // [-5, 5] pull-like range.
      hists[n] = book1D(std::string("/iterN/") + std::string(name) + std::string("/"), std::string(histName.Data()), std::string(histTitle.Data()), nBin_, -5., 5.);
    }
  }

  m_trkHists.resize(6, 0);

  m_trkHists[0] = book1D("/iterN/", "pt" , "track p_{t} (GeV)" , nBin_, 0.0, 100.0);
  m_trkHists[1] = book1D("/iterN/", "eta" , "track #eta" , nBin_, - 3.0, 3.0);
  m_trkHists[2] = book1D("/iterN/", "phi" , "track #phi" , nBin_, -M_PI, M_PI);
  m_trkHists[3] = book1D("/iterN/", "d0" , "track d0 (cm)" , nBin_, -0.02, 0.02);
  m_trkHists[4] = book1D("/iterN/", "dz" , "track dz (cm)" , nBin_, -20.0, 20.0);
  m_trkHists[5] = book1D("/iterN/", "chi2", "track #chi^{2}/dof", nBin_, 0.0, 20.0);
}

// Per-event fill: for every valid hit of every track, walk up the alignable
// hierarchy and fill the normalized x/y residual into each level that was
// booked; then fill the global track-parameter histograms.
void AlignmentMonitorGeneric::event(const edm::Event &iEvent, const edm::EventSetup&, const ConstTrajTrackPairCollection& tracks)
{
  TrajectoryStateCombiner tsoscomb;

  for (unsigned int t = 0; t < tracks.size(); ++t)
  {
    const reco::Track* track = tracks[t].second;

    float charge = tracks[t].second->charge();

    const std::vector<TrajectoryMeasurement>& meass = tracks[t].first->measurements();

    for (unsigned int m = 0; m < meass.size(); ++m)
    {
      const TrajectoryMeasurement& meas = meass[m];
      const TransientTrackingRecHit& hit = *meas.recHit();

      if ( hit.isValid() )
      {
        const Alignable* ali = pNavigator()->alignableFromDetId( hit.geographicalId() );

        // Climb from the hit's module up through its mothers so composite
        // structures accumulate the residuals of their children.
        while (ali)
        {
          std::map<const Alignable*, Hist1Ds>::iterator h = m_resHists.find(ali);
          if ( h != m_resHists.end() )
          {
            // Unbiased-ish state at the hit: combination of forward and
            // backward predicted states (excludes the hit itself).
            TrajectoryStateOnSurface tsos = tsoscomb( meas.forwardPredictedState(), meas.backwardPredictedState() );
            align::LocalVector res = tsos.localPosition() - hit.localPosition();
            LocalError err1 = tsos.localError().positionError();
            LocalError err2 = hit.localPositionError(); // CPE+APE

            // subtract APEs from err2 (if existing) from covariance matrix
            // NOTE(review): the subtraction can in principle leave a negative
            // variance, making the sqrt below NaN — confirm upstream
            // guarantees err2 >= APE component-wise.
            auto det = static_cast<const TrackerGeomDet*>(hit.det());
            const auto localAPE = det->localAlignmentError();
            if (localAPE.valid()) {
              err2 = LocalError(err2.xx() - localAPE.xx(), err2.xy() - localAPE.xy(), err2.yy() - localAPE.yy());
            }

            float errX = std::sqrt( err1.xx() + err2.xx() );
            float errY = std::sqrt( err1.yy() + err2.yy() );

            // Histogram index: 0/1 = x residual pos/neg charge, 2/3 = y.
            h->second[charge > 0 ? 0 : 1]->Fill(res.x() / errX);
            h->second[charge > 0 ? 2 : 3]->Fill(res.y() / errY);
          }
          ali = ali->mother();
        }
      }
    }

    m_trkHists[0]->Fill( track->pt() );
    m_trkHists[1]->Fill( track->eta() );
    m_trkHists[2]->Fill( track->phi() );
    m_trkHists[3]->Fill( track->d0() );
    m_trkHists[4]->Fill( track->dz() );
    m_trkHists[5]->Fill( track->normalizedChi2() );
  }
}

#include "Alignment/CommonAlignmentMonitor/interface/AlignmentMonitorPluginFactory.h"
DEFINE_EDM_PLUGIN(AlignmentMonitorPluginFactory, AlignmentMonitorGeneric, "AlignmentMonitorGeneric");
34.453237
164
0.645646
[ "geometry", "vector" ]
2402fb1fa216bcce92f04021bd3802e25fa8755f
2,820
cpp
C++
dynamic_vino_lib/src/models/base_model.cpp
pqLee/ros2_openvino_toolkit
6ba38446bf9778567be2df14d1141c3669e7ac92
[ "Apache-2.0" ]
120
2018-09-30T05:36:25.000Z
2022-01-28T17:52:47.000Z
dynamic_vino_lib/src/models/base_model.cpp
pqLee/ros2_openvino_toolkit
6ba38446bf9778567be2df14d1141c3669e7ac92
[ "Apache-2.0" ]
151
2018-10-16T17:46:24.000Z
2022-03-11T14:38:54.000Z
dynamic_vino_lib/src/models/base_model.cpp
pqLee/ros2_openvino_toolkit
6ba38446bf9778567be2df14d1141c3669e7ac92
[ "Apache-2.0" ]
83
2018-09-30T05:09:35.000Z
2022-01-26T04:56:09.000Z
// Copyright (c) 2018 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @brief a header file with declaration of BaseModel class * @file base_model.cpp */ #include <fstream> #include <string> #include <memory> #include <algorithm> #include <iostream> #include <unistd.h> #include "dynamic_vino_lib/models/base_model.hpp" #include "dynamic_vino_lib/slog.hpp" #include "dynamic_vino_lib/models/attributes/base_attribute.hpp" // Validated Base Network Models::BaseModel::BaseModel( const std::string & model_loc, int max_batch_size) : model_loc_(model_loc), max_batch_size_(max_batch_size), ModelAttribute(model_loc) { if (model_loc.empty()) { throw std::logic_error("model file name is empty!"); } ///net_reader_ = std::make_shared<InferenceEngine::CNNNetReader>(); } void Models::BaseModel::modelInit() { slog::info << "Loading network files" << slog::endl; // Read network model ///net_reader_->ReadNetwork(model_loc_); net_reader_ = engine.ReadNetwork(model_loc_); // Extract model name and load it's weights // remove extension size_t last_index = model_loc_.find_last_of("."); std::string raw_name = model_loc_.substr(0, last_index); ///std::string bin_file_name = raw_name + ".bin"; ///net_reader_->ReadWeights(bin_file_name); // Read labels (if any) std::string label_file_name = raw_name + ".labels"; loadLabelsFromFile(label_file_name); // Set batch size to given max_batch_size_ slog::info << "Batch size is set to " << max_batch_size_ << slog::endl; 
///net_reader_->getNetwork().setBatchSize(max_batch_size_); net_reader_.setBatchSize(max_batch_size_); updateLayerProperty(net_reader_); } #if 0 bool Models::BaseModel::updateLayerProperty( InferenceEngine::CNNNetReader::Ptr net_reader) { #if 0 if (!updateLayerProperty(net_reader)){ slog::warn << "The model(name: " << getModelName() << ") failed to update Layer Property!" << slog::endl; return false; } #endif if(!isVerified()){ slog::warn << "The model(name: " << getModelName() << ") does NOT pass Attribute Check!" << slog::endl; return false; } return true; } #endif Models::ObjectDetectionModel::ObjectDetectionModel( const std::string & model_loc, int max_batch_size) : BaseModel(model_loc, max_batch_size) {}
30.322581
94
0.721277
[ "model" ]
240b0cde67c7b4b2d5993a2273cc3c36f8d7ca72
4,069
cpp
C++
Utilities/ftgl/src/FTGLTextureFont.cpp
Lin1225/vtk_v5.10.0
b54ac74f4716572862365fbff28cd0ecb8d08c3d
[ "BSD-3-Clause" ]
3
2020-06-20T23:31:06.000Z
2021-01-11T02:17:16.000Z
Utilities/ftgl/src/FTGLTextureFont.cpp
Lin1225/vtk_v5.10.0
b54ac74f4716572862365fbff28cd0ecb8d08c3d
[ "BSD-3-Clause" ]
2
2015-06-19T07:24:42.000Z
2015-06-19T13:39:01.000Z
Utilities/ftgl/src/FTGLTextureFont.cpp
Lin1225/vtk_v5.10.0
b54ac74f4716572862365fbff28cd0ecb8d08c3d
[ "BSD-3-Clause" ]
5
2015-03-23T21:13:19.000Z
2022-01-03T11:15:39.000Z
#include "FTGLTextureFont.h" #include "FTTextureGlyph.h" #ifdef FTGL_DEBUG #include "mmgr.h" #endif #ifdef FTGL_USE_NAMESPACE namespace ftgl { #endif inline GLuint NextPowerOf2( GLuint in) { in -= 1; in |= in >> 16; in |= in >> 8; in |= in >> 4; in |= in >> 2; in |= in >> 1; return in + 1; } FTGLTextureFont::FTGLTextureFont() : maxTextSize(0), textureWidth(0), textureHeight(0), numTextures(0), textMem(0), glyphHeight(0), glyphWidth(0), padding(1), remGlyphs(0), xOffset(0), yOffset(0) {} FTGLTextureFont::~FTGLTextureFont() { glDeleteTextures( numTextures, (const GLuint*)glTextureID); } FTGlyph* FTGLTextureFont::MakeGlyph( unsigned int g) { FT_Glyph* ftGlyph = face.Glyph( g, FT_LOAD_NO_HINTING); if( ftGlyph) { // Estimate the glyph size size - global bbox glyphHeight = ( charSize.Height()); glyphWidth = ( charSize.Width()); // Is there a current texture if( numTextures == 0) { glTextureID[0] = CreateTexture(); xOffset = yOffset = padding; ++numTextures; } // will it fit in the current texture if( xOffset > ( textureWidth - glyphWidth)) { xOffset = padding; yOffset += glyphHeight; if( yOffset > ( textureHeight - glyphHeight)) { // no - make a new texture glTextureID[numTextures] = CreateTexture(); yOffset = padding; ++numTextures; } } // yes - load the glyph FTTextureGlyph* tempGlyph = new FTTextureGlyph( *ftGlyph, glTextureID[numTextures - 1], xOffset, yOffset, textureWidth, textureHeight); // FIXME ceiling xOffset += (int)(tempGlyph->BBox().x2 - tempGlyph->BBox().x1 + padding); --remGlyphs; return tempGlyph; } err = face.Error(); return NULL; } bool FTGLTextureFont::MakeGlyphList() { if( !maxTextSize) glGetIntegerv( GL_MAX_TEXTURE_SIZE, (GLint*)&maxTextSize); remGlyphs = numGlyphs; FTFont::MakeGlyphList(); return !err; // FIXME what err? } void FTGLTextureFont::GetSize() { //work out the max width. 
Most likely maxTextSize textureWidth = NextPowerOf2( (remGlyphs * glyphWidth) + padding * 2); if( textureWidth > maxTextSize) { textureWidth = maxTextSize; } int h = static_cast<int>( (textureWidth - padding * 2) / glyphWidth); textureHeight = NextPowerOf2( (( numGlyphs / h) + 1) * glyphHeight); textureHeight = textureHeight > maxTextSize ? maxTextSize : textureHeight; } int FTGLTextureFont::CreateTexture() { // calc the size GetSize(); // allocate some mem and clear it to black int totalMem = textureWidth * textureHeight; textMem = new unsigned char[totalMem]; // GL_ALPHA texture; memset( textMem, 0, totalMem); // Create the blank texture int textID; glGenTextures( 1, (GLuint*)&textID); glPixelStorei( GL_UNPACK_ALIGNMENT, 1); //What does this do exactly? glBindTexture( GL_TEXTURE_2D, textID); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexImage2D( GL_TEXTURE_2D, 0, GL_ALPHA, textureWidth, textureHeight, 0, GL_ALPHA, GL_UNSIGNED_BYTE, textMem); delete [] textMem; return textID; } void FTGLTextureFont::render( const char* string) { glPushAttrib( GL_ENABLE_BIT | GL_HINT_BIT | GL_LINE_BIT | GL_PIXEL_MODE_BIT); glEnable(GL_BLEND); glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // GL_ONE FTFont::render( string); glPopAttrib(); } void FTGLTextureFont::render( const wchar_t* string) { glPushAttrib( GL_ENABLE_BIT | GL_HINT_BIT | GL_LINE_BIT | GL_PIXEL_MODE_BIT); glEnable(GL_BLEND); glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // GL_ONE FTFont::render( string); glPopAttrib(); } #ifdef FTGL_USE_NAMESPACE } // namespace ftgl #endif
22.234973
113
0.661588
[ "render" ]
240b23e9bcebc616bdc933dd6de88640fe634b95
759
hpp
C++
src/mbgl/tile/geojson_tile.hpp
roblabs/maplibre-gl-native
d62ff400c6f75750d71b563344b1ca1e07b9b576
[ "BSD-2-Clause", "BSD-3-Clause" ]
4,234
2015-01-09T08:10:16.000Z
2022-03-30T14:13:55.000Z
src/mbgl/tile/geojson_tile.hpp
roblabs/maplibre-gl-native
d62ff400c6f75750d71b563344b1ca1e07b9b576
[ "BSD-2-Clause", "BSD-3-Clause" ]
12,771
2015-01-01T20:27:42.000Z
2022-03-24T18:14:44.000Z
src/mbgl/tile/geojson_tile.hpp
roblabs/maplibre-gl-native
d62ff400c6f75750d71b563344b1ca1e07b9b576
[ "BSD-2-Clause", "BSD-3-Clause" ]
1,571
2015-01-08T08:24:53.000Z
2022-03-28T06:30:53.000Z
#pragma once #include <mbgl/tile/geometry_tile.hpp> #include <mbgl/util/feature.hpp> namespace mbgl { namespace style { class GeoJSONData; } // namespace style class TileParameters; class GeoJSONTile : public GeometryTile { public: GeoJSONTile(const OverscaledTileID&, std::string sourceID, const TileParameters&, std::shared_ptr<style::GeoJSONData>); void updateData(std::shared_ptr<style::GeoJSONData> data, bool needsRelayout = false); void querySourceFeatures( std::vector<Feature>& result, const SourceQueryOptions&) override; private: std::shared_ptr<style::GeoJSONData> data; mapbox::base::WeakPtrFactory<GeoJSONTile> weakFactory{this}; }; } // namespace mbgl
23.71875
90
0.694335
[ "vector" ]
240cadd6831deda194346efd6e4f24e7fe527e5a
4,672
cc
C++
chrome/browser/chromeos/fileapi/recent_drive_source.cc
DamieFC/chromium
54ce2d3c77723697efd22cfdb02aea38f9dfa25c
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2020-10-18T02:33:40.000Z
2020-10-18T02:33:40.000Z
chrome/browser/chromeos/fileapi/recent_drive_source.cc
DamieFC/chromium
54ce2d3c77723697efd22cfdb02aea38f9dfa25c
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
3
2021-05-17T16:28:52.000Z
2021-05-21T22:42:22.000Z
chrome/browser/chromeos/fileapi/recent_drive_source.cc
DamieFC/chromium
54ce2d3c77723697efd22cfdb02aea38f9dfa25c
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/fileapi/recent_drive_source.h" #include <utility> #include <vector> #include "base/bind.h" #include "base/files/file_path.h" #include "base/metrics/histogram_macros.h" #include "base/task/post_task.h" #include "chrome/browser/ash/drive/drive_integration_service.h" #include "chrome/browser/ash/drive/file_system_util.h" #include "chrome/browser/chromeos/file_manager/fileapi_util.h" #include "chrome/browser/chromeos/fileapi/recent_file.h" #include "chromeos/components/drivefs/drivefs_util.h" #include "content/public/browser/browser_task_traits.h" #include "content/public/browser/browser_thread.h" #include "storage/browser/file_system/file_system_operation.h" #include "storage/browser/file_system/file_system_operation_runner.h" #include "storage/browser/file_system/file_system_url.h" #include "storage/common/file_system/file_system_types.h" #include "url/origin.h" using content::BrowserThread; namespace chromeos { const char RecentDriveSource::kLoadHistogramName[] = "FileBrowser.Recent.LoadDrive"; const char kAudioMimeType[] = "audio"; const char kImageMimeType[] = "image"; const char kVideoMimeType[] = "video"; RecentDriveSource::RecentDriveSource(Profile* profile) : profile_(profile) { DCHECK_CURRENTLY_ON(BrowserThread::UI); } RecentDriveSource::~RecentDriveSource() { DCHECK_CURRENTLY_ON(BrowserThread::UI); } void RecentDriveSource::GetRecentFiles(Params params) { DCHECK_CURRENTLY_ON(BrowserThread::UI); DCHECK(!params_.has_value()); DCHECK(files_.empty()); DCHECK(build_start_time_.is_null()); params_.emplace(std::move(params)); build_start_time_ = base::TimeTicks::Now(); auto* integration_service = drive::util::GetIntegrationServiceByProfile(profile_); if (!integration_service) { // |integration_service| is nullptr if Drive is disabled. 
OnComplete(); return; } auto query_params = drivefs::mojom::QueryParameters::New(); query_params->page_size = params_->max_files(); query_params->query_source = drivefs::mojom::QueryParameters::QuerySource::kLocalOnly; query_params->sort_field = drivefs::mojom::QueryParameters::SortField::kLastModified; query_params->sort_direction = drivefs::mojom::QueryParameters::SortDirection::kDescending; switch (params_->file_type()) { case FileType::kAudio: query_params->mime_type = kAudioMimeType; break; case FileType::kImage: query_params->mime_type = kImageMimeType; break; case FileType::kVideo: query_params->mime_type = kVideoMimeType; break; default: // Leave the mime_type null to query all files. break; } integration_service->GetDriveFsInterface()->StartSearchQuery( search_query_.BindNewPipeAndPassReceiver(), std::move(query_params)); search_query_->GetNextPage(base::BindOnce( &RecentDriveSource::GotSearchResults, weak_ptr_factory_.GetWeakPtr())); } void RecentDriveSource::OnComplete() { DCHECK_CURRENTLY_ON(BrowserThread::UI); DCHECK(params_.has_value()); DCHECK(!build_start_time_.is_null()); UMA_HISTOGRAM_TIMES(kLoadHistogramName, base::TimeTicks::Now() - build_start_time_); build_start_time_ = base::TimeTicks(); Params params = std::move(params_.value()); params_.reset(); std::vector<RecentFile> files = std::move(files_); files_.clear(); DCHECK(!params_.has_value()); DCHECK(files_.empty()); DCHECK(build_start_time_.is_null()); std::move(params.callback()).Run(std::move(files)); } void RecentDriveSource::GotSearchResults( drive::FileError error, absl::optional<std::vector<drivefs::mojom::QueryItemPtr>> results) { search_query_.reset(); auto* integration_service = drive::util::GetIntegrationServiceByProfile(profile_); if (!results || !integration_service) { OnComplete(); return; } files_.reserve(results->size()); for (auto& result : *results) { if (!drivefs::IsAFile(result->metadata->type)) { continue; } base::FilePath path = 
integration_service->GetMountPointPath().BaseName(); if (!base::FilePath("/").AppendRelativePath(result->path, &path)) { path = path.Append(result->path); } files_.emplace_back( params_.value().file_system_context()->CreateCrackedFileSystemURL( url::Origin::Create(params_->origin()), storage::kFileSystemTypeExternal, path), result->metadata->last_viewed_by_me_time); } OnComplete(); } } // namespace chromeos
32.671329
78
0.731592
[ "vector" ]
240df42ddc85e98c7232139fa4bfb986aac4ebe0
10,841
cpp
C++
src/Profile.cpp
jselmani/HapTex
d11f960b980491129d1ecdc6aeea1777c5f991c8
[ "MIT" ]
null
null
null
src/Profile.cpp
jselmani/HapTex
d11f960b980491129d1ecdc6aeea1777c5f991c8
[ "MIT" ]
null
null
null
src/Profile.cpp
jselmani/HapTex
d11f960b980491129d1ecdc6aeea1777c5f991c8
[ "MIT" ]
null
null
null
#include "Profile.h" namespace a1 { Profile::Profile(const char* in, std::ofstream& os, ARAIG_Sensors& araig) { std::ifstream file(in, std::ios::in); ofs = &os; if (file.fail()) { throw std::string("Could not open the profile configuration. Please check your file for errors."); } else { std::getline(file, sFName, ','); std::getline(file, sLName, ','); file >> sNum; file.ignore(); std::getline(file, iFName, ','); std::getline(file, iLName, ','); file >> iNum; file.ignore(); file >> calibrate.max; file.ignore(); file >> calibrate.min; file.ignore(); while (!file.eof()) { std::string tmp; std::getline(file, tmp); try{ ToRun.push_back(new Task(araig.getTask(tmp))); } catch (std::string msg) { std::cerr << msg << std::endl; } } } file.close(); } Profile::~Profile() { ToRun.~deque(); Completed.~vector(); for (unsigned short i = 0; i < ToRun.size(); i++) { ToRun[i] = nullptr; } for (unsigned short i = 0; i < Completed.size(); i++) { Completed[i] = nullptr; } } void Profile::displayToRun(std::ostream& os) const { std::cout << "Tasks To Run" << std::endl; std::cout << "============" << std::endl << std::endl; std::cout << "Student Name: " << sFName << " " << sLName << std::endl; std::cout << "Student ID : " << sNum << std::endl; os << "Tasks To Run" << std::endl; os << "============" << std::endl << std::endl; os << "Student Name: " << sFName << " " << sLName << std::endl; os << "Student ID : " << sNum << std::endl; for (unsigned short i = 0; i < ToRun.size(); i++) { ToRun[i]->dump(os); } } void Profile::displayCompleted(std::ostream& os) const { std::cout << "Completed Tasks" << std::endl; std::cout << "===============" << std::endl << std::endl; std::cout << "Student Name: " << sFName << " " << sLName << std::endl; std::cout << "Student ID : " << sNum << std::endl; std::cout << "MAX = " << calibrate.max << std::endl; std::cout << "MIN = " << calibrate.min << std::endl; os << "Completed Tasks" << std::endl; os << "===============" << std::endl << std::endl; os << "Student 
Name: " << sFName << " " << sLName << std::endl; os << "Student ID : " << sNum << std::endl; os << "MAX = " << calibrate.max << std::endl; os << "MIN = " << calibrate.min << std::endl; for (int i = 0; i < Completed.size(); i++) { Completed[i]->dump(os); } std::cout << std::endl; os << std::endl; } void Profile::displayNext(std::ostream& os) const { os << "Next Task" << std::endl; os << "=========" << std::endl << std::endl; std::cout << "Next Task" << std::endl; std::cout << "=========" << std::endl << std::endl; ToRun[0]->execute(os); } void Profile::displayPrev(std::ostream& os) const { os << "Previous Task" << std::endl; os << "=============" << std::endl; std::cout << "Previous Task" << std::endl; std::cout << "=============" << std::endl; Completed.back()->dump(os); std::cout << std::endl; os << std::endl; } void Profile::flightInfo() const { std::cout << "Student Name: " << sFName << " " << sLName << std::endl; std::cout << "Student ID : " << sNum << std::endl; std::cout << "Instructor Name: " << iFName << " " << iLName << std::endl; std::cout << "Instructor ID : " << iNum << std::endl; std::cout << "Available Tasks:" << std::endl << std::endl; for (auto i = 0; i < ToRun.size(); i++) { std::cout << ToRun[i]->getTaskName() << std::endl; } std::cout << std::endl; std::cout << "Calibrating ARAIG to a maximum intensity of " << calibrate.max << std::endl; std::cout << "and a minimum intensity of " << calibrate.min << std::endl << std::endl; char enter; std::cout << "Press any LETTER to continue..." 
<< std::endl; std::cin >> enter; std::cout << std::endl << std::endl; } void Profile::flightPlan() { unsigned short option = -1; unsigned short x = 1; unsigned short count = 0; do{ std::cout << std::endl; std::cout << " Choose how to proceed " << std::endl; std::cout << "=========================" << std::endl; std::cout << "1 - View Next Task" << std::endl; std::cout << "2 - View Previous Task" << std::endl; std::cout << "3 - View Tasks To Run" << std::endl; std::cout << "4 - View Completed Tasks" << std::endl; std::cout << "5 - Delete A Stimulation" << std::endl; std::cout << "6 - Initiate Flight Plan" << std::endl; std::cout << "0 - Back To Main Menu" << std::endl; std::cout << "> "; std::cin >> option; std::cin.clear(); std::cin.ignore(); if (!std::cin.fail()) { if (option >= 0 && option <= 6) { switch (option) { case 0: { std::cin.clear(); x = 0; std::cout << std::endl; break; } case 1: { if (!ToRun.empty()) { std::cout << std::endl; displayNext(*ofs); } else { std::cout << "\nTasks are completed OR no Tasks available." << std::endl; } break; } case 2: { if (count > 0) { std::cout << std::endl; displayPrev(*ofs); } else { std::cout << "\nNo previous tasks." << std::endl; } break; } case 3: { if (!ToRun.empty()) { std::cout << std::endl; displayToRun(*ofs); } else { std::cout << "\nNo tasks available." << std::endl; } break; } case 4: { if (!Completed.empty()) { std::cout << std::endl; displayCompleted(*ofs); } else { std::cout << "\nNo tasks have been completed." 
<< std::endl; } break; } case 5: { std::string task, stim, comp; *ofs << "Delete A Stimulation" << std::endl; *ofs << "====================" << std::endl << std::endl; std::cout << "Delete A Stimulation" << std::endl; std::cout << "====================" << std::endl << std::endl; for (unsigned short i = 0; i < ToRun.size(); i++) //list available tasks { ToRun[i]->dump(*ofs); } *ofs << std::endl; std::cout << std::endl; *ofs << "Please choose an available task from the list:" << std::endl; *ofs << "> "; std::cout << "Please choose an available task from the list:" << std::endl; std::cout << "> "; std::cin >> task; std::cout << std::endl; try{ for (unsigned short i = 0; i < ToRun.size(); i++) //loop through to locate task and set Task object to located task { comp = ToRun[i]->getTaskName(); if (task == comp) { ToRun[i]->dump(*ofs); *ofs << std::endl; std::cout << std::endl; *ofs << "Please choose an available stimulation from the list:" << std::endl; std::cout << "Please choose an available stimulation from the list:" << std::endl; *ofs << "> "; std::cout << "> "; std::cin >> stim; ToRun[i]->delStim(stim); *ofs << std::endl; std::cout << std::endl; ToRun[i]->dump(*ofs); *ofs << std::endl; std::cout << std::endl; *ofs << stim << " successfully deleted." << std::endl; std::cout << stim << " successfully deleted." 
<< std::endl; break; } } if (task != comp) throw std::string("Please ensure that the Task name is spelt EXACTLY the same as in the list above."); } catch (std::string msg) { std::cerr << msg << std::endl; } break; } case 6: { std::string decision; std::cout << std::endl << "Begin Flight Simulation" << std::endl; std::cout << "=======================" << std::endl << std::endl; *ofs << std::endl << "Begin Flight Simulation" << std::endl; *ofs << "=======================" << std::endl << std::endl; while (!ToRun.empty()) { if (ToRun[0]) //if ToRun at location 0 exists, keep running { ToRun[0]->execute(*ofs); Completed.push_back(std::move(ToRun[0])); } ToRun.pop_front(); count++; std::cout << "Would you like to initiate the next task? (Y/N) > "; std::cin >> decision; if (decision == "n" || decision == "N") { std::cout << std::endl; break; } else if (decision == "y" || decision == "Y") { continue; } else if (decision != "y" || decision != "Y") { std::cout << std::endl << "===INVALID INPUT=== Please input Y or N next time\n\n"; break; } std::cout << std::endl << std::endl; } if (ToRun.empty()) { std::cout << "Simulation complete." << std::endl; *ofs << "Simulation complete." << std::endl; } break; } } } } } while (x == 1); } int Profile::run() { unsigned short choice; do{ choice = menu(); switch (choice) { case 1: { flightPlan(); break; } case 2: { flightInfo(); break; } case 3: { char exit; std::cout << "Thank you for using ARAIG." << std::endl; std::cout << "We're glad you didn't throw up on our very expensive flight suit!" << std::endl; std::cout << "Goodbye!" << std::endl; std::cin >> exit; break; } default: { std::cout << "---INVALID SELECTION---" << std::endl; std::cout << "Please input a valid number." 
<< std::endl; } } } while (choice != 3); return 0; } int Profile::menu() { unsigned short valid = 0; std::cout << "ARAIG (As Real As It Gets) Flight Simulation Suit" << std::endl; std::cout << "=================================================" << std::endl; std::cout << "1 - Flight Simulator" << std::endl; std::cout << "2 - Student/Instructor Information & Flight Plan" << std::endl; std::cout << "3 - Exit Program" << std::endl; std::cout << "> "; std::cin >> valid; std::cout << std::endl; std::cin.clear(); std::cin.ignore(); if (!std::cin.fail()) //make sure that the user inputs the correct input { if (valid >= 1 && valid <= 3) { switch (valid) { case 1: { valid = 1; break; } case 2: { valid = 2; break; } case 3: { valid = 3; break; } } } } return valid; } }
26.377129
125
0.472742
[ "object", "vector" ]
240f95da9daa5ff947dd9ae91b8bcd34ffe94cd1
2,558
cpp
C++
sdlttf.cpp
szymor/pv2x
c5da95b9e097f9d1d121f0e06891717f723dab6a
[ "BSD-3-Clause" ]
null
null
null
sdlttf.cpp
szymor/pv2x
c5da95b9e097f9d1d121f0e06891717f723dab6a
[ "BSD-3-Clause" ]
2
2021-09-13T21:55:03.000Z
2021-11-26T08:38:33.000Z
sdlttf.cpp
szymor/pv2x
c5da95b9e097f9d1d121f0e06891717f723dab6a
[ "BSD-3-Clause" ]
null
null
null
// TTF_Init() must be called on once on application start #include "sdlttf.h" SDLTTF::SDLTTF(std::string fontfile, int pointSize) { this->error=0; // FIXME: why does this end up with invisible fonts on some linux systems? #ifdef GP2X this->renderMode=TEXTMODE_SOLID; #endif #ifdef LINUX this->renderMode=TEXTMODE_SHADED; #endif #ifdef MIYOO this->renderMode=TEXTMODE_SHADED; #endif this->font=NULL; this->setForegroundColor(0xff,0xff,0xff); this->setBackgroundColor(0,0,0); this->font = TTF_OpenFont(fontfile.c_str(), pointSize); if (this->font == NULL){ this->error=2; } int w,h; TTF_SizeText(this->font, "TPgq", &w, &h); this->height=h; } SDLTTF::~SDLTTF() { if (this->font != NULL) { TTF_CloseFont(this->font); } } TTF_Font *SDLTTF::getFont() { return this->font; } void SDLTTF::setForegroundColor(Uint8 r, Uint8 g, Uint8 b) { this->setForegroundColor(r,g,b,0xff); } void SDLTTF::setForegroundColor(Uint8 r, Uint8 g, Uint8 b, Uint8 a) { this->foreground.r=r; this->foreground.g=g; this->foreground.b=b; this->foreground.unused=a; } void SDLTTF::setBackgroundColor(Uint8 r, Uint8 g, Uint8 b) { this->setBackgroundColor(r,g,b,0xff); } void SDLTTF::setBackgroundColor(Uint8 r, Uint8 g, Uint8 b, Uint8 a) { this->background.r=r; this->background.g=g; this->background.b=b; this->background.unused=a; } void SDLTTF::setRenderMode(int mode) { this->renderMode=mode; } void SDLTTF::render(SDL_Surface *target, std::string text) { this->render(target, text, 0, 0); } void SDLTTF::render(SDL_Surface *target, std::string text, int x, int y) { SDL_Surface *renderedText; switch (this->renderMode) { case TEXTMODE_SHADED: renderedText = TTF_RenderText_Shaded(this->font, text.c_str(), this->foreground, this->background); break; case TEXTMODE_BLENDED: renderedText = TTF_RenderText_Blended(this->font, text.c_str(), this->foreground); break; default: renderedText = TTF_RenderText_Solid(this->font, text.c_str(), this->foreground); break; } SDL_Rect targetPos; targetPos.x=x; targetPos.y=y; 
SDL_BlitSurface(renderedText,NULL,target,&targetPos); SDL_FreeSurface(renderedText); } int SDLTTF::getTextHeight() { return this->height; } int SDLTTF::getStringWidth(std::string str) { int w,h; TTF_SizeText(this->font, str.c_str(), &w, &h); return w; }
25.078431
124
0.647381
[ "render" ]
24107d3d0e20abbc2d0272fd4cd11b4cfc0ae603
63,902
hh
C++
ads-ql/RecordType.hh
lairofthegoldinblair/trecul
41953c22f18f76e5add7a35a13775f70459fcd96
[ "BSD-3-Clause" ]
null
null
null
ads-ql/RecordType.hh
lairofthegoldinblair/trecul
41953c22f18f76e5add7a35a13775f70459fcd96
[ "BSD-3-Clause" ]
null
null
null
ads-ql/RecordType.hh
lairofthegoldinblair/trecul
41953c22f18f76e5add7a35a13775f70459fcd96
[ "BSD-3-Clause" ]
null
null
null
/** * Copyright (c) 2012, Akamai Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Akamai Technologies nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef __RECORDTYPE_HH #define __RECORDTYPE_HH #include <cstring> #include <string> #include <vector> #include <map> #include <set> #include <stdexcept> #include <boost/asio/ip/address_v4.hpp> #include <boost/asio/ip/address_v6.hpp> #include <boost/format.hpp> #include <boost/shared_ptr.hpp> #include <boost/date_time/posix_time/posix_time_types.hpp> #include <boost/serialization/serialization.hpp> #include <boost/serialization/vector.hpp> namespace llvm { class Type; class Value; } extern "C" { #include "decimal128.h" } #include "RecordBuffer.hh" class CodeGenerationContext; class FieldType; class RecordType; class RecordTypeFree; class RecordTypePrint; // Runtime representations of primitive fields. // We require a C interface for these so LLVM can // call functions with these parameters. typedef struct { bool Large : 1; unsigned Size : 31; const char * Ptr; } VarcharLarge; typedef struct { bool Large : 1; unsigned Size : 7; char Data[sizeof(VarcharLarge)-1]; } VarcharSmall; typedef union tagVarchar { enum Constants { MIN_LARGE_STRING_SIZE = sizeof(VarcharLarge)-1 }; VarcharLarge Large; VarcharSmall Small; char * allocateLarge(int32_t len, class InterpreterContext * ctxt); void assign(const char * lhs, int32_t len, class InterpreterContext * ctxt) { char * buf; if (len < MIN_LARGE_STRING_SIZE) { buf = &Small.Data[0]; Small.Size = len; Small.Large = 0; } else { buf = allocateLarge(len+1, ctxt); Large.Size = len; Large.Ptr = buf; Large.Large = 1; } ::memcpy(buf, lhs, len); buf[len] = 0; } void assign(const char * lhs, int32_t len) { char * buf; if (len < MIN_LARGE_STRING_SIZE) { buf = &Small.Data[0]; Small.Size = len; Small.Large = 0; } else { buf = (char *) ::malloc(len + 1); Large.Size = len; Large.Ptr = buf; Large.Large = 1; } ::memcpy(buf, lhs, len); buf[len] = 0; } void append(const char * lhs, int32_t len) { if (!Large.Large) { int32_t before = Small.Size; int32_t after = before + len; if (after < MIN_LARGE_STRING_SIZE) { char * buf = &Small.Data[0]; 
memcpy(buf + before, lhs, len); Small.Size = after; Small.Large = 0; buf[after] = 0; } else { char * buf = (char *) ::malloc(after + 1); memcpy(buf, &Small.Data[0], before); memcpy(buf + before, lhs, len); Large.Size = after; Large.Ptr = buf; Large.Large = 1; buf[after] = 0; } } else { int32_t before = Large.Size; int32_t after = before + len; char * buf = (char *) ::realloc(const_cast<char *>(Large.Ptr), after + 1); memcpy(buf + before, lhs, len); Large.Size = after; Large.Ptr = buf; Large.Large = 1; buf[after] = 0; } } int32_t size() const { return Large.Large ? Large.Size : Small.Size; } const char * c_str() const { return Large.Large ? Large.Ptr : &Small.Data[0]; } } Varchar; typedef struct { bool Large : 1; unsigned Size : 31; const char * Ptr; } VararrayLarge; typedef struct { bool Large : 1; unsigned Size : 7; char Data[sizeof(VararrayLarge)-1]; } VararraySmall; typedef union tagVararray { enum Constants { MIN_LARGE_STRING_SIZE = sizeof(VararrayLarge)-1 }; VararrayLarge Large; VararraySmall Small; char * allocateLarge(int32_t len, class InterpreterContext * ctxt); void assign(const char * lhs, int32_t len, class InterpreterContext * ctxt) { char * buf; if (len < MIN_LARGE_STRING_SIZE) { buf = &Small.Data[0]; Small.Size = len; Small.Large = 0; } else { buf = allocateLarge(len+1, ctxt); Large.Size = len; Large.Ptr = buf; Large.Large = 1; } ::memcpy(buf, lhs, len); buf[len] = 0; } void assign(const char * lhs, int32_t len) { char * buf; if (len < MIN_LARGE_STRING_SIZE) { buf = &Small.Data[0]; Small.Size = len; Small.Large = 0; } else { buf = (char *) ::malloc(len + 1); Large.Size = len; Large.Ptr = buf; Large.Large = 1; } ::memcpy(buf, lhs, len); buf[len] = 0; } void append(const char * lhs, int32_t len) { if (!Large.Large) { int32_t before = Small.Size; int32_t after = before + len; if (after < MIN_LARGE_STRING_SIZE) { char * buf = &Small.Data[0]; memcpy(buf + before, lhs, len); Small.Size = after; Small.Large = 0; buf[after] = 0; } else { char * buf = (char 
*) ::malloc(after + 1); memcpy(buf, &Small.Data[0], before); memcpy(buf + before, lhs, len); Large.Size = after; Large.Ptr = buf; Large.Large = 1; buf[after] = 0; } } else { int32_t before = Large.Size; int32_t after = before + len; char * buf = (char *) ::realloc(const_cast<char *>(Large.Ptr), after + 1); memcpy(buf + before, lhs, len); Large.Size = after; Large.Ptr = buf; Large.Large = 1; buf[after] = 0; } } int32_t size() const { return Large.Large ? Large.Size : Small.Size; } const char * c_str() const { return Large.Large ? Large.Ptr : &Small.Data[0]; } } Vararray; struct CidrV4 { boost::asio::ip::address_v4 prefix; uint8_t prefix_length; }; struct CidrV4Runtime { boost::asio::ip::address_v4::bytes_type prefix; uint8_t prefix_length; }; template <typename Elem, typename Traits> std::basic_ostream<Elem, Traits>& operator<<( std::basic_ostream<Elem, Traits>& os, const CidrV4& pfx) { return os << pfx.prefix << "/" << (int32_t) pfx.prefix_length; } struct CidrV6 { boost::asio::ip::address_v6 prefix; uint8_t prefix_length; }; struct CidrV6Runtime { boost::asio::ip::address_v6::bytes_type prefix; uint8_t prefix_length; }; template <typename Elem, typename Traits> std::basic_ostream<Elem, Traits>& operator<<( std::basic_ostream<Elem, Traits>& os, const CidrV6& pfx) { return os << pfx.prefix << "/" << (int32_t) pfx.prefix_length; } class FieldAddress { private: uint32_t mOffset; uint32_t mPosition; // Serialization friend class boost::serialization::access; template <class Archive> void serialize(Archive & ar, const unsigned int version) { ar & BOOST_SERIALIZATION_NVP(mOffset); ar & BOOST_SERIALIZATION_NVP(mPosition); } public: FieldAddress(uint32_t offset=0, uint32_t position=0) : mOffset(offset), mPosition(position) { } void setNull(RecordBuffer buffer) const { if (mPosition == 0xffffffff) throw std::runtime_error("Can't set NULL on a non-nullable field"); // put 0 into bit uint32_t dwordPos = mPosition >> 5; uint32_t mask = 1 << (mPosition - (dwordPos << 5)); 
// (Continuation of setNull(): turning the presence bit off marks the field
// as NULL.  dwordPos/mask were computed from mPosition on the preceding lines.)
((uint32_t *) buffer.Ptr)[dwordPos] &= (~mask);
}
/**
 * Mark the field as non-NULL by setting its presence bit in the record's
 * null bitmask.  mPosition == 0xffffffff means "field is not nullable":
 * there is no bit to maintain, so this is a no-op.
 */
void clearNull(RecordBuffer buffer) const
{
  if (mPosition != 0xffffffff) {
    // put 1 into bit
    uint32_t dwordPos = mPosition >> 5;
    uint32_t mask = 1 << (mPosition - (dwordPos << 5));
    ((uint32_t *) buffer.Ptr)[dwordPos] |= mask;
  }
}
/**
 * Is the field NULL?  A zero presence bit means NULL; a non-nullable
 * field (mPosition == 0xffffffff) can never be NULL.
 */
bool isNull(RecordBuffer buffer) const
{
  if (mPosition != 0xffffffff) {
    // NULL means there is a zero bit
    uint32_t dwordPos = mPosition >> 5;
    uint32_t mask = 1 << (mPosition - (dwordPos << 5));
    uint32_t ret = ((uint32_t *) buffer.Ptr)[dwordPos] & mask;
    return ret == 0;
  } else {
    return false;
  }
}
// Nullity of elements of a fixed array
void setArrayNull(RecordBuffer buffer, const class FixedArrayType * ty, int32_t idx) const;
void clearArrayNull(RecordBuffer buffer, const class FixedArrayType * ty, int32_t idx) const;
bool isArrayNull(RecordBuffer buffer, const class FixedArrayType * ty, int32_t idx) const;
// Nullity of elements of a variable array
void setArrayNull(RecordBuffer buffer, const class VariableArrayType * ty, int32_t idx) const;
void clearArrayNull(RecordBuffer buffer, const class VariableArrayType * ty, int32_t idx) const;
bool isArrayNull(RecordBuffer buffer, const class VariableArrayType * ty, int32_t idx) const;
// Typed setters.  Each clears the NULL bit and then stores the value at
// this field's byte offset (mOffset) within the record buffer.
void setInt8(int8_t val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(int8_t *) (buffer.Ptr + mOffset) = val;
}
void setInt16(int16_t val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(int16_t *) (buffer.Ptr + mOffset) = val;
}
void setInt32(int32_t val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(int32_t *) (buffer.Ptr + mOffset) = val;
}
void setInt64(int64_t val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(int64_t *) (buffer.Ptr + mOffset) = val;
}
void setFloat(float val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(float *) (buffer.Ptr + mOffset) = val;
}
void setDouble(double val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(double *) (buffer.Ptr + mOffset) = val;
}
void setDatetime(boost::posix_time::ptime val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(boost::posix_time::ptime *) (buffer.Ptr + mOffset) = val;
}
void setDate(boost::gregorian::date val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(boost::gregorian::date *) (buffer.Ptr + mOffset) = val;
}
void setDecimal(decimal128 & val, RecordBuffer buffer) const
{
  clearNull(buffer);
  *(decimal128 *) (buffer.Ptr + mOffset) = val;
}
// IPv4 addresses are stored as their 4-byte bytes_type representation.
void setIPv4(boost::asio::ip::address_v4 val, RecordBuffer buffer) const
{
  typedef boost::asio::ip::address_v4::bytes_type bytes_type;
  clearNull(buffer);
  *(bytes_type *) (buffer.Ptr + mOffset) = val.to_bytes();
}
// A v4 CIDR is stored as a CidrV4Runtime: prefix bytes plus a length byte.
void setCIDRv4(CidrV4 val, RecordBuffer buffer) const
{
  clearNull(buffer);
  CidrV4Runtime * bufVal = (CidrV4Runtime *) (buffer.Ptr + mOffset) ;
  bufVal->prefix = val.prefix.to_bytes();
  bufVal->prefix_length = val.prefix_length;
}
// IPv6 addresses are stored as their raw 16-byte representation.
void setIPv6(const boost::asio::ip::address_v6 & val, RecordBuffer buffer) const
{
  clearNull(buffer);
  auto arr = val.to_bytes();
  memcpy(buffer.Ptr + mOffset, &arr[0], 16);
}
// A v6 CIDR is 16 prefix bytes followed by a one-byte prefix length.
void setCIDRv6(CidrV6 val, RecordBuffer buffer) const
{
  clearNull(buffer);
  auto arr = val.prefix.to_bytes();
  memcpy(buffer.Ptr + mOffset, &arr[0], 16);
  *(buffer.Ptr + mOffset + 16) = val.prefix_length;
}
// Typed getters.  Three flavors per type:
//   getX         - scalar field at mOffset
//   getArrayX    - element idx of a fixed-length array stored inline
//   getVarArrayX - element idx of a variable-length array, whose element
//                  storage is reached through the Vararray's c_str()
int8_t getInt8(RecordBuffer buffer) const
{
  return *(int8_t *) (buffer.Ptr + mOffset);
}
int8_t getArrayInt8(RecordBuffer buffer, int idx) const
{
  return ((int8_t *) (buffer.Ptr + mOffset))[idx];
}
int8_t getVarArrayInt8(RecordBuffer buffer, int idx) const
{
  return ((int8_t *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
int16_t getInt16(RecordBuffer buffer) const
{
  return *(int16_t *) (buffer.Ptr + mOffset);
}
int16_t getArrayInt16(RecordBuffer buffer, int idx) const
{
  return ((int16_t *) (buffer.Ptr + mOffset))[idx];
}
int16_t getVarArrayInt16(RecordBuffer buffer, int idx) const
{
  return ((int16_t *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
int32_t getInt32(RecordBuffer buffer) const
{
  return *(int32_t *) (buffer.Ptr + mOffset);
}
int32_t getArrayInt32(RecordBuffer buffer, int idx) const
{
  return ((int32_t *) (buffer.Ptr + mOffset))[idx];
}
int32_t getVarArrayInt32(RecordBuffer buffer, int idx) const
{
  return ((int32_t *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
int64_t getInt64(RecordBuffer buffer) const
{
  return *(int64_t *) (buffer.Ptr + mOffset);
}
int64_t getArrayInt64(RecordBuffer buffer, int idx) const
{
  return ((int64_t *) (buffer.Ptr + mOffset))[idx];
}
int64_t getVarArrayInt64(RecordBuffer buffer, int idx) const
{
  return ((int64_t *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
float getFloat(RecordBuffer buffer) const
{
  return *(float *) (buffer.Ptr + mOffset);
}
float getArrayFloat(RecordBuffer buffer, int idx) const
{
  return ((float *) (buffer.Ptr + mOffset))[idx];
}
float getVarArrayFloat(RecordBuffer buffer, int idx) const
{
  return ((float *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
double getDouble(RecordBuffer buffer) const
{
  return *(double *) (buffer.Ptr + mOffset);
}
double getArrayDouble(RecordBuffer buffer, int idx) const
{
  return ((double *) (buffer.Ptr + mOffset))[idx];
}
double getVarArrayDouble(RecordBuffer buffer, int idx) const
{
  return ((double *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
boost::posix_time::ptime getDatetime(RecordBuffer buffer) const
{
  return *(boost::posix_time::ptime *) (buffer.Ptr + mOffset);
}
boost::posix_time::ptime getArrayDatetime(RecordBuffer buffer, int idx) const
{
  return ((boost::posix_time::ptime *) (buffer.Ptr + mOffset))[idx];
}
boost::posix_time::ptime getVarArrayDatetime(RecordBuffer buffer, int idx) const
{
  return ((boost::posix_time::ptime *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx];
}
boost::gregorian::date getDate(RecordBuffer buffer) const
{
  return *(boost::gregorian::date *) (buffer.Ptr + mOffset);
}
boost::gregorian::date getArrayDate(RecordBuffer buffer, int idx) const
{
  return ((boost::gregorian::date *) (buffer.Ptr + mOffset))[idx];
}
boost::gregorian::date getVarArrayDate(RecordBuffer
buffer, int idx) const { return ((boost::gregorian::date *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx]; } boost::asio::ip::address_v4 getIPv4(RecordBuffer buffer) const { typedef boost::asio::ip::address_v4::bytes_type bytes_type; return boost::asio::ip::make_address_v4(*(bytes_type *) (buffer.Ptr + mOffset)); } boost::asio::ip::address_v4 getArrayIPv4(RecordBuffer buffer, int idx) const { typedef boost::asio::ip::address_v4::bytes_type bytes_type; return boost::asio::ip::make_address_v4(((bytes_type *) (buffer.Ptr + mOffset))[idx]); } boost::asio::ip::address_v4 getVarArrayIPv4(RecordBuffer buffer, int idx) const { typedef boost::asio::ip::address_v4::bytes_type bytes_type; return boost::asio::ip::make_address_v4(((bytes_type *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx]); } CidrV4 getCIDRv4(RecordBuffer buffer) const { typedef boost::asio::ip::address_v4::bytes_type bytes_type; CidrV4 ret; ret.prefix = boost::asio::ip::make_address_v4(*(bytes_type *) (buffer.Ptr + mOffset)); ret.prefix_length = *(buffer.Ptr + mOffset + sizeof(bytes_type)); return ret; } CidrV4 getArrayCIDRv4(RecordBuffer buffer, int idx) const { CidrV4 ret; ret.prefix = boost::asio::ip::make_address_v4(((CidrV4Runtime *) (buffer.Ptr + mOffset))[idx].prefix); ret.prefix_length = ((CidrV4Runtime *)(buffer.Ptr + mOffset))[idx].prefix_length; return ret; } CidrV4 getVarArrayCIDRv4(RecordBuffer buffer, int idx) const { CidrV4 ret; ret.prefix = boost::asio::ip::make_address_v4(((CidrV4Runtime *) (buffer.Ptr + mOffset))[idx].prefix); ret.prefix_length = ((CidrV4Runtime *)((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx].prefix_length; return ret; } boost::asio::ip::address_v6 getIPv6(RecordBuffer buffer) const { boost::asio::ip::address_v6::bytes_type arr; memcpy(&arr[0], buffer.Ptr + mOffset, 16); return boost::asio::ip::make_address_v6(arr); } boost::asio::ip::address_v6 getArrayIPv6(RecordBuffer buffer, int idx) const { typedef boost::asio::ip::address_v6::bytes_type bytes_type; 
return boost::asio::ip::make_address_v6(((bytes_type *) (buffer.Ptr + mOffset))[idx]); } boost::asio::ip::address_v6 getVarArrayIPv6(RecordBuffer buffer, int idx) const { typedef boost::asio::ip::address_v6::bytes_type bytes_type; return boost::asio::ip::make_address_v6(((bytes_type *) ((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx]); } CidrV6 getCIDRv6(RecordBuffer buffer) const { boost::asio::ip::address_v6::bytes_type arr; memcpy(&arr[0], buffer.Ptr + mOffset, 16); CidrV6 ret = { boost::asio::ip::make_address_v6(arr), *(buffer.Ptr + mOffset + 16) }; return ret; } CidrV6 getArrayCIDRv6(RecordBuffer buffer, int idx) const { CidrV6 ret; ret.prefix = boost::asio::ip::make_address_v6(((CidrV6Runtime *) (buffer.Ptr + mOffset))[idx].prefix); ret.prefix_length = ((CidrV6Runtime *)(buffer.Ptr + mOffset))[idx].prefix_length; return ret; } CidrV6 getVarArrayCIDRv6(RecordBuffer buffer, int idx) const { CidrV6 ret; ret.prefix = boost::asio::ip::make_address_v6(((CidrV6Runtime *) (buffer.Ptr + mOffset))[idx].prefix); ret.prefix_length = ((CidrV6Runtime *)((Vararray *)(buffer.Ptr + mOffset))->c_str())[idx].prefix_length; return ret; } void SetFixedLengthString(RecordBuffer buffer, const char * begin, std::size_t sz) const { clearNull(buffer); memcpy(buffer.Ptr + mOffset, begin, sz); *(buffer.Ptr + mOffset + sz) = 0; } void SetFixedLengthString(RecordBuffer buffer, const char * begin, int32_t strSz, uint8_t padChar, int32_t totalSz) const { clearNull(buffer); memcpy(buffer.Ptr + mOffset, begin, strSz); if (totalSz != strSz) memset(buffer.Ptr + mOffset + strSz, padChar, (totalSz - strSz)); // Null terminate *(buffer.Ptr + mOffset + totalSz) = 0; } void SetVariableLengthString(RecordBuffer buffer, const char * begin, std::size_t sz) const { clearNull(buffer); Varchar * internalString = (Varchar *) (buffer.Ptr + mOffset); internalString->assign(begin, (int32_t) sz); } int8_t * getInt8Ptr(RecordBuffer buffer) const { return (int8_t *) (buffer.Ptr + mOffset); } int8_t * 
getArrayInt8Ptr(RecordBuffer buffer, int idx) const
{
  return ((int8_t *) (buffer.Ptr + mOffset)) + idx;
}
// Raw pointer accessors: return a typed pointer into the record buffer,
// either to the scalar field or to element idx of an inline fixed array.
int16_t * getInt16Ptr(RecordBuffer buffer) const
{
  return (int16_t *) (buffer.Ptr + mOffset);
}
int16_t * getArrayInt16Ptr(RecordBuffer buffer, int idx) const
{
  return ((int16_t *) (buffer.Ptr + mOffset)) + idx;
}
int32_t * getInt32Ptr(RecordBuffer buffer) const
{
  return (int32_t *) (buffer.Ptr + mOffset);
}
int32_t * getArrayInt32Ptr(RecordBuffer buffer, int idx) const
{
  return ((int32_t *) (buffer.Ptr + mOffset)) + idx;
}
int64_t * getInt64Ptr(RecordBuffer buffer) const
{
  return (int64_t *) (buffer.Ptr + mOffset);
}
int64_t * getArrayInt64Ptr(RecordBuffer buffer, int idx) const
{
  return ((int64_t *) (buffer.Ptr + mOffset)) + idx;
}
float * getFloatPtr(RecordBuffer buffer) const
{
  return (float *) (buffer.Ptr + mOffset);
}
float * getArrayFloatPtr(RecordBuffer buffer, int idx) const
{
  return ((float *) (buffer.Ptr + mOffset)) + idx;
}
double * getDoublePtr(RecordBuffer buffer) const
{
  return (double *) (buffer.Ptr + mOffset);
}
double * getArrayDoublePtr(RecordBuffer buffer, int idx) const
{
  return ((double *) (buffer.Ptr + mOffset)) + idx;
}
boost::posix_time::ptime * getDatetimePtr(RecordBuffer buffer) const
{
  return (boost::posix_time::ptime *) (buffer.Ptr + mOffset);
}
boost::posix_time::ptime * getArrayDatetimePtr(RecordBuffer buffer, int idx) const
{
  return ((boost::posix_time::ptime *) (buffer.Ptr + mOffset)) + idx;
}
boost::gregorian::date * getDatePtr(RecordBuffer buffer) const
{
  return (boost::gregorian::date *) (buffer.Ptr + mOffset);
}
boost::gregorian::date * getArrayDatePtr(RecordBuffer buffer, int idx) const
{
  return ((boost::gregorian::date *) (buffer.Ptr + mOffset)) + idx;
}
Varchar * getVarcharPtr(RecordBuffer buffer) const
{
  return (Varchar *) (buffer.Ptr + mOffset);
}
Varchar * getArrayVarcharPtr(RecordBuffer buffer, int idx) const
{
  return ((Varchar *) (buffer.Ptr + mOffset)) + idx;
}
// Variable-array elements are reached through the Vararray's storage.
Varchar * getVarArrayVarcharPtr(RecordBuffer buffer, int idx) const
{
  return ((Varchar *) ((Vararray *)(buffer.Ptr + mOffset))->c_str()) + idx;
}
char * getCharPtr(RecordBuffer buffer) const
{
  return (char *) (buffer.Ptr + mOffset);
}
decimal128 * getDecimalPtr(RecordBuffer buffer) const
{
  return (decimal128 *) (buffer.Ptr + mOffset);
}
decimal128 * getArrayDecimalPtr(RecordBuffer buffer, int idx) const
{
  return ((decimal128 *) (buffer.Ptr + mOffset)) + idx;
}
// Field addresses order/compare by byte offset only.
bool operator<(const FieldAddress & rhs) const
{
  return mOffset < rhs.mOffset;
}
bool operator==(const FieldAddress & rhs) const
{
  return mOffset == rhs.mOffset;
}
bool operator!=(const FieldAddress & rhs) const
{
  return mOffset != rhs.mOffset;
}
std::ptrdiff_t operator-(const FieldAddress & rhs) const
{
  return std::ptrdiff_t(mOffset) - std::ptrdiff_t(rhs.mOffset);
}
// True if a field of size sz at this address ends exactly where rhs begins.
bool contiguous(const FieldAddress& rhs, std::size_t sz) const
{
  return mOffset + sz == rhs.mOffset;
}
/**
 * Get the address of the dword containing the bit for
 * this field.
 */
FieldAddress getBitwordAddress() const
{
  return FieldAddress((mPosition >> 5)*sizeof(uint32_t), 0xffffffff);
}
// Code generate instructions to get an untyped pointer to member given a base pointer.
llvm::Value * getPointer(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer) const;
/**
 * Code generate instructions to get the NULL value.
 */
llvm::Value * isNull(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer) const;
/**
 * Code generate instructions to set/clear the NULL value.
 */
void setNull(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer, bool isNull) const;
void dump() const;
};
/**
 * Value wrapper around a 16-byte digest, ordered lexicographically so it
 * can serve as a key in the DynamicRecordContext type cache.
 */
class Digest
{
private:
  uint8_t mDigest[16];
public:
  Digest()
  {
    memset(&mDigest[0], 0, 16);
  }
  Digest(unsigned char digest[16])
  {
    memcpy(&mDigest[0], &digest[0], 16);
  }
  ~Digest()
  {
  }
  // Lexicographic byte-wise comparison.
  bool operator < (const Digest & rhs) const
  {
    for(int i=0; i<16; i++) {
      if (mDigest[i] < rhs.mDigest[i])
        return true;
      else if (mDigest[i] > rhs.mDigest[i])
        return false;
    }
    return false;
  }
};
/**
 * Owning context for dynamically created types, record types and IQL
 * expressions; field types are interned by digest.
 */
class DynamicRecordContext
{
private:
  std::map<Digest, FieldType *> mTypes;
  std::set<const RecordType *> mRecords;
  std::set<class IQLExpression *> mExprs;
public:
  DynamicRecordContext();
  ~DynamicRecordContext();
  // Find a previously interned type by its digest.
  FieldType * lookup(const Digest& id) const;
  void add(const Digest& id, FieldType * val);
  void add(const RecordType * ty);
  void add(class IQLExpression * expr);
};
/**
 * Base class for all field types.  A type is identified by its enum tag,
 * size and nullability.
 */
class FieldType
{
public:
  enum FieldTypeEnum {
    VARCHAR, /* Variable length strings */
    CHAR, /* Fixed length string */
    BIGDECIMAL, /* 128-bit decimals */
    INT8, /* Signed 8 bit Integers */
    INT16, /* Signed 16 bit Integers */
    INT32, /* Signed 32 bit Integers */
    INT64, /* Signed 64 bit Integers */
    FLOAT, /* IEEE single precision */
    DOUBLE, /* IEEE double precision */
    DATETIME, /* Boost datetime */
    DATE, /* Boost gregorian date */
    IPV4, /* V4 IP address */
    CIDRV4, /* V4 CIDR */
    IPV6, /* V6 IP address */
    CIDRV6, /* V6 CIDR */
    FUNCTION, /* Function types are NOT allowed as fields at this point. */
    FIXED_ARRAY, /* Fixed Length Array. */
    VARIABLE_ARRAY, /* Variable Length Array.
*/
    INTERVAL, /* Interval types */
    NIL /* Type of literal NULL */
  };
private:
  DynamicRecordContext & mContext;
  FieldTypeEnum mType;
  int32_t mSize;
  bool mNullable;
public:
  // Tag-only constructor: size defaults to 0.
  FieldType(DynamicRecordContext& ctxt, FieldTypeEnum ty, bool nullable)
    :
    mContext(ctxt),
    mType(ty),
    mSize(0),
    mNullable(nullable)
  {
  }
  FieldType(DynamicRecordContext& ctxt, FieldTypeEnum ty, int32_t sz, bool nullable)
    :
    mContext(ctxt),
    mType(ty),
    mSize(sz),
    mNullable(nullable)
  {
  }
  virtual ~FieldType()
  {
  }
  FieldTypeEnum GetEnum() const { return mType; }
  int32_t GetSize() const { return mSize; }
  bool isNullable() const { return mNullable; }
  // TODO: Convert the following into double dispatch calls
  // as they depend on the actual field type and the system architecture.
  virtual std::size_t GetAlignment() const
  {
    switch(mType) {
    case VARCHAR:
      return 8;
    case CHAR:
      return 1;
    case BIGDECIMAL:
      return 1;
    case INT8:
      return 1;
    case INT16:
      return 2;
    case INT32:
      return 4;
    case INT64:
      return 8;
    case FLOAT:
      return sizeof(float);
    case DOUBLE:
      return sizeof(double);
    case DATETIME:
      return sizeof(boost::posix_time::ptime);
    case DATE:
      return sizeof(boost::gregorian::date);
    case IPV4:
      return 4;
    case CIDRV4:
      return 4;
    case IPV6:
      return 1;
    case CIDRV6:
      return 1;
    case FUNCTION:
      throw std::runtime_error("Function types cannot be field values");
    case INTERVAL:
      return 4;
    default:
      throw std::runtime_error((boost::format("Invalid Type value: %1%") % mType).str());
    }
  }
  // Size in bytes of successive objects of this type in memory accounting for alignment
  virtual std::size_t GetAllocSize() const
  {
    switch(mType) {
    case VARCHAR:
      return sizeof(Varchar);
    case CHAR:
      // +1 for the trailing NUL written by SetFixedLengthString.
      return mSize + 1;
    case BIGDECIMAL:
      return 16;
    case INT8:
      return 1;
    case INT16:
      return 2;
    case INT32:
      return 4;
    case INT64:
      return 8;
    case FLOAT:
      return sizeof(float);
    case DOUBLE:
      return sizeof(double);
    case DATETIME:
      return sizeof(boost::posix_time::ptime);
    case DATE:
      return sizeof(boost::gregorian::date);
    case IPV4:
      return 4;
    case CIDRV4:
      // 4 prefix bytes + 1 length byte, padded to 8.
      return 8;
    case IPV6:
      return 16;
    case CIDRV6:
      // 16 prefix bytes + 1 length byte.
      return 17;
    case INTERVAL:
      return 4;
    case FUNCTION:
      throw std::runtime_error("Function types cannot be field values");
    default:
      throw std::runtime_error("Invalid Type value");
    }
  }
  virtual llvm::Type * LLVMGetType(CodeGenerationContext * ctxt) const;
  /**
   * Append my state to an md5 hash
   */
  virtual void AppendTo(struct md5_state_s * md5) const;
  /**
   * Text representation of type.
   */
  virtual std::string toString() const=0;
  /**
   * Create a version of this type with appropriate
   * nullability.
   * TODO: Make pure virtual
   */
  virtual const FieldType * clone(bool nullable) const
  {
    if (nullable == isNullable())
      return this;
    throw std::runtime_error("Cloning to change nullability not supported");
  }
  /**
   * Get special values if possible
   */
  virtual llvm::Value * getMinValue(class CodeGenerationContext * ctxt) const;
  virtual llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  virtual llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  /**
   * Predicates
   */
  virtual bool isNumeric() const { return false; }
  virtual bool isIntegral() const { return false; }
  virtual bool isFloatingPoint() const { return false; }
  /**
   * Context this type was created in.
   */
  DynamicRecordContext & getContext() const
  {
    return mContext;
  }
};
/**
 * Variable length string type; runtime representation is Varchar.
 */
class VarcharType : public FieldType
{
public:
  typedef Varchar runtime_type;
private:
  VarcharType(DynamicRecordContext& ctxt, int32_t sz, bool nullable)
    :
    FieldType(ctxt, FieldType::VARCHAR, sz, nullable)
  {
  }
public:
  static VarcharType * Get(DynamicRecordContext& ctxt);
  static VarcharType * Get(DynamicRecordContext& ctxt, bool nullable);
  static VarcharType * Get(DynamicRecordContext& ctxt, int32_t sz, bool nullable);
  ~VarcharType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
};
/**
 * Fixed length string type of size sz.
 */
class CharType : public FieldType
{
private:
  CharType(DynamicRecordContext& ctxt, int32_t sz, bool nullable)
    :
    FieldType(ctxt, FieldType::CHAR, sz, nullable)
  {
  }
public:
  static CharType * Get(DynamicRecordContext& ctxt, int32_t sz, bool nullable=false);
  ~CharType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
};
/**
 * Signed 8 bit integer type.
 */
class Int8Type : public FieldType
{
private:
  Int8Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::INT8, 1, nullable)
  {
  }
public:
  static Int8Type * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~Int8Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isIntegral() const;
};
/**
 * Signed 16 bit integer type.
 */
class Int16Type : public FieldType
{
private:
  Int16Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::INT16, 2, nullable)
  {
  }
public:
  static Int16Type * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~Int16Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isIntegral() const;
};
/**
 * Signed 32 bit integer type.
 */
class Int32Type : public FieldType
{
private:
  Int32Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::INT32, 4, nullable)
  {
  }
public:
  static Int32Type * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~Int32Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isIntegral() const;
};
/**
 * Signed 64 bit integer type.
 */
class Int64Type : public FieldType
{
private:
  Int64Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::INT64, 8, nullable)
  {
  }
public:
  static Int64Type * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~Int64Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isIntegral() const;
};
/**
 * IEEE single precision floating point type.
 */
class FloatType : public FieldType
{
private:
  FloatType(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::FLOAT, sizeof(float), nullable)
  {
  }
public:
  static FloatType * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~FloatType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  /**
   * Special Values
   */
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isFloatingPoint() const;
};
/**
 * IEEE double precision floating point type.
 */
class DoubleType : public FieldType
{
private:
  DoubleType(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::DOUBLE, sizeof(double), nullable)
  {
  }
public:
  static DoubleType * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~DoubleType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  /**
   * Special Values
   */
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
  bool isFloatingPoint() const;
};
/**
 * A decimal type.
 */
class DecimalType : public FieldType
{
private:
  DecimalType(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::BIGDECIMAL, 16, nullable)
  {
  }
  llvm::Value * createGlobalValue(CodeGenerationContext * ctxt, const decimal128& dec) const;
public:
  static DecimalType * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~DecimalType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  /**
   * Special Values
   */
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
  bool isNumeric() const;
};
/**
 * A datetime type.
 */
class DatetimeType : public FieldType
{
private:
  // NOTE(review): the declared size is sizeof(double), while
  // FieldType::GetAllocSize() for DATETIME returns sizeof(ptime) --
  // confirm whether GetSize() is meaningful for this type.
  DatetimeType(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::DATETIME, sizeof(double), nullable)
  {
  }
public:
  static DatetimeType * Get(DynamicRecordContext& ctxt, bool nullable = false);
  ~DatetimeType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
};
/**
 * A date type for Gregorian calendar.
 */
class DateType : public FieldType
{
private:
  DateType(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::DATE, sizeof(boost::gregorian::date), nullable)
  {
  }
public:
  static DateType * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~DateType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Value * getMinValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getMaxValue(CodeGenerationContext * ctxt) const;
  llvm::Value * getZero(CodeGenerationContext * ctxt) const;
};
/**
 * A v4 IP address type.
 */
class IPv4Type : public FieldType
{
private:
  IPv4Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::IPV4, 4, nullable)
  {
  }
public:
  static IPv4Type * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~IPv4Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
};
/**
 * A v4 CIDR type.
 */
class CIDRv4Type : public FieldType
{
private:
  CIDRv4Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::CIDRV4, 5, nullable)
  {
  }
public:
  static CIDRv4Type * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~CIDRv4Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
};
/**
 * A v6 IP address type.
 */
class IPv6Type : public FieldType
{
private:
  // NOTE(review): size 4 looks copied from IPv4Type (IPv6 raw data is 16
  // bytes; GetAllocSize() returns 16 for IPV6 independently of this value)
  // -- confirm intent.
  IPv6Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::IPV6, 4, nullable)
  {
  }
public:
  static IPv6Type * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~IPv6Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
};
/**
 * A v6 CIDR type.
 */
class CIDRv6Type : public FieldType
{
private:
  // NOTE(review): size 5 matches the v4 CIDR layout; the v6 runtime layout
  // is 17 bytes per GetAllocSize() -- confirm whether GetSize() matters here.
  CIDRv6Type(DynamicRecordContext& ctxt, bool nullable)
    :
    FieldType(ctxt, FieldType::CIDRV6, 5, nullable)
  {
  }
public:
  static CIDRv6Type * Get(DynamicRecordContext& ctxt, bool nullable=false);
  ~CIDRv6Type();
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
};
/**
 * Type of a function taking mArgs and returning mRet; not usable as a
 * record field (see FieldType::GetAlignment/GetAllocSize).
 */
class FunctionType : public FieldType
{
private:
  std::vector<const FieldType *> mArgs;
  const FieldType * mRet;
  // NOTE(review): args.size() is passed where the three-argument FieldType
  // constructor takes `bool nullable`, so mSize stays 0 and the type reads
  // as nullable whenever there is at least one argument -- confirm intent.
  FunctionType(DynamicRecordContext& ctxt, const std::vector<const FieldType *>& args, const FieldType * ret)
    :
    FieldType(ctxt, FieldType::FUNCTION, args.size()),
    mArgs(args),
    mRet(ret)
  {
  }
  static void AppendTo(const std::vector<const FieldType *>& args, const FieldType * ret, struct md5_state_s * md5);
public:
  // Interning factories for 0-3 explicit arguments or an argument vector.
  static FunctionType * Get(DynamicRecordContext& ctxt, const std::vector<const FieldType *>& args, const FieldType * ret);
  static FunctionType * Get(DynamicRecordContext& ctxt, const FieldType * ret);
  static FunctionType * Get(DynamicRecordContext& ctxt, const FieldType * arg1, const FieldType * ret);
  static FunctionType * Get(DynamicRecordContext& ctxt, const FieldType * arg1, const FieldType * arg2, const FieldType * ret);
  static FunctionType * Get(DynamicRecordContext& ctxt, const FieldType * arg1, const FieldType * arg2, const FieldType * arg3, const FieldType * ret);
  ~FunctionType();
  const std::vector<const FieldType *>& GetArgs() const { return mArgs; }
  const FieldType * GetReturn() const { return
mRet; }
  /**
   * Append my state to an md5 hash
   */
  void AppendTo(struct md5_state_s * md5) const;
  /**
   * Text representation of type.
   */
  std::string toString() const;
};
/**
 * Base class for homogeneous container types (e.g. arrays)
 */
class SequentialType : public FieldType
{
private:
  const FieldType * mElementTy;
protected:
  SequentialType(DynamicRecordContext& ctxt, FieldTypeEnum ty, int32_t sz, const FieldType * elementTy, bool nullable)
    :
    FieldType(ctxt, ty, sz, nullable),
    mElementTy(elementTy)
  {
  }
public:
  const FieldType * getElementType() const
  {
    return mElementTy;
  }
};
/**
 * A fixed length array.
 */
class FixedArrayType : public SequentialType
{
private:
  FixedArrayType(DynamicRecordContext& ctxt, int32_t sz, const FieldType * elementTy, bool nullable)
    :
    SequentialType(ctxt, FieldType::FIXED_ARRAY, sz, elementTy, nullable)
  {
  }
  static void AppendTo(int32_t sz, const FieldType * element, bool nullable, struct md5_state_s * md5);
public:
  static FixedArrayType * Get(DynamicRecordContext& ctxt, int32_t sz, const FieldType * element, bool nullable);
  ~FixedArrayType();
  // Arrays align like their elements.
  std::size_t GetAlignment() const
  {
    return getElementType()->GetAlignment();
  }
  // Data bytes plus null bitmask bytes, rounded up to a multiple of alignment.
  std::size_t GetAllocSize() const
  {
    // TODO: Should assert that AllocSize is a multiple of alignment
    return GetAlignment()*((GetDataSize() + GetNullSize() + GetAlignment() - 1)/GetAlignment());
  }
  std::size_t GetDataSize() const
  {
    return getElementType()->GetAllocSize()*((std::size_t)GetSize());
  }
  std::size_t GetDataOffset() const
  {
    return 0;
  }
  // One presence bit per element, only when elements are nullable.
  std::size_t GetNullSize() const
  {
    // Divide by 8 and round up to nearest integer.
    return getElementType()->isNullable() ? (GetSize()+7)/8 : 0;
  }
  // The null bitmask immediately follows the element data.
  std::size_t GetNullOffset() const
  {
    return GetDataSize();
  }
  /**
   * Append my state to an md5 hash
   */
  void AppendTo(struct md5_state_s * md5) const;
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Type * LLVMGetType(CodeGenerationContext * ctxt) const;
};
/**
 * A variable length array.
 */
class VariableArrayType : public SequentialType
{
private:
  VariableArrayType(DynamicRecordContext& ctxt, const FieldType * elementTy, bool nullable)
    :
    SequentialType(ctxt, FieldType::VARIABLE_ARRAY, 0, elementTy, nullable)
  {
  }
  static void AppendTo(const FieldType * element, bool nullable, struct md5_state_s * md5);
public:
  static VariableArrayType * Get(DynamicRecordContext& ctxt, const FieldType * element, bool nullable);
  ~VariableArrayType();
  std::size_t GetAlignment() const
  {
    return 8;
  }
  // In-record footprint is just the Vararray header; elements live out of line.
  std::size_t GetAllocSize() const
  {
    return sizeof(Vararray);
  }
  /**
   * Append my state to an md5 hash
   */
  void AppendTo(struct md5_state_s * md5) const;
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
  llvm::Type * LLVMGetType(CodeGenerationContext * ctxt) const;
};
/**
 * An interval of time in a single calendar/clock unit, stored as int32_t.
 */
class IntervalType : public FieldType
{
public:
  enum IntervalUnit { DAY, HOUR, MINUTE, MONTH, SECOND, YEAR };
  static const int32_t native_type_size = sizeof(int32_t);
private:
  IntervalUnit mIntervalUnit;
  IntervalType(DynamicRecordContext& ctxt, bool nullable, IntervalUnit intervalUnit)
    :
    FieldType(ctxt, FieldType::INTERVAL, native_type_size, nullable),
    mIntervalUnit(intervalUnit)
  {
  }
public:
  static IntervalType * Get(DynamicRecordContext& ctxt, IntervalUnit intervalUnit, bool nullable = false);
  ~IntervalType();
  /**
   * What is the unit of the interval.
   */
  IntervalUnit getIntervalUnit() const
  {
    return mIntervalUnit;
  }
  /**
   * When added to a date type what is the result type?
   */
  const FieldType * getDateResultType(DynamicRecordContext& ctxt, bool nullable) const;
  /**
   * Append my state to an md5 hash
   */
  void AppendTo(struct md5_state_s * md5) const;
  /**
   * Text representation of type.
   */
  std::string toString() const;
  const FieldType * clone(bool nullable) const;
};
/**
 * Type of the literal NULL; always nullable, size 0.
 */
class NilType : public FieldType
{
private:
  NilType(DynamicRecordContext& ctxt)
    :
    FieldType(ctxt, FieldType::NIL, 0, true)
  {
  }
public:
  static NilType * Get(DynamicRecordContext& ctxt);
  ~NilType();
  /**
   * Text representation of type.
   */
  std::string toString() const;
};
/**
 * A FieldAddress paired with the type tag (and, for fixed arrays, the
 * element count) needed to print the field without a full FieldType.
 */
class TaggedFieldAddress
{
private:
  FieldAddress mAddress;
  FieldType::FieldTypeEnum mTag;
  // If non-zero then this is a fixed array of type mTag
  uint32_t mSize;
  // Serialization
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive & ar, const unsigned int version)
  {
    ar & BOOST_SERIALIZATION_NVP(mAddress);
    ar & BOOST_SERIALIZATION_NVP(mTag);
    ar & BOOST_SERIALIZATION_NVP(mSize);
  }
  static void printEscaped(const char * begin, int32_t sz, char escapeChar, std::ostream& ostr);
public:
  TaggedFieldAddress()
    :
    mTag(FieldType::INT32),
    mSize(0)
  {
  }
  TaggedFieldAddress(const FieldAddress& address, FieldType::FieldTypeEnum tag, uint32_t sz=0)
    :
    mAddress(address),
    mTag(tag),
    mSize(sz)
  {
  }
  void print(RecordBuffer buf, char arrayDelimiter, char escapeChar, std::ostream& ostr) const;
};
// class RecordTypePrint
// {
// private:
//   std::vector<TaggedFieldAddress> mFields;
//   char mFieldDelimiter;
//   char mRecordDelimiter;
//   char mArrayDelimiter;
//   char mEscapeChar;
//   // Serialization
//   friend class boost::serialization::access;
//   template <class Archive>
//   void serialize(Archive & ar, const unsigned int version)
//   {
//     ar & BOOST_SERIALIZATION_NVP(mFields);
//     ar & BOOST_SERIALIZATION_NVP(mFieldDelimiter);
//     ar & BOOST_SERIALIZATION_NVP(mRecordDelimiter);
//     ar & BOOST_SERIALIZATION_NVP(mArrayDelimiter);
//     ar & BOOST_SERIALIZATION_NVP(mEscapeChar);
//   }
// public:
//   RecordTypePrint();
//   RecordTypePrint(const std::vector<TaggedFieldAddress>& fields);
//   RecordTypePrint(const std::vector<TaggedFieldAddress>& fields,
//                   char fieldDelimter, char recordDelimiter,
//                   char arrayDelimiter, char
//                   escapeChar);
//   RecordTypePrint(const TaggedFieldAddress& field)
//     :
//     mFields(1, field),
//     mFieldDelimiter('\t'),
//     mRecordDelimiter('\n'),
//     mArrayDelimiter(','),
//     mEscapeChar('\\')
//   {
//   }
//   ~RecordTypePrint();
//   void imbue(std::ostream& ostr) const;
//   void print(RecordBuffer buf, std::ostream& ostr, bool emitNewLine=true) const;
// };
/**
 * Resumable cursor over a record buffer, used by the incremental
 * serialize/deserialize routines below.
 */
struct RecordBufferIterator
{
  // Which variable length field (if any) have I descended into.
  std::size_t offset;
  uint8_t * ptr;
  RecordBufferIterator()
  {
    clear();
  }
  void init(RecordBuffer buf)
  {
    offset = 0;
    ptr = buf.Ptr;
  }
  void clear()
  {
    // Make sure this causes the maximum damage!
    offset = std::numeric_limits<std::size_t>::max();
    ptr = NULL;
  }
};
/**
 * Serializes records of a fixed layout (mSize bytes plus the variable
 * length fields at mOffsets) into an output byte range, resumably.
 */
class RecordTypeSerialize
{
private:
  std::size_t mSize;
  std::vector<FieldAddress> mOffsets;
  // Serialization
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive & ar, const unsigned int version)
  {
    ar & BOOST_SERIALIZATION_NVP(mSize);
    ar & BOOST_SERIALIZATION_NVP(mOffsets);
  }
public:
  RecordTypeSerialize();
  RecordTypeSerialize(std::size_t sz, const std::vector<FieldAddress>& offsets);
  ~RecordTypeSerialize();
  // Returns whether serialization of the record completed within
  // [output, outputEnd); inputPos tracks resume state.
  bool doit(uint8_t * & output, uint8_t * outputEnd, RecordBufferIterator & inputPos, RecordBuffer buf) const;
  std::size_t getRecordLength(RecordBuffer buf) const;
};
/**
 * Inverse of RecordTypeSerialize: rebuilds a record from a byte range,
 * resumably.
 */
class RecordTypeDeserialize
{
private:
  std::size_t mSize;
  std::vector<FieldAddress> mOffsets;
  // Serialization
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive & ar, const unsigned int version)
  {
    ar & BOOST_SERIALIZATION_NVP(mSize);
    ar & BOOST_SERIALIZATION_NVP(mOffsets);
  }
public:
  RecordTypeDeserialize();
  RecordTypeDeserialize(std::size_t sz, const std::vector<FieldAddress>& offsets);
  ~RecordTypeDeserialize();
  bool Do(uint8_t * & input, uint8_t * inputEnd, RecordBufferIterator & outputPos, RecordBuffer buf) const;
};
// class RecordTypeFree
// {
// private:
//   std::vector<FieldAddress> mOffsets;
//   std::size_t mSize;
//   // Serialization
//   friend class boost::serialization::access;
//   template <class Archive>
//   void serialize(Archive & ar, const unsigned int version)
//   {
//     ar & BOOST_SERIALIZATION_NVP(mOffsets);
//     ar & BOOST_SERIALIZATION_NVP(mSize);
//   }
// public:
//   RecordTypeFree();
//   RecordTypeFree(std::size_t sz, const std::vector<FieldAddress>& offsets);
//   ~RecordTypeFree();
//   void free(RecordBuffer & buf) const;
// };
/**
 * Allocates record buffers of a fixed size mSize.
 */
class RecordTypeMalloc
{
private:
  std::size_t mSize;
  // Serialization
  friend class boost::serialization::access;
  template <class Archive>
  void serialize(Archive & ar, const unsigned int version)
  {
    ar & BOOST_SERIALIZATION_NVP(mSize);
  }
public:
  RecordTypeMalloc(std::size_t sz=0);
  ~RecordTypeMalloc();
  RecordBuffer malloc() const;
};
/**
 * A named, typed member of a record.
 */
class RecordMember
{
private:
  const FieldType * mType;
  std::string mName;
public:
  RecordMember(const std::string& name, const FieldType * ty)
    :
    mType(ty),
    mName(name)
  {
    BOOST_ASSERT(ty != NULL);
  }
  const std::string& GetName() const
  {
    return mName;
  }
  const FieldType * GetType() const
  {
    return mType;
  }
};
/**
 * Represents the copying of some number of
 * bits from dword offset to another.
 * TODO: Templatize on chunk size.
 */
class BitcpyOp
{
public:
  // The Source and Target byte offsets of the dwords
  // Not really honest FieldAddresses, but we leverage
  // the existing abstraction FieldAddress.getPointer
  // to get the location of the bitfield
  FieldAddress mSourceOffset;
  FieldAddress mTargetOffset;
  // Mask of the bits in the source we are copying.
  uint32_t mSourceBitmask;
  // Shift of the source bits to target.  May be positive
  // or negative (shift left vs. right); so bit i in the source
  // bitmask goes to bit i+mShift in the target.
int32_t mShift; BitcpyOp(FieldAddress sourceOffset, FieldAddress targetOffset, int32_t shift, uint32_t sourceBitmask) : mSourceOffset(sourceOffset), mTargetOffset(targetOffset), mSourceBitmask(sourceBitmask), mShift(shift) { } bool operator<(const BitcpyOp & rhs) const { return this->mSourceOffset < rhs.mSourceOffset || (this->mSourceOffset == rhs.mSourceOffset && this->mTargetOffset < rhs.mTargetOffset) || (this->mSourceOffset == rhs.mSourceOffset && this->mTargetOffset == rhs.mTargetOffset && this->mShift < rhs.mShift); } /** * N.B. This resorts the input in place. * Essential we group by sourceOffset,targetOffset,shift with aggregate of * the OR of all masks. */ static void coalesce(std::vector<BitcpyOp>& input, std::vector<BitcpyOp>& output); }; class BitsetOp { public: // The Target byte offsets of the dwords // Not really honest FieldAddresses, but we leverage // the existing abstraction FieldAddress.getPointer // to get the location of the bitfield FieldAddress mTargetOffset; // Mask of the bits we are to set uint32_t mTargetBitmask; BitsetOp(FieldAddress targetOffset, uint32_t targetBitmask) : mTargetOffset(targetOffset), mTargetBitmask(targetBitmask) { } bool operator<(const BitsetOp & rhs) const { return this->mTargetOffset < rhs.mTargetOffset; } /** * N.B. This resorts the input in place. * Essentially we group by targetOffset with aggregate of * the OR of all masks. */ static void coalesce(std::vector<BitsetOp>& input, std::vector<BitsetOp>& output); }; class MemcpyOp { public: FieldAddress mSourceOffset; FieldAddress mTargetOffset; size_t mSize; MemcpyOp(const FieldAddress& sourceOffset, const FieldAddress& targetOffset, size_t sz) : mSourceOffset(sourceOffset), mTargetOffset(targetOffset), mSize(sz) { } bool operator<(const MemcpyOp & rhs) const { return this->mSourceOffset < rhs.mSourceOffset; } /** * N.B. This resorts the input in place. 
*/ static void coalesce(std::vector<MemcpyOp>& input, std::vector<MemcpyOp>& output); }; class MemsetOp { public: FieldAddress mSourceOffset; int mValue; size_t mSize; MemsetOp(const FieldAddress& sourceOffset, int value, size_t sz) : mSourceOffset(sourceOffset), mValue(value), mSize(sz) { } bool operator<(const MemsetOp & rhs) const { return this->mSourceOffset < rhs.mSourceOffset; } /** * N.B. This resorts the input in place. */ static void coalesce(std::vector<MemsetOp>& input, std::vector<MemsetOp>& output); }; class RecordType { friend class RecordTypeMove; friend class RecordTypeCopy; private: DynamicRecordContext & mContext; std::vector<RecordMember> mMembers; std::map<std::string, std::size_t> mMemberNames; boost::shared_ptr<RecordTypeMalloc> mMalloc; boost::shared_ptr<RecordTypeFree> mFree; boost::shared_ptr<RecordTypeSerialize> mSerialize; boost::shared_ptr<RecordTypeDeserialize> mDeserialize; boost::shared_ptr<RecordTypePrint> mPrint; std::vector<FieldAddress> mMemberOffsets; // Index to lookup up position of a field by its byte offset std::map<uint32_t, uint32_t> mByteOffsetToPosition; // Are any of our members nullable? // This determines whether we have a NULL bitmask or not. bool mHasNullFields; typedef std::map<std::string, std::size_t>::const_iterator const_member_name_iterator; public: /** * Create a record type with members as described. * Record types allocated by this method are owned * by the ctxt argument. * TODO: Structurally equivalent types should always * return the same pointer? Probably not for our purposes * since names matter. 
*/ static const RecordType * get(DynamicRecordContext & ctxt, const std::vector<RecordMember>& members); template<typename _InputIterator> static const RecordType * get(DynamicRecordContext & ctxt, const RecordType * input, _InputIterator begin, _InputIterator end) { std::set<std::string> ref(begin, end); std::vector<RecordMember> members; for(const_member_iterator m = input->begin_members(), e=input->end_members(); e != m; ++m) { if (ref.find(m->GetName()) != ref.end()) { members.push_back(*m); } } return get(ctxt, members); } RecordType(DynamicRecordContext & ctxt, const std::vector<RecordMember>& members); ~RecordType(); const RecordTypeMalloc * GetMalloc() const; const RecordTypeFree * GetFree() const; const RecordTypeMalloc & getMalloc() const { return *GetMalloc(); } const RecordTypeFree & getFree() const { return *GetFree(); } const RecordTypeSerialize& getSerialize() const { return *mSerialize.get(); } const RecordTypeDeserialize& getDeserialize() const { return *mDeserialize.get(); } const RecordTypePrint& getPrint() const { return *mPrint.get(); } typedef std::vector<RecordMember>::const_iterator const_member_iterator; const_member_iterator begin_members() const { return mMembers.begin(); } const_member_iterator end_members() const { return mMembers.end(); } typedef std::vector<FieldAddress>::const_iterator const_offset_iterator; const_offset_iterator begin_offsets() const { return mMemberOffsets.begin(); } const_offset_iterator end_offsets() const { return mMemberOffsets.end(); } /** * Does this record type have null fields? */ bool hasNullFields() const { return mHasNullFields; } /** * Number of members in the record. */ std::size_t size() const { return mMembers.size(); } /** * hasMember * Returns true if the record have a member of this name. */ bool hasMember(const std::string& memberName) const { return mMemberNames.find(memberName) != mMemberNames.end(); } /** * Get the offset of the member with this name. 
*/ const RecordMember& getMember(const std::string& memberName) const { const_member_name_iterator it = mMemberNames.find(memberName); if (it == mMemberNames.end()) throw std::runtime_error((boost::format("Member with name %1% does not exist") % memberName).str()); return mMembers[it->second]; } /** * Get the offset of the member with this name. */ const FieldAddress& getMemberOffset(const std::string& memberName) const { const_member_name_iterator it = mMemberNames.find(memberName); if (it == mMemberNames.end()) throw std::runtime_error((boost::format("Member with name %1% does not exist") % memberName).str()); return mMemberOffsets[it->second]; } /** * Get a field address object for a member. */ const FieldAddress& getFieldAddress(const std::string& memberName) const { return getMemberOffset(memberName); } // LLVM Instructions to get a pointer to a member of the struct llvm::Value * LLVMMemberGetPointer(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer, bool populateSymbolTable, const char * symbolTablePrefix = "") const; bool isMemberPointer(llvm::Value * val, llvm::Value * basePointer, FieldAddress& addr) const; llvm::Value * LLVMMemberGetNull(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer) const; void LLVMMemberSetNull(const std::string& member, CodeGenerationContext * ctxt, llvm::Value * basePointer, bool isNull) const; // TODO: I'd rather not have this const RecordMember & GetMember(int32_t index) const; // Are these types the same. bool operator==(const RecordType & rhs) const; bool operator!=(const RecordType & rhs) const { return !this->operator==(rhs); } DynamicRecordContext & getContext() const { return mContext; } /** * Physical format descriptor of the default text layout associated * with this record type. 
*/ std::string dumpTextFormat() const; void dump() const; // Interpreter methods for getting/setting void setInt8(const std::string& field, int8_t val, RecordBuffer buf) const; void setArrayInt8(const std::string& field, int32_t idx, int8_t val, RecordBuffer buf) const; void setInt16(const std::string& field, int16_t val, RecordBuffer buf) const; void setArrayInt16(const std::string& field, int32_t idx, int16_t val, RecordBuffer buf) const; void setInt32(const std::string& field, int32_t val, RecordBuffer buf) const; void setArrayInt32(const std::string& field, int32_t idx, int32_t val, RecordBuffer buf) const; void setInt64(const std::string& field, int64_t val, RecordBuffer buf) const; void setArrayInt64(const std::string& field, int32_t idx, int64_t val, RecordBuffer buf) const; void setFloat(const std::string& field, float val, RecordBuffer buf) const; void setArrayFloat(const std::string& field, int32_t idx, float val, RecordBuffer buf) const; void setDouble(const std::string& field, double val, RecordBuffer buf) const; void setArrayDouble(const std::string& field, int32_t idx, double val, RecordBuffer buf) const; void setDecimal(const std::string& field, decimal128 & val, RecordBuffer buf) const; void setArrayDecimal(const std::string& field, int32_t idx, decimal128 & val, RecordBuffer buf) const; void setDatetime(const std::string& field, boost::posix_time::ptime val, RecordBuffer buf) const; void setDate(const std::string& field, boost::gregorian::date val, RecordBuffer buf) const; void setIPv4(const std::string& field, boost::asio::ip::address_v4 val, RecordBuffer buffer) const; void setCIDRv4(const std::string& field, CidrV4 val, RecordBuffer buffer) const; void setIPv6(const std::string& field, const boost::asio::ip::address_v6 & val, RecordBuffer buffer) const; void setCIDRv6(const std::string& field, CidrV6 val, RecordBuffer buffer) const; // These have copy semantics void setVarchar(const std::string& field, const char* val, RecordBuffer buf) const; 
void setArrayVarchar(const std::string& field, int32_t idx, const char * val, RecordBuffer buf) const; void setChar(const std::string& field, const char* val, RecordBuffer buf) const; void setNull(const std::string& field, RecordBuffer buf) const; int8_t getInt8(const std::string& field, RecordBuffer buf) const; int8_t getArrayInt8(const std::string& field, int32_t idx, RecordBuffer buf) const; int16_t getInt16(const std::string& field, RecordBuffer buf) const; int16_t getArrayInt16(const std::string& field, int32_t idx, RecordBuffer buf) const; int32_t getInt32(const std::string& field, RecordBuffer buf) const; int32_t getArrayInt32(const std::string& field, int32_t idx, RecordBuffer buf) const; int64_t getInt64(const std::string& field, RecordBuffer buf) const; int64_t getArrayInt64(const std::string& field, int32_t idx, RecordBuffer buf) const; float getFloat(const std::string& field, RecordBuffer buf) const; float getArrayFloat(const std::string& field, int32_t idx, RecordBuffer buf) const; double getDouble(const std::string& field, RecordBuffer buf) const; double getArrayDouble(const std::string& field, int32_t idx, RecordBuffer buf) const; Varchar * getVarcharPtr(const std::string& field, RecordBuffer buf) const; Varchar * getArrayVarcharPtr(const std::string& field, int32_t idx, RecordBuffer buf) const; boost::asio::ip::address_v4 getIPv4(const std::string& field, RecordBuffer buffer) const; CidrV4 getCIDRv4(const std::string& field, RecordBuffer buffer) const; boost::asio::ip::address_v6 getIPv6(const std::string& field, RecordBuffer buffer) const; CidrV6 getCIDRv6(const std::string& field, RecordBuffer buffer) const; bool isArrayNull(const std::string& field, int32_t idx, RecordBuffer buf) const; }; class IQLRecordTypeBuilder { private: DynamicRecordContext & mContext; std::vector<RecordMember> mMembers; public: IQLRecordTypeBuilder(DynamicRecordContext& ctxt, const std::string& spec, bool isFile); ~IQLRecordTypeBuilder(); void buildField(const char * 
name, const FieldType * ty) { mMembers.push_back(RecordMember(name, ty)); } const RecordType * getProduct() const { return RecordType::get(mContext, mMembers); } }; class RecordTypeMove { private: std::vector<MemcpyOp> mMemcpy; std::vector<MemsetOp> mMemset; public: // TODO: Handle cases with renaming. RecordTypeMove(const RecordType * source, const RecordType * target); /** * Implement move as a sequence of memcpy's (and later bitcpy for null). * The source must also be modified to remove references if needed. */ const std::vector<MemcpyOp> & getMemcpy() const { return mMemcpy; } const std::vector<MemsetOp> & getMemset() const { return mMemset; } // Move the contents of source to target. The references // to source is cleared and all resources for source are either // reassigned or freed. void Execute(RecordBuffer& source, RecordBuffer target); }; class RecordTypeCopy { public: typedef std::vector<std::pair<RecordMember, int> > set_type; private: std::vector<MemcpyOp> mMemcpy; // Bits that must be copied from source to target std::vector<BitcpyOp> mBitcpy; // Bits that must be set in the target std::vector<BitsetOp> mBitset; set_type mSet; public: // TODO: Handle cases with renaming. RecordTypeCopy(const RecordType * source, const RecordType * target, const std::string& sourceColumnsRegex, int * pos); RecordTypeCopy(const RecordType * source, const RecordType * target, const std::string& sourceColumnsRegex, const std::string& targetColumnsPattern, int * pos); /** * A coalesced sequence of memcpy for copying POD fields. */ const std::vector<MemcpyOp> & getMemcpy() const { return mMemcpy; } /** * A coalesced sequence of bitcpy for copying POD fields. */ const std::vector<BitcpyOp> & getBitcpy() const { return mBitcpy; } /** * A coalesced sequence of bitset for copying POD fields. */ const std::vector<BitsetOp> & getBitset() const { return mBitset; } /** * Fields that have to be moved explicitly. */ const set_type& getSet() const { return mSet; } }; #endif
27.892623
128
0.677475
[ "object", "vector" ]
24139f2b9ee1659b86ed7e784811061dd129675d
23,073
cpp
C++
bolt/lib/Passes/LongJmp.cpp
ornata/llvm-project
494913b8b4e4bce0b3525e5569d8e486e82b9a52
[ "Apache-2.0" ]
null
null
null
bolt/lib/Passes/LongJmp.cpp
ornata/llvm-project
494913b8b4e4bce0b3525e5569d8e486e82b9a52
[ "Apache-2.0" ]
null
null
null
bolt/lib/Passes/LongJmp.cpp
ornata/llvm-project
494913b8b4e4bce0b3525e5569d8e486e82b9a52
[ "Apache-2.0" ]
null
null
null
//===- bolt/Passes/LongJmp.cpp --------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the LongJmpPass class. // //===----------------------------------------------------------------------===// #include "bolt/Passes/LongJmp.h" #define DEBUG_TYPE "longjmp" using namespace llvm; namespace opts { extern cl::OptionCategory BoltOptCategory; extern llvm::cl::opt<unsigned> AlignText; extern cl::opt<unsigned> AlignFunctions; extern cl::opt<bool> UseOldText; extern cl::opt<bool> HotFunctionsAtEnd; static cl::opt<bool> GroupStubs("group-stubs", cl::desc("share stubs across functions"), cl::init(true), cl::ZeroOrMore, cl::cat(BoltOptCategory)); } namespace llvm { namespace bolt { namespace { constexpr unsigned ColdFragAlign = 16; void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { const BinaryContext &BC = StubBB.getFunction()->getBinaryContext(); InstructionListType Seq; BC.MIB->createShortJmp(Seq, Tgt, BC.Ctx.get()); StubBB.clear(); StubBB.addInstructions(Seq.begin(), Seq.end()); } void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { const BinaryContext &BC = StubBB.getFunction()->getBinaryContext(); InstructionListType Seq; BC.MIB->createLongJmp(Seq, Tgt, BC.Ctx.get()); StubBB.clear(); StubBB.addInstructions(Seq.begin(), Seq.end()); } BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) { if (!Func.isSplit() || Func.empty()) return nullptr; assert(!(*Func.begin()).isCold() && "Entry cannot be cold"); for (auto I = Func.layout_begin(), E = Func.layout_end(); I != E; ++I) { auto Next = std::next(I); if (Next != E && (*Next)->isCold()) return *I; } llvm_unreachable("No hot-colt split point found"); } bool 
shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) { return (BC.MIB->isBranch(Inst) || BC.MIB->isCall(Inst)) && !BC.MIB->isIndirectBranch(Inst) && !BC.MIB->isIndirectCall(Inst); } } // end anonymous namespace std::pair<std::unique_ptr<BinaryBasicBlock>, MCSymbol *> LongJmpPass::createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym, bool TgtIsFunc, uint64_t AtAddress) { BinaryFunction &Func = *SourceBB.getFunction(); const BinaryContext &BC = Func.getBinaryContext(); const bool IsCold = SourceBB.isCold(); MCSymbol *StubSym = BC.Ctx->createNamedTempSymbol("Stub"); std::unique_ptr<BinaryBasicBlock> StubBB = Func.createBasicBlock(0, StubSym); MCInst Inst; BC.MIB->createUncondBranch(Inst, TgtSym, BC.Ctx.get()); if (TgtIsFunc) BC.MIB->convertJmpToTailCall(Inst); StubBB->addInstruction(Inst); StubBB->setExecutionCount(0); // Register this in stubs maps auto registerInMap = [&](StubGroupsTy &Map) { StubGroupTy &StubGroup = Map[TgtSym]; StubGroup.insert( std::lower_bound( StubGroup.begin(), StubGroup.end(), std::make_pair(AtAddress, nullptr), [&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS, const std::pair<uint64_t, BinaryBasicBlock *> &RHS) { return LHS.first < RHS.first; }), std::make_pair(AtAddress, StubBB.get())); }; Stubs[&Func].insert(StubBB.get()); StubBits[StubBB.get()] = BC.MIB->getUncondBranchEncodingSize(); if (IsCold) { registerInMap(ColdLocalStubs[&Func]); if (opts::GroupStubs && TgtIsFunc) registerInMap(ColdStubGroups); ++NumColdStubs; } else { registerInMap(HotLocalStubs[&Func]); if (opts::GroupStubs && TgtIsFunc) registerInMap(HotStubGroups); ++NumHotStubs; } return std::make_pair(std::move(StubBB), StubSym); } BinaryBasicBlock *LongJmpPass::lookupStubFromGroup( const StubGroupsTy &StubGroups, const BinaryFunction &Func, const MCInst &Inst, const MCSymbol *TgtSym, uint64_t DotAddress) const { const BinaryContext &BC = Func.getBinaryContext(); auto CandidatesIter = StubGroups.find(TgtSym); if (CandidatesIter == StubGroups.end()) 
return nullptr; const StubGroupTy &Candidates = CandidatesIter->second; if (Candidates.empty()) return nullptr; auto Cand = std::lower_bound( Candidates.begin(), Candidates.end(), std::make_pair(DotAddress, nullptr), [&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS, const std::pair<uint64_t, BinaryBasicBlock *> &RHS) { return LHS.first < RHS.first; }); if (Cand == Candidates.end()) return nullptr; if (Cand != Candidates.begin()) { const StubTy *LeftCand = std::prev(Cand); if (Cand->first - DotAddress > DotAddress - LeftCand->first) Cand = LeftCand; } int BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 1; uint64_t Mask = ~((1ULL << BitsAvail) - 1); uint64_t PCRelTgtAddress = Cand->first; PCRelTgtAddress = DotAddress > PCRelTgtAddress ? DotAddress - PCRelTgtAddress : PCRelTgtAddress - DotAddress; LLVM_DEBUG({ if (Candidates.size() > 1) dbgs() << "Considering stub group with " << Candidates.size() << " candidates. DotAddress is " << Twine::utohexstr(DotAddress) << ", chosen candidate address is " << Twine::utohexstr(Cand->first) << "\n"; }); return PCRelTgtAddress & Mask ? nullptr : Cand->second; } BinaryBasicBlock * LongJmpPass::lookupGlobalStub(const BinaryBasicBlock &SourceBB, const MCInst &Inst, const MCSymbol *TgtSym, uint64_t DotAddress) const { const BinaryFunction &Func = *SourceBB.getFunction(); const StubGroupsTy &StubGroups = SourceBB.isCold() ? ColdStubGroups : HotStubGroups; return lookupStubFromGroup(StubGroups, Func, Inst, TgtSym, DotAddress); } BinaryBasicBlock *LongJmpPass::lookupLocalStub(const BinaryBasicBlock &SourceBB, const MCInst &Inst, const MCSymbol *TgtSym, uint64_t DotAddress) const { const BinaryFunction &Func = *SourceBB.getFunction(); const DenseMap<const BinaryFunction *, StubGroupsTy> &StubGroups = SourceBB.isCold() ? 
ColdLocalStubs : HotLocalStubs; const auto Iter = StubGroups.find(&Func); if (Iter == StubGroups.end()) return nullptr; return lookupStubFromGroup(Iter->second, Func, Inst, TgtSym, DotAddress); } std::unique_ptr<BinaryBasicBlock> LongJmpPass::replaceTargetWithStub(BinaryBasicBlock &BB, MCInst &Inst, uint64_t DotAddress, uint64_t StubCreationAddress) { const BinaryFunction &Func = *BB.getFunction(); const BinaryContext &BC = Func.getBinaryContext(); std::unique_ptr<BinaryBasicBlock> NewBB; const MCSymbol *TgtSym = BC.MIB->getTargetSymbol(Inst); assert(TgtSym && "getTargetSymbol failed"); BinaryBasicBlock::BinaryBranchInfo BI{0, 0}; BinaryBasicBlock *TgtBB = BB.getSuccessor(TgtSym, BI); auto LocalStubsIter = Stubs.find(&Func); // If already using stub and the stub is from another function, create a local // stub, since the foreign stub is now out of range if (!TgtBB) { auto SSIter = SharedStubs.find(TgtSym); if (SSIter != SharedStubs.end()) { TgtSym = BC.MIB->getTargetSymbol(*SSIter->second->begin()); --NumSharedStubs; } } else if (LocalStubsIter != Stubs.end() && LocalStubsIter->second.count(TgtBB)) { // If we are replacing a local stub (because it is now out of range), // use its target instead of creating a stub to jump to another stub TgtSym = BC.MIB->getTargetSymbol(*TgtBB->begin()); TgtBB = BB.getSuccessor(TgtSym, BI); } BinaryBasicBlock *StubBB = lookupLocalStub(BB, Inst, TgtSym, DotAddress); // If not found, look it up in globally shared stub maps if it is a function // call (TgtBB is not set) if (!StubBB && !TgtBB) { StubBB = lookupGlobalStub(BB, Inst, TgtSym, DotAddress); if (StubBB) { SharedStubs[StubBB->getLabel()] = StubBB; ++NumSharedStubs; } } MCSymbol *StubSymbol = StubBB ? 
StubBB->getLabel() : nullptr; if (!StubBB) { std::tie(NewBB, StubSymbol) = createNewStub(BB, TgtSym, /*is func?*/ !TgtBB, StubCreationAddress); StubBB = NewBB.get(); } // Local branch if (TgtBB) { uint64_t OrigCount = BI.Count; uint64_t OrigMispreds = BI.MispredictedCount; BB.replaceSuccessor(TgtBB, StubBB, OrigCount, OrigMispreds); StubBB->setExecutionCount(StubBB->getExecutionCount() + OrigCount); if (NewBB) { StubBB->addSuccessor(TgtBB, OrigCount, OrigMispreds); StubBB->setIsCold(BB.isCold()); } // Call / tail call } else { StubBB->setExecutionCount(StubBB->getExecutionCount() + BB.getExecutionCount()); if (NewBB) { assert(TgtBB == nullptr); StubBB->setIsCold(BB.isCold()); // Set as entry point because this block is valid but we have no preds StubBB->getFunction()->addEntryPoint(*StubBB); } } BC.MIB->replaceBranchTarget(Inst, StubSymbol, BC.Ctx.get()); return NewBB; } void LongJmpPass::updateStubGroups() { auto update = [&](StubGroupsTy &StubGroups) { for (auto &KeyVal : StubGroups) { for (StubTy &Elem : KeyVal.second) Elem.first = BBAddresses[Elem.second]; std::sort(KeyVal.second.begin(), KeyVal.second.end(), [&](const std::pair<uint64_t, BinaryBasicBlock *> &LHS, const std::pair<uint64_t, BinaryBasicBlock *> &RHS) { return LHS.first < RHS.first; }); } }; for (auto &KeyVal : HotLocalStubs) update(KeyVal.second); for (auto &KeyVal : ColdLocalStubs) update(KeyVal.second); update(HotStubGroups); update(ColdStubGroups); } void LongJmpPass::tentativeBBLayout(const BinaryFunction &Func) { const BinaryContext &BC = Func.getBinaryContext(); uint64_t HotDot = HotAddresses[&Func]; uint64_t ColdDot = ColdAddresses[&Func]; bool Cold = false; for (BinaryBasicBlock *BB : Func.layout()) { if (Cold || BB->isCold()) { Cold = true; BBAddresses[BB] = ColdDot; ColdDot += BC.computeCodeSize(BB->begin(), BB->end()); } else { BBAddresses[BB] = HotDot; HotDot += BC.computeCodeSize(BB->begin(), BB->end()); } } } uint64_t LongJmpPass::tentativeLayoutRelocColdPart( const BinaryContext 
&BC, std::vector<BinaryFunction *> &SortedFunctions, uint64_t DotAddress) { DotAddress = alignTo(DotAddress, llvm::Align(opts::AlignFunctions)); for (BinaryFunction *Func : SortedFunctions) { if (!Func->isSplit()) continue; DotAddress = alignTo(DotAddress, BinaryFunction::MinAlign); uint64_t Pad = offsetToAlignment(DotAddress, llvm::Align(Func->getAlignment())); if (Pad <= Func->getMaxColdAlignmentBytes()) DotAddress += Pad; ColdAddresses[Func] = DotAddress; LLVM_DEBUG(dbgs() << Func->getPrintName() << " cold tentative: " << Twine::utohexstr(DotAddress) << "\n"); DotAddress += Func->estimateColdSize(); DotAddress = alignTo(DotAddress, Func->getConstantIslandAlignment()); DotAddress += Func->estimateConstantIslandSize(); } return DotAddress; } uint64_t LongJmpPass::tentativeLayoutRelocMode( const BinaryContext &BC, std::vector<BinaryFunction *> &SortedFunctions, uint64_t DotAddress) { // Compute hot cold frontier uint32_t LastHotIndex = -1u; uint32_t CurrentIndex = 0; if (opts::HotFunctionsAtEnd) { for (BinaryFunction *BF : SortedFunctions) { if (BF->hasValidIndex()) { LastHotIndex = CurrentIndex; break; } ++CurrentIndex; } } else { for (BinaryFunction *BF : SortedFunctions) { if (!BF->hasValidIndex()) { LastHotIndex = CurrentIndex; break; } ++CurrentIndex; } } // Hot CurrentIndex = 0; bool ColdLayoutDone = false; for (BinaryFunction *Func : SortedFunctions) { if (!BC.shouldEmit(*Func)) { HotAddresses[Func] = Func->getAddress(); continue; } if (!ColdLayoutDone && CurrentIndex >= LastHotIndex) { DotAddress = tentativeLayoutRelocColdPart(BC, SortedFunctions, DotAddress); ColdLayoutDone = true; if (opts::HotFunctionsAtEnd) DotAddress = alignTo(DotAddress, opts::AlignText); } DotAddress = alignTo(DotAddress, BinaryFunction::MinAlign); uint64_t Pad = offsetToAlignment(DotAddress, llvm::Align(Func->getAlignment())); if (Pad <= Func->getMaxAlignmentBytes()) DotAddress += Pad; HotAddresses[Func] = DotAddress; LLVM_DEBUG(dbgs() << Func->getPrintName() << " tentative: " << 
Twine::utohexstr(DotAddress) << "\n"); if (!Func->isSplit()) DotAddress += Func->estimateSize(); else DotAddress += Func->estimateHotSize(); DotAddress = alignTo(DotAddress, Func->getConstantIslandAlignment()); DotAddress += Func->estimateConstantIslandSize(); ++CurrentIndex; } // BBs for (BinaryFunction *Func : SortedFunctions) tentativeBBLayout(*Func); return DotAddress; } void LongJmpPass::tentativeLayout( const BinaryContext &BC, std::vector<BinaryFunction *> &SortedFunctions) { uint64_t DotAddress = BC.LayoutStartAddress; if (!BC.HasRelocations) { for (BinaryFunction *Func : SortedFunctions) { HotAddresses[Func] = Func->getAddress(); DotAddress = alignTo(DotAddress, ColdFragAlign); ColdAddresses[Func] = DotAddress; if (Func->isSplit()) DotAddress += Func->estimateColdSize(); tentativeBBLayout(*Func); } return; } // Relocation mode uint64_t EstimatedTextSize = 0; if (opts::UseOldText) { EstimatedTextSize = tentativeLayoutRelocMode(BC, SortedFunctions, 0); // Initial padding if (EstimatedTextSize <= BC.OldTextSectionSize) { DotAddress = BC.OldTextSectionAddress; uint64_t Pad = offsetToAlignment(DotAddress, llvm::Align(opts::AlignText)); if (Pad + EstimatedTextSize <= BC.OldTextSectionSize) { DotAddress += Pad; } } } if (!EstimatedTextSize || EstimatedTextSize > BC.OldTextSectionSize) DotAddress = alignTo(BC.LayoutStartAddress, opts::AlignText); tentativeLayoutRelocMode(BC, SortedFunctions, DotAddress); } bool LongJmpPass::usesStub(const BinaryFunction &Func, const MCInst &Inst) const { const MCSymbol *TgtSym = Func.getBinaryContext().MIB->getTargetSymbol(Inst); const BinaryBasicBlock *TgtBB = Func.getBasicBlockForLabel(TgtSym); auto Iter = Stubs.find(&Func); if (Iter != Stubs.end()) return Iter->second.count(TgtBB); return false; } uint64_t LongJmpPass::getSymbolAddress(const BinaryContext &BC, const MCSymbol *Target, const BinaryBasicBlock *TgtBB) const { if (TgtBB) { auto Iter = BBAddresses.find(TgtBB); assert(Iter != BBAddresses.end() && "Unrecognized BB"); 
return Iter->second; } uint64_t EntryID = 0; const BinaryFunction *TargetFunc = BC.getFunctionForSymbol(Target, &EntryID); auto Iter = HotAddresses.find(TargetFunc); if (Iter == HotAddresses.end() || (TargetFunc && EntryID)) { // Look at BinaryContext's resolution for this symbol - this is a symbol not // mapped to a BinaryFunction ErrorOr<uint64_t> ValueOrError = BC.getSymbolValue(*Target); assert(ValueOrError && "Unrecognized symbol"); return *ValueOrError; } return Iter->second; } bool LongJmpPass::relaxStub(BinaryBasicBlock &StubBB) { const BinaryFunction &Func = *StubBB.getFunction(); const BinaryContext &BC = Func.getBinaryContext(); const int Bits = StubBits[&StubBB]; // Already working with the largest range? if (Bits == static_cast<int>(BC.AsmInfo->getCodePointerSize() * 8)) return false; const static int RangeShortJmp = BC.MIB->getShortJmpEncodingSize(); const static int RangeSingleInstr = BC.MIB->getUncondBranchEncodingSize(); const static uint64_t ShortJmpMask = ~((1ULL << RangeShortJmp) - 1); const static uint64_t SingleInstrMask = ~((1ULL << (RangeSingleInstr - 1)) - 1); const MCSymbol *RealTargetSym = BC.MIB->getTargetSymbol(*StubBB.begin()); const BinaryBasicBlock *TgtBB = Func.getBasicBlockForLabel(RealTargetSym); uint64_t TgtAddress = getSymbolAddress(BC, RealTargetSym, TgtBB); uint64_t DotAddress = BBAddresses[&StubBB]; uint64_t PCRelTgtAddress = DotAddress > TgtAddress ? DotAddress - TgtAddress : TgtAddress - DotAddress; // If it fits in one instruction, do not relax if (!(PCRelTgtAddress & SingleInstrMask)) return false; // Fits short jmp if (!(PCRelTgtAddress & ShortJmpMask)) { if (Bits >= RangeShortJmp) return false; LLVM_DEBUG(dbgs() << "Relaxing stub to short jump. 
PCRelTgtAddress = " << Twine::utohexstr(PCRelTgtAddress) << " RealTargetSym = " << RealTargetSym->getName() << "\n"); relaxStubToShortJmp(StubBB, RealTargetSym); StubBits[&StubBB] = RangeShortJmp; return true; } // The long jmp uses absolute address on AArch64 // So we could not use it for PIC binaries if (BC.isAArch64() && !BC.HasFixedLoadAddress) { errs() << "BOLT-ERROR: Unable to relax stub for PIC binary\n"; exit(1); } LLVM_DEBUG(dbgs() << "Relaxing stub to long jump. PCRelTgtAddress = " << Twine::utohexstr(PCRelTgtAddress) << " RealTargetSym = " << RealTargetSym->getName() << "\n"); relaxStubToLongJmp(StubBB, RealTargetSym); StubBits[&StubBB] = static_cast<int>(BC.AsmInfo->getCodePointerSize() * 8); return true; } bool LongJmpPass::needsStub(const BinaryBasicBlock &BB, const MCInst &Inst, uint64_t DotAddress) const { const BinaryFunction &Func = *BB.getFunction(); const BinaryContext &BC = Func.getBinaryContext(); const MCSymbol *TgtSym = BC.MIB->getTargetSymbol(Inst); assert(TgtSym && "getTargetSymbol failed"); const BinaryBasicBlock *TgtBB = Func.getBasicBlockForLabel(TgtSym); // Check for shared stubs from foreign functions if (!TgtBB) { auto SSIter = SharedStubs.find(TgtSym); if (SSIter != SharedStubs.end()) TgtBB = SSIter->second; } int BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 1; uint64_t Mask = ~((1ULL << BitsAvail) - 1); uint64_t PCRelTgtAddress = getSymbolAddress(BC, TgtSym, TgtBB); PCRelTgtAddress = DotAddress > PCRelTgtAddress ? DotAddress - PCRelTgtAddress : PCRelTgtAddress - DotAddress; return PCRelTgtAddress & Mask; } bool LongJmpPass::relax(BinaryFunction &Func) { const BinaryContext &BC = Func.getBinaryContext(); bool Modified = false; assert(BC.isAArch64() && "Unsupported arch"); constexpr int InsnSize = 4; // AArch64 std::vector<std::pair<BinaryBasicBlock *, std::unique_ptr<BinaryBasicBlock>>> Insertions; BinaryBasicBlock *Frontier = getBBAtHotColdSplitPoint(Func); uint64_t FrontierAddress = Frontier ? 
BBAddresses[Frontier] : 0; if (FrontierAddress) FrontierAddress += Frontier->getNumNonPseudos() * InsnSize; // Add necessary stubs for branch targets we know we can't fit in the // instruction for (BinaryBasicBlock &BB : Func) { uint64_t DotAddress = BBAddresses[&BB]; // Stubs themselves are relaxed on the next loop if (Stubs[&Func].count(&BB)) continue; for (MCInst &Inst : BB) { if (BC.MIB->isPseudo(Inst)) continue; if (!shouldInsertStub(BC, Inst)) { DotAddress += InsnSize; continue; } // Check and relax direct branch or call if (!needsStub(BB, Inst, DotAddress)) { DotAddress += InsnSize; continue; } Modified = true; // Insert stubs close to the patched BB if call, but far away from the // hot path if a branch, since this branch target is the cold region // (but first check that the far away stub will be in range). BinaryBasicBlock *InsertionPoint = &BB; if (Func.isSimple() && !BC.MIB->isCall(Inst) && FrontierAddress && !BB.isCold()) { int BitsAvail = BC.MIB->getPCRelEncodingSize(Inst) - 1; uint64_t Mask = ~((1ULL << BitsAvail) - 1); assert(FrontierAddress > DotAddress && "Hot code should be before the frontier"); uint64_t PCRelTgt = FrontierAddress - DotAddress; if (!(PCRelTgt & Mask)) InsertionPoint = Frontier; } // Always put stubs at the end of the function if non-simple. We can't // change the layout of non-simple functions because it has jump tables // that we do not control. if (!Func.isSimple()) InsertionPoint = &*std::prev(Func.end()); // Create a stub to handle a far-away target Insertions.emplace_back(InsertionPoint, replaceTargetWithStub(BB, Inst, DotAddress, InsertionPoint == Frontier ? 
FrontierAddress : DotAddress)); DotAddress += InsnSize; } } // Relax stubs if necessary for (BinaryBasicBlock &BB : Func) { if (!Stubs[&Func].count(&BB) || !BB.isValid()) continue; Modified |= relaxStub(BB); } for (std::pair<BinaryBasicBlock *, std::unique_ptr<BinaryBasicBlock>> &Elmt : Insertions) { if (!Elmt.second) continue; std::vector<std::unique_ptr<BinaryBasicBlock>> NewBBs; NewBBs.emplace_back(std::move(Elmt.second)); Func.insertBasicBlocks(Elmt.first, std::move(NewBBs), true); } return Modified; } void LongJmpPass::runOnFunctions(BinaryContext &BC) { outs() << "BOLT-INFO: Starting stub-insertion pass\n"; std::vector<BinaryFunction *> Sorted = BC.getSortedFunctions(); bool Modified; uint32_t Iterations = 0; do { ++Iterations; Modified = false; tentativeLayout(BC, Sorted); updateStubGroups(); for (BinaryFunction *Func : Sorted) { if (relax(*Func)) { // Don't ruin non-simple functions, they can't afford to have the layout // changed. if (Func->isSimple()) Func->fixBranches(); Modified = true; } } } while (Modified); outs() << "BOLT-INFO: Inserted " << NumHotStubs << " stubs in the hot area and " << NumColdStubs << " stubs in the cold area. Shared " << NumSharedStubs << " times, iterated " << Iterations << " times.\n"; } } // namespace bolt } // namespace llvm
35.551618
80
0.642959
[ "vector" ]
2413fbded1fc4bb798539f04f7315327c0721649
37,532
cpp
C++
scene/2d/physics_body_2d.cpp
dragonsoulz/godot
6b106b319b0b4c9436a992bf26c88d93180905e9
[ "MIT" ]
null
null
null
scene/2d/physics_body_2d.cpp
dragonsoulz/godot
6b106b319b0b4c9436a992bf26c88d93180905e9
[ "MIT" ]
null
null
null
scene/2d/physics_body_2d.cpp
dragonsoulz/godot
6b106b319b0b4c9436a992bf26c88d93180905e9
[ "MIT" ]
null
null
null
/*************************************************************************/ /* physics_body_2d.cpp */ /*************************************************************************/ /* This file is part of: */ /* GODOT ENGINE */ /* http://www.godotengine.org */ /*************************************************************************/ /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ /* "Software"), to deal in the Software without restriction, including */ /* without limitation the rights to use, copy, modify, merge, publish, */ /* distribute, sublicense, and/or sell copies of the Software, and to */ /* permit persons to whom the Software is furnished to do so, subject to */ /* the following conditions: */ /* */ /* The above copyright notice and this permission notice shall be */ /* included in all copies or substantial portions of the Software. */ /* */ /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /*************************************************************************/ #include "physics_body_2d.h" #include "scene/scene_string_names.h" void PhysicsBody2D::_notification(int p_what) { /* switch(p_what) { case NOTIFICATION_TRANSFORM_CHANGED: { Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_TRANSFORM,get_global_transform()); } break; } */ } void PhysicsBody2D::_bind_methods() { ObjectTypeDB::bind_method(_MD("set_layer_mask","mask"),&PhysicsBody2D::set_layer_mask); ObjectTypeDB::bind_method(_MD("get_layer_mask"),&PhysicsBody2D::get_layer_mask); ObjectTypeDB::bind_method(_MD("add_collision_exception_with","body:PhysicsBody2D"),&PhysicsBody2D::add_collision_exception_with); ObjectTypeDB::bind_method(_MD("remove_collision_exception_with","body:PhysicsBody2D"),&PhysicsBody2D::remove_collision_exception_with); ADD_PROPERTY(PropertyInfo(Variant::INT,"layers",PROPERTY_HINT_ALL_FLAGS),_SCS("set_layer_mask"),_SCS("get_layer_mask")); } void PhysicsBody2D::set_layer_mask(uint32_t p_mask) { mask=p_mask; Physics2DServer::get_singleton()->body_set_layer_mask(get_rid(),p_mask); } uint32_t PhysicsBody2D::get_layer_mask() const { return mask; } PhysicsBody2D::PhysicsBody2D(Physics2DServer::BodyMode p_mode) : CollisionObject2D( Physics2DServer::get_singleton()->body_create(p_mode), false) { mask=1; } void PhysicsBody2D::add_collision_exception_with(Node* p_node) { ERR_FAIL_NULL(p_node); PhysicsBody2D *physics_body = p_node->cast_to<PhysicsBody2D>(); if (!physics_body) { ERR_EXPLAIN("Collision exception only works between two objects of PhysicsBody type"); } ERR_FAIL_COND(!physics_body); Physics2DServer::get_singleton()->body_add_collision_exception(get_rid(),physics_body->get_rid()); } void PhysicsBody2D::remove_collision_exception_with(Node* p_node) { ERR_FAIL_NULL(p_node); PhysicsBody2D *physics_body = p_node->cast_to<PhysicsBody2D>(); if (!physics_body) { ERR_EXPLAIN("Collision exception only works between two objects of PhysicsBody 
type"); } ERR_FAIL_COND(!physics_body); Physics2DServer::get_singleton()->body_remove_collision_exception(get_rid(),physics_body->get_rid()); } void StaticBody2D::set_constant_linear_velocity(const Vector2& p_vel) { constant_linear_velocity=p_vel; Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_LINEAR_VELOCITY,constant_linear_velocity); } void StaticBody2D::set_constant_angular_velocity(real_t p_vel) { constant_angular_velocity=p_vel; Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_ANGULAR_VELOCITY,constant_angular_velocity); } Vector2 StaticBody2D::get_constant_linear_velocity() const { return constant_linear_velocity; } real_t StaticBody2D::get_constant_angular_velocity() const { return constant_angular_velocity; } #if 0 void StaticBody2D::_update_xform() { if (!pre_xform || !pending) return; setting=true; Matrix32 new_xform = get_global_transform(); //obtain the new one set_block_transform_notify(true); Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_TRANSFORM,*pre_xform); //then simulate motion! set_global_transform(*pre_xform); //but restore state to previous one in both visual and physics set_block_transform_notify(false); Physics2DServer::get_singleton()->body_static_simulate_motion(get_rid(),new_xform); //then simulate motion! 
setting=false; pending=false; } #endif void StaticBody2D::set_friction(real_t p_friction){ ERR_FAIL_COND(p_friction<0 || p_friction>1); friction=p_friction; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_FRICTION,friction); } real_t StaticBody2D::get_friction() const{ return friction; } void StaticBody2D::set_bounce(real_t p_bounce){ ERR_FAIL_COND(p_bounce<0 || p_bounce>1); bounce=p_bounce; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_BOUNCE,bounce); } real_t StaticBody2D::get_bounce() const{ return bounce; } void StaticBody2D::_bind_methods() { ObjectTypeDB::bind_method(_MD("set_constant_linear_velocity","vel"),&StaticBody2D::set_constant_linear_velocity); ObjectTypeDB::bind_method(_MD("set_constant_angular_velocity","vel"),&StaticBody2D::set_constant_angular_velocity); ObjectTypeDB::bind_method(_MD("get_constant_linear_velocity"),&StaticBody2D::get_constant_linear_velocity); ObjectTypeDB::bind_method(_MD("get_constant_angular_velocity"),&StaticBody2D::get_constant_angular_velocity); ObjectTypeDB::bind_method(_MD("set_friction","friction"),&StaticBody2D::set_friction); ObjectTypeDB::bind_method(_MD("get_friction"),&StaticBody2D::get_friction); ObjectTypeDB::bind_method(_MD("set_bounce","bounce"),&StaticBody2D::set_bounce); ObjectTypeDB::bind_method(_MD("get_bounce"),&StaticBody2D::get_bounce); ADD_PROPERTY(PropertyInfo(Variant::VECTOR2,"constant_linear_velocity"),_SCS("set_constant_linear_velocity"),_SCS("get_constant_linear_velocity")); ADD_PROPERTY(PropertyInfo(Variant::REAL,"constant_angular_velocity"),_SCS("set_constant_angular_velocity"),_SCS("get_constant_angular_velocity")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"friction",PROPERTY_HINT_RANGE,"0,1,0.01"),_SCS("set_friction"),_SCS("get_friction")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"bounce",PROPERTY_HINT_RANGE,"0,1,0.01"),_SCS("set_bounce"),_SCS("get_bounce")); } StaticBody2D::StaticBody2D() : 
PhysicsBody2D(Physics2DServer::BODY_MODE_STATIC) { constant_angular_velocity=0; bounce=0; friction=1; } StaticBody2D::~StaticBody2D() { } void RigidBody2D::_body_enter_tree(ObjectID p_id) { Object *obj = ObjectDB::get_instance(p_id); Node *node = obj ? obj->cast_to<Node>() : NULL; ERR_FAIL_COND(!node); Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.find(p_id); ERR_FAIL_COND(!E); ERR_FAIL_COND(E->get().in_scene); E->get().in_scene=true; emit_signal(SceneStringNames::get_singleton()->body_enter,node); for(int i=0;i<E->get().shapes.size();i++) { emit_signal(SceneStringNames::get_singleton()->body_enter_shape,p_id,node,E->get().shapes[i].body_shape,E->get().shapes[i].local_shape); } } void RigidBody2D::_body_exit_tree(ObjectID p_id) { Object *obj = ObjectDB::get_instance(p_id); Node *node = obj ? obj->cast_to<Node>() : NULL; ERR_FAIL_COND(!node); Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.find(p_id); ERR_FAIL_COND(!E); ERR_FAIL_COND(!E->get().in_scene); E->get().in_scene=false; emit_signal(SceneStringNames::get_singleton()->body_exit,node); for(int i=0;i<E->get().shapes.size();i++) { emit_signal(SceneStringNames::get_singleton()->body_exit_shape,p_id,node,E->get().shapes[i].body_shape,E->get().shapes[i].local_shape); } } void RigidBody2D::_body_inout(int p_status, ObjectID p_instance, int p_body_shape,int p_local_shape) { bool body_in = p_status==1; ObjectID objid=p_instance; Object *obj = ObjectDB::get_instance(objid); Node *node = obj ? 
obj->cast_to<Node>() : NULL; Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.find(objid); ERR_FAIL_COND(!body_in && !E); if (body_in) { if (!E) { E = contact_monitor->body_map.insert(objid,BodyState()); // E->get().rc=0; E->get().in_scene=node && node->is_inside_tree(); if (node) { node->connect(SceneStringNames::get_singleton()->enter_tree,this,SceneStringNames::get_singleton()->_body_enter_tree,make_binds(objid)); node->connect(SceneStringNames::get_singleton()->exit_tree,this,SceneStringNames::get_singleton()->_body_exit_tree,make_binds(objid)); if (E->get().in_scene) { emit_signal(SceneStringNames::get_singleton()->body_enter,node); } } //E->get().rc++; } if (node) E->get().shapes.insert(ShapePair(p_body_shape,p_local_shape)); if (E->get().in_scene) { emit_signal(SceneStringNames::get_singleton()->body_enter_shape,objid,node,p_body_shape,p_local_shape); } } else { //E->get().rc--; if (node) E->get().shapes.erase(ShapePair(p_body_shape,p_local_shape)); bool in_scene = E->get().in_scene; if (E->get().shapes.empty()) { if (node) { node->disconnect(SceneStringNames::get_singleton()->enter_tree,this,SceneStringNames::get_singleton()->_body_enter_tree); node->disconnect(SceneStringNames::get_singleton()->exit_tree,this,SceneStringNames::get_singleton()->_body_exit_tree); if (in_scene) emit_signal(SceneStringNames::get_singleton()->body_exit,obj); } contact_monitor->body_map.erase(E); } if (node && in_scene) { emit_signal(SceneStringNames::get_singleton()->body_exit_shape,objid,obj,p_body_shape,p_local_shape); } } } struct _RigidBody2DInOut { ObjectID id; int shape; int local_shape; }; void RigidBody2D::_direct_state_changed(Object *p_state) { //eh.. 
fuck #ifdef DEBUG_ENABLED state=p_state->cast_to<Physics2DDirectBodyState>(); #else state=(Physics2DDirectBodyState*)p_state; //trust it #endif if (contact_monitor) { //untag all int rc=0; for( Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.front();E;E=E->next()) { for(int i=0;i<E->get().shapes.size();i++) { E->get().shapes[i].tagged=false; rc++; } } _RigidBody2DInOut *toadd=(_RigidBody2DInOut*)alloca(state->get_contact_count()*sizeof(_RigidBody2DInOut)); int toadd_count=0;//state->get_contact_count(); RigidBody2D_RemoveAction *toremove=(RigidBody2D_RemoveAction*)alloca(rc*sizeof(RigidBody2D_RemoveAction)); int toremove_count=0; //put the ones to add for(int i=0;i<state->get_contact_count();i++) { ObjectID obj = state->get_contact_collider_id(i); int local_shape = state->get_contact_local_shape(i); int shape = state->get_contact_collider_shape(i); toadd[i].local_shape=local_shape; toadd[i].id=obj; toadd[i].shape=shape; // bool found=false; Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.find(obj); if (!E) { toadd_count++; continue; } ShapePair sp( shape,local_shape ); int idx = E->get().shapes.find(sp); if (idx==-1) { toadd_count++; continue; } E->get().shapes[idx].tagged=true; } //put the ones to remove for( Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.front();E;E=E->next()) { for(int i=0;i<E->get().shapes.size();i++) { if (!E->get().shapes[i].tagged) { toremove[toremove_count].body_id=E->key(); toremove[toremove_count].pair=E->get().shapes[i]; toremove_count++; } } } //process remotions for(int i=0;i<toremove_count;i++) { _body_inout(0,toremove[i].body_id,toremove[i].pair.body_shape,toremove[i].pair.local_shape); } //process aditions for(int i=0;i<toadd_count;i++) { _body_inout(1,toadd[i].id,toadd[i].shape,toadd[i].local_shape); } } set_block_transform_notify(true); // don't want notify (would feedback loop) if (mode!=MODE_KINEMATIC) set_global_transform(state->get_transform()); 
linear_velocity=state->get_linear_velocity(); angular_velocity=state->get_angular_velocity(); sleeping=state->is_sleeping(); if (get_script_instance()) get_script_instance()->call("_integrate_forces",state); set_block_transform_notify(false); // want it back state=NULL; } void RigidBody2D::set_mode(Mode p_mode) { mode=p_mode; switch(p_mode) { case MODE_RIGID: { Physics2DServer::get_singleton()->body_set_mode(get_rid(),Physics2DServer::BODY_MODE_RIGID); } break; case MODE_STATIC: { Physics2DServer::get_singleton()->body_set_mode(get_rid(),Physics2DServer::BODY_MODE_STATIC); } break; case MODE_KINEMATIC: { Physics2DServer::get_singleton()->body_set_mode(get_rid(),Physics2DServer::BODY_MODE_KINEMATIC); } break; case MODE_CHARACTER: { Physics2DServer::get_singleton()->body_set_mode(get_rid(),Physics2DServer::BODY_MODE_CHARACTER); } break; } } RigidBody2D::Mode RigidBody2D::get_mode() const{ return mode; } void RigidBody2D::set_mass(real_t p_mass){ ERR_FAIL_COND(p_mass<=0); mass=p_mass; _change_notify("mass"); _change_notify("weight"); Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_MASS,mass); } real_t RigidBody2D::get_mass() const{ return mass; } void RigidBody2D::set_weight(real_t p_weight){ set_mass(p_weight/9.8); } real_t RigidBody2D::get_weight() const{ return mass*9.8; } void RigidBody2D::set_friction(real_t p_friction){ ERR_FAIL_COND(p_friction<0 || p_friction>1); friction=p_friction; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_FRICTION,friction); } real_t RigidBody2D::get_friction() const{ return friction; } void RigidBody2D::set_bounce(real_t p_bounce){ ERR_FAIL_COND(p_bounce<0 || p_bounce>1); bounce=p_bounce; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_BOUNCE,bounce); } real_t RigidBody2D::get_bounce() const{ return bounce; } void RigidBody2D::set_gravity_scale(real_t p_gravity_scale){ gravity_scale=p_gravity_scale; 
Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_GRAVITY_SCALE,gravity_scale); } real_t RigidBody2D::get_gravity_scale() const{ return gravity_scale; } void RigidBody2D::set_linear_damp(real_t p_linear_damp){ ERR_FAIL_COND(p_linear_damp<-1); linear_damp=p_linear_damp; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_LINEAR_DAMP,linear_damp); } real_t RigidBody2D::get_linear_damp() const{ return linear_damp; } void RigidBody2D::set_angular_damp(real_t p_angular_damp){ ERR_FAIL_COND(p_angular_damp<-1); angular_damp=p_angular_damp; Physics2DServer::get_singleton()->body_set_param(get_rid(),Physics2DServer::BODY_PARAM_ANGULAR_DAMP,angular_damp); } real_t RigidBody2D::get_angular_damp() const{ return angular_damp; } void RigidBody2D::set_axis_velocity(const Vector2& p_axis) { Vector2 v = state? state->get_linear_velocity() : linear_velocity; Vector2 axis = p_axis.normalized(); v-=axis*axis.dot(v); v+=p_axis; if (state) { set_linear_velocity(v); } else { Physics2DServer::get_singleton()->body_set_axis_velocity(get_rid(),p_axis); linear_velocity=v; } } void RigidBody2D::set_linear_velocity(const Vector2& p_velocity){ linear_velocity=p_velocity; if (state) state->set_linear_velocity(linear_velocity); else { Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_LINEAR_VELOCITY,linear_velocity); } } Vector2 RigidBody2D::get_linear_velocity() const{ return linear_velocity; } void RigidBody2D::set_angular_velocity(real_t p_velocity){ angular_velocity=p_velocity; if (state) state->set_angular_velocity(angular_velocity); else Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_ANGULAR_VELOCITY,angular_velocity); } real_t RigidBody2D::get_angular_velocity() const{ return angular_velocity; } void RigidBody2D::set_use_custom_integrator(bool p_enable){ if (custom_integrator==p_enable) return; custom_integrator=p_enable; 
Physics2DServer::get_singleton()->body_set_omit_force_integration(get_rid(),p_enable); } bool RigidBody2D::is_using_custom_integrator(){ return custom_integrator; } void RigidBody2D::set_sleeping(bool p_sleeping) { sleeping=p_sleeping; Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_SLEEPING,sleeping); } void RigidBody2D::set_can_sleep(bool p_active) { can_sleep=p_active; Physics2DServer::get_singleton()->body_set_state(get_rid(),Physics2DServer::BODY_STATE_CAN_SLEEP,p_active); } bool RigidBody2D::is_able_to_sleep() const { return can_sleep; } bool RigidBody2D::is_sleeping() const { return sleeping; } void RigidBody2D::set_max_contacts_reported(int p_amount) { max_contacts_reported=p_amount; Physics2DServer::get_singleton()->body_set_max_contacts_reported(get_rid(),p_amount); } int RigidBody2D::get_max_contacts_reported() const{ return max_contacts_reported; } void RigidBody2D::apply_impulse(const Vector2& p_pos, const Vector2& p_impulse) { Physics2DServer::get_singleton()->body_apply_impulse(get_rid(),p_pos,p_impulse); } void RigidBody2D::set_applied_force(const Vector2& p_force) { Physics2DServer::get_singleton()->body_set_applied_force(get_rid(), p_force); }; Vector2 RigidBody2D::get_applied_force() const { return Physics2DServer::get_singleton()->body_get_applied_force(get_rid()); }; void RigidBody2D::set_continuous_collision_detection_mode(CCDMode p_mode) { ccd_mode=p_mode; Physics2DServer::get_singleton()->body_set_continuous_collision_detection_mode(get_rid(),Physics2DServer::CCDMode(p_mode)); } RigidBody2D::CCDMode RigidBody2D::get_continuous_collision_detection_mode() const { return ccd_mode; } Array RigidBody2D::get_colliding_bodies() const { ERR_FAIL_COND_V(!contact_monitor,Array()); Array ret; ret.resize(contact_monitor->body_map.size()); int idx=0; for (const Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.front();E;E=E->next()) { Object *obj = ObjectDB::get_instance(E->key()); if (!obj) { 
ret.resize( ret.size() -1 ); //ops } else { ret[idx++]=obj; } } return ret; } void RigidBody2D::set_contact_monitor(bool p_enabled) { if (p_enabled==is_contact_monitor_enabled()) return; if (!p_enabled) { for(Map<ObjectID,BodyState>::Element *E=contact_monitor->body_map.front();E;E=E->next()) { //clean up mess } memdelete( contact_monitor ); contact_monitor=NULL; } else { contact_monitor = memnew( ContactMonitor ); } } bool RigidBody2D::is_contact_monitor_enabled() const { return contact_monitor!=NULL; } void RigidBody2D::_bind_methods() { ObjectTypeDB::bind_method(_MD("set_mode","mode"),&RigidBody2D::set_mode); ObjectTypeDB::bind_method(_MD("get_mode"),&RigidBody2D::get_mode); ObjectTypeDB::bind_method(_MD("set_mass","mass"),&RigidBody2D::set_mass); ObjectTypeDB::bind_method(_MD("get_mass"),&RigidBody2D::get_mass); ObjectTypeDB::bind_method(_MD("set_weight","weight"),&RigidBody2D::set_weight); ObjectTypeDB::bind_method(_MD("get_weight"),&RigidBody2D::get_weight); ObjectTypeDB::bind_method(_MD("set_friction","friction"),&RigidBody2D::set_friction); ObjectTypeDB::bind_method(_MD("get_friction"),&RigidBody2D::get_friction); ObjectTypeDB::bind_method(_MD("set_bounce","bounce"),&RigidBody2D::set_bounce); ObjectTypeDB::bind_method(_MD("get_bounce"),&RigidBody2D::get_bounce); ObjectTypeDB::bind_method(_MD("set_gravity_scale","gravity_scale"),&RigidBody2D::set_gravity_scale); ObjectTypeDB::bind_method(_MD("get_gravity_scale"),&RigidBody2D::get_gravity_scale); ObjectTypeDB::bind_method(_MD("set_linear_damp","linear_damp"),&RigidBody2D::set_linear_damp); ObjectTypeDB::bind_method(_MD("get_linear_damp"),&RigidBody2D::get_linear_damp); ObjectTypeDB::bind_method(_MD("set_angular_damp","angular_damp"),&RigidBody2D::set_angular_damp); ObjectTypeDB::bind_method(_MD("get_angular_damp"),&RigidBody2D::get_angular_damp); ObjectTypeDB::bind_method(_MD("set_linear_velocity","linear_velocity"),&RigidBody2D::set_linear_velocity); 
ObjectTypeDB::bind_method(_MD("get_linear_velocity"),&RigidBody2D::get_linear_velocity); ObjectTypeDB::bind_method(_MD("set_angular_velocity","angular_velocity"),&RigidBody2D::set_angular_velocity); ObjectTypeDB::bind_method(_MD("get_angular_velocity"),&RigidBody2D::get_angular_velocity); ObjectTypeDB::bind_method(_MD("set_max_contacts_reported","amount"),&RigidBody2D::set_max_contacts_reported); ObjectTypeDB::bind_method(_MD("get_max_contacts_reported"),&RigidBody2D::get_max_contacts_reported); ObjectTypeDB::bind_method(_MD("set_use_custom_integrator","enable"),&RigidBody2D::set_use_custom_integrator); ObjectTypeDB::bind_method(_MD("is_using_custom_integrator"),&RigidBody2D::is_using_custom_integrator); ObjectTypeDB::bind_method(_MD("set_contact_monitor","enabled"),&RigidBody2D::set_contact_monitor); ObjectTypeDB::bind_method(_MD("is_contact_monitor_enabled"),&RigidBody2D::is_contact_monitor_enabled); ObjectTypeDB::bind_method(_MD("set_continuous_collision_detection_mode","mode"),&RigidBody2D::set_continuous_collision_detection_mode); ObjectTypeDB::bind_method(_MD("get_continuous_collision_detection_mode"),&RigidBody2D::get_continuous_collision_detection_mode); ObjectTypeDB::bind_method(_MD("set_axis_velocity","axis_velocity"),&RigidBody2D::set_axis_velocity); ObjectTypeDB::bind_method(_MD("apply_impulse","pos","impulse"),&RigidBody2D::apply_impulse); ObjectTypeDB::bind_method(_MD("set_applied_force","force"),&RigidBody2D::set_applied_force); ObjectTypeDB::bind_method(_MD("get_applied_force"),&RigidBody2D::get_applied_force); ObjectTypeDB::bind_method(_MD("set_sleeping","sleeping"),&RigidBody2D::set_sleeping); ObjectTypeDB::bind_method(_MD("is_sleeping"),&RigidBody2D::is_sleeping); ObjectTypeDB::bind_method(_MD("set_can_sleep","able_to_sleep"),&RigidBody2D::set_can_sleep); ObjectTypeDB::bind_method(_MD("is_able_to_sleep"),&RigidBody2D::is_able_to_sleep); ObjectTypeDB::bind_method(_MD("_direct_state_changed"),&RigidBody2D::_direct_state_changed); 
ObjectTypeDB::bind_method(_MD("_body_enter_tree"),&RigidBody2D::_body_enter_tree); ObjectTypeDB::bind_method(_MD("_body_exit_tree"),&RigidBody2D::_body_exit_tree); ObjectTypeDB::bind_method(_MD("get_colliding_bodies"),&RigidBody2D::get_colliding_bodies); BIND_VMETHOD(MethodInfo("_integrate_forces",PropertyInfo(Variant::OBJECT,"state:Physics2DDirectBodyState"))); ADD_PROPERTY( PropertyInfo(Variant::INT,"mode",PROPERTY_HINT_ENUM,"Rigid,Static,Character,Kinematic"),_SCS("set_mode"),_SCS("get_mode")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"mass",PROPERTY_HINT_EXP_RANGE,"0.01,65535,0.01"),_SCS("set_mass"),_SCS("get_mass")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"weight",PROPERTY_HINT_EXP_RANGE,"0.01,65535,0.01",PROPERTY_USAGE_EDITOR),_SCS("set_weight"),_SCS("get_weight")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"friction",PROPERTY_HINT_RANGE,"0,1,0.01"),_SCS("set_friction"),_SCS("get_friction")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"bounce",PROPERTY_HINT_RANGE,"0,1,0.01"),_SCS("set_bounce"),_SCS("get_bounce")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"gravity_scale",PROPERTY_HINT_RANGE,"-128,128,0.01"),_SCS("set_gravity_scale"),_SCS("get_gravity_scale")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"custom_integrator"),_SCS("set_use_custom_integrator"),_SCS("is_using_custom_integrator")); ADD_PROPERTY( PropertyInfo(Variant::INT,"continuous_cd",PROPERTY_HINT_ENUM,"Disabled,Cast Ray,Cast Shape"),_SCS("set_continuous_collision_detection_mode"),_SCS("get_continuous_collision_detection_mode")); ADD_PROPERTY( PropertyInfo(Variant::INT,"contacts_reported"),_SCS("set_max_contacts_reported"),_SCS("get_max_contacts_reported")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"contact_monitor"),_SCS("set_contact_monitor"),_SCS("is_contact_monitor_enabled")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"sleeping"),_SCS("set_sleeping"),_SCS("is_sleeping")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"can_sleep"),_SCS("set_can_sleep"),_SCS("is_able_to_sleep")); ADD_PROPERTY( 
PropertyInfo(Variant::VECTOR2,"velocity/linear"),_SCS("set_linear_velocity"),_SCS("get_linear_velocity")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"velocity/angular"),_SCS("set_angular_velocity"),_SCS("get_angular_velocity")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"damp_override/linear",PROPERTY_HINT_RANGE,"-1,128,0.01"),_SCS("set_linear_damp"),_SCS("get_linear_damp")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"damp_override/angular",PROPERTY_HINT_RANGE,"-1,128,0.01"),_SCS("set_angular_damp"),_SCS("get_angular_damp")); ADD_SIGNAL( MethodInfo("body_enter_shape",PropertyInfo(Variant::INT,"body_id"),PropertyInfo(Variant::OBJECT,"body"),PropertyInfo(Variant::INT,"body_shape"),PropertyInfo(Variant::INT,"local_shape"))); ADD_SIGNAL( MethodInfo("body_exit_shape",PropertyInfo(Variant::INT,"body_id"),PropertyInfo(Variant::OBJECT,"body"),PropertyInfo(Variant::INT,"body_shape"),PropertyInfo(Variant::INT,"local_shape"))); ADD_SIGNAL( MethodInfo("body_enter",PropertyInfo(Variant::OBJECT,"body"))); ADD_SIGNAL( MethodInfo("body_exit",PropertyInfo(Variant::OBJECT,"body"))); BIND_CONSTANT( MODE_STATIC ); BIND_CONSTANT( MODE_KINEMATIC ); BIND_CONSTANT( MODE_RIGID ); BIND_CONSTANT( MODE_CHARACTER ); BIND_CONSTANT( CCD_MODE_DISABLED ); BIND_CONSTANT( CCD_MODE_CAST_RAY ); BIND_CONSTANT( CCD_MODE_CAST_SHAPE ); } RigidBody2D::RigidBody2D() : PhysicsBody2D(Physics2DServer::BODY_MODE_RIGID) { mode=MODE_RIGID; bounce=0; mass=1; friction=1; gravity_scale=1; linear_damp=-1; angular_damp=-1; max_contacts_reported=0; state=NULL; angular_velocity=0; sleeping=false; ccd_mode=CCD_MODE_DISABLED; custom_integrator=false; contact_monitor=NULL; can_sleep=true; Physics2DServer::get_singleton()->body_set_force_integration_callback(get_rid(),this,"_direct_state_changed"); } RigidBody2D::~RigidBody2D() { if (contact_monitor) memdelete( contact_monitor ); } ////////////////////////// Variant KinematicBody2D::_get_collider() const { ObjectID oid=get_collider(); if (oid==0) return Variant(); Object *obj 
= ObjectDB::get_instance(oid); if (!obj) return Variant(); Reference *ref = obj->cast_to<Reference>(); if (ref) { return Ref<Reference>(ref); } return obj; } bool KinematicBody2D::_ignores_mode(Physics2DServer::BodyMode p_mode) const { switch(p_mode) { case Physics2DServer::BODY_MODE_STATIC: return !collide_static; case Physics2DServer::BODY_MODE_KINEMATIC: return !collide_kinematic; case Physics2DServer::BODY_MODE_RIGID: return !collide_rigid; case Physics2DServer::BODY_MODE_CHARACTER: return !collide_character; } return true; } Vector2 KinematicBody2D::move(const Vector2& p_motion) { //give me back regular physics engine logic //this is madness //and most people using this function will think //what it does is simpler than using physics //this took about a week to get right.. //but is it right? who knows at this point.. colliding=false; ERR_FAIL_COND_V(!is_inside_tree(),Vector2()); Physics2DDirectSpaceState *dss = Physics2DServer::get_singleton()->space_get_direct_state(get_world_2d()->get_space()); ERR_FAIL_COND_V(!dss,Vector2()); const int max_shapes=32; Vector2 sr[max_shapes*2]; int res_shapes; Set<RID> exclude; exclude.insert(get_rid()); //recover first int recover_attempts=4; bool collided=false; uint32_t mask=0; if (collide_static) mask|=Physics2DDirectSpaceState::TYPE_MASK_STATIC_BODY; if (collide_kinematic) mask|=Physics2DDirectSpaceState::TYPE_MASK_KINEMATIC_BODY; if (collide_rigid) mask|=Physics2DDirectSpaceState::TYPE_MASK_RIGID_BODY; if (collide_character) mask|=Physics2DDirectSpaceState::TYPE_MASK_CHARACTER_BODY; // print_line("motion: "+p_motion+" margin: "+rtos(margin)); //print_line("margin: "+rtos(margin)); do { //motion recover for(int i=0;i<get_shape_count();i++) { if (is_shape_set_as_trigger(i)) continue; if (dss->collide_shape(get_shape(i)->get_rid(), get_global_transform() * get_shape_transform(i),Vector2(),margin,sr,max_shapes,res_shapes,exclude,get_layer_mask(),mask)) collided=true; } if (!collided) break; Vector2 recover_motion; for(int 
i=0;i<res_shapes;i++) { Vector2 a = sr[i*2+0]; Vector2 b = sr[i*2+1]; float d = a.distance_to(b); //if (d<margin) /// continue; recover_motion+=(b-a)*0.2; } if (recover_motion==Vector2()) { collided=false; break; } Matrix32 gt = get_global_transform(); gt.elements[2]+=recover_motion; set_global_transform(gt); recover_attempts--; } while (recover_attempts); //move second float safe = 1.0; float unsafe = 1.0; int best_shape=-1; for(int i=0;i<get_shape_count();i++) { if (is_shape_set_as_trigger(i)) continue; float lsafe,lunsafe; bool valid = dss->cast_motion(get_shape(i)->get_rid(), get_global_transform() * get_shape_transform(i), p_motion, 0,lsafe,lunsafe,exclude,get_layer_mask(),mask); //print_line("shape: "+itos(i)+" travel:"+rtos(ltravel)); if (!valid) { safe=0; unsafe=0; best_shape=i; //sadly it's the best break; } if (lsafe==1.0) { continue; } if (lsafe < safe) { safe=lsafe; unsafe=lunsafe; best_shape=i; } } //print_line("best shape: "+itos(best_shape)+" motion "+p_motion); if (safe>=1) { //not collided colliding=false; } else { //it collided, let's get the rest info in unsafe advance Matrix32 ugt = get_global_transform(); ugt.elements[2]+=p_motion*unsafe; Physics2DDirectSpaceState::ShapeRestInfo rest_info; bool c2 = dss->rest_info(get_shape(best_shape)->get_rid(), ugt*get_shape_transform(best_shape), Vector2(), margin,&rest_info,exclude,get_layer_mask(),mask); if (!c2) { //should not happen, but floating point precision is so weird.. 
colliding=false; } else { //print_line("Travel: "+rtos(travel)); colliding=true; collision=rest_info.point; normal=rest_info.normal; collider=rest_info.collider_id; collider_vel=rest_info.linear_velocity; collider_shape=rest_info.shape; collider_metadata=rest_info.metadata; } } Vector2 motion=p_motion*safe; Matrix32 gt = get_global_transform(); gt.elements[2]+=motion; set_global_transform(gt); return p_motion-motion; } Vector2 KinematicBody2D::move_to(const Vector2& p_position) { return move(p_position-get_global_pos()); } bool KinematicBody2D::can_move_to(const Vector2& p_position, bool p_discrete) { ERR_FAIL_COND_V(!is_inside_tree(),false); Physics2DDirectSpaceState *dss = Physics2DServer::get_singleton()->space_get_direct_state(get_world_2d()->get_space()); ERR_FAIL_COND_V(!dss,false); uint32_t mask=0; if (collide_static) mask|=Physics2DDirectSpaceState::TYPE_MASK_STATIC_BODY; if (collide_kinematic) mask|=Physics2DDirectSpaceState::TYPE_MASK_KINEMATIC_BODY; if (collide_rigid) mask|=Physics2DDirectSpaceState::TYPE_MASK_RIGID_BODY; if (collide_character) mask|=Physics2DDirectSpaceState::TYPE_MASK_CHARACTER_BODY; Vector2 motion = p_position-get_global_pos(); Matrix32 xform=get_global_transform(); if (p_discrete) { xform.elements[2]+=motion; motion=Vector2(); } Set<RID> exclude; exclude.insert(get_rid()); //fill exclude list.. 
for(int i=0;i<get_shape_count();i++) { bool col = dss->intersect_shape(get_shape(i)->get_rid(), xform * get_shape_transform(i),motion,0,NULL,0,exclude,get_layer_mask(),mask); if (col) return false; } return true; } bool KinematicBody2D::is_colliding() const { ERR_FAIL_COND_V(!is_inside_tree(),false); return colliding; } Vector2 KinematicBody2D::get_collision_pos() const { ERR_FAIL_COND_V(!colliding,Vector2()); return collision; } Vector2 KinematicBody2D::get_collision_normal() const { ERR_FAIL_COND_V(!colliding,Vector2()); return normal; } Vector2 KinematicBody2D::get_collider_velocity() const { return collider_vel; } ObjectID KinematicBody2D::get_collider() const { ERR_FAIL_COND_V(!colliding,0); return collider; } int KinematicBody2D::get_collider_shape() const { ERR_FAIL_COND_V(!colliding,0); return collider_shape; } Variant KinematicBody2D::get_collider_metadata() const { ERR_FAIL_COND_V(!colliding,0); return collider_metadata; } void KinematicBody2D::set_collide_with_static_bodies(bool p_enable) { collide_static=p_enable; } bool KinematicBody2D::can_collide_with_static_bodies() const { return collide_static; } void KinematicBody2D::set_collide_with_rigid_bodies(bool p_enable) { collide_rigid=p_enable; } bool KinematicBody2D::can_collide_with_rigid_bodies() const { return collide_rigid; } void KinematicBody2D::set_collide_with_kinematic_bodies(bool p_enable) { collide_kinematic=p_enable; } bool KinematicBody2D::can_collide_with_kinematic_bodies() const { return collide_kinematic; } void KinematicBody2D::set_collide_with_character_bodies(bool p_enable) { collide_character=p_enable; } bool KinematicBody2D::can_collide_with_character_bodies() const { return collide_character; } void KinematicBody2D::set_collision_margin(float p_margin) { margin=p_margin; } float KinematicBody2D::get_collision_margin() const{ return margin; } void KinematicBody2D::_bind_methods() { ObjectTypeDB::bind_method(_MD("move","rel_vec"),&KinematicBody2D::move); 
ObjectTypeDB::bind_method(_MD("move_to","position"),&KinematicBody2D::move_to); ObjectTypeDB::bind_method(_MD("can_move_to","position"),&KinematicBody2D::can_move_to); ObjectTypeDB::bind_method(_MD("is_colliding"),&KinematicBody2D::is_colliding); ObjectTypeDB::bind_method(_MD("get_collision_pos"),&KinematicBody2D::get_collision_pos); ObjectTypeDB::bind_method(_MD("get_collision_normal"),&KinematicBody2D::get_collision_normal); ObjectTypeDB::bind_method(_MD("get_collider_velocity"),&KinematicBody2D::get_collider_velocity); ObjectTypeDB::bind_method(_MD("get_collider:Object"),&KinematicBody2D::_get_collider); ObjectTypeDB::bind_method(_MD("get_collider_shape"),&KinematicBody2D::get_collider_shape); ObjectTypeDB::bind_method(_MD("get_collider_metadata"),&KinematicBody2D::get_collider_metadata); ObjectTypeDB::bind_method(_MD("set_collide_with_static_bodies","enable"),&KinematicBody2D::set_collide_with_static_bodies); ObjectTypeDB::bind_method(_MD("can_collide_with_static_bodies"),&KinematicBody2D::can_collide_with_static_bodies); ObjectTypeDB::bind_method(_MD("set_collide_with_kinematic_bodies","enable"),&KinematicBody2D::set_collide_with_kinematic_bodies); ObjectTypeDB::bind_method(_MD("can_collide_with_kinematic_bodies"),&KinematicBody2D::can_collide_with_kinematic_bodies); ObjectTypeDB::bind_method(_MD("set_collide_with_rigid_bodies","enable"),&KinematicBody2D::set_collide_with_rigid_bodies); ObjectTypeDB::bind_method(_MD("can_collide_with_rigid_bodies"),&KinematicBody2D::can_collide_with_rigid_bodies); ObjectTypeDB::bind_method(_MD("set_collide_with_character_bodies","enable"),&KinematicBody2D::set_collide_with_character_bodies); ObjectTypeDB::bind_method(_MD("can_collide_with_character_bodies"),&KinematicBody2D::can_collide_with_character_bodies); ObjectTypeDB::bind_method(_MD("set_collision_margin","pixels"),&KinematicBody2D::set_collision_margin); ObjectTypeDB::bind_method(_MD("get_collision_margin","pixels"),&KinematicBody2D::get_collision_margin); 
ADD_PROPERTY( PropertyInfo(Variant::BOOL,"collide_with/static"),_SCS("set_collide_with_static_bodies"),_SCS("can_collide_with_static_bodies")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"collide_with/kinematic"),_SCS("set_collide_with_kinematic_bodies"),_SCS("can_collide_with_kinematic_bodies")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"collide_with/rigid"),_SCS("set_collide_with_rigid_bodies"),_SCS("can_collide_with_rigid_bodies")); ADD_PROPERTY( PropertyInfo(Variant::BOOL,"collide_with/character"),_SCS("set_collide_with_character_bodies"),_SCS("can_collide_with_character_bodies")); ADD_PROPERTY( PropertyInfo(Variant::REAL,"collision/margin",PROPERTY_HINT_RANGE,"0.001,256,0.001"),_SCS("set_collision_margin"),_SCS("get_collision_margin")); } KinematicBody2D::KinematicBody2D() : PhysicsBody2D(Physics2DServer::BODY_MODE_KINEMATIC){ collide_static=true; collide_rigid=true; collide_kinematic=true; collide_character=true; colliding=false; collider=0; collider_shape=0; margin=0.08; } KinematicBody2D::~KinematicBody2D() { }
30.763934
205
0.752132
[ "object", "shape" ]
241452ec2f6b0e6f95e70fe44e4d0e680e763f40
4,732
hpp
C++
multiple_board/main/communication.hpp
jsbruglie/scdtr
114e6574bf7cf944ef58aa0ee60e58197459f42f
[ "MIT" ]
2
2018-01-11T09:27:45.000Z
2018-01-13T15:31:12.000Z
multiple_board/main/communication.hpp
jsbruglie/scdtr
114e6574bf7cf944ef58aa0ee60e58197459f42f
[ "MIT" ]
null
null
null
multiple_board/main/communication.hpp
jsbruglie/scdtr
114e6574bf7cf944ef58aa0ee60e58197459f42f
[ "MIT" ]
1
2018-10-22T21:44:01.000Z
2018-10-22T21:44:01.000Z
/** * @file multiple_board/main/communication.hpp * * @brief I2C communication protocol headers * * Defines I2C communication protocol. * Implements functions to send supported packets. * * Messages can be exchanged using essentially 4 types of packets: * 1. Header-only, e.g ACKs and RESet packets; * 2. Consensus; * 3. Info. * * Header [ Id | Type ] * * Consensus [ Header | d_i_1 d_i_2 ... d_i_N ], * CON (during consensus) d_i is the solution vector for a given iteration * ICO (before consessus) d_i is the lower_bound vector * * Info [ Header | lux | dc | lb | ext | ref | occ ] * * @author João Borrego */ #ifndef INO_COMMUNICATION_HPP #define INO_COMMUNICATION_HPP #include <Arduino.h> #include <Wire.h> #include "constants.hpp" /** Packet maximum size (bytes) - imposed by Wire.h */ const size_t MAX_SIZE = 32; /** Packet header size (bytes) */ const size_t HEADER_SIZE = sizeof(uint8_t) * 2; /** Consensus data size (bytes) */ const size_t DATA_CONSENSUS_SIZE = sizeof(float) * N; /** Information data size (bytes) */ const size_t DATA_INFO_SIZE = sizeof(float) * 5 + sizeof(uint8_t); /* Packet types */ /** SYNchronise */ const byte SYN = 0x00; /** ACKnowledgement */ const byte ACK = 0x01; /** CONsensus */ const byte CON = 0x02; /** Initiate COnsensus */ const byte ICO = 0x04; /** INFo */ const byte INF = 0x05; /** Distributed ON */ const byte DON = 0x06; /** Distributed OFf */ const byte DOF = 0x07; /** RESet */ const byte RES = 0x0F; /* Packet offsets */ /** ID header field offset */ const size_t ID = 0; /** TYPE header field offset */ const size_t TYPE = 1; namespace Communication { /** * @brief Converts float to byte array and vice-versa. * * As seen in <a href="http://mbed.org/forum/helloworld/topic/2053/">link</a> */ typedef union float_to_bytes_t{ /** Float variable */ float f; /** Float byte array */ byte b[sizeof(float)]; } float_bytes; /* Functions */ /** * @brief Sets up important variables. 
* * @param[in] id The node identifier * @param[in] reset_ptr The reset pointer * @param[in] changed_ptr The changed pointer * @param[in] ref_ptr The reference pointer * @param[in] lower_bound_ptr The lower bound pointer * @param[in] occupancy_ptr The occupancy pointer * @param[in] distributed_ptr The distributed control flag pointer */ void setup( const uint8_t *id, const bool *reset_ptr, const bool *changed_ptr, const float *ref_ptr, const float *lower_bound_ptr, const bool *occupancy_ptr, const bool *distributed_ptr); /** * @brief Empty callback function to ignore I2C messages. * * @param[in] bytes The bytes to be read */ void nop(int bytes); /** * @brief Sends a header-only packet. * * @param[in] dest The destination * @param[in] type The type */ void sendPacket(uint8_t dest, uint8_t type); /** * @brief Reads a packet. * * @param id_ The identifier * @param type The type * @param[in] size The size * @param packet The packet */ void readPacket(byte *id_, byte *type, size_t size, byte *packet); /** * @brief Sends a consensus packet * * @param[in] dest The destination * @param[in] start Whether consensus has started * @param d_i The solution or lower_bound arrays accordingly */ void sendConsensus(uint8_t dest, bool start, float *d_i); /** * @brief Reads a consensus packet. * * @param buffer The message buffer * @param out The array of read floats */ void readConsensus(byte *buffer, float *out); /** * @brief Sends an information packet * * @param[in] dest The destination * @param[in] lux The lux * @param[in] duty_cycle The duty cycle * @param[in] lower_bound The lower bound * @param[in] ext The external lux * @param[in] ref The reference * @param[in] occupancy The occupancy */ void sendInfo( uint8_t dest, float lux, float duty_cycle, float lower_bound, float ext, float ref, bool occupancy); /** * @brief Callback function for processing received data * * @param[in] bytes The bytes */ void onReceive(int bytes); } #endif
26.886364
81
0.584531
[ "vector" ]
24153393d3664f10c8df062777ae7d898c34a673
7,776
cpp
C++
tests/1_math/102_vec3.cpp
mozman/ezdxf.cpp
09295f7dafe2a76253807fccd92560fd45eb97a5
[ "MIT" ]
2
2021-02-10T08:14:59.000Z
2021-12-09T08:55:01.000Z
tests/1_math/102_vec3.cpp
mozman/ezdxf.cpp
09295f7dafe2a76253807fccd92560fd45eb97a5
[ "MIT" ]
null
null
null
tests/1_math/102_vec3.cpp
mozman/ezdxf.cpp
09295f7dafe2a76253807fccd92560fd45eb97a5
[ "MIT" ]
1
2021-02-10T08:25:20.000Z
2021-02-10T08:25:20.000Z
// Copyright (c) 2020, Manfred Moitzi // License: MIT License // #include <catch2/catch.hpp> #include <vector> #include "ezdxf/math/vec3.hpp" using ezdxf::math::Vec3; TEST_CASE("Test Vec3 basic usage.", "[math][vec3]") { SECTION("Test access to data members.") { auto v = Vec3{1, 2, 3}; REQUIRE(v.x() == 1.0); REQUIRE(v.y() == 2.0); REQUIRE(v.z() == 3.0); } SECTION("Test tuple deconstruction of data members.") { auto v = Vec3{1, 2, 3}; auto[x, y, z] = v.tuple(); REQUIRE(x == 1.0); REQUIRE(y == 2.0); REQUIRE(z == 3.0); } SECTION("Test Vec3 as variable.") { auto sum = Vec3{}; for (auto d: std::vector<double>{1, 2, 3}) { // Operator "+=" is not implemented, because Vec3() is designed as // immutable data type: sum = sum + Vec3(d, d, d); } REQUIRE(sum == Vec3(6, 6, 6)); } SECTION("Test Vec3 usage in a vector container.") { auto v = Vec3{1, 2, 3}; auto vertices = std::vector<Vec3>{}; vertices.push_back(v); vertices.push_back(v); vertices.push_back(v); REQUIRE(vertices.size() == 3); REQUIRE(vertices[0] == v); // Remove last vertex from container: auto v2 = vertices[2]; vertices.pop_back(); REQUIRE(v2 == v); } } TEST_CASE("Test vector comparisons.", "[math][vec3]") { // See also testing of ezdxf::math::is_close in test_math_base.cpp SECTION ("Test comparison operators near 0.0") { // Default absolute tolerance is 1e-12. auto v = Vec3{}; const double x = 1e-12; REQUIRE(v == Vec3(0, 0, 0)); REQUIRE(v == Vec3(x, 0, 0)); REQUIRE(v == Vec3(0, x, 0)); REQUIRE(v == Vec3(0, 0, x)); REQUIRE(v.is_close(Vec3(x, x, x)) == true); // Leaving the tolerance zone: const double y = 1e-11; REQUIRE(v != Vec3(y, 0, 0)); REQUIRE(v != Vec3(0, y, 0)); REQUIRE(v != Vec3(0, 0, y)); REQUIRE(v.is_close(Vec3(y, y, y)) == false); } SECTION ("Test is_close_zero()") { // Default absolute tolerance is 1e-12. 
const double x = 1e-12; REQUIRE(Vec3().is_close_zero() == true); REQUIRE(Vec3(x, 0, 0).is_close_zero() == true); REQUIRE(Vec3(0, x, 0).is_close_zero() == true); REQUIRE(Vec3(0, 0, x).is_close_zero() == true); REQUIRE(Vec3(x, x, x).is_close_zero() == true); } SECTION ("Test not is_close_zero()") { // Default absolute tolerance is 1e-12. const double x = 1e-10; REQUIRE(Vec3(x, 0, 0).is_close_zero() == false); REQUIRE(Vec3(0, x, 0).is_close_zero() == false); REQUIRE(Vec3(0, 0, x).is_close_zero() == false); REQUIRE(Vec3(x, x, x).is_close_zero() == false); } SECTION ("Test is_close_zero() with user defined tolerance") { const double usr_tol = 1e-9; const double x = 1e-10; const double y = 1e-8; REQUIRE(Vec3(x, x, x).is_close_zero(usr_tol) == true); REQUIRE(Vec3(usr_tol, 0, 0).is_close_zero(usr_tol) == true); REQUIRE(Vec3(y, y, y).is_close_zero(usr_tol) == false); } SECTION ("Test comparison operators near 1.0") { // Default relative tolerance is 1e-9: auto v = Vec3{1, 0, 0}; REQUIRE(v == Vec3(1 + 1e-10, 0, 0)); REQUIRE(v.is_close(Vec3(1 + 1e-10, 0, 0)) == true); // This should be true, but floating point imprecision shows up: REQUIRE(v != Vec3(1 + 1e-9, 0, 0)); // Leaving the tolerance zone: REQUIRE(v != Vec3(1 + 1e-8, 0, 0)); REQUIRE(v.is_close(Vec3(1 + 1e-8, 0, 0)) == false); } SECTION ("Test is_close() with user defined tolerance.") { // Default relative tolerance is 1e-9: auto v = Vec3{1, 1, 1}; const double usr_tol = 1e-8; const double in = 1.0 + 1e-9; // 'in' the tolerance zone const double out = 1.0 + 1e-7; // 'out' the tolerance zone REQUIRE(v.is_close(Vec3(in, 1, 1), usr_tol) == true); REQUIRE(v.is_close(Vec3(1, in, 1), usr_tol) == true); REQUIRE(v.is_close(Vec3(1, 1, in), usr_tol) == true); REQUIRE(v.is_close(Vec3(in, in, in), usr_tol) == true); REQUIRE(v.is_close(Vec3(1 + usr_tol, 1, 1), usr_tol) == true); // Leaving the tolerance zone: REQUIRE(v.is_close(Vec3(out, 1, 1), usr_tol) == false); REQUIRE(v.is_close(Vec3(1, out, 1), usr_tol) == false); 
REQUIRE(v.is_close(Vec3(1, 1, out), usr_tol) == false); REQUIRE(v.is_close(Vec3(out, out, out), usr_tol) == false); } } TEST_CASE("Test math operators", "[math][vec3]") { auto v1 = Vec3{1, 2, 3}; auto v2 = Vec3{4, 5, 6}; SECTION("Test add operator '+' ") { REQUIRE(v1 + v2 == Vec3{5, 7, 9}); REQUIRE(v1 + Vec3{} == v1); } SECTION("Test subtract operator '-' ") { REQUIRE(v2 - v1 == Vec3{3, 3, 3}); REQUIRE((v1 - v1) == Vec3{}); REQUIRE((v1 - v1).is_close_zero()); } SECTION("Test scalar multiply operator '*' ") { REQUIRE(v1 * 2 == Vec3{2, 4, 6}); REQUIRE(2 * v1 == Vec3{2, 4, 6}); REQUIRE(v1 * 0 == Vec3{}); } } TEST_CASE("Test member functions", "[math][vec3]") { SECTION("Test from_radians()") { REQUIRE(Vec3::from_radians(0) == Vec3{1, 0, 0}); REQUIRE(Vec3::from_radians(ezdxf::math::kDeg2Rad * 45.0) == Vec3{0.7071067811865476, 0.7071067811865476, 0.0}); } SECTION("Test from_radians() with scaling") { REQUIRE(Vec3::from_radians(0, 5) == Vec3{5, 0, 0}); REQUIRE(Vec3::from_radians(ezdxf::math::kPi2, 3) == Vec3{0, 3, 0}); } SECTION("Test magnitude2()") { REQUIRE(Vec3{2, 0, 0}.magnitude2() == 4.0); REQUIRE(Vec3{2, 2, 0}.magnitude2() == 8.0); REQUIRE(Vec3{2, 2, 2}.magnitude2() == 12.0); } SECTION("Test magnitude()") { REQUIRE(Vec3{2, 0, 0}.magnitude() == 2.0); REQUIRE(Vec3{0, 2, 0}.magnitude() == 2.0); REQUIRE(Vec3{0, 0, 2}.magnitude() == 2.0); REQUIRE(Vec3{3, 4, 5}.magnitude() == 7.0710678118654755); REQUIRE(Vec3{3, 4, 5}.magnitude() == Vec3{-3, -4, -5}.magnitude()); } SECTION("Test normalize()") { REQUIRE(Vec3{2, 0, 0}.normalize() == Vec3{1, 0, 0}); REQUIRE(Vec3{0, 3, 0}.normalize() == Vec3{0, 1, 0}); REQUIRE(Vec3{0, 0, -5}.normalize() == Vec3{0, 0, -1}); } SECTION("Test normalize() with scaling") { REQUIRE(Vec3{2, 0, 0}.normalize(3) == Vec3{3, 0, 0}); REQUIRE(Vec3{0, 3, 0}.normalize(3) == Vec3{0, 3, 0}); REQUIRE(Vec3{0, 0, -5}.normalize(3) == Vec3{0, 0, -3}); } SECTION("Test dot() product") { auto v1 = Vec3{2, 7, 1}; auto v2 = Vec3{3, 9, 8}; REQUIRE(v1.dot(v2) == 77); } 
SECTION("Test cross() product") { auto v1 = Vec3{2, 7, 9}; auto v2 = Vec3{3, 9, 1}; REQUIRE(v1.cross(v2) == Vec3{-74, 25, -3}); } SECTION("Test linear interpolation lerp()") { auto v1 = Vec3{1, 1, 1}; auto v2 = Vec3{3, 3, 3}; REQUIRE(v1.lerp(v2, 0.5) == Vec3{2, 2, 2}); REQUIRE(v1.lerp(v2, 0.25) == Vec3{1.5, 1.5, 1.5}); REQUIRE(v1.lerp(v2, 0) == v1); REQUIRE(v1.lerp(v2, 1) == v2); } SECTION("Test distance()") { auto v1 = Vec3{}; auto v2 = Vec3{3, 4, 5}; REQUIRE(v1.distance(v2) == 7.0710678118654755); REQUIRE(v1.distance(v1) == 0); REQUIRE(v2.distance(v2) == 0); } SECTION("Test stringify str()") { REQUIRE(Vec3{}.str() == "Vec3{0, 0, 0}"); REQUIRE(Vec3{1, 2, 3}.str() == "Vec3{1, 2, 3}"); } }
33.808696
78
0.525849
[ "vector" ]
24154f8eee2cfe839f2f5b7401b1dee959c5e03a
51,278
hpp
C++
include/cilantro/non_rigid_registration_utilities.hpp
eglrp/cilantro
669da069c3ec06006d1347eca7b67cd93a9e9801
[ "MIT" ]
null
null
null
include/cilantro/non_rigid_registration_utilities.hpp
eglrp/cilantro
669da069c3ec06006d1347eca7b67cd93a9e9801
[ "MIT" ]
null
null
null
include/cilantro/non_rigid_registration_utilities.hpp
eglrp/cilantro
669da069c3ec06006d1347eca7b67cd93a9e9801
[ "MIT" ]
1
2019-03-22T06:53:28.000Z
2019-03-22T06:53:28.000Z
#pragma once #include <Eigen/Sparse> #include <cilantro/space_transformations.hpp> #include <cilantro/nearest_neighbors.hpp> #include <cilantro/correspondence.hpp> namespace cilantro { // Values interpreted as weights template <typename ScalarT, ptrdiff_t EigenDim> void resampleTransformations(const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const std::vector<NeighborSet<ScalarT>> &new_to_old_map, RigidTransformationSet<ScalarT,EigenDim> &new_transforms) { new_transforms.resize(new_to_old_map.size()); ScalarT total_weight; #pragma omp parallel for shared (new_transforms) private (total_weight) for (size_t i = 0; i < new_transforms.size(); i++) { total_weight = (ScalarT)0.0; new_transforms[i].linear().setZero(); new_transforms[i].translation().setZero(); for (size_t j = 0; j < new_to_old_map[i].size(); j++) { total_weight += new_to_old_map[i][j].value; new_transforms[i].linear() += new_to_old_map[i][j].value*old_transforms[new_to_old_map[i][j].index].linear(); new_transforms[i].translation() += new_to_old_map[i][j].value*old_transforms[new_to_old_map[i][j].index].translation(); } if (total_weight == (ScalarT)0.0) { new_transforms[i].setIdentity(); } else { total_weight = (ScalarT)(1.0)/total_weight; new_transforms[i].linear() *= total_weight; new_transforms[i].linear() = new_transforms[i].rotation(); new_transforms[i].translation() *= total_weight; } } } // Values interpreted as weights template <typename ScalarT, ptrdiff_t EigenDim> inline RigidTransformationSet<ScalarT,EigenDim> resampleTransformations(const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const std::vector<NeighborSet<ScalarT>> &new_to_old_map) { RigidTransformationSet<ScalarT,EigenDim> new_transforms; resampleTransformations<ScalarT,EigenDim>(old_transforms, new_to_old_map, new_transforms); return new_transforms; } // Values interpreted as distances template <typename ScalarT, ptrdiff_t EigenDim> void resampleTransformations(const RigidTransformationSet<ScalarT,EigenDim> 
&old_transforms, const std::vector<NeighborSet<ScalarT>> &new_to_old_map, ScalarT distance_sigma, RigidTransformationSet<ScalarT,EigenDim> &new_transforms) { new_transforms.resize(new_to_old_map.size()); const ScalarT sigma_inv_sq = (ScalarT)(1.0)/(distance_sigma*distance_sigma); ScalarT curr_weight, total_weight; #pragma omp parallel for shared (new_transforms) private (curr_weight, total_weight) for (size_t i = 0; i < new_transforms.size(); i++) { total_weight = (ScalarT)0.0; new_transforms[i].linear().setZero(); new_transforms[i].translation().setZero(); for (size_t j = 0; j < new_to_old_map[i].size(); j++) { curr_weight = std::exp(-(ScalarT)(0.5)*new_to_old_map[i][j].value*sigma_inv_sq); total_weight += curr_weight; new_transforms[i].linear() += curr_weight*old_transforms[new_to_old_map[i][j].index].linear(); new_transforms[i].translation() += curr_weight*old_transforms[new_to_old_map[i][j].index].translation(); } if (total_weight == (ScalarT)0.0) { new_transforms[i].setIdentity(); } else { total_weight = (ScalarT)(1.0)/total_weight; new_transforms[i].linear() *= total_weight; new_transforms[i].linear() = new_transforms[i].rotation(); new_transforms[i].translation() *= total_weight; } } } // Values interpreted as distances template <typename ScalarT, ptrdiff_t EigenDim> inline RigidTransformationSet<ScalarT,EigenDim> resampleTransformations(const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const std::vector<NeighborSet<ScalarT>> &new_to_old_map, ScalarT distance_sigma) { RigidTransformationSet<ScalarT,EigenDim> new_transforms; resampleTransformations<ScalarT,EigenDim>(old_transforms, new_to_old_map, distance_sigma, new_transforms); return new_transforms; } template <typename ScalarT, ptrdiff_t EigenDim, NeighborhoodType NT> void resampleTransformations(const KDTree<ScalarT,EigenDim,KDTreeDistanceAdaptors::L2> &old_support_kd_tree, const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const ConstVectorSetMatrixMap<ScalarT,EigenDim> 
&new_support, const NeighborhoodSpecification<ScalarT> &nh, ScalarT distance_sigma, RigidTransformationSet<ScalarT,EigenDim> &new_transforms) { new_transforms.resize(new_support.cols()); const ScalarT sigma_inv_sq = (ScalarT)(1.0)/(distance_sigma*distance_sigma); NeighborSet<ScalarT> nn; ScalarT curr_weight, total_weight; #pragma omp parallel for shared (new_transforms) private (nn, curr_weight, total_weight) for (size_t i = 0; i < new_transforms.size(); i++) { old_support_kd_tree.template search<NT>(new_support.col(i), nh, nn); total_weight = (ScalarT)0.0; new_transforms[i].linear().setZero(); new_transforms[i].translation().setZero(); for (size_t j = 0; j < nn.size(); j++) { curr_weight = std::exp(-(ScalarT)(0.5)*nn[j].value*sigma_inv_sq); total_weight += curr_weight; new_transforms[i].linear() += curr_weight*old_transforms[nn[j].index].linear(); new_transforms[i].translation() += curr_weight*old_transforms[nn[j].index].translation(); } if (total_weight == (ScalarT)0.0) { new_transforms[i].setIdentity(); } else { total_weight = (ScalarT)(1.0)/total_weight; new_transforms[i].linear() *= total_weight; new_transforms[i].linear() = new_transforms[i].rotation(); new_transforms[i].translation() *= total_weight; } } } template <typename ScalarT, ptrdiff_t EigenDim, NeighborhoodType NT> inline RigidTransformationSet<ScalarT,EigenDim> resampleTransformations(const KDTree<ScalarT,EigenDim,KDTreeDistanceAdaptors::L2> &old_support_kd_tree, const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const ConstVectorSetMatrixMap<ScalarT,EigenDim> &new_support, const NeighborhoodSpecification<ScalarT> &nh, ScalarT distance_sigma) { RigidTransformationSet<ScalarT,EigenDim> new_transforms; resampleTransformations<ScalarT,EigenDim,NT>(old_support_kd_tree, old_transforms, new_support, nh, distance_sigma, new_transforms); return new_transforms; } template <typename ScalarT, ptrdiff_t EigenDim> void resampleTransformations(const 
KDTree<ScalarT,EigenDim,KDTreeDistanceAdaptors::L2> &old_support_kd_tree, const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const ConstVectorSetMatrixMap<ScalarT,EigenDim> &new_support, const NeighborhoodSpecification<ScalarT> &nh, ScalarT distance_sigma, RigidTransformationSet<ScalarT,EigenDim> &new_transforms) { switch (nh.type) { case NeighborhoodType::KNN: resampleTransformations<ScalarT,EigenDim,NeighborhoodType::KNN>(old_support_kd_tree, old_transforms, new_support, nh, distance_sigma, new_transforms); break; case NeighborhoodType::RADIUS: resampleTransformations<ScalarT,EigenDim,NeighborhoodType::RADIUS>(old_support_kd_tree, old_transforms, new_support, nh, distance_sigma, new_transforms); break; case NeighborhoodType::KNN_IN_RADIUS: resampleTransformations<ScalarT,EigenDim,NeighborhoodType::KNN_IN_RADIUS>(old_support_kd_tree, old_transforms, new_support, nh, distance_sigma, new_transforms); break; } } template <typename ScalarT, ptrdiff_t EigenDim> inline RigidTransformationSet<ScalarT,EigenDim> resampleTransformations(const KDTree<ScalarT,EigenDim,KDTreeDistanceAdaptors::L2> &old_support_kd_tree, const RigidTransformationSet<ScalarT,EigenDim> &old_transforms, const ConstVectorSetMatrixMap<ScalarT,EigenDim> &new_support, const NeighborhoodSpecification<ScalarT> &nh, ScalarT distance_sigma) { RigidTransformationSet<ScalarT,EigenDim> new_transforms; resampleTransformations<ScalarT,EigenDim>(old_support_kd_tree, old_transforms, new_support, nh, distance_sigma, new_transforms); return new_transforms; } template <typename ScalarT> inline ScalarT sqrtHuberLoss(ScalarT x, ScalarT delta = (ScalarT)1.0) { const ScalarT x_abs = std::abs(x); if (x_abs > delta) { return std::sqrt(delta*(x_abs - (ScalarT)(0.5)*delta)); } else { return std::sqrt((ScalarT)(0.5))*x_abs; } } template <typename ScalarT> inline ScalarT sqrtHuberLossDerivative(ScalarT x, ScalarT delta = (ScalarT)1.0) { const ScalarT x_abs = std::abs(x); if (x < (ScalarT)0.0) { if (x_abs > delta) 
{ return -delta/((ScalarT)(2.0)*std::sqrt(delta*(x_abs - (ScalarT)(0.5)*delta))); } else { return -std::sqrt((ScalarT)(0.5)); } } else { if (x_abs > delta) { return delta/((ScalarT)(2.0)*std::sqrt(delta*(x_abs - (ScalarT)(0.5)*delta))); } else { return std::sqrt((ScalarT)0.5); } } } template <typename ScalarT> void computeRotationTerms(ScalarT a, ScalarT b, ScalarT c, Eigen::Matrix<ScalarT,3,3> &rot_coeffs, Eigen::Matrix<ScalarT,3,3> &d_rot_coeffs_da, Eigen::Matrix<ScalarT,3,3> &d_rot_coeffs_db, Eigen::Matrix<ScalarT,3,3> &d_rot_coeffs_dc) { const ScalarT sina = std::sin(a); const ScalarT cosa = std::cos(a); const ScalarT sinb = std::sin(b); const ScalarT cosb = std::cos(b); const ScalarT sinc = std::sin(c); const ScalarT cosc = std::cos(c); rot_coeffs(0,0) = cosc*cosb; rot_coeffs(1,0) = -sinc*cosa + cosc*sinb*sina; rot_coeffs(2,0) = sinc*sina + cosc*sinb*cosa; rot_coeffs(0,1) = sinc*cosb; rot_coeffs(1,1) = cosc*cosa + sinc*sinb*sina; rot_coeffs(2,1) = -cosc*sina + sinc*sinb*cosa; rot_coeffs(0,2) = -sinb; rot_coeffs(1,2) = cosb*sina; rot_coeffs(2,2) = cosb*cosa; d_rot_coeffs_da(0,0) = (ScalarT)0.0; d_rot_coeffs_da(1,0) = sinc*sina + cosc*sinb*cosa; d_rot_coeffs_da(2,0) = sinc*cosa - cosc*sinb*sina; d_rot_coeffs_da(0,1) = (ScalarT)0.0; d_rot_coeffs_da(1,1) = -cosc*sina + sinc*sinb*cosa; d_rot_coeffs_da(2,1) = -cosc*cosa - sinc*sinb*sina; d_rot_coeffs_da(0,2) = (ScalarT)0.0; d_rot_coeffs_da(1,2) = cosb*cosa; d_rot_coeffs_da(2,2) = -cosb*sina; d_rot_coeffs_db(0,0) = -cosc*sinb; d_rot_coeffs_db(1,0) = cosc*cosb*sina; d_rot_coeffs_db(2,0) = cosc*cosb*cosa; d_rot_coeffs_db(0,1) = -sinc*sinb; d_rot_coeffs_db(1,1) = sinc*cosb*sina; d_rot_coeffs_db(2,1) = sinc*cosb*cosa; d_rot_coeffs_db(0,2) = -cosb; d_rot_coeffs_db(1,2) = -sinb*sina; d_rot_coeffs_db(2,2) = -sinb*cosa; d_rot_coeffs_dc(0,0) = -sinc*cosb; d_rot_coeffs_dc(1,0) = -cosc*cosa - sinc*sinb*sina; d_rot_coeffs_dc(2,0) = cosc*sina - sinc*sinb*cosa; d_rot_coeffs_dc(0,1) = cosc*cosb; d_rot_coeffs_dc(1,1) = -sinc*cosa + 
cosc*sinb*sina; d_rot_coeffs_dc(2,1) = sinc*sina + cosc*sinb*cosa; d_rot_coeffs_dc(0,2) = (ScalarT)0.0; d_rot_coeffs_dc(1,2) = (ScalarT)0.0; d_rot_coeffs_dc(2,2) = (ScalarT)0.0; } template <typename ScalarT, typename CorrValueT = ScalarT> bool estimateDenseWarpFieldCombinedMetric3(const ConstVectorSetMatrixMap<ScalarT,3> &dst_p, const ConstVectorSetMatrixMap<ScalarT,3> &dst_n, const ConstVectorSetMatrixMap<ScalarT,3> &src_p, const CorrespondenceSet<CorrValueT> &correspondences, const std::vector<NeighborSet<ScalarT>> &regularization_neighborhoods, RigidTransformationSet<ScalarT,3> &transforms, ScalarT point_to_point_weight, ScalarT point_to_plane_weight, ScalarT stiffness_weight, ScalarT huber_boundary = (ScalarT)(1e-6), size_t max_gn_iter = 10, ScalarT gn_conv_tol = (ScalarT)1e-5, size_t max_cg_iter = 1000, ScalarT cg_conv_tol = (ScalarT)1e-5) { if (dst_p.cols() != dst_n.cols() || (point_to_point_weight == (ScalarT)0.0 && point_to_plane_weight == (ScalarT)0.0)) { transforms.resize(src_p.cols()); transforms.setIdentity(); return false; } // if (point_to_point_weight == (ScalarT)0.0) { // // Do point-to-plane // return estimateWarpFieldDensePointToPlane3D<ScalarT,CorrValueT>(dst_p, dst_n, src_p, correspondences, regularization_neighborhoods, transforms, stiffness_weight/point_to_plane_weight, max_iter, convergence_tol); // } // // if (point_to_plane_weight == (ScalarT)0.0) { // // Do point-to-point // return estimateWarpFieldDensePointToPoint3D<ScalarT,CorrValueT>(dst_p, src_p, correspondences, regularization_neighborhoods, transforms, stiffness_weight/point_to_point_weight, max_iter, convergence_tol); // } const ScalarT point_to_point_weight_sqrt = std::sqrt(point_to_point_weight); const ScalarT point_to_plane_weight_sqrt = std::sqrt(point_to_plane_weight); const ScalarT stiffness_weight_sqrt = std::sqrt(stiffness_weight); const ScalarT gn_conv_tol_sq = gn_conv_tol*gn_conv_tol; // Compute number of equations and unknowns const size_t num_unknowns = 6*src_p.cols(); 
const size_t num_data_term_equations = 4*correspondences.size(); std::vector<size_t> reg_eq_ind(regularization_neighborhoods.size()); size_t num_reg_arcs = 0; if (!regularization_neighborhoods.empty()) { reg_eq_ind[0] = 0; num_reg_arcs = std::max((size_t)0, regularization_neighborhoods[0].size() - 1); } for (size_t i = 1; i < regularization_neighborhoods.size(); i++) { reg_eq_ind[i] = reg_eq_ind[i-1] + 6*std::max((size_t)0, regularization_neighborhoods[i-1].size() - 1); num_reg_arcs += std::max((size_t)0, regularization_neighborhoods[i].size() - 1); } const size_t num_regularization_equations = 6*num_reg_arcs; const size_t num_equations = num_data_term_equations + num_regularization_equations; const size_t num_non_zeros = 6*num_data_term_equations + 2*num_regularization_equations; // Jacobian Eigen::SparseMatrix<ScalarT> At(num_unknowns,num_equations); At.reserve(num_non_zeros); // Values ScalarT * const values = At.valuePtr(); // Outer pointers typename Eigen::SparseMatrix<ScalarT>::StorageIndex * const outer_ptr = At.outerIndexPtr(); #pragma omp parallel for for (size_t i = 0; i < num_data_term_equations + 1; i++) { outer_ptr[i] = 6*i; } #pragma omp parallel for for (size_t i = 1; i < num_regularization_equations + 1; i++) { outer_ptr[num_data_term_equations + i] = 6*num_data_term_equations + 2*i; } // Inner indices typename Eigen::SparseMatrix<ScalarT>::StorageIndex * const inner_ind = At.innerIndexPtr(); // Vector of (negative) residuals Eigen::Matrix<ScalarT,Eigen::Dynamic,1> b(num_equations); // Vector of unknowns (Euler angles and translation offsets per point) Eigen::Matrix<ScalarT,Eigen::Dynamic,1> tforms_vec(num_unknowns); tforms_vec.setZero(); Eigen::SparseMatrix<ScalarT> AtA; Eigen::Matrix<ScalarT,Eigen::Dynamic,1> Atb; // Conjugate Gradient solver // Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::IncompleteCholesky<ScalarT,Eigen::Lower|Eigen::Upper>> solver; // 
Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::IdentityPreconditioner> solver; Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::DiagonalPreconditioner<ScalarT>> solver; // Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,BlockDiagonalPreconditioner<ScalarT,6>> solver; solver.setMaxIterations(max_cg_iter); solver.setTolerance(cg_conv_tol); // Temporaries Eigen::Matrix<ScalarT,3,3> rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc; Eigen::Matrix<ScalarT,3,1> trans_s, d_rot_da_s, d_rot_db_s, d_rot_dc_s; Eigen::Matrix<ScalarT,Eigen::Dynamic,1> delta; ScalarT weight, diff, d_sqrt_huber_loss, curr_delta_sq, max_delta_sq; size_t eq_ind, nz_ind; bool has_converged = false; size_t iter = 0; while (iter < max_gn_iter) { // Data term #pragma omp parallel for shared (At, b) private (eq_ind, nz_ind, rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc, trans_s, d_rot_da_s, d_rot_db_s, d_rot_dc_s) for (size_t i = 0; i < correspondences.size(); i++) { const auto d = dst_p.col(correspondences[i].indexInFirst); const auto n = dst_n.col(correspondences[i].indexInFirst); const auto s = src_p.col(correspondences[i].indexInSecond); const size_t offset = 6*correspondences[i].indexInSecond; computeRotationTerms(tforms_vec[offset], tforms_vec[offset + 1], tforms_vec[offset + 2], rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc); const auto trans_coeffs = tforms_vec.template segment<3>(offset + 3); trans_s = rot_coeffs.transpose()*s + trans_coeffs - d; d_rot_da_s = d_rot_coeffs_da.transpose()*s; d_rot_db_s = d_rot_coeffs_db.transpose()*s; d_rot_dc_s = d_rot_coeffs_dc.transpose()*s; eq_ind = 4*i; nz_ind = 24*i; // Point to plane values[nz_ind] = (n.dot(d_rot_da_s))*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = (n.dot(d_rot_db_s))*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = 
(n.dot(d_rot_dc_s))*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = n[0]*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = n[1]*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = n[2]*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 5; b[eq_ind++] = -(n.dot(trans_s))*point_to_plane_weight_sqrt; // Point to point values[nz_ind] = d_rot_da_s[0]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[0]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[0]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 5; b[eq_ind++] = -(trans_s[0])*point_to_point_weight_sqrt; values[nz_ind] = d_rot_da_s[1]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[1]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[1]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 5; b[eq_ind++] = -(trans_s[1])*point_to_point_weight_sqrt; values[nz_ind] = d_rot_da_s[2]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[2]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[2]*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 5; b[eq_ind++] = 
-(trans_s[2])*point_to_point_weight_sqrt; } // Regularization term #pragma omp parallel for shared (At, b) private (eq_ind, nz_ind, weight, diff, d_sqrt_huber_loss) for (size_t i = 0; i < regularization_neighborhoods.size(); i++) { eq_ind = num_data_term_equations + reg_eq_ind[i]; nz_ind = 6*num_data_term_equations + 2*reg_eq_ind[i]; for (size_t j = 1; j < regularization_neighborhoods[i].size(); j++) { size_t s_offset = 6*regularization_neighborhoods[i][0].index; size_t n_offset = 6*regularization_neighborhoods[i][j].index; weight = stiffness_weight_sqrt*std::sqrt(regularization_neighborhoods[i][j].value); if (n_offset < s_offset) std::swap(s_offset, n_offset); diff = tforms_vec[s_offset + 0] - tforms_vec[n_offset + 0]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 1] - tforms_vec[n_offset + 1]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 1; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 1; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 2] - tforms_vec[n_offset + 2]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 2; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 2; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 3] - tforms_vec[n_offset + 3]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 3; values[nz_ind] = -d_sqrt_huber_loss; 
inner_ind[nz_ind++] = n_offset + 3; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 4] - tforms_vec[n_offset + 4]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 4; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 4; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 5] - tforms_vec[n_offset + 5]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 5; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 5; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); } } // Solve linear system using CG AtA = At*At.transpose(); Atb = At*b; // solver.compute(AtA); if (iter == 0) solver.analyzePattern(AtA); solver.factorize(AtA); delta = solver.solve(Atb); tforms_vec += delta; iter++; // Check for convergence max_delta_sq = (ScalarT)0.0; #pragma omp parallel for private (curr_delta_sq) reduction (max: max_delta_sq) for (size_t i = 0; i < src_p.cols(); i++) { curr_delta_sq = delta.template segment<6>(6*i).squaredNorm(); if (curr_delta_sq > max_delta_sq) max_delta_sq = curr_delta_sq; } // std::cout << iter << ": " << std::sqrt(max_delta_sq) << std::endl; if (max_delta_sq < gn_conv_tol_sq) { has_converged = true; break; } } // Convert to output format transforms.resize(src_p.cols()); #pragma omp parallel for for (size_t i = 0; i < transforms.size(); i++) { transforms[i].linear() = (Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 2],Eigen::Matrix<ScalarT,3,1>::UnitZ()) * Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 1],Eigen::Matrix<ScalarT,3,1>::UnitY()) * Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 0],Eigen::Matrix<ScalarT,3,1>::UnitX())).matrix(); transforms[i].linear() = transforms[i].rotation(); transforms[i].translation() = 
tforms_vec.template segment<3>(6*i + 3); } return has_converged; } template <typename ScalarT> bool estimateSparseWarpFieldCombinedMetric3(const ConstVectorSetMatrixMap<ScalarT,3> &dst_p, const ConstVectorSetMatrixMap<ScalarT,3> &dst_n, const ConstVectorSetMatrixMap<ScalarT,3> &src_p, size_t num_ctrl_points, const std::vector<NeighborSet<ScalarT>> &src_to_ctrl_neighborhoods, const std::vector<NeighborSet<ScalarT>> &regularization_neighborhoods, RigidTransformationSet<ScalarT,3> &transforms, ScalarT point_to_point_weight, ScalarT point_to_plane_weight, ScalarT stiffness_weight, ScalarT huber_boundary = (ScalarT)(1e-6), size_t max_gn_iter = 10, ScalarT gn_conv_tol = (ScalarT)1e-5, size_t max_cg_iter = 1000, ScalarT cg_conv_tol = (ScalarT)1e-5) { if (dst_p.cols() != dst_n.cols() || dst_p.cols() != src_p.cols() || src_to_ctrl_neighborhoods.size() != src_p.cols() || (point_to_point_weight == (ScalarT)0.0 && point_to_plane_weight == (ScalarT)0.0)) { transforms.resize(num_ctrl_points); transforms.setIdentity(); return false; } // if (point_to_point_weight == (ScalarT)0.0) { // // Do point-to-plane // return estimateWarpFieldSparsePointToPlane3D<ScalarT>(dst_p, dst_n, src_p, src_to_ctrl_neighborhoods, ctrl_regularization_neighborhoods, ctrl_transforms, stiffness_weight/point_to_plane_weight, max_iter, convergence_tol); // } // // if (point_to_plane_weight == (ScalarT)0.0) { // // Do point-to-point // return estimateWarpFieldSparsePointToPoint3D<ScalarT>(dst_p, src_p, src_to_ctrl_neighborhoods, ctrl_regularization_neighborhoods, ctrl_transforms, stiffness_weight/point_to_point_weight, max_iter, convergence_tol); // } const ScalarT point_to_point_weight_sqrt = std::sqrt(point_to_point_weight); const ScalarT point_to_plane_weight_sqrt = std::sqrt(point_to_plane_weight); const ScalarT stiffness_weight_sqrt = std::sqrt(stiffness_weight); const ScalarT gn_conv_tol_sq = gn_conv_tol*gn_conv_tol; // Compute number of equations and unknowns const size_t num_unknowns = 
6*num_ctrl_points; const size_t num_data_term_equations = 4*src_p.cols(); std::vector<size_t> reg_eq_ind(regularization_neighborhoods.size()); size_t num_reg_arcs = 0; if (!regularization_neighborhoods.empty()) { reg_eq_ind[0] = 0; num_reg_arcs = std::max((size_t)0, regularization_neighborhoods[0].size() - 1); } for (size_t i = 1; i < regularization_neighborhoods.size(); i++) { reg_eq_ind[i] = reg_eq_ind[i-1] + 6*std::max((size_t)0, regularization_neighborhoods[i-1].size() - 1); num_reg_arcs += std::max((size_t)0, regularization_neighborhoods[i].size() - 1); } std::vector<size_t> nz_coeff_ind(src_to_ctrl_neighborhoods.size() + 1); nz_coeff_ind[0] = 0; for (size_t i = 1; i < src_to_ctrl_neighborhoods.size() + 1; i++) { nz_coeff_ind[i] = nz_coeff_ind[i-1] + 24*src_to_ctrl_neighborhoods[i-1].size(); } const size_t num_regularization_equations = 6*num_reg_arcs; const size_t num_equations = num_data_term_equations + num_regularization_equations; const size_t num_non_zeros = nz_coeff_ind.back() + 2*num_regularization_equations; // Jacobian Eigen::SparseMatrix<ScalarT> At(num_unknowns,num_equations); At.reserve(num_non_zeros); // Values ScalarT * const values = At.valuePtr(); // Outer pointers std::vector<NeighborSet<ScalarT>> src_to_ctrl_sorted(src_to_ctrl_neighborhoods); typename Eigen::SparseMatrix<ScalarT>::StorageIndex * const outer_ptr = At.outerIndexPtr(); #pragma omp parallel for for (size_t i = 0; i < src_to_ctrl_neighborhoods.size(); i++) { std::sort(src_to_ctrl_sorted[i].begin(), src_to_ctrl_sorted[i].end(), typename Neighbor<ScalarT>::IndexLessComparator()); const size_t offset = 6*src_to_ctrl_neighborhoods[i].size(); outer_ptr[4*i] = nz_coeff_ind[i]; outer_ptr[4*i + 1] = nz_coeff_ind[i] + offset; outer_ptr[4*i + 2] = nz_coeff_ind[i] + offset + offset; outer_ptr[4*i + 3] = nz_coeff_ind[i] + offset + offset + offset; } outer_ptr[num_data_term_equations] = nz_coeff_ind.back(); #pragma omp parallel for for (size_t i = 1; i < num_regularization_equations + 1; i++) 
{ outer_ptr[num_data_term_equations + i] = nz_coeff_ind.back() + 2*i; } // Inner indices typename Eigen::SparseMatrix<ScalarT>::StorageIndex * const inner_ind = At.innerIndexPtr(); // Vector of (negative) residuals Eigen::Matrix<ScalarT,Eigen::Dynamic,1> b(num_equations); // Vector of unknowns (Euler angles and translation offsets per point) Eigen::Matrix<ScalarT,Eigen::Dynamic,1> tforms_vec(num_unknowns); tforms_vec.setZero(); // Sum of control point influences std::vector<ScalarT> total_weight(src_to_ctrl_sorted.size()); #pragma omp parallel for shared (total_weight) for (size_t i = 0; i < src_to_ctrl_sorted.size(); i++) { total_weight[i] = (ScalarT)0.0; for (size_t j = 0; j < src_to_ctrl_sorted[i].size(); j++) { total_weight[i] += src_to_ctrl_sorted[i][j].value; } } Eigen::SparseMatrix<ScalarT> AtA; Eigen::Matrix<ScalarT,Eigen::Dynamic,1> Atb; // Conjugate Gradient solver // Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::IncompleteCholesky<ScalarT,Eigen::Lower|Eigen::Upper>> solver; // Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::IdentityPreconditioner> solver; Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,Eigen::DiagonalPreconditioner<ScalarT>> solver; // Eigen::ConjugateGradient<Eigen::SparseMatrix<ScalarT>,Eigen::Lower|Eigen::Upper,BlockDiagonalPreconditioner<ScalarT,6>> solver; solver.setMaxIterations(max_cg_iter); solver.setTolerance(cg_conv_tol); // Temporaries Eigen::Matrix<ScalarT,3,3> rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc; Eigen::Matrix<ScalarT,3,1> trans_s, d_rot_da_s, d_rot_db_s, d_rot_dc_s; Eigen::Matrix<ScalarT,3,1> angles_curr, trans_curr; Eigen::Matrix<ScalarT,Eigen::Dynamic,1> delta; ScalarT weight, diff, d_sqrt_huber_loss, curr_delta_sq, max_delta_sq; size_t eq_ind, nz_ind; bool has_converged = false; size_t iter = 0; while (iter < max_gn_iter) { // Data term #pragma omp parallel for shared (At, b) private 
(eq_ind, nz_ind, weight, angles_curr, trans_curr, rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc, trans_s, d_rot_da_s, d_rot_db_s, d_rot_dc_s) for (size_t i = 0; i < src_p.cols(); i++) { // Compute weighted influence from control nodes angles_curr.setZero(); trans_curr.setZero(); for (size_t j = 0; j < src_to_ctrl_sorted[i].size(); j++) { const size_t offset = 6*src_to_ctrl_sorted[i][j].index; angles_curr += src_to_ctrl_sorted[i][j].value*tforms_vec.template segment<3>(offset); trans_curr += src_to_ctrl_sorted[i][j].value*tforms_vec.template segment<3>(offset + 3); } if (total_weight[i] != (ScalarT)0.0) { weight = (ScalarT)(1.0)/total_weight[i]; angles_curr *= weight; trans_curr *= weight; } const auto d = dst_p.col(i); const auto n = dst_n.col(i); const auto s = src_p.col(i); computeRotationTerms(angles_curr[0], angles_curr[1], angles_curr[2], rot_coeffs, d_rot_coeffs_da, d_rot_coeffs_db, d_rot_coeffs_dc); trans_s = rot_coeffs.transpose()*s + trans_curr - d; d_rot_da_s = d_rot_coeffs_da.transpose()*s; d_rot_db_s = d_rot_coeffs_db.transpose()*s; d_rot_dc_s = d_rot_coeffs_dc.transpose()*s; eq_ind = 4*i; for (size_t j = 0; j < src_to_ctrl_sorted[i].size(); j++) { const size_t offset = 6*src_to_ctrl_sorted[i][j].index; weight = (total_weight[i] == (ScalarT)0.0) ? 
(ScalarT)0.0 : src_to_ctrl_sorted[i][j].value/total_weight[i]; // weight = src_to_ctrl_sorted[i][j].value/total_weight[i]; // Point to plane nz_ind = outer_ptr[eq_ind] + 6*j; values[nz_ind] = (n.dot(d_rot_da_s))*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = (n.dot(d_rot_db_s))*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = (n.dot(d_rot_dc_s))*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = n[0]*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = n[1]*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = n[2]*weight*point_to_plane_weight_sqrt; inner_ind[nz_ind++] = offset + 5; // Point to point nz_ind = outer_ptr[eq_ind + 1] + 6*j; values[nz_ind] = d_rot_da_s[0]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[0]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[0]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 5; nz_ind = outer_ptr[eq_ind + 2] + 6*j; values[nz_ind] = d_rot_da_s[1]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[1]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[1]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 5; nz_ind = outer_ptr[eq_ind + 3] + 6*j; values[nz_ind] = d_rot_da_s[2]*weight*point_to_point_weight_sqrt; 
inner_ind[nz_ind++] = offset; values[nz_ind] = d_rot_db_s[2]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 1; values[nz_ind] = d_rot_dc_s[2]*weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 2; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 3; values[nz_ind] = (ScalarT)0.0; inner_ind[nz_ind++] = offset + 4; values[nz_ind] = weight*point_to_point_weight_sqrt; inner_ind[nz_ind++] = offset + 5; } weight = (total_weight[i] == (ScalarT)0.0) ? (ScalarT)0.0 : (ScalarT)1.0; // weight = (ScalarT)1.0; // Point to plane b[eq_ind] = -(n.dot(trans_s))*weight*point_to_plane_weight_sqrt; // Point to point b[eq_ind + 1] = -(trans_s[0])*weight*point_to_point_weight_sqrt; b[eq_ind + 2] = -(trans_s[1])*weight*point_to_point_weight_sqrt; b[eq_ind + 3] = -(trans_s[2])*weight*point_to_point_weight_sqrt; } // Regularization term #pragma omp parallel for shared (At, b) private (eq_ind, nz_ind, weight, diff, d_sqrt_huber_loss) for (size_t i = 0; i < regularization_neighborhoods.size(); i++) { eq_ind = num_data_term_equations + reg_eq_ind[i]; nz_ind = nz_coeff_ind.back() + 2*reg_eq_ind[i]; for (size_t j = 1; j < regularization_neighborhoods[i].size(); j++) { size_t s_offset = 6*regularization_neighborhoods[i][0].index; size_t n_offset = 6*regularization_neighborhoods[i][j].index; weight = stiffness_weight_sqrt*std::sqrt(regularization_neighborhoods[i][j].value); if (n_offset < s_offset) std::swap(s_offset, n_offset); diff = tforms_vec[s_offset + 0] - tforms_vec[n_offset + 0]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 1] - tforms_vec[n_offset + 1]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; 
inner_ind[nz_ind++] = s_offset + 1; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 1; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 2] - tforms_vec[n_offset + 2]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 2; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 2; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 3] - tforms_vec[n_offset + 3]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 3; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 3; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 4] - tforms_vec[n_offset + 4]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 4; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 4; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); diff = tforms_vec[s_offset + 5] - tforms_vec[n_offset + 5]; d_sqrt_huber_loss = weight*sqrtHuberLossDerivative<ScalarT>(diff, huber_boundary); values[nz_ind] = d_sqrt_huber_loss; inner_ind[nz_ind++] = s_offset + 5; values[nz_ind] = -d_sqrt_huber_loss; inner_ind[nz_ind++] = n_offset + 5; b[eq_ind++] = -weight*sqrtHuberLoss<ScalarT>(diff, huber_boundary); } } // Eigen::SparseMatrix<double> AtA = (At*At.transpose()).template cast<double>(); // Eigen::VectorXd Atb = (At*b).template cast<double>(); // // ScalarT shift = std::sqrt(std::numeric_limits<ScalarT>::epsilon()); // Eigen::CholmodSupernodalLLT<Eigen::SparseMatrix<double>> solver; // solver.compute(AtA); // while (solver.info() != Eigen::Success) { // solver.setShift(shift); // 
solver.compute(AtA); // shift *= 5.0; // } // delta = solver.solve(Atb).template cast<ScalarT>(); // tforms_vec += delta; // Solve linear system using CG AtA = At*At.transpose(); Atb = At*b; // solver.compute(AtA); if (iter == 0) solver.analyzePattern(AtA); solver.factorize(AtA); delta = solver.solve(Atb); tforms_vec += delta; iter++; // Check for convergence max_delta_sq = (ScalarT)0.0; #pragma omp parallel for private (curr_delta_sq) reduction (max: max_delta_sq) for (size_t i = 0; i < num_ctrl_points; i++) { curr_delta_sq = delta.template segment<6>(6*i).squaredNorm(); if (curr_delta_sq > max_delta_sq) max_delta_sq = curr_delta_sq; } // std::cout << iter << ": " << std::sqrt(max_delta_sq) << std::endl; if (max_delta_sq < gn_conv_tol_sq) { has_converged = true; break; } } // Convert to output format transforms.resize(num_ctrl_points); #pragma omp parallel for for (size_t i = 0; i < transforms.size(); i++) { transforms[i].linear() = (Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 2],Eigen::Matrix<ScalarT,3,1>::UnitZ()) * Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 1],Eigen::Matrix<ScalarT,3,1>::UnitY()) * Eigen::AngleAxis<ScalarT>(tforms_vec[6*i + 0],Eigen::Matrix<ScalarT,3,1>::UnitX())).matrix(); transforms[i].linear() = transforms[i].rotation(); transforms[i].translation() = tforms_vec.template segment<3>(6*i + 3); } return has_converged; } template <typename ScalarT, typename CorrValueT = ScalarT> bool estimateSparseWarpFieldCombinedMetric3(const ConstVectorSetMatrixMap<ScalarT,3> &dst_p, const ConstVectorSetMatrixMap<ScalarT,3> &dst_n, const ConstVectorSetMatrixMap<ScalarT,3> &src_p, const CorrespondenceSet<CorrValueT> &corr, size_t num_ctrl_points, const std::vector<NeighborSet<ScalarT>> &src_to_ctrl_neighborhoods, const std::vector<NeighborSet<ScalarT>> &regularization_neighborhoods, RigidTransformationSet<ScalarT,3> &transforms, ScalarT point_to_point_weight, ScalarT point_to_plane_weight, ScalarT stiffness_weight, ScalarT huber_boundary = (ScalarT)(1e-6), 
size_t max_gn_iter = 10, ScalarT gn_conv_tol = (ScalarT)1e-5, size_t max_cg_iter = 1000, ScalarT cg_conv_tol = (ScalarT)1e-5) { VectorSet<ScalarT,3> dst_p_corr(3, corr.size()); VectorSet<ScalarT,3> dst_n_corr(3, corr.size()); VectorSet<ScalarT,3> src_p_corr(3, corr.size()); std::vector<NeighborSet<ScalarT>> src_to_ctrl_neighborhoods_corr(corr.size()); #pragma omp parallel for for (size_t i = 0; i < corr.size(); i++) { dst_p_corr.col(i) = dst_p.col(corr[i].indexInFirst); dst_n_corr.col(i) = dst_n.col(corr[i].indexInFirst); src_p_corr.col(i) = src_p.col(corr[i].indexInSecond); src_to_ctrl_neighborhoods_corr[i] = src_to_ctrl_neighborhoods[corr[i].indexInSecond]; } return estimateSparseWarpFieldCombinedMetric3<ScalarT>(dst_p_corr, dst_n_corr, src_p_corr, num_ctrl_points, src_to_ctrl_neighborhoods_corr, regularization_neighborhoods, transforms, point_to_point_weight, point_to_plane_weight, stiffness_weight, huber_boundary, max_gn_iter, gn_conv_tol, max_cg_iter, cg_conv_tol); } }
54.435244
235
0.568041
[ "vector" ]
24176972877cacf2581fecb7b7c9bb159e609300
706
hpp
C++
lib/include/sway/webcore/treenodeelementdescriptor.hpp
timcowebapps/sway.module_webcore
3c22b4e9293557b3968227fccb920906b2ab204b
[ "MIT" ]
null
null
null
lib/include/sway/webcore/treenodeelementdescriptor.hpp
timcowebapps/sway.module_webcore
3c22b4e9293557b3968227fccb920906b2ab204b
[ "MIT" ]
null
null
null
lib/include/sway/webcore/treenodeelementdescriptor.hpp
timcowebapps/sway.module_webcore
3c22b4e9293557b3968227fccb920906b2ab204b
[ "MIT" ]
null
null
null
#ifndef SWAY_WEBCORE_TREENODEELEMENTDESCRIPTOR_HPP #define SWAY_WEBCORE_TREENODEELEMENTDESCRIPTOR_HPP #include <sway/webcore/css/selectors/cnselectordescriptor.hpp> #include <sway/webcore/prereqs.hpp> NAMESPACE_BEGIN(sway) NAMESPACE_BEGIN(webcore) struct TreeNodeElementDescriptor { std::string tagname; /*!< Имя тега. */ std::string id; /*!< Уникальный идентификатор. */ #ifdef _EMSCRIPTEN emscripten::val stylesheet; /*!< Ассоциативный массив стилей. */ emscripten::val classes; /*!< std::vector<css::CnSelectorDescriptor> */ #else std::map<std::string, std::string> stylesheet; std::vector<css::CnSelectorDescriptor> classes; #endif }; NAMESPACE_END(webcore) NAMESPACE_END(sway) #endif
27.153846
73
0.773371
[ "vector" ]
241a6efe2ad632326169fc0fc0bfced7ac5e19c5
6,652
cpp
C++
sdk/src/system/User.cpp
andrie/rstudio-launcher-plugin-sdk
0b13a149fe263b578c90c98c8ab2d739b830ca60
[ "MIT" ]
4
2021-01-14T08:06:35.000Z
2021-09-24T12:39:31.000Z
sdk/src/system/User.cpp
andrie/rstudio-launcher-plugin-sdk
0b13a149fe263b578c90c98c8ab2d739b830ca60
[ "MIT" ]
14
2021-01-19T21:21:16.000Z
2022-03-03T22:17:23.000Z
sdk/src/system/User.cpp
andrie/rstudio-launcher-plugin-sdk
0b13a149fe263b578c90c98c8ab2d739b830ca60
[ "MIT" ]
3
2021-01-14T07:54:47.000Z
2021-11-23T18:20:19.000Z
/* * User.cpp * * Copyright (C) 2020 by RStudio, PBC * * Unless you have received this program directly from RStudio pursuant to the terms of a commercial license agreement * with RStudio, then this program is licensed to you under the following terms: * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include <system/User.hpp> #include <pwd.h> #include <boost/algorithm/string.hpp> #include <Error.hpp> #include <SafeConvert.hpp> #include <system/FilePath.hpp> #include <system/PosixSystem.hpp> namespace rstudio { namespace launcher_plugins { namespace system { struct User::Impl { template<class T> using GetPasswdFunc = std::function<int(T, struct passwd*, char*, size_t, struct passwd**)>; Impl() : UserId(-1), GroupId(-1) { }; template<typename T> Error populateUser(const GetPasswdFunc<T>& in_getPasswdFunc, T in_value) { struct passwd pwd; struct passwd* tempPtrPwd; // Get the maximum size of a passwd for this system. 
long buffSize = ::sysconf(_SC_GETPW_R_SIZE_MAX); if (buffSize < 0) buffSize = 4096; // some systems return -1, be conservative! std::vector<char> buffer(buffSize); int result = in_getPasswdFunc(in_value, &pwd, &(buffer[0]), buffSize, &tempPtrPwd); if (tempPtrPwd == nullptr) { Error error; if (result == 0) { // A successful result code but no user details means that we couldn't find the user. // This could stem from a permissions issue but is more likely just an incorrectly // formed username. error = systemError(ENOENT, "User not found.", ERROR_LOCATION); } else { error = systemError(result, "Failed to get user details.", ERROR_LOCATION); } error.addProperty("user-value", safe_convert::numberToString(in_value)); return error; } else { UserId = pwd.pw_uid; GroupId = pwd.pw_gid; Name = pwd.pw_name; HomeDirectory = FilePath(pwd.pw_dir); Shell = pwd.pw_shell; } return Success(); } UidType UserId; GidType GroupId; std::string Name; FilePath HomeDirectory; std::string Shell; }; PRIVATE_IMPL_DELETER_IMPL(User) User::User(bool in_isEmpty) : m_impl(new Impl()) { m_impl->Name = in_isEmpty ? 
"" : "*"; } User::User(const User& in_other) : m_impl(new Impl(*in_other.m_impl)) { } Error User::getCurrentUser(User& out_currentUser) { return getUserFromIdentifier(::geteuid(), out_currentUser); } Error User::getUserFromIdentifier(const std::string& in_username, User& out_user) { User user; Error error = user.m_impl->populateUser<const char*>(::getpwnam_r, in_username.c_str()); if (!error) out_user = user; return error; } Error User::getUserFromIdentifier(UidType in_userId, User& out_user) { User user; Error error = user.m_impl->populateUser<UidType>(::getpwuid_r, in_userId); if (!error) out_user = user; return error; } FilePath User::getUserHomePath(const std::string& in_envOverride) { // use environment override if specified if (!in_envOverride.empty()) { using namespace boost::algorithm; for (split_iterator<std::string::const_iterator> it = make_split_iterator(in_envOverride, first_finder("|", is_iequal())); it != split_iterator<std::string::const_iterator>(); ++it) { std::string envHomePath = posix::getEnvironmentVariable(boost::copy_range<std::string>(*it)); if (!envHomePath.empty()) { FilePath userHomePath(envHomePath); if (userHomePath.exists()) return userHomePath; } } } // otherwise use standard unix HOME return FilePath(posix::getEnvironmentVariable("HOME")); } User& User::operator=(const User& in_other) { if (this == &in_other) return *this; if ((m_impl == nullptr) && (in_other.m_impl == nullptr)) return *this; if (in_other.m_impl == nullptr) { m_impl.reset(); return *this; } if (m_impl == nullptr) m_impl.reset(new Impl()); *m_impl = *in_other.m_impl; return *this; } bool User::operator==(const User& in_other) const { // If one or the other is empty but not both, these objects aren't equal. if (isEmpty() != in_other.isEmpty()) return false; // Otherwise they're both empty or they're both not, so just return true if this user is empty. if (isEmpty()) return true; // If one or the other is all users but not both, these aren't the same user. 
if (isAllUsers() != in_other.isAllUsers()) return false; // Otherwise they're both all users or they're both not, so just return true if this user is all users. if (isAllUsers()) return true; return getUserId() == in_other.getUserId(); } bool User::operator!=(const User &in_other) const { return !(*this == in_other); } bool User::exists() const { return !isEmpty() && !isAllUsers(); } bool User::isAllUsers() const { return m_impl->Name == "*"; } bool User::isEmpty() const { return m_impl->Name.empty(); } const FilePath& User::getHomePath() const { return m_impl->HomeDirectory; } GidType User::getGroupId() const { return m_impl->GroupId; } UidType User::getUserId() const { return m_impl->UserId; } const std::string& User::getUsername() const { return m_impl->Name; } const std::string& User::getShell() const { return m_impl->Shell; } } // namespace system } // namespace launcher_plugins } // namespace rstudio
26.188976
120
0.66386
[ "vector" ]
241e60b81d8e4140ae8e167f481f43dd94d4de08
21,928
cpp
C++
code/extlibs/librocket/Rocket/Core/LayoutEngine.cpp
gscept/nebula-trifid
e7c0a0acb05eedad9ed37a72c1bdf2d658511b42
[ "BSD-2-Clause" ]
67
2015-03-30T19:56:16.000Z
2022-03-11T13:52:17.000Z
Source/Core/LayoutEngine.cpp
OffByOneStudios/libRocket
96d91c337416473d5d46a8d4a507ae917a61049e
[ "Unlicense", "MIT" ]
5
2015-04-15T17:17:33.000Z
2016-02-11T00:40:17.000Z
Source/Core/LayoutEngine.cpp
OffByOneStudios/libRocket
96d91c337416473d5d46a8d4a507ae917a61049e
[ "Unlicense", "MIT" ]
34
2015-03-30T15:08:00.000Z
2021-09-23T05:55:10.000Z
/*
 * This source file is part of libRocket, the HTML/CSS Interface Middleware
 *
 * For the latest information, see http://www.librocket.com
 *
 * Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "precompiled.h"
#include "LayoutEngine.h"
#include <Rocket/Core/Math.h>
#include "Pool.h"
#include "LayoutBlockBoxSpace.h"
#include "LayoutInlineBoxText.h"
#include <Rocket/Core/Element.h>
#include <Rocket/Core/ElementScroll.h>
#include <Rocket/Core/ElementText.h>
#include <Rocket/Core/Property.h>
#include <Rocket/Core/Types.h>
#include <Rocket/Core/StyleSheetKeywords.h>
#include <math.h>

namespace Rocket {
namespace Core {

#define MAX(a, b) (a > b ? a : b)

// Fixed-size storage block used by the layout pool below; sized to the largest
// of the layout helper types so a single pool can serve all of them.
struct LayoutChunk
{
	LayoutChunk()
	{
		memset(buffer, 0, size);
	}

	static const unsigned int size = MAX(sizeof(LayoutBlockBox), MAX(sizeof(LayoutInlineBox), MAX(sizeof(LayoutInlineBoxText), MAX(sizeof(LayoutLineBox), sizeof(LayoutBlockBoxSpace)))));
	char buffer[size];
};

static Pool< LayoutChunk > layout_chunk_pool(200, true);

LayoutEngine::LayoutEngine()
{
	block_box = NULL;
	block_context_box = NULL;
}

LayoutEngine::~LayoutEngine()
{
}

// Formats the contents for a root-level element (usually a document or floating element).
bool LayoutEngine::FormatElement(Element* element, const Vector2f& containing_block)
{
	block_box = new LayoutBlockBox(this, NULL, NULL);
	block_box->GetBox().SetContent(containing_block);

	block_context_box = block_box->AddBlockElement(element);

	for (int i = 0; i < element->GetNumChildren(); i++)
	{
		// A false return means layout must restart (e.g. a scrollbar was
		// added); resetting i to -1 re-formats every child from the start.
		if (!FormatElement(element->GetChild(i)))
			i = -1;
	}

	block_context_box->Close();
	block_context_box->CloseAbsoluteElements();

	element->OnLayout();

	delete block_box;
	return true;
}

// Generates the box for an element.
// A content dimension of -1 in the resulting box means 'auto' (not yet sized).
void LayoutEngine::BuildBox(Box& box, const Vector2f& containing_block, Element* element, bool inline_element)
{
	if (element == NULL)
	{
		box.SetContent(containing_block);
		return;
	}

	// Calculate the padding area. Percentages resolve against the containing
	// block's width (per CSS, for all four edges), and negative values clamp to 0.
	const Property *padding_top, *padding_bottom, *padding_left, *padding_right;
	element->GetPaddingProperties (&padding_top, &padding_bottom, &padding_left, &padding_right);

	float padding = element->ResolveProperty(padding_top, containing_block.x);
	box.SetEdge(Box::PADDING, Box::TOP, Math::Max(0.0f, padding));
	padding = element->ResolveProperty(padding_right, containing_block.x);
	box.SetEdge(Box::PADDING, Box::RIGHT, Math::Max(0.0f, padding));
	padding = element->ResolveProperty(padding_bottom, containing_block.x);
	box.SetEdge(Box::PADDING, Box::BOTTOM, Math::Max(0.0f, padding));
	padding = element->ResolveProperty(padding_left, containing_block.x);
	box.SetEdge(Box::PADDING, Box::LEFT, Math::Max(0.0f, padding));

	// Calculate the border area.
	const Property *border_top_width, *border_bottom_width, *border_left_width, *border_right_width;
	element->GetBorderWidthProperties (&border_top_width, &border_bottom_width, &border_left_width, &border_right_width);

	float border = element->ResolveProperty(border_top_width, containing_block.x);
	box.SetEdge(Box::BORDER, Box::TOP, Math::Max(0.0f, border));
	border = element->ResolveProperty(border_right_width, containing_block.x);
	box.SetEdge(Box::BORDER, Box::RIGHT, Math::Max(0.0f, border));
	border = element->ResolveProperty(border_bottom_width, containing_block.x);
	box.SetEdge(Box::BORDER, Box::BOTTOM, Math::Max(0.0f, border));
	border = element->ResolveProperty(border_left_width, containing_block.x);
	box.SetEdge(Box::BORDER, Box::LEFT, Math::Max(0.0f, border));

	// Calculate the size of the content area.
	Vector2f content_area(-1, -1);
	bool replaced_element = false;

	// If the element has intrinsic dimensions, then we use those as the basis for the content area and only adjust
	// them if a non-auto style has been applied to them.
	if (element->GetIntrinsicDimensions(content_area))
	{
		replaced_element = true;

		Vector2f original_content_area = content_area;

		// The element has resized itself, so we only resize it if a RCSS width or height was set explicitly. A value of
		// 'auto' (or 'auto-fit', ie, both keywords) means keep (or adjust) the intrinsic dimensions.
		bool auto_width = false, auto_height = false;

		const Property* width_property, *height_property;
		element->GetDimensionProperties(&width_property, &height_property);
		if (width_property->unit != Property::KEYWORD)
			content_area.x = element->ResolveProperty(width_property, containing_block.x);
		else
			auto_width = true;

		if (height_property->unit != Property::KEYWORD)
			content_area.y = element->ResolveProperty(height_property, containing_block.y);
		else
			auto_height = true;

		// If one of the dimensions is 'auto' then we need to scale it such that the original ratio is preserved.
		if (auto_width && !auto_height)
			content_area.x = (content_area.y / original_content_area.y) * original_content_area.x;
		else if (auto_height && !auto_width)
			content_area.y = (content_area.x / original_content_area.x) * original_content_area.y;

		// Reduce the width and height to make up for borders and padding.
		content_area.x -= (box.GetEdge(Box::BORDER, Box::LEFT) +
						   box.GetEdge(Box::PADDING, Box::LEFT) +
						   box.GetEdge(Box::BORDER, Box::RIGHT) +
						   box.GetEdge(Box::PADDING, Box::RIGHT));
		content_area.y -= (box.GetEdge(Box::BORDER, Box::TOP) +
						   box.GetEdge(Box::PADDING, Box::TOP) +
						   box.GetEdge(Box::BORDER, Box::BOTTOM) +
						   box.GetEdge(Box::PADDING, Box::BOTTOM));

		content_area.x = Math::Max(content_area.x, 0.0f);
		content_area.y = Math::Max(content_area.y, 0.0f);
	}

	// If the element is inline, then its calculations are much more straightforward (no worrying about auto margins
	// and dimensions, etc). All we do is calculate the margins, set the content area and bail.
	if (inline_element)
	{
		if (replaced_element)
		{
			content_area.x = ClampWidth(content_area.x, element, containing_block.x);
			// Fixed: the height was previously clamped with ClampWidth (i.e.
			// against the min-width/max-width properties); the vertical axis
			// must be constrained by min-height/max-height instead.
			content_area.y = ClampHeight(content_area.y, element, containing_block.y);
		}

		// If the element was not replaced, then we leave its dimension as unsized (-1, -1) and ignore the width and
		// height properties.
		box.SetContent(content_area);

		// Evaluate the margins. Any declared as 'auto' will resolve to 0.
		const Property *margin_top, *margin_bottom, *margin_left, *margin_right;
		element->GetMarginProperties(&margin_top, &margin_bottom, &margin_left, &margin_right);

		box.SetEdge(Box::MARGIN, Box::TOP, element->ResolveProperty(margin_top, containing_block.x));
		box.SetEdge(Box::MARGIN, Box::RIGHT, element->ResolveProperty(margin_right, containing_block.x));
		box.SetEdge(Box::MARGIN, Box::BOTTOM, element->ResolveProperty(margin_bottom, containing_block.x));
		box.SetEdge(Box::MARGIN, Box::LEFT, element->ResolveProperty(margin_left, containing_block.x));
	}

	// The element is block, so we need to run the box through the ringer to potentially evaluate auto margins and
	// dimensions.
	else
	{
		box.SetContent(content_area);

		BuildBoxWidth(box, element, containing_block.x);
		BuildBoxHeight(box, element, containing_block.y);
	}
}

// Generates the box for an element placed in a block box.
// In addition to generating the box, reports the vertical range
// [min_height, max_height] the element may occupy: for an auto-height box
// (negative y) the range comes from the element's min-height/max-height
// properties; otherwise it is pinned to the fixed box height.
void LayoutEngine::BuildBox(Box& box, float& min_height, float& max_height, LayoutBlockBox* containing_box, Element* element, bool inline_element)
{
	Vector2f containing_block = GetContainingBlock(containing_box);
	BuildBox(box, containing_block, element, inline_element);

	float box_height = box.GetSize().y;
	if (box_height < 0)
	{
		if (element->GetLocalProperty(MIN_HEIGHT) != NULL)
			min_height = element->ResolveProperty(MIN_HEIGHT, containing_block.y);
		else
			min_height = 0;

		if (element->GetLocalProperty(MAX_HEIGHT) != NULL)
			max_height = element->ResolveProperty(MAX_HEIGHT, containing_block.y);
		else
			max_height = FLT_MAX;
	}
	else
	{
		// Height is already fixed; the permitted range collapses to it.
		min_height = box_height;
		max_height = box_height;
	}
}

// Clamps the width of an element based from its min-width and max-width properties.
// Properties not set locally default to the unconstrained range [0, FLT_MAX].
float LayoutEngine::ClampWidth(float width, Element* element, float containing_block_width)
{
	float min_width, max_width;

	if (element->GetLocalProperty(MIN_WIDTH) != NULL)
		min_width = element->ResolveProperty(MIN_WIDTH, containing_block_width);
	else
		min_width = 0;

	if (element->GetLocalProperty(MAX_WIDTH) != NULL)
		max_width = element->ResolveProperty(MAX_WIDTH, containing_block_width);
	else
		max_width = FLT_MAX;

	return Math::Clamp(width, min_width, max_width);
}

// Clamps the height of an element based from its min-height and max-height properties.
// Properties not set locally default to the unconstrained range [0, FLT_MAX].
float LayoutEngine::ClampHeight(float height, Element* element, float containing_block_height)
{
	float min_height, max_height;

	if (element->GetLocalProperty(MIN_HEIGHT) != NULL)
		min_height = element->ResolveProperty(MIN_HEIGHT, containing_block_height);
	else
		min_height = 0;

	if (element->GetLocalProperty(MAX_HEIGHT) != NULL)
		max_height = element->ResolveProperty(MAX_HEIGHT, containing_block_height);
	else
		max_height = FLT_MAX;

	return Math::Clamp(height, min_height, max_height);
}

// Rounds a vector of two floating-point values to integral values.
Vector2f& LayoutEngine::Round(Vector2f& value)
{
	value.x = Round(value.x);
	value.y = Round(value.y);

	return value;
}

// Rounds a floating-point value to an integral value.
// Rounds upward (ceilf), so fractional sizes never shrink.
float LayoutEngine::Round(float value)
{
	return ceilf(value);
}

void* LayoutEngine::AllocateLayoutChunk(size_t size)
{
	(size);  // evaluated only to silence the unused-parameter warning in release builds
	ROCKET_ASSERT(size <= LayoutChunk::size);

	return layout_chunk_pool.AllocateObject();
}

void LayoutEngine::DeallocateLayoutChunk(void* chunk)
{
	layout_chunk_pool.DeallocateObject((LayoutChunk*) chunk);
}

// Positions a single element and its children within this layout.
// Returns false when the caller must restart layout (for example, a scrollbar
// was added to an ancestor block box).
bool LayoutEngine::FormatElement(Element* element)
{
	// Check if we have to do any special formatting for any elements that don't fit into the standard layout scheme.
	if (FormatElementSpecial(element))
		return true;

	// Fetch the display property, and don't lay this element out if it is set to a display type of none.
	int display_property = element->GetDisplay();
	if (display_property == DISPLAY_NONE)
		return true;

	// Check for an absolute position; if this has been set, then we remove it from the flow and add it to the current
	// block box to be laid out and positioned once the block has been closed and sized.
	int position_property = element->GetPosition();
	if (position_property == POSITION_ABSOLUTE ||
		position_property == POSITION_FIXED)
	{
		// Display the element as a block element.
		block_context_box->AddAbsoluteElement(element);
		return true;
	}

	// If the element is floating, we remove it from the flow.
	int float_property = element->GetFloat();
	if (float_property != FLOAT_NONE)
	{
		// Format the element as a block element, with a fresh engine so the
		// float's layout does not disturb this engine's block context.
		LayoutEngine layout_engine;
		layout_engine.FormatElement(element, GetContainingBlock(block_context_box));

		return block_context_box->AddFloatElement(element);
	}

	// The element is nothing exceptional, so we treat it as a normal block, inline or replaced element.
	switch (display_property)
	{
		case DISPLAY_BLOCK:			return FormatElementBlock(element); break;
		case DISPLAY_INLINE:		return FormatElementInline(element); break;
		case DISPLAY_INLINE_BLOCK:	FormatElementReplaced(element); break;
		default:					ROCKET_ERROR;
	}

	return true;
}

// Formats and positions an element as a block element.
bool LayoutEngine::FormatElementBlock(Element* element)
{
	LayoutBlockBox* new_block_context_box = block_context_box->AddBlockElement(element);
	if (new_block_context_box == NULL)
		return false;

	block_context_box = new_block_context_box;

	// Format the element's children.
	for (int i = 0; i < element->GetNumChildren(); i++)
	{
		// A false return requests a restart; i resets to -1 so the loop's i++
		// brings formatting back to the first child.
		if (!FormatElement(element->GetChild(i)))
			i = -1;
	}

	// Close the block box, and check the return code; we may have overflowed either this element or our parent.
	new_block_context_box = block_context_box->GetParent();
	switch (block_context_box->Close())
	{
		// We need to reformat ourself; format all of our children again and close the box. No need to check for error
		// codes, as we already have our vertical slider bar.
		case LayoutBlockBox::LAYOUT_SELF:
		{
			for (int i = 0; i < element->GetNumChildren(); i++)
				FormatElement(element->GetChild(i));

			if (block_context_box->Close() == LayoutBlockBox::OK)
			{
				element->OnLayout();
				break;
			}
			// Note: falls through to LAYOUT_PARENT when the second Close()
			// still does not return OK.
		}

		// We caused our parent to add a vertical scrollbar; bail out!
		case LayoutBlockBox::LAYOUT_PARENT:
		{
			block_context_box = new_block_context_box;
			return false;
		}
		break;

		default:
			element->OnLayout();
	}

	block_context_box = new_block_context_box;
	return true;
}

// Formats and positions an element as an inline element.
bool LayoutEngine::FormatElementInline(Element* element)
{
	Box box;
	float min_height, max_height;
	BuildBox(box, min_height, max_height, block_context_box, element, true);
	LayoutInlineBox* inline_box = block_context_box->AddInlineElement(element, box);

	// Format the element's children.
	for (int i = 0; i < element->GetNumChildren(); i++)
	{
		if (!FormatElement(element->GetChild(i)))
			return false;
	}

	inline_box->Close();
//	element->OnLayout();

	return true;
}

// Positions an element as a sized inline element, formatting its internal hierarchy as a block element.
void LayoutEngine::FormatElementReplaced(Element* element)
{
	// Format the element separately as a block element, then position it inside our own layout as an inline element.
	LayoutEngine layout_engine;
	layout_engine.FormatElement(element, GetContainingBlock(block_context_box));

	block_context_box->AddInlineElement(element, element->GetBox())->Close();
}

// Executes any special formatting for special elements.
bool LayoutEngine::FormatElementSpecial(Element* element)
{
	static String br("br");

	// Check for a <br> tag.
	if (element->GetTagName() == br)
	{
		block_context_box->AddBreak();
		element->OnLayout();
		return true;
	}

	return false;
}

// Returns the fully-resolved, fixed-width and -height containing block from a block box.
Vector2f LayoutEngine::GetContainingBlock(const LayoutBlockBox* containing_box)
{
	Vector2f containing_block;

	containing_block.x = containing_box->GetBox().GetSize(Box::CONTENT).x;
	if (containing_box->GetElement() != NULL)
		containing_block.x -= containing_box->GetElement()->GetElementScroll()->GetScrollbarSize(ElementScroll::VERTICAL);

	// Walk up the ancestry until a box with a resolved (non-auto) height is found.
	while ((containing_block.y = containing_box->GetBox().GetSize(Box::CONTENT).y) < 0)
	{
		containing_box = containing_box->GetParent();
		if (containing_box == NULL)
		{
			// NOTE(review): if ROCKET_ERROR does not halt execution, the loop
			// condition re-evaluates with a NULL containing_box — confirm
			// ROCKET_ERROR aborts in this configuration.
			ROCKET_ERROR;

			containing_block.y = 0;
		}
	}
	if (containing_box != NULL &&
		containing_box->GetElement() != NULL)
		containing_block.y -= containing_box->GetElement()->GetElementScroll()->GetScrollbarSize(ElementScroll::HORIZONTAL);

	containing_block.x = Math::Max(0.0f, containing_block.x);
	containing_block.y = Math::Max(0.0f, containing_block.y);

	return containing_block;
}

// Builds the block-specific width and horizontal margins of a Box.
void LayoutEngine::BuildBoxWidth(Box& box, Element* element, float containing_block_width)
{
	Vector2f content_area = box.GetSize();

	// Determine if the element has an automatic width, and if not calculate it.
	// (A negative content width means it has not been resolved yet.)
	bool width_auto;
	if (content_area.x >= 0)
		width_auto = false;
	else
	{
		const Property* width_property;
		element->GetDimensionProperties(&width_property, NULL);

		if (width_property->unit == Property::KEYWORD)
		{
			width_auto = true;
		}
		else
		{
			width_auto = false;
			content_area.x = element->ResolveProperty(width_property, containing_block_width);
		}
	}

	// Determine if the element has automatic margins.
	// margins_auto[0] is the left margin, margins_auto[1] the right.
	bool margins_auto[2];
	int num_auto_margins = 0;

	const Property *margin_left, *margin_right;
	element->GetMarginProperties(NULL, NULL, &margin_left, &margin_right);

	for (int i = 0; i < 2; ++i)
	{
		const Property* margin_property = i == 0 ? margin_left : margin_right;
		if (margin_property != NULL &&
			margin_property->unit == Property::KEYWORD)
		{
			margins_auto[i] = true;
			num_auto_margins++;
		}
		else
		{
			margins_auto[i] = false;
			box.SetEdge(Box::MARGIN, i == 0 ? Box::LEFT : Box::RIGHT, element->ResolveProperty(margin_property, containing_block_width));
		}
	}

	// If the width is set to auto, then any margins also set to auto are resolved to 0 and the width is set to the
	// whatever if left of the containing block.
	if (width_auto)
	{
		if (margins_auto[0])
			box.SetEdge(Box::MARGIN, Box::LEFT, 0);
		if (margins_auto[1])
			box.SetEdge(Box::MARGIN, Box::RIGHT, 0);

		content_area.x = containing_block_width - (box.GetCumulativeEdge(Box::CONTENT, Box::LEFT) +
												   box.GetCumulativeEdge(Box::CONTENT, Box::RIGHT));
		content_area.x = Math::Max(0.0f, content_area.x);
	}
	// Otherwise, the margins that are set to auto will pick up the remaining width of the containing block.
	else if (num_auto_margins > 0)
	{
		float margin = (containing_block_width - (box.GetCumulativeEdge(Box::CONTENT, Box::LEFT) +
												  box.GetCumulativeEdge(Box::CONTENT, Box::RIGHT) +
												  content_area.x)) / num_auto_margins;

		if (margins_auto[0])
			box.SetEdge(Box::MARGIN, Box::LEFT, margin);
		if (margins_auto[1])
			box.SetEdge(Box::MARGIN, Box::RIGHT, margin);
	}

	// Clamp the calculated width; if the width is changed by the clamp, then the margins need to be recalculated if
	// they were set to auto.
	float clamped_width = ClampWidth(content_area.x, element, containing_block_width);
	if (clamped_width != content_area.x)
	{
		content_area.x = clamped_width;
		box.SetContent(content_area);

		if (num_auto_margins > 0)
		{
			// Reset the automatic margins.
			if (margins_auto[0])
				box.SetEdge(Box::MARGIN, Box::LEFT, 0);
			if (margins_auto[1])
				box.SetEdge(Box::MARGIN, Box::RIGHT, 0);

			// Recurse once: with the clamped width now fixed, this pass only
			// redistributes the auto margins and cannot re-enter this branch.
			BuildBoxWidth(box, element, containing_block_width);
		}
	}
	else
		box.SetContent(content_area);
}

// Builds the block-specific height and vertical margins of a Box.
void LayoutEngine::BuildBoxHeight(Box& box, Element* element, float containing_block_height)
{
	Vector2f content_area = box.GetSize();

	// Determine if the element has an automatic height, and if not calculate it.
	bool height_auto;
	if (content_area.y >= 0)
		height_auto = false;
	else
	{
		const Property* height_property;
		element->GetDimensionProperties(NULL, &height_property);

		if (height_property == NULL)
		{
			height_auto = false;
		}
		else if (height_property->unit == Property::KEYWORD)
		{
			height_auto = true;
		}
		else
		{
			height_auto = false;
			content_area.y = element->ResolveProperty(height_property, containing_block_height);
		}
	}

	// Determine if the element has automatic margins.
	// margins_auto[0] is the top margin, margins_auto[1] the bottom.
	bool margins_auto[2];
	int num_auto_margins = 0;

	const Property *margin_top, *margin_bottom;
	element->GetMarginProperties(&margin_top, &margin_bottom, NULL, NULL);

	for (int i = 0; i < 2; ++i)
	{
		const Property* margin_property = i == 0 ? margin_top : margin_bottom;
		if (margin_property != NULL &&
			margin_property->unit == Property::KEYWORD)
		{
			margins_auto[i] = true;
			num_auto_margins++;
		}
		else
		{
			margins_auto[i] = false;
			box.SetEdge(Box::MARGIN, i == 0 ? Box::TOP : Box::BOTTOM, element->ResolveProperty(margin_property, containing_block_height));
		}
	}

	// If the height is set to auto, then any margins also set to auto are resolved to 0 and the height is set to -1.
	// (-1 marks the height as unresolved; it is later derived from the content.)
	if (height_auto)
	{
		if (margins_auto[0])
			box.SetEdge(Box::MARGIN, Box::TOP, 0);
		if (margins_auto[1])
			box.SetEdge(Box::MARGIN, Box::BOTTOM, 0);

		content_area.y = -1;
	}
	// Otherwise, the margins that are set to auto will pick up the remaining width of the containing block.
	else if (num_auto_margins > 0)
	{
		float margin;
		if (content_area.y >= 0)
		{
			margin = (containing_block_height - (box.GetCumulativeEdge(Box::CONTENT, Box::TOP) +
												 box.GetCumulativeEdge(Box::CONTENT, Box::BOTTOM) +
												 content_area.y)) / num_auto_margins;
		}
		else
			margin = 0;

		if (margins_auto[0])
			box.SetEdge(Box::MARGIN, Box::TOP, margin);
		if (margins_auto[1])
			box.SetEdge(Box::MARGIN, Box::BOTTOM, margin);
	}

	if (content_area.y >= 0)
	{
		// Clamp the calculated height; if the height is changed by the clamp, then the margins need to be recalculated if
		// they were set to auto.
		float clamped_height = ClampHeight(content_area.y, element, containing_block_height);
		if (clamped_height != content_area.y)
		{
			content_area.y = clamped_height;
			box.SetContent(content_area);

			if (num_auto_margins > 0)
			{
				// Reset the automatic margins.
				if (margins_auto[0])
					box.SetEdge(Box::MARGIN, Box::TOP, 0);
				if (margins_auto[1])
					box.SetEdge(Box::MARGIN, Box::BOTTOM, 0);

				BuildBoxHeight(box, element, containing_block_height);
			}

			return;
		}
	}

	box.SetContent(content_area);
}

}
}
32.342183
183
0.732306
[ "vector" ]
241e84f5ff21cea49a69c41f506b1437c2acf588
915
cpp
C++
avk/algorithms/lsd_radix_sort.cpp
hanxingyixue-arch/AVK
0a19ba3e71bac1c5944b084311669f23ff5fda72
[ "BSD-2-Clause" ]
1
2021-03-09T02:39:45.000Z
2021-03-09T02:39:45.000Z
avk/algorithms/lsd_radix_sort.cpp
hanxingyixue-arch/AVK
0a19ba3e71bac1c5944b084311669f23ff5fda72
[ "BSD-2-Clause" ]
null
null
null
avk/algorithms/lsd_radix_sort.cpp
hanxingyixue-arch/AVK
0a19ba3e71bac1c5944b084311669f23ff5fda72
[ "BSD-2-Clause" ]
null
null
null
#include "all.h"
#include <vector>
#include <array>
#include "sort_config.h"

// Least-significant-digit radix sort: runs one stable counting sort per digit,
// from least to most significant, which yields a fully sorted array overall.
void lsd_radix_sort(main_array array)
{
	const uint digit_width = sort_config::radix_size;

	// Scratch storage persists across invocations so repeated runs of the
	// visualization don't re-allocate it every time.
	static std::vector<item> scratch;
	static std::vector<uint> histogram;
	scratch.resize(array.size());
	histogram.resize(digit_width);

	const uint pass_count = item::max_radix(digit_width);
	for (uint pass = 0; pass < pass_count; ++pass)
	{
		// Tally the occurrences of each digit value for this pass.
		std::fill(histogram.begin(), histogram.end(), 0);
		for (auto& value : array)
			++histogram[extract_radix(value, pass, digit_width)];

		// Exclusive prefix sum: each bucket now holds its first output index.
		uint running_total = 0;
		for (auto& bucket : histogram)
		{
			const uint bucket_count = bucket;
			bucket = running_total;
			running_total += bucket_count;
		}

		// Stable scatter into the scratch buffer, then copy the pass back.
		for (auto& value : array)
		{
			uint& slot = histogram[extract_radix(value, pass, digit_width)];
			scratch[slot] = value;
			++slot;
		}
		std::copy(scratch.begin(), scratch.end(), array.begin());
	}
}
20.795455
85
0.654645
[ "vector" ]
241fd0d6dff03bc5744cb0c7269c7872c989b7ce
2,285
cc
C++
src/artm/score/topic_mass_phi.cc
MelLain/bigartm
79126b68500bd5b378d6d6168f1b68eb03971bcb
[ "BSD-3-Clause" ]
638
2015-02-03T22:17:00.000Z
2022-03-23T18:47:50.000Z
src/artm/score/topic_mass_phi.cc
MelLain/bigartm
79126b68500bd5b378d6d6168f1b68eb03971bcb
[ "BSD-3-Clause" ]
566
2015-01-01T21:49:00.000Z
2022-02-14T09:14:35.000Z
src/artm/score/topic_mass_phi.cc
bt2901/bigartm
92c9d5746c122d0124bab700469d8a2a7f58ff40
[ "BSD-3-Clause" ]
148
2015-01-06T15:30:07.000Z
2022-02-12T18:40:17.000Z
// Copyright 2017, Additive Regularization of Topic Models. // Author: Murat Apishev (great-mel@yandex.ru) #include "artm/core/exceptions.h" #include "artm/core/protobuf_helpers.h" #include "artm/score/topic_mass_phi.h" namespace artm { namespace score { std::shared_ptr<Score> TopicMassPhi::CalculateScore(const artm::core::PhiMatrix& p_wt) { // parameters preparation const int topic_size = p_wt.topic_size(); const int token_size = p_wt.token_size(); std::vector<bool> topics_to_score; int topics_to_score_size = topic_size; if (config_.topic_name_size() == 0) { topics_to_score.assign(topic_size, true); } else { topics_to_score = core::is_member(p_wt.topic_name(), config_.topic_name()); topics_to_score_size = config_.topic_name_size(); } bool use_all_classes = false; if (config_.class_id_size() == 0) { use_all_classes = true; } std::vector<float> topic_mass; topic_mass.assign(topics_to_score_size, 0.0f); double denominator = 0.0; double numerator = 0.0; for (int token_index = 0; token_index < token_size; token_index++) { const auto& token = p_wt.token(token_index); if ((!use_all_classes && !core::is_member(token.class_id, config_.class_id()))) { continue; } int real_topic_index = 0; for (int topic_index = 0; topic_index < topic_size; ++topic_index) { float value = p_wt.get(token_index, topic_index); denominator += value; if (topics_to_score[topic_index]) { numerator += value; topic_mass[real_topic_index++] += value; } } } TopicMassPhiScore* topic_mass_score = new TopicMassPhiScore(); std::shared_ptr<Score> retval(topic_mass_score); float value = 0.0f; if (denominator > config_.eps()) { value = static_cast<float>(numerator / denominator); } topic_mass_score->set_value(value); for (int i = 0; i < topic_size; ++i) { if (topics_to_score[i]) { topic_mass_score->add_topic_name(p_wt.topic_name(i)); } } for (const auto& elem : topic_mass) { // don't check denominator value: if it's near zero the 'value' will show it topic_mass_score->add_topic_mass(elem); 
topic_mass_score->add_topic_ratio(elem / denominator); } return retval; } } // namespace score } // namespace artm
28.209877
88
0.689716
[ "vector" ]
2425f1b49a9f69ad620c1dded87b09c3ab88605e
1,077
cpp
C++
Codeforces/Bubble cup 13/J.cpp
noobie7/Codes
4d8265f4b7042bd7e8c0e0402d417c7e160ae6d4
[ "MIT" ]
2
2021-09-14T15:57:24.000Z
2022-03-18T14:11:04.000Z
Codeforces/Bubble cup 13/J.cpp
noobie7/Codes
4d8265f4b7042bd7e8c0e0402d417c7e160ae6d4
[ "MIT" ]
null
null
null
Codeforces/Bubble cup 13/J.cpp
noobie7/Codes
4d8265f4b7042bd7e8c0e0402d417c7e160ae6d4
[ "MIT" ]
null
null
null
/* "Won't stop until I'm phenomenal." - Phenomenal, Eminem */
// Competitive-programming solution: precomputes a prime sieve plus a prefix
// count of primes, then answers each query k with a closed-form expression.
#include<bits/stdc++.h>
using namespace std;
typedef long long int ll;
#define ff first
// Fast-I/O switch: disables C-stdio sync and unties cin from cout.
#define Shazam ios_base::sync_with_stdio(false); cin.tie(NULL); cout.tie(NULL);
#define ss second
#define all(c) c.begin(),c.end()
// endl is redefined to "\n" to avoid a stream flush on every line.
#define endl "\n"
#define test() int t; cin>>t; while(t--)
#define fl(i,a,b) for(int i = a ; i <b ;i++)
#define get(a) fl(i,0,a.size()) cin>>a[i];
#define pra(a) fl(i,0,a.size()) cout<<a[i]<<" "; cout<<endl;
#define pr(a,n) fl(i,0,n) cout<<a[i]<<" "; cout<<endl;
const ll INF = 2e18;
const int inf = 2e9;
const int mod1 = 1e9 + 7;

int main(){
    Shazam;

    // Sieve of Eratosthenes over [0, n): p[i] == 1 iff i is prime.
    int n = 1e6+4;
    vector<int> p(n,1), pref(n);
    p[0] = p[1] = 0;
    for(int i = 2 ; i*i <=n; i++){
        if(p[i]){
            for(int j = 2*i; j < n; j+=i){
                p[j] = 0;
            }
        }
    }

    // pref[i] = number of primes in [1, i] (prefix sums of the sieve).
    for(int i = 1; i < n; i++){
        pref[i] = p[i];
        pref[i]+=pref[i-1];
    }

    // Per query: number of primes in ((k+1)/2, k], plus 1, plus 1 more when
    // (k+1)/2 is itself prime. NOTE(review): the formula is problem-specific;
    // verify against the original task statement.
    test(){
        int k; cin>>k;
        cout<<pref[k]-pref[(k+1)/2]+1 + (p[(k+1)/2])<<endl;
    }
    return 0;
}
22.914894
81
0.498607
[ "vector" ]
2426cdfd698bdab09ab28dd1720619e2fbe20692
1,030
cpp
C++
cpp/godot-cpp/src/gen/AudioEffectPanner.cpp
GDNative-Gradle/proof-of-concept
162f467430760cf959f68f1638adc663fd05c5fd
[ "MIT" ]
1
2021-03-16T09:51:00.000Z
2021-03-16T09:51:00.000Z
cpp/godot-cpp/src/gen/AudioEffectPanner.cpp
GDNative-Gradle/proof-of-concept
162f467430760cf959f68f1638adc663fd05c5fd
[ "MIT" ]
null
null
null
cpp/godot-cpp/src/gen/AudioEffectPanner.cpp
GDNative-Gradle/proof-of-concept
162f467430760cf959f68f1638adc663fd05c5fd
[ "MIT" ]
null
null
null
// Auto-generated GDNative binding for Godot's AudioEffectPanner class.
// NOTE(review): this file appears machine-generated — prefer regenerating the
// bindings over hand-editing.
#include "AudioEffectPanner.hpp"

#include <core/GodotGlobal.hpp>
#include <core/CoreTypes.hpp>
#include <core/Ref.hpp>

#include <core/Godot.hpp>

#include "__icalls.hpp"

namespace godot {

// Cached engine method binds, resolved once by ___init_method_bindings().
AudioEffectPanner::___method_bindings AudioEffectPanner::___mb = {};

// Looks up and caches the engine-side method pointers for this class.
void AudioEffectPanner::___init_method_bindings() {
	___mb.mb_get_pan = godot::api->godot_method_bind_get_method("AudioEffectPanner", "get_pan");
	___mb.mb_set_pan = godot::api->godot_method_bind_get_method("AudioEffectPanner", "set_pan");
}

// Constructs a new engine-backed AudioEffectPanner and returns its script binding.
AudioEffectPanner *AudioEffectPanner::_new()
{
	return (AudioEffectPanner *) godot::nativescript_1_1_api->godot_nativescript_get_instance_binding_data(godot::_RegisterState::language_index, godot::api->godot_get_class_constructor((char *)"AudioEffectPanner")());
}

// Returns the current pan value via the cached method bind.
real_t AudioEffectPanner::get_pan() const {
	return ___godot_icall_float(___mb.mb_get_pan, (const Object *) this);
}

// Sets the pan value via the cached method bind.
void AudioEffectPanner::set_pan(const real_t cpanume) {
	___godot_icall_void_float(___mb.mb_set_pan, (const Object *) this, cpanume);
}

}
28.611111
215
0.78835
[ "object" ]
2428977dfd06dff3f6dee8c467320bb8bae6a885
42,092
hpp
C++
libraries/include/rpc_stubs/CommonApiRpcClient.hpp
tiaotiao00/HSR00qianbao
a88afebeb98e786389f369447bcf9c3a2a352cfa
[ "MIT" ]
66
2017-09-29T07:09:59.000Z
2020-01-12T06:45:08.000Z
libraries/include/rpc_stubs/CommonApiRpcClient.hpp
tiaotiao00/HSR00qianbao
a88afebeb98e786389f369447bcf9c3a2a352cfa
[ "MIT" ]
5
2017-12-13T13:12:05.000Z
2018-01-18T10:34:02.000Z
libraries/include/rpc_stubs/CommonApiRpcClient.hpp
tiaotiao00/HSR00qianbao
a88afebeb98e786389f369447bcf9c3a2a352cfa
[ "MIT" ]
11
2017-12-05T07:02:05.000Z
2018-01-28T02:52:50.000Z
// _ _ __ _ _ // | | | | / _(_) | // __ _ ___ _ __ ___ _ __ __ _| |_ ___ __| | | |_ _| | ___ // / _` |/ _ \ '_ \ / _ \ '__/ _` | __/ _ \/ _` | | _| | |/ _ \` // | (_| | __/ | | | __/ | | (_| | || __/ (_| | | | | | | __/ // \__, |\___|_| |_|\___|_| \__,_|\__\___|\__,_| |_| |_|_|\___| // __/ | // |___/ // // // Warning: this is a generated file, any changes made here will be // overwritten by the build process. If you need to change what is // generated here, you should either modify the input json files // (network_api.json, wallet_api.json, etc) or modify the code // generator (hsrcore_api_generator.cpp) itself // #pragma once #include <fc/rpc/json_connection.hpp> #include <api/CommonApi.hpp> namespace hsrcore { namespace rpc_stubs { class CommonApiRpcClient : public hsrcore::api::CommonApi { public: virtual fc::rpc::json_connection_ptr get_json_connection() const = 0; fc::variant_object blockchain_get_info() const override; void blockchain_generate_snapshot(const std::string& filename) const override; std::vector<hsrcore::blockchain::HSRTrxidBalance> blockchain_get_hsr_account_balance_entry(uint32_t block_num) override; void blockchain_generate_issuance_map(const std::string& symbol, const std::string& filename) const override; hsrcore::blockchain::Asset blockchain_calculate_supply(const std::string& asset) const override; bool blockchain_is_synced() const override; uint32_t blockchain_get_block_count() const override; hsrcore::blockchain::BlockchainSecurityState blockchain_get_security_state() const override; std::vector<hsrcore::blockchain::AccountEntry> blockchain_list_accounts(const std::string& first_account_name = fc::json::from_string("\"\"").as<std::string>(), uint32_t limit = fc::json::from_string("20").as<uint32_t>()) const override; std::vector<hsrcore::blockchain::AccountEntry> blockchain_list_recently_updated_accounts() const override; std::vector<hsrcore::blockchain::AccountEntry> blockchain_list_recently_registered_accounts() const override; 
std::vector<hsrcore::blockchain::AssetEntry> blockchain_list_assets(const std::string& first_symbol = fc::json::from_string("\"\"").as<std::string>(), uint32_t limit = fc::json::from_string("20").as<uint32_t>()) const override; std::vector<std::pair<hsrcore::blockchain::TransactionIdType, hsrcore::blockchain::SignedTransaction>> blockchain_list_pending_transactions() const override; std::pair<hsrcore::blockchain::TransactionIdType, hsrcore::blockchain::TransactionEntry> blockchain_get_transaction(const std::string& transaction_id_prefix, bool exact = fc::json::from_string("false").as<bool>()) const override; hsrcore::wallet::PrettyTransaction blockchain_get_pretty_transaction(const std::string& transaction_id_prefix, bool exact = fc::json::from_string("false").as<bool>()) const override; hsrcore::wallet::PrettyContractTransaction blockchain_get_pretty_contract_transaction(const std::string& transaction_id_prefix, bool exact = fc::json::from_string("false").as<bool>()) const override; fc::optional<hsrcore::blockchain::BlockEntry> blockchain_get_block(const std::string& block) const override; std::map<hsrcore::blockchain::TransactionIdType, hsrcore::blockchain::TransactionEntry> blockchain_get_block_transactions(const std::string& block) const override; fc::optional<hsrcore::blockchain::AccountEntry> blockchain_get_account(const std::string& account) const override; std::map<hsrcore::blockchain::AccountIdType, std::string> blockchain_get_slate(const std::string& slate) const override; hsrcore::blockchain::BalanceEntry blockchain_get_balance(const hsrcore::blockchain::Address& balance_id) const override; std::unordered_map<hsrcore::blockchain::BalanceIdType, hsrcore::blockchain::BalanceEntry> blockchain_list_balances(const std::string& first_balance_id = fc::json::from_string("\"\"").as<std::string>(), uint32_t limit = fc::json::from_string("20").as<uint32_t>()) const override; std::unordered_map<hsrcore::blockchain::BalanceIdType, hsrcore::blockchain::BalanceEntry> 
blockchain_list_address_balances(const std::string& addr, const fc::time_point& chanced_since = fc::json::from_string("\"1970-1-1T00:00:01\"").as<fc::time_point>()) const override; fc::variant_object blockchain_list_address_transactions(const std::string& addr, uint32_t filter_before = fc::json::from_string("\"0\"").as<uint32_t>()) const override; hsrcore::wallet::AccountBalanceSummaryType blockchain_get_account_public_balance(const std::string& account_name) const override; std::unordered_map<hsrcore::blockchain::BalanceIdType, hsrcore::blockchain::BalanceEntry> blockchain_list_key_balances(const hsrcore::blockchain::PublicKeyType& key) const override; fc::optional<hsrcore::blockchain::AssetEntry> blockchain_get_asset(const std::string& asset) const override; std::vector<hsrcore::blockchain::AccountEntry> blockchain_list_active_delegates(uint32_t first = fc::json::from_string("0").as<uint32_t>(), uint32_t count = fc::json::from_string("20").as<uint32_t>()) const override; std::vector<hsrcore::blockchain::AccountEntry> blockchain_list_delegates(uint32_t first = fc::json::from_string("0").as<uint32_t>(), uint32_t count = fc::json::from_string("20").as<uint32_t>()) const override; std::vector<hsrcore::blockchain::BlockEntry> blockchain_list_blocks(uint32_t max_block_num = fc::json::from_string("-1").as<uint32_t>(), uint32_t limit = fc::json::from_string("20").as<uint32_t>()) override; std::vector<std::string> blockchain_list_missing_block_delegates(uint32_t block_number) override; std::string blockchain_export_fork_graph(uint32_t start_block = fc::json::from_string("1").as<uint32_t>(), uint32_t end_block = fc::json::from_string("-1").as<uint32_t>(), const hsrcore::blockchain::FilePath& filename = fc::json::from_string("\"\"").as<hsrcore::blockchain::FilePath>()) const override; std::map<uint32_t, std::vector<hsrcore::blockchain::ForkEntry>> blockchain_list_forks() const override; std::vector<hsrcore::blockchain::SlotEntry> blockchain_get_delegate_slot_entrys(const 
std::string& delegate_name, uint32_t limit = fc::json::from_string("\"10\"").as<uint32_t>()) const override; std::string blockchain_get_block_signee(const std::string& block) const override; hsrcore::blockchain::Asset blockchain_unclaimed_genesis() const override; bool blockchain_verify_signature(const std::string& signer, const fc::sha256& hash, const fc::ecc::compact_signature& signature) const override; void blockchain_dump_state(const std::string& path) const override; void blockchain_broadcast_transaction(const hsrcore::blockchain::SignedTransaction& trx) override; void blockchain_btc_address_convert(const std::string& path) const override; std::string blockchain_get_transaction_rpc(const std::string& transaction_id_prefix, bool exact = fc::json::from_string("false").as<bool>()) const override; void blockchain_set_node_vm_enabled(bool enabled) override; bool blockchain_get_node_vm_enabled() const override; vector<string> blockchain_get_all_contracts() const override; unordered_map<string, string> blockchain_get_forever_contracts() const override; std::vector<std::string> blockchain_list_pub_all_address(const std::string& pub_key) const override; std::vector<hsrcore::wallet::ContractTransactionSummary> blockchain_list_contract_transaction_history(uint32_t from_block_num, uint32_t block_count, const std::string& contract_id = fc::json::from_string("\"\"").as<std::string>(), uint32_t trx_type = fc::json::from_string("\"99\"").as<uint32_t>(), const std::string& call_method = fc::json::from_string("\"\"").as<std::string>()) const override; void network_add_node(const std::string& node, const std::string& command = fc::json::from_string("\"add\"").as<std::string>()) override; uint32_t network_get_connection_count() const override; std::vector<fc::variant_object> network_get_peer_info(bool not_firewalled = fc::json::from_string("false").as<bool>()) const override; hsrcore::blockchain::TransactionIdType network_broadcast_transaction(const 
hsrcore::blockchain::SignedTransaction& transaction_to_broadcast) override; void network_set_advanced_node_parameters(const fc::variant_object& params) override; fc::variant_object network_get_advanced_node_parameters() const override; hsrcore::net::MessagePropagationData network_get_transaction_propagation_data(const hsrcore::blockchain::TransactionIdType& transaction_id) override; hsrcore::net::MessagePropagationData network_get_block_propagation_data(const hsrcore::blockchain::BlockIdType& block_hash) override; fc::variant_object network_get_info() const override; std::vector<hsrcore::net::PotentialPeerEntry> network_list_potential_peers() const override; fc::variant_object network_get_upnp_info() const override; std::vector<std::string> network_get_blocked_ips() const override; std::string debug_get_client_name() const override; fc::variant delegate_get_config() const override; void delegate_set_network_min_connection_count(uint32_t count) override; void delegate_set_block_max_transaction_count(uint32_t count) override; void delegate_set_soft_max_imessage_length(int64_t soft_length) override; void delegate_set_imessage_fee_coe(const std::string& fee_coe) override; void delegate_set_block_max_size(uint32_t size) override; void delegate_set_transaction_max_size(uint32_t size) override; void delegate_set_transaction_canonical_signatures_required(bool required) override; void delegate_set_transaction_min_fee(const std::string& fee) override; void delegate_blacklist_add_transaction(const hsrcore::blockchain::TransactionIdType& id) override; void delegate_blacklist_remove_transaction(const hsrcore::blockchain::TransactionIdType& id) override; void delegate_blacklist_add_operation(const hsrcore::blockchain::OperationTypeEnum& id) override; void delegate_blacklist_remove_operation(const hsrcore::blockchain::OperationTypeEnum& id) override; fc::variant_object wallet_get_info() override; void wallet_open(const std::string& wallet_name) override; void wallet_create(const 
std::string& wallet_name, const std::string& new_passphrase, const std::string& brain_key = fc::json::from_string("\"\"").as<std::string>()) override; fc::optional<std::string> wallet_get_name() const override; std::string wallet_import_private_key(const std::string& wif_key, const std::string& account_name = fc::json::from_string("null").as<std::string>(), bool create_new_account = fc::json::from_string("false").as<bool>(), bool rescan = fc::json::from_string("false").as<bool>()) override; void wallet_close() override; void wallet_backup_create(const fc::path& json_filename) const override; void wallet_backup_restore(const fc::path& json_filename, const std::string& wallet_name, const std::string& imported_wallet_passphrase) override; bool wallet_set_automatic_backups(bool enabled) override; uint32_t wallet_set_transaction_expiration_time(uint32_t seconds) override; std::vector<hsrcore::wallet::PrettyTransaction> wallet_account_transaction_history(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>(), const std::string& asset_symbol = fc::json::from_string("\"\"").as<std::string>(), int32_t limit = fc::json::from_string("0").as<int32_t>(), uint32_t start_block_num = fc::json::from_string("0").as<uint32_t>(), uint32_t end_block_num = fc::json::from_string("-1").as<uint32_t>()) const override; std::vector<hsrcore::wallet::PrettyTransaction> wallet_transaction_history_splite(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>(), const std::string& asset_symbol = fc::json::from_string("\"\"").as<std::string>(), int32_t limit = fc::json::from_string("0").as<int32_t>(), int32_t transaction_type = fc::json::from_string("\"2\"").as<int32_t>()) const override; hsrcore::wallet::AccountBalanceSummaryType wallet_account_historic_balance(const fc::time_point& time, const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) const override; void wallet_remove_transaction(const std::string& 
transaction_id) override; std::map<hsrcore::blockchain::TransactionIdType, fc::exception> wallet_get_pending_transaction_errors(const hsrcore::blockchain::FilePath& filename = fc::json::from_string("\"\"").as<hsrcore::blockchain::FilePath>()) const override; void wallet_lock() override; void wallet_unlock(uint32_t timeout, const std::string& passphrase) override; void wallet_change_passphrase(const std::string& old_passphrase, const std::string& passphrase) override; bool wallet_check_passphrase(const std::string& passphrase) override; bool wallet_check_address(const std::string& address, int8_t address_type = fc::json::from_string("0").as<int8_t>()) override; std::vector<std::string> wallet_list() const override; hsrcore::blockchain::Address wallet_account_create(const std::string& account_name, const fc::variant& private_data = fc::json::from_string("null").as<fc::variant>()) override; int8_t wallet_account_set_approval(const std::string& account_name, int8_t approval = fc::json::from_string("1").as<int8_t>()) override; std::vector<hsrcore::blockchain::AccountEntry> wallet_get_all_approved_accounts(int8_t approval = fc::json::from_string("1").as<int8_t>()) override; std::string wallet_address_create(const std::string& account_name, const std::string& label = fc::json::from_string("\"\"").as<std::string>(), int32_t legacy_network_byte = fc::json::from_string("-1").as<int32_t>()) override; hsrcore::wallet::WalletTransactionEntry wallet_transfer_to_address(const std::string& amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_address, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_recommended\"").as<hsrcore::wallet::VoteStrategy>()) override; hsrcore::wallet::WalletTransactionEntry wallet_transfer_to_address_build(const std::string& amount_to_transfer, const 
std::string& asset_symbol, const std::string& from_account_public_key, const std::string& to_address, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_none\"").as<hsrcore::wallet::VoteStrategy>()) override; hsrcore::wallet::WalletTransactionEntry wallet_transfer_to_public_account(const std::string& amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_account_name, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_recommended\"").as<hsrcore::wallet::VoteStrategy>()) override; hsrcore::wallet::TransactionBuilder wallet_withdraw_from_address(const std::string& amount, const std::string& symbol, const hsrcore::blockchain::Address& from_address, const std::string& to, const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_none\"").as<hsrcore::wallet::VoteStrategy>(), bool sign_and_broadcast = fc::json::from_string("true").as<bool>(), const std::string& builder_path = fc::json::from_string("\"\"").as<std::string>()) override; void wallet_rescan_blockchain(uint32_t start_block_num = fc::json::from_string("0").as<uint32_t>(), uint32_t limit = fc::json::from_string("-1").as<uint32_t>()) override; void wallet_cancel_scan() override; hsrcore::wallet::WalletTransactionEntry wallet_get_transaction(const std::string& transaction_id) override; hsrcore::wallet::WalletTransactionEntry wallet_scan_transaction(const std::string& transaction_id, bool overwrite_existing = fc::json::from_string("false").as<bool>()) override; void wallet_rebroadcast_transaction(const std::string& transaction_id) override; hsrcore::wallet::WalletTransactionEntry wallet_account_register(const std::string& account_name, const 
std::string& pay_from_account, const fc::variant& public_data = fc::json::from_string("null").as<fc::variant>(), uint8_t delegate_pay_rate = fc::json::from_string("-1").as<uint8_t>(), const std::string& account_type = fc::json::from_string("\"titan_account\"").as<std::string>()) override; void wallet_account_update_private_data(const std::string& account_name, const fc::variant& private_data = fc::json::from_string("null").as<fc::variant>()) override; hsrcore::wallet::WalletTransactionEntry wallet_account_update_registration(const std::string& account_name, const std::string& pay_from_account, const fc::variant& public_data = fc::json::from_string("null").as<fc::variant>(), uint8_t delegate_pay_rate = fc::json::from_string("-1").as<uint8_t>()) override; hsrcore::wallet::WalletTransactionEntry wallet_account_update_active_key(const std::string& account_to_update, const std::string& pay_from_account, const std::string& new_active_key = fc::json::from_string("\"\"").as<std::string>()) override; std::vector<hsrcore::wallet::WalletAccountEntry> wallet_list_accounts() const override; std::vector<hsrcore::wallet::WalletAccountEntry> wallet_list_unregistered_accounts() const override; std::vector<hsrcore::wallet::WalletAccountEntry> wallet_list_my_accounts() const override; std::vector<hsrcore::wallet::AccountAddressData> wallet_list_my_addresses() const override; hsrcore::wallet::WalletAccountEntry wallet_get_account(const std::string& account_name) const override; std::string wallet_get_account_public_address(const std::string& account_name) const override; void wallet_remove_contact_account(const std::string& account_name) override; void wallet_account_rename(const std::string& current_account_name, const std::string& new_account_name) override; hsrcore::wallet::WalletTransactionEntry wallet_asset_create(const std::string& symbol, const std::string& asset_name, const std::string& issuer_name, const std::string& description, const std::string& maximum_share_supply, 
uint64_t precision, const fc::variant& public_data = fc::json::from_string("null").as<fc::variant>(), bool is_market_issued = fc::json::from_string("false").as<bool>()) override; hsrcore::wallet::WalletTransactionEntry wallet_asset_issue(const std::string& amount, const std::string& symbol, const std::string& to_account_name, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>()) override; hsrcore::wallet::WalletTransactionEntry wallet_asset_issue_to_addresses(const std::string& symbol, const std::map<std::string, hsrcore::blockchain::ShareType>& addresses) override; hsrcore::wallet::AccountBalanceSummaryType wallet_account_balance(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) const override; hsrcore::wallet::AccountBalanceIdSummaryType wallet_account_balance_ids(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) const override; std::vector<hsrcore::wallet::PublicKeySummary> wallet_account_list_public_keys(const std::string& account_name) override; hsrcore::wallet::WalletTransactionEntry wallet_delegate_withdraw_pay(const std::string& delegate_name, const std::string& to_account_name, const std::string& amount_to_withdraw) override; hsrcore::blockchain::DelegatePaySalary wallet_delegate_pay_balance_query(const std::string& delegate_name) override; std::map<std::string,hsrcore::blockchain::DelegatePaySalary> wallet_active_delegate_salary() override; bool wallet_get_delegate_statue(const std::string& account_name) override; void wallet_set_transaction_imessage_fee_coe(const std::string& fee_coe) override; double wallet_get_transaction_imessage_fee_coe() override; void wallet_set_transaction_imessage_soft_max_length(int64_t soft_length) override; int64_t wallet_get_transaction_imessage_soft_max_length() override; hsrcore::blockchain::Asset wallet_set_transaction_fee(const std::string& fee) override; hsrcore::blockchain::Asset 
wallet_get_transaction_fee(const std::string& symbol = fc::json::from_string("\"\"").as<std::string>()) override; fc::optional<std::string> wallet_dump_private_key(const std::string& input) const override; fc::optional<std::string> wallet_dump_account_private_key(const std::string& account_name, const hsrcore::wallet::AccountKeyType& key_type) const override; hsrcore::wallet::AccountVoteSummaryType wallet_account_vote_summary(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) const override; hsrcore::wallet::VoteSummary wallet_check_vote_status(const std::string& account) override; void wallet_set_setting(const std::string& name, const fc::variant& value) override; fc::optional<fc::variant> wallet_get_setting(const std::string& name) override; void wallet_delegate_set_block_production(const std::string& delegate_name, bool enabled) override; bool wallet_set_transaction_scanning(bool enabled) override; fc::ecc::compact_signature wallet_sign_hash(const std::string& signer, const fc::sha256& hash) override; std::string wallet_login_start(const std::string& server_account) override; fc::variant wallet_login_finish(const hsrcore::blockchain::PublicKeyType& server_key, const hsrcore::blockchain::PublicKeyType& client_key, const fc::ecc::compact_signature& client_signature) override; hsrcore::wallet::TransactionBuilder wallet_balance_set_vote_info(const hsrcore::blockchain::Address& balance_id, const std::string& voter_address = fc::json::from_string("\"\"").as<std::string>(), const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_all\"").as<hsrcore::wallet::VoteStrategy>(), bool sign_and_broadcast = fc::json::from_string("\"true\"").as<bool>(), const std::string& builder_path = fc::json::from_string("\"\"").as<std::string>()) override; hsrcore::wallet::WalletTransactionEntry wallet_publish_slate(const std::string& publishing_account_name, const std::string& paying_account_name = 
fc::json::from_string("\"\"").as<std::string>()) override; hsrcore::wallet::WalletTransactionEntry wallet_publish_version(const std::string& publishing_account_name, const std::string& paying_account_name = fc::json::from_string("\"\"").as<std::string>()) override; hsrcore::wallet::WalletTransactionEntry wallet_collect_genesis_balances(const std::string& account_name) override; int32_t wallet_recover_accounts(int32_t accounts_to_recover, int32_t maximum_number_of_attempts = fc::json::from_string("1000").as<int32_t>()) override; fc::optional<fc::variant_object> wallet_verify_titan_deposit(const std::string& transaction_id_prefix) override; void wallet_repair_entrys(const std::string& collecting_account_name = fc::json::from_string("\"\"").as<std::string>()) override; int32_t wallet_regenerate_keys(const std::string& account_name, uint32_t max_key_number) override; hsrcore::wallet::WalletTransactionEntry wallet_account_retract(const std::string& account_to_retract, const std::string& pay_from_account) override; bool wallet_account_delete(const std::string& account_name) override; std::string wallet_transfer_to_address_rpc(const std::string& amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_address, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_recommended\"").as<hsrcore::wallet::VoteStrategy>()) override; std::string wallet_account_balance_rpc(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) const override; std::string wallet_transfer_to_public_account_rpc(const std::string& amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_account_name, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const 
hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_recommended\"").as<hsrcore::wallet::VoteStrategy>()) override; hsrcore::blockchain::PublicKeyType wallet_get_account_owner_publickey(const std::string& account_name) override; hsrcore::wallet::WalletTransactionEntry wallet_transfer_to_contract(double amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_contract, double amount_for_exec) override; hsrcore::wallet::WalletTransactionEntry sign_build_transaction(const hsrcore::wallet::WalletTransactionEntry& trasaction_building) override; bool broadcast_building_transaction(const hsrcore::wallet::WalletTransactionEntry& trasaction_building) override; hsrcore::wallet::WalletTransactionEntry wallet_transfer_to_contract_build(double amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_public_key, const std::string& to_contract, double amount_for_exec) override; std::vector<hsrcore::blockchain::Asset> wallet_transfer_to_contract_testing(double amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_contract) override; vector<string> wallet_get_contracts(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) override; void wallet_scan_contracts() override; bool wallet_import_hshare_private_key(const std::string& acc_name, const hsrcore::blockchain::FilePath& infile) override; bool wallet_import_hshare_encrypted_private_key(const std::string& passwd, const std::string& acc_name, const hsrcore::blockchain::FilePath& infile) override; hsrcore::wallet::TransactionBuilder wallet_builder_add_signature(const hsrcore::wallet::TransactionBuilder& builder, bool broadcast = fc::json::from_string("false").as<bool>()) override; hsrcore::wallet::TransactionBuilder wallet_builder_file_add_signature(const hsrcore::blockchain::FilePath& builder_path, bool broadcast = 
fc::json::from_string("false").as<bool>()) override; hsrcore::wallet::WalletTransactionEntry wallet_multisig_deposit(const std::string& amount, const std::string& asset_symbol, const std::string& from_account, const std::string& to_account, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>()) override; fc::variant_object wallet_import_multisig_account(const hsrcore::blockchain::Address& multisig_address) override; hsrcore::blockchain::Address wallet_import_multisig_account_by_detail(const std::string& asset_symbol, uint32_t m, const std::vector<hsrcore::blockchain::Address>& addresses) override; hsrcore::wallet::TransactionBuilder wallet_receive_genesis_multisig_blanace(const hsrcore::blockchain::Address& from_address, const std::string& from_address_redeemscript, const std::string& to, const hsrcore::wallet::VoteStrategy& strategy = fc::json::from_string("\"vote_none\"").as<hsrcore::wallet::VoteStrategy>(), bool sign_and_broadcast = fc::json::from_string("true").as<bool>(), const std::string& builder_path = fc::json::from_string("\"\"").as<std::string>()) override; hsrcore::wallet::TransactionBuilder wallet_multisig_withdraw_start(const std::string& amount, const std::string& asset_symbol, const hsrcore::blockchain::Address& from, const hsrcore::blockchain::Address& to_address, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>(), const hsrcore::blockchain::FilePath& builder_path = fc::json::from_string("\"\"").as<hsrcore::blockchain::FilePath>()) override; std::pair<std::string, hsrcore::wallet::WalletTransactionEntry> wallet_create_multisig_account(const std::string& amount, const std::string& asset_symbol, const std::string& from_account, uint32_t m, const std::vector<hsrcore::blockchain::Address>& addresses, const hsrcore::blockchain::Imessage& memo_message = fc::json::from_string("\"\"").as<hsrcore::blockchain::Imessage>()) 
override; std::vector<hsrcore::wallet::PrettyTransaction> wallet_multisig_account_history(const std::string& account_address, const std::string& asset_symbol = fc::json::from_string("\"\"").as<std::string>(), int32_t limit = fc::json::from_string("0").as<int32_t>(), uint32_t start_block_num = fc::json::from_string("0").as<uint32_t>(), uint32_t end_block_num = fc::json::from_string("-1").as<uint32_t>()) const override; hsrcore::wallet::AccountBalanceSummaryType wallet_multisig_account_balance(const std::string& account_address = fc::json::from_string("\"\"").as<std::string>()) const override; fc::variant_object wallet_builder_get_multisig_detail(const hsrcore::wallet::TransactionBuilder& transaction_builder) const override; fc::variant_object wallet_builder_file_get_multisig_detail(const hsrcore::blockchain::FilePath& builder_path) const override; bool set_pos_generate() override; fc::variant_object about() const override; fc::variant_object get_info() const override; void stop() override; std::string help(const std::string& command_name = fc::json::from_string("\"\"").as<std::string>()) const override; fc::variant_object validate_address(const std::string& address) const override; std::string execute_command_line(const std::string& input) const override; void execute_script(const fc::path& script) const override; fc::variants batch(const std::string& method_name, const std::vector<fc::variants>& parameters_list) const override; fc::variants batch_authenticated(const std::string& method_name, const std::vector<fc::variants>& parameters_list) const override; hsrcore::wallet::WalletTransactionEntry builder_finalize_and_sign(const hsrcore::wallet::TransactionBuilder& builder) const override; std::map<std::string, hsrcore::api::MethodData> meta_help() const override; void rpc_set_username(const std::string& username = fc::json::from_string("\"\"").as<std::string>()) override; void rpc_set_password(const std::string& password = 
fc::json::from_string("\"\"").as<std::string>()) override; void rpc_start_server(uint32_t port = fc::json::from_string("\"65065\"").as<uint32_t>()) override; void http_start_server(uint32_t port = fc::json::from_string("\"65066\"").as<uint32_t>()) override; void ntp_update_time() override; fc::variant disk_usage() const override; fc::path compile_contract(const fc::path& filename) const override; std::string register_contract(const std::string& owner, const fc::path& codefile, const std::string& asset_symbol, const fc::optional<double>& init_limit) override; hsrcore::wallet::WalletTransactionEntry register_contract_build(const std::string& owner_public_key, const fc::path& codefile, const std::string& asset_symbol, const fc::optional<double>& init_limit) override; std::vector<hsrcore::blockchain::Asset> register_contract_testing(const std::string& owner, const fc::path& codefile) override; hsrcore::wallet::WalletTransactionEntry upgrade_contract(const std::string& contract_address, const std::string& upgrader_name, const std::string& new_contract_name, const hsrcore::blockchain::Imessage& new_contract_desc, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; hsrcore::wallet::WalletTransactionEntry upgrade_contract_build(const std::string& contract_address, const std::string& upgrader_public_key, const std::string& new_contract_name, const hsrcore::blockchain::Imessage& new_contract_desc, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; std::vector<hsrcore::blockchain::Asset> upgrade_contract_testing(const std::string& contract_address, const std::string& upgrader_name, const std::string& new_contract_name, const hsrcore::blockchain::Imessage& new_contract_desc) override; hsrcore::wallet::WalletTransactionEntry destroy_contract(const std::string& contract_address, const std::string& destroyer_name, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; 
hsrcore::wallet::WalletTransactionEntry destroy_contract_build(const std::string& contract_address, const std::string& destroyer_public_key, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; std::vector<hsrcore::blockchain::Asset> destroy_contract_testing(const std::string& contract_address, const std::string& destroyer_name) override; hsrcore::wallet::WalletTransactionEntry call_contract(const std::string& contract, const std::string& caller_name, const std::string& function_name, const std::string& params, const std::string& asset_symbol, const fc::optional<double>& call_limit) override; hsrcore::wallet::WalletTransactionEntry call_contract_build(const std::string& contract, const std::string& caller_public_key, const std::string& function_name, const std::string& params, const std::string& asset_symbol, const fc::optional<double>& call_limit) override; hsrcore::blockchain::ContractEntryPrintable get_contract_info(const std::string& contract) override; std::vector<hsrcore::blockchain::BalanceEntry> get_contract_balance(const std::string& contract) override; std::vector<hsrcore::blockchain::Asset> call_contract_testing(const std::string& contract, const std::string& caller_name, const std::string& function_name, const std::string& params) override; std::string call_contract_offline(const std::string& contract, const std::string& caller_name, const std::string& function_name, const std::string& params) override; hsrcore::blockchain::ContractEntryPrintable load_contract_to_file(const std::string& contract, const fc::path& file) override; hsrcore::blockchain::TransactionIdType get_result_trx_id(const hsrcore::blockchain::TransactionIdType& request_id) override; hsrcore::blockchain::TransactionIdType get_request_trx_id(const hsrcore::blockchain::TransactionIdType& request_id) override; void sandbox_open() const override; void sandbox_close() const override; std::string sandbox_register_contract(const std::string& owner, const fc::path& 
codefile, const std::string& asset_symbol, const fc::optional<double>& initLimit) override; hsrcore::wallet::WalletTransactionEntry sandbox_call_contract(const std::string& contract, const std::string& caller_name, const std::string& function_name, const std::string& params, const std::string& cost_asset, const fc::optional<double>& callLimit) override; hsrcore::wallet::WalletTransactionEntry sandbox_upgrade_contract(const std::string& contract_address, const std::string& upgrader_name, const std::string& new_contract_name, const hsrcore::blockchain::Imessage& new_contract_desc, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; std::vector<hsrcore::blockchain::Asset> sandbox_upgrade_contract_testing(const std::string& contract_address, const std::string& upgrader_name, const std::string& new_contract_name, const hsrcore::blockchain::Imessage& new_contract_desc) override; hsrcore::wallet::WalletTransactionEntry sandbox_destroy_contract(const std::string& contract_address, const std::string& destroyer_name, const std::string& asset_symbol, const fc::optional<double>& exec_limit) override; std::vector<hsrcore::blockchain::Asset> sandbox_destroy_contract_testing(const std::string& contract_address, const std::string& destroyer_name) override; hsrcore::blockchain::ContractEntryPrintable sandbox_get_contract_info(const std::string& contract) override; std::vector<hsrcore::blockchain::BalanceEntry> sandbox_get_contract_balance(const std::string& contract) override; hsrcore::wallet::WalletTransactionEntry sandbox_transfer_to_contract(double amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_contract, double amount_for_exec) override; hsrcore::wallet::AccountBalanceSummaryType sandbox_account_balance(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) override; fc::path sandbox_compile_contract(const fc::path& filename) const override; 
hsrcore::blockchain::ContractEntryPrintable sandbox_load_contract_to_file(const std::string& contract, const fc::path& file) override; std::vector<hsrcore::blockchain::Asset> sandbox_register_contract_testing(const std::string& owner, const fc::path& codefile) override; std::vector<hsrcore::blockchain::Asset> sandbox_call_contract_testing(const std::string& contract, const std::string& caller_name, const std::string& function_name, const std::string& params) override; std::vector<hsrcore::blockchain::Asset> sandbox_transfer_to_contract_testing(double amount_to_transfer, const std::string& asset_symbol, const std::string& from_account_name, const std::string& to_contract) override; vector<hsrcore::blockchain::SandboxAccountInfo> sandbox_list_my_addresses(const std::string& account_name = fc::json::from_string("\"\"").as<std::string>()) override; std::string get_contract_registered_in_transaction(const hsrcore::blockchain::TransactionIdType& trx_id) override; hsrcore::blockchain::TransactionIdType get_transaction_id_contract_registered(const std::string& contract_id) override; hsrcore::blockchain::CodePrintAble get_contract_info_from_gpc_file(const fc::path& file) override; MiningInfo get_mining_info() override; MiningDifficulty get_difficulty() override; bool set_generate(bool fGenerate, uint32_t nThreads = fc::json::from_string("-1").as<uint32_t>()) override; MiningWorkPackage get_work() override; hsrcore::blockchain::BlockIdType submit_block(const std::string& HashNoNonce, uint64_t Nonce, uint64_t Extra_Nonce) override; hsrcore::blockchain::BlockIdType submit_blockex(const std::string& data) override; bool set_coinbase(const std::string& account_name) override; StakingInfo get_staking_info() override; fc::path compile_script(const fc::path& filename) const override; std::string add_script(const fc::path& filename, const hsrcore::blockchain::Imessage& description) override; void remove_script(const std::string& scriptid) override; 
hsrcore::wallet::ScriptEntryPrintable get_script_info(const std::string& scriptid) override; std::vector<hsrcore::wallet::ScriptEntryPrintable> list_scripts() override; void disable_script(const std::string& scriptid) override; void enable_script(const std::string& scriptid) override; void import_script_db(const fc::path& dbfile) override; void export_script_db(const fc::path& dbfile) override; std::vector<std::string> get_events_bound(const std::string& script_id) override; std::vector<std::string> list_event_handler(const std::string& contract_id_str, const std::string& event_type) override; void add_event_handler(const std::string& contract_id_str, const std::string& event_type, const std::string& script_id, uint32_t index) override; void delete_event_handler(const std::string& contract_id_str, const std::string& event_type, const std::string& script_id) override; }; } } // end namespace hsrcore::rpc_stubs
143.170068
496
0.734391
[ "vector" ]
2436a681f60bbe0297dd02b7415657a72a0e9bb7
12,158
hpp
C++
hlslm/xmoperators.hpp
ArcEarth/HLSLM
c6bab2aa6f2052ae44863b05adeb506aa51ece65
[ "MIT" ]
null
null
null
hlslm/xmoperators.hpp
ArcEarth/HLSLM
c6bab2aa6f2052ae44863b05adeb506aa51ece65
[ "MIT" ]
1
2019-06-09T14:32:14.000Z
2019-06-18T20:32:24.000Z
hlslm/xmoperators.hpp
ArcEarth/HLSLM
c6bab2aa6f2052ae44863b05adeb506aa51ece65
[ "MIT" ]
null
null
null
#pragma once #include "traits.hpp" #include "xmvector.hpp" // For XMVectorAddInt etc... #include <DirectXMathExtend.h> namespace DirectX { namespace hlsl { namespace traits { template <typename lhs_t, typename rhs_t> struct enable_if_binary_operator_type : public std::enable_if< binary_operator_traits<lhs_t, rhs_t>::overload, typename binary_operator_traits<lhs_t, rhs_t>::return_type > {}; template <typename lhs_t, typename rhs_t> using enable_if_binary_operator_t = typename enable_if_binary_operator_type<lhs_t, rhs_t>::type; template <typename expect_scalar_t, typename lhs_t, typename rhs_t> struct enable_binary_operator_on_scalar : public std::enable_if< binary_operator_traits<lhs_t, rhs_t>::overload && std::is_same< typename binary_operator_traits<lhs_t, rhs_t>::scalar_type, expect_scalar_t>::value, typename binary_operator_traits<lhs_t, rhs_t>::return_type > {}; template <typename expect_scalar_t, typename lhs_t, typename rhs_t> using enable_binary_operator_on_scalar_t = typename enable_binary_operator_on_scalar<expect_scalar_t, lhs_t, rhs_t>::type; template <typename expect_scalar_t, typename lhs_t> struct enable_unary_operator_on_scalar : public std::enable_if< unary_operator_traits<lhs_t>::overload && std::is_same<typename unary_operator_traits<lhs_t>::scalar_type, expect_scalar_t>::value, typename unary_operator_traits<lhs_t>::return_type> {}; template <typename expect_scalar_t, typename lhs_t> using enable_unary_operator_on_scalar_t = typename enable_unary_operator_on_scalar<expect_scalar_t, lhs_t>::type; } namespace detail { template <typename _Ty, size_t _Size> __forceinline XMVECTOR XM_CALLCONV xmfoward(const xmvector<_Ty, _Size>& xmv) { return xmv.v; }; template <typename _Ty> __forceinline XMVECTOR XM_CALLCONV xmfoward(const xmscalar<_Ty>& xms) { return xms.v; } // Load int / float template <typename _Ty> inline std::enable_if_t<traits::scalar_traits<_Ty>::value, XMVECTOR> XM_CALLCONV xmfoward(const _Ty& mvector) { return 
detail::replicate_scalar(mvector); } template <typename _Ty> inline std::enable_if_t<traits::is_memory_type<_Ty>::value, XMVECTOR> XM_CALLCONV xmfoward(const _Ty& mvector) { return load(mvector).v; } template <typename _Ty> inline std::enable_if_t<traits::is_expression<_Ty>::value, XMVECTOR> XM_CALLCONV xmfoward(const _Ty& mvector) { return mvector.eval().v; } } #define XM_MAKE_BINARY_CWISE_OPERATOR(stype,func,XMFunc,XMNS) \ template <typename lhs_t, typename rhs_t> \ inline traits::enable_binary_operator_on_scalar_t<stype, lhs_t, rhs_t> \ XM_CALLCONV func(const lhs_t& lhs, const rhs_t& rhs) \ { traits::binary_operator_return_type<lhs_t, rhs_t> ret; \ ret.v = XMNS XMFunc(detail::xmfoward(lhs), detail::xmfoward(rhs)); \ return ret;} #define XM_MAKE_UNARY_CWISE_OPERATOR(stype,func,XMFunc,XMNS) \ template <typename lhs_t> \ inline traits::enable_unary_operator_on_scalar_t<stype, lhs_t> \ XM_CALLCONV func(const lhs_t& lhs) \ { typename traits::unary_operator_traits<lhs_t>::return_type ret; \ ret.v = XMNS XMFunc(detail::xmfoward(lhs)); \ return ret;} // Comparison functions template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV equal(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { xmvector<uint, _Size> ret; ret.v = XMVectorEqual(lhs.v, rhs.v); return ret; } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV greater_equal(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { xmvector<uint, _Size> ret; ret.v = XMVectorGreaterOrEqual(lhs.v, rhs.v); return ret; } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV greater(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { xmvector<uint, _Size> ret; ret.v = XMVectorGreater(lhs.v, rhs.v); return ret; } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV less_equal(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { xmvector<uint, _Size> ret; ret.v = XMVectorLessOrEqual(lhs.v, rhs.v); return ret; } 
template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV less(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { xmvector<uint, _Size> ret; ret.v = XMVectorLess(lhs.v, rhs.v); return ret; } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV operator>(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { return greater(lhs, rhs); } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV operator>=(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { return greater_equal(lhs, rhs); } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV operator<(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { return less(lhs, rhs); } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV operator<=(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs) { return less_equal(lhs, rhs); } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV near_equal(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs, const xmscalar<float> epsilon) { xmvector<uint, _Size> ret; ret.v = XMVectorNearEqual(lhs.v, rhs.v, epsilon.v); return ret; } template <size_t _Size> inline xmvector<uint, _Size> XM_CALLCONV near_equal(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs, const xmvector<float, _Size> epsilon) { xmvector<uint, _Size> ret; ret.v = XMVectorNearEqual(lhs.v, rhs.v, epsilon.v); return ret; } // Bitwise operators for uint vectors // lhs & ~rhs XM_MAKE_BINARY_CWISE_OPERATOR(uint, andnot, XMVectorAndCInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, nor, XMVectorNorInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, equal, XMVectorEqualInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, not_equal, XMVectorNotEqualInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, and, XMVectorAndInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, or, XMVectorOrInt, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(uint, xor , XMVectorXorInt, 
XM_NAMES) // Float vector cwise functions XM_MAKE_BINARY_CWISE_OPERATOR(float, min, XMVectorMin, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(float, max, XMVectorMax, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, round, XMVectorRound, _DXMEXT) XM_MAKE_UNARY_CWISE_OPERATOR(float, ceil, XMVectorRound, _DXMEXT) XM_MAKE_UNARY_CWISE_OPERATOR(float, floor, XMVectorFloor, _DXMEXT) XM_MAKE_UNARY_CWISE_OPERATOR(float, trunc, XMVectorTruncate, _DXMEXT) XM_MAKE_UNARY_CWISE_OPERATOR(float, saturate, XMVectorSaturate, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, abs, XMVectorAbs, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, rcp, XMVectorReciprocal, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, rcpf, XMVectorReciprocalEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, sqrt, XMVectorSqrt, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, sqrtf, XMVectorSqrtEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, rsqrt, XMVectorReciprocalSqrt, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, rsqrtf, XMVectorReciprocalSqrtEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, exp, XMVectorExpE, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, exp2, XMVectorExp2, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, log, XMVectorLogE, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, log2, XMVectorLog2, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, sinh, XMVectorSinH, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, cosh, XMVectorCosH, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, tanh, XMVectorTanH, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, sin, XMVectorSin, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, cos, XMVectorCos, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, tan, XMVectorTan, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, sinf, XMVectorSinEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, cosf, XMVectorCosEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, tanf, XMVectorTanEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, asin, XMVectorASin, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, acos, XMVectorACos, 
XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, atan, XMVectorATan, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, asinf, XMVectorASinEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, acosf, XMVectorACosEst, XM_NAMES) XM_MAKE_UNARY_CWISE_OPERATOR(float, atanf, XMVectorATanEst, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(float, pow, XMVectorPow, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(float, atan2, XMVectorATan2, XM_NAMES) XM_MAKE_BINARY_CWISE_OPERATOR(float, atan2f, XMVectorATan2Est, XM_NAMES) template <size_t _Size> inline xmvector<float, _Size> XM_CALLCONV clamp(const xmvector<float, _Size> lhs, const xmvector<float, _Size> _min, const xmvector<float, _Size> _max) { xmvector<float, _Size> ret; ret.v = XM_NAMES XMVectorClamp(lhs.v, _min.v, _max.v); return ret; } inline xmscalar<float> XM_CALLCONV clamp(const xmscalar<float> lhs, const xmscalar<float> _min, const xmscalar<float> _max) { xmscalar<float> ret; ret.v = XM_NAMES XMVectorClamp(lhs.v, _min.v, _max.v); return ret; } template <typename _Ty, size_t _Size> inline xmvector<_Ty, _Size> XM_CALLCONV select(const xmvector<_Ty, _Size> lhs, const xmvector<_Ty, _Size> rhs, const xmvector<uint, _Size> mask) { xmvector<_Ty, _Size> ret; ret.v = XM_NAMES XMVectorSelect(lhs.v, rhs.v, mask.v); return ret; } template <size_t _Size> inline xmvector<float, _Size> XM_CALLCONV lerp(const xmvector<float, _Size> lhs, const xmvector<float, _Size> rhs, const xmscalar<float> t) { xmvector<float, _Size> ret; ret.v = XM_NAMES XMVectorLerpV(lhs.v, rhs.v, t.v); return ret; } inline xmscalar<float> XM_CALLCONV lerp(const xmscalar<float> lhs, const xmscalar<float> rhs, const xmscalar<float> t) { xmscalar<float> ret; ret.v = XM_NAMES XMVectorLerpV(lhs.v, rhs.v, t.v); return ret; } inline xmfloat XM_CALLCONV dot(const xmvector2f a, const xmvector2f b) { xmfloat ret; ret.v = _DXMEXT XMVector2Dot(a.v, b.v); return ret; } inline xmfloat XM_CALLCONV dot(const xmvector3f a, const xmvector3f b) { xmfloat ret; ret.v = _DXMEXT XMVector3Dot(a.v, b.v); 
return ret; } inline xmfloat XM_CALLCONV dot(const xmvector4f a, const xmvector4f b) { xmfloat ret; ret.v = _DXMEXT XMVector4Dot(a.v, b.v); return ret; } inline xmfloat XM_CALLCONV length(const xmvector2f a) { xmfloat ret; ret.v = _DXMEXT XMVector2Length(a.v); return ret; } inline xmfloat XM_CALLCONV length(const xmvector3f a) { xmfloat ret; ret.v = _DXMEXT XMVector3Length(a.v); return ret; } inline xmfloat XM_CALLCONV length(const xmvector4f a) { xmfloat ret; ret.v = _DXMEXT XMVector4Length(a.v); return ret; } inline xmvector2f XM_CALLCONV normalize(const xmvector2f a) { xmvector2f ret; ret.v = _DXMEXT XMVector2Normalize(a.v); return ret; } inline xmvector3f XM_CALLCONV normalize(const xmvector3f a) { xmvector3f ret; ret.v = _DXMEXT XMVector3Normalize(a.v); return ret; } inline xmvector4f XM_CALLCONV normalize(const xmvector4f a) { xmvector4f ret; ret.v = _DXMEXT XMVector4Normalize(a.v); return ret; } // 2d vector cross product inline xmfloat XM_CALLCONV cross(const xmvector2f a, const xmvector2f b) { xmfloat ret; ret.v = XMVector2Cross(a.v, b.v); return ret; } // 3d vector cross product inline xmvector3f XM_CALLCONV cross(const xmvector3f a, const xmvector3f b) { xmvector3f ret; ret.v = XMVector3Cross(a.v, b.v); return ret; } // 4d vector cross product inline xmvector4f XM_CALLCONV cross(const xmvector4f a, const xmvector4f b, const xmvector4f c) { xmvector4f ret; ret.v = XMVector4Cross(a.v, b.v, c.v); return ret; } } }
36.510511
159
0.748643
[ "vector", "3d" ]
0f842d7378867f6f4e485d51ca109469d215049d
24,688
cc
C++
inet-3.6.4/src/inet/linklayer/xmac/XMacLayer.cc
danushkam/rocc
d4fae7dee35869dded24985221beb218e4ce81e0
[ "CC0-1.0" ]
12
2020-11-30T08:04:23.000Z
2022-03-23T11:49:26.000Z
inet-3.6.4/src/inet/linklayer/xmac/XMacLayer.cc
danushkam/rocc
d4fae7dee35869dded24985221beb218e4ce81e0
[ "CC0-1.0" ]
1
2021-01-26T10:49:56.000Z
2021-01-31T16:58:52.000Z
inet-3.6.4/src/inet/linklayer/xmac/XMacLayer.cc
danushkam/rocc
d4fae7dee35869dded24985221beb218e4ce81e0
[ "CC0-1.0" ]
8
2021-03-15T02:05:51.000Z
2022-03-21T13:14:02.000Z
// // Copyright (C) 2017 Jan Peter Drees // Copyright (C) 2015 Joaquim Oller // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with this program; if not, see <http://www.gnu.org/licenses/>. // #include "XMacLayer.h" #include <cassert> #include "inet/common/ModuleAccess.h" #include "inet/common/FindModule.h" #include "inet/common/INETUtils.h" #include "inet/common/queue/IPassiveQueue.h" #include "inet/networklayer/contract/IInterfaceTable.h" #include "inet/linklayer/common/Ieee802Ctrl.h" #include "inet/linklayer/common/SimpleLinkLayerControlInfo.h" #include "inet/linklayer/contract/IMACProtocolControlInfo.h" #include "inet/linklayer/xmac/XMacFrame_m.h" namespace inet { Define_Module(XMacLayer); simsignal_t XMacLayer::packetFromUpperDroppedSignal = registerSignal("packetFromUpperDroppedSignal"); /** * Initialize method of XMacLayer. Init all parameters, schedule timers. */ void XMacLayer::initialize(int stage) { MACProtocolBase::initialize(stage); if (stage == INITSTAGE_LOCAL) { queueLength = hasPar("queueLength") ? par("queueLength") : 10; animation = hasPar("animation") ? par("animation") : true; slotDuration = hasPar("slotDuration") ? par("slotDuration") : 1.; bitrate = hasPar("bitrate") ? par("bitrate") : 15360.; headerLength = par("headerLength").longValue(); checkInterval = hasPar("checkInterval") ? par("checkInterval") : 0.1; txPower = hasPar("txPower") ? par("txPower") : 50.; useMacAcks = hasPar("useMACAcks") ? 
par("useMACAcks") : false; maxTxAttempts = hasPar("maxTxAttempts") ? par("maxTxAttempts") : 2; EV_DEBUG << "headerLength: " << headerLength << ", bitrate: " << bitrate << endl; stats = par("stats"); nbTxDataPackets = 0; nbTxPreambles = 0; nbRxDataPackets = 0; nbRxPreambles = 0; nbMissedAcks = 0; nbRecvdAcks=0; nbDroppedDataPackets=0; nbRxBrokenDataPackets = 0; nbTxAcks=0; txAttempts = 0; lastDataPktDestAddr = MACAddress::BROADCAST_ADDRESS; lastDataPktSrcAddr = MACAddress::BROADCAST_ADDRESS; cModule *radioModule = getModuleFromPar<cModule>(par("radioModule"), this); radioModule->subscribe(IRadio::radioModeChangedSignal, this); radioModule->subscribe(IRadio::transmissionStateChangedSignal, this); radio = check_and_cast<IRadio *>(radioModule); macState = INIT; WATCH(macState); } else if(stage == INITSTAGE_LINK_LAYER) { initializeMACAddress(); registerInterface(); wakeup = new cMessage("wakeup"); wakeup->setKind(XMAC_WAKE_UP); data_timeout = new cMessage("data_timeout"); data_timeout->setKind(XMAC_DATA_TIMEOUT); data_timeout->setSchedulingPriority(100); data_tx_over = new cMessage("data_tx_over"); data_tx_over->setKind(XMAC_DATA_TX_OVER); stop_preambles = new cMessage("stop_preambles"); stop_preambles->setKind(XMAC_STOP_PREAMBLES); send_preamble = new cMessage("send_preamble"); send_preamble->setKind(XMAC_SEND_PREAMBLE); ack_tx_over = new cMessage("ack_tx_over"); ack_tx_over->setKind(XMAC_ACK_TX_OVER); cca_timeout = new cMessage("cca_timeout"); cca_timeout->setKind(XMAC_CCA_TIMEOUT); cca_timeout->setSchedulingPriority(100); send_ack = new cMessage("send_ack"); send_ack->setKind(XMAC_SEND_ACK); start_xmac = new cMessage("start_xmac"); start_xmac->setKind(XMAC_START_XMAC); ack_timeout = new cMessage("ack_timeout"); ack_timeout->setKind(XMAC_ACK_TIMEOUT); resend_data = new cMessage("resend_data"); resend_data->setKind(XMAC_RESEND_DATA); resend_data->setSchedulingPriority(100); switch_preamble_phase = new cMessage("switch_preamble_phase"); 
switch_preamble_phase->setKind(SWITCH_PREAMBLE_PHASE); delay_for_ack_within_remote_rx = new cMessage("delay_for_ack_within_remote_rx"); delay_for_ack_within_remote_rx->setKind(DELAY_FOR_ACK_WITHIN_REMOTE_RX); switching_done = new cMessage("switching_done"); switching_done->setKind(XMAC_SWITCHING_FINISHED); scheduleAt(0.0, start_xmac); } } XMacLayer::~XMacLayer() { cancelAndDelete(wakeup); cancelAndDelete(data_timeout); cancelAndDelete(data_tx_over); cancelAndDelete(stop_preambles); cancelAndDelete(send_preamble); cancelAndDelete(ack_tx_over); cancelAndDelete(cca_timeout); cancelAndDelete(send_ack); cancelAndDelete(start_xmac); cancelAndDelete(ack_timeout); cancelAndDelete(resend_data); cancelAndDelete(switch_preamble_phase); cancelAndDelete(delay_for_ack_within_remote_rx); cancelAndDelete(switching_done); } void XMacLayer::flushQueue() { MacQueue::iterator it; for(it = macQueue.begin(); it != macQueue.end(); ++it) { delete (*it); } macQueue.clear(); } void XMacLayer::clearQueue() { macQueue.clear(); } void XMacLayer::finish() { // record stats if (stats) { recordScalar("nbTxDataPackets", nbTxDataPackets); recordScalar("nbTxPreambles", nbTxPreambles); recordScalar("nbRxDataPackets", nbRxDataPackets); recordScalar("nbRxPreambles", nbRxPreambles); recordScalar("nbMissedAcks", nbMissedAcks); recordScalar("nbRecvdAcks", nbRecvdAcks); recordScalar("nbTxAcks", nbTxAcks); recordScalar("nbDroppedDataPackets", nbDroppedDataPackets); recordScalar("nbRxBrokenDataPackets", nbRxBrokenDataPackets); //recordScalar("timeSleep", timeSleep); //recordScalar("timeRX", timeRX); //recordScalar("timeTX", timeTX); } } void XMacLayer::initializeMACAddress() { const char *addrstr = par("address"); if (!strcmp(addrstr, "auto")) { // assign automatic address address = MACAddress::generateAutoAddress(); // change module parameter from "auto" to concrete address par("address").setStringValue(address.str().c_str()); } else { address.setAddress(addrstr); } } InterfaceEntry 
*XMacLayer::createInterfaceEntry() { InterfaceEntry *e = new InterfaceEntry(this); // data rate e->setDatarate(bitrate); // generate a link-layer address to be used as interface token for IPv6 e->setMACAddress(address); e->setInterfaceToken(address.formInterfaceIdentifier()); // capabilities e->setMtu(par("mtu").longValue()); e->setMulticast(false); e->setBroadcast(true); return e; } /** * Check whether the queue is not full: if yes, print a warning and drop the * packet. Then initiate sending of the packet, if the node is sleeping. Do * nothing, if node is working. */ void XMacLayer::handleUpperPacket(cPacket *msg) { bool pktAdded = addToQueue(msg); if (!pktAdded) return; // force wakeup now if (wakeup->isScheduled() && (macState == SLEEP)) { cancelEvent(wakeup); scheduleAt(simTime() + dblrand()*0.01f, wakeup); }else if(macState == SLEEP){ } } /** * Send one short preamble packet immediately. */ void XMacLayer::sendPreamble(MACAddress preamble_address) { //~ diff with BMAC, @ in preamble! XMacFrame* preamble = new XMacFrame("Preamble"); preamble->setSrc(address); preamble->setDest(preamble_address); preamble->setKind(XMAC_PREAMBLE); preamble->setBitLength(headerLength); attachSignal(preamble, simTime()); sendDown(preamble); nbTxPreambles++; } /** * Send one short preamble packet immediately. 
*/ void XMacLayer::sendMacAck() { XMacFrame* ack = new XMacFrame("Acknowledgment"); ack->setSrc(address); //~ diff with BMAC, ack_preamble_based ack->setDest(lastPreamblePktSrcAddr); ack->setKind(XMAC_ACK); ack->setBitLength(headerLength); attachSignal(ack, simTime()); sendDown(ack); nbTxAcks++; } /** * Handle own messages: */ void XMacLayer::handleSelfMessage(cMessage *msg) { switch (macState) { case INIT: if (msg->getKind() == XMAC_START_XMAC) { EV_DEBUG << "State INIT, message XMAC_START, new state SLEEP" << endl; changeDisplayColor(BLACK); radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); macState = SLEEP; scheduleAt(simTime()+dblrand()*slotDuration, wakeup); return; } break; case SLEEP: if (msg->getKind() == XMAC_WAKE_UP) { EV_DEBUG << "node " << address << " : State SLEEP, message XMAC_WAKEUP, new state CCA, simTime " << simTime() << " to " << simTime() + 1.7f * checkInterval << endl; // this CCA is useful when in RX to detect preamble and has to make room for // 0.2f = Tx switch, 0.5f = Tx send_preamble, 1f = time_for_ack_back scheduleAt(simTime() + 1.7f * checkInterval, cca_timeout); radio->setRadioMode(IRadio::RADIO_MODE_RECEIVER); changeDisplayColor(GREEN); macState = CCA; return; } // we receive an ACK back but it is too late else if (msg->getKind() == XMAC_ACK) { nbMissedAcks++; delete msg; return; } // received messages prior real-switching to SLEEP? 
I'm sorry, out else { return; } break; case CCA: if (msg->getKind() == XMAC_CCA_TIMEOUT) { // channel is clear and we wanna SEND if (macQueue.size() > 0) { radio->setRadioMode(IRadio::RADIO_MODE_TRANSMITTER); changeDisplayColor(YELLOW); macState = SEND_PREAMBLE; // We send the preamble for a whole SLOT duration :) scheduleAt(simTime() + slotDuration, stop_preambles); // if 0.2f * CI = 2ms to switch to TX -> has to be accounted for RX_preamble_detection scheduleAt(simTime() + 0.2f * checkInterval, switch_preamble_phase); return; } // if anything to send, go back to sleep and wake up after a full period else { scheduleAt(simTime() + slotDuration, wakeup); macState = SLEEP; radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); changeDisplayColor(BLACK); return; } } // during CCA, we received a preamble. Go to state WAIT_DATA and // schedule the timeout. if (msg->getKind() == XMAC_PREAMBLE) { XMacFrame* incoming_preamble = static_cast<XMacFrame *>(msg); // preamble is for me if (incoming_preamble->getDest() == address || incoming_preamble->getDest().isBroadcast() || incoming_preamble->getDest().isMulticast()) { cancelEvent(cca_timeout); nbRxPreambles++; EV << "node " << address << " : State CCA, message XMAC_PREAMBLE received, new state SEND_ACK" << endl; macState = SEND_ACK; lastPreamblePktSrcAddr = incoming_preamble->getSrc(); changeDisplayColor(YELLOW); radio->setRadioMode(IRadio::RADIO_MODE_TRANSMITTER); } // the preamble is not for us else { EV << "node " << address << " : State CCA, message XMAC_PREAMBLE not for me." << endl; //~ better overhearing management? 
:) cancelEvent(cca_timeout); scheduleAt(simTime() + slotDuration, wakeup); macState = SLEEP; radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); changeDisplayColor(BLACK); } delete msg; return; } //in case we get an ACK, we simply dicard it, because it means the end //of another communication if (msg->getKind() == XMAC_ACK) { EV_DEBUG << "State CCA, message XMAC_ACK, new state CCA" << endl; delete msg; return; } // this case is very, very, very improbable, but let's do it. // if in CCA the node receives directly the data packet, accept it // even if we increased nbMissedAcks in state SLEEP if (msg->getKind() == XMAC_DATA) { XMacFrame* incoming_data = static_cast<XMacFrame *>(msg); // packet is for me if (incoming_data->getDest() == address) { EV << "node " << address << " : State CCA, received XMAC_DATA, accepting it." << endl; cancelEvent(cca_timeout); cancelEvent(switch_preamble_phase); cancelEvent(stop_preambles); macState = WAIT_DATA; scheduleAt(simTime(), msg); } return; } break; case SEND_PREAMBLE: if (msg->getKind() == SWITCH_PREAMBLE_PHASE) { //~ make room for preamble + time_for_ack_back, check_interval is 10ms by default (from NetworkXMAC.ini) // 0.5f* = 5ms if (radio->getRadioMode() == IRadio::RADIO_MODE_RECEIVER) { radio->setRadioMode(IRadio::RADIO_MODE_TRANSMITTER); changeDisplayColor(YELLOW); EV_DEBUG << "node " << address << " : preamble_phase tx, simTime = " << simTime() << endl; scheduleAt(simTime() + 0.5f * checkInterval, switch_preamble_phase); } // 1.0f* = 10ms else if (radio->getRadioMode() == IRadio::RADIO_MODE_TRANSMITTER) { radio->setRadioMode(IRadio::RADIO_MODE_RECEIVER); changeDisplayColor(GREEN); EV_DEBUG << "node " << address << " : preamble_phase rx, simTime = " << simTime() << endl; scheduleAt(simTime() + 1.0f *checkInterval, switch_preamble_phase); } return; } // radio switch from above if(msg->getKind() == XMAC_SWITCHING_FINISHED) { if (radio->getRadioMode() == IRadio::RADIO_MODE_TRANSMITTER) { XMacFrame *pkt_preamble = macQueue.front(); 
sendPreamble(pkt_preamble->getDest()); } return; } // ack_rx within sending_preamble or preamble_timeout without an ACK if ((msg->getKind() == XMAC_ACK) || (msg->getKind() == XMAC_STOP_PREAMBLES)) { //~ ADDED THE SECOND CONDITION! :) if not, below if (msg->getKind() == XMAC_ACK){ delete msg; EV << "node " << address << " : State SEND_PREAMBLE, message XMAC_ACK, new state SEND_DATA" << endl; }else if (msg->getKind() == XMAC_STOP_PREAMBLES){ EV << "node " << address << " : State SEND_PREAMBLE, message XMAC_STOP_PREAMBLES" << endl; } macState = SEND_DATA; cancelEvent(stop_preambles); cancelEvent(switch_preamble_phase); changeDisplayColor(RED); radio->setRadioMode(IRadio::RADIO_MODE_TRANSMITTER); txAttempts = 1; return; } // next is the case of a node receiving 1 preamble or data while in his preamble gaps, ignore, we are sending! if ((msg->getKind() == XMAC_PREAMBLE) || (msg->getKind() == XMAC_DATA)) { if(msg->getKind() == XMAC_DATA){ nbDroppedDataPackets++; } delete msg; return; } else { EV << "**node " << address << " : State SEND_PREAMBLE, received message " << msg->getKind() << endl; return; } break; case SEND_DATA: if (msg->getKind() == XMAC_STOP_PREAMBLES) { EV << "node " << address << " : State SEND_DATA, message XMAC_STOP_PREAMBLES" << endl; // send the data packet sendDataPacket(); macState = WAIT_TX_DATA_OVER; return; }else if (msg->getKind() == XMAC_SWITCHING_FINISHED) { EV << "node " << address << " : State SEND_DATA, message RADIO_SWITCHING OVER, sending packet..." << endl; // send the data packet sendDataPacket(); macState = WAIT_TX_DATA_OVER; return; } else { return; } break; case WAIT_TX_DATA_OVER: if (msg->getKind() == XMAC_DATA_TX_OVER) { EV_DEBUG << "node " << address << " : State WAIT_TX_DATA_OVER, message XMAC_DATA_TX_OVER, new state SLEEP" << endl; // remove the packet just served from the queue delete macQueue.front(); macQueue.pop_front(); // if something in the queue, wakeup soon. 
if (macQueue.size() > 0) scheduleAt(simTime() + dblrand()*checkInterval, wakeup); else scheduleAt(simTime() + slotDuration, wakeup); macState = SLEEP; radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); changeDisplayColor(BLACK); return; } break; case WAIT_ACK: //not used break; case WAIT_DATA: if (msg->getKind() == XMAC_PREAMBLE) { //nothing happens nbRxPreambles++; delete msg; return; } if (msg->getKind() == XMAC_ACK) { //nothing happens delete msg; return; } if (msg->getKind() == XMAC_DATA) { XMacFrame* mac = static_cast<XMacFrame *>(msg); const MACAddress& dest = mac->getDest(); if ((dest == address) || dest.isBroadcast() || dest.isMulticast()) { sendUp(decapsMsg(mac)); delete mac; nbRxDataPackets++; cancelEvent(data_timeout); // if something in the queue, wakeup soon. if (macQueue.size() > 0) scheduleAt(simTime() + dblrand()*checkInterval, wakeup); else scheduleAt(simTime() + slotDuration, wakeup); macState = SLEEP; radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); changeDisplayColor(BLACK); } else { delete msg; msg = NULL; mac = NULL; } EV << "node " << address << " : State WAIT_DATA, message XMAC_DATA, new state SLEEP" << endl; return; } // data does not arrives in time if (msg->getKind() == XMAC_DATA_TIMEOUT) { EV << "node " << address << " : State WAIT_DATA, message XMAC_DATA_TIMEOUT, new state SLEEP" << endl; // if something in the queue, wakeup soon. 
if (macQueue.size() > 0) scheduleAt(simTime() + dblrand()*checkInterval, wakeup); else scheduleAt(simTime() + slotDuration, wakeup); macState = SLEEP; radio->setRadioMode(IRadio::RADIO_MODE_SLEEP); changeDisplayColor(BLACK); return; } break; case SEND_ACK: // send now the ack packet if (msg->getKind() == DELAY_FOR_ACK_WITHIN_REMOTE_RX) { EV_DEBUG << "node " << address << " : State SEND_ACK, message XMAC_SEND_ACK, new state WAIT_ACK_TX" << endl; sendMacAck(); macState = WAIT_ACK_TX; return; } break; case WAIT_ACK_TX: // wait for the ACK to be sent back to the Transmitter if (msg->getKind() == XMAC_ACK_TX_OVER) { EV_DEBUG << "node " << address << " : State WAIT_ACK_TX, message XMAC_ACK_TX_OVER, new state WAIT_DATA" << endl; changeDisplayColor(GREEN); macState = WAIT_DATA; cancelEvent(cca_timeout); scheduleAt(simTime() + (slotDuration / 2), data_timeout); radio->setRadioMode(IRadio::RADIO_MODE_RECEIVER); return; } break; } throw cRuntimeError("Undefined event of type %d in state %d (Radio state %d)!", msg->getKind(), macState, radio->getRadioMode()); } /** * Handle XMAC preambles and received data packets. */ void XMacLayer::handleLowerPacket(cPacket *msg) { if (msg->hasBitError()) { EV << "Received " << msg << " contains bit errors or collision, dropping it\n"; nbRxBrokenDataPackets++; delete msg; return; } // simply pass the massage as self message, to be processed by the FSM. handleSelfMessage(msg); } void XMacLayer::sendDataPacket() { nbTxDataPackets++; XMacFrame* pkt = check_and_cast<XMacFrame *>(macQueue.front()->dup()); attachSignal(pkt, simTime()); lastDataPktDestAddr = pkt->getDest(); pkt->setKind(XMAC_DATA); sendDown(pkt); } /** * Handle transmission over messages: either send another preambles or the data * packet itself. 
*/ void XMacLayer::receiveSignal(cComponent *source, simsignal_t signalID, long value, cObject *details) { Enter_Method_Silent(); if (signalID == IRadio::transmissionStateChangedSignal) { IRadio::TransmissionState newRadioTransmissionState = (IRadio::TransmissionState)value; if (transmissionState == IRadio::TRANSMISSION_STATE_TRANSMITTING && newRadioTransmissionState == IRadio::TRANSMISSION_STATE_IDLE) { // Transmission of one packet is over if (macState == WAIT_TX_DATA_OVER) { scheduleAt(simTime(), data_tx_over); } if (macState == WAIT_ACK_TX) { scheduleAt(simTime(), ack_tx_over); } } transmissionState = newRadioTransmissionState; }else if(signalID ==IRadio::radioModeChangedSignal){ // Radio switching (to RX or TX) is over, ignore switching to SLEEP. if(macState == SEND_PREAMBLE) { scheduleAt(simTime(), switching_done); } else if(macState == SEND_ACK) { scheduleAt(simTime() + 0.5f * checkInterval, delay_for_ack_within_remote_rx); } else if(macState == SEND_DATA) { scheduleAt(simTime(), switching_done); } } } /** * Encapsulates the received network-layer packet into a MacPkt and set all * needed header fields. 
*/ bool XMacLayer::addToQueue(cMessage *msg) { if (macQueue.size() >= queueLength) { // queue is full, message has to be deleted EV_DEBUG << "New packet arrived, but queue is FULL, so new packet is" " deleted\n"; emit(packetFromUpperDroppedSignal, msg); delete msg; nbDroppedDataPackets++; return false; } XMacFrame *macPkt = new XMacFrame(msg->getName()); macPkt->setBitLength(headerLength); IMACProtocolControlInfo *const cInfo = check_and_cast<IMACProtocolControlInfo *>(msg->removeControlInfo()); EV_DETAIL << "CSMA received a message from upper layer, name is " << msg->getName() << ", CInfo removed, mac addr=" << cInfo->getDestinationAddress() << endl; MACAddress dest = cInfo->getDestinationAddress(); macPkt->setDest(dest); delete cInfo; macPkt->setSrc(address); assert(static_cast<cPacket*>(msg)); macPkt->encapsulate(static_cast<cPacket*>(msg)); EV_DETAIL << "pkt encapsulated, length: " << macPkt->getBitLength() << "\n"; macQueue.push_back(macPkt); EV_DEBUG << "Max queue length: " << queueLength << ", packet put in queue" "\n queue size: " << macQueue.size() << " macState: " << macState << endl; return true; } void XMacLayer::attachSignal(XMacFrame *mac, simtime_t_cref startTime) { simtime_t duration = mac->getBitLength() / bitrate; mac->setDuration(duration); } /** * Change the color of the node for animation purposes. 
*/ void XMacLayer::changeDisplayColor(XMAC_COLORS color) { if (!animation) return; cDisplayString& dispStr = getContainingNode(this)->getDisplayString(); //b=40,40,rect,black,black,2" if (color == GREEN) dispStr.setTagArg("b", 3, "green"); //dispStr.parse("b=40,40,rect,green,green,2"); if (color == BLUE) dispStr.setTagArg("b", 3, "blue"); //dispStr.parse("b=40,40,rect,blue,blue,2"); if (color == RED) dispStr.setTagArg("b", 3, "red"); //dispStr.parse("b=40,40,rect,red,red,2"); if (color == BLACK) dispStr.setTagArg("b", 3, "black"); //dispStr.parse("b=40,40,rect,black,black,2"); if (color == YELLOW) dispStr.setTagArg("b", 3, "yellow"); //dispStr.parse("b=40,40,rect,yellow,yellow,2"); } /** * Decapsulate a X-MAC frame. */ cPacket *XMacLayer::decapsMsg(XMacFrame *macPkt) { cPacket *msg = macPkt->decapsulate(); setUpControlInfo(msg, macPkt->getSrc()); return msg; } /** * Attaches a "control info" (MacToNetw) structure (object) to the message pMsg. */ cObject *XMacLayer::setUpControlInfo(cMessage *const pMsg, const MACAddress& pSrcAddr) { SimpleLinkLayerControlInfo *const cCtrlInfo = new SimpleLinkLayerControlInfo(); cCtrlInfo->setSrc(pSrcAddr); pMsg->setControlInfo(cCtrlInfo); return cCtrlInfo; } } // namespace inet
33.497965
163
0.632656
[ "object" ]
0f863728769e4860e3e8e1a88c3ca6f9b259ed30
5,339
cc
C++
verilog/tools/kythe/scope_resolver.cc
DhairyaBahl/verible
6a5000970643d34c714dcd4be9fd3330defd173c
[ "Apache-2.0" ]
null
null
null
verilog/tools/kythe/scope_resolver.cc
DhairyaBahl/verible
6a5000970643d34c714dcd4be9fd3330defd173c
[ "Apache-2.0" ]
null
null
null
verilog/tools/kythe/scope_resolver.cc
DhairyaBahl/verible
6a5000970643d34c714dcd4be9fd3330defd173c
[ "Apache-2.0" ]
null
null
null
// Copyright 2017-2020 The Verible Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "verilog/tools/kythe/scope_resolver.h" #include <string> #include <vector> namespace verilog { namespace kythe { bool ScopeMemberItem::operator==(const ScopeMemberItem& other) const { return this->vname == other.vname; } bool ScopeMemberItem::operator<(const ScopeMemberItem& other) const { return this->vname < other.vname; } void Scope::AddMemberItem(const ScopeMemberItem& member_item) { members_.insert(member_item); } void Scope::AppendScope(const Scope& scope) { for (const ScopeMemberItem& item : scope.Members()) { this->AddMemberItem(item); } } const VName* Scope::SearchForDefinition(absl::string_view name) const { for (const ScopeMemberItem& member_item : verible::make_range(members_.rbegin(), members_.rend())) { if (member_item.vname.signature.IsNameEqual(name)) { return &member_item.vname; } } return nullptr; } const VName* ScopeContext::SearchForDefinition(absl::string_view name) const { for (const auto& scope : verible::make_range(rbegin(), rend())) { const VName* result = scope->SearchForDefinition(name); if (result != nullptr) { return result; } } return nullptr; } void ScopeResolver::MapSignatureToScope(const Signature& signature, const Scope& scope) { scopes_[signature] = scope; } void ScopeResolver::AppendScopeToScopeContext(const Scope& scope) { scope_context_.top().AppendScope(scope); } void ScopeResolver::AddDefinitionToScopeContext( const 
ScopeMemberItem& new_member) { scope_context_.top().AddMemberItem(new_member); } const VName* ScopeResolver::SearchForDefinitionInGlobalScope( absl::string_view reference_name) const { const VName* definition = SearchForDefinitionInScope(global_scope_signature_, reference_name); if (definition != nullptr) { return definition; } if (previous_file_scope_resolver_ != nullptr) { return previous_file_scope_resolver_->SearchForDefinitionInGlobalScope( reference_name); } return nullptr; } const VName* ScopeResolver::SearchForDefinitionInScopeContext( absl::string_view reference_name) const { return scope_context_.SearchForDefinition(reference_name); } const VName* ScopeResolver::SearchForDefinitionInCurrentScope( absl::string_view name) const { return scope_context_.top().SearchForDefinition(name); } const std::vector<std::pair<const VName*, const Scope*>> ScopeResolver::SearchForDefinitions( const std::vector<std::string>& names) const { std::vector<std::pair<const VName*, const Scope*>> definitions; // Try to find the definition in the scopes of the current file. const VName* definition = SearchForDefinitionInScopeContext(names[0]); // Try to find the definition in the previous files' scopes. if (definition == nullptr && previous_file_scope_resolver_ != nullptr) { // This is a linear-time search over files. definition = previous_file_scope_resolver_->SearchForDefinitionInGlobalScope( names[0]); } if (definition == nullptr) { return definitions; } const Scope* current_scope = SearchForScope(definition->signature); definitions.push_back({definition, current_scope}); // Iterate over the names and try to find the definition in the current scope. 
for (const auto& name : verible::make_range(names.begin() + 1, names.end())) { if (current_scope == nullptr) { break; } const VName* definition = current_scope->SearchForDefinition(name); if (definition == nullptr) { break; } current_scope = SearchForScope(definition->signature); definitions.push_back({definition, current_scope}); } return definitions; } const Scope* ScopeResolver::SearchForScope(const Signature& signature) const { const auto scope = scopes_.find(signature); if (scope != scopes_.end()) { return &scope->second; } // Try to find the definition in the previous files' scopes. // This is a linear-time search over files. if (previous_file_scope_resolver_ != nullptr) { return previous_file_scope_resolver_->SearchForScope(signature); } return nullptr; } const VName* ScopeResolver::SearchForDefinitionInScope( const Signature& signature, absl::string_view name) const { const Scope* scope = SearchForScope(signature); if (scope == nullptr) { return nullptr; } return scope->SearchForDefinition(name); } void ScopeResolver::MapSignatureToScopeOfSignature( const Signature& signature, const Signature& other_signature) { const Scope* other_scope = SearchForScope(other_signature); if (other_scope == nullptr) { return; } MapSignatureToScope(signature, *other_scope); } } // namespace kythe } // namespace verilog
31.040698
80
0.727852
[ "vector" ]
0f87c4f360f7d146a140e61326128a98ab3fbc0f
1,724
cpp
C++
tests/vector/vector_comparison1.cpp
egorodet/CML
e3fd8ccbe9775ff6e0e41fd6a274b557a80c9d1f
[ "BSL-1.0" ]
125
2015-07-22T11:39:51.000Z
2022-03-06T13:41:44.000Z
tests/vector/vector_comparison1.cpp
egorodet/CML
e3fd8ccbe9775ff6e0e41fd6a274b557a80c9d1f
[ "BSL-1.0" ]
45
2015-06-03T15:50:08.000Z
2021-05-26T01:35:01.000Z
tests/vector/vector_comparison1.cpp
egorodet/CML
e3fd8ccbe9775ff6e0e41fd6a274b557a80c9d1f
[ "BSL-1.0" ]
28
2015-06-03T09:26:26.000Z
2022-03-06T13:42:06.000Z
/* -*- C++ -*- ------------------------------------------------------------ @@COPYRIGHT@@ *-----------------------------------------------------------------------*/ /** @file */ // Make sure the main header compiles cleanly: #include <cml/vector/comparison.h> #include <cml/vector/fixed.h> #include <cml/vector/types.h> /* Testing headers: */ #include "catch_runner.h" CATCH_TEST_CASE("less_greater1") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 2., 2., 2. }; CATCH_REQUIRE(v < w); CATCH_REQUIRE(w > v); } CATCH_TEST_CASE("less_greater2") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 2. }; CATCH_REQUIRE(w < v); CATCH_REQUIRE(v > w); } CATCH_TEST_CASE("less_greater3") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 3. }; CATCH_REQUIRE(!(w < v)); CATCH_REQUIRE(!(v < w)); } CATCH_TEST_CASE("less_equal1") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 3. }; CATCH_REQUIRE(v <= w); CATCH_REQUIRE(w <= v); } CATCH_TEST_CASE("less_equal2") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 4. }; CATCH_REQUIRE(v <= w); CATCH_REQUIRE(!(w <= v)); } CATCH_TEST_CASE("greater_equal1") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 3. }; CATCH_REQUIRE(v >= w); CATCH_REQUIRE(w >= v); } CATCH_TEST_CASE("greater_equal2") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 4. }; CATCH_REQUIRE(w >= v); CATCH_REQUIRE(!(v >= w)); } CATCH_TEST_CASE("equal1") { cml::vector3d v = { 1., 2., 3. }; cml::vector3d w = { 1., 2., 3. }; CATCH_REQUIRE(w == v); } // ------------------------------------------------------------------------- // vim:ft=cpp:sw=2
21.283951
76
0.490719
[ "vector" ]
0f87fda07d679473c7018badb48b0b00a63775c8
14,686
cpp
C++
Engine/Source/ThirdParty/IntelEmbree/Embree270/src/kernels/xeon/bvh4/bvh4_intersector4_chunk.cpp
windystrife/UnrealEngine_NVIDIAGameWork
b50e6338a7c5b26374d66306ebc7807541ff815e
[ "MIT" ]
1
2022-01-29T18:36:12.000Z
2022-01-29T18:36:12.000Z
Engine/Source/ThirdParty/IntelEmbree/Embree270/src/kernels/xeon/bvh4/bvh4_intersector4_chunk.cpp
windystrife/UnrealEngine_NVIDIAGameWork
b50e6338a7c5b26374d66306ebc7807541ff815e
[ "MIT" ]
null
null
null
Engine/Source/ThirdParty/IntelEmbree/Embree270/src/kernels/xeon/bvh4/bvh4_intersector4_chunk.cpp
windystrife/UnrealEngine_NVIDIAGameWork
b50e6338a7c5b26374d66306ebc7807541ff815e
[ "MIT" ]
null
null
null
// ======================================================================== // // Copyright 2009-2015 Intel Corporation // // // // Licensed under the Apache License, Version 2.0 (the "License"); // // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // // // // http://www.apache.org/licenses/LICENSE-2.0 // // // // Unless required by applicable law or agreed to in writing, software // // distributed under the License is distributed on an "AS IS" BASIS, // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // // limitations under the License. // // ======================================================================== // #include "bvh4_intersector4_chunk.h" #include "bvh4_intersector_node.h" #include "../geometry/triangle4.h" #include "../geometry/triangle4i.h" #include "../geometry/triangle4v.h" #include "../geometry/triangle4v_mb.h" #include "../geometry/triangle8.h" #include "../geometry/intersector_iterators.h" #include "../geometry/bezier1v_intersector.h" #include "../geometry/bezier1i_intersector.h" #include "../geometry/triangle_intersector_moeller.h" #include "../geometry/triangle_intersector_pluecker.h" #include "../geometry/triangle4i_intersector_pluecker.h" #include "../geometry/object_intersector4.h" namespace embree { namespace isa { template<int types, bool robust, typename PrimitiveIntersector4> void BVH4Intersector4Chunk<types,robust,PrimitiveIntersector4>::intersect(bool4* valid_i, BVH4* bvh, Ray4& ray) { /* verify correct input */ bool4 valid0 = *valid_i; #if defined(RTCORE_IGNORE_INVALID_RAYS) valid0 &= ray.valid(); #endif assert(all(valid0,ray.tnear > -FLT_MIN)); assert(!(types & BVH4::FLAG_NODE_MB) || all(valid0,ray.time >= 0.0f & ray.time <= 1.0f)); /* load ray */ const Vec3f4 rdir = rcp_safe(ray.dir); const Vec3f4 org(ray.org), org_rdir = org * rdir; float4 ray_tnear = 
select(valid0,ray.tnear,float4(pos_inf)); float4 ray_tfar = select(valid0,ray.tfar ,float4(neg_inf)); const float4 inf = float4(pos_inf); Precalculations pre(valid0,ray); /* allocate stack and push root node */ float4 stack_near[stackSize]; NodeRef stack_node[stackSize]; stack_node[0] = BVH4::invalidNode; stack_near[0] = inf; stack_node[1] = bvh->root; stack_near[1] = ray_tnear; NodeRef* stackEnd = stack_node+stackSize; NodeRef* __restrict__ sptr_node = stack_node + 2; float4* __restrict__ sptr_near = stack_near + 2; while (1) { /* pop next node from stack */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; NodeRef cur = *sptr_node; if (unlikely(cur == BVH4::invalidNode)) { assert(sptr_node == stack_node); break; } /* cull node if behind closest hit point */ float4 curDist = *sptr_near; if (unlikely(none(ray_tfar > curDist))) continue; while (1) { /* process normal nodes */ if (likely((types & 0x1) && cur.isNode())) { const bool4 valid_node = ray_tfar > curDist; STAT3(normal.trav_nodes,1,popcnt(valid_node),8); const Node* __restrict__ const node = cur.node(); /* pop of next node */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; cur = *sptr_node; curDist = *sptr_near; #pragma unroll(4) for (unsigned i=0; i<BVH4::N; i++) { const NodeRef child = node->children[i]; if (unlikely(child == BVH4::emptyNode)) break; float4 lnearP; const bool4 lhit = intersect_node<robust>(node,i,org,rdir,org_rdir,ray_tnear,ray_tfar,lnearP); /* if we hit the child we choose to continue with that child if it is closer than the current next child, or we push it onto the stack */ if (likely(any(lhit))) { assert(sptr_node < stackEnd); const float4 childDist = select(lhit,lnearP,inf); const NodeRef child = node->children[i]; assert(child != BVH4::emptyNode); sptr_node++; sptr_near++; /* push cur node onto stack and continue with hit child */ if (any(childDist < curDist)) { *(sptr_node-1) = cur; *(sptr_near-1) = curDist; curDist = childDist; cur = child; } /* push hit child 
onto stack */ else { *(sptr_node-1) = child; *(sptr_near-1) = childDist; } } } } /* process motion blur nodes */ else if (likely((types & 0x10) && cur.isNodeMB())) { const bool4 valid_node = ray_tfar > curDist; STAT3(normal.trav_nodes,1,popcnt(valid_node),8); const BVH4::NodeMB* __restrict__ const node = cur.nodeMB(); /* pop of next node */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; cur = *sptr_node; curDist = *sptr_near; #pragma unroll(4) for (unsigned i=0; i<BVH4::N; i++) { const NodeRef child = node->child(i); if (unlikely(child == BVH4::emptyNode)) break; float4 lnearP; const bool4 lhit = intersect_node(node,i,org,rdir,org_rdir,ray_tnear,ray_tfar,ray.time,lnearP); /* if we hit the child we choose to continue with that child if it is closer than the current next child, or we push it onto the stack */ if (likely(any(lhit))) { assert(sptr_node < stackEnd); assert(child != BVH4::emptyNode); const float4 childDist = select(lhit,lnearP,inf); sptr_node++; sptr_near++; /* push cur node onto stack and continue with hit child */ if (any(childDist < curDist)) { *(sptr_node-1) = cur; *(sptr_near-1) = curDist; curDist = childDist; cur = child; } /* push hit child onto stack */ else { *(sptr_node-1) = child; *(sptr_near-1) = childDist; } } } } else break; } /* return if stack is empty */ if (unlikely(cur == BVH4::invalidNode)) { assert(sptr_node == stack_node); break; } /* intersect leaf */ assert(cur != BVH4::emptyNode); const bool4 valid_leaf = ray_tfar > curDist; STAT3(normal.trav_leaves,1,popcnt(valid_leaf),4); size_t items; const Primitive* prim = (Primitive*) cur.leaf(items); size_t lazy_node = 0; PrimitiveIntersector4::intersect(valid_leaf,pre,ray,prim,items,bvh->scene,lazy_node); ray_tfar = select(valid_leaf,ray.tfar,ray_tfar); if (unlikely(lazy_node)) { *sptr_node = lazy_node; sptr_node++; *sptr_near = neg_inf; sptr_near++; } } AVX_ZERO_UPPER(); } template<int types, bool robust, typename PrimitiveIntersector4> void 
BVH4Intersector4Chunk<types,robust,PrimitiveIntersector4>::occluded(bool4* valid_i, BVH4* bvh, Ray4& ray) { /* verify correct input */ bool4 valid = *valid_i; #if defined(RTCORE_IGNORE_INVALID_RAYS) valid &= ray.valid(); #endif assert(all(valid,ray.tnear > -FLT_MIN)); assert(!(types & BVH4::FLAG_NODE_MB) || all(valid,ray.time >= 0.0f & ray.time <= 1.0f)); /* load ray */ bool4 terminated = !valid; const Vec3f4 rdir = rcp_safe(ray.dir); const Vec3f4 org(ray.org), org_rdir = org * rdir; float4 ray_tnear = select(valid,ray.tnear,float4(pos_inf)); float4 ray_tfar = select(valid,ray.tfar ,float4(neg_inf)); const float4 inf = float4(pos_inf); Precalculations pre(valid,ray); /* allocate stack and push root node */ float4 stack_near[stackSize]; NodeRef stack_node[stackSize]; stack_node[0] = BVH4::invalidNode; stack_near[0] = inf; stack_node[1] = bvh->root; stack_near[1] = ray_tnear; NodeRef* stackEnd = stack_node+stackSize; NodeRef* __restrict__ sptr_node = stack_node + 2; float4* __restrict__ sptr_near = stack_near + 2; while (1) { /* pop next node from stack */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; NodeRef cur = *sptr_node; if (unlikely(cur == BVH4::invalidNode)) { assert(sptr_node == stack_node); break; } /* cull node if behind closest hit point */ float4 curDist = *sptr_near; if (unlikely(none(ray_tfar > curDist))) continue; while (1) { /* process normal nodes */ if (likely((types & 0x1) && cur.isNode())) { const bool4 valid_node = ray_tfar > curDist; STAT3(normal.trav_nodes,1,popcnt(valid_node),8); const Node* __restrict__ const node = cur.node(); /* pop of next node */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; cur = *sptr_node; curDist = *sptr_near; #pragma unroll(4) for (unsigned i=0; i<BVH4::N; i++) { const NodeRef child = node->children[i]; if (unlikely(child == BVH4::emptyNode)) break; float4 lnearP; const bool4 lhit = intersect_node<robust>(node,i,org,rdir,org_rdir,ray_tnear,ray_tfar,lnearP); /* if we hit the child we choose to 
continue with that child if it is closer than the current next child, or we push it onto the stack */ if (likely(any(lhit))) { assert(sptr_node < stackEnd); const float4 childDist = select(lhit,lnearP,inf); const NodeRef child = node->children[i]; assert(child != BVH4::emptyNode); sptr_node++; sptr_near++; /* push cur node onto stack and continue with hit child */ if (any(childDist < curDist)) { *(sptr_node-1) = cur; *(sptr_near-1) = curDist; curDist = childDist; cur = child; } /* push hit child onto stack */ else { *(sptr_node-1) = child; *(sptr_near-1) = childDist; } } } } /* process motion blur nodes */ else if (likely((types & 0x10) && cur.isNodeMB())) { const bool4 valid_node = ray_tfar > curDist; STAT3(normal.trav_nodes,1,popcnt(valid_node),8); const BVH4::NodeMB* __restrict__ const node = cur.nodeMB(); /* pop of next node */ assert(sptr_node > stack_node); sptr_node--; sptr_near--; cur = *sptr_node; curDist = *sptr_near; #pragma unroll(4) for (unsigned i=0; i<BVH4::N; i++) { const NodeRef child = node->child(i); if (unlikely(child == BVH4::emptyNode)) break; float4 lnearP; const bool4 lhit = intersect_node(node,i,org,rdir,org_rdir,ray_tnear,ray_tfar,ray.time,lnearP); /* if we hit the child we choose to continue with that child if it is closer than the current next child, or we push it onto the stack */ if (likely(any(lhit))) { assert(sptr_node < stackEnd); assert(child != BVH4::emptyNode); const float4 childDist = select(lhit,lnearP,inf); sptr_node++; sptr_near++; /* push cur node onto stack and continue with hit child */ if (any(childDist < curDist)) { *(sptr_node-1) = cur; *(sptr_near-1) = curDist; curDist = childDist; cur = child; } /* push hit child onto stack */ else { *(sptr_node-1) = child; *(sptr_near-1) = childDist; } } } } else break; } /* return if stack is empty */ if (unlikely(cur == BVH4::invalidNode)) { assert(sptr_node == stack_node); break; } /* intersect leaf */ assert(cur != BVH4::emptyNode); const bool4 valid_leaf = ray_tfar > curDist; 
STAT3(shadow.trav_leaves,1,popcnt(valid_leaf),4); size_t items; const Primitive* prim = (Primitive*) cur.leaf(items); size_t lazy_node = 0; terminated |= PrimitiveIntersector4::occluded(!terminated,pre,ray,prim,items,bvh->scene,lazy_node); if (all(terminated)) break; ray_tfar = select(terminated,float4(neg_inf),ray_tfar); if (unlikely(lazy_node)) { *sptr_node = lazy_node; sptr_node++; *sptr_near = neg_inf; sptr_near++; } } store4i(valid & terminated,&ray.geomID,0); AVX_ZERO_UPPER(); } DEFINE_INTERSECTOR4(BVH4Bezier1vIntersector4Chunk, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<Bezier1vIntersectorN<Ray4> > >); DEFINE_INTERSECTOR4(BVH4Bezier1iIntersector4Chunk, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<Bezier1iIntersectorN<Ray4> > >); DEFINE_INTERSECTOR4(BVH4Triangle4Intersector4ChunkMoeller, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<TriangleNIntersectorMMoellerTrumbore<Ray4 COMMA Triangle4 COMMA true> > >); DEFINE_INTERSECTOR4(BVH4Triangle4Intersector4ChunkMoellerNoFilter, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<TriangleNIntersectorMMoellerTrumbore<Ray4 COMMA Triangle4 COMMA false> > >); #if defined (__AVX__) DEFINE_INTERSECTOR4(BVH4Triangle8Intersector4ChunkMoeller, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<TriangleNIntersectorMMoellerTrumbore<Ray4 COMMA Triangle8 COMMA true> > >); DEFINE_INTERSECTOR4(BVH4Triangle8Intersector4ChunkMoellerNoFilter, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<TriangleNIntersectorMMoellerTrumbore<Ray4 COMMA Triangle8 COMMA false> > >); #endif DEFINE_INTERSECTOR4(BVH4Triangle4vIntersector4ChunkPluecker, BVH4Intersector4Chunk<0x1 COMMA true COMMA ArrayIntersector4<TriangleNvIntersectorMPluecker<Ray4 COMMA Triangle4v COMMA true> > >); DEFINE_INTERSECTOR4(BVH4Triangle4iIntersector4ChunkPluecker, BVH4Intersector4Chunk<0x1 COMMA true COMMA ArrayIntersector4<Triangle4iIntersectorMPluecker<Ray4 COMMA true> > >); 
DEFINE_INTERSECTOR4(BVH4VirtualIntersector4Chunk, BVH4Intersector4Chunk<0x1 COMMA false COMMA ArrayIntersector4<ObjectIntersector4> >); DEFINE_INTERSECTOR4(BVH4Triangle4vMBIntersector4ChunkMoeller, BVH4Intersector4Chunk<0x10 COMMA false COMMA ArrayIntersector4<TriangleNMblurIntersectorMMoellerTrumbore<Ray4 COMMA Triangle4vMB COMMA true> > >); } }
36.441687
212
0.616914
[ "geometry" ]
0f8827b223f8733467c41ae84d3e2cf37677cc5f
7,248
cpp
C++
cfw/src/v20190904/model/DescribeAcListsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
43
2019-08-14T08:14:12.000Z
2022-03-30T12:35:09.000Z
cfw/src/v20190904/model/DescribeAcListsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
12
2019-07-15T10:44:59.000Z
2021-11-02T12:35:00.000Z
cfw/src/v20190904/model/DescribeAcListsRequest.cpp
suluner/tencentcloud-sdk-cpp
a56c73cc3f488c4d1e10755704107bb15c5e000d
[ "Apache-2.0" ]
28
2019-07-12T09:06:22.000Z
2022-03-30T08:04:18.000Z
/* * Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tencentcloud/cfw/v20190904/model/DescribeAcListsRequest.h> #include <tencentcloud/core/utils/rapidjson/document.h> #include <tencentcloud/core/utils/rapidjson/writer.h> #include <tencentcloud/core/utils/rapidjson/stringbuffer.h> using namespace TencentCloud::Cfw::V20190904::Model; using namespace std; DescribeAcListsRequest::DescribeAcListsRequest() : m_protocolHasBeenSet(false), m_strategyHasBeenSet(false), m_searchValueHasBeenSet(false), m_limitHasBeenSet(false), m_offsetHasBeenSet(false), m_directionHasBeenSet(false), m_edgeIdHasBeenSet(false), m_statusHasBeenSet(false), m_areaHasBeenSet(false), m_instanceIdHasBeenSet(false) { } string DescribeAcListsRequest::ToJsonString() const { rapidjson::Document d; d.SetObject(); rapidjson::Document::AllocatorType& allocator = d.GetAllocator(); if (m_protocolHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Protocol"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_protocol.c_str(), allocator).Move(), allocator); } if (m_strategyHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Strategy"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_strategy.c_str(), allocator).Move(), allocator); } if (m_searchValueHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = 
"SearchValue"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_searchValue.c_str(), allocator).Move(), allocator); } if (m_limitHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Limit"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, m_limit, allocator); } if (m_offsetHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Offset"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, m_offset, allocator); } if (m_directionHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Direction"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, m_direction, allocator); } if (m_edgeIdHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "EdgeId"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_edgeId.c_str(), allocator).Move(), allocator); } if (m_statusHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Status"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_status.c_str(), allocator).Move(), allocator); } if (m_areaHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "Area"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_area.c_str(), allocator).Move(), allocator); } if (m_instanceIdHasBeenSet) { rapidjson::Value iKey(rapidjson::kStringType); string key = "InstanceId"; iKey.SetString(key.c_str(), allocator); d.AddMember(iKey, rapidjson::Value(m_instanceId.c_str(), allocator).Move(), allocator); } rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); d.Accept(writer); return buffer.GetString(); } string DescribeAcListsRequest::GetProtocol() const { return m_protocol; } void DescribeAcListsRequest::SetProtocol(const string& _protocol) { m_protocol = _protocol; m_protocolHasBeenSet = true; } bool DescribeAcListsRequest::ProtocolHasBeenSet() const { return 
m_protocolHasBeenSet; } string DescribeAcListsRequest::GetStrategy() const { return m_strategy; } void DescribeAcListsRequest::SetStrategy(const string& _strategy) { m_strategy = _strategy; m_strategyHasBeenSet = true; } bool DescribeAcListsRequest::StrategyHasBeenSet() const { return m_strategyHasBeenSet; } string DescribeAcListsRequest::GetSearchValue() const { return m_searchValue; } void DescribeAcListsRequest::SetSearchValue(const string& _searchValue) { m_searchValue = _searchValue; m_searchValueHasBeenSet = true; } bool DescribeAcListsRequest::SearchValueHasBeenSet() const { return m_searchValueHasBeenSet; } uint64_t DescribeAcListsRequest::GetLimit() const { return m_limit; } void DescribeAcListsRequest::SetLimit(const uint64_t& _limit) { m_limit = _limit; m_limitHasBeenSet = true; } bool DescribeAcListsRequest::LimitHasBeenSet() const { return m_limitHasBeenSet; } uint64_t DescribeAcListsRequest::GetOffset() const { return m_offset; } void DescribeAcListsRequest::SetOffset(const uint64_t& _offset) { m_offset = _offset; m_offsetHasBeenSet = true; } bool DescribeAcListsRequest::OffsetHasBeenSet() const { return m_offsetHasBeenSet; } uint64_t DescribeAcListsRequest::GetDirection() const { return m_direction; } void DescribeAcListsRequest::SetDirection(const uint64_t& _direction) { m_direction = _direction; m_directionHasBeenSet = true; } bool DescribeAcListsRequest::DirectionHasBeenSet() const { return m_directionHasBeenSet; } string DescribeAcListsRequest::GetEdgeId() const { return m_edgeId; } void DescribeAcListsRequest::SetEdgeId(const string& _edgeId) { m_edgeId = _edgeId; m_edgeIdHasBeenSet = true; } bool DescribeAcListsRequest::EdgeIdHasBeenSet() const { return m_edgeIdHasBeenSet; } string DescribeAcListsRequest::GetStatus() const { return m_status; } void DescribeAcListsRequest::SetStatus(const string& _status) { m_status = _status; m_statusHasBeenSet = true; } bool DescribeAcListsRequest::StatusHasBeenSet() const { return m_statusHasBeenSet; } string 
DescribeAcListsRequest::GetArea() const { return m_area; } void DescribeAcListsRequest::SetArea(const string& _area) { m_area = _area; m_areaHasBeenSet = true; } bool DescribeAcListsRequest::AreaHasBeenSet() const { return m_areaHasBeenSet; } string DescribeAcListsRequest::GetInstanceId() const { return m_instanceId; } void DescribeAcListsRequest::SetInstanceId(const string& _instanceId) { m_instanceId = _instanceId; m_instanceIdHasBeenSet = true; } bool DescribeAcListsRequest::InstanceIdHasBeenSet() const { return m_instanceIdHasBeenSet; }
24.569492
96
0.712058
[ "model" ]
0f8a2668103d12980b5d026e3c8b71963c1cf041
7,158
cpp
C++
Workspace/QuickTesting/src/TestApp.cpp
alapontgr/Engine
6be363d79b9fdaee813b196db12ae296a6ca8089
[ "MIT" ]
null
null
null
Workspace/QuickTesting/src/TestApp.cpp
alapontgr/Engine
6be363d79b9fdaee813b196db12ae296a6ca8089
[ "MIT" ]
null
null
null
Workspace/QuickTesting/src/TestApp.cpp
alapontgr/Engine
6be363d79b9fdaee813b196db12ae296a6ca8089
[ "MIT" ]
null
null
null
//////////////////////////////////////////////////////////////////////////////// // // Author: Sergio Alapont Granero (seralgrainf@gmail.com) // Date: 2019/03/30 // File: TestApp.cpp // // Copyright (c) 2018 (See README.md) // //////////////////////////////////////////////////////////////////////////////// // Includes #include "./src/TestApp.h" #include "Common/GfInput/GfInput.h" #include "Common/GfShaderCompiler/GfShaderCompiler.h" #include "stb/stb_image.h" //////////////////////////////////////////////////////////////////////////////// struct GfPerFrameCB { m4 m_mView; m4 m_mProjection; m4 m_mViewProjection; } g_kPerFrameCBData; //////////////////////////////////////////////////////////////////////////////// static GfUniquePtr<char[]> LoadFileSrc(const char* szFilepath, u32& uiOutFileSize) { GfFileHandle kFile; GfFile::OpenFile(GfPaths::getAssetPath(szFilepath).c_str(), EFileAccessMode::Read, kFile); GfFile::GetFileSize(kFile); uiOutFileSize = static_cast<u32>(kFile.GetFileSize()); if (uiOutFileSize > 0) { GfUniquePtr<char[]> pSrc(new char[uiOutFileSize+1]); pSrc[uiOutFileSize] = 0; u32 uiRead = GfFile::ReadBytes(kFile, uiOutFileSize, pSrc.get()); GfFile::CloseFile(kFile); GF_ASSERT(uiRead == uiOutFileSize, "Invalid size read"); return pSrc; } return nullptr; } //////////////////////////////////////////////////////////////////////////////// GfUniquePtr<char[]> LoadTexture(const char* szTexturePath, s32& siWidth, s32& siHeight, s32& siComponentCount) { // Force 4 components u32 uiReqComponents(4); unsigned char *pData = stbi_load(szTexturePath, &siWidth, &siHeight, &siComponentCount, uiReqComponents); size_t uiTextureSize((size_t)siWidth * siHeight * uiReqComponents); GfUniquePtr<char[]> pSrc(new char[uiTextureSize]); memcpy(pSrc.get(), pData, uiTextureSize); stbi_image_free(pData); return pSrc; } //////////////////////////////////////////////////////////////////////////////// s32 TestApp::Run(const GfEntryArgs& kEntryParams) { init(kEntryParams); while 
(m_window.beginFrame(m_context)) { Update(); u32 uiCurrFrameIdx(m_window.getCurrentFrameIdx()); GfCmdBuffer* cmdBuffer = GfCmdBuffer::get(&m_context, GfCmdBufferType::Primary, GfRenderContextFamilies::Graphics); Render(*cmdBuffer); // Final submit const GfSemaphore& imageReady = m_window.getImageReadySemaphore(); const GfSemaphore& signalSemaphore = m_window.getFinishedRenderingSemaphore(); cmdBuffer->submit(&imageReady, &signalSemaphore); m_context.tick(); m_window.endFrame(m_context); // Remove pending resources to delete GfBufferFactory::removePending(m_context); } return 0; } //////////////////////////////////////////////////////////////////////////////// void TestApp::init(const GfEntryArgs& kEntryParams) { GfCommandLine::init(kEntryParams.m_szCmdLine); GfPaths::init(); // Create the window GfWindowInitParams kWindowInit; kWindowInit.m_width = 512; kWindowInit.m_height = 512; kWindowInit.m_bVSync = false; kWindowInit.m_bFullScreen = false; kWindowInit.m_szWindowName = "TestGriffinApp"; m_window.init(kWindowInit, m_context); GfInput::init(); //////////////////////////////////////////////////////////////////////////////// GfString shaderCache = GfCommandLine::getArg(GfHash::compute("-sc"), "."); GfString testShader = GfCommandLine::getArg(GfHash::compute("-f")); if (shaderCache.size() == 0) { GF_ASSERT_ALWAYS("Need to provide path for shader cache"); } if (testShader.size() == 0) { GF_ASSERT_ALWAYS("Need to provide path for shader to test"); } u32 srcSize = 0; GfUniquePtr<char[]> src = LoadFileSrc(testShader.c_str(), srcSize); GfString shaderSrc(src.get()); std::replace( shaderSrc.begin(), shaderSrc.end(), '\\', '/'); if (shaderSrc.find("\r\n") != GfString::npos) { shaderSrc.erase(std::remove(shaderSrc.begin(), shaderSrc.end(), '\r'), shaderSrc.end()); } GfString errors; m_shaderCache.init(shaderCache); GfShaderCompiler compiler; compiler.init("", ""); compiler.compileShader(testShader.c_str(), shaderSrc, m_shaderCache, errors); if (errors.size() > 0) { 
GF_ERROR("%s", errors.c_str()); } u32 shaderBlobSize(0); m_shader.setShaderBlob(m_shaderCache.getShaderBlob(testShader)); //////////////////////////////////////////////////////////////////////////////// m_kCamera.UpdatePerspective(GF_DEG_TO_RAD(45.0f), (f32)m_window.getWidth() / (f32)m_window.getHeight(), 0.1f, 1000.0f); m_kCamera.UpdateViewWithTarget(v3(0.0f, 5.0f, 10.0f), v3(0.0f), v3(0.0f, 1.0f, 0.0f)); } //////////////////////////////////////////////////////////////////////////////// void TestApp::Update() { static f64 s_dLastFrameTime(GfTime::GetTimeInSeconds()); f64 dCurrTime = GfTime::GetTimeInSeconds(); f32 fElapsedTime = (f32)(dCurrTime - s_dLastFrameTime); s_dLastFrameTime = dCurrTime; GfInput::Update(); static f32 s_fDeg(0.0f); s_fDeg += fElapsedTime * 45.0f; if (s_fDeg > 360.0f) { s_fDeg -= 360.0f; } f32 fSin(sinf(GF_DEG_TO_RAD(s_fDeg))); f32 fCos(cosf(GF_DEG_TO_RAD(s_fDeg))); fCos = fCos * 0.5f + 0.5f; m_kCamera.UpdateViewWithTarget(v3(fSin * 3.0f, 0.0f, fCos * 5.0f + 2.0f), v3(0.0f), v3(0.0f, 1.0f, 0.0f)); } //////////////////////////////////////////////////////////////////////////////// void TestApp::Render(GfCmdBuffer& cmdBuffer) { f32 width = static_cast<f32>(m_window.getWidth()); f32 height = static_cast<f32>(m_window.getHeight()); GfTextureView backBufferView(m_window.getCurrBackBuffer()); static constexpr u32 s_AttachmentCount = 1; AttachmentDesc attachments[] = { {GfTextureView(m_window.getCurrBackBuffer()), LoadOp::Clear, StoreOp::Store} }; m_renderPass.setAttachments(attachments, s_AttachmentCount, nullptr); m_renderPass.setViewport(width, height, 0.0f, 0.0f); m_renderPass.setScissor(m_window.getWidth(), m_window.getHeight(), 0, 0); m_renderPass.setRenderArea(m_window.getWidth(), m_window.getHeight(), 0, 0); u32 curFrameIdx(m_context.getCurFrame()); m_renderPass.setClearColor(v4( static_cast<f32>(curFrameIdx & 0xff) / 255.0f, static_cast<f32>((curFrameIdx>>8) & 0xff) / 255.0f, static_cast<f32>((curFrameIdx>>16) & 0xff) / 255.0f, 0.0f)); // 
Recording cmdBuffer.beginRecording(); // Transit swapchain to Color attachment if (m_context.getCurFrame() == 0) { cmdBuffer.addTextureBarrier(attachments[0].m_attachment, TextureUsageFlags::Undefined, TextureUsageFlags::ColorAttachment); } else { cmdBuffer.addTextureBarrier(attachments[0].m_attachment, TextureUsageFlags::Present, TextureUsageFlags::ColorAttachment); } cmdBuffer.beginRenderPass(&m_renderPass); // Render GfShaderVariant variant(&m_shader); cmdBuffer.bindShaderPipe(variant); cmdBuffer.draw(3, 1); cmdBuffer.endRenderPass(); // Transit to present cmdBuffer.addTextureBarrier(attachments[0].m_attachment, TextureUsageFlags::ColorAttachment, TextureUsageFlags::Present); cmdBuffer.endRecording(); } //////////////////////////////////////////////////////////////////////////////// void TestApp::Shutdown() { GfInput::Shutdown(); } //////////////////////////////////////////////////////////////////////////////// // EOF
28.979757
125
0.624197
[ "render" ]
0f9235c08423dbf912d0c69712cf9203e424fe95
129,875
hpp
C++
src/Bindings/Encoder.Converter.hpp
ZehMatt/DotX64Dbg
642cd5f2ed8b806e287ac1f463966e43c81c41ff
[ "MIT" ]
21
2021-06-10T15:08:05.000Z
2021-12-01T06:25:21.000Z
src/Bindings/Encoder.Converter.hpp
ZehMatt/DotX64Dbg
642cd5f2ed8b806e287ac1f463966e43c81c41ff
[ "MIT" ]
3
2021-06-10T15:51:56.000Z
2021-06-24T17:12:50.000Z
src/Bindings/Encoder.Converter.hpp
ZehMatt/DotX64Dbg
642cd5f2ed8b806e287ac1f463966e43c81c41ff
[ "MIT" ]
2
2021-06-30T00:46:41.000Z
2021-09-12T04:25:40.000Z
#pragma once #include <asmjit/asmjit.h> #include "Instruction.hpp" namespace Dotx64Dbg { inline uint32_t convertMnemonic(Mnemonic mnemonic) { switch (mnemonic) { case Mnemonic::Movsd: return asmjit::x86::Inst::kIdMovsd; case Mnemonic::Aaa: return asmjit::x86::Inst::kIdAaa; case Mnemonic::Aad: return asmjit::x86::Inst::kIdAad; case Mnemonic::Aam: return asmjit::x86::Inst::kIdAam; case Mnemonic::Aas: return asmjit::x86::Inst::kIdAas; case Mnemonic::Adc: return asmjit::x86::Inst::kIdAdc; case Mnemonic::Adcx: return asmjit::x86::Inst::kIdAdcx; case Mnemonic::Add: return asmjit::x86::Inst::kIdAdd; case Mnemonic::Addpd: return asmjit::x86::Inst::kIdAddpd; case Mnemonic::Addps: return asmjit::x86::Inst::kIdAddps; case Mnemonic::Addsd: return asmjit::x86::Inst::kIdAddsd; case Mnemonic::Addss: return asmjit::x86::Inst::kIdAddss; case Mnemonic::Addsubpd: return asmjit::x86::Inst::kIdAddsubpd; case Mnemonic::Addsubps: return asmjit::x86::Inst::kIdAddsubps; case Mnemonic::Adox: return asmjit::x86::Inst::kIdAdox; case Mnemonic::Aesdec: return asmjit::x86::Inst::kIdAesdec; case Mnemonic::Aesdeclast: return asmjit::x86::Inst::kIdAesdeclast; case Mnemonic::Aesenc: return asmjit::x86::Inst::kIdAesenc; case Mnemonic::Aesenclast: return asmjit::x86::Inst::kIdAesenclast; case Mnemonic::Aesimc: return asmjit::x86::Inst::kIdAesimc; case Mnemonic::Aeskeygenassist: return asmjit::x86::Inst::kIdAeskeygenassist; case Mnemonic::And: return asmjit::x86::Inst::kIdAnd; case Mnemonic::Andn: return asmjit::x86::Inst::kIdAndn; case Mnemonic::Andnpd: return asmjit::x86::Inst::kIdAndnpd; case Mnemonic::Andnps: return asmjit::x86::Inst::kIdAndnps; case Mnemonic::Andpd: return asmjit::x86::Inst::kIdAndpd; case Mnemonic::Andps: return asmjit::x86::Inst::kIdAndps; case Mnemonic::Arpl: return asmjit::x86::Inst::kIdArpl; case Mnemonic::Bextr: return asmjit::x86::Inst::kIdBextr; case Mnemonic::Blcfill: return asmjit::x86::Inst::kIdBlcfill; case Mnemonic::Blci: return asmjit::x86::Inst::kIdBlci; case 
Mnemonic::Blcic: return asmjit::x86::Inst::kIdBlcic; case Mnemonic::Blcmsk: return asmjit::x86::Inst::kIdBlcmsk; case Mnemonic::Blcs: return asmjit::x86::Inst::kIdBlcs; case Mnemonic::Blendpd: return asmjit::x86::Inst::kIdBlendpd; case Mnemonic::Blendps: return asmjit::x86::Inst::kIdBlendps; case Mnemonic::Blendvpd: return asmjit::x86::Inst::kIdBlendvpd; case Mnemonic::Blendvps: return asmjit::x86::Inst::kIdBlendvps; case Mnemonic::Blsfill: return asmjit::x86::Inst::kIdBlsfill; case Mnemonic::Blsi: return asmjit::x86::Inst::kIdBlsi; case Mnemonic::Blsic: return asmjit::x86::Inst::kIdBlsic; case Mnemonic::Blsmsk: return asmjit::x86::Inst::kIdBlsmsk; case Mnemonic::Blsr: return asmjit::x86::Inst::kIdBlsr; case Mnemonic::Bndcl: return asmjit::x86::Inst::kIdBndcl; case Mnemonic::Bndcn: return asmjit::x86::Inst::kIdBndcn; case Mnemonic::Bndcu: return asmjit::x86::Inst::kIdBndcu; case Mnemonic::Bndldx: return asmjit::x86::Inst::kIdBndldx; case Mnemonic::Bndmk: return asmjit::x86::Inst::kIdBndmk; case Mnemonic::Bndmov: return asmjit::x86::Inst::kIdBndmov; case Mnemonic::Bndstx: return asmjit::x86::Inst::kIdBndstx; case Mnemonic::Bound: return asmjit::x86::Inst::kIdBound; case Mnemonic::Bsf: return asmjit::x86::Inst::kIdBsf; case Mnemonic::Bsr: return asmjit::x86::Inst::kIdBsr; case Mnemonic::Bswap: return asmjit::x86::Inst::kIdBswap; case Mnemonic::Bt: return asmjit::x86::Inst::kIdBt; case Mnemonic::Btc: return asmjit::x86::Inst::kIdBtc; case Mnemonic::Btr: return asmjit::x86::Inst::kIdBtr; case Mnemonic::Bts: return asmjit::x86::Inst::kIdBts; case Mnemonic::Bzhi: return asmjit::x86::Inst::kIdBzhi; case Mnemonic::Call: return asmjit::x86::Inst::kIdCall; case Mnemonic::Cbw: return asmjit::x86::Inst::kIdCbw; case Mnemonic::Cdq: return asmjit::x86::Inst::kIdCdq; case Mnemonic::Cdqe: return asmjit::x86::Inst::kIdCdqe; case Mnemonic::Clac: return asmjit::x86::Inst::kIdClac; case Mnemonic::Clc: return asmjit::x86::Inst::kIdClc; case Mnemonic::Cld: return 
asmjit::x86::Inst::kIdCld; case Mnemonic::Clflush: return asmjit::x86::Inst::kIdClflush; case Mnemonic::Clflushopt: return asmjit::x86::Inst::kIdClflushopt; case Mnemonic::Cli: return asmjit::x86::Inst::kIdCli; case Mnemonic::Clts: return asmjit::x86::Inst::kIdClts; case Mnemonic::Clwb: return asmjit::x86::Inst::kIdClwb; case Mnemonic::Clzero: return asmjit::x86::Inst::kIdClzero; case Mnemonic::Cmc: return asmjit::x86::Inst::kIdCmc; case Mnemonic::Cmovb: return asmjit::x86::Inst::kIdCmovb; case Mnemonic::Cmovbe: return asmjit::x86::Inst::kIdCmovbe; case Mnemonic::Cmovl: return asmjit::x86::Inst::kIdCmovl; case Mnemonic::Cmovle: return asmjit::x86::Inst::kIdCmovle; case Mnemonic::Cmovnb: return asmjit::x86::Inst::kIdCmovnb; case Mnemonic::Cmovnbe: return asmjit::x86::Inst::kIdCmovnbe; case Mnemonic::Cmovnl: return asmjit::x86::Inst::kIdCmovnl; case Mnemonic::Cmovnle: return asmjit::x86::Inst::kIdCmovnle; case Mnemonic::Cmovno: return asmjit::x86::Inst::kIdCmovno; case Mnemonic::Cmovnp: return asmjit::x86::Inst::kIdCmovnp; case Mnemonic::Cmovns: return asmjit::x86::Inst::kIdCmovns; case Mnemonic::Cmovnz: return asmjit::x86::Inst::kIdCmovnz; case Mnemonic::Cmovo: return asmjit::x86::Inst::kIdCmovo; case Mnemonic::Cmovp: return asmjit::x86::Inst::kIdCmovp; case Mnemonic::Cmovs: return asmjit::x86::Inst::kIdCmovs; case Mnemonic::Cmovz: return asmjit::x86::Inst::kIdCmovz; case Mnemonic::Cmp: return asmjit::x86::Inst::kIdCmp; case Mnemonic::Cmppd: return asmjit::x86::Inst::kIdCmppd; case Mnemonic::Cmpps: return asmjit::x86::Inst::kIdCmpps; case Mnemonic::Cmpsd: return asmjit::x86::Inst::kIdCmpsd; case Mnemonic::Cmpss: return asmjit::x86::Inst::kIdCmpss; case Mnemonic::Cmpxchg: return asmjit::x86::Inst::kIdCmpxchg; case Mnemonic::Cmpxchg16b: return asmjit::x86::Inst::kIdCmpxchg16b; case Mnemonic::Cmpxchg8b: return asmjit::x86::Inst::kIdCmpxchg8b; case Mnemonic::Comisd: return asmjit::x86::Inst::kIdComisd; case Mnemonic::Comiss: return asmjit::x86::Inst::kIdComiss; case 
Mnemonic::Cpuid: return asmjit::x86::Inst::kIdCpuid; case Mnemonic::Cqo: return asmjit::x86::Inst::kIdCqo; case Mnemonic::Crc32: return asmjit::x86::Inst::kIdCrc32; case Mnemonic::Cvtdq2pd: return asmjit::x86::Inst::kIdCvtdq2pd; case Mnemonic::Cvtdq2ps: return asmjit::x86::Inst::kIdCvtdq2ps; case Mnemonic::Cvtpd2dq: return asmjit::x86::Inst::kIdCvtpd2dq; case Mnemonic::Cvtpd2pi: return asmjit::x86::Inst::kIdCvtpd2pi; case Mnemonic::Cvtpd2ps: return asmjit::x86::Inst::kIdCvtpd2ps; case Mnemonic::Cvtpi2pd: return asmjit::x86::Inst::kIdCvtpi2pd; case Mnemonic::Cvtpi2ps: return asmjit::x86::Inst::kIdCvtpi2ps; case Mnemonic::Cvtps2dq: return asmjit::x86::Inst::kIdCvtps2dq; case Mnemonic::Cvtps2pd: return asmjit::x86::Inst::kIdCvtps2pd; case Mnemonic::Cvtps2pi: return asmjit::x86::Inst::kIdCvtps2pi; case Mnemonic::Cvtsd2si: return asmjit::x86::Inst::kIdCvtsd2si; case Mnemonic::Cvtsd2ss: return asmjit::x86::Inst::kIdCvtsd2ss; case Mnemonic::Cvtsi2sd: return asmjit::x86::Inst::kIdCvtsi2sd; case Mnemonic::Cvtsi2ss: return asmjit::x86::Inst::kIdCvtsi2ss; case Mnemonic::Cvtss2sd: return asmjit::x86::Inst::kIdCvtss2sd; case Mnemonic::Cvtss2si: return asmjit::x86::Inst::kIdCvtss2si; case Mnemonic::Cvttpd2dq: return asmjit::x86::Inst::kIdCvttpd2dq; case Mnemonic::Cvttpd2pi: return asmjit::x86::Inst::kIdCvttpd2pi; case Mnemonic::Cvttps2dq: return asmjit::x86::Inst::kIdCvttps2dq; case Mnemonic::Cvttps2pi: return asmjit::x86::Inst::kIdCvttps2pi; case Mnemonic::Cvttsd2si: return asmjit::x86::Inst::kIdCvttsd2si; case Mnemonic::Cvttss2si: return asmjit::x86::Inst::kIdCvttss2si; case Mnemonic::Cwd: return asmjit::x86::Inst::kIdCwd; case Mnemonic::Cwde: return asmjit::x86::Inst::kIdCwde; case Mnemonic::Daa: return asmjit::x86::Inst::kIdDaa; case Mnemonic::Das: return asmjit::x86::Inst::kIdDas; case Mnemonic::Dec: return asmjit::x86::Inst::kIdDec; case Mnemonic::Div: return asmjit::x86::Inst::kIdDiv; case Mnemonic::Divpd: return asmjit::x86::Inst::kIdDivpd; case Mnemonic::Divps: return 
asmjit::x86::Inst::kIdDivps; case Mnemonic::Divsd: return asmjit::x86::Inst::kIdDivsd; case Mnemonic::Divss: return asmjit::x86::Inst::kIdDivss; case Mnemonic::Dppd: return asmjit::x86::Inst::kIdDppd; case Mnemonic::Dpps: return asmjit::x86::Inst::kIdDpps; case Mnemonic::Emms: return asmjit::x86::Inst::kIdEmms; case Mnemonic::Enter: return asmjit::x86::Inst::kIdEnter; case Mnemonic::Extractps: return asmjit::x86::Inst::kIdExtractps; case Mnemonic::Extrq: return asmjit::x86::Inst::kIdExtrq; case Mnemonic::F2xm1: return asmjit::x86::Inst::kIdF2xm1; case Mnemonic::Fabs: return asmjit::x86::Inst::kIdFabs; case Mnemonic::Fadd: return asmjit::x86::Inst::kIdFadd; case Mnemonic::Faddp: return asmjit::x86::Inst::kIdFaddp; case Mnemonic::Fbld: return asmjit::x86::Inst::kIdFbld; case Mnemonic::Fbstp: return asmjit::x86::Inst::kIdFbstp; case Mnemonic::Fchs: return asmjit::x86::Inst::kIdFchs; case Mnemonic::Fcmovb: return asmjit::x86::Inst::kIdFcmovb; case Mnemonic::Fcmovbe: return asmjit::x86::Inst::kIdFcmovbe; case Mnemonic::Fcmove: return asmjit::x86::Inst::kIdFcmove; case Mnemonic::Fcmovnb: return asmjit::x86::Inst::kIdFcmovnb; case Mnemonic::Fcmovnbe: return asmjit::x86::Inst::kIdFcmovnbe; case Mnemonic::Fcmovne: return asmjit::x86::Inst::kIdFcmovne; case Mnemonic::Fcmovnu: return asmjit::x86::Inst::kIdFcmovnu; case Mnemonic::Fcmovu: return asmjit::x86::Inst::kIdFcmovu; case Mnemonic::Fcom: return asmjit::x86::Inst::kIdFcom; case Mnemonic::Fcomi: return asmjit::x86::Inst::kIdFcomi; case Mnemonic::Fcomip: return asmjit::x86::Inst::kIdFcomip; case Mnemonic::Fcomp: return asmjit::x86::Inst::kIdFcomp; case Mnemonic::Fcompp: return asmjit::x86::Inst::kIdFcompp; case Mnemonic::Fcos: return asmjit::x86::Inst::kIdFcos; case Mnemonic::Fdecstp: return asmjit::x86::Inst::kIdFdecstp; case Mnemonic::Fdiv: return asmjit::x86::Inst::kIdFdiv; case Mnemonic::Fdivp: return asmjit::x86::Inst::kIdFdivp; case Mnemonic::Fdivr: return asmjit::x86::Inst::kIdFdivr; case Mnemonic::Fdivrp: return 
asmjit::x86::Inst::kIdFdivrp; case Mnemonic::Femms: return asmjit::x86::Inst::kIdFemms; case Mnemonic::Ffree: return asmjit::x86::Inst::kIdFfree; case Mnemonic::Fiadd: return asmjit::x86::Inst::kIdFiadd; case Mnemonic::Ficom: return asmjit::x86::Inst::kIdFicom; case Mnemonic::Ficomp: return asmjit::x86::Inst::kIdFicomp; case Mnemonic::Fidiv: return asmjit::x86::Inst::kIdFidiv; case Mnemonic::Fidivr: return asmjit::x86::Inst::kIdFidivr; case Mnemonic::Fild: return asmjit::x86::Inst::kIdFild; case Mnemonic::Fimul: return asmjit::x86::Inst::kIdFimul; case Mnemonic::Fincstp: return asmjit::x86::Inst::kIdFincstp; case Mnemonic::Fist: return asmjit::x86::Inst::kIdFist; case Mnemonic::Fistp: return asmjit::x86::Inst::kIdFistp; case Mnemonic::Fisttp: return asmjit::x86::Inst::kIdFisttp; case Mnemonic::Fisub: return asmjit::x86::Inst::kIdFisub; case Mnemonic::Fisubr: return asmjit::x86::Inst::kIdFisubr; case Mnemonic::Fld: return asmjit::x86::Inst::kIdFld; case Mnemonic::Fld1: return asmjit::x86::Inst::kIdFld1; case Mnemonic::Fldcw: return asmjit::x86::Inst::kIdFldcw; case Mnemonic::Fldenv: return asmjit::x86::Inst::kIdFldenv; case Mnemonic::Fldl2e: return asmjit::x86::Inst::kIdFldl2e; case Mnemonic::Fldl2t: return asmjit::x86::Inst::kIdFldl2t; case Mnemonic::Fldlg2: return asmjit::x86::Inst::kIdFldlg2; case Mnemonic::Fldln2: return asmjit::x86::Inst::kIdFldln2; case Mnemonic::Fldpi: return asmjit::x86::Inst::kIdFldpi; case Mnemonic::Fldz: return asmjit::x86::Inst::kIdFldz; case Mnemonic::Fmul: return asmjit::x86::Inst::kIdFmul; case Mnemonic::Fmulp: return asmjit::x86::Inst::kIdFmulp; case Mnemonic::Fnclex: return asmjit::x86::Inst::kIdFnclex; case Mnemonic::Fninit: return asmjit::x86::Inst::kIdFninit; case Mnemonic::Fnop: return asmjit::x86::Inst::kIdFnop; case Mnemonic::Fnsave: return asmjit::x86::Inst::kIdFnsave; case Mnemonic::Fnstcw: return asmjit::x86::Inst::kIdFnstcw; case Mnemonic::Fnstenv: return asmjit::x86::Inst::kIdFnstenv; case Mnemonic::Fnstsw: return 
asmjit::x86::Inst::kIdFnstsw; case Mnemonic::Fpatan: return asmjit::x86::Inst::kIdFpatan; case Mnemonic::Fprem: return asmjit::x86::Inst::kIdFprem; case Mnemonic::Fprem1: return asmjit::x86::Inst::kIdFprem1; case Mnemonic::Fptan: return asmjit::x86::Inst::kIdFptan; case Mnemonic::Frndint: return asmjit::x86::Inst::kIdFrndint; case Mnemonic::Frstor: return asmjit::x86::Inst::kIdFrstor; case Mnemonic::Fscale: return asmjit::x86::Inst::kIdFscale; case Mnemonic::Fsin: return asmjit::x86::Inst::kIdFsin; case Mnemonic::Fsincos: return asmjit::x86::Inst::kIdFsincos; case Mnemonic::Fsqrt: return asmjit::x86::Inst::kIdFsqrt; case Mnemonic::Fst: return asmjit::x86::Inst::kIdFst; case Mnemonic::Fstp: return asmjit::x86::Inst::kIdFstp; case Mnemonic::Fsub: return asmjit::x86::Inst::kIdFsub; case Mnemonic::Fsubp: return asmjit::x86::Inst::kIdFsubp; case Mnemonic::Fsubr: return asmjit::x86::Inst::kIdFsubr; case Mnemonic::Fsubrp: return asmjit::x86::Inst::kIdFsubrp; case Mnemonic::Ftst: return asmjit::x86::Inst::kIdFtst; case Mnemonic::Fucom: return asmjit::x86::Inst::kIdFucom; case Mnemonic::Fucomi: return asmjit::x86::Inst::kIdFucomi; case Mnemonic::Fucomip: return asmjit::x86::Inst::kIdFucomip; case Mnemonic::Fucomp: return asmjit::x86::Inst::kIdFucomp; case Mnemonic::Fucompp: return asmjit::x86::Inst::kIdFucompp; case Mnemonic::Fwait: return asmjit::x86::Inst::kIdFwait; case Mnemonic::Fxam: return asmjit::x86::Inst::kIdFxam; case Mnemonic::Fxch: return asmjit::x86::Inst::kIdFxch; case Mnemonic::Fxrstor: return asmjit::x86::Inst::kIdFxrstor; case Mnemonic::Fxrstor64: return asmjit::x86::Inst::kIdFxrstor64; case Mnemonic::Fxsave: return asmjit::x86::Inst::kIdFxsave; case Mnemonic::Fxsave64: return asmjit::x86::Inst::kIdFxsave64; case Mnemonic::Fxtract: return asmjit::x86::Inst::kIdFxtract; case Mnemonic::Fyl2x: return asmjit::x86::Inst::kIdFyl2x; case Mnemonic::Fyl2xp1: return asmjit::x86::Inst::kIdFyl2xp1; case Mnemonic::Haddpd: return asmjit::x86::Inst::kIdHaddpd; case 
Mnemonic::Haddps: return asmjit::x86::Inst::kIdHaddps; case Mnemonic::Hlt: return asmjit::x86::Inst::kIdHlt; case Mnemonic::Hsubpd: return asmjit::x86::Inst::kIdHsubpd; case Mnemonic::Hsubps: return asmjit::x86::Inst::kIdHsubps; case Mnemonic::Idiv: return asmjit::x86::Inst::kIdIdiv; case Mnemonic::Imul: return asmjit::x86::Inst::kIdImul; case Mnemonic::In: return asmjit::x86::Inst::kIdIn; case Mnemonic::Inc: return asmjit::x86::Inst::kIdInc; case Mnemonic::Insertps: return asmjit::x86::Inst::kIdInsertps; case Mnemonic::Insertq: return asmjit::x86::Inst::kIdInsertq; case Mnemonic::Int: return asmjit::x86::Inst::kIdInt; case Mnemonic::Int3: return asmjit::x86::Inst::kIdInt3; case Mnemonic::Into: return asmjit::x86::Inst::kIdInto; case Mnemonic::Invd: return asmjit::x86::Inst::kIdInvd; case Mnemonic::Invlpg: return asmjit::x86::Inst::kIdInvlpg; case Mnemonic::Invpcid: return asmjit::x86::Inst::kIdInvpcid; case Mnemonic::Iret: return asmjit::x86::Inst::kIdIret; case Mnemonic::Iretd: return asmjit::x86::Inst::kIdIretd; case Mnemonic::Iretq: return asmjit::x86::Inst::kIdIretq; case Mnemonic::Jb: return asmjit::x86::Inst::kIdJb; case Mnemonic::Jbe: return asmjit::x86::Inst::kIdJbe; case Mnemonic::Jecxz: return asmjit::x86::Inst::kIdJecxz; case Mnemonic::Jl: return asmjit::x86::Inst::kIdJl; case Mnemonic::Jle: return asmjit::x86::Inst::kIdJle; case Mnemonic::Jmp: return asmjit::x86::Inst::kIdJmp; case Mnemonic::Jnb: return asmjit::x86::Inst::kIdJnb; case Mnemonic::Jnbe: return asmjit::x86::Inst::kIdJnbe; case Mnemonic::Jnl: return asmjit::x86::Inst::kIdJnl; case Mnemonic::Jnle: return asmjit::x86::Inst::kIdJnle; case Mnemonic::Jno: return asmjit::x86::Inst::kIdJno; case Mnemonic::Jnp: return asmjit::x86::Inst::kIdJnp; case Mnemonic::Jns: return asmjit::x86::Inst::kIdJns; case Mnemonic::Jnz: return asmjit::x86::Inst::kIdJnz; case Mnemonic::Jo: return asmjit::x86::Inst::kIdJo; case Mnemonic::Jp: return asmjit::x86::Inst::kIdJp; case Mnemonic::Js: return 
asmjit::x86::Inst::kIdJs; case Mnemonic::Jz: return asmjit::x86::Inst::kIdJz; case Mnemonic::Kaddb: return asmjit::x86::Inst::kIdKaddb; case Mnemonic::Kaddd: return asmjit::x86::Inst::kIdKaddd; case Mnemonic::Kaddq: return asmjit::x86::Inst::kIdKaddq; case Mnemonic::Kaddw: return asmjit::x86::Inst::kIdKaddw; case Mnemonic::Kandb: return asmjit::x86::Inst::kIdKandb; case Mnemonic::Kandd: return asmjit::x86::Inst::kIdKandd; case Mnemonic::Kandnb: return asmjit::x86::Inst::kIdKandnb; case Mnemonic::Kandnd: return asmjit::x86::Inst::kIdKandnd; case Mnemonic::Kandnq: return asmjit::x86::Inst::kIdKandnq; case Mnemonic::Kandnw: return asmjit::x86::Inst::kIdKandnw; case Mnemonic::Kandq: return asmjit::x86::Inst::kIdKandq; case Mnemonic::Kandw: return asmjit::x86::Inst::kIdKandw; case Mnemonic::Kmovb: return asmjit::x86::Inst::kIdKmovb; case Mnemonic::Kmovd: return asmjit::x86::Inst::kIdKmovd; case Mnemonic::Kmovq: return asmjit::x86::Inst::kIdKmovq; case Mnemonic::Kmovw: return asmjit::x86::Inst::kIdKmovw; case Mnemonic::Knotb: return asmjit::x86::Inst::kIdKnotb; case Mnemonic::Knotd: return asmjit::x86::Inst::kIdKnotd; case Mnemonic::Knotq: return asmjit::x86::Inst::kIdKnotq; case Mnemonic::Knotw: return asmjit::x86::Inst::kIdKnotw; case Mnemonic::Korb: return asmjit::x86::Inst::kIdKorb; case Mnemonic::Kord: return asmjit::x86::Inst::kIdKord; case Mnemonic::Korq: return asmjit::x86::Inst::kIdKorq; case Mnemonic::Kortestb: return asmjit::x86::Inst::kIdKortestb; case Mnemonic::Kortestd: return asmjit::x86::Inst::kIdKortestd; case Mnemonic::Kortestq: return asmjit::x86::Inst::kIdKortestq; case Mnemonic::Kortestw: return asmjit::x86::Inst::kIdKortestw; case Mnemonic::Korw: return asmjit::x86::Inst::kIdKorw; case Mnemonic::Kshiftlb: return asmjit::x86::Inst::kIdKshiftlb; case Mnemonic::Kshiftld: return asmjit::x86::Inst::kIdKshiftld; case Mnemonic::Kshiftlq: return asmjit::x86::Inst::kIdKshiftlq; case Mnemonic::Kshiftlw: return asmjit::x86::Inst::kIdKshiftlw; case 
Mnemonic::Kshiftrb: return asmjit::x86::Inst::kIdKshiftrb; case Mnemonic::Kshiftrd: return asmjit::x86::Inst::kIdKshiftrd; case Mnemonic::Kshiftrq: return asmjit::x86::Inst::kIdKshiftrq; case Mnemonic::Kshiftrw: return asmjit::x86::Inst::kIdKshiftrw; case Mnemonic::Ktestb: return asmjit::x86::Inst::kIdKtestb; case Mnemonic::Ktestd: return asmjit::x86::Inst::kIdKtestd; case Mnemonic::Ktestq: return asmjit::x86::Inst::kIdKtestq; case Mnemonic::Ktestw: return asmjit::x86::Inst::kIdKtestw; case Mnemonic::Kunpckbw: return asmjit::x86::Inst::kIdKunpckbw; case Mnemonic::Kunpckdq: return asmjit::x86::Inst::kIdKunpckdq; case Mnemonic::Kunpckwd: return asmjit::x86::Inst::kIdKunpckwd; case Mnemonic::Kxnorb: return asmjit::x86::Inst::kIdKxnorb; case Mnemonic::Kxnord: return asmjit::x86::Inst::kIdKxnord; case Mnemonic::Kxnorq: return asmjit::x86::Inst::kIdKxnorq; case Mnemonic::Kxnorw: return asmjit::x86::Inst::kIdKxnorw; case Mnemonic::Kxorb: return asmjit::x86::Inst::kIdKxorb; case Mnemonic::Kxord: return asmjit::x86::Inst::kIdKxord; case Mnemonic::Kxorq: return asmjit::x86::Inst::kIdKxorq; case Mnemonic::Kxorw: return asmjit::x86::Inst::kIdKxorw; case Mnemonic::Lahf: return asmjit::x86::Inst::kIdLahf; case Mnemonic::Lar: return asmjit::x86::Inst::kIdLar; case Mnemonic::Lddqu: return asmjit::x86::Inst::kIdLddqu; case Mnemonic::Ldmxcsr: return asmjit::x86::Inst::kIdLdmxcsr; case Mnemonic::Lds: return asmjit::x86::Inst::kIdLds; case Mnemonic::Lea: return asmjit::x86::Inst::kIdLea; case Mnemonic::Leave: return asmjit::x86::Inst::kIdLeave; case Mnemonic::Les: return asmjit::x86::Inst::kIdLes; case Mnemonic::Lfence: return asmjit::x86::Inst::kIdLfence; case Mnemonic::Lfs: return asmjit::x86::Inst::kIdLfs; case Mnemonic::Lgdt: return asmjit::x86::Inst::kIdLgdt; case Mnemonic::Lgs: return asmjit::x86::Inst::kIdLgs; case Mnemonic::Lidt: return asmjit::x86::Inst::kIdLidt; case Mnemonic::Lldt: return asmjit::x86::Inst::kIdLldt; case Mnemonic::Lmsw: return asmjit::x86::Inst::kIdLmsw; 
case Mnemonic::Loop: return asmjit::x86::Inst::kIdLoop; case Mnemonic::Loope: return asmjit::x86::Inst::kIdLoope; case Mnemonic::Loopne: return asmjit::x86::Inst::kIdLoopne; case Mnemonic::Lsl: return asmjit::x86::Inst::kIdLsl; case Mnemonic::Lss: return asmjit::x86::Inst::kIdLss; case Mnemonic::Ltr: return asmjit::x86::Inst::kIdLtr; case Mnemonic::Lzcnt: return asmjit::x86::Inst::kIdLzcnt; case Mnemonic::Maskmovdqu: return asmjit::x86::Inst::kIdMaskmovdqu; case Mnemonic::Maskmovq: return asmjit::x86::Inst::kIdMaskmovq; case Mnemonic::Maxpd: return asmjit::x86::Inst::kIdMaxpd; case Mnemonic::Maxps: return asmjit::x86::Inst::kIdMaxps; case Mnemonic::Maxsd: return asmjit::x86::Inst::kIdMaxsd; case Mnemonic::Maxss: return asmjit::x86::Inst::kIdMaxss; case Mnemonic::Mfence: return asmjit::x86::Inst::kIdMfence; case Mnemonic::Minpd: return asmjit::x86::Inst::kIdMinpd; case Mnemonic::Minps: return asmjit::x86::Inst::kIdMinps; case Mnemonic::Minsd: return asmjit::x86::Inst::kIdMinsd; case Mnemonic::Minss: return asmjit::x86::Inst::kIdMinss; case Mnemonic::Monitor: return asmjit::x86::Inst::kIdMonitor; case Mnemonic::Monitorx: return asmjit::x86::Inst::kIdMonitorx; case Mnemonic::Mov: return asmjit::x86::Inst::kIdMov; case Mnemonic::Movapd: return asmjit::x86::Inst::kIdMovapd; case Mnemonic::Movaps: return asmjit::x86::Inst::kIdMovaps; case Mnemonic::Movbe: return asmjit::x86::Inst::kIdMovbe; case Mnemonic::Movd: return asmjit::x86::Inst::kIdMovd; case Mnemonic::Movddup: return asmjit::x86::Inst::kIdMovddup; case Mnemonic::Movdq2q: return asmjit::x86::Inst::kIdMovdq2q; case Mnemonic::Movdqa: return asmjit::x86::Inst::kIdMovdqa; case Mnemonic::Movdqu: return asmjit::x86::Inst::kIdMovdqu; case Mnemonic::Movhlps: return asmjit::x86::Inst::kIdMovhlps; case Mnemonic::Movhpd: return asmjit::x86::Inst::kIdMovhpd; case Mnemonic::Movhps: return asmjit::x86::Inst::kIdMovhps; case Mnemonic::Movlhps: return asmjit::x86::Inst::kIdMovlhps; case Mnemonic::Movlpd: return 
asmjit::x86::Inst::kIdMovlpd; case Mnemonic::Movlps: return asmjit::x86::Inst::kIdMovlps; case Mnemonic::Movmskpd: return asmjit::x86::Inst::kIdMovmskpd; case Mnemonic::Movmskps: return asmjit::x86::Inst::kIdMovmskps; case Mnemonic::Movntdq: return asmjit::x86::Inst::kIdMovntdq; case Mnemonic::Movntdqa: return asmjit::x86::Inst::kIdMovntdqa; case Mnemonic::Movnti: return asmjit::x86::Inst::kIdMovnti; case Mnemonic::Movntpd: return asmjit::x86::Inst::kIdMovntpd; case Mnemonic::Movntps: return asmjit::x86::Inst::kIdMovntps; case Mnemonic::Movntq: return asmjit::x86::Inst::kIdMovntq; case Mnemonic::Movntsd: return asmjit::x86::Inst::kIdMovntsd; case Mnemonic::Movntss: return asmjit::x86::Inst::kIdMovntss; case Mnemonic::Movq: return asmjit::x86::Inst::kIdMovq; case Mnemonic::Movq2dq: return asmjit::x86::Inst::kIdMovq2dq; case Mnemonic::Movshdup: return asmjit::x86::Inst::kIdMovshdup; case Mnemonic::Movsldup: return asmjit::x86::Inst::kIdMovsldup; case Mnemonic::Movss: return asmjit::x86::Inst::kIdMovss; case Mnemonic::Movsx: return asmjit::x86::Inst::kIdMovsx; case Mnemonic::Movsxd: return asmjit::x86::Inst::kIdMovsxd; case Mnemonic::Movupd: return asmjit::x86::Inst::kIdMovupd; case Mnemonic::Movups: return asmjit::x86::Inst::kIdMovups; case Mnemonic::Movzx: return asmjit::x86::Inst::kIdMovzx; case Mnemonic::Mpsadbw: return asmjit::x86::Inst::kIdMpsadbw; case Mnemonic::Mul: return asmjit::x86::Inst::kIdMul; case Mnemonic::Mulpd: return asmjit::x86::Inst::kIdMulpd; case Mnemonic::Mulps: return asmjit::x86::Inst::kIdMulps; case Mnemonic::Mulsd: return asmjit::x86::Inst::kIdMulsd; case Mnemonic::Mulss: return asmjit::x86::Inst::kIdMulss; case Mnemonic::Mulx: return asmjit::x86::Inst::kIdMulx; case Mnemonic::Mwait: return asmjit::x86::Inst::kIdMwait; case Mnemonic::Mwaitx: return asmjit::x86::Inst::kIdMwaitx; case Mnemonic::Neg: return asmjit::x86::Inst::kIdNeg; case Mnemonic::Nop: return asmjit::x86::Inst::kIdNop; case Mnemonic::Not: return asmjit::x86::Inst::kIdNot; 
case Mnemonic::Or: return asmjit::x86::Inst::kIdOr; case Mnemonic::Orpd: return asmjit::x86::Inst::kIdOrpd; case Mnemonic::Orps: return asmjit::x86::Inst::kIdOrps; case Mnemonic::Out: return asmjit::x86::Inst::kIdOut; case Mnemonic::Pabsb: return asmjit::x86::Inst::kIdPabsb; case Mnemonic::Pabsd: return asmjit::x86::Inst::kIdPabsd; case Mnemonic::Pabsw: return asmjit::x86::Inst::kIdPabsw; case Mnemonic::Packssdw: return asmjit::x86::Inst::kIdPackssdw; case Mnemonic::Packsswb: return asmjit::x86::Inst::kIdPacksswb; case Mnemonic::Packusdw: return asmjit::x86::Inst::kIdPackusdw; case Mnemonic::Packuswb: return asmjit::x86::Inst::kIdPackuswb; case Mnemonic::Paddb: return asmjit::x86::Inst::kIdPaddb; case Mnemonic::Paddd: return asmjit::x86::Inst::kIdPaddd; case Mnemonic::Paddq: return asmjit::x86::Inst::kIdPaddq; case Mnemonic::Paddsb: return asmjit::x86::Inst::kIdPaddsb; case Mnemonic::Paddsw: return asmjit::x86::Inst::kIdPaddsw; case Mnemonic::Paddusb: return asmjit::x86::Inst::kIdPaddusb; case Mnemonic::Paddusw: return asmjit::x86::Inst::kIdPaddusw; case Mnemonic::Paddw: return asmjit::x86::Inst::kIdPaddw; case Mnemonic::Palignr: return asmjit::x86::Inst::kIdPalignr; case Mnemonic::Pand: return asmjit::x86::Inst::kIdPand; case Mnemonic::Pandn: return asmjit::x86::Inst::kIdPandn; case Mnemonic::Pause: return asmjit::x86::Inst::kIdPause; case Mnemonic::Pavgb: return asmjit::x86::Inst::kIdPavgb; case Mnemonic::Pavgusb: return asmjit::x86::Inst::kIdPavgusb; case Mnemonic::Pavgw: return asmjit::x86::Inst::kIdPavgw; case Mnemonic::Pblendvb: return asmjit::x86::Inst::kIdPblendvb; case Mnemonic::Pblendw: return asmjit::x86::Inst::kIdPblendw; case Mnemonic::Pclmulqdq: return asmjit::x86::Inst::kIdPclmulqdq; case Mnemonic::Pcmpeqb: return asmjit::x86::Inst::kIdPcmpeqb; case Mnemonic::Pcmpeqd: return asmjit::x86::Inst::kIdPcmpeqd; case Mnemonic::Pcmpeqq: return asmjit::x86::Inst::kIdPcmpeqq; case Mnemonic::Pcmpeqw: return asmjit::x86::Inst::kIdPcmpeqw; case 
Mnemonic::Pcmpestri: return asmjit::x86::Inst::kIdPcmpestri; case Mnemonic::Pcmpestrm: return asmjit::x86::Inst::kIdPcmpestrm; case Mnemonic::Pcmpgtb: return asmjit::x86::Inst::kIdPcmpgtb; case Mnemonic::Pcmpgtd: return asmjit::x86::Inst::kIdPcmpgtd; case Mnemonic::Pcmpgtq: return asmjit::x86::Inst::kIdPcmpgtq; case Mnemonic::Pcmpgtw: return asmjit::x86::Inst::kIdPcmpgtw; case Mnemonic::Pcmpistri: return asmjit::x86::Inst::kIdPcmpistri; case Mnemonic::Pcmpistrm: return asmjit::x86::Inst::kIdPcmpistrm; case Mnemonic::Pdep: return asmjit::x86::Inst::kIdPdep; case Mnemonic::Pext: return asmjit::x86::Inst::kIdPext; case Mnemonic::Pextrb: return asmjit::x86::Inst::kIdPextrb; case Mnemonic::Pextrd: return asmjit::x86::Inst::kIdPextrd; case Mnemonic::Pextrq: return asmjit::x86::Inst::kIdPextrq; case Mnemonic::Pextrw: return asmjit::x86::Inst::kIdPextrw; case Mnemonic::Pf2id: return asmjit::x86::Inst::kIdPf2id; case Mnemonic::Pf2iw: return asmjit::x86::Inst::kIdPf2iw; case Mnemonic::Pfacc: return asmjit::x86::Inst::kIdPfacc; case Mnemonic::Pfadd: return asmjit::x86::Inst::kIdPfadd; case Mnemonic::Pfcmpeq: return asmjit::x86::Inst::kIdPfcmpeq; case Mnemonic::Pfcmpge: return asmjit::x86::Inst::kIdPfcmpge; case Mnemonic::Pfcmpgt: return asmjit::x86::Inst::kIdPfcmpgt; case Mnemonic::Pfmax: return asmjit::x86::Inst::kIdPfmax; case Mnemonic::Pfmin: return asmjit::x86::Inst::kIdPfmin; case Mnemonic::Pfmul: return asmjit::x86::Inst::kIdPfmul; case Mnemonic::Pfnacc: return asmjit::x86::Inst::kIdPfnacc; case Mnemonic::Pfpnacc: return asmjit::x86::Inst::kIdPfpnacc; case Mnemonic::Pfrcp: return asmjit::x86::Inst::kIdPfrcp; case Mnemonic::Pfrcpit2: return asmjit::x86::Inst::kIdPfrcpit2; case Mnemonic::Pfrsqit1: return asmjit::x86::Inst::kIdPfrsqit1; case Mnemonic::Pfsub: return asmjit::x86::Inst::kIdPfsub; case Mnemonic::Pfsubr: return asmjit::x86::Inst::kIdPfsubr; case Mnemonic::Phaddd: return asmjit::x86::Inst::kIdPhaddd; case Mnemonic::Phaddsw: return asmjit::x86::Inst::kIdPhaddsw; 
case Mnemonic::Phaddw: return asmjit::x86::Inst::kIdPhaddw; case Mnemonic::Phminposuw: return asmjit::x86::Inst::kIdPhminposuw; case Mnemonic::Phsubd: return asmjit::x86::Inst::kIdPhsubd; case Mnemonic::Phsubsw: return asmjit::x86::Inst::kIdPhsubsw; case Mnemonic::Phsubw: return asmjit::x86::Inst::kIdPhsubw; case Mnemonic::Pi2fd: return asmjit::x86::Inst::kIdPi2fd; case Mnemonic::Pi2fw: return asmjit::x86::Inst::kIdPi2fw; case Mnemonic::Pinsrb: return asmjit::x86::Inst::kIdPinsrb; case Mnemonic::Pinsrd: return asmjit::x86::Inst::kIdPinsrd; case Mnemonic::Pinsrq: return asmjit::x86::Inst::kIdPinsrq; case Mnemonic::Pinsrw: return asmjit::x86::Inst::kIdPinsrw; case Mnemonic::Pmaddubsw: return asmjit::x86::Inst::kIdPmaddubsw; case Mnemonic::Pmaddwd: return asmjit::x86::Inst::kIdPmaddwd; case Mnemonic::Pmaxsb: return asmjit::x86::Inst::kIdPmaxsb; case Mnemonic::Pmaxsd: return asmjit::x86::Inst::kIdPmaxsd; case Mnemonic::Pmaxsw: return asmjit::x86::Inst::kIdPmaxsw; case Mnemonic::Pmaxub: return asmjit::x86::Inst::kIdPmaxub; case Mnemonic::Pmaxud: return asmjit::x86::Inst::kIdPmaxud; case Mnemonic::Pmaxuw: return asmjit::x86::Inst::kIdPmaxuw; case Mnemonic::Pminsb: return asmjit::x86::Inst::kIdPminsb; case Mnemonic::Pminsd: return asmjit::x86::Inst::kIdPminsd; case Mnemonic::Pminsw: return asmjit::x86::Inst::kIdPminsw; case Mnemonic::Pminub: return asmjit::x86::Inst::kIdPminub; case Mnemonic::Pminud: return asmjit::x86::Inst::kIdPminud; case Mnemonic::Pminuw: return asmjit::x86::Inst::kIdPminuw; case Mnemonic::Pmovmskb: return asmjit::x86::Inst::kIdPmovmskb; case Mnemonic::Pmovsxbd: return asmjit::x86::Inst::kIdPmovsxbd; case Mnemonic::Pmovsxbq: return asmjit::x86::Inst::kIdPmovsxbq; case Mnemonic::Pmovsxbw: return asmjit::x86::Inst::kIdPmovsxbw; case Mnemonic::Pmovsxdq: return asmjit::x86::Inst::kIdPmovsxdq; case Mnemonic::Pmovsxwd: return asmjit::x86::Inst::kIdPmovsxwd; case Mnemonic::Pmovsxwq: return asmjit::x86::Inst::kIdPmovsxwq; case Mnemonic::Pmovzxbd: return 
asmjit::x86::Inst::kIdPmovzxbd; case Mnemonic::Pmovzxbq: return asmjit::x86::Inst::kIdPmovzxbq; case Mnemonic::Pmovzxbw: return asmjit::x86::Inst::kIdPmovzxbw; case Mnemonic::Pmovzxdq: return asmjit::x86::Inst::kIdPmovzxdq; case Mnemonic::Pmovzxwd: return asmjit::x86::Inst::kIdPmovzxwd; case Mnemonic::Pmovzxwq: return asmjit::x86::Inst::kIdPmovzxwq; case Mnemonic::Pmuldq: return asmjit::x86::Inst::kIdPmuldq; case Mnemonic::Pmulhrsw: return asmjit::x86::Inst::kIdPmulhrsw; case Mnemonic::Pmulhrw: return asmjit::x86::Inst::kIdPmulhrw; case Mnemonic::Pmulhuw: return asmjit::x86::Inst::kIdPmulhuw; case Mnemonic::Pmulhw: return asmjit::x86::Inst::kIdPmulhw; case Mnemonic::Pmulld: return asmjit::x86::Inst::kIdPmulld; case Mnemonic::Pmullw: return asmjit::x86::Inst::kIdPmullw; case Mnemonic::Pmuludq: return asmjit::x86::Inst::kIdPmuludq; case Mnemonic::Pop: return asmjit::x86::Inst::kIdPop; case Mnemonic::Popa: return asmjit::x86::Inst::kIdPopa; case Mnemonic::Popad: return asmjit::x86::Inst::kIdPopad; case Mnemonic::Popcnt: return asmjit::x86::Inst::kIdPopcnt; case Mnemonic::Popf: return asmjit::x86::Inst::kIdPopf; case Mnemonic::Popfd: return asmjit::x86::Inst::kIdPopfd; case Mnemonic::Popfq: return asmjit::x86::Inst::kIdPopfq; case Mnemonic::Por: return asmjit::x86::Inst::kIdPor; case Mnemonic::Prefetch: return asmjit::x86::Inst::kIdPrefetch; case Mnemonic::Prefetchnta: return asmjit::x86::Inst::kIdPrefetchnta; case Mnemonic::Prefetcht0: return asmjit::x86::Inst::kIdPrefetcht0; case Mnemonic::Prefetcht1: return asmjit::x86::Inst::kIdPrefetcht1; case Mnemonic::Prefetcht2: return asmjit::x86::Inst::kIdPrefetcht2; case Mnemonic::Prefetchw: return asmjit::x86::Inst::kIdPrefetchw; case Mnemonic::Prefetchwt1: return asmjit::x86::Inst::kIdPrefetchwt1; case Mnemonic::Psadbw: return asmjit::x86::Inst::kIdPsadbw; case Mnemonic::Pshufb: return asmjit::x86::Inst::kIdPshufb; case Mnemonic::Pshufd: return asmjit::x86::Inst::kIdPshufd; case Mnemonic::Pshufhw: return 
asmjit::x86::Inst::kIdPshufhw; case Mnemonic::Pshuflw: return asmjit::x86::Inst::kIdPshuflw; case Mnemonic::Pshufw: return asmjit::x86::Inst::kIdPshufw; case Mnemonic::Psignb: return asmjit::x86::Inst::kIdPsignb; case Mnemonic::Psignd: return asmjit::x86::Inst::kIdPsignd; case Mnemonic::Psignw: return asmjit::x86::Inst::kIdPsignw; case Mnemonic::Pslld: return asmjit::x86::Inst::kIdPslld; case Mnemonic::Pslldq: return asmjit::x86::Inst::kIdPslldq; case Mnemonic::Psllq: return asmjit::x86::Inst::kIdPsllq; case Mnemonic::Psllw: return asmjit::x86::Inst::kIdPsllw; case Mnemonic::Psrad: return asmjit::x86::Inst::kIdPsrad; case Mnemonic::Psraw: return asmjit::x86::Inst::kIdPsraw; case Mnemonic::Psrld: return asmjit::x86::Inst::kIdPsrld; case Mnemonic::Psrldq: return asmjit::x86::Inst::kIdPsrldq; case Mnemonic::Psrlq: return asmjit::x86::Inst::kIdPsrlq; case Mnemonic::Psrlw: return asmjit::x86::Inst::kIdPsrlw; case Mnemonic::Psubb: return asmjit::x86::Inst::kIdPsubb; case Mnemonic::Psubd: return asmjit::x86::Inst::kIdPsubd; case Mnemonic::Psubq: return asmjit::x86::Inst::kIdPsubq; case Mnemonic::Psubsb: return asmjit::x86::Inst::kIdPsubsb; case Mnemonic::Psubsw: return asmjit::x86::Inst::kIdPsubsw; case Mnemonic::Psubusb: return asmjit::x86::Inst::kIdPsubusb; case Mnemonic::Psubusw: return asmjit::x86::Inst::kIdPsubusw; case Mnemonic::Psubw: return asmjit::x86::Inst::kIdPsubw; case Mnemonic::Pswapd: return asmjit::x86::Inst::kIdPswapd; case Mnemonic::Ptest: return asmjit::x86::Inst::kIdPtest; case Mnemonic::Punpckhbw: return asmjit::x86::Inst::kIdPunpckhbw; case Mnemonic::Punpckhdq: return asmjit::x86::Inst::kIdPunpckhdq; case Mnemonic::Punpckhqdq: return asmjit::x86::Inst::kIdPunpckhqdq; case Mnemonic::Punpckhwd: return asmjit::x86::Inst::kIdPunpckhwd; case Mnemonic::Punpcklbw: return asmjit::x86::Inst::kIdPunpcklbw; case Mnemonic::Punpckldq: return asmjit::x86::Inst::kIdPunpckldq; case Mnemonic::Punpcklqdq: return asmjit::x86::Inst::kIdPunpcklqdq; case 
Mnemonic::Punpcklwd: return asmjit::x86::Inst::kIdPunpcklwd; case Mnemonic::Push: return asmjit::x86::Inst::kIdPush; case Mnemonic::Pusha: return asmjit::x86::Inst::kIdPusha; case Mnemonic::Pushad: return asmjit::x86::Inst::kIdPushad; case Mnemonic::Pushf: return asmjit::x86::Inst::kIdPushf; case Mnemonic::Pushfd: return asmjit::x86::Inst::kIdPushfd; case Mnemonic::Pushfq: return asmjit::x86::Inst::kIdPushfq; case Mnemonic::Pxor: return asmjit::x86::Inst::kIdPxor; case Mnemonic::Rcl: return asmjit::x86::Inst::kIdRcl; case Mnemonic::Rcpps: return asmjit::x86::Inst::kIdRcpps; case Mnemonic::Rcpss: return asmjit::x86::Inst::kIdRcpss; case Mnemonic::Rcr: return asmjit::x86::Inst::kIdRcr; case Mnemonic::Rdfsbase: return asmjit::x86::Inst::kIdRdfsbase; case Mnemonic::Rdgsbase: return asmjit::x86::Inst::kIdRdgsbase; case Mnemonic::Rdmsr: return asmjit::x86::Inst::kIdRdmsr; case Mnemonic::Rdpmc: return asmjit::x86::Inst::kIdRdpmc; case Mnemonic::Rdrand: return asmjit::x86::Inst::kIdRdrand; case Mnemonic::Rdseed: return asmjit::x86::Inst::kIdRdseed; case Mnemonic::Rdtsc: return asmjit::x86::Inst::kIdRdtsc; case Mnemonic::Rdtscp: return asmjit::x86::Inst::kIdRdtscp; case Mnemonic::Ret: return asmjit::x86::Inst::kIdRet; case Mnemonic::Rol: return asmjit::x86::Inst::kIdRol; case Mnemonic::Ror: return asmjit::x86::Inst::kIdRor; case Mnemonic::Rorx: return asmjit::x86::Inst::kIdRorx; case Mnemonic::Roundpd: return asmjit::x86::Inst::kIdRoundpd; case Mnemonic::Roundps: return asmjit::x86::Inst::kIdRoundps; case Mnemonic::Roundsd: return asmjit::x86::Inst::kIdRoundsd; case Mnemonic::Roundss: return asmjit::x86::Inst::kIdRoundss; case Mnemonic::Rsm: return asmjit::x86::Inst::kIdRsm; case Mnemonic::Rsqrtps: return asmjit::x86::Inst::kIdRsqrtps; case Mnemonic::Rsqrtss: return asmjit::x86::Inst::kIdRsqrtss; case Mnemonic::Sahf: return asmjit::x86::Inst::kIdSahf; case Mnemonic::Sar: return asmjit::x86::Inst::kIdSar; case Mnemonic::Sarx: return asmjit::x86::Inst::kIdSarx; case 
Mnemonic::Sbb: return asmjit::x86::Inst::kIdSbb; case Mnemonic::Setb: return asmjit::x86::Inst::kIdSetb; case Mnemonic::Setbe: return asmjit::x86::Inst::kIdSetbe; case Mnemonic::Setl: return asmjit::x86::Inst::kIdSetl; case Mnemonic::Setle: return asmjit::x86::Inst::kIdSetle; case Mnemonic::Setnb: return asmjit::x86::Inst::kIdSetnb; case Mnemonic::Setnbe: return asmjit::x86::Inst::kIdSetnbe; case Mnemonic::Setnl: return asmjit::x86::Inst::kIdSetnl; case Mnemonic::Setnle: return asmjit::x86::Inst::kIdSetnle; case Mnemonic::Setno: return asmjit::x86::Inst::kIdSetno; case Mnemonic::Setnp: return asmjit::x86::Inst::kIdSetnp; case Mnemonic::Setns: return asmjit::x86::Inst::kIdSetns; case Mnemonic::Setnz: return asmjit::x86::Inst::kIdSetnz; case Mnemonic::Seto: return asmjit::x86::Inst::kIdSeto; case Mnemonic::Setp: return asmjit::x86::Inst::kIdSetp; case Mnemonic::Sets: return asmjit::x86::Inst::kIdSets; case Mnemonic::Setz: return asmjit::x86::Inst::kIdSetz; case Mnemonic::Sfence: return asmjit::x86::Inst::kIdSfence; case Mnemonic::Sgdt: return asmjit::x86::Inst::kIdSgdt; case Mnemonic::Sha1msg1: return asmjit::x86::Inst::kIdSha1msg1; case Mnemonic::Sha1msg2: return asmjit::x86::Inst::kIdSha1msg2; case Mnemonic::Sha1nexte: return asmjit::x86::Inst::kIdSha1nexte; case Mnemonic::Sha1rnds4: return asmjit::x86::Inst::kIdSha1rnds4; case Mnemonic::Sha256msg1: return asmjit::x86::Inst::kIdSha256msg1; case Mnemonic::Sha256msg2: return asmjit::x86::Inst::kIdSha256msg2; case Mnemonic::Sha256rnds2: return asmjit::x86::Inst::kIdSha256rnds2; case Mnemonic::Shl: return asmjit::x86::Inst::kIdShl; case Mnemonic::Shld: return asmjit::x86::Inst::kIdShld; case Mnemonic::Shlx: return asmjit::x86::Inst::kIdShlx; case Mnemonic::Shr: return asmjit::x86::Inst::kIdShr; case Mnemonic::Shrd: return asmjit::x86::Inst::kIdShrd; case Mnemonic::Shrx: return asmjit::x86::Inst::kIdShrx; case Mnemonic::Shufpd: return asmjit::x86::Inst::kIdShufpd; case Mnemonic::Shufps: return 
asmjit::x86::Inst::kIdShufps; case Mnemonic::Sidt: return asmjit::x86::Inst::kIdSidt; case Mnemonic::Sldt: return asmjit::x86::Inst::kIdSldt; case Mnemonic::Smsw: return asmjit::x86::Inst::kIdSmsw; case Mnemonic::Sqrtpd: return asmjit::x86::Inst::kIdSqrtpd; case Mnemonic::Sqrtps: return asmjit::x86::Inst::kIdSqrtps; case Mnemonic::Sqrtsd: return asmjit::x86::Inst::kIdSqrtsd; case Mnemonic::Sqrtss: return asmjit::x86::Inst::kIdSqrtss; case Mnemonic::Stac: return asmjit::x86::Inst::kIdStac; case Mnemonic::Stc: return asmjit::x86::Inst::kIdStc; case Mnemonic::Std: return asmjit::x86::Inst::kIdStd; case Mnemonic::Sti: return asmjit::x86::Inst::kIdSti; case Mnemonic::Stmxcsr: return asmjit::x86::Inst::kIdStmxcsr; case Mnemonic::Str: return asmjit::x86::Inst::kIdStr; case Mnemonic::Sub: return asmjit::x86::Inst::kIdSub; case Mnemonic::Subpd: return asmjit::x86::Inst::kIdSubpd; case Mnemonic::Subps: return asmjit::x86::Inst::kIdSubps; case Mnemonic::Subsd: return asmjit::x86::Inst::kIdSubsd; case Mnemonic::Subss: return asmjit::x86::Inst::kIdSubss; case Mnemonic::Swapgs: return asmjit::x86::Inst::kIdSwapgs; case Mnemonic::Syscall: return asmjit::x86::Inst::kIdSyscall; case Mnemonic::Sysenter: return asmjit::x86::Inst::kIdSysenter; case Mnemonic::Sysexit: return asmjit::x86::Inst::kIdSysexit; case Mnemonic::Sysret: return asmjit::x86::Inst::kIdSysret; case Mnemonic::T1mskc: return asmjit::x86::Inst::kIdT1mskc; case Mnemonic::Test: return asmjit::x86::Inst::kIdTest; case Mnemonic::Tzcnt: return asmjit::x86::Inst::kIdTzcnt; case Mnemonic::Tzmsk: return asmjit::x86::Inst::kIdTzmsk; case Mnemonic::Ucomisd: return asmjit::x86::Inst::kIdUcomisd; case Mnemonic::Ucomiss: return asmjit::x86::Inst::kIdUcomiss; case Mnemonic::Ud2: return asmjit::x86::Inst::kIdUd2; case Mnemonic::Unpckhpd: return asmjit::x86::Inst::kIdUnpckhpd; case Mnemonic::Unpckhps: return asmjit::x86::Inst::kIdUnpckhps; case Mnemonic::Unpcklpd: return asmjit::x86::Inst::kIdUnpcklpd; case Mnemonic::Unpcklps: return 
asmjit::x86::Inst::kIdUnpcklps; case Mnemonic::V4fmaddps: return asmjit::x86::Inst::kIdV4fmaddps; case Mnemonic::V4fmaddss: return asmjit::x86::Inst::kIdV4fmaddss; case Mnemonic::V4fnmaddps: return asmjit::x86::Inst::kIdV4fnmaddps; case Mnemonic::V4fnmaddss: return asmjit::x86::Inst::kIdV4fnmaddss; case Mnemonic::Vaddpd: return asmjit::x86::Inst::kIdVaddpd; case Mnemonic::Vaddps: return asmjit::x86::Inst::kIdVaddps; case Mnemonic::Vaddsd: return asmjit::x86::Inst::kIdVaddsd; case Mnemonic::Vaddss: return asmjit::x86::Inst::kIdVaddss; case Mnemonic::Vaddsubpd: return asmjit::x86::Inst::kIdVaddsubpd; case Mnemonic::Vaddsubps: return asmjit::x86::Inst::kIdVaddsubps; case Mnemonic::Vaesdec: return asmjit::x86::Inst::kIdVaesdec; case Mnemonic::Vaesdeclast: return asmjit::x86::Inst::kIdVaesdeclast; case Mnemonic::Vaesenc: return asmjit::x86::Inst::kIdVaesenc; case Mnemonic::Vaesenclast: return asmjit::x86::Inst::kIdVaesenclast; case Mnemonic::Vaesimc: return asmjit::x86::Inst::kIdVaesimc; case Mnemonic::Vaeskeygenassist: return asmjit::x86::Inst::kIdVaeskeygenassist; case Mnemonic::Valignd: return asmjit::x86::Inst::kIdValignd; case Mnemonic::Valignq: return asmjit::x86::Inst::kIdValignq; case Mnemonic::Vandnpd: return asmjit::x86::Inst::kIdVandnpd; case Mnemonic::Vandnps: return asmjit::x86::Inst::kIdVandnps; case Mnemonic::Vandpd: return asmjit::x86::Inst::kIdVandpd; case Mnemonic::Vandps: return asmjit::x86::Inst::kIdVandps; case Mnemonic::Vblendmpd: return asmjit::x86::Inst::kIdVblendmpd; case Mnemonic::Vblendmps: return asmjit::x86::Inst::kIdVblendmps; case Mnemonic::Vblendpd: return asmjit::x86::Inst::kIdVblendpd; case Mnemonic::Vblendps: return asmjit::x86::Inst::kIdVblendps; case Mnemonic::Vblendvpd: return asmjit::x86::Inst::kIdVblendvpd; case Mnemonic::Vblendvps: return asmjit::x86::Inst::kIdVblendvps; case Mnemonic::Vbroadcastf128: return asmjit::x86::Inst::kIdVbroadcastf128; case Mnemonic::Vbroadcastf32x2: return asmjit::x86::Inst::kIdVbroadcastf32x2; case 
Mnemonic::Vbroadcastf32x4: return asmjit::x86::Inst::kIdVbroadcastf32x4; case Mnemonic::Vbroadcastf32x8: return asmjit::x86::Inst::kIdVbroadcastf32x8; case Mnemonic::Vbroadcastf64x2: return asmjit::x86::Inst::kIdVbroadcastf64x2; case Mnemonic::Vbroadcastf64x4: return asmjit::x86::Inst::kIdVbroadcastf64x4; case Mnemonic::Vbroadcasti128: return asmjit::x86::Inst::kIdVbroadcasti128; case Mnemonic::Vbroadcasti32x2: return asmjit::x86::Inst::kIdVbroadcasti32x2; case Mnemonic::Vbroadcasti32x4: return asmjit::x86::Inst::kIdVbroadcasti32x4; case Mnemonic::Vbroadcasti32x8: return asmjit::x86::Inst::kIdVbroadcasti32x8; case Mnemonic::Vbroadcasti64x2: return asmjit::x86::Inst::kIdVbroadcasti64x2; case Mnemonic::Vbroadcasti64x4: return asmjit::x86::Inst::kIdVbroadcasti64x4; case Mnemonic::Vbroadcastsd: return asmjit::x86::Inst::kIdVbroadcastsd; case Mnemonic::Vbroadcastss: return asmjit::x86::Inst::kIdVbroadcastss; case Mnemonic::Vcmppd: return asmjit::x86::Inst::kIdVcmppd; case Mnemonic::Vcmpps: return asmjit::x86::Inst::kIdVcmpps; case Mnemonic::Vcmpsd: return asmjit::x86::Inst::kIdVcmpsd; case Mnemonic::Vcmpss: return asmjit::x86::Inst::kIdVcmpss; case Mnemonic::Vcomisd: return asmjit::x86::Inst::kIdVcomisd; case Mnemonic::Vcomiss: return asmjit::x86::Inst::kIdVcomiss; case Mnemonic::Vcompresspd: return asmjit::x86::Inst::kIdVcompresspd; case Mnemonic::Vcompressps: return asmjit::x86::Inst::kIdVcompressps; case Mnemonic::Vcvtdq2pd: return asmjit::x86::Inst::kIdVcvtdq2pd; case Mnemonic::Vcvtdq2ps: return asmjit::x86::Inst::kIdVcvtdq2ps; case Mnemonic::Vcvtpd2dq: return asmjit::x86::Inst::kIdVcvtpd2dq; case Mnemonic::Vcvtpd2ps: return asmjit::x86::Inst::kIdVcvtpd2ps; case Mnemonic::Vcvtpd2qq: return asmjit::x86::Inst::kIdVcvtpd2qq; case Mnemonic::Vcvtpd2udq: return asmjit::x86::Inst::kIdVcvtpd2udq; case Mnemonic::Vcvtpd2uqq: return asmjit::x86::Inst::kIdVcvtpd2uqq; case Mnemonic::Vcvtph2ps: return asmjit::x86::Inst::kIdVcvtph2ps; case Mnemonic::Vcvtps2dq: return 
asmjit::x86::Inst::kIdVcvtps2dq; case Mnemonic::Vcvtps2pd: return asmjit::x86::Inst::kIdVcvtps2pd; case Mnemonic::Vcvtps2ph: return asmjit::x86::Inst::kIdVcvtps2ph; case Mnemonic::Vcvtps2qq: return asmjit::x86::Inst::kIdVcvtps2qq; case Mnemonic::Vcvtps2udq: return asmjit::x86::Inst::kIdVcvtps2udq; case Mnemonic::Vcvtps2uqq: return asmjit::x86::Inst::kIdVcvtps2uqq; case Mnemonic::Vcvtqq2pd: return asmjit::x86::Inst::kIdVcvtqq2pd; case Mnemonic::Vcvtqq2ps: return asmjit::x86::Inst::kIdVcvtqq2ps; case Mnemonic::Vcvtsd2si: return asmjit::x86::Inst::kIdVcvtsd2si; case Mnemonic::Vcvtsd2ss: return asmjit::x86::Inst::kIdVcvtsd2ss; case Mnemonic::Vcvtsd2usi: return asmjit::x86::Inst::kIdVcvtsd2usi; case Mnemonic::Vcvtsi2sd: return asmjit::x86::Inst::kIdVcvtsi2sd; case Mnemonic::Vcvtsi2ss: return asmjit::x86::Inst::kIdVcvtsi2ss; case Mnemonic::Vcvtss2sd: return asmjit::x86::Inst::kIdVcvtss2sd; case Mnemonic::Vcvtss2si: return asmjit::x86::Inst::kIdVcvtss2si; case Mnemonic::Vcvtss2usi: return asmjit::x86::Inst::kIdVcvtss2usi; case Mnemonic::Vcvttpd2dq: return asmjit::x86::Inst::kIdVcvttpd2dq; case Mnemonic::Vcvttpd2qq: return asmjit::x86::Inst::kIdVcvttpd2qq; case Mnemonic::Vcvttpd2udq: return asmjit::x86::Inst::kIdVcvttpd2udq; case Mnemonic::Vcvttpd2uqq: return asmjit::x86::Inst::kIdVcvttpd2uqq; case Mnemonic::Vcvttps2dq: return asmjit::x86::Inst::kIdVcvttps2dq; case Mnemonic::Vcvttps2qq: return asmjit::x86::Inst::kIdVcvttps2qq; case Mnemonic::Vcvttps2udq: return asmjit::x86::Inst::kIdVcvttps2udq; case Mnemonic::Vcvttps2uqq: return asmjit::x86::Inst::kIdVcvttps2uqq; case Mnemonic::Vcvttsd2si: return asmjit::x86::Inst::kIdVcvttsd2si; case Mnemonic::Vcvttsd2usi: return asmjit::x86::Inst::kIdVcvttsd2usi; case Mnemonic::Vcvttss2si: return asmjit::x86::Inst::kIdVcvttss2si; case Mnemonic::Vcvttss2usi: return asmjit::x86::Inst::kIdVcvttss2usi; case Mnemonic::Vcvtudq2pd: return asmjit::x86::Inst::kIdVcvtudq2pd; case Mnemonic::Vcvtudq2ps: return asmjit::x86::Inst::kIdVcvtudq2ps; case 
Mnemonic::Vcvtuqq2pd: return asmjit::x86::Inst::kIdVcvtuqq2pd; case Mnemonic::Vcvtuqq2ps: return asmjit::x86::Inst::kIdVcvtuqq2ps; case Mnemonic::Vcvtusi2sd: return asmjit::x86::Inst::kIdVcvtusi2sd; case Mnemonic::Vcvtusi2ss: return asmjit::x86::Inst::kIdVcvtusi2ss; case Mnemonic::Vdbpsadbw: return asmjit::x86::Inst::kIdVdbpsadbw; case Mnemonic::Vdivpd: return asmjit::x86::Inst::kIdVdivpd; case Mnemonic::Vdivps: return asmjit::x86::Inst::kIdVdivps; case Mnemonic::Vdivsd: return asmjit::x86::Inst::kIdVdivsd; case Mnemonic::Vdivss: return asmjit::x86::Inst::kIdVdivss; case Mnemonic::Vdppd: return asmjit::x86::Inst::kIdVdppd; case Mnemonic::Vdpps: return asmjit::x86::Inst::kIdVdpps; case Mnemonic::Verr: return asmjit::x86::Inst::kIdVerr; case Mnemonic::Verw: return asmjit::x86::Inst::kIdVerw; case Mnemonic::Vexp2pd: return asmjit::x86::Inst::kIdVexp2pd; case Mnemonic::Vexp2ps: return asmjit::x86::Inst::kIdVexp2ps; case Mnemonic::Vexpandpd: return asmjit::x86::Inst::kIdVexpandpd; case Mnemonic::Vexpandps: return asmjit::x86::Inst::kIdVexpandps; case Mnemonic::Vextractf128: return asmjit::x86::Inst::kIdVextractf128; case Mnemonic::Vextractf32x4: return asmjit::x86::Inst::kIdVextractf32x4; case Mnemonic::Vextractf32x8: return asmjit::x86::Inst::kIdVextractf32x8; case Mnemonic::Vextractf64x2: return asmjit::x86::Inst::kIdVextractf64x2; case Mnemonic::Vextractf64x4: return asmjit::x86::Inst::kIdVextractf64x4; case Mnemonic::Vextracti128: return asmjit::x86::Inst::kIdVextracti128; case Mnemonic::Vextracti32x4: return asmjit::x86::Inst::kIdVextracti32x4; case Mnemonic::Vextracti32x8: return asmjit::x86::Inst::kIdVextracti32x8; case Mnemonic::Vextracti64x2: return asmjit::x86::Inst::kIdVextracti64x2; case Mnemonic::Vextracti64x4: return asmjit::x86::Inst::kIdVextracti64x4; case Mnemonic::Vextractps: return asmjit::x86::Inst::kIdVextractps; case Mnemonic::Vfixupimmpd: return asmjit::x86::Inst::kIdVfixupimmpd; case Mnemonic::Vfixupimmps: return 
asmjit::x86::Inst::kIdVfixupimmps; case Mnemonic::Vfixupimmsd: return asmjit::x86::Inst::kIdVfixupimmsd; case Mnemonic::Vfixupimmss: return asmjit::x86::Inst::kIdVfixupimmss; case Mnemonic::Vfmadd132pd: return asmjit::x86::Inst::kIdVfmadd132pd; case Mnemonic::Vfmadd132ps: return asmjit::x86::Inst::kIdVfmadd132ps; case Mnemonic::Vfmadd132sd: return asmjit::x86::Inst::kIdVfmadd132sd; case Mnemonic::Vfmadd132ss: return asmjit::x86::Inst::kIdVfmadd132ss; case Mnemonic::Vfmadd213pd: return asmjit::x86::Inst::kIdVfmadd213pd; case Mnemonic::Vfmadd213ps: return asmjit::x86::Inst::kIdVfmadd213ps; case Mnemonic::Vfmadd213sd: return asmjit::x86::Inst::kIdVfmadd213sd; case Mnemonic::Vfmadd213ss: return asmjit::x86::Inst::kIdVfmadd213ss; case Mnemonic::Vfmadd231pd: return asmjit::x86::Inst::kIdVfmadd231pd; case Mnemonic::Vfmadd231ps: return asmjit::x86::Inst::kIdVfmadd231ps; case Mnemonic::Vfmadd231sd: return asmjit::x86::Inst::kIdVfmadd231sd; case Mnemonic::Vfmadd231ss: return asmjit::x86::Inst::kIdVfmadd231ss; case Mnemonic::Vfmaddpd: return asmjit::x86::Inst::kIdVfmaddpd; case Mnemonic::Vfmaddps: return asmjit::x86::Inst::kIdVfmaddps; case Mnemonic::Vfmaddsd: return asmjit::x86::Inst::kIdVfmaddsd; case Mnemonic::Vfmaddss: return asmjit::x86::Inst::kIdVfmaddss; case Mnemonic::Vfmaddsub132pd: return asmjit::x86::Inst::kIdVfmaddsub132pd; case Mnemonic::Vfmaddsub132ps: return asmjit::x86::Inst::kIdVfmaddsub132ps; case Mnemonic::Vfmaddsub213pd: return asmjit::x86::Inst::kIdVfmaddsub213pd; case Mnemonic::Vfmaddsub213ps: return asmjit::x86::Inst::kIdVfmaddsub213ps; case Mnemonic::Vfmaddsub231pd: return asmjit::x86::Inst::kIdVfmaddsub231pd; case Mnemonic::Vfmaddsub231ps: return asmjit::x86::Inst::kIdVfmaddsub231ps; case Mnemonic::Vfmaddsubpd: return asmjit::x86::Inst::kIdVfmaddsubpd; case Mnemonic::Vfmaddsubps: return asmjit::x86::Inst::kIdVfmaddsubps; case Mnemonic::Vfmsub132pd: return asmjit::x86::Inst::kIdVfmsub132pd; case Mnemonic::Vfmsub132ps: return 
asmjit::x86::Inst::kIdVfmsub132ps; case Mnemonic::Vfmsub132sd: return asmjit::x86::Inst::kIdVfmsub132sd; case Mnemonic::Vfmsub132ss: return asmjit::x86::Inst::kIdVfmsub132ss; case Mnemonic::Vfmsub213pd: return asmjit::x86::Inst::kIdVfmsub213pd; case Mnemonic::Vfmsub213ps: return asmjit::x86::Inst::kIdVfmsub213ps; case Mnemonic::Vfmsub213sd: return asmjit::x86::Inst::kIdVfmsub213sd; case Mnemonic::Vfmsub213ss: return asmjit::x86::Inst::kIdVfmsub213ss; case Mnemonic::Vfmsub231pd: return asmjit::x86::Inst::kIdVfmsub231pd; case Mnemonic::Vfmsub231ps: return asmjit::x86::Inst::kIdVfmsub231ps; case Mnemonic::Vfmsub231sd: return asmjit::x86::Inst::kIdVfmsub231sd; case Mnemonic::Vfmsub231ss: return asmjit::x86::Inst::kIdVfmsub231ss; case Mnemonic::Vfmsubadd132pd: return asmjit::x86::Inst::kIdVfmsubadd132pd; case Mnemonic::Vfmsubadd132ps: return asmjit::x86::Inst::kIdVfmsubadd132ps; case Mnemonic::Vfmsubadd213pd: return asmjit::x86::Inst::kIdVfmsubadd213pd; case Mnemonic::Vfmsubadd213ps: return asmjit::x86::Inst::kIdVfmsubadd213ps; case Mnemonic::Vfmsubadd231pd: return asmjit::x86::Inst::kIdVfmsubadd231pd; case Mnemonic::Vfmsubadd231ps: return asmjit::x86::Inst::kIdVfmsubadd231ps; case Mnemonic::Vfmsubaddpd: return asmjit::x86::Inst::kIdVfmsubaddpd; case Mnemonic::Vfmsubaddps: return asmjit::x86::Inst::kIdVfmsubaddps; case Mnemonic::Vfmsubpd: return asmjit::x86::Inst::kIdVfmsubpd; case Mnemonic::Vfmsubps: return asmjit::x86::Inst::kIdVfmsubps; case Mnemonic::Vfmsubsd: return asmjit::x86::Inst::kIdVfmsubsd; case Mnemonic::Vfmsubss: return asmjit::x86::Inst::kIdVfmsubss; case Mnemonic::Vfnmadd132pd: return asmjit::x86::Inst::kIdVfnmadd132pd; case Mnemonic::Vfnmadd132ps: return asmjit::x86::Inst::kIdVfnmadd132ps; case Mnemonic::Vfnmadd132sd: return asmjit::x86::Inst::kIdVfnmadd132sd; case Mnemonic::Vfnmadd132ss: return asmjit::x86::Inst::kIdVfnmadd132ss; case Mnemonic::Vfnmadd213pd: return asmjit::x86::Inst::kIdVfnmadd213pd; case Mnemonic::Vfnmadd213ps: return 
asmjit::x86::Inst::kIdVfnmadd213ps; case Mnemonic::Vfnmadd213sd: return asmjit::x86::Inst::kIdVfnmadd213sd; case Mnemonic::Vfnmadd213ss: return asmjit::x86::Inst::kIdVfnmadd213ss; case Mnemonic::Vfnmadd231pd: return asmjit::x86::Inst::kIdVfnmadd231pd; case Mnemonic::Vfnmadd231ps: return asmjit::x86::Inst::kIdVfnmadd231ps; case Mnemonic::Vfnmadd231sd: return asmjit::x86::Inst::kIdVfnmadd231sd; case Mnemonic::Vfnmadd231ss: return asmjit::x86::Inst::kIdVfnmadd231ss; case Mnemonic::Vfnmaddpd: return asmjit::x86::Inst::kIdVfnmaddpd; case Mnemonic::Vfnmaddps: return asmjit::x86::Inst::kIdVfnmaddps; case Mnemonic::Vfnmaddsd: return asmjit::x86::Inst::kIdVfnmaddsd; case Mnemonic::Vfnmaddss: return asmjit::x86::Inst::kIdVfnmaddss; case Mnemonic::Vfnmsub132pd: return asmjit::x86::Inst::kIdVfnmsub132pd; case Mnemonic::Vfnmsub132ps: return asmjit::x86::Inst::kIdVfnmsub132ps; case Mnemonic::Vfnmsub132sd: return asmjit::x86::Inst::kIdVfnmsub132sd; case Mnemonic::Vfnmsub132ss: return asmjit::x86::Inst::kIdVfnmsub132ss; case Mnemonic::Vfnmsub213pd: return asmjit::x86::Inst::kIdVfnmsub213pd; case Mnemonic::Vfnmsub213ps: return asmjit::x86::Inst::kIdVfnmsub213ps; case Mnemonic::Vfnmsub213sd: return asmjit::x86::Inst::kIdVfnmsub213sd; case Mnemonic::Vfnmsub213ss: return asmjit::x86::Inst::kIdVfnmsub213ss; case Mnemonic::Vfnmsub231pd: return asmjit::x86::Inst::kIdVfnmsub231pd; case Mnemonic::Vfnmsub231ps: return asmjit::x86::Inst::kIdVfnmsub231ps; case Mnemonic::Vfnmsub231sd: return asmjit::x86::Inst::kIdVfnmsub231sd; case Mnemonic::Vfnmsub231ss: return asmjit::x86::Inst::kIdVfnmsub231ss; case Mnemonic::Vfnmsubpd: return asmjit::x86::Inst::kIdVfnmsubpd; case Mnemonic::Vfnmsubps: return asmjit::x86::Inst::kIdVfnmsubps; case Mnemonic::Vfnmsubsd: return asmjit::x86::Inst::kIdVfnmsubsd; case Mnemonic::Vfnmsubss: return asmjit::x86::Inst::kIdVfnmsubss; case Mnemonic::Vfpclasspd: return asmjit::x86::Inst::kIdVfpclasspd; case Mnemonic::Vfpclassps: return asmjit::x86::Inst::kIdVfpclassps; 
case Mnemonic::Vfpclasssd: return asmjit::x86::Inst::kIdVfpclasssd; case Mnemonic::Vfpclassss: return asmjit::x86::Inst::kIdVfpclassss; case Mnemonic::Vfrczpd: return asmjit::x86::Inst::kIdVfrczpd; case Mnemonic::Vfrczps: return asmjit::x86::Inst::kIdVfrczps; case Mnemonic::Vfrczsd: return asmjit::x86::Inst::kIdVfrczsd; case Mnemonic::Vfrczss: return asmjit::x86::Inst::kIdVfrczss; case Mnemonic::Vgatherdpd: return asmjit::x86::Inst::kIdVgatherdpd; case Mnemonic::Vgatherdps: return asmjit::x86::Inst::kIdVgatherdps; case Mnemonic::Vgatherpf0dpd: return asmjit::x86::Inst::kIdVgatherpf0dpd; case Mnemonic::Vgatherpf0dps: return asmjit::x86::Inst::kIdVgatherpf0dps; case Mnemonic::Vgatherpf0qpd: return asmjit::x86::Inst::kIdVgatherpf0qpd; case Mnemonic::Vgatherpf0qps: return asmjit::x86::Inst::kIdVgatherpf0qps; case Mnemonic::Vgatherpf1dpd: return asmjit::x86::Inst::kIdVgatherpf1dpd; case Mnemonic::Vgatherpf1dps: return asmjit::x86::Inst::kIdVgatherpf1dps; case Mnemonic::Vgatherpf1qpd: return asmjit::x86::Inst::kIdVgatherpf1qpd; case Mnemonic::Vgatherpf1qps: return asmjit::x86::Inst::kIdVgatherpf1qps; case Mnemonic::Vgatherqpd: return asmjit::x86::Inst::kIdVgatherqpd; case Mnemonic::Vgatherqps: return asmjit::x86::Inst::kIdVgatherqps; case Mnemonic::Vgetexppd: return asmjit::x86::Inst::kIdVgetexppd; case Mnemonic::Vgetexpps: return asmjit::x86::Inst::kIdVgetexpps; case Mnemonic::Vgetexpsd: return asmjit::x86::Inst::kIdVgetexpsd; case Mnemonic::Vgetexpss: return asmjit::x86::Inst::kIdVgetexpss; case Mnemonic::Vgetmantpd: return asmjit::x86::Inst::kIdVgetmantpd; case Mnemonic::Vgetmantps: return asmjit::x86::Inst::kIdVgetmantps; case Mnemonic::Vgetmantsd: return asmjit::x86::Inst::kIdVgetmantsd; case Mnemonic::Vgetmantss: return asmjit::x86::Inst::kIdVgetmantss; case Mnemonic::Vhaddpd: return asmjit::x86::Inst::kIdVhaddpd; case Mnemonic::Vhaddps: return asmjit::x86::Inst::kIdVhaddps; case Mnemonic::Vhsubpd: return asmjit::x86::Inst::kIdVhsubpd; case Mnemonic::Vhsubps: 
return asmjit::x86::Inst::kIdVhsubps; case Mnemonic::Vinsertf128: return asmjit::x86::Inst::kIdVinsertf128; case Mnemonic::Vinsertf32x4: return asmjit::x86::Inst::kIdVinsertf32x4; case Mnemonic::Vinsertf32x8: return asmjit::x86::Inst::kIdVinsertf32x8; case Mnemonic::Vinsertf64x2: return asmjit::x86::Inst::kIdVinsertf64x2; case Mnemonic::Vinsertf64x4: return asmjit::x86::Inst::kIdVinsertf64x4; case Mnemonic::Vinserti128: return asmjit::x86::Inst::kIdVinserti128; case Mnemonic::Vinserti32x4: return asmjit::x86::Inst::kIdVinserti32x4; case Mnemonic::Vinserti32x8: return asmjit::x86::Inst::kIdVinserti32x8; case Mnemonic::Vinserti64x2: return asmjit::x86::Inst::kIdVinserti64x2; case Mnemonic::Vinserti64x4: return asmjit::x86::Inst::kIdVinserti64x4; case Mnemonic::Vinsertps: return asmjit::x86::Inst::kIdVinsertps; case Mnemonic::Vlddqu: return asmjit::x86::Inst::kIdVlddqu; case Mnemonic::Vldmxcsr: return asmjit::x86::Inst::kIdVldmxcsr; case Mnemonic::Vmaskmovdqu: return asmjit::x86::Inst::kIdVmaskmovdqu; case Mnemonic::Vmaskmovpd: return asmjit::x86::Inst::kIdVmaskmovpd; case Mnemonic::Vmaskmovps: return asmjit::x86::Inst::kIdVmaskmovps; case Mnemonic::Vmaxpd: return asmjit::x86::Inst::kIdVmaxpd; case Mnemonic::Vmaxps: return asmjit::x86::Inst::kIdVmaxps; case Mnemonic::Vmaxsd: return asmjit::x86::Inst::kIdVmaxsd; case Mnemonic::Vmaxss: return asmjit::x86::Inst::kIdVmaxss; case Mnemonic::Vminpd: return asmjit::x86::Inst::kIdVminpd; case Mnemonic::Vminps: return asmjit::x86::Inst::kIdVminps; case Mnemonic::Vminsd: return asmjit::x86::Inst::kIdVminsd; case Mnemonic::Vminss: return asmjit::x86::Inst::kIdVminss; case Mnemonic::Vmovapd: return asmjit::x86::Inst::kIdVmovapd; case Mnemonic::Vmovaps: return asmjit::x86::Inst::kIdVmovaps; case Mnemonic::Vmovd: return asmjit::x86::Inst::kIdVmovd; case Mnemonic::Vmovddup: return asmjit::x86::Inst::kIdVmovddup; case Mnemonic::Vmovdqa: return asmjit::x86::Inst::kIdVmovdqa; case Mnemonic::Vmovdqa32: return 
asmjit::x86::Inst::kIdVmovdqa32; case Mnemonic::Vmovdqa64: return asmjit::x86::Inst::kIdVmovdqa64; case Mnemonic::Vmovdqu: return asmjit::x86::Inst::kIdVmovdqu; case Mnemonic::Vmovdqu16: return asmjit::x86::Inst::kIdVmovdqu16; case Mnemonic::Vmovdqu32: return asmjit::x86::Inst::kIdVmovdqu32; case Mnemonic::Vmovdqu64: return asmjit::x86::Inst::kIdVmovdqu64; case Mnemonic::Vmovdqu8: return asmjit::x86::Inst::kIdVmovdqu8; case Mnemonic::Vmovhlps: return asmjit::x86::Inst::kIdVmovhlps; case Mnemonic::Vmovhpd: return asmjit::x86::Inst::kIdVmovhpd; case Mnemonic::Vmovhps: return asmjit::x86::Inst::kIdVmovhps; case Mnemonic::Vmovlhps: return asmjit::x86::Inst::kIdVmovlhps; case Mnemonic::Vmovlpd: return asmjit::x86::Inst::kIdVmovlpd; case Mnemonic::Vmovlps: return asmjit::x86::Inst::kIdVmovlps; case Mnemonic::Vmovmskpd: return asmjit::x86::Inst::kIdVmovmskpd; case Mnemonic::Vmovmskps: return asmjit::x86::Inst::kIdVmovmskps; case Mnemonic::Vmovntdq: return asmjit::x86::Inst::kIdVmovntdq; case Mnemonic::Vmovntdqa: return asmjit::x86::Inst::kIdVmovntdqa; case Mnemonic::Vmovntpd: return asmjit::x86::Inst::kIdVmovntpd; case Mnemonic::Vmovntps: return asmjit::x86::Inst::kIdVmovntps; case Mnemonic::Vmovq: return asmjit::x86::Inst::kIdVmovq; case Mnemonic::Vmovsd: return asmjit::x86::Inst::kIdVmovsd; case Mnemonic::Vmovshdup: return asmjit::x86::Inst::kIdVmovshdup; case Mnemonic::Vmovsldup: return asmjit::x86::Inst::kIdVmovsldup; case Mnemonic::Vmovss: return asmjit::x86::Inst::kIdVmovss; case Mnemonic::Vmovupd: return asmjit::x86::Inst::kIdVmovupd; case Mnemonic::Vmovups: return asmjit::x86::Inst::kIdVmovups; case Mnemonic::Vmpsadbw: return asmjit::x86::Inst::kIdVmpsadbw; case Mnemonic::Vmulpd: return asmjit::x86::Inst::kIdVmulpd; case Mnemonic::Vmulps: return asmjit::x86::Inst::kIdVmulps; case Mnemonic::Vmulsd: return asmjit::x86::Inst::kIdVmulsd; case Mnemonic::Vmulss: return asmjit::x86::Inst::kIdVmulss; case Mnemonic::Vorpd: return asmjit::x86::Inst::kIdVorpd; case 
Mnemonic::Vorps: return asmjit::x86::Inst::kIdVorps; case Mnemonic::Vp4dpwssd: return asmjit::x86::Inst::kIdVp4dpwssd; case Mnemonic::Vp4dpwssds: return asmjit::x86::Inst::kIdVp4dpwssds; case Mnemonic::Vpabsb: return asmjit::x86::Inst::kIdVpabsb; case Mnemonic::Vpabsd: return asmjit::x86::Inst::kIdVpabsd; case Mnemonic::Vpabsq: return asmjit::x86::Inst::kIdVpabsq; case Mnemonic::Vpabsw: return asmjit::x86::Inst::kIdVpabsw; case Mnemonic::Vpackssdw: return asmjit::x86::Inst::kIdVpackssdw; case Mnemonic::Vpacksswb: return asmjit::x86::Inst::kIdVpacksswb; case Mnemonic::Vpackusdw: return asmjit::x86::Inst::kIdVpackusdw; case Mnemonic::Vpackuswb: return asmjit::x86::Inst::kIdVpackuswb; case Mnemonic::Vpaddb: return asmjit::x86::Inst::kIdVpaddb; case Mnemonic::Vpaddd: return asmjit::x86::Inst::kIdVpaddd; case Mnemonic::Vpaddq: return asmjit::x86::Inst::kIdVpaddq; case Mnemonic::Vpaddsb: return asmjit::x86::Inst::kIdVpaddsb; case Mnemonic::Vpaddsw: return asmjit::x86::Inst::kIdVpaddsw; case Mnemonic::Vpaddusb: return asmjit::x86::Inst::kIdVpaddusb; case Mnemonic::Vpaddusw: return asmjit::x86::Inst::kIdVpaddusw; case Mnemonic::Vpaddw: return asmjit::x86::Inst::kIdVpaddw; case Mnemonic::Vpalignr: return asmjit::x86::Inst::kIdVpalignr; case Mnemonic::Vpand: return asmjit::x86::Inst::kIdVpand; case Mnemonic::Vpandd: return asmjit::x86::Inst::kIdVpandd; case Mnemonic::Vpandn: return asmjit::x86::Inst::kIdVpandn; case Mnemonic::Vpandnd: return asmjit::x86::Inst::kIdVpandnd; case Mnemonic::Vpandnq: return asmjit::x86::Inst::kIdVpandnq; case Mnemonic::Vpandq: return asmjit::x86::Inst::kIdVpandq; case Mnemonic::Vpavgb: return asmjit::x86::Inst::kIdVpavgb; case Mnemonic::Vpavgw: return asmjit::x86::Inst::kIdVpavgw; case Mnemonic::Vpblendd: return asmjit::x86::Inst::kIdVpblendd; case Mnemonic::Vpblendvb: return asmjit::x86::Inst::kIdVpblendvb; case Mnemonic::Vpblendw: return asmjit::x86::Inst::kIdVpblendw; case Mnemonic::Vpbroadcastb: return asmjit::x86::Inst::kIdVpbroadcastb; case 
Mnemonic::Vpbroadcastd: return asmjit::x86::Inst::kIdVpbroadcastd; case Mnemonic::Vpbroadcastmb2q: return asmjit::x86::Inst::kIdVpbroadcastmb2q; case Mnemonic::Vpbroadcastq: return asmjit::x86::Inst::kIdVpbroadcastq; case Mnemonic::Vpbroadcastw: return asmjit::x86::Inst::kIdVpbroadcastw; case Mnemonic::Vpclmulqdq: return asmjit::x86::Inst::kIdVpclmulqdq; case Mnemonic::Vpcmov: return asmjit::x86::Inst::kIdVpcmov; case Mnemonic::Vpcmpb: return asmjit::x86::Inst::kIdVpcmpb; case Mnemonic::Vpcmpd: return asmjit::x86::Inst::kIdVpcmpd; case Mnemonic::Vpcmpeqb: return asmjit::x86::Inst::kIdVpcmpeqb; case Mnemonic::Vpcmpeqd: return asmjit::x86::Inst::kIdVpcmpeqd; case Mnemonic::Vpcmpeqq: return asmjit::x86::Inst::kIdVpcmpeqq; case Mnemonic::Vpcmpeqw: return asmjit::x86::Inst::kIdVpcmpeqw; case Mnemonic::Vpcmpestri: return asmjit::x86::Inst::kIdVpcmpestri; case Mnemonic::Vpcmpestrm: return asmjit::x86::Inst::kIdVpcmpestrm; case Mnemonic::Vpcmpgtb: return asmjit::x86::Inst::kIdVpcmpgtb; case Mnemonic::Vpcmpgtd: return asmjit::x86::Inst::kIdVpcmpgtd; case Mnemonic::Vpcmpgtq: return asmjit::x86::Inst::kIdVpcmpgtq; case Mnemonic::Vpcmpgtw: return asmjit::x86::Inst::kIdVpcmpgtw; case Mnemonic::Vpcmpistri: return asmjit::x86::Inst::kIdVpcmpistri; case Mnemonic::Vpcmpistrm: return asmjit::x86::Inst::kIdVpcmpistrm; case Mnemonic::Vpcmpq: return asmjit::x86::Inst::kIdVpcmpq; case Mnemonic::Vpcmpub: return asmjit::x86::Inst::kIdVpcmpub; case Mnemonic::Vpcmpud: return asmjit::x86::Inst::kIdVpcmpud; case Mnemonic::Vpcmpuq: return asmjit::x86::Inst::kIdVpcmpuq; case Mnemonic::Vpcmpuw: return asmjit::x86::Inst::kIdVpcmpuw; case Mnemonic::Vpcmpw: return asmjit::x86::Inst::kIdVpcmpw; case Mnemonic::Vpcomb: return asmjit::x86::Inst::kIdVpcomb; case Mnemonic::Vpcomd: return asmjit::x86::Inst::kIdVpcomd; case Mnemonic::Vpcompressb: return asmjit::x86::Inst::kIdVpcompressb; case Mnemonic::Vpcompressd: return asmjit::x86::Inst::kIdVpcompressd; case Mnemonic::Vpcompressq: return 
asmjit::x86::Inst::kIdVpcompressq; case Mnemonic::Vpcompressw: return asmjit::x86::Inst::kIdVpcompressw; case Mnemonic::Vpcomq: return asmjit::x86::Inst::kIdVpcomq; case Mnemonic::Vpcomub: return asmjit::x86::Inst::kIdVpcomub; case Mnemonic::Vpcomud: return asmjit::x86::Inst::kIdVpcomud; case Mnemonic::Vpcomuq: return asmjit::x86::Inst::kIdVpcomuq; case Mnemonic::Vpcomuw: return asmjit::x86::Inst::kIdVpcomuw; case Mnemonic::Vpcomw: return asmjit::x86::Inst::kIdVpcomw; case Mnemonic::Vpconflictd: return asmjit::x86::Inst::kIdVpconflictd; case Mnemonic::Vpconflictq: return asmjit::x86::Inst::kIdVpconflictq; case Mnemonic::Vperm2f128: return asmjit::x86::Inst::kIdVperm2f128; case Mnemonic::Vperm2i128: return asmjit::x86::Inst::kIdVperm2i128; case Mnemonic::Vpermb: return asmjit::x86::Inst::kIdVpermb; case Mnemonic::Vpermd: return asmjit::x86::Inst::kIdVpermd; case Mnemonic::Vpermi2b: return asmjit::x86::Inst::kIdVpermi2b; case Mnemonic::Vpermi2d: return asmjit::x86::Inst::kIdVpermi2d; case Mnemonic::Vpermi2pd: return asmjit::x86::Inst::kIdVpermi2pd; case Mnemonic::Vpermi2ps: return asmjit::x86::Inst::kIdVpermi2ps; case Mnemonic::Vpermi2q: return asmjit::x86::Inst::kIdVpermi2q; case Mnemonic::Vpermi2w: return asmjit::x86::Inst::kIdVpermi2w; case Mnemonic::Vpermil2pd: return asmjit::x86::Inst::kIdVpermil2pd; case Mnemonic::Vpermil2ps: return asmjit::x86::Inst::kIdVpermil2ps; case Mnemonic::Vpermilpd: return asmjit::x86::Inst::kIdVpermilpd; case Mnemonic::Vpermilps: return asmjit::x86::Inst::kIdVpermilps; case Mnemonic::Vpermpd: return asmjit::x86::Inst::kIdVpermpd; case Mnemonic::Vpermps: return asmjit::x86::Inst::kIdVpermps; case Mnemonic::Vpermq: return asmjit::x86::Inst::kIdVpermq; case Mnemonic::Vpermt2b: return asmjit::x86::Inst::kIdVpermt2b; case Mnemonic::Vpermt2d: return asmjit::x86::Inst::kIdVpermt2d; case Mnemonic::Vpermt2pd: return asmjit::x86::Inst::kIdVpermt2pd; case Mnemonic::Vpermt2ps: return asmjit::x86::Inst::kIdVpermt2ps; case Mnemonic::Vpermt2q: 
return asmjit::x86::Inst::kIdVpermt2q; case Mnemonic::Vpermt2w: return asmjit::x86::Inst::kIdVpermt2w; case Mnemonic::Vpermw: return asmjit::x86::Inst::kIdVpermw; case Mnemonic::Vpexpandb: return asmjit::x86::Inst::kIdVpexpandb; case Mnemonic::Vpexpandd: return asmjit::x86::Inst::kIdVpexpandd; case Mnemonic::Vpexpandq: return asmjit::x86::Inst::kIdVpexpandq; case Mnemonic::Vpexpandw: return asmjit::x86::Inst::kIdVpexpandw; case Mnemonic::Vpextrb: return asmjit::x86::Inst::kIdVpextrb; case Mnemonic::Vpextrd: return asmjit::x86::Inst::kIdVpextrd; case Mnemonic::Vpextrq: return asmjit::x86::Inst::kIdVpextrq; case Mnemonic::Vpextrw: return asmjit::x86::Inst::kIdVpextrw; case Mnemonic::Vpgatherdd: return asmjit::x86::Inst::kIdVpgatherdd; case Mnemonic::Vpgatherdq: return asmjit::x86::Inst::kIdVpgatherdq; case Mnemonic::Vpgatherqd: return asmjit::x86::Inst::kIdVpgatherqd; case Mnemonic::Vpgatherqq: return asmjit::x86::Inst::kIdVpgatherqq; case Mnemonic::Vphaddbd: return asmjit::x86::Inst::kIdVphaddbd; case Mnemonic::Vphaddbq: return asmjit::x86::Inst::kIdVphaddbq; case Mnemonic::Vphaddbw: return asmjit::x86::Inst::kIdVphaddbw; case Mnemonic::Vphaddd: return asmjit::x86::Inst::kIdVphaddd; case Mnemonic::Vphadddq: return asmjit::x86::Inst::kIdVphadddq; case Mnemonic::Vphaddsw: return asmjit::x86::Inst::kIdVphaddsw; case Mnemonic::Vphaddubd: return asmjit::x86::Inst::kIdVphaddubd; case Mnemonic::Vphaddubq: return asmjit::x86::Inst::kIdVphaddubq; case Mnemonic::Vphaddubw: return asmjit::x86::Inst::kIdVphaddubw; case Mnemonic::Vphaddudq: return asmjit::x86::Inst::kIdVphaddudq; case Mnemonic::Vphadduwd: return asmjit::x86::Inst::kIdVphadduwd; case Mnemonic::Vphadduwq: return asmjit::x86::Inst::kIdVphadduwq; case Mnemonic::Vphaddw: return asmjit::x86::Inst::kIdVphaddw; case Mnemonic::Vphaddwd: return asmjit::x86::Inst::kIdVphaddwd; case Mnemonic::Vphaddwq: return asmjit::x86::Inst::kIdVphaddwq; case Mnemonic::Vphminposuw: return asmjit::x86::Inst::kIdVphminposuw; case 
Mnemonic::Vphsubbw: return asmjit::x86::Inst::kIdVphsubbw; case Mnemonic::Vphsubd: return asmjit::x86::Inst::kIdVphsubd; case Mnemonic::Vphsubdq: return asmjit::x86::Inst::kIdVphsubdq; case Mnemonic::Vphsubsw: return asmjit::x86::Inst::kIdVphsubsw; case Mnemonic::Vphsubw: return asmjit::x86::Inst::kIdVphsubw; case Mnemonic::Vphsubwd: return asmjit::x86::Inst::kIdVphsubwd; case Mnemonic::Vpinsrb: return asmjit::x86::Inst::kIdVpinsrb; case Mnemonic::Vpinsrd: return asmjit::x86::Inst::kIdVpinsrd; case Mnemonic::Vpinsrq: return asmjit::x86::Inst::kIdVpinsrq; case Mnemonic::Vpinsrw: return asmjit::x86::Inst::kIdVpinsrw; case Mnemonic::Vplzcntd: return asmjit::x86::Inst::kIdVplzcntd; case Mnemonic::Vplzcntq: return asmjit::x86::Inst::kIdVplzcntq; case Mnemonic::Vpmacsdd: return asmjit::x86::Inst::kIdVpmacsdd; case Mnemonic::Vpmacsdqh: return asmjit::x86::Inst::kIdVpmacsdqh; case Mnemonic::Vpmacsdql: return asmjit::x86::Inst::kIdVpmacsdql; case Mnemonic::Vpmacssdd: return asmjit::x86::Inst::kIdVpmacssdd; case Mnemonic::Vpmacssdqh: return asmjit::x86::Inst::kIdVpmacssdqh; case Mnemonic::Vpmacssdql: return asmjit::x86::Inst::kIdVpmacssdql; case Mnemonic::Vpmacsswd: return asmjit::x86::Inst::kIdVpmacsswd; case Mnemonic::Vpmacssww: return asmjit::x86::Inst::kIdVpmacssww; case Mnemonic::Vpmacswd: return asmjit::x86::Inst::kIdVpmacswd; case Mnemonic::Vpmacsww: return asmjit::x86::Inst::kIdVpmacsww; case Mnemonic::Vpmadcsswd: return asmjit::x86::Inst::kIdVpmadcsswd; case Mnemonic::Vpmadcswd: return asmjit::x86::Inst::kIdVpmadcswd; case Mnemonic::Vpmadd52huq: return asmjit::x86::Inst::kIdVpmadd52huq; case Mnemonic::Vpmadd52luq: return asmjit::x86::Inst::kIdVpmadd52luq; case Mnemonic::Vpmaddubsw: return asmjit::x86::Inst::kIdVpmaddubsw; case Mnemonic::Vpmaddwd: return asmjit::x86::Inst::kIdVpmaddwd; case Mnemonic::Vpmaskmovd: return asmjit::x86::Inst::kIdVpmaskmovd; case Mnemonic::Vpmaskmovq: return asmjit::x86::Inst::kIdVpmaskmovq; case Mnemonic::Vpmaxsb: return 
asmjit::x86::Inst::kIdVpmaxsb; case Mnemonic::Vpmaxsd: return asmjit::x86::Inst::kIdVpmaxsd; case Mnemonic::Vpmaxsq: return asmjit::x86::Inst::kIdVpmaxsq; case Mnemonic::Vpmaxsw: return asmjit::x86::Inst::kIdVpmaxsw; case Mnemonic::Vpmaxub: return asmjit::x86::Inst::kIdVpmaxub; case Mnemonic::Vpmaxud: return asmjit::x86::Inst::kIdVpmaxud; case Mnemonic::Vpmaxuq: return asmjit::x86::Inst::kIdVpmaxuq; case Mnemonic::Vpmaxuw: return asmjit::x86::Inst::kIdVpmaxuw; case Mnemonic::Vpminsb: return asmjit::x86::Inst::kIdVpminsb; case Mnemonic::Vpminsd: return asmjit::x86::Inst::kIdVpminsd; case Mnemonic::Vpminsq: return asmjit::x86::Inst::kIdVpminsq; case Mnemonic::Vpminsw: return asmjit::x86::Inst::kIdVpminsw; case Mnemonic::Vpminub: return asmjit::x86::Inst::kIdVpminub; case Mnemonic::Vpminud: return asmjit::x86::Inst::kIdVpminud; case Mnemonic::Vpminuq: return asmjit::x86::Inst::kIdVpminuq; case Mnemonic::Vpminuw: return asmjit::x86::Inst::kIdVpminuw; case Mnemonic::Vpmovb2m: return asmjit::x86::Inst::kIdVpmovb2m; case Mnemonic::Vpmovd2m: return asmjit::x86::Inst::kIdVpmovd2m; case Mnemonic::Vpmovdb: return asmjit::x86::Inst::kIdVpmovdb; case Mnemonic::Vpmovdw: return asmjit::x86::Inst::kIdVpmovdw; case Mnemonic::Vpmovm2b: return asmjit::x86::Inst::kIdVpmovm2b; case Mnemonic::Vpmovm2d: return asmjit::x86::Inst::kIdVpmovm2d; case Mnemonic::Vpmovm2q: return asmjit::x86::Inst::kIdVpmovm2q; case Mnemonic::Vpmovm2w: return asmjit::x86::Inst::kIdVpmovm2w; case Mnemonic::Vpmovmskb: return asmjit::x86::Inst::kIdVpmovmskb; case Mnemonic::Vpmovq2m: return asmjit::x86::Inst::kIdVpmovq2m; case Mnemonic::Vpmovqb: return asmjit::x86::Inst::kIdVpmovqb; case Mnemonic::Vpmovqd: return asmjit::x86::Inst::kIdVpmovqd; case Mnemonic::Vpmovqw: return asmjit::x86::Inst::kIdVpmovqw; case Mnemonic::Vpmovsdb: return asmjit::x86::Inst::kIdVpmovsdb; case Mnemonic::Vpmovsdw: return asmjit::x86::Inst::kIdVpmovsdw; case Mnemonic::Vpmovsqb: return asmjit::x86::Inst::kIdVpmovsqb; case 
Mnemonic::Vpmovsqd: return asmjit::x86::Inst::kIdVpmovsqd; case Mnemonic::Vpmovsqw: return asmjit::x86::Inst::kIdVpmovsqw; case Mnemonic::Vpmovswb: return asmjit::x86::Inst::kIdVpmovswb; case Mnemonic::Vpmovsxbd: return asmjit::x86::Inst::kIdVpmovsxbd; case Mnemonic::Vpmovsxbq: return asmjit::x86::Inst::kIdVpmovsxbq; case Mnemonic::Vpmovsxbw: return asmjit::x86::Inst::kIdVpmovsxbw; case Mnemonic::Vpmovsxdq: return asmjit::x86::Inst::kIdVpmovsxdq; case Mnemonic::Vpmovsxwd: return asmjit::x86::Inst::kIdVpmovsxwd; case Mnemonic::Vpmovsxwq: return asmjit::x86::Inst::kIdVpmovsxwq; case Mnemonic::Vpmovusdb: return asmjit::x86::Inst::kIdVpmovusdb; case Mnemonic::Vpmovusdw: return asmjit::x86::Inst::kIdVpmovusdw; case Mnemonic::Vpmovusqb: return asmjit::x86::Inst::kIdVpmovusqb; case Mnemonic::Vpmovusqd: return asmjit::x86::Inst::kIdVpmovusqd; case Mnemonic::Vpmovusqw: return asmjit::x86::Inst::kIdVpmovusqw; case Mnemonic::Vpmovuswb: return asmjit::x86::Inst::kIdVpmovuswb; case Mnemonic::Vpmovw2m: return asmjit::x86::Inst::kIdVpmovw2m; case Mnemonic::Vpmovwb: return asmjit::x86::Inst::kIdVpmovwb; case Mnemonic::Vpmovzxbd: return asmjit::x86::Inst::kIdVpmovzxbd; case Mnemonic::Vpmovzxbq: return asmjit::x86::Inst::kIdVpmovzxbq; case Mnemonic::Vpmovzxbw: return asmjit::x86::Inst::kIdVpmovzxbw; case Mnemonic::Vpmovzxdq: return asmjit::x86::Inst::kIdVpmovzxdq; case Mnemonic::Vpmovzxwd: return asmjit::x86::Inst::kIdVpmovzxwd; case Mnemonic::Vpmovzxwq: return asmjit::x86::Inst::kIdVpmovzxwq; case Mnemonic::Vpmuldq: return asmjit::x86::Inst::kIdVpmuldq; case Mnemonic::Vpmulhrsw: return asmjit::x86::Inst::kIdVpmulhrsw; case Mnemonic::Vpmulhuw: return asmjit::x86::Inst::kIdVpmulhuw; case Mnemonic::Vpmulhw: return asmjit::x86::Inst::kIdVpmulhw; case Mnemonic::Vpmulld: return asmjit::x86::Inst::kIdVpmulld; case Mnemonic::Vpmullq: return asmjit::x86::Inst::kIdVpmullq; case Mnemonic::Vpmullw: return asmjit::x86::Inst::kIdVpmullw; case Mnemonic::Vpmultishiftqb: return 
asmjit::x86::Inst::kIdVpmultishiftqb; case Mnemonic::Vpmuludq: return asmjit::x86::Inst::kIdVpmuludq; case Mnemonic::Vpopcntb: return asmjit::x86::Inst::kIdVpopcntb; case Mnemonic::Vpopcntd: return asmjit::x86::Inst::kIdVpopcntd; case Mnemonic::Vpopcntq: return asmjit::x86::Inst::kIdVpopcntq; case Mnemonic::Vpopcntw: return asmjit::x86::Inst::kIdVpopcntw; case Mnemonic::Vpor: return asmjit::x86::Inst::kIdVpor; case Mnemonic::Vpord: return asmjit::x86::Inst::kIdVpord; case Mnemonic::Vporq: return asmjit::x86::Inst::kIdVporq; case Mnemonic::Vpperm: return asmjit::x86::Inst::kIdVpperm; case Mnemonic::Vprold: return asmjit::x86::Inst::kIdVprold; case Mnemonic::Vprolq: return asmjit::x86::Inst::kIdVprolq; case Mnemonic::Vprolvd: return asmjit::x86::Inst::kIdVprolvd; case Mnemonic::Vprolvq: return asmjit::x86::Inst::kIdVprolvq; case Mnemonic::Vprord: return asmjit::x86::Inst::kIdVprord; case Mnemonic::Vprorq: return asmjit::x86::Inst::kIdVprorq; case Mnemonic::Vprorvd: return asmjit::x86::Inst::kIdVprorvd; case Mnemonic::Vprorvq: return asmjit::x86::Inst::kIdVprorvq; case Mnemonic::Vprotb: return asmjit::x86::Inst::kIdVprotb; case Mnemonic::Vprotd: return asmjit::x86::Inst::kIdVprotd; case Mnemonic::Vprotq: return asmjit::x86::Inst::kIdVprotq; case Mnemonic::Vprotw: return asmjit::x86::Inst::kIdVprotw; case Mnemonic::Vpsadbw: return asmjit::x86::Inst::kIdVpsadbw; case Mnemonic::Vpscatterdd: return asmjit::x86::Inst::kIdVpscatterdd; case Mnemonic::Vpscatterdq: return asmjit::x86::Inst::kIdVpscatterdq; case Mnemonic::Vpscatterqd: return asmjit::x86::Inst::kIdVpscatterqd; case Mnemonic::Vpscatterqq: return asmjit::x86::Inst::kIdVpscatterqq; case Mnemonic::Vpshab: return asmjit::x86::Inst::kIdVpshab; case Mnemonic::Vpshad: return asmjit::x86::Inst::kIdVpshad; case Mnemonic::Vpshaq: return asmjit::x86::Inst::kIdVpshaq; case Mnemonic::Vpshaw: return asmjit::x86::Inst::kIdVpshaw; case Mnemonic::Vpshlb: return asmjit::x86::Inst::kIdVpshlb; case Mnemonic::Vpshld: return 
asmjit::x86::Inst::kIdVpshld; case Mnemonic::Vpshldd: return asmjit::x86::Inst::kIdVpshldd; case Mnemonic::Vpshldq: return asmjit::x86::Inst::kIdVpshldq; case Mnemonic::Vpshldvd: return asmjit::x86::Inst::kIdVpshldvd; case Mnemonic::Vpshldvq: return asmjit::x86::Inst::kIdVpshldvq; case Mnemonic::Vpshldvw: return asmjit::x86::Inst::kIdVpshldvw; case Mnemonic::Vpshldw: return asmjit::x86::Inst::kIdVpshldw; case Mnemonic::Vpshlq: return asmjit::x86::Inst::kIdVpshlq; case Mnemonic::Vpshlw: return asmjit::x86::Inst::kIdVpshlw; case Mnemonic::Vpshrdd: return asmjit::x86::Inst::kIdVpshrdd; case Mnemonic::Vpshrdq: return asmjit::x86::Inst::kIdVpshrdq; case Mnemonic::Vpshrdvd: return asmjit::x86::Inst::kIdVpshrdvd; case Mnemonic::Vpshrdvq: return asmjit::x86::Inst::kIdVpshrdvq; case Mnemonic::Vpshrdvw: return asmjit::x86::Inst::kIdVpshrdvw; case Mnemonic::Vpshrdw: return asmjit::x86::Inst::kIdVpshrdw; case Mnemonic::Vpshufb: return asmjit::x86::Inst::kIdVpshufb; case Mnemonic::Vpshufbitqmb: return asmjit::x86::Inst::kIdVpshufbitqmb; case Mnemonic::Vpshufd: return asmjit::x86::Inst::kIdVpshufd; case Mnemonic::Vpshufhw: return asmjit::x86::Inst::kIdVpshufhw; case Mnemonic::Vpshuflw: return asmjit::x86::Inst::kIdVpshuflw; case Mnemonic::Vpsignb: return asmjit::x86::Inst::kIdVpsignb; case Mnemonic::Vpsignd: return asmjit::x86::Inst::kIdVpsignd; case Mnemonic::Vpsignw: return asmjit::x86::Inst::kIdVpsignw; case Mnemonic::Vpslld: return asmjit::x86::Inst::kIdVpslld; case Mnemonic::Vpslldq: return asmjit::x86::Inst::kIdVpslldq; case Mnemonic::Vpsllq: return asmjit::x86::Inst::kIdVpsllq; case Mnemonic::Vpsllvd: return asmjit::x86::Inst::kIdVpsllvd; case Mnemonic::Vpsllvq: return asmjit::x86::Inst::kIdVpsllvq; case Mnemonic::Vpsllvw: return asmjit::x86::Inst::kIdVpsllvw; case Mnemonic::Vpsllw: return asmjit::x86::Inst::kIdVpsllw; case Mnemonic::Vpsrad: return asmjit::x86::Inst::kIdVpsrad; case Mnemonic::Vpsraq: return asmjit::x86::Inst::kIdVpsraq; case Mnemonic::Vpsravd: return 
asmjit::x86::Inst::kIdVpsravd; case Mnemonic::Vpsravq: return asmjit::x86::Inst::kIdVpsravq; case Mnemonic::Vpsravw: return asmjit::x86::Inst::kIdVpsravw; case Mnemonic::Vpsraw: return asmjit::x86::Inst::kIdVpsraw; case Mnemonic::Vpsrld: return asmjit::x86::Inst::kIdVpsrld; case Mnemonic::Vpsrldq: return asmjit::x86::Inst::kIdVpsrldq; case Mnemonic::Vpsrlq: return asmjit::x86::Inst::kIdVpsrlq; case Mnemonic::Vpsrlvd: return asmjit::x86::Inst::kIdVpsrlvd; case Mnemonic::Vpsrlvq: return asmjit::x86::Inst::kIdVpsrlvq; case Mnemonic::Vpsrlvw: return asmjit::x86::Inst::kIdVpsrlvw; case Mnemonic::Vpsrlw: return asmjit::x86::Inst::kIdVpsrlw; case Mnemonic::Vpsubb: return asmjit::x86::Inst::kIdVpsubb; case Mnemonic::Vpsubd: return asmjit::x86::Inst::kIdVpsubd; case Mnemonic::Vpsubq: return asmjit::x86::Inst::kIdVpsubq; case Mnemonic::Vpsubsb: return asmjit::x86::Inst::kIdVpsubsb; case Mnemonic::Vpsubsw: return asmjit::x86::Inst::kIdVpsubsw; case Mnemonic::Vpsubusb: return asmjit::x86::Inst::kIdVpsubusb; case Mnemonic::Vpsubusw: return asmjit::x86::Inst::kIdVpsubusw; case Mnemonic::Vpsubw: return asmjit::x86::Inst::kIdVpsubw; case Mnemonic::Vpternlogd: return asmjit::x86::Inst::kIdVpternlogd; case Mnemonic::Vpternlogq: return asmjit::x86::Inst::kIdVpternlogq; case Mnemonic::Vptest: return asmjit::x86::Inst::kIdVptest; case Mnemonic::Vptestmb: return asmjit::x86::Inst::kIdVptestmb; case Mnemonic::Vptestmd: return asmjit::x86::Inst::kIdVptestmd; case Mnemonic::Vptestmq: return asmjit::x86::Inst::kIdVptestmq; case Mnemonic::Vptestmw: return asmjit::x86::Inst::kIdVptestmw; case Mnemonic::Vptestnmb: return asmjit::x86::Inst::kIdVptestnmb; case Mnemonic::Vptestnmd: return asmjit::x86::Inst::kIdVptestnmd; case Mnemonic::Vptestnmq: return asmjit::x86::Inst::kIdVptestnmq; case Mnemonic::Vptestnmw: return asmjit::x86::Inst::kIdVptestnmw; case Mnemonic::Vpunpckhbw: return asmjit::x86::Inst::kIdVpunpckhbw; case Mnemonic::Vpunpckhdq: return asmjit::x86::Inst::kIdVpunpckhdq; case 
Mnemonic::Vpunpckhqdq: return asmjit::x86::Inst::kIdVpunpckhqdq; case Mnemonic::Vpunpckhwd: return asmjit::x86::Inst::kIdVpunpckhwd; case Mnemonic::Vpunpcklbw: return asmjit::x86::Inst::kIdVpunpcklbw; case Mnemonic::Vpunpckldq: return asmjit::x86::Inst::kIdVpunpckldq; case Mnemonic::Vpunpcklqdq: return asmjit::x86::Inst::kIdVpunpcklqdq; case Mnemonic::Vpunpcklwd: return asmjit::x86::Inst::kIdVpunpcklwd; case Mnemonic::Vpxor: return asmjit::x86::Inst::kIdVpxor; case Mnemonic::Vpxord: return asmjit::x86::Inst::kIdVpxord; case Mnemonic::Vpxorq: return asmjit::x86::Inst::kIdVpxorq; case Mnemonic::Vrangepd: return asmjit::x86::Inst::kIdVrangepd; case Mnemonic::Vrangeps: return asmjit::x86::Inst::kIdVrangeps; case Mnemonic::Vrangesd: return asmjit::x86::Inst::kIdVrangesd; case Mnemonic::Vrangess: return asmjit::x86::Inst::kIdVrangess; case Mnemonic::Vrcp14pd: return asmjit::x86::Inst::kIdVrcp14pd; case Mnemonic::Vrcp14ps: return asmjit::x86::Inst::kIdVrcp14ps; case Mnemonic::Vrcp14sd: return asmjit::x86::Inst::kIdVrcp14sd; case Mnemonic::Vrcp14ss: return asmjit::x86::Inst::kIdVrcp14ss; case Mnemonic::Vrcp28pd: return asmjit::x86::Inst::kIdVrcp28pd; case Mnemonic::Vrcp28ps: return asmjit::x86::Inst::kIdVrcp28ps; case Mnemonic::Vrcp28sd: return asmjit::x86::Inst::kIdVrcp28sd; case Mnemonic::Vrcp28ss: return asmjit::x86::Inst::kIdVrcp28ss; case Mnemonic::Vrcpps: return asmjit::x86::Inst::kIdVrcpps; case Mnemonic::Vrcpss: return asmjit::x86::Inst::kIdVrcpss; case Mnemonic::Vreducepd: return asmjit::x86::Inst::kIdVreducepd; case Mnemonic::Vreduceps: return asmjit::x86::Inst::kIdVreduceps; case Mnemonic::Vreducesd: return asmjit::x86::Inst::kIdVreducesd; case Mnemonic::Vreducess: return asmjit::x86::Inst::kIdVreducess; case Mnemonic::Vrndscalepd: return asmjit::x86::Inst::kIdVrndscalepd; case Mnemonic::Vrndscaleps: return asmjit::x86::Inst::kIdVrndscaleps; case Mnemonic::Vrndscalesd: return asmjit::x86::Inst::kIdVrndscalesd; case Mnemonic::Vrndscaless: return 
asmjit::x86::Inst::kIdVrndscaless; case Mnemonic::Vroundpd: return asmjit::x86::Inst::kIdVroundpd; case Mnemonic::Vroundps: return asmjit::x86::Inst::kIdVroundps; case Mnemonic::Vroundsd: return asmjit::x86::Inst::kIdVroundsd; case Mnemonic::Vroundss: return asmjit::x86::Inst::kIdVroundss; case Mnemonic::Vrsqrt14pd: return asmjit::x86::Inst::kIdVrsqrt14pd; case Mnemonic::Vrsqrt14ps: return asmjit::x86::Inst::kIdVrsqrt14ps; case Mnemonic::Vrsqrt14sd: return asmjit::x86::Inst::kIdVrsqrt14sd; case Mnemonic::Vrsqrt14ss: return asmjit::x86::Inst::kIdVrsqrt14ss; case Mnemonic::Vrsqrt28pd: return asmjit::x86::Inst::kIdVrsqrt28pd; case Mnemonic::Vrsqrt28ps: return asmjit::x86::Inst::kIdVrsqrt28ps; case Mnemonic::Vrsqrt28sd: return asmjit::x86::Inst::kIdVrsqrt28sd; case Mnemonic::Vrsqrt28ss: return asmjit::x86::Inst::kIdVrsqrt28ss; case Mnemonic::Vrsqrtps: return asmjit::x86::Inst::kIdVrsqrtps; case Mnemonic::Vrsqrtss: return asmjit::x86::Inst::kIdVrsqrtss; case Mnemonic::Vscalefpd: return asmjit::x86::Inst::kIdVscalefpd; case Mnemonic::Vscalefps: return asmjit::x86::Inst::kIdVscalefps; case Mnemonic::Vscalefsd: return asmjit::x86::Inst::kIdVscalefsd; case Mnemonic::Vscalefss: return asmjit::x86::Inst::kIdVscalefss; case Mnemonic::Vscatterdpd: return asmjit::x86::Inst::kIdVscatterdpd; case Mnemonic::Vscatterdps: return asmjit::x86::Inst::kIdVscatterdps; case Mnemonic::Vscatterpf0dpd: return asmjit::x86::Inst::kIdVscatterpf0dpd; case Mnemonic::Vscatterpf0dps: return asmjit::x86::Inst::kIdVscatterpf0dps; case Mnemonic::Vscatterpf0qpd: return asmjit::x86::Inst::kIdVscatterpf0qpd; case Mnemonic::Vscatterpf0qps: return asmjit::x86::Inst::kIdVscatterpf0qps; case Mnemonic::Vscatterpf1dpd: return asmjit::x86::Inst::kIdVscatterpf1dpd; case Mnemonic::Vscatterpf1dps: return asmjit::x86::Inst::kIdVscatterpf1dps; case Mnemonic::Vscatterpf1qpd: return asmjit::x86::Inst::kIdVscatterpf1qpd; case Mnemonic::Vscatterpf1qps: return asmjit::x86::Inst::kIdVscatterpf1qps; case 
Mnemonic::Vscatterqpd: return asmjit::x86::Inst::kIdVscatterqpd; case Mnemonic::Vscatterqps: return asmjit::x86::Inst::kIdVscatterqps; case Mnemonic::Vshuff32x4: return asmjit::x86::Inst::kIdVshuff32x4; case Mnemonic::Vshuff64x2: return asmjit::x86::Inst::kIdVshuff64x2; case Mnemonic::Vshufi32x4: return asmjit::x86::Inst::kIdVshufi32x4; case Mnemonic::Vshufi64x2: return asmjit::x86::Inst::kIdVshufi64x2; case Mnemonic::Vshufpd: return asmjit::x86::Inst::kIdVshufpd; case Mnemonic::Vshufps: return asmjit::x86::Inst::kIdVshufps; case Mnemonic::Vsqrtpd: return asmjit::x86::Inst::kIdVsqrtpd; case Mnemonic::Vsqrtps: return asmjit::x86::Inst::kIdVsqrtps; case Mnemonic::Vsqrtsd: return asmjit::x86::Inst::kIdVsqrtsd; case Mnemonic::Vsqrtss: return asmjit::x86::Inst::kIdVsqrtss; case Mnemonic::Vstmxcsr: return asmjit::x86::Inst::kIdVstmxcsr; case Mnemonic::Vsubpd: return asmjit::x86::Inst::kIdVsubpd; case Mnemonic::Vsubps: return asmjit::x86::Inst::kIdVsubps; case Mnemonic::Vsubsd: return asmjit::x86::Inst::kIdVsubsd; case Mnemonic::Vsubss: return asmjit::x86::Inst::kIdVsubss; case Mnemonic::Vtestpd: return asmjit::x86::Inst::kIdVtestpd; case Mnemonic::Vtestps: return asmjit::x86::Inst::kIdVtestps; case Mnemonic::Vucomisd: return asmjit::x86::Inst::kIdVucomisd; case Mnemonic::Vucomiss: return asmjit::x86::Inst::kIdVucomiss; case Mnemonic::Vunpckhpd: return asmjit::x86::Inst::kIdVunpckhpd; case Mnemonic::Vunpckhps: return asmjit::x86::Inst::kIdVunpckhps; case Mnemonic::Vunpcklpd: return asmjit::x86::Inst::kIdVunpcklpd; case Mnemonic::Vunpcklps: return asmjit::x86::Inst::kIdVunpcklps; case Mnemonic::Vxorpd: return asmjit::x86::Inst::kIdVxorpd; case Mnemonic::Vxorps: return asmjit::x86::Inst::kIdVxorps; case Mnemonic::Vzeroall: return asmjit::x86::Inst::kIdVzeroall; case Mnemonic::Vzeroupper: return asmjit::x86::Inst::kIdVzeroupper; case Mnemonic::Wbinvd: return asmjit::x86::Inst::kIdWbinvd; case Mnemonic::Wrfsbase: return asmjit::x86::Inst::kIdWrfsbase; case Mnemonic::Wrgsbase: 
// NOTE(review): the surrounding dump stripped the original newlines; the token
// stream below is unchanged — it has only been restored to conventional
// one-case-per-line formatting, with documentation comments added.

// Tail end of the Mnemonic -> asmjit::x86::Inst-id mapping switch. The
// enclosing function begins earlier in the file; only its final cases, the
// default handler and the closing braces are visible here.
            return asmjit::x86::Inst::kIdWrgsbase;
        case Mnemonic::Wrmsr: return asmjit::x86::Inst::kIdWrmsr;
        case Mnemonic::Xabort: return asmjit::x86::Inst::kIdXabort;
        case Mnemonic::Xadd: return asmjit::x86::Inst::kIdXadd;
        case Mnemonic::Xbegin: return asmjit::x86::Inst::kIdXbegin;
        case Mnemonic::Xchg: return asmjit::x86::Inst::kIdXchg;
        case Mnemonic::Xend: return asmjit::x86::Inst::kIdXend;
        case Mnemonic::Xgetbv: return asmjit::x86::Inst::kIdXgetbv;
        // Xlat maps onto asmjit's single "xlatb" id.
        case Mnemonic::Xlat: return asmjit::x86::Inst::kIdXlatb;
        case Mnemonic::Xor: return asmjit::x86::Inst::kIdXor;
        case Mnemonic::Xorpd: return asmjit::x86::Inst::kIdXorpd;
        case Mnemonic::Xorps: return asmjit::x86::Inst::kIdXorps;
        case Mnemonic::Xrstor: return asmjit::x86::Inst::kIdXrstor;
        case Mnemonic::Xrstor64: return asmjit::x86::Inst::kIdXrstor64;
        case Mnemonic::Xrstors: return asmjit::x86::Inst::kIdXrstors;
        case Mnemonic::Xrstors64: return asmjit::x86::Inst::kIdXrstors64;
        case Mnemonic::Xsave: return asmjit::x86::Inst::kIdXsave;
        case Mnemonic::Xsave64: return asmjit::x86::Inst::kIdXsave64;
        case Mnemonic::Xsavec: return asmjit::x86::Inst::kIdXsavec;
        case Mnemonic::Xsavec64: return asmjit::x86::Inst::kIdXsavec64;
        case Mnemonic::Xsaveopt: return asmjit::x86::Inst::kIdXsaveopt;
        case Mnemonic::Xsaveopt64: return asmjit::x86::Inst::kIdXsaveopt64;
        case Mnemonic::Xsaves: return asmjit::x86::Inst::kIdXsaves;
        case Mnemonic::Xsaves64: return asmjit::x86::Inst::kIdXsaves64;
        case Mnemonic::Xsetbv: return asmjit::x86::Inst::kIdXsetbv;
        case Mnemonic::Xtest: return asmjit::x86::Inst::kIdXtest;
        // asmjit exposes one id (kIdJecxz) for the whole jcxz/jecxz/jrcxz
        // family; the register operand width selects the actual encoding.
        case Mnemonic::Jrcxz: return asmjit::x86::Inst::kIdJecxz;
        case Mnemonic::Jcxz: return asmjit::x86::Inst::kIdJecxz;
        case Mnemonic::Stgi: return asmjit::x86::Inst::kIdStgi;
        default:
            // Unmapped mnemonic: trap into the debugger (MSVC intrinsic).
            __debugbreak();
    }
    return 0;
}

// Translates a Register enumerator into the equivalent asmjit::x86 register
// operand.
//
// Returns a default-constructed (invalid) asmjit::x86::Reg for values that
// have no asmjit counterpart (None, Idtr, the test registers Tr/Tr0-Tr7).
// Any enumerator not covered by a case falls through the switch into the
// __debugbreak() trap below.
inline asmjit::x86::Reg convertRegister(const Register reg)
{
    switch (reg) {
    case Register::None: return {};
    // Segment registers
    case Register::Es: return asmjit::x86::es;
    case Register::Cs: return asmjit::x86::cs;
    case Register::Ss: return asmjit::x86::ss;
    case Register::Ds: return asmjit::x86::ds;
    case Register::Fs: return asmjit::x86::fs;
    case Register::Gs: return asmjit::x86::gs;
    // General purpose registers 8-bit
    case Register::Al: return asmjit::x86::al;
    case Register::Cl: return asmjit::x86::cl;
    case Register::Dl: return asmjit::x86::dl;
    case Register::Bl: return asmjit::x86::bl;
    case Register::Ah: return asmjit::x86::ah;
    case Register::Ch: return asmjit::x86::ch;
    case Register::Dh: return asmjit::x86::dh;
    case Register::Bh: return asmjit::x86::bh;
    case Register::Spl: return asmjit::x86::spl;
    case Register::Bpl: return asmjit::x86::bpl;
    case Register::Sil: return asmjit::x86::sil;
    case Register::Dil: return asmjit::x86::dil;
    case Register::R8b: return asmjit::x86::r8b;
    case Register::R9b: return asmjit::x86::r9b;
    case Register::R10b: return asmjit::x86::r10b;
    case Register::R11b: return asmjit::x86::r11b;
    case Register::R12b: return asmjit::x86::r12b;
    case Register::R13b: return asmjit::x86::r13b;
    case Register::R14b: return asmjit::x86::r14b;
    case Register::R15b: return asmjit::x86::r15b;
    // General purpose registers 16-bit
    case Register::Ax: return asmjit::x86::ax;
    case Register::Cx: return asmjit::x86::cx;
    case Register::Dx: return asmjit::x86::dx;
    case Register::Bx: return asmjit::x86::bx;
    case Register::Sp: return asmjit::x86::sp;
    case Register::Bp: return asmjit::x86::bp;
    case Register::Si: return asmjit::x86::si;
    case Register::Di: return asmjit::x86::di;
    case Register::R8w: return asmjit::x86::r8w;
    case Register::R9w: return asmjit::x86::r9w;
    case Register::R10w: return asmjit::x86::r10w;
    case Register::R11w: return asmjit::x86::r11w;
    case Register::R12w: return asmjit::x86::r12w;
    case Register::R13w: return asmjit::x86::r13w;
    case Register::R14w: return asmjit::x86::r14w;
    case Register::R15w: return asmjit::x86::r15w;
    // General purpose registers 32-bit
    case Register::Eax: return asmjit::x86::eax;
    case Register::Ecx: return asmjit::x86::ecx;
    case Register::Edx: return asmjit::x86::edx;
    case Register::Ebx: return asmjit::x86::ebx;
    case Register::Esp: return asmjit::x86::esp;
    case Register::Ebp: return asmjit::x86::ebp;
    case Register::Esi: return asmjit::x86::esi;
    case Register::Edi: return asmjit::x86::edi;
    case Register::R8d: return asmjit::x86::r8d;
    case Register::R9d: return asmjit::x86::r9d;
    case Register::R10d: return asmjit::x86::r10d;
    case Register::R11d: return asmjit::x86::r11d;
    case Register::R12d: return asmjit::x86::r12d;
    case Register::R13d: return asmjit::x86::r13d;
    case Register::R14d: return asmjit::x86::r14d;
    case Register::R15d: return asmjit::x86::r15d;
    // General purpose registers 64-bit
    case Register::Rax: return asmjit::x86::rax;
    case Register::Rcx: return asmjit::x86::rcx;
    case Register::Rdx: return asmjit::x86::rdx;
    case Register::Rbx: return asmjit::x86::rbx;
    case Register::Rsp: return asmjit::x86::rsp;
    case Register::Rbp: return asmjit::x86::rbp;
    case Register::Rsi: return asmjit::x86::rsi;
    case Register::Rdi: return asmjit::x86::rdi;
    case Register::R8: return asmjit::x86::r8;
    case Register::R9: return asmjit::x86::r9;
    case Register::R10: return asmjit::x86::r10;
    case Register::R11: return asmjit::x86::r11;
    case Register::R12: return asmjit::x86::r12;
    case Register::R13: return asmjit::x86::r13;
    case Register::R14: return asmjit::x86::r14;
    case Register::R15: return asmjit::x86::r15;
    // Floating point legacy registers
    case Register::St0: return asmjit::x86::st0;
    case Register::St1: return asmjit::x86::st1;
    case Register::St2: return asmjit::x86::st2;
    case Register::St3: return asmjit::x86::st3;
    case Register::St4: return asmjit::x86::st4;
    case Register::St5: return asmjit::x86::st5;
    case Register::St6: return asmjit::x86::st6;
    case Register::St7: return asmjit::x86::st7;
    // Floating point multimedia registers
    case Register::Mm0: return asmjit::x86::mm0;
    case Register::Mm1: return asmjit::x86::mm1;
    case Register::Mm2: return asmjit::x86::mm2;
    case Register::Mm3: return asmjit::x86::mm3;
    case Register::Mm4: return asmjit::x86::mm4;
    case Register::Mm5: return asmjit::x86::mm5;
    case Register::Mm6: return asmjit::x86::mm6;
    case Register::Mm7: return asmjit::x86::mm7;
    // Floating point vector registers 128-bit
    case Register::Xmm0: return asmjit::x86::xmm0;
    case Register::Xmm1: return asmjit::x86::xmm1;
    case Register::Xmm2: return asmjit::x86::xmm2;
    case Register::Xmm3: return asmjit::x86::xmm3;
    case Register::Xmm4: return asmjit::x86::xmm4;
    case Register::Xmm5: return asmjit::x86::xmm5;
    case Register::Xmm6: return asmjit::x86::xmm6;
    case Register::Xmm7: return asmjit::x86::xmm7;
    case Register::Xmm8: return asmjit::x86::xmm8;
    case Register::Xmm9: return asmjit::x86::xmm9;
    case Register::Xmm10: return asmjit::x86::xmm10;
    case Register::Xmm11: return asmjit::x86::xmm11;
    case Register::Xmm12: return asmjit::x86::xmm12;
    case Register::Xmm13: return asmjit::x86::xmm13;
    case Register::Xmm14: return asmjit::x86::xmm14;
    case Register::Xmm15: return asmjit::x86::xmm15;
    case Register::Xmm16: return asmjit::x86::xmm16;
    case Register::Xmm17: return asmjit::x86::xmm17;
    case Register::Xmm18: return asmjit::x86::xmm18;
    case Register::Xmm19: return asmjit::x86::xmm19;
    case Register::Xmm20: return asmjit::x86::xmm20;
    case Register::Xmm21: return asmjit::x86::xmm21;
    case Register::Xmm22: return asmjit::x86::xmm22;
    case Register::Xmm23: return asmjit::x86::xmm23;
    case Register::Xmm24: return asmjit::x86::xmm24;
    case Register::Xmm25: return asmjit::x86::xmm25;
    case Register::Xmm26: return asmjit::x86::xmm26;
    case Register::Xmm27: return asmjit::x86::xmm27;
    case Register::Xmm28: return asmjit::x86::xmm28;
    case Register::Xmm29: return asmjit::x86::xmm29;
    case Register::Xmm30: return asmjit::x86::xmm30;
    case Register::Xmm31: return asmjit::x86::xmm31;
    // Floating point vector registers 256-bit
    case Register::Ymm0: return asmjit::x86::ymm0;
    case Register::Ymm1: return asmjit::x86::ymm1;
    case Register::Ymm2: return asmjit::x86::ymm2;
    case Register::Ymm3: return asmjit::x86::ymm3;
    case Register::Ymm4: return asmjit::x86::ymm4;
    case Register::Ymm5: return asmjit::x86::ymm5;
    case Register::Ymm6: return asmjit::x86::ymm6;
    case Register::Ymm7: return asmjit::x86::ymm7;
    case Register::Ymm8: return asmjit::x86::ymm8;
    case Register::Ymm9: return asmjit::x86::ymm9;
    case Register::Ymm10: return asmjit::x86::ymm10;
    case Register::Ymm11: return asmjit::x86::ymm11;
    case Register::Ymm12: return asmjit::x86::ymm12;
    case Register::Ymm13: return asmjit::x86::ymm13;
    case Register::Ymm14: return asmjit::x86::ymm14;
    case Register::Ymm15: return asmjit::x86::ymm15;
    case Register::Ymm16: return asmjit::x86::ymm16;
    case Register::Ymm17: return asmjit::x86::ymm17;
    case Register::Ymm18: return asmjit::x86::ymm18;
    case Register::Ymm19: return asmjit::x86::ymm19;
    case Register::Ymm20: return asmjit::x86::ymm20;
    case Register::Ymm21: return asmjit::x86::ymm21;
    case Register::Ymm22: return asmjit::x86::ymm22;
    case Register::Ymm23: return asmjit::x86::ymm23;
    case Register::Ymm24: return asmjit::x86::ymm24;
    case Register::Ymm25: return asmjit::x86::ymm25;
    case Register::Ymm26: return asmjit::x86::ymm26;
    case Register::Ymm27: return asmjit::x86::ymm27;
    case Register::Ymm28: return asmjit::x86::ymm28;
    case Register::Ymm29: return asmjit::x86::ymm29;
    case Register::Ymm30: return asmjit::x86::ymm30;
    case Register::Ymm31: return asmjit::x86::ymm31;
    // Floating point vector registers 512-bit
    case Register::Zmm0: return asmjit::x86::zmm0;
    case Register::Zmm1: return asmjit::x86::zmm1;
    case Register::Zmm2: return asmjit::x86::zmm2;
    case Register::Zmm3: return asmjit::x86::zmm3;
    case Register::Zmm4: return asmjit::x86::zmm4;
    case Register::Zmm5: return asmjit::x86::zmm5;
    case Register::Zmm6: return asmjit::x86::zmm6;
    case Register::Zmm7: return asmjit::x86::zmm7;
    case Register::Zmm8: return asmjit::x86::zmm8;
    case Register::Zmm9: return asmjit::x86::zmm9;
    case Register::Zmm10: return asmjit::x86::zmm10;
    case Register::Zmm11: return asmjit::x86::zmm11;
    case Register::Zmm12: return asmjit::x86::zmm12;
    case Register::Zmm13: return asmjit::x86::zmm13;
    case Register::Zmm14: return asmjit::x86::zmm14;
    case Register::Zmm15: return asmjit::x86::zmm15;
    case Register::Zmm16: return asmjit::x86::zmm16;
    case Register::Zmm17: return asmjit::x86::zmm17;
    case Register::Zmm18: return asmjit::x86::zmm18;
    case Register::Zmm19: return asmjit::x86::zmm19;
    case Register::Zmm20: return asmjit::x86::zmm20;
    case Register::Zmm21: return asmjit::x86::zmm21;
    case Register::Zmm22: return asmjit::x86::zmm22;
    case Register::Zmm23: return asmjit::x86::zmm23;
    case Register::Zmm24: return asmjit::x86::zmm24;
    case Register::Zmm25: return asmjit::x86::zmm25;
    case Register::Zmm26: return asmjit::x86::zmm26;
    case Register::Zmm27: return asmjit::x86::zmm27;
    case Register::Zmm28: return asmjit::x86::zmm28;
    case Register::Zmm29: return asmjit::x86::zmm29;
    case Register::Zmm30: return asmjit::x86::zmm30;
    case Register::Zmm31: return asmjit::x86::zmm31;
    // Control registers
    case Register::Cr0: return asmjit::x86::cr0;
    case Register::Cr1: return asmjit::x86::cr1;
    case Register::Cr2: return asmjit::x86::cr2;
    case Register::Cr3: return asmjit::x86::cr3;
    case Register::Cr4: return asmjit::x86::cr4;
    case Register::Cr5: return asmjit::x86::cr5;
    case Register::Cr6: return asmjit::x86::cr6;
    case Register::Cr7: return asmjit::x86::cr7;
    case Register::Cr8: return asmjit::x86::cr8;
    case Register::Cr9: return asmjit::x86::cr9;
    case Register::Cr10: return asmjit::x86::cr10;
    case Register::Cr11: return asmjit::x86::cr11;
    case Register::Cr12: return asmjit::x86::cr12;
    case Register::Cr13: return asmjit::x86::cr13;
    case Register::Cr14: return asmjit::x86::cr14;
    case Register::Cr15: return asmjit::x86::cr15;
    case Register::Idtr:
        // Not mappable.
        return {};
    // NOTE: AsmJit might handle this differently.
    // All instruction-pointer widths collapse onto asmjit's rip operand.
    case Register::Ip:
    case Register::Eip:
    case Register::Rip:
        return asmjit::x86::rip;
    // Test registers have no asmjit counterpart.
    case Register::Tr:
    case Register::Tr0:
    case Register::Tr1:
    case Register::Tr2:
    case Register::Tr3:
    case Register::Tr4:
    case Register::Tr5:
    case Register::Tr6:
    case Register::Tr7:
        return {};
    }
    // Bug?
    // Reached only for an enumerator missing from the switch above; trap into
    // the debugger (MSVC intrinsic) and return an invalid register.
    __debugbreak();
    return asmjit::x86::Reg();
}
} // presumably closes a namespace opened before this chunk — TODO confirm
39.986145
63
0.580335
[ "vector" ]
0f96c3b9bf20f657d00c48c3ad27e0c4d11b8083
4,252
cpp
C++
inference-engine/tests_deprecated/unit/engines/vpu/get_vpu_scale_from_ir_tests.cpp
evgenytalanin-intel/openvino
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
[ "Apache-2.0" ]
null
null
null
inference-engine/tests_deprecated/unit/engines/vpu/get_vpu_scale_from_ir_tests.cpp
evgenytalanin-intel/openvino
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
[ "Apache-2.0" ]
4
2021-04-01T08:29:48.000Z
2021-08-30T16:12:52.000Z
inference-engine/tests_deprecated/unit/engines/vpu/get_vpu_scale_from_ir_tests.cpp
evgenytalanin-intel/openvino
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
[ "Apache-2.0" ]
3
2021-03-09T08:27:29.000Z
2021-04-07T04:58:54.000Z
// Copyright (C) 2018-2020 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "graph_transformer_tests.hpp" #include "tests_vpu_common.hpp" #include <cpp/ie_executable_network.hpp> #include <limits> using namespace vpu; using namespace InferenceEngine; using VPU_AddVpuScaleTest = GraphTransformerTest; // TEST_F(VPU_AddVpuScaleTest, CanAddVpuScaleToNetwork) { // InitCompileEnv(); // auto& env = CompileEnv::get(); // CompilationConfig config{}; // config.irWithVpuScalesDir = "/"; // env.updateConfig(config); // Builder::Network builder("network"); // Builder::FullyConnectedLayer fcBuilder("FullyConnected"); // fcBuilder.setOutputNum(1024 * 1); // SizeVector inputDims = {1, 2, 16, 16}; // idx_t layerId = builder.addLayer(Builder::InputLayer("input").setPort(Port(inputDims))); // Blob::Ptr blob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1024, 2, 16, 16}, Layout::OIHW)); // blob->allocate(); // idx_t weightsId = builder.addLayer(Builder::ConstLayer("weights").setData(blob)); // layerId = builder.addLayer({{layerId}, {weightsId}}, fcBuilder); // builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output")); // auto network = Builder::convertToICNNNetwork(builder.build()); // CNNLayerPtr layer; // network->getLayerByName("FullyConnected", layer, nullptr); // EXPECT_EQ(layer->params.find("vpu_scale"), layer->params.end()); // auto model = frontEnd->buildInitialModel(*network); // auto middleEnd = passManager->buildMiddleEnd(); // middleEnd->run(model); // EXPECT_NE(layer->params.find("vpu_scale"), layer->params.end()); // } // TEST_F(VPU_AddVpuScaleTest, VpuScaleFromIrChangesWeights) { // InitCompileEnv(); // const auto& env = CompileEnv::get(); // CompilationConfig config{}; // config.irWithVpuScalesDir = "/"; // env.updateConfig(config); // Builder::Network netBuilder("network"); // Blob::Ptr weightsBlob = make_shared_blob<ie_fp16>(TensorDesc(Precision::FP16, {1, 1, 1, 1}, Layout::NCHW)); // weightsBlob->allocate(); // auto buf = 
weightsBlob->buffer().as<ie_fp16*>(); // for (size_t i = 0; i < weightsBlob->size(); ++i) { // buf[i] = PrecisionUtils::f32tof16(1.f); // } // idx_t layerId = netBuilder.addLayer(Builder::InputLayer("input").setPort(Port({1, 1, 1, 1}))); // size_t weightsId = netBuilder.addLayer(Builder::ConstLayer("weights").setData(weightsBlob)); // const auto convBuilder = Builder::ConvolutionLayer("Convolution").setStrides({1, 1}).setKernel({1, 1}) // .setOutDepth(1).setInputPort(Port({1, 1, 1, 1})); // layerId = netBuilder.addLayer({{layerId}, {weightsId}}, convBuilder); // netBuilder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("output")); // auto network = Builder::convertToICNNNetwork(netBuilder.build()); // CNNLayerPtr layer; // network->getLayerByName("Convolution", layer, nullptr); // auto model = frontEnd->buildInitialModel(*network); // auto middleEnd = passManager->buildMiddleEnd(); // auto checkWeightWasChanged = [this, network, layer](const float scale) { // layer->params["vpu_scale"] = toString(scale); // auto model = frontEnd->buildInitialModel(*network); // auto middleEnd = passManager->buildMiddleEnd(); // middleEnd->run(model); // for (const auto& stage : model->getStages()) { // if (stage->name() == "Convolution") { // auto content = stage->input(1)->content()->get<ie_fp16>(); // EXPECT_EQ(scale, PrecisionUtils::f16tof32(content[0])); // } // } // }; // checkWeightWasChanged(32); // checkWeightWasChanged(64); // const auto maxVal = std::numeric_limits<float>::infinity(); // layer->params["vpu_scale"] = toString(maxVal); // model = frontEnd->buildInitialModel(*network); // middleEnd = passManager->buildMiddleEnd(); // middleEnd->run(model); // for (const auto& stage : model->getStages()) { // if (stage->name() == "Convolution") { // EXPECT_EQ(stage->attrs().get<float>("scaleFactor"), maxVal); // } // } // }
36.033898
114
0.640169
[ "model" ]
0f9bb447073c68f4ed9575f8a720730cec93919c
16,002
cc
C++
test/integration/physics_msgs_inertia.cc
SamFerwerda/Gazebo10-commits
b33ac5982fb75cac894fa145f7268146d44e0724
[ "ECL-2.0", "Apache-2.0" ]
3
2018-07-17T00:17:13.000Z
2020-05-26T08:39:25.000Z
test/integration/physics_msgs_inertia.cc
SamFerwerda/Gazebo10-commits
b33ac5982fb75cac894fa145f7268146d44e0724
[ "ECL-2.0", "Apache-2.0" ]
1
2020-06-04T10:26:04.000Z
2020-06-04T10:26:04.000Z
test/integration/physics_msgs_inertia.cc
SamFerwerda/Gazebo10-commits
b33ac5982fb75cac894fa145f7268146d44e0724
[ "ECL-2.0", "Apache-2.0" ]
2
2016-04-25T22:05:09.000Z
2020-03-08T08:45:12.000Z
/* * Copyright (C) 2015 Open Source Robotics Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <string> #include "gazebo/msgs/msgs.hh" #include "gazebo/physics/physics.hh" #include "gazebo/transport/transport.hh" #include "gazebo/test/ServerFixture.hh" #include "gazebo/test/helper_physics_generator.hh" using namespace gazebo; class InertiaMsgsTest : public ServerFixture, public testing::WithParamInterface<const char*> { /// \brief Set inertia parameters over ~/model/modify /// and verify that Inertial accessors register the change. /// \param[in] _physicsEngine Type of physics engine to use. public: void InertialAccessors(const std::string &_physicsEngine); /// \brief Set center of mass of link over ~/model/modify /// and verify that it causes a seesaw to unbalance. /// \param[in] _physicsEngine Type of physics engine to use. public: void SetCoG(const std::string &_physicsEngine); /// \brief Set mass of link over ~/model/modify /// and verify that it causes a seesaw to unbalance. /// \param[in] _physicsEngine Type of physics engine to use. public: void SetMass(const std::string &_physicsEngine); /// \brief Set moment of inertia of pendulums over ~/model/modify /// and verify that it changes frequency of oscillation. /// \param[in] _physicsEngine Type of physics engine to use. 
public: void SetPendulumInertia(const std::string &_physicsEngine); }; ///////////////////////////////////////////////// void InertiaMsgsTest::InertialAccessors(const std::string &_physicsEngine) { this->Load("worlds/seesaw.world", true, _physicsEngine); physics::WorldPtr world = physics::get_world("default"); ASSERT_TRUE(world != NULL); const std::string modelName("cube1"); auto model = world->ModelByName(modelName); ASSERT_TRUE(model != NULL); auto link = model->GetLink(); ASSERT_TRUE(link != NULL); auto inertial = link->GetInertial(); ASSERT_TRUE(inertial != NULL); const double mass = inertial->Mass(); const ignition::math::Vector3d cog = inertial->CoG(); const ignition::math::Vector3d Ixxyyzz = inertial->PrincipalMoments(); const ignition::math::Vector3d Ixyxzyz = inertial->ProductsOfInertia(); EXPECT_DOUBLE_EQ(mass, 45.56250000000001); EXPECT_EQ(cog, ignition::math::Vector3d::Zero); EXPECT_EQ(Ixxyyzz, 1.537734375*ignition::math::Vector3d::One); EXPECT_EQ(Ixyxzyz, ignition::math::Vector3d::Zero); // new inertial values msgs::Model msg; msg.set_name(modelName); msg.add_link(); auto msgLink = msg.mutable_link(0); msgLink->set_name("link"); msgLink->set_id(link->GetId()); auto msgInertial = msgLink->mutable_inertial(); msgInertial->set_mass(99.9); msgInertial->set_ixx(12.3); msgInertial->set_ixy(0.123); msgInertial->set_ixz(0.456); msgInertial->set_iyy(13.4); msgInertial->set_iyz(0.789); msgInertial->set_izz(15.6); const ignition::math::Vector3d newCog(1.1, -2.2, 3.3); msgs::Set(msgInertial->mutable_pose(), ignition::math::Pose3d( newCog, ignition::math::Quaterniond())); // Set inertial properties by publishing to "~/model/modify" transport::PublisherPtr modelPub = this->node->Advertise<msgs::Model>("~/model/modify"); modelPub->WaitForConnection(); modelPub->Publish(msg, true); while (newCog != inertial->CoG()) { world->Step(1); common::Time::MSleep(1); modelPub->Publish(msg, true); } EXPECT_DOUBLE_EQ(inertial->Mass(), msgInertial->mass()); 
EXPECT_EQ(inertial->CoG(), newCog); EXPECT_EQ(inertial->PrincipalMoments(), ignition::math::Vector3d( msgInertial->ixx(), msgInertial->iyy(), msgInertial->izz())); EXPECT_EQ(inertial->ProductsOfInertia(), ignition::math::Vector3d( msgInertial->ixy(), msgInertial->ixz(), msgInertial->iyz())); } ///////////////////////////////////////////////// TEST_P(InertiaMsgsTest, InertialAccessors) { InertialAccessors(GetParam()); } ///////////////////////////////////////////////// void InertiaMsgsTest::SetCoG(const std::string &_physicsEngine) { this->Load("worlds/seesaw.world", true, _physicsEngine); physics::WorldPtr world = physics::get_world("default"); ASSERT_TRUE(world != NULL); // check the gravity vector physics::PhysicsEnginePtr physics = world->Physics(); ASSERT_TRUE(physics != NULL); EXPECT_EQ(physics->GetType(), _physicsEngine); auto g = world->Gravity(); EXPECT_EQ(g, ignition::math::Vector3d(0, 0, -9.8)); const std::string modelName("plank"); auto model = world->ModelByName(modelName); ASSERT_TRUE(model != NULL); auto link = model->GetLink(); ASSERT_TRUE(link != NULL); auto inertial = link->GetInertial(); ASSERT_TRUE(inertial != NULL); const double mass = inertial->Mass(); const ignition::math::Vector3d cog = inertial->CoG(); const ignition::math::Vector3d Ixxyyzz = inertial->PrincipalMoments(); const ignition::math::Vector3d Ixyxzyz = inertial->ProductsOfInertia(); EXPECT_DOUBLE_EQ(mass, 120); EXPECT_EQ(cog, ignition::math::Vector3d::Zero); EXPECT_EQ(Ixxyyzz, ignition::math::Vector3d(2.564, 360.064, 362.5)); EXPECT_EQ(Ixyxzyz, ignition::math::Vector3d::Zero); // new center of mass msgs::Model msg; msg.set_name(modelName); msg.add_link(); auto msgLink = msg.mutable_link(0); msgLink->set_name("link"); msgLink->set_id(link->GetId()); auto msgInertial = msgLink->mutable_inertial(); const ignition::math::Vector3d newCoG(2.5, 0, 0); msgs::Set(msgInertial->mutable_pose(), ignition::math::Pose3d( newCoG, ignition::math::Quaterniond())); // Set inertial properties by 
publishing to "~/model/modify" transport::PublisherPtr modelPub = this->node->Advertise<msgs::Model>("~/model/modify"); modelPub->WaitForConnection(); modelPub->Publish(msg, true); while (newCoG != inertial->CoG()) { world->Step(1); common::Time::MSleep(1); modelPub->Publish(msg, true); } EXPECT_EQ(inertial->CoG(), newCoG); world->Step(1000); EXPECT_GT(model->WorldPose().Rot().Euler().Y(), 0.25); } ///////////////////////////////////////////////// TEST_P(InertiaMsgsTest, SetCoG) { std::string physicsEngine = GetParam(); if (physicsEngine == "bullet" || physicsEngine == "simbody") { gzerr << physicsEngine << " doesn't yet support dynamically changing a link's center of mass" << std::endl; return; } SetCoG(GetParam()); } ///////////////////////////////////////////////// void InertiaMsgsTest::SetMass(const std::string &_physicsEngine) { this->Load("worlds/seesaw.world", true, _physicsEngine); physics::WorldPtr world = physics::get_world("default"); ASSERT_TRUE(world != NULL); // check the gravity vector physics::PhysicsEnginePtr physics = world->Physics(); ASSERT_TRUE(physics != NULL); EXPECT_EQ(physics->GetType(), _physicsEngine); auto g = world->Gravity(); EXPECT_EQ(g, ignition::math::Vector3d(0, 0, -9.8)); const std::string modelName("cube1"); auto model = world->ModelByName(modelName); ASSERT_TRUE(model != NULL); auto link = model->GetLink(); ASSERT_TRUE(link != NULL); auto inertial = link->GetInertial(); ASSERT_TRUE(inertial != NULL); const double mass = inertial->Mass(); const ignition::math::Vector3d cog = inertial->CoG(); const ignition::math::Vector3d Ixxyyzz = inertial->PrincipalMoments(); const ignition::math::Vector3d Ixyxzyz = inertial->ProductsOfInertia(); EXPECT_DOUBLE_EQ(mass, 45.56250000000001); EXPECT_EQ(cog, ignition::math::Vector3d::Zero); EXPECT_EQ(Ixxyyzz, 1.537734375*ignition::math::Vector3d::One); EXPECT_EQ(Ixyxzyz, ignition::math::Vector3d::Zero); // new inertial values msgs::Model msg; msg.set_name(modelName); msg.add_link(); auto msgLink = 
msg.mutable_link(0); msgLink->set_name("link"); msgLink->set_id(link->GetId()); auto msgInertial = msgLink->mutable_inertial(); const double newMass = 500; msgInertial->set_mass(newMass); // Set inertial properties by publishing to "~/model/modify" transport::PublisherPtr modelPub = this->node->Advertise<msgs::Model>("~/model/modify"); modelPub->WaitForConnection(); modelPub->Publish(msg, true); while (!ignition::math::equal(newMass, inertial->Mass())) { world->Step(1); common::Time::MSleep(1); modelPub->Publish(msg, true); } EXPECT_DOUBLE_EQ(inertial->Mass(), msgInertial->mass()); world->Step(1000); EXPECT_LT(model->WorldPose().Pos().Z(), 0.40); } ///////////////////////////////////////////////// TEST_P(InertiaMsgsTest, SetMass) { std::string physicsEngine = GetParam(); if (physicsEngine == "simbody") { gzerr << physicsEngine << " doesn't yet support dynamically changing a link's mass" << std::endl; return; } SetMass(physicsEngine); } ///////////////////////////////////////////////// void InertiaMsgsTest::SetPendulumInertia(const std::string &_physicsEngine) { this->Load("worlds/pendulum_axes.world", true, _physicsEngine); physics::WorldPtr world = physics::get_world("default"); ASSERT_TRUE(world != NULL); // check the gravity vector physics::PhysicsEnginePtr physics = world->Physics(); ASSERT_TRUE(physics != NULL); EXPECT_EQ(physics->GetType(), _physicsEngine); auto g = world->Gravity(); EXPECT_EQ(g, ignition::math::Vector3d(0, 0, -9.8)); double dt = physics->GetMaxStepSize(); EXPECT_NEAR(dt, 1e-3, 1e-6); std::vector<std::string> modelNames; for (auto const &model : world->Models()) { std::string name = model->GetName(); if (name.compare(0, 9, "pendulum_") == 0) { modelNames.push_back(name); } } ASSERT_EQ(modelNames.size(), 6u); std::vector<physics::ModelPtr> models; std::vector<physics::JointPtr> joints; std::vector<physics::LinkPtr> links; std::vector<double> pendulumLengths; std::vector<double> initialAngles; std::vector<double> cycleAngles; std::vector<int> 
cycleCount; for (auto const &modelName : modelNames) { gzdbg << "Initializing model " << modelName << std::endl; auto model = world->ModelByName(modelName); ASSERT_TRUE(model != NULL); models.push_back(model); auto link = model->GetLink(); ASSERT_TRUE(link != NULL); links.push_back(link); auto joint = model->GetJoint("joint"); ASSERT_TRUE(joint != NULL); joints.push_back(joint); // Compute distance from cg to joint anchor auto linkPose = link->WorldCoGPose(); auto jointPose = joint->WorldPose(); auto jointToCoG = linkPose.Pos() - jointPose.Pos(); double length = jointToCoG.Length(); EXPECT_NEAR(length, 0.05, 1e-6); pendulumLengths.push_back(length); double angle = asin(jointToCoG.Cross(g).Dot( joint->GlobalAxis(0)) / length / 9.8); EXPECT_NEAR(angle, -M_PI / 10, 1e-5); initialAngles.push_back(angle); // hysteresis threshhold for cycle counting cycleAngles.push_back(angle / 2); // count of oscillation cycles cycleCount.push_back(0); } // unthrottle physics to allow for many timesteps physics->SetRealTimeUpdateRate(0.0); // simulate 30 seconds and count oscillation cycles const int steps = 30000; const double timeStepped = steps * dt; for (int step = 0; step < steps; ++step) { world->Step(1); for (unsigned int i = 0; i < models.size(); ++i) { auto model = models[i]; auto joint = joints[i]; auto initialAngle = initialAngles[i]; auto cycleAngle = cycleAngles[i]; auto angle = joint->Position(0) - initialAngle; if (angle / cycleAngle >= 1) { cycleAngles[i] *= -1; cycleCount[i]++; } } } // Verify that expected number of cycles is counted for (unsigned int i = 0; i < models.size(); ++i) { auto length = pendulumLengths[i]; auto cycles = cycleCount[i]; // expected natural frequency for box pendulum (Hz) // see physics_msgs_inertia.ipynb for derivation double freq = 0.5 * M_1_PI * sqrt(300.0 / 401.0 * g.Length() / length); // 2 cycles counted per oscillation double expectedCycles = 2 * freq * timeStepped; EXPECT_EQ(cycles, static_cast<int>(expectedCycles)); } // modify 
inertia of each named pendulum for (unsigned int i = 0; i < models.size(); ++i) { auto model = models[i]; auto joint = joints[i]; auto link = links[i]; auto inertial = link->GetInertial(); ASSERT_TRUE(inertial != NULL); const ignition::math::Vector3d Ixxyyzz = inertial->PrincipalMoments(); const ignition::math::Vector3d Ixyxzyz = inertial->ProductsOfInertia(); // new inertial values msgs::Model msg; msg.set_name(modelNames[i]); msg.add_link(); auto msgLink = msg.mutable_link(0); msgLink->set_name("link"); msgLink->set_id(link->GetId()); auto msgInertial = msgLink->mutable_inertial(); msgInertial->set_ixx(Ixxyyzz[0] * 2); msgInertial->set_iyy(Ixxyyzz[1] * 2); msgInertial->set_izz(Ixxyyzz[2] * 2); // Set inertial properties by publishing to "~/model/modify" transport::PublisherPtr modelPub = this->node->Advertise<msgs::Model>("~/model/modify"); modelPub->WaitForConnection(); modelPub->Publish(msg, true); while (Ixxyyzz[0] == inertial->PrincipalMoments()[0]) { world->Step(1); common::Time::MSleep(1); modelPub->Publish(msg, true); } EXPECT_NEAR(2*Ixxyyzz[0], inertial->PrincipalMoments()[0], 1e-10); EXPECT_NEAR(2*Ixxyyzz[1], inertial->PrincipalMoments()[1], 1e-10); EXPECT_NEAR(2*Ixxyyzz[2], inertial->PrincipalMoments()[2], 1e-10); } // Reset world and cycle count to restore initial conditions world->Reset(); for (unsigned int i = 0; i < cycleCount.size(); ++i) { cycleCount[i] = 0; cycleAngles[i] = initialAngles[i] / 2; } // simulate 30 seconds and count oscillation cycles for (int step = 0; step < steps; ++step) { world->Step(1); for (unsigned int i = 0; i < models.size(); ++i) { auto model = models[i]; auto joint = joints[i]; auto initialAngle = initialAngles[i]; auto cycleAngle = cycleAngles[i]; auto angle = joint->Position(0) - initialAngle; if (angle / cycleAngle >= 1) { cycleAngles[i] *= -1; cycleCount[i]++; } } } // Verify that expected number of cycles is counted for (unsigned int i = 0; i < models.size(); ++i) { auto length = pendulumLengths[i]; auto cycles = 
cycleCount[i]; // expected natural frequency for box pendulum (Hz) // see physics_msgs_inertia.ipynb for derivation double freq = 0.5 * M_1_PI * sqrt(150.0 / 251.0 * g.Length() / length); // 2 cycles counted per oscillation double expectedCycles = 2 * freq * timeStepped; EXPECT_EQ(cycles, static_cast<int>(expectedCycles)); } } ///////////////////////////////////////////////// TEST_P(InertiaMsgsTest, SetPendulumInertia) { std::string physicsEngine = GetParam(); if (physicsEngine == "simbody") { gzerr << physicsEngine << " doesn't yet support dynamically changing moment of inertia" << std::endl; return; } SetPendulumInertia(physicsEngine); } INSTANTIATE_TEST_CASE_P(PhysicsEngines, InertiaMsgsTest, PHYSICS_ENGINE_VALUES); ///////////////////////////////////////////////// int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
32.723926
80
0.653356
[ "vector", "model" ]
0fa10d6ea27912a89495611298132fe6d8106d14
2,882
cpp
C++
logistic regression.cpp
rockerritesh/ML-in-c-
e8a0d5e4ae2f39819d5f3f0a7d5e4adfb5d38623
[ "MIT" ]
1
2021-06-22T12:10:20.000Z
2021-06-22T12:10:20.000Z
logistic regression.cpp
rockerritesh/ML-in-c-
e8a0d5e4ae2f39819d5f3f0a7d5e4adfb5d38623
[ "MIT" ]
null
null
null
logistic regression.cpp
rockerritesh/ML-in-c-
e8a0d5e4ae2f39819d5f3f0a7d5e4adfb5d38623
[ "MIT" ]
null
null
null
// In this program i have made a simple logistic regression model of two variable i.e. y =mx+b. // And by using gradient update same as machine learning model this program automatically update the value of m and b in each iteration. // this program is only prototype of binary classes predicting model. #include <iostream> #include<math.h> #include <cmath> using namespace std; long double cost(long double y[] , long double y_pred[] , int l); long double y_pred( long double x,long double m, long double b); long double mupdate(long double x, long double y , long double y_pred ); long double cupdate(long double y , long double y_pred ); int main() { long double y[41] ={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; // here y_train "dont forget to change the value of size of array if you want to use it on your data" long double x[41] ={1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,101,111,101,111,101,115,101,118,103,115,111,101,111,105,111,102}; // here x_train "dont forget to change the value of size of array if you want to use it on your data" int l=sizeof(x)/sizeof(x[0]); // to find length of array long double m= 0; // value of m initial long double b=0; // value of b initial long double y_predict[l]; //array y_predict long double m1,b1; // finial value of m & b int i,j; long double mi,ci; long double sum=0; long double hum = 0; long double z; int iteration=5000; //number of times function will call or training loop for(j=0;j<iteration;j++){ for(i=0;i<l;i++){ z=y_pred(x[i],m,b); y_predict[i]=z; } for(i=0;i<l;i++) { mi=mupdate(x[i],y[i],y_predict[i]); m1=-2*(sum+mi); } for(i=0;i<l;i++) { ci = cupdate(y[i],y_predict[i]); b1=-2*(hum+ci); } m = m - 0.001*m1; b = b + 0.001*b1; cout<<j+1<<"\t"<<"loss= "<<cost(y,y_predict, l)<<"\t"<<"m="<< m<<"\t"<<"b="<<b<<"\n"; } //cout<<m << "&"<< b; //final value will store in m and b int p; cin >> p; cin.ignore(); cout << 1/(1+ exp(-(p*m +b))); return 0; } // y_pred long double y_pred(long double 
x,long double m,long double b) { return 1/(1+ exp(-(x*m +b))) ; } // cost function long double cost(long double y[], long double y_pred[],int l) { int i,q; int p=0; for(i=0;i<l;i++) { long double q=(-(y[i]*log(y_pred[i]))-((1-y[i])*log(1-y_pred[i]))); p=p+q; } return p/l; //return y; } // m update in each gradient update long double mupdate(long double x, long double y , long double y_pred) { return x*(y-y_pred)*y_pred*(1-y_pred); } // c update in each gradient update long double cupdate(long double y , long double y_pred ) { return (y-y_pred)*y_pred*(1-y_pred); }
24.844828
247
0.590215
[ "model" ]
0fa14142f78b57d19d4d786a17313a34aff7dfe5
796
cc
C++
EE599HW4/4-6/tests/solution_test.cc
YingnanWang-Ray/EE599
7e870b78b4f61f783077d88d437937e697d7abf0
[ "MIT" ]
null
null
null
EE599HW4/4-6/tests/solution_test.cc
YingnanWang-Ray/EE599
7e870b78b4f61f783077d88d437937e697d7abf0
[ "MIT" ]
null
null
null
EE599HW4/4-6/tests/solution_test.cc
YingnanWang-Ray/EE599
7e870b78b4f61f783077d88d437937e697d7abf0
[ "MIT" ]
null
null
null
#include "src/lib/solution.h" #include "gtest/gtest.h" #include <vector> TEST(Function0, Return0) { Solution solution ; std::vector<int> input = {-2, -3,-1,-5,-6,-10}; int k = 3; int expected = -3; EXPECT_EQ(expected, solution.Kth(input,k)); } TEST(Function1, Return1) { Solution solution ; std::vector<int> input = {0,2,1,5,6,3}; int k = 7; int expected = INT32_MIN; EXPECT_EQ(expected, solution.Kth(input,k)); } TEST(Function2, Return2) { Solution solution ; std::vector<int> input = {5675,2,56522,14,444,-456,78}; int k = 3; int expected = 444; EXPECT_EQ(expected, solution.Kth(input,k)); } TEST(Function3, Return3) { Solution solution ; std::vector<int> input = {}; int k = 2; int expected = INT32_MIN; EXPECT_EQ(expected, solution.Kth(input,k)); }
22.742857
57
0.653266
[ "vector" ]
0fa3cc182f09fa1578ef05ad287eed5be7c5fe41
10,075
cpp
C++
code/library/States/TitleScreen.cpp
jpike/noah_ark
491ab775f69457e20a930c0dd8049609dee9c677
[ "Unlicense" ]
null
null
null
code/library/States/TitleScreen.cpp
jpike/noah_ark
491ab775f69457e20a930c0dd8049609dee9c677
[ "Unlicense" ]
61
2015-04-11T21:26:12.000Z
2021-10-02T13:34:43.000Z
code/library/States/TitleScreen.cpp
jpike/noah_ark
491ab775f69457e20a930c0dd8049609dee9c677
[ "Unlicense" ]
null
null
null
#include "Graphics/Color.h" #include "States/TitleScreen.h" namespace STATES { /// Updates the title screen based on elapsed time and user input. /// @param[in] gaming_hardware - The hardware supplying input for the update. /// @return The state the game after updating the title screen. GameState TitleScreen::Update(const HARDWARE::GamingHardware& gaming_hardware) { // UPDATE THE ELAPSED TIME FOR THE TITLE SCREEN. ElapsedTime += gaming_hardware.Clock.ElapsedTimeSinceLastFrame; // CHECK IF THE MAIN 'START' BUTTON WAS PRESSED. bool start_button_pressed = gaming_hardware.InputController.ButtonWasPressed(sf::Keyboard::Return); if (start_button_pressed) { // GET THE STATE FOR THE CURRENTLY SELECTED MENU OPTION. GameState next_state = MenuOptions.at(SelectedMenuOptionIndex); return next_state; } else { // CHECK IF AN ARROW KEY WAS PRESSED TO MOVE TO ANOTHER MENU OPTION. bool up_button_pressed = gaming_hardware.InputController.ButtonWasPressed(sf::Keyboard::Up); bool down_button_pressed = gaming_hardware.InputController.ButtonWasPressed(sf::Keyboard::Down); if (up_button_pressed) { // MOVE TO THE PREVIOUS MENU OPTION. // Protection against moving to an invalid menu option is needed. bool previous_menu_option_exists = (SelectedMenuOptionIndex > 0); if (previous_menu_option_exists) { --SelectedMenuOptionIndex; } } else if (down_button_pressed) { // MOVE TO THE NEXT MENU OPTION. // Protection against moving to an invalid menu option is needed. size_t menu_option_count = MenuOptions.size(); size_t last_menu_option_index = menu_option_count - 1; bool next_menu_option_exists = (SelectedMenuOptionIndex < last_menu_option_index); if (next_menu_option_exists) { ++SelectedMenuOptionIndex; } } // INDICATE THE GAME SHOULD REMAIN ON THE TITLE SCREEN. return GameState::TITLE_SCREEN; } } /// Renders the title screen. /// @param[in,out] renderer - The renderer to use for rendering. /// @return The rendered title screen. 
sf::Sprite TitleScreen::Render(GRAPHICS::Renderer& renderer) const { // CALCULATE INITIAL PARAMETERS FOR THE GAME'S TITLE. // The sub-heading is centered within the top third of the screen, // and the main title will be below that. However, these two components // end up scrolling in from the top initially. MATH::FloatRectangle screen_rectangle = renderer.Screen->GetBoundingRectangle<float>(); float screen_left_x_position = screen_rectangle.LeftTop.X; float screen_top_y_position = screen_rectangle.LeftTop.Y; float screen_width_in_pixels = screen_rectangle.Width(); float screen_height_in_pixels = screen_rectangle.Height(); float one_third_of_screen_height_in_pixels = screen_height_in_pixels / 3.0f; // CALCULATE THE OFFSET OF THE TITLE BASED ON ELAPSED TIME. // It should scroll in from the top. const sf::Time TITLE_TOTAL_SCROLL_IN_TIME = sf::seconds(1); float title_y_scroll_offset_in_pixels = 0.0f; bool title_still_scrolling_in = (ElapsedTime < TITLE_TOTAL_SCROLL_IN_TIME); if (title_still_scrolling_in) { // How far the title components have scrolled in from the top should be based on // how much of the total elapsed time has passed. For easier placement in the // final positions, the scroll offset will be negative to place the title components // higher up on the screen while they're still scrolling in. float ratio_of_elapsed_time = ElapsedTime / TITLE_TOTAL_SCROLL_IN_TIME; float ratio_of_remaining_elapsed_time = 1.0f - ratio_of_elapsed_time; title_y_scroll_offset_in_pixels = -1.0f * one_third_of_screen_height_in_pixels * ratio_of_remaining_elapsed_time; } // DRAW THE SUB-HEADING FOR THE GAME'S TITLE. // It is centered within the top third of the screen once fully scrolled in. 
float sub_heading_top_y_position = screen_top_y_position + title_y_scroll_offset_in_pixels; MATH::FloatRectangle sub_heading_screen_rectangle = MATH::FloatRectangle::FromLeftTopAndDimensions( screen_left_x_position, sub_heading_top_y_position, screen_width_in_pixels, one_third_of_screen_height_in_pixels); const float BIBLE_GAMES_HEADING_TEXT_SCALE = 1.5f; renderer.RenderCenteredText( "BIBLE GAMES", RESOURCES::AssetId::SERIF_FONT_TEXTURE, sub_heading_screen_rectangle, GRAPHICS::Color::WHITE, BIBLE_GAMES_HEADING_TEXT_SCALE); // DRAW THE MAIN GAME'S TITLE. // It is centered within the middle third of the screen once fully scrolled in. float main_title_top_y_position = one_third_of_screen_height_in_pixels + title_y_scroll_offset_in_pixels; MATH::FloatRectangle main_title_screen_rectangle = MATH::FloatRectangle::FromLeftTopAndDimensions( screen_left_x_position, main_title_top_y_position, screen_width_in_pixels, one_third_of_screen_height_in_pixels); const float TITLE_TEXT_SCALE = 3.0f; renderer.RenderCenteredText( "NOAH's ARK", RESOURCES::AssetId::SERIF_FONT_TEXTURE, main_title_screen_rectangle, GRAPHICS::Color::YELLOW, TITLE_TEXT_SCALE); // DRAW EACH OF THE MENU OPTIONS. float two_thirds_screen_height_in_pixels = 2 * one_third_of_screen_height_in_pixels; float current_menu_option_top_screen_position = two_thirds_screen_height_in_pixels; size_t menu_option_count = MenuOptions.size(); for (size_t menu_option_index = 0; menu_option_index < menu_option_count; ++menu_option_index) { // CALCULATE THE SCREEN RECTANGLE FOR THE CURRENT MENU OPTION. float menu_option_screen_height = static_cast<float>(GRAPHICS::GUI::Glyph::DEFAULT_HEIGHT_IN_PIXELS); MATH::FloatRectangle menu_option_screen_rectangle = MATH::FloatRectangle::FromLeftTopAndDimensions( screen_left_x_position, current_menu_option_top_screen_position, screen_width_in_pixels, menu_option_screen_height); // GET THE TEXT COLOR FOR THE CURRENT MENU OPTION. 
// The selected one should have a different color in order to stand out. GRAPHICS::Color menu_option_color = GRAPHICS::Color::RED; bool current_menu_option_is_selected = (menu_option_index == SelectedMenuOptionIndex); if (current_menu_option_is_selected) { // A lighter color is chosen for selected menu options since that // seemed more intuitive. menu_option_color = GRAPHICS::Color::ORANGE; } // GET THE TEXT FOR THE CURRENT MENU OPTION. std::string menu_option_text = ""; if (current_menu_option_is_selected) { // To better help players see which menu option is selected, // some additional "arrow" text is rendered before the // main text for the selected menu option. // It is rendered separately to avoid having the length of the // menu options change, which results in shifting if they're // rendered as centered text. // It conveniently happens that 1/3 across the width of the screen // places this arrow pointer just to the left of all of the // current menu options. float one_third_screen_width_in_pixels = screen_width_in_pixels / 3.0f; float selected_menu_option_arrow_left_screen_position_in_pixels = (screen_left_x_position + one_third_screen_width_in_pixels); MATH::Vector2f selected_menu_option_arrow_left_top_screen_position_in_pixels( selected_menu_option_arrow_left_screen_position_in_pixels, current_menu_option_top_screen_position); const std::string SELECTED_MENU_OPTION_POINTER_TEXT = ">"; renderer.RenderText( SELECTED_MENU_OPTION_POINTER_TEXT, RESOURCES::AssetId::FONT_TEXTURE, selected_menu_option_arrow_left_top_screen_position_in_pixels, menu_option_color); } GameState current_menu_option = MenuOptions.at(menu_option_index); switch (current_menu_option) { case GameState::GAME_SELECTION_SCREEN: menu_option_text += "PLAY GAME"; break; case GameState::CREDITS_SCREEN: menu_option_text += "CREDITS"; break; case GameState::NOTICE_SCREEN: menu_option_text += "RATING"; break; } // DRAW TEXT FOR THE CURRENT MENU OPTION. 
renderer.RenderCenteredText( menu_option_text, RESOURCES::AssetId::FONT_TEXTURE, menu_option_screen_rectangle, menu_option_color); // UPDATE THE POSITION FOR THE NEXT MENU OPTION. current_menu_option_top_screen_position += menu_option_screen_height; } // RETURN THE FINAL RENDERED SCREEN. sf::Sprite screen = renderer.RenderFinalScreen(); return screen; } }
50.124378
142
0.64139
[ "render" ]
0fa9a2f8f3d1e6b6c97d1afbb54ed9cb754a4f28
34,230
cpp
C++
modules/dnn/src/dnn.cpp
deli4iled/opencv_contrib
27f6d4e78eb77aada2d972f2a0db3dedc3f10efd
[ "BSD-3-Clause" ]
null
null
null
modules/dnn/src/dnn.cpp
deli4iled/opencv_contrib
27f6d4e78eb77aada2d972f2a0db3dedc3f10efd
[ "BSD-3-Clause" ]
null
null
null
modules/dnn/src/dnn.cpp
deli4iled/opencv_contrib
27f6d4e78eb77aada2d972f2a0db3dedc3f10efd
[ "BSD-3-Clause" ]
1
2019-03-01T09:46:20.000Z
2019-03-01T09:46:20.000Z
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2013, OpenCV Foundation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include "precomp.hpp" #include <set> #include <algorithm> #include <iostream> #include <sstream> #include <iterator> #include <opencv2/dnn/shape_utils.hpp> using namespace cv; using namespace cv::dnn; using std::vector; using std::map; using std::make_pair; using std::set; namespace cv { namespace dnn { template<typename T> static String toString(const T &v) { std::ostringstream ss; ss << v; return ss.str(); } Mat blobFromImage(const Mat& image_, double scalefactor, bool swapRB) { std::vector<Mat> images(1, image_); return blobFromImages(images, scalefactor, swapRB); } Mat blobFromImages(const std::vector<Mat>& images, double scalefactor, bool swapRB) { size_t i, nimages = images.size(); if(nimages == 0) return Mat(); Mat image0 = images[0]; int nch = image0.channels(); CV_Assert(image0.dims == 2); Mat blob, image; if (nch == 3 || nch == 4) { int sz[] = { (int)nimages, 3, image0.rows, image0.cols }; blob = Mat(4, sz, CV_32F); Mat ch[4]; for( i = 0; i < nimages; i++ ) { Mat image_ = images[i]; if(image_.depth() == CV_8U) { image_.convertTo(image, CV_32F, scalefactor); } else image = image_; CV_Assert(image.depth() == CV_32F); nch = image.channels(); CV_Assert(image.dims == 2 && (nch == 3 || nch == 4)); CV_Assert(image.size() == image0.size()); for( int j = 0; j < 3; j++ ) ch[j] = Mat(image.rows, image.cols, CV_32F, blob.ptr((int)i, j)); if(swapRB) std::swap(ch[0], ch[2]); split(image, ch); } } else { CV_Assert(nch == 1); int sz[] 
= { (int)nimages, 1, image0.rows, image0.cols }; blob = Mat(4, sz, CV_32F); for( i = 0; i < nimages; i++ ) { Mat image_ = images[i]; if(image_.depth() == CV_8U) { image_.convertTo(image, CV_32F, scalefactor); } else image = image_; CV_Assert(image.depth() == CV_32F); nch = image.channels(); CV_Assert(image.dims == 2 && (nch == 1)); CV_Assert(image.size() == image0.size()); image.copyTo(Mat(image.rows, image.cols, CV_32F, blob.ptr((int)i, 0))); } } return blob; } struct LayerPin { int lid; int oid; LayerPin(int layerId = -1, int outputId = -1) : lid(layerId), oid(outputId) {} bool valid() const { return (lid >= 0 && oid >= 0); } bool equal(const LayerPin &r) const { return (lid == r.lid && oid == r.oid); } }; struct LayerData { LayerData() {} LayerData(int _id, const String &_name, const String &_type, LayerParams &_params) : id(_id), name(_name), type(_type), params(_params) { //add logging info params.name = name; params.type = type; } int id; String name; String type; LayerParams params; std::vector<LayerPin> inputBlobsId; std::set<int> inputLayersId; std::set<int> requiredOutputs; Ptr<Layer> layerInstance; std::vector<Mat> outputBlobs; std::vector<Mat*> inputBlobs; std::vector<Mat> internals; int flag; Ptr<Layer> getLayerInstance() { if (layerInstance) return layerInstance; layerInstance = LayerFactory::createLayerInstance(type, params); if (!layerInstance) { CV_Error(Error::StsError, "Can't create layer \"" + name + "\" of type \"" + type + "\""); } return layerInstance; } }; //fake layer containing network input blobs struct DataLayer : public Layer { void finalize(const std::vector<Mat*>&, std::vector<Mat>&) {} void forward(std::vector<Mat*>&, std::vector<Mat>&, std::vector<Mat> &) {} int outputNameToIndex(String tgtName) { int idx = (int)(std::find(outNames.begin(), outNames.end(), tgtName) - outNames.begin()); return (idx < (int)outNames.size()) ? 
idx : -1; } void setNames(const std::vector<String> &names) { outNames.assign(names.begin(), names.end()); } private: std::vector<String> outNames; }; struct Net::Impl { typedef std::vector<MatShape> ShapesVec; struct LayerShapes { ShapesVec in, out, internal; bool inplace; LayerShapes() {inplace = false;} }; typedef std::map<int, LayerShapes> LayersShapesMap; typedef std::map<int, LayerData> MapIdToLayerData; Impl() { //allocate fake net input layer netInputLayer = Ptr<DataLayer>(new DataLayer()); LayerData &inpl = layers.insert( make_pair(0, LayerData()) ).first->second; inpl.id = 0; inpl.name = "_input"; inpl.type = "__NetInputLayer__"; inpl.layerInstance = netInputLayer; layerNameToId.insert(std::make_pair(inpl.name, inpl.id)); lastLayerId = 1; netWasAllocated = false; } Ptr<DataLayer> netInputLayer; std::vector<int> netOutputs; MapIdToLayerData layers; std::map<String, int> layerNameToId; int lastLayerId; bool netWasAllocated; void setUpNet() { if (!netWasAllocated) { allocateLayers(); computeNetOutputLayers(); netWasAllocated = true; } } int getLayerId(const String &layerName) { std::map<String, int>::iterator it = layerNameToId.find(layerName); return (it != layerNameToId.end()) ? it->second : -1; } int getLayerId(int id) { MapIdToLayerData::iterator it = layers.find(id); return (it != layers.end()) ? id : -1; } int getLayerId(DictValue &layerDesc) { if (layerDesc.isInt()) return getLayerId(layerDesc.get<int>()); else if (layerDesc.isString()) return getLayerId(layerDesc.get<String>()); CV_Assert(layerDesc.isInt() || layerDesc.isString()); return -1; } String getLayerName(int id) { MapIdToLayerData::iterator it = layers.find(id); return (it != layers.end()) ? 
it->second.name : "(unknown layer)"; } LayerData& getLayerData(int id) { MapIdToLayerData::iterator it = layers.find(id); if (it == layers.end()) CV_Error(Error::StsObjectNotFound, format("Layer with requested id=%d not found", id)); return it->second; } LayerData& getLayerData(const String &layerName) { int id = getLayerId(layerName); if (id < 0) CV_Error(Error::StsError, "Requsted layer \"" + layerName + "\" not found"); return getLayerData(id); } LayerData& getLayerData(const DictValue &layerDesc) { if (layerDesc.isInt()) return getLayerData(layerDesc.get<int>()); else if (layerDesc.isString()) return getLayerData(layerDesc.get<String>()); CV_Assert(layerDesc.isInt() || layerDesc.isString()); return *((LayerData*)NULL); } static void addLayerInput(LayerData &ld, int inNum, LayerPin from) { if ((int)ld.inputBlobsId.size() <= inNum) { ld.inputBlobsId.resize(inNum + 1); } else { LayerPin storedFrom = ld.inputBlobsId[inNum]; if (storedFrom.valid() && !storedFrom.equal(from)) CV_Error(Error::StsError, "Input #" + toString(inNum) + "of layer \"" + ld.name + "\" already was connected"); } ld.inputBlobsId[inNum] = from; } static void splitPin(const String &pinAlias, String &layerName, String &outName) { size_t delimPos = pinAlias.find('.'); layerName = pinAlias.substr(0, delimPos); outName = (delimPos == String::npos) ? String() : pinAlias.substr(delimPos + 1); } int resolvePinOutputName(LayerData &ld, const String &outName, bool isOutPin) { if (outName.empty()) return 0; if (std::isdigit(outName[0])) { char *lastChar; long inum = std::strtol(outName.c_str(), &lastChar, 10); if (*lastChar == 0) { CV_Assert(inum == (int)inum); return (int)inum; } } if (isOutPin) return ld.getLayerInstance()->outputNameToIndex(outName); else return ld.getLayerInstance()->inputNameToIndex(outName); } LayerPin getPinByAlias(const String &pinAlias, bool isOutPin = true) { LayerPin pin; String layerName, outName; splitPin(pinAlias, layerName, outName); pin.lid = (layerName.empty()) ? 
0 : getLayerId(layerName); if (pin.lid >= 0) pin.oid = resolvePinOutputName(getLayerData(pin.lid), outName, isOutPin); return pin; } void connect(int outLayerId, int outNum, int inLayerId, int inNum) { LayerData &ldOut = getLayerData(outLayerId); LayerData &ldInp = getLayerData(inLayerId); addLayerInput(ldInp, inNum, LayerPin(outLayerId, outNum)); ldOut.requiredOutputs.insert(outNum); } void computeNetOutputLayers() { netOutputs.clear(); MapIdToLayerData::iterator it; for (it = layers.begin(); it != layers.end(); it++) { int lid = it->first; LayerData &ld = it->second; if (ld.requiredOutputs.size() == 0) netOutputs.push_back(lid); } #ifndef NDEBUG std::cout << "\nNet Outputs(" << netOutputs.size() << "):\n"; for (size_t i = 0; i < netOutputs.size(); i++) std::cout << layers[netOutputs[i]].name << "\n"; #endif } #define CV_RETHROW_ERROR(err, newmsg)\ cv::error(err.code, newmsg, err.func.c_str(), err.file.c_str(), err.line) void allocateLayer(int lid, const LayersShapesMap& layersShapes) { LayerData &ld = layers[lid]; //already allocated if (ld.flag) return; size_t ninputs = ld.inputBlobsId.size(); #if 0 printf("layer %s:", ld.name.c_str()); for (size_t i = 0; i < ninputs; i++) { int inp_lid = ld.inputBlobsId[i].lid; LayerData &inp_ld = layers[inp_lid]; int inp_outputs = (int)inp_ld.outputBlobs.size(); std::cout << " " << inp_ld.name << "(" << inp_outputs; for( int j = 0; j < inp_outputs; j++ ) { std::cout << (j == 0 ? 
": " : ", ") << inp_ld.outputBlobs[j].size; } std::cout << ")"; } printf("\n"); #endif //determine parent layers for (size_t i = 0; i < ninputs; i++) ld.inputLayersId.insert(ld.inputBlobsId[i].lid); //allocate parents for (set<int>::iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++) allocateLayer(*i, layersShapes); //bind inputs ld.inputBlobs.resize(ninputs); for (size_t i = 0; i < ninputs; i++) { LayerPin from = ld.inputBlobsId[i]; CV_Assert(from.valid()); CV_DbgAssert(layers.count(from.lid) && (int)layers[from.lid].outputBlobs.size() > from.oid); ld.inputBlobs[i] = &layers[from.lid].outputBlobs[from.oid]; } LayersShapesMap::const_iterator layerShapesIt = layersShapes.find(lid); CV_Assert(layerShapesIt != layersShapes.end()); const ShapesVec& outShapes = layerShapesIt->second.out; CV_Assert(ld.requiredOutputs.size() <= outShapes.size()); ld.outputBlobs.resize(std::max((size_t)1, outShapes.size())); //layer produce at least one output blob for(int i = 0; i < outShapes.size(); i++) { if (shape(ld.outputBlobs[i]) != outShapes[i]) { if (layerShapesIt->second.inplace) { CV_Assert(ld.inputBlobs.size() == ld.outputBlobs.size()); CV_Assert(ld.inputBlobs[i]->total() == total(outShapes[i])); ld.outputBlobs[i] = ld.inputBlobs[i]->reshape(1, outShapes[i]); } else { ld.outputBlobs[i].create(outShapes[i], CV_32F); } } } const ShapesVec& intShapes = layerShapesIt->second.internal; ld.internals.resize(intShapes.size()); for(int i = 0; i < intShapes.size(); i++) { if (shape(ld.internals[i]) != intShapes[i] && total(intShapes[i])) ld.internals[i].create(intShapes[i], CV_32F); } Ptr<Layer> layerPtr = ld.getLayerInstance(); //try { layerPtr->finalize(ld.inputBlobs, ld.outputBlobs); #if 0 std::cout << "\toutputs:"; size_t noutputs = ld.outputBlobs.size(); for (size_t j = 0; j < noutputs; j++) { std::cout << (j == 0 ? 
" " : ", ") << ld.outputBlobs[j].size; } std::cout << "\n"; #endif } /*catch (const cv::Exception &err) { CV_RETHROW_ERROR(err, format("The following error occured while making allocate() for layer \"%s\": %s", ld.name.c_str(), err.err.c_str())); }*/ ld.flag = 1; } void allocateLayers() { MapIdToLayerData::iterator it; for (it = layers.begin(); it != layers.end(); it++) it->second.flag = 0; CV_Assert(!layers[0].outputBlobs.empty()); ShapesVec inputShapes; for(int i = 0; i < layers[0].outputBlobs.size(); i++) { CV_Assert(layers[0].outputBlobs[i].total()); inputShapes.push_back(shape(layers[0].outputBlobs[i])); } LayersShapesMap layersShapes; getLayersShapes(inputShapes, layersShapes); for (it = layers.begin(); it != layers.end(); it++) { int lid = it->first; allocateLayer(lid, layersShapes); } } void forwardLayer(LayerData &ld, bool clearFlags = true) { if (clearFlags) { MapIdToLayerData::iterator it; for (it = layers.begin(); it != layers.end(); it++) it->second.flag = 0; } //already was forwarded if (ld.flag) return; //forward parents for (set<int>::iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++) { forwardLayer(layers[*i], false); } //forward itself //try { ld.layerInstance->forward(ld.inputBlobs, ld.outputBlobs, ld.internals); } /*catch (const cv::Exception &err) { CV_RETHROW_ERROR(err, format("The following error occured while making forward() for layer \"%s\": %s", ld.name.c_str(), err.err.c_str())); }*/ ld.flag = 1; } void forwardAll() { MapIdToLayerData::iterator it; for (it = layers.begin(); it != layers.end(); it++) it->second.flag = 0; for (it = layers.begin(); it != layers.end(); it++) forwardLayer(it->second, false); } void getLayerShapesRecursively(int id, LayersShapesMap& inOutShapes) { std::vector<LayerPin>& inputLayerIds = layers[id].inputBlobsId; if (inOutShapes[id].in.empty()) { for(int i = 0; i < inputLayerIds.size(); i++) { int layerId = inputLayerIds[i].lid; LayersShapesMap::iterator it = inOutShapes.find(layerId); if(it 
== inOutShapes.end() || it->second.out.empty()) { getLayerShapesRecursively(layerId, inOutShapes); } const MatShape& shape = inOutShapes[layerId].out[inputLayerIds[i].oid]; inOutShapes[id].in.push_back(shape); } } const ShapesVec& is = inOutShapes[id].in; ShapesVec& os = inOutShapes[id].out; ShapesVec& ints = inOutShapes[id].internal; int requiredOutputs = layers[id].requiredOutputs.size(); inOutShapes[id].inplace = layers[id].getLayerInstance()->getMemoryShapes(is, requiredOutputs, os, ints); } void getLayersShapes(const ShapesVec& netInputShapes, LayersShapesMap& inOutShapes) { inOutShapes.clear(); inOutShapes[0].in = netInputShapes; //insert shape for first input layer for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); it++) { getLayerShapesRecursively(it->first, inOutShapes); } } void getLayerShapes(const ShapesVec& netInputShapes, const int layerId, LayerShapes& shapes) { LayersShapesMap inOutShapes; inOutShapes[0].in = netInputShapes; //insert shape for first input layer getLayerShapesRecursively(layerId, inOutShapes); shapes = inOutShapes[layerId]; } }; Net::Net() : impl(new Net::Impl) { } Net::~Net() { } int Net::addLayer(const String &name, const String &type, LayerParams &params) { if (name.find('.') != String::npos) { CV_Error(Error::StsBadArg, "Added layer name \"" + name + "\" must not contain dot symbol"); return -1; } if (impl->getLayerId(name) >= 0) { CV_Error(Error::StsBadArg, "Layer \"" + name + "\" already into net"); return -1; } int id = ++impl->lastLayerId; impl->layerNameToId.insert(std::make_pair(name, id)); impl->layers.insert(std::make_pair(id, LayerData(id, name, type, params))); return id; } int Net::addLayerToPrev(const String &name, const String &type, LayerParams &params) { int prvLid = impl->lastLayerId; int newLid = this->addLayer(name, type, params); this->connect(prvLid, 0, newLid, 0); return newLid; } void Net::connect(int outLayerId, int outNum, int inpLayerId, int inpNum) { impl->connect(outLayerId, 
outNum, inpLayerId, inpNum); } void Net::connect(String _outPin, String _inPin) { LayerPin outPin = impl->getPinByAlias(_outPin); LayerPin inpPin = impl->getPinByAlias(_inPin); CV_Assert(outPin.valid() && inpPin.valid()); impl->connect(outPin.lid, outPin.oid, inpPin.lid, inpPin.oid); } void Net::allocate() { impl->setUpNet(); } void Net::forward(LayerId toLayer) { impl->setUpNet(); if (toLayer.isString() && toLayer.get<String>().empty()) impl->forwardAll(); else impl->forwardLayer(impl->getLayerData(toLayer)); } void Net::setNetInputs(const std::vector<String> &inputBlobNames) { impl->netInputLayer->setNames(inputBlobNames); } void Net::setBlob(String outputName, const Mat &blob_) { LayerPin pin = impl->getPinByAlias(outputName); if (!pin.valid()) CV_Error(Error::StsObjectNotFound, "Requested blob \"" + outputName + "\" not found"); LayerData &ld = impl->layers[pin.lid]; ld.outputBlobs.resize( std::max(pin.oid+1, (int)ld.requiredOutputs.size()) ); MatShape prevShape = shape(ld.outputBlobs[pin.oid]); ld.outputBlobs[pin.oid] = blob_.clone(); impl->netWasAllocated = impl->netWasAllocated && prevShape == shape(blob_); } Mat Net::getBlob(String outputName) { LayerPin pin = impl->getPinByAlias(outputName); if (!pin.valid()) CV_Error(Error::StsObjectNotFound, "Requested blob \"" + outputName + "\" not found"); LayerData &ld = impl->layers[pin.lid]; if ((size_t)pin.oid >= ld.outputBlobs.size()) { CV_Error(Error::StsOutOfRange, "Layer \"" + ld.name + "\" produce only " + toString(ld.outputBlobs.size()) + " outputs, the #" + toString(pin.oid) + " was requsted"); } return ld.outputBlobs[pin.oid]; } Mat Net::getParam(LayerId layer, int numParam) { LayerData &ld = impl->getLayerData(layer); std::vector<Mat> &layerBlobs = ld.layerInstance->blobs; CV_Assert(numParam < (int)layerBlobs.size()); return layerBlobs[numParam]; } void Net::setParam(LayerId layer, int numParam, const Mat &blob) { LayerData &ld = impl->getLayerData(layer); std::vector<Mat> &layerBlobs = 
ld.layerInstance->blobs; CV_Assert(numParam < (int)layerBlobs.size()); //we don't make strong checks, use this function carefully layerBlobs[numParam] = blob; } int Net::getLayerId(const String &layer) { return impl->getLayerId(layer); } void Net::deleteLayer(LayerId) { CV_Error(Error::StsNotImplemented, ""); } Ptr<Layer> Net::getLayer(LayerId layerId) { LayerData &ld = impl->getLayerData(layerId); if (!ld.layerInstance) CV_Error(Error::StsNullPtr, format("Requested layer \"%s\" was not initialized", ld.name.c_str())); return ld.layerInstance; } std::vector<Ptr<Layer> > Net::getLayerInputs(LayerId layerId) { LayerData &ld = impl->getLayerData(layerId); if (!ld.layerInstance) CV_Error(Error::StsNullPtr, format("Requested layer \"%s\" was not initialized", ld.name.c_str())); std::vector<Ptr<Layer> > inputLayers; inputLayers.reserve(ld.inputLayersId.size()); std::set<int>::iterator it; for (it = ld.inputLayersId.begin(); it != ld.inputLayersId.end(); ++it) { inputLayers.push_back(getLayer(*it)); } return inputLayers; } std::vector<String> Net::getLayerNames() const { std::vector<String> res; res.reserve(impl->layers.size()); Impl::MapIdToLayerData::iterator it; for (it = impl->layers.begin(); it != impl->layers.end(); it++) { if (it->second.id) //skip Data layer res.push_back(it->second.name); } return res; } bool Net::empty() const { return impl->layers.size() <= 1; //first layer is default Data layer } std::vector<int> Net::getUnconnectedOutLayers() const { std::vector<int> layersIds; Impl::MapIdToLayerData::iterator it; for (it = impl->layers.begin(); it != impl->layers.end(); it++) { int lid = it->first; LayerData &ld = it->second; if (ld.requiredOutputs.size() == 0) layersIds.push_back(lid); } return layersIds; } void Net::getLayersShapes(const Net::Impl::ShapesVec& netInputShapes, std::vector<int>* layersIds, std::vector<Net::Impl::ShapesVec>* inLayersShapes, std::vector<Net::Impl::ShapesVec>* outLayersShapes) const { if ((layersIds || inLayersShapes || 
outLayersShapes) == false) return; if (layersIds) layersIds->clear(); if (inLayersShapes) inLayersShapes->clear(); if (outLayersShapes) outLayersShapes->clear(); Impl::LayersShapesMap inOutShapes; impl->getLayersShapes(netInputShapes, inOutShapes); for(Impl::LayersShapesMap::const_iterator it = inOutShapes.begin(); it != inOutShapes.end(); it++) { if (layersIds) layersIds->push_back(it->first); if (inLayersShapes) inLayersShapes->push_back(it->second.in); if (outLayersShapes) outLayersShapes->push_back(it->second.out); } } void Net::getLayersShapes(const MatShape& netInputShape, std::vector<int>* layerIds, std::vector<Net::Impl::ShapesVec>* inLayersShapes, std::vector<Net::Impl::ShapesVec>* outLayersShapes) const { getLayersShapes(Net::Impl::ShapesVec(1, netInputShape), layerIds, inLayersShapes, outLayersShapes); } void Net::getLayerShapes(const MatShape& netInputShape, const int layerId, Net::Impl::ShapesVec* inLayerShapes, Net::Impl::ShapesVec* outLayerShapes) const { getLayerShapes(Net::Impl::ShapesVec(1, netInputShape), layerId, inLayerShapes, outLayerShapes); } void Net::getLayerShapes(const Net::Impl::ShapesVec& netInputShapes, const int layerId, Net::Impl::ShapesVec* inLayerShapes, Net::Impl::ShapesVec* outLayerShapes) const { Impl::LayerShapes shapes; impl->getLayerShapes(netInputShapes, layerId, shapes); if (inLayerShapes) *inLayerShapes = shapes.in; if (outLayerShapes) *outLayerShapes = shapes.out; } int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const { int64 flops = 0; std::vector<int> ids; std::vector<std::vector<MatShape> > inShapes, outShapes; getLayersShapes(netInputShapes, &ids, &inShapes, &outShapes); CV_Assert(inShapes.size() == outShapes.size()); CV_Assert(inShapes.size() == ids.size()); for(int i = 0; i < ids.size(); i++) { flops += impl->layers[ids[i]].getLayerInstance()->getFLOPS(inShapes[i], outShapes[i]); } return flops; } int64 Net::getFLOPS(const MatShape& netInputShape) const { return getFLOPS(std::vector<MatShape>(1, 
netInputShape)); } int64 Net::getFLOPS(const int layerId, const std::vector<MatShape>& netInputShapes) const { Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId); CV_Assert(layer != impl->layers.end()); Impl::LayerShapes shapes; impl->getLayerShapes(netInputShapes, layerId, shapes); return layer->second.getLayerInstance()->getFLOPS(shapes.in, shapes.out); } int64 Net::getFLOPS(const int layerId, const MatShape& netInputShape) const { return getFLOPS(layerId, std::vector<MatShape>(1, netInputShape)); } void Net::getLayerTypes(std::vector<String>& layersTypes) const { layersTypes.clear(); std::map<String, int> layers; for (Impl::MapIdToLayerData::iterator it = impl->layers.begin(); it != impl->layers.end(); it++) { if (layers.find(it->second.type) == layers.end()) layers[it->second.type] = 0; layers[it->second.type]++; } for (std::map<String, int>::iterator it = layers.begin(); it != layers.end(); it++) { layersTypes.push_back(it->first); } } int Net::getLayersCount(const String& layerType) const { int count = 0; for (Impl::MapIdToLayerData::iterator it = impl->layers.begin(); it != impl->layers.end(); it++) { if (it->second.type == layerType) count++; } return count; } void Net::getMemoryConsumption(const int layerId, const std::vector<MatShape>& netInputShapes, size_t& weights, size_t& blobs) const { Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId); CV_Assert(layer != impl->layers.end()); weights = blobs = 0; for(int i = 0; i < layer->second.params.blobs.size(); i++) { const Mat& weightsBlob = layer->second.params.blobs[i]; weights += weightsBlob.total()*weightsBlob.elemSize(); } std::vector<MatShape> outLayerShapes; getLayerShapes(netInputShapes, layerId, 0, &outLayerShapes); for(int i = 0; i < outLayerShapes.size(); i++) { blobs += total(outLayerShapes[i]) * sizeof(float); } } void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes, size_t& weights, size_t& blobs) const { std::vector<int> layerIds; 
std::vector<std::vector<MatShape> > outLayerShapes; getLayersShapes(netInputShapes, &layerIds, 0, &outLayerShapes); weights = blobs = 0; for(int i = 0; i < layerIds.size(); i++) { Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]); CV_Assert(layer != impl->layers.end()); for(int j = 0; j < layer->second.params.blobs.size(); j++) { const Mat& weightsBlob = layer->second.params.blobs[j]; weights += weightsBlob.total()*weightsBlob.elemSize(); } for(int j = 0; j < outLayerShapes[i].size(); j++) { blobs += total(outLayerShapes[i][j]) * sizeof(float); } } } void Net::getMemoryConsumption(const int layerId, const MatShape& netInputShape, size_t& weights, size_t& blobs) const { getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape), weights, blobs); } void Net::getMemoryConsumption(const MatShape& netInputShape, size_t& weights, size_t& blobs) const { getMemoryConsumption(std::vector<MatShape>(1, netInputShape), weights, blobs); } ////////////////////////////////////////////////////////////////////////// Importer::~Importer() {} Layer::Layer() {} Layer::Layer(const LayerParams &params) : blobs(params.blobs), name(params.name), type(params.type) { } void Layer::setParamsFrom(const LayerParams &params) { blobs = params.blobs; name = params.name; type = params.type; } int Layer::inputNameToIndex(String) { return -1; } int Layer::outputNameToIndex(String) { return -1; } template <typename T> static void vecToPVec(const std::vector<T> &v, std::vector<T*> &pv) { pv.resize(v.size()); for (size_t i = 0; i < v.size(); i++) pv[i] = const_cast<T*>(&v[i]); } void Layer::finalize(const std::vector<Mat> &inputs, std::vector<Mat> &outputs) { std::vector<Mat*> inputsp; vecToPVec(inputs, inputsp); this->finalize(inputsp, outputs); } void Layer::finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) { (void)input;(void)output; } std::vector<Mat> Layer::finalize(const std::vector<Mat> &inputs) { std::vector<Mat> outputs; this->finalize(inputs, 
outputs); return outputs; } void Layer::forward(const std::vector<Mat> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) { std::vector<Mat*> inputsp; vecToPVec(inputs, inputsp); this->forward(inputsp, outputs, internals); } void Layer::run(const std::vector<Mat> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) { std::vector<Mat*> inputsp; vecToPVec(inputs, inputsp); this->finalize(inputsp, outputs); this->forward(inputsp, outputs, internals); } Layer::~Layer() {} bool Layer::getMemoryShapes(const std::vector<MatShape> &inputs, const int requiredOutputs, std::vector<MatShape> &outputs, std::vector<MatShape> &internals) const { CV_Assert(inputs.size()); outputs.assign(std::max(requiredOutputs, (int)inputs.size()), inputs[0]); return false; } ////////////////////////////////////////////////////////////////////////// struct LayerFactory::Impl : public std::map<String, LayerFactory::Constuctor> { }; Ptr<LayerFactory::Impl> LayerFactory::impl () { // allocate on first use static Ptr<LayerFactory::Impl> impl_(new LayerFactory::Impl()); return impl_; } void LayerFactory::registerLayer(const String &_type, Constuctor constructor) { String type = _type.toLowerCase(); Impl::iterator it = impl()->find(type); if (it != impl()->end() && it->second != constructor) { CV_Error(cv::Error::StsBadArg, "Layer \"" + type + "\" already was registered"); } impl()->insert(std::make_pair(type, constructor)); } void LayerFactory::unregisterLayer(const String &_type) { String type = _type.toLowerCase(); impl()->erase(type); } Ptr<Layer> LayerFactory::createLayerInstance(const String &_type, LayerParams& params) { String type = _type.toLowerCase(); Impl::const_iterator it = LayerFactory::impl()->find(type); if (it != impl()->end()) { return it->second(params); } else { return Ptr<Layer>(); //NULL } } } }
29.534081
152
0.585364
[ "shape", "vector" ]
0fab728eaefa3467c89c967091a168e1072ae7b8
3,696
hpp
C++
src/mesh/mechanical/plane/fem_mesh.hpp
annierhea/neon
4eb51a06bda6bbf32c54fff8f39c9e02d429cfd1
[ "MIT" ]
null
null
null
src/mesh/mechanical/plane/fem_mesh.hpp
annierhea/neon
4eb51a06bda6bbf32c54fff8f39c9e02d429cfd1
[ "MIT" ]
null
null
null
src/mesh/mechanical/plane/fem_mesh.hpp
annierhea/neon
4eb51a06bda6bbf32c54fff8f39c9e02d429cfd1
[ "MIT" ]
null
null
null
#pragma once #include "mesh/boundary/dirichlet.hpp" #include "mesh/mechanical/plane/boundary/nonfollower_load.hpp" #include "mesh/mechanical/plane/fem_submesh.hpp" #include "io/file_output.hpp" #include <map> #include <vector> #include <unordered_map> namespace neon { class basic_mesh; namespace mechanical::plane { class fem_mesh { public: using internal_variable_type = internal_variables_t; using traits = fem_submesh::traits; public: fem_mesh(basic_mesh const& basic_mesh, json const& material_data, json const& simulation_data, double const generate_time_step); /// The number of active degrees of freedom in this mesh [[nodiscard]] auto active_dofs() const { return traits::dofs_per_node * coordinates->size(); } /// Checks the boundary conditions and constitutive model to ensure /// resulting matrix from this mesh is symmetric. \sa LinearSolver [[nodiscard]] bool is_symmetric() const; void update_internal_forces(vector const& fint) { reaction_forces = -fint; } /// Deform the body by updating the displacement x = X + u /// and update the internal variables with the new deformation and the /// time step increment void update_internal_variables(vector const& u, double const time_step_size = 0.0); /// Update the internal variables if converged, otherwise revert back /// for next attempted load increment void save_internal_variables(bool const have_converged); /// Constant access to the sub-meshes [[nodiscard]] std::vector<fem_submesh> const& meshes() const noexcept { return submeshes; } /// Mutable access to the sub-meshes [[nodiscard]] std::vector<fem_submesh>& meshes() noexcept { return submeshes; } [[nodiscard]] auto const& dirichlet_boundaries() const { return displacement_bcs; } [[nodiscard]] auto const& nonfollower_boundaries() const { return nonfollower_loads; } /// Gathers the time history for each boundary condition and /// returns a sorted vector which may contain duplicated entries. 
/// \sa adaptive_time_step [[nodiscard]] std::vector<double> time_history() const; [[nodiscard]] auto const& geometry() const { return *coordinates; } /// Write out results to file void write(std::int32_t const time_step, double const current_time); protected: void check_boundary_conditions(json const& boundary_data) const; void allocate_variable_names(); void allocate_boundary_conditions(json const& boundary_data, basic_mesh const& basic_mesh); void allocate_displacement_boundary(json const& boundary, basic_mesh const& basic_mesh); [[nodiscard]] bool is_nonfollower_load(std::string const& boundary_type) const; protected: std::shared_ptr<material_coordinates> coordinates; std::vector<fem_submesh> submeshes; /// Displacement boundaries std::map<std::string, std::vector<dirichlet>> displacement_bcs; /// Nonfollower boundaries std::map<std::string, nonfollower_load_boundary> nonfollower_loads; /// Internal nodal forces for reaction forces vector reaction_forces; std::unordered_map<std::string, int> const dof_table = {{"x", 0}, {"y", 1}}; /// This time step is taken from /// "Time[Period][Increments][Initial]" in the /// input file. It is used in the boundary class /// to generate cyclic loading for example. This /// ensures the compatibility between user /// defined and sinusoidal boundary conditions. double generate_time_step; /// File output handle std::unique_ptr<io::file_output> writer; /// Output variables std::vector<variable::types> output_variables; }; } }
33
98
0.718344
[ "mesh", "geometry", "vector", "model" ]
0fae847d11b919a0bcff97b40fca4b01fa6c831e
10,740
cpp
C++
src/tools/tools/002e.Sanity/cube4_sanity.cpp
OpenCMISS-Dependencies/cube
bb425e6f75ee5dbdf665fa94b241b48deee11505
[ "Cube" ]
null
null
null
src/tools/tools/002e.Sanity/cube4_sanity.cpp
OpenCMISS-Dependencies/cube
bb425e6f75ee5dbdf665fa94b241b48deee11505
[ "Cube" ]
null
null
null
src/tools/tools/002e.Sanity/cube4_sanity.cpp
OpenCMISS-Dependencies/cube
bb425e6f75ee5dbdf665fa94b241b48deee11505
[ "Cube" ]
2
2016-09-19T00:16:05.000Z
2021-03-29T22:06:45.000Z
/**************************************************************************** ** CUBE http://www.scalasca.org/ ** ***************************************************************************** ** Copyright (c) 1998-2016 ** ** Forschungszentrum Juelich GmbH, Juelich Supercomputing Centre ** ** ** ** Copyright (c) 2009-2015 ** ** German Research School for Simulation Sciences GmbH, ** ** Laboratory for Parallel Programming ** ** ** ** This software may be modified and distributed under the terms of ** ** a BSD-style license. See the COPYING file in the package base ** ** directory for details. ** ****************************************************************************/ /** * \file cube_sanity.cpp * \brief Runs some sanity checks on a .cube file and reports eventual errors. * */ // #ifdef CUBE_COMPRESSED #include "CubeZfstream.h" // #endif #include <fstream> #include <sstream> #include <string> #include <cstdlib> #include <vector> #include <iostream> #include <unistd.h> #if 0 # include <getopt.h> #endif #include "MdAggrCube.h" #include "CnodeConstraint.h" #include "RegionConstraint.h" #include "PrintableCCnode.h" #include "CCnode.h" #include "CRegion.h" #include "Filter.h" #include "Cube.h" #include "CubeCnode.h" #include "CubeRegion.h" #include "CubeError.h" #include "CubeServices.h" #define CUBE_SANITY_INTERNAL_ERROR -1 #define CUBE_SANITY_OK 0 #define CUBE_SANITY_TESTS_FAILED 1 using namespace std; using namespace cube; #include "sanity_calls.h" int main( int argc, char* argv[] ) { int ch = 0; MdAggrCube* cube = NULL; map<char, bool> settings; settings[ 'n' ] = false; settings[ 'l' ] = false; Filter filter; string output_filename; Constraint_Verbosity verbosity_level = FAILVERB_SILENT; ofstream out; // list of tests RegionConstraint* non_empty = NULL; NoAnonymousFunctions* no_anonymous_functions = NULL; NoTruncatedFunctions* no_truncated_functions = NULL; RegionConstraint* filename_not_empty = NULL; RegionConstraint* proper_line_numbers = NULL; CnodeConstraint* 
no_tracing_outside_init_and_finalize = NULL; NoNegativeInclusiveMetrics* no_negative_inclusive_metrics = NULL; NoNegativeExclusiveMetrics* no_negative_exclusive_metrics = NULL; #if 0 int option_index = 0; const string USAGE = "Usage: " + string( argv[ 0 ] ) + " [ flags ] <cube experiment>\n" "where flags basically turns certains tests on or off:\n" " -h, --help Help; Output a brief help message.\n" " -n, --no-negative-values Disables the (time consuming) ckeck for negative\n" " metric values.\n" " -l, --no-line-numbers Disables checks for line numbers.\n" " -f, --filter <file.filt> Checks whether a node's name is matched by a\n" " pattern in file.filt.\n" " -o, --output <output_file> Path of the output file. If no output file is\n" " given, detailed output will be surpressed. A\n" " summary will always be printed out to stdout.\n"; static struct option long_options[] = { { "help", no_argument, 0, 'h' }, { "no-negative-values", no_argument, 0, 'n' }, { "no-line-numbers", no_argument, 0, 'l' }, { "filter", required_argument, 0, 'f' }, { "output", required_argument, 0, 'o' } }; #else const string USAGE = "Usage: " + string( argv[ 0 ] ) + " [ flags ] <cube experiment>\n" "where flags basically turns certains tests on or off:\n" " -h Help; Output a brief help message.\n" " -n Disables the (time consuming) check for negative\n" " inclusive metric values.\n" " -x Disables the (time consuming) check for negative\n" " exclusive metric values.\n" " -l Disables checks for line numbers.\n" " -f <file.filt> Checks whether a node's name is matched by a\n" " pattern in file.filt.\n" " -o <output_file> Path of the output file. If no output file is\n" " given, detailed output will be surpressed. 
A\n" " summary will always be printed out to stdout.\n" " -v <0|1|2|3> Verbosity level of output\n"; #endif const char* short_options = "o:f:nxv:lh?"; #if 0 while ( ( ch = getopt_long( argc, argv, short_options, long_options, &option_index ) ) != -1 ) #else while ( ( ch = getopt( argc, argv, short_options ) ) != -1 ) #endif { switch ( ch ) { case 'n': settings[ 'n' ] = true; break; case 'x': settings[ 'x' ] = true; break; case 'l': settings[ 'l' ] = true; break; case 'f': filter.add_file( string( optarg ) ); break; case 'o': output_filename = string( optarg ); break; case 'h': cout << USAGE; return 0; case 'v': verbosity_level = static_cast<Constraint_Verbosity>( strtol( optarg, NULL, 0 ) ); break; default: cout << USAGE << "Error: Unknown option -" << ch << endl; return 1; } } try { if ( argc - optind != 1 ) { cout << USAGE << endl; return CUBE_SANITY_INTERNAL_ERROR; } /* XXX: BAD HACK! The >> operator is not overloaded for MdAggrCube and * Cube is not virtual. */ Cube* _cube = openCubeFile( argv[ optind ] ); cube = new MdAggrCube( *_cube ); delete _cube; CnodeSubForest* all = cube->get_forest(); non_empty = new NameNotEmptyOrUnknown( all ); if ( filter.empty() == false ) { ( new ProperlyFiltered( all, filter ) )->set_parent( non_empty ); } no_anonymous_functions = new NoAnonymousFunctions( all ); no_anonymous_functions->set_parent( non_empty ); no_truncated_functions = new NoTruncatedFunctions( all ); no_truncated_functions->set_parent( non_empty ); filename_not_empty = new FilenameNotEmpty( all ); filename_not_empty->set_parent( non_empty ); if ( settings[ 'l' ] == false ) { proper_line_numbers = new ProperLineNumbers( all ); proper_line_numbers->set_parent( filename_not_empty ); } no_tracing_outside_init_and_finalize = new NoTracingOutsideInitAndFinalize( all ); no_tracing_outside_init_and_finalize->set_parent( non_empty ); if ( settings[ 'n' ] == false ) { no_negative_inclusive_metrics = new NoNegativeInclusiveMetrics( cube ); 
no_negative_inclusive_metrics->set_verbosity( verbosity_level ); } if ( settings[ 'x' ] == false ) { no_negative_exclusive_metrics = new NoNegativeExclusiveMetrics( cube ); no_negative_exclusive_metrics->set_verbosity( verbosity_level ); } if ( output_filename.empty() == false ) { out.open( output_filename.c_str(), ios::out ); non_empty->set_details_stream( out, true ); if ( no_negative_inclusive_metrics != NULL ) { no_negative_inclusive_metrics->set_details_stream( out, true ); } if ( no_negative_exclusive_metrics != NULL ) { no_negative_exclusive_metrics->set_details_stream( out, true ); } } non_empty->set_verbosity( verbosity_level, true ); non_empty->check(); if ( no_negative_inclusive_metrics != NULL ) { no_negative_inclusive_metrics->check(); } if ( no_negative_exclusive_metrics != NULL ) { no_negative_exclusive_metrics->check(); } if ( output_filename.empty() == false ) { out.close(); } delete no_truncated_functions; delete no_anonymous_functions; delete non_empty; delete no_negative_exclusive_metrics; delete no_negative_inclusive_metrics; delete filename_not_empty; delete proper_line_numbers; delete no_tracing_outside_init_and_finalize; delete cube; } catch ( const Error& e ) // Does this pointer have to be deleted? Memory allocated not in stack, but in heap... { cerr << "Error: " << e.get_msg() << endl; delete no_truncated_functions; delete no_anonymous_functions; delete non_empty; delete no_negative_exclusive_metrics; delete no_negative_inclusive_metrics; delete filename_not_empty; delete proper_line_numbers; delete no_tracing_outside_init_and_finalize; delete cube; return CUBE_SANITY_INTERNAL_ERROR; } }
37.816901
117
0.486872
[ "vector" ]
0fb1e646d016f0eb502df30f792d85625a7f32e3
630
cpp
C++
nau/src/nau/render/iAPISupport.cpp
Khirion/nau
47a2ad8e0355a264cd507da5e7bba1bf7abbff95
[ "MIT" ]
29
2015-09-16T22:28:30.000Z
2022-03-11T02:57:36.000Z
nau/src/nau/render/iAPISupport.cpp
Khirion/nau
47a2ad8e0355a264cd507da5e7bba1bf7abbff95
[ "MIT" ]
1
2017-03-29T13:32:58.000Z
2017-03-31T13:56:03.000Z
nau/src/nau/render/iAPISupport.cpp
Khirion/nau
47a2ad8e0355a264cd507da5e7bba1bf7abbff95
[ "MIT" ]
10
2015-10-15T14:20:15.000Z
2022-02-17T10:37:29.000Z
#include "nau/render/iAPISupport.h" #include "nau/config.h" #ifdef NAU_OPENGL #include "nau/render/opengl/glAPISupport.h" #endif using namespace nau::render; IAPISupport *IAPISupport::Instance = NULL; IAPISupport * IAPISupport::GetInstance() { if (Instance == NULL) { #ifdef NAU_OPENGL Instance = new GLAPISupport(); #endif } return Instance; } bool IAPISupport::apiSupport(APIFeatureSupport feature) { //if (Instance == ) return m_APISupport[feature]; } unsigned int IAPISupport::getVersion() { return m_Version; } IAPISupport::IAPISupport() { } IAPISupport::~IAPISupport() { m_APISupport.clear(); }
12.6
52
0.719048
[ "render" ]
0fb3fe843845ef45df96f6d5f7dbe48f4075805e
5,253
hpp
C++
src/mlpack/core/data/save.hpp
abinezer/mlpack
8002e49150742acea4e76deef8161653b8350936
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2019-11-07T14:34:37.000Z
2019-11-07T14:34:37.000Z
src/mlpack/core/data/save.hpp
abinezer/mlpack
8002e49150742acea4e76deef8161653b8350936
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2020-04-10T17:39:50.000Z
2020-04-11T14:56:25.000Z
src/mlpack/core/data/save.hpp
abinezer/mlpack
8002e49150742acea4e76deef8161653b8350936
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
/** * @file save.hpp * @author Ryan Curtin * * Save an Armadillo matrix to file. This is necessary because Armadillo does * not transpose matrices upon saving, and it allows us to give better error * output. * * mlpack is free software; you may redistribute it and/or modify it under the * terms of the 3-clause BSD license. You should have received a copy of the * 3-clause BSD license along with mlpack. If not, see * http://www.opensource.org/licenses/BSD-3-Clause for more information. */ #ifndef MLPACK_CORE_DATA_SAVE_HPP #define MLPACK_CORE_DATA_SAVE_HPP #include <mlpack/core/util/log.hpp> #include <mlpack/core/arma_extend/arma_extend.hpp> // Includes Armadillo. #include <string> #include "format.hpp" #include "image_info.hpp" namespace mlpack { namespace data /** Functions to load and save matrices. */ { /** * Saves a matrix to file, guessing the filetype from the extension. This * will transpose the matrix at save time. If the filetype cannot be * determined, an error will be given. * * The supported types of files are the same as found in Armadillo: * * - CSV (csv_ascii), denoted by .csv, or optionally .txt * - ASCII (raw_ascii), denoted by .txt * - Armadillo ASCII (arma_ascii), also denoted by .txt * - PGM (pgm_binary), denoted by .pgm * - PPM (ppm_binary), denoted by .ppm * - Raw binary (raw_binary), denoted by .bin * - Armadillo binary (arma_binary), denoted by .bin * - HDF5 (hdf5_binary), denoted by .hdf5, .hdf, .h5, or .he5 * * If the file extension is not one of those types, an error will be given. If * the 'fatal' parameter is set to true, a std::runtime_error exception will be * thrown upon failure. If the 'transpose' parameter is set to true, the matrix * will be transposed before saving. Generally, because mlpack stores matrices * in a column-major format and most datasets are stored on disk as row-major, * this parameter should be left at its default value of 'true'. * * @param filename Name of file to save to. 
* @param matrix Matrix to save into file. * @param fatal If an error should be reported as fatal (default false). * @param transpose If true, transpose the matrix before saving. * @return Boolean value indicating success or failure of save. */ template<typename eT> bool Save(const std::string& filename, const arma::Mat<eT>& matrix, const bool fatal = false, bool transpose = true); /** * Saves a model to file, guessing the filetype from the extension, or, * optionally, saving the specified format. If automatic extension detection is * used and the filetype cannot be determined, and error will be given. * * The supported types of files are the same as what is supported by the * boost::serialization library: * * - text, denoted by .txt * - xml, denoted by .xml * - binary, denoted by .bin * * The format parameter can take any of the values in the 'format' enum: * 'format::autodetect', 'format::text', 'format::xml', and 'format::binary'. * The autodetect functionality operates on the file extension (so, "file.txt" * would be autodetected as text). * * The name parameter should be specified to indicate the name of the structure * to be saved. If Load() is later called on the generated file, the name used * to load should be the same as the name used for this call to Save(). * * If the parameter 'fatal' is set to true, then an exception will be thrown in * the event of a save failure. Otherwise, the method will return false and the * relevant error information will be printed to Log::Warn. */ template<typename T> bool Save(const std::string& filename, const std::string& name, T& t, const bool fatal = false, format f = format::autodetect); /** * Save the image file from the given matrix. * * @param filename Name of the image file. * @param matrix Matrix to save the image from. * @param info An object of ImageInfo class. * @param fatal If an error should be reported as fatal (default false). * @param transpose If true, transpose the matrix after loading. 
* @return Boolean value indicating success or failure of load. */ template<typename eT> bool Save(const std::string& filename, arma::Mat<eT>& matrix, ImageInfo& info, const bool fatal = false); /** * Save the image file from the given matrix. * * @param files A vector consisting of filenames. * @param matrix Matrix to save the image from. * @param info An object of ImageInfo class. * @param fatal If an error should be reported as fatal (default false). * @param transpose If true, transpose the matrix after loading. * @return Boolean value indicating success or failure of load. */ template<typename eT> bool Save(const std::vector<std::string>& files, arma::Mat<eT>& matrix, ImageInfo& info, const bool fatal = false); /** * Helper function to save files. Implementation in save_image.cpp. */ bool SaveImage(const std::string& filename, arma::Mat<unsigned char>& image, ImageInfo& info, const bool fatal = false); } // namespace data } // namespace mlpack // Include implementation. #include "save_impl.hpp" #endif
37.255319
80
0.701123
[ "object", "vector", "model" ]
0fbac1663de9428d85ba0d0d1690746504e65324
18,076
cpp
C++
Ipopt-3.12.7/Ipopt/src/Algorithm/IpNLPScaling.cpp
MikeBMW/CarND-MPC-Project
81e6e92de2768dce9fbfd1848de6f4465d468a0e
[ "MIT" ]
9
2020-07-09T06:40:31.000Z
2022-03-28T02:50:21.000Z
third-party/CoinIpopt/Ipopt/src/Algorithm/IpNLPScaling.cpp
WatsonZhouAnda/Cheetah-Software
05e416fb26f968300826f0deb0953be9afb22bfe
[ "MIT" ]
null
null
null
third-party/CoinIpopt/Ipopt/src/Algorithm/IpNLPScaling.cpp
WatsonZhouAnda/Cheetah-Software
05e416fb26f968300826f0deb0953be9afb22bfe
[ "MIT" ]
5
2020-12-01T01:41:12.000Z
2022-01-04T01:21:49.000Z
// Copyright (C) 2005, 2008 International Business Machines and others. // All Rights Reserved. // This code is published under the Eclipse Public License. // // $Id: IpNLPScaling.cpp 2269 2013-05-05 11:32:40Z stefan $ // // Authors: Carl Laird, Andreas Waechter IBM 2005-06-25 #include "IpNLPScaling.hpp" #include "IpSymMatrix.hpp" #include "IpScaledMatrix.hpp" #include "IpSymScaledMatrix.hpp" namespace Ipopt { #if COIN_IPOPT_VERBOSITY > 0 static const Index dbg_verbosity = 0; #endif NLPScalingObject::NLPScalingObject() {} NLPScalingObject::~NLPScalingObject() {} SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_x_LU_NonConst( const Matrix& Px_LU, const SmartPtr<const Vector>& lu, const VectorSpace& x_space) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_x_LU_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_x_LU = lu->MakeNew(); if (have_x_scaling()) { SmartPtr<Vector> tmp_x = x_space.MakeNew(); // move to full x space Px_LU.MultVector(1.0, *lu, 0.0, *tmp_x); // scale in full x space tmp_x = apply_vector_scaling_x_NonConst(ConstPtr(tmp_x)); // move back to x_L space Px_LU.TransMultVector(1.0, *tmp_x, 0.0, *scaled_x_LU); } else { scaled_x_LU->Copy(*lu); } return scaled_x_LU; } SmartPtr<const Vector> NLPScalingObject::apply_vector_scaling_x_LU( const Matrix& Px_LU, const SmartPtr<const Vector>& lu, const VectorSpace& x_space) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_x_LU", dbg_verbosity); if (have_x_scaling()) { return ConstPtr(apply_vector_scaling_x_LU_NonConst(Px_LU, lu, x_space)); } else { return lu; } } SmartPtr<Vector> NLPScalingObject::apply_vector_scaling_d_LU_NonConst( const Matrix& Pd_LU, const SmartPtr<const Vector>& lu, const VectorSpace& d_space) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_d_LU_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_d_LU = lu->MakeNew(); if (have_d_scaling()) { SmartPtr<Vector> tmp_d = d_space.MakeNew(); // move to full d space Pd_LU.MultVector(1.0, *lu, 0.0, *tmp_d); // scale in 
full x space tmp_d = apply_vector_scaling_d_NonConst(ConstPtr(tmp_d)); // move back to x_L space Pd_LU.TransMultVector(1.0, *tmp_d, 0.0, *scaled_d_LU); } else { scaled_d_LU->Copy(*lu); } return scaled_d_LU; } SmartPtr<const Vector> NLPScalingObject::apply_vector_scaling_d_LU( const Matrix& Pd_LU, const SmartPtr<const Vector>& lu, const VectorSpace& d_space) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_d_LU", dbg_verbosity); if (have_d_scaling()) { return ConstPtr(apply_vector_scaling_d_LU_NonConst(Pd_LU, lu, d_space)); } else { return lu; } } SmartPtr<Vector> NLPScalingObject::unapply_vector_scaling_d_LU_NonConst( const Matrix& Pd_LU, const SmartPtr<const Vector>& lu, const VectorSpace& d_space) { DBG_START_METH("NLPScalingObject::unapply_vector_scaling_d_LU_NonConst", dbg_verbosity); SmartPtr<Vector> unscaled_d_LU = lu->MakeNew(); if (have_d_scaling()) { SmartPtr<Vector> tmp_d = d_space.MakeNew(); // move to full d space Pd_LU.MultVector(1.0, *lu, 0.0, *tmp_d); // scale in full x space tmp_d = unapply_vector_scaling_d_NonConst(ConstPtr(tmp_d)); // move back to x_L space Pd_LU.TransMultVector(1.0, *tmp_d, 0.0, *unscaled_d_LU); } else { unscaled_d_LU->Copy(*lu); } return unscaled_d_LU; } SmartPtr<const Vector> NLPScalingObject::unapply_vector_scaling_d_LU( const Matrix& Pd_LU, const SmartPtr<const Vector>& lu, const VectorSpace& d_space) { DBG_START_METH("NLPScalingObject::unapply_vector_scaling_d_LU", dbg_verbosity); if (have_d_scaling()) { return ConstPtr(unapply_vector_scaling_d_LU_NonConst(Pd_LU, lu, d_space)); } else { return lu; } } SmartPtr<Vector> NLPScalingObject::apply_grad_obj_scaling_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::apply_grad_obj_scaling_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_v = unapply_vector_scaling_x_NonConst(v); Number df = apply_obj_scaling(1.0); if (df != 1.) 
{ scaled_v->Scal(df); } return scaled_v; } SmartPtr<const Vector> NLPScalingObject::apply_grad_obj_scaling( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::apply_grad_obj_scaling", dbg_verbosity); Number df = apply_obj_scaling(1.); if (df != 1.) { SmartPtr<Vector> scaled_v = apply_grad_obj_scaling_NonConst(v); return ConstPtr(scaled_v); } else { SmartPtr<const Vector> scaled_v = unapply_vector_scaling_x(v); return scaled_v; } } SmartPtr<Vector> NLPScalingObject::unapply_grad_obj_scaling_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::unapply_grad_obj_scaling_NonConst", dbg_verbosity); SmartPtr<Vector> unscaled_v = apply_vector_scaling_x_NonConst(v); Number df = unapply_obj_scaling(1.); if (df != 1.) { unscaled_v->Scal(df); } return unscaled_v; } SmartPtr<const Vector> NLPScalingObject::unapply_grad_obj_scaling( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::unapply_grad_obj_scaling", dbg_verbosity); Number df = unapply_obj_scaling(1.); if (df != 1.) { SmartPtr<Vector> unscaled_v = unapply_grad_obj_scaling_NonConst(v); return ConstPtr(unscaled_v); } else { SmartPtr<const Vector> scaled_v = apply_vector_scaling_x(v); return scaled_v; } } StandardScalingBase::StandardScalingBase() {} StandardScalingBase::~StandardScalingBase() {} void StandardScalingBase::RegisterOptions(SmartPtr<RegisteredOptions> roptions) { roptions->AddNumberOption( "obj_scaling_factor", "Scaling factor for the objective function.", 1., "This option sets a scaling factor for the objective function. " "The scaling is seen internally by Ipopt but the unscaled objective is " "reported in the console output. " "If additional scaling parameters are computed " "(e.g. user-scaling or gradient-based), both factors are multiplied. 
" "If this value is chosen to be negative, Ipopt will " "maximize the objective function instead of minimizing it."); } bool StandardScalingBase::InitializeImpl(const OptionsList& options, const std::string& prefix) { options.GetNumericValue("obj_scaling_factor", obj_scaling_factor_, prefix); return true; } void StandardScalingBase::DetermineScaling( const SmartPtr<const VectorSpace> x_space, const SmartPtr<const VectorSpace> c_space, const SmartPtr<const VectorSpace> d_space, const SmartPtr<const MatrixSpace> jac_c_space, const SmartPtr<const MatrixSpace> jac_d_space, const SmartPtr<const SymMatrixSpace> h_space, SmartPtr<const MatrixSpace>& new_jac_c_space, SmartPtr<const MatrixSpace>& new_jac_d_space, SmartPtr<const SymMatrixSpace>& new_h_space, const Matrix& Px_L, const Vector& x_L, const Matrix& Px_U, const Vector& x_U) { SmartPtr<Vector> dc; SmartPtr<Vector> dd; DetermineScalingParametersImpl(x_space, c_space, d_space, jac_c_space, jac_d_space, h_space, Px_L, x_L, Px_U, x_U, df_, dx_, dc, dd); df_ *= obj_scaling_factor_; if (Jnlst().ProduceOutput(J_DETAILED, J_MAIN)) { Jnlst().Printf(J_DETAILED, J_MAIN, "objective scaling factor = %g\n", df_); if (IsValid(dx_)) { Jnlst().Printf(J_DETAILED, J_MAIN, "x scaling provided\n"); } else { Jnlst().Printf(J_DETAILED, J_MAIN, "No x scaling provided\n"); } if (IsValid(dc)) { Jnlst().Printf(J_DETAILED, J_MAIN, "c scaling provided\n"); } else { Jnlst().Printf(J_DETAILED, J_MAIN, "No c scaling provided\n"); } if (IsValid(dd)) { Jnlst().Printf(J_DETAILED, J_MAIN, "d scaling provided\n"); } else { Jnlst().Printf(J_DETAILED, J_MAIN, "No d scaling provided\n"); } } if (Jnlst().ProduceOutput(J_VECTOR, J_MAIN)) { if (IsValid(dx_)) { dx_->Print(Jnlst(), J_VECTOR, J_MAIN, "x scaling vector"); } if (IsValid(dc)) { dc->Print(Jnlst(), J_VECTOR, J_MAIN, "c scaling vector"); } if (IsValid(dd)) { dd->Print(Jnlst(), J_VECTOR, J_MAIN, "d scaling vector"); } } // create the scaling matrix spaces if (IsValid(dx_) || IsValid(dc)) { 
scaled_jac_c_space_ = new ScaledMatrixSpace(ConstPtr(dc), false, jac_c_space, ConstPtr(dx_), true); new_jac_c_space = GetRawPtr(scaled_jac_c_space_); } else { scaled_jac_c_space_ = NULL; new_jac_c_space = jac_c_space; } if (IsValid(dx_) || IsValid(dd)) { scaled_jac_d_space_ = new ScaledMatrixSpace(ConstPtr(dd), false, jac_d_space, ConstPtr(dx_), true); new_jac_d_space = GetRawPtr(scaled_jac_d_space_); } else { scaled_jac_d_space_ = NULL; new_jac_d_space =jac_d_space ; } if (IsValid(h_space)) { if (IsValid(dx_)) { scaled_h_space_ = new SymScaledMatrixSpace(ConstPtr(dx_), true, h_space); new_h_space = GetRawPtr(scaled_h_space_); } else { scaled_h_space_ = NULL; new_h_space = h_space; } } else { new_h_space = NULL; } } Number StandardScalingBase::apply_obj_scaling(const Number& f) { DBG_START_METH("NLPScalingObject::apply_obj_scaling", dbg_verbosity); return df_*f; } Number StandardScalingBase::unapply_obj_scaling(const Number& f) { DBG_START_METH("NLPScalingObject::unapply_obj_scaling", dbg_verbosity); return f/df_; } SmartPtr<Vector> StandardScalingBase::apply_vector_scaling_x_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::apply_vector_scaling_x_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_x = v->MakeNewCopy(); if (IsValid(dx_)) { scaled_x->ElementWiseMultiply(*dx_); } else { DBG_PRINT((1, "Creating copy in apply_vector_scaling_x_NonConst!")); } return scaled_x; } SmartPtr<const Vector> StandardScalingBase::apply_vector_scaling_x( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_x", dbg_verbosity); if (IsValid(dx_)) { return ConstPtr(apply_vector_scaling_x_NonConst(v)); } else { return v; } } SmartPtr<Vector> StandardScalingBase::unapply_vector_scaling_x_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::unapply_vector_scaling_x_NonConst", dbg_verbosity); SmartPtr<Vector> unscaled_x = v->MakeNewCopy(); if (IsValid(dx_)) { 
unscaled_x->ElementWiseDivide(*dx_); } else { DBG_PRINT((1, "Creating copy in unapply_vector_scaling_x_NonConst!")); } return unscaled_x; } SmartPtr<const Vector> StandardScalingBase::unapply_vector_scaling_x( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::unapply_vector_scaling_x", dbg_verbosity); if (IsValid(dx_)) { return ConstPtr(unapply_vector_scaling_x_NonConst(v)); } else { return v; } } SmartPtr<Vector> StandardScalingBase::apply_vector_scaling_c_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::apply_vector_scaling_c_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_c = v->MakeNewCopy(); if (IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_c_space_->RowScaling())) { scaled_c->ElementWiseMultiply(*scaled_jac_c_space_->RowScaling()); } else { DBG_PRINT((1,"Creating copy in apply_vector_scaling_c_NonConst!")); } return scaled_c; } SmartPtr<const Vector> StandardScalingBase::apply_vector_scaling_c( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_c", dbg_verbosity); if (IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_c_space_->RowScaling())) { return ConstPtr(apply_vector_scaling_c_NonConst(v)); } else { return v; } } SmartPtr<Vector> StandardScalingBase::unapply_vector_scaling_c_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::unapply_vector_scaling_c_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_c = v->MakeNewCopy(); if (IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_c_space_->RowScaling())) { scaled_c->ElementWiseDivide(*scaled_jac_c_space_->RowScaling()); } else { DBG_PRINT((1,"Creating copy in unapply_vector_scaling_c_NonConst!")); } return scaled_c; } SmartPtr<const Vector> StandardScalingBase::unapply_vector_scaling_c( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::unapply_vector_scaling_c", dbg_verbosity); if (IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_c_space_->RowScaling())) { 
return ConstPtr(unapply_vector_scaling_c_NonConst(v)); } else { return v; } } SmartPtr<Vector> StandardScalingBase::apply_vector_scaling_d_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::apply_vector_scaling_d_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_d = v->MakeNewCopy(); if (IsValid(scaled_jac_d_space_) && IsValid(scaled_jac_d_space_->RowScaling())) { scaled_d->ElementWiseMultiply(*scaled_jac_d_space_->RowScaling()); } else { DBG_PRINT((1,"Creating copy in apply_vector_scaling_d_NonConst!")); } return scaled_d; } SmartPtr<const Vector> StandardScalingBase::apply_vector_scaling_d( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::apply_vector_scaling_d", dbg_verbosity); if (IsValid(scaled_jac_d_space_) && IsValid(scaled_jac_d_space_->RowScaling())) { return ConstPtr(apply_vector_scaling_d_NonConst(v)); } else { return v; } } SmartPtr<Vector> StandardScalingBase::unapply_vector_scaling_d_NonConst( const SmartPtr<const Vector>& v) { DBG_START_METH("StandardScalingBase::unapply_vector_scaling_d_NonConst", dbg_verbosity); SmartPtr<Vector> scaled_d = v->MakeNewCopy(); if (IsValid(scaled_jac_d_space_) && IsValid(scaled_jac_d_space_->RowScaling())) { scaled_d->ElementWiseDivide(*scaled_jac_d_space_->RowScaling()); } else { DBG_PRINT((1,"Creating copy in unapply_vector_scaling_d_NonConst!")); } return scaled_d; } SmartPtr<const Vector> StandardScalingBase::unapply_vector_scaling_d( const SmartPtr<const Vector>& v) { DBG_START_METH("NLPScalingObject::unapply_vector_scaling_d", dbg_verbosity); if (IsValid(scaled_jac_d_space_) && IsValid(scaled_jac_d_space_->RowScaling())) { return ConstPtr(unapply_vector_scaling_d_NonConst(v)); } else { return v; } } // ToDo: matrix not passed by reference, so setting to NULL doesn't make difference SmartPtr<const Matrix> StandardScalingBase::apply_jac_c_scaling( SmartPtr<const Matrix> matrix) { DBG_START_METH("NLPScalingObject::apply_jac_c_scaling", dbg_verbosity); if 
(IsValid(scaled_jac_c_space_)) { SmartPtr<ScaledMatrix> ret = scaled_jac_c_space_->MakeNewScaledMatrix(false); ret->SetUnscaledMatrix(matrix); return GetRawPtr(ret); } else { SmartPtr<const Matrix> ret = matrix; matrix = NULL; return ret; } } SmartPtr<const Matrix> StandardScalingBase::apply_jac_d_scaling( SmartPtr<const Matrix> matrix) { DBG_START_METH("NLPScalingObject::apply_jac_d_scaling", dbg_verbosity); if (IsValid(scaled_jac_d_space_)) { SmartPtr<ScaledMatrix> ret = scaled_jac_d_space_->MakeNewScaledMatrix(false); ret->SetUnscaledMatrix(matrix); return GetRawPtr(ret); } else { SmartPtr<const Matrix> ret = matrix; matrix = NULL; return ret; } } SmartPtr<const SymMatrix> StandardScalingBase::apply_hessian_scaling( SmartPtr<const SymMatrix> matrix) { DBG_START_METH("NLPScalingObject::apply_hessian_scaling", dbg_verbosity); if (IsValid(scaled_h_space_)) { SmartPtr<SymScaledMatrix> ret = scaled_h_space_->MakeNewSymScaledMatrix(false); ret->SetUnscaledMatrix(matrix); return GetRawPtr(ret); } else { SmartPtr<const SymMatrix> ret = matrix; matrix = NULL; return ret; } } bool StandardScalingBase::have_x_scaling() { return IsValid(dx_); } bool StandardScalingBase::have_c_scaling() { return (IsValid(scaled_jac_c_space_) && IsValid(scaled_jac_c_space_->RowScaling())); } bool StandardScalingBase::have_d_scaling() { return (IsValid(scaled_jac_d_space_) && IsValid(scaled_jac_d_space_->RowScaling())); } void NoNLPScalingObject::DetermineScalingParametersImpl( const SmartPtr<const VectorSpace> x_space, const SmartPtr<const VectorSpace> c_space, const SmartPtr<const VectorSpace> d_space, const SmartPtr<const MatrixSpace> jac_c_space, const SmartPtr<const MatrixSpace> jac_d_space, const SmartPtr<const SymMatrixSpace> h_space, const Matrix& Px_L, const Vector& x_L, const Matrix& Px_U, const Vector& x_U, Number& df, SmartPtr<Vector>& dx, SmartPtr<Vector>& dc, SmartPtr<Vector>& dd) { df = 1.; dx = NULL; dc = NULL; dd = NULL; } } // namespace Ipopt
30.482293
92
0.678967
[ "vector" ]
0fbb8b021370f8b7fe92a3c4f9e395d193f85a32
29,729
cc
C++
chrome/browser/renderer_host/chrome_render_message_filter.cc
codenote/chromium-test
0637af0080f7e80bf7d20b29ce94c5edc817f390
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chrome/browser/renderer_host/chrome_render_message_filter.cc
codenote/chromium-test
0637af0080f7e80bf7d20b29ce94c5edc817f390
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chrome/browser/renderer_host/chrome_render_message_filter.cc
codenote/chromium-test
0637af0080f7e80bf7d20b29ce94c5edc817f390
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2020-11-04T07:25:45.000Z
2020-11-04T07:25:45.000Z
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/renderer_host/chrome_render_message_filter.h" #include "base/bind.h" #include "base/command_line.h" #include "base/file_util.h" #include "base/metrics/histogram.h" #include "chrome/browser/automation/automation_resource_message_filter.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/content_settings/cookie_settings.h" #include "chrome/browser/content_settings/tab_specific_content_settings.h" #include "chrome/browser/extensions/activity_log.h" #include "chrome/browser/extensions/api/messaging/message_service.h" #include "chrome/browser/extensions/event_router.h" #include "chrome/browser/extensions/extension_function_dispatcher.h" #include "chrome/browser/extensions/extension_info_map.h" #include "chrome/browser/extensions/extension_process_manager.h" #include "chrome/browser/extensions/extension_service.h" #include "chrome/browser/extensions/extension_system.h" #include "chrome/browser/nacl_host/nacl_infobar.h" #include "chrome/browser/nacl_host/nacl_process_host.h" #include "chrome/browser/nacl_host/pnacl_file_host.h" #include "chrome/browser/net/chrome_url_request_context.h" #include "chrome/browser/net/predictor.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/task_manager/task_manager.h" #include "chrome/common/chrome_notification_types.h" #include "chrome/common/chrome_switches.h" #include "chrome/common/extensions/api/i18n/default_locale_handler.h" #include "chrome/common/extensions/extension.h" #include "chrome/common/extensions/extension_file_util.h" #include "chrome/common/extensions/extension_messages.h" #include "chrome/common/extensions/message_bundle.h" #include "chrome/common/render_messages.h" #include "chrome/common/url_constants.h" #include "content/public/browser/notification_service.h" #include 
"content/public/browser/render_process_host.h" #include "content/public/browser/resource_dispatcher_host.h" #include "content/public/common/process_type.h" #include "extensions/common/constants.h" #include "googleurl/src/gurl.h" #include "third_party/WebKit/Source/Platform/chromium/public/WebString.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebSecurityOrigin.h" #include "webkit/plugins/npapi/plugin_list.h" #if defined(USE_TCMALLOC) #include "chrome/browser/browser_about_handler.h" #endif using content::BrowserThread; using extensions::APIPermission; using WebKit::WebCache; using WebKit::WebSecurityOrigin; namespace { void AddAPIActionToExtensionActivityLog( Profile* profile, const extensions::Extension* extension, const std::string& api_call, scoped_ptr<ListValue> args, const std::string& extra) { // The ActivityLog can only be accessed from the main (UI) thread. If we're // running on the wrong thread, re-dispatch from the main thread. if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::Bind(&AddAPIActionToExtensionActivityLog, profile, extension, api_call, base::Passed(&args), extra)); } else { extensions::ActivityLog* activity_log = extensions::ActivityLog::GetInstance(profile); if (activity_log && activity_log->IsLogEnabled()) activity_log->LogAPIAction(extension, api_call, args.get(), extra); } } void AddDOMActionToExtensionActivityLog( Profile* profile, const extensions::Extension* extension, const GURL& url, const string16& url_title, const std::string& api_call, scoped_ptr<ListValue> args, const std::string& extra) { // The ActivityLog can only be accessed from the main (UI) thread. If we're // running on the wrong thread, re-dispatch from the main thread. 
if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::Bind(&AddDOMActionToExtensionActivityLog, profile, extension, url, url_title, api_call, base::Passed(&args), extra)); } else { extensions::ActivityLog* activity_log = extensions::ActivityLog::GetInstance(profile); if (activity_log && activity_log->IsLogEnabled()) activity_log->LogDOMAction(extension, url, url_title, api_call, args.get(), extra); } } } // namespace ChromeRenderMessageFilter::ChromeRenderMessageFilter( int render_process_id, Profile* profile, net::URLRequestContextGetter* request_context) : render_process_id_(render_process_id), profile_(profile), off_the_record_(profile_->IsOffTheRecord()), request_context_(request_context), extension_info_map_( extensions::ExtensionSystem::Get(profile)->info_map()), cookie_settings_(CookieSettings::Factory::GetForProfile(profile)), weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { } ChromeRenderMessageFilter::~ChromeRenderMessageFilter() { } bool ChromeRenderMessageFilter::OnMessageReceived(const IPC::Message& message, bool* message_was_ok) { bool handled = true; IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message, *message_was_ok) #if !defined(DISABLE_NACL) IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_LaunchNaCl, OnLaunchNaCl) IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_GetReadonlyPnaclFD, OnGetReadonlyPnaclFd) IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_NaClCreateTemporaryFile, OnNaClCreateTemporaryFile) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_NaClErrorStatus, OnNaClErrorStatus) #endif IPC_MESSAGE_HANDLER(ChromeViewHostMsg_DnsPrefetch, OnDnsPrefetch) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_ResourceTypeStats, OnResourceTypeStats) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_UpdatedCacheStats, OnUpdatedCacheStats) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_FPS, OnFPS) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_V8HeapStats, OnV8HeapStats) 
IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToExtension, OnOpenChannelToExtension) IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToTab, OnOpenChannelToTab) IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToNativeApp, OnOpenChannelToNativeApp) IPC_MESSAGE_HANDLER_DELAY_REPLY(ExtensionHostMsg_GetMessageBundle, OnGetExtensionMessageBundle) IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddListener, OnExtensionAddListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveListener, OnExtensionRemoveListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddLazyListener, OnExtensionAddLazyListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveLazyListener, OnExtensionRemoveLazyListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddFilteredListener, OnExtensionAddFilteredListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveFilteredListener, OnExtensionRemoveFilteredListener) IPC_MESSAGE_HANDLER(ExtensionHostMsg_CloseChannel, OnExtensionCloseChannel) IPC_MESSAGE_HANDLER(ExtensionHostMsg_RequestForIOThread, OnExtensionRequestForIOThread) IPC_MESSAGE_HANDLER(ExtensionHostMsg_ShouldSuspendAck, OnExtensionShouldSuspendAck) IPC_MESSAGE_HANDLER(ExtensionHostMsg_GenerateUniqueID, OnExtensionGenerateUniqueID) IPC_MESSAGE_HANDLER(ExtensionHostMsg_SuspendAck, OnExtensionSuspendAck) IPC_MESSAGE_HANDLER(ExtensionHostMsg_ResumeRequests, OnExtensionResumeRequests); IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddAPIActionToActivityLog, OnAddAPIActionToExtensionActivityLog); IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddDOMActionToActivityLog, OnAddDOMActionToExtensionActivityLog); IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDatabase, OnAllowDatabase) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDOMStorage, OnAllowDOMStorage) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowFileSystem, OnAllowFileSystem) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowIndexedDB, OnAllowIndexedDB) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardRead, OnCanTriggerClipboardRead) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardWrite, 
OnCanTriggerClipboardWrite) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() #if defined(ENABLE_AUTOMATION) if ((message.type() == ChromeViewHostMsg_GetCookies::ID || message.type() == ChromeViewHostMsg_SetCookie::ID) && AutomationResourceMessageFilter::ShouldFilterCookieMessages( render_process_id_, message.routing_id())) { // ChromeFrame then we need to get/set cookies from the external host. IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message, *message_was_ok) IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_GetCookies, OnGetCookies) IPC_MESSAGE_HANDLER(ChromeViewHostMsg_SetCookie, OnSetCookie) IPC_END_MESSAGE_MAP() handled = true; } #endif return handled; } void ChromeRenderMessageFilter::OverrideThreadForMessage( const IPC::Message& message, BrowserThread::ID* thread) { switch (message.type()) { case ChromeViewHostMsg_ResourceTypeStats::ID: case ExtensionHostMsg_AddListener::ID: case ExtensionHostMsg_RemoveListener::ID: case ExtensionHostMsg_AddLazyListener::ID: case ExtensionHostMsg_RemoveLazyListener::ID: case ExtensionHostMsg_AddFilteredListener::ID: case ExtensionHostMsg_RemoveFilteredListener::ID: case ExtensionHostMsg_CloseChannel::ID: case ExtensionHostMsg_ShouldSuspendAck::ID: case ExtensionHostMsg_SuspendAck::ID: case ChromeViewHostMsg_UpdatedCacheStats::ID: *thread = BrowserThread::UI; break; default: break; } } net::HostResolver* ChromeRenderMessageFilter::GetHostResolver() { return request_context_->GetURLRequestContext()->host_resolver(); } #if !defined(DISABLE_NACL) void ChromeRenderMessageFilter::OnLaunchNaCl( const nacl::NaClLaunchParams& launch_params, IPC::Message* reply_msg) { NaClProcessHost* host = new NaClProcessHost( GURL(launch_params.manifest_url), launch_params.render_view_id, launch_params.permission_bits, launch_params.uses_irt, off_the_record_); host->Launch(this, reply_msg, extension_info_map_); } void ChromeRenderMessageFilter::OnGetReadonlyPnaclFd( const std::string& filename, IPC::Message* reply_msg) { // 
This posts a task to another thread, but the renderer will // block until the reply is sent. pnacl_file_host::GetReadonlyPnaclFd(this, filename, reply_msg); } void ChromeRenderMessageFilter::OnNaClCreateTemporaryFile( IPC::Message* reply_msg) { pnacl_file_host::CreateTemporaryFile(this, reply_msg); } void ChromeRenderMessageFilter::OnNaClErrorStatus(int render_view_id, int error_id) { // Currently there is only one kind of error status, for which // we want to show the user an infobar. ShowNaClInfobar(render_process_id_, render_view_id, error_id); } #endif void ChromeRenderMessageFilter::OnDnsPrefetch( const std::vector<std::string>& hostnames) { if (profile_->GetNetworkPredictor()) profile_->GetNetworkPredictor()->DnsPrefetchList(hostnames); } void ChromeRenderMessageFilter::OnResourceTypeStats( const WebCache::ResourceTypeStats& stats) { HISTOGRAM_COUNTS("WebCoreCache.ImagesSizeKB", static_cast<int>(stats.images.size / 1024)); HISTOGRAM_COUNTS("WebCoreCache.CSSStylesheetsSizeKB", static_cast<int>(stats.cssStyleSheets.size / 1024)); HISTOGRAM_COUNTS("WebCoreCache.ScriptsSizeKB", static_cast<int>(stats.scripts.size / 1024)); HISTOGRAM_COUNTS("WebCoreCache.XSLStylesheetsSizeKB", static_cast<int>(stats.xslStyleSheets.size / 1024)); HISTOGRAM_COUNTS("WebCoreCache.FontsSizeKB", static_cast<int>(stats.fonts.size / 1024)); DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); #if defined(ENABLE_TASK_MANAGER) TaskManager::GetInstance()->model()->NotifyResourceTypeStats( base::GetProcId(peer_handle()), stats); #endif // defined(ENABLE_TASK_MANAGER) } void ChromeRenderMessageFilter::OnUpdatedCacheStats( const WebCache::UsageStats& stats) { WebCacheManager::GetInstance()->ObserveStats(render_process_id_, stats); } void ChromeRenderMessageFilter::OnFPS(int routing_id, float fps) { if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &ChromeRenderMessageFilter::OnFPS, this, routing_id, fps)); return; } 
base::ProcessId renderer_id = base::GetProcId(peer_handle()); #if defined(ENABLE_TASK_MANAGER) TaskManager::GetInstance()->model()->NotifyFPS( renderer_id, routing_id, fps); #endif // defined(ENABLE_TASK_MANAGER) FPSDetails details(routing_id, fps); content::NotificationService::current()->Notify( chrome::NOTIFICATION_RENDERER_FPS_COMPUTED, content::Source<const base::ProcessId>(&renderer_id), content::Details<const FPSDetails>(&details)); } void ChromeRenderMessageFilter::OnV8HeapStats(int v8_memory_allocated, int v8_memory_used) { if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &ChromeRenderMessageFilter::OnV8HeapStats, this, v8_memory_allocated, v8_memory_used)); return; } base::ProcessId renderer_id = base::GetProcId(peer_handle()); #if defined(ENABLE_TASK_MANAGER) TaskManager::GetInstance()->model()->NotifyV8HeapStats( renderer_id, static_cast<size_t>(v8_memory_allocated), static_cast<size_t>(v8_memory_used)); #endif // defined(ENABLE_TASK_MANAGER) V8HeapStatsDetails details(v8_memory_allocated, v8_memory_used); content::NotificationService::current()->Notify( chrome::NOTIFICATION_RENDERER_V8_HEAP_STATS_COMPUTED, content::Source<const base::ProcessId>(&renderer_id), content::Details<const V8HeapStatsDetails>(&details)); } void ChromeRenderMessageFilter::OnOpenChannelToExtension( int routing_id, const std::string& source_extension_id, const std::string& target_extension_id, const std::string& channel_name, int* port_id) { int port2_id; extensions::MessageService::AllocatePortIdPair(port_id, &port2_id); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind(&ChromeRenderMessageFilter::OpenChannelToExtensionOnUIThread, this, render_process_id_, routing_id, port2_id, source_extension_id, target_extension_id, channel_name)); } void ChromeRenderMessageFilter::OpenChannelToExtensionOnUIThread( int source_process_id, int source_routing_id, int receiver_port_id, const std::string& 
source_extension_id, const std::string& target_extension_id, const std::string& channel_name) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); extensions::ExtensionSystem::Get(profile_)->message_service()-> OpenChannelToExtension( source_process_id, source_routing_id, receiver_port_id, source_extension_id, target_extension_id, channel_name); } void ChromeRenderMessageFilter::OnOpenChannelToNativeApp( int routing_id, const std::string& source_extension_id, const std::string& native_app_name, int* port_id) { int port2_id; extensions::MessageService::AllocatePortIdPair(port_id, &port2_id); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind(&ChromeRenderMessageFilter::OpenChannelToNativeAppOnUIThread, this, routing_id, port2_id, source_extension_id, native_app_name)); } void ChromeRenderMessageFilter::OpenChannelToNativeAppOnUIThread( int source_routing_id, int receiver_port_id, const std::string& source_extension_id, const std::string& native_app_name) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); extensions::ExtensionSystem::Get(profile_)->message_service()-> OpenChannelToNativeApp( render_process_id_, source_routing_id, receiver_port_id, source_extension_id, native_app_name); } void ChromeRenderMessageFilter::OnOpenChannelToTab( int routing_id, int tab_id, const std::string& extension_id, const std::string& channel_name, int* port_id) { int port2_id; extensions::MessageService::AllocatePortIdPair(port_id, &port2_id); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind(&ChromeRenderMessageFilter::OpenChannelToTabOnUIThread, this, render_process_id_, routing_id, port2_id, tab_id, extension_id, channel_name)); } void ChromeRenderMessageFilter::OpenChannelToTabOnUIThread( int source_process_id, int source_routing_id, int receiver_port_id, int tab_id, const std::string& extension_id, const std::string& channel_name) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); 
extensions::ExtensionSystem::Get(profile_)->message_service()-> OpenChannelToTab( source_process_id, source_routing_id, receiver_port_id, tab_id, extension_id, channel_name); } void ChromeRenderMessageFilter::OnGetExtensionMessageBundle( const std::string& extension_id, IPC::Message* reply_msg) { const extensions::Extension* extension = extension_info_map_->extensions().GetByID(extension_id); base::FilePath extension_path; std::string default_locale; if (extension) { extension_path = extension->path(); default_locale = extensions::LocaleInfo::GetDefaultLocale(extension); } BrowserThread::PostTask( BrowserThread::FILE, FROM_HERE, base::Bind( &ChromeRenderMessageFilter::OnGetExtensionMessageBundleOnFileThread, this, extension_path, extension_id, default_locale, reply_msg)); } void ChromeRenderMessageFilter::OnGetExtensionMessageBundleOnFileThread( const base::FilePath& extension_path, const std::string& extension_id, const std::string& default_locale, IPC::Message* reply_msg) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::FILE)); scoped_ptr<extensions::MessageBundle::SubstitutionMap> dictionary_map( extension_file_util::LoadMessageBundleSubstitutionMap( extension_path, extension_id, default_locale)); ExtensionHostMsg_GetMessageBundle::WriteReplyParams( reply_msg, *dictionary_map); Send(reply_msg); } void ChromeRenderMessageFilter::OnExtensionAddListener( const std::string& extension_id, const std::string& event_name) { content::RenderProcessHost* process = content::RenderProcessHost::FromID(render_process_id_); if (!process || !extensions::ExtensionSystem::Get(profile_)->event_router()) return; extensions::ExtensionSystem::Get(profile_)->event_router()-> AddEventListener(event_name, process, extension_id); } void ChromeRenderMessageFilter::OnExtensionRemoveListener( const std::string& extension_id, const std::string& event_name) { content::RenderProcessHost* process = content::RenderProcessHost::FromID(render_process_id_); if (!process || 
!extensions::ExtensionSystem::Get(profile_)->event_router()) return; extensions::ExtensionSystem::Get(profile_)->event_router()-> RemoveEventListener(event_name, process, extension_id); } void ChromeRenderMessageFilter::OnExtensionAddLazyListener( const std::string& extension_id, const std::string& event_name) { if (extensions::ExtensionSystem::Get(profile_)->event_router()) { extensions::ExtensionSystem::Get(profile_)->event_router()-> AddLazyEventListener(event_name, extension_id); } } void ChromeRenderMessageFilter::OnExtensionRemoveLazyListener( const std::string& extension_id, const std::string& event_name) { if (extensions::ExtensionSystem::Get(profile_)->event_router()) { extensions::ExtensionSystem::Get(profile_)->event_router()-> RemoveLazyEventListener(event_name, extension_id); } } void ChromeRenderMessageFilter::OnExtensionAddFilteredListener( const std::string& extension_id, const std::string& event_name, const base::DictionaryValue& filter, bool lazy) { content::RenderProcessHost* process = content::RenderProcessHost::FromID(render_process_id_); if (!process || !extensions::ExtensionSystem::Get(profile_)->event_router()) return; extensions::ExtensionSystem::Get(profile_)->event_router()-> AddFilteredEventListener(event_name, process, extension_id, filter, lazy); } void ChromeRenderMessageFilter::OnExtensionRemoveFilteredListener( const std::string& extension_id, const std::string& event_name, const base::DictionaryValue& filter, bool lazy) { content::RenderProcessHost* process = content::RenderProcessHost::FromID(render_process_id_); if (!process || !extensions::ExtensionSystem::Get(profile_)->event_router()) return; extensions::ExtensionSystem::Get(profile_)->event_router()-> RemoveFilteredEventListener(event_name, process, extension_id, filter, lazy); } void ChromeRenderMessageFilter::OnExtensionCloseChannel( int port_id, const std::string& error_message) { if (!content::RenderProcessHost::FromID(render_process_id_)) return; // To guard against 
crash in browser_tests shutdown. extensions::MessageService* message_service = extensions::ExtensionSystem::Get(profile_)->message_service(); if (message_service) message_service->CloseChannel(port_id, error_message); } void ChromeRenderMessageFilter::OnExtensionRequestForIOThread( int routing_id, const ExtensionHostMsg_Request_Params& params) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); ExtensionFunctionDispatcher::DispatchOnIOThread( extension_info_map_, profile_, render_process_id_, weak_ptr_factory_.GetWeakPtr(), routing_id, params); } void ChromeRenderMessageFilter::OnExtensionShouldSuspendAck( const std::string& extension_id, int sequence_id) { if (extensions::ExtensionSystem::Get(profile_)->process_manager()) { extensions::ExtensionSystem::Get(profile_)->process_manager()-> OnShouldSuspendAck(extension_id, sequence_id); } } void ChromeRenderMessageFilter::OnExtensionSuspendAck( const std::string& extension_id) { if (extensions::ExtensionSystem::Get(profile_)->process_manager()) { extensions::ExtensionSystem::Get(profile_)->process_manager()-> OnSuspendAck(extension_id); } } void ChromeRenderMessageFilter::OnExtensionGenerateUniqueID(int* unique_id) { static int next_unique_id = 1; *unique_id = next_unique_id++; } void ChromeRenderMessageFilter::OnExtensionResumeRequests(int route_id) { content::ResourceDispatcherHost::Get()->ResumeBlockedRequestsForRoute( render_process_id_, route_id); } void ChromeRenderMessageFilter::OnAddAPIActionToExtensionActivityLog( const std::string& extension_id, const ExtensionHostMsg_APIAction_Params& params) { const extensions::Extension* extension = extension_info_map_->extensions().GetByID(extension_id); scoped_ptr<ListValue> args(params.arguments.DeepCopy()); // The activity is recorded as an API action in the extension // activity log. 
AddAPIActionToExtensionActivityLog(profile_, extension, params.api_call, args.Pass(), params.extra); } void ChromeRenderMessageFilter::OnAddDOMActionToExtensionActivityLog( const std::string& extension_id, const ExtensionHostMsg_DOMAction_Params& params) { const extensions::Extension* extension = extension_info_map_->extensions().GetByID(extension_id); scoped_ptr<ListValue> args(params.arguments.DeepCopy()); // The activity is recorded as a DOM action on the extension // activity log. AddDOMActionToExtensionActivityLog(profile_, extension, params.url, params.url_title, params.api_call, args.Pass(), params.extra); } void ChromeRenderMessageFilter::OnAllowDatabase(int render_view_id, const GURL& origin_url, const GURL& top_origin_url, const string16& name, const string16& display_name, bool* allowed) { *allowed = cookie_settings_->IsSettingCookieAllowed(origin_url, top_origin_url); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &TabSpecificContentSettings::WebDatabaseAccessed, render_process_id_, render_view_id, origin_url, name, display_name, !*allowed)); } void ChromeRenderMessageFilter::OnAllowDOMStorage(int render_view_id, const GURL& origin_url, const GURL& top_origin_url, bool local, bool* allowed) { *allowed = cookie_settings_->IsSettingCookieAllowed(origin_url, top_origin_url); // Record access to DOM storage for potential display in UI. BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &TabSpecificContentSettings::DOMStorageAccessed, render_process_id_, render_view_id, origin_url, local, !*allowed)); } void ChromeRenderMessageFilter::OnAllowFileSystem(int render_view_id, const GURL& origin_url, const GURL& top_origin_url, bool* allowed) { *allowed = cookie_settings_->IsSettingCookieAllowed(origin_url, top_origin_url); // Record access to file system for potential display in UI. 
BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &TabSpecificContentSettings::FileSystemAccessed, render_process_id_, render_view_id, origin_url, !*allowed)); } void ChromeRenderMessageFilter::OnAllowIndexedDB(int render_view_id, const GURL& origin_url, const GURL& top_origin_url, const string16& name, bool* allowed) { *allowed = cookie_settings_->IsSettingCookieAllowed(origin_url, top_origin_url); BrowserThread::PostTask( BrowserThread::UI, FROM_HERE, base::Bind( &TabSpecificContentSettings::IndexedDBAccessed, render_process_id_, render_view_id, origin_url, name, !*allowed)); } void ChromeRenderMessageFilter::OnCanTriggerClipboardRead( const GURL& origin, bool* allowed) { *allowed = extension_info_map_->SecurityOriginHasAPIPermission( origin, render_process_id_, APIPermission::kClipboardRead); } void ChromeRenderMessageFilter::OnCanTriggerClipboardWrite( const GURL& origin, bool* allowed) { // Since all extensions could historically write to the clipboard, preserve it // for compatibility. *allowed = (origin.SchemeIs(extensions::kExtensionScheme) || extension_info_map_->SecurityOriginHasAPIPermission( origin, render_process_id_, APIPermission::kClipboardWrite)); } void ChromeRenderMessageFilter::OnGetCookies( const GURL& url, const GURL& first_party_for_cookies, IPC::Message* reply_msg) { #if defined(ENABLE_AUTOMATION) AutomationResourceMessageFilter::GetCookiesForUrl( this, request_context_->GetURLRequestContext(), render_process_id_, reply_msg, url); #endif } void ChromeRenderMessageFilter::OnSetCookie(const IPC::Message& message, const GURL& url, const GURL& first_party_for_cookies, const std::string& cookie) { #if defined(ENABLE_AUTOMATION) AutomationResourceMessageFilter::SetCookiesForUrl( render_process_id_, message.routing_id(), url, cookie); #endif }
41.695652
80
0.703354
[ "vector", "model" ]
0fbc06edbbb673a5e01d88897b7d6f9f401f24fe
6,988
cxx
C++
vtkm/filter/vector_analysis/testing/UnitTestGradientUniform.cxx
Kitware/vtk-m
b24a878f72b288d69c9da8c7ac33f08db6d39ba9
[ "BSD-3-Clause" ]
null
null
null
vtkm/filter/vector_analysis/testing/UnitTestGradientUniform.cxx
Kitware/vtk-m
b24a878f72b288d69c9da8c7ac33f08db6d39ba9
[ "BSD-3-Clause" ]
null
null
null
vtkm/filter/vector_analysis/testing/UnitTestGradientUniform.cxx
Kitware/vtk-m
b24a878f72b288d69c9da8c7ac33f08db6d39ba9
[ "BSD-3-Clause" ]
null
null
null
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ #include <vtkm/filter/vector_analysis/Gradient.h> #include <vtkm/cont/ErrorFilterExecution.h> #include <vtkm/cont/testing/MakeTestDataSet.h> #include <vtkm/cont/testing/Testing.h> namespace { void TestCellGradientUniform3D() { std::cout << "Testing Gradient Filter with cell output on 3D structured data" << std::endl; vtkm::cont::testing::MakeTestDataSet testDataSet; vtkm::cont::DataSet dataSet = testDataSet.Make3DUniformDataSet0(); vtkm::filter::vector_analysis::Gradient gradient; gradient.SetOutputFieldName("Gradient"); gradient.SetComputeVorticity(true); //this won't work as we have a scalar field gradient.SetComputeQCriterion(true); //this won't work as we have a scalar field gradient.SetActiveField("pointvar"); vtkm::cont::DataSet result; // We provocate this exception try { result = gradient.Execute(dataSet); VTKM_TEST_FAIL("Gradient attempted to compute Vorticity or QCriterion with scalars"); } catch (vtkm::cont::ErrorFilterExecution&) { // We should exit in this catch } } void TestCellGradientUniform3DWithVectorField() { std::cout << "Testing Gradient Filter with vector cell output on 3D structured data" << std::endl; vtkm::cont::testing::MakeTestDataSet testDataSet; vtkm::cont::DataSet dataSet = testDataSet.Make3DUniformDataSet0(); //Verify that we can compute the gradient of a 3 component vector const int nVerts = 18; vtkm::Float64 vars[nVerts] = { 10.1, 20.1, 30.1, 40.1, 50.2, 60.2, 70.2, 80.2, 90.3, 100.3, 110.3, 120.3, 130.4, 140.4, 150.4, 160.4, 170.5, 180.5 }; std::vector<vtkm::Vec3f_64> vec(nVerts); for 
(std::size_t i = 0; i < vec.size(); ++i) { vec[i] = vtkm::make_Vec(vars[i], vars[i], vars[i]); } vtkm::cont::ArrayHandle<vtkm::Vec3f_64> input = vtkm::cont::make_ArrayHandle(vec, vtkm::CopyFlag::On); dataSet.AddPointField("vec_pointvar", input); //we need to add Vec3 array to the dataset vtkm::filter::vector_analysis::Gradient gradient; gradient.SetOutputFieldName("vec_gradient"); gradient.SetComputeDivergence(true); gradient.SetComputeVorticity(true); gradient.SetComputeQCriterion(true); gradient.SetActiveField("vec_pointvar"); vtkm::cont::DataSet result = gradient.Execute(dataSet); VTKM_TEST_ASSERT(result.HasCellField("vec_gradient"), "Result field missing."); //verify that the vorticity and qcriterion fields DO exist VTKM_TEST_ASSERT(result.HasField("Divergence")); VTKM_TEST_ASSERT(result.HasField("Vorticity")); VTKM_TEST_ASSERT(result.HasField("QCriterion")); VTKM_TEST_ASSERT(test_equal_ArrayHandles( result.GetCellField("vec_gradient").GetData(), vtkm::cont::make_ArrayHandle<vtkm::Vec<vtkm::Vec3f_64, 3>>( { { { 10.025, 10.025, 10.025 }, { 30.075, 30.075, 30.075 }, { 60.125, 60.125, 60.125 } }, { { 10.025, 10.025, 10.025 }, { 30.075, 30.075, 30.075 }, { 60.125, 60.125, 60.125 } }, { { 10.025, 10.025, 10.025 }, { 30.075, 30.075, 30.075 }, { 60.175, 60.175, 60.175 } }, { { 10.025, 10.025, 10.025 }, { 30.075, 30.075, 30.075 }, { 60.175, 60.175, 60.175 } } }))); VTKM_TEST_ASSERT(test_equal_ArrayHandles( result.GetCellField("Divergence").GetData(), vtkm::cont::make_ArrayHandle<vtkm::Float64>({ 100.225, 100.225, 100.275, 100.275 }))); VTKM_TEST_ASSERT(test_equal_ArrayHandles( result.GetCellField("Vorticity").GetData(), vtkm::cont::make_ArrayHandle<vtkm::Vec3f_64>({ { -30.05, 50.1, -20.05 }, { -30.05, 50.1, -20.05 }, { -30.1, 50.15, -20.05 }, { -30.1, 50.15, -20.05 } }))); VTKM_TEST_ASSERT(test_equal_ArrayHandles( result.GetCellField("QCriterion").GetData(), vtkm::cont::make_ArrayHandle<vtkm::Float64>({ -5022.53, -5022.53, -5027.54, -5027.54 }))); } void 
TestPointGradientUniform3DWithVectorField() { std::cout << "Testing Gradient Filter with vector point output on 3D structured data" << std::endl; vtkm::cont::testing::MakeTestDataSet testDataSet; vtkm::cont::DataSet dataSet = testDataSet.Make3DUniformDataSet0(); //Verify that we can compute the gradient of a 3 component vector const int nVerts = 18; vtkm::Float64 vars[nVerts] = { 10.1, 20.1, 30.1, 40.1, 50.2, 60.2, 70.2, 80.2, 90.3, 100.3, 110.3, 120.3, 130.4, 140.4, 150.4, 160.4, 170.5, 180.5 }; std::vector<vtkm::Vec3f_64> vec(nVerts); for (std::size_t i = 0; i < vec.size(); ++i) { vec[i] = vtkm::make_Vec(vars[i], vars[i], vars[i]); } vtkm::cont::ArrayHandle<vtkm::Vec3f_64> input = vtkm::cont::make_ArrayHandle(vec, vtkm::CopyFlag::On); dataSet.AddPointField("vec_pointvar", input); //we need to add Vec3 array to the dataset vtkm::filter::vector_analysis::Gradient gradient; gradient.SetComputePointGradient(true); gradient.SetOutputFieldName("vec_gradient"); gradient.SetActiveField("vec_pointvar"); vtkm::cont::DataSet result = gradient.Execute(dataSet); VTKM_TEST_ASSERT(result.HasPointField("vec_gradient"), "Result field missing."); vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::Vec3f_64, 3>> resultArrayHandle; result.GetPointField("vec_gradient").GetData().AsArrayHandle(resultArrayHandle); vtkm::Vec<vtkm::Vec3f_64, 3> expected[4] = { { { 10.0, 10.0, 10.0 }, { 30.0, 30.0, 30.0 }, { 60.1, 60.1, 60.1 } }, { { 10.0, 10.0, 10.0 }, { 30.1, 30.1, 30.1 }, { 60.1, 60.1, 60.1 } }, { { 10.0, 10.0, 10.0 }, { 30.1, 30.1, 30.1 }, { 60.2, 60.2, 60.2 } }, { { 10.1, 10.1, 10.1 }, { 30.0, 30.0, 30.0 }, { 60.2, 60.2, 60.2 } } }; for (int i = 0; i < 4; ++i) { vtkm::Vec<vtkm::Vec3f_64, 3> e = expected[i]; vtkm::Vec<vtkm::Vec3f_64, 3> r = resultArrayHandle.ReadPortal().Get(i); VTKM_TEST_ASSERT(test_equal(e[0], r[0]), "Wrong result for vec field CellGradient filter on 3D uniform data"); VTKM_TEST_ASSERT(test_equal(e[1], r[1]), "Wrong result for vec field CellGradient filter on 3D uniform data"); 
VTKM_TEST_ASSERT(test_equal(e[2], r[2]), "Wrong result for vec field CellGradient filter on 3D uniform data"); } } void TestGradient() { TestCellGradientUniform3D(); TestCellGradientUniform3DWithVectorField(); TestPointGradientUniform3DWithVectorField(); } } int UnitTestGradientUniform(int argc, char* argv[]) { return vtkm::cont::testing::Testing::Run(TestGradient, argc, argv); }
39.931429
100
0.64024
[ "vector", "3d" ]
0fbde0095d6c4aabfac0052615224182b94c34c9
4,090
cpp
C++
2021/xs-and-os/template.cpp
Muklek/facebook-hackercup
b736309fac39b17fe9315d26516c361793617a8b
[ "MIT" ]
null
null
null
2021/xs-and-os/template.cpp
Muklek/facebook-hackercup
b736309fac39b17fe9315d26516c361793617a8b
[ "MIT" ]
null
null
null
2021/xs-and-os/template.cpp
Muklek/facebook-hackercup
b736309fac39b17fe9315d26516c361793617a8b
[ "MIT" ]
null
null
null
#include<iostream> #include<string> #include<string_view> #include<array> #include<vector> #include<deque> #include<list> #include<unordered_map> #include<map> #include<unordered_set> #include<set> #include<algorithm> #include<functional> #include<memory> #include<fstream> using si = std::size_t; using i16 = std::int16_t; using i32 = std::int32_t; using i64 = std::int64_t; using ui16 = std::uint16_t; using ui32 = std::uint32_t; using ui64 = std::uint64_t; using fl = float; using du = double; using ch = char; using st = std::string; template<typename T, std::size_t S> using ar = std::array<T,S>; template<typename T> using ve = std::vector<T>; template<typename T> using de = std::deque<T>; template<typename T> using li = std::list<T>; template<typename K, typename V> using um = std::unordered_map<K,V>; template<typename K, typename V> using om = std::map<K,V>; template<typename T> using us = std::unordered_set<T>; template<typename T> using os = std::set<T>; #define pb push_back; #define eb emplace_back; #define br '\n'; #define p1d(x) for(auto y : *x) std::cout<<y<<' '; #define p2d(x) for(auto& y : *x) std::cout<<'\n', for(auto z : y) std::cout<<z<< ' '; #define pm(x) for(auto& y : *x) std::cout<<y.first<<' '<<y.second<<'\n'; #define sort(x) std::sort(x->begin(), x->end()); ////////////////////////////////////////////////////////////////////////////////////// __attribute__((always_inline)) inline std::pair<si,si> gc(const ve<st>& b) { si const mrow{b.size()}; si const mcol{b[0].size()}; os<si> row, col; for(si r{0}; r<mrow; r++) { for(si c{0}; c<mcol; c++) { if(b[r][c] == 'O' && row.find(r) == row.end()) row.insert(r); if(b[r][c] == 'O' && col.find(c) == col.end()) col.insert(c); } } um<si,si> xrow, xcol; for(si r{0}; r<mrow; r++) { for(si c{0}; c<mcol; c++) { if(b[r][c] == 'X') { if(xrow.find(r) == xrow.end()) xrow.insert({r, 1}); else xrow.at(r) += 1; if(xcol.find(c) == xcol.end()) xcol.insert({c, 1}); else xcol.at(c) += 1; } } } um<si, si> t; um<si,si> ut; for(si 
r{0}, c{0}, x{0}; r<mrow; r++, c=0) { if(row.find(r) == row.end()) { if(xrow.find(r) != xrow.end()) x = mrow - xrow.at(r); else x = mrow; if(x == 1) { while(c < mcol && b[r][c] == 'X') ++c; ut.insert({r, c}); } if(t.find(x) != t.end()) t.at(x) +=1; else t.insert({x, 1}); } } for(si c{0}, r{0}, x{0}, inc{true}; c<mcol; c++, r=0, inc=true) { if(col.find(c) == col.end()) { if(xcol.find(c) != xcol.end()) x = mcol - xcol.at(c); else x = mcol; if(x == 1) { while(r < mrow && b[r][c] == 'X') ++r; if(ut.find(r) != ut.end() && ut.at(r) == c) inc = false; } if(inc && t.find(x) != t.end()) t.at(x) += 1; else if(inc) t.insert({x, 1}); } } si mcount{std::numeric_limits<si>::max()}; si mpos{std::numeric_limits<si>::max()}; if(t.size() == 0) return std::make_pair(mcount, mpos); for(auto& x : t) if(x.first < mcount) mcount = x.first, mpos = x.second; return std::make_pair(mcount, mpos); } int main() { std::ios::sync_with_stdio(0), std::cin.tie(0); std::fstream iFile; std::fstream oFile; iFile.open("./input.txt"); oFile.open("./output.txt"); std::string line{""}; int lineCount{0}, boardCount{0}, boardSize{0}; std::getline(iFile, line); boardCount = std::stoi(line); ve<st> board; for(si i{1}; i<=boardCount; i++) { std::getline(iFile,line); boardSize = std::stoi(line); for(si j{1}; j<=boardSize; j++) { std::getline(iFile,line); board.emplace_back(line); } auto x{gc(board)}; oFile<<"Case #"<<i<<":"<<" "; if(x.first == std::numeric_limits<si>::max()) oFile<<"Impossible"<<'\n'; else oFile<<x.first<<" "<<x.second<<'\n'; board.clear(); } iFile.close(); oFile.close(); }
24.638554
86
0.520049
[ "vector" ]
0fc71d8ac29c665706e7dd6b99207ec993f56818
10,009
cc
C++
src/runtime/hexagon/hexagon_buffer.cc
gayatripk1/tvm
8bf6cd5800daaf42935fd69cbd63180c97bef262
[ "Apache-2.0" ]
4,640
2017-08-17T19:22:15.000Z
2019-11-04T15:29:46.000Z
src/runtime/hexagon/hexagon_buffer.cc
gayatripk1/tvm
8bf6cd5800daaf42935fd69cbd63180c97bef262
[ "Apache-2.0" ]
2,863
2017-08-17T19:55:50.000Z
2019-11-04T17:18:41.000Z
src/runtime/hexagon/hexagon_buffer.cc
gayatripk1/tvm
8bf6cd5800daaf42935fd69cbd63180c97bef262
[ "Apache-2.0" ]
1,352
2017-08-17T19:30:38.000Z
2019-11-04T16:09:29.000Z
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include "hexagon_buffer.h" #include <tvm/runtime/module.h> #include <algorithm> #include <string> #include <utility> #include "HAP_compute_res.h" #include "hexagon_common.h" namespace tvm { namespace runtime { namespace hexagon { int hexagon_user_dma_1d_sync(void* dst, void* src, uint32_t length); struct Allocation { Allocation(size_t allocation_nbytes, size_t alignment) : allocation_nbytes_(allocation_nbytes), alignment_(alignment) {} virtual ~Allocation() {} Allocation(const Allocation&) = delete; Allocation& operator=(const Allocation&) = delete; Allocation(Allocation&&) = delete; Allocation& operator=(Allocation&&) = delete; void* data_{nullptr}; size_t allocation_nbytes_; size_t alignment_; }; struct DDRAllocation : public Allocation { DDRAllocation(size_t nbytes, size_t alignment) : Allocation(nbytes, alignment) { int ret = posix_memalign(&data_, alignment, nbytes); CHECK_EQ(ret, 0); } ~DDRAllocation() { free(data_); } }; struct VTCMAllocation : public Allocation { VTCMAllocation(size_t nbytes, size_t alignment) : Allocation(nbytes, alignment) { compute_res_attr_t res_info; HEXAGON_SAFE_CALL(HAP_compute_res_attr_init(&res_info)); // allocate nbytes of vtcm on a single page 
HEXAGON_SAFE_CALL(HAP_compute_res_attr_set_vtcm_param(&res_info, /*vtcm_size = */ nbytes, /*b_single_page = */ 1)); // TODO(HWE): Investigate why a non-zero timeout results in // hanging, both in the simulator and on hardware. context_id_ = HAP_compute_res_acquire(&res_info, /*timeout = */ 0); if (context_id_) { data_ = HAP_compute_res_attr_get_vtcm_ptr(&res_info); if (!data_) { LOG(ERROR) << "ERROR: Allocated VTCM ptr is null."; HEXAGON_SAFE_CALL(HAP_compute_res_release(context_id_)); return; } } else { LOG(ERROR) << "ERROR: Unable to acquire requeisted resource."; return; } } ~VTCMAllocation() { HEXAGON_SAFE_CALL(HAP_compute_res_release(context_id_)); data_ = nullptr; } unsigned int context_id_{0}; }; template <HexagonBuffer::StorageScope S> std::unique_ptr<Allocation> Allocator(size_t nbytes, size_t alignment); template <> std::unique_ptr<Allocation> Allocator<HexagonBuffer::StorageScope::kDDR>(size_t nbytes, size_t alignment) { return std::make_unique<DDRAllocation>(nbytes, alignment); } template <> std::unique_ptr<Allocation> Allocator<HexagonBuffer::StorageScope::kVTCM>(size_t nbytes, size_t alignment) { return std::make_unique<VTCMAllocation>(nbytes, alignment); } HexagonBuffer::HexagonBuffer(size_t nbytes, size_t alignment, Optional<String> scope) : ndim_(1), nbytes_per_allocation_(nbytes) { SetStorageScope(scope); std::unique_ptr<Allocation> alloca = nullptr; if (GetStorageScope() == StorageScope::kDDR) { alloca = Allocator<StorageScope::kDDR>(nbytes, alignment); } else if (GetStorageScope() == StorageScope::kVTCM) { alloca = Allocator<StorageScope::kVTCM>(nbytes, alignment); } CHECK(alloca != nullptr); allocations_.push_back(alloca->data_); managed_allocations_.push_back(std::move(alloca)); } HexagonBuffer::HexagonBuffer(size_t nallocs, size_t nbytes, size_t alignment, Optional<String> scope) : ndim_(2), nbytes_per_allocation_(nbytes) { SetStorageScope(scope); size_t nbytes_aligned = ((nbytes + (alignment - 1)) / alignment) * alignment; size_t 
nbytes_monolithic = nallocs * nbytes_aligned; std::unique_ptr<Allocation> alloca = nullptr; if (GetStorageScope() == StorageScope::kDDR) { alloca = Allocator<StorageScope::kDDR>(nbytes_monolithic, alignment); } else if (GetStorageScope() == StorageScope::kVTCM) { alloca = Allocator<StorageScope::kVTCM>(nbytes_monolithic, alignment); } CHECK(alloca) << "could not create allocation"; for (size_t i = 0; i < nallocs; ++i) { void* alloc_offset = static_cast<unsigned char*>(alloca->data_) + i * nbytes_aligned; allocations_.push_back(alloc_offset); } managed_allocations_.push_back(std::move(alloca)); } HexagonBuffer::~HexagonBuffer() { managed_allocations_.clear(); } void* HexagonBuffer::GetPointer() { ICHECK(allocations_.size()) << "Internal failure, allocations_ should be set in HexagonBuffer constructor"; if (ndim_ == 1) { ICHECK_EQ(allocations_.size(), 1); return allocations_[0]; } else if (ndim_ == 2) { return allocations_.data(); } else { LOG(FATAL) << "HexagonBuffer should be either 1-d or 2-d, not " << ndim_ << "-d"; return nullptr; } } HexagonBuffer::StorageScope HexagonBuffer::GetStorageScope() const { return storage_scope_; } void HexagonBuffer::SetStorageScope(Optional<String> scope) { if (!scope.defined()) { storage_scope_ = StorageScope::kDDR; } else { if (scope.value() == "global") { storage_scope_ = StorageScope::kDDR; } else if (scope.value() == "global.vtcm") { storage_scope_ = StorageScope::kVTCM; } else { CHECK(false) << "Encountered unknown HexagonBuffer storage scope: " << std::string(scope.value()); } } } std::vector<MemoryCopy> BufferSet::MemoryCopies(const BufferSet& dest, const BufferSet& src, size_t bytes_to_copy) { CHECK_LE(bytes_to_copy, src.TotalBytes()); CHECK_LE(bytes_to_copy, dest.TotalBytes()); auto pointer_to = [](const BufferSet& buf, size_t region_i, size_t byte_i) -> void* { void* region = buf.buffers[region_i]; return static_cast<unsigned char*>(region) + byte_i; }; size_t num_src_regions = (bytes_to_copy + src.region_size_bytes - 1) 
/ src.region_size_bytes; // First, determine all copies that do not cross boundaries in // either source or destination region. This requires two loops, as // a single source region may overlap one or more destination // regions, and vice versa. std::vector<MemoryCopy> micro_copies; for (size_t src_i = 0; src_i < num_src_regions; src_i++) { size_t src_region_begin = src_i * src.region_size_bytes; size_t src_region_end = std::min((src_i + 1) * src.region_size_bytes, bytes_to_copy); size_t dest_i_begin = src_region_begin / dest.region_size_bytes; size_t dest_i_end = (src_region_end - 1) / dest.region_size_bytes + 1; for (size_t dest_i = dest_i_begin; dest_i < dest_i_end; dest_i++) { size_t offset_begin = std::max(src_region_begin, dest_i * dest.region_size_bytes); size_t offset_end = std::min(src_region_end, (dest_i + 1) * dest.region_size_bytes); size_t num_bytes = offset_end - offset_begin; void* src_ptr = pointer_to(src, src_i, offset_begin % src.region_size_bytes); void* dest_ptr = pointer_to(dest, dest_i, offset_begin % dest.region_size_bytes); micro_copies.push_back(MemoryCopy(dest_ptr, src_ptr, num_bytes)); } } return micro_copies; } std::vector<MemoryCopy> MemoryCopy::MergeAdjacent(std::vector<MemoryCopy> micro_copies) { std::sort(micro_copies.begin(), micro_copies.end(), [](const MemoryCopy& a, const MemoryCopy& b) { return a.src < b.src; }); std::vector<MemoryCopy> macro_copies; for (const auto& copy : micro_copies) { if (macro_copies.size() && macro_copies.back().IsDirectlyBefore(copy)) { macro_copies.back().num_bytes += copy.num_bytes; } else { macro_copies.push_back(copy); } } return macro_copies; } void hexagon_buffer_copy_across_regions(const BufferSet& dest, const BufferSet& src, size_t bytes_to_copy) { // First, determine all copies that do not cross boundaries in // either source or destination region. 
auto micro_copies = BufferSet::MemoryCopies(dest, src, bytes_to_copy); // If regions are contiguously allocated, we can reduce the number // of copies required by merging adjacent copies. auto macro_copies = MemoryCopy::MergeAdjacent(std::move(micro_copies)); // Finally, do the memory copies. for (const auto& copy : macro_copies) { int error_code = hexagon_user_dma_1d_sync(copy.dest, copy.src, copy.num_bytes); CHECK_EQ(error_code, 0); } } void HexagonBuffer::CopyTo(void* data, size_t nbytes) const { BufferSet src(allocations_.data(), allocations_.size(), nbytes_per_allocation_); BufferSet dest(&data, 1, nbytes); hexagon_buffer_copy_across_regions(dest, src, nbytes); } void HexagonBuffer::CopyFrom(void* data, size_t nbytes) { BufferSet src(&data, 1, nbytes); BufferSet dest(allocations_.data(), allocations_.size(), nbytes_per_allocation_); hexagon_buffer_copy_across_regions(dest, src, nbytes); } void HexagonBuffer::CopyFrom(const HexagonBuffer& other, size_t nbytes) { BufferSet src(other.allocations_.data(), other.allocations_.size(), other.nbytes_per_allocation_); BufferSet dest(allocations_.data(), allocations_.size(), nbytes_per_allocation_); hexagon_buffer_copy_across_regions(dest, src, nbytes); } } // namespace hexagon } // namespace runtime } // namespace tvm
36.797794
100
0.698871
[ "vector" ]
0fc7f5f5af68c9012e0b83482986a793b91f126e
10,434
cpp
C++
lib/Callable/Tests.cpp
lsbharadwaj/PothosCore
02b3491ed06f23924a4c749f35b7fade88b81a14
[ "BSL-1.0" ]
180
2017-09-11T00:44:36.000Z
2022-03-25T09:23:47.000Z
lib/Callable/Tests.cpp
lsbharadwaj/PothosCore
02b3491ed06f23924a4c749f35b7fade88b81a14
[ "BSL-1.0" ]
109
2015-01-19T07:33:38.000Z
2017-08-12T00:29:13.000Z
lib/Callable/Tests.cpp
lsbharadwaj/PothosCore
02b3491ed06f23924a4c749f35b7fade88b81a14
[ "BSL-1.0" ]
32
2017-09-20T10:47:29.000Z
2022-03-24T06:13:03.000Z
// Copyright (c) 2013-2019 Josh Blum // SPDX-License-Identifier: BSL-1.0 #include <Pothos/Callable.hpp> #include <Pothos/Testing.hpp> #include <string> #include <iostream> #include <functional> struct TestClass { TestClass(void) { _bar = -1; } TestClass(const int bar) { _bar = bar; } TestClass(const long bar, const std::string &) { _bar = bar; } void setBar(const int bar) { _bar = bar; } int getBar(void) { return _bar; } static long strLen(const std::string &s) { return s.size(); } static long add(int a, unsigned b) { return long(a + b); } static long long addMany(int a, unsigned b, long c, char d, short e) { return (long long)(a + b + c + d + e); } static int itsGonnaThrow(const int &) { throw std::runtime_error("told you so"); } int overloaded(const int) { return 0; } int overloaded(const long &) { return 1; } void overloaded(const std::string &) { return; } int _bar; }; struct NonsenseClass { //nothing here }; POTHOS_TEST_BLOCK("/callable/tests", test_callable_null) { Pothos::Callable callNull; POTHOS_TEST_THROWS(callNull.call(0), Pothos::CallableNullError); POTHOS_TEST_TRUE(callNull == callNull); POTHOS_TEST_TRUE(callNull == Pothos::Callable()); } /*********************************************************************** * Passing object types in and out **********************************************************************/ static Pothos::Object getObj(void) { return Pothos::Object(int(42)); } static Pothos::Object passObj(const Pothos::Object &o) { return o; } POTHOS_TEST_BLOCK("/callable/tests", test_callable_meta) { Pothos::Callable getObjCall(&getObj); Pothos::Object ret = getObjCall.opaqueCall(nullptr, 0); POTHOS_TEST_TRUE(ret.type() == typeid(Pothos::Object)); POTHOS_TEST_TRUE(ret.extract<Pothos::Object>().type() == typeid(int)); POTHOS_TEST_EQUAL(ret.extract<Pothos::Object>().extract<int>(), 42); Pothos::Callable passObjCall(&passObj); Pothos::Object ret2 = getObjCall.opaqueCall(&ret, 0); POTHOS_TEST_TRUE(ret2.type() == typeid(Pothos::Object)); 
POTHOS_TEST_TRUE(ret2.extract<Pothos::Object>().type() == typeid(int)); POTHOS_TEST_EQUAL(ret2.extract<Pothos::Object>().extract<int>(), 42); } /*********************************************************************** * Using std::function **********************************************************************/ static int my_negate(const int x) { return -x; } static int my_multiply(const int x, const int y) { return x*y; } POTHOS_TEST_BLOCK("/callable/tests", test_callable_std_function) { std::function<int(int)> my_func(my_negate); Pothos::Callable my_callable(my_func); POTHOS_TEST_EQUAL(my_callable.call(42), -42); std::function<int(int)> bound_func = std::bind(my_multiply, -1, std::placeholders::_1); Pothos::Callable my_bound_callable(bound_func); POTHOS_TEST_EQUAL(my_bound_callable.call(123), -123); } /*********************************************************************** * Test binding methods **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_with_methods) { //bind some class methods Pothos::Callable setBar(&TestClass::setBar); POTHOS_TEST_EQUAL(setBar.getNumArgs(), 2); POTHOS_TEST_TRUE(setBar.type(-1) == typeid(void)); POTHOS_TEST_TRUE(setBar.type(0) == typeid(TestClass)); POTHOS_TEST_TRUE(setBar.type(1) == typeid(int)); POTHOS_TEST_THROWS(setBar.call(0), Pothos::CallableArgumentError); Pothos::Callable getBar(&TestClass::getBar); POTHOS_TEST_EQUAL(getBar.getNumArgs(), 1); POTHOS_TEST_TRUE(getBar.type(-1) == typeid(int)); POTHOS_TEST_TRUE(getBar.type(0) == typeid(TestClass)); POTHOS_TEST_THROWS(getBar.call<int>(), Pothos::CallableArgumentError); //call the class methods TestClass test; setBar.call(std::ref(test), int(42)); POTHOS_TEST_EQUAL(42, getBar.call<int>(std::ref(test))); //check the return error conditions POTHOS_TEST_THROWS(setBar.call<int>(std::ref(test), 21), Pothos::CallableReturnError); POTHOS_TEST_THROWS(getBar.call<NonsenseClass>(std::ref(test)), Pothos::CallableReturnError); 
} /*********************************************************************** * Test binding functions **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_with_functions) { //bind a function (static method) Pothos::Callable strLen(&TestClass::strLen); POTHOS_TEST_EQUAL(strLen.getNumArgs(), 1); POTHOS_TEST_TRUE(strLen.type(-1) == typeid(long)); POTHOS_TEST_TRUE(strLen.type(0) == typeid(std::string)); POTHOS_TEST_EQUAL(5, strLen.call<long>(std::string("hello"))); POTHOS_TEST_THROWS(strLen.call<long>(NonsenseClass()), Pothos::CallableArgumentError); //test copy ability Pothos::Callable strLenCopy0 = strLen; POTHOS_TEST_EQUAL(5, strLenCopy0.call<long>(std::string("world"))); Pothos::Callable strLenCopy1 = Pothos::Callable(strLen); POTHOS_TEST_EQUAL(2, strLenCopy1.call<long>(std::string("!!"))); //test multiple args Pothos::Callable add(&TestClass::add); POTHOS_TEST_EQUAL(32, add.call<long>(int(10), unsigned(22))); std::cout << add.toString() << std::endl; } /*********************************************************************** * Test binding constructors **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_constructors) { auto constructor0 = Pothos::Callable::factory<TestClass>(); POTHOS_TEST_TRUE(constructor0.type(-1) == typeid(TestClass)); auto constructor1 = Pothos::Callable::factory<TestClass, const int>(); POTHOS_TEST_TRUE(constructor1.type(-1) == typeid(TestClass)); POTHOS_TEST_TRUE(constructor1.type(0) == typeid(int)); auto constructor2 = Pothos::Callable::factory<TestClass, long, std::string>(); POTHOS_TEST_TRUE(constructor2.type(-1) == typeid(TestClass)); POTHOS_TEST_TRUE(constructor2.type(0) == typeid(long)); POTHOS_TEST_TRUE(constructor2.type(1) == typeid(std::string)); TestClass test0 = constructor0.call<TestClass>(); POTHOS_TEST_EQUAL(test0.getBar(), int(-1)); TestClass test1 = constructor1.call<TestClass>(int(42)); 
POTHOS_TEST_EQUAL(test1.getBar(), int(42)); TestClass test2 = constructor1.call<TestClass>(long(21), std::string("test")); POTHOS_TEST_EQUAL(test2.getBar(), int(21)); } /*********************************************************************** * Test binding overloaded **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_overloaded) { //bind a function (static method) auto overloaded0 = Pothos::Callable::make<int, TestClass, int>(&TestClass::overloaded); auto overloaded1 = Pothos::Callable::make<int, TestClass, const long &>(&TestClass::overloaded); auto overloaded2 = Pothos::Callable::make<void, TestClass, const std::string &>(&TestClass::overloaded); POTHOS_TEST_TRUE(overloaded0.type(-1) == typeid(int)); POTHOS_TEST_TRUE(overloaded1.type(-1) == typeid(int)); POTHOS_TEST_TRUE(overloaded2.type(-1) == typeid(void)); TestClass test; POTHOS_TEST_EQUAL(overloaded0.call<int>(std::ref(test), int(0)), 0); POTHOS_TEST_EQUAL(overloaded1.call<int>(std::ref(test), long(0)), 1); } /*********************************************************************** * Test binding arguments **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_bind) { //bind the class instance into set and get Pothos::Callable setBar(&TestClass::setBar); Pothos::Callable getBar(&TestClass::getBar); TestClass test; setBar.bind(std::ref(test), 0); getBar.bind(std::ref(test), 0); setBar.call(int(42)); POTHOS_TEST_EQUAL(42, getBar.call<int>()); //bind and unbind arguments for add Pothos::Callable add(&TestClass::add); add.bind(unsigned(11), 1); POTHOS_TEST_EQUAL(21, add.call<long>(int(10))); add.bind(int(33), 0); POTHOS_TEST_EQUAL(44, add.call<long>()); add.unbind(1); POTHOS_TEST_EQUAL(43, add.call<long>(unsigned(10))); //test type() and numArgs() logic with many args Pothos::Callable addMany(&TestClass::addMany); std::cout << addMany.toString() << std::endl; 
POTHOS_TEST_EQUAL(addMany.getNumArgs(), 5); POTHOS_TEST_TRUE(addMany.type(-1) == typeid(long long)); POTHOS_TEST_TRUE(addMany.type(0) == typeid(int)); POTHOS_TEST_TRUE(addMany.type(1) == typeid(unsigned)); POTHOS_TEST_TRUE(addMany.type(2) == typeid(long)); POTHOS_TEST_TRUE(addMany.type(3) == typeid(char)); POTHOS_TEST_TRUE(addMany.type(4) == typeid(short)); POTHOS_TEST_THROWS(addMany.type(5), Pothos::CallableArgumentError); //bind something, recheck type() and numArgs() addMany.bind(long(42), 2); POTHOS_TEST_EQUAL(addMany.getNumArgs(), 4); POTHOS_TEST_TRUE(addMany.type(-1) == typeid(long long)); POTHOS_TEST_TRUE(addMany.type(0) == typeid(int)); POTHOS_TEST_TRUE(addMany.type(1) == typeid(unsigned)); POTHOS_TEST_TRUE(addMany.type(2) == typeid(char)); POTHOS_TEST_TRUE(addMany.type(3) == typeid(short)); POTHOS_TEST_THROWS(addMany.type(4), Pothos::CallableArgumentError); //bind something, recheck type() and numArgs() addMany.bind(long(21), 0); POTHOS_TEST_EQUAL(addMany.getNumArgs(), 3); POTHOS_TEST_TRUE(addMany.type(-1) == typeid(long long)); POTHOS_TEST_TRUE(addMany.type(0) == typeid(unsigned)); POTHOS_TEST_TRUE(addMany.type(1) == typeid(char)); POTHOS_TEST_TRUE(addMany.type(2) == typeid(short)); POTHOS_TEST_THROWS(addMany.type(3), Pothos::CallableArgumentError); } /*********************************************************************** * Test throwing **********************************************************************/ POTHOS_TEST_BLOCK("/callable/tests", test_callable_throwing) { Pothos::Callable itsGonnaThrow(&TestClass::itsGonnaThrow); POTHOS_TEST_THROWS(itsGonnaThrow.call(int(42)), std::runtime_error); }
34.435644
108
0.608875
[ "object" ]
0fc8a99ded64198ca78c63d4cf8b0b344c5ed53e
9,530
cpp
C++
storage/source/net/message/session/opening/CloseChunkFileMsgEx.cpp
congweitao/congfs
54cedf484f8a2cacab567fe182cc1f6413c25cf2
[ "BSD-3-Clause" ]
null
null
null
storage/source/net/message/session/opening/CloseChunkFileMsgEx.cpp
congweitao/congfs
54cedf484f8a2cacab567fe182cc1f6413c25cf2
[ "BSD-3-Clause" ]
null
null
null
storage/source/net/message/session/opening/CloseChunkFileMsgEx.cpp
congweitao/congfs
54cedf484f8a2cacab567fe182cc1f6413c25cf2
[ "BSD-3-Clause" ]
null
null
null
#include <common/net/message/control/GenericResponseMsg.h> #include <common/net/message/session/opening/CloseChunkFileRespMsg.h> #include <common/toolkit/SessionTk.h> #include <net/msghelpers/MsgHelperIO.h> #include <program/Program.h> #include <toolkit/StorageTkEx.h> #include "CloseChunkFileMsgEx.h" #include <boost/lexical_cast.hpp> bool CloseChunkFileMsgEx::processIncoming(ResponseContext& ctx) { App* app = Program::getApp(); FhgfsOpsErr closeMsgRes; DynamicAttribs dynAttribs; std::tie(closeMsgRes, dynAttribs) = close(ctx); // if closeMsgRes == FhgfsOpsErr_COMMUNICATION, a GenericResponseMsg has conn sent already if (closeMsgRes != FhgfsOpsErr_COMMUNICATION) ctx.sendResponse( CloseChunkFileRespMsg(closeMsgRes, dynAttribs.filesize, dynAttribs.allocedBlocks, dynAttribs.modificationTimeSecs, dynAttribs.lastAccessTimeSecs, dynAttribs.storageVersion) ); // update op counters app->getNodeOpStats()->updateNodeOp(ctx.getSocket()->getPeerIP(), StorageOpCounter_CLOSELOCAL, getMsgHeaderUserID() ); return true; } std::pair<FhgfsOpsErr, CloseChunkFileMsgEx::DynamicAttribs> CloseChunkFileMsgEx::close( ResponseContext& ctx) { const char* logContext = "CloseChunkFileMsg incoming"; App* app = Program::getApp(); Config* config = app->getConfig(); SessionStore* sessions = app->getSessions(); uint16_t targetID; FhgfsOpsErr closeMsgRes = FhgfsOpsErr_SUCCESS; // the result that will be sent to requestor DynamicAttribs dynAttribs = {0, 0, 0, 0, 0}; std::string fileHandleID(getFileHandleID() ); bool isMirrorSession = isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR); SessionLocalFileStore* sessionLocalFiles; // select the right targetID targetID = getTargetID(); if(isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR) ) { // given targetID refers to a buddy mirror group MirrorBuddyGroupMapper* mirrorBuddies = app->getMirrorBuddyGroupMapper(); targetID = isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR_SECOND) ? 
mirrorBuddies->getSecondaryTargetID(targetID) : mirrorBuddies->getPrimaryTargetID(targetID); if(unlikely(!targetID) ) { // unknown target LogContext(logContext).logErr("Invalid mirror buddy group ID: " + StringTk::uintToStr(getTargetID() ) ); return {FhgfsOpsErr_UNKNOWNTARGET, {}}; } } // forward to secondary (if appropriate) closeMsgRes = forwardToSecondary(ctx); if (unlikely(closeMsgRes != FhgfsOpsErr_SUCCESS)) return {closeMsgRes, dynAttribs}; auto session = sessions->referenceOrAddSession(getSessionID()); sessionLocalFiles = session->getLocalFiles(); auto fsState = sessionLocalFiles->removeSession(fileHandleID, targetID, isMirrorSession); // get current dynamic file attribs if (fsState) { // file no longer in use => refresh filesize and close file fd auto& fd = fsState->getFD(); /* get dynamic attribs, here before closing the file. * Note: Depending on the underlying file system the returned st_blocks might be too large * (pre-allocated blocks, which are only released on close() ). Advantage here is * that we already have the file descriptor. */ if( (config->getTuneEarlyStat() ) && (!isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_NODYNAMICATTRIBS) ) ) getDynamicAttribsByFD(*fd, fileHandleID, targetID, dynAttribs); // close fd if (!fsState->close()) closeMsgRes = FhgfsOpsErr_INTERNAL; // only get the attributes here, in order to make xfs to release pre-allocated blocks if( (!config->getTuneEarlyStat() ) && (!isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_NODYNAMICATTRIBS) ) ) getDynamicAttribsByPath(fileHandleID, targetID, dynAttribs); } else if(!isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_NODYNAMICATTRIBS) ) { // file still in use by other threads => get dynamic attribs by path bool getRes = getDynamicAttribsByPath(fileHandleID, targetID, dynAttribs); if (getRes) { // LogContext(logContext).log(Log_DEBUG, "Chunk file virtually closed. " // "HandleID: " + fileHandleID); } } // note: "file not exists" is not an error. 
we just have nothing to do in that case. return {closeMsgRes, dynAttribs}; } /** * If this is a buddy mirror msg and we are the primary, forward this msg to secondary. * * @return _COMMUNICATION if forwarding to buddy failed and buddy is not marked offline (in which * case *outChunkLocked==false is guaranteed). * @throw SocketException if sending of GenericResponseMsg fails. */ FhgfsOpsErr CloseChunkFileMsgEx::forwardToSecondary(ResponseContext& ctx) { const char* logContext = "CloseChunkFileMsg incoming (forward to secondary)"; App* app = Program::getApp(); if(!isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR) || isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR_SECOND) ) return FhgfsOpsErr_SUCCESS; // nothing to do // instead of creating a new msg object, we just re-use "this" with "buddymirror second" flag addMsgHeaderFeatureFlag(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR_SECOND); RequestResponseArgs rrArgs(NULL, this, NETMSGTYPE_CloseChunkFileResp); RequestResponseTarget rrTarget(getTargetID(), app->getTargetMapper(), app->getStorageNodes(), app->getTargetStateStore(), app->getMirrorBuddyGroupMapper(), true); FhgfsOpsErr commRes = MessagingTk::requestResponseTarget(&rrTarget, &rrArgs); // remove the flag that we just added for secondary unsetMsgHeaderFeatureFlag(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR_SECOND); if(unlikely( (commRes == FhgfsOpsErr_COMMUNICATION) && (rrTarget.outTargetReachabilityState == TargetReachabilityState_OFFLINE) ) ) { LOG_DEBUG(logContext, Log_DEBUG, std::string("Secondary is offline and will need resync. ") + "mirror buddy group ID: " + StringTk::uintToStr(getTargetID() ) );; return FhgfsOpsErr_SUCCESS; // go ahead with local msg processing } if(unlikely(commRes != FhgfsOpsErr_SUCCESS) ) { LogContext(logContext).log(Log_DEBUG, "Forwarding failed. 
" "mirror buddy group ID: " + StringTk::uintToStr(getTargetID() ) + "; " "error: " + boost::lexical_cast<std::string>(commRes)); std::string genericRespStr = "Communication with secondary failed. " "mirror buddy group ID: " + StringTk::uintToStr(getTargetID() ); ctx.sendResponse( GenericResponseMsg(GenericRespMsgCode_INDIRECTCOMMERR, std::move(genericRespStr))); return FhgfsOpsErr_COMMUNICATION; } CloseChunkFileRespMsg* respMsg = (CloseChunkFileRespMsg*)rrArgs.outRespMsg.get(); FhgfsOpsErr secondaryRes = respMsg->getResult(); if(unlikely(secondaryRes != FhgfsOpsErr_SUCCESS) ) { LogContext(logContext).log(Log_NOTICE, std::string("Secondary reported error: ") + boost::lexical_cast<std::string>(secondaryRes) + "; " "mirror buddy group ID: " + StringTk::uintToStr(getTargetID() ) ); return secondaryRes; } return FhgfsOpsErr_SUCCESS; } bool CloseChunkFileMsgEx::getDynamicAttribsByFD(const int fd, std::string fileHandleID, uint16_t targetID, DynamicAttribs& outDynAttribs) { SyncedStoragePaths* syncedPaths = Program::getApp()->getSyncedStoragePaths(); std::string fileID(SessionTk::fileIDFromHandleID(fileHandleID) ); uint64_t storageVersion = syncedPaths->lockPath(fileID, targetID); // LOCK // note: this is locked because we need to get the filesize together with the storageVersion bool getDynAttribsRes = StorageTkEx::getDynamicFileAttribs(fd, &outDynAttribs.filesize, &outDynAttribs.allocedBlocks, &outDynAttribs.modificationTimeSecs, &outDynAttribs.lastAccessTimeSecs); if(getDynAttribsRes) outDynAttribs.storageVersion = storageVersion; syncedPaths->unlockPath(fileID, targetID); // UNLOCK return getDynAttribsRes; } bool CloseChunkFileMsgEx::getDynamicAttribsByPath(std::string fileHandleID, uint16_t targetID, DynamicAttribs& outDynAttribs) { const char* logContext = "CloseChunkFileMsg (attribs by path)"; App* app = Program::getApp(); SyncedStoragePaths* syncedPaths = app->getSyncedStoragePaths(); auto* const target = app->getStorageTargets()->getTarget(targetID); if (!target) 
{ // unknown targetID LogContext(logContext).logErr("Unknown targetID: " + StringTk::uintToStr(targetID) ); return false; } const int targetFD = isMsgHeaderFeatureFlagSet(CLOSECHUNKFILEMSG_FLAG_BUDDYMIRROR) ? *target->getMirrorFD() : *target->getChunkFD(); std::string fileID = SessionTk::fileIDFromHandleID(fileHandleID); std::string pathStr = StorageTk::getFileChunkPath(getPathInfo(), fileID); uint64_t storageVersion = syncedPaths->lockPath(fileID, targetID); // L O C K path // note: this is locked because we need to get the filesize together with the storageVersion bool getDynAttribsRes = StorageTkEx::getDynamicFileAttribs(targetFD, pathStr.c_str(), &outDynAttribs.filesize, &outDynAttribs.allocedBlocks, &outDynAttribs.modificationTimeSecs, &outDynAttribs.lastAccessTimeSecs); if(getDynAttribsRes) outDynAttribs.storageVersion = storageVersion; syncedPaths->unlockPath(fileID, targetID); // U N L O C K path return getDynAttribsRes; }
37.667984
99
0.726653
[ "object" ]
0fcb8537815f3e7dbe146a835c808200a4eaa086
3,462
cpp
C++
source/slang/slang-ir-generics-lowering-context.cpp
micmo-fb/slang
61be38f39cc96ad9644f17f6ab8d262875e99e9e
[ "MIT" ]
null
null
null
source/slang/slang-ir-generics-lowering-context.cpp
micmo-fb/slang
61be38f39cc96ad9644f17f6ab8d262875e99e9e
[ "MIT" ]
null
null
null
source/slang/slang-ir-generics-lowering-context.cpp
micmo-fb/slang
61be38f39cc96ad9644f17f6ab8d262875e99e9e
[ "MIT" ]
null
null
null
//slang-ir-generics-lowering-context.cpp #include "slang-ir-generics-lowering-context.h" #include "slang-ir-layout.h" namespace Slang { bool isPolymorphicType(IRInst* typeInst) { if (as<IRParam>(typeInst) && as<IRTypeType>(typeInst->getDataType())) return true; switch (typeInst->op) { case kIROp_ThisType: case kIROp_AssociatedType: case kIROp_InterfaceType: case kIROp_lookup_interface_method: return true; case kIROp_Specialize: { for (UInt i = 0; i < typeInst->getOperandCount(); i++) { if (isPolymorphicType(typeInst->getOperand(i))) return true; } return false; } default: break; } if (auto ptrType = as<IRPtrTypeBase>(typeInst)) { return isPolymorphicType(ptrType->getValueType()); } return false; } bool isTypeValue(IRInst* typeInst) { if (typeInst) { switch (typeInst->op) { case kIROp_TypeType: return true; case kIROp_lookup_interface_method: return typeInst->getDataType()->op == kIROp_TypeKind; default: return false; } } return false; } IRInst* SharedGenericsLoweringContext::maybeEmitRTTIObject(IRInst* typeInst) { IRInst* result = nullptr; if (mapTypeToRTTIObject.TryGetValue(typeInst, result)) return result; IRBuilder builderStorage; auto builder = &builderStorage; builder->sharedBuilder = &sharedBuilderStorage; builder->setInsertBefore(typeInst->next); result = builder->emitMakeRTTIObject(typeInst); // For now the only type info we encapsualte is type size. IRSizeAndAlignment sizeAndAlignment; getNaturalSizeAndAlignment((IRType*)typeInst, &sizeAndAlignment); builder->addRTTITypeSizeDecoration(result, sizeAndAlignment.size); // Give a name to the rtti object. 
if (auto exportDecoration = typeInst->findDecoration<IRExportDecoration>()) { String rttiObjName = String(exportDecoration->getMangledName()) + "_rtti"; builder->addExportDecoration(result, rttiObjName.getUnownedSlice()); } mapTypeToRTTIObject[typeInst] = result; return result; } IRInst* SharedGenericsLoweringContext::findInterfaceRequirementVal(IRInterfaceType* interfaceType, IRInst* requirementKey) { if (auto dict = mapInterfaceRequirementKeyValue.TryGetValue(interfaceType)) return (*dict)[requirementKey].GetValue(); _builldInterfaceRequirementMap(interfaceType); return findInterfaceRequirementVal(interfaceType, requirementKey); } void SharedGenericsLoweringContext::_builldInterfaceRequirementMap(IRInterfaceType* interfaceType) { mapInterfaceRequirementKeyValue.Add(interfaceType, Dictionary<IRInst*, IRInst*>()); auto dict = mapInterfaceRequirementKeyValue.TryGetValue(interfaceType); for (UInt i = 0; i < interfaceType->getOperandCount(); i++) { auto entry = cast<IRInterfaceRequirementEntry>(interfaceType->getOperand(i)); (*dict)[entry->getRequirementKey()] = entry->getRequirementVal(); } } }
33.61165
126
0.622184
[ "object" ]
0fcfc019c021a3ae28e840b0d478d960ac03120e
1,143
cpp
C++
tree/vertical traversal.cpp
ishan0805/gfg_must_do
10af0a6d37d0ac338c7bd235e7aa6190abc9d750
[ "MIT" ]
null
null
null
tree/vertical traversal.cpp
ishan0805/gfg_must_do
10af0a6d37d0ac338c7bd235e7aa6190abc9d750
[ "MIT" ]
null
null
null
tree/vertical traversal.cpp
ishan0805/gfg_must_do
10af0a6d37d0ac338c7bd235e7aa6190abc9d750
[ "MIT" ]
null
null
null
class Solution { public: // Function to find the vertical order traversal of Binary Tree. vector<int> verticalOrder(Node *root) { queue<pair<Node *, int>> q; q.push({root, 0}); unordered_map<int, vector<int>> m; int first = 0, last = 0, curr = 0; while (!q.empty()) { int s = q.size(); while (s--) { auto p = q.front(); q.pop(); auto r = p.first; int l = p.second; m[l].push_back(r->data); if (r->left != nullptr) { first = min(first, l - 1); q.push({r->left, l - 1}); } if (r->right != nullptr) { last = max(last, l + 1); q.push({r->right, l + 1}); } } } vector<int> ans; for (int i = first; i <= last; i++) { for (auto ele : m[i]) { ans.push_back(ele); } } return ans; } };
26.581395
68
0.349081
[ "vector" ]
0fd0270ea04f05ebe20c89c5c593aabf02e21464
1,556
cpp
C++
src/cpp/my-test.cpp
hamadakafu/rsTFHE
459bce310b3b12d3c40af02b6324457d99ce2591
[ "Apache-2.0" ]
3
2020-11-24T06:18:49.000Z
2022-02-22T14:05:59.000Z
src/cpp/my-test.cpp
hamadakafu/L-2
459bce310b3b12d3c40af02b6324457d99ce2591
[ "Apache-2.0" ]
null
null
null
src/cpp/my-test.cpp
hamadakafu/L-2
459bce310b3b12d3c40af02b6324457d99ce2591
[ "Apache-2.0" ]
null
null
null
#include <stdint.h> #include <cmath> #include <string> #include <cassert> #include <vector> #include <iostream> #include <cstdlib> #include <complex> #include "spqlios-fft.h" using namespace std; int32_t main(int32_t argc, char** argv) { static const int32_t nn = 32; void* tables = new_fft_table(nn); void* itables = new_ifft_table(nn); double* buf_fft = fft_table_get_buffer(tables); double* buf_ifft = ifft_table_get_buffer(itables); double* a = new double[nn]; double* a2 = new double[nn]; double* b = new double[nn]; for (int32_t i=0; i<nn; i++) a[i]=i; for (int32_t i=0; i<nn; i++) a2[i]=i; for (int32_t i=0; i<nn; i++) b[i]=a[i]; printf("before fft\n"); for (int32_t i=0; i<nn; i++) printf("%lf ", a[i]); printf("\n"); for (int32_t i=0; i<nn; i++) buf_fft[i]=a[i]; fft_model(tables); for (int32_t i=0; i<nn; i++) a[i]=buf_fft[i]; for (int32_t i=0; i<nn; i++) buf_fft[i]=a2[i]; fft(tables,buf_fft); for (int32_t i=0; i<nn; i++) a2[i]=buf_fft[i]; printf("after fft\n"); for (int32_t i=0; i<nn; i++) printf("a: %lf, a2: %lf\n", a[i], a2[i]); printf("\n"); printf("before ifft\n"); for (int32_t i=0; i<nn; i++) buf_ifft[i]=a[i]; ifft_model(itables); for (int32_t i=0; i<nn; i++) a[i]=buf_ifft[i]; for (int32_t i=0; i<nn; i++) buf_ifft[i]=a2[i]; ifft(itables,buf_ifft); for (int32_t i=0; i<nn; i++) a2[i]=buf_ifft[i]; printf("after ifft\n"); for (int32_t i=0; i<nn; i++) printf("%lf ", a2[i]); printf("\n"); }
26.827586
74
0.577763
[ "vector" ]
0fd0994ed64b41a501bc9a7736ddccf31d47dced
1,258
cpp
C++
LeetCode/DynamicProgramming/JumpGame.cpp
a4org/Angorithm4
d2227d36608491bed270375bcea67fbde134209a
[ "MIT" ]
null
null
null
LeetCode/DynamicProgramming/JumpGame.cpp
a4org/Angorithm4
d2227d36608491bed270375bcea67fbde134209a
[ "MIT" ]
null
null
null
LeetCode/DynamicProgramming/JumpGame.cpp
a4org/Angorithm4
d2227d36608491bed270375bcea67fbde134209a
[ "MIT" ]
null
null
null
/* * LeetCode 55 Jump Game * Medium * Jiawei Wang * 2021 8.11 */ /* Revision * $1 2021.10.3 Jiawei Wang * $2 2022.5.17 Jiawei Wang * */ #include <vector> using namespace::std; class Solution { public: bool canJump(vector<int>& nums) { int maxjump = 0; int size = nums.size(); for (int i = 0; i < size; i++) { if (maxjump < i) return false; maxjump = max(i + nums[i], maxjump); if (maxjump >= size-1) return true; } return false; } // dp[i] => the maximum jump distance in index i bool canJump2(vector<int>& nums) { int n = nums.size(); vector<int> dp(n); if (n == 1 || n == 0) return true; if (nums[0] == 0) return false; dp[0] = nums[0]; for (int i = 1; i < n-1; i++) { dp[i] = max(dp[i-1]-1, nums[i]); if (dp[i] == 0) return false; } return (dp[n-2] > 0); } bool canJump3(vector<int>& nums) { // the current state only depends on prev state int n = nums.size(); if (n == 1 || n == 0) return true; int curr = nums[0]; for (int i = 1; i < n-1; i++) { if (curr == 0) return false; curr = max(curr-1, nums[i]); } return curr > 0; } };
19.353846
53
0.491256
[ "vector" ]
0fd0be1750fab9cb1888c4ce2dc8bfcf9d3118ab
856
cpp
C++
Ch 10/10.34_35_36_37.cpp
Felon03/CppPrimer
7dc2daf59f0ae7ec5670def15cb5fab174fe9780
[ "Apache-2.0" ]
null
null
null
Ch 10/10.34_35_36_37.cpp
Felon03/CppPrimer
7dc2daf59f0ae7ec5670def15cb5fab174fe9780
[ "Apache-2.0" ]
null
null
null
Ch 10/10.34_35_36_37.cpp
Felon03/CppPrimer
7dc2daf59f0ae7ec5670def15cb5fab174fe9780
[ "Apache-2.0" ]
null
null
null
#include<iostream> #include<vector> #include<list> #include<algorithm> using namespace std; int main() { vector<int> vi{ 0,1,2,3,4,5,6,7,8,9 }; // 10.34 使用reverse_iterator逆序打印一个vector for (auto r_iter = vi.crbegin(); r_iter != vi.crend(); ++r_iter) cout << *r_iter << " "; cout << endl; // 10.35 使用普通迭代器逆序打印一个vector for (auto iter = vi.end(); iter != vi.begin();) cout << *--iter << " "; cout << endl; // 10.36 使用find在一个int的list中查找最后一个值为0的元素 list<int> lst{ 0,1,2,3,4,5,6,7,8,9,0,9,7,6,4,3,2,1,0,12,3,4 }; auto iter = find(lst.crbegin(), lst.crend(), 0); cout << "Find "<<*iter << " in front of " << *iter.base() << endl; // 10.37 给定一个包含10个元素的vector,将位置3到7之间的元素按逆序拷贝到一个list中 list<int> ls(7 - 3 + 1); reverse_copy(vi.cbegin() + 3, vi.cbegin() + 8, ls.begin()); for (const auto &x : ls) cout << x << " "; cout << endl; return 0; }
25.176471
67
0.607477
[ "vector" ]
0fd10977e2534b07f0b113be7140ced0cabac8af
7,339
cc
C++
rtc_base/async_resolver.cc
shiguredo/libwebrtc
a52fc6f940ae22c7781dcbf2ff765ea648e98c04
[ "BSD-3-Clause" ]
14
2016-06-22T06:38:23.000Z
2022-01-26T16:39:17.000Z
rtc_base/async_resolver.cc
shiguredo/libwebrtc
a52fc6f940ae22c7781dcbf2ff765ea648e98c04
[ "BSD-3-Clause" ]
1
2021-02-18T00:57:11.000Z
2021-02-18T00:57:11.000Z
rtc_base/async_resolver.cc
shiguredo/libwebrtc
a52fc6f940ae22c7781dcbf2ff765ea648e98c04
[ "BSD-3-Clause" ]
6
2019-10-28T15:13:36.000Z
2021-11-18T06:43:08.000Z
/* * Copyright 2008 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "rtc_base/async_resolver.h" #include <memory> #include <string> #include <utility> #include "api/ref_counted_base.h" #include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #if defined(WEBRTC_WIN) #include <ws2spi.h> #include <ws2tcpip.h> #include "rtc_base/win32.h" #endif #if defined(WEBRTC_POSIX) && !defined(__native_client__) #if defined(WEBRTC_ANDROID) #include "rtc_base/ifaddrs_android.h" #else #include <ifaddrs.h> #endif #endif // defined(WEBRTC_POSIX) && !defined(__native_client__) #include "api/task_queue/task_queue_base.h" #include "rtc_base/ip_address.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread.h" #include "rtc_base/task_queue.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/third_party/sigslot/sigslot.h" // for signal_with_thread... #if defined(WEBRTC_MAC) || defined(WEBRTC_IOS) #include <dispatch/dispatch.h> #endif namespace rtc { #if defined(WEBRTC_MAC) || defined(WEBRTC_IOS) namespace { void GlobalGcdRunTask(void* context) { std::unique_ptr<webrtc::QueuedTask> task( static_cast<webrtc::QueuedTask*>(context)); task->Run(); } // Post a task into the system-defined global concurrent queue. 
void PostTaskToGlobalQueue(std::unique_ptr<webrtc::QueuedTask> task) { dispatch_queue_global_t global_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); webrtc::QueuedTask* context = task.release(); dispatch_async_f(global_queue, context, &GlobalGcdRunTask); } } // namespace #endif int ResolveHostname(const std::string& hostname, int family, std::vector<IPAddress>* addresses) { #ifdef __native_client__ RTC_NOTREACHED(); RTC_LOG(LS_WARNING) << "ResolveHostname() is not implemented for NaCl"; return -1; #else // __native_client__ if (!addresses) { return -1; } addresses->clear(); struct addrinfo* result = nullptr; struct addrinfo hints = {0}; hints.ai_family = family; // `family` here will almost always be AF_UNSPEC, because `family` comes from // AsyncResolver::addr_.family(), which comes from a SocketAddress constructed // with a hostname. When a SocketAddress is constructed with a hostname, its // family is AF_UNSPEC. However, if someday in the future we construct // a SocketAddress with both a hostname and a family other than AF_UNSPEC, // then it would be possible to get a specific family value here. 
// The behavior of AF_UNSPEC is roughly "get both ipv4 and ipv6", as // documented by the various operating systems: // Linux: http://man7.org/linux/man-pages/man3/getaddrinfo.3.html // Windows: https://msdn.microsoft.com/en-us/library/windows/desktop/ // ms738520(v=vs.85).aspx // Mac: https://developer.apple.com/legacy/library/documentation/Darwin/ // Reference/ManPages/man3/getaddrinfo.3.html // Android (source code, not documentation): // https://android.googlesource.com/platform/bionic/+/ // 7e0bfb511e85834d7c6cb9631206b62f82701d60/libc/netbsd/net/getaddrinfo.c#1657 hints.ai_flags = AI_ADDRCONFIG; int ret = getaddrinfo(hostname.c_str(), nullptr, &hints, &result); if (ret != 0) { return ret; } struct addrinfo* cursor = result; for (; cursor; cursor = cursor->ai_next) { if (family == AF_UNSPEC || cursor->ai_family == family) { IPAddress ip; if (IPFromAddrInfo(cursor, &ip)) { addresses->push_back(ip); } } } freeaddrinfo(result); return 0; #endif // !__native_client__ } struct AsyncResolver::State : public RefCountedBase { webrtc::Mutex mutex; enum class Status { kLive, kDead } status RTC_GUARDED_BY(mutex) = Status::kLive; }; AsyncResolver::AsyncResolver() : error_(-1), state_(new State) {} AsyncResolver::~AsyncResolver() { RTC_DCHECK_RUN_ON(&sequence_checker_); // Ensure the thread isn't using a stale reference to the current task queue, // or calling into ResolveDone post destruction. 
webrtc::MutexLock lock(&state_->mutex); state_->status = State::Status::kDead; } void RunResolution(void* obj) { std::function<void()>* function_ptr = static_cast<std::function<void()>*>(obj); (*function_ptr)(); delete function_ptr; } void AsyncResolver::Start(const SocketAddress& addr) { RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK(!destroy_called_); addr_ = addr; auto thread_function = [this, addr, caller_task_queue = webrtc::TaskQueueBase::Current(), state = state_] { std::vector<IPAddress> addresses; int error = ResolveHostname(addr.hostname().c_str(), addr.family(), &addresses); webrtc::MutexLock lock(&state->mutex); if (state->status == State::Status::kLive) { caller_task_queue->PostTask(webrtc::ToQueuedTask( [this, error, addresses = std::move(addresses), state] { bool live; { // ResolveDone can lead to instance destruction, so make sure // we don't deadlock. webrtc::MutexLock lock(&state->mutex); live = state->status == State::Status::kLive; } if (live) { RTC_DCHECK_RUN_ON(&sequence_checker_); ResolveDone(std::move(addresses), error); } })); } }; #if defined(WEBRTC_MAC) || defined(WEBRTC_IOS) PostTaskToGlobalQueue(webrtc::ToQueuedTask(std::move(thread_function))); #else PlatformThread::SpawnDetached(std::move(thread_function), "AsyncResolver"); #endif } bool AsyncResolver::GetResolvedAddress(int family, SocketAddress* addr) const { RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK(!destroy_called_); if (error_ != 0 || addresses_.empty()) return false; *addr = addr_; for (size_t i = 0; i < addresses_.size(); ++i) { if (family == addresses_[i].family()) { addr->SetResolvedIP(addresses_[i]); return true; } } return false; } int AsyncResolver::GetError() const { RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK(!destroy_called_); return error_; } void AsyncResolver::Destroy(bool wait) { // Some callers have trouble guaranteeing that Destroy is called on the // sequence guarded by `sequence_checker_`. 
// RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK(!destroy_called_); destroy_called_ = true; MaybeSelfDestruct(); } const std::vector<IPAddress>& AsyncResolver::addresses() const { RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK(!destroy_called_); return addresses_; } void AsyncResolver::ResolveDone(std::vector<IPAddress> addresses, int error) { addresses_ = addresses; error_ = error; recursion_check_ = true; SignalDone(this); MaybeSelfDestruct(); } void AsyncResolver::MaybeSelfDestruct() { if (!recursion_check_) { delete this; } else { recursion_check_ = false; } } } // namespace rtc
31.229787
80
0.694236
[ "vector" ]
0fd28f595566eb563b5b29acc9c4ef18952f9c4a
1,338
cpp
C++
cpp-leetcode/leetcode19-remove-nth-node-from-end-of-list-solution1.cpp
yanglr/LeetCodeOJ
27dd1e4a2442b707deae7921e0118752248bef5e
[ "MIT" ]
45
2021-07-25T00:45:43.000Z
2022-03-24T05:10:43.000Z
cpp-leetcode/leetcode19-remove-nth-node-from-end-of-list-solution1.cpp
yanglr/LeetCodeOJ
27dd1e4a2442b707deae7921e0118752248bef5e
[ "MIT" ]
null
null
null
cpp-leetcode/leetcode19-remove-nth-node-from-end-of-list-solution1.cpp
yanglr/LeetCodeOJ
27dd1e4a2442b707deae7921e0118752248bef5e
[ "MIT" ]
15
2021-07-25T00:40:52.000Z
2021-12-27T06:25:31.000Z
#include <iostream> #include <algorithm> #include <vector> #include <string> using namespace std; /** * Definition for singly-linked list. */ struct ListNode { int val; ListNode *next; ListNode() : val(0), next(nullptr) {} ListNode(int x) : val(x), next(nullptr) {} ListNode(int x, ListNode *next) : val(x), next(next) {} }; class Solution { public: ListNode* removeNthFromEnd(ListNode* head, int n) { int len = 0; ListNode* p = head; while(p != NULL) { p = p -> next; len++; } if (n == len) { ListNode* newHead = head -> next; delete head; // 释放被删除节点占用的内存 return newHead; } p = head; for (int i = 1; i < len - n; i++) /* 注意 for能跑的区间长度是 len - n - 1, 到要删除的节点的前一个就停下来 */ { p = p -> next; } ListNode* toDel = p -> next; p->next = p->next->next; delete toDel; // 释放被删除节点占用的内存 return head; } }; // Test int main() { Solution sol; ListNode *l1; l1 = new ListNode(1); l1->next = new ListNode(2); int k = 2; ListNode* p = sol.removeNthFromEnd(l1, k); while (p != NULL) { cout << p -> val << endl; p = p -> next; } return 0; }
19.391304
92
0.479073
[ "vector" ]
0fd2f1f6d5f13c6abd860c64126b5f3bdc4cb340
1,722
hpp
C++
src/io/MultiThreadedFileCollector.hpp
slide-lig/plcmpp
d2f5d8e2d0543023faf24b081b6085e42778b73a
[ "Apache-2.0" ]
null
null
null
src/io/MultiThreadedFileCollector.hpp
slide-lig/plcmpp
d2f5d8e2d0543023faf24b081b6085e42778b73a
[ "Apache-2.0" ]
null
null
null
src/io/MultiThreadedFileCollector.hpp
slide-lig/plcmpp
d2f5d8e2d0543023faf24b081b6085e42778b73a
[ "Apache-2.0" ]
null
null
null
/******************************************************************************* * Copyright (c) 2014 Etienne Dublé, Martin Kirchgessner, Vincent Leroy, Alexandre Termier, CNRS and Université Joseph Fourier. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * or see the LICENSE.txt file joined with this program. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #pragma once #include <thread> #include <map> #include <string> using namespace std; #include <io/PatternsCollector.hpp> namespace io { class FileCollector; /** * A thread safe PatternsCollector that will write to multiple files, one per mining thread. */ class MultiThreadedFileCollector : public PatternsCollector { private: map<thread::id, FileCollector*> *collectors; string _prefix; FileCollector *getCollectorOfCurrentThread(); public: void collect(int32_t support, vector<int32_t>* pattern) override; int64_t close() override; int32_t getAveragePatternLength() override; /** * @param prefix * filename prefix for pattern files, each thread will append [ThreadID].dat */ MultiThreadedFileCollector(string& prefix); ~MultiThreadedFileCollector(); }; }
29.186441
127
0.671312
[ "vector" ]
0fd3395172d42838ecefb898715ea13df01afac4
1,369
cpp
C++
Libraries/LibSerenity/unistd.cpp
jcs/serenity
ed4398bca89cf0a473e2b889b0f033c2c6dac4b8
[ "BSD-2-Clause" ]
9
2020-02-05T00:09:15.000Z
2020-06-01T16:39:27.000Z
Libraries/LibSerenity/unistd.cpp
jcs/serenity
ed4398bca89cf0a473e2b889b0f033c2c6dac4b8
[ "BSD-2-Clause" ]
null
null
null
Libraries/LibSerenity/unistd.cpp
jcs/serenity
ed4398bca89cf0a473e2b889b0f033c2c6dac4b8
[ "BSD-2-Clause" ]
2
2020-03-07T16:52:37.000Z
2020-03-31T14:41:47.000Z
/* * Copyright (c) 2020 joshua stein <jcs@jcs.org> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <AK/Vector.h> #include <assert.h> #include <errno.h> #include <stdio.h> #include <string.h> #include <sys/types.h> #include <unistd.h> extern "C" { int get_process_name(char* buffer, int buffer_size) { const char *progname = getprogname(); snprintf(buffer, buffer_size, "%s", progname); return 0; } void sysbeep() { /* * TODO: could use WSKBDIO_COMPLEXBELL ioctl on openbsd, but we may not be * allowed to open /dev/wskbd or do ioctls here */ } int set_process_icon(__attribute__((unused)) int icon_id) { /* TODO */ return 0; } }
27.938776
78
0.717312
[ "vector" ]
0fd37f0c58f0e4a7b3a4f1bdbe059ab760b3d867
1,063
cpp
C++
441. Arranging Coins.cpp
yuyangh/LeetCode
5d81cbd975c0c1f2bbca0cb25cefe361a169e460
[ "MIT" ]
1
2020-10-11T08:10:53.000Z
2020-10-11T08:10:53.000Z
441. Arranging Coins.cpp
yuyangh/LeetCode
5d81cbd975c0c1f2bbca0cb25cefe361a169e460
[ "MIT" ]
null
null
null
441. Arranging Coins.cpp
yuyangh/LeetCode
5d81cbd975c0c1f2bbca0cb25cefe361a169e460
[ "MIT" ]
null
null
null
// // Created by Yuyang Huang on 7/2/20. // #include "LeetCodeLib.h" /* * 441. Arranging Coins * * https://leetcode.com/problems/arranging-coins/description/ * * algorithms * Easy * * You have a total of n coins that you want to form in a staircase shape, * where every k-th row must have exactly k coins. * Given n, find the total number of full staircase rows that can be formed. * n is a non-negative integer and fits within the range of a 32-bit signed * integer. * * Example 1: * * n = 5 * * The coins can form the following rows: * ¤ * ¤ ¤ * ¤ ¤ * * Because the 3rd row is incomplete, we return 2. * * * * Example 2: * * n = 8 * * The coins can form the following rows: * ¤ * ¤ ¤ * ¤ ¤ ¤ * ¤ ¤ * * Because the 4th row is incomplete, we return 3. * * */ // @lc code=start class Solution { public: /* * Time complexity: O(1) * Since (1 + row) * row / 2 <= n * So, (r + 0.5) ^ 2 <= 2n + 0.25 */ int arrangeCoins(int n) { return floor(sqrt(2 * static_cast<double>(n) + 0.25) - 0.5); } }; // @lc code=end
16.873016
76
0.595484
[ "shape" ]
0fda414e719d339be7457b847d1b971bd47ec917
4,354
cpp
C++
src/Core/TundraCore/Script/ScriptAsset.cpp
Adminotech/tundra
8270097dbf79c3ec1935cf66c7979eeef9c24c0e
[ "Apache-2.0" ]
null
null
null
src/Core/TundraCore/Script/ScriptAsset.cpp
Adminotech/tundra
8270097dbf79c3ec1935cf66c7979eeef9c24c0e
[ "Apache-2.0" ]
null
null
null
src/Core/TundraCore/Script/ScriptAsset.cpp
Adminotech/tundra
8270097dbf79c3ec1935cf66c7979eeef9c24c0e
[ "Apache-2.0" ]
null
null
null
// For conditions of distribution and use, see copyright notice in LICENSE #include "StableHeaders.h" #include "DebugOperatorNew.h" #include "ScriptAsset.h" #include "AssetAPI.h" #include "LoggingFunctions.h" #include <QList> #include <QDir> #include "MemoryLeakCheck.h" ScriptAsset::~ScriptAsset() { Unload(); } void ScriptAsset::DoUnload() { scriptContent = ""; references.clear(); } bool ScriptAsset::DeserializeFromData(const u8 *data, size_t numBytes, bool /*allowAsynchronous*/) { QByteArray arr((const char *)data, (int)numBytes); scriptContent = arr; ParseReferences(); assetAPI->AssetLoadCompleted(Name()); return true; } bool ScriptAsset::SerializeTo(std::vector<u8> &dst, const QString &/*serializationParameters*/) const { QByteArray arr(scriptContent.toStdString().c_str()); dst.clear(); dst.insert(dst.end(), arr.data(), arr.data() + arr.size()); return true; } void ScriptAsset::ParseReferences() { references.clear(); QStringList addedRefs; std::string content = scriptContent.toStdString(); sregex_iterator searchEnd; // In headless mode we don't want to mark certain asset types as // dependencies for the script, as they will fail Load() anyways QStringList ignoredAssetTypes; if (assetAPI->IsHeadless()) ignoredAssetTypes << "QtUiFile" << "Texture" << "OgreParticle" << "OgreMaterial" << "Audio"; // Script asset dependencies are expressed in code comments using lines like "// !ref: http://myserver.com/myasset.png". // The asset type can be specified using a comma: "// !ref: http://myserver.com/avatarasset.xml, Avatar". 
regex expression("!ref:\\s*(.*?)(\\s*,\\s*(.*?))?\\s*(\\n|\\r|$)"); for(sregex_iterator iter(content.begin(), content.end(), expression); iter != searchEnd; ++iter) { QString regexResult = QString::fromStdString((*iter)[1].str()); AssetReference ref; ref.ref = assetAPI->ResolveAssetRef(Name(), regexResult); if ((*iter)[3].matched) ref.type = (*iter)[3].str().c_str(); if (ignoredAssetTypes.contains(assetAPI->GetResourceTypeFromAssetRef(ref.ref))) continue; // Don't allow including our own ref, will break AssetAPI dependency code to infinite recursion. if (Name().compare(regexResult, Qt::CaseSensitive) == 0 || Name().compare(ref.ref, Qt::CaseSensitive) == 0) { LogWarning("[ScriptAsset]: Script " + Name() + " has a !ref dependency declaration to itself, this is not allowed!"); continue; } if (!addedRefs.contains(ref.ref, Qt::CaseInsensitive)) { references.push_back(ref); addedRefs << ref.ref; } } expression = regex("engine.IncludeFile\\(\\s*\"\\s*(.*?)\\s*\"\\s*\\)"); for(sregex_iterator iter(content.begin(), content.end(), expression); iter != searchEnd; ++iter) { QString regexResult = QString::fromStdString((*iter)[1].str()); // First check if this is a relative ref directly to jsmodules // We don't want to add these to the references list as it will request them via asset api // with a relative path and it will always fail (as we dont have working file:// schema etc.) // The IncludeFile function will take care of relative refs when the script is ran. if (QDir::isRelativePath(regexResult) && (regexResult.startsWith("jsmodules") || regexResult.startsWith("/jsmodules") || regexResult.startsWith("./jsmodules"))) continue; // Ask AssetAPI to resolve the ref AssetReference ref; ref.ref = assetAPI->ResolveAssetRef(Name(), regexResult); // Don't allow including our own ref, will break AssetAPI dependency code to infinite recursion. 
if (Name().compare(regexResult, Qt::CaseSensitive) == 0 || Name().compare(ref.ref, Qt::CaseSensitive) == 0) { LogWarning("[ScriptAsset]: Script " + Name() + " has engine.IncludeFile invocation to itself, this is not allowed!"); continue; } if (!addedRefs.contains(ref.ref, Qt::CaseInsensitive)) { references.push_back(ref); addedRefs << ref.ref; } } } bool ScriptAsset::IsLoaded() const { return !scriptContent.isEmpty(); }
35.688525
129
0.639412
[ "vector" ]
0fe07a394814a8eb8e5f61fbf22d8294022fc321
1,542
hpp
C++
cpp/include/cuml/common/host_buffer.hpp
lawrenceN/cuml
91abe6747ea61a5b59526f76568ea14d52814454
[ "Apache-2.0" ]
1
2021-05-12T02:51:08.000Z
2021-05-12T02:51:08.000Z
cpp/include/cuml/common/host_buffer.hpp
lawrenceN/cuml
91abe6747ea61a5b59526f76568ea14d52814454
[ "Apache-2.0" ]
null
null
null
cpp/include/cuml/common/host_buffer.hpp
lawrenceN/cuml
91abe6747ea61a5b59526f76568ea14d52814454
[ "Apache-2.0" ]
null
null
null
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/mr/device/allocator.hpp> #include <raft/mr/host/buffer.hpp> namespace MLCommon { /** * RAII object owning a contigous typed host buffer. The passed in allocator supports asynchronus allocation and * deallocation so this can be used for temporary memory * @code{.cpp} * template<typename T> * void foo( const raft::handle_t& h, const T* in_d , T* out_d, ..., cudaStream_t stream ) * { * ... * host_buffer<T> temp( handle->get_host_allocator(), stream, 0 ) * * temp.resize(n, stream); * cudaMemcpyAsync( temp.data(), in_d, temp.size()*sizeof(T), cudaMemcpyDeviceToHost ); * ... * cudaMemcpyAsync( out_d, temp.data(), temp.size()*sizeof(T), cudaMemcpyHostToDevice ); * temp.release(stream); * } * @endcode * @todo: Add missing doxygen documentation */ template <typename T> using host_buffer = raft::mr::host::buffer<T>; } // namespace MLCommon
32.125
112
0.696498
[ "object" ]
0fe13b852009790a16c0dfe348774042fda23b6e
8,870
cpp
C++
modules/detour/detour.cpp
slapin/godot-fork
5947769594fbe9d0495df92af8c368db3fa9d972
[ "CC-BY-3.0", "Apache-2.0", "MIT" ]
3
2018-05-12T13:01:29.000Z
2019-12-26T20:43:24.000Z
modules/detour/detour.cpp
slapin/godot-fork
5947769594fbe9d0495df92af8c368db3fa9d972
[ "CC-BY-3.0", "Apache-2.0", "MIT" ]
null
null
null
modules/detour/detour.cpp
slapin/godot-fork
5947769594fbe9d0495df92af8c368db3fa9d972
[ "CC-BY-3.0", "Apache-2.0", "MIT" ]
null
null
null
#include "detour.h" #include "scene/3d/mesh_instance.h" #include <Recast.h> #include <DetourNavMesh.h> static const int DEFAULT_TILE_SIZE = 128; static const float DEFAULT_CELL_SIZE = 0.3f; static const float DEFAULT_CELL_HEIGHT = 0.2f; static const float DEFAULT_AGENT_HEIGHT = 2.0f; static const float DEFAULT_AGENT_RADIUS = 0.6f; static const float DEFAULT_AGENT_MAX_CLIMB = 0.9f; static const float DEFAULT_AGENT_MAX_SLOPE = 45.0f; static const float DEFAULT_REGION_MIN_SIZE = 8.0f; static const float DEFAULT_REGION_MERGE_SIZE = 20.0f; static const float DEFAULT_EDGE_MAX_LENGTH = 12.0f; static const float DEFAULT_EDGE_MAX_ERROR = 1.3f; static const float DEFAULT_DETAIL_SAMPLE_DISTANCE = 6.0f; static const float DEFAULT_DETAIL_SAMPLE_MAX_ERROR = 1.0f; DetourNavigationMesh::DetourNavigationMesh() : Resource(), navmesh(NULL), cell_size(DEFAULT_CELL_SIZE), cell_height(DEFAULT_CELL_HEIGHT), agent_height(DEFAULT_AGENT_HEIGHT), agent_radius(DEFAULT_AGENT_RADIUS), agent_max_climb(DEFAULT_AGENT_MAX_CLIMB), agent_max_slope(DEFAULT_AGENT_MAX_SLOPE), region_min_size(DEFAULT_REGION_MIN_SIZE), region_merge_size(DEFAULT_REGION_MERGE_SIZE), edge_max_length(DEFAULT_EDGE_MAX_LENGTH), edge_max_error(DEFAULT_EDGE_MAX_ERROR), detail_sample_distance(DEFAULT_DETAIL_SAMPLE_DISTANCE), detail_sample_max_error(DEFAULT_DETAIL_SAMPLE_MAX_ERROR), tile_size(DEFAULT_TILE_SIZE) { padding = Vector3(1.0f, 1.0f, 1.0f); bounding_box = AABB(); group = ""; } void DetourNavigationMesh::collect_geometries(Node *root_node, bool recursive) { List<Node *> groupNodes; Set<Node *> processedNodes; if (!root_node) return; List<Node *> node_queue; geometries.clear(); root_node->get_tree()->get_nodes_in_group(group, &groupNodes); for (const List<Node *>::Element *E = groupNodes.front(); E; E = E->next()) { Node *groupNode = E->get(); node_queue.push_back(groupNode); } while (node_queue.size() > 0) { Node *groupNode = node_queue.front()->get(); node_queue.pop_front(); if (Object::cast_to<MeshInstance>(groupNode)) { 
MeshInstance *mi = Object::cast_to<MeshInstance>(groupNode); Ref<Mesh> mesh = mi->get_mesh(); if (mesh.is_valid()) add_mesh(mesh); } if (recursive) for (int i = 0; i < groupNode->get_child_count(); i++) node_queue.push_back(groupNode->get_child(i)); } } void DetourNavigationMesh::add_mesh(const Ref<Mesh>& mesh) { geometries.push_back(mesh); } inline unsigned int nextPow2(unsigned int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } inline unsigned int ilog2(unsigned int v) { unsigned int r; unsigned int shift; r = (v > 0xffff) << 4; v >>= r; shift = (v > 0xff) << 3; v >>= shift; r |= shift; shift = (v > 0xf) << 2; v >>= shift; r |= shift; shift = (v > 0x3) << 1; v >>= shift; r |= shift; r |= (v >> 1); return r; } void DetourNavigationMesh::build() { if (geometries.size() == 0) return; for (int i = 0; i < geometries.size(); i++) if (geometries[i].is_valid()) bounding_box.merge(geometries[i]->get_aabb()); bounding_box.position -= padding; bounding_box.size += padding * 2.0; int gridH = 0, gridW = 0; float tile_edge_length = (float)tile_size * cell_size; Vector3 bmin = bounding_box.position; Vector3 bmax = bounding_box.position + bounding_box.size; rcCalcGridSize(&bmin.coord[0], &bmax.coord[0], cell_size, &gridW, &gridH); num_tiles_x = (gridW + tile_size - 1) / tile_size; num_tiles_z = (gridH + tile_size - 1) / tile_size; unsigned int tile_bits = (unsigned int)ilog2(nextPow2(num_tiles_x * num_tiles_z)); if (tile_bits > 14) tile_bits = 14; unsigned int poly_bits = 22 - tile_bits; unsigned int max_tiles = 1u << tile_bits; unsigned int max_polys = 1 << poly_bits; dtNavMeshParams params; rcVcopy(params.orig, &bmin.coord[0]); params.tileWidth = tile_edge_length; params.tileHeight = tile_edge_length; params.maxTiles = max_tiles; params.maxPolys = max_polys; navmesh = dtAllocNavMesh(); if (!navmesh) return; if (dtStatusFailed(((dtNavMesh *)navmesh)->init(&params))) { release_navmesh(); return; } unsigned int result = build_tiles(0, 
0, num_tiles_x - 1, num_tiles_z - 1); } void DetourNavigationMesh::add_meshdata(const Ref<Mesh> &p_mesh, const Transform &p_xform, Vector<float> &p_verticies, Vector<int> &p_indices) { int current_vertex_count = 0; for (int i = 0; i < p_mesh->get_surface_count(); i++) { current_vertex_count = p_verticies.size() / 3; if (p_mesh->surface_get_primitive_type(i) != Mesh::PRIMITIVE_TRIANGLES) continue; int index_count = 0; if (p_mesh->surface_get_format(i) & Mesh::ARRAY_FORMAT_INDEX) { index_count = p_mesh->surface_get_array_index_len(i); } else { index_count = p_mesh->surface_get_array_len(i); } ERR_CONTINUE((index_count == 0 || (index_count % 3) != 0)); int face_count = index_count / 3; Array a = p_mesh->surface_get_arrays(i); PoolVector<Vector3> mesh_vertices = a[Mesh::ARRAY_VERTEX]; PoolVector<Vector3>::Read vr = mesh_vertices.read(); if (p_mesh->surface_get_format(i) & Mesh::ARRAY_FORMAT_INDEX) { PoolVector<int> mesh_indices = a[Mesh::ARRAY_INDEX]; PoolVector<int>::Read ir = mesh_indices.read(); for (int i = 0; i < mesh_vertices.size(); i++) { Vector3 p_vec3 = p_xform.xform(vr[i]); p_verticies.push_back(p_vec3.x); p_verticies.push_back(p_vec3.y); p_verticies.push_back(p_vec3.z); } for (int i = 0; i < face_count; i++) { // CCW p_indices.push_back(current_vertex_count + (ir[i * 3 + 0])); p_indices.push_back(current_vertex_count + (ir[i * 3 + 2])); p_indices.push_back(current_vertex_count + (ir[i * 3 + 1])); } } else { face_count = mesh_vertices.size() / 3; for (int i = 0; i < face_count; i++) { Vector3 p_vec3 = p_xform.xform(vr[i * 3 + 0]); p_verticies.push_back(p_vec3.x); p_verticies.push_back(p_vec3.y); p_verticies.push_back(p_vec3.z); p_vec3 = p_xform.xform(vr[i * 3 + 2]); p_verticies.push_back(p_vec3.x); p_verticies.push_back(p_vec3.y); p_verticies.push_back(p_vec3.z); p_vec3 = p_xform.xform(vr[i * 3 + 1]); p_verticies.push_back(p_vec3.x); p_verticies.push_back(p_vec3.y); p_verticies.push_back(p_vec3.z); p_indices.push_back(current_vertex_count + (i * 3 + 0)); 
p_indices.push_back(current_vertex_count + (i * 3 + 1)); p_indices.push_back(current_vertex_count + (i * 3 + 2)); } } } } unsigned char *DetourNavigationMesh::build_tile_mesh(int tx, int ty, const float* bmin, const float* bmax, int& dataSize, const Ref<Mesh>& mesh) { Vector<float> verts; Vector<int> indices; Transform xform; add_meshdata(mesh, xform, verts, indices); int nverts = verts.size(); int ntris = indices.size() / 3; } void DetourNavigationMesh::release_navmesh() { dtFreeNavMesh((dtNavMesh*)navmesh); navmesh = NULL; num_tiles_x = 0; num_tiles_z = 0; bounding_box = AABB(); } void DetourNavigationMesh::set_group(const String& group) { this->group = group; } unsigned int DetourNavigationMesh::build_tiles(int x1, int z1, int x2, int z2) { unsigned ret = 0; for (int z = z1; z <= z2; z++) { for (int x = x1; x <= x2; x++) if (build_tile(x, z)) ret++; } return ret; } void DetourNavigationMesh::get_tile_bounding_box(int x, int z, Vector3& bmin, Vector3& bmax) { const float tile_edge_length = (float)tile_size * cell_size; bmin = bounding_box.position + Vector3(tile_edge_length * (float)x, 0, tile_edge_length * (float)z); bmax = bmin + Vector3(tile_edge_length, 0, tile_edge_length); } bool DetourNavigationMesh::build_tile(int x, int z) { Vector3 bmin, bmax; get_tile_bounding_box(x, z, bmin, bmax); dtNavMesh *nav = (dtNavMesh *)navmesh; nav->removeTile(nav->getTileRefAt(x, z, 0), NULL, NULL); rcConfig cfg; cfg.cs = cell_size; cfg.ch = cell_height; cfg.walkableSlopeAngle = agent_max_slope; cfg.walkableHeight = (int)ceil(agent_height / cfg.ch); cfg.walkableClimb = (int)floor(agent_max_climb / cfg.ch); cfg.walkableRadius = (int)ceil(agent_radius / cfg.cs); cfg.maxEdgeLen = (int)(edge_max_length / cfg.cs); cfg.maxSimplificationError = edge_max_error; cfg.minRegionArea = (int)sqrtf(region_min_size); cfg.mergeRegionArea = (int)sqrtf(region_merge_size); cfg.maxVertsPerPoly = 6; cfg.tileSize = tile_size; cfg.borderSize = cfg.walkableRadius + 3; cfg.width = cfg.tileSize + 
cfg.borderSize * 2; cfg.height = cfg.tileSize + cfg.borderSize * 2; cfg.detailSampleDist = detail_sample_distance < 0.9f ? 0.0f : cell_size * detail_sample_distance; cfg.detailSampleMaxError = cell_height * detail_sample_max_error; rcVcopy(cfg.bmin, &bmin.coord[0]); rcVcopy(cfg.bmax, &bmax.coord[0]); cfg.bmin[0] -= cfg.borderSize * cfg.cs; cfg.bmin[2] -= cfg.borderSize * cfg.cs; cfg.bmax[0] += cfg.borderSize * cfg.cs; cfg.bmax[2] += cfg.borderSize * cfg.cs; return false; }
32.372263
144
0.704961
[ "mesh", "object", "vector", "transform", "3d" ]
0fe209aa5f2e73627035315a8503bf7477671385
9,657
cpp
C++
src/pyglue/PyLookTransform.cpp
omenos/OpenColorIO
7316c3be20752278924dd3f213bff297ffb63a14
[ "BSD-3-Clause" ]
4
2016-09-28T21:35:17.000Z
2021-10-13T21:13:49.000Z
src/pyglue/PyLookTransform.cpp
omenos/OpenColorIO
7316c3be20752278924dd3f213bff297ffb63a14
[ "BSD-3-Clause" ]
null
null
null
src/pyglue/PyLookTransform.cpp
omenos/OpenColorIO
7316c3be20752278924dd3f213bff297ffb63a14
[ "BSD-3-Clause" ]
2
2019-03-05T20:43:59.000Z
2019-11-11T20:35:55.000Z
/* Copyright (c) 2003-2010 Sony Pictures Imageworks Inc., et al. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sony Pictures Imageworks nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <OpenColorIO/OpenColorIO.h> #include "PyUtil.h" #include "PyDoc.h" #define GetConstLookTransform(pyobject) GetConstPyOCIO<PyOCIO_Transform, \ ConstLookTransformRcPtr, LookTransform>(pyobject, PyOCIO_LookTransformType) #define GetEditableLookTransform(pyobject) GetEditablePyOCIO<PyOCIO_Transform, \ LookTransformRcPtr, LookTransform>(pyobject, PyOCIO_LookTransformType) OCIO_NAMESPACE_ENTER { namespace { /////////////////////////////////////////////////////////////////////// /// int PyOCIO_LookTransform_init(PyOCIO_Transform * self, PyObject * args, PyObject * kwds); PyObject * PyOCIO_LookTransform_getSrc(PyObject * self); PyObject * PyOCIO_LookTransform_setSrc(PyObject * self, PyObject * args); PyObject * PyOCIO_LookTransform_getDst(PyObject * self); PyObject * PyOCIO_LookTransform_setDst(PyObject * self, PyObject * args); PyObject * PyOCIO_LookTransform_getLooks(PyObject * self); PyObject * PyOCIO_LookTransform_setLooks(PyObject * self, PyObject * args); /////////////////////////////////////////////////////////////////////// /// PyMethodDef PyOCIO_LookTransform_methods[] = { { "getSrc", (PyCFunction) PyOCIO_LookTransform_getSrc, METH_NOARGS, LOOKTRANSFORM_GETSRC__DOC__ }, { "setSrc", PyOCIO_LookTransform_setSrc, METH_VARARGS, LOOKTRANSFORM_SETSRC__DOC__ }, { "getDst", (PyCFunction) PyOCIO_LookTransform_getDst, METH_NOARGS, LOOKTRANSFORM_GETDST__DOC__ }, { "setDst", PyOCIO_LookTransform_setDst, METH_VARARGS, LOOKTRANSFORM_SETDST__DOC__ }, { "getLooks", (PyCFunction) PyOCIO_LookTransform_getLooks, METH_NOARGS, LOOKTRANSFORM_GETLOOKS__DOC__ }, { "setLooks", PyOCIO_LookTransform_setLooks, METH_VARARGS, LOOKTRANSFORM_SETLOOKS__DOC__ }, { NULL, NULL, 0, NULL } }; } /////////////////////////////////////////////////////////////////////////// /// PyTypeObject PyOCIO_LookTransformType = { PyVarObject_HEAD_INIT(NULL, 0) //ob_size OCIO_PYTHON_NAMESPACE(LookTransform), //tp_name sizeof(PyOCIO_Transform), //tp_basicsize 0, //tp_itemsize 0, 
//tp_dealloc 0, //tp_print 0, //tp_getattr 0, //tp_setattr 0, //tp_compare 0, //tp_repr 0, //tp_as_number 0, //tp_as_sequence 0, //tp_as_mapping 0, //tp_hash 0, //tp_call 0, //tp_str 0, //tp_getattro 0, //tp_setattro 0, //tp_as_buffer Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, //tp_flags LOOKTRANSFORM__DOC__, //tp_doc 0, //tp_traverse 0, //tp_clear 0, //tp_richcompare 0, //tp_weaklistoffset 0, //tp_iter 0, //tp_iternext PyOCIO_LookTransform_methods, //tp_methods 0, //tp_members 0, //tp_getset &PyOCIO_TransformType, //tp_base 0, //tp_dict 0, //tp_descr_get 0, //tp_descr_set 0, //tp_dictoffset (initproc) PyOCIO_LookTransform_init, //tp_init 0, //tp_alloc 0, //tp_new 0, //tp_free 0, //tp_is_gc }; namespace { /////////////////////////////////////////////////////////////////////// /// int PyOCIO_LookTransform_init(PyOCIO_Transform * self, PyObject * args, PyObject * kwds) { OCIO_PYTRY_ENTER() LookTransformRcPtr ptr = LookTransform::Create(); int ret = BuildPyTransformObject<LookTransformRcPtr>(self, ptr); char* src = NULL; char* dst = NULL; char* looks = NULL; char* direction = NULL; static const char *kwlist[] = { "src", "dst", "looks", "direction", NULL }; if(!PyArg_ParseTupleAndKeywords(args, kwds, "|ssss", const_cast<char **>(kwlist), &src, &dst, &looks, &direction)) return -1; if(src) ptr->setSrc(src); if(dst) ptr->setDst(dst); if(looks) ptr->setLooks(looks); if(direction) ptr->setDirection(TransformDirectionFromString(direction)); return ret; OCIO_PYTRY_EXIT(-1) } PyObject * PyOCIO_LookTransform_getSrc(PyObject * self) { OCIO_PYTRY_ENTER() ConstLookTransformRcPtr transform = GetConstLookTransform(self); return PyString_FromString(transform->getSrc()); OCIO_PYTRY_EXIT(NULL) } PyObject * PyOCIO_LookTransform_setSrc(PyObject * self, PyObject * args) { OCIO_PYTRY_ENTER() const char* str = 0; if (!PyArg_ParseTuple(args, "s:setSrc", &str)) return NULL; LookTransformRcPtr transform = GetEditableLookTransform(self); transform->setSrc(str); Py_RETURN_NONE; 
OCIO_PYTRY_EXIT(NULL) } PyObject * PyOCIO_LookTransform_getDst(PyObject * self) { OCIO_PYTRY_ENTER() ConstLookTransformRcPtr transform = GetConstLookTransform(self); return PyString_FromString(transform->getDst()); OCIO_PYTRY_EXIT(NULL) } PyObject * PyOCIO_LookTransform_setDst(PyObject * self, PyObject * args) { OCIO_PYTRY_ENTER() const char* str = 0; if (!PyArg_ParseTuple(args, "s:setDst", &str)) return NULL; LookTransformRcPtr transform = GetEditableLookTransform(self); transform->setDst(str); Py_RETURN_NONE; OCIO_PYTRY_EXIT(NULL) } PyObject * PyOCIO_LookTransform_getLooks(PyObject * self) { OCIO_PYTRY_ENTER() ConstLookTransformRcPtr transform = GetConstLookTransform(self); return PyString_FromString( transform->getLooks() ); OCIO_PYTRY_EXIT(NULL) } PyObject * PyOCIO_LookTransform_setLooks(PyObject * self, PyObject * args) { OCIO_PYTRY_ENTER() const char* str = 0; if (!PyArg_ParseTuple(args, "s:setLooks", &str)) return NULL; LookTransformRcPtr transform = GetEditableLookTransform(self); transform->setLooks(str); Py_RETURN_NONE; OCIO_PYTRY_EXIT(NULL) } } } OCIO_NAMESPACE_EXIT
44.502304
102
0.516309
[ "transform" ]
0fe7325d22df827dabcbf9aa6906833232b04912
1,196
cpp
C++
chapter_3/user.cpp
hbatagelo/a_tour_of_cpp
11b177ff9ae25df8945c2a7b01a1abfa6e8f3f92
[ "Unlicense" ]
null
null
null
chapter_3/user.cpp
hbatagelo/a_tour_of_cpp
11b177ff9ae25df8945c2a7b01a1abfa6e8f3f92
[ "Unlicense" ]
null
null
null
chapter_3/user.cpp
hbatagelo/a_tour_of_cpp
11b177ff9ae25df8945c2a7b01a1abfa6e8f3f92
[ "Unlicense" ]
null
null
null
#include <cmath> #include <iostream> #include <numeric> #include "vector.hpp" double sqrt_sum(Vector &v) { double sum{0}; for (std::size_t i = 0; i != v.size(); ++i) { sum += std::sqrt(v[i]); // sum of square } return sum; } void f(Vector &v) { try { // Exceptions here are handled by the handler defined below v[v.size()] = 7; // Try to access beyond the end of v } catch (std::out_of_range &err) { // oops: out_of_range error // ... handle range error ... std::cerr << " what(): " << err.what() << '\n'; } } void user(std::size_t sz) noexcept { Vector v(sz); // Fill v with 1, 2, 3, 4... // std::terminate() will be called because v[sz] will throw std::iota(&v[0], &v[sz], 1); } void test() { try { Vector v{10}; std::cout << sqrt_sum(v) << '\n'; f(v); } catch (std::length_error &err) { // Do something and rethrow std::cerr << " what(): " << err.what() << '\n'; throw; } catch (std::bad_alloc &err) { // Ouch! This program is not designed to handle memory exhaustion std::terminate(); } } int main() { static_assert(4 <= sizeof(int), "integers are too small"); test(); user(10); }
23.92
74
0.561037
[ "vector" ]
0feb48a3c4450dbc8ad6b43516ba9ad16f6ae37b
3,696
cpp
C++
media_driver/agnostic/gen9/vp/hal/vphal_render_sfc_g9_base.cpp
lacc97/media-driver
8aa1d74b80668f9963e691b1c01ab564f50aec85
[ "Intel", "BSD-3-Clause", "MIT" ]
6
2019-04-18T02:51:40.000Z
2021-07-07T01:32:26.000Z
media_driver/agnostic/gen9/vp/hal/vphal_render_sfc_g9_base.cpp
lacc97/media-driver
8aa1d74b80668f9963e691b1c01ab564f50aec85
[ "Intel", "BSD-3-Clause", "MIT" ]
2
2018-11-08T02:14:17.000Z
2019-01-09T22:13:08.000Z
media_driver/agnostic/gen9/vp/hal/vphal_render_sfc_g9_base.cpp
lacc97/media-driver
8aa1d74b80668f9963e691b1c01ab564f50aec85
[ "Intel", "BSD-3-Clause", "MIT" ]
6
2018-12-20T06:39:59.000Z
2019-09-25T06:24:02.000Z
/* * Copyright (c) 2012-2017, Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ //! //! \file vphal_render_sfc_g9_base.cpp //! \brief VPHAL SFC Gen9 rendering component //! \details The SFC renderer supports Scaling, IEF, CSC/ColorFill and Rotation. //! It's responsible for setting up HW states and generating the SFC //! commands. //! 
#include "vphal_render_vebox_base.h" #include "vphal_render_sfc_g9_base.h" #if __VPHAL_SFC_SUPPORTED bool VphalSfcStateG9::IsInputFormatSupported( PVPHAL_SURFACE srcSurface) { bool ret = true; // Check if Input Format is supported if ((srcSurface->Format != Format_NV12) && (srcSurface->Format != Format_AYUV) && (srcSurface->Format != Format_P010) && (srcSurface->Format != Format_P016) && (srcSurface->Format != Format_A8B8G8R8) && (srcSurface->Format != Format_X8B8G8R8) && (srcSurface->Format != Format_A8R8G8B8) && (srcSurface->Format != Format_X8R8G8B8) && (!IS_PA_FORMAT(srcSurface->Format) || (srcSurface->Format == Format_Y410) || // Gen9 can't support Y410/Y416/Y210/Y216 Format (srcSurface->Format == Format_Y416) || (srcSurface->Format == Format_Y210) || (srcSurface->Format == Format_Y216))) { VPHAL_RENDER_NORMALMESSAGE("Unsupported Source Format '0x%08x' for SFC.", srcSurface->Format); ret = false; } return ret; } bool VphalSfcStateG9::IsOutputFormatSupported( PVPHAL_SURFACE outSurface) { bool ret = true; if (!IS_RGB32_FORMAT(outSurface->Format) && // Remove RGB565 support due to quality issue, may reopen this after root cause in the future. //!IS_RGB16_FORMAT(outSurface->Format) && outSurface->Format != Format_NV12 && outSurface->Format != Format_YUY2 && outSurface->Format != Format_UYVY && outSurface->Format != Format_AYUV) { VPHAL_RENDER_NORMALMESSAGE("Unsupported Render Target Format '0x%08x' for SFC Pipe.", outSurface->Format); ret = false; } return ret; } void VphalSfcStateG9::GetInputWidthHeightAlignUnit( MOS_FORMAT inputFormat, MOS_FORMAT outputFormat, uint16_t &widthAlignUnit, uint16_t &heightAlignUnit) { MOS_UNUSED(inputFormat); widthAlignUnit = 1; heightAlignUnit = 1; // Apply output alignment restriction to Region of the input frame. GetOutputWidthHeightAlignUnit(outputFormat, widthAlignUnit, heightAlignUnit); } #endif // __VPHAL_SFC_SUPPORTED
38.905263
114
0.677219
[ "render" ]
0ff09c17a7c42879fab01ffeebc1994ea17d983f
1,716
cc
C++
Dragon/src/operators/ndarray/argmax_op.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
212
2015-07-05T07:57:17.000Z
2022-02-27T01:55:35.000Z
Dragon/src/operators/ndarray/argmax_op.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
6
2016-07-07T14:31:56.000Z
2017-12-12T02:21:15.000Z
Dragon/src/operators/ndarray/argmax_op.cc
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
[ "BSD-2-Clause" ]
71
2016-03-24T09:02:41.000Z
2021-06-03T01:52:41.000Z
#include "operators/ndarray/argmax_op.h" #include "utils/op_kernel.h" namespace dragon { template <class Context> template <typename T> void ArgmaxOp<Context>::RunWithType() { if (top_k != 1) { // it's difficult to implement device code when top_k > 1 auto* Xdata = input(0).template data<T, CPUContext>(); auto* Ydata = output(0)->template mutable_data<T, CPUContext>(); kernel::Argmax<T, CPUContext>(count, axis_dim, inner_dim, top_k, Xdata, Ydata); } else { auto* Xdata = input(0).template data<T, Context>(); auto* Ydata = output(0)->template mutable_data<T, Context>(); kernel::Argmax<T, Context>(count, axis_dim, inner_dim, top_k, Xdata, Ydata); } } template <class Context> void ArgmaxOp<Context>::RunOnDevice() { if (axis != -1) { axis_dim = input(0).dim(axis); inner_dim = input(0).count(axis) / axis_dim; } else { axis_dim = input(0).count(); inner_dim = 1; } count = input(0).count() / axis_dim; vector<TIndex> dims = input(0).dims(); if (!keep_dims) { if (axis != -1) { if (top_k == 1) dims.erase(dims.begin() + axis); else dims[axis] = top_k; } else { dims = vector<TIndex>(1, top_k); } } else { if (axis == -1) dims = vector<TIndex>(input(0).ndim(), 1); dims[axis] = top_k; } output(0)->Reshape(dims); if (input(0).template IsType<float>()) RunWithType<float>(); else LOG(FATAL) << "Unsupported input types."; } DEPLOY_CPU(Argmax); #ifdef WITH_CUDA DEPLOY_CUDA(Argmax); #endif OPERATOR_SCHEMA(Argmax).NumInputs(1).NumOutputs(1); NO_GRADIENT(Argmax); } // namespace dragon
30.642857
87
0.599068
[ "vector" ]
0ff4fd379645e0b833b83f7d47efc814d42fb0a8
8,307
cpp
C++
src/cascadia/ut_app/JsonTests.cpp
ahaganatr/terminal
fc75430cd40b79835a735f2cdb5329629caa6ebc
[ "MIT" ]
1
2020-10-13T00:55:48.000Z
2020-10-13T00:55:48.000Z
src/cascadia/ut_app/JsonTests.cpp
ahaganatr/terminal
fc75430cd40b79835a735f2cdb5329629caa6ebc
[ "MIT" ]
null
null
null
src/cascadia/ut_app/JsonTests.cpp
ahaganatr/terminal
fc75430cd40b79835a735f2cdb5329629caa6ebc
[ "MIT" ]
null
null
null
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
//
// Unit tests for JSON handling in the Terminal settings model: raw jsoncpp
// parsing, ColorScheme deserialization + round-trip, and Profile GUID rules.

#include "precomp.h"
#include "../TerminalSettingsModel/ColorScheme.h"
#include "../TerminalSettingsModel/Profile.h"
#include "../TerminalSettingsModel/CascadiaSettings.h"
#include "../LocalTests_SettingsModel/JsonTestClass.h"

using namespace Microsoft::Console;
using namespace WEX::Logging;
using namespace WEX::TestExecution;
using namespace WEX::Common;
using namespace winrt::Microsoft::Terminal::Settings::Model;
using namespace winrt::Microsoft::Terminal::TerminalControl;

namespace TerminalAppUnitTests
{
    class JsonTests : public JsonTestClass
    {
        BEGIN_TEST_CLASS(JsonTests)
            TEST_CLASS_PROPERTY(L"ActivationContext", L"TerminalApp.Unit.Tests.manifest")
        END_TEST_CLASS()

        TEST_METHOD(ParseInvalidJson);
        TEST_METHOD(ParseSimpleColorScheme);
        TEST_METHOD(ProfileGeneratesGuid);

        TEST_CLASS_SETUP(ClassSetup)
        {
            // Provided by JsonTestClass: constructs the Json::CharReader
            // (_reader) that the Verify* helpers below rely on.
            InitializeJsonReader();
            // Use 4 spaces to indent instead of \t
            _builder.settings_["indentation"] = "    ";
            return true;
        }

        // Parses `content` with jsoncpp; fails the test (logging the parser's
        // error text) on failure, otherwise returns the parsed root value.
        Json::Value VerifyParseSucceeded(std::string content);
        // Parses `content` with jsoncpp; fails the test if the parse
        // unexpectedly succeeds.
        void VerifyParseFailed(std::string content);

    private:
        Json::StreamWriterBuilder _builder;
    };

    Json::Value JsonTests::VerifyParseSucceeded(std::string content)
    {
        Json::Value root;
        std::string errs;
        const bool parseResult = _reader->parse(content.c_str(), content.c_str() + content.size(), &root, &errs);
        VERIFY_IS_TRUE(parseResult, winrt::to_hstring(errs).c_str());
        return root;
    }

    void JsonTests::VerifyParseFailed(std::string content)
    {
        Json::Value root;
        std::string errs;
        const bool parseResult = _reader->parse(content.c_str(), content.c_str() + content.size(), &root, &errs);
        VERIFY_IS_FALSE(parseResult);
    }

    // Malformed json (two colons in one object entry) must be rejected.
    void JsonTests::ParseInvalidJson()
    {
        const std::string badJson{ "{ foo : bar : baz }" };
        VerifyParseFailed(badJson);
    }

    // Deserializes the Campbell scheme, checks every color against the
    // canonical table, then re-serializes and compares against the input.
    void JsonTests::ParseSimpleColorScheme()
    {
        const std::string campbellScheme{ "{"
                                          "\"background\" : \"#0C0C0C\","
                                          "\"black\" : \"#0C0C0C\","
                                          "\"blue\" : \"#0037DA\","
                                          "\"brightBlack\" : \"#767676\","
                                          "\"brightBlue\" : \"#3B78FF\","
                                          "\"brightCyan\" : \"#61D6D6\","
                                          "\"brightGreen\" : \"#16C60C\","
                                          "\"brightPurple\" : \"#B4009E\","
                                          "\"brightRed\" : \"#E74856\","
                                          "\"brightWhite\" : \"#F2F2F2\","
                                          "\"brightYellow\" : \"#F9F1A5\","
                                          "\"cursorColor\" : \"#FFFFFF\","
                                          "\"cyan\" : \"#3A96DD\","
                                          "\"foreground\" : \"#F2F2F2\","
                                          "\"green\" : \"#13A10E\","
                                          "\"name\" : \"Campbell\","
                                          "\"purple\" : \"#881798\","
                                          "\"red\" : \"#C50F1F\","
                                          "\"selectionBackground\" : \"#131313\","
                                          "\"white\" : \"#CCCCCC\","
                                          "\"yellow\" : \"#C19C00\""
                                          "}" };

        const auto schemeObject = VerifyParseSucceeded(campbellScheme);
        auto scheme = implementation::ColorScheme::FromJson(schemeObject);
        VERIFY_ARE_EQUAL(L"Campbell", scheme->Name());

        VERIFY_ARE_EQUAL(til::color(0xf2, 0xf2, 0xf2, 255), til::color{ scheme->Foreground() });
        VERIFY_ARE_EQUAL(til::color(0x0c, 0x0c, 0x0c, 255), til::color{ scheme->Background() });
        VERIFY_ARE_EQUAL(til::color(0x13, 0x13, 0x13, 255), til::color{ scheme->SelectionBackground() });
        VERIFY_ARE_EQUAL(til::color(0xFF, 0xFF, 0xFF, 255), til::color{ scheme->CursorColor() });

        // Expected palette: the stock Campbell table with alpha zeroed, since
        // the scheme's table entries carry no alpha.
        std::array<COLORREF, COLOR_TABLE_SIZE> expectedCampbellTable;
        auto campbellSpan = gsl::span<COLORREF>(&expectedCampbellTable[0], COLOR_TABLE_SIZE);
        Utils::InitializeCampbellColorTable(campbellSpan);
        Utils::SetColorTableAlpha(campbellSpan, 0);

        for (size_t i = 0; i < expectedCampbellTable.size(); i++)
        {
            const auto& expected = expectedCampbellTable.at(i);
            const til::color actual{ scheme->Table().at(static_cast<uint32_t>(i)) };
            VERIFY_ARE_EQUAL(expected, actual);
        }

        Log::Comment(L"Roundtrip Test for Color Scheme");
        Json::Value outJson{ scheme->ToJson() };
        VERIFY_ARE_EQUAL(schemeObject, outJson);
    }

    void JsonTests::ProfileGeneratesGuid()
    {
        // Parse some profiles without guids. We should NOT generate new guids
        // for them. If a profile doesn't have a GUID, we'll leave its _guid
        // set to nullopt. CascadiaSettings::_ValidateProfilesHaveGuid will
        // ensure all profiles have a GUID that's actually set.
        // The null guid _is_ a valid guid, so we won't re-generate that
        // guid. null is _not_ a valid guid, so we'll leave that nullopt
        // See SettingsTests::ValidateProfilesGenerateGuids for a version of
        // this test that includes synthesizing GUIDS for profiles without GUIDs
        // set
        const std::string profileWithoutGuid{ R"({ "name" : "profile0" })" };
        const std::string secondProfileWithoutGuid{ R"({ "name" : "profile1" })" };
        const std::string profileWithNullForGuid{ R"({ "name" : "profile2", "guid" : null })" };
        const std::string profileWithNullGuid{ R"({ "name" : "profile3", "guid" : "{00000000-0000-0000-0000-000000000000}" })" };
        const std::string profileWithGuid{ R"({ "name" : "profile4", "guid" : "{6239a42c-1de4-49a3-80bd-e8fdd045185c}" })" };

        const auto profile0Json = VerifyParseSucceeded(profileWithoutGuid);
        const auto profile1Json = VerifyParseSucceeded(secondProfileWithoutGuid);
        const auto profile2Json = VerifyParseSucceeded(profileWithNullForGuid);
        const auto profile3Json = VerifyParseSucceeded(profileWithNullGuid);
        const auto profile4Json = VerifyParseSucceeded(profileWithGuid);

        const auto profile0 = implementation::Profile::FromJson(profile0Json);
        const auto profile1 = implementation::Profile::FromJson(profile1Json);
        const auto profile2 = implementation::Profile::FromJson(profile2Json);
        const auto profile3 = implementation::Profile::FromJson(profile3Json);
        const auto profile4 = implementation::Profile::FromJson(profile4Json);

        const winrt::guid cmdGuid = Utils::GuidFromString(L"{6239a42c-1de4-49a3-80bd-e8fdd045185c}");
        const winrt::guid nullGuid{};

        // No guid key, and "guid": null, both stay unset; the all-zero guid
        // string and a real guid string both count as "set".
        VERIFY_IS_FALSE(profile0->HasGuid());
        VERIFY_IS_FALSE(profile1->HasGuid());
        VERIFY_IS_FALSE(profile2->HasGuid());
        VERIFY_IS_TRUE(profile3->HasGuid());
        VERIFY_IS_TRUE(profile4->HasGuid());

        VERIFY_ARE_EQUAL(profile3->Guid(), nullGuid);
        VERIFY_ARE_EQUAL(profile4->Guid(), cmdGuid);
    }
}
47.741379
114
0.519682
[ "model" ]
0ff63da17dc24ad792ce2b88174a4c0b11e5c9c4
38,899
cpp
C++
src/cpp/readindata.cpp
LipeiDu/iS3D2
231b8279554232e34946997776b513cfc5d84a39
[ "MIT" ]
1
2022-02-08T21:26:12.000Z
2022-02-08T21:26:12.000Z
src/cpp/readindata.cpp
LipeiDu/iS3D2
231b8279554232e34946997776b513cfc5d84a39
[ "MIT" ]
null
null
null
src/cpp/readindata.cpp
LipeiDu/iS3D2
231b8279554232e34946997776b513cfc5d84a39
[ "MIT" ]
1
2022-02-13T21:36:16.000Z
2022-02-13T21:36:16.000Z
#include<iostream> #include<sstream> #include<string> #include<fstream> #include<cmath> #include<iomanip> #include<stdlib.h> #include "iS3D.h" #include "Macros.h" #include "readindata.h" #include "Arsenal.h" #include "ParameterReader.h" #include "Table.h" using namespace std; Gauss_Laguerre::Gauss_Laguerre() { } void Gauss_Laguerre::load_roots_and_weights(string file_name) { stringstream file; file << file_name; FILE * gauss_file = fopen(file.str().c_str(), "r"); if(gauss_file == NULL) { printf("load_roots_and_weights flag: couldn't open gauss laguerre file\n"); } // get the powers and number of gauss points fscanf(gauss_file, "%d\t%d", &alpha, &points); // allocate memory for the roots and weights root = (double **)calloc(alpha, sizeof(double)); weight = (double **)calloc(alpha, sizeof(double)); for(int i = 0; i < alpha; i++) { root[i] = (double *)calloc(points, sizeof(double)); weight[i] = (double *)calloc(points, sizeof(double)); } int dummy; // dummy alpha index // load the arrays for(int i = 0; i < alpha; i++) { for(int j = 0; j < points; j++) { fscanf(gauss_file, "%d\t%lf\t%lf", &dummy, &root[i][j], &weight[i][j]); } } fclose(gauss_file); } Gauss_Legendre::Gauss_Legendre() { } void Gauss_Legendre::load_roots_and_weights(string file_name) { stringstream file; file << file_name; FILE * gauss_file = fopen(file.str().c_str(), "r"); if(gauss_file == NULL) { printf("load_roots_and_weights flag: couldn't open gauss legendre file\n"); } fscanf(gauss_file, "%d", &points); // allocate memory for the roots and weights root = (double *)calloc(points, sizeof(double)); weight = (double *)calloc(points, sizeof(double)); // load the arrays for(int i = 0; i < points; i++) { fscanf(gauss_file, "%lf\t%lf", &root[i], &weight[i]); } fclose(gauss_file); } Plasma::Plasma() { } void Plasma::load_thermodynamic_averages() { // reads the averaged thermodynamic quantities of the freezeout surface // ** assumes the file has already been created in read_surf_switch FILE * 
thermodynamic_file = fopen("tables/thermodynamic/average_thermodynamic_quantities.dat", "r"); if(thermodynamic_file == NULL) { printf("load_thermodynamic_averages flag: couldn't open average thermodynamic file\n"); } fscanf(thermodynamic_file, "%lf\n%lf\n%lf\n%lf\n%lf", &temperature, &energy_density, &pressure, &baryon_chemical_potential, &net_baryon_density); fclose(thermodynamic_file); } FO_data_reader::FO_data_reader(ParameterReader* paraRdr_in, string path_in) { paraRdr = paraRdr_in; mode = paraRdr->getVal("mode"); // change name to hydro_code dimension = paraRdr->getVal("dimension"); include_baryon = paraRdr->getVal("include_baryon"); } FO_data_reader::~FO_data_reader() { } int FO_data_reader::get_number_cells() { ostringstream surface_file; surface_file << "input/surface.dat"; Table block_file(surface_file.str().c_str()); number_of_cells = block_file.getNumberOfRows(); return number_of_cells; } void FO_data_reader::read_freezeout_surface(FO_surf* surf_ptr) { if(mode == 1 || mode == 5) { read_surface_cpu_vh(surf_ptr); // read 2+1d or 3+1d surface file from cpu vh (or cpu vah) } else if (mode == 6) { read_surface_music(surf_ptr); // read 2+1d or 3+1d surface file from MUSIC (public version) } else if (mode == 7) { read_surface_hic_eventgen(surf_ptr); // read 2+1d surface file from HIC-EventGen } } void FO_data_reader::read_surface_cpu_vh(FO_surf* surf_ptr) { printf("from input/surface.dat and undoing hbarc = 1 units..."); if(mode == 5) { printf(" (includes thermal vorticity)"); // only Derek's version of cpu vh outputs thermal vorticity wbar^\munu } printf("\n\nHydrodynamic code = CPU VH (or CPU VAH)\n\n"); printf("\thttps://github.com/derekeverett/cpu-vh\t(CPU VH)\n"); printf("\thttps://github.com/mjmcnelis/cpu_vah\t(CPU VAH)\n\n"); printf("Please check that input/surface.dat has the following format (and 1 blank line eof):\n\n\t"); if(include_baryon) { if(mode == 5) { printf("[t x y n ds_t ds_x ds_y ds_n u^x u^y u^n E T P pi^xx pi^xy pi^xn pi^yy pi^yn Pi muB 
nB V^x V^y V^n wbar^tx wbar^ty wbar^tn wbar^xy wbar^xn wbar^yn]\n\n"); } else { printf("[t x y n ds_t ds_x ds_y ds_n u^x u^y u^n E T P pi^xx pi^xy pi^xn pi^yy pi^yn Pi muB nB V^x V^y V^n]\n\n"); } } else { if(mode == 5) { printf("[t x y n ds_t ds_x ds_y ds_n u^x u^y u^n E T P pi^xx pi^xy pi^xn pi^yy pi^yn Pi wbar^tx wbar^ty wbar^tn wbar^xy wbar^xn wbar^yn]\n\n"); } else { printf("[t x y n ds_t ds_x ds_y ds_n u^x u^y u^n E T P pi^xx pi^xy pi^xn pi^yy pi^yn Pi]\n\n"); } } ostringstream surfdat_stream; // prepare to read in surface.dat surfdat_stream << "input/surface.dat"; ifstream surfdat(surfdat_stream.str().c_str()); double dummy; double T_avg = 0; // average thermodynamic variables across freezeout surface double E_avg = 0; double P_avg = 0; double muB_avg = 0; double nB_avg = 0; double max_volume = 0; // max volume of freezeout surface for(long i = 0; i < number_of_cells; i++) // loop over freezeout cells { // contravariant spacetime position x^\mu surfdat >> surf_ptr[i].tau; // \tau [fm] surfdat >> surf_ptr[i].x; // x [fm] surfdat >> surf_ptr[i].y; // y [fm] surfdat >> surf_ptr[i].eta; // \eta_s [1] // covariant surface normal vector d\sigma_\mu surfdat >> surf_ptr[i].dat; // d\sigma_\tau [fm^-2] surfdat >> surf_ptr[i].dax; // d\sigma_x [fm^-2] surfdat >> surf_ptr[i].day; // d\sigma_y [fm^-2] surfdat >> surf_ptr[i].dan; // d\sigma_\eta [fm^-1] // contravariant fluid velocity u^\mu surfdat >> surf_ptr[i].ux; // u^x [1] surfdat >> surf_ptr[i].uy; // u^y [1] surfdat >> surf_ptr[i].un; // u^\eta [fm^-1] // thermodynamic variables surfdat >> dummy; // energy density [fm^-4] double E = dummy * hbarC; // convert to [GeV/fm^3] surf_ptr[i].E = E; surfdat >> dummy; // temperature [fm^-1] double T = dummy * hbarC; // convert to [GeV] surf_ptr[i].T = T; surfdat >> dummy; // equilibrium pressure [fm^-4] double P = dummy * hbarC; // convert to [GeV/fm^3] surf_ptr[i].P = P; // contravariant shear stress pi^\munu surfdat >> dummy; // pi^xx [fm^-4] surf_ptr[i].pixx = dummy * 
hbarC; // convert to [GeV/fm^3] surfdat >> dummy; // pi^xy [fm^-4] surf_ptr[i].pixy = dummy * hbarC; // pi^x\eta [fm^-5] surfdat >> dummy; // pi^x\eta [fm^-5] surf_ptr[i].pixn = dummy * hbarC; // convert to [GeV/fm^4] surfdat >> dummy; // pi^yy [fm^-4] surf_ptr[i].piyy = dummy * hbarC; // convert to [GeV/fm^3] surfdat >> dummy; // pi^y\eta [fm^-5] surf_ptr[i].piyn = dummy * hbarC; // convert to [GeV/fm^4] // bulk viscous pressure surfdat >> dummy; // Pi [fm^-4] surf_ptr[i].bulkPi = dummy * hbarC; // convert to [GeV/fm^3] double muB = 0; // default value for net-baryon chemical potential double nB = 0; // default value for net-baryon density if(include_baryon) { // net-baryon chemical potential and density surfdat >> dummy; // muB [fm^-1] muB = dummy * hbarC; // convert to [GeV] surf_ptr[i].muB = muB; surfdat >> nB; // nB[fm^-3] surf_ptr[i].nB = nB; // contravariant baryon diffusion V^\mu surfdat >> surf_ptr[i].Vx; // V^x [fm^-3] surfdat >> surf_ptr[i].Vy; // V^y [fm^-3] surfdat >> surf_ptr[i].Vn; // V^\eta [fm^-4] (12/2/20 check this --> fixed units on 10/8/18) } // thermal vorticity wbar^\mu\nu if(mode == 5) // contravariant, dimensionless? undo hbarc = 1? { surfdat >> surf_ptr[i].wtx; // ask Derek for definition and units (any conversion?) surfdat >> surf_ptr[i].wty; // all upper indices? 
surfdat >> surf_ptr[i].wtn; surfdat >> surf_ptr[i].wxy; surfdat >> surf_ptr[i].wxn; surfdat >> surf_ptr[i].wyn; } // check whether 2+1d freezeout cells are really boost-invariant if(dimension == 2) { if(surf_ptr[i].eta != 0) { #ifdef FLAGS printf("read_surface_cpu_vh flag: setting spacetime rapidity of boost-invariant freezeout cell to eta = 0\n"); #endif surf_ptr[i].eta = 0; } if(surf_ptr[i].dan != 0 || surf_ptr[i].un != 0 || surf_ptr[i].pixn != 0 || surf_ptr[i].piyn != 0) { #ifdef FLAGS printf("read_surface_cpu_vh flag: dimension = 2 but freezeout cell %ld is not boost-invariant (please check format in surface.dat)\n", i); #endif } } // compute the averaged thermodynamic quantities (for fast df coefficients) double tau = surf_ptr[i].tau; double tau2 = tau * tau; double ux = surf_ptr[i].ux; double uy = surf_ptr[i].uy; double un = surf_ptr[i].un; double ut = sqrt(1. + ux * ux + uy * uy + tau2 * un * un); double dat = surf_ptr[i].dat; double dax = surf_ptr[i].dax; double day = surf_ptr[i].day; double dan = surf_ptr[i].dan; double uds = ut * dat + ux * dax + uy * day + un * dan; // u^\mu . d\sigma_\mu double ds_ds = dat * dat - dax * dax - day * day - dan * dan / tau2; // d\sigma^\mu . d\sigma_\mu double ds_max = fabs(uds) + sqrt(fabs(uds * uds - ds_ds)); // max volume element |ds| max_volume += ds_max; // append values E_avg += (E * ds_max); T_avg += (T * ds_max); P_avg += (P * ds_max); muB_avg += (muB * ds_max); nB_avg += (nB * ds_max); } surfdat.close(); T_avg /= max_volume; // divide by total max volume E_avg /= max_volume; P_avg /= max_volume; muB_avg /= max_volume; nB_avg /= max_volume; // write averaged thermodynamic variables to file (what happens if read from memory again?) 
ofstream thermal_average("tables/thermodynamic/average_thermodynamic_quantities.dat", ios_base::out); thermal_average << setprecision(15) << T_avg << "\n" << E_avg << "\n" << P_avg << "\n" << muB_avg << "\n" << nB_avg; thermal_average.close(); } void FO_data_reader::read_surface_music(FO_surf* surf_ptr) { printf("from input/surface.dat and undoing hbarc = 1 units and tau factors...\n"); printf("\nHydrodynamic code = MUSIC (public version)\n\n"); printf("\thttps://github.com/MUSIC-fluid/MUSIC\n\n"); printf("Please check that input/surface.dat has the following format (and 1 blank line eof):\n\n\t"); if(include_baryon) { printf("[t x y n ds_t/t ds_x/t ds_y/t ds_n/t u^t u^x u^y t.u^n E T muB muS muC (E+P)/T pi^tt pi^tx pi^ty t.pi^tn pi^xx pi^xy t.pi^xn pi^yy t.pi^yn t2.pi^nn Pi nB V^t V^x V^y t.V^n]\n\n"); // muB, muS, muC = baryon, strange, charm chemical potentials } else { printf("[t x y n ds_t/t ds_x/t ds_y/t ds_n/t u^t u^x u^y t.u^n E T muB muS muC (E+P)/T pi^tt pi^tx pi^ty t.pi^tn pi^xx pi^xy t.pi^xn pi^yy t.pi^yn t2.pi^nn Pi]\n\n"); } ostringstream surfdat_stream; // prepare to read in surface.dat surfdat_stream << "input/surface.dat"; ifstream surfdat(surfdat_stream.str().c_str()); double dummy; double T_avg = 0; // average thermodynamic variables across freezeout surface double E_avg = 0; double P_avg = 0; double muB_avg = 0; double nB_avg = 0; double max_volume = 0; // max volume of freezeout surface for(long i = 0; i < number_of_cells; i++) // loop over freezeout cells { // contravariant spacetime position x^\mu surfdat >> dummy; // \tau [fm] double tau = dummy; surf_ptr[i].tau = tau; surfdat >> surf_ptr[i].x; // x [fm] surfdat >> surf_ptr[i].y; // y [fm] surfdat >> surf_ptr[i].eta; // \eta_s [1] // covariant surface normal vector d\sigma_\mu / \tau surfdat >> dummy; // d\sigma_\tau / \tau [fm^-3] surf_ptr[i].dat = dummy * tau; // multiply by \tau surfdat >> dummy; // d\sigma_x / \tau [fm^-3] surf_ptr[i].dax = dummy * tau; // multiply by \tau surfdat >> 
dummy; // d\sigma_y / \tau [fm^-3] surf_ptr[i].day = dummy * tau; // multiply by \tau surfdat >> dummy; // d\sigma_\eta / \tau [fm^-4] surf_ptr[i].dan = dummy * tau; // multiply by \tau // contravariant fluid velocity u^\mu surfdat >> dummy; // u^\tau [1] surfdat >> surf_ptr[i].ux; // u^x [1] surfdat >> surf_ptr[i].uy; // u^y [1] surfdat >> dummy; // \tau . u^\eta [1] surf_ptr[i].un = dummy / tau; // divide by \tau // thermodynamic variables surfdat >> dummy; // energy density [fm^-4] double E = dummy * hbarC; // convert to [GeV/fm^3] surf_ptr[i].E = E; surfdat >> dummy; // temperature [fm^-1] double T = dummy * hbarC; // convert to [GeV] surf_ptr[i].T = T; surfdat >> dummy; // net-baryon chemical potential [fm^-1] double muB = dummy * hbarC; // convert to [GeV] surf_ptr[i].muB = muB; surfdat >> dummy; // strange chemical potential (units?) surfdat >> dummy; // charm chemical potential (these don't seem to be used here...) surfdat >> dummy; // (E + P) / T [fm^-3] double P = dummy * T - E; // equilibrium pressure [GeV/fm^3] surf_ptr[i].P = P; // contravariant shear stress pi^\munu surfdat >> dummy; // pi^\tau\tau [fm^-4] surfdat >> dummy; // pi^\taux [fm^-4] surfdat >> dummy; // pi^\tauy [fm^-4] surfdat >> dummy; // tau . pi^\tau\eta [fm^-4] surfdat >> dummy; // pi^xx [fm^-4] surf_ptr[i].pixx = dummy * hbarC; // convert to [GeV/fm^3] surfdat >> dummy; // pi^xy [fm^-4] surf_ptr[i].pixy = dummy * hbarC; // convert to [GeV/fm^3] surfdat >> dummy; // \tau . pi^x\eta [fm^-4] surf_ptr[i].pixn = dummy * hbarC / tau; // convert to [GeV/fm^4] (divided by \tau) surfdat >> dummy; // pi^yy [fm^-4] surf_ptr[i].piyy = dummy * hbarC; // convert to [GeV/fm^3] surfdat >> dummy; // \tau . pi^y\eta [fm^-4] surf_ptr[i].piyn = dummy * hbarC / tau; // convert to [GeV/fm^4] (divided by \tau) surfdat >> dummy; // \tau^2 . 
pi^\eta\eta [fm^-4] // bulk viscous pressure surfdat >> dummy; // Pi [fm^-4] surf_ptr[i].bulkPi = dummy * hbarC; // convert to [GeV/fm^3] double nB = 0.0; // default value for net-baryon density if(include_baryon) { // net-baryon density surfdat >> nB; // nB [fm^-3] surf_ptr[i].nB = nB; // contravariant net-baryon diffusion V^\mu surfdat >> dummy; // V^\tau [fm^-3] surfdat >> surf_ptr[i].Vx; // V^x [fm^-3] surfdat >> surf_ptr[i].Vy; // V^y [fm^-3] surfdat >> dummy; // \tau . V^\eta [fm^-3] (need to check music) surf_ptr[i].Vn = dummy / tau; // divide by \tau } // check whether 2+1d freezeout cells are really boost-invariant if(dimension == 2) { if(surf_ptr[i].eta != 0) { #ifdef FLAGS printf("read_surface_music flag: setting spacetime rapidity of boost-invariant freezeout cell to eta = 0\n"); #endif surf_ptr[i].eta = 0; } if(surf_ptr[i].dan != 0 || surf_ptr[i].un != 0 || surf_ptr[i].pixn != 0 || surf_ptr[i].piyn != 0) { #ifdef FLAGS printf("read_surface_music flag: dimension = 2 but freezeout cell %ld is not boost-invariant (please check format in surface.dat)\n", i); #endif } } // compute average thermodynamic quantities double tau2 = tau * tau; double ux = surf_ptr[i].ux; double uy = surf_ptr[i].uy; double un = surf_ptr[i].un; double ut = sqrt(1. + ux * ux + uy * uy + tau2 * un * un); double dat = surf_ptr[i].dat; double dax = surf_ptr[i].dax; double day = surf_ptr[i].day; double dan = surf_ptr[i].dan; double uds = ut * dat + ux * dax + uy * day + un * dan; // u^\mu . d\sigma_\mu double ds_ds = dat * dat - dax * dax - day * day - dan * dan / tau2; // d\sigma^\mu . 
d\sigma_\mu double ds_max = fabs(uds) + sqrt(fabs(uds * uds - ds_ds)); // max volume element |ds| max_volume += ds_max; // append values E_avg += (E * ds_max); T_avg += (T * ds_max); P_avg += (P * ds_max); muB_avg += (muB * ds_max); nB_avg += (nB * ds_max); } surfdat.close(); T_avg /= max_volume; // divide by total max volume E_avg /= max_volume; P_avg /= max_volume; muB_avg /= max_volume; nB_avg /= max_volume; // write averaged thermodynamic variables to file ofstream thermal_average("tables/thermodynamic/average_thermodynamic_quantities.dat", ios_base::out); thermal_average << setprecision(15) << T_avg << "\n" << E_avg << "\n" << P_avg << "\n" << muB_avg << "\n" << nB_avg; thermal_average.close(); } void FO_data_reader::read_surface_hic_eventgen(FO_surf* surf_ptr) { printf("from input/surface.dat and undoing tau factors...\n"); printf("\nHydrodynamic code = HIC-EventGen\n\n"); printf("\thttps://github.com/Duke-QCD/hic-eventgen\n\n"); if(dimension != 2) { printf("read_surface_hic_eventgen error: HIC-EventGen is boost-invariant (need to set dimension = 2)\n"); exit(-1); } else if(include_baryon) { printf("read_surface_hic_eventgen error: HIC-EventGen does not consider baryon chemical potential (need to set include_baryon = 0)\n"); exit(-1); } printf("Please check that input/surface.dat has the following format (and 1 blank line eof):\n\n\t"); printf("[t x y n ds_t/t ds_x/t ds_y/t ds_n/t v^x v^y t.v^n pi^tt pi^tx pi^ty t.pi^tn pi^xx pi^xy t.pi^xn pi^yy t.pi^yn t2.pi^nn Pi T E P muB]\n\n"); ostringstream surfdat_stream; // prepare to read in surface.dat surfdat_stream << "input/surface.dat"; ifstream surfdat(surfdat_stream.str().c_str()); double dummy; double T_avg = 0; // average thermodynamic variables across freezeout surface double E_avg = 0; double P_avg = 0; double muB_avg = 0; double nB_avg = 0; double max_volume = 0; // max volume of freezeout surface for(long i = 0; i < number_of_cells; i++) // loop over freezeout cells { // contravariant spacetime position 
x^\mu surfdat >> dummy; // \tau [fm] double tau = dummy; surf_ptr[i].tau = tau; surfdat >> surf_ptr[i].x; // x [fm] surfdat >> surf_ptr[i].y; // y [fm] surfdat >> dummy; // \eta_s [1] surf_ptr[i].eta = 0; // covariant surface normal vector d\sigma_\mu / \tau surfdat >> dummy; // d\sigma_\tau / \tau [fm^-3] surf_ptr[i].dat = dummy * tau; // multiply by \tau surfdat >> dummy; // d\sigma_x / \tau [fm^-3] surf_ptr[i].dax = dummy * tau; // multiply by \tau surfdat >> dummy; // d\sigma_y / \tau [fm^-3] surf_ptr[i].day = dummy * tau; // multiply by \tau surfdat >> dummy; // d\sigma_\eta / \tau [fm^-4] surf_ptr[i].dan = 0; // puzzled about this... // covariant fluid velocity // ask Derek if covariant... double vx; double vy; surfdat >> vx; // u^x / u^\tau [1] surfdat >> vy; // u^y / u^\tau [1] surfdat >> dummy; // \tau . u^\eta / u^\tau double ut = 1. / sqrt(fabs(1. - vx * vx - vy * vy)); surf_ptr[i].ux = ut * vx; // if covariant, would need minus sign... surf_ptr[i].uy = ut * vy; surf_ptr[i].un = 0; // contravariant shear stress pi^\mu\nu surfdat >> dummy; // pi^\tau\tau [GeV/fm^3] surfdat >> dummy; // pi^\taux [GeV/fm^3] surfdat >> dummy; // pi^\taux [GeV/fm^3] surfdat >> dummy; // \tau . pi^\tau\eta [GeV/fm^3] surfdat >> surf_ptr[i].pixx; // pi^xx [GeV/fm^3] surfdat >> surf_ptr[i].pixy; // pi^xy [GeV/fm^3] surfdat >> dummy; // \tau . pi^x\eta [GeV/fm^3] surf_ptr[i].pixn = 0; surfdat >> surf_ptr[i].piyy; // pi^yy [GeV/fm^3] surfdat >> dummy; // \tau . pi^y\eta [GeV/fm^3] surf_ptr[i].piyn = 0; surfdat >> dummy; // \tau^2 . 
pi^\eta\eta [GeV/fm^3] // bulk viscous pressure surfdat >> surf_ptr[i].bulkPi; // Pi [GeV/fm^3] // thermodynamic variables surfdat >> dummy; // temperature [GeV] double T = dummy; surf_ptr[i].T = T; surfdat >> dummy; // energy density [GeV/fm^3] double E = dummy; surf_ptr[i].E = E; surfdat >> dummy; // equilibrium pressure [GeV/fm^3] double P = dummy; surf_ptr[i].P = P; surfdat >> dummy; // baryon chemical potential [GeV] double muB = dummy; surf_ptr[i].muB = muB; double nB = 0.0; // default value for net-baryon density double tau2 = tau * tau; double ux = surf_ptr[i].ux; double uy = surf_ptr[i].uy; double un = 0; double dat = surf_ptr[i].dat; double dax = surf_ptr[i].dax; double day = surf_ptr[i].day; double dan = 0; double uds = ut * dat + ux * dax + uy * day + un * dan; // u^\mu . d\sigma_\mu double ds_ds = dat * dat - dax * dax - day * day - dan * dan / tau2; // d\sigma^\mu . d\sigma_\mu double ds_max = fabs(uds) + sqrt(fabs(uds * uds - ds_ds)); // max volume element |ds| max_volume += ds_max; // append values E_avg += (E * ds_max); T_avg += (T * ds_max); P_avg += (P * ds_max); muB_avg += (muB * ds_max); nB_avg += (nB * ds_max); } surfdat.close(); T_avg /= max_volume; E_avg /= max_volume; P_avg /= max_volume; muB_avg /= max_volume; nB_avg /= max_volume; // write averaged thermodynamic variables to file ofstream thermal_average("tables/thermodynamic/average_thermodynamic_quantities.dat", ios_base::out); thermal_average << setprecision(15) << T_avg << "\n" << E_avg << "\n" << P_avg << "\n" << muB_avg << "\n" << nB_avg; thermal_average.close(); return; } read_mcid::read_mcid(long int mcid_in) { mcid = mcid_in; char sign = '\0'; if(mcid < 0) { sign = '-'; printf("Error: should only be particles (not antiparticles) in pdg_test.dat\n"); } int * mcid_holder = (int *)calloc(max_digits, sizeof(int)); long int x = abs(mcid_in); int digits = 1; // get individual digits from right to left for(int i = 0; i < max_digits; i++) { mcid_holder[i] = (int)(x % (long int)10); x /= 
(long int)10; if(x > 0) { digits++; if(digits > max_digits) printf("Error: mcid %ld is > %d digits\n", mcid_in, max_digits); } } // set the quantum numbers nJ = mcid_holder[0]; // hadrons have 7-digit codes nq3 = mcid_holder[1]; nq2 = mcid_holder[2]; nq1 = mcid_holder[3]; nL = mcid_holder[4]; nR = mcid_holder[5]; n = mcid_holder[6]; n8 = mcid_holder[7]; // there are some hadrons with 8-digit codes n9 = mcid_holder[8]; n10 = mcid_holder[9]; // nuclei have 10-digit codes nJ += n8; // I think n8 adds to nJ if spin > 9 // test print the mcid //cout << sign << n << nR << nL << nq1 << nq2 << nq3 << nJ << endl; free(mcid_holder); // free memory // get relevant particle properties is_particle_a_deuteron(); is_particle_a_hadron(); is_particle_a_meson(); is_particle_a_baryon(); get_spin(); get_gspin(); get_baryon(); get_sign(); does_particle_have_distinct_antiparticle(); } void read_mcid::is_particle_a_deuteron() { // check if particle is a deuteron (easy way) is_deuteron = (mcid == 1000010020); if(is_deuteron) printf("Error: there is a deuteron in HRG\n"); } void read_mcid::is_particle_a_hadron() { // check if particle is a hadron is_hadron = (!is_deuteron && nq3 != 0 && nq2 != 0); } void read_mcid::is_particle_a_meson() { // check if particle is a meson is_meson = (is_hadron && nq1 == 0); } void read_mcid::is_particle_a_baryon() { // check if particle is a baryon is_baryon = (is_hadron && nq1 != 0); } void read_mcid::get_spin() { // get the spin x 2 of the particle if(is_hadron) { if(nJ == 0) { spin = 0; // special cases: K0_L=0x130 & K0_S=0x310 return; } else { spin = nJ - 1; return; } } else if(is_deuteron) { spin = 2; return; } else { printf("Error: particle is not a deuteron or hadron\n"); // this assumes that we only have white particles (no single // quarks): Electroweak fermions have 11-17, so the // second-to-last-digit is the spin. The same for the Bosons: they // have 21-29 and 2spin = 2 (this fails for the Higgs). 
spin = nq3; return; } } void read_mcid::get_gspin() { // get the spin degeneracy of the particle if(is_hadron && nJ > 0) { gspin = nJ; return; } else if(is_deuteron) { gspin = 3; // isospin is 0 -> spin = 1 (generalize for any I later) return; } else { printf("Error: particle is not a deuteron or hadron\n"); gspin = spin + 1; // lepton return; } } void read_mcid::get_baryon() { // get the baryon number of the particle if(is_deuteron) { //baryon = nq3 + 10 * nq2 + 100 * nq1; // nucleus baryon = 2; return; } else if(is_hadron) { if(is_meson) { baryon = 0; return; } else if(is_baryon) { baryon = 1; return; } } else { printf("Error: particle is not a deuteron or hadron\n"); baryon = 0; return; } } void read_mcid::get_sign() { // get the quantum statistics sign of the particle if(is_deuteron) { sign = -1; // deuteron is boson return; } else if(is_hadron) { if(is_meson) { sign = -1; return; } else if(is_baryon) { sign = 1; return; } } else { printf("Error: particle is not a deuteron or hadron\n"); sign = spin % 2; return; } } void read_mcid::does_particle_have_distinct_antiparticle() { // check if the particle has distinct antiparticle if(is_hadron) // hadron { has_antiparticle = ((baryon != 0) || (nq2 != nq3)); return; } else if(is_deuteron) { has_antiparticle = true; } else { printf("Error: particle is not a deuteron or hadron\n"); has_antiparticle = (nq3 == 1); // lepton return; } } PDG_Data::PDG_Data(ParameterReader * paraRdr_in) { paraRdr = paraRdr_in; hrg_eos = paraRdr->getVal("hrg_eos"); } PDG_Data::~PDG_Data() { ////////////////////////////// } int PDG_Data::read_resonances_conventional(particle_info * particle, string pdg_filename) { double eps = 1.e-15; int Nparticle = 0; ifstream resofile(pdg_filename); int local_i = 0; int dummy_int; while(!resofile.eof()) { resofile >> particle[local_i].mc_id; resofile >> particle[local_i].name; resofile >> particle[local_i].mass; resofile >> particle[local_i].width; resofile >> particle[local_i].gspin; //spin degeneracy 
resofile >> particle[local_i].baryon; resofile >> particle[local_i].strange; resofile >> particle[local_i].charm; resofile >> particle[local_i].bottom; resofile >> particle[local_i].gisospin; //isospin degeneracy resofile >> particle[local_i].charge; resofile >> particle[local_i].decays; for(int j = 0; j < particle[local_i].decays; j++) { resofile >> dummy_int; resofile >> particle[local_i].decays_Npart[j]; resofile >> particle[local_i].decays_branchratio[j]; resofile >> particle[local_i].decays_part[j][0]; resofile >> particle[local_i].decays_part[j][1]; resofile >> particle[local_i].decays_part[j][2]; resofile >> particle[local_i].decays_part[j][3]; resofile >> particle[local_i].decays_part[j][4]; } //decide whether particle is stable under strong interactions if (particle[local_i].decays_Npart[0] == 1) particle[local_i].stable = 1; else particle[local_i].stable = 0; //add anti-particle entry if (particle[local_i].baryon > 0) // changed on Feb. 2019 { local_i++; particle[local_i].mc_id = -particle[local_i-1].mc_id; ostringstream antiname; antiname << "Anti-baryon-" << particle[local_i-1].name; particle[local_i].name = antiname.str(); particle[local_i].mass = particle[local_i-1].mass; particle[local_i].width = particle[local_i-1].width; particle[local_i].gspin = particle[local_i-1].gspin; particle[local_i].baryon = -particle[local_i-1].baryon; particle[local_i].strange = -particle[local_i-1].strange; particle[local_i].charm = -particle[local_i-1].charm; particle[local_i].bottom = -particle[local_i-1].bottom; particle[local_i].gisospin = particle[local_i-1].gisospin; particle[local_i].charge = -particle[local_i-1].charge; particle[local_i].decays = particle[local_i-1].decays; particle[local_i].stable = particle[local_i-1].stable; for (int j = 0; j < particle[local_i].decays; j++) { particle[local_i].decays_Npart[j]=particle[local_i-1].decays_Npart[j]; particle[local_i].decays_branchratio[j]=particle[local_i-1].decays_branchratio[j]; for (int k=0; k< Maxdecaypart; 
k++) { if(particle[local_i-1].decays_part[j][k] == 0) particle[local_i].decays_part[j][k] = (particle[local_i-1].decays_part[j][k]); else { int idx; // find the index for decay particle for(idx = 0; idx < local_i; idx++) if (particle[idx].mc_id == particle[local_i-1].decays_part[j][k]) break; if(idx == local_i && particle[local_i-1].stable == 0 && particle[local_i-1].decays_branchratio[j] > eps) { cout << "Error: can not find decay particle index for anti-baryon!" << endl; cout << "particle mc_id : " << particle[local_i-1].decays_part[j][k] << endl; exit(1); } if (particle[idx].baryon == 0 && particle[idx].charge == 0 && particle[idx].strange == 0) particle[local_i].decays_part[j][k] = (particle[local_i-1].decays_part[j][k]); else particle[local_i].decays_part[j][k] = (- particle[local_i-1].decays_part[j][k]); } } } } local_i++; // Add one to the counting variable "i" for the meson/baryon } resofile.close(); Nparticle = local_i - 1; //take account the final fake one for (int i = 0; i < Nparticle; i++) { //if (particle[i].baryon == 0) particle[i].sign = -1; // this made deuteron a fermion if (particle[i].baryon % 2 == 0) particle[i].sign = -1; else particle[i].sign = 1; } // count the number of mesons, baryons and antibaryons int meson = 0; int baryon = 0; int antibaryon = 0; for(int i = 0; i < Nparticle; i++) { if(particle[i].baryon == 0) meson++; else if(particle[i].baryon > 0) baryon++; else if(particle[i].baryon < 0) antibaryon++; } if(baryon != antibaryon) printf("Error: (anti)baryons not paired correctly\n"); printf("\nNumber of resonances = %d\n\n\t", Nparticle); printf("%d mesons\n\t", meson); printf("%d baryons\n\t", baryon); printf("%d antibaryons\n\n", antibaryon); particle_info last_particle = particle[Nparticle - 1]; printf("Last particle has mcid = %ld, %s, m = %lf GeV (please check this) \n\n", last_particle.mc_id, last_particle.name.c_str(), last_particle.mass); return Nparticle; } int PDG_Data::read_resonances_smash_box(particle_info * particle, 
string pdg_filename) { //******************************| //******************************| const int mcid_entries = 4; //| (increase if > 4 mcid entries per line) //******************************| //******************************| printf("\nNumber of mcid entries per row is set to mcid_entries = %d (increase if needed)\n", mcid_entries); long int * mc_id = (long int*)calloc(mcid_entries, sizeof(long int)); string name; double mass; double width; char parity; string line; ifstream pdg_smash_box(pdg_filename); int i = 0; // particle index while(getline(pdg_smash_box, line)) { istringstream current_line(line); // add mc_id[..] if increase mcid_entries current_line >> name >> mass >> width >> parity >> mc_id[0] >> mc_id[1] >> mc_id[2] >> mc_id[3]; if(line.empty() || line.at(0) == '#') { // skip lines that are blank or start with comments continue; } for(int k = 0; k < mcid_entries; k++) { // add particles with nonzero mc_id's to particle struct if(mc_id[k] != 0) { // get remaining particle info from the mcid read_mcid mcid_info(mc_id[k]); particle[i].name = name; particle[i].mass = mass; particle[i].width = width; particle[i].mc_id = mc_id[k]; particle[i].gspin = mcid_info.gspin; particle[i].baryon = mcid_info.baryon; particle[i].sign = mcid_info.sign; i++; if(mcid_info.has_antiparticle) { // create antiparticle next to hadron ostringstream antiname; antiname << "Anti-" << name; particle[i].name = antiname.str(); particle[i].mass = mass; particle[i].width = width; particle[i].mc_id = -mc_id[k]; particle[i].gspin = mcid_info.gspin; particle[i].baryon = -mcid_info.baryon; particle[i].sign = mcid_info.sign; i++; } // add antiparticle } // check if mc_id if nonzero // reset mc_id's to zero mc_id[k] = 0; } // mcid index if(i > (Maxparticle -1)) { printf("\nError: number of particles in file exceeds Maxparticle = %d. 
Exiting...\n\n", Maxparticle); exit(-1); } } // scanning file pdg_smash_box.close(); free(mc_id); int Nparticle = i; // total number of resonances // count the number of mesons, baryons and antibaryons int meson = 0; int baryon = 0; int antibaryon = 0; for(int i = 0; i < Nparticle; i++) { if(particle[i].baryon == 0) meson++; else if(particle[i].baryon > 0) baryon++; else if(particle[i].baryon < 0) antibaryon++; } if(baryon != antibaryon) printf("Error: (anti)baryons not paired correctly\n"); printf("\nNumber of resonances = %d\n\n\t", Nparticle); printf("%d mesons\n\t", meson); printf("%d baryons\n\t", baryon); printf("%d antibaryons\n\n", antibaryon); particle_info last_particle = particle[Nparticle - 1]; printf("Last particle is mcid = %ld, %s, m = %lf GeV (please check this) \n\n", last_particle.mc_id, last_particle.name.c_str(), last_particle.mass); return Nparticle; } int PDG_Data::read_resonances(particle_info * particle) { int Nparticle; switch(hrg_eos) { case 1: { printf("PDG/pdg-urqmd_v3.3+.dat... (please check if 1 blank line eof)\n"); Nparticle = read_resonances_conventional(particle, urqmd); // read urqmd break; } case 2: { printf("PDG/pdg_smash.dat... (please check if 1 blank line eof)\n"); Nparticle = read_resonances_conventional(particle, smash); // read smash break; } case 3: { printf("PDG/pdg_box.dat...\n"); Nparticle = read_resonances_smash_box(particle, smash_box); // read smash box break; } default: { printf("\nread_resonances error: need to set hrg_eos = (1,2,3)\n"); exit(-1); } } return Nparticle; }
31.019936
191
0.546698
[ "vector" ]
0ff6e874aa0d3be66eeabf73d2a25715af0a7f9d
3,095
cpp
C++
src/effect_deband.cpp
aqxa1/vkBasalt
c82eb92a98779336c63f7736f16f67b1732c2f8e
[ "Zlib" ]
null
null
null
src/effect_deband.cpp
aqxa1/vkBasalt
c82eb92a98779336c63f7736f16f67b1732c2f8e
[ "Zlib" ]
null
null
null
src/effect_deband.cpp
aqxa1/vkBasalt
c82eb92a98779336c63f7736f16f67b1732c2f8e
[ "Zlib" ]
null
null
null
#include "effect_deband.hpp" #include <cstring> #include "image_view.hpp" #include "descriptor_set.hpp" #include "buffer.hpp" #include "renderpass.hpp" #include "graphics_pipeline.hpp" #include "framebuffer.hpp" #include "shader.hpp" #include "sampler.hpp" #ifndef ASSERT_VULKAN #define ASSERT_VULKAN(val)\ if(val!=VK_SUCCESS)\ {\ throw std::runtime_error("ASSERT_VULKAN failed " + std::to_string(val));\ } #endif namespace vkBasalt { DebandEffect::DebandEffect(VkPhysicalDevice physicalDevice, VkLayerInstanceDispatchTable instanceDispatchTable, VkDevice device, VkLayerDispatchTable dispatchTable, VkFormat format, VkExtent2D imageExtent, std::vector<VkImage> inputImages, std::vector<VkImage> outputImages, std::shared_ptr<vkBasalt::Config> pConfig) { std::string fullScreenRectFile = "full_screen_triangle.vert.spv"; std::string debandFragmentFile = "deband.frag.spv"; vertexCode = readFile(fullScreenRectFile); fragmentCode = readFile(debandFragmentFile); struct{ float screenWidth; float screenHeight; float reverseScreenWidth; float reverseScreenHeight; float debandAvgdiff; float debandMaxdiff; float debandMiddiff; float range; int32_t iterations; } debandOptions {}; debandOptions.screenWidth = (float) imageExtent.width; debandOptions.screenHeight = (float) imageExtent.height; debandOptions.reverseScreenWidth = 1.0f / imageExtent.width; debandOptions.reverseScreenHeight = 1.0f / imageExtent.height; //get Options debandOptions.debandAvgdiff = std::stod(pConfig->getOption("debandAvgdiff", "3.4")); debandOptions.debandMaxdiff = std::stod(pConfig->getOption("debandMaxdiff", "6.8")); debandOptions.debandMiddiff = std::stod(pConfig->getOption("debandMiddiff", "3.3")); debandOptions.range = std::stod(pConfig->getOption("debandRange", "16.0")); debandOptions.iterations = std::stoi(pConfig->getOption("debandIterations", "4")); std::vector<VkSpecializationMapEntry> specMapEntrys(9); for(uint32_t i=0;i<specMapEntrys.size();i++) { specMapEntrys[i].constantID = i; 
specMapEntrys[i].offset = sizeof(float) * i;//TODO not clean to assume that sizeof(int32_t) == sizeof(float) specMapEntrys[i].size = sizeof(float); } VkSpecializationInfo specializationInfo; specializationInfo.mapEntryCount = specMapEntrys.size(); specializationInfo.pMapEntries = specMapEntrys.data(); specializationInfo.dataSize = sizeof(debandOptions); specializationInfo.pData = &debandOptions; pVertexSpecInfo = nullptr; pFragmentSpecInfo = &specializationInfo; init(physicalDevice, instanceDispatchTable, device, dispatchTable, format, imageExtent, inputImages, outputImages, pConfig); } DebandEffect::~DebandEffect() { } }
38.6875
322
0.671729
[ "vector" ]
ba03688a40351708374da6d8d3caf6106623eaf0
3,461
cpp
C++
Library/sdlmove/sdlmove.cpp
denisjackman/Cee
2176e9dccc17ac93463bd5473f437f1c76ba9c3c
[ "CC-BY-4.0" ]
null
null
null
Library/sdlmove/sdlmove.cpp
denisjackman/Cee
2176e9dccc17ac93463bd5473f437f1c76ba9c3c
[ "CC-BY-4.0" ]
2
2016-06-30T14:31:43.000Z
2016-07-01T08:43:03.000Z
Library/sdlmove/sdlmove.cpp
denisjackman/game
2176e9dccc17ac93463bd5473f437f1c76ba9c3c
[ "CC-BY-4.0" ]
null
null
null
#include "SDL2/SDL.h" #define SCREEN_WIDTH 640 #define SCREEN_HEIGHT 480 #define SPRITE_SIZE 32 #define SCREEN_TITLE "SDL Move" int main(int argc, char* argv[]) { SDL_Surface *temp; SDL_Window* window = NULL; SDL_Renderer* renderer = NULL; SDL_Texture* sprite = NULL; SDL_Texture* grass = NULL; SDL_Texture* screen = NULL; SDL_Rect rcSprite, rcGrass, rcScreen; SDL_Event event; const unsigned char *keystate; int colorkey; int gameover; SDL_Init(SDL_INIT_VIDEO); window = SDL_CreateWindow( SCREEN_TITLE, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_SHOWN ); renderer = SDL_CreateRenderer( window, -1, SDL_RENDERER_ACCELERATED ); /* load grass */ temp = SDL_LoadBMP("grass.bmp"); grass = SDL_CreateTextureFromSurface(renderer, temp); SDL_FreeSurface(temp); /* load sprite */ temp = SDL_LoadBMP("sprite.bmp"); /* now we set the background to transparent ff00ff */ SDL_SetColorKey( temp, SDL_TRUE, SDL_MapRGB( temp->format, 0xFF, 0, 0xFF ) ); sprite = SDL_CreateTextureFromSurface(renderer, temp); SDL_FreeSurface(temp); /* set sprite position */ rcSprite.x = 0; rcSprite.y = 0; rcSprite.w = SPRITE_SIZE; rcSprite.h = SPRITE_SIZE; rcScreen.x = SCREEN_WIDTH / 2 - SPRITE_SIZE / 2; rcScreen.y = SCREEN_HEIGHT / 2 - SPRITE_SIZE / 2; rcScreen.w = SPRITE_SIZE; rcScreen.h = SPRITE_SIZE; gameover = 0; /* message pump */ while (!gameover) { //Clear screen SDL_RenderClear( renderer ); /* look for an event */ if (SDL_PollEvent(&event)) { /* an event was found */ switch (event.type) { /* close button clicked */ case SDL_QUIT: gameover = 1; break; /* handle the keyboard */ case SDL_KEYDOWN: switch (event.key.keysym.sym) { case SDLK_ESCAPE: case SDLK_q: gameover = 1; break; } break; } } /* handle sprite movement */ /* keystate = SDL_GetKeyboardState(NULL); if (keystate[SDL_SCANCODE_LEFT] ) { rcSprite.x -= 2; } if (keystate[SDL_SCANCODE_RIGHT] ) { rcSprite.x += 2; } if (keystate[SDL_SCANCODE_UP] ) { rcSprite.y -= 2; } if (keystate[SDL_SCANCODE_DOWN] ) { 
rcSprite.y += 2; } */ /* collide with edges of screen if ( rcSprite.x < 0 ) { rcSprite.x = 0; } else if ( rcSprite.x > SCREEN_WIDTH-SPRITE_SIZE ) { rcSprite.x = SCREEN_WIDTH-SPRITE_SIZE; } if ( rcSprite.y < 0 ) { rcSprite.y = 0; } else if ( rcSprite.y > SCREEN_HEIGHT-SPRITE_SIZE ) { rcSprite.y = SCREEN_HEIGHT-SPRITE_SIZE; } */ rcScreen.x = rcSprite.x; rcScreen.y = rcSprite.y; /* draw the grass */ for (int x = 0; x < SCREEN_WIDTH/SPRITE_SIZE; x++) { for (int y = 0; y < SCREEN_HEIGHT/SPRITE_SIZE; y++) { rcGrass.x = x * SPRITE_SIZE; rcGrass.y = y * SPRITE_SIZE; rcScreen.x = x * SPRITE_SIZE; rcScreen.y = y * SPRITE_SIZE; SDL_RenderCopy( renderer, grass, NULL, NULL ); //SDL_BlitSurface(grass, NULL, screen, &rcGrass); } } /* draw the sprite */ //SDL_BlitSurface(sprite, NULL, screen, &rcSprite); //Render texture to screen SDL_RenderCopy( renderer, sprite, &rcSprite, &rcScreen ); /* update the screen */ //SDL_UpdateRect(screen,0,0,0,0); SDL_RenderPresent( renderer ); } /* clean up */ SDL_DestroyTexture( sprite ); SDL_DestroyTexture( grass ); SDL_Quit(); return 0; }
24.721429
143
0.640277
[ "render" ]
61323a33f2639b33ede94fe97f6b7ced5791b06d
2,346
hpp
C++
src/client/action_state.hpp
mnewhouse/tselements
bd1c6724018e862156948a680bb1bc70dd28bef6
[ "MIT" ]
null
null
null
src/client/action_state.hpp
mnewhouse/tselements
bd1c6724018e862156948a680bb1bc70dd28bef6
[ "MIT" ]
null
null
null
src/client/action_state.hpp
mnewhouse/tselements
bd1c6724018e862156948a680bb1bc70dd28bef6
[ "MIT" ]
null
null
null
/* * TS Elements * Copyright 2015-2018 M. Newhouse * Released under the MIT license. */ #pragma once #include "game/game_state.hpp" #include "local_player_roster.hpp" #include "client_message_dispatcher.hpp" #include "control_event_translator.hpp" #include "client_viewport_arrangement.hpp" #include "client_race_hud.hpp" #include "controls/control_center.hpp" #include "scene/scene.hpp" #include "world/world_message_fwd.hpp" #include <memory> namespace ts { namespace client { class ActionState : public game::GameState { public: ActionState(game::GameContext game_context, scene::Scene scene_obj, const LocalPlayerRoster& local_player); virtual void render(const render_context&) const override; virtual void update(const update_context&) override; virtual void process_event(const event_type&) override; void request_update(std::uint32_t frame_duration); scene::Scene& scene_object(); const scene::Scene& scene_object() const; void connect(server::MessageConveyor message_conveyor); void pause(); void resume(); void toggle_paused(); bool is_paused() const { return is_paused_; } const controls::ControlCenter& control_center() const { return control_center_; } const scene::Scene& scene_obj() const { return scene_; } void hide_race_hud(); void show_race_hud(); //template <typename MessageType> //void handle_message(const MessageType&) {} //void handle_message(const world::messages::SceneryCollision& collision); //void handle_message(const world::messages::EntityCollision& collision); protected: template <typename MessageType> void dispatch_message(const MessageType& m) { message_dispatcher_.send(m); } private: virtual void launch_action(); virtual void end_action(); game::GameContext game_context_; KeySettings key_settings_; scene::Scene scene_; controls::ControlCenter control_center_; ControlEventTranslator control_event_translator_; scene::ViewportArrangement viewport_arrangement_; LocalPlayerRoster local_players_; RaceHUD race_hud_; bool is_paused_ = false; bool hud_visible_ 
= true; MessageDispatcher message_dispatcher_; }; } }
25.78022
113
0.705882
[ "render" ]
61336de73fc964dc2f365d00c453b21e173c49d9
1,873
cpp
C++
source/get_process_input/Scale_A_Ea.cpp
DetlevCM/chemical-kinetics-solver
7010fd6c72c29a0d912ad0c353ff13a5b643cc04
[ "MIT" ]
3
2015-07-03T20:14:00.000Z
2021-02-02T13:45:31.000Z
source/get_process_input/Scale_A_Ea.cpp
DetlevCM/chemical-kinetics-solver
7010fd6c72c29a0d912ad0c353ff13a5b643cc04
[ "MIT" ]
null
null
null
source/get_process_input/Scale_A_Ea.cpp
DetlevCM/chemical-kinetics-solver
7010fd6c72c29a0d912ad0c353ff13a5b643cc04
[ "MIT" ]
4
2017-11-09T19:49:18.000Z
2020-08-04T18:29:28.000Z
/* * Scale_A_Ea.cpp * * Created on: 21 Feb 2018 * Author: detlev */ #include "../headers/Headers.hpp" double Scale_A( double A_read_in, vector<double> ReactantData, int scaling_type ) { // should it be * 1000 or / 1000? if(scaling_type == 1) // A is molecules / cm^3 { // for moles / dm^3 double order = 0.0; for(size_t i=0;i<ReactantData.size();i++) { order = order + ReactantData[i]; } order = fabs(order) - 1.0; // make sure it is positive // 6.0221e+23 <- molecules per mol double scale = (6.0221e23); // convert to molecules / cm^(-3) scale = scale / 1000.0; // convert from cm^3 to dm^3 A_read_in = A_read_in * pow(scale,(order)); return A_read_in; // and convert to molecules per liter (dm^3) } else if(scaling_type == 2) // A is in moles / cm^3 { double order = 0.0; for(size_t i=0;i<ReactantData.size();i++) { order = order + ReactantData[i]; } order = fabs(order) - 1.0; // make sure it is positive double scale = 1.0 / 1000.0; // convert from cm^3 to dm^3 A_read_in = A_read_in * pow(scale,(order)); return A_read_in; // and convert to molecules per liter (dm^3) } else // return unmodified parameter if we do not know what to do { return A_read_in; } } // Ea needs to be in Kelvin for the remainder of the calculations double Scale_Ea( double Ea_read_in, int scaling_type ) { if(scaling_type == 1) // Ea is in kcal/mol { return Ea_read_in/1.98709e-3; } else if(scaling_type == 2) // Ea is in cal/mol { return Ea_read_in/1.98709; } else if(scaling_type == 3) // Ea is in kJ/mol { return Ea_read_in/8.3144621e-3; } else if(scaling_type == 4) // Ea is in J/mol { return Ea_read_in/8.3144621; } //if(scaling_type == 0) // Ea is in Kelvin else // do nothing if we don't identify the scaling, return value as read in { return Ea_read_in; } }
20.139785
77
0.638014
[ "vector" ]
6134c8dd94013c3d194dbc290e6f215757ddf772
3,967
cc
C++
content/public/test/unittest_test_suite.cc
zealoussnow/chromium
fd8a8914ca0183f0add65ae55f04e287543c7d4a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
14,668
2015-01-01T01:57:10.000Z
2022-03-31T23:33:32.000Z
content/public/test/unittest_test_suite.cc
zealoussnow/chromium
fd8a8914ca0183f0add65ae55f04e287543c7d4a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
86
2015-10-21T13:02:42.000Z
2022-03-14T07:50:50.000Z
content/public/test/unittest_test_suite.cc
zealoussnow/chromium
fd8a8914ca0183f0add65ae55f04e287543c7d4a
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
5,941
2015-01-02T11:32:21.000Z
2022-03-31T16:35:46.000Z
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/public/test/unittest_test_suite.h" #include <memory> #include "base/base_switches.h" #include "base/check.h" #include "base/command_line.h" #include "base/feature_list.h" #include "base/rand_util.h" #include "base/test/test_suite.h" #include "build/build_config.h" #include "content/browser/network_service_instance_impl.h" #include "content/browser/storage_partition_impl.h" #include "content/public/browser/network_service_instance.h" #include "content/public/test/scoped_web_ui_controller_factory_registration.h" #include "content/public/test/test_host_resolver.h" #include "content/test/test_blink_web_unit_test_support.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/public/web/blink.h" #if defined(USE_AURA) #include "ui/aura/env.h" #endif #if defined(OS_FUCHSIA) #include "ui/ozone/public/ozone_switches.h" #endif namespace content { namespace { // The global NetworkService object could be created in some tests due to // various StoragePartition calls. Since it has a mojo pipe that is bound using // the current thread, which goes away between tests, we need to destruct it to // avoid calls being dropped silently. class ResetNetworkServiceBetweenTests : public testing::EmptyTestEventListener { public: ResetNetworkServiceBetweenTests() = default; ResetNetworkServiceBetweenTests(const ResetNetworkServiceBetweenTests&) = delete; ResetNetworkServiceBetweenTests& operator=( const ResetNetworkServiceBetweenTests&) = delete; void OnTestEnd(const testing::TestInfo& test_info) override { // If the network::NetworkService object was instantiated during a unit test // it will be deleted because network_service_instance.cc has it in a // SequenceLocalStorageSlot. 
However we want to synchronously destruct the // InterfacePtr pointing to it to avoid it getting the connection error // later and have other tests use the InterfacePtr that is invalid. ResetNetworkServiceForTesting(); } }; } // namespace UnitTestTestSuite::UnitTestTestSuite(base::TestSuite* test_suite) : test_suite_(test_suite) { base::CommandLine* command_line = base::CommandLine::ForCurrentProcess(); std::string enabled = command_line->GetSwitchValueASCII(switches::kEnableFeatures); std::string disabled = command_line->GetSwitchValueASCII(switches::kDisableFeatures); ForceCreateNetworkServiceDirectlyForTesting(); StoragePartitionImpl::ForceInProcessStorageServiceForTesting(); testing::TestEventListeners& listeners = testing::UnitTest::GetInstance()->listeners(); listeners.Append(new ResetNetworkServiceBetweenTests); listeners.Append(new CheckForLeakedWebUIControllerFactoryRegistrations); // The ThreadPool created by the test launcher is never destroyed. // Similarly, the FeatureList created here is never destroyed so it // can safely be accessed by the ThreadPool. std::unique_ptr<base::FeatureList> feature_list = std::make_unique<base::FeatureList>(); feature_list->InitializeFromCommandLine(enabled, disabled); base::FeatureList::SetInstance(std::move(feature_list)); #if defined(OS_FUCHSIA) // Use headless ozone platform on Fuchsia by default. // TODO(crbug.com/865172): Remove this flag. if (!command_line->HasSwitch(switches::kOzonePlatform)) command_line->AppendSwitchASCII(switches::kOzonePlatform, "headless"); #endif DCHECK(test_suite); blink_test_support_ = std::make_unique<TestBlinkWebUnitTestSupport>(); test_host_resolver_ = std::make_unique<TestHostResolver>(); } UnitTestTestSuite::~UnitTestTestSuite() = default; int UnitTestTestSuite::Run() { #if defined(USE_AURA) std::unique_ptr<aura::Env> aura_env = aura::Env::CreateInstance(); #endif return test_suite_->Run(); } } // namespace content
36.394495
80
0.777918
[ "object" ]
61385825b662c9361cf36b64608a84ef05e91378
8,359
cpp
C++
torch/csrc/jit/tensorexpr/bounds_inference.cpp
vladap2013/pytorch
30367773056de95e006107d82ddaa3db5eeaa05a
[ "Intel" ]
1
2021-06-17T13:02:45.000Z
2021-06-17T13:02:45.000Z
torch/csrc/jit/tensorexpr/bounds_inference.cpp
vladap2013/pytorch
30367773056de95e006107d82ddaa3db5eeaa05a
[ "Intel" ]
null
null
null
torch/csrc/jit/tensorexpr/bounds_inference.cpp
vladap2013/pytorch
30367773056de95e006107d82ddaa3db5eeaa05a
[ "Intel" ]
null
null
null
#include <torch/csrc/jit/tensorexpr/bounds_inference.h> #include <torch/csrc/jit/tensorexpr/bounds_overlap.h> #include <torch/csrc/jit/tensorexpr/expr.h> #include <torch/csrc/jit/tensorexpr/ir.h> #include <torch/csrc/jit/tensorexpr/ir_printer.h> #include <torch/csrc/jit/tensorexpr/ir_simplifier.h> #include <torch/csrc/jit/tensorexpr/ir_visitor.h> #include <torch/csrc/jit/tensorexpr/stmt.h> namespace torch { namespace jit { namespace tensorexpr { using namespace analysis; template <typename Container> BoundsInfo mergeTensorAccesses( const Container& accesses, const std::unordered_map<const Var*, const Buf*>& varToBuf, bool distinctAccessKinds) { BoundsInfo ret; for (auto& access : accesses) { if (access->type() == AccessType::Input || access->type() == AccessType::Output) { continue; } auto vtbIt = varToBuf.find(access->var()); TORCH_INTERNAL_ASSERT(vtbIt != varToBuf.end()); const Buf* buf = vtbIt->second; std::vector<TensorAccessBoundsInfo>& infos = ret[buf]; bool added = false; // This loop should be small, max of 2 (kLoad, kStore). for (auto& TABI : infos) { TensorAccessKind kind = access->isWrite() ? kStore : kLoad; if (!distinctAccessKinds || kind == TABI.kind) { TORCH_INTERNAL_ASSERT(TABI.start.size() == access->bounds().size()); TORCH_INTERNAL_ASSERT(TABI.stop.size() == access->bounds().size()); for (size_t i = 0; i < TABI.start.size(); ++i) { TABI.start[i] = IRSimplifier::simplify( new Min(TABI.start[i], access->bounds()[i].start, true)); TABI.stop[i] = IRSimplifier::simplify( new Max(TABI.stop[i], access->bounds()[i].end, true)); added = true; if (kind != TABI.kind) { TABI.kind = kMutate; } } } } if (!added) { TensorAccessBoundsInfo info; info.kind = access->isWrite() ? 
kStore : kLoad; for (auto& b : access->bounds()) { info.start.push_back(b.start); info.stop.push_back(b.end); } infos.push_back(info); } } return ret; } std::unordered_map<const Var*, const Buf*> getAllBufs(Stmt* s) { std::unordered_map<const Var*, const Buf*> varToBuf; auto bufs = NodeFinder<const Buf>::find(s); auto calls = NodeFinder<FunctionCall>::find(s); for (auto* c : calls) { bufs.push_back(c->tensor()->buf()); } for (auto* b : bufs) { varToBuf[b->base_handle()] = b; } return varToBuf; } BoundsInfo inferBounds(Stmt* s, bool distinctAccessKinds) { auto varToBuf = getAllBufs(s); MemDependencyChecker checker; s->accept(&checker); return mergeTensorAccesses( checker.getHistory(), varToBuf, distinctAccessKinds); } BoundsInfo getInferredBounds( MemDependencyChecker& analyzer, Stmt* s, bool distinctAccessKinds) { return mergeTensorAccesses( analyzer.accessesWithin(s), getAllBufs(s), distinctAccessKinds); } void printBoundsInfo(const BoundsInfo& v) { std::cerr << "Access vector {\n"; for (auto& pair : v) { std::cerr << *pair.first << " in ["; bool first = true; for (const auto& b : pair.second) { if (!first) { std::cerr << ", "; } std::cerr << ((b.kind == kLoad) ? "LOAD" : "STORE") << "("; int i = 0; if (b.start.empty()) { std::cerr << "0"; } for (const auto& s : b.start) { if (i != 0) { std::cerr << ", "; } std::cerr << *s; i++; } std::cerr << "; "; i = 0; if (b.stop.empty()) { std::cerr << "0"; } for (const auto& s : b.stop) { if (i != 0) { std::cerr << ", "; } std::cerr << *s; i++; } std::cerr << ")"; first = false; } std::cerr << "]\n"; } std::cerr << "}\n"; } std::vector<const Expr*> getBoundExtents( const std::vector<TensorAccessBoundsInfo>& infos) { std::vector<const Expr*> starts; std::vector<const Expr*> stops; // Find the safe size of the temprorary buffer by determining the outer // extents of a union of all bounds. 
for (const TensorAccessBoundsInfo& p : infos) { for (size_t i = 0; i < p.start.size(); i++) { if (starts.size() <= i) { starts.push_back(p.start[i]); } else { starts[i] = IRSimplifier::simplify(new Min(starts[i], p.start[i], true)); } if (stops.size() <= i) { stops.push_back(p.stop[i]); } else { stops[i] = IRSimplifier::simplify(new Max(stops[i], p.stop[i], true)); } } } std::vector<const Expr*> extents; for (size_t i = 0; i < starts.size(); ++i) { const Expr* dim = IRSimplifier::simplify( new Add(new Sub(stops[i], starts[i]), new IntImm(1))); extents.push_back(dim); } return extents; } using BoundSet = std::unordered_set<Bound, BoundHash>; BoundSet convertBounds( const std::vector<TensorAccessBoundsInfo>& bounds, TensorAccessKind filter = kMutate) { BoundSet ret; for (auto& TABI : bounds) { if (filter == kMutate || TABI.kind == filter) { for (size_t i = 0; i < TABI.start.size(); ++i) { ret.insert(Bound(TABI.start[i], TABI.stop[i])); } } } return ret; } BoundSet convertBounds( BoundsInfo& bounds, const Buf* buf, TensorAccessKind filter = kMutate) { auto it = bounds.find(buf); if (it == bounds.end()) { return BoundSet(); } return convertBounds(it->second, filter); } HazardKind getPotentialHazards( MemDependencyChecker& analyzer, Stmt* A, Stmt* B) { BoundsInfo aBounds = getInferredBounds(analyzer, A, true); BoundsInfo bBounds = getInferredBounds(analyzer, B, true); BoundSet aWrites; BoundSet aReads; for (auto& pair : bBounds) { const Buf* buf = pair.first; if (aBounds.find(buf) == aBounds.end()) { continue; } auto aWrites = convertBounds(aBounds, buf, kStore); auto aReads = convertBounds(aBounds, buf, kLoad); auto bWrites = convertBounds(pair.second, kStore); auto bReads = convertBounds(pair.second, kLoad); // First, RAW. for (auto& bR : bReads) { for (auto& aW : aWrites) { if (boundOverlap(bR, aW) != NoOverlap) { return HazardKind::ReadAfterWrite; } } } // Then WAR. 
for (auto& bW : bWrites) { for (auto& aR : aReads) { if (boundOverlap(bW, aR) != NoOverlap) { return HazardKind::WriteAfterRead; } } } // Then WAW. for (auto& bW : bWrites) { for (auto& aW : aWrites) { if (boundOverlap(bW, aW) != NoOverlap) { return HazardKind::WriteAfterWrite; } } } } return HazardKind::NoDependency; } IndexBounds getIndexBounds(const TensorAccessBoundsInfo& tabi) { TORCH_INTERNAL_ASSERT(tabi.start.size() == tabi.stop.size()); IndexBounds ret(tabi.start.size()); if (tabi.start.empty()) { return ret; } for (size_t i = 0; i < tabi.start.size(); ++i) { ret[i] = Bound(tabi.start[i], tabi.stop[i]); } return ret; } std::vector<IndexBounds> getIndexBounds( const std::vector<TensorAccessBoundsInfo>& vTABI) { std::vector<IndexBounds> bounds(vTABI.size()); for (size_t i = 0; i < vTABI.size(); ++i) { bounds[i] = getIndexBounds(vTABI[i]); } return bounds; } bool hasPartialOverlap( analysis::MemDependencyChecker& analyzer, Stmt* A, Stmt* B) { BoundsInfo aBounds = getInferredBounds(analyzer, A, true); BoundsInfo bBounds = getInferredBounds(analyzer, B, true); for (const auto& aBound : aBounds) { auto bIt = bBounds.find(aBound.first); if (bIt == bBounds.end()) { continue; } auto aIndexBounds = getIndexBounds(aBound.second); auto bIndexBounds = getIndexBounds(bIt->second); for (const auto& aIndexBound : aIndexBounds) { for (const auto& bIndexBound : bIndexBounds) { auto overlap = overlaps(aIndexBound, bIndexBound); // If the returned OverlapKind is "Contains", that means `bound1` is // a super set of `bound2`, so that is also a PartialOverlap. if (overlap == Contains || overlap == PartialOverlap) { return true; } } } } return false; } } // namespace tensorexpr } // namespace jit } // namespace torch
26.621019
78
0.598636
[ "vector" ]
613adfd314572137b308ae07f19ec468b6d3b690
18,193
cpp
C++
rmw_iceoryx_cpp/src/internal/iceoryx_deserialize.cpp
thomas-moulard/rmw_iceoryx
b3ffe34fc61970d283a9418dd340df859cd21a69
[ "Apache-2.0" ]
null
null
null
rmw_iceoryx_cpp/src/internal/iceoryx_deserialize.cpp
thomas-moulard/rmw_iceoryx
b3ffe34fc61970d283a9418dd340df859cd21a69
[ "Apache-2.0" ]
null
null
null
rmw_iceoryx_cpp/src/internal/iceoryx_deserialize.cpp
thomas-moulard/rmw_iceoryx
b3ffe34fc61970d283a9418dd340df859cd21a69
[ "Apache-2.0" ]
null
null
null
// Copyright (c) 2019 by Robert Bosch GmbH. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <array> #include <cstring> #include <iostream> #include <string> #include <tuple> #include <utility> #include <vector> #include "rosidl_generator_c/primitives_sequence.h" #include "rosidl_typesupport_cpp/message_type_support.hpp" #include "rosidl_typesupport_introspection_c/field_types.h" #include "rosidl_typesupport_introspection_c/message_introspection.h" #include "rosidl_typesupport_introspection_cpp/field_types.hpp" #include "rosidl_typesupport_introspection_cpp/message_introspection.hpp" #include "rmw_iceoryx_cpp/iceoryx_deserialize.hpp" namespace rmw_iceoryx_cpp { std::pair<const char *, uint32_t> load_array_size(const char * serialized_msg) { // This is 64 bit aligned // REVIEW: Please discuss const uint32_t array_check = *reinterpret_cast<const uint32_t *>(serialized_msg); serialized_msg += sizeof(array_check); const uint32_t array_size = *reinterpret_cast<const uint32_t *>(serialized_msg); serialized_msg += sizeof(array_size); if (array_check != 101) { std::cerr << "deserialization failure: array size is " << array_size << " and check failed! (" << array_check << ")" << std::endl; } return std::make_pair(serialized_msg, array_size); } // FIXME: Use proper templating here! 
+ add allocator handling inline std::pair<const char *, size_t> get_submessage_vector_cpp( const rosidl_typesupport_introspection_cpp::MessageMember * member, const char * serialized_msg, char * ros_message_field, void * & subros_message, size_t sub_members_size) { (void)member; uint32_t vector_elements = 0; std::tie(serialized_msg, vector_elements) = load_array_size(serialized_msg); auto vector = reinterpret_cast<std::vector<unsigned char> *>(ros_message_field); vector->resize(vector_elements * sub_members_size); subros_message = reinterpret_cast<void *>(vector->data()); return std::make_pair(serialized_msg, vector_elements); } inline std::pair<const char *, size_t> get_submessage_array_c( const rosidl_typesupport_introspection_c__MessageMember * member, const char * serialized_msg, void * ros_message_field, void * & subros_message, size_t sub_members_size) { (void)member; uint32_t array_elements = 0; std::tie(serialized_msg, array_elements) = load_array_size(serialized_msg); auto data_array = const_cast<rosidl_generator_c__char__Sequence *>(reinterpret_cast<const rosidl_generator_c__char__Sequence *>(ros_message_field)); data_array->data = static_cast<signed char *>(calloc(array_elements, sub_members_size)); data_array->capacity = array_elements; data_array->size = array_elements; subros_message = reinterpret_cast<void *>(data_array->data); return std::make_pair(serialized_msg, array_elements); } template<typename T> const char * copy_payload_array_cpp(const char * serialized_msg, void * ros_message_field) { uint32_t size = sizeof(T); uint32_t array_size = 0; std::tie(serialized_msg, array_size) = load_array_size(serialized_msg); if (array_size > 0) { // FIXME: add ", typename ContainerAllocator::template rebind<int8_t>::other" std::vector<T> * data = reinterpret_cast<std::vector<T> *>(ros_message_field); data->resize(array_size); uint32_t char_size = size * array_size; memcpy(data->data(), serialized_msg, char_size); serialized_msg += char_size; } return 
serialized_msg; } template<> const char * copy_payload_array_cpp<bool>(const char * serialized_msg, void * ros_message_field) { uint32_t size = 0; uint32_t array_size = 0; std::tie(serialized_msg, array_size) = load_array_size(serialized_msg); std::tie(serialized_msg, size) = load_array_size(serialized_msg); std::vector<bool> * data = reinterpret_cast<std::vector<bool> *>(ros_message_field); data->resize(array_size); // Boolean arrays are treated specially for they are stored as single bits memcpy(data->begin()._M_p, serialized_msg, size); serialized_msg += size; return serialized_msg; } template<typename T> const char * copy_payload_array_c(const char * serialized_msg, void * ros_message_field) { uint32_t size = sizeof(T); uint32_t array_size = 0; std::tie(serialized_msg, array_size) = load_array_size(serialized_msg); uint32_t char_size = size * array_size; auto data_array = const_cast<rosidl_generator_c__char__Sequence *>(reinterpret_cast<const rosidl_generator_c__char__Sequence *>(ros_message_field)); data_array->data = static_cast<signed char *>(calloc(array_size, size)); data_array->capacity = array_size; memcpy(data_array->data, serialized_msg, char_size); serialized_msg += char_size; data_array->capacity = array_size; data_array->size = array_size; return serialized_msg; } template<> const char * copy_payload_array_c<std::string>( const char * serialized_msg, void * ros_message_field) { serialized_msg = copy_payload_array_c<char>(serialized_msg, ros_message_field); auto data_array = const_cast<rosidl_generator_c__char__Sequence *>(reinterpret_cast<const rosidl_generator_c__char__Sequence *>(ros_message_field)); --data_array->capacity; // Set size of string to \0 auto array_size = strlen((const char *) const_cast<const signed char *>(data_array->data)); data_array->size = array_size; return serialized_msg; } template<typename T> const char * copy_payload_fixed_array_cpp( const char * serialized_msg, void * ros_message_field, uint32_t size) { T * 
ros_message_field_data = reinterpret_cast<std::array<T, 1> *>(ros_message_field)->data(); auto char_size = size * sizeof(T); memcpy(ros_message_field_data, serialized_msg, char_size); serialized_msg += char_size; return serialized_msg; } template<typename T> const char * copy_payload_cpp( const rosidl_typesupport_introspection_cpp::MessageMember * member, const char * serialized_msg, void * ros_message_field) { if (!member->is_array_) { uint32_t size = sizeof(T); T * data = reinterpret_cast<T *>(ros_message_field); memcpy(data, serialized_msg, size); serialized_msg += size; } else { if (member->array_size_ > 0 && !member->is_upper_bound_) { serialized_msg = copy_payload_fixed_array_cpp<T>( serialized_msg, ros_message_field, member->array_size_); } else { serialized_msg = copy_payload_array_cpp<T>(serialized_msg, ros_message_field); } } return serialized_msg; } template<> const char * copy_payload_cpp<std::string>( const rosidl_typesupport_introspection_cpp::MessageMember * member, const char * serialized_msg, void * ros_message_field) { if (!member->is_array_) { std::string * data = reinterpret_cast<std::string *>(ros_message_field); std::vector<char> vec; serialized_msg = copy_payload_array_cpp<char>(serialized_msg, &vec); int size = 0; for (char terminator : vec) { if (terminator == '\0') { break; } ++size; } if (vec.size() > 0) { data->insert(data->begin(), vec.begin(), vec.begin() + size); } } else { if (member->array_size_ > 0 && !member->is_upper_bound_) { std::string * ros_message_field_data = reinterpret_cast<std::array<std::string, 1> *>(ros_message_field)->data(); for (auto i = 0u; i < member->array_size_; ++i) { std::vector<char> vec; serialized_msg = copy_payload_array_cpp<char>(serialized_msg, &vec); ros_message_field_data[i].insert(ros_message_field_data[i].begin(), vec.begin(), vec.end()); } } else { std::vector<std::string> * data = reinterpret_cast<std::vector<std::string> *>(ros_message_field); uint32_t array_size = 0; std::tie(serialized_msg, 
array_size) = load_array_size(serialized_msg); data->resize(array_size); for (auto & data_element : *data) { std::vector<char> vec; serialized_msg = copy_payload_array_cpp<char>(serialized_msg, &vec); data_element.insert(data_element.begin(), vec.begin(), vec.end()); } } } return serialized_msg; } template<typename T> const char * copy_payload_c( const rosidl_typesupport_introspection_c__MessageMember * member, const char * serialized_msg, void * ros_message_field) { if (!member->is_array_) { uint32_t size = sizeof(T); T * data = reinterpret_cast<T *>(ros_message_field); memcpy(data, serialized_msg, size); serialized_msg += size; } else { if (member->array_size_ > 0 && !member->is_upper_bound_) { serialized_msg = copy_payload_fixed_array_cpp<T>( serialized_msg, ros_message_field, member->array_size_); } else { serialized_msg = copy_payload_array_c<T>(serialized_msg, ros_message_field); } } return serialized_msg; } template<> const char * copy_payload_c<std::string>( const rosidl_typesupport_introspection_c__MessageMember * member, const char * serialized_msg, void * ros_message_field) { if (!member->is_array_) { serialized_msg = copy_payload_array_c<std::string>(serialized_msg, ros_message_field); } else { if (member->array_size_ > 0 && !member->is_upper_bound_) { std::string * ros_message_field_data = reinterpret_cast<std::array<std::string, 1> *>(ros_message_field)->data(); for (auto i = 0u; i < member->array_size_; ++i) { serialized_msg = copy_payload_array_c<std::string>(serialized_msg, ros_message_field_data); } } else { uint32_t array_size = 0; std::tie(serialized_msg, array_size) = load_array_size(serialized_msg); for (uint32_t i = 0; i < array_size; ++i) { serialized_msg = copy_payload_array_c<std::string>(serialized_msg, ros_message_field); } } } return serialized_msg; } const char * copy_payload_c_ros_message( const rosidl_typesupport_introspection_c__MessageMember * member, const char * serialized_msg, void * ros_message_field) { auto sub_members = 
(const rosidl_typesupport_introspection_c__MessageMembers *)member->members_->data; if (!member->is_array_) { serialized_msg = deserialize(serialized_msg, sub_members, ros_message_field); } else { void * subros_message = nullptr; size_t array_elememts = 0; size_t sub_members_size = sub_members->size_of_; if (member->array_size_ && !member->is_upper_bound_) { subros_message = ros_message_field; array_elememts = member->array_size_; } else { std::tie(serialized_msg, array_elememts) = get_submessage_array_c( member, serialized_msg, ros_message_field, subros_message, sub_members_size); } for (size_t index = 0; index < array_elememts; ++index) { serialized_msg = deserialize(serialized_msg, sub_members, subros_message); subros_message = static_cast<char *>(subros_message) + sub_members_size; } } return serialized_msg; } const char * copy_payload_cpp_ros_message( const rosidl_typesupport_introspection_cpp::MessageMember * member, const char * serialized_msg, char * ros_message_field) { auto sub_members = (const rosidl_typesupport_introspection_cpp::MessageMembers *)member->members_->data; if (!member->is_array_) { serialized_msg = deserialize(serialized_msg, sub_members, ros_message_field); } else { void * subros_message = nullptr; size_t array_elememts = 0; size_t sub_members_size = sub_members->size_of_; std::tie(serialized_msg, array_elememts) = get_submessage_vector_cpp( member, serialized_msg, ros_message_field, subros_message, sub_members_size); for (size_t index = 0; index < array_elememts; ++index) { serialized_msg = deserialize(serialized_msg, sub_members, subros_message); subros_message = static_cast<char *>(subros_message) + sub_members_size; } } return serialized_msg; } const char * deserialize( const char * serialized_msg, const rosidl_typesupport_introspection_cpp::MessageMembers * members, void * ros_message) { for (uint32_t i = 0; i < members->member_count_; ++i) { const auto * member = members->members_ + i; char * ros_message_field = static_cast<char 
*>(ros_message) + member->offset_; switch (member->type_id_) { case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_BOOL: serialized_msg = copy_payload_cpp<bool>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_BYTE: case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_UINT8: serialized_msg = copy_payload_cpp<uint8_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_CHAR: case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_INT8: serialized_msg = copy_payload_cpp<char>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_FLOAT32: serialized_msg = copy_payload_cpp<float>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_FLOAT64: serialized_msg = copy_payload_cpp<double>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_INT16: serialized_msg = copy_payload_cpp<int16_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_UINT16: serialized_msg = copy_payload_cpp<uint16_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_INT32: serialized_msg = copy_payload_cpp<int32_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_UINT32: serialized_msg = copy_payload_cpp<uint32_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_INT64: serialized_msg = copy_payload_cpp<int64_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_UINT64: serialized_msg = copy_payload_cpp<uint64_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_STRING: serialized_msg = 
copy_payload_cpp<std::string>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_cpp::ROS_TYPE_MESSAGE: serialized_msg = copy_payload_cpp_ros_message(member, serialized_msg, ros_message_field); break; default: throw std::runtime_error("unknown type"); } } return serialized_msg; } const char * deserialize( const char * serialized_msg, const rosidl_typesupport_introspection_c__MessageMembers * members, void * ros_message) { for (uint32_t i = 0; i < members->member_count_; ++i) { const auto * member = members->members_ + i; char * ros_message_field = static_cast<char *>(ros_message) + member->offset_; switch (member->type_id_) { case ::rosidl_typesupport_introspection_c__ROS_TYPE_BOOL: serialized_msg = copy_payload_c<bool>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_BYTE: case ::rosidl_typesupport_introspection_c__ROS_TYPE_UINT8: serialized_msg = copy_payload_c<uint8_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_CHAR: case ::rosidl_typesupport_introspection_c__ROS_TYPE_INT8: serialized_msg = copy_payload_c<char>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_FLOAT32: serialized_msg = copy_payload_c<float>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_FLOAT64: serialized_msg = copy_payload_c<double>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_INT16: serialized_msg = copy_payload_c<int16_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_UINT16: serialized_msg = copy_payload_c<uint16_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_INT32: serialized_msg = copy_payload_c<int32_t>(member, serialized_msg, ros_message_field); break; case 
::rosidl_typesupport_introspection_c__ROS_TYPE_UINT32: serialized_msg = copy_payload_c<uint32_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_INT64: serialized_msg = copy_payload_c<int64_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_UINT64: serialized_msg = copy_payload_c<uint64_t>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_STRING: serialized_msg = copy_payload_c<std::string>(member, serialized_msg, ros_message_field); break; case ::rosidl_typesupport_introspection_c__ROS_TYPE_MESSAGE: serialized_msg = copy_payload_c_ros_message(member, serialized_msg, ros_message_field); break; default: throw std::runtime_error("unknown type"); } } return serialized_msg; } } // namespace rmw_iceoryx_cpp
38.140461
100
0.733194
[ "vector" ]
613f81a2be429a23f98e56b261f33947f9ceb4ac
3,346
cpp
C++
GWEN/src/Controls/PanelListPanel.cpp
hpidcock/gbsfml
e3aa990dff8c6b95aef92bab3e94affb978409f2
[ "Zlib" ]
null
null
null
GWEN/src/Controls/PanelListPanel.cpp
hpidcock/gbsfml
e3aa990dff8c6b95aef92bab3e94affb978409f2
[ "Zlib" ]
null
null
null
GWEN/src/Controls/PanelListPanel.cpp
hpidcock/gbsfml
e3aa990dff8c6b95aef92bab3e94affb978409f2
[ "Zlib" ]
null
null
null
#include "stdafx.h" #include "Gwen/Controls/PanelListPanel.h" using namespace Gwen; using namespace Controls; GWEN_CONTROL_CONSTRUCTOR( PanelListPanel ) { m_bVertical = false; m_bSizeToChildren = true; m_iControlSpacing = 5; m_iLineSpacing = 5; m_bWrapping = true; } void PanelListPanel::Render( Gwen::Skin::Base* skin ) { } Point PanelListPanel::GetBiggestChildSize() { int width = 0; int height = 0; for ( Base::List::iterator it = Children.begin(); it != Children.end(); ++it ) { Controls::Base* pChild = *it; if ( pChild->Width() > width ) width = pChild->Width(); if ( pChild->Height() > height ) height = pChild->Height(); } return Point( width, height ); } Point PanelListPanel::GetChildrenSizeTotal() { int width = 0; int height = 0; for ( Base::List::iterator it = Children.begin(); it != Children.end(); ++it ) { Controls::Base* pChild = *it; int testX = pChild->X() + pChild->Width(); if ( testX > width ) width = testX; int testY = pChild->Y() + pChild->Height(); if ( testY > height ) height = testY; } return Point( width, height ); } void PanelListPanel::DoVerticalLayout() { int panelWidth = 0; int panelX = GetPadding().left; int panelY = GetPadding().top; int lastPanelY = panelY; int testWrap = 0; Point childSize = GetBiggestChildSize(); //Lay my children out accordingly for ( Base::List::iterator it = Children.begin(); it != Children.end(); ++it ) { Controls::Base* pChild = *it; testWrap = lastPanelY + m_iControlSpacing + childSize.y; if ( m_bWrapping && testWrap > Height() - GetPadding().bottom ) { panelY = GetPadding().top; panelX = GetPadding().left + panelWidth + m_iLineSpacing; lastPanelY = panelY + m_iControlSpacing + childSize.y; } else { panelY = lastPanelY; lastPanelY = testWrap; } pChild->SetPos( panelX, panelY ); if (pChild->X() + childSize.x > panelWidth ) panelWidth = pChild->X() + childSize.x; } if ( m_bSizeToChildren ) { Point childrenSizeTotal = GetChildrenSizeTotal(); SetSize( childrenSizeTotal.x, Height()); } } void 
PanelListPanel::DoHorizontalLayout() { int panelHeight = 0; int panelX = GetPadding().left; int panelY = GetPadding().top; int lastPanelX = panelX; int testWrap = 0; Point childSize = GetBiggestChildSize(); for ( Base::List::iterator it = Children.begin(); it != Children.end(); ++it ) { Controls::Base* pChild = *it; testWrap = lastPanelX + m_iControlSpacing + childSize.x; if ( m_bWrapping && testWrap > Width() - GetPadding().right ) { panelX = GetPadding().left; panelY = GetPadding().top + panelHeight + m_iLineSpacing; lastPanelX = panelX + m_iControlSpacing + childSize.x; } else { panelX = lastPanelX; lastPanelX = testWrap; } pChild->SetPos( panelX, panelY ); if (pChild->Y() + childSize.y > panelHeight ) panelHeight = pChild->Y() + childSize.y; } if ( m_bSizeToChildren ) { Point childrenSizeTotal = GetChildrenSizeTotal(); SetSize( Width(), childrenSizeTotal.y); } } void PanelListPanel::Layout( Skin::Base* skin ) { BaseClass::Layout( skin ); if ( IsHorizontalLayout() ) DoHorizontalLayout(); else DoVerticalLayout(); }
23.075862
80
0.63419
[ "render" ]
61412b7e61fd5d8f8334bf12f513ca80c72c3aa7
4,359
hpp
C++
src/algebra/fields/fp6_2over3.hpp
ThisIsNotOfficialCodeItsJustForks/libsnark
6c23407a73ecc8d7648772886f2cd500cd8560a7
[ "MIT" ]
16
2017-03-12T17:10:00.000Z
2021-11-04T14:42:25.000Z
src/algebra/fields/fp6_2over3.hpp
ThisIsNotOfficialCodeItsJustForks/libsnark
6c23407a73ecc8d7648772886f2cd500cd8560a7
[ "MIT" ]
null
null
null
src/algebra/fields/fp6_2over3.hpp
ThisIsNotOfficialCodeItsJustForks/libsnark
6c23407a73ecc8d7648772886f2cd500cd8560a7
[ "MIT" ]
12
2017-06-26T20:20:37.000Z
2021-11-04T14:42:18.000Z
/** @file ***************************************************************************** Declaration of arithmetic in the finite field F[(p^3)^2] ***************************************************************************** * @author This file is part of libsnark, developed by SCIPR Lab * and contributors (see AUTHORS). * @copyright MIT license (see LICENSE file) *****************************************************************************/ #ifndef FP6_2OVER3_HPP_ #define FP6_2OVER3_HPP_ #include "algebra/fields/fp.hpp" #include "algebra/fields/fp2.hpp" #include "algebra/fields/fp3.hpp" namespace libsnark { /** * Arithmetic in the finite field F[(p^3)^2]. * * Let p := modulus. This interface provides arithmetic for the extension field * Fp6 = Fp3[Y]/(Y^2-X) where Fp3 = Fp[X]/(X^3-non_residue) and non_residue is in Fp. * * ASSUMPTION: p = 1 (mod 6) */ template<mp_size_t n, const bigint<n>& modulus> class Fp6_2over3_model; template<mp_size_t n, const bigint<n>& modulus> std::ostream& operator<<(std::ostream &, const Fp6_2over3_model<n, modulus> &); template<mp_size_t n, const bigint<n>& modulus> std::istream& operator>>(std::istream &, Fp6_2over3_model<n, modulus> &); template<mp_size_t n, const bigint<n>& modulus> class Fp6_2over3_model { public: typedef Fp_model<n, modulus> my_Fp; typedef Fp2_model<n, modulus> my_Fp2; typedef Fp3_model<n, modulus> my_Fp3; typedef my_Fp3 my_Fpe; static my_Fp non_residue; static my_Fp Frobenius_coeffs_c1[6]; // non_residue^((modulus^i-1)/6) for i=0,1,2,3,4,5 my_Fp3 c0, c1; Fp6_2over3_model() {}; Fp6_2over3_model(const my_Fp3& c0, const my_Fp3& c1) : c0(c0), c1(c1) {}; void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); } void clear() { c0.clear(); c1.clear(); } static Fp6_2over3_model<n, modulus> zero(); static Fp6_2over3_model<n, modulus> one(); static Fp6_2over3_model<n, modulus> random_element(); bool is_zero() const { return c0.is_zero() && c1.is_zero(); } bool operator==(const Fp6_2over3_model &other) const; bool 
operator!=(const Fp6_2over3_model &other) const; Fp6_2over3_model operator+(const Fp6_2over3_model &other) const; Fp6_2over3_model operator-(const Fp6_2over3_model &other) const; Fp6_2over3_model operator*(const Fp6_2over3_model &other) const; Fp6_2over3_model mul_by_2345(const Fp6_2over3_model &other) const; Fp6_2over3_model operator-() const; Fp6_2over3_model squared() const; Fp6_2over3_model inverse() const; Fp6_2over3_model Frobenius_map(unsigned long power) const; Fp6_2over3_model unitary_inverse() const; Fp6_2over3_model cyclotomic_squared() const; static my_Fp3 mul_by_non_residue(const my_Fp3 &elem); template<mp_size_t m> Fp6_2over3_model cyclotomic_exp(const bigint<m> &exponent) const; static bigint<n> base_field_char() { return modulus; } static constexpr size_t extension_degree() { return 6; } friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp6_2over3_model<n, modulus> &el); friend std::istream& operator>> <n, modulus>(std::istream &in, Fp6_2over3_model<n, modulus> &el); }; template<mp_size_t n, const bigint<n>& modulus> std::ostream& operator<<(std::ostream& out, const std::vector<Fp6_2over3_model<n, modulus> > &v); template<mp_size_t n, const bigint<n>& modulus> std::istream& operator>>(std::istream& in, std::vector<Fp6_2over3_model<n, modulus> > &v); template<mp_size_t n, const bigint<n>& modulus> Fp6_2over3_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp6_2over3_model<n, modulus> &rhs); template<mp_size_t n, const bigint<n>& modulus, mp_size_t m> Fp6_2over3_model<n, modulus> operator^(const Fp6_2over3_model<n, modulus> &self, const bigint<m> &exponent); template<mp_size_t n, const bigint<n>& modulus, mp_size_t m, const bigint<m>& exp_modulus> Fp6_2over3_model<n, modulus> operator^(const Fp6_2over3_model<n, modulus> &self, const Fp_model<m, exp_modulus> &exponent); template<mp_size_t n, const bigint<n>& modulus> Fp_model<n, modulus> Fp6_2over3_model<n, modulus>::non_residue; template<mp_size_t n, const 
bigint<n>& modulus> Fp_model<n, modulus> Fp6_2over3_model<n, modulus>::Frobenius_coeffs_c1[6]; } // libsnark #include "algebra/fields/fp6_2over3.tcc" #endif // FP6_2OVER3_HPP_
39.990826
123
0.68479
[ "vector" ]
6143c3eaa145a39299be0227dd10be90c1994984
24,896
cpp
C++
src/ui.cpp
IcingTomato/WioTerminal-LoRaWAN-Gateway-Tester
156a9182aa99f4f9a7e1ea2fc92957ac76643f58
[ "MIT" ]
2
2022-01-22T06:34:07.000Z
2022-03-30T08:03:58.000Z
src/ui.cpp
IcingTomato/WioTerminal-LoRaWAN-Gateway-Tester
156a9182aa99f4f9a7e1ea2fc92957ac76643f58
[ "MIT" ]
3
2021-09-13T08:19:36.000Z
2022-01-29T23:55:47.000Z
src/ui.cpp
IcingTomato/WioTerminal-LoRaWAN-Gateway-Tester
156a9182aa99f4f9a7e1ea2fc92957ac76643f58
[ "MIT" ]
5
2021-08-04T08:17:15.000Z
2021-10-14T02:49:09.000Z
#include <Arduino.h> #include "config.h" #include "fonts.h" #include "testeur.h" #include "ui.h" #include "gps.h" #include "E5_Module.h" #include "SqQueue.h" LGFX lcd; LGFX_Sprite sprite(&lcd); #define X_OFFSET 2 #define Y_OFFSET 0 #define X_SIZE 80 #define Y_SIZE 20 #define R_SIZE 4 #define BOX_SPACING 2 // RRRRRGGGGGGBBBBB #define TFT_GRAY 0b1010010100010000 #define TFT_GRAY10 0b0100001100001000 #define TFT_GRAY20 0b0010000110000100 #define HIST_X_OFFSET 2 #define HIST_Y_OFFSET 75 #define HIST_X_SIZE 315 #define HIST_X_TXTSIZE X_SIZE-3 #define HIST_Y_SIZE 160 #define HIST_X_BAR_OFFSET 50 #define HIST_X_BAR_SPACE 2 #define MAX_SNR 40 #define MAX_RETRY 8 #define MAX_HS 20 #define SELECTED_NONE 0 #define SELECTED_POWER 1 #define SELECTED_SF 2 #define SELECTED_TOTAL 3 ui_t ui; int index_Rssi = 0; bool hasAction = false; void initScreen() { lcd.init(); lcd.setRotation(1); lcd.fillScreen(TFT_BLACK); // Totally unusefull so totally mandatory #ifdef WITH_SPLASH lcd.drawRoundRect((320-200)/2,200,200,10,5,TFT_WHITE); for ( int i = 10 ; i < 100 ; i+=4 ) { lcd.fillRoundRect((320-200)/2+2,202,((204*i)/100),6,3,TFT_WHITE); // #ifdef WITH_SPLASH_HELIUM // draw_splash_helium(HELIUM_XCENTER, (240-100)/2, i); // #endif #ifdef WITH_SPLASH_TTN draw_splash_ttn(TTN_XCENTER, (240-85)/2, i); #endif } #endif lcd.fillScreen(TFT_BLACK); ui.selected_mode = MODE_MANUAL; ui.selected_menu = SELECTED_NONE; ui.displayed_state = UKN_STAT; ui.previous_display = DISPLAY_MAX; ui.hasClick = false; ui.refreshPower = true; ui.refreshSf = true; ui.refreshTotal = true; ui.refreshMode = true; ui.refreshLastFrame = true; ui.hasRefreshed = false; pinMode(WIO_KEY_A, INPUT_PULLUP); pinMode(WIO_KEY_B, INPUT_PULLUP); pinMode(WIO_KEY_C, INPUT_PULLUP); pinMode(WIO_5S_UP, INPUT_PULLUP); pinMode(WIO_5S_DOWN, INPUT_PULLUP); pinMode(WIO_5S_LEFT, INPUT_PULLUP); pinMode(WIO_5S_RIGHT, INPUT_PULLUP); pinMode(WIO_5S_PRESS, INPUT_PULLUP); } /** * Call on regular basis by the main loop * check the button status to update 
the user interface * */ bool Clear_Data_Flag = false; void Clear_Data() { if((E5_Module_Data.SendNumber != 0) || (E5_Module_Data.RecvNumber != 0) || (E5_Module_Data.rssi != 0) || (E5_Module_Data.snr != 0) || (QueueEmpty(SqQueueRssi) != true) || (QueueEmpty(SqQueueRssi) != true)) { E5_Module_Data.SendNumber = 0; E5_Module_Data.RecvNumber = 0; E5_Module_Data.rssi = 0; E5_Module_Data.snr = 0; ClearQueue(&SqQueueRssi); ClearQueue(&SqQueueSnr); ui.refreshLastFrame = true; ui.hasRefreshed = true; } } void Button_Detection(void){ hasAction = true; bool configHasChanged = true; uint8_t prev_select = ui.selected_menu; if (digitalRead(WIO_KEY_C) == LOW) { configHasChanged = false; ui.selected_menu = ( prev_select == SELECTED_POWER )?SELECTED_NONE:SELECTED_POWER; } else if (digitalRead(WIO_KEY_B) == LOW) { configHasChanged = false; ui.selected_menu = ( prev_select == SELECTED_SF )?SELECTED_NONE:SELECTED_SF; } else if (digitalRead(WIO_KEY_A) == LOW) { configHasChanged = false; ui.selected_menu = ( prev_select == SELECTED_TOTAL )?SELECTED_NONE:SELECTED_TOTAL; } else if ((digitalRead(WIO_5S_UP) == LOW) || (digitalRead(WIO_5S_DOWN) == LOW)) { int wio_5s_Up = digitalRead(WIO_5S_UP); switch ( ui.selected_menu ) { case SELECTED_POWER: ui.refreshPower = true; if(wio_5s_Up == LOW){ tst_setPower(state.cPwr+2); } else{ tst_setPower(state.cPwr-2); } E5_Module_Data.Pwr = state.cPwr; SqQueueFillData(&SqQueueAtCmd,AT_POWER); break; case SELECTED_SF: ui.refreshSf = true; if(wio_5s_Up == LOW){ tst_setSf(state.cSf+1); } else{ tst_setSf(state.cSf-1); } E5_Module_Data.Sf = (e_Lora_Sf)(state.cSf); SqQueueFillData(&SqQueueAtCmd,AT_SF); break; case SELECTED_TOTAL: ui.refreshTotal = true; if(wio_5s_Up == LOW){ if(state.cTotal>=990) { state.cTotal = 990; } else { state.cTotal += 10; } } else{ if(state.cTotal<=10) { state.cTotal = 10; } else { state.cTotal -= 10; } } break; case SELECTED_NONE: if(ui.selected_display == DISPLAY_DEVICE_INFO) { if(E5_Module_Data.Moudlue_is_exist == true) { if(wio_5s_Up 
== LOW){ tst_setRegion(state.cRegion+1); } else{ tst_setRegion(state.cRegion-1); } ClearQueue(&SqQueueAtCmd); E5_Module_Data.Region = (e_Lora_Regional)state.cRegion; SqQueueFillData(&SqQueueAtCmd,AT_REGION); if(E5_Module_Data.Pwr != state.cPwr) { ui.refreshPower = true; E5_Module_Data.Pwr = state.cPwr; SqQueueFillData(&SqQueueAtCmd,AT_POWER); } if((E5_Module_Data.Sf != state.cSf) || (state.cRegion == US915) || (state.cRegion == US915HYBRID)) { ui.refreshSf = true; E5_Module_Data.Sf = (e_Lora_Sf)state.cSf; SqQueueFillData(&SqQueueAtCmd,AT_SF); } ui.hasRefreshed = true; if(ui.selected_mode != MODE_MANUAL) // Forced in MODE_MANUAL { ui.selected_mode = MODE_MANUAL; ui.refreshMode = true; } } } else { if(wio_5s_Up == LOW){ ui.selected_mode = (ui.selected_mode+MODE_MAX+1)%MODE_MAX; } else{ ui.selected_mode = (ui.selected_mode+MODE_MAX-1)%MODE_MAX; } ui.refreshMode = true; } default: break; } } else if (digitalRead(WIO_5S_RIGHT) == LOW) { ui.selected_display = (ui.selected_display+1)%DISPLAY_MAX; } else if (digitalRead(WIO_5S_LEFT) == LOW) { ui.selected_display = (ui.selected_display+DISPLAY_MAX-1)%DISPLAY_MAX; } else if (digitalRead(WIO_5S_PRESS) == LOW) { if (( ui.selected_mode == MODE_MANUAL) && (ui.selected_display != DISPLAY_DEVICE_INFO)) { ui.hasClick = true; } else if(ui.selected_mode == MODE_CLEAR_DATA) { Clear_Data(); Clear_Data_Flag = true; } } else{ hasAction = false; configHasChanged = false; } if ( prev_select != ui.selected_menu) { if ( prev_select == SELECTED_POWER || ui.selected_menu == SELECTED_POWER ) { refreshPower(); } if ( prev_select == SELECTED_SF || ui.selected_menu == SELECTED_SF ) { refreshSf(); } if ( prev_select == SELECTED_TOTAL || ui.selected_menu == SELECTED_TOTAL ) { refreshTotal(); } } if ( configHasChanged ) { storeConfig(); } // avoid re-entreing //if ( hasAction ) delay(10);//(300); } void refresUI() { if(ui.refreshPower){ ui.refreshPower = false; refreshPower(); } if(ui.refreshSf){ ui.refreshSf = false; refreshSf(); } 
if(ui.refreshTotal){ ui.refreshTotal = false; refreshTotal(); } if(ui.refreshMode) { ui.refreshMode = false; refreshMode(); } // refresh the Join state part refreshState(); // refresh the graph history part if ((ui.hasRefreshed) || (ui.previous_display != ui.selected_display)){ ui.hasRefreshed = false; switch ( ui.selected_display ) { case DISPLAY_RSSI_HIST: refreshRssiHist(); break; case DISPLAY_SNR_HIST: refreshSnrHist(); break; case DISPLAY_DEVICE_INFO: refreshDeviceInfo(); break; } } if ( ui.refreshLastFrame == true ) { ui.refreshLastFrame = false; refreshLastFrame(); } if(ui.selected_display == DISPLAY_GPS_INFO) { refreshGpsInfo(); } } /** * Select the way the messages are sent * On user action * Automatically */ void refreshMode() { static bool state_tmp = false; int xOffset = X_OFFSET+3*X_SIZE; int yOffset = Y_OFFSET; lcd.fillRoundRect(xOffset,yOffset,X_SIZE-5,Y_SIZE,R_SIZE,TFT_WHITE); lcd.setTextColor(TFT_BLACK); lcd.setFont(FS9); // Select the orginal small TomThumb font switch ( ui.selected_mode ) { case MODE_MANUAL: lcd.drawString("Manual",xOffset+5,yOffset+3); break; case MODE_CLEAR_DATA: lcd.drawString("Clr Data",xOffset+5,yOffset+3); break; case MODE_AUTO_1MIN: lcd.drawString("Auto 1m",xOffset+5,yOffset+3); break; case MODE_MAX_RATE: lcd.drawString("Max rate",xOffset+5,yOffset+3); break; } } /** * Update the last frame information on top of the screen */ void refreshLastFrame() { int xOffset = X_OFFSET; int yOffset = Y_OFFSET+Y_SIZE+2; lcd.fillRect(xOffset,yOffset,3*X_SIZE,Y_SIZE,TFT_BLACK); lcd.drawRoundRect(xOffset,yOffset,3*X_SIZE,Y_SIZE,R_SIZE,TFT_WHITE); lcd.fillRect(xOffset+X_SIZE-3,yOffset+Y_SIZE,3*X_SIZE,Y_SIZE,TFT_BLACK); lcd.drawRoundRect(xOffset+X_SIZE-3,yOffset+Y_SIZE,3*X_SIZE,Y_SIZE,R_SIZE,TFT_WHITE); // int idx = getLastIndexWritten(); lcd.setFont(FS9); lcd.setTextColor(TFT_WHITE, TFT_BLACK); char tmp[100]; if(E5_Module_Data.rssi == 0 && E5_Module_Data.snr == 0) { lcd.setTextColor(TFT_RED, TFT_BLACK); sprintf(tmp,"NO DATA"); } else { 
lcd.setTextColor(TFT_GREEN, TFT_BLACK); sprintf(tmp,"RSSI:%-4ddBm SNR:%-4ddB",E5_Module_Data.rssi,E5_Module_Data.snr); } lcd.drawString(tmp,xOffset+3,yOffset+2); lcd.setTextColor(TFT_WHITE, TFT_BLACK); sprintf(tmp,"Send:%-3d Recv:%-3d PER:%-3d%%",E5_Module_Data.SendNumber,E5_Module_Data.RecvNumber,((E5_Module_Data.SendNumber-E5_Module_Data.RecvNumber)*100)/state.cTotal); lcd.drawString(tmp,xOffset+X_SIZE+2,yOffset+Y_SIZE+2); } /** * Update the current state display to see on the corner right / top * the current tester action */ void refreshState() { int xOffset = X_OFFSET+3*X_SIZE; int yOffset = Y_OFFSET+Y_SIZE+2; if ( ui.displayed_state != E5_Module_Data.State ) { ui.displayed_state = E5_Module_Data.State; lcd.fillRect(xOffset,yOffset,X_SIZE-BOX_SPACING,Y_SIZE,TFT_BLACK); lcd.setTextSize(1); switch ( ui.displayed_state ) { case NOT_JOINED: lcd.setTextColor(TFT_RED, TFT_BLACK); lcd.drawString("Disc",xOffset+3,yOffset+2); break; case JOIN_FAILED: lcd.setTextColor(TFT_RED, TFT_BLACK); lcd.drawString("Fail",xOffset+3,yOffset+2); break; case JOINED: lcd.setTextColor(TFT_GREEN, TFT_BLACK); lcd.drawString("Conn",xOffset+3,yOffset+2); break; case JOINING: lcd.setTextColor(TFT_ORANGE, TFT_BLACK); lcd.drawString("Join",xOffset+3,yOffset+2); break; case IN_TX: lcd.setTextColor(TFT_GREEN, TFT_BLACK); lcd.drawString("Tx",xOffset+3,yOffset+2); break; case IN_RPT: lcd.setTextColor(TFT_ORANGE, TFT_BLACK); lcd.drawString("Tx",xOffset+3,yOffset+2); break; case DWNLINK: lcd.setTextColor(TFT_GREEN, TFT_BLACK); lcd.drawString("Dwn",xOffset+3,yOffset+2); break; case DWNLINK_FAILED: lcd.setTextColor(TFT_RED, TFT_BLACK); lcd.drawString("Dwn",xOffset+3,yOffset+2); break; } } } void refreshPower() { uint16_t color = (ui.selected_menu == SELECTED_POWER)?TFT_WHITE:TFT_GRAY; lcd.fillRoundRect(X_OFFSET,Y_OFFSET,X_SIZE-BOX_SPACING,Y_SIZE,R_SIZE,color); lcd.setTextColor(TFT_BLACK); lcd.setFont(FS9); // Select the orginal small TomThumb font char sPower[10]; sprintf(sPower,"+%02d 
dBm",state.cPwr); lcd.drawString(sPower,X_OFFSET+5,Y_OFFSET+3); } void refreshSf() { int xOffset = X_OFFSET+1*X_SIZE; uint16_t color = (ui.selected_menu == SELECTED_SF)?TFT_WHITE:TFT_GRAY; lcd.fillRoundRect(xOffset,Y_OFFSET,X_SIZE-BOX_SPACING,Y_SIZE,R_SIZE,color); lcd.setTextColor(TFT_BLACK); lcd.setFont(FS9);//); // Select the orginal small TomThumb font char sSf[10]; if((state.cRegion == US915) || (state.cRegion == US915HYBRID)){ sprintf(sSf,"SF %02d",state.cSf-2); } else{ sprintf(sSf,"SF %02d",state.cSf); } lcd.drawString(sSf,xOffset+5,Y_OFFSET+3); } void refreshTotal(){ int xOffset = X_OFFSET+2*X_SIZE; uint16_t color = (ui.selected_menu == SELECTED_TOTAL)?TFT_WHITE:TFT_GRAY; lcd.fillRoundRect(xOffset,Y_OFFSET,X_SIZE-BOX_SPACING,Y_SIZE,R_SIZE,color); lcd.setTextColor(TFT_BLACK); lcd.setFont(FS9); // Select the orginal small TomThumb font char sRetry[10]; sprintf(sRetry,"Total %3d",state.cTotal); lcd.drawString(sRetry,xOffset+5,Y_OFFSET+3); } void refreshRssiHist() { // No need to refresh everytime if ( ui.previous_display != ui.selected_display ) { lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET-18,HIST_X_TXTSIZE,18,TFT_BLACK); lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,TFT_BLACK); lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); lcd.drawString("Rx Rssi",HIST_X_OFFSET,HIST_Y_OFFSET-18); lcd.drawRoundRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,R_SIZE,TFT_WHITE); ui.previous_display = ui.selected_display; } // clean the bar int xSz = (HIST_X_SIZE - (HIST_X_OFFSET+HIST_X_BAR_OFFSET + MAXBUFFER*HIST_X_BAR_SPACE)) / MAXBUFFER; int xOffset = HIST_X_OFFSET+HIST_X_SIZE-xSz-HIST_X_BAR_SPACE; for ( int i = 0 ; i < MAXBUFFER ; i++ ) { lcd.fillRect(xOffset,HIST_Y_OFFSET+1,xSz,154,TFT_BLACK); xOffset -= xSz + HIST_X_BAR_SPACE; } // Redraw lines lcd.drawLine(HIST_X_OFFSET+2,HIST_Y_OFFSET+10,HIST_X_SIZE-2,HIST_Y_OFFSET+10,TFT_GRAY); for ( int i = 20 ; i < HIST_Y_SIZE ; i+=20 ) { if ( i % 40 == 0 ) { char sTmp[10]; sprintf(sTmp,"-%d",i); 
lcd.setFont(FF25); lcd.setTextColor(TFT_GRAY); lcd.drawString(sTmp,HIST_X_OFFSET+5,HIST_Y_OFFSET-5+i); } lcd.drawLine(HIST_X_OFFSET+2,HIST_Y_OFFSET+10+i,HIST_X_SIZE-2,HIST_Y_OFFSET+10+i,TFT_GRAY20); } xOffset = HIST_X_OFFSET+HIST_X_SIZE-xSz-HIST_X_BAR_SPACE; int i = SqQueueRssi.front; int length = QueueLength(SqQueueRssi); if(length <= 0) { return; } xOffset -= (xSz + HIST_X_BAR_SPACE)*(length-1); while(i!=SqQueueRssi.rear) { int rssi = SqQueueRssi.data[i];//data[i]; i=(i+1)%MAXSIZE; uint16_t color = TFT_GREEN; if ( rssi > 8 ) rssi = 8; // avoid drawing over the graph. if ( rssi < -125 ) color = TFT_RED; else if (rssi < -100 ) color = TFT_ORANGE; else if (rssi < -80 ) color = TFT_DARKGREEN; if ( rssi < 0 ) { lcd.fillRect(xOffset,HIST_Y_OFFSET+10,xSz,-rssi,color); } else { lcd.fillRect(xOffset,HIST_Y_OFFSET+10-rssi,xSz,rssi,color); } xOffset += xSz + HIST_X_BAR_SPACE; } } void refreshSnrHist() { // No need to refresh everytime if ( ui.previous_display != ui.selected_display ) { lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET-18,HIST_X_TXTSIZE,18,TFT_BLACK); lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,TFT_BLACK); lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); lcd.drawString("Rx Snr",HIST_X_OFFSET,HIST_Y_OFFSET-18); lcd.drawRoundRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,R_SIZE,TFT_WHITE); ui.previous_display = ui.selected_display; } // clean the bar int xSz = (HIST_X_SIZE - (HIST_X_OFFSET+HIST_X_BAR_OFFSET + MAXBUFFER*HIST_X_BAR_SPACE)) / MAXBUFFER; int xOffset = HIST_X_OFFSET+HIST_X_SIZE-xSz-HIST_X_BAR_SPACE; for ( int i = 0 ; i < MAXBUFFER ; i++ ) { lcd.fillRect(xOffset,HIST_Y_OFFSET+2,xSz,HIST_Y_SIZE-4,TFT_BLACK); xOffset -= xSz + HIST_X_BAR_SPACE; } // Redraw lines int yOffset = HIST_Y_OFFSET+HIST_Y_SIZE-10; int yStep10 = ((HIST_Y_OFFSET+HIST_Y_SIZE-10) - ( HIST_Y_OFFSET - 5 )) / (MAX_SNR/10); // step for 10 SNR for ( int i = 10 ; i < MAX_SNR ; i+= 10 ) { int y = yOffset-(yStep10*i)/10; if ( i % 10 == 0 ) { char sTmp[10]; if(i == 10) { 
sprintf(sTmp,"-10"); } else if(i == 20) { sprintf(sTmp,"0"); } else if(i == 30) { sprintf(sTmp,"10"); } lcd.setFont(FF25); lcd.setTextColor(TFT_GRAY); lcd.drawString(sTmp,HIST_X_OFFSET+5,y-15); } lcd.drawLine(HIST_X_OFFSET+2,y,HIST_X_SIZE-2,y,TFT_GRAY20); } lcd.drawLine(HIST_X_OFFSET+2,yOffset-(yStep10*20)/10,HIST_X_SIZE-2,yOffset-(yStep10*20)/10,TFT_GRAY); xOffset = HIST_X_OFFSET+HIST_X_SIZE-xSz-HIST_X_BAR_SPACE; yOffset -= (yStep10*20)/10; int i = SqQueueSnr.front; int length = QueueLength(SqQueueSnr); if(length <= 0) { return; } xOffset -= (xSz + HIST_X_BAR_SPACE)*(length-1); while(i!=SqQueueRssi.rear) { int snr = SqQueueSnr.data[i];//data[i]; i=(i+1)%MAXSIZE; uint16_t color = TFT_GREEN; if ( snr < 0 ) color = TFT_RED; else if (snr < 10 ) color = TFT_ORANGE; else if (snr < 20 ) color = TFT_DARKGREEN; if(snr >= 20) { snr = 20; }else if(snr <= -20) { snr = -20; } if(snr<0) { lcd.fillRect(xOffset,yOffset,xSz,- (snr*yStep10)/10,color); } else { lcd.fillRect(xOffset,yOffset-(snr*yStep10)/10,xSz,(snr*yStep10)/10,color); } xOffset += xSz + HIST_X_BAR_SPACE; } } void refreshDeviceInfo() { int Length = 60; int xOffset; static bool lora_state = false; if (ui.previous_display != ui.selected_display ){ lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET-18,HIST_X_TXTSIZE,18,TFT_BLACK); lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,TFT_BLACK); lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); lcd.drawString("Device",HIST_X_OFFSET,HIST_Y_OFFSET-18); lcd.drawRoundRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,R_SIZE,TFT_WHITE); } if((lora_state != E5_Module_Data.Moudlue_is_exist) || (ui.refreshDeviceInfo) || (ui.previous_display != ui.selected_display)) { ui.refreshDeviceInfo = false; lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,TFT_BLACK); lcd.drawRoundRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,R_SIZE,TFT_WHITE); lora_state = E5_Module_Data.Moudlue_is_exist; if(E5_Module_Data.Moudlue_is_exist == true) { xOffset = HIST_X_OFFSET 
+ 10; int yOffset = HIST_Y_OFFSET + 2; lcd.setFont(FM9); lcd.setTextColor(TFT_BLUE); lcd.drawString("LoRaWAN",xOffset,yOffset); yOffset += 18; lcd.setTextColor(TFT_WHITE); lcd.drawString("Region:",xOffset,yOffset); lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); char str[100] = {0}; char str1[50] = {0}; strcpy(str,"DevEui:"); strcat(str,"\""); strcat(str,E5_Module_Data.DevEui); strcat(str,"\""); lcd.drawString(str,xOffset,yOffset+18); memset(str, 0, sizeof(str)); strcpy(str,"AppEui:"); strcat(str,"\""); strcat(str,E5_Module_Data.AppEui); strcat(str,"\""); lcd.drawString(str,xOffset,yOffset+36); memset(str, 0, sizeof(str)); strcpy(str,"AppKey:"); strcat(str,"\""); memcpy(str1,E5_Module_Data.AppKey,16); strcat(str,str1); lcd.drawString(str,xOffset,yOffset+54); memset(str, 0, sizeof(str)); strcpy(str, " "); strcat(str,&E5_Module_Data.AppKey[16]); strcat(str,"\""); lcd.drawString(str,xOffset,yOffset+72); lcd.setTextColor(TFT_BLUE); lcd.drawString("Firmware Version",xOffset,yOffset+90); lcd.setTextColor(TFT_WHITE); memset(str, 0, sizeof(str)); strcpy(str,"Lora :"); strcat(str,"\""); strcat(str,"V"); strcat(str,E5_Module_Data.Version); strcat(str,"\""); lcd.drawString(str,xOffset,yOffset+108); } else { xOffset = HIST_X_OFFSET + 10; lcd.setFont(FSSO9); lcd.setTextColor(TFT_RED); lcd.drawString("LoRaWAN No Find",xOffset,HIST_Y_OFFSET+20); } } ui.previous_display = ui.selected_display; if(E5_Module_Data.Moudlue_is_exist == true) { lcd.setTextColor(TFT_BLACK); lcd.setFont(FS9); lcd.fillRoundRect(HIST_X_OFFSET + 10+80,HIST_Y_OFFSET +18 + 2-2,130,18,4,TFT_BLACK);//TFT_GRAY // TFT_WHITE if(state.cRegion == US915HYBRID) { Length = 130; } lcd.fillRoundRect(HIST_X_OFFSET + 10+80,HIST_Y_OFFSET +18 + 2-2,Length,18,4,TFT_WHITE);//TFT_GRAY // TFT_WHITE switch(state.cRegion) { case EU868: lcd.drawString("EU868",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case US915: lcd.drawString("US915",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case US915HYBRID: 
lcd.drawString("US915HYBRID",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case AU915: lcd.drawString("AU915",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case AS923: lcd.drawString("AS923",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case KR920: lcd.drawString("KR920",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; case IN865: lcd.drawString("IN865",HIST_X_OFFSET + 10+84,HIST_Y_OFFSET +18 + 2); break; default: break; } } } void refreshGpsInfo(){ int xOffset,yOffset; if ( ui.previous_display != ui.selected_display ) { lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET-18,HIST_X_TXTSIZE,18,TFT_BLACK); lcd.fillRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,TFT_BLACK); lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); lcd.drawString("Gps",HIST_X_OFFSET,HIST_Y_OFFSET-18); xOffset = HIST_X_OFFSET+10; yOffset = HIST_Y_OFFSET+10; lcd.drawString("Date: ",xOffset,yOffset); lcd.drawString(N_date,xOffset+65,yOffset); yOffset += 18; lcd.drawString("Time: ",xOffset,yOffset); lcd.drawString(N_time,xOffset+65,yOffset); yOffset += 18; lcd.drawString("LAT: ",xOffset,yOffset); lcd.drawString(N_lat,xOffset+65,yOffset); yOffset += 18; lcd.drawString("LONG: ",xOffset,yOffset); lcd.drawString(N_lng,xOffset+65,yOffset); yOffset += 18; lcd.drawString("ALT: ",xOffset,yOffset); lcd.drawString(N_meters,xOffset+65,yOffset); yOffset += 18; lcd.drawString("Satellites: ",xOffset,yOffset); lcd.drawString(N_satellites,xOffset+135,yOffset); lcd.drawRoundRect(HIST_X_OFFSET,HIST_Y_OFFSET,HIST_X_SIZE,HIST_Y_SIZE,R_SIZE,TFT_WHITE); ui.previous_display = ui.selected_display; } lcd.setFont(FM9); lcd.setTextColor(TFT_WHITE); xOffset = HIST_X_OFFSET+10+50; yOffset = HIST_Y_OFFSET+10; if(N_date != P_date){ lcd.fillRect(xOffset,yOffset,150,18,TFT_BLACK); lcd.drawString(N_date,xOffset+15,yOffset); P_date = N_date; } yOffset += 18; if(N_time != P_time){ lcd.fillRect(xOffset,yOffset,150,18,TFT_BLACK); lcd.drawString(N_time,xOffset+15,yOffset); P_time = N_time; } yOffset += 18; if(N_lat 
!= P_lat){ lcd.fillRect(xOffset,yOffset,150,18,TFT_BLACK); lcd.drawString(N_lat,xOffset+15,yOffset); P_lat = N_lat; } yOffset += 18; if(N_lng != P_lng){ lcd.fillRect(xOffset,yOffset,150,18,TFT_BLACK); lcd.drawString(N_lng,xOffset+15,yOffset); P_lng = N_lng; } yOffset += 18; if(N_meters != P_meters){ lcd.fillRect(xOffset,yOffset,150,18,TFT_BLACK); lcd.drawString(N_meters,xOffset+15,yOffset); P_meters = N_meters; } xOffset += 70; yOffset += 18; if(N_satellites != P_satellites){ lcd.fillRect(xOffset,yOffset,100,18,TFT_BLACK); lcd.drawString(N_satellites,xOffset+15,yOffset); P_satellites = N_satellites; } }
31.836317
173
0.621023
[ "3d" ]
6151a2c00d1175c25fc37c76c5a1be3c5ddad79d
475,041
cxx
C++
panda/src/glstuff/glGraphicsStateGuardian_src.cxx
Psychotropos/panda3d
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
[ "PHP-3.01", "PHP-3.0" ]
null
null
null
panda/src/glstuff/glGraphicsStateGuardian_src.cxx
Psychotropos/panda3d
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
[ "PHP-3.01", "PHP-3.0" ]
null
null
null
panda/src/glstuff/glGraphicsStateGuardian_src.cxx
Psychotropos/panda3d
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
[ "PHP-3.01", "PHP-3.0" ]
null
null
null
/** * PANDA 3D SOFTWARE * Copyright (c) Carnegie Mellon University. All rights reserved. * * All use of this software is subject to the terms of the revised BSD * license. You should have received a copy of this license along * with this source code in a file named "LICENSE." * * @file glGraphicsStateGuardian_src.cxx * @author drose * @date 1999-02-02 * @author fperazzi, PandaSE * @date 2010-05-05 * get_supports_cg_profile) */ #include "config_putil.h" #include "displayRegion.h" #include "renderBuffer.h" #include "geom.h" #include "geomVertexData.h" #include "geomTriangles.h" #include "geomTristrips.h" #include "geomTrifans.h" #include "geomLines.h" #include "geomLinestrips.h" #include "geomPoints.h" #include "geomVertexReader.h" #include "graphicsWindow.h" #include "lens.h" #include "perspectiveLens.h" #include "directionalLight.h" #include "pointLight.h" #include "spotlight.h" #include "planeNode.h" #include "fog.h" #include "clockObject.h" #include "string_utils.h" #include "nodePath.h" #include "dcast.h" #include "pvector.h" #include "vector_string.h" #include "string_utils.h" #include "pnmImage.h" #include "config_gobj.h" #include "lightMutexHolder.h" #include "indirectLess.h" #include "pStatTimer.h" #include "load_prc_file.h" #include "bamCache.h" #include "bamCacheRecord.h" #include "alphaTestAttrib.h" #include "clipPlaneAttrib.h" #include "cullFaceAttrib.h" #include "depthOffsetAttrib.h" #include "depthWriteAttrib.h" #include "fogAttrib.h" #include "lightAttrib.h" #include "logicOpAttrib.h" #include "materialAttrib.h" #include "rescaleNormalAttrib.h" #include "scissorAttrib.h" #include "shadeModelAttrib.h" #include "stencilAttrib.h" #include "graphicsEngine.h" #include "shaderGenerator.h" #include "samplerState.h" #include "displayInformation.h" #if defined(HAVE_CG) && !defined(OPENGLES) #include <Cg/cgGL.h> #endif #include <algorithm> using std::dec; using std::endl; using std::hex; using std::max; using std::min; using std::string; TypeHandle 
CLP(GraphicsStateGuardian)::_type_handle; PStatCollector CLP(GraphicsStateGuardian)::_load_display_list_pcollector("Draw:Transfer data:Display lists"); PStatCollector CLP(GraphicsStateGuardian)::_primitive_batches_display_list_pcollector("Primitive batches:Display lists"); PStatCollector CLP(GraphicsStateGuardian)::_vertices_display_list_pcollector("Vertices:Display lists"); PStatCollector CLP(GraphicsStateGuardian)::_vertices_immediate_pcollector("Vertices:Immediate mode"); PStatCollector CLP(GraphicsStateGuardian)::_memory_barrier_pcollector("Draw:Memory barriers"); PStatCollector CLP(GraphicsStateGuardian)::_vertex_array_update_pcollector("Draw:Update arrays"); PStatCollector CLP(GraphicsStateGuardian)::_texture_update_pcollector("Draw:Update texture"); PStatCollector CLP(GraphicsStateGuardian)::_fbo_bind_pcollector("Draw:Bind FBO"); PStatCollector CLP(GraphicsStateGuardian)::_check_error_pcollector("Draw:Check errors"); PStatCollector CLP(GraphicsStateGuardian)::_check_residency_pcollector("*:PStats:Check residency"); // The following noop functions are assigned to the corresponding glext // function pointers in the class, in case the functions are not defined by // the GL, just so it will always be safe to call the extension functions. static void APIENTRY null_glPointParameterfv(GLenum, const GLfloat *) { } #ifdef OPENGLES_1 // OpenGL ES 1 doesn't support this, period. Might as well macro it. #define _glDrawRangeElements(mode, start, end, count, type, indices) \ glDrawElements(mode, count, type, indices) #else static void APIENTRY null_glDrawRangeElements(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices) { // If we don't support glDrawRangeElements(), just use the original // glDrawElements() instead. 
glDrawElements(mode, count, type, indices); } #endif #if defined(OPENGLES) && !defined(OPENGLES_1) static void APIENTRY null_glVertexAttrib4dv(GLuint index, const GLdouble *v) { GLfloat vf[4] = {(GLfloat)v[0], (GLfloat)v[1], (GLfloat)v[2], (GLfloat)v[3]}; glVertexAttrib4fv(index, vf); } #endif static void APIENTRY null_glActiveTexture(GLenum gl_texture_stage) { // If we don't support multitexture, we'd better not try to request a // texture beyond the first texture stage. nassertv(gl_texture_stage == GL_TEXTURE0); } #ifdef OPENGLES_2 #define _glBlendEquation glBlendEquation #define _glBlendEquationSeparate glBlendEquationSeparate #define _glBlendFuncSeparate glBlendFuncSeparate #define _glBlendColor glBlendColor #else static void APIENTRY null_glBlendEquation(GLenum) { } static void APIENTRY null_glBlendFuncSeparate(GLenum src, GLenum dest, GLenum, GLenum) { glBlendFunc(src, dest); } static void APIENTRY null_glBlendColor(GLclampf, GLclampf, GLclampf, GLclampf) { } #endif #ifndef OPENGLES_1 // We have a default shader that will be applied when there isn't any shader // applied (e.g. if it failed to compile). We need this because OpenGL ES // 2.x and OpenGL 3.2+ core don't have a fixed-function pipeline. This // default shader just applies a single texture, which is good enough for // drawing GUIs and such. static const string default_vshader = #ifndef OPENGLES #ifdef __APPLE__ // Apple's GL 3.2 contexts require at least GLSL 1.50. 
"#version 150\n" #else "#version 130\n" #endif "in vec4 p3d_Vertex;\n" "in vec4 p3d_Color;\n" "in vec2 p3d_MultiTexCoord0;\n" "out vec2 texcoord;\n" "out vec4 color;\n" #else "precision mediump float;\n" "attribute vec4 p3d_Vertex;\n" "attribute vec4 p3d_Color;\n" "attribute vec2 p3d_MultiTexCoord0;\n" "varying vec2 texcoord;\n" "varying lowp vec4 color;\n" #endif "uniform mat4 p3d_ModelViewProjectionMatrix;\n" "uniform vec4 p3d_ColorScale;\n" "void main(void) {\n" " gl_Position = p3d_ModelViewProjectionMatrix * p3d_Vertex;\n" " texcoord = p3d_MultiTexCoord0;\n" " color = p3d_Color * p3d_ColorScale;\n" "}\n"; #ifndef OPENGLES // This version of the shader is used if vertices-float64 is enabled. static const string default_vshader_fp64 = #ifdef __APPLE__ "#version 150\n" #else "#version 130\n" #endif "#extension GL_ARB_vertex_attrib_64bit : require\n" "#extension GL_ARB_gpu_shader_fp64 : require\n" "in dvec3 p3d_Vertex;\n" "in vec4 p3d_Color;\n" "in dvec2 p3d_MultiTexCoord0;\n" "out vec2 texcoord;\n" "out vec4 color;\n" "uniform mat4 p3d_ModelViewMatrix;\n" "uniform mat4 p3d_ProjectionMatrix;\n" "uniform vec4 p3d_ColorScale;\n" "void main(void) {\n" // Apply proj & modelview in two steps, more precise " gl_Position = vec4(dmat4(p3d_ProjectionMatrix) * (dmat4(p3d_ModelViewMatrix) * dvec4(p3d_Vertex, 1)));\n" " texcoord = vec2(p3d_MultiTexCoord0);\n" " color = p3d_Color * p3d_ColorScale;\n" "}\n"; // Same as above, but for OpenGL 4.1. 
static const string default_vshader_fp64_gl41 =
  "#version 410\n"
  "in dvec3 p3d_Vertex;\n"
  "in vec4 p3d_Color;\n"
  "in dvec2 p3d_MultiTexCoord0;\n"
  "out vec2 texcoord;\n"
  "out vec4 color;\n"
  "uniform mat4 p3d_ModelViewMatrix;\n"
  "uniform mat4 p3d_ProjectionMatrix;\n"
  "uniform vec4 p3d_ColorScale;\n"
  "void main(void) {\n"
  // Apply proj & modelview in two steps, more precise
  "  gl_Position = vec4(dmat4(p3d_ProjectionMatrix) * (dmat4(p3d_ModelViewMatrix) * dvec4(p3d_Vertex, 1)));\n"
  "  texcoord = vec2(p3d_MultiTexCoord0);\n"
  "  color = p3d_Color * p3d_ColorScale;\n"
  "}\n";
#endif

// Default fragment shader: samples the first texture stage, adds the
// alpha-only hack value (used for text rendering), and modulates by the
// interpolated vertex color.
static const string default_fshader =
#ifndef OPENGLES
#ifdef __APPLE__
  // Apple's GL 3.2 contexts require at least GLSL 1.50.
  "#version 150\n"
#else
  "#version 130\n"
#endif
  "in vec2 texcoord;\n"
  "in vec4 color;\n"
  "out vec4 p3d_FragColor;\n"
  "uniform sampler2D p3d_Texture0;\n"
  "uniform vec4 p3d_TexAlphaOnly;\n"
#else
  "precision mediump float;\n"
  "varying vec2 texcoord;\n"
  "varying lowp vec4 color;\n"
  "uniform lowp sampler2D p3d_Texture0;\n"
  "uniform lowp vec4 p3d_TexAlphaOnly;\n"
#endif
  "void main(void) {\n"
#ifndef OPENGLES
  "  p3d_FragColor = texture(p3d_Texture0, texcoord);\n"
  "  p3d_FragColor += p3d_TexAlphaOnly;\n" // Hack for text rendering
  "  p3d_FragColor *= color;\n"
#else
  "  gl_FragColor = texture2D(p3d_Texture0, texcoord);\n"
  "  gl_FragColor += p3d_TexAlphaOnly;\n" // Hack for text rendering
  "  gl_FragColor *= color;\n"
#endif
  "}\n";
#endif

/**
 * Recopies the given array of pixels, converting from BGR to RGB arrangement.
 * dest and source are 3-byte-per-pixel buffers; they may be the same buffer,
 * since only same-index bytes are swapped.
 */
static void
uchar_bgr_to_rgb(unsigned char *dest, const unsigned char *source,
                 int num_pixels) {
  for (int i = 0; i < num_pixels; i++) {
    // Swap the first and third byte of each pixel; the middle byte is kept.
    dest[0] = source[2];
    dest[1] = source[1];
    dest[2] = source[0];
    dest += 3;
    source += 3;
  }
}

/**
 * Recopies the given array of pixels, converting from BGRA to RGBA
 * arrangement.  4-byte-per-pixel variant of the above; alpha is copied
 * through unchanged.
 */
static void
uchar_bgra_to_rgba(unsigned char *dest, const unsigned char *source,
                   int num_pixels) {
  for (int i = 0; i < num_pixels; i++) {
    dest[0] = source[2];
    dest[1] = source[1];
    dest[2] = source[0];
    dest[3] = source[3];
    dest += 4;
    source += 4;
  }
}

/**
 * Recopies the given array of pixels, converting from BGR to RGB arrangement.
 * 16-bit-per-component variant: operates on unsigned shorts, 3 per pixel.
 */
static void
ushort_bgr_to_rgb(unsigned short *dest, const unsigned short *source,
                  int num_pixels) {
  for (int i = 0; i < num_pixels; i++) {
    dest[0] = source[2];
    dest[1] = source[1];
    dest[2] = source[0];
    dest += 3;
    source += 3;
  }
}

/**
 * Recopies the given array of pixels, converting from BGRA to RGBA
 * arrangement.  16-bit-per-component variant: unsigned shorts, 4 per pixel.
 */
static void
ushort_bgra_to_rgba(unsigned short *dest, const unsigned short *source,
                    int num_pixels) {
  for (int i = 0; i < num_pixels; i++) {
    dest[0] = source[2];
    dest[1] = source[1];
    dest[2] = source[0];
    dest[3] = source[3];
    dest += 4;
    source += 4;
  }
}

/**
 * Reverses the order of the components within the image, to convert (for
 * instance) GL_BGR to GL_RGB.  Returns the byte pointer representing the
 * converted image, or the original image if it is unchanged.
 *
 * new_image must be supplied; it is the PTA_uchar that will be used to hold
 * the converted image if required.  It will be modified only if the
 * conversion is necessary, in which case the data will be stored there, and
 * this pointer will be returned.  If the conversion is not necessary, this
 * pointer will be left unchanged.
 *
 * The divisor passed to the *_bgr(a)_* helpers converts a byte count into a
 * pixel count: 3 or 4 bytes per pixel for 8-bit components, 6 or 8 bytes per
 * pixel for 16-bit components.
 */
static const unsigned char *
fix_component_ordering(PTA_uchar &new_image,
                       const unsigned char *orig_image, size_t orig_image_size,
                       GLenum external_format, Texture *tex) {
  const unsigned char *result = orig_image;

  switch (external_format) {
  case GL_RGB:
    // Texture data is stored BGR(A); GL wants RGB(A), so swizzle a copy.
    switch (tex->get_component_type()) {
    case Texture::T_unsigned_byte:
    case Texture::T_byte:
      new_image = PTA_uchar::empty_array(orig_image_size);
      uchar_bgr_to_rgb(new_image, orig_image, orig_image_size / 3);
      result = new_image;
      break;

    case Texture::T_unsigned_short:
    case Texture::T_short:
      new_image = PTA_uchar::empty_array(orig_image_size);
      ushort_bgr_to_rgb((unsigned short *)new_image.p(),
                        (const unsigned short *)orig_image,
                        orig_image_size / 6);
      result = new_image;
      break;

    default:
      // Other component types (e.g. float) are passed through unconverted.
      break;
    }
    break;

  case GL_RGBA:
    switch (tex->get_component_type()) {
    case Texture::T_unsigned_byte:
    case Texture::T_byte:
      new_image = PTA_uchar::empty_array(orig_image_size);
      uchar_bgra_to_rgba(new_image, orig_image, orig_image_size / 4);
      result = new_image;
      break;

    case Texture::T_unsigned_short:
    case Texture::T_short:
      new_image = PTA_uchar::empty_array(orig_image_size);
      ushort_bgra_to_rgba((unsigned short *)new_image.p(),
                          (const unsigned short *)orig_image,
                          orig_image_size / 8);
      result = new_image;
      break;

    default:
      break;
    }
    break;

  default:
    // Formats other than GL_RGB/GL_RGBA need no reordering.
    break;
  }

  return result;
}

// #--- Zhao Nov2011
// Simple accessors exposing the driver-identification strings and version
// numbers queried from the GL at reset() time.
string CLP(GraphicsStateGuardian)::get_driver_vendor() { return _gl_vendor; }
string CLP(GraphicsStateGuardian)::get_driver_renderer() { return _gl_renderer; }

string CLP(GraphicsStateGuardian)::get_driver_version() { return _gl_version; }
int CLP(GraphicsStateGuardian)::get_driver_version_major() { return _gl_version_major; }
int CLP(GraphicsStateGuardian)::get_driver_version_minor() { return _gl_version_minor; }
int CLP(GraphicsStateGuardian)::get_driver_shader_version_major() { return _gl_shadlang_ver_major; }
int CLP(GraphicsStateGuardian)::get_driver_shader_version_minor() { return _gl_shadlang_ver_minor; }

/**
 * Constructs the GL state guardian.  Most real initialization is deferred to
 * reset(), which requires a live GL context.
 */
CLP(GraphicsStateGuardian)::
CLP(GraphicsStateGuardian)(GraphicsEngine *engine, GraphicsPipe *pipe) : GraphicsStateGuardian(gl_coordinate_system, engine, pipe), _renderbuffer_residency(get_prepared_objects()->get_name(), "renderbuffer") { _error_count = 0; _last_error_check = -1.0; // calling glGetError() forces a sync, this turns it on if you want to. _check_errors = gl_check_errors; _force_flush = gl_force_flush; _gl_shadlang_ver_major = 0; _gl_shadlang_ver_minor = 0; // Let's say we have a core profile, to be checked later (Otherwise, if we are // wrong the user may ask for some non-available functions) #ifndef OPENGLES _core_profile = true; #endif // Hack. Turn on the flag that we turned off at a higher level, since we // know this works properly in OpenGL, and we want the performance benefit // it gives us. _prepared_objects->_support_released_buffer_cache = true; // Assume that we will get a hardware-accelerated context, unless the window // tells us otherwise. _is_hardware = true; _scissor_enabled = false; _scissor_attrib_active = false; _white_texture = 0; #ifndef OPENGLES _shader_point_size = false; #endif #ifdef HAVE_CG _cg_context = 0; #endif #ifdef DO_PSTATS if (gl_finish) { GLCAT.warning() << "The config variable gl-finish is set to true. This may have a substantial negative impact on your render performance.\n"; } #endif // DO_PSTATS } /** * */ CLP(GraphicsStateGuardian):: ~CLP(GraphicsStateGuardian)() { if (GLCAT.is_debug()) { GLCAT.debug() << "GLGraphicsStateGuardian " << this << " destructing\n"; } close_gsg(); } /** * This is called by the GL if an error occurs, if gl_debug has been enabled * (and the driver supports the GL_ARB_debug_output extension). */ void CLP(GraphicsStateGuardian):: debug_callback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, GLvoid *userParam) { // Determine how to map the severity level. 
NotifySeverity level; switch (severity) { case GL_DEBUG_SEVERITY_HIGH: level = NS_error; break; case GL_DEBUG_SEVERITY_MEDIUM: if (type == GL_DEBUG_TYPE_PERFORMANCE) { // Performance warnings should really be "info". level = NS_info; } else { level = NS_warning; } break; case GL_DEBUG_SEVERITY_LOW: level = NS_info; break; case GL_DEBUG_SEVERITY_NOTIFICATION: level = NS_debug; break; default: level = NS_fatal; //??? break; } string msg_str(message, length); GLCAT.out(level) << msg_str << "\n"; #ifndef NDEBUG if (level >= gl_debug_abort_level.get_value()) { abort(); } #endif } /** * Resets all internal state as if the gsg were newly created. */ void CLP(GraphicsStateGuardian):: reset() { _last_error_check = -1.0; free_pointers(); GraphicsStateGuardian::reset(); // Build _inv_state_mask as a mask of 1's where we don't care, and 0's where // we do care, about the state. _inv_state_mask = // RenderState::SlotMask::all_on(); _inv_state_mask.clear_bit(ShaderAttrib::get_class_slot()); _inv_state_mask.clear_bit(AlphaTestAttrib::get_class_slot()); _inv_state_mask.clear_bit(AntialiasAttrib::get_class_slot()); _inv_state_mask.clear_bit(ClipPlaneAttrib::get_class_slot()); _inv_state_mask.clear_bit(ColorAttrib::get_class_slot()); _inv_state_mask.clear_bit(ColorScaleAttrib::get_class_slot()); _inv_state_mask.clear_bit(CullFaceAttrib::get_class_slot()); _inv_state_mask.clear_bit(DepthOffsetAttrib::get_class_slot()); _inv_state_mask.clear_bit(DepthTestAttrib::get_class_slot()); _inv_state_mask.clear_bit(DepthWriteAttrib::get_class_slot()); _inv_state_mask.clear_bit(RenderModeAttrib::get_class_slot()); _inv_state_mask.clear_bit(RescaleNormalAttrib::get_class_slot()); _inv_state_mask.clear_bit(ShadeModelAttrib::get_class_slot()); _inv_state_mask.clear_bit(TransparencyAttrib::get_class_slot()); _inv_state_mask.clear_bit(ColorWriteAttrib::get_class_slot()); _inv_state_mask.clear_bit(ColorBlendAttrib::get_class_slot()); _inv_state_mask.clear_bit(LogicOpAttrib::get_class_slot()); 
_inv_state_mask.clear_bit(TextureAttrib::get_class_slot()); _inv_state_mask.clear_bit(TexGenAttrib::get_class_slot()); _inv_state_mask.clear_bit(TexMatrixAttrib::get_class_slot()); _inv_state_mask.clear_bit(MaterialAttrib::get_class_slot()); _inv_state_mask.clear_bit(LightAttrib::get_class_slot()); _inv_state_mask.clear_bit(StencilAttrib::get_class_slot()); _inv_state_mask.clear_bit(FogAttrib::get_class_slot()); _inv_state_mask.clear_bit(ScissorAttrib::get_class_slot()); // Output the vendor and version strings. query_gl_version(); if (_gl_version_major == 0) { // Couldn't get GL. Fail. mark_new(); return; } // Save the extensions tokens. _extensions.clear(); // In OpenGL (ES) 3.0 and later, glGetString(GL_EXTENSIONS) is deprecated. #ifndef OPENGLES_1 if (_gl_version_major >= 3) { PFNGLGETSTRINGIPROC _glGetStringi = (PFNGLGETSTRINGIPROC)get_extension_func("glGetStringi"); if (_glGetStringi != nullptr) { GLint n = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &n); for (GLint i = 0; i < n; ++i) { const char *extension = (const char *)_glGetStringi(GL_EXTENSIONS, i); _extensions.insert(string(extension)); } } else { GLCAT.error() << "glGetStringi is not available!\n"; save_extensions((const char *)glGetString(GL_EXTENSIONS)); } } else #endif { save_extensions((const char *)glGetString(GL_EXTENSIONS)); } get_extra_extensions(); // This needs access to the extensions, so put this after save_extensions. query_glsl_version(); #ifndef OPENGLES bool core_profile = is_at_least_gl_version(3, 2) && !has_extension("GL_ARB_compatibility"); if (GLCAT.is_debug()) { if (core_profile) { GLCAT.debug() << "Using core profile\n"; } else { GLCAT.debug() << "Using compatibility profile\n"; } } _core_profile = core_profile; #elif defined(OPENGLES_1) static const bool core_profile = false; #else static const bool core_profile = true; #endif // Print out a list of all extensions. report_extensions(); // Check if we are running under a profiling tool such as apitrace. 
#if !defined(NDEBUG) && !defined(OPENGLES_1) if (has_extension("GL_EXT_debug_marker")) { _glPushGroupMarker = (PFNGLPUSHGROUPMARKEREXTPROC) get_extension_func("glPushGroupMarkerEXT"); _glPopGroupMarker = (PFNGLPOPGROUPMARKEREXTPROC) get_extension_func("glPopGroupMarkerEXT"); // Start a group right away. push_group_marker("reset"); } else { _glPushGroupMarker = nullptr; _glPopGroupMarker = nullptr; } #endif // Initialize OpenGL debugging output first, if enabled and supported. _supports_debug = false; _use_object_labels = false; if (gl_debug) { PFNGLDEBUGMESSAGECALLBACKPROC_P _glDebugMessageCallback; PFNGLDEBUGMESSAGECONTROLPROC _glDebugMessageControl; if (is_at_least_gl_version(4, 3) || has_extension("GL_KHR_debug")) { #ifdef OPENGLES _glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC_P) get_extension_func("glDebugMessageCallbackKHR"); _glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC) get_extension_func("glDebugMessageControlKHR"); _glObjectLabel = (PFNGLOBJECTLABELPROC) get_extension_func("glObjectLabelKHR"); #else _glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC_P) get_extension_func("glDebugMessageCallback"); _glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC) get_extension_func("glDebugMessageControl"); _glObjectLabel = (PFNGLOBJECTLABELPROC) get_extension_func("glObjectLabel"); #endif glEnable(GL_DEBUG_OUTPUT); // Not supported in ARB version _supports_debug = true; _use_object_labels = gl_debug_object_labels; #ifndef OPENGLES } else if (has_extension("GL_ARB_debug_output")) { _glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC_P) get_extension_func("glDebugMessageCallbackARB"); _glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC) get_extension_func("glDebugMessageControlARB"); _supports_debug = true; #endif } else { _supports_debug = false; } if (_supports_debug) { // Set the categories we want to listen to. 
_glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH, 0, nullptr, GLCAT.is_error()); _glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM, 0, nullptr, GLCAT.is_warning()); _glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0, nullptr, GLCAT.is_info()); _glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, GLCAT.is_debug()); // Enable the callback. _glDebugMessageCallback((GLDEBUGPROC_P) &debug_callback, (void*)this); if (gl_debug_synchronous) { glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); } GLCAT.info() << "gl-debug enabled.\n"; } else { GLCAT.warning() << "gl-debug enabled, but NOT supported.\n"; } } else { // However, still check if it is supported. _supports_debug = is_at_least_gl_version(4, 3) || has_extension("GL_KHR_debug") || has_extension("GL_ARB_debug_output"); if (_supports_debug) { GLCAT.debug() << "gl-debug supported, but NOT enabled.\n"; } else { GLCAT.debug() << "gl-debug disabled and unsupported.\n"; } } _supported_geom_rendering = #ifndef OPENGLES Geom::GR_render_mode_wireframe | Geom::GR_render_mode_point | #endif Geom::GR_indexed_point | Geom::GR_point | Geom::GR_point_uniform_size | Geom::GR_indexed_other | Geom::GR_triangle_strip | Geom::GR_triangle_fan | Geom::GR_line_strip | Geom::GR_flat_last_vertex; #ifndef OPENGLES if (_supports_geometry_shaders) { _supported_geom_rendering |= Geom::GR_adjacency; } #endif _supports_point_parameters = false; #ifdef OPENGLES_1 _glPointParameterfv = glPointParameterfv; #elif defined(OPENGLES) // Other OpenGL ES versions don't support point parameters. 
#else if (is_at_least_gl_version(1, 4)) { _supports_point_parameters = true; _glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC) get_extension_func("glPointParameterfv"); } else if (has_extension("GL_ARB_point_parameters")) { _supports_point_parameters = true; _glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC) get_extension_func("glPointParameterfvARB"); } if (_supports_point_parameters) { if (_glPointParameterfv == nullptr) { GLCAT.warning() << "glPointParameterfv advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_point_parameters = false; } } if (_supports_point_parameters) { _supported_geom_rendering |= Geom::GR_point_perspective | Geom::GR_point_scale; } else { _glPointParameterfv = null_glPointParameterfv; } #endif // !OPENGLES_2 #if defined(OPENGLES_2) // OpenGL ES 2 doesn't have point sprites. _supports_point_sprite = false; #elif defined(OPENGLES_1) _supports_point_sprite = has_extension("GL_OES_point_sprite"); #else _supports_point_sprite = is_at_least_gl_version(2, 0) || has_extension("GL_ARB_point_sprite"); #endif if (_supports_point_sprite) { // It appears that the point_sprite extension doesn't support texture // transforms on the generated texture coordinates. How inconsistent. // Because of this, we don't advertise GR_point_sprite_tex_matrix. _supported_geom_rendering |= Geom::GR_point_sprite; } // Determine whether we support wide lines (and how wide they can be). 
{ GLfloat aliased_range[2] = {1.0f, 1.0f}; glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, aliased_range); _max_line_width = aliased_range[1]; #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { #ifndef OPENGLES GLfloat range[2] = {1.0f, 1.0f}; glGetFloatv(GL_LINE_WIDTH_RANGE, range); _max_line_width = std::max(_max_line_width, range[1]); #endif GLfloat smooth_range[2] = {1.0f, 1.0f}; glGetFloatv(GL_SMOOTH_LINE_WIDTH_RANGE, smooth_range); _max_line_width = std::max(_max_line_width, smooth_range[1]); } #endif } #ifdef OPENGLES_1 // OpenGL ES 1.0 does not support primitive restart indices. #elif defined(OPENGLES) if (gl_support_primitive_restart_index && is_at_least_gles_version(3, 0)) { glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX); _supported_geom_rendering |= Geom::GR_strip_cut_index; } #else _explicit_primitive_restart = false; _glPrimitiveRestartIndex = nullptr; if (gl_support_primitive_restart_index) { if ((is_at_least_gl_version(4, 3) || has_extension("GL_ARB_ES3_compatibility")) && _gl_renderer.substr(0, 7) != "Gallium") { // As long as we enable this, OpenGL will always use the highest // possible index for a numeric type as strip cut index, which coincides // with our convention. This saves us a call to glPrimitiveRestartIndex // ... of course, though, the Gallium driver bugs out here. See also: // https://www.panda3d.org/forums/viewtopic.php?f=5&t=17512 glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX); _supported_geom_rendering |= Geom::GR_strip_cut_index; } else if (is_at_least_gl_version(3, 1)) { // We have to use an explicit primitive restart enableindex. 
_explicit_primitive_restart = true; _supported_geom_rendering |= Geom::GR_strip_cut_index; _glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC) get_extension_func("glPrimitiveRestartIndex"); } } #endif #if !defined(OPENGLES) && defined(SUPPORT_FIXED_FUNCTION) if (has_fixed_function_pipeline() && is_at_least_gl_version(1, 4)) { _glSecondaryColorPointer = (PFNGLSECONDARYCOLORPOINTERPROC) get_extension_func("glSecondaryColorPointer"); } else if (has_extension("GL_EXT_secondary_color")) { _glSecondaryColorPointer = (PFNGLSECONDARYCOLORPOINTERPROC) get_extension_func("glSecondaryColorPointerEXT"); } #endif #ifndef OPENGLES_1 _glDrawRangeElements = null_glDrawRangeElements; #ifdef OPENGLES if (is_at_least_gles_version(3, 0)) { _glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC) get_extension_func("glDrawRangeElements"); } #else if (is_at_least_gl_version(1, 2)) { _glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC) get_extension_func("glDrawRangeElements"); } else if (has_extension("GL_EXT_draw_range_elements")) { _glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC) get_extension_func("glDrawRangeElementsEXT"); } #endif if (_glDrawRangeElements == nullptr) { GLCAT.warning() << "glDrawRangeElements advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _glDrawRangeElements = null_glDrawRangeElements; } #endif // !OPENGLES_1 _supports_3d_texture = false; #ifndef OPENGLES_1 if (is_at_least_gl_version(1, 2) || is_at_least_gles_version(3, 0)) { _supports_3d_texture = true; _glTexImage3D = (PFNGLTEXIMAGE3DPROC_P) get_extension_func("glTexImage3D"); _glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC) get_extension_func("glTexSubImage3D"); _glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC) get_extension_func("glCopyTexSubImage3D"); #ifndef OPENGLES } else if (has_extension("GL_EXT_texture3D")) { _supports_3d_texture = true; _glTexImage3D = (PFNGLTEXIMAGE3DPROC_P) get_extension_func("glTexImage3DEXT"); _glTexSubImage3D = 
(PFNGLTEXSUBIMAGE3DPROC) get_extension_func("glTexSubImage3DEXT"); _glCopyTexSubImage3D = nullptr; if (has_extension("GL_EXT_copy_texture")) { _glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC) get_extension_func("glCopyTexSubImage3DEXT"); } #else } else if (has_extension("GL_OES_texture_3D")) { _supports_3d_texture = true; _glTexImage3D = (PFNGLTEXIMAGE3DPROC_P) get_extension_func("glTexImage3DOES"); _glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC) get_extension_func("glTexSubImage3DOES"); _glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC) get_extension_func("glCopyTexSubImage3DOES"); _glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DOES) get_extension_func("glFramebufferTexture3DOES"); #endif } if (_supports_3d_texture) { if (_glTexImage3D == nullptr || _glTexSubImage3D == nullptr) { GLCAT.warning() << "3-D textures advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_3d_texture = false; } } #endif // !OPENGLES_1 _supports_tex_storage = false; #ifdef OPENGLES if (is_at_least_gles_version(3, 0)) { #else if (is_at_least_gl_version(4, 2) || has_extension("GL_ARB_texture_storage")) { #endif _supports_tex_storage = true; _glTexStorage1D = (PFNGLTEXSTORAGE1DPROC) get_extension_func("glTexStorage1D"); _glTexStorage2D = (PFNGLTEXSTORAGE2DPROC) get_extension_func("glTexStorage2D"); _glTexStorage3D = (PFNGLTEXSTORAGE3DPROC) get_extension_func("glTexStorage3D"); } #ifdef OPENGLES else if (has_extension("GL_EXT_texture_storage")) { _supports_tex_storage = true; _glTexStorage1D = (PFNGLTEXSTORAGE1DPROC) get_extension_func("glTexStorage1DEXT"); _glTexStorage2D = (PFNGLTEXSTORAGE2DPROC) get_extension_func("glTexStorage2DEXT"); _glTexStorage3D = (PFNGLTEXSTORAGE3DPROC) get_extension_func("glTexStorage3DEXT"); } #endif if (_supports_tex_storage) { if (_glTexStorage1D == nullptr || _glTexStorage2D == nullptr || _glTexStorage3D == nullptr) { GLCAT.warning() << "Immutable texture storage advertised as supported by OpenGL runtime, 
but could not get pointers to extension functions.\n"; _supports_tex_storage = false; } } _supports_clear_texture = false; #ifndef OPENGLES if (is_at_least_gl_version(4, 4) || has_extension("GL_ARB_clear_texture")) { _glClearTexImage = (PFNGLCLEARTEXIMAGEPROC) get_extension_func("glClearTexImage"); if (_glClearTexImage == nullptr) { GLCAT.warning() << "GL_ARB_clear_texture advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } else { _supports_clear_texture = true; } } #endif _supports_clear_buffer = false; #ifndef OPENGLES if (is_at_least_gl_version(4, 3) || has_extension("GL_ARB_clear_buffer_object")) { _glClearBufferData = (PFNGLCLEARBUFFERDATAPROC) get_extension_func("glClearBufferData"); if (_glClearBufferData == nullptr) { GLCAT.warning() << "GL_ARB_clear_buffer_object advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } else { _supports_clear_buffer = true; } } #endif _supports_2d_texture_array = false; #ifndef OPENGLES_1 if (_gl_version_major >= 3) { _supports_2d_texture_array = true; _glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC) get_extension_func("glFramebufferTextureLayer"); #ifndef OPENGLES } else if (has_extension("GL_EXT_texture_array")) { _supports_2d_texture_array = true; _glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC) get_extension_func("glFramebufferTextureLayerEXT"); #endif } if (_supports_2d_texture_array && _glFramebufferTextureLayer == nullptr) { GLCAT.warning() << "Texture arrays advertised as supported by OpenGL runtime, but could not get pointer to glFramebufferTextureLayer function.\n"; } #endif // !OPENGLES_1 #ifdef OPENGLES_2 _supports_cube_map = true; #else _supports_cube_map = is_at_least_gl_version(1, 3) || has_extension("GL_ARB_texture_cube_map") || has_extension("GL_OES_texture_cube_map"); #endif #ifndef OPENGLES if (_supports_cube_map && gl_cube_map_seamless) { if (is_at_least_gl_version(3, 2) || 
has_extension("GL_ARB_seamless_cube_map")) { glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS); } } #endif #ifndef OPENGLES _supports_cube_map_array = is_at_least_gl_version(4, 0) || has_extension("GL_ARB_texture_cube_map_array"); #endif #ifndef OPENGLES if (is_at_least_gl_version(3, 1)) { _glTexBuffer = (PFNGLTEXBUFFERPROC)get_extension_func("glTexBuffer"); _supports_buffer_texture = true; } else if (has_extension("GL_ARB_texture_buffer_object")) { _glTexBuffer = (PFNGLTEXBUFFERPROC)get_extension_func("glTexBufferARB"); _supports_buffer_texture = true; } #endif #ifdef OPENGLES _supports_texture_srgb = is_at_least_gles_version(3, 0) || has_extension("GL_EXT_sRGB"); #else _supports_texture_srgb = is_at_least_gl_version(2, 1) || has_extension("GL_EXT_texture_sRGB"); #endif #ifdef OPENGLES _supports_compressed_texture = true; // Supported in the core. 1D textures are not supported by OpenGL ES. _glCompressedTexImage1D = nullptr; _glCompressedTexImage2D = glCompressedTexImage2D; _glCompressedTexSubImage1D = nullptr; _glCompressedTexSubImage2D = glCompressedTexSubImage2D; _glGetCompressedTexImage = nullptr; _glCompressedTexImage3D = nullptr; _glCompressedTexSubImage3D = nullptr; #ifdef OPENGLES_2 if (_supports_3d_texture) { _glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC) get_extension_func("glCompressedTexImage3DOES"); _glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) get_extension_func("glCompressedTexSubImageOES"); } #endif #else if (is_at_least_gl_version(1, 3)) { _supports_compressed_texture = true; _glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC) get_extension_func("glCompressedTexImage1D"); _glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) get_extension_func("glCompressedTexImage2D"); _glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC) get_extension_func("glCompressedTexImage3D"); _glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) get_extension_func("glCompressedTexSubImage1D"); _glCompressedTexSubImage2D = 
(PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) get_extension_func("glCompressedTexSubImage2D"); _glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) get_extension_func("glCompressedTexSubImage3D"); _glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC) get_extension_func("glGetCompressedTexImage"); } else if (has_extension("GL_ARB_texture_compression")) { _supports_compressed_texture = true; _glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC) get_extension_func("glCompressedTexImage1DARB"); _glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC) get_extension_func("glCompressedTexImage2DARB"); _glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC) get_extension_func("glCompressedTexImage3DARB"); _glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) get_extension_func("glCompressedTexSubImage1DARB"); _glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) get_extension_func("glCompressedTexSubImage2DARB"); _glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) get_extension_func("glCompressedTexSubImage3DARB"); _glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC) get_extension_func("glGetCompressedTexImageARB"); } else { _supports_compressed_texture = false; } if (_supports_compressed_texture) { if (_glCompressedTexImage1D == nullptr || _glCompressedTexImage2D == nullptr || _glCompressedTexImage3D == nullptr || _glCompressedTexSubImage1D == nullptr || _glCompressedTexSubImage2D == nullptr || _glCompressedTexSubImage3D == nullptr || _glGetCompressedTexImage == nullptr) { GLCAT.warning() << "Compressed textures advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_compressed_texture = false; } } #endif if (_supports_compressed_texture) { #ifndef OPENGLES _compressed_texture_formats.set_bit(Texture::CM_on); #endif GLint num_compressed_formats = 0; glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &num_compressed_formats); GLint *formats = (GLint 
*)alloca(num_compressed_formats * sizeof(GLint)); glGetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, formats); for (int i = 0; i < num_compressed_formats; ++i) { switch (formats[i]) { case GL_COMPRESSED_RGB_S3TC_DXT1_EXT: case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT: _compressed_texture_formats.set_bit(Texture::CM_dxt1); break; #ifdef OPENGLES case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG: case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG: _compressed_texture_formats.set_bit(Texture::CM_pvr1_2bpp); break; case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG: case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG: _compressed_texture_formats.set_bit(Texture::CM_pvr1_4bpp); break; #else case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT: _compressed_texture_formats.set_bit(Texture::CM_dxt3); break; case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: _compressed_texture_formats.set_bit(Texture::CM_dxt5); break; case GL_COMPRESSED_RGB_FXT1_3DFX: case GL_COMPRESSED_RGBA_FXT1_3DFX: _compressed_texture_formats.set_bit(Texture::CM_fxt1); break; #endif case GL_COMPRESSED_R11_EAC: case GL_COMPRESSED_RG11_EAC: _compressed_texture_formats.set_bit(Texture::CM_eac); break; case GL_COMPRESSED_RGB8_ETC2: case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2: case GL_COMPRESSED_RGBA8_ETC2_EAC: _compressed_texture_formats.set_bit(Texture::CM_etc1); _compressed_texture_formats.set_bit(Texture::CM_etc2); break; #ifdef OPENGLES case GL_ETC1_RGB8_OES: _compressed_texture_formats.set_bit(Texture::CM_etc1); break; #endif default: break; } } #ifndef OPENGLES // The OpenGL spec states that these are not reported by the above // mechanism, so we have to check for the extension ourselves. if (is_at_least_gl_version(3, 0) || has_extension("GL_ARB_texture_compression_rgtc") || has_extension("GL_EXT_texture_compression_rgtc")) { _compressed_texture_formats.set_bit(Texture::CM_rgtc); } #endif } #ifdef OPENGLES // Note that these extensions only offer support for GL_BGRA, not GL_BGR. 
_supports_bgr = has_extension("GL_EXT_texture_format_BGRA8888") || has_extension("GL_APPLE_texture_format_BGRA8888"); #else // In regular OpenGL, we have both GL_BGRA and GL_BGR. _supports_bgr = is_at_least_gl_version(1, 2) || has_extension("GL_EXT_bgra"); #endif #ifdef SUPPORT_FIXED_FUNCTION #ifdef OPENGLES_1 _supports_rescale_normal = true; #else _supports_rescale_normal = has_fixed_function_pipeline() && gl_support_rescale_normal && (is_at_least_gl_version(1, 2) || has_extension("GL_EXT_rescale_normal")); #endif #ifndef OPENGLES _use_separate_specular_color = has_fixed_function_pipeline() && gl_separate_specular_color && (is_at_least_gl_version(1, 2) || has_extension("GL_EXT_separate_specular_color")); #endif #endif // SUPPORT_FIXED_FUNCTION #ifdef OPENGLES _supports_packed_dabc = false; _supports_packed_ufloat = false; #else _supports_packed_dabc = is_at_least_gl_version(3, 2) || has_extension("GL_ARB_vertex_array_bgra") || has_extension("GL_EXT_vertex_array_bgra"); _supports_packed_ufloat = is_at_least_gl_version(4, 4) || has_extension("GL_ARB_vertex_type_10f_11f_11f_rev"); #endif #ifdef OPENGLES //TODO _supports_multisample = false; #else _supports_multisample = is_at_least_gl_version(1, 3) || has_extension("GL_ARB_multisample"); #endif #ifdef OPENGLES_1 _supports_generate_mipmap = is_at_least_gles_version(1, 1); #elif defined(OPENGLES) _supports_generate_mipmap = true; #else _supports_generate_mipmap = is_at_least_gl_version(1, 4) || has_extension("GL_SGIS_generate_mipmap"); #endif #ifdef OPENGLES_1 _supports_tex_non_pow2 = false; #elif defined(OPENGLES) _supports_tex_non_pow2 = is_at_least_gles_version(3, 0) || has_extension("GL_OES_texture_npot"); #else _supports_tex_non_pow2 = is_at_least_gl_version(2, 0) || has_extension("GL_ARB_texture_non_power_of_two"); #endif #ifndef OPENGLES_2 bool supports_multitexture = false; if (is_at_least_gl_version(1, 3) || is_at_least_gles_version(1, 1)) { supports_multitexture = true; _glActiveTexture = 
(PFNGLACTIVETEXTUREPROC) get_extension_func("glActiveTexture"); #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { _glClientActiveTexture = (PFNGLACTIVETEXTUREPROC) get_extension_func("glClientActiveTexture"); } #endif #ifdef SUPPORT_IMMEDIATE_MODE _glMultiTexCoord1f = (PFNGLMULTITEXCOORD1FPROC) get_extension_func("glMultiTexCoord1f"); _glMultiTexCoord2f = (PFNGLMULTITEXCOORD2FPROC) get_extension_func("glMultiTexCoord2f"); _glMultiTexCoord3f = (PFNGLMULTITEXCOORD3FPROC) get_extension_func("glMultiTexCoord3f"); _glMultiTexCoord4f = (PFNGLMULTITEXCOORD4FPROC) get_extension_func("glMultiTexCoord4f"); _glMultiTexCoord1d = (PFNGLMULTITEXCOORD1DPROC) get_extension_func("glMultiTexCoord1d"); _glMultiTexCoord2d = (PFNGLMULTITEXCOORD2DPROC) get_extension_func("glMultiTexCoord2d"); _glMultiTexCoord3d = (PFNGLMULTITEXCOORD3DPROC) get_extension_func("glMultiTexCoord3d"); _glMultiTexCoord4d = (PFNGLMULTITEXCOORD4DPROC) get_extension_func("glMultiTexCoord4d"); #endif } else if (has_extension("GL_ARB_multitexture")) { supports_multitexture = true; _glActiveTexture = (PFNGLACTIVETEXTUREPROC) get_extension_func("glActiveTextureARB"); #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { _glClientActiveTexture = (PFNGLACTIVETEXTUREPROC) get_extension_func("glClientActiveTextureARB"); } #endif #ifdef SUPPORT_IMMEDIATE_MODE _glMultiTexCoord1f = (PFNGLMULTITEXCOORD1FPROC) get_extension_func("glMultiTexCoord1fARB"); _glMultiTexCoord2f = (PFNGLMULTITEXCOORD2FPROC) get_extension_func("glMultiTexCoord2fARB"); _glMultiTexCoord3f = (PFNGLMULTITEXCOORD3FPROC) get_extension_func("glMultiTexCoord3fARB"); _glMultiTexCoord4f = (PFNGLMULTITEXCOORD4FPROC) get_extension_func("glMultiTexCoord4fARB"); _glMultiTexCoord1d = (PFNGLMULTITEXCOORD1DPROC) get_extension_func("glMultiTexCoord1dARB"); _glMultiTexCoord2d = (PFNGLMULTITEXCOORD2DPROC) get_extension_func("glMultiTexCoord2dARB"); _glMultiTexCoord3d = (PFNGLMULTITEXCOORD3DPROC) get_extension_func("glMultiTexCoord3dARB"); 
_glMultiTexCoord4d = (PFNGLMULTITEXCOORD4DPROC) get_extension_func("glMultiTexCoord4dARB"); #endif } else { supports_multitexture = false; } if (supports_multitexture) { if (_glActiveTexture == nullptr #ifdef SUPPORT_FIXED_FUNCTION || (has_fixed_function_pipeline() && _glClientActiveTexture == nullptr) #endif #ifdef SUPPORT_IMMEDIATE_MODE || GLf(_glMultiTexCoord1) == nullptr || GLf(_glMultiTexCoord2) == nullptr || GLf(_glMultiTexCoord3) == nullptr || GLf(_glMultiTexCoord4) == nullptr #endif ) { GLCAT.warning() << "Multitexture advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; supports_multitexture = false; } } if (!supports_multitexture) { // Replace with dummy no-op functions. _glActiveTexture = null_glActiveTexture; } #ifdef SUPPORT_FIXED_FUNCTION if (!supports_multitexture || !has_fixed_function_pipeline()) { _glClientActiveTexture = null_glActiveTexture; } #endif #endif // OPENGLES_2 #ifdef OPENGLES_1 _supports_depth_texture = false; _supports_depth_stencil = has_extension("GL_OES_packed_depth_stencil"); _supports_depth24 = has_extension("GL_OES_depth24"); _supports_depth32 = has_extension("GL_OES_depth32"); _supports_luminance_texture = true; #elif defined(OPENGLES) if (is_at_least_gles_version(3, 0)) { _supports_depth_texture = true; _supports_depth_stencil = true; _supports_depth24 = true; _supports_depth32 = true; } else { if (has_extension("GL_ANGLE_depth_texture")) { // This extension provides both depth textures and depth-stencil support. 
_supports_depth_texture = true; _supports_depth_stencil = true; } else if (has_extension("GL_OES_depth_texture")) { _supports_depth_texture = true; _supports_depth_stencil = has_extension("GL_OES_packed_depth_stencil"); } _supports_depth24 = has_extension("GL_OES_depth24"); _supports_depth32 = has_extension("GL_OES_depth32"); } _supports_luminance_texture = true; #else _supports_depth_texture = (is_at_least_gl_version(1, 4) || has_extension("GL_ARB_depth_texture")); _supports_depth_stencil = (is_at_least_gl_version(3, 0) || has_extension("GL_ARB_framebuffer_object") || has_extension("GL_EXT_packed_depth_stencil")); // OpenGL 3 deprecates luminance, luminance-alpha and alpha textures. _supports_luminance_texture = !core_profile; #endif #ifdef OPENGLES_2 if (gl_support_shadow_filter && _supports_depth_texture && (is_at_least_gles_version(3, 0) || has_extension("GL_EXT_shadow_samplers"))) { _supports_shadow_filter = true; } #else if (gl_support_shadow_filter && _supports_depth_texture && (is_at_least_gl_version(1, 4) || has_extension("GL_ARB_shadow")) && (is_at_least_gl_version(2, 0) || has_extension("GL_ARB_fragment_program_shadow"))) { _supports_shadow_filter = true; } #endif // Actually, we can't keep forever disabling ARB_shadow on ATI cards, since // they do work correctly now. Maybe there is some feature level we can // check somewhere? /*if (_gl_vendor.substr(0,3)=="ATI") { // ATI drivers have never provided correct shadow support. 
_supports_shadow_filter = false; }*/ _supports_texture_combine = false; _supports_texture_saved_result = false; _supports_texture_dot3 = false; #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { _supports_texture_combine = is_at_least_gl_version(1, 3) || is_at_least_gles_version(1, 1) || has_extension("GL_ARB_texture_env_combine"); #ifdef OPENGLES_1 _supports_texture_saved_result = has_extension("GL_OES_texture_env_crossbar"); #else _supports_texture_saved_result = is_at_least_gl_version(1, 4) || has_extension("GL_ARB_texture_env_crossbar"); #endif _supports_texture_dot3 = is_at_least_gl_version(1, 3) || is_at_least_gles_version(1, 1) || has_extension("GL_ARB_texture_env_dot3"); } #endif // SUPPORT_FIXED_FUNCTION #ifdef OPENGLES_2 _supports_buffers = true; _glGenBuffers = glGenBuffers; _glBindBuffer = glBindBuffer; _glBufferData = glBufferData; _glBufferSubData = glBufferSubData; _glDeleteBuffers = glDeleteBuffers; #else _supports_buffers = false; if (is_at_least_gl_version(1, 5) || is_at_least_gles_version(1, 1)) { _supports_buffers = true; _glGenBuffers = (PFNGLGENBUFFERSPROC) get_extension_func("glGenBuffers"); _glBindBuffer = (PFNGLBINDBUFFERPROC) get_extension_func("glBindBuffer"); _glBufferData = (PFNGLBUFFERDATAPROC) get_extension_func("glBufferData"); _glBufferSubData = (PFNGLBUFFERSUBDATAPROC) get_extension_func("glBufferSubData"); _glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) get_extension_func("glDeleteBuffers"); #ifndef OPENGLES _glMapBuffer = (PFNGLMAPBUFFERPROC) get_extension_func("glMapBuffer"); _glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) get_extension_func("glUnmapBuffer"); _glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC) get_extension_func("glGetBufferSubData"); #endif } #ifndef OPENGLES_1 else if (has_extension("GL_ARB_vertex_buffer_object")) { _supports_buffers = true; _glGenBuffers = (PFNGLGENBUFFERSPROC) get_extension_func("glGenBuffersARB"); _glBindBuffer = (PFNGLBINDBUFFERPROC) get_extension_func("glBindBufferARB"); _glBufferData = 
(PFNGLBUFFERDATAPROC) get_extension_func("glBufferDataARB"); _glBufferSubData = (PFNGLBUFFERSUBDATAPROC) get_extension_func("glBufferSubDataARB"); _glDeleteBuffers = (PFNGLDELETEBUFFERSPROC) get_extension_func("glDeleteBuffersARB"); _glMapBuffer = (PFNGLMAPBUFFERPROC) get_extension_func("glMapBufferARB"); _glUnmapBuffer = (PFNGLUNMAPBUFFERPROC) get_extension_func("glUnmapBufferARB"); _glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC) get_extension_func("glGetBufferSubDataARB"); } #endif // OPENGLES_1 if (_supports_buffers) { if (_glGenBuffers == nullptr || _glBindBuffer == nullptr || _glBufferData == nullptr || _glBufferSubData == nullptr || _glDeleteBuffers == nullptr) { GLCAT.warning() << "Buffers advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_buffers = false; } } #endif #ifdef OPENGLES if (is_at_least_gles_version(3, 0)) { _glMapBufferRange = (PFNGLMAPBUFFERRANGEEXTPROC) get_extension_func("glMapBufferRange"); } else if (has_extension("GL_EXT_map_buffer_range")) { _glMapBufferRange = (PFNGLMAPBUFFERRANGEEXTPROC) get_extension_func("glMapBufferRangeEXT"); } else { _glMapBufferRange = nullptr; } #else // Check for various advanced buffer management features. 
if (is_at_least_gl_version(3, 0) || has_extension("GL_ARB_map_buffer_range")) { _glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC) get_extension_func("glMapBufferRange"); } else { _glMapBufferRange = nullptr; } if (is_at_least_gl_version(4, 4) || has_extension("GL_ARB_buffer_storage")) { _glBufferStorage = (PFNGLBUFFERSTORAGEPROC) get_extension_func("glBufferStorage"); if (_glBufferStorage != nullptr) { _supports_buffer_storage = true; } else { GLCAT.warning() << "Buffer storage advertised as supported by OpenGL runtime, but " "could not get pointers to extension function.\n"; } } else { _supports_buffer_storage = false; } #endif _supports_vao = false; #ifdef OPENGLES if (is_at_least_gles_version(3, 0)) { #else if (is_at_least_gl_version(3, 0) || has_extension("GL_ARB_vertex_array_object")) { #endif _supports_vao = true; _glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC) get_extension_func("glBindVertexArray"); _glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC) get_extension_func("glDeleteVertexArrays"); _glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) get_extension_func("glGenVertexArrays"); #ifdef OPENGLES } else if (has_extension("GL_OES_vertex_array_object")) { _supports_vao = true; _glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC) get_extension_func("glBindVertexArrayOES"); _glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC) get_extension_func("glDeleteVertexArraysOES"); _glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC) get_extension_func("glGenVertexArraysOES"); #endif } if (_supports_vao) { if (_glBindVertexArray == nullptr || _glDeleteVertexArrays == nullptr || _glGenVertexArrays == nullptr) { GLCAT.warning() << "Vertex array objects advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_vao = false; } } // Check for GLSL support. 
#if defined(OPENGLES_1) _supports_glsl = false; _supports_geometry_shaders = false; _supports_tessellation_shaders = false; #elif defined(OPENGLES) _supports_glsl = true; _supports_geometry_shaders = false; _supports_tessellation_shaders = false; #else _supports_glsl = (_gl_shadlang_ver_major >= 1); _supports_tessellation_shaders = is_at_least_gl_version(4, 0) || has_extension("GL_ARB_tessellation_shader"); if (is_at_least_gl_version(3, 2)) { _supports_geometry_shaders = true; _glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREARBPROC) get_extension_func("glFramebufferTexture"); } else if (has_extension("GL_ARB_geometry_shader4")) { _supports_geometry_shaders = true; _glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREARBPROC) get_extension_func("glFramebufferTextureARB"); _glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC) get_extension_func("glProgramParameteriARB"); } else if (has_extension("GL_EXT_geometry_shader4")) { _supports_geometry_shaders = true; _glFramebufferTexture = nullptr; _glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC) get_extension_func("glProgramParameteriEXT"); } else { _supports_geometry_shaders = false; _glFramebufferTexture = nullptr; } #endif _shader_caps._supports_glsl = _supports_glsl; // Check for support for other types of shaders that can be used by Cg. _supports_basic_shaders = false; #if defined(HAVE_CG) && !defined(OPENGLES) if (has_extension("GL_ARB_vertex_program") && has_extension("GL_ARB_fragment_program")) { _supports_basic_shaders = true; _shader_caps._active_vprofile = (int)CG_PROFILE_ARBVP1; _shader_caps._active_fprofile = (int)CG_PROFILE_ARBFP1; _shader_caps._active_gprofile = (int)CG_PROFILE_UNKNOWN; // No geometry shader if only using basic if (basic_shaders_only) { // We're happy with ARB programs, thanks. 
} else if (has_extension("GL_NV_gpu_program5")) { _shader_caps._active_vprofile = (int)CG_PROFILE_GP5VP; _shader_caps._active_fprofile = (int)CG_PROFILE_GP5FP; _shader_caps._active_gprofile = (int)CG_PROFILE_GP5GP; } else if (has_extension("GL_NV_gpu_program4")) { _shader_caps._active_vprofile = (int)CG_PROFILE_GP4VP; _shader_caps._active_fprofile = (int)CG_PROFILE_GP4FP; _shader_caps._active_gprofile = (int)CG_PROFILE_GP4GP; } else if (has_extension("GL_NV_vertex_program3") && has_extension("GL_NV_fragment_program2")) { _shader_caps._active_vprofile = (int)CG_PROFILE_VP40; _shader_caps._active_fprofile = (int)CG_PROFILE_FP40; _shader_caps._active_gprofile = (int)CG_PROFILE_UNKNOWN; } else if (has_extension("GL_NV_vertex_program2") && has_extension("GL_NV_fragment_program")) { _shader_caps._active_vprofile = (int)CG_PROFILE_VP30; _shader_caps._active_fprofile = (int)CG_PROFILE_FP30; _shader_caps._active_gprofile = (int)CG_PROFILE_UNKNOWN; } else if (has_extension("GL_NV_vertex_program1_1") && has_extension("GL_NV_texture_shader2") && has_extension("GL_NV_register_combiners2")) { _shader_caps._active_vprofile = (int)CG_PROFILE_VP20; _shader_caps._active_fprofile = (int)CG_PROFILE_FP20; _shader_caps._active_gprofile = (int)CG_PROFILE_UNKNOWN; } else if (_supports_glsl) { // This is what will be available to non-NVIDIA cards. It is the last // option since it is slower to compile GLSL than the other options. _shader_caps._active_vprofile = (int)CG_PROFILE_GLSLV; _shader_caps._active_fprofile = (int)CG_PROFILE_GLSLF; if (_supports_geometry_shaders) { _shader_caps._active_gprofile = (int)CG_PROFILE_GLSLG; } } _shader_caps._ultimate_vprofile = (int)CG_PROFILE_VP40; _shader_caps._ultimate_fprofile = (int)CG_PROFILE_FP40; _shader_caps._ultimate_gprofile = (int)CG_PROFILE_GPU_GP; // Bug workaround for radeons. 
if (_shader_caps._active_fprofile == CG_PROFILE_ARBFP1) { if (has_extension("GL_ATI_draw_buffers")) { _shader_caps._bug_list.insert(Shader::SBUG_ati_draw_buffers); } } Shader::set_default_caps(_shader_caps); } else if (_supports_glsl) { // No, but we do support GLSL... _shader_caps._active_vprofile = (int)CG_PROFILE_GLSLV; _shader_caps._active_fprofile = (int)CG_PROFILE_GLSLF; if (_supports_geometry_shaders) { _shader_caps._active_gprofile = (int)CG_PROFILE_GLSLG; } else { _shader_caps._active_gprofile = (int)CG_PROFILE_UNKNOWN; } } #endif // HAVE_CG _supports_compute_shaders = false; #ifndef OPENGLES_1 #ifdef OPENGLES if (is_at_least_gles_version(3, 1)) { #else if (is_at_least_gl_version(4, 3) || has_extension("GL_ARB_compute_shader")) { #endif _glDispatchCompute = (PFNGLDISPATCHCOMPUTEPROC) get_extension_func("glDispatchCompute"); if (_glDispatchCompute != nullptr) { _supports_compute_shaders = true; } } #endif // !OPENGLES_1 #ifndef OPENGLES if (_supports_glsl) { _glAttachShader = (PFNGLATTACHSHADERPROC) get_extension_func("glAttachShader"); _glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC) get_extension_func("glBindAttribLocation"); _glCompileShader = (PFNGLCOMPILESHADERPROC) get_extension_func("glCompileShader"); _glCreateProgram = (PFNGLCREATEPROGRAMPROC) get_extension_func("glCreateProgram"); _glCreateShader = (PFNGLCREATESHADERPROC) get_extension_func("glCreateShader"); _glDeleteProgram = (PFNGLDELETEPROGRAMPROC) get_extension_func("glDeleteProgram"); _glDeleteShader = (PFNGLDELETESHADERPROC) get_extension_func("glDeleteShader"); _glDetachShader = (PFNGLDETACHSHADERPROC) get_extension_func("glDetachShader"); _glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) get_extension_func("glDisableVertexAttribArray"); _glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) get_extension_func("glEnableVertexAttribArray"); _glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC) get_extension_func("glGetActiveAttrib"); _glGetActiveUniform = 
(PFNGLGETACTIVEUNIFORMPROC) get_extension_func("glGetActiveUniform"); _glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC) get_extension_func("glGetAttribLocation"); _glGetProgramiv = (PFNGLGETPROGRAMIVPROC) get_extension_func("glGetProgramiv"); _glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC) get_extension_func("glGetProgramInfoLog"); _glGetShaderiv = (PFNGLGETSHADERIVPROC) get_extension_func("glGetShaderiv"); _glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC) get_extension_func("glGetShaderInfoLog"); _glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC) get_extension_func("glGetUniformLocation"); _glLinkProgram = (PFNGLLINKPROGRAMPROC) get_extension_func("glLinkProgram"); _glShaderSource = (PFNGLSHADERSOURCEPROC_P) get_extension_func("glShaderSource"); _glUseProgram = (PFNGLUSEPROGRAMPROC) get_extension_func("glUseProgram"); _glUniform4f = (PFNGLUNIFORM4FPROC) get_extension_func("glUniform4f"); _glUniform1i = (PFNGLUNIFORM1IPROC) get_extension_func("glUniform1i"); _glUniform1fv = (PFNGLUNIFORM1FVPROC) get_extension_func("glUniform1fv"); _glUniform2fv = (PFNGLUNIFORM2FVPROC) get_extension_func("glUniform2fv"); _glUniform3fv = (PFNGLUNIFORM3FVPROC) get_extension_func("glUniform3fv"); _glUniform4fv = (PFNGLUNIFORM4FVPROC) get_extension_func("glUniform4fv"); _glUniform1iv = (PFNGLUNIFORM1IVPROC) get_extension_func("glUniform1iv"); _glUniform2iv = (PFNGLUNIFORM2IVPROC) get_extension_func("glUniform2iv"); _glUniform3iv = (PFNGLUNIFORM3IVPROC) get_extension_func("glUniform3iv"); _glUniform4iv = (PFNGLUNIFORM4IVPROC) get_extension_func("glUniform4iv"); _glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC) get_extension_func("glUniformMatrix3fv"); _glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC) get_extension_func("glUniformMatrix4fv"); _glValidateProgram = (PFNGLVALIDATEPROGRAMPROC) get_extension_func("glValidateProgram"); _glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC) get_extension_func("glVertexAttrib4fv"); _glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC) 
get_extension_func("glVertexAttrib4dv"); _glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) get_extension_func("glVertexAttribPointer"); if (is_at_least_gl_version(3, 0)) { _glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC) get_extension_func("glBindFragDataLocation"); _glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC) get_extension_func("glVertexAttribIPointer"); _glUniform1uiv = (PFNGLUNIFORM1UIVPROC) get_extension_func("glUniform1uiv"); _glUniform2uiv = (PFNGLUNIFORM2UIVPROC) get_extension_func("glUniform2uiv"); _glUniform3uiv = (PFNGLUNIFORM3UIVPROC) get_extension_func("glUniform3uiv"); _glUniform4uiv = (PFNGLUNIFORM4UIVPROC) get_extension_func("glUniform4uiv"); } else if (has_extension("GL_EXT_gpu_shader4")) { _glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC) get_extension_func("glBindFragDataLocationEXT"); _glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC) get_extension_func("glVertexAttribIPointerEXT"); _glUniform1uiv = (PFNGLUNIFORM1UIVPROC) get_extension_func("glUniform1uivEXT"); _glUniform2uiv = (PFNGLUNIFORM2UIVPROC) get_extension_func("glUniform2uivEXT"); _glUniform3uiv = (PFNGLUNIFORM3UIVPROC) get_extension_func("glUniform3uivEXT"); _glUniform4uiv = (PFNGLUNIFORM4UIVPROC) get_extension_func("glUniform4uivEXT"); } else { _glBindFragDataLocation = nullptr; _glVertexAttribIPointer = nullptr; } if (is_at_least_gl_version(4, 1) || has_extension("GL_ARB_vertex_attrib_64bit")) { _glVertexAttribLPointer = (PFNGLVERTEXATTRIBLPOINTERPROC) get_extension_func("glVertexAttribLPointer"); } else { _glVertexAttribLPointer = nullptr; } if (_supports_tessellation_shaders) { _glPatchParameteri = (PFNGLPATCHPARAMETERIPROC) get_extension_func("glPatchParameteri"); } } else if (_supports_basic_shaders) { // We don't support GLSL, but we support ARB programs. 
_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC) get_extension_func("glDisableVertexAttribArrayARB"); _glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC) get_extension_func("glEnableVertexAttribArrayARB"); _glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC) get_extension_func("glVertexAttrib4fvARB"); _glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC) get_extension_func("glVertexAttrib4dvARB"); _glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC) get_extension_func("glVertexAttribPointerARB"); _glBindFragDataLocation = nullptr; _glVertexAttribIPointer = nullptr; _glVertexAttribLPointer = nullptr; } #endif #ifdef OPENGLES_2 _glAttachShader = glAttachShader; _glBindAttribLocation = glBindAttribLocation; _glCompileShader = glCompileShader; _glCreateProgram = glCreateProgram; _glCreateShader = glCreateShader; _glDeleteProgram = glDeleteProgram; _glDeleteShader = glDeleteShader; _glDetachShader = glDetachShader; _glDisableVertexAttribArray = glDisableVertexAttribArray; _glEnableVertexAttribArray = glEnableVertexAttribArray; _glGetActiveAttrib = glGetActiveAttrib; _glGetActiveUniform = glGetActiveUniform; _glGetAttribLocation = glGetAttribLocation; _glGetProgramiv = glGetProgramiv; _glGetProgramInfoLog = glGetProgramInfoLog; _glGetShaderiv = glGetShaderiv; _glGetShaderInfoLog = glGetShaderInfoLog; _glGetUniformLocation = glGetUniformLocation; _glLinkProgram = glLinkProgram; _glShaderSource = (PFNGLSHADERSOURCEPROC_P) glShaderSource; _glUseProgram = glUseProgram; _glUniform4f = glUniform4f; _glUniform1i = glUniform1i; _glUniform1fv = glUniform1fv; _glUniform2fv = glUniform2fv; _glUniform3fv = glUniform3fv; _glUniform4fv = glUniform4fv; _glUniform1iv = glUniform1iv; _glUniform2iv = glUniform2iv; _glUniform3iv = glUniform3iv; _glUniform4iv = glUniform4iv; _glUniformMatrix3fv = glUniformMatrix3fv; _glUniformMatrix4fv = glUniformMatrix4fv; _glValidateProgram = glValidateProgram; _glVertexAttrib4fv = glVertexAttrib4fv; _glVertexAttrib4dv = 
null_glVertexAttrib4dv; _glVertexAttribPointer = glVertexAttribPointer; _glVertexAttribLPointer = nullptr; if (is_at_least_gles_version(3, 0)) { _glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC) get_extension_func("glVertexAttribIPointer"); } else { _glVertexAttribIPointer = nullptr; } if (has_extension("GL_EXT_blend_func_extended")) { _glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC) get_extension_func("glBindFragDataLocationEXT"); } else { _glBindFragDataLocation = nullptr; } #endif #ifndef OPENGLES_1 _use_vertex_attrib_binding = false; #ifdef OPENGLES if (is_at_least_gles_version(3, 1)) { #else if (is_at_least_gl_version(4, 3) || has_extension("GL_ARB_vertex_attrib_binding")) { #endif _glBindVertexBuffer = (PFNGLBINDVERTEXBUFFERPROC) get_extension_func("glBindVertexBuffer"); _glVertexAttribFormat = (PFNGLVERTEXATTRIBFORMATPROC) get_extension_func("glVertexAttribFormat"); _glVertexAttribIFormat = (PFNGLVERTEXATTRIBIFORMATPROC) get_extension_func("glVertexAttribIFormat"); _glVertexAttribBinding = (PFNGLVERTEXATTRIBBINDINGPROC) get_extension_func("glVertexAttribBinding"); _glVertexBindingDivisor = (PFNGLVERTEXBINDINGDIVISORPROC) get_extension_func("glVertexBindingDivisor"); #ifndef OPENGLES _glVertexAttribLFormat = (PFNGLVERTEXATTRIBLFORMATPROC) get_extension_func("glVertexAttribLFormat"); #endif if (gl_fixed_vertex_attrib_locations) { _use_vertex_attrib_binding = true; } } #endif // We need to have a default shader to apply in case something didn't happen // to have a shader applied, or if it failed to compile. This default // shader just outputs a red color, indicating that something went wrong. 
#ifndef OPENGLES_1 if (_default_shader == nullptr && !has_fixed_function_pipeline()) { #ifndef OPENGLES bool use_float64 = vertices_float64; if (use_float64 && is_at_least_gl_version(4, 1)) { _default_shader = Shader::make(Shader::SL_GLSL, default_vshader_fp64_gl41, default_fshader); } else if (use_float64 && has_extension("GL_ARB_vertex_attrib_64bit")) { _default_shader = Shader::make(Shader::SL_GLSL, default_vshader_fp64, default_fshader); } else #endif { _default_shader = Shader::make(Shader::SL_GLSL, default_vshader, default_fshader); } } #endif #ifndef OPENGLES_1 // Check for uniform buffers. #ifdef OPENGLES if (is_at_least_gl_version(3, 1) || has_extension("GL_ARB_uniform_buffer_object")) { #else if (is_at_least_gles_version(3, 0)) { #endif _supports_uniform_buffers = true; _glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC) get_extension_func("glGetActiveUniformsiv"); _glGetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC) get_extension_func("glGetActiveUniformBlockiv"); _glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) get_extension_func("glGetActiveUniformBlockName"); } else { _supports_uniform_buffers = false; } #ifndef OPENGLES // Check for SSBOs. if (is_at_least_gl_version(4, 3) || has_extension("ARB_shader_storage_buffer_object")) { _supports_shader_buffers = true; _glGetProgramInterfaceiv = (PFNGLGETPROGRAMINTERFACEIVPROC) get_extension_func("glGetProgramInterfaceiv"); _glGetProgramResourceName = (PFNGLGETPROGRAMRESOURCENAMEPROC) get_extension_func("glGetProgramResourceName"); _glGetProgramResourceiv = (PFNGLGETPROGRAMRESOURCEIVPROC) get_extension_func("glGetProgramResourceiv"); } else #endif { _supports_shader_buffers = false; } if (_supports_uniform_buffers || _supports_shader_buffers) { _glBindBufferBase = (PFNGLBINDBUFFERBASEPROC) get_extension_func("glBindBufferBase"); } #endif // Check whether we support geometry instancing and instanced vertex // attribs. 
#if defined(OPENGLES_1) _supports_vertex_attrib_divisor = false; _supports_geometry_instancing = false; #elif defined(OPENGLES) if (is_at_least_gles_version(3, 0)) { // OpenGL ES 3 has all of this in the core. _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisor"); _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstanced"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstanced"); _supports_vertex_attrib_divisor = true; _supports_geometry_instancing = true; } else if (has_extension("GL_ANGLE_instanced_arrays")) { // This extension has both things in one. #ifdef __EMSCRIPTEN__ // Work around bug - it doesn't allow ANGLE suffix in getProcAddress. _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisor"); _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstanced"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstanced"); #else _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisorANGLE"); _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstancedANGLE"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstancedANGLE"); #endif _supports_vertex_attrib_divisor = true; _supports_geometry_instancing = true; } else { // Check separately for geometry instancing and instanced attribs. 
if (has_extension("GL_EXT_draw_instanced")) { _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstancedEXT"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstancedEXT"); _supports_geometry_instancing = true; } else if (has_extension("GL_NV_draw_instanced")) { _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstancedNV"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstancedNV"); _supports_geometry_instancing = true; } else { _supports_geometry_instancing = false; } if (has_extension("GL_EXT_instanced_arrays")) { _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisorEXT"); _supports_vertex_attrib_divisor = true; } else if (has_extension("GL_NV_instanced_arrays")) { _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisorNV"); _supports_vertex_attrib_divisor = true; } else { _supports_vertex_attrib_divisor = false; } } #else if (is_at_least_gl_version(3, 3)) { // This feature is in OpenGL core as of 3.3. _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisor"); _supports_vertex_attrib_divisor = true; } else if (has_extension("GL_ARB_instanced_arrays")) { _glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC) get_extension_func("glVertexAttribDivisorARB"); _supports_vertex_attrib_divisor = true; } else { _supports_vertex_attrib_divisor = false; } // Some drivers expose one extension, some expose the other. 
if (is_at_least_gl_version(3, 1)) { _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstanced"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstanced"); _supports_geometry_instancing = true; } else if (has_extension("GL_ARB_draw_instanced")) { _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstancedARB"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstancedARB"); _supports_geometry_instancing = true; } else if (has_extension("GL_EXT_draw_instanced")) { _glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC) get_extension_func("glDrawArraysInstancedEXT"); _glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC) get_extension_func("glDrawElementsInstancedEXT"); _supports_geometry_instancing = true; } else { _glDrawElementsInstanced = 0; _glDrawArraysInstanced = 0; _supports_geometry_instancing = false; } #endif #ifndef OPENGLES_1 if (_supports_geometry_instancing) { if (_glDrawArraysInstanced == nullptr || _glDrawElementsInstanced == nullptr) { GLCAT.warning() << "Geometry instancing advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_geometry_instancing = false; } } if (_supports_vertex_attrib_divisor) { if (_glVertexAttribDivisor == nullptr) { GLCAT.warning() << "Instanced vertex arrays advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_vertex_attrib_divisor = false; } } #endif // Check if we support indirect draw. 
_supports_indirect_draw = false; #ifndef OPENGLES_1 #ifdef OPENGLES if (is_at_least_gles_version(3, 1)) { #else if (is_at_least_gl_version(4, 0) || has_extension("GL_ARB_draw_indirect")) { #endif _glDrawArraysIndirect = (PFNGLDRAWARRAYSINDIRECTPROC) get_extension_func("glDrawArraysIndirect"); _glDrawElementsIndirect = (PFNGLDRAWELEMENTSINDIRECTPROC) get_extension_func("glDrawElementsIndirect"); if (_glDrawArraysIndirect == nullptr || _glDrawElementsIndirect == nullptr) { GLCAT.warning() << "Indirect draw advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; } else { _supports_indirect_draw = true; } } #endif #ifdef OPENGLES_1 _supports_framebuffer_multisample = false; _supports_framebuffer_blit = false; if (has_extension("GL_OES_framebuffer_object")) { _supports_framebuffer_object = true; _glIsRenderbuffer = (PFNGLISRENDERBUFFEROESPROC) get_extension_func("glIsRenderbufferOES"); _glBindRenderbuffer = (PFNGLBINDRENDERBUFFEROESPROC) get_extension_func("glBindRenderbufferOES"); _glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSOESPROC) get_extension_func("glDeleteRenderbuffersOES"); _glGenRenderbuffers = (PFNGLGENRENDERBUFFERSOESPROC) get_extension_func("glGenRenderbuffersOES"); _glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEOESPROC) get_extension_func("glRenderbufferStorageOES"); _glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVOESPROC) get_extension_func("glGetRenderbufferParameterivOES"); _glIsFramebuffer = (PFNGLISFRAMEBUFFEROESPROC) get_extension_func("glIsFramebufferOES"); _glBindFramebuffer = (PFNGLBINDFRAMEBUFFEROESPROC) get_extension_func("glBindFramebufferOES"); _glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSOESPROC) get_extension_func("glDeleteFramebuffersOES"); _glGenFramebuffers = (PFNGLGENFRAMEBUFFERSOESPROC) get_extension_func("glGenFramebuffersOES"); _glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSOESPROC) get_extension_func("glCheckFramebufferStatusOES"); _glFramebufferTexture1D = 
nullptr; _glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DOESPROC) get_extension_func("glFramebufferTexture2DOES"); _glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFEROESPROC) get_extension_func("glFramebufferRenderbufferOES"); _glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVOESPROC) get_extension_func("glGetFramebufferAttachmentParameterivOES"); _glGenerateMipmap = (PFNGLGENERATEMIPMAPOESPROC) get_extension_func("glGenerateMipmapOES"); } else { _supports_framebuffer_object = false; _glGenerateMipmap = nullptr; } #elif defined(OPENGLES) // In OpenGL ES 2.x, FBO's are supported in the core. _supports_framebuffer_object = true; _glIsRenderbuffer = glIsRenderbuffer; _glBindRenderbuffer = glBindRenderbuffer; _glDeleteRenderbuffers = glDeleteRenderbuffers; _glGenRenderbuffers = glGenRenderbuffers; _glRenderbufferStorage = glRenderbufferStorage; _glGetRenderbufferParameteriv = glGetRenderbufferParameteriv; _glIsFramebuffer = glIsFramebuffer; _glBindFramebuffer = glBindFramebuffer; _glDeleteFramebuffers = glDeleteFramebuffers; _glGenFramebuffers = glGenFramebuffers; _glCheckFramebufferStatus = glCheckFramebufferStatus; _glFramebufferTexture1D = nullptr; _glFramebufferTexture2D = glFramebufferTexture2D; _glFramebufferRenderbuffer = glFramebufferRenderbuffer; _glGetFramebufferAttachmentParameteriv = glGetFramebufferAttachmentParameteriv; _glGenerateMipmap = glGenerateMipmap; if (is_at_least_gles_version(3, 0)) { _supports_framebuffer_multisample = true; _supports_framebuffer_blit = true; _glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) get_extension_func("glRenderbufferStorageMultisample"); _glBlitFramebuffer = (PFNGLBLITFRAMEBUFFEREXTPROC) get_extension_func("glBlitFramebuffer"); } else { if (has_extension("GL_ANGLE_framebuffer_multisample")) { _supports_framebuffer_multisample = true; _glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC) 
get_extension_func("glRenderbufferStorageMultisampleANGLE"); } else { _supports_framebuffer_multisample = false; } if (has_extension("GL_ANGLE_framebuffer_blit")) { _supports_framebuffer_blit = true; _glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERANGLEPROC) get_extension_func("glBlitFramebufferANGLE"); } else { _supports_framebuffer_blit = false; } } #else // Desktop OpenGL case. if (is_at_least_gl_version(3, 0) || has_extension("GL_ARB_framebuffer_object")) { _supports_framebuffer_object = true; _supports_framebuffer_multisample = true; _supports_framebuffer_blit = true; _glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC) get_extension_func("glIsRenderbuffer"); _glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC) get_extension_func("glBindRenderbuffer"); _glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC) get_extension_func("glDeleteRenderbuffers"); _glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC) get_extension_func("glGenRenderbuffers"); _glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC) get_extension_func("glRenderbufferStorage"); _glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC) get_extension_func("glGetRenderbufferParameteriv"); _glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC) get_extension_func("glIsFramebuffer"); _glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC) get_extension_func("glBindFramebuffer"); _glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC) get_extension_func("glDeleteFramebuffers"); _glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC) get_extension_func("glGenFramebuffers"); _glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC) get_extension_func("glCheckFramebufferStatus"); _glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC) get_extension_func("glFramebufferTexture1D"); _glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC) get_extension_func("glFramebufferTexture2D"); _glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DPROC) get_extension_func("glFramebufferTexture3D"); _glFramebufferRenderbuffer = 
(PFNGLFRAMEBUFFERRENDERBUFFERPROC) get_extension_func("glFramebufferRenderbuffer"); _glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) get_extension_func("glGetFramebufferAttachmentParameteriv"); _glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC) get_extension_func("glGenerateMipmap"); _glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) get_extension_func("glRenderbufferStorageMultisample"); _glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC) get_extension_func("glBlitFramebuffer"); } else if (has_extension("GL_EXT_framebuffer_object")) { _supports_framebuffer_object = true; _glIsRenderbuffer = (PFNGLISRENDERBUFFEREXTPROC) get_extension_func("glIsRenderbufferEXT"); _glBindRenderbuffer = (PFNGLBINDRENDERBUFFEREXTPROC) get_extension_func("glBindRenderbufferEXT"); _glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSEXTPROC) get_extension_func("glDeleteRenderbuffersEXT"); _glGenRenderbuffers = (PFNGLGENRENDERBUFFERSEXTPROC) get_extension_func("glGenRenderbuffersEXT"); _glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEEXTPROC) get_extension_func("glRenderbufferStorageEXT"); _glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVEXTPROC) get_extension_func("glGetRenderbufferParameterivEXT"); _glIsFramebuffer = (PFNGLISFRAMEBUFFEREXTPROC) get_extension_func("glIsFramebufferEXT"); _glBindFramebuffer = (PFNGLBINDFRAMEBUFFEREXTPROC) get_extension_func("glBindFramebufferEXT"); _glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSEXTPROC) get_extension_func("glDeleteFramebuffersEXT"); _glGenFramebuffers = (PFNGLGENFRAMEBUFFERSEXTPROC) get_extension_func("glGenFramebuffersEXT"); _glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC) get_extension_func("glCheckFramebufferStatusEXT"); _glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DEXTPROC) get_extension_func("glFramebufferTexture1DEXT"); _glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DEXTPROC) get_extension_func("glFramebufferTexture2DEXT"); 
_glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DEXTPROC) get_extension_func("glFramebufferTexture3DEXT"); _glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC) get_extension_func("glFramebufferRenderbufferEXT"); _glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC) get_extension_func("glGetFramebufferAttachmentParameterivEXT"); _glGenerateMipmap = (PFNGLGENERATEMIPMAPEXTPROC) get_extension_func("glGenerateMipmapEXT"); if (has_extension("GL_EXT_framebuffer_multisample")) { _supports_framebuffer_multisample = true; _glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) get_extension_func("glRenderbufferStorageMultisampleEXT"); } else { _supports_framebuffer_multisample = false; } if (has_extension("GL_EXT_framebuffer_blit")) { _supports_framebuffer_blit = true; _glBlitFramebuffer = (PFNGLBLITFRAMEBUFFEREXTPROC) get_extension_func("glBlitFramebufferEXT"); } else { _supports_framebuffer_blit = false; } } else { _supports_framebuffer_object = false; _supports_framebuffer_multisample = false; _supports_framebuffer_blit = false; _glGenerateMipmap = nullptr; } #endif #ifndef OPENGLES if (is_at_least_gl_version(4, 5) || has_extension("GL_ARB_direct_state_access")) { _glGenerateTextureMipmap = (PFNGLGENERATETEXTUREMIPMAPPROC) get_extension_func("glGenerateTextureMipmap"); _supports_dsa = true; } else { _supports_dsa = false; } #endif #ifndef OPENGLES_1 // Do we support empty framebuffer objects? 
#ifdef OPENGLES if (is_at_least_gles_version(3, 1)) { #else if (is_at_least_gl_version(4, 3) || has_extension("GL_ARB_framebuffer_no_attachments")) { #endif _glFramebufferParameteri = (PFNGLFRAMEBUFFERPARAMETERIPROC) get_extension_func("glFramebufferParameteri"); _supports_empty_framebuffer = true; } else { _supports_empty_framebuffer = false; } #endif // !OPENGLES_1 #ifndef OPENGLES _supports_framebuffer_multisample_coverage_nv = false; if (_supports_framebuffer_multisample && has_extension("GL_NV_framebuffer_multisample_coverage")) { _supports_framebuffer_multisample_coverage_nv = true; _glRenderbufferStorageMultisampleCoverage = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLECOVERAGENVPROC) get_extension_func("glRenderbufferStorageMultisampleCoverageNV"); } #endif #if defined(OPENGLES_1) _glDrawBuffers = nullptr; _max_color_targets = 1; #elif defined(OPENGLES_2) if (is_at_least_gles_version(3, 0)) { _glDrawBuffers = (PFNGLDRAWBUFFERSPROC) get_extension_func("glDrawBuffers"); } else if (has_extension("GL_EXT_draw_buffers")) { _glDrawBuffers = (PFNGLDRAWBUFFERSPROC) get_extension_func("glDrawBuffersEXT"); } else if (has_extension("GL_NV_draw_buffers")) { _glDrawBuffers = (PFNGLDRAWBUFFERSPROC) get_extension_func("glDrawBuffersNV"); } else { _glDrawBuffers = nullptr; } #else if (is_at_least_gl_version(2, 0)) { _glDrawBuffers = (PFNGLDRAWBUFFERSPROC) get_extension_func("glDrawBuffers"); } else if (has_extension("GL_ARB_draw_buffers")) { _glDrawBuffers = (PFNGLDRAWBUFFERSPROC) get_extension_func("glDrawBuffersARB"); } else { _glDrawBuffers = nullptr; } #endif #ifndef OPENGLES_1 _max_color_targets = 1; if (_glDrawBuffers != nullptr) { GLint max_draw_buffers = 0; glGetIntegerv(GL_MAX_DRAW_BUFFERS, &max_draw_buffers); _max_color_targets = max_draw_buffers; } #endif // !OPENGLES_1 #ifndef OPENGLES_1 if (_gl_version_major >= 3) { _glClearBufferfv = (PFNGLCLEARBUFFERFVPROC) get_extension_func("glClearBufferfv"); _glClearBufferiv = (PFNGLCLEARBUFFERIVPROC) 
get_extension_func("glClearBufferiv"); _glClearBufferfi = (PFNGLCLEARBUFFERFIPROC) get_extension_func("glClearBufferfi"); } else { _glClearBufferfv = nullptr; _glClearBufferiv = nullptr; _glClearBufferfi = nullptr; } #endif // !OPENGLES #ifndef OPENGLES _supports_viewport_arrays = false; if (is_at_least_gl_version(4, 1) || has_extension("GL_ARB_viewport_array")) { _glViewportArrayv = (PFNGLVIEWPORTARRAYVPROC) get_extension_func("glViewportArrayv"); _glScissorArrayv = (PFNGLSCISSORARRAYVPROC) get_extension_func("glScissorArrayv"); _glDepthRangeArrayv = (PFNGLDEPTHRANGEARRAYVPROC) get_extension_func("glDepthRangeArrayv"); if (_glViewportArrayv == nullptr || _glScissorArrayv == nullptr || _glDepthRangeArrayv == nullptr) { GLCAT.warning() << "Viewport arrays advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; } else { _supports_viewport_arrays = true; } } #endif // !OPENGLES _max_fb_samples = 0; if (_supports_framebuffer_multisample) { GLint max_samples; glGetIntegerv(GL_MAX_SAMPLES_EXT, &max_samples); _max_fb_samples = max_samples; } _supports_occlusion_query = false; #ifndef OPENGLES if (gl_support_occlusion_query) { if (is_at_least_gl_version(1, 5)) { _supports_occlusion_query = true; _glGenQueries = (PFNGLGENQUERIESPROC) get_extension_func("glGenQueries"); _glBeginQuery = (PFNGLBEGINQUERYPROC) get_extension_func("glBeginQuery"); _glEndQuery = (PFNGLENDQUERYPROC) get_extension_func("glEndQuery"); _glDeleteQueries = (PFNGLDELETEQUERIESPROC) get_extension_func("glDeleteQueries"); _glGetQueryiv = (PFNGLGETQUERYIVPROC) get_extension_func("glGetQueryiv"); _glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC) get_extension_func("glGetQueryObjectuiv"); } else if (has_extension("GL_ARB_occlusion_query")) { _supports_occlusion_query = true; _glGenQueries = (PFNGLGENQUERIESPROC) get_extension_func("glGenQueriesARB"); _glBeginQuery = (PFNGLBEGINQUERYPROC) get_extension_func("glBeginQueryARB"); _glEndQuery = (PFNGLENDQUERYPROC) 
get_extension_func("glEndQueryARB"); _glDeleteQueries = (PFNGLDELETEQUERIESPROC) get_extension_func("glDeleteQueriesARB"); _glGetQueryiv = (PFNGLGETQUERYIVPROC) get_extension_func("glGetQueryivARB"); _glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC) get_extension_func("glGetQueryObjectuivARB"); } } if (_supports_occlusion_query) { if (_glGenQueries == nullptr || _glBeginQuery == nullptr || _glEndQuery == nullptr || _glDeleteQueries == nullptr || _glGetQueryiv == nullptr || _glGetQueryObjectuiv == nullptr) { GLCAT.warning() << "Occlusion queries advertised as supported by OpenGL runtime, but could not get pointers to extension functions.\n"; _supports_occlusion_query = false; } else { GLint num_bits; _glGetQueryiv(GL_SAMPLES_PASSED, GL_QUERY_COUNTER_BITS, &num_bits); if (num_bits == 0) { _supports_occlusion_query = false; } if (GLCAT.is_debug()) { GLCAT.debug() << "Occlusion query counter provides " << num_bits << " bits.\n"; } } } #endif // !OPENGLES _supports_timer_query = false; #if defined(DO_PSTATS) && !defined(OPENGLES) if (is_at_least_gl_version(3, 3) || has_extension("GL_ARB_timer_query")) { _supports_timer_query = true; _glQueryCounter = (PFNGLQUERYCOUNTERPROC) get_extension_func("glQueryCounter"); _glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC) get_extension_func("glGetQueryObjecti64v"); _glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC) get_extension_func("glGetQueryObjectui64v"); _glGetInteger64v = (PFNGLGETINTEGER64VPROC) get_extension_func("glGetInteger64v"); } #endif #ifdef OPENGLES_1 // In OpenGL ES 1, blending is supported via extensions. 
if (has_extension("GL_OES_blend_subtract")) { _glBlendEquation = (PFNGLBLENDEQUATIONPROC) get_extension_func("glBlendEquationOES"); if (_glBlendEquation == nullptr) { _glBlendEquation = null_glBlendEquation; GLCAT.warning() << "BlendEquationOES advertised as supported by OpenGL ES runtime, but " "could not get pointer to extension function.\n"; } } else { _glBlendEquation = null_glBlendEquation; } if (has_extension("GL_OES_blend_equation_separate")) { _glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEOESPROC) get_extension_func("glBlendEquationSeparateOES"); if (_glBlendEquation == nullptr) { _supports_blend_equation_separate = false; GLCAT.warning() << "BlendEquationSeparateOES advertised as supported by OpenGL ES " "runtime, but could not get pointer to extension function.\n"; } else { _supports_blend_equation_separate = true; } } else { _supports_blend_equation_separate = false; _glBlendEquationSeparate = nullptr; } if (has_extension("GL_OES_blend_func_separate")) { _glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEOESPROC) get_extension_func("glBlendFuncSeparateOES"); if (_glBlendFuncSeparate == nullptr) { _glBlendFuncSeparate = null_glBlendFuncSeparate; GLCAT.warning() << "BlendFuncSeparateOES advertised as supported by OpenGL ES runtime, but " "could not get pointer to extension function.\n"; } } else { _glBlendFuncSeparate = null_glBlendFuncSeparate; } #elif defined(OPENGLES) // In OpenGL ES 2.x and above, this is supported in the core. 
_supports_blend_equation_separate = false; #else if (is_at_least_gl_version(1, 2)) { _glBlendEquation = (PFNGLBLENDEQUATIONPROC) get_extension_func("glBlendEquation"); } else if (has_extension("GL_EXT_blend_minmax")) { _glBlendEquation = (PFNGLBLENDEQUATIONPROC) get_extension_func("glBlendEquationEXT"); } else { _glBlendEquation = null_glBlendEquation; } if (_glBlendEquation == nullptr) { _glBlendEquation = null_glBlendEquation; GLCAT.warning() << "BlendEquation advertised as supported by OpenGL runtime, but could " "not get pointer to extension function.\n"; } if (is_at_least_gl_version(2, 0)) { _supports_blend_equation_separate = true; _glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC) get_extension_func("glBlendEquationSeparate"); } else if (has_extension("GL_EXT_blend_equation_separate")) { _supports_blend_equation_separate = true; _glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEEXTPROC) get_extension_func("glBlendEquationSeparateEXT"); } else { _supports_blend_equation_separate = false; _glBlendEquationSeparate = nullptr; } if (_supports_blend_equation_separate && _glBlendEquationSeparate == nullptr) { _supports_blend_equation_separate = false; GLCAT.warning() << "BlendEquationSeparate advertised as supported by OpenGL runtime, " "but could not get pointer to extension function.\n"; } if (is_at_least_gl_version(1, 4)) { _glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC) get_extension_func("glBlendFuncSeparate"); } else if (has_extension("GL_EXT_blend_func_separate")) { _glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEEXTPROC) get_extension_func("glBlendFuncSeparateEXT"); } else { _glBlendFuncSeparate = null_glBlendFuncSeparate; } if (_glBlendFuncSeparate == nullptr) { _glBlendFuncSeparate = null_glBlendFuncSeparate; GLCAT.warning() << "BlendFuncSeparate advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } #endif // In OpenGL ES 2.x, this is supported in the core. In 1.x, not at all. 
#ifndef OPENGLES _glBlendColor = nullptr; bool supports_blend_color = false; if (is_at_least_gl_version(1, 2)) { supports_blend_color = true; _glBlendColor = (PFNGLBLENDCOLORPROC) get_extension_func("glBlendColor"); } else if (has_extension("GL_EXT_blend_color")) { supports_blend_color = true; _glBlendColor = (PFNGLBLENDCOLORPROC) get_extension_func("glBlendColorEXT"); } if (supports_blend_color && _glBlendColor == nullptr) { GLCAT.warning() << "BlendColor advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } if (_glBlendColor == nullptr) { _glBlendColor = null_glBlendColor; } #endif #ifdef OPENGLES_1 // OpenGL ES 1 doesn't support dual-source blending. #elif defined(OPENGLES) _supports_dual_source_blending = has_extension("GL_EXT_blend_func_extended"); #else _supports_dual_source_blending = is_at_least_gl_version(3, 3) || has_extension("GL_ARB_blend_func_extended"); #endif #ifdef OPENGLES _edge_clamp = GL_CLAMP_TO_EDGE; #else _edge_clamp = GL_CLAMP; if (is_at_least_gl_version(1, 2) || is_at_least_gles_version(1, 1) || has_extension("GL_SGIS_texture_edge_clamp")) { _edge_clamp = GL_CLAMP_TO_EDGE; } #endif _border_clamp = _edge_clamp; #ifndef OPENGLES if (gl_support_clamp_to_border && (is_at_least_gl_version(1, 3) || has_extension("GL_ARB_texture_border_clamp"))) { _border_clamp = GL_CLAMP_TO_BORDER; } #endif #ifdef OPENGLES_1 _mirror_repeat = GL_REPEAT; if (has_extension("GL_OES_texture_mirrored_repeat")) { _mirror_repeat = GL_MIRRORED_REPEAT; } #elif defined(OPENGLES) // OpenGL 2.x and above support this in the core. 
_mirror_repeat = GL_MIRRORED_REPEAT; #else _mirror_repeat = GL_REPEAT; if (is_at_least_gl_version(1, 4) || has_extension("GL_ARB_texture_mirrored_repeat")) { _mirror_repeat = GL_MIRRORED_REPEAT; } #endif _mirror_clamp = _edge_clamp; _mirror_edge_clamp = _edge_clamp; _mirror_border_clamp = _border_clamp; #ifndef OPENGLES if (has_extension("GL_EXT_texture_mirror_clamp")) { _mirror_clamp = GL_MIRROR_CLAMP_EXT; _mirror_edge_clamp = GL_MIRROR_CLAMP_TO_EDGE_EXT; _mirror_border_clamp = GL_MIRROR_CLAMP_TO_BORDER_EXT; } else if (is_at_least_gl_version(4, 4) || has_extension("GL_ARB_texture_mirror_clamp_to_edge")) { _mirror_clamp = GL_MIRROR_CLAMP_TO_EDGE; _mirror_edge_clamp = GL_MIRROR_CLAMP_TO_EDGE; } #endif #ifdef OPENGLES _supports_texture_lod = is_at_least_gles_version(3, 0); _supports_texture_lod_bias = false; #else _supports_texture_lod = false; _supports_texture_lod_bias = false; if (gl_support_texture_lod && (is_at_least_gl_version(1, 2) || has_extension("GL_SGIS_texture_lod"))) { _supports_texture_lod = true; if (is_at_least_gl_version(1, 4) || has_extension("GL_EXT_texture_lod_bias")) { _supports_texture_lod_bias = true; } } #endif #ifdef OPENGLES _supports_texture_max_level = is_at_least_gles_version(3, 0) || has_extension("GL_APPLE_texture_max_level"); #else _supports_texture_max_level = is_at_least_gl_version(1, 2); #endif if (_supports_multisample) { GLint sample_buffers = 0; glGetIntegerv(GL_SAMPLE_BUFFERS, &sample_buffers); if (sample_buffers != 1) { // Even if the API supports multisample, we might have ended up with a // framebuffer that doesn't have any multisample bits. (It's also // possible the graphics card doesn't provide any framebuffers with // multisample.) In this case, we don't really support the multisample // API's, since they won't do anything. 
_supports_multisample = false; } } GLint max_texture_size = 0; GLint max_3d_texture_size = 0; GLint max_2d_texture_array_layers = 0; GLint max_cube_map_size = 0; glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size); _max_texture_dimension = max_texture_size; if (_supports_3d_texture) { #ifndef OPENGLES_1 glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &max_3d_texture_size); #endif _max_3d_texture_dimension = max_3d_texture_size; } else { _max_3d_texture_dimension = 0; } #ifndef OPENGLES_1 if (_supports_2d_texture_array) { glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max_2d_texture_array_layers); _max_2d_texture_array_layers = max_2d_texture_array_layers; } #endif if (_supports_cube_map) { glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &max_cube_map_size); _max_cube_map_dimension = max_cube_map_size; } else { _max_cube_map_dimension = 0; } #ifndef OPENGLES if (_supports_buffer_texture) { GLint max_buffer_texture_size = 0; glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max_buffer_texture_size); _max_buffer_texture_size = max_buffer_texture_size; } else { _max_buffer_texture_size = 0; } #endif // !OPENGLES #ifndef OPENGLES GLint max_elements_vertices = 0, max_elements_indices = 0; if (is_at_least_gl_version(1, 2) || has_extension("GL_EXT_draw_range_elements")) { glGetIntegerv(GL_MAX_ELEMENTS_VERTICES, &max_elements_vertices); glGetIntegerv(GL_MAX_ELEMENTS_INDICES, &max_elements_indices); if (max_elements_vertices > 0) { _max_vertices_per_array = max_elements_vertices; } if (max_elements_indices > 0) { _max_vertices_per_primitive = max_elements_indices; } } #endif // OPENGLES if (GLCAT.is_debug()) { GLCAT.debug() << "max texture dimension = " << _max_texture_dimension << ", max 3d texture = " << _max_3d_texture_dimension << ", max 2d texture array = " << max_2d_texture_array_layers << ", max cube map = " << _max_cube_map_dimension << "\n"; #ifndef OPENGLES GLCAT.debug() << "max_elements_vertices = " << max_elements_vertices << ", max_elements_indices = " << max_elements_indices << 
"\n"; #endif if (_supports_buffers) { if (vertex_buffers) { GLCAT.debug() << "vertex buffer objects are supported.\n"; } else { GLCAT.debug() << "vertex buffer objects are supported (but not enabled).\n"; } } else { GLCAT.debug() << "vertex buffer objects are NOT supported.\n"; } #ifdef SUPPORT_IMMEDIATE_MODE if (!vertex_arrays) { GLCAT.debug() << "immediate mode commands will be used instead of vertex arrays.\n"; } #endif if (!_supports_compressed_texture) { GLCAT.debug() << "Texture compression is not supported.\n"; } else { GLint num_compressed_formats = 0; glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &num_compressed_formats); if (num_compressed_formats == 0) { GLCAT.debug() << "No specific compressed texture formats are supported.\n"; } else { #ifndef NDEBUG GLCAT.debug() << "Supported compressed texture formats:\n"; GLint *formats = (GLint *)alloca(num_compressed_formats * sizeof(GLint)); glGetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, formats); for (int i = 0; i < num_compressed_formats; ++i) { const char *format_str = get_compressed_format_string(formats[i]); if (format_str != nullptr) { GLCAT.debug(false) << " " << format_str << '\n'; } else { GLCAT.debug(false) << " Unknown compressed format 0x" << hex << formats[i] << dec << "\n"; } } #endif } } } _active_texture_stage = -1; _num_active_texture_stages = 0; // Check availability of anisotropic texture filtering. _supports_anisotropy = false; _max_anisotropy = 1.0; if (is_at_least_gl_version(4, 6) || has_extension("GL_EXT_texture_filter_anisotropic") || has_extension("GL_ARB_texture_filter_anisotropic")) { GLfloat max_anisotropy; glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &max_anisotropy); _max_anisotropy = (PN_stdfloat)max_anisotropy; _supports_anisotropy = true; } // Check availability of image read/write functionality in shaders. 
_max_image_units = 0; #ifndef OPENGLES_1 #ifdef OPENGLES if (is_at_least_gl_version(3, 1)) { #else if (is_at_least_gl_version(4, 2) || has_extension("GL_ARB_shader_image_load_store")) { #endif _glBindImageTexture = (PFNGLBINDIMAGETEXTUREPROC) get_extension_func("glBindImageTexture"); _glMemoryBarrier = (PFNGLMEMORYBARRIERPROC) get_extension_func("glMemoryBarrier"); glGetIntegerv(GL_MAX_IMAGE_UNITS, &_max_image_units); #ifndef OPENGLES } else if (has_extension("GL_EXT_shader_image_load_store")) { _glBindImageTexture = (PFNGLBINDIMAGETEXTUREPROC) get_extension_func("glBindImageTextureEXT"); _glMemoryBarrier = (PFNGLMEMORYBARRIERPROC) get_extension_func("glMemoryBarrierEXT"); glGetIntegerv(GL_MAX_IMAGE_UNITS_EXT, &_max_image_units); #endif } else { _glBindImageTexture = nullptr; _glMemoryBarrier = nullptr; } #endif // !OPENGLES_1 _supports_sampler_objects = false; #ifndef OPENGLES_1 if (gl_support_sampler_objects && #ifdef OPENGLES is_at_least_gles_version(3, 0)) { #else (is_at_least_gl_version(3, 3) || has_extension("GL_ARB_sampler_objects"))) { #endif _glGenSamplers = (PFNGLGENSAMPLERSPROC) get_extension_func("glGenSamplers"); _glDeleteSamplers = (PFNGLDELETESAMPLERSPROC) get_extension_func("glDeleteSamplers"); _glBindSampler = (PFNGLBINDSAMPLERPROC) get_extension_func("glBindSampler"); _glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC) get_extension_func("glSamplerParameteri"); _glSamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC) get_extension_func("glSamplerParameteriv"); _glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC) get_extension_func("glSamplerParameterf"); _glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC) get_extension_func("glSamplerParameterfv"); if (_glGenSamplers == nullptr || _glDeleteSamplers == nullptr || _glBindSampler == nullptr || _glSamplerParameteri == nullptr || _glSamplerParameteriv == nullptr || _glSamplerParameterf == nullptr || _glSamplerParameterfv == nullptr) { GLCAT.warning() << "GL_ARB_sampler_objects advertised as supported by 
OpenGL runtime, but could not get pointers to extension function.\n"; } else { _supports_sampler_objects = true; } } #endif // !OPENGLES_1 // Check availability of multi-bind functions. _supports_multi_bind = false; #ifndef OPENGLES if (is_at_least_gl_version(4, 4) || has_extension("GL_ARB_multi_bind")) { _glBindTextures = (PFNGLBINDTEXTURESPROC) get_extension_func("glBindTextures"); _glBindImageTextures = (PFNGLBINDIMAGETEXTURESPROC) get_extension_func("glBindImageTextures"); if (_supports_sampler_objects) { _glBindSamplers = (PFNGLBINDSAMPLERSPROC) get_extension_func("glBindSamplers"); } if (_use_vertex_attrib_binding) { _glBindVertexBuffers = (PFNGLBINDVERTEXBUFFERSPROC) get_extension_func("glBindVertexBuffers"); } if (_glBindTextures != nullptr && _glBindImageTextures != nullptr) { _supports_multi_bind = true; } else { GLCAT.warning() << "ARB_multi_bind advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } } #endif // !OPENGLES #ifndef OPENGLES_1 #ifdef OPENGLES if (is_at_least_gl_version(3, 0)) { #else if (is_at_least_gl_version(4, 3) || has_extension("GL_ARB_internalformat_query2")) { #endif _glGetInternalformativ = (PFNGLGETINTERNALFORMATIVPROC) get_extension_func("glGetInternalformativ"); if (_glGetInternalformativ == nullptr) { GLCAT.warning() << "ARB_internalformat_query2 advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } } #endif // !OPENGLES_1 _supports_bindless_texture = false; #ifndef OPENGLES if (has_extension("GL_ARB_bindless_texture")) { _glGetTextureHandle = (PFNGLGETTEXTUREHANDLEPROC) get_extension_func("glGetTextureHandleARB"); _glGetTextureSamplerHandle = (PFNGLGETTEXTURESAMPLERHANDLEPROC) get_extension_func("glGetTextureSamplerHandleARB"); _glMakeTextureHandleResident = (PFNGLMAKETEXTUREHANDLERESIDENTPROC) get_extension_func("glMakeTextureHandleResidentARB"); _glUniformHandleui64 = (PFNGLUNIFORMHANDLEUI64PROC) 
get_extension_func("glUniformHandleui64ARB"); if (_glGetTextureHandle == nullptr || _glMakeTextureHandleResident == nullptr || _glUniformHandleui64 == nullptr) { GLCAT.warning() << "GL_ARB_bindless_texture advertised as supported by OpenGL runtime, but could not get pointers to extension function.\n"; } else { _supports_bindless_texture = true; } } #endif // !OPENGLES #ifndef OPENGLES_1 _supports_get_program_binary = false; _program_binary_formats.clear(); #ifdef OPENGLES if (is_at_least_gles_version(3, 0)) { #else if (is_at_least_gl_version(4, 1) || has_extension("GL_ARB_get_program_binary")) { #endif _glGetProgramBinary = (PFNGLGETPROGRAMBINARYPROC) get_extension_func("glGetProgramBinary"); _glProgramBinary = (PFNGLPROGRAMBINARYPROC) get_extension_func("glProgramBinary"); _glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC) get_extension_func("glProgramParameteri"); GLint num_binary_formats = 0; if (_glGetProgramBinary != nullptr && _glProgramBinary != nullptr && _glProgramParameteri != nullptr) { glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_binary_formats); } if (num_binary_formats > 0) { _supports_get_program_binary = true; GLenum *binary_formats = (GLenum *)alloca(sizeof(GLenum) * num_binary_formats); glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, (GLint *)binary_formats); for (int i = 0; i < num_binary_formats; ++i) { _program_binary_formats.insert(binary_formats[i]); } } } #endif // !OPENGLES_1 report_my_gl_errors(); if (core_profile) { // TODO: better detection mechanism? 
_supports_stencil = support_stencil; } #ifdef SUPPORT_FIXED_FUNCTION else if (support_stencil) { GLint num_stencil_bits; glGetIntegerv(GL_STENCIL_BITS, &num_stencil_bits); _supports_stencil = (num_stencil_bits != 0); } #endif #ifdef OPENGLES_1 _supports_stencil_wrap = has_extension("GL_OES_stencil_wrap"); #elif defined(OPENGLES) _supports_stencil_wrap = true; #else _supports_stencil_wrap = is_at_least_gl_version(1, 4) || has_extension("GL_EXT_stencil_wrap"); #endif _supports_two_sided_stencil = false; #ifndef OPENGLES //TODO: support the two-sided stencil functions that ended up in core. if (has_extension("GL_EXT_stencil_two_side")) { _glActiveStencilFaceEXT = (PFNGLACTIVESTENCILFACEEXTPROC) get_extension_func("glActiveStencilFaceEXT"); _supports_two_sided_stencil = true; } else { _glActiveStencilFaceEXT = 0; } #endif // Ensure the initial state is what we say it should be (in some cases, we // don't want the GL default settings; in others, we have to force the point // with some drivers that aren't strictly compliant w.r.t. initial // settings). glFrontFace(GL_CCW); #ifndef OPENGLES_2 glDisable(GL_LINE_SMOOTH); #endif #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { glDisable(GL_POINT_SMOOTH); } #endif #ifndef OPENGLES glDisable(GL_POLYGON_SMOOTH); #endif // OPENGLES #ifndef OPENGLES_2 if (_supports_multisample) { glDisable(GL_MULTISAMPLE); } #endif // Set depth range from zero to one if requested. 
#ifndef OPENGLES _use_depth_zero_to_one = false; _use_remapped_depth_range = false; if (gl_depth_zero_to_one) { if (is_at_least_gl_version(4, 5) || has_extension("GL_ARB_clip_control")) { PFNGLCLIPCONTROLPROC pglClipControl = (PFNGLCLIPCONTROLPROC)get_extension_func("glClipControl"); if (pglClipControl != nullptr) { pglClipControl(GL_LOWER_LEFT, GL_ZERO_TO_ONE); _use_depth_zero_to_one = true; if (GLCAT.is_debug()) { GLCAT.debug() << "Set zero-to-one depth using glClipControl\n"; } } }/* else if (has_extension("GL_NV_depth_buffer_float")) { // Alternatively, all GeForce 8+ and even some AMD drivers support this // extension, which (unlike the core glDepthRange, which clamps its // input parameters) can compensate for the built-in depth remapping. _glDepthRangedNV = (PFNGLDEPTHRANGEDNVPROC)get_extension_func("glDepthRangedNV"); if (_glDepthRangedNV != nullptr) { _glDepthRangedNV(-1.0, 1.0); _use_depth_zero_to_one = true; _use_remapped_depth_range = true; if (GLCAT.is_debug()) { GLCAT.debug() << "Set zero-to-one depth using glDepthRangedNV\n"; } } }*/ if (!_use_depth_zero_to_one) { GLCAT.warning() << "Zero-to-one depth was requested, but driver does not support it.\n"; } } #endif // Set up all the enableddisabled flags to GL's known initial values: // everything off. 
_multisample_mode = 0; _line_smooth_enabled = false; _point_smooth_enabled = false; _polygon_smooth_enabled = false; _stencil_test_enabled = false; _blend_enabled = false; _depth_test_enabled = false; _fog_enabled = false; _alpha_test_enabled = false; _polygon_offset_enabled = false; _flat_shade_model = false; _decal_level = 0; _active_color_write_mask = ColorWriteAttrib::C_all; _tex_gen_point_sprite = false; #ifndef OPENGLES_1 _enabled_vertex_attrib_arrays.clear(); memset(_vertex_attrib_divisors, 0, sizeof(GLuint) * 32); #endif // Dither is on by default in GL; let's turn it off glDisable(GL_DITHER); _dithering_enabled = false; #ifndef OPENGLES_1 _current_shader = nullptr; _current_shader_context = nullptr; _vertex_array_shader = nullptr; _vertex_array_shader_context = nullptr; _texture_binding_shader = nullptr; _texture_binding_shader_context = nullptr; #endif // Count the max number of lights _max_lights = 0; #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { GLint max_lights = 0; glGetIntegerv(GL_MAX_LIGHTS, &max_lights); _max_lights = max_lights; if (GLCAT.is_debug()) { GLCAT.debug() << "max lights = " << _max_lights << "\n"; } } #endif // Count the max number of clipping planes _max_clip_planes = 0; #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { GLint max_clip_planes = 0; glGetIntegerv(GL_MAX_CLIP_PLANES, &max_clip_planes); _max_clip_planes = max_clip_planes; if (GLCAT.is_debug()) { GLCAT.debug() << "max clip planes = " << _max_clip_planes << "\n"; } } #endif _max_texture_stages = 1; #ifdef SUPPORT_FIXED_FUNCTION if (supports_multitexture && has_fixed_function_pipeline()) { GLint max_texture_stages = 0; glGetIntegerv(GL_MAX_TEXTURE_UNITS, &max_texture_stages); _max_texture_stages = max_texture_stages; if (GLCAT.is_debug()) { GLCAT.debug() << "max texture stages = " << _max_texture_stages << "\n"; } } #endif _current_vbuffer_index = 0; _current_ibuffer_index = 0; _current_vao_index = 0; _current_fbo = 0; _auto_antialias_mode 
= false; _render_mode = RenderModeAttrib::M_filled; _point_size = 1.0f; _point_perspective = false; #ifndef OPENGLES _current_vertex_buffers.clear(); _current_vertex_format.clear(); memset(_vertex_attrib_columns, 0, sizeof(const GeomVertexColumn *) * 32); _current_sbuffer_index = 0; _current_sbuffer_base.clear(); #endif report_my_gl_errors(); #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { if (gl_cheap_textures) { GLCAT.info() << "Setting glHint() for fastest textures.\n"; glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST); } // Use per-vertex fog if per-pixel fog requires SW renderer glHint(GL_FOG_HINT, GL_DONT_CARE); } #endif #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { GLint num_red_bits = 0; glGetIntegerv(GL_RED_BITS, &num_red_bits); if (num_red_bits < 8) { glEnable(GL_DITHER); _dithering_enabled = true; if (GLCAT.is_debug()) { GLCAT.debug() << "frame buffer depth = " << num_red_bits << " bits/channel, enabling dithering\n"; } } } #endif _error_count = 0; report_my_gl_errors(); #ifndef OPENGLES_1 if (GLCAT.is_debug()) { if (_supports_get_program_binary) { GLCAT.debug() << "Supported program binary formats:\n"; GLCAT.debug() << " "; pset<GLenum>::const_iterator it; for (it = _program_binary_formats.begin(); it != _program_binary_formats.end(); ++it) { char number[16]; sprintf(number, "0x%04X", *it); GLCAT.debug(false) << " " << number << ""; } GLCAT.debug(false) << "\n"; } else { GLCAT.debug() << "No program binary formats supported.\n"; } } #endif // Do we guarantee that we can apply the color scale via a shader? We set // this false if there is a chance that the fixed-function pipeline is used. 
_runtime_color_scale = !has_fixed_function_pipeline(); #ifndef OPENGLES if (_gl_shadlang_ver_major >= 4 || has_extension("GL_NV_gpu_program5")) { // gp5fp - OpenGL fragment profile for GeForce 400 Series and up _shader_model = SM_50; } else if (_gl_shadlang_ver_major >= 3 || has_extension("GL_NV_gpu_program4")) { // gp4fp - OpenGL fragment profile for G8x (GeForce 8xxx and up) _shader_model = SM_40; } else if (has_extension("GL_NV_fragment_program2")) { // fp40 - OpenGL fragment profile for NV4x (GeForce 6xxx and 7xxx Series, // NV4x-based Quadro FX, etc.) _shader_model = SM_30; } else if (has_extension("GL_NV_fragment_program")) { // fp30 - OpenGL fragment profile for NV3x (GeForce FX, Quadro FX, etc.) _shader_model = SM_2X; } else if (_gl_shadlang_ver_major >= 1 || has_extension("GL_ARB_fragment_program")) { // This OpenGL profile corresponds to the per-fragment functionality // introduced by GeForce FX and other DirectX 9 GPUs. _shader_model = SM_20; } else if (has_extension("GL_NV_texture_shader2")) { // fp20 - OpenGL fragment profile for NV2x (GeForce3, GeForce4 Ti, Quadro // DCC, etc.) 
_shader_model = SM_11; } else { // No shader support _shader_model = SM_00; } // DisplayInformation may have better shader model detection { GraphicsPipe *pipe; DisplayInformation *display_information; pipe = this->get_pipe(); if (pipe) { display_information = pipe->get_display_information (); if (display_information) { if (display_information->get_shader_model() > _shader_model) { _shader_model = display_information->get_shader_model(); } } } } _auto_detect_shader_model = _shader_model; if (GLCAT.is_debug()) { #ifdef HAVE_CG #if CG_VERSION_NUM >= 2200 GLCAT.debug() << "Supported Cg profiles:\n"; int num_profiles = cgGetNumSupportedProfiles(); for (int i = 0; i < num_profiles; ++i) { CGprofile profile = cgGetSupportedProfile(i); if (cgGLIsProfileSupported(profile)) { GLCAT.debug() << " " << cgGetProfileString(profile) << "\n"; } } #endif // CG_VERSION_NUM >= 2200 #if CG_VERSION_NUM >= 3100 GLCAT.debug() << "Cg GLSL version = " << cgGLGetGLSLVersionString(cgGLDetectGLSLVersion()) << "\n"; #endif GLCAT.debug() << "Cg latest vertex profile = " << cgGetProfileString(cgGLGetLatestProfile(CG_GL_VERTEX)) << "\n"; GLCAT.debug() << "Cg latest fragment profile = " << cgGetProfileString(cgGLGetLatestProfile(CG_GL_FRAGMENT)) << "\n"; #if CG_VERSION_NUM >= 2000 GLCAT.debug() << "Cg latest geometry profile = " << cgGetProfileString(cgGLGetLatestProfile(CG_GL_GEOMETRY)) << "\n"; #endif GLCAT.debug() << "basic-shaders-only " << basic_shaders_only << "\n"; GLCAT.debug() << "Cg active vertex profile = " << cgGetProfileString((CGprofile)_shader_caps._active_vprofile) << "\n"; GLCAT.debug() << "Cg active fragment profile = " << cgGetProfileString((CGprofile)_shader_caps._active_fprofile) << "\n"; GLCAT.debug() << "Cg active geometry profile = " << cgGetProfileString((CGprofile)_shader_caps._active_gprofile) << "\n"; #endif // HAVE_CG GLCAT.debug() << "shader model = " << _shader_model << "\n"; } #endif // !OPENGLES // OpenGL core profile requires a VAO to be bound. 
It's a bit silly, // because we can just bind a VAO and then forget about it. #if !defined(OPENGLES) if (core_profile) { if (_supports_vao) { _glGenVertexArrays(1, &_current_vao_index); _glBindVertexArray(_current_vao_index); } else { GLCAT.error() << "Core profile enabled, but vertex array objects not supported!\n"; } } #endif pop_group_marker(); // Now that the GSG has been initialized, make it available for // optimizations. add_gsg(this); } /** * Force the graphics card to finish drawing before returning. !!!!!HACK * WARNING!!!! glfinish does not actually wait for the graphics card to finish * drawing only for draw calls to finish. Thus flip may not happene * immediately. Instead we read a single pixel from the framebuffer. This * forces the graphics card to finish drawing the frame before returning. */ void CLP(GraphicsStateGuardian):: finish() { // Rather than call glfinish which returns immediately if draw commands have // been submitted, we will read a single pixel from the frame. That will // force the graphics card to finish drawing before it is called char data[4]; glReadPixels(0,0,1,1,GL_RGBA,GL_UNSIGNED_BYTE,&data); // glFinish(); } /** * Clears the framebuffer within the current DisplayRegion, according to the * flags indicated by the given DrawableRegion object. * * This does not set the DisplayRegion first. You should call * prepare_display_region() to specify the region you wish the clear operation * to apply to. */ void CLP(GraphicsStateGuardian):: clear(DrawableRegion *clearable) { report_my_gl_errors(); if (!clearable->is_any_clear_active()) { return; } // XXX rdb: Is this line really necessary? Could we perhaps just reset the // color write mask and other relevant attributes? 
set_state_and_transform(RenderState::make_empty(), _internal_transform); int mask = 0; #ifndef OPENGLES_1 if (_current_fbo != 0 && _glClearBufferfv != nullptr) { // We can use glClearBuffer to clear all the color attachments, which // protects us from the overhead of having to call set_draw_buffer for // every single attachment. int index = 0; if (_current_properties->get_color_bits() > 0) { if (_current_properties->is_stereo()) { // Clear both left and right attachments. if (clearable->get_clear_active(GraphicsOutput::RTP_color)) { LColorf v = LCAST(float, clearable->get_clear_value(GraphicsOutput::RTP_color)); _glClearBufferfv(GL_COLOR, index, v.get_data()); _glClearBufferfv(GL_COLOR, index + 1, v.get_data()); } index += 2; } else { if (clearable->get_clear_active(GraphicsOutput::RTP_color)) { LColorf v = LCAST(float, clearable->get_clear_value(GraphicsOutput::RTP_color)); _glClearBufferfv(GL_COLOR, index, v.get_data()); } ++index; } } for (int i = 0; i < _current_properties->get_aux_rgba(); ++i) { int layerid = GraphicsOutput::RTP_aux_rgba_0 + i; if (clearable->get_clear_active(layerid)) { LColorf v = LCAST(float, clearable->get_clear_value(layerid)); _glClearBufferfv(GL_COLOR, index, v.get_data()); } ++index; } for (int i = 0; i < _current_properties->get_aux_hrgba(); ++i) { int layerid = GraphicsOutput::RTP_aux_hrgba_0 + i; if (clearable->get_clear_active(layerid)) { LColorf v = LCAST(float, clearable->get_clear_value(layerid)); _glClearBufferfv(GL_COLOR, index, v.get_data()); } ++index; } for (int i = 0; i < _current_properties->get_aux_float(); ++i) { int layerid = GraphicsOutput::RTP_aux_float_0 + i; if (clearable->get_clear_active(layerid)) { LColorf v = LCAST(float, clearable->get_clear_value(layerid)); _glClearBufferfv(GL_COLOR, index, v.get_data()); } ++index; } } else #endif { if (_current_properties->get_aux_mask() != 0) { for (int i = 0; i < _current_properties->get_aux_rgba(); ++i) { int layerid = GraphicsOutput::RTP_aux_rgba_0 + i; int layerbit = 
RenderBuffer::T_aux_rgba_0 << i; if (clearable->get_clear_active(layerid)) { LColor v = clearable->get_clear_value(layerid); glClearColor(v[0], v[1], v[2], v[3]); set_draw_buffer(layerbit); glClear(GL_COLOR_BUFFER_BIT); } } for (int i = 0; i < _current_properties->get_aux_hrgba(); ++i) { int layerid = GraphicsOutput::RTP_aux_hrgba_0 + i; int layerbit = RenderBuffer::T_aux_hrgba_0 << i; if (clearable->get_clear_active(layerid)) { LColor v = clearable->get_clear_value(layerid); glClearColor(v[0], v[1], v[2], v[3]); set_draw_buffer(layerbit); glClear(GL_COLOR_BUFFER_BIT); } } for (int i = 0; i < _current_properties->get_aux_float(); ++i) { int layerid = GraphicsOutput::RTP_aux_float_0 + i; int layerbit = RenderBuffer::T_aux_float_0 << i; if (clearable->get_clear_active(layerid)) { LColor v = clearable->get_clear_value(layerid); glClearColor(v[0], v[1], v[2], v[3]); set_draw_buffer(layerbit); glClear(GL_COLOR_BUFFER_BIT); } } // In the past, it was possible to set the draw buffer once in // prepare_display_region and then forget about it. Now, with aux // layers, it is necessary to occasionally change the draw buffer. In // time, I think there will need to be a draw buffer attrib. Until // then, this little hack to put things back the way they were after // prepare_display_region will do. 
set_draw_buffer(_draw_buffer_type); } if (_current_properties->get_color_bits() > 0) { if (clearable->get_clear_color_active()) { LColor v = clearable->get_clear_color(); glClearColor(v[0], v[1], v[2], v[3]); clear_color_write_mask(); _state_mask.clear_bit(ColorWriteAttrib::get_class_slot()); mask |= GL_COLOR_BUFFER_BIT; } } } if (clearable->get_clear_depth_active()) { #ifdef OPENGLES glClearDepthf(clearable->get_clear_depth()); #else glClearDepth(clearable->get_clear_depth()); #endif // OPENGLES #ifdef GSG_VERBOSE GLCAT.spam() << "glDepthMask(GL_TRUE)" << endl; #endif glDepthMask(GL_TRUE); _state_mask.clear_bit(DepthWriteAttrib::get_class_slot()); mask |= GL_DEPTH_BUFFER_BIT; } if (_supports_stencil && clearable->get_clear_stencil_active()) { glStencilMask(~0); glClearStencil(clearable->get_clear_stencil()); mask |= GL_STENCIL_BUFFER_BIT; } if (mask != 0) { glClear(mask); if (GLCAT.is_spam()) { string clear_flags; if (mask & GL_COLOR_BUFFER_BIT) { clear_flags += " | GL_COLOR_BUFFER_BIT"; } if (mask & GL_DEPTH_BUFFER_BIT) { clear_flags += " | GL_DEPTH_BUFFER_BIT"; } if (mask & GL_STENCIL_BUFFER_BIT) { clear_flags += " | GL_STENCIL_BUFFER_BIT"; } #ifndef OPENGLES if (mask & GL_ACCUM_BUFFER_BIT) { clear_flags += " | GL_ACCUM_BUFFER_BIT"; } #endif GLCAT.spam() << "glClear(" << (clear_flags.c_str() + 3) << ")\n"; } } report_my_gl_errors(); } /** * Prepare a display region for rendering (set up scissor region and viewport) */ void CLP(GraphicsStateGuardian):: prepare_display_region(DisplayRegionPipelineReader *dr) { nassertv(dr != nullptr); GraphicsStateGuardian::prepare_display_region(dr); int l, b, w, h; dr->get_region_pixels(l, b, w, h); _viewport_x = l; _viewport_y = b; _viewport_width = w; _viewport_height = h; GLint x = GLint(l); GLint y = GLint(b); GLsizei width = GLsizei(w); GLsizei height = GLsizei(h); _draw_buffer_type = dr->get_object()->get_draw_buffer_type() & _current_properties->get_buffer_mask() & _stereo_buffer_mask; _draw_buffer_type |= 
_current_properties->get_aux_mask(); set_draw_buffer(_draw_buffer_type); int count = dr->get_num_regions(); if (dr->get_scissor_enabled()) { if (GLCAT.is_spam()) { GLCAT.spam() << "glEnable(GL_SCISSOR_TEST)\n"; } glEnable(GL_SCISSOR_TEST); _scissor_enabled = true; _scissor_array.resize(count); } else { if (GLCAT.is_spam()) { GLCAT.spam() << "glDisable(GL_SCISSOR_TEST)\n"; } glDisable(GL_SCISSOR_TEST); _scissor_enabled = false; _scissor_array.clear(); } _scissor_attrib_active = false; #ifndef OPENGLES if (_supports_viewport_arrays) { GLfloat *viewports = (GLfloat *)alloca(sizeof(GLfloat) * 4 * count); // We store the scissor regions in a vector since we may need to switch // back to it in do_issue_scissor. for (int i = 0; i < count; ++i) { LVecBase4i sr; dr->get_region_pixels(i, sr[0], sr[1], sr[2], sr[3]); GLfloat *vr = viewports + i * 4; vr[0] = (GLfloat) sr[0]; vr[1] = (GLfloat) sr[1]; vr[2] = (GLfloat) sr[2]; vr[3] = (GLfloat) sr[3]; if (_scissor_enabled) { _scissor_array[i] = sr; } } _glViewportArrayv(0, count, viewports); if (_scissor_enabled) { _glScissorArrayv(0, count, _scissor_array[0].get_data()); } if (GLCAT.is_spam()) { GLCAT.spam() << "glViewportArrayv(0, " << count << ","; for (int i = 0; i < count; ++i) { GLfloat *vr = viewports + i * 4; GLCAT.spam(false) << " [" << vr[0] << " " << vr[1] << " " << vr[2] << " " << vr[3] << "]"; } GLCAT.spam(false) << ")\n"; if (_scissor_enabled) { GLCAT.spam() << "glScissorArrayv(0, " << count << ","; for (int i = 0; i < count; ++i) { const LVecBase4i &sr = _scissor_array[i]; GLCAT.spam(false) << " [" << sr << "]"; } GLCAT.spam(false) << ")\n"; } } } else #endif // OPENGLES { glViewport(x, y, width, height); if (_scissor_enabled) { glScissor(x, y, width, height); _scissor_array.resize(1); _scissor_array[0].set(x, y, width, height); } if (GLCAT.is_spam()) { GLCAT.spam() << "glViewport(" << x << ", " << y << ", " << width << ", " << height << ")\n"; if (dr->get_scissor_enabled()) { GLCAT.spam() << "glScissor(" << x << 
", " << y << ", " << width << ", " << height << ")\n"; } } } report_my_gl_errors(); } /** * Resets any non-standard graphics state that might give a callback apoplexy. * Some drivers require that the graphics state be restored to neutral before * performing certain operations. In OpenGL, for instance, this closes any * open vertex buffers. */ void CLP(GraphicsStateGuardian):: clear_before_callback() { #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { disable_standard_vertex_arrays(); } #endif #ifndef OPENGLES_1 if (_vertex_array_shader_context != 0) { _vertex_array_shader_context->disable_shader_vertex_arrays(); _vertex_array_shader = nullptr; _vertex_array_shader_context = nullptr; } #endif unbind_buffers(); // Some callbacks may quite reasonably assume that the active texture stage // is still set to stage 0. CEGUI, in particular, makes this assumption. set_active_texture_stage(0); #ifdef SUPPORT_FIXED_FUNCTION _glClientActiveTexture(GL_TEXTURE0); #endif // It's also quite reasonable to presume there aren't any funny color write // mask settings active. clear_color_write_mask(); // Clear the bound sampler object, so that we do not inadvertently override // the callback's desired sampler settings. #ifndef OPENGLES_1 if (_supports_sampler_objects) { _glBindSampler(0, 0); if (GLCAT.is_spam()) { GLCAT.spam() << "glBindSampler(0, 0)\n"; } } #endif } /** * Given a lens, calculates the appropriate projection matrix for use with * this gsg. Note that the projection matrix depends a lot upon the * coordinate system of the rendering API. * * The return value is a TransformState if the lens is acceptable, NULL if it * is not. 
*/ CPT(TransformState) CLP(GraphicsStateGuardian):: calc_projection_mat(const Lens *lens) { if (lens == nullptr) { return nullptr; } if (!lens->is_linear()) { return nullptr; } // The projection matrix must always be right-handed Y-up, even if our // coordinate system of choice is otherwise, because certain GL calls // (specifically glTexGen(GL_SPHERE_MAP)) assume this kind of a coordinate // system. Sigh. In order to implement a Z-up (or other arbitrary) // coordinate system, we'll use a Y-up projection matrix, and store the // conversion to our coordinate system of choice in the modelview matrix. LMatrix4 result = LMatrix4::convert_mat(_internal_coordinate_system, lens->get_coordinate_system()) * lens->get_projection_mat(_current_stereo_channel); #ifndef OPENGLES if (_use_depth_zero_to_one) { // If we requested that the OpenGL NDC Z goes from zero to one like in // Direct3D, we need to scale the projection matrix, which assumes -1..1. static const LMatrix4 rescale_mat (1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 1); result *= rescale_mat; } #endif if (_scene_setup->get_inverted()) { // If the scene is supposed to be inverted, then invert the projection // matrix. result *= LMatrix4::scale_mat(1.0f, -1.0f, 1.0f); } return TransformState::make_mat(result); } /** * Makes the current lens (whichever lens was most recently specified with * set_scene()) active, so that it will transform future rendered geometry. * Normally this is only called from the draw process, and usually it is * called by set_scene(). * * The return value is true if the lens is acceptable, false if it is not. 
*/ bool CLP(GraphicsStateGuardian):: prepare_lens() { #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { if (GLCAT.is_spam()) { GLCAT.spam() << "glMatrixMode(GL_PROJECTION): " << _projection_mat->get_mat() << endl; } glMatrixMode(GL_PROJECTION); call_glLoadMatrix(_projection_mat->get_mat()); report_my_gl_errors(); do_point_size(); } #endif #ifndef OPENGLES_1 if (_current_shader_context) { _current_shader_context->issue_parameters(Shader::SSD_transform); } #endif return true; } /** * Called before each frame is rendered, to allow the GSG a chance to do any * internal cleanup before beginning the frame. * * The return value is true if successful (in which case the frame will be * drawn and end_frame() will be called later), or false if unsuccessful (in * which case nothing will be drawn and end_frame() will not be called). */ bool CLP(GraphicsStateGuardian):: begin_frame(Thread *current_thread) { if (!GraphicsStateGuardian::begin_frame(current_thread)) { return false; } _renderbuffer_residency.begin_frame(current_thread); report_my_gl_errors(); #ifdef DO_PSTATS _vertices_display_list_pcollector.clear_level(); _vertices_immediate_pcollector.clear_level(); _primitive_batches_display_list_pcollector.clear_level(); #endif #ifndef NDEBUG _show_texture_usage = false; if (gl_show_texture_usage) { // When this is true, then every other second, we show the usage textures // instead of the real textures. double now = ClockObject::get_global_clock()->get_frame_time(); int this_second = (int)floor(now); if (this_second & 1) { _show_texture_usage = true; _show_texture_usage_index = this_second >> 1; int max_size = gl_show_texture_usage_max_size; if (max_size != _show_texture_usage_max_size) { // Remove the cache of usage textures; we've changed the max size. 
UsageTextures::iterator ui; for (ui = _usage_textures.begin(); ui != _usage_textures.end(); ++ui) { GLuint index = (*ui).second; glDeleteTextures(1, &index); } _usage_textures.clear(); _show_texture_usage_max_size = max_size; } } } #endif // NDEBUG #ifdef DO_PSTATS /*if (_supports_timer_query) { // Measure the difference between the OpenGL clock and the PStats clock. GLint64 time_ns; _glGetInteger64v(GL_TIMESTAMP, &time_ns); _timer_delta = time_ns * -0.000000001; _timer_delta += PStatClient::get_global_pstats()->get_real_time(); }*/ #endif #ifndef OPENGLES if (_current_properties->get_srgb_color()) { glEnable(GL_FRAMEBUFFER_SRGB); } #endif report_my_gl_errors(); return true; } /** * Called between begin_frame() and end_frame() to mark the beginning of * drawing commands for a "scene" (usually a particular DisplayRegion) within * a frame. All 3-D drawing commands, except the clear operation, must be * enclosed within begin_scene() .. end_scene(). * * The return value is true if successful (in which case the scene will be * drawn and end_scene() will be called later), or false if unsuccessful (in * which case nothing will be drawn and end_scene() will not be called). */ bool CLP(GraphicsStateGuardian):: begin_scene() { return GraphicsStateGuardian::begin_scene(); } /** * Called between begin_frame() and end_frame() to mark the end of drawing * commands for a "scene" (usually a particular DisplayRegion) within a frame. * All 3-D drawing commands, except the clear operation, must be enclosed * within begin_scene() .. end_scene(). */ void CLP(GraphicsStateGuardian):: end_scene() { GraphicsStateGuardian::end_scene(); _dlights.clear(); report_my_gl_errors(); } /** * Called after each frame is rendered, to allow the GSG a chance to do any * internal cleanup after rendering the frame, and before the window flips. 
*/ void CLP(GraphicsStateGuardian):: end_frame(Thread *current_thread) { report_my_gl_errors(); #ifndef OPENGLES if (_current_properties->get_srgb_color()) { glDisable(GL_FRAMEBUFFER_SRGB); } #endif #ifdef DO_PSTATS // Check for textures, etc., that are no longer resident. These calls might // be measurably expensive, and they don't have any benefit unless we are // actually viewing PStats, so don't do them unless we're connected. That // will just mean that we'll count everything as resident until the user // connects PStats, at which point it will then correct the assessment. No // harm done. if (has_fixed_function_pipeline() && PStatClient::is_connected()) { PStatTimer timer(_check_residency_pcollector); check_nonresident_texture(_prepared_objects->_texture_residency.get_inactive_resident()); check_nonresident_texture(_prepared_objects->_texture_residency.get_active_resident()); // OpenGL provides no methods for querying whether a buffer object (vertex // buffer) is resident. In fact, the API appears geared towards the // assumption that such buffers are always resident. OK. } #endif #ifndef OPENGLES_1 // This breaks shaders across multiple regions. if (_vertex_array_shader_context != 0) { _vertex_array_shader_context->disable_shader_vertex_arrays(); _vertex_array_shader = nullptr; _vertex_array_shader_context = nullptr; } if (_texture_binding_shader_context != 0) { _texture_binding_shader_context->disable_shader_texture_bindings(); _texture_binding_shader = nullptr; _texture_binding_shader_context = nullptr; } if (_current_shader_context != 0) { _current_shader_context->unbind(); _current_shader = nullptr; _current_shader_context = nullptr; } #endif // Respecify the active texture next frame, for good measure. _active_texture_stage = -1; // Calling glFlush() at the end of the frame is particularly necessary if // this is a single-buffered visual, so that the frame will be finished // drawing before we return to the application. 
It's not clear what effect // this has on our total frame time. if (_force_flush || // _current_properties->is_single_buffered()) { gl_flush(); } maybe_gl_finish(); GraphicsStateGuardian::end_frame(current_thread); _renderbuffer_residency.end_frame(current_thread); // Flush any PCollectors specific to this kind of GSG. _primitive_batches_display_list_pcollector.flush_level(); _vertices_display_list_pcollector.flush_level(); _vertices_immediate_pcollector.flush_level(); // Now is a good time to delete any pending display lists. #ifndef OPENGLES #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline() && display_lists) { LightMutexHolder holder(_lock); if (!_deleted_display_lists.empty()) { DeletedNames::iterator ddli; for (ddli = _deleted_display_lists.begin(); ddli != _deleted_display_lists.end(); ++ddli) { if (GLCAT.is_debug()) { GLCAT.debug() << "releasing display list index " << (int)(*ddli) << "\n"; } glDeleteLists((*ddli), 1); } _deleted_display_lists.clear(); } } #endif // And deleted queries, too, unless we're using query timers in which case // we'll need to reuse lots of them. if (_supports_occlusion_query && !get_timer_queries_active()) { LightMutexHolder holder(_lock); if (!_deleted_queries.empty()) { if (GLCAT.is_spam()) { DeletedNames::iterator dqi; for (dqi = _deleted_queries.begin(); dqi != _deleted_queries.end(); ++dqi) { GLCAT.spam() << "releasing query index " << (int)(*dqi) << "\n"; } } _glDeleteQueries(_deleted_queries.size(), &_deleted_queries[0]); _deleted_queries.clear(); } } #endif // OPENGLES #ifndef NDEBUG if (_check_errors || (_supports_debug && gl_debug)) { report_my_gl_errors(); } else { // If _check_errors is false, we still want to check for errors once every // second, so that we know if anything went wrong at all. 
double current = ClockObject::get_global_clock()->get_frame_time(); if (current - _last_error_check >= 1.0) { _last_error_check = current; PStatTimer timer(_check_error_pcollector); GLenum error_code = glGetError(); if (error_code != GL_NO_ERROR) { int error_count = 0; do { ++error_count; GLCAT.error() << "GL error 0x" << hex << error_code << dec << " : " << get_error_string(error_code) << "\n"; error_code = glGetError(); } while (error_code != GL_NO_ERROR); if (error_count == 1) { GLCAT.error() << "An OpenGL error has occurred."; } else { GLCAT.error() << error_count << " OpenGL errors have occurred."; } if (_supports_debug) { GLCAT.error(false) << " Set gl-debug #t " << "in your PRC file to display more information.\n"; } else { GLCAT.error(false) << " Set gl-check-errors #t " << "in your PRC file to display more information.\n"; } _error_count += error_count; if (_error_count >= gl_max_errors) { panic_deactivate(); } } } } #endif // Add in a newline to the spam output for improved legibility. if (GLCAT.is_spam()) { GLCAT.spam(false) << endl; } } /** * Called before a sequence of draw_primitive() functions are called, this * should prepare the vertex data for rendering. It returns true if the * vertices are ok, false to abort this group of primitives. */ bool CLP(GraphicsStateGuardian):: begin_draw_primitives(const GeomPipelineReader *geom_reader, const GeomVertexDataPipelineReader *data_reader, bool force) { #ifndef NDEBUG if (GLCAT.is_spam()) { GLCAT.spam() << "begin_draw_primitives: " << *(data_reader->get_object()) << "\n"; } #endif // NDEBUG #ifndef OPENGLES_1 if (!has_fixed_function_pipeline()) { // We can't draw without a shader bound in OpenGL ES 2. This shouldn't // happen anyway unless the default shader failed to compile somehow. 
if (_current_shader_context == nullptr) { return false; } } #endif if (!GraphicsStateGuardian::begin_draw_primitives(geom_reader, data_reader, force)) { return false; } nassertr(_data_reader != nullptr, false); _geom_display_list = 0; if (_auto_antialias_mode) { switch (geom_reader->get_primitive_type()) { case GeomPrimitive::PT_polygons: case GeomPrimitive::PT_patches: setup_antialias_polygon(); break; case GeomPrimitive::PT_points: setup_antialias_point(); break; case GeomPrimitive::PT_lines: setup_antialias_line(); break; case GeomPrimitive::PT_none: break; } int transparency_slot = TransparencyAttrib::get_class_slot(); int color_write_slot = ColorWriteAttrib::get_class_slot(); int color_blend_slot = ColorBlendAttrib::get_class_slot(); if (!_state_mask.get_bit(transparency_slot) || !_state_mask.get_bit(color_write_slot) || !_state_mask.get_bit(color_blend_slot)) { do_issue_blending(); _state_mask.set_bit(transparency_slot); _state_mask.set_bit(color_write_slot); _state_mask.set_bit(color_blend_slot); } } #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline() && _data_reader->is_vertex_transformed()) { // If the vertex data claims to be already transformed into clip // coordinates, wipe out the current projection and modelview matrix (so // we don't attempt to transform it again). glMatrixMode(GL_PROJECTION); glPushMatrix(); glLoadIdentity(); glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); } #endif #if !defined(OPENGLES) && defined(SUPPORT_FIXED_FUNCTION) // Display lists not supported by OpenGL ES. if (has_fixed_function_pipeline() && /*geom_reader->get_usage_hint() == Geom::UH_static &&*/ _data_reader->get_usage_hint() == Geom::UH_static && display_lists) { // If the geom claims to be totally static, try to build it into a display // list. // Before we compile or call a display list, make sure the current buffers // are unbound, or the nVidia drivers may crash. 
unbind_buffers(); GeomContext *gc = geom_reader->prepare_now(get_prepared_objects(), this); nassertr(gc != nullptr, false); CLP(GeomContext) *ggc = DCAST(CLP(GeomContext), gc); //const CLP(GeomMunger) *gmunger = DCAST(CLP(GeomMunger), _munger); UpdateSeq modified = max(geom_reader->get_modified(), _data_reader->get_modified()); if (ggc->get_display_list(_geom_display_list, nullptr, modified)) { // If it hasn't been modified, just play the display list again. if (GLCAT.is_spam()) { GLCAT.spam() << "calling display list " << (int)_geom_display_list << "\n"; } glCallList(_geom_display_list); #ifdef DO_PSTATS _vertices_display_list_pcollector.add_level(ggc->_num_verts); _primitive_batches_display_list_pcollector.add_level(1); #endif // And now we don't need to do anything else for this geom. _geom_display_list = 0; end_draw_primitives(); return false; } // Since we start this collector explicitly, we have to be sure to stop it // again. _load_display_list_pcollector.start(); if (GLCAT.is_debug()) { GLCAT.debug() << "compiling display list " << (int)_geom_display_list << "\n"; } // If it has been modified, or this is the first time, then we need to // build the display list up. if (gl_compile_and_execute) { glNewList(_geom_display_list, GL_COMPILE_AND_EXECUTE); } else { glNewList(_geom_display_list, GL_COMPILE); } #ifdef DO_PSTATS // Count up the number of vertices used by primitives in the Geom, for // PStats reporting. ggc->_num_verts = 0; for (int i = 0; i < geom_reader->get_num_primitives(); i++) { ggc->_num_verts += geom_reader->get_primitive(i)->get_num_vertices(); } #endif } #endif // OPENGLES // Enable the appropriate vertex arrays, and disable any extra vertex arrays // used by the previous rendering mode. 
#ifdef SUPPORT_IMMEDIATE_MODE _use_sender = !vertex_arrays; #endif #ifndef OPENGLES_1 if (_use_vertex_attrib_binding) { const GeomVertexFormat *format = data_reader->get_format(); if (format != _current_vertex_format) { update_shader_vertex_format(format); } } #endif { // PStatGPUTimer timer(this, _vertex_array_update_pcollector); #ifdef OPENGLES_1 if (!update_standard_vertex_arrays(force)) { return false; } #else if (_current_shader_context == 0) { // No shader. if (_vertex_array_shader_context != 0) { _vertex_array_shader_context->disable_shader_vertex_arrays(); } #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline() && !update_standard_vertex_arrays(force)) { return false; } #endif } else { #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { // Shader. if (_vertex_array_shader_context == 0 || _vertex_array_shader_context->uses_standard_vertex_arrays()) { // Previous shader used standard arrays. if (_current_shader_context->uses_standard_vertex_arrays()) { // So does the current, so update them. if (!update_standard_vertex_arrays(force)) { return false; } } else { // The current shader does not, so disable them entirely. disable_standard_vertex_arrays(); } } #ifdef HAVE_CG else if (_vertex_array_shader_context->is_of_type(CLP(CgShaderContext)::get_class_type())) { // The previous shader was a Cg shader, which can leave a messy // situation. _vertex_array_shader_context->disable_shader_vertex_arrays(); } #endif } #endif // SUPPORT_FIXED_FUNCTION // Now update the vertex arrays for the current shader. 
if (!_current_shader_context->
          update_shader_vertex_arrays(_vertex_array_shader_context, force)) {
        return false;
      }
    }

    _vertex_array_shader = _current_shader;
    _vertex_array_shader_context = _current_shader_context;
#endif  // OPENGLES_1
  }

  report_my_gl_errors();
  return true;
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Disables any unneeded vertex arrays that were previously enabled, and
 * enables any vertex arrays that are needed that were not previously enabled
 * (or, sets up an immediate-mode sender).  Called only from
 * begin_draw_primitives.  Used only when the standard (non-shader) pipeline
 * is about to be used - glShaderContexts are responsible for setting up their
 * own vertex arrays.
 *
 * Returns false if the vertex data could not be made resident and force was
 * not set; true otherwise.
 */
bool CLP(GraphicsStateGuardian)::
update_standard_vertex_arrays(bool force) {
#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // We must use immediate mode to render primitives.
    _sender.clear();

    _sender.add_column(_data_reader, InternalName::get_normal(),
                       nullptr, nullptr, GLPf(Normal3), nullptr);
#ifndef NDEBUG
    if (_show_texture_usage) {
      // In show_texture_usage mode, all colors are white, so as not to
      // contaminate the texture color.
      GLPf(Color4)(1.0f, 1.0f, 1.0f, 1.0f);
    } else
#endif // NDEBUG
      if (!_sender.add_column(_data_reader, InternalName::get_color(),
                              nullptr, nullptr, GLPf(Color3), GLPf(Color4))) {
        // If we didn't have a color column, the item color is white.
        GLPf(Color4)(1.0f, 1.0f, 1.0f, 1.0f);
      }

    // Now set up each of the active texture coordinate stages--or at least
    // those for which we're not generating texture coordinates automatically.
    int max_stage_index = _target_texture->get_num_on_ff_stages();
    int stage_index = 0;
    while (stage_index < max_stage_index) {
      TextureStage *stage = _target_texture->get_on_ff_stage(stage_index);
      if (!_target_tex_gen->has_gen_texcoord_stage(stage)) {
        // This stage is not one of the stages that doesn't need texcoords
        // issued for it.
        const InternalName *name = stage->get_texcoord_name();

        if (stage_index == 0) {
          // Use the original functions for stage 0, in case we don't support
          // multitexture.
          _sender.add_column(_data_reader, name,
                             GLPf(TexCoord1), GLPf(TexCoord2),
                             GLPf(TexCoord3), GLPf(TexCoord4));

        } else {
          // Other stages require the multitexture functions.
          _sender.add_texcoord_column(_data_reader, name, stage_index,
                                      GLf(_glMultiTexCoord1),
                                      GLf(_glMultiTexCoord2),
                                      GLf(_glMultiTexCoord3),
                                      GLf(_glMultiTexCoord4));
        }
      }

      ++stage_index;
    }

    // Be sure also to disable any texture stages we had enabled before.
    while (stage_index < _last_max_stage_index) {
      _glClientActiveTexture(GL_TEXTURE0 + stage_index);
      glDisableClientState(GL_TEXTURE_COORD_ARRAY);
      ++stage_index;
    }
    _last_max_stage_index = max_stage_index;

    // We must add vertex last, because glVertex3f() is the key function call
    // that actually issues the vertex.
    _sender.add_column(_data_reader, InternalName::get_vertex(),
                       nullptr, GLPf(Vertex2), GLPf(Vertex3), GLPf(Vertex4));

  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    // We may use vertex arrays or buffers to render primitives.
    const GeomVertexArrayDataHandle *array_reader;
    const unsigned char *client_pointer;
    int num_values;
    Geom::NumericType numeric_type;
    int start;
    int stride;

    if (_data_reader->get_normal_info(array_reader, numeric_type,
                                      start, stride)) {
      if (!setup_array_data(client_pointer, array_reader, force)) {
        return false;
      }
      glNormalPointer(get_numeric_type(numeric_type), stride,
                      client_pointer + start);
      glEnableClientState(GL_NORMAL_ARRAY);
    } else {
      glDisableClientState(GL_NORMAL_ARRAY);
    }

#ifndef NDEBUG
    if (_show_texture_usage) {
      // In show_texture_usage mode, all colors are white, so as not to
      // contaminate the texture color.
      glDisableClientState(GL_COLOR_ARRAY);
      GLPf(Color4)(1.0f, 1.0f, 1.0f, 1.0f);
    } else
#endif // NDEBUG
      if (_vertex_colors_enabled &&
          _data_reader->get_color_info(array_reader, num_values, numeric_type,
                                       start, stride)) {
        if (!setup_array_data(client_pointer, array_reader, force)) {
          return false;
        }
        if (numeric_type == Geom::NT_packed_dabc) {
          // packed_dabc colors are issued as BGRA bytes.
          glColorPointer(GL_BGRA, GL_UNSIGNED_BYTE,
                         stride, client_pointer + start);
        } else {
          glColorPointer(num_values, get_numeric_type(numeric_type),
                         stride, client_pointer + start);
        }
        glEnableClientState(GL_COLOR_ARRAY);
      } else {
        glDisableClientState(GL_COLOR_ARRAY);

        // Since we don't have per-vertex color, the implicit color is white.
        if (_color_scale_via_lighting) {
          GLPf(Color4)(1.0f, 1.0f, 1.0f, 1.0f);
        } else {
          LColor color = _scene_graph_color;
          color.componentwise_mult(_current_color_scale);
          GLPf(Color4)(color[0], color[1], color[2], color[3]);
        }
      }

    // Now set up each of the active texture coordinate stages--or at least
    // those for which we're not generating texture coordinates automatically.
    int max_stage_index = _target_texture->get_num_on_ff_stages();
    int stage_index = 0;
    while (stage_index < max_stage_index) {
      _glClientActiveTexture(GL_TEXTURE0 + stage_index);
      TextureStage *stage = _target_texture->get_on_ff_stage(stage_index);
      if (!_target_tex_gen->has_gen_texcoord_stage(stage)) {
        // This stage is not one of the stages that doesn't need texcoords
        // issued for it.
        const InternalName *name = stage->get_texcoord_name();
        if (_data_reader->get_array_info(name, array_reader, num_values,
                                         numeric_type, start, stride)) {
          // The vertex data does have texcoords for this stage.
          if (!setup_array_data(client_pointer, array_reader, force)) {
            return false;
          }
          glTexCoordPointer(num_values, get_numeric_type(numeric_type),
                            stride, client_pointer + start);
          glEnableClientState(GL_TEXTURE_COORD_ARRAY);
        } else {
          // The vertex data doesn't have texcoords for this stage (even
          // though they're needed).
          glDisableClientState(GL_TEXTURE_COORD_ARRAY);
        }
      } else {
        // No texcoords are needed for this stage.
        glDisableClientState(GL_TEXTURE_COORD_ARRAY);
      }

      ++stage_index;
    }

    // Be sure also to disable any texture stages we had enabled before.
    while (stage_index < _last_max_stage_index) {
      _glClientActiveTexture(GL_TEXTURE0 + stage_index);
      glDisableClientState(GL_TEXTURE_COORD_ARRAY);
      ++stage_index;
    }
    _last_max_stage_index = max_stage_index;

    // There's no requirement that we add vertices last, but we do anyway.
    if (_data_reader->get_vertex_info(array_reader, num_values, numeric_type,
                                      start, stride)) {
      if (!setup_array_data(client_pointer, array_reader, force)) {
        return false;
      }
      glVertexPointer(num_values, get_numeric_type(numeric_type),
                      stride, client_pointer + start);
      glEnableClientState(GL_VERTEX_ARRAY);
    }
  }

  return true;
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Ensures the vertex and array buffers are no longer bound.  Some graphics
 * drivers crash if these are left bound indiscriminantly.
 */
void CLP(GraphicsStateGuardian)::
unbind_buffers() {
  if (_current_vbuffer_index != 0) {
    if (GLCAT.is_spam() && gl_debug_buffers) {
      GLCAT.spam()
        << "unbinding vertex buffer\n";
    }
    _glBindBuffer(GL_ARRAY_BUFFER, 0);
    _current_vbuffer_index = 0;
  }
  if (_current_ibuffer_index != 0) {
    if (GLCAT.is_spam() && gl_debug_buffers) {
      GLCAT.spam()
        << "unbinding index buffer\n";
    }
    _glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    _current_ibuffer_index = 0;
  }

#ifndef OPENGLES
  // Also unbind the vertex-attrib-binding buffer slots (desktop GL only),
  // using the multi-bind entry point when available to do it in one call.
  if (_current_vertex_buffers.size() > 1 && _supports_multi_bind) {
    _glBindVertexBuffers(0, _current_vertex_buffers.size(),
                         nullptr, nullptr, nullptr);
  } else {
    for (size_t i = 0; i < _current_vertex_buffers.size(); ++i) {
      if (_current_vertex_buffers[i] != 0) {
        _glBindVertexBuffer((GLuint)i, 0, 0, 0);
      }
    }
  }
  _current_vertex_buffers.clear();
#endif

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    disable_standard_vertex_arrays();
  }
#endif
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Used to disable all the standard vertex arrays
that are currently enabled.
 * glShaderContexts are responsible for setting up their own vertex arrays,
 * but before they can do so, the standard vertex arrays need to be disabled
 * to get them "out of the way."  Called only from begin_draw_primitives.
 */
void CLP(GraphicsStateGuardian)::
disable_standard_vertex_arrays() {
#ifdef SUPPORT_IMMEDIATE_MODE
  // The immediate-mode sender doesn't use client arrays at all.
  if (_use_sender) return;
#endif

  glDisableClientState(GL_NORMAL_ARRAY);
  glDisableClientState(GL_COLOR_ARRAY);
  GLPf(Color4)(1.0f, 1.0f, 1.0f, 1.0f);

  // Disable every texcoord array we previously enabled.
  for (int stage_index=0; stage_index < _last_max_stage_index; stage_index++) {
    _glClientActiveTexture(GL_TEXTURE0 + stage_index);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
  }
  _last_max_stage_index = 0;

  glDisableClientState(GL_VERTEX_ARRAY);
  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifndef OPENGLES_1
/**
 * Updates the vertex format used by the shader.  This is still an
 * experimental feature.
 *
 * Configures the vertex-attrib-binding format (glVertexAttribFormat /
 * glVertexAttribBinding) for each column of the given format, using a
 * hard-coded mapping from column name to attribute location.
 */
void CLP(GraphicsStateGuardian)::
update_shader_vertex_format(const GeomVertexFormat *format) {
  size_t num_columns = format->get_num_columns();
  for (size_t ci = 0; ci < num_columns; ++ci) {
    GLuint binding = format->get_array_with(ci);
    const GeomVertexColumn *column = format->get_column(ci);

    // Needs improvement, obviously.
    // Fixed attribute-location mapping; names not listed here are skipped.
    const InternalName *name = column->get_name();
    GLuint loc;
    if (name == InternalName::get_vertex()) {
      loc = 0;
    } else if (name == InternalName::get_transform_weight()) {
      loc = 1;
    } else if (name == InternalName::get_normal()) {
      loc = 2;
    } else if (name == InternalName::get_color()) {
      loc = 3;
    } else if (name == InternalName::get_transform_index()) {
      loc = 7;
    } else if (name == InternalName::get_texcoord()) {
      loc = 8;
    } else {
      // Not yet supported, ignore for now.  This system will be improved.
      continue;
    }

    // Skip re-specifying the format if this location already holds an
    // identical column description.
    if (_vertex_attrib_columns[loc] != nullptr &&
        _vertex_attrib_columns[loc]->compare_to(*column) == 0) {
      continue;
    }
    _vertex_attrib_columns[loc] = column;

    GLuint offset = column->get_start();
    GLenum type = get_numeric_type(column->get_numeric_type());
    GLboolean normalized = (column->get_contents() == GeomEnums::C_color);
    GLint size = column->get_num_values();

    if (column->get_numeric_type() == GeomEnums::NT_packed_dabc) {
      // GL_BGRA is a special accepted value available since OpenGL 3.2.  It
      // requires us to pass GL_TRUE for normalized.
      size = GL_BGRA;
      normalized = GL_TRUE;
    }

    // Matrix-style columns occupy consecutive locations, one per element.
    for (int i = 0; i < column->get_num_elements(); ++i) {
      if (loc == 7) {
        // Temp hack: transform_index is an integer attribute.
        _glVertexAttribIFormat(loc, size, type, offset);
      } else {
        _glVertexAttribFormat(loc, size, type, normalized, offset);
      }
      _glVertexAttribBinding(loc, binding);

      offset += column->get_element_stride();
      ++loc;
    }
  }

  // Per-array instance divisors.
  size_t num_arrays = format->get_num_arrays();
  for (size_t ai = 0; ai < num_arrays; ++ai) {
    _glVertexBindingDivisor(ai, format->get_array(ai)->get_divisor());
  }

  _current_vertex_format = format;
}
#endif

/**
 * Draws a series of disconnected triangles.
*/
bool CLP(GraphicsStateGuardian)::
draw_triangles(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_triangles: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_simple_primitives(reader, GL_TRIANGLES);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    int num_vertices = reader->get_num_vertices();
    _vertices_tri_pcollector.add_level(num_vertices);
    _primitive_batches_tri_pcollector.add_level(1);

    if (reader->is_indexed()) {
      // Indexed: draw from the (possibly buffer-resident) index array.
      // Returns false if the index data is nonresident and force is false.
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_TRIANGLES, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else
#endif
      {
        _glDrawRangeElements(GL_TRIANGLES,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
      // Non-indexed: a single contiguous run of vertices.
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_TRIANGLES, reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else
#endif
      {
        glDrawArrays(GL_TRIANGLES, reader->get_first_vertex(), num_vertices);
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of disconnected triangles with adjacency information.
*/
#ifndef OPENGLES
bool CLP(GraphicsStateGuardian)::
draw_triangles_adj(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_triangles_adj: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_simple_primitives(reader, GL_TRIANGLES_ADJACENCY);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    int num_vertices = reader->get_num_vertices();
    _vertices_tri_pcollector.add_level(num_vertices);
    _primitive_batches_tri_pcollector.add_level(1);

    if (reader->is_indexed()) {
      // Indexed path; fails (returns false) only if the index data is
      // nonresident and force is false.
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_TRIANGLES_ADJACENCY, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else {
        _glDrawRangeElements(GL_TRIANGLES_ADJACENCY,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
      // Non-indexed path.
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_TRIANGLES_ADJACENCY,
                               reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else {
        glDrawArrays(GL_TRIANGLES_ADJACENCY, reader->get_first_vertex(),
                     num_vertices);
      }
    }
  }

  report_my_gl_errors();
  return true;
}
#endif  // OPENGLES

/**
 * Draws a series of triangle strips.
*/
bool CLP(GraphicsStateGuardian)::
draw_tristrips(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
  report_my_gl_errors();

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_tristrips: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_composite_primitives(reader, GL_TRIANGLE_STRIP);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    if (connect_triangle_strips && _render_mode != RenderModeAttrib::M_wireframe) {
      // One long triangle strip, connected by the degenerate vertices that
      // have already been set up within the primitive.
      int num_vertices = reader->get_num_vertices();
      _vertices_tristrip_pcollector.add_level(num_vertices);
      _primitive_batches_tristrip_pcollector.add_level(1);
      if (reader->is_indexed()) {
        const unsigned char *client_pointer;
        if (!setup_primitive(client_pointer, reader, force)) {
          return false;
        }
#ifndef OPENGLES_1
        if (_supports_geometry_instancing && _instance_count > 0) {
          _glDrawElementsInstanced(GL_TRIANGLE_STRIP, num_vertices,
                                   get_numeric_type(reader->get_index_type()),
                                   client_pointer, _instance_count);
        } else
#endif
        {
          _glDrawRangeElements(GL_TRIANGLE_STRIP,
                               reader->get_min_vertex(),
                               reader->get_max_vertex(),
                               num_vertices,
                               get_numeric_type(reader->get_index_type()),
                               client_pointer);
        }
      } else {
#ifndef OPENGLES_1
        if (_supports_geometry_instancing && _instance_count > 0) {
          _glDrawArraysInstanced(GL_TRIANGLE_STRIP, reader->get_first_vertex(),
                                 num_vertices, _instance_count);
        } else
#endif
        {
          glDrawArrays(GL_TRIANGLE_STRIP, reader->get_first_vertex(),
                       num_vertices);
        }
      }

    } else {
      // Send the individual triangle strips, stepping over the degenerate
      // vertices.
      CPTA_int ends = reader->get_ends();
      _primitive_batches_tristrip_pcollector.add_level(ends.size());

      if (reader->is_indexed()) {
        const unsigned char *client_pointer;
        if (!setup_primitive(client_pointer, reader, force)) {
          return false;
        }
        int index_stride = reader->get_index_stride();
        // Per-strip min/max vertex indices, one row per strip.
        GeomVertexReader mins(reader->get_mins(), 0);
        GeomVertexReader maxs(reader->get_maxs(), 0);
        nassertr(reader->get_mins()->get_num_rows() == (int)ends.size() &&
                 reader->get_maxs()->get_num_rows() == (int)ends.size(), false);

        unsigned int start = 0;
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_tristrip_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawElementsInstanced(GL_TRIANGLE_STRIP, ends[i] - start,
                                     get_numeric_type(reader->get_index_type()),
                                     client_pointer + start * index_stride,
                                     _instance_count);
          } else
#endif
          {
            _glDrawRangeElements(GL_TRIANGLE_STRIP,
                                 mins.get_data1i(), maxs.get_data1i(),
                                 ends[i] - start,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer + start * index_stride);
          }
          // Skip the two degenerate vertices joining consecutive strips.
          start = ends[i] + 2;
        }
      } else {
        unsigned int start = 0;
        int first_vertex = reader->get_first_vertex();
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_tristrip_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawArraysInstanced(GL_TRIANGLE_STRIP, first_vertex + start,
                                   ends[i] - start, _instance_count);
          } else
#endif
          {
            glDrawArrays(GL_TRIANGLE_STRIP, first_vertex + start,
                         ends[i] - start);
          }
          // Skip the two degenerate vertices joining consecutive strips.
          start = ends[i] + 2;
        }
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of triangle strips with adjacency information.
*/ #ifndef OPENGLES bool CLP(GraphicsStateGuardian):: draw_tristrips_adj(const GeomPrimitivePipelineReader *reader, bool force) { // PStatGPUTimer timer(this, _draw_primitive_pcollector, // reader->get_current_thread()); report_my_gl_errors(); #ifndef NDEBUG if (GLCAT.is_spam()) { GLCAT.spam() << "draw_tristrips_adj: " << *(reader->get_object()) << "\n"; } #endif // NDEBUG #ifdef SUPPORT_IMMEDIATE_MODE if (_use_sender) { draw_immediate_composite_primitives(reader, GL_TRIANGLE_STRIP_ADJACENCY); } else #endif // SUPPORT_IMMEDIATE_MODE { if (reader->is_indexed() && (_supported_geom_rendering & GeomEnums::GR_strip_cut_index) != 0) { // One long line strip, connected by strip cut indices. if (_explicit_primitive_restart) { glEnable(GL_PRIMITIVE_RESTART); _glPrimitiveRestartIndex(reader->get_strip_cut_index()); } int num_vertices = reader->get_num_vertices(); _vertices_tristrip_pcollector.add_level(num_vertices); _primitive_batches_tristrip_pcollector.add_level(1); if (reader->is_indexed()) { const unsigned char *client_pointer; if (!setup_primitive(client_pointer, reader, force)) { return false; } if (_supports_geometry_instancing && _instance_count > 0) { _glDrawElementsInstanced(GL_TRIANGLE_STRIP_ADJACENCY, num_vertices, get_numeric_type(reader->get_index_type()), client_pointer, _instance_count); } else { _glDrawRangeElements(GL_TRIANGLE_STRIP_ADJACENCY, reader->get_min_vertex(), reader->get_max_vertex(), num_vertices, get_numeric_type(reader->get_index_type()), client_pointer); } } else { if (_supports_geometry_instancing && _instance_count > 0) { _glDrawArraysInstanced(GL_TRIANGLE_STRIP_ADJACENCY, reader->get_first_vertex(), num_vertices, _instance_count); } else { glDrawArrays(GL_TRIANGLE_STRIP_ADJACENCY, reader->get_first_vertex(), num_vertices); } } if (_explicit_primitive_restart) { glDisable(GL_PRIMITIVE_RESTART); } } else { // Send the individual triangle strips, stepping over the degenerate // vertices. 
CPTA_int ends = reader->get_ends(); _primitive_batches_tristrip_pcollector.add_level(ends.size()); if (reader->is_indexed()) { const unsigned char *client_pointer; if (!setup_primitive(client_pointer, reader, force)) { return false; } int index_stride = reader->get_index_stride(); GeomVertexReader mins(reader->get_mins(), 0); GeomVertexReader maxs(reader->get_maxs(), 0); nassertr(reader->get_mins()->get_num_rows() == (int)ends.size() && reader->get_maxs()->get_num_rows() == (int)ends.size(), false); unsigned int start = 0; for (size_t i = 0; i < ends.size(); i++) { _vertices_tristrip_pcollector.add_level(ends[i] - start); if (_supports_geometry_instancing && _instance_count > 0) { _glDrawElementsInstanced(GL_TRIANGLE_STRIP_ADJACENCY, ends[i] - start, get_numeric_type(reader->get_index_type()), client_pointer + start * index_stride, _instance_count); } else { _glDrawRangeElements(GL_TRIANGLE_STRIP_ADJACENCY, mins.get_data1i(), maxs.get_data1i(), ends[i] - start, get_numeric_type(reader->get_index_type()), client_pointer + start * index_stride); } start = ends[i] + 1; } } else { unsigned int start = 0; int first_vertex = reader->get_first_vertex(); for (size_t i = 0; i < ends.size(); i++) { _vertices_tristrip_pcollector.add_level(ends[i] - start); if (_supports_geometry_instancing && _instance_count > 0) { _glDrawArraysInstanced(GL_TRIANGLE_STRIP_ADJACENCY, first_vertex + start, ends[i] - start, _instance_count); } else { glDrawArrays(GL_TRIANGLE_STRIP_ADJACENCY, first_vertex + start, ends[i] - start); } start = ends[i] + 1; } } } } report_my_gl_errors(); return true; } #endif // OPENGLES /** * Draws a series of triangle fans. 
*/
bool CLP(GraphicsStateGuardian)::
draw_trifans(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_trifans: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_composite_primitives(reader, GL_TRIANGLE_FAN);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    // Send the individual triangle fans.  There's no connecting fans with
    // degenerate vertices, so no worries about that.
    CPTA_int ends = reader->get_ends();
    _primitive_batches_trifan_pcollector.add_level(ends.size());

    if (reader->is_indexed()) {
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
      int index_stride = reader->get_index_stride();
      // Per-fan min/max vertex indices, one row per fan.
      GeomVertexReader mins(reader->get_mins(), 0);
      GeomVertexReader maxs(reader->get_maxs(), 0);
      nassertr(reader->get_mins()->get_num_rows() == (int)ends.size() &&
               reader->get_maxs()->get_num_rows() == (int)ends.size(), false);

      unsigned int start = 0;
      for (size_t i = 0; i < ends.size(); i++) {
        _vertices_trifan_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
        if (_supports_geometry_instancing && _instance_count > 0) {
          _glDrawElementsInstanced(GL_TRIANGLE_FAN, ends[i] - start,
                                   get_numeric_type(reader->get_index_type()),
                                   client_pointer + start * index_stride,
                                   _instance_count);
        } else
#endif
        {
          _glDrawRangeElements(GL_TRIANGLE_FAN,
                               mins.get_data1i(), maxs.get_data1i(),
                               ends[i] - start,
                               get_numeric_type(reader->get_index_type()),
                               client_pointer + start * index_stride);
        }
        // Fans are stored back-to-back with no separator indices.
        start = ends[i];
      }
    } else {
      unsigned int start = 0;
      int first_vertex = reader->get_first_vertex();
      for (size_t i = 0; i < ends.size(); i++) {
        _vertices_trifan_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
        if (_supports_geometry_instancing && _instance_count > 0) {
          _glDrawArraysInstanced(GL_TRIANGLE_FAN, first_vertex + start,
                                 ends[i] - start, _instance_count);
        } else
#endif
        {
          glDrawArrays(GL_TRIANGLE_FAN, first_vertex + start,
                       ends[i] - start);
        }
        // Fans are stored back-to-back with no separator indices.
        start = ends[i];
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of "patches", which can only be processed by a tessellation
 * shader.
 */
bool CLP(GraphicsStateGuardian)::
draw_patches(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_patches: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

  if (!get_supports_tessellation_shaders()) {
    return false;
  }

#ifndef OPENGLES
  // Tell GL how many control points make up each patch.
  _glPatchParameteri(GL_PATCH_VERTICES,
                     reader->get_object()->get_num_vertices_per_primitive());

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_simple_primitives(reader, GL_PATCHES);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    int num_vertices = reader->get_num_vertices();
    _vertices_patch_pcollector.add_level(num_vertices);
    _primitive_batches_patch_pcollector.add_level(1);

    if (reader->is_indexed()) {
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_PATCHES, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else
#endif
      {
        _glDrawRangeElements(GL_PATCHES,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_PATCHES, reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else
#endif
      {
        glDrawArrays(GL_PATCHES, reader->get_first_vertex(), num_vertices);
      }
    }
  }
#endif  // OPENGLES

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of disconnected line segments.
*/
bool CLP(GraphicsStateGuardian)::
draw_lines(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_lines: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_simple_primitives(reader, GL_LINES);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    int num_vertices = reader->get_num_vertices();
    _vertices_other_pcollector.add_level(num_vertices);
    _primitive_batches_other_pcollector.add_level(1);

    if (reader->is_indexed()) {
      // Indexed path; fails (returns false) only if the index data is
      // nonresident and force is false.
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_LINES, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else
#endif
      {
        _glDrawRangeElements(GL_LINES,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
      // Non-indexed path.
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_LINES, reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else
#endif
      {
        glDrawArrays(GL_LINES, reader->get_first_vertex(), num_vertices);
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of disconnected line segments with adjacency information.
*/
#ifndef OPENGLES
bool CLP(GraphicsStateGuardian)::
draw_lines_adj(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_lines_adj: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_simple_primitives(reader, GL_LINES_ADJACENCY);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    int num_vertices = reader->get_num_vertices();
    _vertices_other_pcollector.add_level(num_vertices);
    _primitive_batches_other_pcollector.add_level(1);

    if (reader->is_indexed()) {
      // Indexed path; fails (returns false) only if the index data is
      // nonresident and force is false.
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_LINES_ADJACENCY, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else {
        _glDrawRangeElements(GL_LINES_ADJACENCY,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
      // Non-indexed path.
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_LINES_ADJACENCY, reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else {
        glDrawArrays(GL_LINES_ADJACENCY, reader->get_first_vertex(),
                     num_vertices);
      }
    }
  }

  report_my_gl_errors();
  return true;
}
#endif  // OPENGLES

/**
 * Draws a series of line strips.
*/
bool CLP(GraphicsStateGuardian)::
draw_linestrips(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
  report_my_gl_errors();

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_linestrips: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    // Immediate-mode path: issue each vertex individually.
    draw_immediate_composite_primitives(reader, GL_LINE_STRIP);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    if (reader->is_indexed() &&
        (_supported_geom_rendering & GeomEnums::GR_strip_cut_index) != 0) {
      // One long line strip, connected by strip cut indices.
#ifndef OPENGLES
      if (_explicit_primitive_restart) {
        glEnable(GL_PRIMITIVE_RESTART);
        _glPrimitiveRestartIndex(reader->get_strip_cut_index());
      }
#endif  // !OPENGLES

      int num_vertices = reader->get_num_vertices();
      _vertices_other_pcollector.add_level(num_vertices);
      _primitive_batches_other_pcollector.add_level(1);

      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }

#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_LINE_STRIP, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else
#endif  // !OPENGLES_1
      {
        _glDrawRangeElements(GL_LINE_STRIP,
                             reader->get_min_vertex(), reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }

#ifndef OPENGLES
      if (_explicit_primitive_restart) {
        glDisable(GL_PRIMITIVE_RESTART);
      }
#endif  // !OPENGLES

    } else {
      // Send the individual line strips, stepping over the strip-cut indices.
      CPTA_int ends = reader->get_ends();
      _primitive_batches_other_pcollector.add_level(ends.size());

      if (reader->is_indexed()) {
        const unsigned char *client_pointer;
        if (!setup_primitive(client_pointer, reader, force)) {
          return false;
        }
        int index_stride = reader->get_index_stride();
        // Per-strip min/max vertex indices, one row per strip.
        GeomVertexReader mins(reader->get_mins(), 0);
        GeomVertexReader maxs(reader->get_maxs(), 0);
        nassertr(reader->get_mins()->get_num_rows() == (int)ends.size() &&
                 reader->get_maxs()->get_num_rows() == (int)ends.size(), false);

        unsigned int start = 0;
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_other_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawElementsInstanced(GL_LINE_STRIP, ends[i] - start,
                                     get_numeric_type(reader->get_index_type()),
                                     client_pointer + start * index_stride,
                                     _instance_count);
          } else
#endif
          {
            _glDrawRangeElements(GL_LINE_STRIP,
                                 mins.get_data1i(), maxs.get_data1i(),
                                 ends[i] - start,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer + start * index_stride);
          }
          // Skip the strip-cut index between consecutive strips.
          start = ends[i] + 1;
        }
      } else {
        unsigned int start = 0;
        int first_vertex = reader->get_first_vertex();
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_other_pcollector.add_level(ends[i] - start);
#ifndef OPENGLES_1
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawArraysInstanced(GL_LINE_STRIP, first_vertex + start,
                                   ends[i] - start, _instance_count);
          } else
#endif
          {
            glDrawArrays(GL_LINE_STRIP, first_vertex + start,
                         ends[i] - start);
          }
          // Skip the strip-cut index between consecutive strips.
          start = ends[i] + 1;
        }
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Draws a series of line strips with adjacency information.
*/
#ifndef OPENGLES
bool CLP(GraphicsStateGuardian)::
draw_linestrips_adj(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());
  report_my_gl_errors();

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_linestrips_adj: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    draw_immediate_composite_primitives(reader, GL_LINE_STRIP_ADJACENCY);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    if (reader->is_indexed() &&
        (_supported_geom_rendering & GeomEnums::GR_strip_cut_index) != 0) {
      // One long line strip, connected by strip cut indices.  This whole
      // function is compiled only for desktop GL (see #ifndef OPENGLES
      // above), so no GLES guards are needed around the restart calls here.
      if (_explicit_primitive_restart) {
        glEnable(GL_PRIMITIVE_RESTART);
        _glPrimitiveRestartIndex(reader->get_strip_cut_index());
      }

      int num_vertices = reader->get_num_vertices();
      _vertices_other_pcollector.add_level(num_vertices);
      _primitive_batches_other_pcollector.add_level(1);

      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        return false;
      }
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_LINE_STRIP_ADJACENCY, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else {
        _glDrawRangeElements(GL_LINE_STRIP_ADJACENCY,
                             reader->get_min_vertex(),
                             reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }

      if (_explicit_primitive_restart) {
        glDisable(GL_PRIMITIVE_RESTART);
      }

    } else {
      // Send the individual line strips, stepping over the strip-cut indices.
      CPTA_int ends = reader->get_ends();
      _primitive_batches_other_pcollector.add_level(ends.size());

      if (reader->is_indexed()) {
        const unsigned char *client_pointer;
        if (!setup_primitive(client_pointer, reader, force)) {
          return false;
        }
        int index_stride = reader->get_index_stride();
        GeomVertexReader mins(reader->get_mins(), 0);
        GeomVertexReader maxs(reader->get_maxs(), 0);
        // One min/max vertex pair is recorded per strip; they must match up.
        nassertr(reader->get_mins()->get_num_rows() == (int)ends.size() &&
                 reader->get_maxs()->get_num_rows() == (int)ends.size(), false);

        unsigned int start = 0;
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_other_pcollector.add_level(ends[i] - start);
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawElementsInstanced(GL_LINE_STRIP_ADJACENCY, ends[i] - start,
                                     get_numeric_type(reader->get_index_type()),
                                     client_pointer + start * index_stride,
                                     _instance_count);
          } else {
            _glDrawRangeElements(GL_LINE_STRIP_ADJACENCY,
                                 mins.get_data1i(), maxs.get_data1i(),
                                 ends[i] - start,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer + start * index_stride);
          }
          // +1 skips over the strip-cut index separating the strips.
          start = ends[i] + 1;
        }
      } else {
        unsigned int start = 0;
        int first_vertex = reader->get_first_vertex();
        for (size_t i = 0; i < ends.size(); i++) {
          _vertices_other_pcollector.add_level(ends[i] - start);
          if (_supports_geometry_instancing && _instance_count > 0) {
            _glDrawArraysInstanced(GL_LINE_STRIP_ADJACENCY, first_vertex + start,
                                   ends[i] - start, _instance_count);
          } else {
            glDrawArrays(GL_LINE_STRIP_ADJACENCY, first_vertex + start,
                         ends[i] - start);
          }
          start = ends[i] + 1;
        }
      }
    }
  }

  report_my_gl_errors();
  return true;
}
#endif  // OPENGLES

/**
 * Draws a series of disconnected points.
*/
bool CLP(GraphicsStateGuardian)::
draw_points(const GeomPrimitivePipelineReader *reader, bool force) {
  // PStatGPUTimer timer(this, _draw_primitive_pcollector,
  // reader->get_current_thread());

#ifndef NDEBUG
  if (GLCAT.is_spam()) {
    GLCAT.spam() << "draw_points: " << *(reader->get_object()) << "\n";
  }
#endif  // NDEBUG

#ifdef SUPPORT_IMMEDIATE_MODE
  if (_use_sender) {
    draw_immediate_simple_primitives(reader, GL_POINTS);
  } else
#endif  // SUPPORT_IMMEDIATE_MODE
  {
    // Points need no strip-cut handling; the whole primitive is always one
    // batch, indexed or not.
    int num_vertices = reader->get_num_vertices();
    _vertices_other_pcollector.add_level(num_vertices);
    _primitive_batches_other_pcollector.add_level(1);

    if (reader->is_indexed()) {
      const unsigned char *client_pointer;
      if (!setup_primitive(client_pointer, reader, force)) {
        // Index data not resident and force is false; give up for now.
        return false;
      }
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawElementsInstanced(GL_POINTS, num_vertices,
                                 get_numeric_type(reader->get_index_type()),
                                 client_pointer, _instance_count);
      } else
#endif
      {
        _glDrawRangeElements(GL_POINTS,
                             reader->get_min_vertex(),
                             reader->get_max_vertex(),
                             num_vertices,
                             get_numeric_type(reader->get_index_type()),
                             client_pointer);
      }
    } else {
#ifndef OPENGLES_1
      if (_supports_geometry_instancing && _instance_count > 0) {
        _glDrawArraysInstanced(GL_POINTS, reader->get_first_vertex(),
                               num_vertices, _instance_count);
      } else
#endif
      {
        glDrawArrays(GL_POINTS, reader->get_first_vertex(), num_vertices);
      }
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Called after a sequence of draw_primitive() functions are called, this
 * should do whatever cleanup is appropriate.
 */
void CLP(GraphicsStateGuardian)::
end_draw_primitives() {
#if !defined(OPENGLES) && defined(SUPPORT_FIXED_FUNCTION)
  // Display lists not supported by OpenGL ES.
  if (has_fixed_function_pipeline() && _geom_display_list != 0) {
    // If we were building a display list, close it now.
    glEndList();
    _load_display_list_pcollector.stop();

    if (!gl_compile_and_execute) {
      // The list was only compiled, not executed, so call it now to actually
      // draw the geometry this frame.
      glCallList(_geom_display_list);
    }
    _primitive_batches_display_list_pcollector.add_level(1);
  }
  _geom_display_list = 0;
#endif

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline() && _transform_stale) {
    // Re-load the modelview matrix that was deferred during drawing.
    glMatrixMode(GL_MODELVIEW);
    call_glLoadMatrix(_internal_transform->get_mat());
  }

  if (has_fixed_function_pipeline() && _data_reader->is_vertex_transformed()) {
    // Restore the matrices that we pushed above.
    glMatrixMode(GL_PROJECTION);
    glPopMatrix();
    glMatrixMode(GL_MODELVIEW);
    glPopMatrix();
  }
#endif

  GraphicsStateGuardian::end_draw_primitives();
  maybe_gl_finish();
  report_my_gl_errors();
}

#ifndef OPENGLES_1
/**
 * Issues the given memory barriers, and clears the list of textures marked as
 * incoherent for the given bits.
 */
void CLP(GraphicsStateGuardian)::
issue_memory_barrier(GLbitfield barriers) {
  if (!gl_enable_memory_barriers || _glMemoryBarrier == nullptr) {
    // Barriers disabled by config, or not supported by the driver.
    return;
  }

  PStatGPUTimer timer(this, _memory_barrier_pcollector);

  if (GLCAT.is_spam()) {
    GLCAT.spam() << "Issuing memory barriers:";
  }

  _glMemoryBarrier(barriers);

  // Indicate that barriers no longer need to be issued for the relevant lists
  // of textures.  The spam(false) calls append to the log line started above
  // without emitting a new severity prefix.
  if (barriers & GL_TEXTURE_FETCH_BARRIER_BIT) {
    _textures_needing_fetch_barrier.clear();
    GLCAT.spam(false) << " texture_fetch";
  }

  if (barriers & GL_SHADER_IMAGE_ACCESS_BARRIER_BIT) {
    _textures_needing_image_access_barrier.clear();
    GLCAT.spam(false) << " shader_image_access";
  }

  if (barriers & GL_TEXTURE_UPDATE_BARRIER_BIT) {
    _textures_needing_update_barrier.clear();
    GLCAT.spam(false) << " texture_update";
  }

  if (barriers & GL_FRAMEBUFFER_BARRIER_BIT) {
    _textures_needing_framebuffer_barrier.clear();
    GLCAT.spam(false) << " framebuffer";
  }

  GLCAT.spam(false) << "\n";

  report_my_gl_errors();
}
#endif  // OPENGLES_1

/**
 * Creates whatever structures the GSG requires to represent the texture
 * internally, and returns a newly-allocated TextureContext object with this
 * data.
It is the responsibility of the calling function to later call
 * release_texture() with this same pointer (which will also delete the
 * pointer).
 *
 * This function should not be called directly to prepare a texture.  Instead,
 * call Texture::prepare().
 */
TextureContext *CLP(GraphicsStateGuardian)::
prepare_texture(Texture *tex, int view) {
  PStatGPUTimer timer(this, _prepare_texture_pcollector);

  report_my_gl_errors();

  // Make sure we'll support this texture when it's rendered.  Don't bother to
  // prepare it if we won't.
  switch (tex->get_texture_type()) {
  case Texture::TT_3d_texture:
    if (!_supports_3d_texture) {
      GLCAT.warning()
        << "3-D textures are not supported by this OpenGL driver.\n";
      return nullptr;
    }
    break;

  case Texture::TT_2d_texture_array:
    if (!_supports_2d_texture_array) {
      GLCAT.warning()
        << "2-D texture arrays are not supported by this OpenGL driver.\n";
      return nullptr;
    }
    break;

  case Texture::TT_cube_map:
    if (!_supports_cube_map) {
      GLCAT.warning()
        << "Cube map textures are not supported by this OpenGL driver.\n";
      return nullptr;
    }
    break;

  case Texture::TT_buffer_texture:
    if (!_supports_buffer_texture) {
      GLCAT.warning()
        << "Buffer textures are not supported by this OpenGL driver.\n";
      return nullptr;
    }
    break;

  case Texture::TT_cube_map_array:
    if (!_supports_cube_map_array) {
      GLCAT.warning()
        << "Cube map arrays are not supported by this OpenGL driver.\n";
      return nullptr;
    }
    break;

  default:
    break;
  }

  // The context only records bookkeeping here; the actual GL upload happens
  // later, in update_texture()/upload_texture().
  CLP(TextureContext) *gtc = new CLP(TextureContext)(this, _prepared_objects, tex, view);
  report_my_gl_errors();

  return gtc;
}

/**
 * Ensures that the current Texture data is refreshed onto the GSG.  This
 * means updating the texture properties and/or re-uploading the texture
 * image, if necessary.  This should only be called within the draw thread.
 *
 * If force is true, this function will not return until the texture has been
 * fully uploaded.  If force is false, the function may choose to upload a
 * simple version of the texture instead, if the texture is not fully resident
 * (and if get_incomplete_render() is true).
 */
bool CLP(GraphicsStateGuardian)::
update_texture(TextureContext *tc, bool force) {
  CLP(TextureContext) *gtc;
  DCAST_INTO_R(gtc, tc, false);

  if (gtc->was_image_modified() || !gtc->_has_storage) {
    PStatGPUTimer timer(this, _texture_update_pcollector);

    // If the texture image was modified, reload the texture.
    apply_texture(gtc);

    Texture *tex = tc->get_texture();
    if (gtc->was_properties_modified()) {
      specify_texture(gtc, tex->get_default_sampler());
    }

    bool okflag = upload_texture(gtc, force, tex->uses_mipmaps());
    if (!okflag) {
      GLCAT.error()
        << "Could not load " << *tex << "\n";
      return false;
    }

  } else if (gtc->was_properties_modified()) {
    PStatGPUTimer timer(this, _texture_update_pcollector);

    // If only the properties have been modified, we don't necessarily need to
    // reload the texture.
    apply_texture(gtc);

    Texture *tex = tc->get_texture();
    if (specify_texture(gtc, tex->get_default_sampler())) {
      // Actually, looks like the texture *does* need to be reloaded.
      gtc->mark_needs_reload();
      bool okflag = upload_texture(gtc, force, tex->uses_mipmaps());
      if (!okflag) {
        GLCAT.error()
          << "Could not load " << *tex << "\n";
        return false;
      }
    } else {
      // The texture didn't need reloading, but mark it fully updated now.
      gtc->mark_loaded();
    }
  }

  // Keep the texture fresh in the graphics-memory LRU so it isn't evicted.
  gtc->enqueue_lru(&_prepared_objects->_graphics_memory_lru);

  report_my_gl_errors();
  return true;
}

/**
 * Frees the GL resources previously allocated for the texture.  This function
 * should never be called directly; instead, call Texture::release() (or
 * simply let the Texture destruct).
*/ void CLP(GraphicsStateGuardian):: release_texture(TextureContext *tc) { CLP(TextureContext) *gtc = DCAST(CLP(TextureContext), tc); #ifndef OPENGLES_1 _textures_needing_fetch_barrier.erase(gtc); _textures_needing_image_access_barrier.erase(gtc); _textures_needing_update_barrier.erase(gtc); _textures_needing_framebuffer_barrier.erase(gtc); #endif glDeleteTextures(1, &gtc->_index); if (gtc->_buffer != 0) { _glDeleteBuffers(1, &gtc->_buffer); } delete gtc; } /** * This method should only be called by the GraphicsEngine. Do not call it * directly; call GraphicsEngine::extract_texture_data() instead. * * This method will be called in the draw thread to download the texture * memory's image into its ram_image value. It returns true on success, false * otherwise. */ bool CLP(GraphicsStateGuardian):: extract_texture_data(Texture *tex) { bool success = true; // Make sure the error stack is cleared out before we begin. report_my_gl_errors(); int num_views = tex->get_num_views(); for (int view = 0; view < num_views; ++view) { TextureContext *tc = tex->prepare_now(view, get_prepared_objects(), this); nassertr(tc != nullptr, false); CLP(TextureContext) *gtc = DCAST(CLP(TextureContext), tc); if (!do_extract_texture_data(gtc)) { success = false; } } return success; } #ifndef OPENGLES_1 /** * Creates whatever structures the GSG requires to represent the sampler state * internally, and returns a newly-allocated SamplerContext object with this * data. It is the responsibility of the calling function to later call * release_sampler() with this same pointer (which will also delete the * pointer). * * This function should not be called directly to prepare a sampler object. * Instead, call SamplerState::prepare(). 
*/
SamplerContext *CLP(GraphicsStateGuardian)::
prepare_sampler(const SamplerState &sampler) {
  nassertr(_supports_sampler_objects, nullptr);
  PStatGPUTimer timer(this, _prepare_sampler_pcollector);

  CLP(SamplerContext) *gsc = new CLP(SamplerContext)(this, sampler);
  GLuint index = gsc->_index;

  // Sampler contexts are immutable in Panda, so might as well just initialize
  // all the settings here.
  _glSamplerParameteri(index, GL_TEXTURE_WRAP_S,
                       get_texture_wrap_mode(sampler.get_wrap_u()));
  _glSamplerParameteri(index, GL_TEXTURE_WRAP_T,
                       get_texture_wrap_mode(sampler.get_wrap_v()));
  _glSamplerParameteri(index, GL_TEXTURE_WRAP_R,
                       get_texture_wrap_mode(sampler.get_wrap_w()));

#ifndef OPENGLES
#ifdef STDFLOAT_DOUBLE
  // GL wants floats; convert from Panda's double-precision build.
  LVecBase4f fvalue = LCAST(float, sampler.get_border_color());
  _glSamplerParameterfv(index, GL_TEXTURE_BORDER_COLOR, fvalue.get_data());
#else
  _glSamplerParameterfv(index, GL_TEXTURE_BORDER_COLOR,
                        sampler.get_border_color().get_data());
#endif
#endif  // OPENGLES

  SamplerState::FilterType minfilter = sampler.get_effective_minfilter();
  SamplerState::FilterType magfilter = sampler.get_effective_magfilter();
  bool uses_mipmaps = SamplerState::is_mipmap(minfilter) && !gl_ignore_mipmaps;

#ifndef NDEBUG
  if (gl_force_mipmaps) {
    // Debug override: force trilinear mipmapping regardless of the sampler.
    minfilter = SamplerState::FT_linear_mipmap_linear;
    magfilter = SamplerState::FT_linear;
    uses_mipmaps = true;
  }
#endif

  _glSamplerParameteri(index, GL_TEXTURE_MIN_FILTER,
                       get_texture_filter_type(minfilter, !uses_mipmaps));
  _glSamplerParameteri(index, GL_TEXTURE_MAG_FILTER,
                       get_texture_filter_type(magfilter, true));

  // Set anisotropic filtering.
  if (_supports_anisotropy) {
    PN_stdfloat anisotropy = sampler.get_effective_anisotropic_degree();
    // Clamp to [1.0, max supported by the driver].
    anisotropy = min(anisotropy, _max_anisotropy);
    anisotropy = max(anisotropy, (PN_stdfloat)1.0);
    _glSamplerParameterf(index, GL_TEXTURE_MAX_ANISOTROPY_EXT, anisotropy);
  }

  if (_supports_shadow_filter) {
    if ((sampler.get_magfilter() == SamplerState::FT_shadow) ||
        (sampler.get_minfilter() == SamplerState::FT_shadow)) {
      // Enable depth-comparison mode for shadow-map sampling.
      _glSamplerParameteri(index, GL_TEXTURE_COMPARE_MODE_ARB, GL_COMPARE_R_TO_TEXTURE_ARB);
      _glSamplerParameteri(index, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL);
    } else {
      _glSamplerParameteri(index, GL_TEXTURE_COMPARE_MODE_ARB, GL_NONE);
      _glSamplerParameteri(index, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL);
    }
  }

  if (_supports_texture_lod) {
    _glSamplerParameterf(index, GL_TEXTURE_MIN_LOD, sampler.get_min_lod());
    _glSamplerParameterf(index, GL_TEXTURE_MAX_LOD, sampler.get_max_lod());
  }

#ifndef OPENGLES
  if (_supports_texture_lod_bias) {
    _glSamplerParameterf(index, GL_TEXTURE_LOD_BIAS, sampler.get_lod_bias());
  }
#endif

  gsc->enqueue_lru(&_prepared_objects->_sampler_object_lru);

  report_my_gl_errors();
  return gsc;
}
#endif  // !OPENGLES_1

#ifndef OPENGLES_1
/**
 * Frees the GL resources previously allocated for the sampler.  This function
 * should never be called directly; instead, call SamplerState::release().
 */
void CLP(GraphicsStateGuardian)::
release_sampler(SamplerContext *sc) {
  CLP(SamplerContext) *gsc = DCAST(CLP(SamplerContext), sc);

  if (gsc->_index != 0) {
    _glDeleteSamplers(1, &gsc->_index);
  }

  delete gsc;
}
#endif  // !OPENGLES_1

/**
 * Creates a new retained-mode representation of the given geom, and returns a
 * newly-allocated GeomContext pointer to reference it.  It is the
 * responsibility of the calling function to later call release_geom() with
 * this same pointer (which will also delete the pointer).
 *
 * This function should not be called directly to prepare a geom.  Instead,
 * call Geom::prepare().
*/
GeomContext *CLP(GraphicsStateGuardian)::
prepare_geom(Geom *geom) {
  PStatGPUTimer timer(this, _prepare_geom_pcollector);
  return new CLP(GeomContext)(geom);
}

/**
 * Frees the GL resources previously allocated for the geom.  This function
 * should never be called directly; instead, call Geom::release() (or simply
 * let the Geom destruct).
 */
void CLP(GraphicsStateGuardian)::
release_geom(GeomContext *gc) {
  CLP(GeomContext) *ggc = DCAST(CLP(GeomContext), gc);
  if (has_fixed_function_pipeline()) {
    // Display lists only exist in the fixed-function path.
    ggc->release_display_lists();
  }
  report_my_gl_errors();

  delete ggc;
}

/**
 * Compiles the given shader and returns a new ShaderContext for it, or
 * nullptr if the shader's language is not supported by this driver/build.
 */
ShaderContext *CLP(GraphicsStateGuardian)::
prepare_shader(Shader *se) {
  PStatGPUTimer timer(this, _prepare_shader_pcollector);

#ifndef OPENGLES_1
  ShaderContext *result = nullptr;

  switch (se->get_language()) {
  case Shader::SL_GLSL:
    if (_supports_glsl) {
      result = new CLP(ShaderContext)(this, se);
      break;
    } else {
      GLCAT.error()
        << "Tried to load GLSL shader, but GLSL shaders not supported.\n";
      return nullptr;
    }

  case Shader::SL_Cg:
#if defined(HAVE_CG) && !defined(OPENGLES)
    if (_supports_basic_shaders) {
      result = new CLP(CgShaderContext)(this, se);
      break;
    } else {
      GLCAT.error()
        << "Tried to load Cg shader, but basic shaders not supported.\n";
      return nullptr;
    }
#elif defined(OPENGLES)
    GLCAT.error()
      << "Tried to load Cg shader, but Cg support is not available for OpenGL ES.\n";
    return nullptr;
#else
    GLCAT.error()
      << "Tried to load Cg shader, but Cg support not compiled in.\n";
    return nullptr;
#endif

  default:
    GLCAT.error()
      << "Tried to load shader with unsupported shader language!\n";
    return nullptr;
  }

  if (result->valid()) {
    return result;
  }

  // The context was created but failed to compile/link; clean it up.
  delete result;
#endif  // OPENGLES_1

  return nullptr;
}

/**
 * Releases the resources held by the given shader context and deletes it.
 */
void CLP(GraphicsStateGuardian)::
release_shader(ShaderContext *sc) {
#ifndef OPENGLES_1
  if (sc->is_of_type(CLP(ShaderContext)::get_class_type())) {
    ((CLP(ShaderContext) *)sc)->release_resources();
  }
#if defined(HAVE_CG) && !defined(OPENGLES_2)
  else if (sc->is_of_type(CLP(CgShaderContext)::get_class_type())) {
    ((CLP(CgShaderContext) *)sc)->release_resources();
  }
#endif
#endif
  delete sc;
}

/**
 * This is intended to be called only from the GLGeomContext destructor.  It
 * saves the indicated display list index in the list to be deleted at the end
 * of the frame.
 */
void CLP(GraphicsStateGuardian)::
record_deleted_display_list(GLuint index) {
  // The list may be touched from multiple threads; guard it.
  LightMutexHolder holder(_lock);
  _deleted_display_lists.push_back(index);
}

/**
 * Creates a new retained-mode representation of the given data, and returns a
 * newly-allocated VertexBufferContext pointer to reference it.  It is the
 * responsibility of the calling function to later call
 * release_vertex_buffer() with this same pointer (which will also delete the
 * pointer).
 *
 * This function should not be called directly to prepare a buffer.  Instead,
 * call Geom::prepare().
 */
VertexBufferContext *CLP(GraphicsStateGuardian)::
prepare_vertex_buffer(GeomVertexArrayData *data) {
  if (_supports_buffers) {
    PStatGPUTimer timer(this, _prepare_vertex_buffer_pcollector);

    CLP(VertexBufferContext) *gvbc = new CLP(VertexBufferContext)(this, _prepared_objects, data);
    _glGenBuffers(1, &gvbc->_index);

    if (GLCAT.is_debug() && gl_debug_buffers) {
      GLCAT.debug()
        << "creating vertex buffer " << (int)gvbc->_index << ": "
        << data->get_num_rows() << " vertices "
        << *data->get_array_format() << "\n";
    }

    report_my_gl_errors();
    // Upload the initial data right away.
    update_vertex_buffer(gvbc, data->get_handle(), false);
    return gvbc;
  }

  // No buffer-object support; vertex data will be rendered from client
  // memory instead.
  return nullptr;
}

/**
 * Makes sure that the data in the vertex buffer is up-to-date.  This may bind
 * it to the GL_ARRAY_BUFFER binding point if necessary.
 */
bool CLP(GraphicsStateGuardian)::
update_vertex_buffer(CLP(VertexBufferContext) *gvbc,
                     const GeomVertexArrayDataHandle *reader, bool force) {
  nassertr(_supports_buffers, false);
  if (reader->get_modified() == UpdateSeq::initial()) {
    // No need to re-apply.
    return true;
  }

  gvbc->set_active(true);

  if (gvbc->was_modified(reader)) {
    int num_bytes = reader->get_data_size_bytes();
    if (GLCAT.is_debug() && gl_debug_buffers) {
      GLCAT.debug()
        << "copying " << num_bytes
        << " bytes into vertex buffer " << (int)gvbc->_index << "\n";
    }
    if (num_bytes != 0) {
      const unsigned char *client_pointer = reader->get_read_pointer(force);
      if (client_pointer == nullptr) {
        // Data not resident and force is false; try again later.
        return false;
      }

      PStatGPUTimer timer(this, _load_vertex_buffer_pcollector, reader->get_current_thread());

      if (_current_vbuffer_index != gvbc->_index) {
        if (GLCAT.is_spam() && gl_debug_buffers) {
          GLCAT.spam()
            << "binding vertex buffer " << (int)gvbc->_index << "\n";
        }
        _glBindBuffer(GL_ARRAY_BUFFER, gvbc->_index);
        _current_vbuffer_index = gvbc->_index;
      }

      if (gvbc->changed_size(reader) || gvbc->changed_usage_hint(reader)) {
        // Size or usage changed: reallocate the buffer store entirely.
        _glBufferData(GL_ARRAY_BUFFER, num_bytes, client_pointer,
                      get_usage(reader->get_usage_hint()));
      } else {
        // Same size: just overwrite the existing store.
        _glBufferSubData(GL_ARRAY_BUFFER, 0, num_bytes, client_pointer);
      }
      _data_transferred_pcollector.add_level(num_bytes);
    }
    gvbc->mark_loaded(reader);
  }

  gvbc->enqueue_lru(&_prepared_objects->_graphics_memory_lru);

  maybe_gl_finish();
  report_my_gl_errors();
  return true;
}

/**
 * Frees the GL resources previously allocated for the data.  This function
 * should never be called directly; instead, call Data::release() (or simply
 * let the Data destruct).
 */
void CLP(GraphicsStateGuardian)::
release_vertex_buffer(VertexBufferContext *vbc) {
  nassertv(_supports_buffers);

  CLP(VertexBufferContext) *gvbc = DCAST(CLP(VertexBufferContext), vbc);

  if (GLCAT.is_debug() && gl_debug_buffers) {
    GLCAT.debug()
      << "deleting vertex buffer " << (int)gvbc->_index << "\n";
  }

  // Make sure the buffer is unbound before we delete it.  Not strictly
  // necessary according to the OpenGL spec, but it might help out a flaky
  // driver, and we need to keep our internal state consistent anyway.
  if (_current_vbuffer_index == gvbc->_index) {
    if (GLCAT.is_spam() && gl_debug_buffers) {
      GLCAT.spam()
        << "unbinding vertex buffer\n";
    }
    _glBindBuffer(GL_ARRAY_BUFFER, 0);
    _current_vbuffer_index = 0;
  }

  _glDeleteBuffers(1, &gvbc->_index);
  report_my_gl_errors();

  gvbc->_index = 0;
  delete gvbc;
}

/**
 * Internal function to bind a buffer object for the indicated data array, if
 * appropriate, or to unbind a buffer object if it should be rendered from
 * client memory.
 *
 * If the buffer object is bound, this function sets client_pointer to NULL
 * (representing the start of the buffer object in server memory); if the
 * buffer object is not bound, this function sets client_pointer the pointer
 * to the data array in client memory, that is, the data array passed in.
 *
 * If force is not true, the function may return false indicating the data is
 * not currently available.
 */
bool CLP(GraphicsStateGuardian)::
setup_array_data(const unsigned char *&client_pointer,
                 const GeomVertexArrayDataHandle *array_reader, bool force) {
  if (!_supports_buffers) {
    // No support for buffer objects; always render from client.
    client_pointer = array_reader->get_read_pointer(force);
    return (client_pointer != nullptr);
  }
  if (!vertex_buffers || _geom_display_list != 0 ||
      array_reader->get_usage_hint() < gl_min_buffer_usage_hint) {
    // The array specifies client rendering only, or buffer objects are
    // configured off.
    if (_current_vbuffer_index != 0) {
      if (GLCAT.is_spam() && gl_debug_buffers) {
        GLCAT.spam()
          << "unbinding vertex buffer\n";
      }
      _glBindBuffer(GL_ARRAY_BUFFER, 0);
      _current_vbuffer_index = 0;
    }
    client_pointer = array_reader->get_read_pointer(force);
    return (client_pointer != nullptr);
  }

  // Prepare the buffer object and bind it.
  CLP(VertexBufferContext) *gvbc = DCAST(CLP(VertexBufferContext),
    array_reader->prepare_now(get_prepared_objects(), this));
  nassertr(gvbc != (CLP(VertexBufferContext) *)nullptr, false);
  if (!update_vertex_buffer(gvbc, array_reader, force)) {
    return false;
  }

  if (_current_vbuffer_index != gvbc->_index) {
    if (GLCAT.is_spam() && gl_debug_buffers) {
      GLCAT.spam()
        << "binding vertex buffer " << (int)gvbc->_index << "\n";
    }
    _glBindBuffer(GL_ARRAY_BUFFER, gvbc->_index);
    _current_vbuffer_index = gvbc->_index;
  }

  // NULL is the OpenGL convention for the first byte of the buffer object.
  client_pointer = nullptr;
  return true;
}

/**
 * Creates a new retained-mode representation of the given data, and returns a
 * newly-allocated IndexBufferContext pointer to reference it.  It is the
 * responsibility of the calling function to later call release_index_buffer()
 * with this same pointer (which will also delete the pointer).
 *
 * This function should not be called directly to prepare a buffer.  Instead,
 * call Geom::prepare().
 */
IndexBufferContext *CLP(GraphicsStateGuardian)::
prepare_index_buffer(GeomPrimitive *data) {
  if (_supports_buffers) {
    PStatGPUTimer timer(this, _prepare_index_buffer_pcollector);

    CLP(IndexBufferContext) *gibc = new CLP(IndexBufferContext)(this, _prepared_objects, data);
    _glGenBuffers(1, &gibc->_index);

    if (GLCAT.is_debug() && gl_debug_buffers) {
      GLCAT.debug()
        << "creating index buffer " << (int)gibc->_index << ": "
        << data->get_num_vertices() << " indices ("
        << data->get_vertices()->get_array_format()->get_column(0)->get_numeric_type()
        << ")\n";
    }

    report_my_gl_errors();
    // Upload the initial index data right away.
    GeomPrimitivePipelineReader reader(data, Thread::get_current_thread());
    apply_index_buffer(gibc, &reader, false);
    return gibc;
  }

  return nullptr;
}

/**
 * Makes the data the currently available data for rendering.
*/ bool CLP(GraphicsStateGuardian):: apply_index_buffer(IndexBufferContext *ibc, const GeomPrimitivePipelineReader *reader, bool force) { nassertr(_supports_buffers, false); if (reader->get_modified() == UpdateSeq::initial()) { // No need to re-apply. return true; } CLP(IndexBufferContext) *gibc = DCAST(CLP(IndexBufferContext), ibc); if (_current_ibuffer_index != gibc->_index) { if (GLCAT.is_spam() && gl_debug_buffers) { GLCAT.spam() << "binding index buffer " << (int)gibc->_index << "\n"; } _glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gibc->_index); _current_ibuffer_index = gibc->_index; gibc->set_active(true); } if (gibc->was_modified(reader)) { int num_bytes = reader->get_data_size_bytes(); if (GLCAT.is_debug() && gl_debug_buffers) { GLCAT.debug() << "copying " << num_bytes << " bytes into index buffer " << (int)gibc->_index << "\n"; } if (num_bytes != 0) { const unsigned char *client_pointer = reader->get_read_pointer(force); if (client_pointer == nullptr) { return false; } PStatGPUTimer timer(this, _load_index_buffer_pcollector, reader->get_current_thread()); if (gibc->changed_size(reader) || gibc->changed_usage_hint(reader)) { _glBufferData(GL_ELEMENT_ARRAY_BUFFER, num_bytes, client_pointer, get_usage(reader->get_usage_hint())); } else { _glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, num_bytes, client_pointer); } _data_transferred_pcollector.add_level(num_bytes); } gibc->mark_loaded(reader); } gibc->enqueue_lru(&_prepared_objects->_graphics_memory_lru); maybe_gl_finish(); report_my_gl_errors(); return true; } /** * Frees the GL resources previously allocated for the data. This function * should never be called directly; instead, call Data::release() (or simply * let the Data destruct). 
*/ void CLP(GraphicsStateGuardian):: release_index_buffer(IndexBufferContext *ibc) { nassertv(_supports_buffers); CLP(IndexBufferContext) *gibc = DCAST(CLP(IndexBufferContext), ibc); if (GLCAT.is_debug() && gl_debug_buffers) { GLCAT.debug() << "deleting index buffer " << (int)gibc->_index << "\n"; } // Make sure the buffer is unbound before we delete it. Not strictly // necessary according to the OpenGL spec, but it might help out a flaky // driver, and we need to keep our internal state consistent anyway. if (_current_ibuffer_index == gibc->_index) { if (GLCAT.is_spam() && gl_debug_buffers) { GLCAT.spam() << "unbinding index buffer\n"; } _glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); _current_ibuffer_index = 0; } _glDeleteBuffers(1, &gibc->_index); report_my_gl_errors(); gibc->_index = 0; delete gibc; } /** * Internal function to bind a buffer object for the indicated primitive's * index list, if appropriate, or to unbind a buffer object if it should be * rendered from client memory. * * If the buffer object is bound, this function sets client_pointer to NULL * (representing the start of the buffer object in server memory); if the * buffer object is not bound, this function sets client_pointer to to the * data array in client memory, that is, the data array passed in. * * If force is not true, the function may return false indicating the data is * not currently available. */ bool CLP(GraphicsStateGuardian):: setup_primitive(const unsigned char *&client_pointer, const GeomPrimitivePipelineReader *reader, bool force) { if (!_supports_buffers) { // No support for buffer objects; always render from client. client_pointer = reader->get_read_pointer(force); return (client_pointer != nullptr); } if (!vertex_buffers || _geom_display_list != 0 || reader->get_usage_hint() == Geom::UH_client) { // The array specifies client rendering only, or buffer objects are // configured off. 
if (_current_ibuffer_index != 0) { if (GLCAT.is_spam() && gl_debug_buffers) { GLCAT.spam() << "unbinding index buffer\n"; } _glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); _current_ibuffer_index = 0; } client_pointer = reader->get_read_pointer(force); return (client_pointer != nullptr); } // Prepare the buffer object and bind it. IndexBufferContext *ibc = reader->prepare_now(get_prepared_objects(), this); nassertr(ibc != nullptr, false); if (!apply_index_buffer(ibc, reader, force)) { return false; } // NULL is the OpenGL convention for the first byte of the buffer object. client_pointer = nullptr; return true; } #ifndef OPENGLES /** * Creates a new retained-mode representation of the given data, and returns a * newly-allocated BufferContext pointer to reference it. It is the * responsibility of the calling function to later call release_shader_buffer() * with this same pointer (which will also delete the pointer). * * This function should not be called directly to prepare a buffer. Instead, * call ShaderBuffer::prepare(). */ BufferContext *CLP(GraphicsStateGuardian):: prepare_shader_buffer(ShaderBuffer *data) { if (_supports_shader_buffers) { PStatGPUTimer timer(this, _prepare_shader_buffer_pcollector); CLP(BufferContext) *gbc = new CLP(BufferContext)(this, _prepared_objects, data); _glGenBuffers(1, &gbc->_index); if (GLCAT.is_debug() && gl_debug_buffers) { GLCAT.debug() << "creating shader buffer " << (int)gbc->_index << ": "<< *data << "\n"; } _glBindBuffer(GL_SHADER_STORAGE_BUFFER, gbc->_index); _current_sbuffer_index = gbc->_index; if (_use_object_labels) { string name = data->get_name(); _glObjectLabel(GL_BUFFER, gbc->_index, name.size(), name.data()); } // Some drivers require the buffer to be padded to 16 byte boundary. 
uint64_t num_bytes = (data->get_data_size_bytes() + 15u) & ~15u; if (_supports_buffer_storage) { _glBufferStorage(GL_SHADER_STORAGE_BUFFER, num_bytes, data->get_initial_data(), 0); } else { _glBufferData(GL_SHADER_STORAGE_BUFFER, num_bytes, data->get_initial_data(), get_usage(data->get_usage_hint())); } gbc->enqueue_lru(&_prepared_objects->_graphics_memory_lru); report_my_gl_errors(); return gbc; } return nullptr; } /** * Binds the given shader buffer to the given binding slot. */ void CLP(GraphicsStateGuardian):: apply_shader_buffer(GLuint base, ShaderBuffer *buffer) { GLuint index = 0; if (buffer != nullptr) { BufferContext *bc = buffer->prepare_now(get_prepared_objects(), this); if (bc != nullptr) { CLP(BufferContext) *gbc = DCAST(CLP(BufferContext), bc); index = gbc->_index; gbc->set_active(true); } } if (base >= _current_sbuffer_base.size()) { _current_sbuffer_base.resize(base + 1, 0); } if (_current_sbuffer_base[base] != index) { if (GLCAT.is_spam() && gl_debug_buffers) { GLCAT.spam() << "binding shader buffer " << (int)index << " to index " << base << "\n"; } _glBindBufferBase(GL_SHADER_STORAGE_BUFFER, base, index); _current_sbuffer_base[base] = index; _current_sbuffer_index = index; report_my_gl_errors(); } } /** * Frees the GL resources previously allocated for the data. This function * should never be called directly; instead, call Data::release() (or simply * let the Data destruct). */ void CLP(GraphicsStateGuardian):: release_shader_buffer(BufferContext *bc) { nassertv(_supports_buffers); CLP(BufferContext) *gbc = DCAST(CLP(BufferContext), bc); if (GLCAT.is_debug() && gl_debug_buffers) { GLCAT.debug() << "deleting shader buffer " << (int)gbc->_index << "\n"; } // Make sure the buffer is unbound before we delete it. Not strictly // necessary according to the OpenGL spec, but it might help out a flaky // driver, and we need to keep our internal state consistent anyway. 
  if (_current_sbuffer_index == gbc->_index) {
    if (GLCAT.is_spam() && gl_debug_buffers) {
      GLCAT.spam() << "unbinding shader buffer\n";
    }
    _glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
    _current_sbuffer_index = 0;
  }

  _glDeleteBuffers(1, &gbc->_index);
  report_my_gl_errors();

  gbc->_index = 0;

  delete gbc;
}
#endif

#ifndef OPENGLES
/**
 * Begins a new occlusion query.  After this call, you may call
 * begin_draw_primitives() and draw_triangles()/draw_whatever() repeatedly.
 * Eventually, you should call end_occlusion_query() before the end of the
 * frame; that will return a new OcclusionQueryContext object that will tell
 * you how many pixels represented by the bracketed geometry passed the depth
 * test.
 *
 * It is not valid to call begin_occlusion_query() between another
 * begin_occlusion_query() .. end_occlusion_query() sequence.
 */
void CLP(GraphicsStateGuardian)::
begin_occlusion_query() {
  nassertv(_supports_occlusion_query);
  // Nesting occlusion queries is not allowed.
  nassertv(_current_occlusion_query == nullptr);
  PT(CLP(OcclusionQueryContext)) query = new CLP(OcclusionQueryContext)(this);

  _glGenQueries(1, &query->_index);

  if (GLCAT.is_debug()) {
    GLCAT.debug()
      << "beginning occlusion query index " << (int)query->_index << "\n";
  }

  _glBeginQuery(GL_SAMPLES_PASSED, query->_index);
  _current_occlusion_query = query;

  report_my_gl_errors();
}
#endif  // !OPENGLES

#ifndef OPENGLES
/**
 * Ends a previous call to begin_occlusion_query().  This call returns the
 * OcclusionQueryContext object that will (eventually) report the number of
 * pixels that passed the depth test between the call to
 * begin_occlusion_query() and end_occlusion_query().
 */
PT(OcclusionQueryContext) CLP(GraphicsStateGuardian)::
end_occlusion_query() {
  nassertr(_current_occlusion_query != nullptr, nullptr);
  PT(OcclusionQueryContext) result = _current_occlusion_query;

  GLuint index = DCAST(CLP(OcclusionQueryContext), result)->_index;

  if (GLCAT.is_debug()) {
    GLCAT.debug()
      << "ending occlusion query index " << (int)index << "\n";
  }

  _current_occlusion_query = nullptr;
  _glEndQuery(GL_SAMPLES_PASSED);

  // Temporary hack to try working around an apparent driver bug on iMacs.
  // Occlusion queries sometimes incorrectly report 0 samples, unless we stall
  // the pipe to keep fewer than a certain maximum number of queries pending
  // at once.
  static ConfigVariableInt limit_occlusion_queries("limit-occlusion-queries", 0);
  if (limit_occlusion_queries > 0) {
    if (index > (unsigned int)limit_occlusion_queries) {
      PStatGPUTimer timer(this, _wait_occlusion_pcollector);
      // NOTE(review): this inner 'result' intentionally shadows the query
      // pointer above; the value read back is discarded -- the call exists
      // only to stall the pipeline.  Consider renaming to avoid confusion.
      GLuint result;
      _glGetQueryObjectuiv(index - (unsigned int)limit_occlusion_queries, GL_QUERY_RESULT, &result);
    }
  }

  report_my_gl_errors();
  return result;
}
#endif  // !OPENGLES

/**
 * Adds a timer query to the command stream, associated with the given PStats
 * collector index.
 */
PT(TimerQueryContext) CLP(GraphicsStateGuardian)::
issue_timer_query(int pstats_index) {
#if defined(DO_PSTATS) && !defined(OPENGLES)
  nassertr(_supports_timer_query, nullptr);
  PT(CLP(TimerQueryContext)) query;

  // Hack
  if (pstats_index == _command_latency_pcollector.get_index()) {
    query = new CLP(LatencyQueryContext)(this, pstats_index);
  } else {
    query = new CLP(TimerQueryContext)(this, pstats_index);
  }

  // Reuse a previously deleted query object if one is available, otherwise
  // generate a fresh one.
  if (_deleted_queries.size() >= 1) {
    query->_index = _deleted_queries.back();
    _deleted_queries.pop_back();
  } else {
    _glGenQueries(1, &query->_index);

    if (GLCAT.is_spam()) {
      GLCAT.spam() << "Generating query for " << pstats_index
                   << ": " << query->_index << "\n";
    }
  }

  // Issue the timestamp query.
  _glQueryCounter(query->_index, GL_TIMESTAMP);

  if (_use_object_labels) {
    // Assign a label to it based on the PStatCollector name.
    const PStatClient *client = PStatClient::get_global_pstats();
    string name = client->get_collector_fullname(pstats_index & 0x7fff);
    _glObjectLabel(GL_QUERY, query->_index, name.size(), name.data());
  }

  _pending_timer_queries.push_back((TimerQueryContext *)query);

  return (TimerQueryContext *)query;
#else
  return nullptr;
#endif
}

#ifndef OPENGLES_1
/**
 * Dispatches a currently bound compute shader using the given work group
 * counts.
 */
void CLP(GraphicsStateGuardian)::
dispatch_compute(int num_groups_x, int num_groups_y, int num_groups_z) {
  maybe_gl_finish();

  PStatGPUTimer timer(this, _compute_dispatch_pcollector);
  nassertv(_supports_compute_shaders);
  nassertv(_current_shader_context != nullptr);
  _glDispatchCompute(num_groups_x, num_groups_y, num_groups_z);

  maybe_gl_finish();
}
#endif  // !OPENGLES_1

/**
 * Creates a new GeomMunger object to munge vertices appropriate to this GSG
 * for the indicated state.
 */
PT(GeomMunger) CLP(GraphicsStateGuardian)::
make_geom_munger(const RenderState *state, Thread *current_thread) {
  PT(CLP(GeomMunger)) munger = new CLP(GeomMunger)(this, state);
  return GeomMunger::register_munger(munger, current_thread);
}

/**
 * Copy the pixels within the indicated display region from the framebuffer
 * into texture memory.
 *
 * If z > -1, it is the cube map index or layer index into which to copy.
 */
bool CLP(GraphicsStateGuardian)::
framebuffer_copy_to_texture(Texture *tex, int view, int z,
                            const DisplayRegion *dr, const RenderBuffer &rb) {
  nassertr(tex != nullptr && dr != nullptr, false);
  set_read_buffer(rb._buffer_type);
  clear_color_write_mask();

  int xo, yo, w, h;
  dr->get_region_pixels(xo, yo, w, h);
  tex->set_size_padded(w, h, tex->get_z_size());

  if (tex->get_compression() == Texture::CM_default) {
    // Unless the user explicitly turned on texture compression, turn it off
    // for the copy-to-texture case.
    tex->set_compression(Texture::CM_off);
  }

  // Sanity check everything.
  if (z >= 0) {
    if (z >= tex->get_z_size()) {
      // This can happen, when textures with different layer counts are
      // attached to a buffer.  We simply ignore this if it happens.
      return false;
    }

    if ((w != tex->get_x_size()) || (h != tex->get_y_size())) {
      return false;
    }

    if (tex->get_texture_type() == Texture::TT_cube_map) {
      if (!_supports_cube_map) {
        return false;
      }
      nassertr(z < 6, false);
      // Cube map faces must be square.
      if (w != h) {
        return false;
      }
    } else if (tex->get_texture_type() == Texture::TT_3d_texture) {
      if (!_supports_3d_texture) {
        return false;
      }
    } else if (tex->get_texture_type() == Texture::TT_2d_texture_array) {
      if (!_supports_2d_texture_array) {
        return false;
      }
    } else {
      // NOTE(review): this error branch falls through and continues the copy
      // rather than returning false -- presumably deliberate best-effort, but
      // worth confirming.
      GLCAT.error()
        << "Don't know how to copy framebuffer to texture " << *tex << "\n";
    }
  } else {
    nassertr(tex->get_texture_type() == Texture::TT_2d_texture, false);
  }

  // Match framebuffer format if necessary.
  if (tex->get_match_framebuffer_format()) {

    switch (tex->get_format()) {
    case Texture::F_depth_component:
    case Texture::F_depth_component16:
    case Texture::F_depth_component24:
    case Texture::F_depth_component32:
    case Texture::F_depth_stencil:
      // Don't remap if we're one of these special format.
      break;

    default:
      // If the texture is a color format, we want to match the presence of
      // sRGB and alpha according to the framebuffer.
      if (_current_properties->get_srgb_color()) {
        if (_current_properties->get_alpha_bits()) {
          tex->set_format(Texture::F_srgb_alpha);
        } else {
          tex->set_format(Texture::F_srgb);
        }
      } else {
        if (_current_properties->get_alpha_bits()) {
          tex->set_format(Texture::F_rgba);
        } else {
          tex->set_format(Texture::F_rgb);
        }
      }
    }
  }

  TextureContext *tc = tex->prepare_now(view, get_prepared_objects(), this);
  nassertr(tc != nullptr, false);
  CLP(TextureContext) *gtc = DCAST(CLP(TextureContext), tc);

  apply_texture(gtc);
  bool needs_reload = specify_texture(gtc, tex->get_default_sampler());

  GLenum target = get_texture_target(tex->get_texture_type());
  GLint internal_format = get_internal_image_format(tex);
  int width = tex->get_x_size();
  int height = tex->get_y_size();
  int depth = tex->get_z_size();

  bool uses_mipmaps = tex->uses_mipmaps() && !gl_ignore_mipmaps;
  if (uses_mipmaps) {
    if (_supports_generate_mipmap) {
#ifndef OPENGLES_2
      // Fall back to the legacy GL_GENERATE_MIPMAP texture parameter when the
      // glGenerateMipmap entry point is unavailable.
      if (_glGenerateMipmap == nullptr) {
        glTexParameteri(target, GL_GENERATE_MIPMAP, true);
      }
#endif
    } else {
      // If we can't auto-generate mipmaps, do without mipmaps.
      glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
      uses_mipmaps = false;
    }
  }

  bool new_image = needs_reload || gtc->was_image_modified();

  if (z >= 0) {
    if (target == GL_TEXTURE_CUBE_MAP) {
      // Copy to a cube map face, which is treated as a 2D texture.
      target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + z;
      depth = 1;
      z = -1;

      // Cube map faces seem to have trouble with CopyTexSubImage, so we
      // always reload the image.
      new_image = true;
    }
  }

  if (!gtc->_has_storage ||
      internal_format != gtc->_internal_format ||
      uses_mipmaps != gtc->_uses_mipmaps ||
      width != gtc->_width ||
      height != gtc->_height ||
      depth != gtc->_depth) {
    // If the texture properties have changed, we need to reload the image.
    new_image = true;
  }

  if (new_image && gtc->_immutable) {
    // Immutable storage cannot be respecified; drop and recreate the texture
    // object first.
    gtc->reset_data();
    glBindTexture(target, gtc->_index);
    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "glBindTexture(0x" << hex << target << dec << ", " << gtc->_index
        << "): " << *tex << "\n";
    }
  }

#ifndef OPENGLES_1
  if (gtc->needs_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT)) {
    // Make sure that any incoherent writes to this texture have been synced.
    issue_memory_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
  }
#endif

  if (z >= 0) {
#ifndef OPENGLES_1
    if (new_image) {
      // These won't be used because we pass a NULL image, but we still have
      // to specify them.  Might as well use the actual values.
      GLint external_format = get_external_image_format(tex);
      GLint component_type = get_component_type(tex->get_component_type());
      _glTexImage3D(target, 0, internal_format, width, height, depth, 0,
                    external_format, component_type, nullptr);
    }

    _glCopyTexSubImage3D(target, 0, 0, 0, z, xo, yo, w, h);
#endif
  } else {
    if (new_image) {
      // We have to create a new image.  It seems that OpenGL accepts a size
      // higher than the framebuffer, but if we run into trouble we'll have to
      // replace this with something smarter.
      glCopyTexImage2D(target, 0, internal_format, xo, yo, width, height, 0);
    } else {
      // We can overlay the existing image.
      glCopyTexSubImage2D(target, 0, 0, 0, xo, yo, w, h);
    }
  }

  if (uses_mipmaps && _glGenerateMipmap != nullptr) {
    glEnable(target);
    _glGenerateMipmap(target);
    glDisable(target);
  }

  // Record the new storage properties in the context.
  gtc->_has_storage = true;
  gtc->_uses_mipmaps = uses_mipmaps;
  gtc->_internal_format = internal_format;
  gtc->_width = width;
  gtc->_height = height;
  gtc->_depth = depth;

  gtc->mark_loaded();
  gtc->enqueue_lru(&_prepared_objects->_graphics_memory_lru);

  report_my_gl_errors();

  // Force reload of texture state, since we've just monkeyed with it.
  _state_mask.clear_bit(TextureAttrib::get_class_slot());

  return true;
}

/**
 * Copy the pixels within the indicated display region from the framebuffer
 * into system memory, not texture memory.
 * Returns true on success, false on failure.
 *
 * This completely redefines the ram image of the indicated texture.
 */
bool CLP(GraphicsStateGuardian)::
framebuffer_copy_to_ram(Texture *tex, int view, int z,
                        const DisplayRegion *dr, const RenderBuffer &rb) {
  nassertr(tex != nullptr && dr != nullptr, false);
  set_read_buffer(rb._buffer_type);
  glPixelStorei(GL_PACK_ALIGNMENT, 1);
  clear_color_write_mask();

  // Bug fix for RE, RE2, and VTX - need to disable texturing in order for
  // glReadPixels() to work NOTE: reading the depth buffer is *much* slower
  // than reading the color buffer
  set_state_and_transform(RenderState::make_empty(), _internal_transform);

  int xo, yo, w, h;
  dr->get_region_pixels(xo, yo, w, h);

  // Derive the component type and format to read back, based on the actual
  // framebuffer properties rather than whatever the texture was set up with.
  Texture::ComponentType component_type = tex->get_component_type();
  Texture::Format format = tex->get_format();

  switch (format) {
  case Texture::F_depth_stencil:
    if (_current_properties->get_float_depth()) {
      component_type = Texture::T_float;
    } else {
      component_type = Texture::T_unsigned_int_24_8;
    }
    break;

  case Texture::F_depth_component:
    // Pick the narrowest component type that can hold the depth bits.
    if (_current_properties->get_float_depth()) {
      component_type = Texture::T_float;
    } else if (_current_properties->get_depth_bits() <= 8) {
      component_type = Texture::T_unsigned_byte;
    } else if (_current_properties->get_depth_bits() <= 16) {
      component_type = Texture::T_unsigned_short;
    } else {
      component_type = Texture::T_float;
    }
    break;

  case Texture::F_depth_component16:
    component_type = Texture::T_unsigned_short;
    break;

  case Texture::F_depth_component24:
  case Texture::F_depth_component32:
    component_type = Texture::T_float;
    break;

  default:
    // Color formats: match sRGB, float-ness and channel count of the
    // framebuffer.
    if (_current_properties->get_srgb_color()) {
      if (_current_properties->get_alpha_bits()) {
        format = Texture::F_srgb_alpha;
      } else {
        format = Texture::F_srgb;
      }
    } else if (_current_properties->get_float_color()) {
      if (_current_properties->get_alpha_bits()) {
        format = Texture::F_rgba32;
      } else if (_current_properties->get_blue_bits()) {
        format = Texture::F_rgb32;
      } else if (_current_properties->get_green_bits()) {
        format = Texture::F_rg32;
      } else {
        format = Texture::F_r32;
      }
    } else {
      if (_current_properties->get_alpha_bits()) {
        format = Texture::F_rgba;
      } else {
        format = Texture::F_rgb;
      }
    }

    if (_current_properties->get_float_color()) {
      component_type = Texture::T_float;
    } else if (_current_properties->get_color_bits() <= 24 &&
               _current_properties->get_red_bits() <= 8 &&
               _current_properties->get_green_bits() <= 8 &&
               _current_properties->get_blue_bits() <= 8 &&
               _current_properties->get_alpha_bits() <= 8) {
      component_type = Texture::T_unsigned_byte;
    } else {
      component_type = Texture::T_unsigned_short;
    }
  }

  Texture::TextureType texture_type;
  int z_size;
  // TODO: should be extended to support 3D textures, 2D arrays and cube map
  // arrays.
  if (z >= 0) {
    texture_type = Texture::TT_cube_map;
    z_size = 6;
  } else {
    texture_type = Texture::TT_2d_texture;
    z_size = 1;
  }

  if (tex->get_x_size() != w || tex->get_y_size() != h ||
      tex->get_z_size() != z_size ||
      tex->get_component_type() != component_type ||
      tex->get_format() != format ||
      tex->get_texture_type() != texture_type) {

    // Re-setup the texture; its properties have changed.
    tex->setup_texture(texture_type, w, h, z_size, component_type, format);
  }

  nassertr(z < tex->get_z_size(), false);

  GLenum external_format = get_external_image_format(tex);

  if (GLCAT.is_spam()) {
    // Spell out the glReadPixels arguments for debugging.
    GLCAT.spam()
      << "glReadPixels(" << xo << ", " << yo << ", " << w << ", " << h << ", ";
    switch (external_format) {
    case GL_DEPTH_COMPONENT:
      GLCAT.spam(false) << "GL_DEPTH_COMPONENT, ";
      break;
    case GL_DEPTH_STENCIL:
      GLCAT.spam(false) << "GL_DEPTH_STENCIL, ";
      break;
#ifndef OPENGLES_1
    case GL_RG:
      GLCAT.spam(false) << "GL_RG, ";
      break;
#endif
    case GL_RGB:
      GLCAT.spam(false) << "GL_RGB, ";
      break;
    case GL_RGBA:
      GLCAT.spam(false) << "GL_RGBA, ";
      break;
#ifndef OPENGLES
    case GL_BGR:
      GLCAT.spam(false) << "GL_BGR, ";
      break;
#endif
    case GL_BGRA:
      GLCAT.spam(false) << "GL_BGRA, ";
      break;
    default:
      GLCAT.spam(false) << "unknown, ";
      break;
    }
    switch (get_component_type(component_type)) {
    case GL_UNSIGNED_BYTE:
      GLCAT.spam(false) << "GL_UNSIGNED_BYTE";
      break;
    case GL_UNSIGNED_SHORT:
      GLCAT.spam(false) << "GL_UNSIGNED_SHORT";
      break;
    case GL_FLOAT:
      GLCAT.spam(false) << "GL_FLOAT";
      break;
#ifndef OPENGLES_1
    case GL_INT:
      GLCAT.spam(false) << "GL_INT";
      break;
#endif
    default:
      GLCAT.spam(false) << "unknown";
      break;
    }
    GLCAT.spam(false) << ")" << endl;
  }

  unsigned char *image_ptr = tex->modify_ram_image();
  size_t image_size = tex->get_ram_image_size();
  if (z >= 0 || view > 0) {
    // Offset into the correct page/view within the ram image.
    image_size = tex->get_expected_ram_page_size();
    if (z >= 0) {
      image_ptr += z * image_size;
    }
    if (view > 0) {
      image_ptr += (view * tex->get_z_size()) * image_size;
    }
  }

  glReadPixels(xo, yo, w, h, external_format,
               get_component_type(component_type), image_ptr);

  // We may have to reverse the byte ordering of the image if GL didn't do it
  // for us.
  if (external_format == GL_RGBA || external_format == GL_RGB) {
    PTA_uchar new_image;
    const unsigned char *result =
      fix_component_ordering(new_image, image_ptr, image_size,
                             external_format, tex);
    if (result != image_ptr) {
      memcpy(image_ptr, result, image_size);
    }
  }

  report_my_gl_errors();
  return true;
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Sends the indicated Fog's parameters (mode, range or density, and color)
 * to the fixed-function pipeline.
 */
void CLP(GraphicsStateGuardian)::
apply_fog(Fog *fog) {
  Fog::Mode fmode = fog->get_mode();
  glFogf(GL_FOG_MODE, get_fog_mode_type(fmode));

  if (fmode == Fog::M_linear) {
    PN_stdfloat onset, opaque;
    fog->get_linear_range(onset, opaque);
    glFogf(GL_FOG_START, onset);
    glFogf(GL_FOG_END, opaque);

  } else {
    // Exponential fog is always camera-relative.
    glFogf(GL_FOG_DENSITY, fog->get_exp_density());
  }

  call_glFogfv(GL_FOG_COLOR, fog->get_color());
  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Sends the indicated transform matrix to the graphics API to be applied to
 * future vertices.
 *
 * This transform is the internal_transform, already converted into the GSG's
 * internal coordinate system.
 */
void CLP(GraphicsStateGuardian)::
do_issue_transform() {
#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    // OpenGL ES 2 does not support glLoadMatrix.
    const TransformState *transform = _internal_transform;
    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "glLoadMatrix(GL_MODELVIEW): " << transform->get_mat() << endl;
    }

    DO_PSTATS_STUFF(_transform_state_pcollector.add_level(1));
    glMatrixMode(GL_MODELVIEW);
    call_glLoadMatrix(transform->get_mat());
  }
#endif
  _transform_stale = false;

  report_my_gl_errors();
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Applies the current ShadeModelAttrib to the fixed-function pipeline,
 * updating the cached _flat_shade_model flag accordingly.
 */
void CLP(GraphicsStateGuardian)::
do_issue_shade_model() {
  const ShadeModelAttrib *target_shade_model;
  _target_rs->get_attrib_def(target_shade_model);

  switch (target_shade_model->get_mode()) {
  case ShadeModelAttrib::M_smooth:
    glShadeModel(GL_SMOOTH);
    _flat_shade_model = false;
    break;

  case ShadeModelAttrib::M_flat:
    glShadeModel(GL_FLAT);
    _flat_shade_model = true;
    break;
  }
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifndef OPENGLES_1
/**
 * Called when the current ShaderAttrib state has changed.
 */
void CLP(GraphicsStateGuardian)::
do_issue_shader() {
  PStatTimer timer(_draw_set_state_shader_pcollector);

  ShaderContext *context = 0;
  Shader *shader = (Shader *)_target_shader->get_shader();

  // If we don't have a shader, apply the default shader.
  if (!has_fixed_function_pipeline() && !shader) {
    shader = _default_shader;
    nassertv(shader != nullptr);
  }

  if (shader) {
    // Reuse the current context when the shader hasn't changed.
    if (_current_shader != shader) {
      context = shader->prepare_now(get_prepared_objects(), this);
    } else {
      context = _current_shader_context;
    }
  }

  // If it failed, try applying the default shader.
  if (_default_shader != nullptr && shader != _default_shader &&
      (context == 0 || !context->valid())) {
    shader = _default_shader;
    nassertv(shader != nullptr);
    if (_current_shader != shader) {
      context = shader->prepare_now(get_prepared_objects(), this);
    } else {
      context = _current_shader_context;
    }
  }

  if (context == 0 || !context->valid()) {
    // No usable shader at all; unbind whatever is current.
    if (_current_shader_context != 0) {
      _current_shader_context->unbind();
      _current_shader = 0;
      _current_shader_context = 0;
    }
  } else {
    if (context != _current_shader_context) {
      // Use a completely different shader than before.  Unbind old shader,
      // bind the new one.
      if (_current_shader_context != nullptr &&
          _current_shader->get_language() != shader->get_language()) {
        // If it's a different type of shader, make sure to unbind the old.
        _current_shader_context->unbind();
      }
      context->bind();
      _current_shader = shader;
    }

    // Bind the shader storage buffers.
    context->update_shader_buffer_bindings(_current_shader_context);

    _current_shader_context = context;
  }

#ifndef OPENGLES
  // Is the point size provided by the shader or by OpenGL?
  bool shader_point_size = _target_shader->get_flag(ShaderAttrib::F_shader_point_size);
  if (shader_point_size != _shader_point_size) {
    if (shader_point_size) {
      glEnable(GL_PROGRAM_POINT_SIZE);
    } else {
      glDisable(GL_PROGRAM_POINT_SIZE);
    }
    _shader_point_size = shader_point_size;
  }
#endif

  report_my_gl_errors();
}
#endif  // !OPENGLES_1

/**
 * Applies the current RenderModeAttrib: polygon fill mode (where supported)
 * plus line width and point size.
 */
void CLP(GraphicsStateGuardian)::
do_issue_render_mode() {
  const RenderModeAttrib *target_render_mode;
  _target_rs->get_attrib_def(target_render_mode);

  _render_mode = target_render_mode->get_mode();
  PN_stdfloat thickness = target_render_mode->get_thickness();
  _point_perspective = target_render_mode->get_perspective();

#ifndef OPENGLES
  // glPolygonMode not supported by OpenGL ES.
  switch (_render_mode) {
  case RenderModeAttrib::M_unchanged:
  case RenderModeAttrib::M_filled:
  case RenderModeAttrib::M_filled_flat:
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    break;

  case RenderModeAttrib::M_wireframe:
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
    break;

  case RenderModeAttrib::M_point:
    glPolygonMode(GL_FRONT_AND_BACK, GL_POINT);
    break;

  default:
    GLCAT.error()
      << "Unknown render mode " << (int)_render_mode << endl;
  }
#endif  // OPENGLES

  // The thickness affects both the line width and the point size.
  if (thickness != _point_size) {
    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "setting thickness to " << thickness << "\n";
    }
    // Clamp to the maximum line width supported by the implementation.
    glLineWidth(std::min((GLfloat)thickness, _max_line_width));
#ifndef OPENGLES_2
    glPointSize(thickness);
#endif
    _point_size = thickness;
  }
  report_my_gl_errors();

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    do_point_size();
  }
#endif
}

/**
 * Applies the current AntialiasAttrib: chooses between auto, multisample,
 * and per-primitive smoothing, and sets the matching quality hints.
 */
void CLP(GraphicsStateGuardian)::
do_issue_antialias() {
  const AntialiasAttrib *target_antialias;
  _target_rs->get_attrib_def(target_antialias);

  if (target_antialias->get_mode_type() == AntialiasAttrib::M_auto) {
    // In this special mode, we must enable antialiasing on a case-by-case
    // basis, because we enable it differently for polygons and for points and
    // lines.
    _auto_antialias_mode = true;

  } else {
    // Otherwise, explicitly enable or disable according to the bits that are
    // set.  But if multisample is requested and supported, don't use the
    // other bits at all (they will be ignored by GL anyway).
    _auto_antialias_mode = false;
    unsigned short mode = target_antialias->get_mode();

    if (_supports_multisample &&
        (mode & AntialiasAttrib::M_multisample) != 0) {
      enable_multisample_antialias(true);

    } else {
      enable_multisample_antialias(false);
      enable_line_smooth((mode & AntialiasAttrib::M_line) != 0);
      enable_point_smooth((mode & AntialiasAttrib::M_point) != 0);
      enable_polygon_smooth((mode & AntialiasAttrib::M_polygon) != 0);
    }
  }

#ifndef OPENGLES_2
  // Translate the requested quality into a GL hint value.
  GLenum quality;
  switch (target_antialias->get_mode_quality()) {
  case AntialiasAttrib::M_faster:
    quality = GL_FASTEST;
    break;

  case AntialiasAttrib::M_better:
    quality = GL_NICEST;
    break;

  default:
    quality = GL_DONT_CARE;
    break;
  }

  if (_line_smooth_enabled) {
    glHint(GL_LINE_SMOOTH_HINT, quality);
  }
  if (_point_smooth_enabled) {
    glHint(GL_POINT_SMOOTH_HINT, quality);
  }
#ifndef OPENGLES
  if (_polygon_smooth_enabled) {
    glHint(GL_POLYGON_SMOOTH_HINT, quality);
  }
#endif
#endif  // !OPENGLES_2

  report_my_gl_errors();
}

#ifdef SUPPORT_FIXED_FUNCTION
// OpenGL ES 2.0 doesn't support rescaling normals.
/**
 * Applies the current RescaleNormalAttrib, choosing between GL_NORMALIZE and
 * the cheaper GL_RESCALE_NORMAL where the driver supports it.
 */
void CLP(GraphicsStateGuardian)::
do_issue_rescale_normal() {
  RescaleNormalAttrib::Mode mode = RescaleNormalAttrib::M_none;
  const RescaleNormalAttrib *target_rescale_normal;
  if (_target_rs->get_attrib(target_rescale_normal)) {
    mode = target_rescale_normal->get_mode();
  }

  switch (mode) {
  case RescaleNormalAttrib::M_none:
    glDisable(GL_NORMALIZE);
    if (_supports_rescale_normal && support_rescale_normal) {
      glDisable(GL_RESCALE_NORMAL);
    }
    break;

  case RescaleNormalAttrib::M_rescale:
    if (_supports_rescale_normal && support_rescale_normal) {
      glEnable(GL_RESCALE_NORMAL);
      glDisable(GL_NORMALIZE);
    } else {
      // Fall back to full normalization if rescale isn't available.
      glEnable(GL_NORMALIZE);
    }
    break;

  case RescaleNormalAttrib::M_normalize:
    glEnable(GL_NORMALIZE);
    if (_supports_rescale_normal && support_rescale_normal) {
      glDisable(GL_RESCALE_NORMAL);
    }
    break;

  default:
    GLCAT.error()
      << "Unknown rescale_normal mode " << (int)mode << endl;
  }
  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

// PandaCompareFunc - 1 + 0x200 === GL_NEVER, etc.  order is sequential
#define PANDA_TO_GL_COMPAREFUNC(PANDACMPFUNC) (PANDACMPFUNC-1 +0x200)

/**
 * Applies the current DepthTestAttrib, enabling the depth test and selecting
 * the comparison function, or disabling it entirely.
 */
void CLP(GraphicsStateGuardian)::
do_issue_depth_test() {
  const DepthTestAttrib *target_depth_test;
  _target_rs->get_attrib_def(target_depth_test);

  DepthTestAttrib::PandaCompareFunc mode = target_depth_test->get_mode();
  if (mode == DepthTestAttrib::M_none) {
    enable_depth_test(false);
  } else {
    enable_depth_test(true);
    glDepthFunc(PANDA_TO_GL_COMPAREFUNC(mode));
  }
  report_my_gl_errors();
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Applies the current AlphaTestAttrib to the fixed-function alpha test,
 * unless a shader has subsumed the test.
 */
void CLP(GraphicsStateGuardian)::
do_issue_alpha_test() {
#ifndef OPENGLES_1
  if (_target_shader->get_flag(ShaderAttrib::F_subsume_alpha_test)) {
    // The shader performs the alpha test itself.
    enable_alpha_test(false);
  } else
#endif
  {
    const AlphaTestAttrib *target_alpha_test;
    _target_rs->get_attrib_def(target_alpha_test);

    AlphaTestAttrib::PandaCompareFunc mode = target_alpha_test->get_mode();
    if (mode == AlphaTestAttrib::M_none) {
      enable_alpha_test(false);
    } else {
      // Sanity-check the enum-to-GL mapping used by the macro.
      nassertv(GL_NEVER == (AlphaTestAttrib::M_never-1+0x200));
      glAlphaFunc(PANDA_TO_GL_COMPAREFUNC(mode), target_alpha_test->get_reference_alpha());
      enable_alpha_test(true);
    }
  }
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Applies the current DepthWriteAttrib via glDepthMask.
 */
void CLP(GraphicsStateGuardian)::
do_issue_depth_write() {
  const DepthWriteAttrib *target_depth_write;
  _target_rs->get_attrib_def(target_depth_write);

  DepthWriteAttrib::Mode mode = target_depth_write->get_mode();
  if (mode == DepthWriteAttrib::M_off) {
#ifdef GSG_VERBOSE
    GLCAT.spam()
      << "glDepthMask(GL_FALSE)" << endl;
#endif
    glDepthMask(GL_FALSE);
  } else {
#ifdef GSG_VERBOSE
    GLCAT.spam()
      << "glDepthMask(GL_TRUE)" << endl;
#endif
    glDepthMask(GL_TRUE);
  }
  report_my_gl_errors();
}

/**
 * Applies the current CullFaceAttrib via glCullFace/GL_CULL_FACE.
 */
void CLP(GraphicsStateGuardian)::
do_issue_cull_face() {
  const CullFaceAttrib *target_cull_face;
  _target_rs->get_attrib_def(target_cull_face);

  CullFaceAttrib::Mode mode = target_cull_face->get_effective_mode();

  switch (mode) {
  case CullFaceAttrib::M_cull_none:
    glDisable(GL_CULL_FACE);
    break;
  case CullFaceAttrib::M_cull_clockwise:
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
    break;
  case CullFaceAttrib::M_cull_counter_clockwise:
    glEnable(GL_CULL_FACE);
    glCullFace(GL_FRONT);
    break;
  default:
    GLCAT.error()
      << "invalid cull face mode " << (int)mode << endl;
    break;
  }
  report_my_gl_errors();
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Applies the current FogAttrib, enabling and configuring fixed-function fog
 * or disabling it.
 */
void CLP(GraphicsStateGuardian)::
do_issue_fog() {
  const FogAttrib *target_fog;
  _target_rs->get_attrib_def(target_fog);

  if (!target_fog->is_off()) {
    enable_fog(true);
    Fog *fog = target_fog->get_fog();
    nassertv(fog != nullptr);
    apply_fog(fog);
  } else {
    enable_fog(false);
  }
  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Applies the current DepthOffsetAttrib: polygon offset plus the depth
 * range remap.
 */
void CLP(GraphicsStateGuardian)::
do_issue_depth_offset() {
  const DepthOffsetAttrib *target_depth_offset = (const DepthOffsetAttrib *)
    _target_rs->get_attrib_def(DepthOffsetAttrib::get_class_slot());

  int offset = target_depth_offset->get_offset();

  if (offset != 0) {
    // The relationship between these two parameters is a little unclear and
    // poorly explained in the GL man pages.
    glPolygonOffset((GLfloat) -offset, (GLfloat) -offset);
    enable_polygon_offset(true);

  } else {
    enable_polygon_offset(false);
  }

  PN_stdfloat min_value = target_depth_offset->get_min_value();
  PN_stdfloat max_value = target_depth_offset->get_max_value();
#ifdef GSG_VERBOSE
  GLCAT.spam()
    << "glDepthRange(" << min_value << ", " << max_value << ")" << endl;
#endif
#ifdef OPENGLES
  // OpenGL ES uses a single-precision call.
  glDepthRangef((GLclampf)min_value, (GLclampf)max_value);
#else
  // Mainline OpenGL uses a double-precision call.
  if (!_use_remapped_depth_range) {
    glDepthRange((GLclampd)min_value, (GLclampd)max_value);
  } else {
    // If we have a remapped depth range, we should adjust the values to range
    // from -1 to 1.  We need to use an NV extension to pass unclamped values.
    _glDepthRangedNV(min_value * 2.0 - 1.0, max_value * 2.0 - 1.0);
  }
#endif  // OPENGLES

  report_my_gl_errors();
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Applies the current MaterialAttrib to the fixed-function lighting model,
 * choosing the appropriate glColorMaterial configuration depending on which
 * of ambient/diffuse the material supplies.
 */
void CLP(GraphicsStateGuardian)::
do_issue_material() {
  static Material empty;
  const Material *material;

  const MaterialAttrib *target_material;
  _target_rs->get_attrib_def(target_material);

  if (target_material == nullptr ||
      target_material->is_off()) {
    material = &empty;
  } else {
    material = target_material->get_material();
  }

  bool has_material_force_color = _has_material_force_color;

#ifndef NDEBUG
  if (_show_texture_usage) {
    // In show_texture_usage mode, all colors are white, so as not to
    // contaminate the texture color.  This means we disable lighting
    // materials too.
    material = &empty;
    has_material_force_color = false;
  }
#endif  // NDEBUG

#ifdef OPENGLES
  const GLenum face = GL_FRONT_AND_BACK;
#else
  GLenum face = material->get_twoside() ?
GL_FRONT_AND_BACK : GL_FRONT; #endif call_glMaterialfv(face, GL_SPECULAR, material->get_specular()); call_glMaterialfv(face, GL_EMISSION, material->get_emission()); glMaterialf(face, GL_SHININESS, max(min(material->get_shininess(), (PN_stdfloat)128), (PN_stdfloat)0)); if ((material->has_ambient() && material->has_diffuse()) || material->has_base_color()) { // The material has both an ambient and diffuse specified. This means we // do not need glMaterialColor(). glDisable(GL_COLOR_MATERIAL); call_glMaterialfv(face, GL_AMBIENT, material->get_ambient()); call_glMaterialfv(face, GL_DIFFUSE, material->get_diffuse()); } else if (material->has_ambient()) { // The material specifies an ambient, but not a diffuse component. The // diffuse component comes from the object's color. if (has_material_force_color) { glDisable(GL_COLOR_MATERIAL); call_glMaterialfv(face, GL_DIFFUSE, _material_force_color); } else { #ifndef OPENGLES glColorMaterial(face, GL_DIFFUSE); #endif // OPENGLES glEnable(GL_COLOR_MATERIAL); } call_glMaterialfv(face, GL_AMBIENT, material->get_ambient()); } else if (material->has_diffuse()) { // The material specifies a diffuse, but not an ambient component. The // ambient component comes from the object's color. if (has_material_force_color) { glDisable(GL_COLOR_MATERIAL); call_glMaterialfv(face, GL_AMBIENT, _material_force_color); } else { #ifndef OPENGLES glColorMaterial(face, GL_AMBIENT); #endif // OPENGLES glEnable(GL_COLOR_MATERIAL); } call_glMaterialfv(face, GL_DIFFUSE, material->get_diffuse()); } else { // The material specifies neither a diffuse nor an ambient component. // Both components come from the object's color. 
if (has_material_force_color) { glDisable(GL_COLOR_MATERIAL); call_glMaterialfv(face, GL_AMBIENT, _material_force_color); call_glMaterialfv(face, GL_DIFFUSE, _material_force_color); } else { #ifndef OPENGLES glColorMaterial(face, GL_AMBIENT_AND_DIFFUSE); #endif // OPENGLES glEnable(GL_COLOR_MATERIAL); } } #ifndef OPENGLES glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, material->get_local()); glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, material->get_twoside()); if (_use_separate_specular_color) { glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR); } else { glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SINGLE_COLOR); } #endif report_my_gl_errors(); } #endif // SUPPORT_FIXED_FUNCTION /** * Issues the logic operation attribute to the GL. */ #if !defined(OPENGLES) || defined(OPENGLES_1) void CLP(GraphicsStateGuardian):: do_issue_logic_op() { const LogicOpAttrib *target_logic_op; _target_rs->get_attrib_def(target_logic_op); if (target_logic_op->get_operation() != LogicOpAttrib::O_none) { glEnable(GL_COLOR_LOGIC_OP); glLogicOp(GL_CLEAR - 1 + (int)target_logic_op->get_operation()); if (GLCAT.is_spam()) { GLCAT.spam() << "glEnable(GL_COLOR_LOGIC_OP)\n"; GLCAT.spam() << "glLogicOp(" << target_logic_op->get_operation() << ")\n"; } } else { glDisable(GL_COLOR_LOGIC_OP); glLogicOp(GL_COPY); if (GLCAT.is_spam()) { GLCAT.spam() << "glDisable(GL_COLOR_LOGIC_OP)\n"; } } } #endif /** * */ void CLP(GraphicsStateGuardian):: do_issue_blending() { // Handle the color_write attrib. If color_write is off, then all the other // blending-related stuff doesn't matter. If the device doesn't support // color-write, we use blending tricks to effectively disable color write. 
  const ColorWriteAttrib *target_color_write;
  _target_rs->get_attrib_def(target_color_write);

  // Combine the channels requested by the attrib with the mask imposed by
  // the GSG itself.
  unsigned int color_channels =
    target_color_write->get_channels() & _color_write_mask;

#ifndef OPENGLES_1
  if (_target_shader->get_flag(ShaderAttrib::F_disable_alpha_write)) {
    color_channels &= ~(ColorWriteAttrib::C_alpha);
  }
#endif

  if (color_channels == ColorWriteAttrib::C_off) {
    // Every channel is masked out: either use glColorMask directly, or, when
    // gl_color_mask is disabled, emulate it by blending so nothing is
    // contributed to the framebuffer.
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(false);
    if (gl_color_mask) {
      enable_blend(false);
      set_color_write_mask(ColorWriteAttrib::C_off);
    } else {
      enable_blend(true);
      _glBlendEquation(GL_FUNC_ADD);
      glBlendFunc(GL_ZERO, GL_ONE);
    }
    if (GLCAT.is_spam()) {
      // NOTE(review): this logs the blend calls even on the glColorMask path
      // above, where they were not actually issued.
      GLCAT.spam() << "glBlendEquation(GL_FUNC_ADD)\n";
      GLCAT.spam() << "glBlendFunc(GL_ZERO, GL_ONE)\n";
    }
    return;
  } else {
    set_color_write_mask(color_channels);
  }

  // Gather the blend and transparency attribs that drive the rest of this
  // function.
  const ColorBlendAttrib *target_color_blend;
  _target_rs->get_attrib_def(target_color_blend);
  CPT(ColorBlendAttrib) color_blend = target_color_blend;
  ColorBlendAttrib::Mode color_blend_mode = target_color_blend->get_mode();
  ColorBlendAttrib::Mode alpha_blend_mode = target_color_blend->get_alpha_mode();

  const TransparencyAttrib *target_transparency;
  _target_rs->get_attrib_def(target_transparency);
  TransparencyAttrib::Mode transparency_mode = target_transparency->get_mode();

  _color_blend_involves_color_scale = color_blend->involves_color_scale();

  // Is there a color blend set?
  // An explicit ColorBlendAttrib takes precedence over transparency.
  if (color_blend_mode != ColorBlendAttrib::M_none) {
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(false);
    enable_blend(true);

    if (_supports_blend_equation_separate) {
      _glBlendEquationSeparate(get_blend_equation_type(color_blend_mode),
                               get_blend_equation_type(alpha_blend_mode));
    } else {
      // No separate alpha equation available; the color equation applies to
      // both channels.
      _glBlendEquation(get_blend_equation_type(color_blend_mode));
    }
    _glBlendFuncSeparate(get_blend_func(color_blend->get_operand_a()),
                         get_blend_func(color_blend->get_operand_b()),
                         get_blend_func(color_blend->get_alpha_operand_a()),
                         get_blend_func(color_blend->get_alpha_operand_b()));

#ifndef OPENGLES_1
    LColor c;
    if (_color_blend_involves_color_scale) {
      // Apply the current color scale to the blend mode.
      c = _current_color_scale;
    } else {
      c = color_blend->get_color();
    }
    _glBlendColor(c[0], c[1], c[2], c[3]);
#endif

    if (GLCAT.is_spam()) {
      if (_supports_blend_equation_separate) {
        GLCAT.spam() << "glBlendEquationSeparate(" << color_blend_mode
                     << ", " << alpha_blend_mode << ")\n";
      } else {
        GLCAT.spam() << "glBlendEquation(" << color_blend_mode << ")\n";
      }
      GLCAT.spam() << "glBlendFuncSeparate(" << color_blend->get_operand_a()
                   << ", " << color_blend->get_operand_b()
                   << ", " << color_blend->get_alpha_operand_a()
                   << ", " << color_blend->get_alpha_operand_b() << ")\n";
#ifndef OPENGLES_1
      GLCAT.spam() << "glBlendColor(" << c << ")\n";
#endif
    }
    return;
  }

  // No color blend; is there a transparency set?
  switch (transparency_mode) {
  case TransparencyAttrib::M_none:
  case TransparencyAttrib::M_binary:
    // No blending required; handled elsewhere (binary uses alpha test).
    break;

  case TransparencyAttrib::M_alpha:
  case TransparencyAttrib::M_dual:
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(false);
    enable_blend(true);
    _glBlendEquation(GL_FUNC_ADD);
    if (old_alpha_blend) {
      // Legacy behavior: blend the alpha channel with the same factors as
      // the color channels.
      glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    } else {
      _glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
                           GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    }
    if (GLCAT.is_spam()) {
      GLCAT.spam() << "glBlendEquation(GL_FUNC_ADD)\n";
      if (_supports_blend_equation_separate && !old_alpha_blend) {
        GLCAT.spam() << "glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)\n";
      } else {
        GLCAT.spam() << "glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n";
      }
    }
    return;

  case TransparencyAttrib::M_premultiplied_alpha:
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(false);
    enable_blend(true);
    _glBlendEquation(GL_FUNC_ADD);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    if (GLCAT.is_spam()) {
      GLCAT.spam() << "glBlendEquation(GL_FUNC_ADD)\n";
      GLCAT.spam() << "glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA)\n";
    }
    return;

  case TransparencyAttrib::M_multisample:
    // We need to enable *both* of these in M_multisample case.
    enable_multisample_alpha_one(true);
    enable_multisample_alpha_mask(true);
    enable_blend(false);
    return;

  case TransparencyAttrib::M_multisample_mask:
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(true);
    enable_blend(false);
    return;

  default:
    GLCAT.error()
      << "invalid transparency mode " << (int)transparency_mode << endl;
    break;
  }

  if (_line_smooth_enabled || _point_smooth_enabled) {
    // If we have either of these turned on, we also need to have blend mode
    // enabled in order to see it.
    enable_multisample_alpha_one(false);
    enable_multisample_alpha_mask(false);
    enable_blend(true);
    _glBlendEquation(GL_FUNC_ADD);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    if (GLCAT.is_spam()) {
      GLCAT.spam() << "glBlendEquation(GL_FUNC_ADD)\n";
      GLCAT.spam() << "glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n";
    }
    return;
  }

  /*
   * For best polygon smoothing, we need: (1) a frame buffer that supports
   * alpha (2) sort polygons front-to-back (3)
   * glBlendFunc(GL_SRC_ALPHA_SATURATE, GL_ONE); Since these modes have other
   * implications for the application, we don't attempt to do this by
   * default.  If you really want good polygon smoothing (and you don't have
   * multisample support), do all this yourself.
   */

  // Nothing's set, so disable blending.
  enable_multisample_alpha_one(false);
  enable_multisample_alpha_mask(false);
  enable_blend(false);
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called the first time a particular light has been bound to a given id
 * within a frame, this should set up the associated hardware light with the
 * light's properties.
 */
void CLP(GraphicsStateGuardian)::
bind_light(PointLight *light_obj, const NodePath &light, int light_id) {
  nassertv(has_fixed_function_pipeline());

  // static PStatCollector
  // _draw_set_state_light_bind_point_pcollector("Draw:Set
  // State:Light:Bind:Point"); PStatGPUTimer timer(this,
  // _draw_set_state_light_bind_point_pcollector);

  GLenum id = get_light_id(light_id);
  static const LColor black(0.0f, 0.0f, 0.0f, 1.0f);
  call_glLightfv(id, GL_AMBIENT, black);
  call_glLightfv(id, GL_DIFFUSE, get_light_color(light_obj));
  call_glLightfv(id, GL_SPECULAR, light_obj->get_specular_color());

  // Position needs to specify x, y, z, and w.  w == 1 implies non-infinite
  // position.  Transform the light's point into the scene root's space.
  CPT(TransformState) transform = light.get_transform(_scene_setup->get_scene_root().get_parent());
  LPoint3 pos = light_obj->get_point() * transform->get_mat();

  LPoint4 fpos(pos[0], pos[1], pos[2], 1.0f);
  call_glLightfv(id, GL_POSITION, fpos);

  // GL_SPOT_DIRECTION is not significant when cutoff == 180.

  // Exponent == 0 implies uniform light distribution.
  glLightf(id, GL_SPOT_EXPONENT, 0.0f);

  // Cutoff == 180 means uniform point light source.
  glLightf(id, GL_SPOT_CUTOFF, 180.0f);

  const LVecBase3 &att = light_obj->get_attenuation();
  glLightf(id, GL_CONSTANT_ATTENUATION, att[0]);
  glLightf(id, GL_LINEAR_ATTENUATION, att[1]);
  glLightf(id, GL_QUADRATIC_ATTENUATION, att[2]);

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called the first time a particular light has been bound to a given id
 * within a frame, this should set up the associated hardware light with the
 * light's properties.
 */
void CLP(GraphicsStateGuardian)::
bind_light(DirectionalLight *light_obj, const NodePath &light, int light_id) {
  nassertv(has_fixed_function_pipeline());

  // static PStatCollector
  // _draw_set_state_light_bind_directional_pcollector("Draw:Set
  // State:Light:Bind:Directional"); PStatGPUTimer timer(this,
  // _draw_set_state_light_bind_directional_pcollector);

  // Cache the computed (negated) direction per frame, so re-binding the same
  // light within a frame does not redo the transform.
  std::pair<DirectionalLights::iterator, bool> lookup = _dlights.insert(DirectionalLights::value_type(light, DirectionalLightFrameData()));
  DirectionalLightFrameData &fdata = (*lookup.first).second;
  if (lookup.second) {
    // The light was not computed yet this frame.  Compute it now.
    CPT(TransformState) transform =
      light.get_transform(_scene_setup->get_scene_root().get_parent());
    LVector3 dir = light_obj->get_direction() * transform->get_mat();
    fdata._neg_dir.set(-dir[0], -dir[1], -dir[2], 0);
  }

  GLenum id = get_light_id(light_id);
  static const LColor black(0.0f, 0.0f, 0.0f, 1.0f);
  call_glLightfv(id, GL_AMBIENT, black);
  call_glLightfv(id, GL_DIFFUSE, get_light_color(light_obj));
  call_glLightfv(id, GL_SPECULAR, light_obj->get_specular_color());

  // Position needs to specify x, y, z, and w.  w == 0 implies light is at
  // infinity.
  call_glLightfv(id, GL_POSITION, fdata._neg_dir);

  // GL_SPOT_DIRECTION is not significant when cutoff == 180.  In this case,
  // position x, y, z specifies direction.

  // Exponent == 0 implies uniform light distribution.
  glLightf(id, GL_SPOT_EXPONENT, 0.0f);

  // Cutoff == 180 means uniform point light source.
  glLightf(id, GL_SPOT_CUTOFF, 180.0f);

  // Default attenuation values (only spotlight and point light can modify
  // these).
  glLightf(id, GL_CONSTANT_ATTENUATION, 1.0f);
  glLightf(id, GL_LINEAR_ATTENUATION, 0.0f);
  glLightf(id, GL_QUADRATIC_ATTENUATION, 0.0f);

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called the first time a particular light has been bound to a given id
 * within a frame, this should set up the associated hardware light with the
 * light's properties.
 */
void CLP(GraphicsStateGuardian)::
bind_light(Spotlight *light_obj, const NodePath &light, int light_id) {
  nassertv(has_fixed_function_pipeline());

  // static PStatCollector
  // _draw_set_state_light_bind_spotlight_pcollector("Draw:Set
  // State:Light:Bind:Spotlight"); PStatGPUTimer timer(this,
  // _draw_set_state_light_bind_spotlight_pcollector);

  Lens *lens = light_obj->get_lens();
  nassertv(lens != nullptr);

  GLenum id = get_light_id(light_id);
  static const LColor black(0.0f, 0.0f, 0.0f, 1.0f);
  call_glLightfv(id, GL_AMBIENT, black);
  call_glLightfv(id, GL_DIFFUSE, get_light_color(light_obj));
  call_glLightfv(id, GL_SPECULAR, light_obj->get_specular_color());

  // Position needs to specify x, y, z, and w.  w == 1 implies non-infinite
  // position.  The spot position and direction come from the light's lens.
  CPT(TransformState) transform = light.get_transform(_scene_setup->get_scene_root().get_parent());
  const LMatrix4 &light_mat = transform->get_mat();
  LPoint3 pos = lens->get_nodal_point() * light_mat;
  LVector3 dir = lens->get_view_vector() * light_mat;

  LPoint4 fpos(pos[0], pos[1], pos[2], 1.0f);
  call_glLightfv(id, GL_POSITION, fpos);
  call_glLightfv(id, GL_SPOT_DIRECTION, dir);

  // Clamp the exponent into the GL-legal range [0, 128].
  glLightf(id, GL_SPOT_EXPONENT, max(min(light_obj->get_exponent(), (PN_stdfloat)128), (PN_stdfloat)0));

  // The spot cutoff is half the lens' horizontal field of view.
  glLightf(id, GL_SPOT_CUTOFF, lens->get_hfov() * 0.5f);

  const LVecBase3 &att = light_obj->get_attenuation();
  glLightf(id, GL_CONSTANT_ATTENUATION, att[0]);
  glLightf(id, GL_LINEAR_ATTENUATION, att[1]);
  glLightf(id, GL_QUADRATIC_ATTENUATION, att[2]);

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Creates a depth buffer for shadow mapping.  A derived GSG can override this
 * if it knows that a particular buffer type works best for shadow rendering.
 */
GraphicsOutput *CLP(GraphicsStateGuardian)::
make_shadow_buffer(LightLensNode *light, Texture *tex, GraphicsOutput *host) {
  // We override this to circumvent the fact that GraphicsEngine::make_output
  // can only be called from the app thread.
  if (!_supports_framebuffer_object) {
    // Without FBO support, fall back to the base class implementation.
    return GraphicsStateGuardian::make_shadow_buffer(light, tex, host);
  }

  bool is_point = light->is_of_type(PointLight::get_class_type());

  // Determine the properties for creating the depth buffer.
  FrameBufferProperties fbp;
  fbp.set_depth_bits(shadow_depth_bits);

  WindowProperties props = WindowProperties::size(light->get_shadow_buffer_size());
  int flags = GraphicsPipe::BF_refuse_window;
  if (is_point) {
    // Point-light shadow buffers are forced to be square.
    flags |= GraphicsPipe::BF_size_square;
  }

  CLP(GraphicsBuffer) *sbuffer = new CLP(GraphicsBuffer)(get_engine(), get_pipe(), light->get_name(), fbp, props, flags, this, host);
  sbuffer->add_render_texture(tex, GraphicsOutput::RTM_bind_or_copy, GraphicsOutput::RTP_depth);
  get_engine()->add_window(sbuffer, light->get_shadow_buffer_sort());
  return sbuffer;
}

#ifdef SUPPORT_IMMEDIATE_MODE
/**
 * Uses the ImmediateModeSender to draw a series of primitives of the
 * indicated type.
 */
void CLP(GraphicsStateGuardian)::
draw_immediate_simple_primitives(const GeomPrimitivePipelineReader *reader, GLenum mode) {
  int num_vertices = reader->get_num_vertices();
  _vertices_immediate_pcollector.add_level(num_vertices);
  glBegin(mode);

  if (reader->is_indexed()) {
    // Indexed: fetch each vertex through the index buffer.
    for (int v = 0; v < num_vertices; ++v) {
      _sender.set_vertex(reader->get_vertex(v));
      _sender.issue_vertex();
    }

  } else {
    // Nonindexed: vertices are consecutive starting at first_vertex.
    _sender.set_vertex(reader->get_first_vertex());
    for (int v = 0; v < num_vertices; ++v) {
      _sender.issue_vertex();
    }
  }
  glEnd();
}
#endif  // SUPPORT_IMMEDIATE_MODE

#ifdef SUPPORT_IMMEDIATE_MODE
/**
 * Uses the ImmediateModeSender to draw a series of primitives of the
 * indicated type.  This form is for primitive types like tristrips which must
 * involve several begin/end groups.
 */
void CLP(GraphicsStateGuardian)::
draw_immediate_composite_primitives(const GeomPrimitivePipelineReader *reader, GLenum mode) {
  int num_vertices = reader->get_num_vertices();
  _vertices_immediate_pcollector.add_level(num_vertices);
  CPTA_int ends = reader->get_ends();
  int num_unused_vertices_per_primitive = reader->get_object()->get_num_unused_vertices_per_primitive();

  if (reader->is_indexed()) {
    // Indexed: each sub-primitive runs from begin to its entry in ends().
    int begin = 0;
    CPTA_int::const_iterator ei;
    for (ei = ends.begin(); ei != ends.end(); ++ei) {
      int end = (*ei);

      glBegin(mode);
      for (int v = begin; v < end; ++v) {
        _sender.set_vertex(reader->get_vertex(v));
        _sender.issue_vertex();
      }
      glEnd();

      // Skip the unused vertices that separate consecutive primitives.
      begin = end + num_unused_vertices_per_primitive;
    }

  } else {
    // Nonindexed: same structure, with consecutive vertices.
    _sender.set_vertex(reader->get_first_vertex());
    int begin = 0;
    CPTA_int::const_iterator ei;
    for (ei = ends.begin(); ei != ends.end(); ++ei) {
      int end = (*ei);

      glBegin(mode);
      for (int v = begin; v < end; ++v) {
        _sender.issue_vertex();
      }
      glEnd();

      begin = end + num_unused_vertices_per_primitive;
    }
  }
}
#endif  // SUPPORT_IMMEDIATE_MODE

/**
 * Calls glFlush().
 */
void CLP(GraphicsStateGuardian)::
gl_flush() const {
  // Timed so the cost of flushing shows up in PStats.
  PStatTimer timer(_flush_pcollector);
  glFlush();
}

/**
 * Returns the result of glGetError().
 */
GLenum CLP(GraphicsStateGuardian)::
gl_get_error() const {
  if (_check_errors) {
    PStatTimer timer(_check_error_pcollector);
    return glGetError();
  } else {
    // Error checking is disabled; report success without asking the driver.
    return GL_NO_ERROR;
  }
}

/**
 * The internal implementation of report_errors().  Don't call this function;
 * use report_errors() instead.  The return value is true if everything is ok,
 * or false if we should shut down.
 */
bool CLP(GraphicsStateGuardian)::
report_errors_loop(int line, const char *source_file, GLenum error_code,
                   int &error_count) {
  // Drain the GL error queue, logging each error, until either the queue is
  // empty or the configured cap is reached (gl_max_errors < 0 = unlimited).
  while ((gl_max_errors < 0 || error_count < gl_max_errors) &&
         (error_code != GL_NO_ERROR)) {
    GLCAT.error()
      << "at " << line << " of " << source_file << " : "
      << get_error_string(error_code) << "\n";

    error_code = glGetError();
    error_count++;
  }

  return (error_code == GL_NO_ERROR);
}

/**
 * Returns an error string for an OpenGL error code.
 */
string CLP(GraphicsStateGuardian)::
get_error_string(GLenum error_code) {
  // We used to use gluErrorString here, but I (rdb) took it out because that
  // was really the only function we used from GLU.  The idea with the error
  // table was taken from SGI's sample implementation.
  static const char *error_strings[] = {
    "invalid enumerant",
    "invalid value",
    "invalid operation",
    "stack overflow",
    "stack underflow",
    "out of memory",
    "invalid framebuffer operation",
    "context lost",
  };

  if (error_code == GL_NO_ERROR) {
    return "no error";
#ifndef OPENGLES
  } else if (error_code == GL_TABLE_TOO_LARGE) {
    return "table too large";
#endif
  } else if (error_code >= 0x0500 && error_code <= 0x0507) {
    // The table above covers the contiguous GL error range 0x0500-0x0507.
    return error_strings[error_code - 0x0500];
  }

  // Other error, somehow?  Just display the error code then.
  std::ostringstream strm;
  strm << "GL error " << (int)error_code;
  return strm.str();
}

/**
 * Outputs the result of glGetString() on the indicated tag.  The output
 * string is returned.
 */
string CLP(GraphicsStateGuardian)::
show_gl_string(const string &name, GLenum id) {
  string result;

  const GLubyte *text = glGetString(id);
  if (text == nullptr) {
    GLCAT.warning()
      << "Unable to query " << name << "\n";
  } else {
    result = (const char *)text;
    if (GLCAT.is_debug()) {
      GLCAT.debug()
        << name << " = " << result << "\n";
    }
  }

  return result;
}

/**
 * Queries the runtime version of OpenGL in use.
 */
void CLP(GraphicsStateGuardian)::
query_gl_version() {
  _gl_vendor = show_gl_string("GL_VENDOR", GL_VENDOR);
  _gl_renderer = show_gl_string("GL_RENDERER", GL_RENDERER);
  _gl_version = show_gl_string("GL_VERSION", GL_VERSION);

  _gl_version_major = 0;
  _gl_version_minor = 0;

  // This is the most preposterous driver bug: NVIDIA drivers will claim
  // that the version is 1.2 as long as the process is named pview.exe!
#ifndef OPENGLES
  if (_gl_version.substr(0, 10) == "1.2 NVIDIA") {
    Filename exec_name = ExecutionEnvironment::get_binary_name();
    if (cmp_nocase(exec_name.get_basename(), "pview.exe") == 0) {
      // Try the GL 3 style integer query instead; if the driver rejects the
      // enum, we really are on an old GL and must trust the string.
      glGetIntegerv(GL_MAJOR_VERSION, &_gl_version_major);
      glGetIntegerv(GL_MINOR_VERSION, &_gl_version_minor);

      if (glGetError() == GL_INVALID_ENUM) {
        _gl_version_major = 1;
        _gl_version_minor = 2;

        GLCAT.warning()
          << "Driver possibly misreported GL_VERSION! Unable to detect "
             "correct OpenGL version.\n";
      } else if (_gl_version_major != 1 || _gl_version_minor != 2) {
        GLCAT.debug()
          << "Driver misreported GL_VERSION! Correct version detected as "
          << _gl_version_major << "." << _gl_version_minor << "\n";
      }
      return;
    }
  }

  // If we asked for a GL 3 context, let's first try and see if we can use the
  // OpenGL 3 way to query version.
  if (gl_version.get_num_words() > 0 && gl_version[0] >= 3) {
    glGetIntegerv(GL_MAJOR_VERSION, &_gl_version_major);
    glGetIntegerv(GL_MINOR_VERSION, &_gl_version_minor);

    if (_gl_version_major >= 1) {
      // Fair enough, seems to check out.
      if (GLCAT.is_debug()) {
        GLCAT.debug()
          << "Detected OpenGL version: "
          << _gl_version_major << "." << _gl_version_minor << "\n";
      }
      return;
    }
  }
#endif  // !OPENGLES

  // Otherwise, parse the GL_VERSION string.
  if (_gl_version.empty()) {
    GLCAT.error()
      << "Unable to detect OpenGL version\n";

  } else {
    string input = _gl_version;

    // Skip any initial words that don't begin with a digit.
    while (!input.empty() && !isdigit(input[0])) {
      size_t space = input.find(' ');
      if (space == string::npos) {
        break;
      }
      size_t next = space + 1;
      while (next < input.length() && isspace(input[next])) {
        ++next;
      }
      input = input.substr(next);
    }

    // Truncate after the first space.
    size_t space = input.find(' ');
    if (space != string::npos) {
      input = input.substr(0, space);
    }

    // The remaining word should be "major.minor[.release]".
    vector_string components;
    tokenize(input, components, ".");
    if (components.size() >= 1) {
      string_to_int(components[0], _gl_version_major);
    }
    if (components.size() >= 2) {
      string_to_int(components[1], _gl_version_minor);
    }

    if (GLCAT.is_debug()) {
      GLCAT.debug()
        << "GL_VERSION decoded to: "
        << _gl_version_major << "." << _gl_version_minor << "\n";
    }
  }
}

/**
 * Queries the supported GLSL version.
 */
void CLP(GraphicsStateGuardian)::
query_glsl_version() {
  _gl_shadlang_ver_major = 0;
  _gl_shadlang_ver_minor = 0;

#ifndef OPENGLES_1
#ifndef OPENGLES
  // OpenGL 2.0 introduces GLSL in the core.  In 1.x, it is an extension.
  if (_gl_version_major >= 2 || has_extension("GL_ARB_shading_language_100")) {
    string ver = show_gl_string("GL_SHADING_LANGUAGE_VERSION", GL_SHADING_LANGUAGE_VERSION);
    // Fallback values in case the string cannot be parsed.
    _gl_shadlang_ver_major = 1;
    _gl_shadlang_ver_minor = (_gl_version_major >= 2) ? 1 : 0;
    if (ver.empty() ||
        sscanf(ver.c_str(), "%d.%d", &_gl_shadlang_ver_major, &_gl_shadlang_ver_minor) != 2) {
      GLCAT.warning()
        << "Invalid GL_SHADING_LANGUAGE_VERSION format.\n";
    }
  }
#else
  // OpenGL ES 2.0 and above has shader support built-in.
  string ver = show_gl_string("GL_SHADING_LANGUAGE_VERSION", GL_SHADING_LANGUAGE_VERSION);
  // Fallback values in case the string cannot be parsed.
  _gl_shadlang_ver_major = 1;
  _gl_shadlang_ver_minor = 0;
  if (ver.empty() ||
      sscanf(ver.c_str(), "OpenGL ES GLSL ES %d.%d", &_gl_shadlang_ver_major, &_gl_shadlang_ver_minor) != 2) {
    GLCAT.warning()
      << "Invalid GL_SHADING_LANGUAGE_VERSION format.\n";
  }
#endif

  if (GLCAT.is_debug()) {
    GLCAT.debug()
      << "Detected GLSL "
#ifdef OPENGLES
         "ES "
#endif
         "version: "
      << _gl_shadlang_ver_major << "." << _gl_shadlang_ver_minor << "\n";
  }
#endif  // !OPENGLES_1
}

/**
 * Separates the string returned by GL_EXTENSIONS (or glx or wgl extensions)
 * into its individual tokens and saves them in the _extensions member.
 */
void CLP(GraphicsStateGuardian)::
save_extensions(const char *extensions) {
  if (extensions != nullptr) {
    vector_string tokens;
    extract_words(extensions, tokens);

    vector_string::iterator ti;
    for (ti = tokens.begin(); ti != tokens.end(); ++ti) {
      _extensions.insert(*ti);
    }
  }
}

/**
 * This may be redefined by a derived class (e.g.  glx or wgl) to get whatever
 * further extensions strings may be appropriate to that interface, in
 * addition to the GL extension strings return by glGetString().
 */
void CLP(GraphicsStateGuardian)::
get_extra_extensions() {
}

/**
 * Outputs the list of GL extensions to notify, if debug mode is enabled.
 */
void CLP(GraphicsStateGuardian)::
report_extensions() const {
  if (GLCAT.is_debug()) {
    std::ostream &out = GLCAT.debug();
    out << "GL Extensions:\n";

    pset<string>::const_iterator ei;
    for (ei = _extensions.begin(); ei != _extensions.end(); ++ei) {
      size_t len = (*ei).size();
      out << "  " << (*ei);

      // Display a second column.
      if (len <= 38) {
        if (++ei != _extensions.end()) {
          // Pad the first column before emitting the second entry.
          for (int i = len; i < 38; ++i) {
            out.put(' ');
          }
          out << ' ' << (*ei);
        } else {
          out.put('\n');
          break;
        }
      }
      out.put('\n');
    }
  }
}

/**
 * Returns the pointer to the GL extension function with the indicated name,
 * or NULL if the function is not available.
 */
void *CLP(GraphicsStateGuardian)::
get_extension_func(const char *name) {
  // First, look in the static-compiled namespace.  If we were compiled to
  // expect at least a certain minimum runtime version of OpenGL, then we can
  // expect those extension functions to be available at compile time.
  // Somewhat more reliable than poking around in the runtime pointers.
  static struct {
    const char *name;
    void *fptr;
  } compiled_function_table[] = {
#ifdef EXPECT_GL_VERSION_1_2
    { "glBlendColor", (void *)&glBlendColor },
    { "glBlendEquation", (void *)&glBlendEquation },
    { "glDrawRangeElements", (void *)&glDrawRangeElements },
    { "glTexImage3D", (void *)&glTexImage3D },
    { "glTexSubImage3D", (void *)&glTexSubImage3D },
    { "glCopyTexSubImage3D", (void *)&glCopyTexSubImage3D },
#endif
#ifdef EXPECT_GL_VERSION_1_3
    { "glActiveTexture", (void *)&glActiveTexture },
    { "glClientActiveTexture", (void *)&glClientActiveTexture },
    { "glCompressedTexImage1D", (void *)&glCompressedTexImage1D },
    { "glCompressedTexImage2D", (void *)&glCompressedTexImage2D },
    { "glCompressedTexImage3D", (void *)&glCompressedTexImage3D },
    { "glCompressedTexSubImage1D", (void *)&glCompressedTexSubImage1D },
    { "glCompressedTexSubImage2D", (void *)&glCompressedTexSubImage2D },
    { "glCompressedTexSubImage3D", (void *)&glCompressedTexSubImage3D },
    { "glGetCompressedTexImage", (void *)&glGetCompressedTexImage },
    { "glMultiTexCoord1f", (void *)&glMultiTexCoord1f },
    { "glMultiTexCoord2", (void *)&glMultiTexCoord2 },
    { "glMultiTexCoord3", (void *)&glMultiTexCoord3 },
    { "glMultiTexCoord4", (void *)&glMultiTexCoord4 },
#endif
#ifdef EXPECT_GL_VERSION_1_4
    { "glPointParameterfv", (void *)&glPointParameterfv },
    { "glSecondaryColorPointer", (void *)&glSecondaryColorPointer },
#endif
#ifdef EXPECT_GL_VERSION_1_5
    { "glBeginQuery", (void *)&glBeginQuery },
    { "glBindBuffer", (void *)&glBindBuffer },
    { "glBufferData", (void *)&glBufferData },
    { "glBufferSubData", (void *)&glBufferSubData },
    { "glDeleteBuffers", (void *)&glDeleteBuffers },
    { "glDeleteQueries", (void *)&glDeleteQueries },
    { "glEndQuery", (void *)&glEndQuery },
    { "glGenBuffers", (void *)&glGenBuffers },
    { "glGenQueries", (void *)&glGenQueries },
    { "glGetQueryObjectuiv", (void *)&glGetQueryObjectuiv },
    { "glGetQueryiv", (void *)&glGetQueryiv },
#endif
#ifdef OPENGLES
    { "glActiveTexture", (void *)&glActiveTexture },
#ifndef OPENGLES_2
    { "glClientActiveTexture", (void *)&glClientActiveTexture },
#endif
    { "glBindBuffer", (void *)&glBindBuffer },
    { "glBufferData", (void *)&glBufferData },
    { "glBufferSubData", (void *)&glBufferSubData },
    { "glDeleteBuffers", (void *)&glDeleteBuffers },
    { "glGenBuffers", (void *)&glGenBuffers },
#endif
    // Sentinel entry terminating the linear scan below.
    { nullptr, nullptr }
  };

  int i = 0;
  while (compiled_function_table[i].name != nullptr) {
    if (strcmp(compiled_function_table[i].name, name) == 0) {
      return compiled_function_table[i].fptr;
    }
    ++i;
  }

  // If the extension function wasn't compiled in, then go get it from the
  // runtime.  There's a different interface for each API.
  return do_get_extension_func(name);
}

/**
 * This is the virtual implementation of get_extension_func().  Each API-
 * specific GL implementation will map this method to the appropriate API call
 * to retrieve the extension function pointer.  Returns NULL if the function
 * is not available.
 */
void *CLP(GraphicsStateGuardian)::
do_get_extension_func(const char *) {
  return nullptr;
}

/**
 * Sets up the glDrawBuffer to render into the buffer indicated by the
 * RenderBuffer object.  This only sets up the color and aux bits; it does not
 * affect the depth, stencil, accum layers.
 */
void CLP(GraphicsStateGuardian)::
set_draw_buffer(int rbtype) {
#ifndef OPENGLES
  // Draw buffers not supported by OpenGL ES.  (TODO!)
  if (_current_fbo) {
    // Rendering into an FBO: translate each requested RenderBuffer bit into
    // the corresponding color attachment index, walking the attachments in
    // the same fixed order they were bound (left/right, then aux planes).
    GLuint buffers[16];
    int nbuffers = 0;
    int index = 0;
    if (_current_properties->get_color_bits() > 0) {
      if (rbtype & RenderBuffer::T_left) {
        buffers[nbuffers++] = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;

      if (_current_properties->is_stereo()) {
        if (rbtype & RenderBuffer::T_right) {
          buffers[nbuffers++] = GL_COLOR_ATTACHMENT0_EXT + index;
        }
        ++index;
      }
    }
    for (int i = 0; i < _current_properties->get_aux_rgba(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_rgba_0 << i)) {
        buffers[nbuffers++] = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    for (int i = 0; i < _current_properties->get_aux_hrgba(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_hrgba_0 << i)) {
        buffers[nbuffers++] = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    for (int i = 0; i < _current_properties->get_aux_float(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_float_0 << i)) {
        buffers[nbuffers++] = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    _glDrawBuffers(nbuffers, buffers);

  } else {
    // Rendering into the window system framebuffer: pick the matching
    // glDrawBuffer enum.
    switch (rbtype & RenderBuffer::T_color) {
    case RenderBuffer::T_front:
      glDrawBuffer(GL_FRONT);
      break;

    case RenderBuffer::T_back:
      glDrawBuffer(GL_BACK);
      break;

    case RenderBuffer::T_right:
      glDrawBuffer(GL_RIGHT);
      break;

    case RenderBuffer::T_left:
      glDrawBuffer(GL_LEFT);
      break;

    case RenderBuffer::T_front_right:
      nassertv(_current_properties->is_stereo());
      glDrawBuffer(GL_FRONT_RIGHT);
      break;

    case RenderBuffer::T_front_left:
      nassertv(_current_properties->is_stereo());
      glDrawBuffer(GL_FRONT_LEFT);
      break;

    case RenderBuffer::T_back_right:
      nassertv(_current_properties->is_stereo());
      glDrawBuffer(GL_BACK_RIGHT);
      break;

    case RenderBuffer::T_back_left:
      nassertv(_current_properties->is_stereo());
      glDrawBuffer(GL_BACK_LEFT);
      break;

    default:
      break;
    }
  }
#endif  // OPENGLES

  // Also ensure that any global color channels are masked out.
  set_color_write_mask(_color_write_mask);

  report_my_gl_errors();
}

/**
 * Sets up the glReadBuffer to render into the buffer indicated by the
 * RenderBuffer object.  This only sets up the color bits; it does not affect
 * the depth, stencil, accum layers.
 */
void CLP(GraphicsStateGuardian)::
set_read_buffer(int rbtype) {
#ifndef OPENGLES
  // Draw buffers not supported by OpenGL ES.  (TODO!)

  if (rbtype & (RenderBuffer::T_depth | RenderBuffer::T_stencil)) {
    // Special case: don't have to call ReadBuffer for these.
    return;
  }

  if (_current_fbo) {
    // Same attachment-index walk as in set_draw_buffer, but only a single
    // read buffer can be selected; index starts at 1 past the first color
    // attachment.
    GLuint buffer = GL_COLOR_ATTACHMENT0_EXT;
    int index = 1;
    if (_current_properties->is_stereo()) {
      if (rbtype & RenderBuffer::T_right) {
        buffer = GL_COLOR_ATTACHMENT1_EXT;
      }
      ++index;
    }
    for (int i = 0; i < _current_properties->get_aux_rgba(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_rgba_0 << i)) {
        buffer = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    for (int i = 0; i < _current_properties->get_aux_hrgba(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_hrgba_0 << i)) {
        buffer = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    for (int i = 0; i < _current_properties->get_aux_float(); ++i) {
      if (rbtype & (RenderBuffer::T_aux_float_0 << i)) {
        buffer = GL_COLOR_ATTACHMENT0_EXT + index;
      }
      ++index;
    }
    glReadBuffer(buffer);

  } else {
    switch (rbtype & RenderBuffer::T_color) {
    case RenderBuffer::T_front:
      glReadBuffer(GL_FRONT);
      break;

    case RenderBuffer::T_back:
      glReadBuffer(GL_BACK);
      break;

    case RenderBuffer::T_right:
      glReadBuffer(GL_RIGHT);
      break;

    case RenderBuffer::T_left:
      glReadBuffer(GL_LEFT);
      break;

    case RenderBuffer::T_front_right:
      glReadBuffer(GL_FRONT_RIGHT);
      break;

    case RenderBuffer::T_front_left:
      glReadBuffer(GL_FRONT_LEFT);
      break;

    case RenderBuffer::T_back_right:
      glReadBuffer(GL_BACK_RIGHT);
      break;

    case RenderBuffer::T_back_left:
      glReadBuffer(GL_BACK_LEFT);
      break;

    default:
      break;
    }
  }

  report_my_gl_errors();
#endif  // OPENGLES
}

/**
 * Maps from the Geom's internal numeric type symbols to GL's.
 */
GLenum CLP(GraphicsStateGuardian)::
get_numeric_type(Geom::NumericType numeric_type) {
  switch (numeric_type) {
  case Geom::NT_uint16:
    return GL_UNSIGNED_SHORT;

  case Geom::NT_uint32:
#ifndef OPENGLES_1
    return GL_UNSIGNED_INT;
#else
    // Not supported on GLES 1; fall through to the error path below.
    break;
#endif

  case Geom::NT_uint8:
  case Geom::NT_packed_dcba:
  case Geom::NT_packed_dabc:
    return GL_UNSIGNED_BYTE;

  case Geom::NT_float32:
    return GL_FLOAT;

  case Geom::NT_float64:
#ifndef OPENGLES
    return GL_DOUBLE;
#else
    break;
#endif

  case Geom::NT_stdfloat:
    // Shouldn't happen, display error.
    break;

  case Geom::NT_int8:
    return GL_BYTE;

  case Geom::NT_int16:
    return GL_SHORT;

  case Geom::NT_int32:
#ifndef OPENGLES_1
    return GL_INT;
#else
    break;
#endif

  case Geom::NT_packed_ufloat:
#ifndef OPENGLES_1
    return GL_UNSIGNED_INT_10F_11F_11F_REV;
#else
    break;
#endif
  }

  GLCAT.error() << "Invalid NumericType value (" << (int)numeric_type << ")\n";
  return GL_UNSIGNED_BYTE;
}

/**
 * Maps from the Texture's texture type symbols to GL's.
 */
GLenum CLP(GraphicsStateGuardian)::
get_texture_target(Texture::TextureType texture_type) const {
  switch (texture_type) {
  case Texture::TT_1d_texture:
    // There are no 1D textures in OpenGL ES.  Fall back to 2D textures.
#ifndef OPENGLES
    return GL_TEXTURE_1D;
#endif

  case Texture::TT_1d_texture_array:
    // There are no 1D array textures in OpenGL ES.  Fall back to 2D textures.
#ifndef OPENGLES return GL_TEXTURE_1D_ARRAY; #endif case Texture::TT_2d_texture: return GL_TEXTURE_2D; case Texture::TT_3d_texture: #ifndef OPENGLES_1 if (_supports_3d_texture) { return GL_TEXTURE_3D; } #endif return GL_NONE; case Texture::TT_2d_texture_array: #ifndef OPENGLES_1 if (_supports_2d_texture_array) { return GL_TEXTURE_2D_ARRAY; } #endif return GL_NONE; case Texture::TT_cube_map: if (_supports_cube_map) { return GL_TEXTURE_CUBE_MAP; } else { return GL_NONE; } case Texture::TT_cube_map_array: #ifndef OPENGLES if (_supports_cube_map_array) { return GL_TEXTURE_CUBE_MAP_ARRAY; } #endif return GL_NONE; case Texture::TT_buffer_texture: #ifndef OPENGLES if (_supports_buffer_texture) { return GL_TEXTURE_BUFFER; } #endif return GL_NONE; } GLCAT.error() << "Invalid Texture::TextureType value!\n"; return GL_TEXTURE_2D; } /** * Maps from the Texture's internal wrap mode symbols to GL's. */ GLenum CLP(GraphicsStateGuardian):: get_texture_wrap_mode(SamplerState::WrapMode wm) const { if (gl_ignore_clamp) { return GL_REPEAT; } switch (wm) { case SamplerState::WM_clamp: return _edge_clamp; case SamplerState::WM_repeat: return GL_REPEAT; case SamplerState::WM_mirror: return _mirror_repeat; case SamplerState::WM_mirror_once: return _mirror_border_clamp; case SamplerState::WM_border_color: return _border_clamp; case SamplerState::WM_invalid: break; } GLCAT.error() << "Invalid SamplerState::WrapMode value!\n"; return _edge_clamp; } /** * Maps from the GL's internal wrap mode symbols to Panda's. 
 */
SamplerState::WrapMode CLP(GraphicsStateGuardian)::
get_panda_wrap_mode(GLenum wm) {
  // Inverse of get_texture_wrap_mode(), for translating wrap modes queried
  // back from GL into Panda's enum.  Unknown values report an error and
  // default to WM_clamp.
  switch (wm) {
#ifndef OPENGLES
  case GL_CLAMP:
#endif
  case GL_CLAMP_TO_EDGE:
    return SamplerState::WM_clamp;

#ifndef OPENGLES
  case GL_CLAMP_TO_BORDER:
    return SamplerState::WM_border_color;
#endif

  case GL_REPEAT:
    return SamplerState::WM_repeat;

  case GL_MIRRORED_REPEAT:
    return SamplerState::WM_mirror;

#ifndef OPENGLES
  case GL_MIRROR_CLAMP_EXT:
  case GL_MIRROR_CLAMP_TO_EDGE_EXT:
    return SamplerState::WM_mirror;

  case GL_MIRROR_CLAMP_TO_BORDER_EXT:
    return SamplerState::WM_mirror_once;
#endif
  }
  GLCAT.error() << "Unexpected GL wrap mode " << (int)wm << "\n";
  return SamplerState::WM_clamp;
}

/**
 * Maps from the Texture's internal filter type symbols to GL's.
 */
GLenum CLP(GraphicsStateGuardian)::
get_texture_filter_type(SamplerState::FilterType ft, bool ignore_mipmaps) {
  // gl_ignore_filters forces nearest filtering everywhere.  When
  // ignore_mipmaps is true (e.g. the texture has no mipmap levels), the
  // mipmapped filter modes collapse to their non-mipmapped equivalents.
  // FT_shadow maps to GL_LINEAR in both paths.
  if (gl_ignore_filters) {
    return GL_NEAREST;

  } else if (ignore_mipmaps) {
    switch (ft) {
    case SamplerState::FT_nearest_mipmap_nearest:
    case SamplerState::FT_nearest:
      return GL_NEAREST;
    case SamplerState::FT_linear:
    case SamplerState::FT_linear_mipmap_nearest:
    case SamplerState::FT_nearest_mipmap_linear:
    case SamplerState::FT_linear_mipmap_linear:
      return GL_LINEAR;
    case SamplerState::FT_shadow:
      return GL_LINEAR;
    case SamplerState::FT_default:
    case SamplerState::FT_invalid:
      break;
    }

  } else {
    switch (ft) {
    case SamplerState::FT_nearest:
      return GL_NEAREST;
    case SamplerState::FT_linear:
      return GL_LINEAR;
    case SamplerState::FT_nearest_mipmap_nearest:
      return GL_NEAREST_MIPMAP_NEAREST;
    case SamplerState::FT_linear_mipmap_nearest:
      return GL_LINEAR_MIPMAP_NEAREST;
    case SamplerState::FT_nearest_mipmap_linear:
      return GL_NEAREST_MIPMAP_LINEAR;
    case SamplerState::FT_linear_mipmap_linear:
      return GL_LINEAR_MIPMAP_LINEAR;
    case SamplerState::FT_shadow:
      return GL_LINEAR;
    case SamplerState::FT_default:
    case SamplerState::FT_invalid:
      break;
    }
  }
  GLCAT.error() << "Invalid SamplerState::FilterType value!\n";
  return GL_NEAREST;
}

/**
 * Maps from the
GL's internal filter type symbols to Panda's.
 */
SamplerState::FilterType CLP(GraphicsStateGuardian)::
get_panda_filter_type(GLenum ft) {
  // Inverse of get_texture_filter_type(), for translating filter modes
  // queried back from GL.  Unknown values report an error and default to
  // FT_linear.
  switch (ft) {
  case GL_NEAREST:
    return SamplerState::FT_nearest;
  case GL_LINEAR:
    return SamplerState::FT_linear;
  case GL_NEAREST_MIPMAP_NEAREST:
    return SamplerState::FT_nearest_mipmap_nearest;
  case GL_LINEAR_MIPMAP_NEAREST:
    return SamplerState::FT_linear_mipmap_nearest;
  case GL_NEAREST_MIPMAP_LINEAR:
    return SamplerState::FT_nearest_mipmap_linear;
  case GL_LINEAR_MIPMAP_LINEAR:
    return SamplerState::FT_linear_mipmap_linear;
  }
  GLCAT.error() << "Unexpected GL filter type " << (int)ft << "\n";
  return SamplerState::FT_linear;
}

/**
 * Maps from the Texture's internal ComponentType symbols to GL's.
 */
GLenum CLP(GraphicsStateGuardian)::
get_component_type(Texture::ComponentType component_type) {
  switch (component_type) {
  case Texture::T_unsigned_byte:
    return GL_UNSIGNED_BYTE;
  case Texture::T_unsigned_short:
    return GL_UNSIGNED_SHORT;
  case Texture::T_float:
    return GL_FLOAT;
  case Texture::T_unsigned_int_24_8:
    // Packed depth-stencil; only meaningful when the context supports the
    // depth-stencil extension, otherwise degrade to unsigned byte.
    if (_supports_depth_stencil) {
      return GL_UNSIGNED_INT_24_8_EXT;
    } else {
      return GL_UNSIGNED_BYTE;
    }
  case Texture::T_int:
#ifndef OPENGLES_1
    return GL_INT;
#endif
    // NOTE(review): under OPENGLES_1, T_int falls through to GL_BYTE below.
    // Looks like a degraded fallback rather than an exact mapping -- confirm
    // this is intended.
  case Texture::T_byte:
    return GL_BYTE;
  case Texture::T_short:
    return GL_SHORT;
#ifndef OPENGLES_1
  case Texture::T_half_float:
    return GL_HALF_FLOAT;
#endif
#ifndef OPENGLES_1
  case Texture::T_unsigned_int:
    return GL_UNSIGNED_INT;
#endif
  default:
    GLCAT.error() << "Invalid Texture::Type value!\n";
    return GL_UNSIGNED_BYTE;
  }
}

/**
 * Maps from the Texture's Format symbols to GL's.
 */
GLint CLP(GraphicsStateGuardian)::
get_external_image_format(Texture *tex) const {
  // If the texture's RAM image is already stored pre-compressed, report the
  // matching GL compressed format; otherwise (or if the compression scheme is
  // unsupported, in which case the cases below break out) fall through to the
  // uncompressed format switch at the bottom.
  Texture::CompressionMode compression = tex->get_ram_image_compression();
  Texture::Format format = tex->get_format();
  if (compression != Texture::CM_off &&
      get_supports_compressed_texture_format(compression)) {
    switch (compression) {
    case Texture::CM_on:
#ifndef OPENGLES
      // Generic compression: pick the generic compressed GL format that
      // matches the texture's component layout.
      switch (format) {
      case Texture::F_color_index:
      case Texture::F_depth_component:
      case Texture::F_depth_component16:
      case Texture::F_depth_component24:
      case Texture::F_depth_component32:
      case Texture::F_depth_stencil:
      case Texture::F_r11_g11_b10:
      case Texture::F_rgb9_e5:
        // This shouldn't be possible.
        nassertr(false, GL_RGB);
        break;

      case Texture::F_rgba:
      case Texture::F_rgbm:
      case Texture::F_rgba4:
      case Texture::F_rgba8:
      case Texture::F_rgba12:
      case Texture::F_rgba16:
      case Texture::F_rgba32:
      case Texture::F_rgba8i:
      case Texture::F_rgb10_a2:
        return GL_COMPRESSED_RGBA;

      case Texture::F_rgb:
      case Texture::F_rgb5:
      case Texture::F_rgba5:
      case Texture::F_rgb8:
      case Texture::F_rgb8i:
      case Texture::F_rgb12:
      case Texture::F_rgb332:
      case Texture::F_rgb16:
      case Texture::F_rgb32:
        return GL_COMPRESSED_RGB;

      case Texture::F_alpha:
        return GL_COMPRESSED_ALPHA;

      case Texture::F_red:
      case Texture::F_green:
      case Texture::F_blue:
      case Texture::F_r8i:
      case Texture::F_r16:
      case Texture::F_r16i:
      case Texture::F_r32:
      case Texture::F_r32i:
        return GL_COMPRESSED_RED;

      case Texture::F_rg:
      case Texture::F_rg8i:
      case Texture::F_rg16:
      case Texture::F_rg32:
        return GL_COMPRESSED_RG;

      case Texture::F_luminance:
        return GL_COMPRESSED_LUMINANCE;

      case Texture::F_luminance_alpha:
      case Texture::F_luminance_alphamask:
        return GL_COMPRESSED_LUMINANCE_ALPHA;

      case Texture::F_srgb:
        return GL_COMPRESSED_SRGB;

      case Texture::F_srgb_alpha:
        return GL_COMPRESSED_SRGB_ALPHA;

      case Texture::F_sluminance:
        return GL_COMPRESSED_SLUMINANCE;

      case Texture::F_sluminance_alpha:
        return GL_COMPRESSED_SLUMINANCE_ALPHA;
      }
#endif
      break;

    case Texture::CM_dxt1:
      // DXT1 carries at most 1-bit alpha; sRGB variants take priority on
      // desktop GL.
#ifndef OPENGLES
      if (format == Texture::F_srgb_alpha) {
        return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
      } else if (format == Texture::F_srgb) {
        return GL_COMPRESSED_SRGB_S3TC_DXT1_EXT;
      } else
#endif
      if (Texture::has_alpha(format)) {
        return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
      } else {
        return GL_COMPRESSED_RGB_S3TC_DXT1_EXT;
      }

    case Texture::CM_dxt3:
#ifndef OPENGLES
      if (format == Texture::F_srgb || format == Texture::F_srgb_alpha) {
        return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT;
      }
#endif
#ifndef OPENGLES_1
      return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
#endif
      break;

    case Texture::CM_dxt5:
#ifndef OPENGLES
      if (format == Texture::F_srgb || format == Texture::F_srgb_alpha) {
        return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
      }
#endif
#ifndef OPENGLES_1
      return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
#endif
      break;

    case Texture::CM_fxt1:
#ifndef OPENGLES
      if (Texture::has_alpha(format)) {
        return GL_COMPRESSED_RGBA_FXT1_3DFX;
      } else {
        return GL_COMPRESSED_RGB_FXT1_3DFX;
      }
#endif
      break;

#ifdef OPENGLES
    case Texture::CM_pvr1_2bpp:
      if (Texture::has_alpha(format)) {
        return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG;
      } else {
        return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG;
      }

    case Texture::CM_pvr1_4bpp:
      if (Texture::has_alpha(format)) {
        return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG;
      } else {
        return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG;
      }
#else
    case Texture::CM_pvr1_2bpp:
    case Texture::CM_pvr1_4bpp:
      // PVRTC is an OpenGL ES extension only; error path via the switch exit.
      break;
#endif  // OPENGLES

    case Texture::CM_rgtc:
#ifndef OPENGLES
      if (format == Texture::F_luminance) {
        return GL_COMPRESSED_LUMINANCE_LATC1_EXT;
      } else if (format == Texture::F_luminance_alpha) {
        return GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT;
      } else if (tex->get_num_components() == 1) {
        return GL_COMPRESSED_RED_RGTC1;
      } else {
        return GL_COMPRESSED_RG_RGTC2;
      }
#endif
      break;

    case Texture::CM_etc1:
#ifdef OPENGLES
      return GL_ETC1_RGB8_OES;
#endif
      // Fall through - ETC2 is backward compatible

    case Texture::CM_etc2:
      if (format == Texture::F_rgbm) {
        return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
      } else if (format == Texture::F_srgb_alpha) {
        return GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC;
      } else if (format == Texture::F_srgb) {
        return GL_COMPRESSED_SRGB8_ETC2;
      } else if (Texture::has_alpha(format)) {
        return GL_COMPRESSED_RGBA8_ETC2_EAC;
      } else {
        return GL_COMPRESSED_RGB8_ETC2;
      }
      break;

    case Texture::CM_eac:
      // EAC: one- or two-channel 11-bit compression, signed or unsigned
      // depending on the texture's component type.
      if (Texture::is_unsigned(tex->get_component_type())) {
        if (tex->get_num_components() == 1) {
          return GL_COMPRESSED_R11_EAC;
        } else {
          return GL_COMPRESSED_RG11_EAC;
        }
      } else {
        if (tex->get_num_components() == 1) {
          return GL_COMPRESSED_SIGNED_R11_EAC;
        } else {
          return GL_COMPRESSED_SIGNED_RG11_EAC;
        }
      }
      break;

    case Texture::CM_default:
    case Texture::CM_off:
    case Texture::CM_dxt2:
    case Texture::CM_dxt4:
      // This shouldn't happen.
      nassertr(false, GL_RGB);
      break;
    }
  }

  // Uncompressed path: pick the external (client-side) pixel format.  BGR(A)
  // is preferred when the driver supports it, matching Panda's RAM image
  // component ordering.
  switch (format) {
#ifndef OPENGLES
  case Texture::F_color_index:
    return GL_COLOR_INDEX;
#endif
  case Texture::F_depth_component:
  case Texture::F_depth_component16:
  case Texture::F_depth_component24:
  case Texture::F_depth_component32:
    return GL_DEPTH_COMPONENT;
  case Texture::F_depth_stencil:
    return _supports_depth_stencil ? GL_DEPTH_STENCIL : GL_DEPTH_COMPONENT;
#ifndef OPENGLES
  case Texture::F_red:
  case Texture::F_r16:
  case Texture::F_r32:
    return GL_RED;
  case Texture::F_green:
    return GL_GREEN;
  case Texture::F_blue:
    return GL_BLUE;
#endif
  case Texture::F_alpha:
#ifdef OPENGLES
    return GL_ALPHA;
#else
    return _supports_luminance_texture ? GL_ALPHA : GL_RED;
#endif
#ifndef OPENGLES_1
  case Texture::F_rg:
  case Texture::F_rg16:
  case Texture::F_rg32:
    return GL_RG;
#endif
  case Texture::F_rgb:
  case Texture::F_rgb5:
  case Texture::F_rgb8:
  case Texture::F_rgb12:
  case Texture::F_rgb332:
  case Texture::F_rgb16:
  case Texture::F_rgb32:
  case Texture::F_srgb:
  case Texture::F_r11_g11_b10:
  case Texture::F_rgb9_e5:
#ifdef OPENGLES
    // OpenGL ES never supports BGR, even if _supports_bgr is true.
    return GL_RGB;
#else
    return _supports_bgr ? GL_BGR : GL_RGB;
#endif
  case Texture::F_rgba:
  case Texture::F_rgbm:
  case Texture::F_rgba4:
  case Texture::F_rgba5:
  case Texture::F_rgba8:
  case Texture::F_rgba12:
  case Texture::F_rgba16:
  case Texture::F_rgba32:
  case Texture::F_srgb_alpha:
  case Texture::F_rgb10_a2:
    return _supports_bgr ? GL_BGRA : GL_RGBA;
  case Texture::F_luminance:
  case Texture::F_sluminance:
#ifdef OPENGLES
    return GL_LUMINANCE;
#else
    return _supports_luminance_texture ? GL_LUMINANCE : GL_RED;
#endif
  case Texture::F_luminance_alphamask:
  case Texture::F_luminance_alpha:
  case Texture::F_sluminance_alpha:
#ifdef OPENGLES
    return GL_LUMINANCE_ALPHA;
#else
    return _supports_luminance_texture ? GL_LUMINANCE_ALPHA : GL_RG;
#endif
#ifndef OPENGLES_1
  case Texture::F_r8i:
  case Texture::F_r16i:
  case Texture::F_r32i:
    return GL_RED_INTEGER;
  case Texture::F_rg8i:
    return GL_RG_INTEGER;
  case Texture::F_rgb8i:
    return GL_RGB_INTEGER;
  case Texture::F_rgba8i:
    return GL_RGBA_INTEGER;
#endif
  default:
    break;
  }
  GLCAT.error()
    << "Invalid Texture::Format value in get_external_image_format(): "
    << format << "\n";
  return GL_RGB;
}

/**
 * Maps from the Texture's Format symbols to a suitable internal format for GL
 * textures.
 */
GLint CLP(GraphicsStateGuardian)::
get_internal_image_format(Texture *tex, bool force_sized) const {
  Texture::CompressionMode compression = tex->get_compression();
  if (compression == Texture::CM_default) {
    compression = (compressed_textures) ? Texture::CM_on : Texture::CM_off;
  }
  Texture::Format format = tex->get_format();
  if (tex->get_render_to_texture()) {
    // no compression for render targets
    compression = Texture::CM_off;
  }
  // Compressed 3D / 2D-array textures are not requested below (see the
  // !is_3d guards).
  bool is_3d = (tex->get_texture_type() == Texture::TT_3d_texture ||
                tex->get_texture_type() == Texture::TT_2d_texture_array);

  if (get_supports_compressed_texture_format(compression)) {
    switch (compression) {
    case Texture::CM_on:
      // The user asked for just generic compression.  OpenGL supports
      // requesting just generic compression, but we'd like to go ahead and
      // request a specific type (if we can figure out an appropriate choice),
      // since that makes saving the result as a pre-compressed texture more
      // dependable--this way, we will know which compression algorithm was
      // applied.
      switch (format) {
      case Texture::F_color_index:
      case Texture::F_depth_component:
      case Texture::F_depth_component16:
      case Texture::F_depth_component24:
      case Texture::F_depth_component32:
      case Texture::F_depth_stencil:
      case Texture::F_r8i:
      case Texture::F_rg8i:
      case Texture::F_rgb8i:
      case Texture::F_rgba8i:
      case Texture::F_r16i:
      case Texture::F_r32i:
      case Texture::F_r11_g11_b10:
      case Texture::F_rgb9_e5:
        // Unsupported; fall through to below.
        break;

      case Texture::F_rgbm:
      case Texture::F_rgba5:
      case Texture::F_rgb10_a2:
        if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) {
          return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
        }
        if (get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) {
          return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2;
        }
#ifndef OPENGLES
        if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) {
          return GL_COMPRESSED_RGBA_FXT1_3DFX;
        }
        return GL_COMPRESSED_RGBA;
#endif
        break;

      case Texture::F_rgba4:
#ifndef OPENGLES_1
        if (get_supports_compressed_texture_format(Texture::CM_dxt3) && !is_3d) {
          return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
        }
#endif
        if (get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) {
          return GL_COMPRESSED_RGBA8_ETC2_EAC;
        }
#ifndef OPENGLES
        if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) {
          return GL_COMPRESSED_RGBA_FXT1_3DFX;
        }
        return GL_COMPRESSED_RGBA;
#endif
        break;

      case Texture::F_rgba:
      case Texture::F_rgba8:
      case Texture::F_rgba12:
      case Texture::F_rgba16:
      case Texture::F_rgba32:
#ifndef OPENGLES_1
        if (get_supports_compressed_texture_format(Texture::CM_dxt5) && !is_3d) {
          return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
        }
#endif
        if
(get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) { return GL_COMPRESSED_RGBA8_ETC2_EAC; } #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGBA_FXT1_3DFX; } return GL_COMPRESSED_RGBA; #endif break; case Texture::F_rgb: case Texture::F_rgb5: case Texture::F_rgb8: case Texture::F_rgb12: case Texture::F_rgb332: case Texture::F_rgb16: case Texture::F_rgb32: if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) { return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } if (get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) { return GL_COMPRESSED_RGB8_ETC2; } #ifdef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_etc1) && !is_3d) { return GL_ETC1_RGB8_OES; } #else if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGB_FXT1_3DFX; } return GL_COMPRESSED_RGB; #endif break; case Texture::F_alpha: #ifndef OPENGLES_1 if (get_supports_compressed_texture_format(Texture::CM_dxt5) && !is_3d) { return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; } #endif #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGBA_FXT1_3DFX; } return GL_COMPRESSED_ALPHA; #endif break; case Texture::F_red: case Texture::F_green: case Texture::F_blue: case Texture::F_r16: case Texture::F_r32: #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_rgtc) && !is_3d) { return GL_COMPRESSED_RED_RGTC1; } #endif if (get_supports_compressed_texture_format(Texture::CM_eac) && !is_3d) { if (Texture::is_unsigned(tex->get_component_type())) { return GL_COMPRESSED_R11_EAC; } else { return GL_COMPRESSED_SIGNED_R11_EAC; } } if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) { return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGB_FXT1_3DFX; } return GL_COMPRESSED_RED; 
#endif break; case Texture::F_rg: case Texture::F_rg16: case Texture::F_rg32: #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_rgtc) && !is_3d) { return GL_COMPRESSED_RG_RGTC2; } #endif if (get_supports_compressed_texture_format(Texture::CM_eac) && !is_3d) { if (Texture::is_unsigned(tex->get_component_type())) { return GL_COMPRESSED_RG11_EAC; } else { return GL_COMPRESSED_SIGNED_RG11_EAC; } } if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) { return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGB_FXT1_3DFX; } return GL_COMPRESSED_RG; #endif break; case Texture::F_luminance: if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) { return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGB_FXT1_3DFX; } return GL_COMPRESSED_LUMINANCE; #endif break; case Texture::F_luminance_alpha: case Texture::F_luminance_alphamask: #ifndef OPENGLES_1 if (get_supports_compressed_texture_format(Texture::CM_dxt5) && !is_3d) { return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; } #endif #ifndef OPENGLES if (get_supports_compressed_texture_format(Texture::CM_fxt1) && !is_3d) { return GL_COMPRESSED_RGBA_FXT1_3DFX; } return GL_COMPRESSED_LUMINANCE_ALPHA; #endif break; #ifndef OPENGLES case Texture::F_srgb: if (get_supports_compressed_texture_format(Texture::CM_dxt1) && !is_3d) { return GL_COMPRESSED_SRGB_S3TC_DXT1_EXT; } if (get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) { return GL_COMPRESSED_SRGB8_ETC2; } return GL_COMPRESSED_SRGB; case Texture::F_srgb_alpha: if (get_supports_compressed_texture_format(Texture::CM_dxt5) && !is_3d) { return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT; } if (get_supports_compressed_texture_format(Texture::CM_etc2) && !is_3d) { return GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC; } return 
GL_COMPRESSED_SRGB_ALPHA; case Texture::F_sluminance: return GL_COMPRESSED_SLUMINANCE; case Texture::F_sluminance_alpha: return GL_COMPRESSED_SLUMINANCE_ALPHA; #else // For now, we don't support compressed sRGB textures in OpenGL ES. case Texture::F_srgb: case Texture::F_srgb_alpha: case Texture::F_sluminance: case Texture::F_sluminance_alpha: break; #endif } break; case Texture::CM_dxt1: #ifndef OPENGLES if (format == Texture::F_srgb_alpha) { return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT; } else if (format == Texture::F_srgb) { return GL_COMPRESSED_SRGB_S3TC_DXT1_EXT; } else #endif if (Texture::has_alpha(format)) { return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; } else { return GL_COMPRESSED_RGB_S3TC_DXT1_EXT; } case Texture::CM_dxt3: #ifndef OPENGLES if (format == Texture::F_srgb || format == Texture::F_srgb_alpha) { return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT; } #endif #ifndef OPENGLES_1 return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; #endif break; case Texture::CM_dxt5: #ifndef OPENGLES if (format == Texture::F_srgb || format == Texture::F_srgb_alpha) { return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT; } #endif #ifndef OPENGLES_1 return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; #endif case Texture::CM_fxt1: #ifndef OPENGLES if (Texture::has_alpha(format)) { return GL_COMPRESSED_RGBA_FXT1_3DFX; } else { return GL_COMPRESSED_RGB_FXT1_3DFX; } #endif break; #ifdef OPENGLES case Texture::CM_pvr1_2bpp: if (Texture::has_alpha(format)) { return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; } else { return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG; } case Texture::CM_pvr1_4bpp: if (Texture::has_alpha(format)) { return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; } else { return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG; } #else case Texture::CM_pvr1_2bpp: case Texture::CM_pvr1_4bpp: break; #endif case Texture::CM_rgtc: #ifndef OPENGLES if (format == Texture::F_luminance) { return GL_COMPRESSED_LUMINANCE_LATC1_EXT; } else if (format == Texture::F_luminance_alpha) { return GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT; } else if 
(tex->get_num_components() == 1) { return GL_COMPRESSED_RED_RGTC1; } else if (tex->get_num_components() == 2) { return GL_COMPRESSED_RG_RGTC2; } #endif break; case Texture::CM_etc1: #ifdef OPENGLES return GL_ETC1_RGB8_OES; #endif // Fall through - ETC2 is backward compatible case Texture::CM_etc2: if (format == Texture::F_rgbm) { return GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2; } else if (format == Texture::F_srgb_alpha) { return GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC; } else if (format == Texture::F_srgb) { return GL_COMPRESSED_SRGB8_ETC2; } else if (Texture::has_alpha(format)) { return GL_COMPRESSED_RGBA8_ETC2_EAC; } else { return GL_COMPRESSED_RGB8_ETC2; } break; case Texture::CM_eac: if (Texture::is_unsigned(tex->get_component_type())) { if (tex->get_num_components() == 1) { return GL_COMPRESSED_R11_EAC; } else { return GL_COMPRESSED_RG11_EAC; } } else { if (tex->get_num_components() == 1) { return GL_COMPRESSED_SIGNED_R11_EAC; } else { return GL_COMPRESSED_SIGNED_RG11_EAC; } } break; case Texture::CM_default: case Texture::CM_off: case Texture::CM_dxt2: case Texture::CM_dxt4: // No compression: fall through to below. break; } } switch (format) { #ifndef OPENGLES case Texture::F_color_index: return GL_COLOR_INDEX; #endif case Texture::F_depth_stencil: if (_supports_depth_stencil) { #ifndef OPENGLES if (tex->get_component_type() == Texture::T_float) { return GL_DEPTH32F_STENCIL8; } else #endif { return force_sized ? GL_DEPTH24_STENCIL8 : GL_DEPTH_STENCIL; } } // Fall through. case Texture::F_depth_component: #ifndef OPENGLES if (tex->get_component_type() == Texture::T_float) { return GL_DEPTH_COMPONENT32F; } else #endif { return force_sized ? 
GL_DEPTH_COMPONENT16 : GL_DEPTH_COMPONENT; } case Texture::F_depth_component16: #ifdef OPENGLES return GL_DEPTH_COMPONENT16_OES; #else return GL_DEPTH_COMPONENT16; #endif case Texture::F_depth_component24: #ifdef OPENGLES if (_supports_depth24) { return GL_DEPTH_COMPONENT24_OES; } else { return GL_DEPTH_COMPONENT16_OES; } #else return GL_DEPTH_COMPONENT24; #endif case Texture::F_depth_component32: #ifdef OPENGLES if (_supports_depth32) { return GL_DEPTH_COMPONENT32_OES; } else if (_supports_depth24) { return GL_DEPTH_COMPONENT24_OES; } else { return GL_DEPTH_COMPONENT16_OES; } #else if (tex->get_component_type() == Texture::T_float) { return GL_DEPTH_COMPONENT32F; } else { return GL_DEPTH_COMPONENT32; } #endif case Texture::F_rgba: case Texture::F_rgbm: #ifndef OPENGLES_1 if (tex->get_component_type() == Texture::T_float) { return GL_RGBA16F; } else #endif #ifdef OPENGLES { // In OpenGL ES, the internal format must match the external format. return _supports_bgr ? GL_BGRA : GL_RGBA; } #else if (tex->get_component_type() == Texture::T_unsigned_short) { return GL_RGBA16; } else if (tex->get_component_type() == Texture::T_short) { return GL_RGBA16_SNORM; } else if (tex->get_component_type() == Texture::T_byte) { return GL_RGBA8_SNORM; } else { return force_sized ? GL_RGBA8 : GL_RGBA; } #endif case Texture::F_rgba4: return GL_RGBA4; #ifdef OPENGLES case Texture::F_rgba8: return GL_RGBA8_OES; case Texture::F_rgba12: return force_sized ? 
GL_RGBA8 : GL_RGBA; #else case Texture::F_rgba8: if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGBA8; } else { return GL_RGBA8_SNORM; } case Texture::F_r8i: if (Texture::is_unsigned(tex->get_component_type())) { return GL_R8UI; } else { return GL_R8I; } case Texture::F_rg8i: if (Texture::is_unsigned(tex->get_component_type())) { return GL_RG8UI; } else { return GL_RG8I; } case Texture::F_rgb8i: if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGB8UI; } else { return GL_RGB8I; } case Texture::F_rgba8i: if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGBA8UI; } else { return GL_RGBA8I; } case Texture::F_rgba12: return GL_RGBA12; #endif // OPENGLES #ifndef OPENGLES case Texture::F_rgba16: if (tex->get_component_type() == Texture::T_float) { return GL_RGBA16F; } else if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGBA16; } else { return GL_RGBA16_SNORM; } case Texture::F_rgba32: return GL_RGBA32F; #endif // OPENGLES case Texture::F_rgb: switch (tex->get_component_type()) { case Texture::T_float: return GL_RGB16F; #ifndef OPENGLES case Texture::T_unsigned_short: return GL_RGB16; case Texture::T_short: return GL_RGB16_SNORM; case Texture::T_byte: return GL_RGB8_SNORM; #endif default: return force_sized ? GL_RGB8 : GL_RGB; } case Texture::F_rgb5: #ifdef OPENGLES // Close enough. return GL_RGB565_OES; #else return GL_RGB5; #endif case Texture::F_rgba5: return GL_RGB5_A1; #ifdef OPENGLES case Texture::F_rgb8: return GL_RGB8_OES; case Texture::F_rgb12: return force_sized ? 
GL_RGB8 : GL_RGB; case Texture::F_rgb16: return GL_RGB16F; #else case Texture::F_rgb8: if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGB8; } else { return GL_RGB8_SNORM; } case Texture::F_rgb12: return GL_RGB12; case Texture::F_rgb16: if (tex->get_component_type() == Texture::T_float) { return GL_RGB16F; } else if (Texture::is_unsigned(tex->get_component_type())) { return GL_RGB16; } else { return GL_RGB16_SNORM; } #endif // OPENGLES case Texture::F_rgb32: return GL_RGB32F; #ifndef OPENGLES case Texture::F_rgb332: return GL_R3_G3_B2; #endif #if defined(OPENGLES_2) case Texture::F_r16: return GL_R16F_EXT; case Texture::F_rg16: return GL_RG16F_EXT; #elif !defined(OPENGLES_1) case Texture::F_r16: if (tex->get_component_type() == Texture::T_float) { return GL_R16F; } else if (Texture::is_unsigned(tex->get_component_type())) { return GL_R16; } else { return GL_R16_SNORM; } case Texture::F_r16i: if (Texture::is_unsigned(tex->get_component_type())) { return GL_R16UI; } else { return GL_R16I; } case Texture::F_rg16: if (tex->get_component_type() == Texture::T_float) { return GL_RG16F; } else if (Texture::is_unsigned(tex->get_component_type())) { return GL_RG16; } else { return GL_RG16_SNORM; } #endif #ifndef OPENGLES_1 case Texture::F_r32: return GL_R32F; case Texture::F_rg32: return GL_RG32F; case Texture::F_red: case Texture::F_green: case Texture::F_blue: #ifndef OPENGLES if (!Texture::is_unsigned(tex->get_component_type())) { return GL_R8_SNORM; } else #endif { return force_sized ? GL_R8 : GL_RED; } #endif case Texture::F_alpha: #ifdef OPENGLES return force_sized ? GL_ALPHA8 : GL_ALPHA; #else if (_supports_luminance_texture) { return force_sized ? GL_ALPHA8 : GL_ALPHA; } else { return force_sized ? GL_R8 : GL_RED; } #endif case Texture::F_luminance: #ifdef OPENGLES return force_sized ? 
GL_LUMINANCE8 : GL_LUMINANCE; #else if (_supports_luminance_texture) { switch (tex->get_component_type()) { case Texture::T_float: case Texture::T_half_float: return GL_LUMINANCE16F_ARB; case Texture::T_short: return GL_LUMINANCE16_SNORM; case Texture::T_unsigned_short: return GL_LUMINANCE16; default: return force_sized ? GL_LUMINANCE8 : GL_LUMINANCE; } } else { switch (tex->get_component_type()) { case Texture::T_float: case Texture::T_half_float: return GL_R16F; case Texture::T_short: return GL_R16_SNORM; case Texture::T_unsigned_short: return GL_R16; default: return force_sized ? GL_R8 : GL_RED; } } #endif case Texture::F_luminance_alpha: case Texture::F_luminance_alphamask: #ifdef OPENGLES return force_sized ? GL_LUMINANCE8_ALPHA8 : GL_LUMINANCE_ALPHA; #else if (_supports_luminance_texture) { switch (tex->get_component_type()) { case Texture::T_float: case Texture::T_half_float: return GL_LUMINANCE_ALPHA16F_ARB; case Texture::T_short: return GL_LUMINANCE16_SNORM; case Texture::T_unsigned_short: return GL_LUMINANCE16_ALPHA16; default: return force_sized ? GL_LUMINANCE8_ALPHA8 : GL_LUMINANCE_ALPHA; } } else { switch (tex->get_component_type()) { case Texture::T_float: case Texture::T_half_float: return GL_RG16F; case Texture::T_short: return GL_RG16_SNORM; case Texture::T_unsigned_short: return GL_RG16; default: return force_sized ? GL_RG8 : GL_RG; } } #endif #ifndef OPENGLES_1 case Texture::F_rg: return force_sized ? 
GL_RG8 : GL_RG;
#endif

#ifndef OPENGLES_1
  case Texture::F_srgb:
#ifndef OPENGLES
    return GL_SRGB8;
#endif
    // NOTE(review): on OpenGL ES 2+, F_srgb has no return here and falls
    // through to the F_srgb_alpha case below -- confirm this is intended.
  case Texture::F_srgb_alpha:
    return GL_SRGB8_ALPHA8;
#endif

#ifndef OPENGLES
  case Texture::F_sluminance:
    return GL_SLUMINANCE8;
  case Texture::F_sluminance_alpha:
    return GL_SLUMINANCE8_ALPHA8;
#endif

#ifndef OPENGLES
  case Texture::F_r32i:
    return GL_R32I;
#endif

#ifndef OPENGLES_1
  case Texture::F_r11_g11_b10:
    return GL_R11F_G11F_B10F;
  case Texture::F_rgb9_e5:
    return GL_RGB9_E5;
  case Texture::F_rgb10_a2:
    return GL_RGB10_A2;
#endif

  default:
    // Unknown format enum: complain, and fall back to plain RGB.
    GLCAT.error()
      << "Invalid image format in get_internal_image_format(): "
      << (int)format << "\n";
    return force_sized ? GL_RGB8 : GL_RGB;
  }
}

/**
 * Returns true if the indicated GL minfilter type represents a mipmap format,
 * false otherwise.
 */
bool CLP(GraphicsStateGuardian)::
is_mipmap_filter(GLenum min_filter) {
  switch (min_filter) {
  // All four GL_*_MIPMAP_* minification filters sample from mipmap levels.
  case GL_NEAREST_MIPMAP_NEAREST:
  case GL_LINEAR_MIPMAP_NEAREST:
  case GL_NEAREST_MIPMAP_LINEAR:
  case GL_LINEAR_MIPMAP_LINEAR:
    return true;

  default:
    return false;
  }
}

/**
 * Returns true if the indicated GL internal format represents a compressed
 * texture format, false otherwise.
*/
bool CLP(GraphicsStateGuardian)::
is_compressed_format(GLenum format) {
  switch (format) {
  // S3TC/DXT1 is recognized on both desktop GL and GLES.
  case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
  case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
#ifdef OPENGLES
  // The PVRTC formats are only defined in the OpenGL ES headers.
  case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
  case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
  case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
  case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG:
#else
  // The remaining formats are only defined in the desktop OpenGL headers.
  case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
  case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
  case GL_COMPRESSED_RGB_FXT1_3DFX:
  case GL_COMPRESSED_RGBA_FXT1_3DFX:
  case GL_COMPRESSED_RED_RGTC1:
  case GL_COMPRESSED_SIGNED_RED_RGTC1:
  case GL_COMPRESSED_RG_RGTC2:
  case GL_COMPRESSED_SIGNED_RG_RGTC2:
  case GL_COMPRESSED_LUMINANCE_LATC1_EXT:
  case GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT:
  case GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT:
  case GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT:
  case GL_COMPRESSED_RGB:
  case GL_COMPRESSED_SRGB_EXT:
  case GL_COMPRESSED_RGBA:
  case GL_COMPRESSED_SRGB_ALPHA_EXT:
  case GL_COMPRESSED_ALPHA:
  case GL_COMPRESSED_LUMINANCE:
  case GL_COMPRESSED_LUMINANCE_ALPHA:
#endif
    return true;

  default:
    return false;
  }
}

/**
 * Maps from the texture stage's mode types to the corresponding OpenGL ids
 */
GLint CLP(GraphicsStateGuardian)::
get_texture_apply_mode_type(TextureStage::Mode am) {
#ifdef SUPPORT_FIXED_FUNCTION
  switch (am) {
  case TextureStage::M_modulate:
    return GL_MODULATE;
  case TextureStage::M_decal:
    return GL_DECAL;
  case TextureStage::M_blend:
    return GL_BLEND;
  case TextureStage::M_replace:
    return GL_REPLACE;
  case TextureStage::M_add:
    return GL_ADD;
  case TextureStage::M_combine:
    return GL_COMBINE;
  // The *_color_scale / glow / gloss variants have no dedicated GL mode;
  // they reuse the closest fixed-function equivalent.
  case TextureStage::M_blend_color_scale:
    return GL_BLEND;
  case TextureStage::M_modulate_glow:
    return GL_MODULATE;
  case TextureStage::M_modulate_gloss:
    return GL_MODULATE;
  default:
    // Other modes shouldn't get here.  Fall through and error.
    break;
  }

  GLCAT.error() << "Invalid TextureStage::Mode value" << endl;
  return GL_MODULATE;
#else
  // Without the fixed-function pipeline there is nothing to map to.
  return 0;
#endif
}

/**
 * Maps from the texture stage's CombineMode types to the corresponding OpenGL
 * ids
 */
GLint CLP(GraphicsStateGuardian)::
get_texture_combine_type(TextureStage::CombineMode cm) {
#ifdef SUPPORT_FIXED_FUNCTION
  switch (cm) {
  case TextureStage::CM_undefined: // fall through
  case TextureStage::CM_replace:
    return GL_REPLACE;
  case TextureStage::CM_modulate:
    return GL_MODULATE;
  case TextureStage::CM_add:
    return GL_ADD;
  case TextureStage::CM_add_signed:
    return GL_ADD_SIGNED;
  case TextureStage::CM_interpolate:
    return GL_INTERPOLATE;
  case TextureStage::CM_subtract:
    return GL_SUBTRACT;
  case TextureStage::CM_dot3_rgb:
    return GL_DOT3_RGB;
  case TextureStage::CM_dot3_rgba:
    return GL_DOT3_RGBA;
  }

  GLCAT.error() << "Invalid TextureStage::CombineMode value" << endl;
#endif
  return GL_REPLACE;
}

/**
 * Maps from the texture stage's CombineSource types to the corresponding
 * OpenGL ids
 */
GLint CLP(GraphicsStateGuardian)::
get_texture_src_type(TextureStage::CombineSource cs,
                     int last_stage, int last_saved_result,
                     int this_stage) const {
#ifdef SUPPORT_FIXED_FUNCTION
  switch (cs) {
  case TextureStage::CS_undefined: // fall through
  case TextureStage::CS_texture:
    return GL_TEXTURE;
  case TextureStage::CS_constant:
    return GL_CONSTANT;
  case TextureStage::CS_primary_color:
    return GL_PRIMARY_COLOR;
  case TextureStage::CS_constant_color_scale:
    return GL_CONSTANT;

  case TextureStage::CS_previous:
    if (last_stage == this_stage - 1) {
      // The previous stage is the immediately preceding unit.
      return GL_PREVIOUS;
    } else if (last_stage == -1) {
      // There is no previous stage; use the untextured fragment color.
      return GL_PRIMARY_COLOR;
    } else if (_supports_texture_saved_result) {
      // Reach back to a non-adjacent stage via the crossbar extension.
      return GL_TEXTURE0 + last_stage;
    } else {
      GLCAT.warning()
        << "Current OpenGL driver does not support texture crossbar blending.\n";
      return GL_PRIMARY_COLOR;
    }

  case TextureStage::CS_last_saved_result:
    if (last_saved_result == this_stage - 1) {
      return GL_PREVIOUS;
    } else if (last_saved_result == -1) {
      return GL_PRIMARY_COLOR;
    } else if (_supports_texture_saved_result) {
      return GL_TEXTURE0 + last_saved_result;
    } else {
      GLCAT.warning()
        << "Current OpenGL driver does not support texture crossbar blending.\n";
      return GL_PRIMARY_COLOR;
    }
  }

  GLCAT.error() << "Invalid TextureStage::CombineSource value" << endl;
#endif
  return GL_TEXTURE;
}

/**
 * Maps from the texture stage's CombineOperand types to the corresponding
 * OpenGL ids
 */
GLint CLP(GraphicsStateGuardian)::
get_texture_operand_type(TextureStage::CombineOperand co) {
  switch (co) {
  case TextureStage::CO_undefined: // fall through
  case TextureStage::CO_src_alpha:
    return GL_SRC_ALPHA;
  case TextureStage::CO_one_minus_src_alpha:
    return GL_ONE_MINUS_SRC_ALPHA;
  case TextureStage::CO_src_color:
    return GL_SRC_COLOR;
  case TextureStage::CO_one_minus_src_color:
    return GL_ONE_MINUS_SRC_COLOR;
  }

  GLCAT.error() << "Invalid TextureStage::CombineOperand value" << endl;
  return GL_SRC_COLOR;
}

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Maps from the fog types to gl version
 */
GLenum CLP(GraphicsStateGuardian)::
get_fog_mode_type(Fog::Mode m) {
  switch(m) {
  case Fog::M_linear:
    return GL_LINEAR;
  case Fog::M_exponential:
    return GL_EXP;
  case Fog::M_exponential_squared:
    return GL_EXP2;
  /*
  case Fog::M_spline:
    return GL_FOG_FUNC_SGIS;
  */

  default:
    GLCAT.error() << "Invalid Fog::Mode value" << endl;
    return GL_EXP;
  }
}
#endif

/**
 * Maps from ColorBlendAttrib::Mode to glBlendEquation value.
*/
GLenum CLP(GraphicsStateGuardian)::
get_blend_equation_type(ColorBlendAttrib::Mode mode) {
  switch (mode) {
  case ColorBlendAttrib::M_none:
  case ColorBlendAttrib::M_add:
    return GL_FUNC_ADD;

  case ColorBlendAttrib::M_subtract:
    return GL_FUNC_SUBTRACT;

  case ColorBlendAttrib::M_inv_subtract:
    return GL_FUNC_REVERSE_SUBTRACT;

#ifdef OPENGLES
  // GLES exposes min/max only via the EXT_blend_minmax suffixed tokens.
  case ColorBlendAttrib::M_min:
    return GL_MIN_EXT;

  case ColorBlendAttrib::M_max:
    return GL_MAX_EXT;
#else
  case ColorBlendAttrib::M_min:
    return GL_MIN;

  case ColorBlendAttrib::M_max:
    return GL_MAX;
#endif
  }

  GLCAT.error() << "Unknown color blend mode " << (int)mode << endl;
  return GL_FUNC_ADD;
}

/**
 * Maps from ColorBlendAttrib::Operand to glBlendFunc value.
 */
GLenum CLP(GraphicsStateGuardian)::
get_blend_func(ColorBlendAttrib::Operand operand) {
  switch (operand) {
  case ColorBlendAttrib::O_zero:
    return GL_ZERO;

  case ColorBlendAttrib::O_one:
    return GL_ONE;

  case ColorBlendAttrib::O_incoming_color:
    return GL_SRC_COLOR;

  case ColorBlendAttrib::O_one_minus_incoming_color:
    return GL_ONE_MINUS_SRC_COLOR;

  case ColorBlendAttrib::O_fbuffer_color:
    return GL_DST_COLOR;

  case ColorBlendAttrib::O_one_minus_fbuffer_color:
    return GL_ONE_MINUS_DST_COLOR;

  case ColorBlendAttrib::O_incoming_alpha:
    return GL_SRC_ALPHA;

  case ColorBlendAttrib::O_one_minus_incoming_alpha:
    return GL_ONE_MINUS_SRC_ALPHA;

  case ColorBlendAttrib::O_fbuffer_alpha:
    return GL_DST_ALPHA;

  case ColorBlendAttrib::O_one_minus_fbuffer_alpha:
    return GL_ONE_MINUS_DST_ALPHA;

#ifdef OPENGLES_1
  // OpenGL ES 1 has no constant blend factor.
  case ColorBlendAttrib::O_constant_color:
  case ColorBlendAttrib::O_color_scale:
  case ColorBlendAttrib::O_one_minus_constant_color:
  case ColorBlendAttrib::O_one_minus_color_scale:
  case ColorBlendAttrib::O_constant_alpha:
  case ColorBlendAttrib::O_alpha_scale:
  case ColorBlendAttrib::O_one_minus_constant_alpha:
  case ColorBlendAttrib::O_one_minus_alpha_scale:
    break;

  // No dual-source blending, either.
  case ColorBlendAttrib::O_incoming1_color:
  case ColorBlendAttrib::O_one_minus_incoming1_color:
  case ColorBlendAttrib::O_incoming1_alpha:
  case ColorBlendAttrib::O_one_minus_incoming1_alpha:
    break;
#else
  case ColorBlendAttrib::O_constant_color:
  case ColorBlendAttrib::O_color_scale:
    return GL_CONSTANT_COLOR;

  case ColorBlendAttrib::O_one_minus_constant_color:
  case ColorBlendAttrib::O_one_minus_color_scale:
    return GL_ONE_MINUS_CONSTANT_COLOR;

  case ColorBlendAttrib::O_constant_alpha:
  case ColorBlendAttrib::O_alpha_scale:
    return GL_CONSTANT_ALPHA;

  case ColorBlendAttrib::O_one_minus_constant_alpha:
  case ColorBlendAttrib::O_one_minus_alpha_scale:
    return GL_ONE_MINUS_CONSTANT_ALPHA;

  case ColorBlendAttrib::O_incoming1_color:
    return GL_SRC1_COLOR;

  case ColorBlendAttrib::O_one_minus_incoming1_color:
    return GL_ONE_MINUS_SRC1_COLOR;

  case ColorBlendAttrib::O_incoming1_alpha:
    return GL_SRC1_ALPHA;

  case ColorBlendAttrib::O_one_minus_incoming1_alpha:
    return GL_ONE_MINUS_SRC1_ALPHA;
#endif

  case ColorBlendAttrib::O_incoming_color_saturate:
    return GL_SRC_ALPHA_SATURATE;
  }

  GLCAT.error() << "Unknown color blend operand " << (int)operand << endl;
  return GL_ZERO;
}

/**
 * Maps from UsageHint to the GL symbol.
 */
GLenum CLP(GraphicsStateGuardian)::
get_usage(Geom::UsageHint usage_hint) {
  switch (usage_hint) {
  case Geom::UH_stream:
#ifdef OPENGLES_1
    // GLES 1 has no GL_STREAM_DRAW; GL_DYNAMIC_DRAW is the closest hint.
    return GL_DYNAMIC_DRAW;
#else
    return GL_STREAM_DRAW;
#endif  // OPENGLES

  case Geom::UH_static:
  case Geom::UH_unspecified:
    return GL_STATIC_DRAW;

  case Geom::UH_dynamic:
    return GL_DYNAMIC_DRAW;

  case Geom::UH_client:
    // UH_client means "don't use a buffer object at all"; fall through to
    // the error below if someone asks anyway.
    break;
  }

  GLCAT.error() << "Unexpected usage_hint " << (int)usage_hint << endl;
  return GL_STATIC_DRAW;
}

#ifndef NDEBUG
/**
 * Returns a string describing a compression format.
*/
const char *CLP(GraphicsStateGuardian)::
get_compressed_format_string(GLenum format) {
  // The hex values are used directly (rather than the GL_* symbols) so that
  // every known format can be named regardless of which headers were
  // available at compile time.  Returns nullptr for unrecognized values.
  switch (format) {
  case 0x83F0: return "GL_COMPRESSED_RGB_S3TC_DXT1_EXT";
  case 0x83F1: return "GL_COMPRESSED_RGBA_S3TC_DXT1_EXT";
  case 0x83F2: return "GL_COMPRESSED_RGBA_S3TC_DXT3_EXT";
  case 0x83F3: return "GL_COMPRESSED_RGBA_S3TC_DXT5_EXT";
  case 0x86B0: return "GL_COMPRESSED_RGB_FXT1_3DFX";
  case 0x86B1: return "GL_COMPRESSED_RGBA_FXT1_3DFX";
  case 0x88EE: return "GL_ETC1_SRGB8_NV";
  case 0x8A54: return "GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT";
  case 0x8A55: return "GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT";
  case 0x8A56: return "GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT";
  case 0x8A57: return "GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT";
  case 0x8B90: return "GL_PALETTE4_RGB8_OES";
  case 0x8B91: return "GL_PALETTE4_RGBA8_OES";
  case 0x8B92: return "GL_PALETTE4_R5_G6_B5_OES";
  case 0x8B93: return "GL_PALETTE4_RGBA4_OES";
  case 0x8B94: return "GL_PALETTE4_RGB5_A1_OES";
  case 0x8B95: return "GL_PALETTE8_RGB8_OES";
  case 0x8B96: return "GL_PALETTE8_RGBA8_OES";
  case 0x8B97: return "GL_PALETTE8_R5_G6_B5_OES";
  case 0x8B98: return "GL_PALETTE8_RGBA4_OES";
  case 0x8B99: return "GL_PALETTE8_RGB5_A1_OES";
  case 0x8C00: return "GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG";
  case 0x8C01: return "GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG";
  case 0x8C02: return "GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG";
  case 0x8C03: return "GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG";
  case 0x8C48: return "GL_COMPRESSED_SRGB_EXT";
  case 0x8C49: return "GL_COMPRESSED_SRGB_ALPHA_EXT";
  case 0x8C4A: return "GL_COMPRESSED_SLUMINANCE_EXT";
  case 0x8C4B: return "GL_COMPRESSED_SLUMINANCE_ALPHA_EXT";
  case 0x8C4C: return "GL_COMPRESSED_SRGB_S3TC_DXT1_EXT";
  case 0x8C4D: return "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT";
  case 0x8C4E: return "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT";
  case 0x8C4F: return "GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT";
  case 0x8C70: return "GL_COMPRESSED_LUMINANCE_LATC1_EXT";
  case 0x8C71: return "GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT";
  case 0x8C72: return "GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT";
  case 0x8C73: return "GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT";
  case 0x8D64: return "GL_ETC1_RGB8_OES";
  case 0x8DBB: return "GL_COMPRESSED_RED_RGTC1";
  case 0x8DBC: return "GL_COMPRESSED_SIGNED_RED_RGTC1";
  case 0x8DBD: return "GL_COMPRESSED_RG_RGTC2";
  case 0x8DBE: return "GL_COMPRESSED_SIGNED_RG_RGTC2";
  case 0x8E8C: return "GL_COMPRESSED_RGBA_BPTC_UNORM";
  case 0x8E8D: return "GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM";
  case 0x8E8E: return "GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT";
  case 0x8E8F: return "GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT";
  case 0x9137: return "GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG";
  case 0x9138: return "GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG";
  case 0x9270: return "GL_COMPRESSED_R11_EAC";
  case 0x9271: return "GL_COMPRESSED_SIGNED_R11_EAC";
  case 0x9272: return "GL_COMPRESSED_RG11_EAC";
  case 0x9273: return "GL_COMPRESSED_SIGNED_RG11_EAC";
  case 0x9274: return "GL_COMPRESSED_RGB8_ETC2";
  case 0x9275: return "GL_COMPRESSED_SRGB8_ETC2";
  case 0x9276: return "GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2";
  case 0x9277: return "GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2";
  case 0x9278: return "GL_COMPRESSED_RGBA8_ETC2_EAC";
  case 0x9279: return "GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC";
  case 0x93B0: return "GL_COMPRESSED_RGBA_ASTC_4x4_KHR";
  case 0x93B1: return "GL_COMPRESSED_RGBA_ASTC_5x4_KHR";
  case 0x93B2: return "GL_COMPRESSED_RGBA_ASTC_5x5_KHR";
  case 0x93B3: return "GL_COMPRESSED_RGBA_ASTC_6x5_KHR";
  case 0x93B4: return "GL_COMPRESSED_RGBA_ASTC_6x6_KHR";
  case 0x93B5: return "GL_COMPRESSED_RGBA_ASTC_8x5_KHR";
  case 0x93B6: return "GL_COMPRESSED_RGBA_ASTC_8x6_KHR";
  case 0x93B7: return "GL_COMPRESSED_RGBA_ASTC_8x8_KHR";
  case 0x93B8: return "GL_COMPRESSED_RGBA_ASTC_10x5_KHR";
  case 0x93B9: return "GL_COMPRESSED_RGBA_ASTC_10x6_KHR";
  case 0x93BA: return "GL_COMPRESSED_RGBA_ASTC_10x8_KHR";
  case 0x93BB: return "GL_COMPRESSED_RGBA_ASTC_10x10_KHR";
  case 0x93BC: return "GL_COMPRESSED_RGBA_ASTC_12x10_KHR";
  case 0x93BD: return "GL_COMPRESSED_RGBA_ASTC_12x12_KHR";
  case 0x93C0: return "GL_COMPRESSED_RGBA_ASTC_3x3x3_OES";
  case 0x93C1: return "GL_COMPRESSED_RGBA_ASTC_4x3x3_OES";
  case 0x93C2: return "GL_COMPRESSED_RGBA_ASTC_4x4x3_OES";
  case 0x93C3: return "GL_COMPRESSED_RGBA_ASTC_4x4x4_OES";
  case 0x93C4: return "GL_COMPRESSED_RGBA_ASTC_5x4x4_OES";
  case 0x93C5: return "GL_COMPRESSED_RGBA_ASTC_5x5x4_OES";
  case 0x93C6: return "GL_COMPRESSED_RGBA_ASTC_5x5x5_OES";
  case 0x93C7: return "GL_COMPRESSED_RGBA_ASTC_6x5x5_OES";
  case 0x93C8: return "GL_COMPRESSED_RGBA_ASTC_6x6x5_OES";
  case 0x93C9: return "GL_COMPRESSED_RGBA_ASTC_6x6x6_OES";
  case 0x93D0: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR";
  case 0x93D1: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR";
  case 0x93D2: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR";
  case 0x93D3: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR";
  case 0x93D4: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR";
  case 0x93D5: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR";
  case 0x93D6: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR";
  case 0x93D7: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR";
  case 0x93D8: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR";
  case 0x93D9: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR";
  case 0x93DA: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR";
  case 0x93DB: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR";
  case 0x93DC: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR";
  case 0x93DD: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR";
  case 0x93E0: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES";
  case 0x93E1: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES";
  case 0x93E2: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES";
  case 0x93E3: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES";
  case 0x93E4: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES";
  case 0x93E5: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES";
  case 0x93E6: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES";
  case 0x93E7: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES";
  case 0x93E8: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES";
  case 0x93E9: return "GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES";
  case 0x93F0: return "GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG";
  case 0x93F1: return "GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG";

  default:
    return nullptr;
  }
}
#endif

/**
 * Returns the value that should be issued as the light's color, as
 * scaled by the current value of _light_color_scale, in the case of
 * color_scale_via_lighting.
 */
LVecBase4 CLP(GraphicsStateGuardian)::
get_light_color(Light *light) const {
#ifndef NDEBUG
  if (_show_texture_usage) {
    // In show_texture_usage mode, all lights are white, so as not to
    // contaminate the texture color.
    return LVecBase4(1.0, 1.0, 1.0, 1.0);
  }
#endif  // NDEBUG

  const LColor &c = light->get_color();
  LVecBase4 light_color(c[0] * _light_color_scale[0],
                        c[1] * _light_color_scale[1],
                        c[2] * _light_color_scale[2],
                        c[3] * _light_color_scale[3]);
  return light_color;
}

/**
 * Called by clear_state_and_transform() to ensure that the current modelview
 * and projection matrices are properly loaded in the graphics state, after a
 * callback might have mucked them up.
 */
void CLP(GraphicsStateGuardian)::
reissue_transforms() {
  prepare_lens();
  do_issue_transform();
  _active_texture_stage = -1;

#ifndef OPENGLES_1
  // Might also want to reissue the vertex format, for good measure.
  _current_vertex_format.clear();
  memset(_vertex_attrib_columns, 0, sizeof(const GeomVertexColumn *) * 32);
#endif

  // Some libraries (Kivy) leave their buffers bound.  How clumsy of them.
if (_supports_buffers) { _glBindBuffer(GL_ARRAY_BUFFER, 0); _glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); _current_vbuffer_index = 0; _current_ibuffer_index = 0; } #ifndef OPENGLES if (_supports_glsl) { _glDisableVertexAttribArray(0); _glDisableVertexAttribArray(1); } #endif // Since this is called by clear_state_and_transform(), we also should reset // the states that won't automatically be respecified when clearing the // state mask. _active_color_write_mask = ColorWriteAttrib::C_all; glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); if (_dithering_enabled) { glEnable(GL_DITHER); } else { glDisable(GL_DITHER); } if (_depth_test_enabled) { glEnable(GL_DEPTH_TEST); } else { glDisable(GL_DEPTH_TEST); } if (_stencil_test_enabled) { glEnable(GL_STENCIL_TEST); } else { glDisable(GL_STENCIL_TEST); } if (_blend_enabled) { glEnable(GL_BLEND); } else { glDisable(GL_BLEND); } #ifndef OPENGLES_2 if (_multisample_mode != 0) { glEnable(GL_MULTISAMPLE); } else { glDisable(GL_MULTISAMPLE); glDisable(GL_SAMPLE_ALPHA_TO_ONE); glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); } if (_line_smooth_enabled) { glEnable(GL_LINE_SMOOTH); } else { glDisable(GL_LINE_SMOOTH); } #endif #ifndef OPENGLES if (_polygon_smooth_enabled) { glEnable(GL_POLYGON_SMOOTH); } else { glDisable(GL_POLYGON_SMOOTH); } #endif #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { if (_alpha_test_enabled) { glEnable(GL_ALPHA_TEST); } else { glDisable(GL_ALPHA_TEST); } if (_point_smooth_enabled) { glEnable(GL_POINT_SMOOTH); } else { glDisable(GL_POINT_SMOOTH); } } #endif } #ifdef SUPPORT_FIXED_FUNCTION /** * Intended to be overridden by a derived class to enable or disable the use * of lighting overall. This is called by do_issue_light() according to * whether any lights are in use or not. 
*/ void CLP(GraphicsStateGuardian):: enable_lighting(bool enable) { nassertv(has_fixed_function_pipeline()); // static PStatCollector // _draw_set_state_light_enable_lighting_pcollector("Draw:Set // State:Light:Enable lighting"); PStatGPUTimer timer(this, // _draw_set_state_light_enable_lighting_pcollector); if (enable) { glEnable(GL_LIGHTING); } else { glDisable(GL_LIGHTING); } } #endif // SUPPORT_FIXED_FUNCTION #ifdef SUPPORT_FIXED_FUNCTION /** * Intended to be overridden by a derived class to indicate the color of the * ambient light that should be in effect. This is called by do_issue_light() * after all other lights have been enabled or disabled. */ void CLP(GraphicsStateGuardian):: set_ambient_light(const LColor &color) { nassertv(has_fixed_function_pipeline()); // static PStatCollector _draw_set_state_light_ambient_pcollector("Draw:Set // State:Light:Ambient"); PStatGPUTimer timer(this, // _draw_set_state_light_ambient_pcollector); LColor c = color; c.set(c[0] * _light_color_scale[0], c[1] * _light_color_scale[1], c[2] * _light_color_scale[2], c[3] * _light_color_scale[3]); call_glLightModelfv(GL_LIGHT_MODEL_AMBIENT, c); } #endif // SUPPORT_FIXED_FUNCTION #ifdef SUPPORT_FIXED_FUNCTION /** * Intended to be overridden by a derived class to enable the indicated light * id. A specific Light will already have been bound to this id via * bind_light(). 
 */
void CLP(GraphicsStateGuardian)::
enable_light(int light_id, bool enable) {
  nassertv(has_fixed_function_pipeline());
  // static PStatCollector
  // _draw_set_state_light_enable_light_pcollector("Draw:Set
  // State:Light:Enable light"); PStatGPUTimer timer(this,
  // _draw_set_state_light_enable_light_pcollector);

  // get_light_id() maps the abstract light index to GL_LIGHT0 + n.
  if (enable) {
    glEnable(get_light_id(light_id));
  } else {
    glDisable(get_light_id(light_id));
  }
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called immediately before bind_light() is called, this is intended to
 * provide the derived class a hook in which to set up some state (like
 * transform) that might apply to several lights.
 *
 * The sequence is: begin_bind_lights() will be called, then one or more
 * bind_light() calls, then end_bind_lights().
 */
void CLP(GraphicsStateGuardian)::
begin_bind_lights() {
  nassertv(has_fixed_function_pipeline());
  // static PStatCollector
  // _draw_set_state_light_begin_bind_pcollector("Draw:Set State:Light:Begin
  // bind"); PStatGPUTimer timer(this,
  // _draw_set_state_light_begin_bind_pcollector);

  // We need to temporarily load a new matrix so we can define the light in a
  // known coordinate system.  We pick the transform of the root.
  // (Alternatively, we could leave the current transform where it is and
  // compute the light position relative to that transform instead of relative
  // to the root, by composing with the matrix computed by
  // _internal_transform->invert_compose(render_transform).  But I think
  // loading a completely new matrix is simpler.)
  CPT(TransformState) render_transform =
    _cs_transform->compose(_scene_setup->get_world_transform());

  // Push so that end_bind_lights() can restore the previous modelview matrix.
  glMatrixMode(GL_MODELVIEW);
  glPushMatrix();
  call_glLoadMatrix(render_transform->get_mat());
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called after bind_light() has been called one or more times (but
 * before any geometry is issued or additional state is changed), this is
 * intended to clean up any temporary changes to the state that may have been
 * made by begin_bind_lights().
 */
void CLP(GraphicsStateGuardian)::
end_bind_lights() {
  nassertv(has_fixed_function_pipeline());
  // static PStatCollector _draw_set_state_light_end_bind_pcollector("Draw:Set
  // State:Light:End bind"); PStatGPUTimer timer(this,
  // _draw_set_state_light_end_bind_pcollector);

  // Undo the glPushMatrix() performed by begin_bind_lights().
  glMatrixMode(GL_MODELVIEW);
  glPopMatrix();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Intended to be overridden by a derived class to enable the indicated
 * clip_plane id.  A specific PlaneNode will already have been bound to this
 * id via bind_clip_plane().
 */
void CLP(GraphicsStateGuardian)::
enable_clip_plane(int plane_id, bool enable) {
  nassertv(has_fixed_function_pipeline());

  // get_clip_plane_id() maps the abstract plane index to GL_CLIP_PLANE0 + n.
  if (enable) {
    glEnable(get_clip_plane_id(plane_id));
  } else {
    glDisable(get_clip_plane_id(plane_id));
  }
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called immediately before bind_clip_plane() is called, this is intended to
 * provide the derived class a hook in which to set up some state (like
 * transform) that might apply to several clip_planes.
 *
 * The sequence is: begin_bind_clip_planes() will be called, then one or more
 * bind_clip_plane() calls, then end_bind_clip_planes().
 */
void CLP(GraphicsStateGuardian)::
begin_bind_clip_planes() {
  nassertv(has_fixed_function_pipeline());

  // We need to temporarily load a new matrix so we can define the clip_plane
  // in a known coordinate system.  We pick the transform of the root.
  // (Alternatively, we could leave the current transform where it is and
  // compute the clip_plane position relative to that transform instead of
  // relative to the root, by composing with the matrix computed by
  // _internal_transform->invert_compose(render_transform).  But I think
  // loading a completely new matrix is simpler.)
  CPT(TransformState) render_transform =
    _cs_transform->compose(_scene_setup->get_world_transform());

  // Push so that end_bind_clip_planes() can restore the previous matrix.
  glMatrixMode(GL_MODELVIEW);
  glPushMatrix();
  call_glLoadMatrix(render_transform->get_mat());
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called the first time a particular clip_plane has been bound to a given id
 * within a frame, this should set up the associated hardware clip_plane with
 * the clip_plane's properties.
 */
void CLP(GraphicsStateGuardian)::
bind_clip_plane(const NodePath &plane, int plane_id) {
  nassertv(has_fixed_function_pipeline());

  GLenum id = get_clip_plane_id(plane_id);

  // Express the plane in the coordinate space of the scene root, which is
  // the space begin_bind_clip_planes() loaded onto the modelview stack.
  CPT(TransformState) transform = plane.get_transform(_scene_setup->get_scene_root().get_parent());
  const PlaneNode *plane_node;
  DCAST_INTO_V(plane_node, plane.node());
  LPlane xformed_plane = plane_node->get_plane() * transform->get_mat();

#ifdef OPENGLES
  // OpenGL ES uses a single-precision call.
  LPlanef single_plane(LCAST(float, xformed_plane));
  glClipPlanef(id, single_plane.get_data());
#else
  // Mainline OpenGL uses a double-precision call.
  LPlaned double_plane(LCAST(double, xformed_plane));
  glClipPlane(id, double_plane.get_data());
#endif  // OPENGLES

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Called after bind_clip_plane() has been called one or more times
 * (but before any geometry is issued or additional state is changed), this is
 * intended to clean up any temporary changes to the state that may have been
 * made by begin_bind_clip_planes().
 */
void CLP(GraphicsStateGuardian)::
end_bind_clip_planes() {
  nassertv(has_fixed_function_pipeline());

  // Undo the glPushMatrix() performed by begin_bind_clip_planes().
  glMatrixMode(GL_MODELVIEW);
  glPopMatrix();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Simultaneously resets the render state and the transform state.
 *
 * This transform specified is the "internal" net transform, already converted
 * into the GSG's internal coordinate space by composing it to
 * get_cs_transform().  (Previously, this used to be the "external" net
 * transform, with the assumption that that GSG would convert it internally,
 * but that is no longer the case.)
 *
 * Special case: if (state==NULL), then the target state is already stored in
 * _target.
 */
void CLP(GraphicsStateGuardian)::
set_state_and_transform(const RenderState *target,
                        const TransformState *transform) {
  report_my_gl_errors();
#ifndef NDEBUG
  if (gsg_cat.is_spam()) {
    gsg_cat.spam() << "Setting GSG state to " << (void *)target << ":\n";
    target->write(gsg_cat.spam(false), 2);
  }
#endif

  _state_pcollector.add_level(1);
  PStatGPUTimer timer1(this, _draw_set_state_pcollector);

  if (transform != _internal_transform) {
    // PStatGPUTimer timer(this, _draw_set_state_transform_pcollector);
    _transform_state_pcollector.add_level(1);
    _internal_transform = transform;
    do_issue_transform();
  }

  // Nothing further to do if the state pointer is unchanged and every
  // attribute slot has already been applied (tracked via _state_mask).
  if (target == _state_rs && (_state_mask | _inv_state_mask).is_all_on()) {
    return;
  }
  _target_rs = target;

#ifndef OPENGLES_1
  determine_target_shader();
  _instance_count = _target_shader->get_instance_count();

  if (_target_shader != _state_shader) {
    do_issue_shader();
    _state_shader = _target_shader;
    // A shader change invalidates the applied texture bindings.
    _state_mask.clear_bit(TextureAttrib::get_class_slot());
  } else if (!has_fixed_function_pipeline() && _current_shader == nullptr) {
    // In the case of OpenGL ES 2.x, we need to glUseShader before we draw anything.
    do_issue_shader();
    _state_mask.clear_bit(TextureAttrib::get_class_slot());
  }

  // Update all of the state that is bound to the shader program.
  if (_current_shader_context != nullptr) {
    _current_shader_context->set_state_and_transform(target, transform, _scene_setup->get_camera_transform(), _projection_mat);
  }
#endif

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    int alpha_test_slot = AlphaTestAttrib::get_class_slot();
    if (_target_rs->get_attrib(alpha_test_slot) != _state_rs->get_attrib(alpha_test_slot) ||
        !_state_mask.get_bit(alpha_test_slot)
#ifndef OPENGLES_1
        || (_target_shader->get_flag(ShaderAttrib::F_subsume_alpha_test) !=
            _state_shader->get_flag(ShaderAttrib::F_subsume_alpha_test))
#endif
        ) {
      // PStatGPUTimer timer(this, _draw_set_state_alpha_test_pcollector);
      do_issue_alpha_test();
      _state_mask.set_bit(alpha_test_slot);
    }
  }
#endif

  int antialias_slot = AntialiasAttrib::get_class_slot();
  if (_target_rs->get_attrib(antialias_slot) != _state_rs->get_attrib(antialias_slot) ||
      !_state_mask.get_bit(antialias_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_antialias_pcollector);
    do_issue_antialias();
    _state_mask.set_bit(antialias_slot);
  }

  int clip_plane_slot = ClipPlaneAttrib::get_class_slot();
  if (_target_rs->get_attrib(clip_plane_slot) != _state_rs->get_attrib(clip_plane_slot) ||
      !_state_mask.get_bit(clip_plane_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_clip_plane_pcollector);
    do_issue_clip_plane();
    _state_mask.set_bit(clip_plane_slot);
  }

  // Color and color-scale are issued together since they interact.
  int color_slot = ColorAttrib::get_class_slot();
  int color_scale_slot = ColorScaleAttrib::get_class_slot();
  if (_target_rs->get_attrib(color_slot) != _state_rs->get_attrib(color_slot) ||
      _target_rs->get_attrib(color_scale_slot) != _state_rs->get_attrib(color_scale_slot) ||
      !_state_mask.get_bit(color_slot) ||
      !_state_mask.get_bit(color_scale_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_color_pcollector);
    do_issue_color();
    do_issue_color_scale();
    _state_mask.set_bit(color_slot);
    _state_mask.set_bit(color_scale_slot);
  }

  int cull_face_slot = CullFaceAttrib::get_class_slot();
  if (_target_rs->get_attrib(cull_face_slot) !=
      _state_rs->get_attrib(cull_face_slot) ||
      !_state_mask.get_bit(cull_face_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_cull_face_pcollector);
    do_issue_cull_face();
    _state_mask.set_bit(cull_face_slot);
  }

  int depth_offset_slot = DepthOffsetAttrib::get_class_slot();
  if (_target_rs->get_attrib(depth_offset_slot) != _state_rs->get_attrib(depth_offset_slot) ||
      !_state_mask.get_bit(depth_offset_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_depth_offset_pcollector);
    do_issue_depth_offset();
    _state_mask.set_bit(depth_offset_slot);
  }

  int depth_test_slot = DepthTestAttrib::get_class_slot();
  if (_target_rs->get_attrib(depth_test_slot) != _state_rs->get_attrib(depth_test_slot) ||
      !_state_mask.get_bit(depth_test_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_depth_test_pcollector);
    do_issue_depth_test();
    _state_mask.set_bit(depth_test_slot);
  }

  int depth_write_slot = DepthWriteAttrib::get_class_slot();
  if (_target_rs->get_attrib(depth_write_slot) != _state_rs->get_attrib(depth_write_slot) ||
      !_state_mask.get_bit(depth_write_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_depth_write_pcollector);
    do_issue_depth_write();
    _state_mask.set_bit(depth_write_slot);
  }

  int render_mode_slot = RenderModeAttrib::get_class_slot();
  if (_target_rs->get_attrib(render_mode_slot) != _state_rs->get_attrib(render_mode_slot) ||
      !_state_mask.get_bit(render_mode_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_render_mode_pcollector);
    do_issue_render_mode();
    _state_mask.set_bit(render_mode_slot);
  }

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    int rescale_normal_slot = RescaleNormalAttrib::get_class_slot();
    if (_target_rs->get_attrib(rescale_normal_slot) != _state_rs->get_attrib(rescale_normal_slot) ||
        !_state_mask.get_bit(rescale_normal_slot)) {
      // PStatGPUTimer timer(this, _draw_set_state_rescale_normal_pcollector);
      do_issue_rescale_normal();
      _state_mask.set_bit(rescale_normal_slot);
    }

    int shade_model_slot = ShadeModelAttrib::get_class_slot();
    if (_target_rs->get_attrib(shade_model_slot) != _state_rs->get_attrib(shade_model_slot) ||
        !_state_mask.get_bit(shade_model_slot)) {
      // PStatGPUTimer timer(this, _draw_set_state_shade_model_pcollector);
      do_issue_shade_model();
      _state_mask.set_bit(shade_model_slot);
    }
  }
#endif

#if !defined(OPENGLES) || defined(OPENGLES_1)
  int logic_op_slot = LogicOpAttrib::get_class_slot();
  if (_target_rs->get_attrib(logic_op_slot) != _state_rs->get_attrib(logic_op_slot) ||
      !_state_mask.get_bit(logic_op_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_logic_op_pcollector);
    do_issue_logic_op();
    _state_mask.set_bit(logic_op_slot);
  }
#endif

  // Transparency, color-write and color-blend are issued together since
  // they all feed into do_issue_blending().
  int transparency_slot = TransparencyAttrib::get_class_slot();
  int color_write_slot = ColorWriteAttrib::get_class_slot();
  int color_blend_slot = ColorBlendAttrib::get_class_slot();
  if (_target_rs->get_attrib(transparency_slot) != _state_rs->get_attrib(transparency_slot) ||
      _target_rs->get_attrib(color_write_slot) != _state_rs->get_attrib(color_write_slot) ||
      _target_rs->get_attrib(color_blend_slot) != _state_rs->get_attrib(color_blend_slot) ||
      !_state_mask.get_bit(transparency_slot) ||
      !_state_mask.get_bit(color_write_slot) ||
      !_state_mask.get_bit(color_blend_slot)
#ifndef OPENGLES_1
      || (_target_shader->get_flag(ShaderAttrib::F_disable_alpha_write) !=
          _state_shader->get_flag(ShaderAttrib::F_disable_alpha_write))
#endif
      ) {
    // PStatGPUTimer timer(this, _draw_set_state_blending_pcollector);
    do_issue_blending();
    _state_mask.set_bit(transparency_slot);
    _state_mask.set_bit(color_write_slot);
    _state_mask.set_bit(color_blend_slot);
  }

  int texture_slot = TextureAttrib::get_class_slot();
  if (_target_rs->get_attrib(texture_slot) != _state_rs->get_attrib(texture_slot) ||
      !_state_mask.get_bit(texture_slot)) {
    PStatGPUTimer timer(this, _draw_set_state_texture_pcollector);
    determine_target_texture();
    do_issue_texture();

    // Since the TexGen and TexMatrix states depend partly on the particular
    // set of textures in use, we should force both of those to be reissued
    // every time we change the texture state.
    _state_mask.clear_bit(TexGenAttrib::get_class_slot());
    _state_mask.clear_bit(TexMatrixAttrib::get_class_slot());

    _state_texture = _target_texture;
    _state_mask.set_bit(texture_slot);
  }

  // If one of the previously-loaded TexGen modes modified the texture matrix,
  // then if either state changed, we have to change both of them now.
  if (_tex_gen_modifies_mat) {
    int tex_gen_slot = TexGenAttrib::get_class_slot();
    int tex_matrix_slot = TexMatrixAttrib::get_class_slot();
    if (_target_rs->get_attrib(tex_gen_slot) != _state_rs->get_attrib(tex_gen_slot) ||
        _target_rs->get_attrib(tex_matrix_slot) != _state_rs->get_attrib(tex_matrix_slot) ||
        !_state_mask.get_bit(tex_gen_slot) ||
        !_state_mask.get_bit(tex_matrix_slot)) {
      _state_mask.clear_bit(tex_gen_slot);
      _state_mask.clear_bit(tex_matrix_slot);
    }
  }

  int tex_matrix_slot = TexMatrixAttrib::get_class_slot();
  if (_target_rs->get_attrib(tex_matrix_slot) != _state_rs->get_attrib(tex_matrix_slot) ||
      !_state_mask.get_bit(tex_matrix_slot)) {
    // PStatGPUTimer timer(this, _draw_set_state_tex_matrix_pcollector);
#ifdef SUPPORT_FIXED_FUNCTION
    if (has_fixed_function_pipeline()) {
      do_issue_tex_matrix();
    }
#endif
    _state_mask.set_bit(tex_matrix_slot);

#ifndef OPENGLES_1
    if (_current_shader_context) {
      _current_shader_context->issue_parameters(Shader::SSD_tex_matrix);
    }
#endif
  }

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    int tex_gen_slot = TexGenAttrib::get_class_slot();
    if (_target_tex_gen != _state_tex_gen ||
        !_state_mask.get_bit(tex_gen_slot)) {
      // PStatGPUTimer timer(this, _draw_set_state_tex_gen_pcollector);
      do_issue_tex_gen();
      _state_tex_gen = _target_tex_gen;
      _state_mask.set_bit(tex_gen_slot);
    }

    int material_slot = MaterialAttrib::get_class_slot();
    if (_target_rs->get_attrib(material_slot) != _state_rs->get_attrib(material_slot) ||
        !_state_mask.get_bit(material_slot)) {
      // PStatGPUTimer timer(this, _draw_set_state_material_pcollector);
      do_issue_material();
_state_mask.set_bit(material_slot); } int light_slot = LightAttrib::get_class_slot(); if (_target_rs->get_attrib(light_slot) != _state_rs->get_attrib(light_slot) || !_state_mask.get_bit(light_slot)) { // PStatGPUTimer timer(this, _draw_set_state_light_pcollector); do_issue_light(); _state_mask.set_bit(light_slot); } int fog_slot = FogAttrib::get_class_slot(); if (_target_rs->get_attrib(fog_slot) != _state_rs->get_attrib(fog_slot) || !_state_mask.get_bit(fog_slot)) { // PStatGPUTimer timer(this, _draw_set_state_fog_pcollector); do_issue_fog(); _state_mask.set_bit(fog_slot); } } #endif int stencil_slot = StencilAttrib::get_class_slot(); if (_target_rs->get_attrib(stencil_slot) != _state_rs->get_attrib(stencil_slot) || !_state_mask.get_bit(stencil_slot)) { // PStatGPUTimer timer(this, _draw_set_state_stencil_pcollector); do_issue_stencil(); _state_mask.set_bit(stencil_slot); } int scissor_slot = ScissorAttrib::get_class_slot(); if (_target_rs->get_attrib(scissor_slot) != _state_rs->get_attrib(scissor_slot) || !_state_mask.get_bit(scissor_slot)) { // PStatGPUTimer timer(this, _draw_set_state_scissor_pcollector); do_issue_scissor(); _state_mask.set_bit(scissor_slot); } _state_rs = _target_rs; maybe_gl_finish(); report_my_gl_errors(); } /** * Frees some memory that was explicitly allocated within the glgsg. */ void CLP(GraphicsStateGuardian):: free_pointers() { #if defined(HAVE_CG) && !defined(OPENGLES) if (_cg_context != 0) { cgDestroyContext(_cg_context); _cg_context = 0; } #endif } /** * This is called by set_state_and_transform() when the texture state has * changed. */ void CLP(GraphicsStateGuardian):: do_issue_texture() { DO_PSTATS_STUFF(_texture_state_pcollector.add_level(1)); #ifdef OPENGLES_1 update_standard_texture_bindings(); #else if (_current_shader_context == 0) { // No shader, or a non-Cg shader. 
if (_texture_binding_shader_context != 0) { _texture_binding_shader_context->disable_shader_texture_bindings(); } #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { update_standard_texture_bindings(); } #endif } else { if (_texture_binding_shader_context == 0) { #ifdef SUPPORT_FIXED_FUNCTION if (has_fixed_function_pipeline()) { disable_standard_texture_bindings(); } #endif _current_shader_context->update_shader_texture_bindings(nullptr); } else { _current_shader_context-> update_shader_texture_bindings(_texture_binding_shader_context); } } _texture_binding_shader = _current_shader; _texture_binding_shader_context = _current_shader_context; #endif } #ifdef SUPPORT_FIXED_FUNCTION /** * Applies the appropriate set of textures for the current state, using the * standard fixed-function pipeline. */ void CLP(GraphicsStateGuardian):: update_standard_texture_bindings() { #ifndef NDEBUG if (_show_texture_usage) { update_show_usage_texture_bindings(-1); return; } #endif // NDEBUG int num_stages = _target_texture->get_num_on_ff_stages(); #ifndef NDEBUG // Also check the _flash_texture. If it is non-NULL, we need to check to // see if our flash_texture is in the texture stack here. If so, then we // need to call the special show_texture method instead of the normal // texture stack. 
  if (_flash_texture != nullptr) {
    double now = ClockObject::get_global_clock()->get_frame_time();
    int this_second = (int)floor(now);
    // Flash the texture only on odd-numbered seconds.
    if (this_second & 1) {
      int show_stage_index = -1;
      for (int i = 0; i < num_stages && show_stage_index < 0; ++i) {
        TextureStage *stage = _target_texture->get_on_ff_stage(i);
        Texture *texture = _target_texture->get_on_texture(stage);
        if (texture == _flash_texture) {
          show_stage_index = i;
        }
      }

      if (show_stage_index >= 0) {
        update_show_usage_texture_bindings(show_stage_index);
        return;
      }
    }
  }
#endif  // NDEBUG

  nassertv(num_stages <= _max_texture_stages &&
           _num_active_texture_stages <= _max_texture_stages);

  _texture_involves_color_scale = false;

  // last_saved_result / last_stage track combiner source indices for
  // GL_COMBINE stages (saved-result vs. "previous" inputs).
  int last_saved_result = -1;
  int last_stage = -1;
  int i;
  for (i = 0; i < num_stages; i++) {
    TextureStage *stage = _target_texture->get_on_ff_stage(i);
    Texture *texture = _target_texture->get_on_texture(stage);
    nassertv(texture != nullptr);

    // Issue the texture on stage i.
    set_active_texture_stage(i);

    // First, turn off the previous texture mode.
    glDisable(GL_TEXTURE_2D);
    if (_supports_cube_map) {
      glDisable(GL_TEXTURE_CUBE_MAP);
    }
#ifndef OPENGLES
    glDisable(GL_TEXTURE_1D);
    if (_supports_3d_texture) {
      glDisable(GL_TEXTURE_3D);
    }
#endif  // OPENGLES

    int view = get_current_tex_view_offset() + stage->get_tex_view_offset();
    TextureContext *tc = texture->prepare_now(view, _prepared_objects, this);
    if (tc == nullptr) {
      // Something wrong with this texture; skip it.
      continue;
    }

    // Then, turn on the current texture mode.
    GLenum target = get_texture_target(texture->get_texture_type());
    if (target == GL_NONE) {
      // Unsupported texture mode.
      continue;
    }
#ifndef OPENGLES_1
    if (target == GL_TEXTURE_2D_ARRAY || target == GL_TEXTURE_CUBE_MAP_ARRAY) {
      // Cannot be applied via the FFP.
      continue;
    }
#endif  // OPENGLES
    glEnable(target);

    if (!update_texture(tc, false)) {
      glDisable(target);
      continue;
    }

    // Don't DCAST(); we already did the verification in update_texture.
    CLP(TextureContext) *gtc = (CLP(TextureContext) *)tc;
    apply_texture(gtc);
    apply_sampler(i, _target_texture->get_on_sampler(stage), gtc);

    if (stage->involves_color_scale() && _color_scale_enabled) {
      // Pre-multiply the stage color by the current color scale.
      LColor color = stage->get_color();
      color.set(color[0] * _current_color_scale[0],
                color[1] * _current_color_scale[1],
                color[2] * _current_color_scale[2],
                color[3] * _current_color_scale[3]);
      _texture_involves_color_scale = true;
      call_glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, color);
    } else {
      call_glTexEnvfv(GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR, stage->get_color());
    }

    if (stage->get_mode() == TextureStage::M_decal) {
      if (texture->get_num_components() < 3 && _supports_texture_combine) {
        // Make a special case for 1- and 2-channel decal textures.  OpenGL
        // does not define their use with GL_DECAL for some reason, so
        // implement them using the combiner instead.
        glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
        glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE);
        glTexEnvi(GL_TEXTURE_ENV, GL_RGB_SCALE, 1);
        glTexEnvi(GL_TEXTURE_ENV, GL_ALPHA_SCALE, 1);
        glTexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB, GL_TEXTURE);
        glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
        glTexEnvi(GL_TEXTURE_ENV, GL_SRC1_RGB, GL_PREVIOUS);
        glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR);
        glTexEnvi(GL_TEXTURE_ENV, GL_SRC2_RGB, GL_TEXTURE);
        glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_RGB, GL_SRC_ALPHA);

      } else {
        // Normal 3- and 4-channel decal textures.
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL); } } else if (stage->get_mode() == TextureStage::M_combine) { if (!_supports_texture_combine) { GLCAT.warning() << "TextureStage::M_combine mode is not supported.\n"; glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); } else { glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE); glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_INTERPOLATE); glTexEnvi(GL_TEXTURE_ENV, GL_RGB_SCALE, stage->get_rgb_scale()); glTexEnvi(GL_TEXTURE_ENV, GL_ALPHA_SCALE, stage->get_alpha_scale()); glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, get_texture_combine_type(stage->get_combine_rgb_mode())); switch (stage->get_num_combine_rgb_operands()) { case 3: glTexEnvi(GL_TEXTURE_ENV, GL_SRC2_RGB, get_texture_src_type(stage->get_combine_rgb_source2(), last_stage, last_saved_result, i)); glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_RGB, get_texture_operand_type(stage->get_combine_rgb_operand2())); // fall through case 2: glTexEnvi(GL_TEXTURE_ENV, GL_SRC1_RGB, get_texture_src_type(stage->get_combine_rgb_source1(), last_stage, last_saved_result, i)); glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB, get_texture_operand_type(stage->get_combine_rgb_operand1())); // fall through case 1: glTexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB, get_texture_src_type(stage->get_combine_rgb_source0(), last_stage, last_saved_result, i)); glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, get_texture_operand_type(stage->get_combine_rgb_operand0())); // fall through default: break; } glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_ALPHA, get_texture_combine_type(stage->get_combine_alpha_mode())); switch (stage->get_num_combine_alpha_operands()) { case 3: glTexEnvi(GL_TEXTURE_ENV, GL_SRC2_ALPHA, get_texture_src_type(stage->get_combine_alpha_source2(), last_stage, last_saved_result, i)); glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND2_ALPHA, get_texture_operand_type(stage->get_combine_alpha_operand2())); // fall through case 2: glTexEnvi(GL_TEXTURE_ENV, GL_SRC1_ALPHA, 
                    get_texture_src_type(stage->get_combine_alpha_source1(),
                                         last_stage, last_saved_result, i));
          glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_ALPHA,
                    get_texture_operand_type(stage->get_combine_alpha_operand1()));
          // fall through

        case 1:
          glTexEnvi(GL_TEXTURE_ENV, GL_SRC0_ALPHA,
                    get_texture_src_type(stage->get_combine_alpha_source0(),
                                         last_stage, last_saved_result, i));
          glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_ALPHA,
                    get_texture_operand_type(stage->get_combine_alpha_operand0()));
          // fall through

        default:
          break;
        }
      }
    } else {
      GLint glmode = get_texture_apply_mode_type(stage->get_mode());
      glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, glmode);
    }

    if (stage->get_saved_result()) {
      // This texture's result will be "saved" for a future stage's input.
      last_saved_result = i;
    } else {
      // This is a regular texture stage; it will be the "previous" input for
      // the next stage.
      last_stage = i;
    }
  }

  // Disable the texture stages that are no longer used.
  for (i = num_stages; i < _num_active_texture_stages; i++) {
    set_active_texture_stage(i);
    glDisable(GL_TEXTURE_2D);
    if (_supports_cube_map) {
      glDisable(GL_TEXTURE_CUBE_MAP);
    }
#ifndef OPENGLES
    glDisable(GL_TEXTURE_1D);
    if (_supports_3d_texture) {
      glDisable(GL_TEXTURE_3D);
    }
#endif  // OPENGLES
  }

  // Save the count of texture stages for next time.
  _num_active_texture_stages = num_stages;

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Applies a white dummy texture.  This is useful to bind to a texture slot
 * when a texture is missing.  Also binds the default sampler to the unit.
 */
void CLP(GraphicsStateGuardian)::
apply_white_texture(GLuint unit) {
  set_active_texture_stage(unit);
  glBindTexture(GL_TEXTURE_2D, get_white_texture());

  // Also apply the default sampler, if there's a chance we'd applied anything
  // else.
#ifndef OPENGLES_1
  if (_supports_sampler_objects) {
    _glBindSampler(unit, 0);
  }
#endif
}

/**
 * Returns a white dummy texture.  This is useful to bind to a texture slot
 * when a texture is missing.
 */
GLuint CLP(GraphicsStateGuardian)::
get_white_texture() {
  // Lazily create a 1x1 opaque-white RGBA texture on first use.
  if (_white_texture == 0) {
    glGenTextures(1, &_white_texture);
    glBindTexture(GL_TEXTURE_2D, _white_texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    unsigned char data[] = {0xff, 0xff, 0xff, 0xff};
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA,
                 GL_UNSIGNED_BYTE, data);
  }
  return _white_texture;
}

#ifndef NDEBUG
/**
 * This is a special function that loads the usage textures in gl-show-
 * texture-usage mode, instead of loading the actual used textures.
 *
 * If the indicated stage_index is >= 0, then it is the particular texture
 * that is shown.  Otherwise, the textures are rotated through based on
 * show_texture_usage_index.
 */
void CLP(GraphicsStateGuardian)::
update_show_usage_texture_bindings(int show_stage_index) {
  int num_stages = _target_texture->get_num_on_ff_stages();

  nassertv(num_stages <= _max_texture_stages &&
           _num_active_texture_stages <= _max_texture_stages);

  _texture_involves_color_scale = false;

  // First, we walk through the list of textures and pretend to render them
  // all, even though we don't actually render them, just so Panda will keep
  // track of the list of "active" textures correctly during the flash.
  int i;
  for (i = 0; i < num_stages; i++) {
    TextureStage *stage = _target_texture->get_on_ff_stage(i);
    Texture *texture = _target_texture->get_on_texture(stage);
    nassertv(texture != nullptr);

    int view = get_current_tex_view_offset() + stage->get_tex_view_offset();
    TextureContext *tc = texture->prepare_now(view, _prepared_objects, this);
    if (tc == nullptr) {
      // Something wrong with this texture; skip it.
      break;
    }
    tc->enqueue_lru(&_prepared_objects->_graphics_memory_lru);
  }

#ifdef SUPPORT_FIXED_FUNCTION
  if (has_fixed_function_pipeline()) {
    // Disable all texture stages.
    for (i = 0; i < _num_active_texture_stages; i++) {
      set_active_texture_stage(i);
#ifndef OPENGLES
      glDisable(GL_TEXTURE_1D);
#endif  // OPENGLES
      glDisable(GL_TEXTURE_2D);
      if (_supports_3d_texture) {
#ifndef OPENGLES_1
        glDisable(GL_TEXTURE_3D);
#endif  // OPENGLES_1
      }
      if (_supports_cube_map) {
        glDisable(GL_TEXTURE_CUBE_MAP);
      }
    }
  }
#endif

  // Save the count of texture stages for next time.
  _num_active_texture_stages = num_stages;

  if (num_stages > 0) {
    // Now, pick just one texture stage to apply.
    if (show_stage_index >= 0 && show_stage_index < num_stages) {
      i = show_stage_index;
    } else {
      i = _show_texture_usage_index % num_stages;
    }

    TextureStage *stage = _target_texture->get_on_ff_stage(i);
    Texture *texture = _target_texture->get_on_texture(stage);
    nassertv(texture != nullptr);

    // Choose the corresponding usage texture and apply it.
    set_active_texture_stage(i);
#ifdef SUPPORT_FIXED_FUNCTION
    if (has_fixed_function_pipeline()) {
      glEnable(GL_TEXTURE_2D);
    }
#endif

    // Usage textures are cached per (width, height) size.
    UsageTextureKey key(texture->get_x_size(), texture->get_y_size());
    UsageTextures::iterator ui = _usage_textures.find(key);
    GLuint index;
    if (ui == _usage_textures.end()) {
      // Need to create a new texture for this size.
      glGenTextures(1, &index);
      glBindTexture(GL_TEXTURE_2D, index);
      // TODO: this could be a lot simpler with glTexStorage2D followed by a
      // call to glClearTexImage.
      upload_usage_texture(texture->get_x_size(), texture->get_y_size());
      _usage_textures[key] = index;

    } else {
      // Just bind the previously-created texture.
      index = (*ui).second;
      glBindTexture(GL_TEXTURE_2D, index);
    }

    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "glBindTexture(GL_TEXTURE_2D, " << index << ")\n";
    }
    // TODO: glBindSampler(0) ?
  }

  report_my_gl_errors();
}
#endif  // NDEBUG

#ifndef NDEBUG
/**
 * Uploads a special "usage" texture intended to be applied only in gl-show-
 * texture-usage mode, to reveal where texture memory is being spent.
 */
void CLP(GraphicsStateGuardian)::
upload_usage_texture(int width, int height) {
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

  if (GLCAT.is_debug()) {
    GLCAT.debug()
      << "upload_usage_texture(" << width << ", " << height << ")\n";
  }

  // Each mipmap level gets a distinct solid color, so the on-screen color
  // reveals which mipmap level the hardware selected.
  static LColor colors[3] = {
    LColor(0.4, 0.5f, 0.8f, 1.0f),   // mipmap 0: blue
    LColor(1.0f, 1.0f, 0.0f, 1.0f),  // mipmap 1: yellow
    LColor(0.8f, 0.3, 0.3, 1.0f),    // mipmap 2 and higher: red
  };

  // Allocate a temporary array large enough to contain the toplevel mipmap.
  uint32_t *buffer = (uint32_t *)PANDA_MALLOC_ARRAY(width * height * 4);

  int n = 0;
  while (true) {
    // Choose the color for the nth mipmap.
    LColor c = colors[min(n, 2)];

    // A simple union to store the color values bytewise, and get the answer
    // wordwise, independently of machine byte-ordering.
    union {
      struct {
        unsigned char r, g, b, a;
      } b;
      uint32_t w;
    } store;

    store.b.r = (unsigned char)(c[0] * 255.0f);
    store.b.g = (unsigned char)(c[1] * 255.0f);
    store.b.b = (unsigned char)(c[2] * 255.0f);
    store.b.a = 0xff;

    // Fill in the array.
    int num_pixels = width * height;
    for (int p = 0; p < num_pixels; ++p) {
      buffer[p] = store.w;
    }

    glTexImage2D(GL_TEXTURE_2D, n, GL_RGBA, width, height, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, buffer);
    if (width == 1 && height == 1) {
      // That was the last mipmap level.
      break;
    }

    width = max(width >> 1, 1);
    height = max(height >> 1, 1);
    ++n;
  }

  PANDA_FREE_ARRAY(buffer);
}
#endif  // NDEBUG

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Turns off every fixed-function texture stage that is currently active.
 */
void CLP(GraphicsStateGuardian)::
disable_standard_texture_bindings() {
  // Disable the texture stages that are no longer used.
  for (int i = 0; i < _num_active_texture_stages; i++) {
    set_active_texture_stage(i);
#ifndef OPENGLES
    glDisable(GL_TEXTURE_1D);
#endif  // OPENGLES
    glDisable(GL_TEXTURE_2D);
    if (_supports_3d_texture) {
#ifndef OPENGLES_1
      glDisable(GL_TEXTURE_3D);
#endif  // OPENGLES_1
    }
    if (_supports_cube_map) {
      glDisable(GL_TEXTURE_CUBE_MAP);
    }
  }

  _num_active_texture_stages = 0;

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Loads the texture matrix for each active fixed-function texture stage
 * from the current TexMatrixAttrib, or the identity if the stage has none.
 */
void CLP(GraphicsStateGuardian)::
do_issue_tex_matrix() {
  nassertv(_num_active_texture_stages <= _max_texture_stages);

  for (int i = 0; i < _num_active_texture_stages; i++) {
    TextureStage *stage = _target_texture->get_on_ff_stage(i);
    set_active_texture_stage(i);

    glMatrixMode(GL_TEXTURE);

    const TexMatrixAttrib *target_tex_matrix;
    _target_rs->get_attrib_def(target_tex_matrix);

    if (target_tex_matrix->has_stage(stage)) {
      call_glLoadMatrix(target_tex_matrix->get_mat(stage));
    } else {
      glLoadIdentity();

      // For some reason, the glLoadIdentity() call doesn't work on my Dell
      // laptop's IBM OpenGL driver, when used in conjunction with glTexGen(),
      // below.  But explicitly loading an identity matrix does work.  But
      // this buggy-driver workaround might have other performance
      // implications, so I leave it out.
      // call_glLoadMatrix(LMatrix4::ident_mat());
    }
  }
  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

#ifdef SUPPORT_FIXED_FUNCTION
/**
 * Configures fixed-function texture coordinate generation (glTexGen) for
 * each active texture stage according to the current TexGenAttrib.
 */
void CLP(GraphicsStateGuardian)::
do_issue_tex_gen() {
  nassertv(_num_active_texture_stages <= _max_texture_stages);

  // These are passed in for the four OBJECT_PLANE or EYE_PLANE values; they
  // effectively define an identity matrix that maps the spatial coordinates
  // one-for-one to UV's.  If you want a mapping other than identity, use a
  // TexMatrixAttrib (or a TexProjectorEffect).
#ifndef OPENGLES
  // Each plane selects one of x, y, z, w of the input coordinate.
  static const PN_stdfloat s_data[4] = { 1, 0, 0, 0 };
  static const PN_stdfloat t_data[4] = { 0, 1, 0, 0 };
  static const PN_stdfloat r_data[4] = { 0, 0, 1, 0 };
  static const PN_stdfloat q_data[4] = { 0, 0, 0, 1 };
#endif

  _tex_gen_modifies_mat = false;

  for (int i = 0; i < _num_active_texture_stages; i++) {
    set_active_texture_stage(i);
    if (_supports_point_sprite) {
#ifdef OPENGLES
      glTexEnvi(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_FALSE);
#else
      glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_FALSE);
#endif  // OPENGLES
    }

#ifndef OPENGLES
    // TexGen not supported by OpenGL ES.
    glDisable(GL_TEXTURE_GEN_S);
    glDisable(GL_TEXTURE_GEN_T);
    glDisable(GL_TEXTURE_GEN_R);
    glDisable(GL_TEXTURE_GEN_Q);

    TextureStage *stage = _target_texture->get_on_ff_stage(i);
    TexGenAttrib::Mode mode = _target_tex_gen->get_mode(stage);
    switch (mode) {
    case TexGenAttrib::M_off:
    case TexGenAttrib::M_unused2:
      break;

    case TexGenAttrib::M_eye_sphere_map:
      glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP);
      glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_SPHERE_MAP);
      glEnable(GL_TEXTURE_GEN_S);
      glEnable(GL_TEXTURE_GEN_T);
      break;

    case TexGenAttrib::M_eye_cube_map:
      if (_supports_cube_map) {
        // We need to rotate the normals out of GL's coordinate system and
        // into the user's coordinate system.  We do this by composing a
        // transform onto the texture matrix.
        LMatrix4 mat = _inv_cs_transform->get_mat();
        mat.set_row(3, LVecBase3(0.0f, 0.0f, 0.0f));
        glMatrixMode(GL_TEXTURE);
        GLPf(MultMatrix)(mat.get_data());

        // Now we need to reset the texture matrix next time around to undo
        // this.
        _tex_gen_modifies_mat = true;

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
      }
      break;

    case TexGenAttrib::M_world_cube_map:
      if (_supports_cube_map) {
        // We dynamically transform normals from eye space to world space by
        // applying the appropriate rotation transform to the current texture
        // matrix.  Unlike M_world_position, we can't achieve this effect by
        // monkeying with the modelview transform, since the current modelview
        // doesn't affect GL_REFLECTION_MAP.
        CPT(TransformState) camera_transform =
          _scene_setup->get_camera_transform()->compose(_inv_cs_transform);

        LMatrix4 mat = camera_transform->get_mat();
        mat.set_row(3, LVecBase3(0.0f, 0.0f, 0.0f));
        glMatrixMode(GL_TEXTURE);
        GLPf(MultMatrix)(mat.get_data());

        // Now we need to reset the texture matrix next time around to undo
        // this.
        _tex_gen_modifies_mat = true;

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP);
        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
      }
      break;

    case TexGenAttrib::M_eye_normal:
      if (_supports_cube_map) {
        // We need to rotate the normals out of GL's coordinate system and
        // into the user's coordinate system.  We do this by composing a
        // transform onto the texture matrix.
        LMatrix4 mat = _inv_cs_transform->get_mat();
        mat.set_row(3, LVecBase3(0.0f, 0.0f, 0.0f));
        glMatrixMode(GL_TEXTURE);
        GLPf(MultMatrix)(mat.get_data());

        // Now we need to reset the texture matrix next time around to undo
        // this.
        _tex_gen_modifies_mat = true;

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
      }
      break;

    case TexGenAttrib::M_world_normal:
      if (_supports_cube_map) {
        // We dynamically transform normals from eye space to world space by
        // applying the appropriate rotation transform to the current texture
        // matrix.  Unlike M_world_position, we can't achieve this effect by
        // monkeying with the modelview transform, since the current modelview
        // doesn't affect GL_NORMAL_MAP.
        CPT(TransformState) camera_transform =
          _scene_setup->get_camera_transform()->compose(_inv_cs_transform);

        LMatrix4 mat = camera_transform->get_mat();
        mat.set_row(3, LVecBase3(0.0f, 0.0f, 0.0f));
        glMatrixMode(GL_TEXTURE);
        GLPf(MultMatrix)(mat.get_data());

        // Now we need to reset the texture matrix next time around to undo
        // this.
        _tex_gen_modifies_mat = true;

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_NORMAL_MAP);
        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
      }
      break;

    case TexGenAttrib::M_eye_position:
      // To represent eye position correctly, we need to temporarily load the
      // coordinate-system transform.
      glMatrixMode(GL_MODELVIEW);
      glPushMatrix();
      call_glLoadMatrix(_cs_transform->get_mat());

      glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
      glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
      glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
      glTexGeni(GL_Q, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);

      GLPfv(TexGen)(GL_S, GL_EYE_PLANE, s_data);
      GLPfv(TexGen)(GL_T, GL_EYE_PLANE, t_data);
      GLPfv(TexGen)(GL_R, GL_EYE_PLANE, r_data);
      GLPfv(TexGen)(GL_Q, GL_EYE_PLANE, q_data);

      glEnable(GL_TEXTURE_GEN_S);
      glEnable(GL_TEXTURE_GEN_T);
      glEnable(GL_TEXTURE_GEN_R);
      glEnable(GL_TEXTURE_GEN_Q);

      glMatrixMode(GL_MODELVIEW);
      glPopMatrix();
      break;

    case TexGenAttrib::M_world_position:
      // We achieve world position coordinates by using the eye position mode,
      // and loading the transform of the root node--thus putting the "eye" at
      // the root.
      {
        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        CPT(TransformState) root_transform =
          _cs_transform->compose(_scene_setup->get_world_transform());
        call_glLoadMatrix(root_transform->get_mat());

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);
        glTexGeni(GL_Q, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR);

        GLPfv(TexGen)(GL_S, GL_EYE_PLANE, s_data);
        GLPfv(TexGen)(GL_T, GL_EYE_PLANE, t_data);
        GLPfv(TexGen)(GL_R, GL_EYE_PLANE, r_data);
        GLPfv(TexGen)(GL_Q, GL_EYE_PLANE, q_data);

        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
        glEnable(GL_TEXTURE_GEN_Q);

        glMatrixMode(GL_MODELVIEW);
        glPopMatrix();
      }
      break;

    case TexGenAttrib::M_point_sprite:
      if (_supports_point_sprite) {
#ifdef OPENGLES
        glTexEnvi(GL_POINT_SPRITE_OES, GL_COORD_REPLACE_OES, GL_TRUE);
#else
        glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
#endif
      }
      break;

    case TexGenAttrib::M_constant:
      // To generate a constant UV(w) coordinate everywhere, we use
      // OBJECT_LINEAR mode with plane equations whose x, y, z coefficients
      // are all zero: that flattens the vertex position to zero, and the
      // plane's w coefficient then adds our desired constant value.
      {
        const LTexCoord3 &v = _target_tex_gen->get_constant_value(stage);

        glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
        glTexGeni(GL_T, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
        glTexGeni(GL_R, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);
        glTexGeni(GL_Q, GL_TEXTURE_GEN_MODE, GL_OBJECT_LINEAR);

        LVecBase4 s(0.0f, 0.0f, 0.0f, v[0]);
        LVecBase4 t(0.0f, 0.0f, 0.0f, v[1]);
        LVecBase4 r(0.0f, 0.0f, 0.0f, v[2]);

        GLPfv(TexGen)(GL_S, GL_OBJECT_PLANE, s.get_data());
        GLPfv(TexGen)(GL_T, GL_OBJECT_PLANE, t.get_data());
        GLPfv(TexGen)(GL_R, GL_OBJECT_PLANE, r.get_data());
        GLPfv(TexGen)(GL_Q, GL_OBJECT_PLANE, q_data);

        glEnable(GL_TEXTURE_GEN_S);
        glEnable(GL_TEXTURE_GEN_T);
        glEnable(GL_TEXTURE_GEN_R);
        glEnable(GL_TEXTURE_GEN_Q);
      }
      break;

    case TexGenAttrib::M_unused:
      break;
    }
#endif  // OPENGLES
  }

  bool got_point_sprites = _supports_point_sprite &&
    (_target_tex_gen->get_geom_rendering(Geom::GR_point) & GeomEnums::GR_point_sprite) != 0;

  if (got_point_sprites != _tex_gen_point_sprite) {
    _tex_gen_point_sprite = got_point_sprites;
#ifdef OPENGLES
    if (_tex_gen_point_sprite) {
      glEnable(GL_POINT_SPRITE_OES);
    } else {
      glDisable(GL_POINT_SPRITE_OES);
    }
#else
    if (_tex_gen_point_sprite) {
      glEnable(GL_POINT_SPRITE_ARB);
    } else {
      glDisable(GL_POINT_SPRITE_ARB);
    }
#endif  // OPENGLES
  }

  report_my_gl_errors();
}
#endif  // SUPPORT_FIXED_FUNCTION

/**
 * Specifies the texture parameters.  Returns true if the texture may need to
 * be reloaded.  Pass non-NULL sampler argument to use different sampler
 * settings.
 */
bool CLP(GraphicsStateGuardian)::
specify_texture(CLP(TextureContext) *gtc, const SamplerState &sampler) {
#ifndef OPENGLES
  nassertr(gtc->_handle == 0 /* can't modify tex with active handle */, false);
#endif

  Texture *tex = gtc->get_texture();

  GLenum target = get_texture_target(tex->get_texture_type());
  if (target == GL_NONE) {
    // Unsupported target (e.g. 3-d texturing on GL 1.1).
    return false;
  }

#ifndef OPENGLES
  if (target == GL_TEXTURE_BUFFER) {
    // Buffer textures may not receive texture parameters.
    return false;
  }
#endif  // OPENGLES

  // Record the active sampler settings.
  gtc->_active_sampler = sampler;

  glTexParameteri(target, GL_TEXTURE_WRAP_S,
                  get_texture_wrap_mode(sampler.get_wrap_u()));
#ifndef OPENGLES
  // A 1-d texture has no T dimension to wrap.
  if (target != GL_TEXTURE_1D)
#endif
  {
    glTexParameteri(target, GL_TEXTURE_WRAP_T,
                    get_texture_wrap_mode(sampler.get_wrap_v()));
  }
#ifndef OPENGLES_1
  if (target == GL_TEXTURE_3D) {
    glTexParameteri(target, GL_TEXTURE_WRAP_R,
                    get_texture_wrap_mode(sampler.get_wrap_w()));
  }
#endif

#ifndef OPENGLES
  LColor border_color = sampler.get_border_color();
  call_glTexParameterfv(target, GL_TEXTURE_BORDER_COLOR, border_color);
#endif  // OPENGLES

  SamplerState::FilterType minfilter = sampler.get_effective_minfilter();
  SamplerState::FilterType magfilter = sampler.get_effective_magfilter();
  bool uses_mipmaps = SamplerState::is_mipmap(minfilter) && !gl_ignore_mipmaps;

#ifndef NDEBUG
  if (gl_force_mipmaps) {
    minfilter = SamplerState::FT_linear_mipmap_linear;
    magfilter = SamplerState::FT_linear;
    uses_mipmaps = true;
  }
#endif

  if (!tex->might_have_ram_image()) {
    // If it's a dynamically generated texture (that is, the RAM image isn't
    // available so it didn't pass through the CPU), we should enable GL-
    // generated mipmaps if we can.
    if (!_supports_generate_mipmap) {
      // However, if the GPU doesn't support mipmap generation, we have to
      // turn it off.
      uses_mipmaps = false;
    }
  }

  glTexParameteri(target, GL_TEXTURE_MIN_FILTER,
                  get_texture_filter_type(minfilter, !uses_mipmaps));
  glTexParameteri(target, GL_TEXTURE_MAG_FILTER,
                  get_texture_filter_type(magfilter, true));

  // Set anisotropic filtering.
  if (_supports_anisotropy) {
    // Clamp the requested degree to the hardware limit, and to at least 1
    // (the GL's "no anisotropy" value).
    PN_stdfloat anisotropy = sampler.get_effective_anisotropic_degree();
    anisotropy = min(anisotropy, _max_anisotropy);
    anisotropy = max(anisotropy, (PN_stdfloat)1.0);
    glTexParameterf(target, GL_TEXTURE_MAX_ANISOTROPY_EXT, anisotropy);
  }

#ifndef OPENGLES_1
  if (tex->get_format() == Texture::F_depth_stencil ||
      tex->get_format() == Texture::F_depth_component ||
      tex->get_format() == Texture::F_depth_component16 ||
      tex->get_format() == Texture::F_depth_component24 ||
      tex->get_format() == Texture::F_depth_component32) {
#ifdef SUPPORT_FIXED_FUNCTION
    if (has_fixed_function_pipeline()) {
      glTexParameteri(target, GL_DEPTH_TEXTURE_MODE_ARB, GL_INTENSITY);
    }
#endif
    if (_supports_shadow_filter) {
      if ((sampler.get_magfilter() == SamplerState::FT_shadow) ||
          (sampler.get_minfilter() == SamplerState::FT_shadow)) {
        // Enable hardware depth comparison for shadow-map sampling.
        glTexParameteri(target, GL_TEXTURE_COMPARE_MODE_ARB, GL_COMPARE_R_TO_TEXTURE_ARB);
        glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL);
      } else {
        glTexParameteri(target, GL_TEXTURE_COMPARE_MODE_ARB, GL_NONE);
        glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC_ARB, GL_LEQUAL);
      }
    }
  }
#endif

#ifndef OPENGLES_1
  if (_supports_texture_lod) {
    glTexParameterf(target, GL_TEXTURE_MIN_LOD, sampler.get_min_lod());
    glTexParameterf(target, GL_TEXTURE_MAX_LOD, sampler.get_max_lod());
  }
#endif

#ifndef OPENGLES
  if (_supports_texture_lod_bias) {
    glTexParameterf(target, GL_TEXTURE_LOD_BIAS, sampler.get_lod_bias());
  }
#endif

  report_my_gl_errors();

  if (uses_mipmaps && !gtc->_uses_mipmaps) {
    // Suddenly we require mipmaps.  This means the texture may need
    // reloading.
    return true;
  }

  return false;
}

/**
 * Updates OpenGL with the current information for this texture, and makes it
 * the current texture available for rendering.
 */
bool CLP(GraphicsStateGuardian)::
apply_texture(CLP(TextureContext) *gtc) {
  gtc->set_active(true);

  GLenum target = get_texture_target(gtc->get_texture()->get_texture_type());
  if (target == GL_NONE) {
    return false;
  }

  if (gtc->_target != target) {
    // The target has changed.  That means we have to re-bind a new texture
    // object.
    gtc->reset_data();
    gtc->_target = target;
  }
  glBindTexture(target, gtc->_index);

  if (GLCAT.is_spam()) {
    GLCAT.spam()
      << "glBindTexture(0x" << hex << target << dec << ", " << gtc->_index
      << "): " << *gtc->get_texture() << "\n";
  }

  report_my_gl_errors();
  return true;
}

/**
 * Updates OpenGL with the current information for this sampler, and makes it
 * the current sampler available for rendering.  Use NULL to unbind the
 * sampler.
 *
 * If the GSG doesn't support sampler objects, the sampler settings are
 * applied to the given texture context instead.
 */
bool CLP(GraphicsStateGuardian)::
apply_sampler(GLuint unit, const SamplerState &sampler,
              CLP(TextureContext) *gtc) {
#ifndef OPENGLES_1
  if (_supports_sampler_objects) {
    // We support sampler objects.  Prepare the sampler object and bind it to
    // the indicated texture unit.
    SamplerContext *sc = sampler.prepare_now(get_prepared_objects(), this);
    nassertr(sc != nullptr, false);
    CLP(SamplerContext) *gsc = DCAST(CLP(SamplerContext), sc);

    gsc->enqueue_lru(&_prepared_objects->_sampler_object_lru);

    _glBindSampler(unit, gsc->_index);

    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "glBindSampler(" << unit << ", " << gsc->_index << "): "
        << sampler << "\n";
    }

  } else
#endif  // !OPENGLES_1
  {
    // We don't support sampler objects.  We'll have to bind the texture and
    // change the texture parameters if they don't match.
    if (gtc->_active_sampler != sampler) {
      set_active_texture_stage(unit);
      apply_texture(gtc);
      specify_texture(gtc, sampler);
    }
  }

  if (sampler.uses_mipmaps() && !gtc->_uses_mipmaps && !gl_ignore_mipmaps) {
    // The texture wasn't created with mipmaps, but we are trying to sample it
    // with mipmaps.  We will need to reload it.
    GLCAT.info()
      << "reloading texture " << gtc->get_texture()->get_name()
      << " with mipmaps\n";

    apply_texture(gtc);
    gtc->mark_needs_reload();
    bool okflag = upload_texture(gtc, false, true);
    if (!okflag) {
      GLCAT.error()
        << "Could not load " << *gtc->get_texture() << "\n";
      return false;
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Uploads the entire texture image to OpenGL, including all pages.
 *
 * The return value is true if successful, or false if the texture has no
 * image.
 */
bool CLP(GraphicsStateGuardian)::
upload_texture(CLP(TextureContext) *gtc, bool force, bool uses_mipmaps) {
  PStatGPUTimer timer(this, _load_texture_pcollector);

  Texture *tex = gtc->get_texture();

  if (_effective_incomplete_render && !force) {
    bool has_image = _supports_compressed_texture ? tex->has_ram_image() : tex->has_uncompressed_ram_image();
    if (!has_image && tex->might_have_ram_image() &&
        tex->has_simple_ram_image() &&
        !_loader.is_null()) {
      // If we don't have the texture data right now, go get it, but in the
      // meantime load a temporary simple image in its place.
      async_reload_texture(gtc);
      has_image = _supports_compressed_texture ? tex->has_ram_image() : tex->has_uncompressed_ram_image();
      if (!has_image) {
        if (gtc->was_simple_image_modified()) {
          return upload_simple_texture(gtc);
        }
        return true;
      }
    }
  }

  CPTA_uchar image;
  if (_supports_compressed_texture) {
    image = tex->get_ram_image();
  } else {
    image = tex->get_uncompressed_ram_image();
  }

  Texture::CompressionMode image_compression;
  if (image.is_null()) {
    image_compression = Texture::CM_off;
  } else {
    image_compression = tex->get_ram_image_compression();
  }

  if (!get_supports_compressed_texture_format(image_compression)) {
    image = tex->get_uncompressed_ram_image();
    image_compression = Texture::CM_off;

    // If this triggers, Panda cannot decompress the texture.  Compile with
    // libsquish support or precompress the texture.
    nassertr(!image.is_null(), false);
  }

  int mipmap_bias = 0;

  int width = tex->get_x_size();
  int height = tex->get_y_size();
  int depth = tex->get_z_size();

  // If we'll use immutable texture storage, we have to pick a sized image
  // format.
  bool force_sized = (gl_immutable_texture_storage && _supports_tex_storage) ||
                     (tex->get_texture_type() == Texture::TT_buffer_texture);

  GLint internal_format = get_internal_image_format(tex, force_sized);
  GLint external_format = get_external_image_format(tex);
  GLenum component_type = get_component_type(tex->get_component_type());

  if (GLCAT.is_debug()) {
    if (image.is_null()) {
      GLCAT.debug()
        << "loading texture with NULL image";
    } else if (image_compression != Texture::CM_off) {
      GLCAT.debug()
        << "loading pre-compressed texture";
    } else if (is_compressed_format(internal_format)) {
      GLCAT.debug()
        << "loading compressed texture";
    } else {
      GLCAT.debug()
        << "loading uncompressed texture";
    }
    GLCAT.debug(false) << " " << tex->get_name() << "\n";
  }

  // Ensure that the texture fits within the GL's specified limits.  Need to
  // split dimensions because of texture arrays
  int max_dimension_x;
  int max_dimension_y;
  int max_dimension_z;

  switch (tex->get_texture_type()) {
  case Texture::TT_3d_texture:
    max_dimension_x = _max_3d_texture_dimension;
    max_dimension_y = _max_3d_texture_dimension;
    max_dimension_z = _max_3d_texture_dimension;
    break;

  case Texture::TT_cube_map:
    max_dimension_x = _max_cube_map_dimension;
    max_dimension_y = _max_cube_map_dimension;
    max_dimension_z = 6;
    break;

  case Texture::TT_2d_texture_array:
    max_dimension_x = _max_texture_dimension;
    max_dimension_y = _max_texture_dimension;
    max_dimension_z = _max_2d_texture_array_layers;
    break;

  case Texture::TT_cube_map_array:
    max_dimension_x = _max_texture_dimension;
    max_dimension_y = _max_texture_dimension;
    // Layer count must remain a multiple of 6 (one layer per cube face).
    max_dimension_z = int(_max_2d_texture_array_layers / 6) * 6;
    break;

  case Texture::TT_buffer_texture:
    max_dimension_x = _max_buffer_texture_size;
    max_dimension_y = 1;
    max_dimension_z = 1;
    break;

  default:
    max_dimension_x = _max_texture_dimension;
    max_dimension_y = _max_texture_dimension;
    max_dimension_z = 1;
  }

  if (max_dimension_x == 0 || max_dimension_y == 0 || max_dimension_z == 0) {
    // Guess this GL doesn't support cube mapping / 3-d textures / 2-d
    // texture arrays.
    report_my_gl_errors();
    return false;
  }

  // If it doesn't fit, we have to reduce it on-the-fly.  We do this by
  // incrementing the mipmap_bias, so we're effectively loading a lower mipmap
  // level.  This requires generating the mipmaps on the CPU if they haven't
  // already been generated.  It would have been better if the user had
  // specified max-texture-dimension to reduce the texture at load time
  // instead; of course, the user doesn't always know ahead of time what the
  // hardware limits are.
  if ((max_dimension_x > 0 && max_dimension_y > 0 && max_dimension_z > 0) &&
      image_compression == Texture::CM_off) {
    while (tex->get_expected_mipmap_x_size(mipmap_bias) > max_dimension_x ||
           tex->get_expected_mipmap_y_size(mipmap_bias) > max_dimension_y ||
           tex->get_expected_mipmap_z_size(mipmap_bias) > max_dimension_z) {
      ++mipmap_bias;
    }

    if (mipmap_bias >= tex->get_num_ram_mipmap_images()) {
      // We need to generate some more mipmap images.
      if (tex->has_ram_image()) {
        tex->generate_ram_mipmap_images();
        if (mipmap_bias >= tex->get_num_ram_mipmap_images()) {
          // It didn't work.  Send the smallest we've got, and hope for the
          // best.
          mipmap_bias = tex->get_num_ram_mipmap_images() - 1;
        }
      }
    }

    width = tex->get_expected_mipmap_x_size(mipmap_bias);
    height = tex->get_expected_mipmap_y_size(mipmap_bias);
    depth = tex->get_expected_mipmap_z_size(mipmap_bias);

    if (mipmap_bias != 0) {
      GLCAT.info()
        << "Reducing image " << tex->get_name()
        << " from " << tex->get_x_size() << " x " << tex->get_y_size()
        << " x " << tex->get_z_size() << " to "
        << width << " x " << height << " x " << depth << "\n";
    }
  }

  if (image_compression != Texture::CM_off) {
#ifndef OPENGLES
    switch (tex->get_effective_quality_level()) {
    case Texture::QL_fastest:
      glHint(GL_TEXTURE_COMPRESSION_HINT, GL_FASTEST);
      break;

    case Texture::QL_default:
    case Texture::QL_normal:
      glHint(GL_TEXTURE_COMPRESSION_HINT, GL_DONT_CARE);
      break;

    case Texture::QL_best:
      glHint(GL_TEXTURE_COMPRESSION_HINT, GL_NICEST);
      break;
    }
#endif
  }

  glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

  GLenum target = get_texture_target(tex->get_texture_type());
  uses_mipmaps = (uses_mipmaps && !gl_ignore_mipmaps) || gl_force_mipmaps;

#ifndef OPENGLES
  if (target == GL_TEXTURE_BUFFER) {
    // Buffer textures may not have mipmaps.
    uses_mipmaps = false;
  }
#endif  // OPENGLES

  bool needs_reload = false;
  if (!gtc->_has_storage ||
      gtc->_uses_mipmaps != uses_mipmaps ||
      gtc->_internal_format != internal_format ||
      gtc->_width != width ||
      gtc->_height != height ||
      gtc->_depth != depth) {
    // We need to reload a new GL Texture object.
    needs_reload = true;

    if (_use_object_labels) {
      // This seems like a good time to assign a label for the debug messages.
      const string &name = tex->get_name();
      _glObjectLabel(GL_TEXTURE, gtc->_index, name.size(), name.data());
    }
  }

  if (needs_reload && gtc->_immutable) {
    // Immutable storage cannot be resized or respecified; throw away the old
    // texture object and start over.
    GLCAT.info() << "Attempt to modify texture with immutable storage, recreating texture.\n";
    gtc->reset_data();
    glBindTexture(target, gtc->_index);

    if (GLCAT.is_spam()) {
      GLCAT.spam()
        << "glBindTexture(0x" << hex << target << dec << ", " << gtc->_index
        << "): " << *tex << "\n";
    }
  }

#ifndef OPENGLES
  if (target == GL_TEXTURE_BUFFER) {
    // Buffer textures don't support mipmapping.
    gtc->_generate_mipmaps = false;

    if (gtc->_buffer == 0) {
      // The buffer object wasn't created yet.
      _glGenBuffers(1, &gtc->_buffer);
      _glBindBuffer(GL_TEXTURE_BUFFER, gtc->_buffer);
      _glTexBuffer(GL_TEXTURE_BUFFER, internal_format, gtc->_buffer);
      needs_reload = true;
    } else {
      _glBindBuffer(GL_TEXTURE_BUFFER, gtc->_buffer);

      if (gtc->_internal_format != internal_format) {
        _glTexBuffer(GL_TEXTURE_BUFFER, internal_format, gtc->_buffer);
      }
    }
  } else
#endif  // !OPENGLES
  if (needs_reload) {
    // Figure out whether mipmaps will be generated by the GPU or by Panda (or
    // not at all), and how many mipmap levels should be created.
    gtc->_generate_mipmaps = false;
    int num_levels = 1;
    CPTA_uchar image = tex->get_ram_mipmap_image(mipmap_bias);

    if (image.is_null()) {
      // We don't even have a RAM image, so we have no choice but to let
      // mipmaps be generated on the GPU.
      if (uses_mipmaps) {
        if (_supports_generate_mipmap) {
          num_levels = tex->get_expected_num_mipmap_levels() - mipmap_bias;
          gtc->_generate_mipmaps = true;
        } else {
          // If it can't, do without mipmaps.
          num_levels = 1;
          glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        }
      }

    } else {
      if (uses_mipmaps) {
        num_levels = tex->get_num_ram_mipmap_images() - mipmap_bias;

        if (num_levels <= 1) {
          // No RAM mipmap levels available.  Should we generate some?
          if (!_supports_generate_mipmap || !driver_generate_mipmaps ||
              image_compression != Texture::CM_off) {
            // Yes, the GL can't or won't generate them, so we need to.  Note
            // that some drivers (nVidia) will *corrupt memory* if you ask
            // them to generate mipmaps for a pre-compressed texture.
            tex->generate_ram_mipmap_images();
            num_levels = tex->get_num_ram_mipmap_images() - mipmap_bias;
          }
        }

        if (num_levels <= 1) {
          // We don't have mipmap levels in RAM.  Ask the GL to generate them
          // if it can.
          if (_supports_generate_mipmap) {
            num_levels = tex->get_expected_num_mipmap_levels() - mipmap_bias;
            gtc->_generate_mipmaps = true;
          } else {
            // If it can't, do without mipmaps.
            glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            num_levels = 1;
          }
        }
      }
    }

    if (_supports_texture_max_level) {
      // By the time we get here, we have a pretty good prediction for the
      // number of mipmaps we're going to have, so tell the GL that's all it's
      // going to get.
      glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, num_levels - 1);
    }

#ifndef OPENGLES_2
    if (gtc->_generate_mipmaps && _glGenerateMipmap == nullptr) {
      // The old, deprecated way to generate mipmaps.
      glTexParameteri(target, GL_GENERATE_MIPMAP, GL_TRUE);
    }
#endif

#ifndef OPENGLES
    if (!_supports_luminance_texture) {
      // Do we need to apply a swizzle mask to emulate these deprecated texture
      // formats?
      switch (tex->get_format()) {
      case Texture::F_alpha:
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_R, GL_ZERO);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_G, GL_ZERO);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_B, GL_ZERO);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_A, GL_RED);
        break;

      case Texture::F_luminance:
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_R, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_G, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_B, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_A, GL_ONE);
        break;

      case Texture::F_luminance_alpha:
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_R, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_G, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_B, GL_RED);
        glTexParameteri(target, GL_TEXTURE_SWIZZLE_A, GL_GREEN);
        break;

      default:
        break;
      }
    }
#endif

    // Allocate immutable storage for the texture, after which we can subload
    // it.  Pre-allocating storage using glTexStorage is more efficient than
    // using glTexImage to load all of the individual images one by one later,
    // but we are not allowed to change the texture size or number of mipmap
    // levels after this point.
    if (gl_immutable_texture_storage && _supports_tex_storage && !gtc->_has_storage) {
      if (GLCAT.is_debug()) {
        GLCAT.debug()
          << "allocating storage for texture " << tex->get_name() << ", " << width
          << " x " << height << " x " << depth << ", mipmaps " << num_levels
          << ", uses_mipmaps = " << uses_mipmaps << "\n";
      }

      switch (tex->get_texture_type()) {
      case Texture::TT_buffer_texture:
        // Won't get here, but squelch compiler warning
      case Texture::TT_1d_texture:
        _glTexStorage1D(target, num_levels, internal_format, width);
        break;
      case Texture::TT_2d_texture:
      case Texture::TT_cube_map:
      case Texture::TT_1d_texture_array:
        _glTexStorage2D(target, num_levels, internal_format, width, height);
        break;
      case Texture::TT_3d_texture:
      case Texture::TT_2d_texture_array:
      case Texture::TT_cube_map_array:
        _glTexStorage3D(target, num_levels, internal_format, width, height, depth);
        break;
      }

      gtc->_has_storage = true;
      gtc->_immutable = true;
      gtc->_uses_mipmaps = uses_mipmaps;
      gtc->_internal_format = internal_format;
      gtc->_width = width;
      gtc->_height = height;
      gtc->_depth = depth;
      gtc->update_data_size_bytes(get_texture_memory_size(gtc));

      needs_reload = false;
    }
  } else {
    // Maybe we need to generate mipmaps on the CPU.
    if (!image.is_null() && uses_mipmaps) {
      if (tex->get_num_ram_mipmap_images() - mipmap_bias <= 1) {
        // No RAM mipmap levels available.  Should we generate some?
        if (!_supports_generate_mipmap || !driver_generate_mipmaps ||
            image_compression != Texture::CM_off) {
          // Yes, the GL can't or won't generate them, so we need to.  Note
          // that some drivers (nVidia) will *corrupt memory* if you ask them
          // to generate mipmaps for a pre-compressed texture.
          tex->generate_ram_mipmap_images();
        }
      }
    }
  }

  bool success = true;
  if (tex->get_texture_type() == Texture::TT_cube_map) {
    // A cube map must load six different 2-d images (which are stored as the
    // six pages of the system ram image).
    if (!_supports_cube_map) {
      report_my_gl_errors();
      return false;
    }
    nassertr(target == GL_TEXTURE_CUBE_MAP, false);

    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_POSITIVE_X,
       internal_format, external_format, component_type,
       true, 0, image_compression);
    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
       internal_format, external_format, component_type,
       true, 1, image_compression);
    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
       internal_format, external_format, component_type,
       true, 2, image_compression);
    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
       internal_format, external_format, component_type,
       true, 3, image_compression);
    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
       internal_format, external_format, component_type,
       true, 4, image_compression);
    success = success && upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias,
       GL_TEXTURE_CUBE_MAP, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
       internal_format, external_format, component_type,
       true, 5, image_compression);

  } else {
    // Any other kind of texture can be loaded all at once.
    success = upload_texture_image
      (gtc, needs_reload, uses_mipmaps, mipmap_bias, target,
       target, internal_format, external_format, component_type,
       false, 0, image_compression);
  }

  if (gtc->_generate_mipmaps && _glGenerateMipmap != nullptr &&
      !image.is_null()) {
    // We uploaded an image; we may need to generate mipmaps.
    if (GLCAT.is_debug()) {
      GLCAT.debug()
        << "generating mipmaps for texture " << tex->get_name() << ", "
        << width << " x " << height << " x " << depth
        << ", uses_mipmaps = " << uses_mipmaps << "\n";
    }
    _glGenerateMipmap(target);
  }

  maybe_gl_finish();

  if (success) {
    if (needs_reload) {
      gtc->_has_storage = true;
      gtc->_uses_mipmaps = uses_mipmaps;
      gtc->_internal_format = internal_format;
      gtc->_width = width;
      gtc->_height = height;
      gtc->_depth = depth;
      gtc->update_data_size_bytes(get_texture_memory_size(gtc));
    }
    nassertr(gtc->_has_storage, false);

    if (tex->get_post_load_store_cache()) {
      tex->set_post_load_store_cache(false);
      // OK, get the RAM image, and save it in a BamCache record.
      if (do_extract_texture_data(gtc)) {
        if (tex->has_ram_image()) {
          BamCache *cache = BamCache::get_global_ptr();
          PT(BamCacheRecord) record = cache->lookup(tex->get_fullpath(), "txo");
          if (record != nullptr) {
            record->set_data(tex, tex);
            cache->store(record);
          }
        }
      }
    }

    GraphicsEngine *engine = get_engine();
    nassertr(engine != nullptr, false);
    engine->texture_uploaded(tex);
    gtc->mark_loaded();

    report_my_gl_errors();
    return true;
  }

  report_my_gl_errors();
  return false;
}

/**
 * Loads a texture image, or one page of a cube map image, from system RAM to
 * texture memory.
 *
 * texture_target is normally the same thing as page_target; both represent
 * the GL target onto which the texture image is loaded, e.g. GL_TEXTURE_1D,
 * GL_TEXTURE_2D, etc.  The only time they may differ is in the case of cube
 * mapping, in which case texture_target will be target for the overall
 * texture, e.g.  GL_TEXTURE_CUBE_MAP, and page_target will be the target for
 * this particular page, e.g.  GL_TEXTURE_CUBE_MAP_POSITIVE_X.
 */
bool CLP(GraphicsStateGuardian)::
upload_texture_image(CLP(TextureContext) *gtc, bool needs_reload,
                     bool uses_mipmaps, int mipmap_bias, GLenum texture_target,
                     GLenum page_target, GLint internal_format,
                     GLint external_format, GLenum component_type,
                     bool one_page_only, int z,
                     Texture::CompressionMode image_compression) {
  // Make sure the error stack is cleared out before we begin.
  clear_my_gl_errors();

  if (texture_target == GL_NONE) {
    // Unsupported target (e.g. 3-d texturing on GL 1.1).
    return false;
  }
  if (image_compression != Texture::CM_off && !_supports_compressed_texture) {
    return false;
  }

  Texture *tex = gtc->get_texture();
  nassertr(tex != nullptr, false);

  // The base (possibly biased) mipmap level determines the dimensions that
  // the GL texture object will be created with.
  CPTA_uchar image = tex->get_ram_mipmap_image(mipmap_bias);
  int width = tex->get_expected_mipmap_x_size(mipmap_bias);
  int height = tex->get_expected_mipmap_y_size(mipmap_bias);
  int depth = tex->get_expected_mipmap_z_size(mipmap_bias);

  // Determine the number of images to upload.
  int num_levels = mipmap_bias + 1;
  if (uses_mipmaps) {
    num_levels = tex->get_expected_num_mipmap_levels();
  }

  // num_ram_mipmap_levels counts how many levels we actually have data for
  // in RAM; levels beyond this may still be synthesized from a clear color.
  int num_ram_mipmap_levels = 0;
  if (!image.is_null()) {
    if (uses_mipmaps) {
      num_ram_mipmap_levels = tex->get_num_ram_mipmap_images();
    } else {
      num_ram_mipmap_levels = 1;
    }
  }

#ifndef OPENGLES_1
  if (needs_reload || num_ram_mipmap_levels > 0) {
    // Make sure that any incoherent writes to this texture have been synced.
    if (gtc->needs_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT)) {
      issue_memory_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
    }
  }
#endif

  if (!needs_reload) {
    // Try to subload the image over the existing GL Texture object, possibly
    // saving on texture memory fragmentation.
    if (GLCAT.is_debug()) {
      if (num_ram_mipmap_levels == 0) {
        if (tex->has_clear_color()) {
          GLCAT.debug()
            << "clearing texture " << tex->get_name() << ", "
            << width << " x " << height << " x " << depth
            << ", z = " << z << ", uses_mipmaps = " << uses_mipmaps
            << ", clear_color = " << tex->get_clear_color() << "\n";
        } else {
          GLCAT.debug()
            << "not loading NULL image for texture " << tex->get_name()
            << ", " << width << " x " << height << " x " << depth
            << ", z = " << z << ", uses_mipmaps = " << uses_mipmaps << "\n";
        }
      } else {
        GLCAT.debug()
          << "updating image data of texture " << tex->get_name() << ", "
          << width << " x " << height << " x " << depth << ", z = " << z
          << ", mipmaps " << num_ram_mipmap_levels << ", uses_mipmaps = "
          << uses_mipmaps << "\n";
      }
    }

    for (int n = mipmap_bias; n < num_levels; ++n) {
      // we grab the mipmap pointer first, if it is NULL we grab the normal
      // mipmap image pointer which is a PTA_uchar
      const unsigned char *image_ptr = (unsigned char*)tex->get_ram_mipmap_pointer(n);
      CPTA_uchar ptimage;
      if (image_ptr == nullptr) {
        ptimage = tex->get_ram_mipmap_image(n);
        if (ptimage.is_null()) {
          if (n < num_ram_mipmap_levels) {
            // We were told we'd have this many RAM mipmap images, but we
            // don't.  Raise a warning.
            GLCAT.warning()
              << "No mipmap level " << n << " defined for " << tex->get_name()
              << "\n";
            break;
          }

          if (tex->has_clear_color()) {
            // The texture has a clear color, so we should fill this mipmap
            // level to a solid color.
#ifndef OPENGLES
            if (texture_target != GL_TEXTURE_BUFFER) {
              if (_supports_clear_texture) {
                // We can do that with the convenient glClearTexImage
                // function.
                vector_uchar clear_data = tex->get_clear_data();

                _glClearTexImage(gtc->_index, n - mipmap_bias, external_format,
                                 component_type, (void *)&clear_data[0]);
                continue;
              }
            } else {
              if (_supports_clear_buffer) {
                // For buffer textures we need to clear the underlying
                // storage.
                vector_uchar clear_data = tex->get_clear_data();

                _glClearBufferData(GL_TEXTURE_BUFFER, internal_format,
                                   external_format, component_type,
                                   (const void *)&clear_data[0]);
                continue;
              }
            }
#endif  // OPENGLES
            // Ask the Texture class to create the mipmap level in RAM.  It'll
            // fill it in with the correct clear color, which we can then
            // upload.
            ptimage = tex->make_ram_mipmap_image(n);
          } else {
            // No clear color and no more images.
            break;
          }
        }
        image_ptr = ptimage;
      }

      PTA_uchar bgr_image;
      size_t view_size = tex->get_ram_mipmap_view_size(n);
      if (image_ptr != nullptr) {
        // Advance past the earlier views (and pages, if only one page is
        // being uploaded), with a sanity check that we stay inside the
        // RAM image for this mipmap level.
        const unsigned char *orig_image_ptr = image_ptr;
        image_ptr += view_size * gtc->get_view();
        if (one_page_only) {
          view_size = tex->get_ram_mipmap_page_size(n);
          image_ptr += view_size * z;
        }
        nassertr(image_ptr >= orig_image_ptr && image_ptr + view_size <= orig_image_ptr + tex->get_ram_mipmap_image_size(n), false);

        if (image_compression == Texture::CM_off) {
          // If the GL doesn't claim to support BGR, we may have to reverse
          // the component ordering of the image.
          image_ptr = fix_component_ordering(bgr_image, image_ptr, view_size,
                                             external_format, tex);
        }
      }

      // Deliberately shadows the outer width/height/depth: these are the
      // dimensions of this particular mipmap level.
      int width = tex->get_expected_mipmap_x_size(n);
      int height = tex->get_expected_mipmap_y_size(n);
#ifndef OPENGLES_1
      int depth = tex->get_expected_mipmap_z_size(n);
#endif

#ifdef DO_PSTATS
      _data_transferred_pcollector.add_level(view_size);
#endif
      switch (texture_target) {
#ifndef OPENGLES_1
      case GL_TEXTURE_3D:
        if (_supports_3d_texture) {
          if (image_compression == Texture::CM_off) {
            _glTexSubImage3D(page_target, n - mipmap_bias, 0, 0, 0, width, height, depth,
                             external_format, component_type, image_ptr);
          } else {
            _glCompressedTexSubImage3D(page_target, n - mipmap_bias, 0, 0, 0, width, height, depth,
                                       external_format, view_size, image_ptr);
          }
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES_1
#ifndef OPENGLES
      case GL_TEXTURE_1D:
        if (image_compression == Texture::CM_off) {
          glTexSubImage1D(page_target, n - mipmap_bias, 0, width,
                          external_format, component_type, image_ptr);
        } else {
          _glCompressedTexSubImage1D(page_target, n - mipmap_bias, 0, width,
                                     external_format, view_size, image_ptr);
        }
        break;
#endif  // OPENGLES
#ifndef OPENGLES_1
      case GL_TEXTURE_2D_ARRAY:
      case GL_TEXTURE_CUBE_MAP_ARRAY:
        if (_supports_2d_texture_array) {
          if (image_compression == Texture::CM_off) {
            _glTexSubImage3D(page_target, n - mipmap_bias, 0, 0, 0, width, height, depth,
                             external_format, component_type, image_ptr);
          } else {
            _glCompressedTexSubImage3D(page_target, n - mipmap_bias, 0, 0, 0, width, height, depth,
                                       external_format, view_size, image_ptr);
          }
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES_1
#ifndef OPENGLES
      case GL_TEXTURE_BUFFER:
        if (_supports_buffer_texture) {
          _glBufferSubData(GL_TEXTURE_BUFFER, 0, view_size, image_ptr);
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES
      default:
        // Plain 2-d texture (or one cube map face).
        if (image_compression == Texture::CM_off) {
          if (n==0) {
            // It's unfortunate that we can't adjust the width, too, but
            // TexSubImage2D doesn't accept a row-stride parameter.
            height = tex->get_y_size() - tex->get_pad_y_size();
          }
          glTexSubImage2D(page_target, n - mipmap_bias, 0, 0, width, height,
                          external_format, component_type, image_ptr);
        } else {
          _glCompressedTexSubImage2D(page_target, n - mipmap_bias, 0, 0, width, height,
                                     external_format, view_size, image_ptr);
        }
        break;
      }
    }

    // Did that fail?  If it did, we'll immediately try again, this time
    // loading the texture from scratch.
    GLenum error_code = gl_get_error();
    if (error_code != GL_NO_ERROR) {
      if (GLCAT.is_debug()) {
        GLCAT.debug()
          << "GL texture subload failed for " << tex->get_name()
          << " : " << get_error_string(error_code) << "\n";
      }
      needs_reload = true;
    }
  }

  if (needs_reload) {
    // Load the image up from scratch, creating a new GL Texture object.
    if (GLCAT.is_debug()) {
      GLCAT.debug()
        << "loading new texture object for " << tex->get_name() << ", " << width
        << " x " << height << " x " << depth << ", z = " << z
        << ", mipmaps " << num_ram_mipmap_levels
        << ", uses_mipmaps = " << uses_mipmaps << "\n";
    }

    // If there is immutable storage, this is impossible to do, and we should
    // not have gotten here at all.
    nassertr(!gtc->_immutable, false);

    if (num_ram_mipmap_levels == 0) {
      if (GLCAT.is_debug()) {
        GLCAT.debug()
          << "  (initializing NULL image)\n";
      }

      if ((external_format == GL_DEPTH_STENCIL) && get_supports_depth_stencil()) {
#ifdef OPENGLES
        component_type = GL_UNSIGNED_INT_24_8_OES;
#else
        component_type = GL_UNSIGNED_INT_24_8_EXT;
#endif
      }
    }

    for (int n = mipmap_bias; n < num_levels; ++n) {
      const unsigned char *image_ptr = (unsigned char*)tex->get_ram_mipmap_pointer(n);
      CPTA_uchar ptimage;
      if (image_ptr == nullptr) {
        ptimage = tex->get_ram_mipmap_image(n);
        if (ptimage.is_null()) {
          if (n < num_ram_mipmap_levels) {
            // We were told we'd have this many RAM mipmap images, but we
            // don't.  Raise a warning.
            GLCAT.warning()
              << "No mipmap level " << n << " defined for " << tex->get_name()
              << "\n";

            if (_supports_texture_max_level) {
              // Tell the GL we have no more mipmaps for it to use.
              glTexParameteri(texture_target, GL_TEXTURE_MAX_LEVEL, n - mipmap_bias);
            }
            break;
          }

          if (tex->has_clear_color()) {
            // Ask the Texture class to create the mipmap level in RAM.  It'll
            // fill it in with the correct clear color, which we can then
            // upload.
            ptimage = tex->make_ram_mipmap_image(n);
          } else if (image_compression != Texture::CM_off) {
            // We can't upload a NULL compressed texture.
            if (_supports_texture_max_level) {
              // Tell the GL we have no more mipmaps for it to use.
              glTexParameteri(texture_target, GL_TEXTURE_MAX_LEVEL, n - mipmap_bias);
            }
            break;
          }
          // NOTE(review): with no clear color and uncompressed data, a NULL
          // image_ptr falls through here; glTexImage* below is then called
          // with a null pointer, which allocates uninitialized storage.
        }
        image_ptr = ptimage;
      }

      PTA_uchar bgr_image;
      size_t view_size = tex->get_ram_mipmap_view_size(n);
      if (image_ptr != nullptr) {
        // Same view/page offset arithmetic as in the subload path above.
        const unsigned char *orig_image_ptr = image_ptr;
        image_ptr += view_size * gtc->get_view();
        if (one_page_only) {
          view_size = tex->get_ram_mipmap_page_size(n);
          image_ptr += view_size * z;
        }
        nassertr(image_ptr >= orig_image_ptr && image_ptr + view_size <= orig_image_ptr + tex->get_ram_mipmap_image_size(n), false);

        if (image_compression == Texture::CM_off) {
          // If the GL doesn't claim to support BGR, we may have to reverse
          // the component ordering of the image.
          image_ptr = fix_component_ordering(bgr_image, image_ptr, view_size,
                                             external_format, tex);
        }
      }

      // Per-level dimensions, deliberately shadowing the outer variables.
      int width = tex->get_expected_mipmap_x_size(n);
      int height = tex->get_expected_mipmap_y_size(n);
#ifndef OPENGLES_1
      int depth = tex->get_expected_mipmap_z_size(n);
#endif

#ifdef DO_PSTATS
      _data_transferred_pcollector.add_level(view_size);
#endif
      switch (texture_target) {
#ifndef OPENGLES
        // 1-d textures not supported by OpenGL ES.  Fall through.
      case GL_TEXTURE_1D:
        if (image_compression == Texture::CM_off) {
          glTexImage1D(page_target, n - mipmap_bias, internal_format,
                       width, 0, external_format, component_type, image_ptr);
        } else {
          _glCompressedTexImage1D(page_target, n - mipmap_bias, external_format,
                                  width, 0, view_size, image_ptr);
        }
        break;
#endif  // OPENGLES

        // OpenGL ES will fall through.
#ifndef OPENGLES_1
      case GL_TEXTURE_3D:
        if (_supports_3d_texture) {
          if (image_compression == Texture::CM_off) {
            _glTexImage3D(page_target, n - mipmap_bias, internal_format,
                          width, height, depth, 0,
                          external_format, component_type, image_ptr);
          } else {
            _glCompressedTexImage3D(page_target, n - mipmap_bias, external_format,
                                    width, height, depth, 0, view_size, image_ptr);
          }
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES_1
#ifndef OPENGLES_1
      case GL_TEXTURE_2D_ARRAY:
      case GL_TEXTURE_CUBE_MAP_ARRAY:
        if (_supports_2d_texture_array) {
          if (image_compression == Texture::CM_off) {
            _glTexImage3D(page_target, n - mipmap_bias, internal_format,
                          width, height, depth, 0,
                          external_format, component_type, image_ptr);
          } else {
            _glCompressedTexImage3D(page_target, n - mipmap_bias, external_format,
                                    width, height, depth, 0, view_size, image_ptr);
          }
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES_1
#ifndef OPENGLES
      case GL_TEXTURE_BUFFER:
        if (_supports_buffer_texture) {
          _glBufferData(GL_TEXTURE_BUFFER, view_size, image_ptr,
                        get_usage(tex->get_usage_hint()));
        } else {
          report_my_gl_errors();
          return false;
        }
        break;
#endif  // OPENGLES
      default:
        // Plain 2-d texture (or one cube map face).
        if (image_compression == Texture::CM_off) {
          glTexImage2D(page_target, n - mipmap_bias, internal_format,
                       width, height, 0,
                       external_format, component_type, image_ptr);
        } else {
          _glCompressedTexImage2D(page_target, n - mipmap_bias, external_format,
                                  width, height, 0, view_size, image_ptr);
        }
      }
    }

    // Report the error message explicitly if the GL texture creation failed.
    GLenum error_code = gl_get_error();
    if (error_code != GL_NO_ERROR) {
      GLCAT.error()
        << "GL texture creation failed for " << tex->get_name()
        << " : " << get_error_string(error_code) << "\n";

      gtc->_has_storage = false;
      return false;
    }
  }

  report_my_gl_errors();
  return true;
}

/**
 * Causes mipmaps to be generated for an uploaded texture.
 */
void CLP(GraphicsStateGuardian)::
generate_mipmaps(CLP(TextureContext) *gtc) {
#ifndef OPENGLES
  if (_supports_dsa) {
    // OpenGL 4.5 offers an easy way to do this without binding.
    _glGenerateTextureMipmap(gtc->_index);
    return;
  }
#endif

  // Non-DSA path: bind the texture, generate, then unbind.  _state_texture
  // is reset so the normal state tracking re-applies the binding later.
  if (_glGenerateMipmap != nullptr) {
    _state_texture = 0;
    update_texture(gtc, true);
    apply_texture(gtc);
    _glGenerateMipmap(gtc->_target);
    glBindTexture(gtc->_target, 0);
  }
}

/**
 * This is used as a standin for upload_texture when the texture in question
 * is unavailable (e.g. it hasn't yet been loaded from disk).  Until the
 * texture image itself becomes available, we will render the texture's
 * "simple" image--a sharply reduced version of the same texture.
 */
bool CLP(GraphicsStateGuardian)::
upload_simple_texture(CLP(TextureContext) *gtc) {
  report_my_gl_errors();

  PStatGPUTimer timer(this, _load_texture_pcollector);
  Texture *tex = gtc->get_texture();
  nassertr(tex != nullptr, false);

  GLenum internal_format = GL_RGBA;
  GLenum external_format = GL_BGRA;

  // Returns false (no simple image available) rather than raising an error.
  const unsigned char *image_ptr = tex->get_simple_ram_image();
  if (image_ptr == nullptr) {
    return false;
  }

  size_t image_size = tex->get_simple_ram_image_size();
  PTA_uchar bgr_image;
  if (!_supports_bgr) {
    // If the GL doesn't claim to support BGR, we may have to reverse the
    // component ordering of the image.
    external_format = GL_RGBA;
    image_ptr = fix_component_ordering(bgr_image, image_ptr, image_size,
                                       external_format, tex);
  }

  int width = tex->get_simple_x_size();
  int height = tex->get_simple_y_size();
  GLenum component_type = GL_UNSIGNED_BYTE;

  if (GLCAT.is_debug()) {
    GLCAT.debug()
      << "loading simple image for " << tex->get_name() << "\n";
  }

  // Turn off mipmaps for the simple texture.
  if (tex->uses_mipmaps() && _supports_texture_max_level) {
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
  }

#ifdef DO_PSTATS
  _data_transferred_pcollector.add_level(image_size);
#endif

#ifdef OPENGLES
  // ES requires internal and external formats to match.
  internal_format = external_format;
#endif

  glTexImage2D(GL_TEXTURE_2D, 0, internal_format,
               width, height, 0,
               external_format, component_type, image_ptr);

  gtc->mark_simple_loaded();

  report_my_gl_errors();
  return true;
}

/**
 * Asks OpenGL how much texture memory is consumed by the indicated texture
 * (which is also the currently-selected texture).
 *
 * The value returned is an estimate: the per-texel bit counts are queried
 * from the GL and rounded up to whole bytes, and mipmapped textures are
 * scaled by the usual 4/3 factor.
 */
size_t CLP(GraphicsStateGuardian)::
get_texture_memory_size(CLP(TextureContext) *gtc) {
  Texture *tex = gtc->get_texture();
#ifdef OPENGLES
  // Texture querying not supported on OpenGL ES.
  int width = tex->get_x_size();
  int height = tex->get_y_size();
  int depth = 1;
  int scale = 1;
  size_t num_bytes = 2;  // Temporary assumption?

#else
  GLenum target = get_texture_target(tex->get_texture_type());

  GLenum page_target = target;
  GLint scale = 1;
  if (target == GL_TEXTURE_CUBE_MAP) {
    // We need a particular page to get the level parameter from.
    page_target = GL_TEXTURE_CUBE_MAP_POSITIVE_X;
    scale = 6;

  } else if (target == GL_TEXTURE_BUFFER) {
    // In the case of buffer textures, we provided the size to begin with, so
    // no point in querying anything.  Plus, glGetTexParameter is not even
    // supported for buffer textures.
    return tex->get_expected_ram_image_size();
  }

  clear_my_gl_errors();

  GLint internal_format;
  glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_INTERNAL_FORMAT, &internal_format);

  if (is_compressed_format(internal_format)) {
    // Try to get the compressed size.
    GLint image_size;
    glGetTexLevelParameteriv(page_target, 0,
                             GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &image_size);

    GLenum error_code = gl_get_error();
    if (error_code != GL_NO_ERROR) {
      if (GLCAT.is_debug()) {
        GLCAT.debug()
          << "Couldn't get compressed size for " << tex->get_name()
          << " : " << get_error_string(error_code) << "\n";
      }
      // Fall through to the noncompressed case.
    } else {
      return image_size * scale;
    }
  }

  // OK, get the noncompressed size.
  GLint red_size, green_size, blue_size, alpha_size;
  GLint depth_size = 0, luminance_size = 0, intensity_size = 0;
  glGetTexLevelParameteriv(page_target, 0,
                           GL_TEXTURE_RED_SIZE, &red_size);
  glGetTexLevelParameteriv(page_target, 0,
                           GL_TEXTURE_GREEN_SIZE, &green_size);
  glGetTexLevelParameteriv(page_target, 0,
                           GL_TEXTURE_BLUE_SIZE, &blue_size);
  glGetTexLevelParameteriv(page_target, 0,
                           GL_TEXTURE_ALPHA_SIZE, &alpha_size);
  if (get_supports_luminance_texture()) {
    glGetTexLevelParameteriv(page_target, 0,
                             GL_TEXTURE_LUMINANCE_SIZE, &luminance_size);
    glGetTexLevelParameteriv(page_target, 0,
                             GL_TEXTURE_INTENSITY_SIZE, &intensity_size);
  }
  if (get_supports_depth_texture()) {
    glGetTexLevelParameteriv(page_target, 0,
                             GL_TEXTURE_DEPTH_SIZE, &depth_size);
  }

  GLint width = 1, height = 1, depth = 1;
  glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_WIDTH, &width);
  glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_HEIGHT, &height);
  if (_supports_3d_texture || _supports_2d_texture_array) {
    glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_DEPTH, &depth);
  }

  report_my_gl_errors();

  size_t num_bits = (red_size + green_size + blue_size + alpha_size + luminance_size + intensity_size + depth_size);
  size_t num_bytes = (num_bits + 7) / 8;
#endif  // OPENGLES

  size_t result = num_bytes * width * height * depth * scale;
  if (gtc->_uses_mipmaps) {
    // A full mipmap chain adds about one third again.
    result = (result * 4) / 3;
  }

  return result;
}

/**
 * Checks the list of resident texture objects to see if any have recently
 * been evicted.
 */
void CLP(GraphicsStateGuardian)::
check_nonresident_texture(BufferContextChain &chain) {
#if defined(SUPPORT_FIXED_FUNCTION) && !defined(OPENGLES)
  // Residency queries not supported by OpenGL ES.
  size_t num_textures = chain.get_count();
  if (num_textures == 0) {
    return;
  }

  // alloca is acceptable here: the arrays are small, pointer-sized entries
  // bounded by the number of textures in the chain.
  CLP(TextureContext) **gtc_list = (CLP(TextureContext) **)alloca(num_textures * sizeof(CLP(TextureContext) *));
  GLuint *texture_list = (GLuint *)alloca(num_textures * sizeof(GLuint));
  size_t ti = 0;
  BufferContext *node = chain.get_first();
  while (node != nullptr) {
    CLP(TextureContext) *gtc = DCAST(CLP(TextureContext), node);
    gtc_list[ti] = gtc;
    texture_list[ti] = gtc->_index;
    node = node->get_next();
    ++ti;
  }
  nassertv(ti == num_textures);
  GLboolean *results = (GLboolean *)alloca(num_textures * sizeof(GLboolean));
  bool all_resident = (glAreTexturesResident(num_textures, texture_list, results) != 0);

  report_my_gl_errors();

  if (!all_resident) {
    // Some are now nonresident.
    for (ti = 0; ti < num_textures; ++ti) {
      if (!results[ti]) {
        gtc_list[ti]->set_resident(false);
      }
    }
  }
#endif  // OPENGLES
}

/**
 * The internal implementation of extract_texture_data(), given an already-
 * created TextureContext.
 *
 * Queries the GL for the texture's current parameters and image data, and
 * writes them back into the Texture object (dimensions, component type,
 * format, wrap/filter modes, and the RAM image including mipmap levels).
 * Returns false if the texture's format cannot be mapped back to a Panda
 * format or if any GL query fails.
 */
bool CLP(GraphicsStateGuardian)::
do_extract_texture_data(CLP(TextureContext) *gtc) {
  report_my_gl_errors();

  GLenum target = gtc->_target;
  if (target == GL_NONE) {
    return false;
  }

#ifndef OPENGLES_1
  // Make sure any incoherent writes to the texture have been synced.
  if (gtc->needs_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT)) {
    issue_memory_barrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
  }
#endif

  Texture *tex = gtc->get_texture();

  glBindTexture(target, gtc->_index);
  if (GLCAT.is_spam()) {
    GLCAT.spam()
      << "glBindTexture(0x" << hex << target << dec << ", " << gtc->_index
      << "): " << *tex << "\n";
  }

#ifndef OPENGLES
  if (target == GL_TEXTURE_BUFFER) {
    _glBindBuffer(GL_TEXTURE_BUFFER, gtc->_buffer);
  }
#endif

  GLint wrap_u, wrap_v, wrap_w;
  GLint minfilter, magfilter;
#ifndef OPENGLES
  GLfloat border_color[4];
#endif

  // glGetTexParameter is not legal on buffer textures, so the sampler
  // state queries are skipped for them (on desktop GL).
#ifdef OPENGLES
  if (true) {
#else
  if (target != GL_TEXTURE_BUFFER) {
#endif
    glGetTexParameteriv(target, GL_TEXTURE_WRAP_S, &wrap_u);
    glGetTexParameteriv(target, GL_TEXTURE_WRAP_T, &wrap_v);
    wrap_w = GL_REPEAT;
#ifndef OPENGLES_1
    if (_supports_3d_texture) {
      glGetTexParameteriv(target, GL_TEXTURE_WRAP_R, &wrap_w);
    }
#endif
    glGetTexParameteriv(target, GL_TEXTURE_MIN_FILTER, &minfilter);
    glGetTexParameteriv(target, GL_TEXTURE_MAG_FILTER, &magfilter);

#ifndef OPENGLES
    glGetTexParameterfv(target, GL_TEXTURE_BORDER_COLOR, border_color);
#endif
  }

  GLenum page_target = target;
  if (target == GL_TEXTURE_CUBE_MAP) {
    // We need a particular page to get the level parameter from.
    page_target = GL_TEXTURE_CUBE_MAP_POSITIVE_X;
  }

  GLint width = gtc->_width, height = gtc->_height, depth = gtc->_depth;
#ifndef OPENGLES
  glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_WIDTH, &width);
  if (target != GL_TEXTURE_1D) {
    glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_HEIGHT, &height);
  }
  if (_supports_3d_texture && target == GL_TEXTURE_3D) {
    glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_DEPTH, &depth);
  } else if (target == GL_TEXTURE_2D_ARRAY || target == GL_TEXTURE_CUBE_MAP_ARRAY) {
    glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_DEPTH, &depth);
  } else if (target == GL_TEXTURE_CUBE_MAP) {
    depth = 6;
  }
#endif
  clear_my_gl_errors();

  if (width <= 0 || height <= 0 || depth <= 0) {
    GLCAT.error()
      << "No texture data for " << tex->get_name() << "\n";
    return false;
  }

  GLint internal_format = GL_RGBA;
#ifndef OPENGLES
  if (target != GL_TEXTURE_BUFFER) {
    glGetTexLevelParameteriv(page_target, 0, GL_TEXTURE_INTERNAL_FORMAT, &internal_format);
  } else {
    // Some drivers give the wrong result for the above call.  No problem; we
    // already know the internal format of a buffer texture since glTexBuffer
    // required passing the exact sized format.
    internal_format = gtc->_internal_format;
  }
#endif  // OPENGLES

  // Make sure we were able to query those parameters properly.
  GLenum error_code = gl_get_error();
  if (error_code != GL_NO_ERROR) {
    GLCAT.error()
      << "Unable to query texture parameters for " << tex->get_name()
      << " : " << get_error_string(error_code) << "\n";
    return false;
  }

  // Map the GL internal format back to a Panda component type, format, and
  // compression mode.  Unrecognized formats fail the extraction (default
  // case below).
  Texture::ComponentType type = Texture::T_unsigned_byte;
  Texture::Format format = Texture::F_rgb;
  Texture::CompressionMode compression = Texture::CM_off;

  switch (internal_format) {
#ifndef OPENGLES
  case GL_COLOR_INDEX:
    format = Texture::F_color_index;
    break;
#endif
#if GL_DEPTH_COMPONENT != GL_DEPTH_COMPONENT24
  case GL_DEPTH_COMPONENT:
#endif
  case GL_DEPTH_COMPONENT16:
  case GL_DEPTH_COMPONENT24:
  case GL_DEPTH_COMPONENT32:
    type = Texture::T_unsigned_short;
    format = Texture::F_depth_component;
    break;
#ifndef OPENGLES
  case GL_DEPTH_COMPONENT32F:
    type = Texture::T_float;
    format = Texture::F_depth_component;
    break;
#endif
  case GL_DEPTH_STENCIL:
  case GL_DEPTH24_STENCIL8:
    type = Texture::T_unsigned_int_24_8;
    format = Texture::F_depth_stencil;
    break;
#ifndef OPENGLES
  case GL_DEPTH32F_STENCIL8:
    type = Texture::T_float;
    format = Texture::F_depth_stencil;
    break;
#endif
  case GL_RGBA:
  case 4:
    format = Texture::F_rgba;
    break;
  case GL_RGBA4:
    format = Texture::F_rgba4;
    break;
#ifdef OPENGLES
  case GL_RGBA8_OES:
    format = Texture::F_rgba8;
    break;
#else
  case GL_RGBA8:
    format = Texture::F_rgba8;
    break;
#endif
#ifndef OPENGLES
  case GL_RGBA12:
    type = Texture::T_unsigned_short;
    format = Texture::F_rgba12;
    break;
#endif
  case GL_RGB:
  case 3:
    format = Texture::F_rgb;
    break;
#ifndef OPENGLES
  case GL_RGB5:
    format = Texture::F_rgb5;
    break;
#endif
  case GL_RGB5_A1:
    format = Texture::F_rgba5;
    break;
#ifndef OPENGLES
  case GL_RGB8:
    format = Texture::F_rgb8;
    break;
  case GL_RGB12:
    format = Texture::F_rgb12;
    break;
  case GL_RGBA16:
    format = Texture::F_rgba16;
    break;
  case GL_R3_G3_B2:
    format = Texture::F_rgb332;
    break;
  case GL_R8I:
    type = Texture::T_byte;
    format = Texture::F_r8i;
    break;
  case GL_RG8I:
    type = Texture::T_byte;
    format = Texture::F_rg8i;
    break;
  case GL_RGB8I:
    type = Texture::T_byte;
    format = Texture::F_rgb8i;
    break;
  case GL_RGBA8I:
    type = Texture::T_byte;
    format = Texture::F_rgba8i;
    break;
  case GL_R8UI:
    type = Texture::T_unsigned_byte;
    format = Texture::F_r8i;
    break;
  case GL_RG8UI:
    type = Texture::T_unsigned_byte;
    format = Texture::F_rg8i;
    break;
  case GL_RGB8UI:
    type = Texture::T_unsigned_byte;
    format = Texture::F_rgb8i;
    break;
  case GL_RGBA8UI:
    type = Texture::T_unsigned_byte;
    format = Texture::F_rgba8i;
    break;
  case GL_R16I:
    type = Texture::T_short;
    format = Texture::F_r16i;
    break;
  case GL_R16UI:
    type = Texture::T_unsigned_short;
    format = Texture::F_r16i;
    break;
#endif
#ifndef OPENGLES_1
  case GL_RGBA16F:
    type = Texture::T_float;
    format = Texture::F_rgba16;
    break;
  case GL_RGB16F:
    type = Texture::T_float;
    format = Texture::F_rgb16;
    break;
  case GL_RG16F:
    type = Texture::T_float;
    format = Texture::F_rg16;
    break;
  case GL_R16F:
    type = Texture::T_float;
    format = Texture::F_r16;
    break;
  case GL_RGBA32F:
    type = Texture::T_float;
    format = Texture::F_rgba32;
    break;
  case GL_RGB32F:
    type = Texture::T_float;
    format = Texture::F_rgb32;
    break;
  case GL_RG32F:
    type = Texture::T_float;
    format = Texture::F_rg32;
    break;
  case GL_R32F:
    type = Texture::T_float;
    format = Texture::F_r32;
    break;
#endif
#ifndef OPENGLES
  case GL_RGB16:
    type = Texture::T_unsigned_short;
    format = Texture::F_rgb16;
    break;
  case GL_RG16:
    type = Texture::T_unsigned_short;
    format = Texture::F_rg16;
    break;
  case GL_R16:
    type = Texture::T_unsigned_short;
    format = Texture::F_r16;
    break;
  case GL_RGB16_SNORM:
    type = Texture::T_short;
    format = Texture::F_rgb16;
    break;
  case GL_RG16_SNORM:
    type = Texture::T_short;
    format = Texture::F_rg16;
    break;
  case GL_R16_SNORM:
    type = Texture::T_short;
    format = Texture::F_r16;
    break;
#endif
#ifndef OPENGLES_1
  case GL_R11F_G11F_B10F:
    type = Texture::T_float;
    format = Texture::F_r11_g11_b10;
    break;
  case GL_RGB9_E5:
    type = Texture::T_float;
    format = Texture::F_rgb9_e5;
    break;
  case GL_RGB10_A2:
    type = Texture::T_unsigned_short;
    format = Texture::F_rgb10_a2;
    break;
#endif
#ifdef OPENGLES_2
  case GL_RED_EXT:
  case GL_R8_EXT:
    format = Texture::F_red;
    break;
#endif
#ifndef OPENGLES
  case GL_R32I:
    type = Texture::T_int;
    format = Texture::F_r32i;
    break;
#endif
#ifndef OPENGLES
  case GL_RED:
    format = Texture::F_red;
    break;
  case GL_GREEN:
    format = Texture::F_green;
    break;
  case GL_BLUE:
    format = Texture::F_blue;
    break;
#endif  // OPENGLES
  case GL_ALPHA:
    format = Texture::F_alpha;
    break;
  case GL_LUMINANCE:
#ifndef OPENGLES
  case GL_LUMINANCE16:
  case GL_LUMINANCE16F_ARB:
#endif
  case 1:
    format = Texture::F_luminance;
    break;
  case GL_LUMINANCE_ALPHA:
#ifndef OPENGLES
  case GL_LUMINANCE_ALPHA16F_ARB:
#endif
  case 2:
    format = Texture::F_luminance_alpha;
    break;

#ifndef OPENGLES_1
  case GL_SRGB:
#ifndef OPENGLES
  case GL_SRGB8:
#endif
    format = Texture::F_srgb;
    break;
  case GL_SRGB_ALPHA:
  case GL_SRGB8_ALPHA8:
    format = Texture::F_srgb_alpha;
    break;
#endif  // OPENGLES_1
#ifndef OPENGLES
  case GL_SLUMINANCE:
  case GL_SLUMINANCE8:
    format = Texture::F_sluminance;
    break;
  case GL_SLUMINANCE_ALPHA:
  case GL_SLUMINANCE8_ALPHA8:
    format = Texture::F_sluminance_alpha;
    break;
#endif  // OPENGLES

#ifndef OPENGLES
  case GL_COMPRESSED_RGB:
    format = Texture::F_rgb;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_RGBA:
    format = Texture::F_rgba;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_ALPHA:
    format = Texture::F_alpha;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_LUMINANCE:
    format = Texture::F_luminance;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_LUMINANCE_ALPHA:
    format = Texture::F_luminance_alpha;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_SRGB:
    format = Texture::F_srgb;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_SRGB_ALPHA:
    format = Texture::F_srgb_alpha;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_SLUMINANCE:
    format = Texture::F_sluminance;
    compression = Texture::CM_on;
    break;
  case GL_COMPRESSED_SLUMINANCE_ALPHA:
    format = Texture::F_sluminance_alpha;
    compression = Texture::CM_on;
    break;
#endif
  case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
    format = Texture::F_rgb;
    compression = Texture::CM_dxt1;
    break;
  case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
    format = Texture::F_rgbm;
    compression = Texture::CM_dxt1;
    break;
#ifndef OPENGLES
  case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
    format = Texture::F_srgb;
    compression = Texture::CM_dxt1;
    break;
  case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
    format = Texture::F_srgb_alpha;
    compression = Texture::CM_dxt1;
    break;
#endif
#ifdef OPENGLES
  case GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG:
    format = Texture::F_rgb;
    compression = Texture::CM_pvr1_2bpp;
    break;
  case GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG:
    format = Texture::F_rgba;
    compression = Texture::CM_pvr1_2bpp;
    break;
  case GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG:
    format = Texture::F_rgb;
    compression = Texture::CM_pvr1_4bpp;
    break;
  case GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG:
    format = Texture::F_rgba;
    compression = Texture::CM_pvr1_4bpp;
    break;
#else
  case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
    format = Texture::F_rgba;
    compression = Texture::CM_dxt3;
    break;
  case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
    format = Texture::F_rgba;
    compression = Texture::CM_dxt5;
    break;
  case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
    format = Texture::F_srgb_alpha;
    compression = Texture::CM_dxt3;
    break;
  case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
    format = Texture::F_srgb_alpha;
    compression = Texture::CM_dxt5;
    break;
  case GL_COMPRESSED_RGB_FXT1_3DFX:
    format = Texture::F_rgb;
    compression = Texture::CM_fxt1;
    break;
  case GL_COMPRESSED_RGBA_FXT1_3DFX:
    format = Texture::F_rgba;
    compression = Texture::CM_fxt1;
    break;

  case GL_COMPRESSED_LUMINANCE_LATC1_EXT:
    format = Texture::F_luminance;
    compression = Texture::CM_rgtc;
    break;
  case GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT:
    format = Texture::F_luminance_alpha;
    compression = Texture::CM_rgtc;
    break;
  case GL_COMPRESSED_RED_RGTC1:
    format = Texture::F_red;
    compression = Texture::CM_rgtc;
    break;
  case GL_COMPRESSED_SIGNED_RED_RGTC1:
    type = Texture::T_byte;
    format = Texture::F_red;
    compression = Texture::CM_rgtc;
    break;
  case GL_COMPRESSED_RG_RGTC2:
    format = Texture::F_rg;
    compression = Texture::CM_rgtc;
    break;
  case GL_COMPRESSED_SIGNED_RG_RGTC2:
    type = Texture::T_byte;
    format = Texture::F_rg;
    compression = Texture::CM_rgtc;
    break;
#endif
  default:
    GLCAT.warning()
      << "Unhandled internal format for " << tex->get_name()
      << " : " << hex << "0x" << internal_format << dec << "\n";
    return false;
  }

  // We don't want to call setup_texture() again; that resets too much.
  // Instead, we'll just set the individual components.
  tex->set_x_size(width);
  tex->set_y_size(height);
  tex->set_z_size(depth);
  tex->set_component_type(type);
  tex->set_format(format);

#ifdef OPENGLES
  if (true) {
#else
  if (target != GL_TEXTURE_BUFFER) {
#endif
    tex->set_wrap_u(get_panda_wrap_mode(wrap_u));
    tex->set_wrap_v(get_panda_wrap_mode(wrap_v));
    tex->set_wrap_w(get_panda_wrap_mode(wrap_w));
    tex->set_minfilter(get_panda_filter_type(minfilter));
    // NOTE(review): the magfilter write-back is deliberately disabled here;
    // magfilter is still queried above but never stored into the Texture.
    //tex->set_magfilter(get_panda_filter_type(magfilter));

#ifndef OPENGLES
    tex->set_border_color(LColor(border_color[0], border_color[1],
                                 border_color[2], border_color[3]));
#endif
  }

  PTA_uchar image;
  size_t page_size = 0;

  if (!extract_texture_image(image, page_size, tex, target, page_target,
                             type, compression, 0)) {
    return false;
  }

  tex->set_ram_image(image, compression, page_size);

  if (gtc->_uses_mipmaps) {
    // Also get the mipmap levels.
    GLint num_expected_levels = tex->get_expected_num_mipmap_levels();
    GLint highest_level = num_expected_levels;
    if (_supports_texture_max_level) {
      glGetTexParameteriv(target, GL_TEXTURE_MAX_LEVEL, &highest_level);
      highest_level = min(highest_level, num_expected_levels);
    }
    for (int n = 1; n <= highest_level; ++n) {
      if (!extract_texture_image(image, page_size, tex, target, page_target,
                                 type, compression, n)) {
        return false;
      }
      tex->set_ram_mipmap_image(n, image, page_size);
    }
  }

  return true;
}

/**
 * Called from extract_texture_data(), this gets just the image array for a
 * particular mipmap level (or for the base image).
 */
bool CLP(GraphicsStateGuardian)::
extract_texture_image(PTA_uchar &image, size_t &page_size,
                      Texture *tex, GLenum target, GLenum page_target,
                      Texture::ComponentType type,
                      Texture::CompressionMode compression, int n) {
#ifdef OPENGLES
  // Extracting texture data unsupported in OpenGL ES.
  nassert_raise("OpenGL ES does not support extracting texture data");
  return false;
#else

  // Make sure the GL driver does not align textures, otherwise we get corrupt
  // memory, since we don't take alignment into account.
  glPixelStorei(GL_PACK_ALIGNMENT, 1);

  if (target == GL_TEXTURE_CUBE_MAP) {
    // A cube map, compressed or uncompressed.  This we must extract one page
    // at a time.

    // If the cube map is compressed, we assume that all the compressed pages
    // are exactly the same size.  OpenGL doesn't make this assumption, but it
    // happens to be true for all currently extant compression schemes, and it
    // makes things simpler for us.  (It also makes things much simpler for
    // the graphics hardware, so it's likely to continue to be true for a
    // while at least.)

    GLenum external_format = get_external_image_format(tex);
    GLenum pixel_type = get_component_type(type);
    page_size = tex->get_expected_ram_mipmap_page_size(n);

    if (compression != Texture::CM_off) {
      GLint image_size;
      glGetTexLevelParameteriv(page_target, n,
                               GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &image_size);
      nassertr(image_size <= (int)page_size, false);
      page_size = image_size;
    }

    image = PTA_uchar::empty_array(page_size * 6);

    for (int z = 0; z < 6; ++z) {
      page_target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + z;

      if (compression == Texture::CM_off) {
        glGetTexImage(page_target, n, external_format, pixel_type,
                      image.p() + z * page_size);
      } else {
        // NOTE(review): the level argument here is hard-coded 0 rather
        // than n, unlike the uncompressed branch above — confirm whether
        // compressed cube-map mipmap extraction is intended.
        _glGetCompressedTexImage(page_target, 0, image.p() + z * page_size);
      }
    }

#ifndef OPENGLES
  } else if (target == GL_TEXTURE_BUFFER) {
    // In the case of a buffer texture, we need to get it from the buffer.
    image = PTA_uchar::empty_array(tex->get_expected_ram_mipmap_image_size(n));
    _glGetBufferSubData(target, 0, image.size(), image.p())
;
#endif

  } else if (compression == Texture::CM_off) {
    // An uncompressed 1-d, 2-d, or 3-d texture.
    image = PTA_uchar::empty_array(tex->get_expected_ram_mipmap_image_size(n));
    GLenum external_format = get_external_image_format(tex);
    GLenum pixel_type = get_component_type(type);
    glGetTexImage(target, n, external_format, pixel_type, image.p());

  } else {
    // A compressed 1-d, 2-d, or 3-d texture.
    GLint image_size;
    glGetTexLevelParameteriv(target, n, GL_TEXTURE_COMPRESSED_IMAGE_SIZE, &image_size);
    page_size = image_size / tex->get_z_size();
    image = PTA_uchar::empty_array(image_size);

    // Some drivers (ATI!) seem to try to overstuff more bytes in the array
    // than they asked us to allocate (that is, more bytes than
    // GL_TEXTURE_COMPRESSED_IMAGE_SIZE), requiring us to overallocate and
    // then copy the result into our final buffer.  Sheesh.

    // We'll only do this for small textures (the ATI bug doesn't *seem* to
    // affect large textures), to save on the overhead of the double-copy, and
    // reduce risk from an overly-large alloca().
#ifndef NDEBUG
    static const int max_trouble_buffer = 102400;
#else
    static const int max_trouble_buffer = 1024;
#endif
    if (image_size < max_trouble_buffer) {
      static const int extra_space = 32;
      unsigned char *buffer = (unsigned char *)alloca(image_size + extra_space);
#ifndef NDEBUG
      // Tag the buffer with a specific byte so we can report on whether that
      // driver bug is still active.
      static unsigned char keep_token = 0x00;
      unsigned char token = ++keep_token;
      memset(buffer + image_size, token, extra_space);
#endif
      _glGetCompressedTexImage(target, n, buffer);
      memcpy(image.p(), buffer, image_size);

#ifndef NDEBUG
      int count = extra_space;
      while (count > 0 && buffer[image_size + count - 1] == token) {
        --count;
      }
      if (count != 0) {
        GLCAT.warning()
          << "GL graphics driver overfilled " << count
          << " bytes into a " << image_size
          << "-byte buffer provided to glGetCompressedTexImage()\n";
      }

      // This had better not equal the amount of buffer space we set aside.
      // If it does, we assume the driver might have overfilled even our
      // provided extra buffer.
      nassertr(count != extra_space, true)
#endif  // NDEBUG

    } else {
      _glGetCompressedTexImage(target, n, image.p());
    }
  }

  // Now see if we were successful.
  GLenum error_code = gl_get_error();
  if (error_code != GL_NO_ERROR) {
    GLCAT.error()
      << "Unable to extract texture for " << *tex
      << ", mipmap level " << n
      << " : " << get_error_string(error_code) << "\n";
    nassertr(false, false);
    return false;
  }
  return true;
#endif  // OPENGLES
}

/**
 * Internally sets the point size parameters after any of the properties have
 * changed that might affect this.
 */
#ifdef SUPPORT_FIXED_FUNCTION
void CLP(GraphicsStateGuardian)::
do_point_size() {
  if (!_point_perspective) {
    // Normal, constant-sized points.  Here _point_size is a width in pixels.
    static LVecBase3f constant(1.0f, 0.0f, 0.0f);
    _glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, constant.get_data());

  } else {
    // Perspective-sized points.  Here _point_size is a width in 3-d units.
    // To arrange that, we need to figure out the appropriate scaling factor
    // based on the current viewport and projection matrix.
    LVector3 height(0.0f, _point_size, 1.0f);
    height = height * _projection_mat->get_mat();
    height = height * _internal_transform->get_scale()[1];
    PN_stdfloat s = height[1] * _viewport_height / _point_size;

    if (_current_lens->is_orthographic()) {
      // If we have an orthographic lens in effect, we don't actually apply a
      // perspective transform: we just scale the points once, regardless of
      // the distance from the camera.
      LVecBase3f constant(1.0f / (s * s), 0.0f, 0.0f);
      _glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, constant.get_data());

    } else {
      // Otherwise, we give it a true perspective adjustment.
      LVecBase3f square(0.0f, 0.0f, 1.0f / (s * s));
      _glPointParameterfv(GL_POINT_DISTANCE_ATTENUATION, square.get_data());
    }
  }

  report_my_gl_errors();
}
#endif

/**
 * Returns true if this particular GSG supports the specified Cg Shader
 * Profile.
 */
bool CLP(GraphicsStateGuardian)::
get_supports_cg_profile(const string &name) const {
#if !defined(HAVE_CG) || defined(OPENGLES)
  return false;
#else
  CGprofile profile = cgGetProfile(name.c_str());

  if (profile == CG_PROFILE_UNKNOWN) {
    GLCAT.error() << name << ", unknown Cg-profile\n";
    return false;
  }
  return (cgGLIsProfileSupported(profile) != 0);
#endif
}

/**
 * Binds a framebuffer object.
 *
 * Caches the currently bound FBO in _current_fbo so redundant binds are
 * skipped.
 */
void CLP(GraphicsStateGuardian)::
bind_fbo(GLuint fbo) {
  if (_current_fbo == fbo) {
    return;
  }

  PStatGPUTimer timer(this, _fbo_bind_pcollector);

  nassertv(_glBindFramebuffer != 0);
#if defined(OPENGLES_2)
  _glBindFramebuffer(GL_FRAMEBUFFER, fbo);
#elif defined(OPENGLES_1)
  _glBindFramebuffer(GL_FRAMEBUFFER_OES, fbo);
#else
  _glBindFramebuffer(GL_FRAMEBUFFER_EXT, fbo);
#endif
  _current_fbo = fbo;
}

// GL stencil code section

// Translation table from Panda stencil-operation values to GL enums.
// NOTE(review): presumably indexed by the engine's stencil-operation enum
// order; verify the ordering against the callers before modifying.
static int gl_stencil_operations_array[] = {
  GL_KEEP,
  GL_ZERO,
  GL_REPLACE,
#ifdef OPENGLES_1
  GL_INCR_WRAP_OES,
  GL_DECR_WRAP_OES,
#else
  GL_INCR_WRAP,
  GL_DECR_WRAP,
#endif
  GL_INVERT,

  GL_INCR,
  GL_DECR,
};

/**
 * Set stencil render states.
*/ void CLP(GraphicsStateGuardian):: do_issue_stencil() { if (!_supports_stencil) { return; } const StencilAttrib *stencil; if (_target_rs->get_attrib(stencil)) { // DEBUG if (false) { GLCAT.debug() << "STENCIL STATE CHANGE\n"; GLCAT.debug() << "\n" << "SRS_front_comparison_function " << (int)stencil->get_render_state(StencilAttrib::SRS_front_comparison_function) << "\n" << "SRS_front_stencil_fail_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_front_stencil_fail_operation) << "\n" << "SRS_front_stencil_pass_z_fail_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_front_stencil_pass_z_fail_operation) << "\n" << "SRS_front_stencil_pass_z_pass_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_front_stencil_pass_z_pass_operation) << "\n" << "SRS_reference " << (int)stencil->get_render_state(StencilAttrib::SRS_reference) << "\n" << "SRS_read_mask " << (int)stencil->get_render_state(StencilAttrib::SRS_read_mask) << "\n" << "SRS_write_mask " << (int)stencil->get_render_state(StencilAttrib::SRS_write_mask) << "\n" << "SRS_back_comparison_function " << (int)stencil->get_render_state(StencilAttrib::SRS_back_comparison_function) << "\n" << "SRS_back_stencil_fail_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_back_stencil_fail_operation) << "\n" << "SRS_back_stencil_pass_z_fail_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_back_stencil_pass_z_fail_operation) << "\n" << "SRS_back_stencil_pass_z_pass_operation " << (int)stencil->get_render_state(StencilAttrib::SRS_back_stencil_pass_z_pass_operation) << "\n"; } #ifndef OPENGLES if (_supports_two_sided_stencil) { // TODO: add support for OpenGL 2.0-style glStencilFuncSeparate. 
unsigned int back_compare; back_compare = stencil->get_render_state(StencilAttrib::SRS_back_comparison_function); if (back_compare != RenderAttrib::M_none) { glEnable(GL_STENCIL_TEST_TWO_SIDE_EXT); _glActiveStencilFaceEXT(GL_BACK); glStencilFunc( PANDA_TO_GL_COMPAREFUNC(back_compare), stencil->get_render_state(StencilAttrib::SRS_reference), stencil->get_render_state(StencilAttrib::SRS_read_mask)); glStencilOp( gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_back_stencil_fail_operation)], gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_back_stencil_pass_z_fail_operation)], gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_back_stencil_pass_z_pass_operation)] ); glStencilMask(stencil->get_render_state(StencilAttrib::SRS_write_mask)); } else { glDisable(GL_STENCIL_TEST_TWO_SIDE_EXT); } _glActiveStencilFaceEXT(GL_FRONT); } #endif // OPENGLES unsigned int front_compare; front_compare = stencil->get_render_state(StencilAttrib::SRS_front_comparison_function); if (front_compare != RenderAttrib::M_none) { glEnable(GL_STENCIL_TEST); glStencilFunc( PANDA_TO_GL_COMPAREFUNC(front_compare), stencil->get_render_state(StencilAttrib::SRS_reference), stencil->get_render_state(StencilAttrib::SRS_read_mask)); glStencilOp( gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_front_stencil_fail_operation)], gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_front_stencil_pass_z_fail_operation)], gl_stencil_operations_array[stencil->get_render_state(StencilAttrib::SRS_front_stencil_pass_z_pass_operation)] ); glStencilMask(stencil->get_render_state(StencilAttrib::SRS_write_mask)); } else { glDisable(GL_STENCIL_TEST); } if (stencil->get_render_state(StencilAttrib::SRS_clear)) { // clear stencil buffer glClearStencil(stencil->get_render_state(StencilAttrib::SRS_clear_value)); glClear(GL_STENCIL_BUFFER_BIT); } } else { glDisable(GL_STENCIL_TEST); #ifndef OPENGLES if 
(_supports_two_sided_stencil) { glDisable(GL_STENCIL_TEST_TWO_SIDE_EXT); } #endif // OPENGLES } } /** * */ void CLP(GraphicsStateGuardian):: do_issue_scissor() { const ScissorAttrib *target_scissor; _target_rs->get_attrib_def(target_scissor); if (!target_scissor->is_off()) { // A non-off ScissorAttrib means to override the scissor setting that was // specified by the DisplayRegion. if (!_scissor_enabled) { if (GLCAT.is_spam()) { GLCAT.spam() << "glEnable(GL_SCISSOR_TEST)\n"; } glEnable(GL_SCISSOR_TEST); _scissor_enabled = true; } const LVecBase4 &frame = target_scissor->get_frame(); int x = (int)(_viewport_x + _viewport_width * frame[0] + 0.5f); int y = (int)(_viewport_y + _viewport_height * frame[2] + 0.5f); int width = (int)(_viewport_width * (frame[1] - frame[0]) + 0.5f); int height = (int)(_viewport_height * (frame[3] - frame[2]) + 0.5f); if (GLCAT.is_spam()) { GLCAT.spam() << "glScissor(" << x << ", " << y << ", " << width << ", " << height << ")\n"; } glScissor(x, y, width, height); _scissor_attrib_active = true; } else if (_scissor_attrib_active) { _scissor_attrib_active = false; if (_scissor_array.size() > 0) { // Scissoring is enabled on the display region. Revert to the scissor // state specified in the DisplayRegion. #ifndef OPENGLES if (_supports_viewport_arrays) { _glScissorArrayv(0, _scissor_array.size(), _scissor_array[0].get_data()); } else #endif // OPENGLES { const LVecBase4i sr = _scissor_array[0]; glScissor(sr[0], sr[1], sr[2], sr[3]); } } else if (_scissor_enabled) { // The display region had no scissor enabled. Disable scissoring. if (GLCAT.is_spam()) { GLCAT.spam() << "glDisable(GL_SCISSOR_TEST)\n"; } glDisable(GL_SCISSOR_TEST); _scissor_enabled = false; } } }
32.613003
163
0.688088
[ "geometry", "render", "object", "vector", "model", "transform", "3d", "solid" ]
6156959a3057abc947b737515b6553e3d09de29e
10,293
cc
C++
hybridse/examples/toydb/src/tablet/tablet_server_impl.cc
xuman2019/OpenMLDB
38d533cca517c2e119f574d5dc7f93469504d68a
[ "Apache-2.0" ]
1
2021-11-01T10:16:37.000Z
2021-11-01T10:16:37.000Z
hybridse/examples/toydb/src/tablet/tablet_server_impl.cc
xuman2019/OpenMLDB
38d533cca517c2e119f574d5dc7f93469504d68a
[ "Apache-2.0" ]
null
null
null
hybridse/examples/toydb/src/tablet/tablet_server_impl.cc
xuman2019/OpenMLDB
38d533cca517c2e119f574d5dc7f93469504d68a
[ "Apache-2.0" ]
null
null
null
/* * Copyright 2021 4Paradigm * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "tablet/tablet_server_impl.h" #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "base/fe_strings.h" #include "brpc/controller.h" #include "butil/iobuf.h" #include "codec/fe_schema_codec.h" #include "gflags/gflags.h" DECLARE_string(dbms_endpoint); DECLARE_string(toydb_endpoint); DECLARE_int32(toydb_port); DECLARE_bool(enable_keep_alive); namespace hybridse { namespace tablet { TabletServerImpl::TabletServerImpl() : slock_(), engine_(), catalog_(), dbms_ch_(NULL) {} TabletServerImpl::~TabletServerImpl() { delete dbms_ch_; } bool TabletServerImpl::Init() { catalog_ = std::shared_ptr<TabletCatalog>(new TabletCatalog()); bool ok = catalog_->Init(); if (!ok) { LOG(WARNING) << "fail to init catalog "; return false; } engine_ = std::unique_ptr<vm::Engine>(new vm::Engine(catalog_)); if (FLAGS_enable_keep_alive) { dbms_ch_ = new ::brpc::Channel(); brpc::ChannelOptions options; int ret = dbms_ch_->Init(FLAGS_dbms_endpoint.c_str(), &options); if (ret != 0) { return false; } KeepAlive(); } LOG(INFO) << "init tablet ok"; return true; } void TabletServerImpl::KeepAlive() { dbms::DBMSServer_Stub stub(dbms_ch_); std::string endpoint = FLAGS_toydb_endpoint; dbms::KeepAliveRequest request; request.set_endpoint(endpoint); dbms::KeepAliveResponse response; brpc::Controller cntl; stub.KeepAlive(&cntl, &request, &response, NULL); } void 
TabletServerImpl::CreateTable(RpcController* ctrl, const CreateTableRequest* request, CreateTableResponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); ::hybridse::common::Status* status = response->mutable_status(); if (request->pids_size() == 0) { status->set_code(common::kBadRequest); status->set_msg("create table without pid"); return; } if (request->tid() <= 0) { status->set_code(common::kBadRequest); status->set_msg("create table with invalid tid " + std::to_string(request->tid())); return; } for (int32_t i = 0; i < request->pids_size(); ++i) { std::shared_ptr<storage::Table> table(new storage::Table( request->tid(), request->pids(i), request->table())); bool ok = table->Init(); if (!ok) { LOG(WARNING) << "fail to init table storage for table " << request->table().name(); status->set_code(common::kBadRequest); status->set_msg("fail to init table storage"); return; } ok = AddTableLocked(table); if (!ok) { LOG(WARNING) << "table with name " << request->table().name() << " exists"; status->set_code(common::kTableExists); status->set_msg("table exist"); return; } // TODO(wangtaize) just one partition break; } status->set_code(common::kOk); DLOG(INFO) << "create table with name " << request->table().name() << " done"; } void TabletServerImpl::Insert(RpcController* ctrl, const InsertRequest* request, InsertResponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); ::hybridse::common::Status* status = response->mutable_status(); if (request->db().empty() || request->table().empty()) { status->set_code(common::kBadRequest); status->set_msg("db or table name is empty"); return; } std::shared_ptr<TabletTableHandler> handler = GetTableLocked(request->db(), request->table()); if (!handler) { status->set_code(common::kTableNotFound); status->set_msg("table is not found"); return; } bool ok = handler->GetTable()->Put(request->row().c_str(), request->row().size()); if (!ok) { status->set_code(common::kTablePutFailed); status->set_msg("fail 
to put row"); LOG(WARNING) << "fail to put data to table " << request->table() << " with key " << request->key(); return; } status->set_code(common::kOk); } void TabletServerImpl::Query(RpcController* ctrl, const QueryRequest* request, QueryResponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); common::Status* status = response->mutable_status(); status->set_code(common::kOk); status->set_msg("ok"); brpc::Controller* cntl = static_cast<brpc::Controller*>(ctrl); butil::IOBuf& buf = cntl->response_attachment(); if (request->is_batch()) { vm::BatchRunSession session; session.SetParameterSchema(request->parameter_schema()); { base::Status base_status; bool ok = engine_->Get(request->sql(), request->db(), session, base_status); if (!ok) { status->set_msg(base_status.str()); status->set_code(base_status.code); LOG(WARNING) << base_status.str(); return; } } if (request->is_debug()) { session.EnableDebug(); } codec::Row parameter(request->parameter_row()); std::vector<hybridse::codec::Row> outputs; int32_t ret = session.Run(parameter, outputs); if (0 != ret) { LOG(WARNING) << "fail to run sql " << request->sql(); status->set_code(common::kSqlError); status->set_msg("fail to run sql"); return; } uint32_t byte_size = 0; uint32_t count = 0; for(auto& row: outputs) { byte_size += row.size(); buf.append(reinterpret_cast<void*>(row.buf()), row.size()); count += 1; } response->set_schema(session.GetEncodedSchema()); response->set_byte_size(byte_size); response->set_count(count); status->set_code(common::kOk); } else { if (request->row().empty()) { status->set_code(common::kBadRequest); status->set_msg("input row is empty"); return; } vm::RequestRunSession session; { base::Status base_status; bool ok = engine_->Get(request->sql(), request->db(), session, base_status); if (!ok) { status->set_msg(base_status.str()); status->set_code(base_status.code); LOG(WARNING) << base_status.str(); return; } } if (request->is_debug()) { session.EnableDebug(); } codec::Row 
row(request->row()); codec::Row output; int32_t ret = session.Run(request->task_id(), row, &output); if (ret != 0) { LOG(WARNING) << "fail to run sql " << request->sql(); status->set_code(common::kSqlError); status->set_msg("fail to run sql"); return; } buf.append(reinterpret_cast<void*>(output.buf()), output.size()); response->set_schema(session.GetEncodedSchema()); response->set_byte_size(output.size()); response->set_count(1); status->set_code(common::kOk); } } void TabletServerImpl::Explain(RpcController* ctrl, const ExplainRequest* request, ExplainResponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); common::Status* status = response->mutable_status(); vm::ExplainOutput output; base::Status base_status; bool ok = engine_->Explain(request->sql(), request->db(), vm::kRequestMode, request->parameter_schema(), &output, &base_status); if (!ok || base_status.code != 0) { status->set_msg(base_status.str()); status->set_code(base_status.code); LOG(WARNING) << base_status.str(); return; } ok = codec::SchemaCodec::Encode(output.input_schema, response->mutable_input_schema()); if (!ok) { status->set_msg("fail encode schema"); status->set_code(common::kSchemaCodecError); return; } ok = codec::SchemaCodec::Encode(output.output_schema, response->mutable_output_schema()); if (!ok) { status->set_msg("fail encode schema"); status->set_code(common::kSchemaCodecError); return; } response->set_ir(output.ir); response->set_logical_plan(output.logical_plan); response->set_physical_plan(output.physical_plan); status->set_code(common::kOk); } void TabletServerImpl::GetTableSchema(RpcController* ctrl, const GetTablesSchemaRequest* request, GetTableSchemaReponse* response, Closure* done) { brpc::ClosureGuard done_guard(done); ::hybridse::common::Status* status = response->mutable_status(); std::shared_ptr<TabletTableHandler> handler = GetTableLocked(request->db(), request->name()); if (!handler) { status->set_code(common::kTableNotFound); status->set_msg("table is 
not found"); return; } response->mutable_schema()->CopyFrom(handler->GetTable()->GetTableDef()); } } // namespace tablet } // namespace hybridse
35.615917
80
0.577383
[ "vector" ]
6160ca2fad3ad8bae94fed076697237d20db32d5
19,905
cpp
C++
src/execution_tree/primitives/less.cpp
rtohid/phylanx
c2e4e8e531c204a70b1907995b1fd467870e6d9d
[ "BSL-1.0" ]
null
null
null
src/execution_tree/primitives/less.cpp
rtohid/phylanx
c2e4e8e531c204a70b1907995b1fd467870e6d9d
[ "BSL-1.0" ]
null
null
null
src/execution_tree/primitives/less.cpp
rtohid/phylanx
c2e4e8e531c204a70b1907995b1fd467870e6d9d
[ "BSL-1.0" ]
null
null
null
// Copyright (c) 2017-2018 Hartmut Kaiser // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <phylanx/config.hpp> #include <phylanx/execution_tree/primitives/less.hpp> #include <phylanx/ir/node_data.hpp> #include <hpx/include/lcos.hpp> #include <hpx/include/naming.hpp> #include <hpx/include/util.hpp> #include <hpx/throw_exception.hpp> #include <cstddef> #include <cstdint> #include <memory> #include <numeric> #include <string> #include <utility> #include <vector> /////////////////////////////////////////////////////////////////////////////// namespace phylanx { namespace execution_tree { namespace primitives { /////////////////////////////////////////////////////////////////////////// primitive create_less(hpx::id_type const& locality, std::vector<primitive_argument_type>&& operands, std::string const& name, std::string const& codename) { static std::string type("__lt"); return create_primitive_component( locality, type, std::move(operands), name, codename); } match_pattern_type const less::match_data = { hpx::util::make_tuple("__lt", std::vector<std::string>{"_1 < _2", "__lt(_1, _2)"}, &create_less, &create_primitive<less>) }; /////////////////////////////////////////////////////////////////////////// less::less(std::vector<primitive_argument_type>&& operands, std::string const& name, std::string const& codename) : primitive_component_base(std::move(operands), name, codename) {} /////////////////////////////////////////////////////////////////////////// template <typename T> primitive_argument_type less::less0d1d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (rhs.is_ref()) { rhs = blaze::map(rhs.vector(), [&](double x) { return (x < lhs.scalar()); }); } else { rhs.vector() = blaze::map(rhs.vector(), [&](double x) { return (x < lhs.scalar()); }); 
} return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(rhs)}); } template <typename T> primitive_argument_type less::less0d2d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (rhs.is_ref()) { rhs = blaze::map(rhs.matrix(), [&](double x) { return (x < lhs.scalar()); }); } else { rhs.matrix() = blaze::map(rhs.matrix(), [&](double x) { return (x < lhs.scalar()); }); } return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(rhs)}); } template <typename T> primitive_argument_type less::less0d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t rhs_dims = rhs.num_dimensions(); switch(rhs_dims) { case 0: return primitive_argument_type( ir::node_data<std::uint8_t>{lhs.scalar() < rhs.scalar()}); case 1: return less0d1d(std::move(lhs), std::move(rhs)); case 2: return less0d2d(std::move(lhs), std::move(rhs)); default: HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less0d", execution_tree::generate_error_message( "the operands have incompatible number of " "dimensions", name_, codename_)); } } template <typename T> primitive_argument_type less::less1d0d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (lhs.is_ref()) { lhs = blaze::map(lhs.vector(), [&](double x) { return (x < rhs.scalar()); }); } else { lhs.vector() = blaze::map(lhs.vector(), [&](double x) { return (x < rhs.scalar()); }); } return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(lhs)}); } template <typename T> primitive_argument_type less::less1d1d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t lhs_size = lhs.dimension(0); std::size_t rhs_size = rhs.dimension(0); if (lhs_size != rhs_size) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less1d1d", execution_tree::generate_error_message( "the dimensions of the operands 
do not match", name_, codename_)); } // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (lhs.is_ref()) { lhs = blaze::map(lhs.vector(), rhs.vector(), [&](double x, double y) { return (x < y); }); } else { lhs.vector() = blaze::map(lhs.vector(), rhs.vector(), [&](double x, double y) { return (x < y); }); } return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(lhs)}); } template <typename T> primitive_argument_type less::less1d2d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { auto cv = lhs.vector(); auto cm = rhs.matrix(); if (cv.size() != cm.columns()) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less1d2d", execution_tree::generate_error_message( "the dimensions of the operands do not match", name_, codename_)); } // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (rhs.is_ref()) { blaze::DynamicMatrix<double> m{cm.rows(), cm.columns()}; for (size_t i = 0UL; i != cm.rows(); i++) blaze::row(m, i) = blaze::map(blaze::row(cm, i), blaze::trans(cv), [](double x, double y) { return x < y; }); return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(m)}); } for (size_t i = 0UL; i != cm.rows(); i++) blaze::row(cm, i) = blaze::map(blaze::row(cm, i), blaze::trans(cv), [](double x, double y) { return x < y; }); return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(rhs)}); } template <typename T> primitive_argument_type less::less1d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t rhs_dims = rhs.num_dimensions(); switch(rhs_dims) { case 0: return less1d0d(std::move(lhs), std::move(rhs)); case 1: return less1d1d(std::move(lhs), std::move(rhs)); case 2: return less1d2d(std::move(lhs), std::move(rhs)); default: HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less1d", execution_tree::generate_error_message( "the operands have incompatible number of " "dimensions", name_, codename_)); } } template <typename T> 
primitive_argument_type less::less2d0d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t lhs_size = lhs.dimension(0); std::size_t rhs_size = rhs.dimension(0); // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (lhs.is_ref()) { lhs = blaze::map(lhs.matrix(), [&](double x) { return (x < rhs.scalar()); }); } else { lhs.matrix() = blaze::map(lhs.matrix(), [&](double x) { return (x < rhs.scalar()); }); } return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(lhs)}); } template <typename T> primitive_argument_type less::less2d1d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { auto cv = rhs.vector(); auto cm = lhs.matrix(); if (cv.size() != cm.columns()) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less2d1d", execution_tree::generate_error_message( "the dimensions of the operands do not match", name_, codename_)); } // TODO: SIMD functionality should be added, blaze implementation // is not currently available if (lhs.is_ref()) { blaze::DynamicMatrix<double> m{cm.rows(), cm.columns()}; for (size_t i = 0UL; i != cm.rows(); i++) blaze::row(m, i) = blaze::map(blaze::row(cm, i), blaze::trans(cv), [](double x, double y) { return x < y; }); return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(m)}); } for (size_t i = 0UL; i != cm.rows(); i++) blaze::row(cm, i) = blaze::map(blaze::row(cm, i), blaze::trans(cv), [](double x, double y) { return x < y; }); return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(lhs)}); } template <typename T> primitive_argument_type less::less2d2d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { auto lhs_size = lhs.dimensions(); auto rhs_size = rhs.dimensions(); if (lhs_size != rhs_size) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less2d2d", execution_tree::generate_error_message( "the dimensions of the operands do not match", name_, codename_)); } // TODO: SIMD functionality should be added, blaze implementation 
// is not currently available if (lhs.is_ref()) { lhs = blaze::map(lhs.matrix(), rhs.matrix(), [&](double x, double y) { return (x < y); }); } else { lhs.matrix() = blaze::map(lhs.matrix(), rhs.matrix(), [&](double x, double y) { return (x < y); }); } return primitive_argument_type( ir::node_data<std::uint8_t>{std::move(lhs)}); } template <typename T> primitive_argument_type less::less2d( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t rhs_dims = rhs.num_dimensions(); switch(rhs_dims) { case 0: return less2d0d(std::move(lhs), std::move(rhs)); case 1: return less2d1d(std::move(lhs), std::move(rhs)); case 2: return less2d2d(std::move(lhs), std::move(rhs)); default: HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less2d", execution_tree::generate_error_message( "the operands have incompatible number of " "dimensions", name_, codename_)); } } template <typename T> primitive_argument_type less::less_all( ir::node_data<T>&& lhs, ir::node_data<T>&& rhs) const { std::size_t lhs_dims = lhs.num_dimensions(); switch (lhs_dims) { case 0: return less0d(std::move(lhs), std::move(rhs)); case 1: return less1d(std::move(lhs), std::move(rhs)); case 2: return less2d(std::move(lhs), std::move(rhs)); default: HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::less_all", execution_tree::generate_error_message( "left hand side operand has unsupported number of " "dimensions", name_, codename_)); } } struct less::visit_less { template <typename T1, typename T2> primitive_argument_type operator()(T1, T2) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are incompatible " "and can't be compared", less_.name_, less_.codename_)); } primitive_argument_type operator()( ir::node_data<primitive_argument_type>&&, ir::node_data<primitive_argument_type>&&) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are 
incompatible " "and can't be compared", less_.name_, less_.codename_)); } primitive_argument_type operator()(std::vector<ast::expression>&&, std::vector<ast::expression>&&) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are incompatible " "and can't be compared", less_.name_, less_.codename_)); } primitive_argument_type operator()( ast::expression&&, ast::expression&&) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are incompatible " "and can't be compared", less_.name_, less_.codename_)); } primitive_argument_type operator()(primitive&&, primitive&&) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are incompatible " "and can't be compared", less_.name_, less_.codename_)); } template <typename T> primitive_argument_type operator()(T && lhs, T && rhs) const { return primitive_argument_type( ir::node_data<std::uint8_t>{lhs < rhs}); } primitive_argument_type operator()( util::recursive_wrapper< std::vector<primitive_argument_type>>&&, util::recursive_wrapper< std::vector<primitive_argument_type>>&&) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side are incompatible " "and can't be compared", less_.name_, less_.codename_)); } primitive_argument_type operator()( ir::node_data<double>&& lhs, std::int64_t&& rhs) const { if (lhs.num_dimensions() != 0) { return less_.less_all( std::move(lhs), operand_type(std::move(rhs))); } return primitive_argument_type( ir::node_data<std::uint8_t>{lhs[0] < rhs}); } primitive_argument_type operator()( std::int64_t&& lhs, ir::node_data<double>&& rhs) const { if (rhs.num_dimensions() != 0) { return less_.less_all( operand_type(std::move(lhs)), std::move(rhs)); } return 
primitive_argument_type( ir::node_data<std::uint8_t>{lhs < rhs[0]}); } primitive_argument_type operator()( ir::node_data<std::uint8_t>&& lhs, std::int64_t&& rhs) const { if (lhs.num_dimensions() != 0) { return less_.less_all(std::move(lhs), ir::node_data<std::uint8_t>{rhs != 0}); } return primitive_argument_type( ir::node_data<std::uint8_t>{lhs[0] < rhs}); } primitive_argument_type operator()( std::int64_t&& lhs, ir::node_data<std::uint8_t>&& rhs) const { if (rhs.num_dimensions() != 0) { return less_.less_all( ir::node_data<std::uint8_t>{lhs != 0}, std::move(rhs)); } return primitive_argument_type( ir::node_data<std::uint8_t>{lhs < rhs[0]}); } primitive_argument_type operator()( operand_type&& lhs, operand_type&& rhs) const { return less_.less_all( std::move(lhs), std::move(rhs)); } primitive_argument_type operator()(ir::node_data<std::uint8_t>&& lhs, ir::node_data<std::uint8_t>&& rhs) const { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "left hand side and right hand side can't be compared", less_.name_, less_.codename_)); } less const& less_; }; hpx::future<primitive_argument_type> less::eval( std::vector<primitive_argument_type> const& operands, std::vector<primitive_argument_type> const& args) const { if (operands.size() != 2) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "the less primitive requires exactly two " "operands", name_, codename_)); } if (!valid(operands[0]) || !valid(operands[1])) { HPX_THROW_EXCEPTION(hpx::bad_parameter, "less::eval", execution_tree::generate_error_message( "the less primitive requires that the " "arguments given by the operands array are valid", name_, codename_)); } auto this_ = this->shared_from_this(); return hpx::dataflow(hpx::launch::sync, hpx::util::unwrapping( [this_](primitive_argument_type&& op1, primitive_argument_type&& op2) -> primitive_argument_type { return primitive_argument_type( util::visit(visit_less{*this_}, 
std::move(op1.variant()), std::move(op2.variant()))); }), literal_operand(operands[0], args, name_, codename_), literal_operand(operands[1], args, name_, codename_)); } ////////////////////////////////////////////////////////////////////////// // Implement '<' for all possible combinations of lhs and rhs hpx::future<primitive_argument_type> less::eval( std::vector<primitive_argument_type> const& args) const { if (operands_.empty()) { return eval(args, noargs); } return eval(operands_, args); } }}}
34.142367
80
0.513188
[ "vector" ]
6164b0cb41bdaac26990819122a1a01fcf158c69
4,802
cpp
C++
lib/pxr/usd/usdAi/wrapAiShaderExport.cpp
chunkified/usd-arnold
e344f002a1073b37b837882417588e689dc73673
[ "Apache-2.0" ]
57
2017-07-29T10:28:06.000Z
2022-02-27T09:40:38.000Z
lib/pxr/usd/usdAi/wrapAiShaderExport.cpp
BigRoy/usd-arnold
8df3f080119016ed571e1dc0db68849a1cb81c25
[ "Apache-2.0" ]
35
2017-07-31T01:31:11.000Z
2020-04-24T00:50:31.000Z
lib/pxr/usd/usdAi/wrapAiShaderExport.cpp
BigRoy/usd-arnold
8df3f080119016ed571e1dc0db68849a1cb81c25
[ "Apache-2.0" ]
12
2017-07-29T06:46:00.000Z
2021-08-14T23:38:55.000Z
// // Copyright 2016 Pixar // // Licensed under the Apache License, Version 2.0 (the "Apache License") // with the following modification; you may not use this file except in // compliance with the Apache License and the following modification to it: // Section 6. Trademarks. is deleted and replaced with: // // 6. Trademarks. This License does not grant permission to use the trade // names, trademarks, service marks, or product names of the Licensor // and its affiliates, except as required to comply with Section 4(c) of // the License and to reproduce the content of the NOTICE file. // // You may obtain a copy of the Apache License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the Apache License with the above modification is // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the Apache License for the specific // language governing permissions and limitations under the Apache License. // #include "pxr/usd/usdAi/aiShaderExport.h" #include "pxr/usd/sdf/primSpec.h" #include "pxr/usd/usd/pyConversions.h" #include "pxr/base/tf/pyContainerConversions.h" #include "pxr/base/tf/pyResultConversions.h" #include "pxr/base/tf/pyUtils.h" #include "pxr/base/tf/wrapTypeHelpers.h" #include <boost/python/class.hpp> #include <boost/python/import.hpp> #include <string> using std::string; using namespace boost::python; PXR_NAMESPACE_USING_DIRECTIVE namespace { // TODO: register a converter for AtNode* type? 
AtNode* to_arnold_node(const object& ctypes_node) { static object ctypes_addressof = import("ctypes").attr("addressof"); if (ctypes_node.is_none()) { return nullptr; } else { return *reinterpret_cast<AtNode**>(uintptr_t(extract<uintptr_t>(ctypes_addressof(ctypes_node)))); } } static SdfPath export_material(AiShaderExport &self, const char* material_name, const object& surf_shader, const object& disp_shader) { return self.export_material(material_name, to_arnold_node(surf_shader), to_arnold_node(disp_shader)); } static SdfPath export_arnold_node(AiShaderExport &self, const object& arnold_node, SdfPath& parent_path, const std::set<std::string>& exportable_params) { return self.export_arnold_node(to_arnold_node(arnold_node), parent_path, &exportable_params); } // static UsdShadeOutput // get_output(AiShaderExport &self, const object& src_arnold_node, UsdShadeShader& src_shader, bool is_node_type=false, int32_t comp_index=-1) // { // UsdShadeOutput out; // self.get_output(to_arnold_node(src_arnold_node), src_shader, out, is_node_type, comp_index); // return out; // } static bool export_connection(AiShaderExport &self, const object& dest_arnold_node, UsdAiShader& dest_shader, const char* dest_param_name, const object& src_arnold_node, UsdAiShader& src_shader, int32_t src_comp_index=-1) { return self.export_connection( to_arnold_node(dest_arnold_node), dest_shader, dest_param_name, to_arnold_node(src_arnold_node), src_shader, src_comp_index); } } // anonymous namespace void wrapUsdAiShaderExport() { typedef AiShaderExport This; class_<This> cls("AiShaderExport", no_init); cls .def(init<const UsdStagePtr &, const SdfPath &, const UsdTimeCode &>( (arg("_stage"), arg("parent_scope") = SdfPath(), arg("_time_code") = UsdTimeCode::Default()))) // .def("__init__", __init__, // (arg("_stage"), // arg("_time_code") = UsdTimeCode::Default(), // arg("parent_scope") = string())) .def("bind_material", &This::bind_material, (arg("shader_path"), arg("shape_path"))) 
.def("export_material", &export_material, (arg("material_name"), arg("surf_shader"), arg("disp_shader"))) // TODO: add an overload of export_arnold_node w/out exportable_params .def("export_arnold_node", &export_arnold_node, (arg("material_name"), arg("arnold_node"), arg("parent_path"), arg("exportable_params"))) // .def("get_output", &get_output, // (arg("src_arnold_node"), // arg("src_shader"), // arg("is_node_type")=false, // arg("comp_index")=-1)) .def("export_connection", &export_connection, (arg("dest_arnold_node"), arg("dest_shader"), arg("dest_param_name"), arg("src_arnold_node"), arg("src_shader"), arg("src_comp_index") = -1)) ; }
35.308824
142
0.66868
[ "object" ]
616df244ddef7205c7de7d55a13d7492e4148bd2
11,979
cxx
C++
Modules/Visualization/MonteverdiGui/src/mvdLayerStackWidget.cxx
heralex/OTB
c52b504b64dc89c8fe9cac8af39b8067ca2c3a57
[ "Apache-2.0" ]
317
2015-01-19T08:40:58.000Z
2022-03-17T11:55:48.000Z
Modules/Visualization/MonteverdiGui/src/mvdLayerStackWidget.cxx
guandd/OTB
707ce4c6bb4c7186e3b102b2b00493a5050872cb
[ "Apache-2.0" ]
18
2015-07-29T14:13:45.000Z
2021-03-29T12:36:24.000Z
Modules/Visualization/MonteverdiGui/src/mvdLayerStackWidget.cxx
guandd/OTB
707ce4c6bb4c7186e3b102b2b00493a5050872cb
[ "Apache-2.0" ]
132
2015-02-21T23:57:25.000Z
2022-03-25T16:03:16.000Z
/* * Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES) * * This file is part of Orfeo Toolbox * * https://www.orfeo-toolbox.org/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mvdLayerStackWidget.h" #include "ui_mvdLayerStackWidget.h" /*****************************************************************************/ /* INCLUDE SECTION */ // // Qt includes (sorted by alphabetic order) //// Must be included before system/custom includes. // // System includes (sorted by alphabetic order) #include <cassert> // // ITK includes (sorted by alphabetic order) // // OTB includes (sorted by alphabetic order) // // Monteverdi includes (sorted by alphabetic order) #include "mvdGui.h" #include "mvdLayerStackItemModel.h" namespace mvd { /* TRANSLATOR mvd::LayerStackWidget Necessary for lupdate to be aware of C++ namespaces. Context comment for translator. 
*/ /*****************************************************************************/ /* CONSTANTS */ /*****************************************************************************/ /* STATIC IMPLEMENTATION SECTION */ /*****************************************************************************/ /* CLASS IMPLEMENTATION SECTION */ /*******************************************************************************/ LayerStackWidget::LayerStackWidget(QWidget* p, Qt::WindowFlags flags) : QWidget(p, flags), m_UI(new mvd::Ui::LayerStackWidget()) { m_UI->setupUi(this); m_UI->reloadButton->setVisible(false); m_UI->treeView->setDragEnabled(true); { QItemSelectionModel* ism = m_UI->treeView->selectionModel(); m_UI->treeView->setModel(new LayerStackItemModel(m_UI->treeView)); delete ism; ism = NULL; } InstallEventFilter(this); // Width of the columns in the layer stack: // Header section sizes are user adjustable but are not saved after a restart // So we set it to a guess value here // The unit is pixel, Qt's default is 100 m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_PROJ, 75); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_RESOLUTION, 40); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_NAME, 200); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_EFFECT, 90); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_I, 60); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_J, 60); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_R, 90); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_G, 90); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_B, 90); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_X, 90); m_UI->treeView->header()->resizeSection(LayerStackItemModel::COLUMN_Y, 90); QObject::connect(m_UI->treeView->selectionModel(), SIGNAL(currentRowChanged(const QModelIndex&, const QModelIndex&)), 
// to: this, SLOT(OnCurrentRowChanged(const QModelIndex&, const QModelIndex&))); QObject::connect(m_UI->treeView->selectionModel(), SIGNAL(selectionChanged(const QItemSelection&, const QItemSelection&)), // to: this, SLOT(OnSelectionChanged(const QItemSelection&, const QItemSelection&))); QObject::connect(m_UI->topButton, SIGNAL(clicked()), // to: this, SIGNAL(TopButtonClicked())); QObject::connect(m_UI->bottomButton, SIGNAL(clicked()), // to: this, SIGNAL(BottomButtonClicked())); QObject::connect(m_UI->upButton, SIGNAL(clicked()), // to: this, SIGNAL(UpButtonClicked())); QObject::connect(m_UI->downButton, SIGNAL(clicked()), // to: this, SIGNAL(DownButtonClicked())); QObject::connect(m_UI->deleteButton, SIGNAL(clicked()), // to: this, SIGNAL(DeleteLayerRequested())); QObject::connect(m_UI->deleteAllButton, SIGNAL(clicked()), // to: this, SIGNAL(DeleteAllLayersRequested())); QObject::connect(m_UI->projectionButton, SIGNAL(clicked()), // to: this, SIGNAL(ProjectionButtonClicked())); QObject::connect(m_UI->applyButton, SIGNAL(clicked()), // to: this, SIGNAL(ApplyButtonClicked())); QObject::connect(m_UI->resetEffectsButton, SIGNAL(clicked()), // to: this, SIGNAL(ResetEffectsButtonClicked())); /////// Bugfix for layer deletion QObject::connect(this->GetItemModel(), SIGNAL(LayerDeletingModel(unsigned int)), // to: this, SIGNAL(LayerDeletingWidget(unsigned int))); /////////////// } /*******************************************************************************/ LayerStackWidget::~LayerStackWidget() { delete m_UI; m_UI = NULL; } /*******************************************************************************/ bool LayerStackWidget::eventFilter(QObject* object, QEvent* e) { assert(object == m_UI->treeView); assert(e != NULL); if (object != m_UI->treeView) return false; switch (e->type()) { // // KEY RELEASE case QEvent::KeyRelease: { QKeyEvent* keyEvent = dynamic_cast<QKeyEvent*>(e); assert(keyEvent != NULL); switch (keyEvent->key()) { case Qt::Key_C: if 
(keyEvent->modifiers() == Qt::ControlModifier && m_UI->treeView->currentIndex().isValid()) { Q_EMIT CopyLayerRequested(LayerStackItemModel::GetLayer(m_UI->treeView->currentIndex())); return true; } break; // case Qt::Key_Delete: if (keyEvent->modifiers() == Qt::NoModifier) { Q_EMIT DeleteLayerRequested(); return true; } else if (keyEvent->modifiers() == Qt::ShiftModifier) { Q_EMIT DeleteAllLayersRequested(); return true; } break; } } break; // // MOUSE-WHEEL case QEvent::Wheel: { QWheelEvent* wEvent = dynamic_cast<QWheelEvent*>(e); assert(wEvent != NULL); if (wEvent->modifiers() == Qt::ControlModifier) { Q_EMIT RotateLayersRequested(wEvent->delta() / (MOUSE_WHEEL_STEP_FACTOR * MOUSE_WHEEL_STEP_DEGREES)); return true; } } break; // // other. default: break; } return false; } /*******************************************************************************/ const LayerStackItemModel* LayerStackWidget::GetItemModel() const { return const_cast<LayerStackWidget*>(this)->GetItemModel(); } /*******************************************************************************/ LayerStackItemModel* LayerStackWidget::GetItemModel() { assert(m_UI->treeView->model() == qobject_cast<LayerStackItemModel*>(m_UI->treeView->model())); return qobject_cast<LayerStackItemModel*>(m_UI->treeView->model()); } /*******************************************************************************/ void LayerStackWidget::InstallEventFilter(QObject* filter) { assert(m_UI != NULL); assert(m_UI->treeView != NULL); m_UI->treeView->installEventFilter(filter); } /*******************************************************************************/ // void // LayerStackWidget // ::SetModel( LayerStackItemModel * itemModel ) // { // // See http://qt-project.org/doc/qt-4.8/qabstractitemview.html#setModel . 
// QItemSelectionModel * itemSelectionModel = m_UI->treeView->selectionModel(); // m_UI->treeView->setModel( itemModel ); // itemModel->setParent( m_UI->treeView ); // delete itemSelectionModel; // itemSelectionModel = NULL; // } /*******************************************************************************/ void LayerStackWidget::SetApplyEnabled(bool enabled) { assert(m_UI->applyButton != NULL); m_UI->applyButton->setEnabled(enabled); } /*******************************************************************************/ void LayerStackWidget::SetCurrent(int row) { assert(m_UI->treeView->selectionModel() != NULL); // if( m_UI->treeView->selectionModel()->currentIndex().row()==row ) // return; if (row < 0) m_UI->treeView->selectionModel()->clearSelection(); else m_UI->treeView->selectionModel()->select(m_UI->treeView->model()->index(row, 1), QItemSelectionModel::ClearAndSelect | QItemSelectionModel::Rows); /* m_UI->treeView->selectionModel()->setCurrentIndex( m_UI->treeView->model()->index( row, 1 ), QItemSelectionModel::ClearAndSelect | // QItemSelectionModel::Current | QItemSelectionModel::Rows ); */ } /*******************************************************************************/ void LayerStackWidget::SetDeleteEnabled(bool enabled) { assert(m_UI != NULL); assert(m_UI->deleteButton != NULL); m_UI->deleteButton->setEnabled(enabled); assert(m_UI->deleteAllButton != NULL); m_UI->deleteAllButton->setEnabled(enabled); } /*******************************************************************************/ void LayerStackWidget::SetMoveEnabled(bool enabled) { assert(m_UI != NULL); assert(m_UI->upButton != NULL); assert(m_UI->downButton != NULL); assert(m_UI->topButton != NULL); assert(m_UI->bottomButton != NULL); m_UI->upButton->setEnabled(enabled); m_UI->downButton->setEnabled(enabled); m_UI->topButton->setEnabled(enabled); m_UI->bottomButton->setEnabled(enabled); } /*******************************************************************************/ void 
LayerStackWidget::SetProjectionEnabled(bool enabled) { assert(m_UI != NULL); assert(m_UI->projectionButton != NULL); m_UI->projectionButton->setEnabled(enabled); } /*******************************************************************************/ void LayerStackWidget::SetReloadEnabled(bool enabled) { assert(m_UI != NULL); assert(m_UI->reloadButton != NULL); m_UI->reloadButton->setEnabled(enabled); } /*******************************************************************************/ void LayerStackWidget::SetResetEffectsEnabled(bool enabled) { assert(m_UI != NULL); assert(m_UI->reloadButton != NULL); m_UI->resetEffectsButton->setEnabled(enabled); } /*******************************************************************************/ /* SLOTS */ /*******************************************************************************/ void LayerStackWidget::OnCurrentRowChanged(const QModelIndex& current, const QModelIndex&) { // qDebug() // << this // << "::OnCurrentRowChange(" << current.row() << "," << previous.row() << ")"; Q_EMIT CurrentChanged(current.row()); } /*******************************************************************************/ void LayerStackWidget::OnSelectionChanged(const QItemSelection& selected, const QItemSelection&) { // qDebug() // << this // << "::OnSelectionChanged(" << selected << "," << deselected << ")"; QModelIndexList indexes(selected.indexes()); // assert( indexes.empty() || indexes.size()==1 ); Q_EMIT SelectionChanged(indexes.empty() ? -1 : indexes.front().row()); } } // end namespace 'mvd'
30.558673
150
0.556223
[ "object", "model" ]