hexsha stringlengths 40 40 | size int64 22 2.4M | ext stringclasses 5
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 260 | max_stars_repo_name stringlengths 5 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 260 | max_issues_repo_name stringlengths 5 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 260 | max_forks_repo_name stringlengths 5 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 22 2.4M | avg_line_length float64 5 169k | max_line_length int64 5 786k | alphanum_fraction float64 0.06 0.95 | matches listlengths 1 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea76f9f3b60f8b70aa1dfcdc8c866c54470a3b54 | 1,657 | h | C | example/MultiThreadParallelMachine/include/MGPExecutor.h | jxt1234/Genetic-Program-Frame | c0a801e337a31de05f49047fd11920a3c2e32ed6 | [
"Apache-2.0"
] | 3 | 2016-01-04T09:23:31.000Z | 2019-08-06T11:52:07.000Z | example/MultiThreadParallelMachine/include/MGPExecutor.h | jxt1234/Renascence | c0a801e337a31de05f49047fd11920a3c2e32ed6 | [
"Apache-2.0"
] | null | null | null | example/MultiThreadParallelMachine/include/MGPExecutor.h | jxt1234/Renascence | c0a801e337a31de05f49047fd11920a3c2e32ed6 | [
"Apache-2.0"
] | 6 | 2016-05-10T16:05:12.000Z | 2019-12-30T09:14:21.000Z | /******************************************************************
Copyright 2016, Jiang Xiao-tang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************/
#ifndef MGPEXECUTOR_H
#define MGPEXECUTOR_H
#include "lowlevelAPI/IParallelMachine.h"
#include <queue>
#include "MGPThreadPool.h"
#include "backend/GPKeyIteratorFactory.h"
class MGPExecutor:public IParallelMachine::Executor
{
public:
MGPExecutor(const GPParallelType* data, int threadNum, IParallelMachine::PARALLELTYPE type);
virtual ~MGPExecutor();
virtual bool vRun(GPPieces* output, GPPieces** inputs, int inputNumber) const;
class ThreadData;
private:
bool _mapRun(GPPieces* output, GPPieces** inputs, int inputNumber) const;
bool _reduceRun(GPPieces* output, GPPieces** inputs, int inputNumber) const;
GPPtr<IKeyFunction> mCondition;
MGPThreadPool* mPool;
ThreadData* mMainData;
std::vector<ThreadData*> mUserData;
IParallelMachine::PARALLELTYPE mType;
GPParallelType::KEYS mOutputKey;
GPParallelType::KEYS mVariableKey;
GPPtr<GPKeyIteratorFactory> mFactory;
};
#endif
| 38.534884 | 96 | 0.697043 | [
"vector"
] |
ea7f5709fb616593e8a698e1e67ce1639584b011 | 10,058 | h | C | src/Math.h | AndresTraks/BulletSharp | c277666667f91c58191f4cfa97f117053de679ef | [
"MIT"
] | 245 | 2015-01-02T14:11:26.000Z | 2022-03-18T08:56:36.000Z | src/Math.h | AndresTraks/BulletSharp | c277666667f91c58191f4cfa97f117053de679ef | [
"MIT"
] | 50 | 2015-01-04T22:32:21.000Z | 2021-06-08T20:26:24.000Z | src/Math.h | AndresTraks/BulletSharp | c277666667f91c58191f4cfa97f117053de679ef | [
"MIT"
] | 69 | 2015-04-03T15:38:44.000Z | 2022-01-20T14:27:30.000Z | #pragma once
#if GRAPHICS_MOGRE
#define Matrix Matrix4^
using namespace Mogre;
#elif GRAPHICS_NUMERICS
#define Matrix Matrix4x4
#elif GRAPHICS_OPENTK
#ifdef BT_USE_DOUBLE_PRECISION
#define Vector3 Vector3d
#define Vector4 Vector4d
#define Matrix Matrix4d
#else
#define Matrix Matrix4
#endif
#elif GRAPHICS_SHARPDX
#define Matrix SharpDX::Matrix
#define Quaternion SharpDX::Quaternion
#define Vector3 SharpDX::Vector3
#define Vector4 SharpDX::Vector4
#elif GRAPHICS_GENERIC
#include "Matrix.h"
#include "Quaternion.h"
#include "Vector3.h"
#include "Vector4.h"
#endif
// Still have no idea what this condition is supposed to be, definitions seem inconsistent.
// Even if BT_USE_SSE is defined and BT_USE_SSE_IN_API is undefined, the localInertia parameter
// of the RigidBodyConstructionInfo constructor fails since it's not aligned on the stack.
// For now, always align memory for SSE operations.
//#if defined(BT_USE_SIMD_VECTOR3) && defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)
#if 1
#define BTSHARP_USE_SSE_ALIGNMENT
#endif
#if (defined(BT_USE_DOUBLE_PRECISION) && !defined(GRAPHICS_GENERIC) && !defined(GRAPHICS_OPENTK)) || \
(defined(BTSHARP_USE_SSE_ALIGNMENT) )
#define GRAPHICS_NO_DIRECT_CAST
#endif
// Macros for passing Vector3 parameters.
// VECTOR3_CONV and VECTOR3_PTR convert Vector3 to btVector3, but allow pinned pointers to be passed
// if the structure of Vector3 and btVector3 is compatible.
// VECTOR3_DEL cleans up the btVector3 if pinning is not possible.
// VECTOR3_USE is used to dereference the btVector3* pointer.
// FIXME: is it safe to cast Vector3 to btVector3 given that btVector3 has padding?
#define VECTOR3_NAME(vec) vec ## Temp
#define VECTOR4_NAME(vec) vec ## Temp
#ifdef GRAPHICS_NO_DIRECT_CAST
#define VECTOR3_CONV(vec) btVector3* VECTOR3_NAME(vec) = Math::Vector3ToBtVector3(vec)
#define VECTOR3_PTR(vec) VECTOR3_NAME(vec)
#define VECTOR3_DEL(vec) ALIGNED_FREE(VECTOR3_PTR(vec))
#define VECTOR4_CONV(vec) btVector4* VECTOR4_NAME(vec) = Math::Vector4ToBtVector4(vec)
#define VECTOR4_PTR(vec) VECTOR4_NAME(vec)
#define VECTOR4_DEL(vec) ALIGNED_FREE(VECTOR4_PTR(vec))
#else
#define VECTOR3_PTR(vec) ((btVector3*) VECTOR3_NAME(vec))
#define VECTOR4_PTR(vec) ((btVector4*) VECTOR3_NAME(vec))
#ifdef BTSHARP_USE_SSE_ALIGNMENT
#define VECTOR3_CONV(vec) btVector3* VECTOR3_NAME(vec) = Math::Vector3ToBtVector3(vec)
#define VECTOR3_DEL(vec) ALIGNED_FREE(VECTOR3_PTR(vec))
#define VECTOR4_CONV(vec) btVector4* VECTOR4_NAME(vec) = Math::Vector4ToBtVector4(vec)
#define VECTOR4_DEL(vec) ALIGNED_FREE(VECTOR4_PTR(vec))
#else
#define VECTOR3_CONV(vec) pin_ptr<Vector3> VECTOR3_NAME(vec) = &vec
#define VECTOR3_DEL(vec)
#define VECTOR4_CONV(vec) pin_ptr<Vector4> VECTOR4_NAME(vec) = &vec
#define VECTOR4_DEL(vec)
#endif
#endif
#define VECTOR3_USE(vec) *VECTOR3_PTR(vec)
#define VECTOR4_USE(vec) *VECTOR4_PTR(vec)
#define TRANSFORM_NAME(t) t ## Temp
#define TRANSFORM_DEF(t) btTransform* TRANSFORM_NAME(t)
#define TRANSFORM_CONV(t) TRANSFORM_DEF(t) = Math::MatrixToBtTransform(t)
#define TRANSFORM_PTR(t) TRANSFORM_NAME(t)
#define TRANSFORM_USE(t) *TRANSFORM_PTR(t)
#define TRANSFORM_DEL(t) ALIGNED_FREE(TRANSFORM_PTR(t))
#define MATRIX3X3_NAME(t) t ## Temp
#define MATRIX3X3_DEF(t) btMatrix3x3* MATRIX3X3_NAME(t)
#define MATRIX3X3_CONV(t) MATRIX3X3_DEF(t) = Math::MatrixToBtMatrix3x3(t)
#define MATRIX3X3_PTR(t) MATRIX3X3_NAME(t)
#define MATRIX3X3_USE(t) *MATRIX3X3_PTR(t)
#define MATRIX3X3_DEL(t) ALIGNED_FREE(MATRIX3X3_PTR(t))
#define QUATERNION_NAME(t) t ## Temp
#define QUATERNION_DEF(t) btQuaternion* QUATERNION_NAME(t)
#define QUATERNION_CONV(t) QUATERNION_DEF(t) = Math::QuaternionToBtQuat(t)
#define QUATERNION_PTR(t) QUATERNION_NAME(t)
#define QUATERNION_USE(t) *QUATERNION_PTR(t)
#define QUATERNION_DEL(t) ALIGNED_FREE(QUATERNION_PTR(t))
#ifdef GRAPHICS_MOGRE
#define Vector_X(v) btScalar((v).x)
#define Vector_Y(v) btScalar((v).y)
#define Vector_Z(v) btScalar((v).z)
#define Vector_W(v) btScalar((v).w)
#define Vector_SetX(v, s) (v).x = s
#define Vector_SetY(v, s) (v).y = s
#define Vector_SetZ(v, s) (v).z = s
#define Vector_SetW(v, s) (v).w = s
#else
#define Vector_X(v) btScalar((v).X)
#define Vector_Y(v) btScalar((v).Y)
#define Vector_Z(v) btScalar((v).Z)
#define Vector_W(v) btScalar((v).W)
#define Vector_SetX(v, s) (v).X = s
#define Vector_SetY(v, s) (v).Y = s
#define Vector_SetZ(v, s) (v).Z = s
#define Vector_SetW(v, s) (v).W = s
#endif
#if defined(GRAPHICS_OPENTK) && defined(BT_USE_DOUBLE_PRECISION)
#define GRAPHICS_SCALAR double
#else
#define GRAPHICS_SCALAR float
#endif
namespace BulletSharp
{
private ref class Math
{
public:
static int* IntArrayToUnmanaged(array<int>^);
static int* IntArrayToUnmanaged(array<int>^, int);
static btScalar* BtScalarArrayToUnmanaged(array<btScalar>^);
static btScalar* BtScalarArrayToUnmanaged(array<btScalar>^, int);
static inline Vector3 BtVector3ToVector3(const btVector3* vector)
{
return Vector3(vector->m_floats[0], vector->m_floats[1], vector->m_floats[2]);
}
static inline void BtVector3ToVector3(const btVector3* vector, [Out] Vector3% vectorOut)
{
Vector_SetX(vectorOut, vector->m_floats[0]);
Vector_SetY(vectorOut, vector->m_floats[1]);
Vector_SetZ(vectorOut, vector->m_floats[2]);
}
static btVector3* Vector3ToBtVector3(Vector3%);
static void Vector3ToBtVector3(Vector3%, btVector3*);
static btVector3* Vector3ArrayToUnmanaged(array<Vector3>^);
static array<Vector3>^ Vector3ArrayToManaged(btVector3*, int);
static inline Vector4 BtVector4ToVector4(const btVector4* vector)
{
return Vector4(vector->m_floats[0], vector->m_floats[1], vector->m_floats[2], vector->m_floats[3]);
}
static btVector4* Vector4ToBtVector4(Vector4);
static void Vector4ToBtVector4(Vector4, btVector4*);
static Quaternion BtQuatToQuaternion(const btQuaternion*);
static btQuaternion* QuaternionToBtQuat(Quaternion);
static void QuaternionToBtQuat(Quaternion, btQuaternion*);
static Matrix BtTransformToMatrix(const btTransform*);
static void BtTransformToMatrix(const btTransform*, [Out] Matrix%);
static btTransform* MatrixToBtTransform(Matrix);
static void MatrixToBtTransform(Matrix, btTransform*);
static Matrix BtMatrix3x3ToMatrix(const btMatrix3x3*);
static btMatrix3x3* MatrixToBtMatrix3x3(Matrix);
static void MatrixToBtMatrix3x3(Matrix, btMatrix3x3*);
};
};
#define BtVector3ToVector3Fast(v, out) \
Vector_SetX(out, (v)->m_floats[0]); \
Vector_SetY(out, (v)->m_floats[1]); \
Vector_SetZ(out, (v)->m_floats[2]);
#define BtVector3ToVector3FastRet(v) Vector3((v)->m_floats[0], (v)->m_floats[1], (v)->m_floats[2])
#if defined(GRAPHICS_MOGRE)
#define BtTransformToMatrixFast(transform, out) out = gcnew Mogre::Matrix4(); \
btScalar* m = (btScalar*)&transform; \
out->m00 = m[0]; \
out->m01 = m[1]; \
out->m02 = m[2]; \
out->m30 = 0; \
out->m10 = m[4]; \
out->m11 = m[5]; \
out->m12 = m[6]; \
out->m31 = 0; \
out->m20 = m[8]; \
out->m21 = m[9]; \
out->m22 = m[10]; \
out->m32 = 0; \
out->m03 = m[12]; \
out->m13 = m[13]; \
out->m23 = m[14]; \
out->m33 = 1;
#else
#ifdef GRAPHICS_NO_DIRECT_CAST
#define BtTransformToMatrixFast(transform, out) \
btScalar* m = (btScalar*)&transform; \
out.M11 = (GRAPHICS_SCALAR)m[0]; \
out.M12 = (GRAPHICS_SCALAR)m[4]; \
out.M13 = (GRAPHICS_SCALAR)m[8]; \
out.M21 = (GRAPHICS_SCALAR)m[1]; \
out.M22 = (GRAPHICS_SCALAR)m[5]; \
out.M23 = (GRAPHICS_SCALAR)m[9]; \
out.M31 = (GRAPHICS_SCALAR)m[2]; \
out.M32 = (GRAPHICS_SCALAR)m[6]; \
out.M33 = (GRAPHICS_SCALAR)m[10]; \
out.M41 = (GRAPHICS_SCALAR)m[12]; \
out.M42 = (GRAPHICS_SCALAR)m[13]; \
out.M43 = (GRAPHICS_SCALAR)m[14]; \
out.M44 = 1;
#else
#define BtTransformToMatrixFast(transform, out) pin_ptr<Matrix> ptr = &out; \
transform.getOpenGLMatrix((btScalar*)ptr);
#endif
#endif
#define Vector3_IsFuzzyZero(v) btFuzzyZero(Vector_X(v)) && btFuzzyZero(Vector_Y(v)) && btFuzzyZero(Vector_Z(v))
#if defined(GRAPHICS_MONOGAME) || defined(GRAPHICS_NUMERICS) || defined(GRAPHICS_SHARPDX) || defined(GRAPHICS_SLIMDX)
#define Vector3_Length(v) (v).Length()
#else
#define Vector3_Length(v) (v).Length
#endif
#if defined(GRAPHICS_MOGRE)
#define Vector3_Cross(left, right, result) result = (left).CrossProduct(right)
#define Vector3_Dot(left, right) (left).DotProduct(right)
#define Vector3_LengthSquared(v) (v).SquaredLength
#define Vector3_Normalize(v) (v).Normalise()
#define Vector3_Zero Vector3::ZERO
#define Matrix_Identity Matrix4::IDENTITY
#define Matrix_Origin(m) (m)->GetTrans()
#elif defined(GRAPHICS_NUMERICS)
#define Vector3_Cross(left, right, result) result = Vector3::Cross(left, right)
#define Vector3_Dot(left, right) Vector3::Dot(left, right)
#define Vector3_LengthSquared(v) (v).LengthSquared()
#define Vector3_Normalize(v) Vector3::Normalize(v)
#define Vector3_Zero Vector3::Zero
#define Matrix_Identity Matrix4x4::Identity
#define Matrix_Origin(m) (m).Translation
#else
#define Vector3_Cross(left, right, result) Vector3::Cross(left, right, result)
#define Vector3_Dot(left, right) Vector3::Dot(left, right)
#define Vector3_Normalize(v) (v).Normalize()
#define Vector3_Zero Vector3::Zero
#define Matrix_Identity Matrix::Identity
#ifdef GRAPHICS_OPENTK
#define Vector3_LengthSquared(v) (v).LengthSquared
#define Matrix_Origin(m) (m).ExtractTranslation()
#elif defined(GRAPHICS_GENERIC) || defined(GRAPHICS_OPENTK)
#define Vector3_LengthSquared(v) (v).LengthSquared
#define Matrix_Origin(m) (m).Origin
#elif defined(GRAPHICS_SHARPDX)
#define Vector3_LengthSquared(v) (v).LengthSquared()
#define Matrix_Origin(m) (m).TranslationVector
#elif defined(GRAPHICS_MONOGAME)
#define Vector3_LengthSquared(v) (v).LengthSquared()
#define Matrix_Origin(m) (m).Translation
#else
#define Vector3_LengthSquared(v) (v).LengthSquared
#define Matrix_Origin(m) Vector3((m).M41, (m).M42, (m).M43)
#endif
#endif
| 37.81203 | 118 | 0.738914 | [
"vector",
"transform"
] |
ea865893bb5a7396e8cf91a8f3f54ec0cd9aeabe | 7,616 | h | C | Sources/Elastos/Packages/Apps/Dialer/inc/elastos/droid/incallui/CallerInfoAsyncQuery.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | 7 | 2017-07-13T10:34:54.000Z | 2021-04-16T05:40:35.000Z | Sources/Elastos/Packages/Apps/Dialer/inc/elastos/droid/incallui/CallerInfoAsyncQuery.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | null | null | null | Sources/Elastos/Packages/Apps/Dialer/inc/elastos/droid/incallui/CallerInfoAsyncQuery.h | jingcao80/Elastos | d0f39852356bdaf3a1234743b86364493a0441bc | [
"Apache-2.0"
] | 9 | 2017-07-13T12:33:20.000Z | 2021-06-19T02:46:48.000Z | //=========================================================================
// Copyright (C) 2012 The Elastos Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//=========================================================================
#ifndef __ELASTOS_DROID_INCALLUI_CALLERINFOASYNCQUERY_H__
#define __ELASTOS_DROID_INCALLUI_CALLERINFOASYNCQUERY_H__
#include "elastos/droid/incallui/CallerInfo.h"
#include "elastos/droid/content/AsyncQueryHandler.h"
using Elastos::Droid::Content::AsyncQueryHandler;
using Elastos::Droid::Content::IContext;
using Elastos::Droid::Net::IUri;
namespace Elastos {
namespace Droid {
namespace InCallUI {
/**
* Helper class to make it easier to run asynchronous caller-id lookup queries.
* @see CallerInfo
*
*/
class CallerInfoAsyncQuery
: public Object
{
public:
/**
* Simple exception used to communicate problems with the query pool.
*/
class QueryPoolException
: public Object // SQLException
{
public:
QueryPoolException(
/* [in] */ const String& error);
};
private:
/**
* Wrap the cookie from the WorkerArgs with additional information needed by our
* classes.
*/
class CookieWrapper
: public Object
{
public:
CookieWrapper()
: mEvent(0)
, mNumber(NULL)
{}
public:
AutoPtr<IOnQueryCompleteListener> mListener;
AutoPtr<IInterface> mCookie;
Int32 mEvent;
String mNumber;
};
/**
* Our own implementation of the AsyncQueryHandler.
*/
class CallerInfoAsyncQueryHandler
: public AsyncQueryHandler
{
protected:
/**
* Our own query worker thread.
*
* This thread handles the messages enqueued in the looper. The normal sequence
* of events is that a new query shows up in the looper queue, followed by 0 or
* more add listener requests, and then an end request. Of course, these requests
* can be interlaced with requests from other tokens, but is irrelevant to this
* handler since the handler has no state.
*
* Note that we depend on the queue to keep things in order; in other words, the
* looper queue must be FIFO with respect to input from the synchronous startQuery
* calls and output to this handleMessage call.
*
* This use of the queue is required because CallerInfo objects may be accessed
* multiple times before the query is complete. All accesses (listeners) must be
* queued up and informed in order when the query is complete.
*/
class CallerInfoWorkerHandler
: public WorkerHandler
{
public:
CallerInfoWorkerHandler(
/* [in] */ IWeakReference* wr);
//@Override
CARAPI HandleMessage(
/* [in] */ IMessage* msg);
};
public:
/**
* Asynchronous query handler class for the contact / callerinfo object.
*/
CallerInfoAsyncQueryHandler(
/* [in] */ IContext* context,
/* [in] */ CallerInfoAsyncQuery* host);
// @Override
CARAPI StartQuery(
/* [in] */ Int32 token,
/* [in] */ IInterface* cookie,
/* [in] */ IUri* uri,
/* [in] */ ArrayOf<String>* projection,
/* [in] */ const String& selection,
/* [in] */ ArrayOf<String>* selectionArgs,
/* [in] */ const String& orderBy);
//@Override
CARAPI CreateHandler(
/* [in] */ ILooper* looper,
/* [out] */ IHandler** handler);
/**
* Overrides onQueryComplete from AsyncQueryHandler.
*
* This method takes into account the state of this class; we construct the CallerInfo
* object only once for each set of listeners. When the query thread has done its work
* and calls this method, we inform the remaining listeners in the queue, until we're
* out of listeners. Once we get the message indicating that we should expect no new
* listeners for this CallerInfo object, we release the AsyncCursorInfo back into the
* pool.
*/
//@Override
CARAPI OnQueryComplete(
/* [in] */ Int32 token,
/* [in] */ IInterface* cookie,
/* [in] */ ICursor* cursor);
public:
/**
* The information relevant to each CallerInfo query. Each query may have multiple
* listeners, so each AsyncCursorInfo is associated with 2 or more CookieWrapper
* objects in the queue (one with a new query event, and one with a end event, with
* 0 or more additional listeners in between).
*/
AutoPtr<IContext> mQueryContext;
AutoPtr<IUri> mQueryUri;
AutoPtr<CallerInfo> mCallerInfo;
AutoPtr<CallerInfoAsyncQuery> mHost;
};
public:
/**
* Private constructor for factory methods.
*/
CallerInfoAsyncQuery();
/**
* Factory method to start the query based on a CallerInfo object.
*
* Note: if the number contains an "@" character we treat it
* as a SIP address, and look it up directly in the Data table
* rather than using the PhoneLookup table.
* TODO: But eventually we should expose two separate methods, one for
* numbers and one for SIP addresses, and then have
* PhoneUtils.startGetCallerInfo() decide which one to call based on
* the phone type of the incoming connection.
*/
static CARAPI_(AutoPtr<CallerInfoAsyncQuery>) StartQuery(
/* [in] */ Int32 token,
/* [in] */ IContext* context,
/* [in] */ CallerInfo* info,
/* [in] */ IOnQueryCompleteListener* listener,
/* [in] */ IInterface* cookie);
/**
* Releases the relevant data.
*/
CARAPI_(UInt32) CallerInfoAsyncQueryRelease();
/**
* Method to create a new CallerInfoAsyncQueryHandler object, ensuring correct
* state of context and uri.
*/
CARAPI_(void) Allocate(
/* [in] */ IContext* context,
/* [in] */ IUri* contactRef);
static CARAPI_(String) SanitizeUriToString(
/* [in] */ IUri* uri);
public:
static Boolean DBG;
static String LOGTAG;
static const Int32 EVENT_NEW_QUERY;
static const Int32 EVENT_ADD_LISTENER;
static const Int32 EVENT_END_OF_QUEUE;
static const Int32 EVENT_EMERGENCY_NUMBER;
static const Int32 EVENT_VOICEMAIL_NUMBER;
AutoPtr<CallerInfoAsyncQueryHandler> mHandler;
// If the CallerInfo query finds no contacts, should we use the
// PhoneNumberOfflineGeocoder to look up a "geo description"?
// (TODO: This could become a flag in config.xml if it ever needs to be
// configured on a per-product basis.)
static Boolean ENABLE_UNKNOWN_NUMBER_GEO_DESCRIPTION;
};
} // namespace InCallUI
} // namespace Droid
} // namespace Elastos
#endif // __ELASTOS_DROID_INCALLUI_CALLERINFOASYNCQUERY_H__
| 34 | 94 | 0.622111 | [
"object"
] |
ea8a0a70bc8b8871de4fe4737293c52a9adbe086 | 2,248 | h | C | Assignment.h | iu-vail/iAuction-algorithm | e9ecc06c9349e23a978c892e23aeddee885b4ed1 | [
"MIT"
] | 3 | 2018-08-15T05:44:51.000Z | 2020-11-16T03:14:40.000Z | Assignment.h | iu-vail/iAuction-algorithm | e9ecc06c9349e23a978c892e23aeddee885b4ed1 | [
"MIT"
] | null | null | null | Assignment.h | iu-vail/iAuction-algorithm | e9ecc06c9349e23a978c892e23aeddee885b4ed1 | [
"MIT"
] | 3 | 2019-04-09T11:49:04.000Z | 2021-12-10T06:42:27.000Z |
///////////////////////////////////////////////////////////////////////////////
// File name: Assignment.h
// This file defines the class of Assignment.
// The assignment matrix can be obtained either by randomly generated, or by
// reading from a specified input file. For the input file, user is required to
// specify a nXn matrix. Parser has some intelligence, so no strict format is
// required. E.g. put a file named 'example' in current directory.
// Lantao Liu, Nov 1, 2009
// Last modified: Dec 3, 2011
///////////////////////////////////////////////////////////////////////////////
#ifndef ASSIGNMENT_H
#define ASSIGNMENT_H
#include <string>
#include <string.h>
#include <set>
#include "Define.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////
//
// Assignment class: defined methods for obtaining an assignment matrix
//
///////////////////////////////////////////////////////////////////////////////
class Assignment{
public:
Assignment(uint _seed = SEED):seed(_seed){}
~Assignment(){}
//Set the seed for random generator
void SetSeed(uint _seed){ seed = _seed; }
uint GetSeed(void){ return seed; }
uint GetRowSize(void){ return row_size; }
uint GetColSize(void){ return col_size; }
mat& GetMatrix(void){ return matrix; }
vector<uint>& GetAsgnVec(void){ return asgn_vec; }
//Randomly generate an assignment-matrix, the default arguments are pre-set
//Currently can generate only integer numbers
void RandomGenerate(mat&, uint nrows = ROW_SIZE, uint ncols = COL_SIZE,
int Max = 100, uint _seed = SEED);
void RandomGenerate(uint nrows = ROW_SIZE, uint ncols = COL_SIZE,
int Max = 100, uint _seed = SEED);
//Import a utility/cost matrix from external file
void ImportMatrix(ifstream&, mat&);
void ImportMatrix(ifstream&);
//Import an assignment vector
void ImportVec(ifstream&, vector<uint>&);
void ImportVec(ifstream&);
// Negate a matrix via flipping the signs
void NegateMatrix(mat&);
void NegateMatrix(void);
//Display matrix onto screen
void DisplayMatrix(mat&) const;
private:
//basic data members
uint seed;
uint row_size;
uint col_size;
mat matrix;
vector<uint> asgn_vec;
};
#endif
| 28.1 | 80 | 0.615214 | [
"vector"
] |
ea8adf4a6307e7c10056fa89b9fb57062e5de176 | 4,425 | h | C | include/dai/utils/timer.h | flurischt/libDAI | 20683a222e2ef307209290f79081fe428d9c5050 | [
"BSD-2-Clause"
] | 1 | 2015-05-03T00:17:48.000Z | 2015-05-03T00:17:48.000Z | include/dai/utils/timer.h | flurischt/libDAI | 20683a222e2ef307209290f79081fe428d9c5050 | [
"BSD-2-Clause"
] | null | null | null | include/dai/utils/timer.h | flurischt/libDAI | 20683a222e2ef307209290f79081fe428d9c5050 | [
"BSD-2-Clause"
] | null | null | null | #ifndef __defined_libdai_utils_timer_h
#define __defined_libdai_utils_timer_h
#include <cstdint>
#include <string>
#include <vector>
#include <iostream>
/*
* We use the RDTSC timer code from the FNC Lecture.
* Our timer is based on the homework code taken from here:
* http://www.inf.ethz.ch/personal/markusp/teaching/263-2300-ETH-spring15/homeworks/hw01files/tsc_x86.h
* Credits go to Prof. and Assistants of the FNC Lecture 2015 ETH Zurich.
*/
/* ================ GNU C and possibly other UNIX compilers ================ */
#ifndef WIN32
#if defined(__GNUC__) || defined(__linux__)
#define VOLATILE __volatile__
#define ASM __asm__
#else
/* if we're neither compiling with gcc or under linux, we can hope
* the following lines work, they probably won't */
#define ASM asm
#define VOLATILE
#endif
/* ================================= WIN32 ================================= */
#else
#endif
/* This is the RDTSC timer.
* RDTSC is an instruction on several Intel and compatible CPUs that reads the
* Time Stamp Counter. The Intel manuals contain more information.
*/
#define COUNTER_LO(a) ((a).int32.lo)
#define COUNTER_HI(a) ((a).int32.hi)
#define COUNTER_VAL(a) ((a).int64)
#define COUNTER(a) \
((unsigned long long)COUNTER_VAL(a))
#define COUNTER_DIFF(a,b) \
(COUNTER(a)-COUNTER(b))
/* ==================== GNU C and possibly other UNIX compilers ===================== */
#ifndef WIN32
typedef union
{
int64_t int64;
struct {int32_t lo, hi;} int32;
} tsc_counter;
#define RDTSC(cpu_c) \
ASM VOLATILE ("rdtsc" : "=a" ((cpu_c).int32.lo), "=d"((cpu_c).int32.hi))
#define CPUID() \
ASM VOLATILE ("cpuid" : : "a" (0) : "bx", "cx", "dx" )
/* ================================= WIN32 ================================= */
#else
typedef union
{
int64_t int64;
struct {int32_t lo, hi;} int32;
} tsc_counter;
#define RDTSC(cpu_c) \
{ \
__asm rdtsc \
__asm mov (cpu_c).int32.lo,eax \
__asm mov (cpu_c).int32.hi,edx \
}
#define CPUID() \
{ \
__asm mov eax, 0 \
__asm cpuid \
}
#endif
class TimerAbstract {
public:
virtual void tic() = 0;
virtual double toc() = 0;
};
/*****************************************************************************/
class TimerGOD : public TimerAbstract{
public:
TimerGOD() : TimerAbstract()
{
start_time.tv_sec = 0;
start_time.tv_usec = 0;
stop_time.tv_sec = 0;
stop_time.tv_usec = 0;
}
virtual void tic() {
gettimeofday(&start_time, NULL);
}
virtual double toc() {
gettimeofday(&stop_time, NULL);
return (stop_time.tv_sec - start_time.tv_sec)
+ (stop_time.tv_usec - start_time.tv_usec)*1e-6;
}
private:
struct timeval start_time, stop_time;
};
/*****************************************************************************/
class TimerTSC : public TimerAbstract {
public:
TimerTSC() : TimerAbstract()
{
}
virtual void tic() {
CPUID();
RDTSC(start);
}
virtual double toc() {
tsc_counter end;
RDTSC(end);
CPUID();
return COUNTER_VAL(end) - COUNTER_VAL(start);
}
private:
tsc_counter start;
};
typedef TimerTSC Timer;
/*****************************************************************************/
struct PerformanceStats
{
void addMeasurement(double duration,
const std::string &label,
std::size_t iterations = 1)
{
m_durations.push_back(duration);
m_iterations.push_back(iterations);
m_labels.push_back(label);
}
void print() const
{
const std::size_t n = m_durations.size();
printf("------------------------------------------------\n");
printf("Labels : Loops Total Average\n");
for (std::size_t i=0; i<n; ++i)
{
printf("%-10s: %7zu %10g %10g\n", m_labels[i].c_str(),
m_iterations[i], m_durations[i],
m_durations[i]/m_iterations[i]);
}
printf("------------------------------------------------\n");
}
void clear()
{
m_durations.clear();
m_iterations.clear();
m_labels.clear();
}
private:
std::vector<double> m_durations;
std::vector<std::size_t> m_iterations;
std::vector<std::string> m_labels;
};
#endif //__defined_libdai_utils_timer_h
| 24.313187 | 106 | 0.539887 | [
"vector"
] |
ea8c4d73b1fe6146416597a6c1f6e948c15d680c | 15,720 | c | C | SPI_Prog/flashrom-master/print_wiki.c | vernonet/stm32F4_prj | 7e150e9ad9e9991bbb831724fdcd1792b5e1c470 | [
"Unlicense"
] | 4 | 2019-05-20T18:10:44.000Z | 2022-02-27T11:17:27.000Z | SPI_Prog/flashrom-master/print_wiki.c | vernonet/stm32F4_prj | 7e150e9ad9e9991bbb831724fdcd1792b5e1c470 | [
"Unlicense"
] | null | null | null | SPI_Prog/flashrom-master/print_wiki.c | vernonet/stm32F4_prj | 7e150e9ad9e9991bbb831724fdcd1792b5e1c470 | [
"Unlicense"
] | 3 | 2020-12-10T10:35:22.000Z | 2021-11-25T23:05:47.000Z | /*
* This file is part of the flashrom project.
*
* Copyright (C) 2009 Uwe Hermann <uwe@hermann-uwe.de>
* Copyright (C) 2009 Carl-Daniel Hailfinger
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "flash.h"
#include "flashchips.h"
#include "programmer.h"
static const char wiki_header[] = "= Supported devices =\n\n\
<div style=\"margin-top:0.5em; padding:0.5em 0.5em 0.5em 0.5em; \
background-color:#eeeeee; text-align:left; border:1px solid #aabbcc;\">\
<small>\n\
'''Last update:''' %s (generated by flashrom %s)<br />\n\
The tables below are generated from flashrom's source by copying the output of '''flashrom -z'''.<br /><br />\n\
A short explanation of the cells representing the support state follows:<br />\n\
{| border=\"0\" valign=\"top\"\n\
! style=\"text-align:left;\" |\n\
! style=\"text-align:left;\" |\n\
|-\n\
|{{OK}}\n\
| The feature was '''tested and should work''' in general unless there is a bug in flashrom or another component in \
the system prohibits some functionality.\n\
|-\n\
|{{Dep}}\n\
| '''Configuration-dependent'''. The feature was tested and should work in general but there are common \
configurations that drastically limit flashrom's capabilities or make it completely stop working.\n\
|-\n\
|{{?3}}\n\
| The feature is '''untested''' but believed to be working.\n\
|-\n\
|{{NA}}\n\
| The feature is '''not applicable''' in this configuration (e.g. write operations on ROM chips).\n\
|-\n\
|{{No}}\n\
| The feature is '''known to not work'''. Don't bother testing (nor reporting. Patches welcome! ;).\n\
|}\n\
</small></div>\n";
static const char th_start[] = "| valign=\"top\"|\n\n\
{| border=\"0\" style=\"font-size: smaller\" valign=\"top\"\n\
|- bgcolor=\"#6699dd\"\n";
#if CONFIG_INTERNAL == 1
static const char chipset_th[] = "\
! align=\"left\" | Vendor\n\
! align=\"left\" | Southbridge\n\
! align=\"center\" | PCI IDs\n\
! align=\"center\" | Status\n\n";
static const char board_th[] = "\
! align=\"left\" | Vendor\n\
! align=\"left\" | Mainboard\n\
! align=\"left\" | Required option\n\
! align=\"center\" | Status\n\n";
static const char board_intro[] = "\
\n== Supported mainboards ==\n\n\
In general, it is very likely that flashrom works out of the box even if your \
mainboard is not listed below.\n\nThis is a list of mainboards where we have \
verified that they either do or do not need any special initialization to \
make flashrom work (given flashrom supports the respective chipset and flash \
chip), or that they do not yet work at all. If they do not work, support may \
or may not be added later.\n\n\
Mainboards (or individual revisions) which don't appear in the list may or may \
not work (we don't know, someone has to give it a try). Please report any \
further verified mainboards on the [[Mailinglist|mailing list]].\n";
#endif
/* Column headers for the flash chip tables: the "Status" header spans the
 * four per-operation columns (Probe/Read/Erase/Write) and "Voltage" spans
 * the Min/Max columns, hence the second heading row embedded below. */
static const char chip_th[] = "\
! align=\"left\" | Vendor\n\
! align=\"left\" | Device\n\
! align=\"center\" | Size [kB]\n\
! align=\"center\" | Type\n\
! align=\"center\" colspan=\"4\" | Status\n\
! align=\"center\" colspan=\"2\" | Voltage [V]\n\n\
|- bgcolor=\"#6699ff\"\n| colspan=\"4\" | \n\
| Probe\n| Read\n| Erase\n| Write\n\
| align=\"center\" | Min\n| align=\"center\" | Max\n\n";
/* Introductory wiki text printed before the flash chip table. */
static const char chip_intro[] = "\
\n== Supported flash chips ==\n\n\
The list below contains all chips that have some kind of explicit support added to flashrom and their last \
known test status. Newer SPI flash chips might work even without explicit support if they implement SFDP ([\
http://www.jedec.org/standards-documents/docs/jesd216 Serial Flash Discoverable Parameters - JESD216]). \
Flashrom will detect this automatically and inform you about it.\n\n\
The names used below are designed to be as concise as possible and hence contain only the characters \
describing properties that are relevant to flashrom. Irrelevant characters specify attributes flashrom can not \
use or even detect by itself (e.g. the physical package) and have no effect on flashrom's operation. They are \
replaced by dots ('.') functioning as wildcards (like in Regular Expressions) or are completely omitted at the \
end of a name.\n";
/* Column headers for the PCI/USB programmer device tables. */
static const char programmer_th[] = "\
! align=\"left\" | Programmer\n\
! align=\"left\" | Vendor\n\
! align=\"left\" | Device\n\
! align=\"center\" | IDs\n\
! align=\"center\" | Status\n\n";
/* The output of this module relies on MediaWiki templates to select special formatting styles for table cells
 * reflecting the test status of the respective hardware. This function returns the correct template name for
 * the supplied enum test_state. */
static const char *test_state_to_template(enum test_state test_state)
{
	const char *template_name;

	switch (test_state) {
	case OK:
		template_name = "OK";
		break;
	case BAD:
		template_name = "No";
		break;
	case NA:
		template_name = "NA";
		break;
	case DEP:
		template_name = "Dep";
		break;
	case NT:
	default:
		/* Untested (and anything unexpected) maps to the "?3" template. */
		template_name = "?3";
		break;
	}
	return template_name;
}
#if CONFIG_INTERNAL == 1
/* Introductory wiki text (with a red warning box) printed before the
 * mobile-device table. */
static const char laptop_intro[] = "\n== Supported mobile devices (laptops, tablets etc.) ==\n\n\
In general, flashing mobile devices is more difficult because they\n\n\
* often use the flash chip for stuff besides the BIOS,\n\
* often have special protection stuff which has to be handled by flashrom,\n\
* often use flash translation circuits which need drivers in flashrom.\n\n\
<div style=\"margin-top:0.5em; padding:0.5em 0.5em 0.5em 0.5em; \
background-color:#ff6666; align:right; border:1px solid #000000;\">\n\
'''IMPORTANT:''' At this point we recommend to '''not''' use flashrom on \
untested mobile devices unless you have a means to recover from a flashing that goes \
wrong (a working backup flash chip and/or good soldering skills).\n</div>\n";
/* Print the "Supported chipsets" wiki section: an outer borderless table
 * whose cells each contain an inner table, splitting the chipset list into
 * 'cols' roughly equal columns.  Row background alternates whenever the
 * vendor name changes. */
static void print_supported_chipsets_wiki(int cols)
{
	int i;
	unsigned int lines_per_col;
	const struct penable *e;
	int enablescount = 0, color = 1;

	/* Count the entries of the NULL-terminated chipset enable table. */
	for (e = chipset_enables; e->vendor_name != NULL; e++)
		enablescount++;
	/* +1 to force the resulting number of columns to be < cols */
	lines_per_col = enablescount / cols + ((enablescount%cols) > 0 ? 1 : 0);
	printf("\n== Supported chipsets ==\n\nTotal amount of supported chipsets: '''%d'''\n\n"
	       "{| border=\"0\" valign=\"top\"\n", enablescount);
	e = chipset_enables;
	for (i = 0; e[i].vendor_name != NULL; i++) {
		/* Start a new column (inner table) every lines_per_col rows. */
		if ((i % lines_per_col) == 0)
			printf("%s%s", th_start, chipset_th);
		/* Alternate colors if the vendor changes. */
		if (i > 0 && strcmp(e[i].vendor_name, e[i - 1].vendor_name))
			color = !color;
		printf("|- bgcolor=\"#%s\"\n| %s || %s "
		       "|| %04x:%04x || {{%s}}\n", (color) ? "eeeeee" : "dddddd",
		       e[i].vendor_name, e[i].device_name,
		       e[i].vendor_id, e[i].device_id,
		       test_state_to_template(e[i].status));
		/* Close the inner table when its column is full. */
		if (((i % lines_per_col) + 1) == lines_per_col)
			printf("\n|}\n\n");
	}
	/* end inner table if it did not fill the last column fully */
	if (((i % lines_per_col)) > 0)
		printf("\n|}\n\n");
	printf("\n\n|}\n");
}
/* Print one board-support wiki section (used for both mainboards and mobile
 * devices).  'devicetype' is used in the section text and to build unique
 * footnote anchor ids; 'cols' controls how many table columns the list is
 * split into; 'boards' is the NULL-terminated board table.  Footnote bodies
 * are accumulated in a heap string and printed after the table. */
static void print_supported_boards_wiki_helper(const char *devicetype, int cols, const struct board_info boards[])
{
	int i, k;
	unsigned int boardcount, lines_per_col;
	unsigned int boardcount_good = 0, boardcount_bad = 0, boardcount_nt = 0;
	int num_notes = 0, color = 1;
	char *notes = calloc(1, 1);	/* start with an empty string for strcat_realloc */
	char tmp[900 + 1];
	const struct board_match *b = board_matches;

	/* Tally the per-status counts for the section intro line. */
	for (i = 0; boards[i].vendor != NULL; i++) {
		if (boards[i].working == OK)
			boardcount_good++;
		else if (boards[i].working == NT)
			boardcount_nt++;
		else
			boardcount_bad++;
	}
	boardcount = boardcount_good + boardcount_nt + boardcount_bad;
	/* +1 to force the resulting number of columns to be < cols */
	lines_per_col = boardcount / cols + ((boardcount%cols) > 0 ? 1 : 0);
	printf("\n\nTotal amount of known good %s: '''%d'''; "
	       "Untested (e.g. user vanished before testing new code): '''%d'''; "
	       "Not yet supported (i.e. known-bad): '''%d'''.\n\n"
	       "{| border=\"0\" valign=\"top\"\n", devicetype, boardcount_good, boardcount_nt, boardcount_bad);
	for (i = 0; boards[i].vendor != NULL; i++) {
		/* Start a new column (inner table) every lines_per_col rows. */
		if ((i % lines_per_col) == 0)
			printf("%s%s", th_start, board_th);
		/* Alternate colors if the vendor changes. */
		if (i > 0 && strcmp(boards[i].vendor, boards[i - 1].vendor))
			color = !color;
		/* Find the matching board_matches entry (if any) to print the
		 * "-p internal:mainboard=vendor:part" option column. */
		k = 0;
		while ((b[k].vendor_name != NULL) &&
		       (strcmp(b[k].vendor_name, boards[i].vendor) ||
		        strcmp(b[k].board_name, boards[i].name))) {
			k++;
		}
		printf("|- bgcolor=\"#%s\"\n| %s || %s%s %s%s || %s%s%s%s "
		       "|| {{%s}}", (color) ? "eeeeee" : "dddddd",
		       boards[i].vendor,
		       boards[i].url ? "[" : "",
		       boards[i].url ? boards[i].url : "",
		       boards[i].name,
		       boards[i].url ? "]" : "",
		       b[k].lb_vendor ? "-p internal:mainboard=" : "—",
		       b[k].lb_vendor ? b[k].lb_vendor : "",
		       b[k].lb_vendor ? ":" : "",
		       b[k].lb_vendor ? b[k].lb_part : "",
		       test_state_to_template(boards[i].working));
		if (boards[i].note) {
			/* Emit a numbered footnote reference in the cell and
			 * queue the footnote body for printing below. */
			num_notes++;
			printf(" <span id=\"%s_ref%d\"><sup>[[#%s_note%d|%d]]</sup></span>\n",
			       devicetype, num_notes, devicetype, num_notes, num_notes);
			int ret = snprintf(tmp, sizeof(tmp),
			                   "<span id=\"%s_note%d\">%d. [[#%s_ref%d|↑]]</span>"
			                   " <nowiki>%s</nowiki><br />\n", devicetype, num_notes, num_notes,
			                   devicetype, num_notes, boards[i].note);
			if (ret < 0 || ret >= sizeof(tmp)) {
				fprintf(stderr, "Footnote text #%d of %s truncated (ret=%d, sizeof(tmp)=%zu)\n",
				        num_notes, devicetype, ret, sizeof(tmp));
			}
			notes = strcat_realloc(notes, tmp);
		} else {
			printf("\n");
		}
		/* Close the inner table when its column is full. */
		if (((i % lines_per_col) + 1) == lines_per_col)
			printf("\n|}\n\n");
	}
	/* end inner table if it did not fill the last column fully */
	if (((i % lines_per_col)) > 0)
		printf("\n|}\n\n");
	printf("|}\n");
	if (num_notes > 0)
		printf("\n<small>\n%s</small>\n", notes);
	free(notes);
}
/* Print the mainboard (two columns) and mobile-device (one column)
 * support sections. */
static void print_supported_boards_wiki(void)
{
	printf("%s", board_intro);
	print_supported_boards_wiki_helper("mainboards", 2, boards_known);
	printf("%s", laptop_intro);
	print_supported_boards_wiki_helper("mobile devices", 1, laptops_known);
}
#endif
/* Print the "Supported flash chips" wiki section, splitting the chip list
 * into 'cols' columns of inner tables.  Generic placeholder entries
 * ("Unknown"/"Programmer" vendors, "unknown" names) are skipped; row
 * background alternates whenever the vendor changes.  Voltages are printed
 * in volts with three decimals, or "?" when unknown (0). */
static void print_supported_chips_wiki(int cols)
{
	unsigned int lines_per_col;
	char *s;
	char vmax[6];
	char vmin[6];
	const struct flashchip *f, *old = NULL;
	int i = 0, c = 1, chipcount = 0;

	for (f = flashchips; f->name != NULL; f++) {
		/* Don't count generic entries. */
		if (!strncmp(f->vendor, "Unknown", 7) ||
		    !strncmp(f->vendor, "Programmer", 10) ||
		    !strncmp(f->name, "unknown", 7))
			continue;
		chipcount++;
	}
	/* +1 to force the resulting number of columns to be < cols */
	lines_per_col = chipcount / cols + ((chipcount%cols) > 0 ? 1 : 0);
	printf("%s", chip_intro);
	printf("\nTotal amount of supported chips: '''%d'''\n\n"
	       "{| border=\"0\" valign=\"top\"\n", chipcount);
	for (f = flashchips; f->name != NULL; f++) {
		/* Don't print generic entries. */
		if (!strncmp(f->vendor, "Unknown", 7) ||
		    !strncmp(f->vendor, "Programmer", 10) ||
		    !strncmp(f->name, "unknown", 7))
			continue;
		/* Start a new column (inner table) every lines_per_col rows. */
		if ((i % lines_per_col) == 0)
			printf("%s%s", th_start, chip_th);
		/* Alternate colors if the vendor changes. */
		if (old != NULL && strcmp(old->vendor, f->vendor))
			c = !c;
		old = f;
		s = flashbuses_to_text(f->bustype);
		/* Use snprintf: any voltage of 10 V or more would need seven
		 * bytes (e.g. "12.000" plus NUL) and overflow the 6-byte
		 * buffers with plain sprintf. */
		snprintf(vmin, sizeof(vmin), "%0.03f", f->voltage.min / (double)1000);
		snprintf(vmax, sizeof(vmax), "%0.03f", f->voltage.max / (double)1000);
		printf("|- bgcolor=\"#%s\"\n| %s || %s || align=\"right\" | %d "
		       "|| %s || {{%s}} || {{%s}} || {{%s}} || {{%s}}"
		       "|| %s || %s\n",
		       (c == 1) ? "eeeeee" : "dddddd", f->vendor, f->name,
		       f->total_size, s,
		       test_state_to_template(f->tested.probe),
		       test_state_to_template(f->tested.read),
		       test_state_to_template(f->tested.erase),
		       test_state_to_template(f->tested.write),
		       f->voltage.min ? vmin : "?",
		       f->voltage.max ? vmax : "?");
		free(s);
		/* Close the inner table when its column is full. */
		if (((i % lines_per_col) + 1) == lines_per_col)
			printf("\n|}\n\n");
		i++;
	}
	/* end inner table if it did not fill the last column fully */
	if (((i % lines_per_col)) > 0)
		printf("\n|}\n\n");
	printf("|}\n\n");
}
/* Following functions are not needed when no PCI/USB programmers are compiled in,
 * but since print_wiki code has no size constraints we include it unconditionally. */
/* Return the number of entries in a device table terminated by a zero
 * vendor_id. */
static int count_supported_devs_wiki(const struct dev_entry *devs)
{
	unsigned int num_devs = 0;

	while (devs[num_devs].vendor_id != 0)
		num_devs++;
	return num_devs;
}
/* Print the table rows for one programmer's device list.  The first row
 * carries the programmer name spanning all of that programmer's rows
 * (rowspan).  The static 'c' persists across calls so the background color
 * alternates per programmer rather than per row. */
static void print_supported_devs_wiki_helper(const struct programmer_entry prog)
{
	int i = 0;
	static int c = 0;
	const struct dev_entry *devs = prog.devs.dev;
	const unsigned int count = count_supported_devs_wiki(devs);
	/* Alternate colors if the vendor changes. */
	c = !c;
	for (i = 0; devs[i].vendor_id != 0; i++) {
		printf("|- bgcolor=\"#%s\"\n", (c) ? "eeeeee" : "dddddd");
		if (i == 0)
			printf("| rowspan=\"%u\" | %s |", count, prog.name);
		printf("| %s || %s || %04x:%04x || {{%s}}\n", devs[i].vendor_name, devs[i].device_name,
		       devs[i].vendor_id, devs[i].device_id, test_state_to_template(devs[i].status));
	}
}
/* Print the "PCI Devices", "USB Devices" and "Other programmers" wiki
 * sections.  The first two list every device the compiled-in programmers
 * can drive; the last lists programmers without device IDs together with
 * their descriptive note. */
static void print_supported_devs_wiki(void)
{
	unsigned int pci_count = 0;
	unsigned int usb_count = 0;
	unsigned int i;

	/* First pass: count devices per bus type for the section headers. */
	for (i = 0; i < PROGRAMMER_INVALID; i++) {
		const struct programmer_entry prog = programmer_table[i];
		switch (prog.type) {
		case USB:
			usb_count += count_supported_devs_wiki(prog.devs.dev);
			break;
		case PCI:
			pci_count += count_supported_devs_wiki(prog.devs.dev);
			break;
		case OTHER:
		default:
			break;
		}
	}
	/* %u matches the unsigned counters (the previous %d was a
	 * format/argument type mismatch). */
	printf("\n== PCI Devices ==\n\n"
	       "Total amount of supported PCI devices flashrom can use as a programmer: '''%u'''\n\n"
	       "{%s%s", pci_count, th_start, programmer_th);
	for (i = 0; i < PROGRAMMER_INVALID; i++) {
		const struct programmer_entry prog = programmer_table[i];
		if (prog.type == PCI) {
			print_supported_devs_wiki_helper(prog);
		}
	}
	printf("\n|}\n\n|}\n");
	printf("\n== USB Devices ==\n\n"
	       "Total amount of supported USB devices flashrom can use as a programmer: '''%u'''\n\n"
	       "{%s%s", usb_count, th_start, programmer_th);
	for (i = 0; i < PROGRAMMER_INVALID; i++) {
		const struct programmer_entry prog = programmer_table[i];
		if (prog.type == USB) {
			print_supported_devs_wiki_helper(prog);
		}
	}
	printf("\n|}\n\n|}\n");
	printf("\n== Other programmers ==\n\n"
	       "{%s", th_start);
	printf("! align=\"left\" | Programmer\n"
	       "! align=\"left\" | Note\n\n");
	for (i = 0; i < PROGRAMMER_INVALID; i++) {
		/* static so the row color keeps alternating across iterations. */
		static int c = 0;
		const struct programmer_entry prog = programmer_table[i];
		if (prog.type == OTHER && prog.devs.note != NULL) {
			c = !c;
			printf("|- bgcolor=\"#%s\"\n", (c) ? "eeeeee" : "dddddd");
			printf("| %s || %s", prog.name, prog.devs.note);
		}
	}
	printf("\n|}\n\n|}\n");
}
/* Entry point: print the complete "supported hardware" wiki page to stdout,
 * starting with a header stamped with the current UTC time and flashrom
 * version, followed by the chip, chipset, board and programmer sections. */
void print_supported_wiki(void)
{
	time_t t = time(NULL);
	/* Buffer sized exactly for an ISO-8601 UTC timestamp plus NUL. */
	char buf[sizeof("1986-02-28T12:37:42Z")];
	strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%SZ", gmtime(&t));
	printf(wiki_header, buf, flashrom_version);
	print_supported_chips_wiki(2);
#if CONFIG_INTERNAL == 1
	print_supported_chipsets_wiki(3);
	print_supported_boards_wiki();
#endif
	print_supported_devs_wiki();
}
| 34.701987 | 117 | 0.642112 | [
"solid"
] |
ea8e172f71afcd660490ecd82807e46df73a773b | 17,237 | h | C | include/vgpu.h | joeldevahl/vgpu | fca23b51f886a1d7139d161940597226062d7287 | [
"MIT"
] | 1 | 2017-04-11T12:55:31.000Z | 2017-04-11T12:55:31.000Z | include/vgpu.h | joeldevahl/vgpu | fca23b51f886a1d7139d161940597226062d7287 | [
"MIT"
] | null | null | null | include/vgpu.h | joeldevahl/vgpu | fca23b51f886a1d7139d161940597226062d7287 | [
"MIT"
] | null | null | null | #ifndef VGPU_H
#define VGPU_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************\
*
* General defines
*
\******************************************************************************/
#define VGPU_MAX_RENDER_TARGETS 8
#define VGPU_MAX_ROOT_SLOTS 4
#define VGPU_MULTI_BUFFERING 2
/******************************************************************************\
*
* Enumerations
*
\******************************************************************************/
typedef enum
{
VGPU_DEVICE_NULL,
VGPU_DEVICE_DX11,
VGPU_DEVICE_DX12,
VGPU_DEVICE_GL,
VGPU_DEVICE_VK,
MAX_VGPU_DEVICE_TYPES,
} vgpu_device_type_t;
typedef enum
{
VGPU_COMMAND_LIST_IMMEDIATE_GRAPHICS = 0,
VGPU_COMMAND_LIST_GRAPHICS,
VGPU_COMMAND_LIST_COMPUTE,
VGPU_COMMAND_LIST_COPY,
} vgpu_command_list_type_t;
typedef enum
{
VGPU_CAPS_FLAG_BIND_CONSTANT_BUFFER_AT_OFFSET = 0x1,
VGPU_CAPS_FLAG_BIND_BUFFER_AT_OFFSET = 0x2,
} vgpu_caps_flag_t;
typedef enum
{
VGPU_USAGE_DEFAULT = 0,
VGPU_USAGE_DYNAMIC,
} vgpu_usage_t;
// TODO: streamline or remove
typedef enum
{
VGPU_RESOURCE_STATE_COMMON = 0,
VGPU_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER = 0x1,
VGPU_RESOURCE_STATE_INDEX_BUFFER = 0x2,
VGPU_RESOURCE_STATE_RENDER_TARGET = 0x4,
VGPU_RESOURCE_STATE_UNORDERED_ACCESS = 0x8,
VGPU_RESOURCE_STATE_DEPTH_WRITE = 0x10,
VGPU_RESOURCE_STATE_DEPTH_READ = 0x20,
VGPU_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE = 0x40,
VGPU_RESOURCE_STATE_PIXEL_SHADER_RESOURCE = 0x80,
VGPU_RESOURCE_STATE_STREAM_OUT = 0x100,
VGPU_RESOURCE_STATE_INDIRECT_ARGUMENT = 0x200,
VGPU_RESOURCE_STATE_COPY_DEST = 0x400,
VGPU_RESOURCE_STATE_COPY_SOURCE = 0x800,
VGPU_RESOURCE_STATE_RESOLVE_DEST = 0x1000,
VGPU_RESOURCE_STATE_RESOLVE_SOURCE = 0x2000,
VGPU_RESOURCE_STATE_GENERIC_READ = VGPU_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER | VGPU_RESOURCE_STATE_INDEX_BUFFER | VGPU_RESOURCE_STATE_COPY_SOURCE | VGPU_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE | VGPU_RESOURCE_STATE_PIXEL_SHADER_RESOURCE | VGPU_RESOURCE_STATE_INDIRECT_ARGUMENT,
VGPU_RESOURCE_STATE_PRESENT = 0,
VGPU_RESOURCE_STATE_PREDICATION = VGPU_RESOURCE_STATE_INDIRECT_ARGUMENT
} vgpu_resource_state_t;
typedef enum
{
VGPU_VERTEX_PROGRAM,
VGPU_FRAGMENT_PROGRAM,
MAX_VGPU_PROGRAM_TYPES,
} vgpu_program_type_t;
typedef enum
{
VGPU_TEXTURETYPE_1D,
VGPU_TEXTURETYPE_2D,
VGPU_TEXTURETYPE_3D,
VGPU_TEXTURETYPE_CUBE,
} vgpu_texture_type_t;
typedef enum
{
VGPU_TEXTUREFORMAT_RGBA8,
VGPU_TEXTUREFORMAT_BC1,
VGPU_TEXTUREFORMAT_BC2,
VGPU_TEXTUREFORMAT_BC3,
VGPU_TEXTUREFORMAT_D32F_S8X24,
} vgpu_texture_format_t;
typedef enum
{
VGPU_DATA_TYPE_FP32,
VGPU_DATA_TYPE_UINT32,
VGPU_DATA_TYPE_UINT16,
VGPU_DATA_TYPE_UINT8,
} vgpu_data_type_t;
typedef enum
{
VGPU_RESOURCE_NONE,
VGPU_RESOURCE_TEXTURE,
VGPU_RESOURCE_BUFFER,
VGPU_RESOURCE_SAMPLER,
VGPU_RESOURCE_TABLE,
} vgpu_resource_type_t;
typedef enum
{
VGPU_PRIMITIVE_TRIANGLES,
VGPU_PRIMITIVE_LINES,
} vgpu_primitive_type_t;
typedef enum
{
VGPU_FILL_SOLID,
VGPU_FILL_WIRE,
} vgpu_fill_mode_t;
typedef enum
{
VGPU_WIND_CCW,
VGPU_WIND_CW,
} vgpu_winding_order_t;
typedef enum
{
VGPU_CULL_NONE,
VGPU_CULL_FRONT,
VGPU_CULL_BACK,
} vgpu_cull_mode_t;
typedef enum
{
VGPU_BLEND_ELEM_ZERO,
VGPU_BLEND_ELEM_ONE,
VGPU_BLEND_ELEM_SRC_COLOR,
VGPU_BLEND_ELEM_INV_SRC_COLOR,
VGPU_BLEND_ELEM_SRC_ALPHA,
VGPU_BLEND_ELEM_INV_SRC_ALPHA,
VGPU_BLEND_ELEM_DEST_ALPHA,
VGPU_BLEND_ELEM_INV_DEST_ALPHA,
VGPU_BLEND_ELEM_DEST_COLOR,
VGPU_BLEND_ELEM_INV_DEST_COLOR,
VGPU_BLEND_ELEM_SRC_ALPHA_SAT,
VGPU_BLEND_ELEM_BLEND_FACTOR,
VGPU_BLEND_ELEM_INV_BLEND_FACTOR,
VGPU_BLEND_ELEM_SRC1_COLOR,
VGPU_BLEND_ELEM_INV_SRC1_COLOR,
VGPU_BLEND_ELEM_SRC1_ALPHA,
VGPU_BLEND_ELEM_INV_SRC1_ALPHA,
} vgpu_blend_elem_t;
typedef enum
{
VGPU_BLEND_OP_ADD,
VGPU_BLEND_OP_SUBTRACT,
VGPU_BLEND_OP_REV_SUBTRACT,
VGPU_BLEND_OP_MIN,
VGPU_BLEND_OP_MAX,
} vgpu_blend_op_t;
typedef enum
{
VGPU_COMPARE_ALWAYS,
VGPU_COMPARE_LESS,
VGPU_COMPARE_LESS_EQUAL,
VGPU_COMPARE_EQUAL,
VGPU_COMPARE_NOT_EQUAL,
VGPU_COMPARE_GREATER_EQUAL,
VGPU_COMPARE_GREATER,
VGPU_COMPARE_NEVER,
} vgpu_compare_func_t;
typedef enum
{
VGPU_STENCIL_OP_KEEP,
VGPU_STENCIL_OP_ZERO,
VGPU_STENCIL_OP_REPLACE,
VGPU_STENCIL_OP_INCR_SAT,
VGPU_STENCIL_OP_DECR_SAT,
VGPU_STENCIL_OP_INVERT,
VGPU_STENCIL_OP_INCR,
VGPU_STENCIL_OP_DECR,
} vgpu_stencil_op_t;
typedef enum
{
VGPU_ROOT_SLOT_TYPE_TABLE = 0,
VGPU_ROOT_SLOT_TYPE_RESOURCE,
} vgpu_root_slot_type_t;
/******************************************************************************\
*
* Internal types
*
\******************************************************************************/
typedef struct vgpu_device_s vgpu_device_t;
typedef struct vgpu_thread_context_s vgpu_thread_context_t;
typedef struct vgpu_command_list_s vgpu_command_list_t;
typedef struct vgpu_texture_s vgpu_texture_t;
typedef struct vgpu_buffer_s vgpu_buffer_t;
typedef struct vgpu_resource_table_s vgpu_resource_table_t;
typedef struct vgpu_root_layout_s vgpu_root_layout_t;
typedef struct vgpu_program_s vgpu_program_t;
typedef struct vgpu_pipeline_s vgpu_pipeline_t;
typedef struct vgpu_render_pass_s vgpu_render_pass_t;
/******************************************************************************\
*
* Allocation interface
*
\******************************************************************************/
/* Pluggable host-memory allocator.  Each callback receives the allocator
 * itself (so implementations can embed their own state via containment)
 * plus the caller's file/line for allocation tracking.
 * NOTE(review): 'count' and 'size' presumably follow calloc-style
 * element-count * element-size semantics -- confirm with the device
 * implementations. */
typedef struct vgpu_allocator_s
{
    void* (*alloc)(struct vgpu_allocator_s* allocator, size_t count, size_t size, size_t align, const char* file, int line);
    void* (*realloc)(struct vgpu_allocator_s* allocator, void* memory, size_t count, size_t size, size_t align, const char* file, int line);
    void (*free)(struct vgpu_allocator_s* allocator, void* memory, const char* file, int line);
} vgpu_allocator_t;
/******************************************************************************\
*
* Structures
*
\******************************************************************************/
typedef struct
{
uint32_t flags;
} vgpu_caps_t;
typedef struct
{
uint32_t num_vertices;
uint32_t num_instances;
uint32_t first_vertex;
uint32_t first_instance;
} vgpu_draw_indirect_args_t;
typedef struct
{
uint32_t num_indices;
uint32_t num_instances;
uint32_t first_index;
uint32_t first_vertex;
uint32_t first_instance;
} vgpu_draw_indexed_indirect_args_t;
typedef void (*vgpu_log_func_t)(const char* message);
typedef int (*vgpu_error_func_t)(const char* file, unsigned int line, const char* cond, const char* fmt, ...);
/* Parameters for vgpu_create_device(). */
typedef struct vgpu_create_device_params_s
{
    vgpu_allocator_t* allocator;     // host allocator used for all CPU-side allocations
    void* window;                    // native window handle -- presumably for the swap chain; confirm per backend
    uint32_t force_disable_flags;    // NOTE(review): looks like vgpu_caps_flag_t bits to force off -- confirm
    vgpu_log_func_t log_func;        // sink for diagnostic messages
    vgpu_error_func_t error_func;    // handler invoked on internal errors/assertions
} vgpu_create_device_params_t;
typedef struct vgpu_create_thread_context_params_s
{
} vgpu_create_thread_context_params_t;
typedef struct vgpu_create_command_list_params_s
{
vgpu_command_list_type_t type;
} vgpu_create_command_list_params_t;
typedef struct vgpu_root_layout_range_s
{
uint16_t start;
uint16_t count;
} vgpu_root_layout_range_t;
typedef struct vgpu_root_layout_slot_s
{
vgpu_root_slot_type_t type;
union
{
struct
{
vgpu_root_layout_range_t range_buffers;
vgpu_root_layout_range_t range_constant_buffers;
} table;
struct
{
uint16_t location;
vgpu_resource_type_t type;
bool treat_as_constant_buffer;
} resource;
};
} vgpu_root_layout_slot_t;
typedef struct vgpu_blend_s
{
bool enabled;
vgpu_blend_elem_t color_src;
vgpu_blend_elem_t color_dst;
vgpu_blend_op_t color_op;
vgpu_blend_elem_t alpha_src;
vgpu_blend_elem_t alpha_dst;
vgpu_blend_op_t alpha_op;
} vgpu_blend_t;
typedef struct vgpu_depth_s
{
bool enabled;
vgpu_compare_func_t func;
} vgpu_depth_t;
typedef struct vgpu_stencil_s
{
bool enabled;
struct
{
vgpu_compare_func_t func;
vgpu_stencil_op_t fail_op;
vgpu_stencil_op_t depth_fail_op;
vgpu_stencil_op_t pass_op;
} front, back;
uint8_t read_mask;
uint8_t write_mask;
} vgpu_stencil_t;
typedef struct vgpu_state_s
{
vgpu_fill_mode_t fill;
vgpu_winding_order_t wind;
vgpu_cull_mode_t cull;
uint32_t depth_bias;
bool blend_independent;
vgpu_blend_t blend[VGPU_MAX_RENDER_TARGETS];
vgpu_depth_t depth;
vgpu_stencil_t stencil;
} vgpu_state_t;
typedef struct vgpu_create_program_params_s
{
const uint8_t* data;
size_t size;
vgpu_program_type_t program_type;
} vgpu_create_program_params_t;
typedef struct vgpu_create_pipeline_params_s
{
vgpu_root_layout_t* root_layout;
vgpu_render_pass_t* render_pass;
vgpu_program_t* vertex_program;
vgpu_program_t* fragment_program;
vgpu_state_t state;
vgpu_primitive_type_t primitive_type;
} vgpu_create_pipeline_params_t;
typedef struct vgpu_resource_table_entry_s
{
uint8_t location;
vgpu_resource_type_t type;
void* resource;
size_t offset;
size_t num_bytes;
bool treat_as_constant_buffer;
} vgpu_resource_table_entry_t;
typedef enum
{
VGPU_BUFFER_FLAG_INDEX_BUFFER = 0x1,
VGPU_BUFFER_FLAG_CONSTANT_BUFFER = 0x2,
} vgpu_buffer_flag_t;
typedef struct vgpu_create_buffer_params_s
{
size_t num_bytes;
vgpu_usage_t usage;
uint32_t flags;
uint32_t structure_stride;
const char* name;
} vgpu_create_buffer_params_t;
typedef struct vgpu_lock_buffer_params_s
{
vgpu_buffer_t* buffer;
size_t offset;
size_t num_bytes;
uintptr_t unlock_data[4];
} vgpu_lock_buffer_params_t;
/* Clear value for a render target: either an RGBA color (accessible through
 * the named components or the aliased color[4] array) or a depth/stencil
 * pair for depth-stencil targets. */
typedef union vgpu_clear_value_s
{
    struct
    {
        float r, g, b, a;
    };
    float color[4];   // aliases r/g/b/a above
    struct
    {
        float depth;
        uint8_t stencil;
    } depth_stencil;
} vgpu_clear_value_t;
typedef struct vgpu_create_texture_params_s
{
vgpu_texture_type_t type;
vgpu_texture_format_t format;
vgpu_usage_t usage;
uint32_t width;
uint32_t height;
uint32_t depth;
uint32_t num_mips;
uint32_t is_render_target;
const char* name;
vgpu_clear_value_t clear_value;
} vgpu_create_texture_params_t;
typedef struct vgpu_render_pass_target_param_s
{
vgpu_texture_t* texture;
uint32_t clear_on_bind; // TODO: more fine grained begin/end operations
} vgpu_render_pass_target_param_t;
typedef struct vgpu_create_render_pass_params_s
{
size_t num_color_targets;
vgpu_render_pass_target_param_t color_targets[16];
vgpu_render_pass_target_param_t depth_stencil_target;
} vgpu_create_render_pass_params_t;
/******************************************************************************\
*
* Device operations
*
\******************************************************************************/
vgpu_device_t* vgpu_create_device(const vgpu_create_device_params_t* params);
void vgpu_destroy_device(vgpu_device_t* device);
vgpu_device_type_t vgpu_get_device_type(vgpu_device_t* device);
void vgpu_apply_command_lists(vgpu_device_t* device, uint32_t num_command_lists, vgpu_command_list_t** command_lists, uint32_t queue = 0);
void vgpu_present(vgpu_device_t* device);
vgpu_texture_t* vgpu_get_back_buffer(vgpu_device_t* device);
uint64_t vgpu_get_frame_no(vgpu_device_t* device);
uint64_t vgpu_get_frame_id(vgpu_device_t* device);
uint64_t vgpu_max_buffered_frames(vgpu_device_t* device);
void vgpu_get_caps(vgpu_device_t* device, vgpu_caps_t* out_caps);
/******************************************************************************\
*
* Thread context handling
*
\******************************************************************************/
vgpu_thread_context_t* vgpu_create_thread_context(vgpu_device_t* device, const vgpu_create_thread_context_params_t* params);
void vgpu_destroy_thread_context(vgpu_device_t* device, vgpu_thread_context_t* thread_context);
void vgpu_prepare_thread_context(vgpu_device_t* device, vgpu_thread_context_t* thread_context);
/******************************************************************************\
*
* Buffer handling
*
\******************************************************************************/
vgpu_buffer_t* vgpu_create_buffer(vgpu_device_t* device, const vgpu_create_buffer_params_t* params);
void vgpu_destroy_buffer(vgpu_device_t* device, vgpu_buffer_t* buffer);
/******************************************************************************\
*
* Resource table handling
*
\******************************************************************************/
vgpu_resource_table_t* vgpu_create_resource_table(vgpu_device_t* device, const vgpu_root_layout_t* root_layout, uint32_t root_slot, const vgpu_resource_table_entry_t* entries, size_t num_entries);
void vgpu_destroy_resource_table(vgpu_device_t* device, vgpu_resource_table_t* resource_table);
/******************************************************************************\
*
* Root layout handling
*
\******************************************************************************/
vgpu_root_layout_t* vgpu_create_root_layout(vgpu_device_t* device, const vgpu_root_layout_slot_t* slots, size_t num_slots);
void vgpu_destroy_root_layout(vgpu_device_t* device, vgpu_root_layout_t* root_layout);
/******************************************************************************\
*
* Texture handling
*
\******************************************************************************/
vgpu_texture_t* vgpu_create_texture(vgpu_device_t* device, const vgpu_create_texture_params_t* params);
void vgpu_destroy_texture(vgpu_device_t* device, vgpu_texture_t* texture);
/******************************************************************************\
*
* Program handling
*
\******************************************************************************/
vgpu_program_t* vgpu_create_program(vgpu_device_t* device, const vgpu_create_program_params_t* params);
void vgpu_destroy_program(vgpu_device_t* device, vgpu_program_t* program);
/******************************************************************************\
*
* Pipeline handling
*
\******************************************************************************/
vgpu_pipeline_t* vgpu_create_pipeline(vgpu_device_t* device, const vgpu_create_pipeline_params_t* params);
void vgpu_destroy_pipeline(vgpu_device_t* device, vgpu_pipeline_t* pipeline);
/******************************************************************************\
*
* Render pass handling
*
\******************************************************************************/
vgpu_render_pass_t* vgpu_create_render_pass(vgpu_device_t* device, const vgpu_create_render_pass_params_t* params);
void vgpu_destroy_render_pass(vgpu_device_t* device, vgpu_render_pass_t* render_pass);
/******************************************************************************\
*
* Command list handling
*
\******************************************************************************/
vgpu_command_list_t* vgpu_create_command_list(vgpu_device_t* device, const vgpu_create_command_list_params_t* params);
void vgpu_destroy_command_list(vgpu_device_t* device, vgpu_command_list_t* command_list);
bool vgpu_is_command_list_type_supported(vgpu_device_t* device, vgpu_command_list_type_t command_list_type);
/******************************************************************************\
*
* Command list building
*
\******************************************************************************/
void vgpu_begin_command_list(vgpu_thread_context_t* thread_context, vgpu_command_list_t* command_list, vgpu_render_pass_t* render_pass);
void vgpu_end_command_list(vgpu_command_list_t* command_list);
void* vgpu_lock_buffer(vgpu_command_list_t* command_list, vgpu_lock_buffer_params_t* params);
void vgpu_unlock_buffer(vgpu_command_list_t* command_list, const vgpu_lock_buffer_params_t* params);
void vgpu_set_buffer_data(vgpu_command_list_t* command_list, vgpu_buffer_t* buffer, size_t offset, const void* data, size_t num_bytes);
void vgpu_set_resource_table(vgpu_command_list_t* command_list, uint32_t slot, vgpu_resource_table_t* resource_table);
void vgpu_set_buffer(vgpu_command_list_t* command_list, uint32_t slot, vgpu_buffer_t* buffer, size_t offset, size_t num_bytes);
void vgpu_set_pipeline(vgpu_command_list_t* command_list, vgpu_pipeline_t* pipeline);
void vgpu_set_index_buffer(vgpu_command_list_t* command_list, vgpu_data_type_t index_type, vgpu_buffer_t* index_buffer);
void vgpu_draw(vgpu_command_list_t* command_list, uint32_t first_instance, uint32_t num_instances, uint32_t first_vertex, uint32_t num_vertices);
void vgpu_draw_indexed(vgpu_command_list_t* command_list, uint32_t first_instance, uint32_t num_instances, uint32_t first_index, uint32_t num_indices, uint32_t first_vertex);
void vgpu_draw_indirect(vgpu_command_list_t* command_list, vgpu_buffer_t* buffer, uint32_t count);
void vgpu_draw_indexed_indirect(vgpu_command_list_t* command_list, vgpu_buffer_t* buffer, uint32_t count);
void vgpu_blit(vgpu_command_list_t* command_list, vgpu_texture_t* texture);
void vgpu_clear_render_pass(vgpu_command_list_t* command_list, vgpu_render_pass_t* render_pass);
void vgpu_set_render_pass(vgpu_command_list_t* command_list, vgpu_render_pass_t* render_pass);
void vgpu_transition_buffer(vgpu_command_list_t* command_list, vgpu_buffer_t* buffer, vgpu_resource_state_t state_before, vgpu_resource_state_t state_after);
void vgpu_transition_texture(vgpu_command_list_t* command_list, vgpu_texture_t* texture, vgpu_resource_state_t state_before, vgpu_resource_state_t state_after);
#ifdef __cplusplus
}
#endif
#endif // VGPU_H
| 27.360317 | 284 | 0.711957 | [
"render"
] |
ea8e34b56e42610eaedf646131f6e411469714fa | 1,870 | h | C | src/storage/mutate/AddVerticesProcessor.h | WPH95/nebula | 408d731d564837c9fcd156a26048445bf89dae93 | [
"Apache-2.0"
] | 2 | 2021-12-22T11:21:18.000Z | 2022-01-20T11:49:56.000Z | src/storage/mutate/AddVerticesProcessor.h | WPH95/nebula | 408d731d564837c9fcd156a26048445bf89dae93 | [
"Apache-2.0"
] | null | null | null | src/storage/mutate/AddVerticesProcessor.h | WPH95/nebula | 408d731d564837c9fcd156a26048445bf89dae93 | [
"Apache-2.0"
] | null | null | null | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#ifndef STORAGE_MUTATE_ADDVERTICESPROCESSOR_H_
#define STORAGE_MUTATE_ADDVERTICESPROCESSOR_H_
#include "common/base/Base.h"
#include "common/base/ConcurrentLRUCache.h"
#include "kvstore/LogEncoder.h"
#include "storage/BaseProcessor.h"
#include "storage/CommonUtils.h"
namespace nebula {
namespace storage {
extern ProcessorCounters kAddVerticesCounters;
class AddVerticesProcessor : public BaseProcessor<cpp2::ExecResponse> {
public:
static AddVerticesProcessor* instance(StorageEnv* env,
const ProcessorCounters* counters = &kAddVerticesCounters) {
return new AddVerticesProcessor(env, counters);
}
void process(const cpp2::AddVerticesRequest& req);
void doProcess(const cpp2::AddVerticesRequest& req);
void doProcessWithIndex(const cpp2::AddVerticesRequest& req);
private:
AddVerticesProcessor(StorageEnv* env, const ProcessorCounters* counters)
: BaseProcessor<cpp2::ExecResponse>(env, counters) {}
ErrorOr<nebula::cpp2::ErrorCode, std::string> findOldValue(PartitionID partId,
const VertexID& vId,
TagID tagId);
std::vector<std::string> indexKeys(PartitionID partId,
const VertexID& vId,
RowReader* reader,
std::shared_ptr<nebula::meta::cpp2::IndexItem> index);
private:
GraphSpaceID spaceId_;
std::vector<std::shared_ptr<nebula::meta::cpp2::IndexItem>> indexes_;
bool ifNotExists_{false};
bool ignoreExistedIndex_{false};
};
} // namespace storage
} // namespace nebula
#endif // STORAGE_MUTATE_ADDVERTICESPROCESSOR_H_
| 33.392857 | 100 | 0.663636 | [
"vector"
] |
ea948c440c5ef25a0975ec5a11f80188281883ad | 464 | h | C | KidsTC/KidsTC/Business/Main/Product/FlashBuy/Settlement/FlashAdvanceSettlement/V/FlashAdvanceSettlementToolBar.h | zhpigh/KidsTC_Objective-C | ef095ae4d7fd7c3a69565ba5f0eb44f9e93e40b6 | [
"MIT"
] | null | null | null | KidsTC/KidsTC/Business/Main/Product/FlashBuy/Settlement/FlashAdvanceSettlement/V/FlashAdvanceSettlementToolBar.h | zhpigh/KidsTC_Objective-C | ef095ae4d7fd7c3a69565ba5f0eb44f9e93e40b6 | [
"MIT"
] | null | null | null | KidsTC/KidsTC/Business/Main/Product/FlashBuy/Settlement/FlashAdvanceSettlement/V/FlashAdvanceSettlementToolBar.h | zhpigh/KidsTC_Objective-C | ef095ae4d7fd7c3a69565ba5f0eb44f9e93e40b6 | [
"MIT"
] | 1 | 2018-09-18T07:26:36.000Z | 2018-09-18T07:26:36.000Z | //
// FlashAdvanceSettlementToolBar.h
// KidsTC
//
// Created by zhanping on 8/16/16.
// Copyright © 2016 詹平. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "FlashSettlementModel.h"
// Bottom tool bar view for the flash-sale advance settlement screen:
// a text label plus a confirm button wired up from a nib (IBOutlet).
@interface FlashAdvanceSettlementToolBar : UIView
@property (weak, nonatomic) IBOutlet UILabel *label;          // info text shown in the bar
@property (weak, nonatomic) IBOutlet UIButton *btn;           // action button
// Settlement data backing the bar; weak -- owned elsewhere (presumably the controller).
@property (nonatomic, weak) FlashSettlementModel *model;
// Callback invoked on user action -- NOTE(review): presumably the btn tap; confirm in the .m.
@property (nonatomic, copy) void (^actionBlock)();
@end
| 27.294118 | 56 | 0.74569 | [
"model"
] |
ea966ad9112247b101780bed0f10a6a22d30fa0c | 8,162 | c | C | benchmarks/Olden_Custom1/health/ht/health.c | mcai/Archimulator | 039b8ba31d76a769e2bac84ed9c83e706a307e64 | [
"MIT"
] | 3 | 2016-08-04T05:01:36.000Z | 2020-01-17T19:48:37.000Z | benchmarks/Olden_Custom1/health/ht/health.c | mcai/Archimulator | 039b8ba31d76a769e2bac84ed9c83e706a307e64 | [
"MIT"
] | null | null | null | benchmarks/Olden_Custom1/health/ht/health.c | mcai/Archimulator | 039b8ba31d76a769e2bac84ed9c83e706a307e64 | [
"MIT"
] | 3 | 2015-08-05T13:18:11.000Z | 2022-02-01T23:03:27.000Z | /* For copyright information, see olden_v1.0/COPYRIGHT */
/*******************************************************************
* Health.c : Model of the Columbian Health Care System *
*******************************************************************/
#include "push.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "health.h"
#include <assert.h>
struct timeval t_start, t_end;
int max_level;
long max_time, seed;
struct list *volatile __attribute__((aligned(64))) g_list;
struct list *volatile __attribute__((aligned(64))) g_valiage;
extern volatile int __attribute__((aligned(64))) push_flag;
int count=0;
struct Village *alloc_tree(int level, int label, struct Village *back) {
if (level == 0)
return NULL;
else {
struct Village *new;
int i;
struct Village *fval[4];
new = (struct Village *)malloc(sizeof(struct Village));
for (i = 3; i >= 0; i--)
fval[i] = alloc_tree(level - 1, label*4 + i + 1, new);
new->back = back;
new->label = label;
new->seed = label * (IQ + seed);
new->hosp.personnel = (int)pow(2, level - 1);
new->hosp.free_personnel = new->hosp.personnel;
new->hosp.num_waiting_patients = 0;
new->hosp.assess.forward = NULL;
new->hosp.assess.back = NULL;
new->hosp.assess.patient = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->hosp.waiting.forward = NULL;
new->hosp.waiting.back = NULL;
new->hosp.waiting.patient = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->hosp.inside.forward = NULL;
new->hosp.inside.back = NULL;
new->hosp.inside.patient = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->hosp.up.forward = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->hosp.up.back = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->hosp.up.patient = NULL; /* ADDED FOR LLVM [OLDEN BUGS!] */
new->returned.back = NULL;
new->returned.forward = NULL;
for (i = 0; i < 4; i++)
new->forward[i] = fval[i];
return new;
}
}
struct Results get_results(struct Village *village) {
int i;
struct List *list;
struct Patient *p;
struct Results fval[4];
struct Results r1;
r1.total_hosps = 0.0;
r1.total_patients = 0.0;
r1.total_time = 0.0;
if (village == NULL) return r1;
for (i = 3; i > 0; i--) {
struct Village *V = village->forward[i];
fval[i] = get_results(V);
}
fval[0] = get_results(village->forward[0]);
for (i = 3; i >= 0; i--) {
r1.total_hosps += fval[i].total_hosps;
r1.total_patients += fval[i].total_patients;
r1.total_time += fval[i].total_time;
}
list = village->returned.forward;
while (list != NULL) {
p = list->patient;
r1.total_hosps += (float)(p->hosps_visited);
r1.total_time += (float)(p->time);
r1.total_patients += 1.0;
list = list->forward;
}
return r1;
}
void check_patients_inside(struct Village *village, struct List *list)
{
struct List *l;
struct Patient *p;
int t;
while (list != NULL) {
p = list->patient;
t = p->time_left;
p->time_left = t - 1;
if (p->time_left == 0) {
t = village->hosp.free_personnel;
village->hosp.free_personnel = t+1;
l = &(village->hosp.inside);
removeList(l, p);
l = &(village->returned);
addList(l, p); }
list = list->forward; /* :) adt_pf detected */
}
}
struct List *check_patients_assess(struct Village *village, struct List *list) {
float rand;
struct Patient *p;
struct List *up = NULL;
long s;
int label, t;
while (list != NULL) {
p = list->patient;
t = p->time_left;
p->time_left = t - 1;
label = village->label;
if (p->time_left == 0) {
s = village->seed;
rand = my_rand(s);
village->seed = (long)(rand * IM);
label = village->label;
if (rand > 0.1 || label == 0) {
removeList(&village->hosp.assess, p);
addList(&village->hosp.inside, p);
p->time_left = 10;
t = p->time;
p->time = t + 10;
} else {
t = village->hosp.free_personnel;
village->hosp.free_personnel = t+1;
removeList(&village->hosp.assess, p);
up = &village->hosp.up;
addList(up, p);
}
}
list = list->forward; /* :) adt_pf detected */
}
return up;
}
void check_patients_waiting(struct Village *village, struct List *list) {
int i, t;
struct Patient *p;
count++;
g_list=list;
g_valiage=village;
if(count>500000)
push_flag=1;
while (list != NULL) {
i = village->hosp.free_personnel;
p = list->patient;
g_list=list;
//g_valiage=village;
if (i > 0) {
t = village->hosp.free_personnel;
village->hosp.free_personnel = t-1;
p->time_left = 3;
t = p->time;
p->time = t + 3;
removeList(&village->hosp.waiting, p);
addList(&village->hosp.assess, p); }
else {
t = p->time;
p->time = t + 1; }
list = list->forward; } /* :) adt_pf detected */
if(count>500000) push_flag=0;
}
void put_in_hosp(struct Hosp *hosp, struct Patient *patient) {
int t = patient->hosps_visited;
patient->hosps_visited = t + 1;
if (hosp->free_personnel > 0) {
t = hosp->free_personnel;
hosp->free_personnel = t-1;
addList(&hosp->assess, patient);
patient->time_left = 3;
t = patient->time;
patient->time = t + 3;
} else {
addList(&hosp->waiting, patient);
}
}
struct Patient *generate_patient(struct Village *village)
{
long s,newseed;
struct Patient *patient;
float rand;
int label;
s = village->seed;
rand = my_rand(s);
village->seed = (long)(rand * IM);
newseed = village->seed;
label = village->label;
if (rand > 0.666) {
patient = (struct Patient *)malloc(sizeof(struct Patient));
patient->hosps_visited = 0;
patient->time = 0;
patient->time_left = 0;
patient->home_village = village;
return patient;
}
return NULL;
}
int main(int argc, char *argv[])
{
struct Results results;
struct Village *top = 0;
int i;
float total_time, total_patients, total_hosps;
pthread_t push_thread_id = thread_spawn((void*)&push_thread_func);
#if defined(SIMICS)
MAGIC(9006);
#endif
#if defined(SIMICS)
MAGIC(9007);
#endif
//~ syscall(500, 0); // enter detailed simulation
#ifdef MIPS_1
asm volatile ("addiu $0,$0,3720");
#endif
dealwithargs(argc, argv);
top = alloc_tree(max_level, 0, top);
for (i = 0; i < max_time; i++) {
sim(top);
} /* :) adt_pf detected */
results = get_results(top); /* :) adt_pf detected */
total_patients = results.total_patients;
total_time = results.total_time;
total_hosps = results.total_hosps;
//~ syscall(500, 1); // exit simulation
//~ thread_destroy(push_thread_id);
#if defined(SIMICS)
MAGIC(9008);
#endif
return 0;
}
struct List *sim(struct Village *village)
{
int i;
struct Patient *patient;
struct List *l, *up;
struct Hosp *h;
struct List *val[4];
int label;
if (village == NULL) return NULL;
label = village->label;
for (i = 3; i > 0; i--) {
struct Village *V = village->forward[i];
struct List *L = sim(V);
val[i] = L;
}
val[0] = sim(village->forward[0]);
h = &village->hosp;
for (i = 3; i >= 0; i--) {
struct List *valI = l = val[i];
if (l != NULL) {
l = l->forward;
while (l != NULL) {
put_in_hosp(h, l->patient);
removeList(valI, l->patient);
l = l->forward;
}
}
}
check_patients_inside(village, village->hosp.inside.forward);
up = check_patients_assess(village, village->hosp.assess.forward);
check_patients_waiting(village, village->hosp.waiting.forward);
/*** Generate new patients ***/
if ((patient = generate_patient(village)) != NULL) {
label = village->label;
put_in_hosp(&village->hosp, patient);
}
return up;
}
| 25.426791 | 80 | 0.569468 | [
"model"
] |
ea97f015d2deb7e487bd3aa5efab04cc6c1c4737 | 38,207 | c | C | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/krb5/1.17-r0/krb5-1.17/src/lib/kdb/kdb_convert.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/krb5/1.17-r0/krb5-1.17/src/lib/kdb/kdb_convert.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/krb5/1.17-r0/krb5-1.17/src/lib/kdb/kdb_convert.c | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* #pragma ident "@(#)kdb_convert.c 1.3 05/01/05 SMI" */
/*
* This file contains api's for conversion of the kdb_incr_update_t
* struct(s) into krb5_db_entry struct(s) and vice-versa.
*/
#include <com_err.h>
#include <iprop_hdr.h>
#include <k5-int.h>
#include <kdb.h>
#include <kdb_log.h>
#include <locale.h>
#include <sys/types.h>
#include "iprop.h"
/* BEGIN CSTYLED */
#define ULOG_ENTRY_TYPE(upd, i) \
((kdb_incr_update_t*)upd)->kdb_update.kdbe_t_val[i]
#define ULOG_ENTRY(upd, i) \
((kdb_incr_update_t*)upd)->kdb_update.kdbe_t_val[i].kdbe_val_t_u
#define ULOG_ENTRY_KEYVAL(upd, i, j) \
((kdb_incr_update_t*)upd) \
->kdb_update.kdbe_t_val[i] \
.kdbe_val_t_u.av_keydata.av_keydata_val[j]
#define ULOG_ENTRY_PRINC(upd, i, j) \
((kdb_incr_update_t*)upd) \
->kdb_update.kdbe_t_val[i] \
.kdbe_val_t_u.av_princ.k_components.k_components_val[j]
#define ULOG_ENTRY_MOD_PRINC(upd, i, j) \
((kdb_incr_update_t*)upd) \
->kdb_update.kdbe_t_val[i] \
.kdbe_val_t_u.av_mod_princ.k_components.k_components_val[j]
/* END CSTYLED */
typedef enum
{
REG_PRINC = 0,
MOD_PRINC = 1
} princ_type;
/*
* This routine tracks the krb5_db_entry fields that have been modified
* (by comparing it to the db_entry currently present in principal.db)
* in the update.
*/
static void find_changed_attrs(krb5_db_entry* current, krb5_db_entry* new,
krb5_boolean exclude_nra,
kdbe_attr_type_t* attrs, int* nattrs)
{
int i = 0, j = 0;
krb5_tl_data *first, *second;
if (current->attributes != new->attributes)
attrs[i++] = AT_ATTRFLAGS;
if (current->max_life != new->max_life)
attrs[i++] = AT_MAX_LIFE;
if (current->max_renewable_life != new->max_renewable_life)
attrs[i++] = AT_MAX_RENEW_LIFE;
if (current->expiration != new->expiration)
attrs[i++] = AT_EXP;
if (current->pw_expiration != new->pw_expiration)
attrs[i++] = AT_PW_EXP;
if (!exclude_nra)
{
if (current->last_success != new->last_success)
attrs[i++] = AT_LAST_SUCCESS;
if (current->last_failed != new->last_failed)
attrs[i++] = AT_LAST_FAILED;
if (current->fail_auth_count != new->fail_auth_count)
attrs[i++] = AT_FAIL_AUTH_COUNT;
}
if ((current->princ->type == new->princ->type) &&
(current->princ->length == new->princ->length))
{
if ((current->princ->realm.length == new->princ->realm.length) &&
strncmp(current->princ->realm.data, new->princ->realm.data,
current->princ->realm.length))
{
for (j = 0; j < current->princ->length; j++)
{
if ((current->princ->data[j].data != NULL) &&
(strncmp(current->princ->data[j].data,
new->princ->data[j].data,
current->princ->data[j].length)))
{
attrs[i++] = AT_PRINC;
break;
}
}
}
else
{
attrs[i++] = AT_PRINC;
}
}
else
{
attrs[i++] = AT_PRINC;
}
if (current->n_key_data == new->n_key_data)
{
/* Assuming key ordering is the same in new & current */
for (j = 0; j < new->n_key_data; j++)
{
if (current->key_data[j].key_data_kvno !=
new->key_data[j].key_data_kvno)
{
attrs[i++] = AT_KEYDATA;
break;
}
}
}
else
{
attrs[i++] = AT_KEYDATA;
}
if (current->n_tl_data == new->n_tl_data)
{
/* Assuming we preserve the TL_DATA ordering between updates */
for (first = current->tl_data, second = new->tl_data; first;
first = first->tl_data_next, second = second->tl_data_next)
{
if ((first->tl_data_length == second->tl_data_length) &&
(first->tl_data_type == second->tl_data_type))
{
if ((memcmp((char*)first->tl_data_contents,
(char*)second->tl_data_contents,
first->tl_data_length)) != 0)
{
attrs[i++] = AT_TL_DATA;
break;
}
}
else
{
attrs[i++] = AT_TL_DATA;
break;
}
}
}
else
{
attrs[i++] = AT_TL_DATA;
}
if (current->len != new->len)
attrs[i++] = AT_LEN;
/*
* Store the no. of (possibly :)) changed attributes
*/
*nattrs = i;
}
/* Initialize *u with a copy of d. Return 0 on success, -1 on failure. */
static int data_to_utf8str(utf8str_t* u, krb5_data d)
{
u->utf8str_t_len = d.length;
if (d.data)
{
u->utf8str_t_val = malloc(d.length);
if (u->utf8str_t_val == NULL)
return -1;
memcpy(u->utf8str_t_val, d.data, d.length);
}
else
u->utf8str_t_val = NULL;
return 0;
}
/*
* Converts the krb5_principal struct from db2 to ulog format.
*/
static krb5_error_code conv_princ_2ulog(krb5_principal princ,
kdb_incr_update_t* upd, int cnt,
princ_type tp)
{
int i = 0;
kdbe_princ_t* p;
kdbe_data_t* components;
if ((upd == NULL) || !princ)
return (KRB5KRB_ERR_GENERIC);
switch (tp)
{
case REG_PRINC:
case MOD_PRINC:
p = &ULOG_ENTRY(upd, cnt).av_princ; /* or av_mod_princ */
p->k_nametype = (int32_t)princ->type;
if (data_to_utf8str(&p->k_realm, princ->realm) < 0)
{
return ENOMEM;
}
p->k_components.k_components_len = princ->length;
p->k_components.k_components_val = components =
malloc(princ->length * sizeof(kdbe_data_t));
if (p->k_components.k_components_val == NULL)
{
free(p->k_realm.utf8str_t_val);
p->k_realm.utf8str_t_val = NULL;
return (ENOMEM);
}
memset(components, 0, princ->length * sizeof(kdbe_data_t));
for (i = 0; i < princ->length; i++)
components[i].k_data.utf8str_t_val = NULL;
for (i = 0; i < princ->length; i++)
{
components[i].k_magic = princ->data[i].magic;
if (data_to_utf8str(&components[i].k_data, princ->data[i]) < 0)
{
int j;
for (j = 0; j < i; j++)
{
free(components[j].k_data.utf8str_t_val);
components[j].k_data.utf8str_t_val = NULL;
}
free(components);
p->k_components.k_components_val = NULL;
free(p->k_realm.utf8str_t_val);
p->k_realm.utf8str_t_val = NULL;
return ENOMEM;
}
}
break;
default:
break;
}
return (0);
}
/*
* Copies a UTF-8 string from ulog to a krb5_data object, which may
* already have allocated storage associated with it.
*
* Maybe a return value should indicate success/failure?
*/
static void set_from_utf8str(krb5_data* d, utf8str_t u)
{
if (u.utf8str_t_len > INT_MAX - 1)
{
d->data = NULL;
return;
}
d->length = u.utf8str_t_len;
d->data = malloc(d->length + 1);
if (d->data == NULL)
return;
if (d->length) /* Pointer may be null if length = 0. */
strncpy(d->data, u.utf8str_t_val, d->length);
d->data[d->length] = 0;
}
/*
* Converts the krb5_principal struct from ulog to db2 format.
*/
static krb5_principal conv_princ_2db(krb5_context context,
kdbe_princ_t* kdbe_princ)
{
unsigned int i;
int j;
krb5_principal princ;
kdbe_data_t* components;
princ = calloc(1, sizeof(krb5_principal_data));
if (princ == NULL)
{
return NULL;
}
princ->length = 0;
princ->data = NULL;
components = kdbe_princ->k_components.k_components_val;
princ->type = (krb5_int32)kdbe_princ->k_nametype;
princ->realm.data = NULL;
set_from_utf8str(&princ->realm, kdbe_princ->k_realm);
if (princ->realm.data == NULL)
goto error;
princ->data =
calloc(kdbe_princ->k_components.k_components_len, sizeof(krb5_data));
if (princ->data == NULL)
goto error;
for (i = 0; i < kdbe_princ->k_components.k_components_len; i++)
princ->data[i].data = NULL;
princ->length = (krb5_int32)kdbe_princ->k_components.k_components_len;
for (j = 0; j < princ->length; j++)
{
princ->data[j].magic = components[j].k_magic;
set_from_utf8str(&princ->data[j], components[j].k_data);
if (princ->data[j].data == NULL)
goto error;
}
return princ;
error:
krb5_free_principal(context, princ);
return NULL;
}
/*
* This routine converts a krb5 DB record into update log (ulog) entry format.
* Space for the update log entry should be allocated prior to invocation of
* this routine.
*/
krb5_error_code ulog_conv_2logentry(krb5_context context, krb5_db_entry* entry,
kdb_incr_update_t* update)
{
int i, j, cnt, final, nattrs, tmpint;
krb5_principal tmpprinc;
krb5_tl_data* newtl;
krb5_db_entry* curr;
krb5_error_code ret;
kdbe_attr_type_t* attr_types;
int kadm_data_yes;
/* always exclude non-replicated attributes, for now */
krb5_boolean exclude_nra = TRUE;
nattrs = tmpint = 0;
final = -1;
kadm_data_yes = 0;
attr_types = NULL;
/*
* XXX we rely on the good behaviour of the database not to
* exceed this limit.
*/
if ((update->kdb_update.kdbe_t_val = (kdbe_val_t*)malloc(MAXENTRY_SIZE)) ==
NULL)
{
return (ENOMEM);
}
/*
* Find out which attrs have been modified
*/
if ((attr_types = (kdbe_attr_type_t*)malloc(sizeof(kdbe_attr_type_t) *
MAXATTRS_SIZE)) == NULL)
{
return (ENOMEM);
}
ret = krb5_db_get_principal(context, entry->princ, 0, &curr);
if (ret && ret != KRB5_KDB_NOENTRY)
{
free(attr_types);
return (ret);
}
if (ret == KRB5_KDB_NOENTRY)
{
/*
* This is a new entry to the database, hence will
* include all the attribute-value pairs
*
* We leave out the TL_DATA types which we model as
* attrs in kdbe_attr_type_t, since listing AT_TL_DATA
* encompasses these other types-turned-attributes
*
* So, we do *NOT* consider AT_MOD_PRINC, AT_MOD_TIME,
* AT_MOD_WHERE, AT_PW_LAST_CHANGE, AT_PW_POLICY,
* AT_PW_POLICY_SWITCH, AT_PW_HIST_KVNO and AT_PW_HIST,
* totalling 8 attrs.
*/
while (nattrs < MAXATTRS_SIZE - 8)
{
attr_types[nattrs] = nattrs;
nattrs++;
}
}
else
{
find_changed_attrs(curr, entry, exclude_nra, attr_types, &nattrs);
krb5_db_free_principal(context, curr);
}
for (i = 0; i < nattrs; i++)
{
switch (attr_types[i])
{
case AT_ATTRFLAGS:
if (entry->attributes >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_ATTRFLAGS;
ULOG_ENTRY(update, final).av_attrflags =
(uint32_t)entry->attributes;
}
break;
case AT_MAX_LIFE:
if (entry->max_life >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_MAX_LIFE;
ULOG_ENTRY(update, final).av_max_life =
(uint32_t)entry->max_life;
}
break;
case AT_MAX_RENEW_LIFE:
if (entry->max_renewable_life >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type =
AT_MAX_RENEW_LIFE;
ULOG_ENTRY(update, final).av_max_renew_life =
(uint32_t)entry->max_renewable_life;
}
break;
case AT_EXP:
if (entry->expiration >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_EXP;
ULOG_ENTRY(update, final).av_exp =
(uint32_t)entry->expiration;
}
break;
case AT_PW_EXP:
if (entry->pw_expiration >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_PW_EXP;
ULOG_ENTRY(update, final).av_pw_exp =
(uint32_t)entry->pw_expiration;
}
break;
case AT_LAST_SUCCESS:
if (!exclude_nra && entry->last_success >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_LAST_SUCCESS;
ULOG_ENTRY(update, final).av_last_success =
(uint32_t)entry->last_success;
}
break;
case AT_LAST_FAILED:
if (!exclude_nra && entry->last_failed >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_LAST_FAILED;
ULOG_ENTRY(update, final).av_last_failed =
(uint32_t)entry->last_failed;
}
break;
case AT_FAIL_AUTH_COUNT:
if (!exclude_nra)
{
ULOG_ENTRY_TYPE(update, ++final).av_type =
AT_FAIL_AUTH_COUNT;
ULOG_ENTRY(update, final).av_fail_auth_count =
(uint32_t)entry->fail_auth_count;
}
break;
case AT_PRINC:
if (entry->princ->length > 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_PRINC;
if ((ret = conv_princ_2ulog(entry->princ, update, final,
REG_PRINC)))
{
free(attr_types);
return (ret);
}
}
break;
case AT_KEYDATA:
/* BEGIN CSTYLED */
if (entry->n_key_data >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_KEYDATA;
ULOG_ENTRY(update, final).av_keydata.av_keydata_len =
entry->n_key_data;
ULOG_ENTRY(update, final).av_keydata.av_keydata_val =
malloc(entry->n_key_data * sizeof(kdbe_key_t));
if (ULOG_ENTRY(update, final).av_keydata.av_keydata_val ==
NULL)
{
free(attr_types);
return (ENOMEM);
}
for (j = 0; j < entry->n_key_data; j++)
{
ULOG_ENTRY_KEYVAL(update, final, j).k_ver =
entry->key_data[j].key_data_ver;
ULOG_ENTRY_KEYVAL(update, final, j).k_kvno =
entry->key_data[j].key_data_kvno;
ULOG_ENTRY_KEYVAL(update, final, j)
.k_enctype.k_enctype_len =
entry->key_data[j].key_data_ver;
ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_len =
entry->key_data[j].key_data_ver;
ULOG_ENTRY_KEYVAL(update, final, j)
.k_enctype.k_enctype_val = malloc(
entry->key_data[j].key_data_ver * sizeof(int32_t));
if (ULOG_ENTRY_KEYVAL(update, final, j)
.k_enctype.k_enctype_val == NULL)
{
free(attr_types);
return (ENOMEM);
}
ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val =
malloc(entry->key_data[j].key_data_ver *
sizeof(utf8str_t));
if (ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val == NULL)
{
free(attr_types);
return (ENOMEM);
}
for (cnt = 0; cnt < entry->key_data[j].key_data_ver;
cnt++)
{
ULOG_ENTRY_KEYVAL(update, final, j)
.k_enctype.k_enctype_val[cnt] =
entry->key_data[j].key_data_type[cnt];
ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val[cnt]
.utf8str_t_len =
entry->key_data[j].key_data_length[cnt];
ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val[cnt]
.utf8str_t_val =
malloc(entry->key_data[j].key_data_length[cnt] *
sizeof(char));
if (ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val[cnt]
.utf8str_t_val == NULL)
{
free(attr_types);
return (ENOMEM);
}
(void)memcpy(
ULOG_ENTRY_KEYVAL(update, final, j)
.k_contents.k_contents_val[cnt]
.utf8str_t_val,
entry->key_data[j].key_data_contents[cnt],
entry->key_data[j].key_data_length[cnt]);
}
}
}
break;
case AT_TL_DATA:
ret = krb5_dbe_lookup_last_pwd_change(context, entry, &tmpint);
if (ret == 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type =
AT_PW_LAST_CHANGE;
ULOG_ENTRY(update, final).av_pw_last_change = tmpint;
}
tmpint = 0;
if (!(ret = krb5_dbe_lookup_mod_princ_data(context, entry,
&tmpint, &tmpprinc)))
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_MOD_PRINC;
ret = conv_princ_2ulog(tmpprinc, update, final, MOD_PRINC);
krb5_free_principal(context, tmpprinc);
if (ret)
{
free(attr_types);
return (ret);
}
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_MOD_TIME;
ULOG_ENTRY(update, final).av_mod_time = tmpint;
}
newtl = entry->tl_data;
while (newtl)
{
switch (newtl->tl_data_type)
{
case KRB5_TL_LAST_PWD_CHANGE:
case KRB5_TL_MOD_PRINC:
break;
case KRB5_TL_KADM_DATA:
default:
if (kadm_data_yes == 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type =
AT_TL_DATA;
ULOG_ENTRY(update, final)
.av_tldata.av_tldata_len = 0;
ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val = malloc(
entry->n_tl_data * sizeof(kdbe_tl_t));
if (ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val == NULL)
{
free(attr_types);
return (ENOMEM);
}
kadm_data_yes = 1;
}
tmpint = ULOG_ENTRY(update, final)
.av_tldata.av_tldata_len;
ULOG_ENTRY(update, final).av_tldata.av_tldata_len++;
ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val[tmpint]
.tl_type = newtl->tl_data_type;
ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val[tmpint]
.tl_data.tl_data_len = newtl->tl_data_length;
ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val[tmpint]
.tl_data.tl_data_val =
malloc(newtl->tl_data_length * sizeof(char));
if (ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val[tmpint]
.tl_data.tl_data_val == NULL)
{
free(attr_types);
return (ENOMEM);
}
(void)memcpy(ULOG_ENTRY(update, final)
.av_tldata.av_tldata_val[tmpint]
.tl_data.tl_data_val,
newtl->tl_data_contents,
newtl->tl_data_length);
break;
}
newtl = newtl->tl_data_next;
}
break;
/* END CSTYLED */
case AT_LEN:
if (entry->len >= 0)
{
ULOG_ENTRY_TYPE(update, ++final).av_type = AT_LEN;
ULOG_ENTRY(update, final).av_len = (int16_t)entry->len;
}
break;
default:
break;
}
}
free(attr_types);
/*
* Update len field in kdb_update
*/
update->kdb_update.kdbe_t_len = ++final;
return (0);
}
/* Convert an update log (ulog) entry into a kerberos record. */
krb5_error_code ulog_conv_2dbentry(krb5_context context, krb5_db_entry** entry,
kdb_incr_update_t* update)
{
krb5_db_entry* ent;
int replica;
krb5_principal mod_princ = NULL;
int i, j, cnt = 0, mod_time = 0, nattrs;
krb5_principal dbprinc;
char* dbprincstr = NULL;
krb5_tl_data newtl;
krb5_error_code ret;
unsigned int prev_n_keys = 0;
krb5_boolean is_add;
void* newptr;
*entry = NULL;
replica = (context->kdblog_context != NULL) &&
(context->kdblog_context->iproprole == IPROP_REPLICA);
/*
* Store the no. of changed attributes in nattrs
*/
nattrs = update->kdb_update.kdbe_t_len;
dbprincstr =
malloc((update->kdb_princ_name.utf8str_t_len + 1) * sizeof(char));
if (dbprincstr == NULL)
return (ENOMEM);
strncpy(dbprincstr, (char*)update->kdb_princ_name.utf8str_t_val,
update->kdb_princ_name.utf8str_t_len);
dbprincstr[update->kdb_princ_name.utf8str_t_len] = 0;
ret = krb5_parse_name(context, dbprincstr, &dbprinc);
free(dbprincstr);
if (ret)
return (ret);
ret = krb5_db_get_principal(context, dbprinc, 0, &ent);
krb5_free_principal(context, dbprinc);
if (ret && ret != KRB5_KDB_NOENTRY)
return (ret);
is_add = (ret == KRB5_KDB_NOENTRY);
/*
* Set ent->n_tl_data = 0 initially, if this is an ADD update
*/
if (is_add)
{
ent = calloc(1, sizeof(*ent));
if (ent == NULL)
return (ENOMEM);
ent->n_tl_data = 0;
}
for (i = 0; i < nattrs; i++)
{
krb5_principal tmpprinc = NULL;
#define u (ULOG_ENTRY(update, i))
switch (ULOG_ENTRY_TYPE(update, i).av_type)
{
case AT_ATTRFLAGS:
ent->attributes = (krb5_flags)u.av_attrflags;
break;
case AT_MAX_LIFE:
ent->max_life = (krb5_deltat)u.av_max_life;
break;
case AT_MAX_RENEW_LIFE:
ent->max_renewable_life = (krb5_deltat)u.av_max_renew_life;
break;
case AT_EXP:
ent->expiration = (krb5_timestamp)u.av_exp;
break;
case AT_PW_EXP:
ent->pw_expiration = (krb5_timestamp)u.av_pw_exp;
break;
case AT_LAST_SUCCESS:
if (!replica)
ent->last_success = (krb5_timestamp)u.av_last_success;
break;
case AT_LAST_FAILED:
if (!replica)
ent->last_failed = (krb5_timestamp)u.av_last_failed;
break;
case AT_FAIL_AUTH_COUNT:
if (!replica)
ent->fail_auth_count = (krb5_kvno)u.av_fail_auth_count;
break;
case AT_PRINC:
tmpprinc = conv_princ_2db(context, &u.av_princ);
if (tmpprinc == NULL)
return ENOMEM;
krb5_free_principal(context, ent->princ);
ent->princ = tmpprinc;
break;
case AT_KEYDATA:
if (!is_add)
prev_n_keys = ent->n_key_data;
else
prev_n_keys = 0;
ent->n_key_data = (krb5_int16)u.av_keydata.av_keydata_len;
if (is_add)
ent->key_data = NULL;
/* Allocate one extra key data to avoid allocating zero bytes.
*/
newptr = realloc(ent->key_data,
(ent->n_key_data + 1) * sizeof(krb5_key_data));
if (newptr == NULL)
return ENOMEM;
ent->key_data = newptr;
/* BEGIN CSTYLED */
for (j = prev_n_keys; j < ent->n_key_data; j++)
{
for (cnt = 0; cnt < 2; cnt++)
{
ent->key_data[j].key_data_contents[cnt] = NULL;
}
}
for (j = 0; j < ent->n_key_data; j++)
{
krb5_key_data* kp = &ent->key_data[j];
kdbe_key_t* kv = &ULOG_ENTRY_KEYVAL(update, i, j);
kp->key_data_ver = (krb5_int16)kv->k_ver;
kp->key_data_kvno = (krb5_ui_2)kv->k_kvno;
if (kp->key_data_ver > 2)
{
return EINVAL; /* XXX ? */
}
for (cnt = 0; cnt < kp->key_data_ver; cnt++)
{
kp->key_data_type[cnt] =
(krb5_int16)kv->k_enctype.k_enctype_val[cnt];
kp->key_data_length[cnt] =
(krb5_int16)kv->k_contents.k_contents_val[cnt]
.utf8str_t_len;
newptr = realloc(kp->key_data_contents[cnt],
kp->key_data_length[cnt]);
if (newptr == NULL)
return ENOMEM;
kp->key_data_contents[cnt] = newptr;
(void)memset(kp->key_data_contents[cnt], 0,
kp->key_data_length[cnt]);
(void)memcpy(
kp->key_data_contents[cnt],
kv->k_contents.k_contents_val[cnt].utf8str_t_val,
kp->key_data_length[cnt]);
}
}
break;
case AT_TL_DATA:
{
for (j = 0; j < (int)u.av_tldata.av_tldata_len; j++)
{
newtl.tl_data_type =
(krb5_int16)u.av_tldata.av_tldata_val[j].tl_type;
newtl.tl_data_length =
(krb5_int16)u.av_tldata.av_tldata_val[j]
.tl_data.tl_data_len;
newtl.tl_data_contents =
(krb5_octet*)u.av_tldata.av_tldata_val[j]
.tl_data.tl_data_val;
newtl.tl_data_next = NULL;
if ((ret = krb5_dbe_update_tl_data(context, ent, &newtl)))
return (ret);
}
break;
/* END CSTYLED */
}
case AT_PW_LAST_CHANGE:
if ((ret = krb5_dbe_update_last_pwd_change(
context, ent, u.av_pw_last_change)))
return (ret);
break;
case AT_MOD_PRINC:
tmpprinc = conv_princ_2db(context, &u.av_mod_princ);
if (tmpprinc == NULL)
return ENOMEM;
mod_princ = tmpprinc;
break;
case AT_MOD_TIME:
mod_time = u.av_mod_time;
break;
case AT_LEN:
ent->len = (krb5_int16)u.av_len;
break;
default:
break;
}
#undef u
}
/*
* process mod_princ_data request
*/
if (mod_time && mod_princ)
{
ret = krb5_dbe_update_mod_princ_data(context, ent, mod_time, mod_princ);
krb5_free_principal(context, mod_princ);
mod_princ = NULL;
if (ret)
return (ret);
}
*entry = ent;
return (0);
}
/*
* This routine frees up memory associated with the bunched ulog entries.
*/
void ulog_free_entries(kdb_incr_update_t* updates, int no_of_updates)
{
kdb_incr_update_t* upd;
unsigned int i, j;
int k, cnt;
if (updates == NULL)
return;
upd = updates;
/*
* Loop thru each ulog entry
*/
for (cnt = 0; cnt < no_of_updates; cnt++)
{
/*
* ulog entry - kdb_princ_name
*/
free(upd->kdb_princ_name.utf8str_t_val);
/* BEGIN CSTYLED */
/*
* ulog entry - kdb_kdcs_seen_by
*/
if (upd->kdb_kdcs_seen_by.kdb_kdcs_seen_by_val)
{
for (i = 0; i < upd->kdb_kdcs_seen_by.kdb_kdcs_seen_by_len; i++)
free(upd->kdb_kdcs_seen_by.kdb_kdcs_seen_by_val[i]
.utf8str_t_val);
free(upd->kdb_kdcs_seen_by.kdb_kdcs_seen_by_val);
}
/*
* ulog entry - kdb_futures
*/
free(upd->kdb_futures.kdb_futures_val);
/*
* ulog entry - kdb_update
*/
if (upd->kdb_update.kdbe_t_val)
{
/*
* Loop thru all the attributes and free up stuff
*/
for (i = 0; i < upd->kdb_update.kdbe_t_len; i++)
{
/*
* Free av_key_data
*/
if ((ULOG_ENTRY_TYPE(upd, i).av_type == AT_KEYDATA) &&
ULOG_ENTRY(upd, i).av_keydata.av_keydata_val)
{
for (j = 0;
j < ULOG_ENTRY(upd, i).av_keydata.av_keydata_len; j++)
{
free(ULOG_ENTRY_KEYVAL(upd, i, j)
.k_enctype.k_enctype_val);
if (ULOG_ENTRY_KEYVAL(upd, i, j)
.k_contents.k_contents_val)
{
for (k = 0; k < ULOG_ENTRY_KEYVAL(upd, i, j).k_ver;
k++)
{
free(ULOG_ENTRY_KEYVAL(upd, i, j)
.k_contents.k_contents_val[k]
.utf8str_t_val);
}
free(ULOG_ENTRY_KEYVAL(upd, i, j)
.k_contents.k_contents_val);
}
}
free(ULOG_ENTRY(upd, i).av_keydata.av_keydata_val);
}
/*
* Free av_tl_data
*/
if ((ULOG_ENTRY_TYPE(upd, i).av_type == AT_TL_DATA) &&
ULOG_ENTRY(upd, i).av_tldata.av_tldata_val)
{
for (j = 0; j < ULOG_ENTRY(upd, i).av_tldata.av_tldata_len;
j++)
{
free(ULOG_ENTRY(upd, i)
.av_tldata.av_tldata_val[j]
.tl_data.tl_data_val);
}
free(ULOG_ENTRY(upd, i).av_tldata.av_tldata_val);
}
/*
* Free av_princ
*/
if (ULOG_ENTRY_TYPE(upd, i).av_type == AT_PRINC)
{
free(ULOG_ENTRY(upd, i).av_princ.k_realm.utf8str_t_val);
if (ULOG_ENTRY(upd, i)
.av_princ.k_components.k_components_val)
{
for (j = 0;
j < ULOG_ENTRY(upd, i)
.av_princ.k_components.k_components_len;
j++)
{
free(ULOG_ENTRY_PRINC(upd, i, j)
.k_data.utf8str_t_val);
}
free(ULOG_ENTRY(upd, i)
.av_princ.k_components.k_components_val);
}
}
/*
* Free av_mod_princ
*/
if (ULOG_ENTRY_TYPE(upd, i).av_type == AT_MOD_PRINC)
{
free(ULOG_ENTRY(upd, i).av_mod_princ.k_realm.utf8str_t_val);
if (ULOG_ENTRY(upd, i)
.av_mod_princ.k_components.k_components_val)
{
for (j = 0;
j <
ULOG_ENTRY(upd, i)
.av_mod_princ.k_components.k_components_len;
j++)
{
free(ULOG_ENTRY_MOD_PRINC(upd, i, j)
.k_data.utf8str_t_val);
}
free(ULOG_ENTRY(upd, i)
.av_mod_princ.k_components.k_components_val);
}
}
/*
* Free av_mod_where
*/
if ((ULOG_ENTRY_TYPE(upd, i).av_type == AT_MOD_WHERE) &&
ULOG_ENTRY(upd, i).av_mod_where.utf8str_t_val)
free(ULOG_ENTRY(upd, i).av_mod_where.utf8str_t_val);
/*
* Free av_pw_policy
*/
if ((ULOG_ENTRY_TYPE(upd, i).av_type == AT_PW_POLICY) &&
ULOG_ENTRY(upd, i).av_pw_policy.utf8str_t_val)
free(ULOG_ENTRY(upd, i).av_pw_policy.utf8str_t_val);
/*
* XXX: Free av_pw_hist
*
* For now, we just free the pointer
* to av_pw_hist_val, since we aren't
* populating this union member in
* the conv api function(s) anyways.
*/
if ((ULOG_ENTRY_TYPE(upd, i).av_type == AT_PW_HIST) &&
ULOG_ENTRY(upd, i).av_pw_hist.av_pw_hist_val)
free(ULOG_ENTRY(upd, i).av_pw_hist.av_pw_hist_val);
}
/*
* Free up the pointer to kdbe_t_val
*/
free(upd->kdb_update.kdbe_t_val);
}
/* END CSTYLED */
/*
* Bump up to next struct
*/
upd++;
}
/*
* Finally, free up the pointer to the bunched ulog entries
*/
free(updates);
}
| 34.482852 | 80 | 0.449551 | [
"object",
"model"
] |
ea9d557a285c238c731a498f0518289d7abfaf3f | 23,236 | h | C | Roller/Content/CWS_WaveFrontReader.h | GarrettVance/Roller | ce996a4e9df785a4d2259360b048dc25b1686696 | [
"MIT"
] | 1 | 2020-06-20T08:27:29.000Z | 2020-06-20T08:27:29.000Z | Roller/Content/CWS_WaveFrontReader.h | GarrettVance/Roller | ce996a4e9df785a4d2259360b048dc25b1686696 | [
"MIT"
] | null | null | null | Roller/Content/CWS_WaveFrontReader.h | GarrettVance/Roller | ce996a4e9df785a4d2259360b048dc25b1686696 | [
"MIT"
] | null | null | null | //--------------------------------------------------------------------------------------
// File: WaveFrontReader.h
//
// Code for loading basic mesh data from a WaveFront OBJ file
//
// http://en.wikipedia.org/wiki/Wavefront_.obj_file
//
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//
// http://go.microsoft.com/fwlink/?LinkID=324981
//--------------------------------------------------------------------------------------
#pragma once
#pragma warning(push)
#pragma warning(disable : 4005)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#define NODRAWTEXT
#define NOGDI
#define NOMCX
#define NOSERVICE
#define NOHELP
#pragma warning(pop)
#include <windows.h>
#include <algorithm>
#include <fstream>
#include <string>
#include <vector>
#include <unordered_map>
#include <stdint.h>
#include <directxmath.h>
#include <directxcollision.h>
#ifndef DIRECTX_NOEXCEPT
#if defined(_MSC_VER) && (_MSC_VER < 1900)
#define DIRECTX_NOEXCEPT
#else
#define DIRECTX_NOEXCEPT noexcept
#endif
#endif
template<class index_t>
class WaveFrontReader
{
public:
typedef index_t index_t;
struct WFR_Vertex
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT3 normal;
DirectX::XMFLOAT2 textureCoordinate;
DirectX::XMFLOAT3 tangent; // ghv added 20190204;
DirectX::XMFLOAT3 bitangent; // ghv added 20190204;
};
WaveFrontReader() DIRECTX_NOEXCEPT : hasNormals(false), hasTexcoords(false) {} // Class ctor for the WaveFrontReader class;
HRESULT Load(_In_z_ const wchar_t* p_dot_obj_filename, bool ccw = true)
{
Clear();
static const size_t MAX_POLY = 64;
using namespace DirectX;
std::wifstream InFile(p_dot_obj_filename);
if (!InFile)
return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
wchar_t fname[_MAX_FNAME] = {};
_wsplitpath_s(p_dot_obj_filename, nullptr, 0, nullptr, 0, fname, _MAX_FNAME, nullptr, 0);
name = fname;
std::vector<XMFLOAT3> positions;
std::vector<XMFLOAT3> normals;
std::vector<XMFLOAT2> texCoords;
VertexCache vertexCache;
CWS_Material defmat;
wcscpy_s(defmat.strName, L"default");
plector_materials.emplace_back(defmat);
uint32_t curSubset = 0;
wchar_t external_dot_mtl_filename[MAX_PATH] = {};
for (;; )
{
std::wstring strCommand;
InFile >> strCommand;
if (!InFile)
break;
if (*strCommand.c_str() == L'#')
{
// Comment
}
else if (0 == wcscmp(strCommand.c_str(), L"o"))
{
// Object name ignored
}
else if (0 == wcscmp(strCommand.c_str(), L"g"))
{
// Group name ignored
}
else if (0 == wcscmp(strCommand.c_str(), L"s"))
{
// Smoothing group ignored
}
else if (0 == wcscmp(strCommand.c_str(), L"v"))
{
// Vertex Position
float x, y, z;
InFile >> x >> y >> z;
positions.emplace_back(XMFLOAT3(x, y, z));
}
else if (0 == wcscmp(strCommand.c_str(), L"vt"))
{
// Vertex TexCoord
float u, v;
InFile >> u >> v;
texCoords.emplace_back(XMFLOAT2(u, v));
hasTexcoords = true;
}
else if (0 == wcscmp(strCommand.c_str(), L"vn"))
{
// Vertex Normal
float x, y, z;
InFile >> x >> y >> z;
normals.emplace_back(XMFLOAT3(x, y, z));
hasNormals = true;
}
else if (0 == wcscmp(strCommand.c_str(), L"f"))
{
// Face
INT iPosition, iTexCoord, iNormal;
WFR_Vertex vertex;
DWORD faceIndex[MAX_POLY];
size_t iFace = 0;
for (;;)
{
if (iFace >= MAX_POLY)
{
// Too many polygon verts for the reader
return E_FAIL;
}
memset(&vertex, 0, sizeof(vertex));
InFile >> iPosition;
UINT vertexIndex = 0;
if (!iPosition)
{
// 0 is not allowed for index
return E_UNEXPECTED;
}
else if (iPosition < 0)
{
// Negative values are relative indices
vertexIndex = UINT(positions.size() + iPosition);
}
else
{
// OBJ format uses 1-based arrays
vertexIndex = iPosition - 1;
}
if (vertexIndex >= positions.size())
return E_FAIL;
vertex.position = positions[vertexIndex];
if ('/' == InFile.peek())
{
InFile.ignore();
if ('/' != InFile.peek())
{
// Optional texture coordinate
InFile >> iTexCoord;
UINT coordIndex = 0;
if (!iTexCoord)
{
// 0 is not allowed for index
return E_UNEXPECTED;
}
else if (iTexCoord < 0)
{
// Negative values are relative indices
coordIndex = UINT(texCoords.size() + iTexCoord);
}
else
{
// OBJ format uses 1-based arrays
coordIndex = iTexCoord - 1;
}
if (coordIndex >= texCoords.size())
return E_FAIL;
vertex.textureCoordinate = texCoords[coordIndex];
}
if ('/' == InFile.peek())
{
InFile.ignore();
// Optional vertex normal
InFile >> iNormal;
UINT normIndex = 0;
if (!iNormal)
{
// 0 is not allowed for index
return E_UNEXPECTED;
}
else if (iNormal < 0)
{
// Negative values are relative indices
normIndex = UINT(normals.size() + iNormal);
}
else
{
// OBJ format uses 1-based arrays
normIndex = iNormal - 1;
}
if (normIndex >= normals.size())
return E_FAIL;
vertex.normal = normals[normIndex];
}
}
// If a duplicate vertex doesn't exist, add this vertex to the Vertices
// list. Store the index in the Indices array.
//
//
// ghv :
// ghv : The Vertices and Indices lists will eventually become
// ghv : the Vertex Buffer and Index Buffer for the mesh.
// ghv :
DWORD index = AddVertex(vertexIndex, &vertex, vertexCache);
if (index == (DWORD)-1)
return E_OUTOFMEMORY;
#pragma warning( suppress : 4127 )
if (sizeof(index_t) == 2 && (index >= 0xFFFF))
{
// Too many indices for 16-bit IB!
return E_FAIL;
}
#pragma warning( suppress : 4127 )
else if (sizeof(index_t) == 4 && (index >= 0xFFFFFFFF))
{
// Too many indices for 32-bit IB!
return E_FAIL;
}
faceIndex[iFace] = index;
++iFace;
// Check for more face data or end of the face statement
bool faceEnd = false;
for (;;)
{
wchar_t p = InFile.peek();
if ('\n' == p || !InFile)
{
faceEnd = true;
break;
}
else if (isdigit(p) || p == '-' || p == '+')
break;
InFile.ignore();
}
if (faceEnd)
break;
}
if (iFace < 3)
{
// Need at least 3 points to form a triangle
return E_FAIL;
}
// Convert polygons to triangles
DWORD i0 = faceIndex[0];
DWORD i1 = faceIndex[1];
for (size_t j = 2; j < iFace; ++j)
{
DWORD index = faceIndex[j];
plector_indices.emplace_back(static_cast<index_t>(i0));
if (ccw)
{
plector_indices.emplace_back(static_cast<index_t>(i1));
plector_indices.emplace_back(static_cast<index_t>(index));
}
else
{
plector_indices.emplace_back(static_cast<index_t>(index));
plector_indices.emplace_back(static_cast<index_t>(i1));
}
plector_attributes.emplace_back(curSubset);
i1 = index;
}
assert(plector_attributes.size() * 3 == plector_indices.size());
}
else if (0 == wcscmp(strCommand.c_str(), L"mtllib"))
{
// ghv : The WaveFront OBJ file contains a "mtllib" directive
// ghv : to identify an external dot.mtl file, e.g.
// ghv :
// ghv : mtllib Trapezohedron_Blender_Exported.mtl
// ghv :
// ghv : The "mtllib" directive's argument is the non-qualified
// ghv : file name of the external dot.mtl file.
//
InFile >> external_dot_mtl_filename;
}
else if (0 == wcscmp(strCommand.c_str(), L"usemtl"))
{
// ghv : The WaveFront OBJ file contains "usemtl" directives, e.g.
// ghv :
// ghv : usemtl None
// ghv : usemtl None_JuliaSetOne.bmp
// ghv : usemtl None_Mandelbrot.png ...etc...
// ghv :
// ghv : Each "usemtl" directive has an argument giving
// ghv : a kind of logical name to each CWS_Material.
// ghv : The logical CWS_Material name is then found inside the dot.mtl file.
//
wchar_t usemtl_material_nickname[MAX_PATH] = {};
InFile >> usemtl_material_nickname;
bool bFound = false;
uint32_t count = 0;
for (auto it = plector_materials.cbegin(); it != plector_materials.cend(); ++it, ++count)
{
// ghv : traverse the whole std::vector of materials
if (0 == wcscmp(it->strName, usemtl_material_nickname))
{
bFound = true;
curSubset = count;
break;
}
}
if (!bFound)
{
CWS_Material mat; // ghv : No match of usemtl_material_nickname so must instantiate new CWS_Material object;
curSubset = static_cast<uint32_t>(plector_materials.size());
wcscpy_s(mat.strName, MAX_PATH - 1, usemtl_material_nickname);
plector_materials.emplace_back(mat);
}
}
else
{
#ifdef _DEBUG
// Unimplemented or unrecognized command
OutputDebugStringW(strCommand.c_str());
#endif
}
InFile.ignore(1000, '\n');
}
if (positions.empty())
return E_FAIL;
// Cleanup
InFile.close();
BoundingBox::CreateFromPoints(bounds, positions.size(), positions.data(), sizeof(XMFLOAT3));
// If an associated material file was found, read that in as well.
if (*external_dot_mtl_filename)
{
// ghv : Get the extension .xyz from "external_dot_mtl_filename". Expect it to be "mtl":
wchar_t mater_extension[_MAX_EXT] = {};
_wsplitpath_s(external_dot_mtl_filename, nullptr, 0, nullptr, 0, fname, _MAX_FNAME, mater_extension, _MAX_EXT);
// ghv : Get the disk drive and the directory from "p_dot_obj_filename":
wchar_t mater_drive[_MAX_DRIVE] = {};
wchar_t mater_dir[_MAX_DIR] = {};
_wsplitpath_s(p_dot_obj_filename, mater_drive, _MAX_DRIVE, mater_dir, _MAX_DIR, nullptr, 0, nullptr, 0);
// ghv : Combine what has been discovered to synthesize a fully-qualified path:
// ghv : mater_drive + mater_dir + mater_FNAME + mater_extension = fully-qualified filename.
wchar_t szPath[MAX_PATH] = {};
_wmakepath_s(szPath, MAX_PATH, mater_drive, mater_dir, fname, mater_extension);
HRESULT hr = LoadMTL(szPath);
if (FAILED(hr))
return hr;
}
return S_OK;
}
// Closes Load();
HRESULT LoadMTL(_In_z_ const wchar_t* p_dot_mtl_file)
{
using namespace DirectX;
// ghv :
// ghv : Assumes MTL is in CWD along with OBJ !!!
// ghv : Assumes MTL is in CWD along with OBJ !!!
// ghv : Assumes MTL is in CWD along with OBJ !!!
// ghv :
std::wifstream InFile(p_dot_mtl_file);
if (!InFile)
return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
auto curMaterial = plector_materials.end();
for (;; )
{
std::wstring strCommand;
InFile >> strCommand;
if (!InFile)
break;
if (0 == wcscmp(strCommand.c_str(), L"newmtl"))
{
// Switching active materials
wchar_t strName[MAX_PATH] = {};
InFile >> strName;
curMaterial = plector_materials.end();
for (auto it = plector_materials.begin(); it != plector_materials.end(); ++it)
{
if (0 == wcscmp(it->strName, strName))
{
curMaterial = it;
break;
}
}
}
// The rest of the commands rely on an active material
if (curMaterial == plector_materials.end())
continue;
if (0 == wcscmp(strCommand.c_str(), L"#"))
{
// Comment
}
else if (0 == wcscmp(strCommand.c_str(), L"Ka"))
{
// Ambient color
float r, g, b;
InFile >> r >> g >> b;
curMaterial->vAmbient = XMFLOAT3(r, g, b);
}
else if (0 == wcscmp(strCommand.c_str(), L"Kd"))
{
// Diffuse color
float r, g, b;
InFile >> r >> g >> b;
curMaterial->vDiffuse = XMFLOAT3(r, g, b);
}
else if (0 == wcscmp(strCommand.c_str(), L"Ks"))
{
// Specular color
float r, g, b;
InFile >> r >> g >> b;
curMaterial->vSpecular = XMFLOAT3(r, g, b);
}
else if (0 == wcscmp(strCommand.c_str(), L"d") ||
0 == wcscmp(strCommand.c_str(), L"Tr"))
{
// Alpha
InFile >> curMaterial->fAlpha;
}
else if (0 == wcscmp(strCommand.c_str(), L"Ns"))
{
// Shininess
int nShininess;
InFile >> nShininess;
curMaterial->nShininess = nShininess;
}
else if (0 == wcscmp(strCommand.c_str(), L"illum"))
{
// Specular on/off
int illumination;
InFile >> illumination;
curMaterial->bSpecular = (illumination == 2);
}
else if (0 == wcscmp(strCommand.c_str(), L"map_Kd"))
{
// Texture
InFile >> curMaterial->strTexture;
}
else
{
// Unimplemented or unrecognized command
}
InFile.ignore(1000, L'\n');
}
InFile.close();
return S_OK;
}
// Closes LoadMTL();
void Clear()
{
plector_vertices.clear();
plector_indices.clear();
plector_attributes.clear();
plector_materials.clear();
name.clear();
hasNormals = false;
hasTexcoords = false;
bounds.Center.x = bounds.Center.y = bounds.Center.z = 0.f;
bounds.Extents.x = bounds.Extents.y = bounds.Extents.z = 0.f;
}
HRESULT LoadVBO(_In_z_ const wchar_t* p_vbo_file)
{
Clear();
wchar_t fname[_MAX_FNAME] = {};
_wsplitpath_s(p_vbo_file, nullptr, 0, nullptr, 0, fname, _MAX_FNAME, nullptr, 0);
name = fname;
CWS_Material defmat;
wcscpy_s(defmat.strName, L"default");
materials.emplace_back(defmat);
std::ifstream vboFile(p_vbo_file, std::ifstream::in | std::ifstream::binary);
if (!vboFile.is_open())
return HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND);
hasNormals = hasTexcoords = true;
uint32_t numVertices = 0;
uint32_t numIndices = 0;
vboFile.read(reinterpret_cast<char*>(&numVertices), sizeof(uint32_t));
if (!numVertices)
return E_FAIL;
vboFile.read(reinterpret_cast<char*>(&numIndices), sizeof(uint32_t));
if (!numIndices)
return E_FAIL;
plector_vertices.resize(numVertices);
vboFile.read(reinterpret_cast<char*>(plector_vertices.data()), sizeof(WFR_Vertex) * numVertices);
#pragma warning( suppress : 4127 )
if (sizeof(index_t) == 2)
{
plector_indices.resize(numIndices);
vboFile.read(reinterpret_cast<char*>(plector_indices.data()), sizeof(uint16_t) * numIndices);
}
else
{
std::vector<uint16_t> tmp;
tmp.resize(numIndices);
vboFile.read(reinterpret_cast<char*>(tmp.data()), sizeof(uint16_t) * numIndices);
plector_indices.reserve(numIndices);
for (auto it = tmp.cbegin(); it != tmp.cend(); ++it)
{
plector_indices.emplace_back(*it);
}
}
BoundingBox::CreateFromPoints(bounds, plector_vertices.size(), reinterpret_cast<const XMFLOAT3*>(plector_vertices.data()), sizeof(WFR_Vertex));
vboFile.close();
return S_OK;
}
// Closes LoadVBO();
struct CWS_Material
{
DirectX::XMFLOAT3 vAmbient;
DirectX::XMFLOAT3 vDiffuse;
DirectX::XMFLOAT3 vSpecular;
uint32_t nShininess;
float fAlpha;
bool bSpecular;
wchar_t strName[MAX_PATH];
wchar_t strTexture[MAX_PATH];
CWS_Material() DIRECTX_NOEXCEPT : // Class ctor for "CWS_Material" class;
vAmbient(0.2f, 0.2f, 0.2f),
vDiffuse(0.8f, 0.8f, 0.8f),
vSpecular(1.0f, 1.0f, 1.0f),
nShininess(0),
fAlpha(1.f),
bSpecular(false)
{
memset(strName, 0, sizeof(strName)); memset(strTexture, 0, sizeof(strTexture));
}
};
std::vector<WFR_Vertex> plector_vertices; // ghv : plector = "public not private" + "std::vector";
std::vector<index_t> plector_indices;
std::vector<uint32_t> plector_attributes;
std::vector<CWS_Material> plector_materials;
std::wstring name;
bool hasNormals;
bool hasTexcoords;
DirectX::BoundingBox bounds;
private:
typedef std::unordered_multimap<UINT, UINT> VertexCache;
DWORD AddVertex(UINT hash, WFR_Vertex* pVertex, VertexCache& cache)
{
auto f = cache.equal_range(hash);
for (auto it = f.first; it != f.second; ++it)
{
auto& tv = plector_vertices[it->second];
if (0 == memcmp(pVertex, &tv, sizeof(WFR_Vertex)))
{
return it->second;
}
}
DWORD index = static_cast<UINT>(plector_vertices.size());
plector_vertices.emplace_back(*pVertex);
VertexCache::value_type entry(hash, index);
cache.insert(entry);
return index;
}
// Closes AddVertex();
};
// Closes class WaveFrontReader;
| 28.028951 | 152 | 0.437855 | [
"mesh",
"object",
"vector"
] |
eaa37cf3396ce59420025557fde183240f8db8e4 | 1,432 | h | C | src/ofApp.h | saba12m/VenationDiagram | 963c9ff2ec83a889cf8b463f3d9d9b2427801b4b | [
"MIT"
] | 1 | 2021-02-23T11:06:13.000Z | 2021-02-23T11:06:13.000Z | src/ofApp.h | saba12m/VenationDiagram | 963c9ff2ec83a889cf8b463f3d9d9b2427801b4b | [
"MIT"
] | null | null | null | src/ofApp.h | saba12m/VenationDiagram | 963c9ff2ec83a889cf8b463f3d9d9b2427801b4b | [
"MIT"
] | null | null | null | #pragma once
#include "ofMain.h"
#include "Venation2DOpen.h"
#include "Venation2DClosed.h"
#include "Venation3D.h"
#include "Venation3DOpen.h"
#include "Venation3DClosed.h"
#include "BranchMesh.h"
//#include "ofxAssimpModelLoader.h"
//#include "ofxRayTriangleIntersection.h"
class ofApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void mouseEntered(int x, int y);
void mouseExited(int x, int y);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
private:
ofEasyCam cam;
ofLight dirLight;
// Venation2DOpen v;
// Venation2DClosed v;
// Venation3DOpen v;
Venation3DClosed v;
Venation3D vv;
bool run;
BranchMesh m;
// // mesh
// ofxAssimpModelLoader geometry;
// ofMesh geometryMesh;
// ofxAssimpModelLoader surface;
// ofMesh surfaceMesh;
// ofBoxPrimitive boundingBox;
//
// vector <ofVec3f> points;
// // raycasting
// ofxRayTriangleIntersection rtIntersect;
// vector <FaceTri> tris;
// vector <Ray> rays;
// vector <ofVec3f> pIn;
// vector <ofVec3f> pOut;
};
| 22.030769 | 49 | 0.656425 | [
"mesh",
"geometry",
"vector"
] |
eaad0a267e34a9888043629f1e7e330c16ddab93 | 9,955 | h | C | src/game/client/c_team_objectiveresource.h | DeadZoneLuna/csso-src | 6c978ea304ee2df3796bc9c0d2916bac550050d5 | [
"Unlicense"
] | 3 | 2020-12-15T23:09:28.000Z | 2022-01-13T15:55:04.000Z | src/game/client/c_team_objectiveresource.h | cafeed28/what | 08e51d077f0eae50afe3b592543ffa07538126f5 | [
"Unlicense"
] | null | null | null | src/game/client/c_team_objectiveresource.h | cafeed28/what | 08e51d077f0eae50afe3b592543ffa07538126f5 | [
"Unlicense"
] | 2 | 2021-07-14T11:03:04.000Z | 2021-11-08T08:32:17.000Z | //========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
//=============================================================================
#ifndef C_TEAM_OBJECTIVERESOURCE_H
#define C_TEAM_OBJECTIVERESOURCE_H
#ifdef _WIN32
#pragma once
#endif
#include "shareddefs.h"
#include "const.h"
#include "c_baseentity.h"
#include <igameresources.h>
#define TEAM_ARRAY( index, team ) (index + (team * MAX_CONTROL_POINTS))
//-----------------------------------------------------------------------------
// Purpose: An entity that networks the state of the game's objectives.
// May contain data for objectives that aren't used by your mod, but
// the extra data will never be networked as long as it's zeroed out.
//-----------------------------------------------------------------------------
class C_BaseTeamObjectiveResource : public C_BaseEntity
{
DECLARE_CLASS( C_BaseTeamObjectiveResource, C_BaseEntity );
public:
DECLARE_CLIENTCLASS();
C_BaseTeamObjectiveResource();
virtual ~C_BaseTeamObjectiveResource();
public:
virtual void ClientThink();
virtual void OnPreDataChanged( DataUpdateType_t updateType );
virtual void OnDataChanged( DataUpdateType_t updateType );
void UpdateControlPoint( const char *pszEvent, int index = -1 );
float GetCPCapPercentage( int index );
int GetNumControlPoints( void ) { return m_iNumControlPoints; }
int GetNumControlPointsOwned( void );
void SetOwningTeam( int index, int team );
virtual void SetCappingTeam( int index, int team );
void SetCapLayout( const char *pszLayout );
// Is the point visible in the objective display
bool IsCPVisible( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_bCPIsVisible[index_];
}
bool IsCPBlocked( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_bBlocked[index_];
}
// Get the world location of this control point
Vector& GetCPPosition( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_vCPPositions[index_];
}
int GetOwningTeam( int index_ )
{
if ( index_ >= m_iNumControlPoints )
return TEAM_UNASSIGNED;
return m_iOwner[index_];
}
int GetCappingTeam( int index_ )
{
if ( index_ >= m_iNumControlPoints )
return TEAM_UNASSIGNED;
return m_iCappingTeam[index_];
}
int GetTeamInZone( int index_ )
{
if ( index_ >= m_iNumControlPoints )
return TEAM_UNASSIGNED;
return m_iTeamInZone[index_];
}
// Icons
int GetCPCurrentOwnerIcon( int index_, int iOwner )
{
Assert( index_ < m_iNumControlPoints );
return GetIconForTeam( index_, iOwner );
}
int GetCPCappingIcon( int index_ )
{
Assert( index_ < m_iNumControlPoints );
int iCapper = GetCappingTeam( index_ );
Assert( iCapper != TEAM_UNASSIGNED );
return GetIconForTeam( index_, iCapper );
}
// Icon for the specified team
int GetIconForTeam( int index_, int team )
{
Assert( index_ < m_iNumControlPoints );
return m_iTeamIcons[ TEAM_ARRAY(index_,team) ];
}
// Overlay for the specified team
int GetOverlayForTeam( int index_, int team )
{
Assert( index_ < m_iNumControlPoints );
return m_iTeamOverlays[ TEAM_ARRAY(index_,team) ];
}
// Number of players in the area
int GetNumPlayersInArea( int index_, int team )
{
Assert( index_ < m_iNumControlPoints );
return m_iNumTeamMembers[ TEAM_ARRAY(index_,team) ];
}
// get the required cappers for the passed team
int GetRequiredCappers( int index_, int team )
{
Assert( index_ < m_iNumControlPoints );
return m_iTeamReqCappers[ TEAM_ARRAY(index_,team) ];
}
// Base Icon for the specified team
int GetBaseIconForTeam( int team )
{
Assert( team < MAX_TEAMS );
return m_iTeamBaseIcons[ team ];
}
int GetBaseControlPointForTeam( int iTeam )
{
Assert( iTeam < MAX_TEAMS );
return m_iBaseControlPoints[iTeam];
}
int GetPreviousPointForPoint( int index_, int team, int iPrevIndex )
{
Assert( index_ < m_iNumControlPoints );
Assert( iPrevIndex >= 0 && iPrevIndex < MAX_PREVIOUS_POINTS );
int iIntIndex = iPrevIndex + (index_ * MAX_PREVIOUS_POINTS) + (team * MAX_CONTROL_POINTS * MAX_PREVIOUS_POINTS);
return m_iPreviousPoints[ iIntIndex ];
}
bool TeamCanCapPoint( int index_, int team )
{
Assert( index_ < m_iNumControlPoints );
return m_bTeamCanCap[ TEAM_ARRAY( index_, team ) ];
}
const char *GetCapLayoutInHUD( void ) { return m_pszCapLayoutInHUD; }
void GetCapLayoutCustomPosition( float& flCustomPositionX, float& flCustomPositionY ) { flCustomPositionX = m_flCustomPositionX; flCustomPositionY = m_flCustomPositionY; }
bool PlayingMiniRounds( void ){ return m_bPlayingMiniRounds; }
bool IsInMiniRound( int index_ ) { return m_bInMiniRound[index_]; }
int GetCapWarningLevel( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_iWarnOnCap[index_];
}
int GetCPGroup( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_iCPGroup[index_];
}
const char *GetWarnSound( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_iszWarnSound[index_];
}
virtual const char *GetGameSpecificCPCappingSwipe( int index_, int iCappingTeam )
{
// You need to implement this in your game's objective resource.
Assert(0);
return NULL;
}
virtual const char *GetGameSpecificCPBarFG( int index_, int iOwningTeam )
{
// You need to implement this in your game's objective resource.
Assert(0);
return NULL;
}
virtual const char *GetGameSpecificCPBarBG( int index_, int iCappingTeam )
{
// You need to implement this in your game's objective resource.
Assert(0);
return NULL;
}
bool CapIsBlocked( int index_ );
int GetTimerToShowInHUD( void ) { return m_iTimerToShowInHUD; }
int GetStopWatchTimer( void ) { return m_iStopWatchTimer; }
float GetPathDistance( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_flPathDistance[index_];
}
bool GetCPLocked( int index_ )
{
Assert( index_ < m_iNumControlPoints );
return m_bCPLocked[index_];
}
bool GetTrackAlarm( int index_ )
{
Assert( index_ < TEAM_TRAIN_MAX_TEAMS );
return m_bTrackAlarm[index_];
}
int GetNumNodeHillData( int team ){ return ( team < TEAM_TRAIN_MAX_TEAMS ) ? m_nNumNodeHillData[team] : 0; }
void GetHillData( int team, int hill, float &flStart, float &flEnd )
{
if ( hill < TEAM_TRAIN_MAX_HILLS && team < TEAM_TRAIN_MAX_TEAMS )
{
int index_ = ( hill * TEAM_TRAIN_FLOATS_PER_HILL ) + ( team * TEAM_TRAIN_MAX_HILLS * TEAM_TRAIN_FLOATS_PER_HILL );
if ( index_ < TEAM_TRAIN_HILLS_ARRAY_SIZE - 1 ) // - 1 because we want to look at 2 entries
{
flStart = m_flNodeHillData[index_];
flEnd = m_flNodeHillData[index_ +1];
}
}
}
void SetTrainOnHill( int team, int hill, bool state )
{
if ( team < TEAM_TRAIN_MAX_TEAMS && hill < TEAM_TRAIN_MAX_HILLS )
{
int index_ = hill + ( team * TEAM_TRAIN_MAX_HILLS );
m_bTrainOnHill[index_] = state;
}
}
bool IsTrainOnHill( int team, int hill )
{
if ( team < TEAM_TRAIN_MAX_TEAMS && hill < TEAM_TRAIN_MAX_HILLS )
{
return m_bTrainOnHill[hill + ( team * TEAM_TRAIN_MAX_HILLS )];
}
return false;
}
bool IsHillDownhill( int team, int hill )
{
if ( team < TEAM_TRAIN_MAX_TEAMS && hill < TEAM_TRAIN_MAX_HILLS )
{
return m_bHillIsDownhill[hill + ( team * TEAM_TRAIN_MAX_HILLS )];
}
return true;
}
protected:
int m_iTimerToShowInHUD;
int m_iStopWatchTimer;
int m_iNumControlPoints;
int m_iPrevNumControlPoints;
bool m_bPlayingMiniRounds;
bool m_bControlPointsReset;
bool m_bOldControlPointsReset;
int m_iUpdateCapHudParity;
int m_iOldUpdateCapHudParity;
// data variables
Vector m_vCPPositions[MAX_CONTROL_POINTS];
bool m_bCPIsVisible[MAX_CONTROL_POINTS];
float m_flLazyCapPerc[MAX_CONTROL_POINTS];
float m_flOldLazyCapPerc[MAX_CONTROL_POINTS];
int m_iTeamIcons[MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS];
int m_iTeamOverlays[MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS];
int m_iTeamReqCappers[MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS];
float m_flTeamCapTime[MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS];
int m_iPreviousPoints[ MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS * MAX_PREVIOUS_POINTS ];
bool m_bTeamCanCap[ MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS ];
int m_iTeamBaseIcons[MAX_TEAMS];
int m_iBaseControlPoints[MAX_TEAMS];
bool m_bInMiniRound[MAX_CONTROL_POINTS];
int m_iWarnOnCap[MAX_CONTROL_POINTS];
char m_iszWarnSound[MAX_CONTROL_POINTS][255];
float m_flPathDistance[MAX_CONTROL_POINTS];
int m_iCPGroup[MAX_CONTROL_POINTS];
bool m_bCPLocked[MAX_CONTROL_POINTS];
float m_flUnlockTimes[MAX_CONTROL_POINTS];
float m_flOldUnlockTimes[MAX_CONTROL_POINTS];
float m_flCPTimerTimes[MAX_CONTROL_POINTS];
float m_flOldCPTimerTimes[MAX_CONTROL_POINTS];
// state variables
int m_iNumTeamMembers[MAX_CONTROL_POINTS * MAX_CONTROL_POINT_TEAMS];
int m_iCappingTeam[MAX_CONTROL_POINTS];
int m_iTeamInZone[MAX_CONTROL_POINTS];
bool m_bBlocked[MAX_CONTROL_POINTS];
int m_iOwner[MAX_CONTROL_POINTS];
bool m_bCPCapRateScalesWithPlayers[MAX_CONTROL_POINTS];
// client calculated state
float m_flCapTimeLeft[MAX_CONTROL_POINTS];
float m_flCapLastThinkTime[MAX_CONTROL_POINTS];
bool m_bWarnedOnFinalCap[MAX_CONTROL_POINTS];
float m_flLastCapWarningTime[MAX_CONTROL_POINTS];
char m_pszCapLayoutInHUD[MAX_CAPLAYOUT_LENGTH];
float m_flOldCustomPositionX;
float m_flOldCustomPositionY;
float m_flCustomPositionX;
float m_flCustomPositionY;
// hill data for multi-escort payload maps
int m_nNumNodeHillData[TEAM_TRAIN_MAX_TEAMS];
float m_flNodeHillData[TEAM_TRAIN_HILLS_ARRAY_SIZE];
bool m_bTrainOnHill[TEAM_TRAIN_MAX_HILLS*TEAM_TRAIN_MAX_TEAMS];
bool m_bTrackAlarm[TEAM_TRAIN_MAX_TEAMS];
bool m_bHillIsDownhill[TEAM_TRAIN_MAX_HILLS*TEAM_TRAIN_MAX_TEAMS];
};
extern C_BaseTeamObjectiveResource *g_pObjectiveResource;
inline C_BaseTeamObjectiveResource *ObjectiveResource()
{
return g_pObjectiveResource;
}
#endif // C_TEAM_OBJECTIVERESOURCE_H
| 28.606322 | 172 | 0.737217 | [
"vector"
] |
eaae4880be806e889fa3aece83818f606a50e6cf | 1,784 | h | C | sources/Renderer/Vulkan/RenderState/VKQueryHeap.h | beldenfox/LLGL | 3a54125ebfa79bb06fccf8c413d308ff22186b52 | [
"BSD-3-Clause"
] | 1,403 | 2016-09-28T21:48:07.000Z | 2022-03-31T23:58:57.000Z | sources/Renderer/Vulkan/RenderState/VKQueryHeap.h | beldenfox/LLGL | 3a54125ebfa79bb06fccf8c413d308ff22186b52 | [
"BSD-3-Clause"
] | 70 | 2016-10-13T20:15:58.000Z | 2022-01-12T23:51:12.000Z | sources/Renderer/Vulkan/RenderState/VKQueryHeap.h | beldenfox/LLGL | 3a54125ebfa79bb06fccf8c413d308ff22186b52 | [
"BSD-3-Clause"
] | 122 | 2016-10-23T15:33:44.000Z | 2022-03-07T07:41:23.000Z | /*
* VKQueryHeap.h
*
* This file is part of the "LLGL" project (Copyright (c) 2015-2019 by Lukas Hermanns)
* See "LICENSE.txt" for license information.
*/
#ifndef LLGL_VK_QUERY_HEAP_H
#define LLGL_VK_QUERY_HEAP_H
#include <LLGL/QueryHeap.h>
#include "../Vulkan.h"
#include "../VKPtr.h"
namespace LLGL
{
// Base class for Vulkan query heaps (sub class: VKPredicateQueryHeap).
class VKQueryHeap : public QueryHeap
{
public:
VKQueryHeap(const VKPtr<VkDevice>& device, const QueryHeapDescriptor& desc);
// Returns the Vulkan VkQueryPool object.
inline VkQueryPool GetVkQueryPool() const
{
return queryPool_.Get();
}
// Returns the control flags to be used for vkCmdBeginQuery.
inline VkQueryControlFlags GetControlFlags() const
{
return controlFlags_;
}
// Returns the number of queries per group.
inline std::uint32_t GetGroupSize() const
{
return groupSize_;
}
// Returns the number of native queries.
inline std::uint32_t GetNumQueries() const
{
return numQueries_;
}
// Returns true if this query heap has predicates for conditional rendering, i.e. it can be casted to <VKPredicateQueryHeap>.
inline bool HasPredicates() const
{
return hasPredicates_;
}
private:
VKPtr<VkQueryPool> queryPool_;
VkQueryControlFlags controlFlags_ = 0;
std::uint32_t groupSize_ = 1;
std::uint32_t numQueries_ = 0;
bool hasPredicates_ = false;
};
} // /namespace LLGL
#endif
// ================================================================================
| 22.871795 | 133 | 0.584641 | [
"object"
] |
eab0d317df4134e7400407b4da3a3426e73cde8a | 45,603 | h | C | mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h | httpsgithu/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | 1 | 2022-02-23T09:13:43.000Z | 2022-02-23T09:13:43.000Z | mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | mindspore/ccsrc/frontend/optimizer/irpass/incorporate_getitem.h | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H_
#define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H_
#include <algorithm>
#include <memory>
#include <set>
#include <vector>
#include <utility>
#include "utils/hash_map.h"
#include "ir/func_graph.h"
#include "ir/func_graph_cloner.h"
#include "frontend/optimizer/optimizer_caller.h"
#include "frontend/optimizer/anf_visitor.h"
#include "frontend/operator/ops.h"
#include "frontend/optimizer/irpass.h"
#include "frontend/optimizer/optimizer.h"
namespace mindspore {
namespace opt {
namespace irpass {
namespace internal {
class GetitemTransform {
public:
GetitemTransform() : cache_() {}
~GetitemTransform() = default;
FuncGraphPtr operator()(const AnfNodePtr &node, const FuncGraphPtr &fg, int64_t idx) {
if (cache_.find(fg) == cache_.end()) {
cache_[fg] = {};
}
auto &cache = cache_[fg];
if (cache.find(idx) == cache.end()) {
std::ostringstream ss("tp", std::ostringstream::app);
ss << idx;
auto new_fg = TransformableClone(fg, std::make_shared<TraceTransform>(ss.str()));
auto output = new_fg->output();
if (IsPrimitiveCNode(output, prim::kPrimMakeTuple)) {
auto cnode = output->cast<CNodePtr>();
auto ids = LongToSize(idx + 1);
// Inputs should be [make_tuple, item1, item2, ...], so have to offset idx in tuple_getitem by 1.
if (ids >= cnode->size()) {
MS_LOG(EXCEPTION) << "index " << ids << " is out of inputs length " << cnode->size();
}
new_fg->set_output(cnode->input(ids));
} else {
auto idx_node = NewValueNode(idx);
idx_node->set_abstract(std::make_shared<abstract::AbstractScalar>(idx));
auto output_node = new_fg->NewCNode({NewValueNode(prim::kPrimTupleGetItem), output, idx_node});
output_node->set_abstract(node->abstract());
new_fg->set_output(output_node);
}
cache[idx] = new_fg;
}
return cache[idx];
}
private:
mindspore::HashMap<FuncGraphPtr, mindspore::HashMap<int64_t, FuncGraphPtr>> cache_;
};
// Variant of GetitemTransform for the case where `fg`'s own output is a
// ValueNode<FuncGraph> (a graph whose result is another graph). Both the
// outer and the inner graph are cloned; the inner clone's output is rewritten
// to yield only the idx-th element, and the outer clone is cached/returned.
class GetItemTransformACrossGraph {
 public:
  GetItemTransformACrossGraph() : cache_() {}
  ~GetItemTransformACrossGraph() = default;
  // Returns the specialized outer clone for (fg, idx), building it on first
  // request. Returns nullptr when fg's output is not a func_graph value node
  // (in that case there is nothing to transform across; nothing is cached).
  FuncGraphPtr operator()(const AnfNodePtr &node, const FuncGraphPtr &fg, int64_t idx) {
    if (cache_.find(fg) == cache_.end()) {
      cache_[fg] = {};
    }
    auto &cache = cache_[fg];
    if (cache.find(idx) == cache.end()) {
      std::ostringstream ss("tp", std::ostringstream::app);
      ss << idx;
      auto new_fg_outer = TransformableClone(fg, std::make_shared<TraceTransform>(ss.str()));
      MS_EXCEPTION_IF_NULL(fg->manager());
      // Register the outer clone so the manager tracks its nodes/users.
      fg->manager()->AddFuncGraph(new_fg_outer);
      auto output_outer = new_fg_outer->output();
      if (!IsValueNode<FuncGraph>(output_outer)) {
        MS_LOG(WARNING) << "Output of outer graph should be a func_graph";
        return nullptr;
      }
      auto fg_inner = GetValueNode<FuncGraphPtr>(output_outer);
      auto new_fg = TransformableClone(fg_inner, std::make_shared<TraceTransform>(ss.str()));
      // The outer clone now returns the rewritten inner clone.
      new_fg_outer->set_output(NewValueNode(new_fg));
      auto output = new_fg->output();
      if (IsPrimitiveCNode(output, prim::kPrimMakeTuple)) {
        auto cnode = output->cast<CNodePtr>();
        auto ids = LongToSize(idx + 1);
        // Inputs should be [make_tuple, item1, item2, ...], so have to offset idx in tuple_getitem by 1.
        if (ids >= cnode->size()) {
          MS_LOG(EXCEPTION) << "index " << ids << " is out of inputs length " << cnode->size();
        }
        new_fg->set_output(cnode->input(ids));
      } else {
        // Not a literal make_tuple: keep an explicit tuple_getitem in the inner clone.
        auto idx_node = NewValueNode(idx);
        idx_node->set_abstract(std::make_shared<abstract::AbstractScalar>(idx));
        auto output_node = new_fg->NewCNode({NewValueNode(prim::kPrimTupleGetItem), output, idx_node});
        output_node->set_abstract(node->abstract());
        new_fg->set_output(output_node);
      }
      cache[idx] = new_fg_outer;
    }
    return cache[idx];
  }
 private:
  // Memo of outer clones keyed by (original graph, extracted index).
  mindspore::HashMap<FuncGraphPtr, mindspore::HashMap<int64_t, FuncGraphPtr>> cache_;
};
// Returns true when the resource's root func_graph still contains meta
// func_graph primitives that later passes must expand (per
// func_graph_meta_fg_prim_total); false when the optimizer carries no
// pipeline::Resource.
bool HasMoreMetaFgPrim(const OptimizerPtr &optimizer) {
  const auto resource_ptr = std::dynamic_pointer_cast<pipeline::Resource>(optimizer->resource());
  if (resource_ptr == nullptr) {
    return false;
  }
  const auto &manager = optimizer->manager();
  MS_EXCEPTION_IF_NULL(manager);
  return manager->func_graph_meta_fg_prim_total(resource_ptr->func_graph());
}
// An output can be shrunk (elements dropped) when it is either a literal
// MakeTuple cnode or a constant ValueTuple node.
bool IsOutputShrinkable(const AnfNodePtr &output) {
  return IsPrimitiveCNode(output, prim::kPrimMakeTuple) || GetValueNode<ValueTuplePtr>(output) != nullptr;
}
// Number of elements in a tuple-shaped graph output. Accepts a MakeTuple
// cnode (size minus the primitive input) or a constant ValueTuple; raises
// for anything else.
size_t GetOutputSize(const AnfNodePtr &output) {
  if (IsPrimitiveCNode(output, prim::kPrimMakeTuple)) {
    const auto &make_tuple_cnode = output->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(make_tuple_cnode);
    return make_tuple_cnode->size() - 1;  // input 0 is the make_tuple primitive itself
  }
  const auto &tuple_value = GetValueNode<ValueTuplePtr>(output);
  if (tuple_value == nullptr) {
    MS_LOG(EXCEPTION) << "fg output is not MakeTuple or ValueTuple, but: " << output->DebugString();
  }
  return tuple_value->size();
}
// Pairs a tuple_getitem user of a call with the tuple index it extracts.
struct TpCNodeAndIndex {
  // CNode {TupleGetItem, call, index}
  CNodePtr tp_cnode;
  // Value of the index input (third input) of tp_cnode.
  int64_t index;
};
// After a callee's tuple output has been shrunk, renumber the surviving
// tuple_getitem users of `fg_call_cnode` to their new dense positions
// (tp_cnodes_and_index is sorted by old index, so position i is the new
// index). Returns the new position of `current_index` so the caller can
// rebuild its own getitem node.
int64_t UpdateUserNodeIndex(const CNodePtr &fg_call_cnode, const int64_t current_index,
                            const std::vector<TpCNodeAndIndex> &tp_cnodes_and_index) {
  const auto &manager = fg_call_cnode->func_graph()->manager();
  MS_EXCEPTION_IF_NULL(manager);
  int64_t new_index = current_index;
  // Batch all edge rewrites in a single transaction so the manager observes
  // one consistent update.
  auto txn = manager->Transact();
  for (int64_t i = 0; i < SizeToLong(tp_cnodes_and_index.size()); ++i) {
    const auto &cnode_and_index = tp_cnodes_and_index[i];
    if (cnode_and_index.index != i) {
      // Input 2 of a tuple_getitem cnode is the index value node.
      constexpr auto kInputIndex = 2;
      txn.SetEdge(cnode_and_index.tp_cnode, kInputIndex, NewValueNode(i));
    }
    if (cnode_and_index.index == current_index) {
      new_index = i;
    }
  }
  txn.Commit();
  return new_index;
}
// Builds the shrunk AbstractTuple keeping only the elements named by
// `tp_cnodes_and_index` (in that order). Returns nullptr when the original
// abstract is absent or not a tuple, so callers can skip the update.
AbstractBasePtr ShrinkAbstract(const AbstractBasePtr &original_abstract,
                               const std::vector<TpCNodeAndIndex> &tp_cnodes_and_index) {
  if (original_abstract == nullptr || !original_abstract->isa<abstract::AbstractTuple>()) {
    return nullptr;
  }
  const auto &abs_tuple = original_abstract->cast<abstract::AbstractTuplePtr>();
  MS_EXCEPTION_IF_NULL(abs_tuple);
  const auto &abs_tuple_elements = abs_tuple->elements();
  const int64_t before_shrink_tuple_size = SizeToLong(abs_tuple_elements.size());
  AbstractBasePtrList shrunk_abstract_elements;
  shrunk_abstract_elements.reserve(tp_cnodes_and_index.size());
  for (const auto &node_and_index : tp_cnodes_and_index) {
    if (node_and_index.index >= before_shrink_tuple_size) {
      MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
                        << ", abstract tuple size: " << before_shrink_tuple_size;
    }
    shrunk_abstract_elements.push_back(abs_tuple_elements[node_and_index.index]);
  }
  return std::make_shared<abstract::AbstractTuple>(shrunk_abstract_elements);
}
// Clones `fg` and shrinks its tuple output to only the elements named by
// `tp_cnodes_and_index` (sorted by index), preserving that order. Handles
// both output shapes accepted by IsOutputShrinkable: a MakeTuple cnode and a
// constant ValueTuple. The clone's output abstract is shrunk to match.
// (Function name keeps the historical spelling "Unsed".)
FuncGraphPtr ShrinkUnsedOutput(const FuncGraphPtr &fg, const std::vector<TpCNodeAndIndex> &tp_cnodes_and_index) {
  const auto &manager = fg->manager();
  MS_EXCEPTION_IF_NULL(manager);
  auto new_fg = TransformableClone(fg, std::make_shared<TraceTransform>("tp_use"));
  auto new_fg_output = new_fg->output();
  AnfNodePtr shrunk_output = nullptr;
  int64_t before_shrink_inputs_size = 0;
  if (IsPrimitiveCNode(new_fg_output, prim::kPrimMakeTuple)) {
    // Shrink output;
    auto new_fg_output_cnode = new_fg_output->cast<CNodePtr>();
    const auto &new_fg_output_inputs = new_fg_output_cnode->inputs();
    constexpr auto kMinimalSize = 2;
    if (new_fg_output_inputs.size() <= kMinimalSize) {
      MS_LOG(EXCEPTION) << "New fg output should have at least 2 elements, but: " << new_fg_output->DebugString();
    }
    before_shrink_inputs_size = SizeToLong(new_fg_output_inputs.size() - 1);
    AnfNodePtrList shrunk_inputs{NewValueNode({prim::kPrimMakeTuple})};
    // Bypass maketuple primitive in new_fg_output_inputs;
    std::transform(tp_cnodes_and_index.cbegin(), tp_cnodes_and_index.cend(), std::back_inserter(shrunk_inputs),
                   [new_fg_output, new_fg_output_inputs, before_shrink_inputs_size](const auto &node_and_index) {
                     if (node_and_index.index >= before_shrink_inputs_size) {
                       MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
                                         << ", output: " << new_fg_output->DebugString();
                     }
                     return new_fg_output_inputs[node_and_index.index + 1];
                   });
    shrunk_output = new_fg->NewCNode(shrunk_inputs);
  } else {
    // Constant ValueTuple output: build a smaller ValueTuple value node.
    auto value_tuple = GetValueNode<ValueTuplePtr>(new_fg_output);
    if (value_tuple == nullptr) {
      MS_LOG(EXCEPTION) << "New fg output is not MakeTuple or ValueTuple, but " << new_fg_output->DebugString();
    }
    ValuePtrList shrunk_inputs;
    before_shrink_inputs_size = SizeToLong(value_tuple->size());
    std::transform(tp_cnodes_and_index.cbegin(), tp_cnodes_and_index.cend(), std::back_inserter(shrunk_inputs),
                   [new_fg_output, value_tuple, before_shrink_inputs_size](const auto &node_and_index) {
                     if (node_and_index.index >= before_shrink_inputs_size) {
                       MS_LOG(EXCEPTION) << "index should be less than inputs size, index: " << node_and_index.index
                                         << ", output: " << new_fg_output->DebugString();
                     }
                     return (*value_tuple)[node_and_index.index];
                   });
    shrunk_output = NewValueNode(std::make_shared<ValueTuple>(shrunk_inputs));
  }
  auto shrunk_abstract = internal::ShrinkAbstract(new_fg_output->abstract(), tp_cnodes_and_index);
  MS_EXCEPTION_IF_NULL(shrunk_abstract);
  shrunk_output->set_abstract(shrunk_abstract);
  new_fg->set_output(shrunk_output);
  MS_LOG(DEBUG) << "Partly item used; original size: " << before_shrink_inputs_size
                << ", new size: " << tp_cnodes_and_index.size() << ", fg: " << fg->ToString() << ", new graph"
                << new_fg->ToString();
  return new_fg;
}
// Hash functor for (FuncGraph, index-vector) keys used to memoize shrunk
// graphs in the passes below.
struct FuncGraphIntVectorPairHasher {
  // Folds at most the first four indices into the hash; enough
  // discrimination for cache keys without scanning long vectors.
  std::size_t Int64VectorHash(const std::vector<int64_t> &int_vector) const {
    constexpr std::size_t kMaxElementsNum = 4;
    std::size_t hash_value = 0;
    for (std::size_t i = 0; i < int_vector.size() && i < kMaxElementsNum; ++i) {
      hash_value = hash_combine(hash_value, std::hash<int64_t>{}(int_vector[i]));
    }
    return hash_value;
  }
  std::size_t operator()(const std::pair<FuncGraphPtr, std::vector<int64_t>> &p) const {
    return hash_combine(std::hash<FuncGraphPtr>{}(p.first), Int64VectorHash(p.second));
  }
};
// Decides whether a tuple-returning call must be transformed rather than
// shrunk: true when slot 0 is an EnvType scalar, or when slot 1 holds a
// function abstract (the fprop case: MakeTuple(xx, bprop_fg)).
bool ShouldTransform(const AnfNodePtr &node, const std::vector<TpCNodeAndIndex> &tp_cnodes_and_index) {
  if (node->abstract() == nullptr || !node->abstract()->isa<abstract::AbstractTuple>()) {
    return false;
  }
  const auto &abs_tuple = *(node->abstract()->cast<abstract::AbstractTuplePtr>());
  const bool env_in_first_slot = tp_cnodes_and_index[0].index == 0 && abs_tuple.size() > 0 &&
                                 abs_tuple[0]->isa<abstract::AbstractScalar>() &&
                                 abs_tuple[0]->GetTypeTrack()->isa<EnvType>();
  if (env_in_first_slot) {
    return true;
  }
  // fprop_fg will return MakeTuple(xx, bprop_fg).
  return tp_cnodes_and_index.size() > 1 && tp_cnodes_and_index[1].index == 1 && abs_tuple.size() > 1 &&
         abs_tuple[1]->isa<abstract::AbstractFunction>();
}
// Incorporate getitem if the indexed node is a ZerosLike node, so another opt pass AddN(MakeTuple(Xs, ZerosLike))
// can work.
bool AlwaysTransformThisIndex(const AnfNodePtr &output, const int64_t index) {
  if (!IsPrimitiveCNode(output, prim::kPrimMakeTuple)) {
    return false;
  }
  const auto &make_tuple_cnode = output->cast<CNodePtr>();
  MS_EXCEPTION_IF_NULL(make_tuple_cnode);
  if (index >= SizeToLong(make_tuple_cnode->size() - 1)) {
    MS_LOG(EXCEPTION) << "Index of GetItem: " << index
                      << " exceeds size of MakeTuple: " << make_tuple_cnode->DebugString();
  }
  // Offset by 1 to skip the make_tuple primitive input.
  return IsPrimitiveCNode(make_tuple_cnode->input(LongToSize(index + 1)), prim::kPrimZerosLike);
}
} // namespace internal
// {prim::kPrimTupleGetItem, {G, Xs}, C}
// Eliminates `tuple_getitem(G(Xs), C)` by either (a) shrinking G's tuple
// output to only the elements actually consumed (when all users of the call
// are getitems and no more meta-fg expansion is pending), or (b) cloning G
// into a graph that returns only element C (via GetitemTransform).
class IncorporateGetitem : public AnfVisitor {
 public:
  IncorporateGetitem() : getitem_transform_() {}
  ~IncorporateGetitem() override = default;
  // Pattern entry: matches {tuple_getitem, {G, Xs}, C}; returns the
  // replacement node or nullptr if the pattern doesn't apply.
  AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
    Reset();
    AnfVisitor::Match(prim::kPrimTupleGetItem, {IsCNode, IsValueNode<Int64Imm>})(node);
    // Bail out on no match or when G is flagged not to be inlined/recomputed.
    if (node->func_graph() == nullptr || idx_ == -1 || fg_ == nullptr || fg_->has_flag(FUNC_GRAPH_FLAG_NO_INLINE) ||
        fg_->has_flag(FUNC_GRAPH_FLAG_DEFER_INLINE) || fg_->has_flag(FUNC_GRAPH_OUTPUT_NO_RECOMPUTE)) {
      return nullptr;
    }
    const auto &manager = fg_->manager();
    MS_EXCEPTION_IF_NULL(manager);
    // ZerosLike elements are always incorporated so AddN folding can fire.
    if (internal::AlwaysTransformThisIndex(fg_->output(), idx_)) {
      return TransformFuncGraph(manager, node);
    }
    // This node had been substituted.
    if (processed_nodes_.find(fg_call_cnode_) != processed_nodes_.end()) {
      MS_LOG(DEBUG) << "fg call with same cnode is already replaced, node: " << node->DebugString()
                    << ", fg_call: " << fg_call_cnode_->DebugString();
      return nullptr;
    }
    bool output_is_shrinkable = internal::IsOutputShrinkable(fg_->output());
    std::vector<internal::TpCNodeAndIndex> tp_cnodes_and_index;
    auto fg_call_cnode_users_counter = MultipleUse(fg_call_cnode_, fg_, &tp_cnodes_and_index);
    bool multiple_use = (tp_cnodes_and_index.size() > 1);
    // Shrink only if every user of the call is a tuple_getitem.
    if (output_is_shrinkable && multiple_use && (tp_cnodes_and_index.size() == fg_call_cnode_users_counter)) {
      if (!internal::ShouldTransform(fg_call_cnode_, tp_cnodes_and_index) && !internal::HasMoreMetaFgPrim(optimizer)) {
        MS_LOG(DEBUG) << "No more j or vmap and multiple use, will shrink, node: " << node->DebugString()
                      << ", fg_call: " << fg_call_cnode_->DebugString();
        const auto output_size = internal::GetOutputSize(fg_->output());
        if (fg_call_cnode_users_counter == output_size) {
          processed_nodes_.emplace(fg_call_cnode_);
          MS_LOG(DEBUG) << "All elements in output is used, no need to transform, node: " << node->DebugString()
                        << ", fg_call: " << fg_call_cnode_->DebugString();
          return nullptr;
        }
        auto new_node = ShrinkFuncGraphOutput(node, tp_cnodes_and_index);
        if (new_node != nullptr) {
          return new_node;
        }
      }
    }
    MS_LOG(DEBUG) << "Cannot shrink, transform_getitem, node: " << node->DebugString()
                  << ", fg_call: " << fg_call_cnode_->DebugString();
    return TransformFuncGraph(manager, node);
  }
  // Collects every tuple_getitem user of `fg_call` into *cnodes_and_index
  // (sorted by index) and returns the total user count, so the caller can
  // tell whether getitems are the only users.
  size_t MultipleUse(const CNodePtr &fg_call, const FuncGraphPtr &fg,
                     std::vector<internal::TpCNodeAndIndex> *cnodes_and_index) const {
    const auto &manager = fg->manager();
    MS_EXCEPTION_IF_NULL(manager);
    auto &cnode_and_index_vector = *cnodes_and_index;
    std::set<int64_t> index_set;
    std::size_t total_usage = 0;
    const auto &node_users_map = manager->node_users();
    const auto &it = node_users_map.find(fg_call);
    if (it == node_users_map.end()) {
      return 0;
    }
    const auto &node_users = it->second;
    for (const auto &user : node_users) {
      if (IsPrimitiveCNode(user.first, prim::kPrimTupleGetItem)) {
        const auto &cnode = user.first->cast<CNodePtr>();
        if (cnode->input(2)->isa<ValueNode>()) {
          auto idx = GetValue<int64_t>(cnode->input(2)->cast<ValueNodePtr>()->value());
          cnode_and_index_vector.push_back({cnode, idx});
          index_set.insert(idx);
          total_usage++;
        } else {
          MS_LOG(EXCEPTION) << "tuple_getitem index is not valuenode, but: " << user.first->DebugString();
        }
      } else {
        MS_LOG(DEBUG) << "fg_call user is not tuple_getitem, user: " << user.first->DebugString();
      }
    }
    if (index_set.size() != total_usage) {
      MS_LOG(DEBUG) << "some index usage is duplicated, total_usage: " << total_usage;
      MS_LOG(DEBUG) << "index_set:";
      for (auto idx : index_set) {
        MS_LOG(DEBUG) << " " << idx;
      }
    }
    // sort by index;
    std::sort(cnode_and_index_vector.begin(), cnode_and_index_vector.end(),
              [](const auto &tp1, const auto &tp2) { return tp1.index < tp2.index; });
    return node_users.size();
  }
  // Replaces the callee with a shrunk clone (reusing the per-(fg, indices)
  // memo when possible), renumbers all getitem users, and returns the new
  // getitem for `node`. Returns nullptr when shrinking fails.
  AnfNodePtr ShrinkFuncGraphOutput(const AnfNodePtr &node,
                                   const std::vector<internal::TpCNodeAndIndex> &tp_cnodes_and_index) {
    const auto &manager = fg_->manager();
    MS_EXCEPTION_IF_NULL(manager);
    std::vector<int64_t> index_vector;
    (void)std::transform(tp_cnodes_and_index.begin(), tp_cnodes_and_index.end(), std::back_inserter(index_vector),
                         [](const auto &cnode_and_index) { return cnode_and_index.index; });
    auto iter = processed_fgs_.find(std::make_pair(fg_, index_vector));
    if (iter != processed_fgs_.end()) {
      MS_LOG(DEBUG) << "fg is already processed, just update caller index, node: " << node->DebugString()
                    << ", fg_call: " << fg_call_cnode_->DebugString();
      MS_LOG(DEBUG) << "original fg: " << fg_->ToString() << ", processed_fg: " << iter->second->ToString();
      processed_nodes_.emplace(fg_call_cnode_);
      manager->SetEdge(fg_call_cnode_, 0, NewValueNode(iter->second));
      auto shrunk_abstract = internal::ShrinkAbstract(fg_call_cnode_->abstract(), tp_cnodes_and_index);
      if (shrunk_abstract != nullptr) {
        fg_call_cnode_->set_abstract(shrunk_abstract);
      }
      auto new_idx = internal::UpdateUserNodeIndex(fg_call_cnode_, idx_, tp_cnodes_and_index);
      auto new_node =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimTupleGetItem), fg_call_cnode_, NewValueNode(new_idx)});
      new_node->set_abstract(node->abstract());
      return new_node;
    }
    const auto new_fg = internal::ShrinkUnsedOutput(fg_, tp_cnodes_and_index);
    if (new_fg != nullptr) {
      MS_LOG(DEBUG) << "fg output is shrunk, original fg: " << fg_->ToString() << ", new fg: " << new_fg->ToString();
      processed_nodes_.emplace(fg_call_cnode_);
      processed_fgs_.emplace(std::make_pair(fg_, index_vector), new_fg);
      manager->SetEdge(fg_call_cnode_, 0, NewValueNode(new_fg));
      auto shrunk_abstract = internal::ShrinkAbstract(fg_call_cnode_->abstract(), tp_cnodes_and_index);
      if (shrunk_abstract != nullptr) {
        fg_call_cnode_->set_abstract(shrunk_abstract);
      }
      auto new_idx = internal::UpdateUserNodeIndex(fg_call_cnode_, idx_, tp_cnodes_and_index);
      auto new_node =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimTupleGetItem), fg_call_cnode_, NewValueNode(new_idx)});
      new_node->set_abstract(node->abstract());
      return new_node;
    }
    MS_LOG(DEBUG) << "Shrink failed. node: " << node->DebugString()
                  << ", switch_call: " << fg_call_cnode_->DebugString();
    return nullptr;
  }
  // Fallback path: replace the getitem with a call to the idx_-specialized
  // clone of fg_ produced by getitem_transform_.
  AnfNodePtr TransformFuncGraph(const FuncGraphManagerPtr &manager, const AnfNodePtr &origin_node) {
    auto new_fg = getitem_transform_(origin_node, fg_, idx_);
    MS_LOG(DEBUG) << "Original fg: " << fg_->ToString() << ", new fg: " << new_fg->ToString();
    (void)args_.insert(args_.begin(), NewValueNode(new_fg));
    auto new_node = origin_node->func_graph()->NewCNode(args_);
    // Check if the another only usage of {G, Xs} is UpdateState{s, {G, Xs}}, if yes, replace
    // UpdateState{s, {G, Xs}} with UpdateState{s, new_node};
    auto &node_users_map = manager->node_users();
    auto it = node_users_map.find(fg_call_cnode_);
    if (it != node_users_map.end()) {
      AnfNodePtr update_state_node = nullptr;
      auto &node_users = it->second;
      if (node_users.size() == 2) {
        for (auto &node_user : node_users) {
          if (IsPrimitiveCNode(node_user.first, prim::kPrimUpdateState)) {
            update_state_node = node_user.first;
          }
        }
      }
      if (update_state_node != nullptr) {
        auto update_state_cnode = update_state_node->cast<CNodePtr>();
        // double check;
        if (update_state_cnode->input(2) == fg_call_cnode_) {
          MS_LOG(DEBUG) << "Replace UpdateState node: " << update_state_cnode->DebugString(2)
                        << ", input 2 with: " << new_node->DebugString();
          manager->SetEdge(update_state_cnode, 2, new_node);
        }
      }
    }
    new_node->set_abstract(origin_node->abstract());
    return new_node;
  }
  // Visitor leg for the call cnode {G, Xs}: records the callee graph and args.
  void Visit(const CNodePtr &cnode) override {
    if (cnode->size() == 0 || !IsValueNode<FuncGraph>(cnode->input(0))) {
      return;
    }
    fg_call_cnode_ = cnode;
    auto &inputs = cnode->inputs();
    fg_ = GetValueNode<FuncGraphPtr>(inputs[0]);
    (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(args_));
  }
  // Visitor leg for the index value node C.
  void Visit(const ValueNodePtr &vnode) override { idx_ = GetValue<int64_t>(vnode->value()); }
  void Reset() {
    idx_ = -1;
    fg_ = nullptr;
    fg_call_cnode_ = nullptr;
    args_.clear();
  }
 private:
  int64_t idx_{-1};                     // index C extracted by the matched getitem
  FuncGraphPtr fg_{nullptr};            // callee graph G
  CNodePtr fg_call_cnode_{nullptr};     // call cnode {G, Xs}
  std::vector<AnfNodePtr> args_{};      // Xs
  std::set<AnfNodePtr> processed_nodes_;  // calls already shrunk/replaced
  // Memo of shrunk clones keyed by (G, used-index-vector).
  mindspore::HashMap<std::pair<FuncGraphPtr, std::vector<int64_t>>, FuncGraphPtr,
                     internal::FuncGraphIntVectorPairHasher>
    processed_fgs_;
  internal::GetitemTransform getitem_transform_;
};
// A special case, cannot wait for TupleListGetitemDependReorder pass.
// {prim::kPrimTupleGetItem, {prim::kPrimDepend, {G, Xs}, {prim::kPrimUpdateState, Y, {G, Xs}}}, C} ->
// {prim::kPrimDepend, {tp_idx_G, Xs}, {prim::kPrimUpdateState, Y, {tp_idx_G, Xs}}} ->
// Pushes a tuple_getitem through a Depend node: the call {G, Xs} is replaced
// by a call to the idx-specialized clone of G, and the Depend (and, when the
// call also feeds the UpdateState, the UpdateState) is rebuilt around it.
class IncorporateGetitemDepend : public AnfVisitor {
 public:
  IncorporateGetitemDepend() : getitem_transform_() {}
  ~IncorporateGetitemDepend() override = default;
  // Pattern entry: returns the rebuilt Depend node, or nullptr on no match.
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    Reset();
    AnfVisitor::Match(prim::kPrimTupleGetItem, {IsCNode, IsValueNode<Int64Imm>})(node);
    if (node->func_graph() == nullptr || idx_ == -1 || fg_ == nullptr || fg_->has_flag(FUNC_GRAPH_FLAG_NO_INLINE) ||
        fg_->has_flag(FUNC_GRAPH_FLAG_DEFER_INLINE) || fg_->has_flag(FUNC_GRAPH_OUTPUT_NO_RECOMPUTE)) {
      return nullptr;
    }
    auto new_fg = getitem_transform_(node, fg_, idx_);
    (void)args_.insert(args_.begin(), NewValueNode(new_fg));
    auto new_fg_cnode = node->func_graph()->NewCNode(args_);
    AnfNodePtr new_depend_cnode;
    if (used_in_update_) {
      // The UpdateState also referenced {G, Xs}; rebuild it so its last input
      // is the new specialized call.
      auto update_cnode = depend_2nd_input_->cast<CNodePtr>();
      AnfNodePtrList new_update_inputs;
      (void)std::copy(update_cnode->inputs().begin(), update_cnode->inputs().end() - 1,
                      std::back_inserter(new_update_inputs));
      new_update_inputs.push_back(new_fg_cnode);
      auto new_update_cnode = node->func_graph()->NewCNode(new_update_inputs);
      new_depend_cnode =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimDepend), new_fg_cnode, new_update_cnode});
    } else {
      new_depend_cnode =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimDepend), new_fg_cnode, depend_2nd_input_});
    }
    new_depend_cnode->set_abstract(node->abstract());
    return new_depend_cnode;
  }
  // Visitor leg for the Depend cnode: records the callee, args, the Depend's
  // second input, and whether the UpdateState references the same call.
  void Visit(const CNodePtr &cnode) override {
    // cnode : {kPrimDepend, {G, Xs}, {kPrimUpdatestate, Y, {G, Xs}}}
    if (!IsPrimitiveCNode(cnode, prim::kPrimDepend)) {
      return;
    }
    if (cnode->size() != 3 || !IsCNode(cnode->input(1))) {
      return;
    }
    depend_2nd_input_ = cnode->input(2);
    auto fg_cnode = cnode->input(1)->cast<CNodePtr>();
    // fg_cnode : {G, Xs}
    if (!IsValueNode<FuncGraph>(fg_cnode->input(0))) {
      return;
    }
    auto &inputs = fg_cnode->inputs();
    fg_ = GetValueNode<FuncGraphPtr>(inputs[0]);
    (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(args_));
    if (!IsPrimitiveCNode(depend_2nd_input_, prim::kPrimUpdateState)) {
      return;
    }
    auto update_cnode = depend_2nd_input_->cast<CNodePtr>();
    if (update_cnode->size() != 3) {
      return;
    }
    // match {kPrimUpdateState, Y, {G, Xs}}
    if (update_cnode->input(2) == fg_cnode) {
      used_in_update_ = true;
    }
  }
  // Visitor leg for the index value node C.
  void Visit(const ValueNodePtr &vnode) override { idx_ = GetValue<int64_t>(vnode->value()); }
  void Reset() {
    idx_ = -1;
    fg_ = nullptr;
    args_.clear();
  }
 private:
  bool used_in_update_{false};          // true if UpdateState's input 2 is the same call {G, Xs}
  int64_t idx_{-1};                     // index C
  FuncGraphPtr fg_{nullptr};            // callee graph G
  AnfNodePtr depend_2nd_input_{nullptr};  // second input of the Depend node
  std::vector<AnfNodePtr> args_{};      // Xs
  internal::GetitemTransform getitem_transform_;
};
// {prim::kPrimTupleGetItem, {{prim::kPrimSwitch, X, G1, G2}, Xs}, C}
// Same idea as IncorporateGetitem but for a call through Switch: either both
// branch graphs' tuple outputs are shrunk in lockstep, or both branches are
// specialized to return only element C.
class IncorporateGetitemSwitch : public AnfVisitor {
 public:
  IncorporateGetitemSwitch() : getitem_transform_() {}
  ~IncorporateGetitemSwitch() override = default;
  // Pattern entry: returns the replacement node or nullptr on no match.
  AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
    Reset();
    // Two-phase visit: first the getitem (collects switch_ and idx_), then
    // the switch itself (collects X, G1, G2).
    is_in_get_ = true;
    AnfVisitor::Match(prim::kPrimTupleGetItem, {IsCNode, IsValueNode<Int64Imm>})(node);
    is_in_get_ = false;
    auto fg = node->func_graph();
    if (idx_ == -1 || switch_ == nullptr || fg == nullptr) {
      return nullptr;
    }
    is_in_switch_ = true;
    AnfVisitor::Match(prim::kPrimSwitch, {IsNode, IsValueNode<FuncGraph>, IsValueNode<FuncGraph>})(switch_);
    is_in_switch_ = false;
    if (g2_ == nullptr) {
      return nullptr;
    }
    if (processed_nodes_.find(switch_) != processed_nodes_.end()) {
      MS_LOG(DEBUG) << "fg in switch node has been replaced. node: " << node->DebugString()
                    << ", switch: " << switch_->DebugString();
      return nullptr;
    }
    bool g1_output_is_shrinkable = internal::IsOutputShrinkable(g1_->output());
    bool g2_output_is_shrinkable = internal::IsOutputShrinkable(g2_->output());
    auto tuple_getitem = node->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(tuple_getitem);
    const auto &switch_call = tuple_getitem->input(1);
    MS_EXCEPTION_IF_NULL(switch_call);
    const auto &switch_call_cnode = switch_call->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(switch_call_cnode);
    // If exist EnvironGet/EnvironSet in this funcgraph or
    // if g1_/g2_ is fprop func_graph and the corresponding bprop funcgraph has any EnvironGet or EnvironSet;
    std::vector<internal::TpCNodeAndIndex> tp_cnodes_and_index;
    auto switch_call_users_counter = MultipleUseOfSwitch(switch_call, fg, &tp_cnodes_and_index);
    bool multiple_use = (tp_cnodes_and_index.size() > 1);
    // Shrink only when every user of the switch call is a tuple_getitem and
    // both branch outputs are shrinkable.
    if (g1_output_is_shrinkable && g2_output_is_shrinkable && multiple_use &&
        (tp_cnodes_and_index.size() == switch_call_users_counter)) {
      if (!internal::HasMoreMetaFgPrim(optimizer) && !ExistEnvironNode(fg) && !ExistEnvironNodeInTupleItem(g1_) &&
          !ExistEnvironNodeInTupleItem(g2_) && !internal::ShouldTransform(switch_call, tp_cnodes_and_index)) {
        MS_LOG(DEBUG) << "No more j or vmap, will shrink. Node: " << node->DebugString()
                      << ", switch: " << switch_->DebugString();
        const auto g1_output_size = internal::GetOutputSize(g1_->output());
        const auto g2_output_size = internal::GetOutputSize(g2_->output());
        if (g1_output_size != g2_output_size) {
          MS_LOG(EXCEPTION) << "output of g1 and g2 should have same tuple size, but g1 output: "
                            << g1_->output()->DebugString() << ", g2 output: " << g2_->output()->DebugString();
        }
        if (switch_call_users_counter == g1_output_size) {
          processed_nodes_.emplace(switch_call);
          MS_LOG(DEBUG) << "All elements in output is used, no need to transform, node: " << node->DebugString()
                        << ", switch: " << switch_->DebugString();
          return nullptr;
        }
        auto new_node = ShrinkFuncGraphOutput(node, switch_call_cnode, tp_cnodes_and_index);
        if (new_node != nullptr) {
          return new_node;
        }
      }
    }
    // Fallback: specialize both branches to return only element idx_.
    MS_LOG(DEBUG) << "Cannot shrink output, transform_getitem_switch, node: " << node->DebugString()
                  << ", switch: " << switch_->DebugString();
    auto new_g1 = getitem_transform_(node, g1_, idx_);
    auto new_g2 = getitem_transform_(node, g2_, idx_);
    MS_LOG(DEBUG) << "Original fg1: " << g1_->ToString() << ", new_fg1: " << new_g1->ToString();
    MS_LOG(DEBUG) << "Original fg2: " << g2_->ToString() << ", new_fg2: " << new_g2->ToString();
    auto sw_node = fg->NewCNode({NewValueNode(prim::kPrimSwitch), x_, NewValueNode(new_g1), NewValueNode(new_g2)});
    (void)args_.insert(args_.begin(), sw_node);
    auto new_node = fg->NewCNode(args_);
    new_node->set_abstract(node->abstract());
    return new_node;
  }
  // Replaces both switch branches with shrunk clones (reusing the memo when
  // possible), renumbers the getitem users, and returns the new getitem for
  // `node`. Returns nullptr when shrinking fails.
  AnfNodePtr ShrinkFuncGraphOutput(const AnfNodePtr &node, const CNodePtr &switch_call_cnode,
                                   const std::vector<internal::TpCNodeAndIndex> &tp_cnodes_and_index) {
    const auto &manager = node->func_graph()->manager();
    MS_EXCEPTION_IF_NULL(manager);
    auto switch_cnode = switch_->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(switch_cnode);
    std::vector<int64_t> index_vector;
    (void)std::transform(tp_cnodes_and_index.begin(), tp_cnodes_and_index.end(), std::back_inserter(index_vector),
                         [](const auto &cnode_and_index) { return cnode_and_index.index; });
    const auto &iter1 = processed_fgs_.find(std::make_pair(g1_, index_vector));
    const auto &iter2 = processed_fgs_.find(std::make_pair(g2_, index_vector));
    if (iter1 != processed_fgs_.end() && iter2 != processed_fgs_.end()) {
      MS_LOG(DEBUG) << "fg output had been processed, no need to transform, node: " << node->DebugString()
                    << ", switch: " << switch_->DebugString();
      MS_LOG(DEBUG) << "Original fg1: " << g1_->ToString() << ", new_fg1: " << iter1->second->ToString();
      MS_LOG(DEBUG) << "Original fg2: " << g2_->ToString() << ", new_fg2: " << iter2->second->ToString();
      processed_nodes_.emplace(switch_);
      // Inputs 2 and 3 of a Switch cnode are the true/false branch graphs.
      manager->SetEdge(switch_cnode, 2, NewValueNode(iter1->second));
      manager->SetEdge(switch_cnode, 3, NewValueNode(iter2->second));
      auto shrunk_abstract = internal::ShrinkAbstract(switch_call_cnode->abstract(), tp_cnodes_and_index);
      if (shrunk_abstract != nullptr) {
        switch_call_cnode->set_abstract(shrunk_abstract);
      }
      auto new_idx = internal::UpdateUserNodeIndex(switch_call_cnode, idx_, tp_cnodes_and_index);
      auto new_node =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimTupleGetItem), switch_call_cnode, NewValueNode(new_idx)});
      new_node->set_abstract(node->abstract());
      return new_node;
    }
    const auto &new_g1 = internal::ShrinkUnsedOutput(g1_, tp_cnodes_and_index);
    const auto &new_g2 = internal::ShrinkUnsedOutput(g2_, tp_cnodes_and_index);
    if (new_g1 != nullptr && new_g2 != nullptr) {
      MS_LOG(DEBUG) << "Shrink output. node: " << node->DebugString() << ", switch: " << switch_->DebugString();
      MS_LOG(DEBUG) << "Original fg1: " << g1_->ToString() << ", new_fg1: " << new_g1->ToString();
      MS_LOG(DEBUG) << "Original fg2: " << g2_->ToString() << ", new_fg2: " << new_g2->ToString();
      processed_nodes_.emplace(switch_);
      processed_fgs_.emplace(std::make_pair(g1_, index_vector), new_g1);
      processed_fgs_.emplace(std::make_pair(g2_, index_vector), new_g2);
      manager->SetEdge(switch_cnode, 2, NewValueNode(new_g1));
      manager->SetEdge(switch_cnode, 3, NewValueNode(new_g2));
      auto shrunk_abstract = internal::ShrinkAbstract(switch_call_cnode->abstract(), tp_cnodes_and_index);
      if (shrunk_abstract != nullptr) {
        switch_call_cnode->set_abstract(shrunk_abstract);
      }
      auto new_idx = internal::UpdateUserNodeIndex(switch_call_cnode, idx_, tp_cnodes_and_index);
      auto new_node =
        node->func_graph()->NewCNode({NewValueNode(prim::kPrimTupleGetItem), switch_call_cnode, NewValueNode(new_idx)});
      new_node->set_abstract(node->abstract());
      return new_node;
    }
    MS_LOG(DEBUG) << "Shrink failed. node: " << node->DebugString()
                  << ", switch_call: " << switch_call_cnode->DebugString();
    return nullptr;
  }
  // During the switch phase, the first non-matching node is the condition X.
  void Visit(const AnfNodePtr &node) override {
    if (is_in_switch_ && x_ == nullptr) {
      x_ = node;
      return;
    }
    AnfVisitor::Visit(node);
  }
  // During the getitem phase this is the switch call {switch, Xs}: records
  // the switch node and the call arguments.
  void Visit(const CNodePtr &cnode) override {
    if (is_in_get_ && cnode->size() != 0) {
      auto &inputs = cnode->inputs();
      switch_ = inputs[0];
      (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(args_));
    }
  }
  // Getitem phase: index C. Switch phase: G1 then G2 in visit order.
  void Visit(const ValueNodePtr &vnode) override {
    if (is_in_get_) {
      idx_ = GetValue<int64_t>(vnode->value());
    }
    if (is_in_switch_) {
      auto g = GetValueNode<FuncGraphPtr>(vnode);
      if (g1_ == nullptr) {
        g1_ = g;
      } else {
        g2_ = g;
      }
    }
  }
  void Reset() {
    x_ = nullptr;
    g1_ = nullptr;
    g2_ = nullptr;
    switch_ = nullptr;
    args_.clear();
    is_in_get_ = false;
    is_in_switch_ = false;
  }
 private:
  // Collects every tuple_getitem user of `switch_call` into *cnodes_and_index
  // (sorted by index) and returns the total user count.
  size_t MultipleUseOfSwitch(const AnfNodePtr &switch_call, const FuncGraphPtr &fg,
                             std::vector<internal::TpCNodeAndIndex> *cnodes_and_index) const {
    auto switch_call_cnode = switch_call->cast<CNodePtr>();
    MS_EXCEPTION_IF_NULL(switch_call_cnode);
    auto manager = fg->manager();
    MS_EXCEPTION_IF_NULL(manager);
    auto &cnode_and_index_vector = *cnodes_and_index;
    std::set<int64_t> index_set;
    std::size_t total_usage = 0;
    auto &node_users_map = manager->node_users();
    auto it = node_users_map.find(switch_call);
    if (it == node_users_map.end()) {
      return 0;
    }
    auto &node_users = it->second;
    // If switch was used by more than 1 tuple_getitem nodes, this pass shouldn't be execute.
    for (auto user : node_users) {
      if (IsPrimitiveCNode(user.first, prim::kPrimTupleGetItem)) {
        auto cnode = user.first->cast<CNodePtr>();
        constexpr auto kInputIndex = 2;
        if (cnode->input(kInputIndex)->isa<ValueNode>()) {
          const auto &idx_node = cnode->input(kInputIndex)->cast<ValueNodePtr>();
          MS_EXCEPTION_IF_NULL(idx_node);
          auto idx = GetValue<int64_t>(idx_node->value());
          cnode_and_index_vector.push_back({cnode, idx});
          index_set.insert(idx);
          total_usage++;
        } else {
          MS_LOG(EXCEPTION) << "Tuple_getitem index is not valuenode, but: " << user.first->DebugString(2);
        }
      } else {
        MS_LOG(DEBUG) << "switch_call user is not tuple_getitem, user: " << user.first->DebugString(2);
      }
    }
    if (index_set.size() != total_usage) {
      MS_LOG(DEBUG) << "some index is duplicated, total_usage: " << total_usage;
      MS_LOG(DEBUG) << "index_set: ";
      for (auto idx : index_set) {
        MS_LOG(DEBUG) << " " << idx;
      }
    }
    // sort by index;
    std::sort(cnode_and_index_vector.begin(), cnode_and_index_vector.end(),
              [](const auto &tp1, const auto &tp2) { return tp1.index < tp2.index; });
    return node_users.size();
  }
  // True if fg's value nodes contain an EnvironSet/EnvironGet primitive.
  static bool inline ExistEnvironNode(const FuncGraphPtr &fg) {
    MS_EXCEPTION_IF_NULL(fg);
    auto &nodes = fg->value_nodes();
    return std::any_of(nodes.begin(), nodes.end(), [](const auto &node) {
      return IsPrimitive(node.first, prim::kPrimEnvironSet) || IsPrimitive(node.first, prim::kPrimEnvironGet);
    });
  }
  // True if any func_graph element of fg's MakeTuple output contains an
  // EnvironSet/EnvironGet primitive (the fprop-with-bprop case).
  static bool ExistEnvironNodeInTupleItem(const FuncGraphPtr &fg) {
    MS_EXCEPTION_IF_NULL(fg);
    const auto &output = fg->output();
    if (!IsPrimitiveCNode(output, prim::kPrimMakeTuple)) {
      return false;
    }
    const auto &cnode = output->cast<CNodePtr>();
    const auto &inputs = cnode->inputs();
    return std::any_of(inputs.cbegin() + 1, inputs.cend(), [](const auto &input) {
      auto sub_fg = GetValueNode<FuncGraphPtr>(input);
      if (sub_fg != nullptr && ExistEnvironNode(sub_fg)) {
        return true;
      }
      return false;
    });
  }
  int64_t idx_{-1};                           // index C
  AnfNodePtr switch_{nullptr}, x_{nullptr};   // switch node and its condition
  FuncGraphPtr g1_{nullptr}, g2_{nullptr};    // true/false branch graphs
  bool is_in_get_{false}, is_in_switch_{false};  // which visit phase is active
  std::vector<AnfNodePtr> args_{};            // Xs
  std::set<AnfNodePtr> processed_nodes_;      // switch nodes already handled
  // Memo of shrunk clones keyed by (branch graph, used-index-vector).
  mindspore::HashMap<std::pair<FuncGraphPtr, std::vector<int64_t>>, FuncGraphPtr,
                     internal::FuncGraphIntVectorPairHasher>
    processed_fgs_;
  internal::GetitemTransform getitem_transform_;
};
// {prim::kPrimTupleGetItem, {{prim::kPrimSwitchLayer, X, {prim::kPrimMakeTuple, G1, G2...}}, Xs}, C}
// SwitchLayer counterpart: every layer graph is specialized to return only
// element C, and the SwitchLayer is rebuilt over the specialized clones.
class IncorporateGetitemSwitchLayerA : public AnfVisitor {
 public:
  IncorporateGetitemSwitchLayerA() : getitem_transform_() {}
  ~IncorporateGetitemSwitchLayerA() override = default;
  // Pattern entry: returns the rebuilt call, or nullptr on no match or when
  // any layer cannot be specialized.
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    Reset();
    // Two-phase visit: first the getitem (collects switch_layer_ and idx_),
    // then the switch_layer itself (collects X and the layer graphs).
    is_in_get_ = true;
    AnfVisitor::Match(prim::kPrimTupleGetItem, {IsCNode, IsValueNode<Int64Imm>})(node);
    is_in_get_ = false;
    auto fg = node->func_graph();
    if (idx_ == -1 || switch_layer_ == nullptr || fg == nullptr) {
      return nullptr;
    }
    is_in_switch_ = true;
    AnfVisitor::Match(prim::kPrimSwitchLayer, {IsNode, IsCNode})(switch_layer_);
    is_in_switch_ = false;
    if (graphs_.empty()) {
      return nullptr;
    }
    std::vector<AnfNodePtr> layers;
    for (auto &graph : graphs_) {
      auto fg_transform = getitem_transform_(node, graph, idx_);
      if (fg_transform == nullptr) {
        return nullptr;
      }
      layers.push_back(NewValueNode(fg_transform));
    }
    auto layers_node = fg->NewCNode(prim::kPrimMakeTuple, layers);
    std::vector<AnfNodePtr> sw_args{NewValueNode(prim::kPrimSwitchLayer), x_, layers_node};
    auto sw_node = fg->NewCNode(sw_args);
    (void)args_.insert(args_.begin(), sw_node);
    return fg->NewCNode(args_);
  }
  // During the switch phase, the first non-matching node is the selector X.
  void Visit(const AnfNodePtr &node) override {
    if (is_in_switch_ && x_ == nullptr) {
      x_ = node;
      return;
    }
    AnfVisitor::Visit(node);
  }
  // Getitem phase: records the switch_layer call and its args. Switch phase:
  // extracts the layer graphs out of the MakeTuple input.
  void Visit(const CNodePtr &cnode) override {
    if (is_in_get_ && cnode->size() != 0) {
      auto &inputs = cnode->inputs();
      switch_layer_ = inputs[0];
      (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(args_));
    }
    if (is_in_switch_ && cnode->size() >= 2) {
      auto &inputs = cnode->inputs();
      if (IsPrimitiveCNode(cnode, prim::kPrimMakeTuple) && IsValueNode<FuncGraph>(inputs[1])) {
        (void)std::transform(inputs.begin() + 1, inputs.end(), std::back_inserter(graphs_),
                             [](const AnfNodePtr &vnode) { return GetValueNode<FuncGraphPtr>(vnode); });
      }
    }
  }
  // Visitor leg for the index value node C.
  void Visit(const ValueNodePtr &vnode) override {
    if (is_in_get_) {
      idx_ = GetValue<int64_t>(vnode->value());
    }
  }
  void Reset() {
    x_ = nullptr;
    graphs_.clear();
    switch_layer_ = nullptr;
    args_.clear();
    is_in_get_ = false;
    is_in_switch_ = false;
  }
 private:
  int64_t idx_{-1};                              // index C
  AnfNodePtr switch_layer_{nullptr}, x_{nullptr};  // switch_layer node and its selector
  std::vector<FuncGraphPtr> graphs_{};           // layer graphs G1, G2, ...
  bool is_in_get_{false}, is_in_switch_{false};  // which visit phase is active
  std::vector<AnfNodePtr> args_{};               // Xs
  internal::GetitemTransform getitem_transform_;
};
// {prim::kPrimTupleGetItem, {{{prim::kPrimSwitchLayer, X, {prim::kPrimMakeTuple, G1, G2...}}, Xs}, Ys}, C}
// Same idea as IncorporateGetitemSwitchLayerA, but for a two-level call
// (pattern shown above): the switch_layer call's result is itself called
// with outer arguments Ys before the getitem is applied.
class IncorporateGetitemSwitchLayerB : public AnfVisitor {
 public:
  IncorporateGetitemSwitchLayerB() : getitem_transform_() {}
  ~IncorporateGetitemSwitchLayerB() override = default;

  // Attempts the rewrite on `node`. Returns the replacement node, or
  // nullptr when the pattern does not match or a branch graph cannot be
  // transformed.
  AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
    Reset();
    // Phase 1: visit the getitem node to capture the inner call node
    // (switch_layer_call_), the outer call arguments (outer_call_args_) and
    // the constant index (idx_).
    is_in_get_ = true;
    AnfVisitor::Match(prim::kPrimTupleGetItem, {IsCNode, IsValueNode<Int64Imm>})(node);
    is_in_get_ = false;
    auto fg = node->func_graph();
    if (idx_ == -1 || switch_layer_call_ == nullptr || !switch_layer_call_->isa<CNode>() || fg == nullptr) {
      return nullptr;
    }
    // Inner call arguments Xs are everything after the switch_layer node.
    auto &switch_layer_call_inputs = switch_layer_call_->cast<CNodePtr>()->inputs();
    (void)std::copy(switch_layer_call_inputs.begin() + 1, switch_layer_call_inputs.end(), std::back_inserter(args_));
    // Phase 2: visit the switch_layer to capture the selector input (x_)
    // and the candidate branch graphs (graphs_).
    is_in_switch_ = true;
    AnfVisitor::Match(prim::kPrimSwitchLayer, {IsNode, IsCNode})(switch_layer_call_inputs[0]);
    is_in_switch_ = false;
    if (graphs_.empty()) {
      return nullptr;
    }
    // Transform every branch graph (across the two call levels) so that
    // only element idx_ is produced.
    std::vector<AnfNodePtr> layers;
    for (auto &graph : graphs_) {
      auto fg_transform = getitem_transform_(node, graph, idx_);
      if (fg_transform == nullptr) {
        return nullptr;
      }
      layers.push_back(NewValueNode(fg_transform));
    }
    // Rebuild both call levels over the transformed graphs.
    auto layers_node = fg->NewCNode(prim::kPrimMakeTuple, layers);
    std::vector<AnfNodePtr> sw_args{NewValueNode(prim::kPrimSwitchLayer), x_, layers_node};
    auto sw_node = fg->NewCNode(sw_args);
    (void)args_.insert(args_.begin(), sw_node);
    auto call_switch_layer = fg->NewCNode(args_);
    (void)outer_call_args_.insert(outer_call_args_.begin(), call_switch_layer);
    return fg->NewCNode(outer_call_args_);
  }

  // During phase 2 the first visited (non-CNode) input is the selector X.
  void Visit(const AnfNodePtr &node) override {
    if (is_in_switch_ && x_ == nullptr) {
      x_ = node;
      return;
    }
    AnfVisitor::Visit(node);
  }

  // Phase 1: records the inner call node and the outer call arguments.
  // Phase 2: collects the branch graphs out of the make_tuple input.
  void Visit(const CNodePtr &cnode) override {
    if (is_in_get_ && cnode->size() != 0) {
      auto &inputs = cnode->inputs();
      switch_layer_call_ = inputs[0];
      (void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(outer_call_args_));
    }
    if (is_in_switch_ && cnode->size() >= 2) {
      auto &inputs = cnode->inputs();
      if (IsPrimitiveCNode(cnode, prim::kPrimMakeTuple) && IsValueNode<FuncGraph>(inputs[1])) {
        (void)std::transform(inputs.begin() + 1, inputs.end(), std::back_inserter(graphs_),
                             [](const AnfNodePtr &vnode) { return GetValueNode<FuncGraphPtr>(vnode); });
      }
    }
  }

  // Phase 1: captures the constant getitem index C.
  void Visit(const ValueNodePtr &vnode) override {
    if (is_in_get_) {
      idx_ = GetValue<int64_t>(vnode->value());
    }
  }

  // Clears all per-invocation state before a new match attempt.
  void Reset() {
    x_ = nullptr;
    graphs_.clear();
    switch_layer_call_ = nullptr;
    args_.clear();
    outer_call_args_.clear();
    is_in_get_ = false;
    is_in_switch_ = false;
  }

 private:
  int64_t idx_{-1};                            // getitem index C (-1 means not matched)
  AnfNodePtr switch_layer_call_{nullptr}, x_{nullptr};
  std::vector<FuncGraphPtr> graphs_{};         // candidate branch graphs G1, G2...
  bool is_in_get_{false}, is_in_switch_{false};
  std::vector<AnfNodePtr> args_{};             // inner call arguments Xs
  std::vector<AnfNodePtr> outer_call_args_{};  // outer call arguments Ys
  internal::GetItemTransformACrossGraph getitem_transform_;
};
class IncorporateGetitemSet : public OptimizerCaller {
public:
IncorporateGetitemSet()
: incorporate_getitem_(std::make_shared<IncorporateGetitem>()),
incorporate_getitem_depend_(std::make_shared<IncorporateGetitemDepend>()),
incorporate_getitem_switch_(std::make_shared<IncorporateGetitemSwitch>()),
incorporate_getitem_switch_layer_a_(std::make_shared<IncorporateGetitemSwitchLayerA>()),
incorporate_getitem_switch_layer_b_(std::make_shared<IncorporateGetitemSwitchLayerB>()) {
eliminaters_.emplace_back(incorporate_getitem_);
eliminaters_.emplace_back(incorporate_getitem_depend_);
eliminaters_.emplace_back(incorporate_getitem_switch_);
eliminaters_.emplace_back(incorporate_getitem_switch_layer_a_);
eliminaters_.emplace_back(incorporate_getitem_switch_layer_b_);
}
~IncorporateGetitemSet() = default;
AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override {
static bool enable_closure = common::GetEnv("MS_DEV_ENABLE_CLOSURE") != "0";
if (enable_closure) {
return nullptr;
}
AnfNodePtr new_node;
for (auto &eliminater : eliminaters_) {
new_node = (*eliminater)(optimizer, node);
if (new_node != nullptr) {
return new_node;
}
}
return nullptr;
}
private:
OptimizerCallerPtr incorporate_getitem_, incorporate_getitem_depend_, incorporate_getitem_switch_,
incorporate_getitem_switch_layer_a_, incorporate_getitem_switch_layer_b_;
std::vector<OptimizerCallerPtr> eliminaters_{};
};
} // namespace irpass
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H_
| 41.608577 | 120 | 0.663158 | [
"vector",
"transform"
] |
eab58f0708f804fd0089d9175933ee3e3a947f03 | 39,839 | h | C | root/RooT.h | clayne/QuadRay-engine | b5eb8a9b63b7a53cf7c55c23697b78da7ae11502 | [
"MIT"
] | 21 | 2015-04-14T16:04:59.000Z | 2021-05-18T03:01:05.000Z | root/RooT.h | clayne/QuadRay-engine | b5eb8a9b63b7a53cf7c55c23697b78da7ae11502 | [
"MIT"
] | null | null | null | root/RooT.h | clayne/QuadRay-engine | b5eb8a9b63b7a53cf7c55c23697b78da7ae11502 | [
"MIT"
] | 8 | 2017-03-18T15:32:33.000Z | 2021-04-02T01:04:57.000Z | /******************************************************************************/
/* Copyright (c) 2013-2021 VectorChief (at github, bitbucket, sourceforge) */
/* Distributed under the MIT software license, see the accompanying */
/* file COPYING or http://www.opensource.org/licenses/mit-license.php */
/******************************************************************************/
#ifndef RT_ROOT_H
#define RT_ROOT_H
#include <stdlib.h>
#include <string.h>
#include "engine.h"
#include "all_scn.h"
/* enable test scene for smallpt-based path-tracer
* enable SIMD buffers for 5x performance in PT mode
* set RT_FEAT_BUFFERS to 1 in core/tracer/tracer.cpp
* set RT_OFFS_BUFFERS to 0x0A0*1 in core/tracer/tracer.h
* set RT_OPTS_BUFFERS to (0 << 24) in core/engine/format.h
* to enable path-tracer in runtime press Q (or T) or pass -q
* to change the number of frames between updates press E (or Y)
*/
#define RT_TEST_PT 0
#if RT_TEST_PT != 0
#include "../test/scenes/scn_test17.h" /* RaVi - glass cube */
#include "../test/scenes/scn_test18.h" /* smallpt - Cornell box */
#endif /* RT_TEST_PT */
#define RT_X_RES 800
#define RT_Y_RES 480
rt_astr title = "QuadRay engine demo, (C) 2013-2021 VectorChief";
rt_si32 x_win = RT_X_RES; /* window-rect (client) x-resolution */
rt_si32 y_win = RT_Y_RES; /* window-rect (client) y-resolution */
rt_si32 x_res = RT_X_RES;
rt_si32 y_res = RT_Y_RES;
rt_si32 x_row = (RT_X_RES+RT_SIMD_WIDTH-1) & ~(RT_SIMD_WIDTH-1);
rt_ui32 *frame = RT_NULL;
rt_si32 thnum = RT_THREADS_NUM;
rt_SCENE *sc_rt[] =
{
&scn_demo01::sc_root,
&scn_demo02::sc_root,
&scn_demo03::sc_root,
#if RT_TEST_PT != 0
&scn_test17::sc_root, /* RaVi - glass cube */
&scn_test18::sc_root, /* smallpt - Cornell box */
#endif /* RT_TEST_PT */
};
rt_Platform*pfm = RT_NULL; /* platformobj */
rt_Scene *sc[RT_ARR_SIZE(sc_rt)] = {0}; /* scene array */
rt_si32 d = RT_ARR_SIZE(sc_rt)-1; /* demo-scene */
rt_si32 c = 0; /* camera-idx */
rt_si32 tile_w = 0; /* tile width */
rt_time b_time = 0; /* time-begins-(ms) (from command-line) */
rt_time e_time =-1; /* time-ending-(ms) (from command-line) */
rt_si32 m_num = 1; /* frames-in-update (from command-line) */
rt_si32 f_num =-1; /* number-of-frames (from command-line) */
rt_time f_time =-1; /* frame-delta-(ms) (from command-line) */
rt_si32 n_simd = 0; /* SIMD-native-size (from command-line) */
rt_si32 k_size = 0; /* SIMD-size-factor (from command-line) */
rt_si32 s_type = 0; /* SIMD-sub-variant (from command-line) */
rt_si32 t_pool = 0; /* Thread-pool-size (from command-line) */
#if RT_FULLSCREEN == 1
rt_si32 w_size = 0; /* Window-rect-size (from command-line) */
#else /* RT_FULLSCREEN */
rt_si32 w_size = 1; /* Window-rect-size (from command-line) */
#endif /* RT_FULLSCREEN */
rt_si32 x_new = 0; /* New x-resolution (from command-line) */
rt_si32 y_new = 0; /* New y-resolution (from command-line) */
rt_si32 img_id =-1; /* save-image-index (from command-line) */
rt_time l_time = 500; /* fpslogupd-(ms) (from command-line) */
rt_bool l_mode = RT_FALSE; /* fpslogoff (from command-line) */
rt_bool h_mode = RT_FALSE; /* hide mode (from command-line) */
rt_bool p_mode = RT_FALSE; /* pause mode (from command-line) */
rt_bool q_mode = RT_FALSE; /* quake mode (from command-line) */
rt_si32 u_mode = 0; /* update/render threadoff (from command-line) */
rt_bool o_mode = RT_FALSE; /* offscreen (from command-line) */
rt_si32 a_mode = RT_FSAA_NO; /* FSAA mode (from command-line) */
/******************************************************************************/
/******************************** PLATFORM ********************************/
/******************************************************************************/
#include "rtzero.h"
/*
* Get system time in milliseconds.
*/
rt_time get_time();
/*
* Allocate memory from system heap.
*/
rt_pntr sys_alloc(rt_size size);
/*
* Free memory from system heap.
*/
rt_void sys_free(rt_pntr ptr, rt_size size);
/*
* Initialize platform-specific pool of "thnum" threads (< 0 - no feedback).
*/
rt_pntr init_threads(rt_si32 thnum, rt_Platform *pfm);
/*
* Terminate platform-specific pool of "thnum" threads.
*/
rt_void term_threads(rt_pntr tdata, rt_si32 thnum);
/*
* Task platform-specific pool of "thnum" threads to update scene,
* block until finished.
*/
rt_void update_scene(rt_pntr tdata, rt_si32 thnum, rt_si32 phase);
/*
* Task platform-specific pool of "thnum" threads to render scene,
* block until finished.
*/
rt_void render_scene(rt_pntr tdata, rt_si32 thnum, rt_si32 phase);
/*
* Set current frame to screen.
*/
rt_void frame_to_screen(rt_ui32 *frame, rt_si32 x_row);
/******************************************************************************/
/******************************* EVENT-LOOP *******************************/
/******************************************************************************/
#define RK_ESCAPE 0
#define RK_F1 1
#define RK_F2 2
#define RK_F3 3
#define RK_F4 4
#define RK_F5 5
#define RK_F6 6
#define RK_F7 7
#define RK_F8 8
#define RK_F9 9
#define RK_F10 10
#define RK_F11 11
#define RK_F12 12
#define RK_X 13
#define RK_C 14
#define RK_UP 15
#define RK_DOWN 16
#define RK_LEFT 17
#define RK_RIGHT 18
#define RK_U 19
#define RK_O 20
#define RK_W 21
#define RK_S 22
#define RK_A 23
#define RK_D 24
#define RK_Q 25
#define RK_E 26
#define RK_I 27
#define RK_L 28
#define RK_P 29
#define RK_T 30
#define RK_Y 31
#define RK_0 50
#define RK_1 51
#define RK_2 52
#define RK_3 53
#define RK_4 54
#define RK_5 55
#define RK_6 56
#define RK_7 57
#define RK_8 58
#define RK_9 59
#define KEY_MASK 0xFF
/* thread's exception variables */
rt_si32 eout = 0, emax = 0;
rt_pstr *estr = RT_NULL;
/* state tracking variables */
rt_si32 d_prev = -1; /* prev demo-scene */
rt_si32 c_prev = -1; /* prev camera-idx */
rt_si32 n_prev = -1; /* prev SIMD-native-size */
rt_si32 k_prev = -1; /* prev SIMD-size-factor */
rt_si32 s_prev = -1; /* prev SIMD-sub-variant */
rt_si32 p_prev = -1; /* prev pause mode */
rt_si32 q_prev = -1; /* prev quake mode */
/* time counter variables */
rt_time init_time = 0;
rt_time anim_time = 0;
rt_time prev_time = 0;
rt_time run_time = 0;
rt_time log_time = 0;
rt_time cur_time = 0;
rt_bool switched = 0;
/* frame counter variables */
rt_si32 cnt = 0;
rt_real fps = 0.0f;
rt_si32 glb = 0;
rt_real avg = 0.0f;
rt_si32 ttl = 0;
rt_si32 scr_id = 0;
/* virtual key arrays */
rt_byte r_to_p[KEY_MASK + 1];
rt_byte h_keys[KEY_MASK + 1];
rt_byte t_keys[KEY_MASK + 1];
rt_byte r_keys[KEY_MASK + 1];
/* hold keys */
#define H_KEYS(k) (h_keys[r_to_p[(k) & KEY_MASK]])
/* toggle on press */
#define T_KEYS(k) (t_keys[r_to_p[(k) & KEY_MASK]])
/* toggle on release */
#define R_KEYS(k) (r_keys[r_to_p[(k) & KEY_MASK]])
/*
* Print average fps.
*/
/*
 * Print average fps accumulated since the last target switch
 * (uses global frame counters "glb"/"cnt" and the time window
 * between "run_time" and "cur_time"; stores the result in "avg").
 */
rt_void print_avgfps()
{
    RT_LOGI("---%s%s------------- FPS AVG ------ simd = %4dx%dv%d -\n",
            q_prev == 2 ? " P " : p_prev ? " p " : "---", q_prev ? "q " : "--",
            n_prev * 128, k_prev, s_prev);

    rt_time span = cur_time - run_time;

    /* guard against a zero-length (or negative) measurement window */
    avg = span > 0 ? (rt_real)(glb + cnt) * 1000 / span : (rt_real)0;

    RT_LOGI("AVG = %.2f\n", avg);
}
/*
* Print current target config.
*/
/*
 * Print current target config: SIMD target, framebuffer and window
 * geometry, threading setup and the active mode flags, followed by
 * the FPS-log section header.
 */
rt_void print_target()
{
    RT_LOGI("------------------ TARGET CONFIG ---------------------\n");

    /* in path-tracer (quake) mode report frames-in-update (m_num)
     * instead of the tile width used by the ray-tracer */
    if (q_mode)
    {
        RT_LOGI("SIMD size/type = %4dx%dv%d, updatePT = %d, FSAA = %d %s\n",
            n_simd * 128, k_size, s_type, m_num,
            1 << a_mode, a_mode ? "(spp)" : "(off)");
    } else {
        RT_LOGI("SIMD size/type = %4dx%dv%d, tile_W = %dxW, FSAA = %d %s\n",
            n_simd * 128, k_size, s_type, tile_w / 8,
            1 << a_mode, a_mode ? "(spp)" : "(off)");
    }

    RT_LOGI("Framebuffer X-row = %5d, ptr = %016" PR_Z "X\n",
            sc[d]->get_x_row(), (rt_full)sc[d]->get_frame());

    RT_LOGI("Framebuffer X-res = %5d, Y-res = %4d, l %d, h %d %s %s\n",
            x_res, y_res, l_mode, h_mode,
            p_mode ? "p" : " ", q_mode ? "q" : " ");

    RT_LOGI("Window-rect X-res = %5d, Y-res = %4d, u %d, o %d\n",
            x_win, y_win, u_mode, o_mode);

    RT_LOGI("Threads/affinity = %4d/%d, reserved = %d, d%2d, c%2d\n",
            pfm->get_thnum(), RT_SETAFFINITY, 0, d+1, c+1);

    RT_LOGI("---%s%s------------- FPS LOG ------ ptr/fp = %d%s%d --\n",
            p_mode ? " p " : "---", q_mode ? "q " : "--",
            RT_POINTER, RT_ADDRESS == 32 ? "_" : "f", RT_ELEMENT);
}
/*
* Event loop's main step.
*/
/*
 * Event loop's main step: processes key toggles, advances the time and
 * fps counters, renders the current scene and (unless offscreen mode is
 * set) blits the frame to the screen.
 * Returns 1 to continue the event loop, 0 to terminate it.
 *
 * Fix: catch the engine exception by const reference instead of by value
 * (avoids copying the exception object; C++ Core Guidelines E.15).
 */
rt_si32 main_step()
{
    if (sc[d] == RT_NULL)
    {
        return 0;
    }

    rt_si32 g = d; /* current scene for save_frame at the end of each run */
    rt_pstr str = "--------------------------------------------------------";

    try
    {
        /* screenshot */
        if (T_KEYS(RK_F4) || T_KEYS(RK_4))
        {
            sc[d]->save_frame(scr_id++);
            switched = 1;
        }
        /* toggle fps-logging updates */
        if (T_KEYS(RK_F5) || T_KEYS(RK_L))
        {
            l_mode = !l_mode;
            switched = 1;
        }
        /* toggle animation pause */
        if (T_KEYS(RK_P))
        {
            p_prev = p_mode;
            p_mode = !p_mode;
            switched = 1;
        }
        /* toggle path-tracer (quake) mode, if the scene supports it */
        if (T_KEYS(RK_Q) || T_KEYS(RK_T))
        {
            q_prev = q_mode;
            q_mode = !q_mode;
            rt_si32 q_test = sc[d]->set_pton(q_mode ? m_num : 0);
            if (q_test != (q_mode ? m_num : 0))
            {
                q_mode = q_prev;
                /* to enable path-tracer in a particular scene
                 * add RT_OPTS_PT to the list of optimizations
                 * to be turned off in scene definition struct */
                RT_LOGI("%s\n", str);
                RT_LOGI("Quasi-realistic mode: %d (off), %s\n", q_mode,
                        "add RT_OPTS_PT per scene");
                RT_LOGI("%s\n", str);
            }
            else
            {
                switched = 1;
            }
        }
        /* cycle path-tracer frames-in-update through 1..9 */
        if (T_KEYS(RK_E) || T_KEYS(RK_Y))
        {
            if (m_num > 9)
            {
                m_num = 1;
            }
            else
            {
                m_num = (m_num % 9) + 1;
            }
            rt_si32 q_test = sc[d]->set_pton(q_mode ? m_num : 0);
            if (q_test != (q_mode ? m_num : 0))
            {
                q_mode = q_prev;
                /* to enable path-tracer in a particular scene
                 * add RT_OPTS_PT to the list of optimizations
                 * to be turned off in scene definition struct */
                RT_LOGI("%s\n", str);
                RT_LOGI("Quasi-realistic mode: %d (off), %s\n", q_mode,
                        "add RT_OPTS_PT per scene");
                RT_LOGI("%s\n", str);
            }
            else
            {
                switched = 1;
            }
        }
        /* cycle serial update/render mode (0..6) and remap the
         * corresponding per-scene optimization flags */
        if (T_KEYS(RK_9) || T_KEYS(RK_U))
        {
            rt_si32 opts = sc[d]->get_opts();
            u_mode = (u_mode + 1) % 7;
            opts &= ~RT_OPTS_UPDATE_EXT0 & ~RT_OPTS_UPDATE_EXT1 &
                    ~RT_OPTS_UPDATE_EXT2 & ~RT_OPTS_UPDATE_EXT3 &
                    ~RT_OPTS_RENDER_EXT0 & ~RT_OPTS_RENDER_EXT1;
            switch (u_mode)
            {
                case 6:
                opts |= RT_OPTS_RENDER_EXT0; /* falls through */
                case 5:
                opts |= RT_OPTS_UPDATE_EXT0;
                break;

                case 4:
                opts |= RT_OPTS_RENDER_EXT1; /* falls through */
                case 3:
                opts |= RT_OPTS_UPDATE_EXT3; /* falls through */
                case 2:
                opts |= RT_OPTS_UPDATE_EXT2; /* falls through */
                case 1:
                opts |= RT_OPTS_UPDATE_EXT1;
                break;

                default:
                break;
            }
            sc[d]->set_opts(opts);
            switched = 1;
        }
        /* toggle offscreen-frame mode */
        if (T_KEYS(RK_0) || T_KEYS(RK_O))
        {
            o_mode = !o_mode;
            switched = 1;
        }
        /* toggle on-screen info numbers */
        if (T_KEYS(RK_F12) || T_KEYS(RK_5))
        {
            h_mode = !h_mode;
            switched = 1;
        }
        if (T_KEYS(RK_ESCAPE))
        {
            return 0;
        }

        /* update time variables */
        cur_time = get_time();
        if (init_time == 0)
        {
            init_time = cur_time - b_time;
            anim_time = run_time = log_time = prev_time = b_time;
        }
        cur_time = cur_time - init_time;
        if (!p_mode)
        {
            anim_time += (cur_time - prev_time);
        }
        /* emit fps-log entry once per l_time interval */
        if (cur_time - log_time >= l_time)
        {
            fps = (rt_real)cnt * 1000 / (cur_time - log_time);
            glb += cnt;
            cnt = 0;
            log_time = cur_time;
            if (!l_mode)
            {
                RT_LOGI("FPS = %.2f\n", fps);
            }
        }
        /* honor command-line run limits (-e end-time, -f frame count) */
        if (e_time >= 0 && anim_time >= e_time)
        {
            return 0;
        }
        if (f_num >= 0 && ttl >= f_num)
        {
            return 0;
        }

#if RT_OPTS_UPDATE_EXT0 != 0
        if (u_mode <= 4 && !p_mode)
        { /* -->---->-- skip update0 -->---->-- */
#endif /* RT_OPTS_UPDATE_EXT0 */

        /* camera navigation (held keys) */
        if (H_KEYS(RK_W)) sc[d]->update(anim_time, RT_CAMERA_MOVE_FORWARD);
        if (H_KEYS(RK_S)) sc[d]->update(anim_time, RT_CAMERA_MOVE_BACK);
        if (H_KEYS(RK_A)) sc[d]->update(anim_time, RT_CAMERA_MOVE_LEFT);
        if (H_KEYS(RK_D)) sc[d]->update(anim_time, RT_CAMERA_MOVE_RIGHT);

        if (H_KEYS(RK_UP)) sc[d]->update(anim_time, RT_CAMERA_ROTATE_DOWN);
        if (H_KEYS(RK_DOWN)) sc[d]->update(anim_time, RT_CAMERA_ROTATE_UP);
        if (H_KEYS(RK_LEFT)) sc[d]->update(anim_time, RT_CAMERA_ROTATE_LEFT);
        if (H_KEYS(RK_RIGHT)) sc[d]->update(anim_time, RT_CAMERA_ROTATE_RIGHT);

        /* dump scene state to the log */
        if (T_KEYS(RK_F1) || T_KEYS(RK_I))
        {
            sc[d]->print_state();
        }
        /* cycle FSAA mode through the platform-supported set */
        if (T_KEYS(RK_F2) || T_KEYS(RK_2))
        {
            rt_si32 a_prev = a_mode;
            a_mode = (a_mode + 1) % (pfm->get_fsaa_max() + 1);
            a_mode = pfm->set_fsaa(a_mode);
            switched = a_prev != a_mode ? 1 : switched;
        }
        /* switch to the scene's next camera */
        if (T_KEYS(RK_F3) || T_KEYS(RK_3))
        {
            c_prev = c;
            c = sc[d]->next_cam();
            switched = c_prev != c ? 1 : switched;
        }
        /* cycle SIMD-size-factor (1, 2, 4), falling back to a default
         * sub-variant when the current one isn't available at that size */
        if (T_KEYS(RK_F6) || T_KEYS(RK_6))
        {
            k_prev = k_size;
            rt_si32 size, type, simd;
            do
            {
                k_size = k_size % 4 + k_size % 3; /* 1, 2, 4 */
                simd = pfm->set_simd(simd_init(n_simd, s_type, k_size));
                size = (simd >> 16) & 0xFF;
                type = (simd >> 8) & 0xFF;
                simd = simd & 0xFF;
                if (size != k_size)
                {
                    simd = pfm->set_simd(simd_init(n_simd, 0, k_size));
                    size = (simd >> 16) & 0xFF;
                    type = (simd >> 8) & 0xFF;
                    simd = simd & 0xFF;
                }
                if (size == k_size && simd == n_simd)
                {
                    s_type = type;
                }
                if (simd != n_simd)
                {
                    size = 0;
                }
            }
            while (size != k_size);
            a_mode = pfm->get_fsaa();
            switched = k_prev != k_size ? 1 : switched;
        }
        /* cycle SIMD-sub-variant (1, 2, 4, 8, 16, 32) at the current size */
        if (T_KEYS(RK_F7) || T_KEYS(RK_7))
        {
            s_prev = s_type;
            rt_si32 size, type, simd;
            do
            {
                s_type = s_type % 32 + s_type % 31; /* 1, 2, 4, 8, 16, 32 */
                simd = pfm->set_simd(simd_init(n_simd, s_type, k_size));
                size = (simd >> 16) & 0xFF;
                type = (simd >> 8) & 0xFF;
                simd = simd & 0xFF;
                if (simd != n_simd || size != k_size)
                {
                    type = 0;
                }
            }
            while (type != s_type);
            a_mode = pfm->get_fsaa();
            switched = s_prev != s_type ? 1 : switched;
        }
        /* cycle SIMD-native-size (1, 2, 4, 8, 16), relaxing sub-variant
         * and size-factor as needed to find a supported combination */
        if (T_KEYS(RK_F8) || T_KEYS(RK_8))
        {
            n_prev = n_simd;
            rt_si32 size, type, simd;
            do
            {
                n_simd = n_simd % 16 + n_simd % 15; /* 1, 2, 4, 8, 16 */
                simd = pfm->set_simd(simd_init(n_simd, s_type, k_size));
                size = (simd >> 16) & 0xFF;
                type = (simd >> 8) & 0xFF;
                simd = simd & 0xFF;
                if (simd != n_simd)
                {
                    simd = pfm->set_simd(simd_init(n_simd, 0, k_size));
                    size = (simd >> 16) & 0xFF;
                    type = (simd >> 8) & 0xFF;
                    simd = simd & 0xFF;
                }
                if (simd != n_simd)
                {
                    simd = pfm->set_simd(simd_init(n_simd, 0, 0));
                    size = (simd >> 16) & 0xFF;
                    type = (simd >> 8) & 0xFF;
                    simd = simd & 0xFF;
                }
                if (simd == n_simd)
                {
                    k_size = size;
                    s_type = type;
                }
            }
            while (simd != n_simd);
            a_mode = pfm->get_fsaa();
            switched = n_prev != n_simd ? 1 : switched;
        }
        /* switch to the next demo-scene */
        if (T_KEYS(RK_F11) || T_KEYS(RK_1))
        {
            d_prev = d;
            d = (d + 1) % RT_ARR_SIZE(sc_rt);
            c = sc[d]->get_cam_idx();
            pfm->set_cur_scene(sc[d]);
            switched = d_prev != d ? 1 : switched;
            rt_si32 q_test = sc[d]->set_pton(q_mode ? m_num : 0);
            if (q_test != (q_mode ? m_num : 0))
            {
                q_mode = q_prev;
                /* to enable path-tracer in a particular scene
                 * add RT_OPTS_PT to the list of optimizations
                 * to be turned off in scene definition struct */
                RT_LOGI("%s\n", str);
                RT_LOGI("Quasi-realistic mode: %d (off), %s\n", q_mode,
                        "add RT_OPTS_PT per scene");
                RT_LOGI("%s\n", str);
            }
        }

#if RT_OPTS_UPDATE_EXT0 != 0
        } /* --<----<-- skip update0 --<----<-- */
#endif /* RT_OPTS_UPDATE_EXT0 */

        /* one-shot key events are consumed once per step */
        memset(t_keys, 0, sizeof(t_keys));
        memset(r_keys, 0, sizeof(r_keys));

        /* save the pre-switch scene's frame when -i image saving is on */
        if (switched && img_id >= 0 && img_id <= 999)
        {
            sc[g]->save_frame(img_id++);
        }

        /* after any target switch: report, snapshot state, reset counters */
        if (switched)
        {
            switched = 0;

            print_avgfps();
            print_target();

            d_prev = d;
            c_prev = c;
            n_prev = n_simd;
            k_prev = k_size;
            s_prev = s_type;
            p_prev = p_mode;
            q_prev = q_mode;

            glb = 0;
            run_time = cur_time;
            cnt = 0;
            log_time = cur_time;
        }

        prev_time = cur_time;

        /* update frame counters */
        cnt++;
        ttl++;

        sc[d]->render(f_time >= 0 ? b_time + f_time * ttl : anim_time);

        /* overlay info numbers unless hidden */
        if (!h_mode)
        {
            sc[d]->render_num(x_res-30, 10, -1, 2, (rt_si32)fps);
            sc[d]->render_num(x_res-10, 10, -1, 2, q_mode ? m_num : tile_w / 8);
            sc[d]->render_num(x_res-10, 34, -1, 2, 1 << a_mode);
            sc[d]->render_num( 30, 10, +1, 2, n_simd * 128);
            sc[d]->render_num( 10, 10, +1, 2, k_size);
            sc[d]->render_num( 10, 34, +1, 2, s_type);
        }
    }
    catch (const rt_Exception &e) /* by const-ref: avoid copying (E.15) */
    {
        RT_LOGE("Exception: %s\n", e.err);
        return 0;
    }

    /* propagate exceptions reported by worker threads */
    if (eout != 0)
    {
        rt_si32 i;

        for (i = 0; i < emax; i++)
        {
            if (estr[i] != RT_NULL)
            {
                RT_LOGE("Exception: thread %d: %s\n", i, estr[i]);
            }
        }

        return 0;
    }

    if (!o_mode)
    {
        frame_to_screen(sc[d]->get_frame(), sc[d]->get_x_row());
    }

    return 1;
}
/*
* Initialize internal variables from command-line arguments.
*/
/*
 * Parse a non-negative decimal number from string "s".
 * Replicates the digit arithmetic of the original inline loops exactly:
 * characters are not validated, so non-digit input yields out-of-range
 * values which are then rejected by the per-option range checks below.
 */
static
rt_si32 args_parse_dec(rt_char *s)
{
    rt_si32 l, r, t;

    for (l = strlen(s), r = 1, t = 0; l > 0; l--, r *= 10)
    {
        t += (s[l-1] - '0') * r;
    }

    return t;
}

/*
 * Initialize internal variables from command-line arguments.
 * Prints the usage summary whenever any arguments are present.
 * Returns 1 on success, 0 when any option value is out of range.
 *
 * Refactor: the hand-rolled decimal-parse loop, previously duplicated
 * for every numeric option, is factored into args_parse_dec() above
 * (behavior unchanged, including acceptance of malformed digits).
 */
rt_si32 args_init(rt_si32 argc, rt_char *argv[])
{
    rt_si32 k, l, t;

    if (argc >= 2)
    {
        RT_LOGI("--------------------------------------------------------\n");
        RT_LOGI("Usage options are given below:\n");
        RT_LOGI(" -d n, specify default demo-scene, where 1 <= n <= d_num\n");
        RT_LOGI(" -c n, specify default camera-idx, where 1 <= n <= c_num\n");
        RT_LOGI(" -b n, specify time (ms) at which testing begins, n >= 0\n");
        RT_LOGI(" -e n, specify time (ms) at which testing ends, n >= min\n");
        RT_LOGI(" -m n, specify # of path-tracer frames in update, n >= 1\n");
        RT_LOGI(" -f n, specify # of consecutive frames to render, n >= 0\n");
        RT_LOGI(" -g n, specify delta (ms) for consecutive frames, n >= 0\n");
        RT_LOGI(" -n n, override SIMD-native-size, where new simd is 1.16\n");
        RT_LOGI(" -k n, override SIMD-size-factor, where new size is 1..4\n");
        RT_LOGI(" -s n, override SIMD-sub-variant, where new type is 1.32\n");
        RT_LOGI(" -t n, override thread-pool-size, where new size <= 1000\n");
        RT_LOGI(" -w n, override window-rect-size, where new size is 1..9\n");
        RT_LOGI(" -w 0, activate window-less mode, full native resolution\n");
        RT_LOGI(" -x n, override x-resolution, where new x-value <= 65535\n");
        RT_LOGI(" -y n, override y-resolution, where new y-value <= 65535\n");
        RT_LOGI(" -i n, save image at the end of each run, n is image-idx\n");
        RT_LOGI(" -r n, fps-logging update rate, where n is interval (ms)\n");
        RT_LOGI(" -l, fps-logging-off mode, turns off fps-logging updates\n");
        RT_LOGI(" -h, hide-screen-num mode, turns off info-number drawing\n");
        RT_LOGI(" -p, pause mode, stops animation from starting time (ms)\n");
        RT_LOGI(" -q, quake mode, enables path-tracing for quality lights\n");
        RT_LOGI(" -u n, 1-3/4 serial update/render, 5/6 update/render off\n");
        RT_LOGI(" -o, offscreen-frame mode, turns off window-rect updates\n");
        RT_LOGI(" -a, enable 4x antialiasing by default, 8x not supported\n");
        RT_LOGI(" -a n, enable antialiasing, 2 for 2x, 4 for 4x, 8 for 8x\n");
        RT_LOGI("options -d n ... ... ... ... ... -a n can all be mixed\n");
        RT_LOGI("--------------------------------------------------------\n");
    }

    for (k = 1; k < argc; k++)
    {
        if (k < argc && strcmp(argv[k], "-d") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1 && t <= RT_ARR_SIZE(sc_rt))
            {
                RT_LOGI("Demo-scene overridden: %d\n", t);
                d = t-1;
            }
            else
            {
                RT_LOGI("Demo-scene value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-c") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1 && t <= 65535)
            {
                RT_LOGI("Camera-idx overridden: %d\n", t);
                c = t-1;
            }
            else
            {
                RT_LOGI("Camera-idx value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-b") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 0)
            {
                RT_LOGI("Initial-test-time (ms): %d\n", t);
                b_time = t;
            }
            else
            {
                RT_LOGI("Initial-test-time value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-e") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1)
            {
                RT_LOGI("Closing-test-time (ms): %d\n", t);
                e_time = t;
            }
            else
            {
                RT_LOGI("Closing-test-time value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-m") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1)
            {
                RT_LOGI("Frames-in-update: %d\n", t);
                m_num = t;
            }
            else
            {
                RT_LOGI("Frames-in-update value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-f") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 0)
            {
                RT_LOGI("Number-of-frames: %d\n", t);
                f_num = t;
            }
            else
            {
                RT_LOGI("Number-of-frames value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-g") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 0)
            {
                RT_LOGI("Frame-delta (ms): %d\n", t);
                f_time = t;
            }
            else
            {
                RT_LOGI("Frame-delta (ms) value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-n") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            /* accepted in SIMD-width units (1..16) or in bits (128..2048) */
            if (t == 1 || t == 2 || t == 4 || t == 8 || t == 16
            ||  t == 128 || t == 256 || t == 512 || t == 1024 || t == 2048)
            {
                RT_LOGI("SIMD-native-size overridden: %d\n", t);
                n_simd = t >= 128 ? t / 128 : t;
            }
            else
            {
                RT_LOGI("SIMD-native-size value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-k") == 0 && ++k < argc)
        {
            t = argv[k][0] - '0';
            if (strlen(argv[k]) == 1 && (t == 1 || t == 2 || t == 4))
            {
                RT_LOGI("SIMD-size-factor overridden: %d\n", t);
                k_size = t;
            }
            else
            {
                RT_LOGI("SIMD-size-factor value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-s") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t == 1 || t == 2 || t == 4 || t == 8 || t == 16 || t == 32)
            {
                RT_LOGI("SIMD-sub-variant overridden: %d\n", t);
                s_type = t;
            }
            else
            {
                RT_LOGI("SIMD-sub-variant value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-t") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 0 && t <= 1000)
            {
                RT_LOGI("Thread-pool-size overridden: %d\n", t);
                t_pool = t;
            }
            else
            {
                RT_LOGI("Thread-pool-size value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-w") == 0 && ++k < argc)
        {
            t = argv[k][0] - '0';
            if (strlen(argv[k]) == 1 && t >= 0 && t <= 9)
            {
                RT_LOGI("Window-rect-size overridden: %d\n", t);
                w_size = t;
            }
            else
            {
                RT_LOGI("Window-rect-size value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-x") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1 && t <= 65535)
            {
                RT_LOGI("X-resolution overridden: %d\n", t);
                x_res = x_new = t;
            }
            else
            {
                RT_LOGI("X-resolution value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-y") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 1 && t <= 65535)
            {
                RT_LOGI("Y-resolution overridden: %d\n", t);
                y_res = y_new = t;
            }
            else
            {
                RT_LOGI("Y-resolution value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-i") == 0)
        {
            /* the index is optional: a following token starting with '-'
             * is treated as the next option and the index defaults to 0 */
            l = 0;
            if (++k < argc)
            {
                if (argv[k][0] != '-')
                {
                    l = strlen(argv[k]);
                }
                else
                {
                    k--;
                }
            }
            t = l > 0 ? args_parse_dec(argv[k]) : 0;
            if (t >= 0 && t <= 999)
            {
                RT_LOGI("Save-image-index: %d\n", t);
                img_id = t;
            }
            else
            {
                RT_LOGI("Save-image-index value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-r") == 0 && ++k < argc)
        {
            t = args_parse_dec(argv[k]);
            if (t >= 0)
            {
                RT_LOGI("FPS-logging-interval (ms) overridden: %d\n", t);
                l_time = t;
            }
            else
            {
                RT_LOGI("FPS-logging-interval value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-l") == 0 && !l_mode)
        {
            l_mode = RT_TRUE;
            RT_LOGI("FPS-logging-off mode: %d\n", l_mode);
        }
        if (k < argc && strcmp(argv[k], "-h") == 0 && !h_mode)
        {
            h_mode = RT_TRUE;
            RT_LOGI("Hide-screen-num mode: %d\n", h_mode);
        }
        if (k < argc && strcmp(argv[k], "-p") == 0 && !p_mode)
        {
            p_mode = RT_TRUE;
            RT_LOGI("Pause-animation mode: %d\n", p_mode);
        }
        if (k < argc && strcmp(argv[k], "-q") == 0 && !q_mode)
        {
            q_mode = RT_TRUE;
            RT_LOGI("Quasi-realistic mode: %d\n", q_mode);
        }
        if (k < argc && strcmp(argv[k], "-u") == 0 && ++k < argc)
        {
            t = argv[k][0] - '0';
            if (strlen(argv[k]) == 1 && t >= 0 && t <= 6)
            {
                RT_LOGI("Threaded-updates-off phases-up-to: %d\n", t);
                u_mode = t;
            }
            else
            {
                RT_LOGI("Threaded-updates-off value out of range\n");
                return 0;
            }
        }
        if (k < argc && strcmp(argv[k], "-o") == 0 && !o_mode)
        {
            o_mode = RT_TRUE;
            RT_LOGI("Offscreen-frame mode: %d\n", o_mode);
        }
        if (k < argc && strcmp(argv[k], "-a") == 0)
        {
            /* maps the optional digit (0..9) to the closest FSAA level */
            rt_si32 aa_map[10] =
            {
                RT_FSAA_NO, RT_FSAA_NO, RT_FSAA_2X, RT_FSAA_2X,
                RT_FSAA_4X, RT_FSAA_4X, RT_FSAA_4X, RT_FSAA_4X,
                RT_FSAA_8X, RT_FSAA_8X
            };
            a_mode = RT_FSAA_4X;
            if (++k < argc)
            {
                t = argv[k][0] - '0';
                if (strlen(argv[k]) == 1 && t >= 0 && t <= 9)
                {
                    a_mode = aa_map[t];
                }
                else
                {
                    k--;
                }
            }
            RT_LOGI("Antialiasing request: %d\n", 1 << a_mode);
        }
    }

    /* apply window scaling and recompute the SIMD-aligned row width */
    x_res = x_res * (w_size != 0 ? w_size : 1);
    y_res = y_res * (w_size != 0 ? w_size : 1);
    x_row = (x_res+RT_SIMD_WIDTH-1) & ~(RT_SIMD_WIDTH-1);
    thnum = t_pool != 0 ? -t_pool : thnum; /* no feedback (< 0) if overridden */

    return 1;
}
/*
* Initialize event loop.
*/
/*
 * Initialize event loop: create the platform object, select the SIMD
 * target and FSAA mode, construct all demo scenes, validate the chosen
 * SIMD target against command-line overrides and apply the initial
 * camera/update-mode/path-tracer settings.
 * Returns 1 on success, 0 on any failure (logged).
 *
 * Fix: catch the engine exception by const reference instead of by value
 * (avoids copying the exception object; C++ Core Guidelines E.15).
 */
rt_si32 main_init()
{
    rt_si32 size, type, simd = 0;
    rt_si32 i, n = RT_ARR_SIZE(sc_rt);

    try
    {
        i = -1; /* marks the platform stage for the catch handler below */
        pfm = new rt_Platform(sys_alloc, sys_free, thnum,
                              init_threads, term_threads,
                              update_scene, render_scene);
    }
    catch (const rt_Exception &e)
    {
        RT_LOGE("Exception in main_init, %s %d: %s\n",
                i+1 ? "scene" : "platform", i+1, e.err);
        return 0;
    }

    simd = pfm->set_simd(simd_init(n_simd, s_type, k_size));

    if (a_mode != pfm->set_fsaa(a_mode))
    {
        RT_LOGI("Requested antialiasing mode not supported, check options\n");
        return 0;
    }
    tile_w = pfm->get_tile_w();

    try
    {
        for (i = 0; i < n; i++)
        {
            sc[i] = new(pfm) rt_Scene(sc_rt[i],
                                      x_res, y_res, x_row, frame, pfm);
        }
        pfm->set_cur_scene(sc[d]);
    }
    catch (const rt_Exception &e)
    {
        RT_LOGE("Exception in main_init, %s %d: %s\n",
                i+1 ? "scene" : "platform", i+1, e.err);
        return 0;
    }

    /* unpack the platform's SIMD selection: size-factor, sub-variant,
     * native width (in 128-bit units) */
    size = (simd >> 16) & 0xFF;
    type = (simd >> 8) & 0xFF;
    simd = simd & 0xFF;

    /* test converted internal SIMD variables against new command-line format */
    if ((k_size != 0 && k_size != size)
    ||  (s_type != 0 && s_type != type)
    ||  (n_simd != 0 && n_simd != simd && n_simd != simd * size))
    {
        RT_LOGI("Chosen SIMD target not supported, check -n/-k/-s options\n");
        return 0;
    }

    /* update state-tracking SIMD variables from currently chosen SIMD target */
    k_size = size;
    s_type = type;
    n_simd = simd;

    /* advance to the camera requested with -c */
    for (; c > 0; c--)
    {
        sc[d]->next_cam();
    }
    c = sc[d]->get_cam_idx();

    /* map -u mode to the scene's update/render optimization flags */
    rt_si32 opts = sc[d]->get_opts();

    switch (u_mode)
    {
        case 6:
        opts |= RT_OPTS_RENDER_EXT0; /* falls through */
        case 5:
        opts |= RT_OPTS_UPDATE_EXT0;
        break;

        case 4:
        opts |= RT_OPTS_RENDER_EXT1; /* falls through */
        case 3:
        opts |= RT_OPTS_UPDATE_EXT3; /* falls through */
        case 2:
        opts |= RT_OPTS_UPDATE_EXT2; /* falls through */
        case 1:
        opts |= RT_OPTS_UPDATE_EXT1;
        break;

        default:
        break;
    }
    sc[d]->set_opts(opts);

    /* enable the path-tracer if requested and supported by the scene */
    rt_si32 q_test = sc[d]->set_pton(q_mode ? m_num : 0);
    if (q_test != (q_mode ? m_num : 0))
    {
        q_mode = RT_FALSE;
        /* to enable path-tracer in a particular scene
         * add RT_OPTS_PT to the list of optimizations
         * to be turned off in scene definition struct */
        RT_LOGI("Quasi-realistic mode: %d (off), %s\n", q_mode,
                "add RT_OPTS_PT per scene");
    }

    /* snapshot current state for change detection in main_step */
    d_prev = d;
    c_prev = c;
    n_prev = n_simd;
    k_prev = k_size;
    s_prev = s_type;
    p_prev = p_mode;
    q_prev = q_mode;

    print_target();

    return 1;
}
/*
* Terminate event loop.
*/
/*
 * Terminate event loop: save the final frame when -i image saving is on,
 * print the average fps, then destroy all scenes and the platform object.
 * Returns 1 on success, 0 when destruction throws (logged).
 *
 * Fix: catch the engine exception by const reference instead of by value
 * (avoids copying the exception object; C++ Core Guidelines E.15).
 */
rt_si32 main_term()
{
    if (img_id >= 0 && img_id <= 999)
    {
        sc[d]->save_frame(img_id++);
    }

    print_avgfps();

    rt_si32 i, n = RT_ARR_SIZE(sc_rt);

    try
    {
        for (i = 0; i < n; i++)
        {
            if (sc[i] == RT_NULL)
            {
                continue;
            }
            delete sc[i];
        }

        i = -1; /* marks the platform stage for the catch handler below */
        delete pfm;
    }
    catch (const rt_Exception &e)
    {
        RT_LOGE("Exception in main_term, %s %d: %s\n",
                i+1 ? "scene" : "platform", i+1, e.err);
        return 0;
    }

    return 1;
}
#endif /* RT_ROOT_H */
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
| 32.548203 | 81 | 0.405708 | [
"render"
] |
eac38b1e25e350a676237f979f69a748497cdf75 | 1,845 | h | C | src/components/autofill/browser/autofill_scanner.h | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 9 | 2018-09-21T05:36:12.000Z | 2021-11-15T15:14:36.000Z | components/autofill/browser/autofill_scanner.h | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | components/autofill/browser/autofill_scanner.h | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2018-11-28T14:54:13.000Z | 2020-07-02T07:36:07.000Z | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_AUTOFILL_BROWSER_AUTOFILL_SCANNER_H_
#define COMPONENTS_AUTOFILL_BROWSER_AUTOFILL_SCANNER_H_
#include <vector>
#include "base/basictypes.h"
#include "base/string16.h"
namespace autofill {
class AutofillField;
// A helper class for parsing a stream of |AutofillField|'s with lookahead.
class AutofillScanner {
public:
explicit AutofillScanner(const std::vector<const AutofillField*>& fields);
~AutofillScanner();
// Advances the cursor by one step, if possible.
void Advance();
// Returns the current field in the stream, or |NULL| if there are no more
// fields in the stream.
const AutofillField* Cursor() const;
// Returns |true| if the cursor has reached the end of the stream.
bool IsEnd() const;
// Restores the most recently saved cursor. See also |SaveCursor()|.
void Rewind();
// Repositions the cursor to the specified |index|. See also |SaveCursor()|.
void RewindTo(size_t index);
// Saves and returns the current cursor position. See also |Rewind()| and
// |RewindTo()|.
size_t SaveCursor();
private:
// Indicates the current position in the stream, represented as a vector.
std::vector<const AutofillField*>::const_iterator cursor_;
// The most recently saved cursor.
std::vector<const AutofillField*>::const_iterator saved_cursor_;
// The beginning pointer for the stream.
const std::vector<const AutofillField*>::const_iterator begin_;
// The past-the-end pointer for the stream.
const std::vector<const AutofillField*>::const_iterator end_;
DISALLOW_COPY_AND_ASSIGN(AutofillScanner);
};
} // namespace autofill
#endif // COMPONENTS_AUTOFILL_BROWSER_AUTOFILL_SCANNER_H_
| 29.758065 | 78 | 0.746883 | [
"vector"
] |
eac960f7ffab314ddb333994935238177e01b980 | 2,647 | h | C | Game_exe/release_mode/windows/obj/include/flixel/system/frontEnds/WatchFrontEnd.h | hisatsuga/Salty-Psyche-Engine-Port-Main | 0c6afc6ef57f6f6a8b83ff23bb6a26bb05117ab7 | [
"Apache-2.0"
] | 1 | 2021-07-19T05:10:43.000Z | 2021-07-19T05:10:43.000Z | export/release/windows/obj/include/flixel/system/frontEnds/WatchFrontEnd.h | Tyrcnex/tai-mod | b83152693bb3139ee2ae73002623934f07d35baf | [
"Apache-2.0"
] | null | null | null | export/release/windows/obj/include/flixel/system/frontEnds/WatchFrontEnd.h | Tyrcnex/tai-mod | b83152693bb3139ee2ae73002623934f07d35baf | [
"Apache-2.0"
] | null | null | null | #ifndef INCLUDED_flixel_system_frontEnds_WatchFrontEnd
#define INCLUDED_flixel_system_frontEnds_WatchFrontEnd
#ifndef HXCPP_H
#include <hxcpp.h>
#endif
HX_DECLARE_STACK_FRAME(_hx_pos_25766fbc3da200fe_10_new)
HX_DECLARE_CLASS3(flixel,_hx_system,frontEnds,WatchFrontEnd)
namespace flixel{
namespace _hx_system{
namespace frontEnds{
class HXCPP_CLASS_ATTRIBUTES WatchFrontEnd_obj : public ::hx::Object
{
public:
typedef ::hx::Object super;
typedef WatchFrontEnd_obj OBJ_;
WatchFrontEnd_obj();
public:
enum { _hx_ClassId = 0x40eb558f };
void __construct();
inline void *operator new(size_t inSize, bool inContainer=false,const char *inName="flixel.system.frontEnds.WatchFrontEnd")
{ return ::hx::Object::operator new(inSize,inContainer,inName); }
inline void *operator new(size_t inSize, int extra)
{ return ::hx::Object::operator new(inSize+extra,false,"flixel.system.frontEnds.WatchFrontEnd"); }
inline static ::hx::ObjectPtr< WatchFrontEnd_obj > __new() {
::hx::ObjectPtr< WatchFrontEnd_obj > __this = new WatchFrontEnd_obj();
__this->__construct();
return __this;
}
inline static ::hx::ObjectPtr< WatchFrontEnd_obj > __alloc(::hx::Ctx *_hx_ctx) {
WatchFrontEnd_obj *__this = (WatchFrontEnd_obj*)(::hx::Ctx::alloc(_hx_ctx, sizeof(WatchFrontEnd_obj), false, "flixel.system.frontEnds.WatchFrontEnd"));
*(void **)__this = WatchFrontEnd_obj::_hx_vtable;
{
HX_STACKFRAME(&_hx_pos_25766fbc3da200fe_10_new)
}
return __this;
}
static void * _hx_vtable;
static Dynamic __CreateEmpty();
static Dynamic __Create(::hx::DynamicArray inArgs);
//~WatchFrontEnd_obj();
HX_DO_RTTI_ALL;
::hx::Val __Field(const ::String &inString, ::hx::PropertyAccess inCallProp);
static void __register();
bool _hx_isInstanceOf(int inClassId);
::String __ToString() const { return HX_("WatchFrontEnd",01,3c,87,3e); }
void add( ::Dynamic object,::String field,::String displayName);
::Dynamic add_dyn();
void remove( ::Dynamic object,::String field);
::Dynamic remove_dyn();
void addQuick(::String displayName, ::Dynamic value);
::Dynamic addQuick_dyn();
void removeQuick(::String displayName);
::Dynamic removeQuick_dyn();
void addExpression(::String expression,::String displayName);
::Dynamic addExpression_dyn();
void removeExpression(::String displayName);
::Dynamic removeExpression_dyn();
void addMouse();
::Dynamic addMouse_dyn();
void removeMouse();
::Dynamic removeMouse_dyn();
};
} // end namespace flixel
} // end namespace system
} // end namespace frontEnds
#endif /* INCLUDED_flixel_system_frontEnds_WatchFrontEnd */
| 29.411111 | 154 | 0.738194 | [
"object"
] |
eacb6550fa0c15c5af2acaf4823b6fa930828fd1 | 3,178 | h | C | src/WebSocket.h | l1kw1d/uWebSockets | 3a0d77a1a5f0bf4b73c64bbeea4c78ccf3e596a9 | [
"Zlib"
] | 686 | 2017-01-28T20:31:00.000Z | 2022-03-26T09:02:12.000Z | src/WebSocket.h | l1kw1d/uWebSockets | 3a0d77a1a5f0bf4b73c64bbeea4c78ccf3e596a9 | [
"Zlib"
] | 62 | 2017-01-31T21:44:23.000Z | 2020-11-09T03:52:53.000Z | src/WebSocket.h | l1kw1d/uWebSockets | 3a0d77a1a5f0bf4b73c64bbeea4c78ccf3e596a9 | [
"Zlib"
] | 132 | 2017-01-31T20:28:12.000Z | 2022-02-13T07:09:31.000Z | #ifndef WEBSOCKET_UWS_H
#define WEBSOCKET_UWS_H
#include "WebSocketProtocol.h"
#include "Socket.h"
namespace uWS {
template <bool isServer>
struct Group;
template <const bool isServer>
struct WIN32_EXPORT WebSocket : protected uS::Socket {
struct Data : uS::SocketData, WebSocketProtocol<isServer> {
std::string fragmentBuffer, controlBuffer;
enum CompressionStatus : char {
DISABLED,
ENABLED,
COMPRESSED_FRAME
} compressionStatus;
bool hasOutstandingPong = false;
Data(bool perMessageDeflate, uS::SocketData *socketData) : uS::SocketData(*socketData) {
compressionStatus = perMessageDeflate ? CompressionStatus::ENABLED : CompressionStatus::DISABLED;
}
};
WebSocket(uS::Socket s = nullptr) : uS::Socket(s) {
}
struct PreparedMessage {
char *buffer;
size_t length;
int references;
void(*callback)(void *webSocket, void *data, bool cancelled, void *reserved);
};
using uS::Socket::getUserData;
using uS::Socket::setUserData;
using uS::Socket::getAddress;
using uS::Socket::Address;
void transfer(Group<isServer> *group) {
((Group<isServer> *) getSocketData()->nodeData)->removeWebSocket(p);
uS::Socket::transfer((uS::NodeData *) group, [](uv_poll_t *p) {
uS::Socket s(p);
((Group<isServer> *) s.getSocketData()->nodeData)->addWebSocket(s);
});
}
uv_poll_t *getPollHandle() const {return p;}
void terminate();
void close(int code = 1000, const char *message = nullptr, size_t length = 0);
void ping(const char *message) {send(message, OpCode::PING);}
void send(const char *message, OpCode opCode = OpCode::TEXT) {send(message, strlen(message), opCode);}
void send(const char *message, size_t length, OpCode opCode, void(*callback)(void *webSocket, void *data, bool cancelled, void *reserved) = nullptr, void *callbackData = nullptr);
static PreparedMessage *prepareMessage(char *data, size_t length, OpCode opCode, bool compressed, void(*callback)(void *webSocket, void *data, bool cancelled, void *reserved) = nullptr);
static PreparedMessage *prepareMessageBatch(std::vector<std::string> &messages, std::vector<int> &excludedMessages, OpCode opCode, bool compressed, void(*callback)(void *webSocket, void *data, bool cancelled, void *reserved) = nullptr);
void sendPrepared(PreparedMessage *preparedMessage, void *callbackData = nullptr);
static void finalizeMessage(PreparedMessage *preparedMessage);
bool operator==(const WebSocket &other) const {return p == other.p;}
bool operator<(const WebSocket &other) const {return p < other.p;}
private:
friend class uS::Socket;
template <bool> friend struct Group;
static void onData(uS::Socket s, char *data, int length);
static void onEnd(uS::Socket s);
};
}
namespace std {
template <bool isServer>
struct hash<uWS::WebSocket<isServer>> {
std::size_t operator()(const uWS::WebSocket<isServer> &webSocket) const
{
return std::hash<uv_poll_t *>()(webSocket.getPollHandle());
}
};
}
#endif // WEBSOCKET_UWS_H
| 36.528736 | 240 | 0.680302 | [
"vector"
] |
eace146b623a976417bd39872a9463b489dce910 | 4,298 | h | C | exercises/Optimization_Based_Planning/template_ws/src/pick_and_place/include/test_bed_core/trajopt_pick_and_place_constructor.h | JonathanPlasse/industrial_training | 2de2ecbc8d1f7d2b4b724cc6badd003ca2d653d7 | [
"Apache-2.0"
] | 324 | 2015-01-31T07:35:37.000Z | 2022-03-27T09:30:14.000Z | exercises/Optimization_Based_Planning/template_ws/src/pick_and_place/include/test_bed_core/trajopt_pick_and_place_constructor.h | AhmedMounir/industrial_training | e6761c7bee65d3802fee6cf7c99e3113d3dc1af2 | [
"Apache-2.0"
] | 226 | 2015-01-20T17:15:56.000Z | 2022-01-19T04:55:23.000Z | exercises/Optimization_Based_Planning/template_ws/src/pick_and_place/include/test_bed_core/trajopt_pick_and_place_constructor.h | AhmedMounir/industrial_training | e6761c7bee65d3802fee6cf7c99e3113d3dc1af2 | [
"Apache-2.0"
] | 219 | 2015-03-29T03:05:11.000Z | 2022-03-23T11:12:43.000Z | #pragma once
#include <tesseract_planning/trajopt/trajopt_planner.h>
#include <tesseract_ros/kdl/kdl_env.h>
#include <trajopt/problem_description.hpp>
/**
* @brief The TrajoptPickAndPlaceConstructor class
*/
class TrajoptPickAndPlaceConstructor
{
private:
/**< @brief Problem Construction Info */
std::string manipulator_, ee_link_, pick_object_;
Eigen::Isometry3d tcp_; /**< @brief Tool center point offset */
tesseract::BasicEnvConstPtr env_; /**< @brief Environment description */
tesseract::BasicKinConstPtr kin_; /**< @brief Kinematics description */
public:
// Needed because an Eigen::Isometry3d is a class member
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
TrajoptPickAndPlaceConstructor(tesseract::BasicEnvConstPtr env,
std::string manipulator,
std::string ee_link,
std::string pick_object,
Eigen::Isometry3d tcp = Eigen::Isometry3d::Identity());
/**
* @brief addTotalTimeCost - Adds cost to the overall total time to elapsed in the trajectory
* @param pci - The trajopt problem construction info to which the cost is added
* @param coeff - Used to scale this cost relative to other costs
*/
void addTotalTimeCost(trajopt::ProblemConstructionInfo& pci, double coeff);
/**
* @brief Adds a single waypoint at the desired pose
* @param pci - The trajopt problem construction info to which the cost is added
* @param pose - The target pose
* @param time_step - Time step at which the cost applies
*/
void addSingleWaypoint(trajopt::ProblemConstructionInfo& pci,
Eigen::Isometry3d pose,
int time_step);
/**
* @brief Adds a linear move to the problem construction info
* @param pci - The trajopt problem construction info to which the move is added
* @param start_pose - The starting pose of the linear move
* @param end_pose - The end pose of the linear move
* @param num_steps - Number of steps for the move.
* @param first_time_step - Time step at which the move is added
*/
void addLinearMotion(trajopt::ProblemConstructionInfo& pci,
Eigen::Isometry3d start_pose,
Eigen::Isometry3d end_pose,
int num_steps,
int first_time_step);
/**
* @brief Generates a trajopt problem for a "pick" move
* Consists of 2 phases - a free space move to approach_pose and a linear move to final_pose
* @param approach_pose - Pose moved to prior to picking
* @param final_pose - Pose moved to for the pick operation
* @param steps_per_phase - Number of steps per phase. Total move is steps_per_phase*2
* @return
*/
trajopt::TrajOptProbPtr generatePickProblem(Eigen::Isometry3d& approach_pose,
Eigen::Isometry3d& final_pose,
int steps_per_phase);
/**
* @brief Generates a trajopt problem for a "place" move
* Consists of 3 phases - linearly lifting the object to retreat_pose, a free space move to approach_pose, and then
* linearly placing the object to final_pose
* @param retreat_pose - Pose to which the object is moved from it's starting pose
* @param approach_pose - Pose to which the object is moved prior to placing
* @param final_pose - Final "placed" pose
* @param steps_per_phase - Number of steps per move phase. Total move length is steps_per_phase*3
* @return
*/
trajopt::TrajOptProbPtr generatePlaceProblem(Eigen::Isometry3d& retreat_pose,
Eigen::Isometry3d& approach_pose,
Eigen::Isometry3d& final_pose,
int steps_per_phase);
/**
* @brief Uses trajopt to calculate inverse kinematics
* Calculates the IK solution that is closest to the current joint position. This is useful for creating a
* STRAIGHT_LINE constraint
* @param end_pose - The pose of the point for which the IK is desired.
* @return
*/
Eigen::VectorXd numericalIK(Eigen::Isometry3d& end_pose);
};
| 44.770833 | 117 | 0.64402 | [
"object"
] |
eace87cd64b77a66a54c9f2e22e562eca6adc96e | 4,644 | c | C | qmk_firmware/keyboards/mechwild/murphpad/keymaps/default/keymap.c | DanTupi/personal_setup | 911b4951e4d8b78d6ea8ca335229e2e970fda871 | [
"MIT"
] | null | null | null | qmk_firmware/keyboards/mechwild/murphpad/keymaps/default/keymap.c | DanTupi/personal_setup | 911b4951e4d8b78d6ea8ca335229e2e970fda871 | [
"MIT"
] | null | null | null | qmk_firmware/keyboards/mechwild/murphpad/keymaps/default/keymap.c | DanTupi/personal_setup | 911b4951e4d8b78d6ea8ca335229e2e970fda871 | [
"MIT"
] | null | null | null | /* Copyright 2021 Kyle McCreery
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include QMK_KEYBOARD_H
// Defines names for use in layer keycodes and the keymap
enum layer_names {
_BASE,
_FN1,
_FN2,
_FN3
};
const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = {
/* Base */
[_BASE] = LAYOUT(
KC_F1, KC_F2, KC_F3, KC_F4,
KC_NLCK, KC_PSLS, KC_PAST, KC_PMNS,
KC_P7, KC_P8, KC_P9, KC_PPLS,
KC_MUTE, KC_P4, KC_P5, KC_P6, _______,
MO(_FN1), KC_P1, KC_P2, KC_P3, KC_PENT,
KC_BSPC, KC_P0, _______, KC_PDOT, _______,
KC_F5, KC_F6, KC_F7
),
[_FN1] = LAYOUT(
_______, _______, _______, _______,
_______, _______, _______, _______,
RGB_HUD, RGB_SPI, RGB_HUI, _______,
_______, RGB_RMOD, RGB_TOG, RGB_MOD, _______,
_______, RGB_VAD, RGB_SPD, RGB_VAI, _______,
_______, RGB_SAD, _______, RGB_SAI, _______,
_______, _______, _______
),
[_FN2] = LAYOUT(
_______, _______, _______, _______,
_______, _______, _______, _______,
_______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______
),
[_FN3] = LAYOUT(
_______, _______, _______, _______,
_______, _______, _______, _______,
_______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______, _______, _______,
_______, _______, _______
)
};
#ifdef ENCODER_ENABLE
bool encoder_update_user(uint8_t index, bool clockwise) {
switch (index) {
case 0:
if (clockwise) {
tap_code(KC_VOLU);
} else {
tap_code(KC_VOLD);
}
break;
case 1:
if (clockwise) {
tap_code(KC_BRIU);
} else {
tap_code(KC_BRID);
}
break;
}
return true;
}
#endif
#ifdef OLED_DRIVER_ENABLE
oled_rotation_t oled_init_user(oled_rotation_t rotation) {
return OLED_ROTATION_270; // flips the display 270 degrees
}
static void render_logo(void) { // Render MechWild "MW" Logo
static const char PROGMEM logo_1[] = {0x8A, 0x8B, 0x8C, 0x8D, 0x00};
static const char PROGMEM logo_2[] = {0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0x00};
static const char PROGMEM logo_3[] = {0xCA, 0xCB, 0xCC, 0xCD, 0x00};
static const char PROGMEM logo_4[] = {0x20, 0x8E, 0x8F, 0x90, 0x00};
oled_set_cursor(0,0);
oled_write_P(logo_1, false);
oled_set_cursor(0,1);
oled_write_P(logo_2, false);
oled_set_cursor(0,2);
oled_write_P(logo_3, false);
oled_set_cursor(0,3);
oled_write_P(logo_4, false);
}
void oled_task_user(void) {
render_logo();
oled_set_cursor(0,6);
oled_write_ln_P(PSTR("Layer"), false);
switch (get_highest_layer(layer_state)) {
case _BASE:
oled_write_ln_P(PSTR("Base"), false);
break;
case _FN1:
oled_write_ln_P(PSTR("FN 1"), false);
break;
case _FN2:
oled_write_ln_P(PSTR("FN 2"), false);
break;
case _FN3:
oled_write_ln_P(PSTR("FN 3"), false);
break;
default:
oled_write_ln_P(PSTR("Undef"), false);
}
oled_write_ln_P(PSTR(""), false);
// Host Keyboard LED Status
led_t led_state = host_keyboard_led_state();
oled_write_ln_P(led_state.num_lock ? PSTR("NUM ") : PSTR(" "), false);
oled_write_ln_P(led_state.caps_lock ? PSTR("CAP ") : PSTR(" "), false);
oled_write_ln_P(led_state.scroll_lock ? PSTR("SCR ") : PSTR(" "), false);
}
#endif
| 31.378378 | 80 | 0.599053 | [
"render"
] |
ead1a5d4a89cab26c6e9bfa6a7677292307c4ab4 | 39,410 | h | C | src/cxx_supportlib/UnionStationFilterSupport.h | freshdesk/passenger | 7f6fbb6e8136b41cb0ad3815465fa307528127ee | [
"MIT"
] | null | null | null | src/cxx_supportlib/UnionStationFilterSupport.h | freshdesk/passenger | 7f6fbb6e8136b41cb0ad3815465fa307528127ee | [
"MIT"
] | null | null | null | src/cxx_supportlib/UnionStationFilterSupport.h | freshdesk/passenger | 7f6fbb6e8136b41cb0ad3815465fa307528127ee | [
"MIT"
] | 1 | 2020-11-04T07:56:27.000Z | 2020-11-04T07:56:27.000Z | /*
* Phusion Passenger - https://www.phusionpassenger.com/
* Copyright (c) 2011-2015 Phusion Holding B.V.
*
* "Passenger", "Phusion Passenger" and "Union Station" are registered
* trademarks of Phusion Holding B.V.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef _PASSENGER_FILTER_SUPPORT_H_
#define _PASSENGER_FILTER_SUPPORT_H_
#ifdef __cplusplus
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <oxt/tracable_exception.hpp>
#include <string>
#include <set>
// Checking for _PCREPOSIX_H avoids conflicts with headers provided by Apache.
// https://code.google.com/p/phusion-passenger/issues/detail?id=651
#ifndef _PCREPOSIX_H
#include <boost/regex.h>
#endif
#include <cstdio>
#include <cstring>
#include <string.h>
#include <stdlib.h>
#include <StaticString.h>
#include <Exceptions.h>
#include <Utils/StrIntUtils.h>
#include <Utils/ReleaseableScopedPointer.h>
namespace Passenger {
namespace FilterSupport {
using namespace std;
using namespace boost;
using namespace oxt;
class Tokenizer {
public:
enum TokenType {
NONE,
NOT,
AND,
OR,
MATCHES,
NOT_MATCHES,
EQUALS,
NOT_EQUALS,
GREATER_THAN,
GREATER_THAN_OR_EQUALS,
LESS_THAN,
LESS_THAN_OR_EQUALS,
LPARENTHESIS,
RPARENTHESIS,
COMMA,
REGEXP,
STRING,
INTEGER,
TRUE_LIT,
FALSE_LIT,
IDENTIFIER,
END_OF_DATA
};
enum TokenOptions {
NO_OPTIONS = 0,
REGEXP_OPTION_CASE_INSENSITIVE = 1
};
struct Token {
TokenType type;
int options;
unsigned int pos;
unsigned int size;
StaticString rawValue;
Token() {
type = NONE;
}
Token(TokenType _type, unsigned int _pos, unsigned int _size, const StaticString &_rawValue)
: type(_type),
options(NO_OPTIONS),
pos(_pos),
size(_size),
rawValue(_rawValue)
{ }
string toString() const {
return Tokenizer::typeToString(type);
}
};
private:
StaticString data;
bool debug;
unsigned int pos;
static bool isWhitespace(char ch) {
return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n';
}
void skipWhitespaces() {
while (pos < data.size() && isWhitespace(data[pos])) {
pos++;
}
}
unsigned int available() const {
return data.size() - pos;
}
char current() const {
return data[pos];
}
char next() const {
return data[pos + 1];
}
static bool isIdentifierChar(char ch) {
return (ch >= 'a' && ch <= 'z')
|| (ch >= 'A' && ch <= 'Z')
|| (ch >= '0' && ch <= '9')
|| ch == '_';
}
static bool isDigit(char ch) {
return ch >= '0' && ch <= '9';
}
Token logToken(const Token &token) const {
if (debug) {
printf("# Token: %s\n", token.toString().c_str());
}
return token;
}
void raiseSyntaxError(const string &message = "") {
string msg = "Syntax error at character " + toString(pos + 1);
if (!message.empty()) {
msg.append(": ");
msg.append(message);
}
throw SyntaxError(msg);
}
void expectingAtLeast(unsigned int size) {
if (available() < size) {
raiseSyntaxError("at least " + toString(size) +
" more characters expected");
}
}
void expectingNextChar(char ch) {
expectingAtLeast(2);
if (next() != ch) {
raiseSyntaxError("expected '" + toString(ch) +
"', but found '" + toString(next()) +
"'");
}
}
Token matchToken(TokenType type, unsigned int size = 0) {
unsigned int oldPos = pos;
pos += size;
return Token(type, oldPos, size, data.substr(oldPos, size));
}
Token matchTokensStartingWithNegation() {
expectingAtLeast(2);
switch (next()) {
case '~':
return matchToken(NOT_MATCHES, 2);
case '=':
return matchToken(NOT_EQUALS, 2);
default:
raiseSyntaxError("unrecognized operator '" + data.substr(pos, 2) + "'");
return Token(); // Shut up compiler warning.
};
}
Token matchAnd() {
expectingNextChar('&');
return matchToken(AND, 2);
}
Token matchOr() {
expectingNextChar('|');
return matchToken(OR, 2);
}
Token matchTokensStartingWithEquals() {
expectingAtLeast(2);
switch (next()) {
case '~':
return matchToken(MATCHES, 2);
case '=':
return matchToken(EQUALS, 2);
default:
raiseSyntaxError("unrecognized operator '" + data.substr(pos, 2) + "'");
return Token(); // Shut up compiler warning.
}
}
Token matchTokensStartingWithGreaterThan() {
if (available() == 0 || next() != '=') {
return matchToken(GREATER_THAN, 1);
} else {
return matchToken(GREATER_THAN_OR_EQUALS, 2);
}
}
Token matchTokensStartingWithLessThan() {
if (available() == 0 || next() != '=') {
return matchToken(LESS_THAN, 1);
} else {
return matchToken(LESS_THAN_OR_EQUALS, 2);
}
}
Token matchRegexp(char terminator) {
unsigned int start = pos;
bool endFound = false;
// Match initial quote slash.
pos++;
// Match rest of regexp including terminating slash.
while (pos < data.size() && !endFound) {
char ch = current();
if (ch == '\\') {
pos++;
if (pos >= data.size()) {
raiseSyntaxError("unterminated regular expression");
} else {
pos++;
}
} else if (ch == terminator) {
pos++;
endFound = true;
} else {
pos++;
}
}
if (endFound) {
Token t(REGEXP, start, pos - start, data.substr(start, pos - start));
// Match regexp options.
endFound = false;
while (pos < data.size() && !endFound) {
char ch = current();
if (ch == 'i') {
t.options |= Tokenizer::REGEXP_OPTION_CASE_INSENSITIVE;
} else if (isWhitespace(ch)) {
endFound = true;
}
pos++;
}
return t;
} else {
raiseSyntaxError("unterminated regular expression");
return Token(); // Shut up compiler warning.
}
}
Token matchString(char terminator) {
unsigned int start = pos;
bool endFound = false;
// Match initial quote character.
pos++;
// Match rest of string including terminating quote.
while (pos < data.size() && !endFound) {
char ch = current();
if (ch == '\\') {
pos++;
if (pos >= data.size()) {
raiseSyntaxError("unterminated string");
} else {
pos++;
}
} else if (ch == terminator) {
pos++;
endFound = true;
} else {
pos++;
}
}
if (endFound) {
return Token(STRING, start, pos - start, data.substr(start, pos - start));
} else {
raiseSyntaxError("unterminated string");
return Token(); // Shut up compiler warning.
}
}
Token matchInteger() {
unsigned int start = pos;
// Accept initial minus or digit.
pos++;
while (pos < data.size() && isDigit(data[pos])) {
pos++;
}
return Token(INTEGER, start, pos - start, data.substr(start, pos - start));
}
Token matchIdentifier() {
char ch = current();
if ((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
ch == '_') {
unsigned int start = pos;
pos++;
while (pos < data.size() && isIdentifierChar(current())) {
pos++;
}
StaticString val = data.substr(start, pos - start);
if (val == "true") {
return Token(TRUE_LIT, start, pos - start, val);
} else if (val == "false") {
return Token(FALSE_LIT, start, pos - start, val);
} else {
return Token(IDENTIFIER, start, pos - start, val);
}
} else {
raiseSyntaxError("Identifier expected, but got an unknown token");
return Token(); // Shut up compiler warning.
}
}
public:
Tokenizer(const StaticString &data, bool debug = false) {
this->data = data;
this->debug = debug;
pos = 0;
}
Token getNext() {
skipWhitespaces();
if (pos >= data.size()) {
return logToken(Token(END_OF_DATA, data.size(), 0, ""));
}
switch (current()) {
case '!':
return logToken(matchTokensStartingWithNegation());
case '&':
return logToken(matchAnd());
case '|':
return logToken(matchOr());
case '=':
return logToken(matchTokensStartingWithEquals());
case '>':
return logToken(matchTokensStartingWithGreaterThan());
case '<':
return logToken(matchTokensStartingWithLessThan());
case '(':
return logToken(matchToken(LPARENTHESIS, 1));
case ')':
return logToken(matchToken(RPARENTHESIS, 1));
case ',':
return logToken(matchToken(COMMA, 1));
case '/':
return logToken(matchRegexp('/'));
case '%':
expectingAtLeast(3);
if (memcmp(data.data() + pos, "%r{", 3) != 0) {
raiseSyntaxError("expected '%r{', but found '" +
data.substr(pos, 3) + "'");
}
pos += 2;
return logToken(matchRegexp('}'));
case '"':
return logToken(matchString('"'));
case '\'':
return logToken(matchString('\''));
case '-':
return logToken(matchInteger());
default:
if (isDigit(current())) {
return logToken(matchInteger());
} else {
return logToken(matchIdentifier());
}
}
}
static string typeToString(TokenType type) {
switch (type) {
case NONE:
return "NONE";
case NOT:
return "NOT";
case AND:
return "AND";
case OR:
return "OR";
case MATCHES:
return "MATCHES";
case NOT_MATCHES:
return "NOT_MATCHES";
case EQUALS:
return "EQUALS";
case NOT_EQUALS:
return "NOT_EQUALS";
case GREATER_THAN:
return "GREATER_THAN";
case GREATER_THAN_OR_EQUALS:
return "GREATER_THAN_OR_EQUALS";
case LESS_THAN:
return "LESS_THAN";
case LESS_THAN_OR_EQUALS:
return "LESS_THAN_OR_EQUALS";
case LPARENTHESIS:
return "LPARENTHESIS";
case RPARENTHESIS:
return "RPARENTHESIS";
case COMMA:
return "COMMA";
case REGEXP:
return "REGEXP";
case STRING:
return "STRING";
case INTEGER:
return "INTEGER";
case TRUE_LIT:
return "TRUE";
case FALSE_LIT:
return "FALSE";
case IDENTIFIER:
return "IDENTIFIER";
case END_OF_DATA:
return "END_OF_DATA";
default:
return "(unknown)";
}
}
};
enum ValueType {
REGEXP_TYPE,
STRING_TYPE,
INTEGER_TYPE,
BOOLEAN_TYPE,
UNKNOWN_TYPE
};
class Context {
public:
enum FieldIdentifier {
URI,
CONTROLLER,
RESPONSE_TIME,
RESPONSE_TIME_WITHOUT_GC,
STATUS,
STATUS_CODE,
GC_TIME
};
virtual ~Context() { }
virtual string getURI() const = 0;
virtual string getController() const = 0;
virtual int getResponseTime() const = 0;
virtual string getStatus() const = 0;
virtual int getStatusCode() const = 0;
virtual int getGcTime() const = 0;
virtual bool hasHint(const string &name) const = 0;
int getResponseTimeWithoutGc() const {
return getResponseTime() - getGcTime();
}
string queryStringField(FieldIdentifier id) const {
switch (id) {
case URI:
return getURI();
case CONTROLLER:
return getController();
case RESPONSE_TIME:
return toString(getResponseTime());
case RESPONSE_TIME_WITHOUT_GC:
return toString(getResponseTimeWithoutGc());
case STATUS:
return getStatus();
case STATUS_CODE:
return toString(getStatusCode());
case GC_TIME:
return toString(getGcTime());
default:
return "";
}
}
int queryIntField(FieldIdentifier id) const {
switch (id) {
case RESPONSE_TIME:
return getResponseTime();
case RESPONSE_TIME_WITHOUT_GC:
return getResponseTimeWithoutGc();
case STATUS_CODE:
return getStatusCode();
case GC_TIME:
return getGcTime();
default:
return 0;
}
}
bool queryBoolField(FieldIdentifier id) const {
switch (id) {
case URI:
return !getURI().empty();
case CONTROLLER:
return !getController().empty();
case RESPONSE_TIME:
return getResponseTime() > 0;
case RESPONSE_TIME_WITHOUT_GC:
return getResponseTimeWithoutGc() > 0;
case STATUS:
return !getStatus().empty();
case STATUS_CODE:
return getStatusCode() > 0;
case GC_TIME:
return getGcTime() > 0;
default:
return false;
}
}
static ValueType getFieldType(FieldIdentifier id) {
switch (id) {
case URI:
case CONTROLLER:
case STATUS:
return STRING_TYPE;
case RESPONSE_TIME:
case RESPONSE_TIME_WITHOUT_GC:
case STATUS_CODE:
case GC_TIME:
return INTEGER_TYPE;
default:
return UNKNOWN_TYPE;
}
}
};
class SimpleContext: public Context {
public:
string uri;
string controller;
string status;
int responseTime;
int statusCode;
int gcTime;
set<string> hints;
SimpleContext() {
responseTime = 0;
statusCode = 0;
gcTime = 0;
}
virtual string getURI() const {
return uri;
}
virtual string getController() const {
return controller;
}
virtual int getResponseTime() const {
return responseTime;
}
virtual string getStatus() const {
return status;
}
virtual int getStatusCode() const {
return statusCode;
}
virtual int getGcTime() const {
return gcTime;
}
virtual bool hasHint(const string &name) const {
return hints.find(name) != hints.end();
}
};
/**
 * A Context implementation that lazily parses an in-memory transaction
 * log. Each log line has the form "<txnId> <timestamp> <writeCount> <data>"
 * where timestamp and writeCount are base-36 encoded. Parsing happens on
 * first field access and the result is cached in a SimpleContext.
 *
 * NOTE(review): this class owns `parsedData` but has no copy constructor /
 * assignment operator, so copying one would double-free — confirm callers
 * never copy it, or disable copying.
 */
class ContextFromLog: public Context {
private:
    StaticString logData;
    // Lazily-built parse result; owned by this object, freed in the destructor.
    mutable SimpleContext *parsedData;

    /** Timestamp bookkeeping accumulated while scanning the log. */
    struct ParseState {
        unsigned long long requestProcessingStart;
        unsigned long long requestProcessingEnd;
        unsigned long long smallestTimestamp;
        unsigned long long largestTimestamp;
        unsigned long long gcTimeStart;
        unsigned long long gcTimeEnd;
    };

    /**
     * Interpret one log line's data payload, filling in ctx fields and/or
     * state timestamps depending on the event type. Also tracks the
     * smallest/largest line timestamp seen so far as a fallback for
     * computing the response time.
     */
    static void parseLine(const StaticString &txnId, unsigned long long timestamp,
        const StaticString &data, SimpleContext &ctx, ParseState &state)
    {
        if (startsWith(data, "BEGIN: request processing")) {
            state.requestProcessingStart = extractEventTimestamp(data);
        } else if (startsWith(data, "END: request processing")
            || startsWith(data, "FAIL: request processing")) {
            state.requestProcessingEnd = extractEventTimestamp(data);
        } else if (startsWith(data, "URI: ")) {
            // "+ 2" skips the ": " separator.
            ctx.uri = data.substr(data.find(':') + 2);
        } else if (startsWith(data, "Controller action: ")) {
            StaticString value = data.substr(data.find(':') + 2);
            // Only the part before '#' (the class name) is the controller.
            size_t pos = value.find('#');
            if (pos != string::npos) {
                ctx.controller = value.substr(0, pos);
            }
        } else if (startsWith(data, "Status: ")) {
            StaticString value = data.substr(data.find(':') + 2);
            ctx.status = value;
            ctx.statusCode = stringToInt(value);
        } else if (startsWith(data, "Initial GC time: ")) {
            StaticString value = data.substr(data.find(':') + 2);
            state.gcTimeStart = stringToULL(value);
        } else if (startsWith(data, "Final GC time: ")) {
            StaticString value = data.substr(data.find(':') + 2);
            state.gcTimeEnd = stringToULL(value);
        }
        if (state.smallestTimestamp == 0 || timestamp < state.smallestTimestamp) {
            state.smallestTimestamp = timestamp;
        }
        if (timestamp > state.largestTimestamp) {
            state.largestTimestamp = timestamp;
        }
    }

    /**
     * Scan the whole log, line by line, and derive the context fields.
     * Response time prefers explicit BEGIN/END event timestamps and falls
     * back to the spread between the smallest and largest line timestamps.
     */
    static void reallyParse(const StaticString &data, SimpleContext &ctx) {
        const char *current = data.data();
        const char *end = data.data() + data.size();
        ParseState state;
        memset(&state, 0, sizeof(state));
        while (current < end) {
            current = skipNewlines(current, end);
            if (current < end) {
                const char *endOfLine = findEndOfLine(current, end);
                StaticString line(current, endOfLine - current);
                if (!line.empty()) {
                    StaticString txnId;
                    unsigned long long timestamp;
                    unsigned int writeCount;
                    StaticString lineData;
                    // If we want to do more complicated analysis we should sort
                    // the lines but for the purposes of ContextFromLog
                    // analyzing the data without sorting is good enough.
                    if (splitLine(line, txnId, timestamp, writeCount, lineData)) {
                        parseLine(txnId, timestamp, lineData, ctx,
                            state);
                    }
                }
                current = endOfLine;
            }
        }
        if (state.requestProcessingEnd != 0) {
            ctx.responseTime = int(state.requestProcessingEnd -
                state.requestProcessingStart);
        } else if (state.smallestTimestamp != 0) {
            ctx.responseTime = state.largestTimestamp - state.smallestTimestamp;
        }
        if (state.gcTimeEnd != 0) {
            ctx.gcTime = state.gcTimeEnd - state.gcTimeStart;
        }
    }

    /**
     * Split "<txnId> <timestamp> <writeCount> <data>" into its components.
     * Returns false when the line does not contain at least 3 spaces.
     * timestamp and writeCount are base-36 ("hexatri") encoded.
     */
    static bool splitLine(const StaticString &line, StaticString &txnId,
        unsigned long long &timestamp, unsigned int &writeCount,
        StaticString &data)
    {
        size_t firstDelim = line.find(' ');
        if (firstDelim == string::npos) {
            return false;
        }
        size_t secondDelim = line.find(' ', firstDelim + 1);
        if (secondDelim == string::npos) {
            return false;
        }
        size_t thirdDelim = line.find(' ', secondDelim + 1);
        if (thirdDelim == string::npos) {
            return false;
        }
        txnId = line.substr(0, firstDelim);
        timestamp = hexatriToULL(line.substr(firstDelim + 1, secondDelim - firstDelim - 1));
        writeCount = (unsigned int) hexatriToULL(line.substr(secondDelim + 1,
            thirdDelim - secondDelim - 1));
        data = line.substr(thirdDelim + 1);
        return true;
    }

    /**
     * Pull the "(<timestamp>)" suffix out of an event line such as
     * "BEGIN: request processing (abc123)". Returns 0 when no
     * well-formed parenthesized timestamp is present.
     */
    static unsigned long long extractEventTimestamp(const StaticString &data) {
        size_t pos = data.find('(');
        if (pos == string::npos) {
            return 0;
        } else {
            pos++;
            size_t start = pos;
            while (pos < data.size() && isDigit(data[pos])) {
                pos++;
            }
            if (pos >= data.size()) {
                // Ran off the end without a closing character: malformed.
                return 0;
            } else {
                return hexatriToULL(data.substr(start, pos - start));
            }
        }
    }

    static bool isNewline(char ch) {
        return ch == '\n' || ch == '\r';
    }

    static bool isDigit(char ch) {
        return ch >= '0' && ch <= '9';
    }

    /** Advance past any run of CR/LF characters. */
    static const char *skipNewlines(const char *current, const char *end) {
        while (current < end && isNewline(*current)) {
            current++;
        }
        return current;
    }

    /** Advance to the next CR/LF character (or `end`). */
    static const char *findEndOfLine(const char *current, const char *end) {
        while (current < end && !isNewline(*current)) {
            current++;
        }
        return current;
    }

    /** Parse the log on first use; subsequent calls return the cache. */
    SimpleContext *parse() const {
        if (parsedData == NULL) {
            ReleaseableScopedPointer<SimpleContext> ctx(new SimpleContext());
            reallyParse(logData, *ctx.get());
            parsedData = ctx.release();
        }
        return parsedData;
    }

public:
    /**
     * @param logData The raw transaction log. Only a reference is kept
     *                (StaticString does not copy), so the underlying
     *                buffer must outlive this object.
     */
    ContextFromLog(const StaticString &logData) {
        this->logData = logData;
        parsedData = NULL;
    }

    ~ContextFromLog() {
        delete parsedData;
    }

    virtual string getURI() const {
        return parse()->getURI();
    }

    virtual string getController() const {
        return parse()->getController();
    }

    virtual int getResponseTime() const {
        return parse()->getResponseTime();
    }

    virtual string getStatus() const {
        return parse()->getStatus();
    }

    virtual int getStatusCode() const {
        return parse()->getStatusCode();
    }

    virtual int getGcTime() const {
        return parse()->getGcTime();
    }

    virtual bool hasHint(const string &name) const {
        return parse()->hasHint(name);
    }
};
class Filter {
private:
typedef Tokenizer::Token Token;
typedef Tokenizer::TokenType TokenType;
struct BooleanComponent;
struct MultiExpression;
struct Comparison;
struct FunctionCall;
typedef boost::shared_ptr<BooleanComponent> BooleanComponentPtr;
typedef boost::shared_ptr<MultiExpression> MultiExpressionPtr;
typedef boost::shared_ptr<Comparison> ComparisonPtr;
typedef boost::shared_ptr<FunctionCall> FunctionCallPtr;
struct BooleanComponent {
virtual ~BooleanComponent() { }
virtual bool evaluate(const Context &ctx) = 0;
};
enum LogicalOperator {
AND,
OR
};
enum Comparator {
MATCHES,
NOT_MATCHES,
EQUALS,
NOT_EQUALS,
GREATER_THAN,
GREATER_THAN_OR_EQUALS,
LESS_THAN,
LESS_THAN_OR_EQUALS,
UNKNOWN_COMPARATOR
};
struct MultiExpression: public BooleanComponent {
struct Part {
LogicalOperator theOperator;
BooleanComponentPtr expression;
};
BooleanComponentPtr firstExpression;
vector<Part> rest;
virtual bool evaluate(const Context &ctx) {
bool result = firstExpression->evaluate(ctx);
unsigned int i = 0;
bool done = i == rest.size();
while (!done) {
Part &nextPart = rest[i];
if (nextPart.theOperator == AND) {
result = result && nextPart.expression->evaluate(ctx);
done = !result;
} else {
result = result || nextPart.expression->evaluate(ctx);
}
i++;
done = done || i == rest.size();
}
return result;
}
};
struct Negation: public BooleanComponent {
BooleanComponentPtr expr;
Negation(const BooleanComponentPtr &e)
: expr(e)
{ }
virtual bool evaluate(const Context &ctx) {
return !expr->evaluate(ctx);
}
};
struct Value {
enum Source {
REGEXP_LITERAL,
STRING_LITERAL,
INTEGER_LITERAL,
BOOLEAN_LITERAL,
CONTEXT_FIELD_IDENTIFIER
};
Source source;
union {
struct {
char stringStorage[sizeof(string)];
string *stringPointer;
struct {
regex_t regexp;
int options;
} regexp;
} stringOrRegexpValue;
int intValue;
bool boolValue;
Context::FieldIdentifier contextFieldIdentifier;
} u;
Value() {
source = INTEGER_LITERAL;
u.intValue = 0;
}
Value(const Value &other) {
initializeFrom(other);
}
Value(bool regexp, const StaticString &value, bool caseInsensitive = false) {
if (regexp) {
source = REGEXP_LITERAL;
} else {
source = STRING_LITERAL;
}
u.stringOrRegexpValue.stringPointer = new (u.stringOrRegexpValue.stringStorage)
string(value.data(), value.size());
if (regexp) {
int options = REG_EXTENDED;
u.stringOrRegexpValue.regexp.options = 0;
if (caseInsensitive) {
options |= REG_ICASE;
u.stringOrRegexpValue.regexp.options |=
Tokenizer::REGEXP_OPTION_CASE_INSENSITIVE;
}
regcomp(&u.stringOrRegexpValue.regexp.regexp,
u.stringOrRegexpValue.stringPointer->c_str(),
options);
}
}
Value(int val) {
source = INTEGER_LITERAL;
u.intValue = val;
}
Value(bool val) {
source = BOOLEAN_LITERAL;
u.boolValue = val;
}
Value(Context::FieldIdentifier identifier) {
source = CONTEXT_FIELD_IDENTIFIER;
u.contextFieldIdentifier = identifier;
}
~Value() {
freeStorage();
}
Value &operator=(const Value &other) {
freeStorage();
initializeFrom(other);
return *this;
}
regex_t *getRegexpValue(const Context &ctx) const {
if (source == REGEXP_LITERAL) {
return &storedRegexp();
} else {
return NULL;
}
}
string getStringValue(const Context &ctx) const {
switch (source) {
case REGEXP_LITERAL:
case STRING_LITERAL:
return storedString();
case INTEGER_LITERAL:
return toString(u.intValue);
case BOOLEAN_LITERAL:
if (u.boolValue) {
return "true";
} else {
return "false";
}
case CONTEXT_FIELD_IDENTIFIER:
return ctx.queryStringField(u.contextFieldIdentifier);
default:
return "";
}
}
int getIntegerValue(const Context &ctx) const {
switch (source) {
case REGEXP_LITERAL:
return 0;
case STRING_LITERAL:
return atoi(storedString());
case INTEGER_LITERAL:
return u.intValue;
case BOOLEAN_LITERAL:
return (int) u.boolValue;
case CONTEXT_FIELD_IDENTIFIER:
return ctx.queryIntField(u.contextFieldIdentifier);
default:
return 0;
}
}
bool getBooleanValue(const Context &ctx) const {
switch (source) {
case REGEXP_LITERAL:
return true;
case STRING_LITERAL:
return !storedString().empty();
case INTEGER_LITERAL:
return (bool) u.intValue;
case BOOLEAN_LITERAL:
return u.boolValue;
case CONTEXT_FIELD_IDENTIFIER:
return ctx.queryBoolField(u.contextFieldIdentifier);
default:
return 0;
}
}
ValueType getType() const {
switch (source) {
case REGEXP_LITERAL:
return REGEXP_TYPE;
case STRING_LITERAL:
return STRING_TYPE;
case INTEGER_LITERAL:
return INTEGER_TYPE;
case BOOLEAN_LITERAL:
return BOOLEAN_TYPE;
case CONTEXT_FIELD_IDENTIFIER:
return Context::getFieldType(u.contextFieldIdentifier);
default:
return UNKNOWN_TYPE;
}
}
private:
const string &storedString() const {
return *u.stringOrRegexpValue.stringPointer;
}
regex_t &storedRegexp() const {
return (regex_t &) u.stringOrRegexpValue.regexp.regexp;
}
void freeStorage() {
if (source == REGEXP_LITERAL || source == STRING_LITERAL) {
storedString().~string();
if (source == REGEXP_LITERAL) {
regfree(&storedRegexp());
}
}
}
void initializeFrom(const Value &other) {
int options;
source = other.source;
switch (source) {
case REGEXP_LITERAL:
u.stringOrRegexpValue.stringPointer = new (u.stringOrRegexpValue.stringStorage)
string(other.storedString());
options = REG_EXTENDED;
if (other.u.stringOrRegexpValue.regexp.options & Tokenizer::REGEXP_OPTION_CASE_INSENSITIVE) {
options |= REG_ICASE;
}
regcomp(&u.stringOrRegexpValue.regexp.regexp,
storedString().c_str(),
options);
u.stringOrRegexpValue.regexp.options = other.u.stringOrRegexpValue.regexp.options;
break;
case STRING_LITERAL:
u.stringOrRegexpValue.stringPointer = new (u.stringOrRegexpValue.stringStorage)
string(other.storedString());
break;
case INTEGER_LITERAL:
u.intValue = other.u.intValue;
break;
case BOOLEAN_LITERAL:
u.boolValue = other.u.boolValue;
break;
case CONTEXT_FIELD_IDENTIFIER:
u.contextFieldIdentifier = other.u.contextFieldIdentifier;
break;
}
}
};
struct SingleValueComponent: public BooleanComponent {
Value val;
SingleValueComponent(const Value &v)
: val(v)
{ }
virtual bool evaluate(const Context &ctx) {
return val.getBooleanValue(ctx);
}
};
struct Comparison: public BooleanComponent {
Value subject;
Comparator comparator;
Value object;
virtual bool evaluate(const Context &ctx) {
switch (subject.getType()) {
case STRING_TYPE:
return compareStringOrRegexp(subject.getStringValue(ctx), ctx);
case INTEGER_TYPE:
return compareInteger(subject.getIntegerValue(ctx), ctx);
case BOOLEAN_TYPE:
return compareBoolean(subject.getBooleanValue(ctx), ctx);
default:
// error
return false;
}
}
private:
bool compareStringOrRegexp(const string &str, const Context &ctx) {
switch (comparator) {
case MATCHES:
return regexec(object.getRegexpValue(ctx), str.c_str(), 0, NULL, 0) == 0;
case NOT_MATCHES:
return regexec(object.getRegexpValue(ctx), str.c_str(), 0, NULL, 0) != 0;
case EQUALS:
return str == object.getStringValue(ctx);
case NOT_EQUALS:
return str != object.getStringValue(ctx);
default:
// error
return false;
}
}
bool compareInteger(int value, const Context &ctx) {
int value2 = object.getIntegerValue(ctx);
switch (comparator) {
case EQUALS:
return value == value2;
case NOT_EQUALS:
return value != value2;
case GREATER_THAN:
return value > value2;
case GREATER_THAN_OR_EQUALS:
return value >= value2;
case LESS_THAN:
return value < value2;
case LESS_THAN_OR_EQUALS:
return value <= value2;
default:
// error
return false;
}
}
bool compareBoolean(bool value, const Context &ctx) {
bool value2 = object.getBooleanValue(ctx);
switch (comparator) {
case EQUALS:
return value == value2;
case NOT_EQUALS:
return value != value2;
default:
// error
return false;
}
}
};
struct FunctionCall: public BooleanComponent {
vector<Value> arguments;
virtual void checkArguments() const = 0;
};
struct StartsWithFunctionCall: public FunctionCall {
virtual bool evaluate(const Context &ctx) {
return startsWith(arguments[0].getStringValue(ctx),
arguments[1].getStringValue(ctx));
}
virtual void checkArguments() const {
if (arguments.size() != 2) {
throw SyntaxError("you passed " + toString(arguments.size()) +
" argument(s) to starts_with(), but it accepts exactly 2 arguments");
}
}
};
struct HasHintFunctionCall: public FunctionCall {
virtual bool evaluate(const Context &ctx) {
return ctx.hasHint(arguments[0].getStringValue(ctx));
}
virtual void checkArguments() const {
if (arguments.size() != 1) {
throw SyntaxError("you passed " + toString(arguments.size()) +
" argument(s) to has_hint(), but it accepts exactly 1 argument");
}
}
};
Tokenizer tokenizer;
BooleanComponentPtr root;
Token lookahead;
bool debug;
static bool isLiteralToken(const Token &token) {
return token.type == Tokenizer::REGEXP
|| token.type == Tokenizer::STRING
|| token.type == Tokenizer::INTEGER
|| token.type == Tokenizer::TRUE_LIT
|| token.type == Tokenizer::FALSE_LIT;
}
static bool isValueToken(const Token &token) {
return isLiteralToken(token) || token.type == Tokenizer::IDENTIFIER;
}
static bool isLogicalOperatorToken(const Token &token) {
return token.type == Tokenizer::AND
|| token.type == Tokenizer::OR;
}
static Comparator determineComparator(Tokenizer::TokenType type) {
switch (type) {
case Tokenizer::MATCHES:
return MATCHES;
case Tokenizer::NOT_MATCHES:
return NOT_MATCHES;
case Tokenizer::EQUALS:
return EQUALS;
case Tokenizer::NOT_EQUALS:
return NOT_EQUALS;
case Tokenizer::GREATER_THAN:
return GREATER_THAN;
case Tokenizer::GREATER_THAN_OR_EQUALS:
return GREATER_THAN_OR_EQUALS;
case Tokenizer::LESS_THAN:
return LESS_THAN;
case Tokenizer::LESS_THAN_OR_EQUALS:
return LESS_THAN_OR_EQUALS;
default:
return UNKNOWN_COMPARATOR;
}
}
static bool comparatorAcceptsValueTypes(Comparator cmp, ValueType subjectType, ValueType objectType) {
switch (cmp) {
case MATCHES:
case NOT_MATCHES:
return subjectType == STRING_TYPE && objectType == REGEXP_TYPE;
case EQUALS:
case NOT_EQUALS:
return (subjectType == STRING_TYPE || subjectType == INTEGER_TYPE || subjectType == BOOLEAN_TYPE)
&& subjectType == objectType;
case GREATER_THAN:
case GREATER_THAN_OR_EQUALS:
case LESS_THAN:
case LESS_THAN_OR_EQUALS:
return subjectType == INTEGER_TYPE && objectType == INTEGER_TYPE;
default:
abort();
return false; // Shut up compiler warning.
}
}
static string unescapeCString(const StaticString &data) {
string result;
result.reserve(data.size());
const char *current = data.data();
const char *end = data.data() + data.size();
while (current < end) {
char ch = *current;
if (ch == '\\') {
current++;
if (current < end) {
ch = *current;
switch (ch) {
case 'r':
result.append(1, '\r');
break;
case 'n':
result.append(1, '\n');
break;
case 't':
result.append(1, '\t');
break;
default:
result.append(1, ch);
break;
}
current++;
}
} else {
result.append(1, ch);
current++;
}
}
return result;
}
void logMatch(int level, const char *name) const {
if (level > 100) {
// If level is too deep then it's probably a bug.
abort();
}
if (debug) {
for (int i = 0; i < level; i++) {
printf(" ");
}
printf("Matching: %s\n", name);
}
}
Token peek() const {
return lookahead;
}
bool peek(Tokenizer::TokenType type) const {
return lookahead.type == type;
}
Token match(TokenType type) {
if (lookahead.type == type) {
return match();
} else {
raiseSyntaxError("Expected a " + Tokenizer::typeToString(type) +
" token, but got " + lookahead.toString(),
lookahead);
return Token(); // Shut up compiler warning.
}
}
Token match() {
Token old = lookahead;
lookahead = tokenizer.getNext();
return old;
}
void raiseSyntaxError(const string &msg = "", const Token &token = Token()) {
if (token.type != Tokenizer::NONE) {
string message = "at character " + toString(token.pos + 1);
if (!msg.empty()) {
message.append(": ");
message.append(msg);
}
throw SyntaxError(message);
} else {
throw SyntaxError(msg);
}
}
BooleanComponentPtr matchMultiExpression(int level) {
logMatch(level, "matchMultiExpression()");
MultiExpressionPtr result = boost::make_shared<MultiExpression>();
result->firstExpression = matchExpression(level + 1);
while (isLogicalOperatorToken(peek())) {
MultiExpression::Part part;
part.theOperator = matchOperator(level + 1);
part.expression = matchExpression(level + 1);
result->rest.push_back(part);
}
return result;
}
BooleanComponentPtr matchExpression(int level) {
logMatch(level, "matchExpression()");
bool negate = false;
if (peek(Tokenizer::NOT)) {
match();
negate = true;
}
Token next = peek();
if (next.type == Tokenizer::LPARENTHESIS) {
match();
BooleanComponentPtr expression = matchMultiExpression(level + 1);
match(Tokenizer::RPARENTHESIS);
if (negate) {
return boost::make_shared<Negation>(expression);
} else {
return expression;
}
} else if (isValueToken(next)) {
BooleanComponentPtr component;
Token ¤t = next;
match();
if (peek(Tokenizer::LPARENTHESIS)) {
component = matchFunctionCall(level + 1, current);
} else if (determineComparator(peek().type) != UNKNOWN_COMPARATOR) {
component = matchComparison(level + 1, current);
} else if (current.type == Tokenizer::TRUE_LIT || current.type == Tokenizer::FALSE_LIT) {
component = matchSingleValueComponent(level + 1, current);
} else {
raiseSyntaxError("expected a function call, comparison or boolean literal", current);
}
if (negate) {
return boost::make_shared<Negation>(component);
} else {
return component;
}
} else {
raiseSyntaxError("expected a left parenthesis or an identifier", next);
return BooleanComponentPtr(); // Shut up compiler warning.
}
}
BooleanComponentPtr matchSingleValueComponent(int level, const Token &token) {
logMatch(level, "matchSingleValueComponent()");
return boost::make_shared<SingleValueComponent>(matchLiteral(level + 1, token));
}
ComparisonPtr matchComparison(int level, const Token &subjectToken) {
logMatch(level, "matchComparison()");
ComparisonPtr comparison = boost::make_shared<Comparison>();
comparison->subject = matchValue(level + 1, subjectToken);
comparison->comparator = matchComparator(level + 1);
comparison->object = matchValue(level + 1, match());
if (!comparatorAcceptsValueTypes(comparison->comparator, comparison->subject.getType(), comparison->object.getType())) {
raiseSyntaxError("the comparator cannot operate on the given combination of types", subjectToken);
}
return comparison;
}
FunctionCallPtr matchFunctionCall(int level, const Token &id) {
logMatch(level, "matchFunctionCall()");
FunctionCallPtr function;
if (id.rawValue == "starts_with") {
function = boost::make_shared<StartsWithFunctionCall>();
} else if (id.rawValue == "has_hint") {
function = boost::make_shared<HasHintFunctionCall>();
} else {
raiseSyntaxError("unknown function '" + id.rawValue + "'", id);
}
match(Tokenizer::LPARENTHESIS);
if (isValueToken(peek())) {
function->arguments.push_back(matchValue(level + 1, match()));
while (peek(Tokenizer::COMMA)) {
match();
function->arguments.push_back(matchValue(level + 1, match()));
}
}
match(Tokenizer::RPARENTHESIS);
function->checkArguments();
return function;
}
Value matchValue(int level, const Token &token) {
logMatch(level, "matchValue()");
if (isLiteralToken(token)) {
return matchLiteral(level + 1, token);
} else if (token.type == Tokenizer::IDENTIFIER) {
return matchContextFieldIdentifier(level + 1, token);
} else {
raiseSyntaxError("Unrecognized value token " +
Tokenizer::typeToString(token.type), token);
return Value(); // Shut up compiler warning.
}
}
LogicalOperator matchOperator(int level) {
logMatch(level, "matchOperator()");
if (peek(Tokenizer::AND)) {
logMatch(level + 1, "AND");
match();
return AND;
} else if (peek(Tokenizer::OR)) {
logMatch(level + 1, "OR");
match();
return OR;
} else {
raiseSyntaxError("", peek());
return AND; // Shut up compiler warning.
}
}
Comparator matchComparator(int level) {
logMatch(level, "matchComparator()");
Comparator comparator = determineComparator(peek().type);
if (comparator == UNKNOWN_COMPARATOR) {
raiseSyntaxError("", peek());
return MATCHES; // Shut up compiler warning.
} else {
logMatch(level + 1, Tokenizer::typeToString(peek().type).c_str());
match();
return comparator;
}
}
Value matchLiteral(int level, const Token &token) {
logMatch(level, "matchLiteral()");
if (token.type == Tokenizer::REGEXP) {
logMatch(level + 1, "regexp");
return Value(true, unescapeCString(token.rawValue.substr(1, token.rawValue.size() - 2)),
token.options & Tokenizer::REGEXP_OPTION_CASE_INSENSITIVE);
} else if (token.type == Tokenizer::STRING) {
logMatch(level + 1, "string");
return Value(false, unescapeCString(token.rawValue.substr(1, token.rawValue.size() - 2)));
} else if (token.type == Tokenizer::INTEGER) {
logMatch(level + 1, "integer");
return Value(atoi(token.rawValue.toString()));
} else if (token.type == Tokenizer::TRUE_LIT) {
logMatch(level + 1, "true");
return Value(true);
} else if (token.type == Tokenizer::FALSE_LIT) {
logMatch(level + 1, "false");
return Value(false);
} else {
raiseSyntaxError("regular expression, string, integer or boolean expected", token);
return Value(); // Shut up compiler warning.
}
}
Value matchContextFieldIdentifier(int level, const Token &token) {
logMatch(level, "matchContextFieldIdentifier()");
if (token.rawValue == "uri") {
return Value(Context::URI);
} else if (token.rawValue == "controller") {
return Value(Context::CONTROLLER);
} else if (token.rawValue == "response_time") {
return Value(Context::RESPONSE_TIME);
} else if (token.rawValue == "response_time_without_gc") {
return Value(Context::RESPONSE_TIME_WITHOUT_GC);
} else if (token.rawValue == "status") {
return Value(Context::STATUS);
} else if (token.rawValue == "status_code") {
return Value(Context::STATUS_CODE);
} else if (token.rawValue == "gc_time") {
return Value(Context::GC_TIME);
} else {
raiseSyntaxError("unknown field '" + token.rawValue + "'", token);
return Value(); // Shut up compiler warning.
}
}
public:
Filter(const StaticString &source, bool debug = false)
: tokenizer(source, debug)
{
this->debug = debug;
lookahead = tokenizer.getNext();
root = matchMultiExpression(0);
logMatch(0, "end of data");
match(Tokenizer::END_OF_DATA);
}
bool run(const Context &ctx) {
return root->evaluate(ctx);
}
};
typedef boost::shared_ptr<Filter> FilterPtr;
} // namespace FilterSupport
} // namespace Passenger
#endif /* __cplusplus */
/********* C bindings *********/
#ifdef __cplusplus
extern "C" {
#endif
typedef void *PassengerFilter;
PassengerFilter *passenger_filter_create(const char *source, int size, char **error);
void passenger_filter_free(PassengerFilter *filter);
char *passenger_filter_validate(const char *source, int size);
#ifdef __cplusplus
}
#endif
#endif /* _PASSENGER_FILTER_SUPPORT_H_ */
| 24.267241 | 122 | 0.669728 | [
"object",
"vector"
] |
eae3198bea285839751b1febb59ac68e6b914781 | 6,092 | h | C | render_pipeline/kde/matlab_kde_package/mex/cpp/BallTreeDensity.h | aniketpokale10/RenderForCNN_modified | a7bd8d808bac7e361cc9b3331fc75483e2e9b1a0 | [
"MIT"
] | 261 | 2015-09-25T04:39:42.000Z | 2022-03-17T20:06:31.000Z | render_pipeline/kde/matlab_kde_package/mex/cpp/BallTreeDensity.h | aniketpokale10/RenderForCNN_modified | a7bd8d808bac7e361cc9b3331fc75483e2e9b1a0 | [
"MIT"
] | 15 | 2016-01-25T15:24:51.000Z | 2021-03-13T03:35:05.000Z | render_pipeline/kde/matlab_kde_package/mex/cpp/BallTreeDensity.h | aniketpokale10/RenderForCNN_modified | a7bd8d808bac7e361cc9b3331fc75483e2e9b1a0 | [
"MIT"
] | 104 | 2015-09-25T17:06:19.000Z | 2022-02-08T04:46:56.000Z | //////////////////////////////////////////////////////////////////////////////////////
// BallTreeDensity.h -- class definition for a tree-based kernel density estimate
//
// A few functions are defined only for MEX calls (construction & load from matlab)
// Most others can be used more generally.
//
//////////////////////////////////////////////////////////////////////////////////////
//
// Written by Alex Ihler and Mike Mandel
// Copyright (C) 2003 Alexander Ihler; distributable under GPL -- see README.txt
//
//////////////////////////////////////////////////////////////////////////////////////
#ifndef __BALL_TREE_DENSITY_H
#define __BALL_TREE_DENSITY_H
#include "BallTree.h"
#include <assert.h>
#include <float.h>
class BallTreeDensity : public BallTree {
public:
enum KernelType { Gaussian, Epanetchnikov, Laplacian };
KernelType getType(void) const { return type; };
enum Gradient { WRTMean, WRTVariance, WRTWeight };
/////////////////////////////
// Constructors
/////////////////////////////
//BallTreeDensity( unsigned int d, index N, double* points_,
// double* weights_, double* bandwidths_);
#ifdef MEX // for loading ball trees from matlab
BallTreeDensity() : BallTree() { bandwidth = bandwidthMax = bandwidthMin = NULL; }
BallTreeDensity(const mxArray* structure);
static mxArray* createInMatlab(const mxArray* pts, const mxArray* wts, const mxArray* bw, BallTreeDensity::KernelType _type=Gaussian);
#endif
/////////////////////////////
// Accessor Functions
/////////////////////////////
const double* mean(BallTree::index i) const { return means+i*dims; }
const double* variance(BallTree::index i) const { return bandwidth+i*dims; } // !!! only works for Gaussian
const double* bw(BallTree::index i) const { return bandwidth +i*dims; }
const double* bwMax(BallTree::index i) const { return bandwidthMax+i*dims*multibandwidth; }
const double* bwMin(BallTree::index i) const { return bandwidthMin+i*dims*multibandwidth; }
bool bwUniform(void) const { return multibandwidth==0; };
// -- Others inherited from BallTree --
///////////////////////////////
//
// Evaluation of the density at a set of points:
// pre-constructed balltree version
// array of doubles version
// leave-one-out cross-validation version
//
void evaluate(const BallTree& atPoints, double* values, double maxErr=0) const;
// void evaluate(index Npts, const double* atPoints, double* values, double maxErr=0) const;
void evaluate(double* p, double maxErr) const { evaluate(*this,p,maxErr); }
void llGrad(const BallTree& locations, double* gradDens, double* gradAt, double tolEval, double tolGrad, Gradient) const;
// void llGrad(index Npts, const double* atPoints, double* gradDens, double* gradAt, double tolEval, double tolGrad) const;
bool updateBW(const double*, index);
/////////////////////////////
// Private object functions
/////////////////////////////
protected:
#ifdef MEX
static mxArray* matlabMakeStruct(const mxArray* pts, const mxArray* wts, const mxArray* bw, BallTreeDensity::KernelType type);
#endif
virtual void swap(BallTree::index, BallTree::index);// leaf-swapping function
virtual void calcStats(BallTree::index root); // recursion for computing BW ranges
KernelType type;
unsigned int multibandwidth; // flag: is bandwidth uniform?
double *means; // Weighted mean of points from this level down
double *bandwidth; // Variance or other multiscale bandwidth
double *bandwidthMax,*bandwidthMin; // Bounds on BW in non-uniform case
// Internal evaluate functions:
// Recursive tree evaluation
const static index DirectSize = 100; // if N*M is less than this, just compute.
void evaluate(BallTree::index myRoot, const BallTree& atTree, BallTree::index aRoot, double maxErr) const;
void evalDirect(BallTree::index myRoot, const BallTree& atTree, BallTree::index aRoot) const;
void llGradDirect(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot, Gradient) const;
void llGradRecurse(BallTree::index dRoot,const BallTree& atTree, BallTree::index aRoot, double tolGrad, Gradient) const;
void llGradWDirect(index dRoot, const BallTree& atTree, index aRoot) const;
void llGradWRecurse(index dRoot,const BallTree& atTree, index aRoot, double tolGrad) const;
// Bounds on kernel values between points in this subtree & another
double maxDistKer(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const {
switch(getType())
{ case Gaussian: return maxDistGauss(dRoot,atTree,aRoot);
case Laplacian: return maxDistLaplace(dRoot,atTree,aRoot);
case Epanetchnikov: return maxDistEpanetch(dRoot,atTree,aRoot);
}
};
double minDistKer(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const {
switch(getType())
{ case Gaussian: return minDistGauss(dRoot,atTree,aRoot);
case Laplacian: return minDistLaplace(dRoot,atTree,aRoot);
case Epanetchnikov: return minDistEpanetch(dRoot,atTree,aRoot);
}
};
// Types of kernels supported
double maxDistLaplace(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const;
double minDistLaplace(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const;
double maxDistGauss(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const;
double minDistGauss(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot) const;
double maxDistEpanetch(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot, int dim=-1) const;
double minDistEpanetch(BallTree::index dRoot, const BallTree& atTree, BallTree::index aRoot, int dim=-1) const;
void dKdX_p(BallTree::index dRoot,const BallTree& atTree, BallTree::index aRoot, bool bothLeaves, Gradient) const;
};
#endif
| 47.224806 | 137 | 0.655778 | [
"object"
] |
eaf0b1d67467ed06a7d4e6ace705c8e6304aa8f6 | 3,015 | h | C | src/Vulkan/VkConfig.h | ZhymabekRoman/swiftshader | db5d836d001f1313f574ffa71f02e567a2098214 | [
"Apache-2.0"
] | null | null | null | src/Vulkan/VkConfig.h | ZhymabekRoman/swiftshader | db5d836d001f1313f574ffa71f02e567a2098214 | [
"Apache-2.0"
] | null | null | null | src/Vulkan/VkConfig.h | ZhymabekRoman/swiftshader | db5d836d001f1313f574ffa71f02e567a2098214 | [
"Apache-2.0"
] | null | null | null | // Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VK_CONFIG_HPP_
#define VK_CONFIG_HPP_
#include "Version.h"
#include <Vulkan/VulkanPlatform.h>
namespace vk {
// Note: Constant array initialization requires a string literal.
// constexpr char* or char[] does not work for that purpose.
#define SWIFTSHADER_DEVICE_NAME "SwiftShader Device" // Max length: VK_MAX_PHYSICAL_DEVICE_NAME_SIZE
#define SWIFTSHADER_UUID "SwiftShaderUUID" // Max length: VK_UUID_SIZE (16)
enum
{
API_VERSION = VK_API_VERSION_1_1,
DRIVER_VERSION = VK_MAKE_VERSION(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION),
VENDOR_ID = 0x1AE0, // Google, Inc.: https://pcisig.com/google-inc-1
DEVICE_ID = 0xC0DE, // SwiftShader (placeholder)
};
enum
{
// Alignment of all Vulkan objects, pools, device memory, images, buffers, descriptors.
REQUIRED_MEMORY_ALIGNMENT = 16, // 16 bytes for 128-bit vector types.
MIN_TEXEL_BUFFER_OFFSET_ALIGNMENT = 256,
MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT = 256,
MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT = 256,
MEMORY_TYPE_GENERIC_BIT = 0x1, // Generic system memory.
};
enum
{
MAX_IMAGE_LEVELS_1D = 14,
MAX_IMAGE_LEVELS_2D = 14,
MAX_IMAGE_LEVELS_3D = 11,
MAX_IMAGE_LEVELS_CUBE = 14,
MAX_IMAGE_ARRAY_LAYERS = 2048,
MAX_SAMPLER_LOD_BIAS = 15,
};
enum
{
MAX_BOUND_DESCRIPTOR_SETS = 4,
MAX_VERTEX_INPUT_BINDINGS = 16,
MAX_PUSH_CONSTANT_SIZE = 128,
};
enum
{
MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC = 8,
MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC = 4,
MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC =
MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC +
MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC,
};
enum
{
MAX_POINT_SIZE = 1023,
};
constexpr int SUBPIXEL_PRECISION_BITS = 4;
constexpr float SUBPIXEL_PRECISION_FACTOR = static_cast<float>(1 << SUBPIXEL_PRECISION_BITS);
constexpr int SUBPIXEL_PRECISION_MASK = 0xFFFFFFFF >> (32 - SUBPIXEL_PRECISION_BITS);
} // namespace vk
#if defined(__linux__) || defined(__ANDROID__)
#define SWIFTSHADER_EXTERNAL_MEMORY_OPAQUE_FD 1
#define SWIFTSHADER_EXTERNAL_SEMAPHORE_OPAQUE_FD 1
#endif
constexpr VkDeviceSize MAX_MEMORY_ALLOCATION_SIZE = 0x40000000ull; // 0x40000000 = 1 GiB
// Memory offset calculations in 32-bit SIMD elements limit us to addressing at most 4 GiB.
// Signed arithmetic further restricts it to 2 GiB.
static_assert(MAX_MEMORY_ALLOCATION_SIZE <= 0x80000000ull, "maxMemoryAllocationSize must not exceed 2 GiB");
#endif // VK_CONFIG_HPP_
| 30.765306 | 108 | 0.78209 | [
"vector"
] |
eaf1fb94df76894aaaeab7c070ae9e454c3ed6ae | 5,023 | h | C | ContentLoader.h | lethosor/stonesense | 205851212e02638393ce018eb71baa33e1dde8bc | [
"Artistic-1.0-Perl"
] | null | null | null | ContentLoader.h | lethosor/stonesense | 205851212e02638393ce018eb71baa33e1dde8bc | [
"Artistic-1.0-Perl"
] | null | null | null | ContentLoader.h | lethosor/stonesense | 205851212e02638393ce018eb71baa33e1dde8bc | [
"Artistic-1.0-Perl"
] | null | null | null | #pragma once
#include "tinyxml.h"
#include "BuildingConfiguration.h"
#include "CreatureConfiguration.h"
#include "VegetationConfiguration.h"
#include "GroundMaterialConfiguration.h"
#include "ColorConfiguration.h"
#include "commonTypes.h"
#include "FluidConfiguration.h"
#include "ItemConfiguration.h"
#include "common.h"
class ContentLoader
{
private:
bool parseContentIndexFile( const char* filepath );
bool parseContentXMLFile( const char* filepath );
bool parseBuildingContent( TiXmlElement* elemRoot );
bool parseCreatureContent( TiXmlElement* elemRoot );
bool parseTerrainContent ( TiXmlElement* elemRoot );
bool parseGrowthContent(TiXmlElement* elemRoot);
bool parseTreeContent( TiXmlElement* elemRoot );
bool parseShrubContent( TiXmlElement* elemRoot );
bool parseColorContent( TiXmlElement* elemRoot );
bool parseFluidContent( TiXmlElement* elemRoot );
bool parseGrassContent( TiXmlElement* elemRoot );
bool parseItemContent( TiXmlElement* elemRoot );
void flushCreatureConfig();
bool translationComplete;
void gatherStyleIndices(df::world_raws * raws);
public:
ContentLoader(void);
~ContentLoader(void);
bool Load();
bool reload_configs();
std::vector<BuildingConfiguration> buildingConfigs;
std::vector<std::vector<CreatureConfiguration>*> creatureConfigs;
std::vector<VegetationConfiguration> treeConfigs;
std::vector<VegetationConfiguration> shrubConfigs;
std::vector<VegetationConfiguration> grassConfigs;
std::vector<TerrainConfiguration*> terrainFloorConfigs;
std::vector<TerrainConfiguration*> terrainWallConfigs;
std::vector<ColorConfiguration> colorConfigs;
MaterialMatcher<ALLEGRO_COLOR> materialColorConfigs;
MaterialMatcher<c_sprite> growthTopConfigs;
MaterialMatcher<c_sprite> growthBottomConfigs;
std::vector<ItemConfiguration*> itemConfigs;
FluidConfiguration lava[8];
FluidConfiguration water[8];
//race.caste.hairtype.styletype
std::vector<std::vector<std::vector<int32_t>*>*> style_indices;
std::vector<std::vector<int32_t>*> position_Indices;
RemoteFortressReader::MaterialList materialNameList;
RemoteFortressReader::MaterialList growthNameList;
RemoteFortressReader::TiletypeList tiletypeNameList;
std::vector<std::string> professionStrings;
std::map <uint32_t, std::string> custom_workshop_types;
DFHack::Materials * Mats;
std::vector<t_matgloss> organic;
std::vector<t_matglossInorganic> inorganic;
uint32_t currentTick;
uint32_t currentYear;
uint8_t currentMonth;
uint8_t currentDay;
uint8_t currentHour;
uint8_t currentTickRel;
t_gamemodes gameMode;
int obsidian;
};
extern ContentLoader * contentLoader;
extern const char* getDocument(TiXmlNode* element);
bool getLocalFilename(char * buffer, const char* filename, const char* relativeto);
extern void contentError(const char* message, TiXmlNode* element);
extern void contentWarning(const char* message, TiXmlNode* element);
extern char getAnimFrames(const char* framestring);
extern int loadConfigImgFile(const char* filename, TiXmlElement* referrer);
MAT_BASICS lookupMaterialType(const char* strValue);
int lookupMaterialIndex(int matType, const char* strValue);
template <typename T>
int lookupIndexedType(const char* indexName, std::vector<T>& typeVector)
{
if (indexName == NULL || indexName[0] == 0) {
return INVALID_INDEX;
}
uint32_t vsize = (uint32_t)typeVector.size();
for(uint32_t i=0; i < vsize; i++) {
if (typeVector[i].id == indexName) {
return i;
}
}
return INVALID_INDEX;
}
template <typename T>
int lookupIndexedPonterType(const char* indexName, std::vector<T*>& typeVector)
{
if (indexName == NULL || indexName[0] == 0) {
return INVALID_INDEX;
}
uint32_t vsize = (uint32_t)typeVector.size();
for(uint32_t i=0; i < vsize; i++) {
if (typeVector[i]->id == indexName) {
return i;
}
}
return INVALID_INDEX;
}
const char *lookupMaterialTypeName(int matType);
const char *lookupMaterialName(int matType,int matIndex);
const char *lookupBuildingSubtype(int main_type, int i);
uint8_t lookupMaterialFore(int matType,int matIndex);
uint8_t lookupMaterialBack(int matType,int matIndex);
uint8_t lookupMaterialBright(int matType,int matIndex);
const char *lookupTreeName(int matIndex);
ALLEGRO_COLOR lookupMaterialColor(DFHack::t_matglossPair matt, DFHack::t_matglossPair dyematt, ALLEGRO_COLOR defaultColor=al_map_rgb(255,255,255));
ALLEGRO_COLOR lookupMaterialColor(DFHack::t_matglossPair matt, ALLEGRO_COLOR defaultColor=al_map_rgb(255,255,255));
ALLEGRO_COLOR lookupMaterialColor(int matType, int matIndex, int dyeType, int dyeIndex, ALLEGRO_COLOR defaultColor=al_map_rgb(255,255,255));
ALLEGRO_COLOR lookupMaterialColor(int matType, int matIndex, ALLEGRO_COLOR defaultColor=al_map_rgb(255,255,255));
const char * lookupFormName(int formType);
ShadeBy getShadeType(const char* Input);
| 38.343511 | 147 | 0.755326 | [
"vector"
] |
eaf2180762013321655a59c9dcc8a066a5a79cea | 2,208 | h | C | src/myscene.h | nfette/sorpsim-opt | d85df8c6a99fa181bd1fdc6f7fbea5f7f8be2c77 | [
"RSA-MD"
] | null | null | null | src/myscene.h | nfette/sorpsim-opt | d85df8c6a99fa181bd1fdc6f7fbea5f7f8be2c77 | [
"RSA-MD"
] | 1 | 2018-01-26T21:15:43.000Z | 2018-01-31T19:42:21.000Z | src/myscene.h | nfette/sorpsim-opt | d85df8c6a99fa181bd1fdc6f7fbea5f7f8be2c77 | [
"RSA-MD"
] | null | null | null | /*! \file myscene.h
This file is part of SorpSim and is distributed under terms in the file LICENSE.
Developed by Zhiyao Yang and Dr. Ming Qu for ORNL.
\author Zhiyao Yang (zhiyaoYang)
\author Dr. Ming Qu
\author Nicholas Fette (nfette)
\copyright 2015, UT-Battelle, LLC
\copyright 2017-2018, Nicholas Fette
*/
#ifndef MYSCENE_H
#define MYSCENE_H
#include <QObject>
#include <QGraphicsView>
#include <QGraphicsScene>
#include <QGraphicsTextItem>
#include <QGraphicsRectItem>
#include <QGraphicsEllipseItem>
#include <QGraphicsItem>
#include <QGraphicsTextItem>
#include <QGraphicsSceneMouseEvent>
#include "node.h"
#include "link.h"
#include "unit.h"
#include "tableselectparadialog.h"
#include "tabledialog.h"
#include "overlaysettingdialog.h"
#include "texteditdialog.h"
#include "myview.h"
#include "plotsdialog.h"
#include "editpropertycurvedialog.h"
#include "edittabledialog.h"
/// Custom class based on QGraphicsScene
/// - object is created and setup in myView (QGraphicsView)
/// - operations in the operating panel is handled via myScene including:
/// - mouse press, double click on items
/// - called by various classes in the project
class myScene :public QGraphicsScene
{
public:
myScene(QObject * parent = NULL);
tableSelectParaDialog * tDialog;
editTableDialog * etDialog;
plotsDialog* plotWindow;
QGraphicsSimpleTextItem * copcap;
QGraphicsRectItem * copRect;
Plot * sel_plot;
overlaysetting * overlaydialog;
editPropertyCurveDialog*editPropDialog;
QList<Node *> selectednodeslist;
SimpleTextItem* textitem;
public slots:
void drawLink(Node* node1, Node * node2);
void drawAUnit(unit * unit);
void evokeProperties();
void editUnit();
void evokeTDialog();
void editUnit(unit * edUnit);
void editSp(Node * node);
void cancelLink(Node*node1,Node*node2=NULL);
void resetPointedComp();
protected:
virtual void mousePressEvent(QGraphicsSceneMouseEvent *event);
virtual void mouseDoubleClickEvent(QGraphicsSceneMouseEvent *event);
private:
int checkFluidForLink(Node*node1,Node*node2);
QGraphicsRectItem * rect;
myView* theView;
};
#endif // MYSCENE_H
| 25.37931 | 84 | 0.734601 | [
"object"
] |
eaf300ed950ebc13fb98a9889dd8997af9ab1c5e | 2,264 | h | C | cpprevolve/revolve/gazebo/brains/ThymioBrain.h | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | cpprevolve/revolve/gazebo/brains/ThymioBrain.h | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | cpprevolve/revolve/gazebo/brains/ThymioBrain.h | braj29/robo_swimmers | b3c3fa91976884095eb6b5e67844167598ec573d | [
"Apache-1.1"
] | null | null | null | /*
* Copyright (C) 2015-2018 Vrije Universiteit Amsterdam
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Description: TODO: <Add brief description about file purpose>
* Author: Milan Jelisavcic
* Date: 28/10/2018
*
*/
#ifndef REVOLVE_THYMIOBRAIN_H
#define REVOLVE_THYMIOBRAIN_H
#include "Brain.h"
namespace revolve
{
namespace gazebo
{
class ThymioBrain
: public ::revolve::gazebo::Brain
{
/// \brief The RLPower constructor reads out configuration file,
/// deretmines which algorithm type to apply and initialises new policy.
/// \param[in] _modelName: name of a robot
/// \param[in] _node: configuration file
/// \param[in] _motors: vector list of robot's actuators
/// \param[in] _sensors: vector list of robot's sensors
/// \return pointer to the RLPower class object
public: ThymioBrain(
::gazebo::physics::ModelPtr _model,
sdf::ElementPtr _node,
std::vector< MotorPtr > &_motors,
std::vector< SensorPtr > &_sensors);
/// \brief Destructor
public: ~ThymioBrain() override;
/// \brief Method for updating sensors readings, actuators positions,
/// ranked list of policies and generating new policy
/// \param[in] _motors: vector list of robot's actuators
/// \param[in] _sensors: vector list of robot's sensors
/// \param[in] _time:
/// \param[in] _step:
public: void Update(
const std::vector< MotorPtr > &_motors,
const std::vector< SensorPtr > &_sensors,
double _time,
double _step) override;
/// \brief Name of the robot
private: ::gazebo::physics::ModelPtr robot_;
};
}
}
#endif //REVOLVE_THYMIOBRAIN_H
| 32.342857 | 78 | 0.664311 | [
"object",
"vector"
] |
eaf51dc67f231d8ab2334b2af2ee8b5b2fc170d5 | 982 | h | C | src/BotConfig.h | zarifmahfuz/SmartSC2Bot | f54cbc11d50126e3275aa864c00e1f66bf9c79b9 | [
"MIT"
] | 2 | 2022-01-01T21:36:51.000Z | 2022-01-05T16:41:14.000Z | src/BotConfig.h | zarifmahfuz/SmartSC2Bot | f54cbc11d50126e3275aa864c00e1f66bf9c79b9 | [
"MIT"
] | null | null | null | src/BotConfig.h | zarifmahfuz/SmartSC2Bot | f54cbc11d50126e3275aa864c00e1f66bf9c79b9 | [
"MIT"
] | 1 | 2022-01-05T16:41:33.000Z | 2022-01-05T16:41:33.000Z | #ifndef BASICSC2BOT_BOTCONFIG_H
#define BASICSC2BOT_BOTCONFIG_H
#include <string>
#include <yaml-cpp/yaml.h>
#include <unordered_map>
class BotConfig {
public:
static BotConfig from_file(const std::string &filename);
const int engineeringBayFirst;
const int attackTriggerTimeSeconds;
const int attackTriggerArmyUnits;
const float defendRadius;
const float stimpackMinHealth;
// supply levels to build each barracks at
const std::vector<int> supplyToBuildBarracksAt;
// a dictionary that maps the n'th Supply Depot to it's required supply level
std::unordered_map<std::string, int> supply_depot;
// a dictionary that maps the n'th Refinery to it's required supply level
std::unordered_map<std::string, int> refinery;
// defines the maximum number of scouts that will be active during the game at a time
const int maxSimulScouts;
private:
explicit BotConfig(const YAML::Node &);
};
#endif //BASICSC2BOT_BOTCONFIG_H
| 28.057143 | 89 | 0.747454 | [
"vector"
] |
eafef11f124a89c544ad30870ad6027d000be26e | 5,054 | h | C | vnext/Desktop/Sandbox/SandboxJSExecutor.h | cknestri/react-native-windows | 1343142c06fd9fcc241496f5eb26a20ba72712f2 | [
"MIT"
] | null | null | null | vnext/Desktop/Sandbox/SandboxJSExecutor.h | cknestri/react-native-windows | 1343142c06fd9fcc241496f5eb26a20ba72712f2 | [
"MIT"
] | null | null | null | vnext/Desktop/Sandbox/SandboxJSExecutor.h | cknestri/react-native-windows | 1343142c06fd9fcc241496f5eb26a20ba72712f2 | [
"MIT"
] | null | null | null | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <DevSupportManager.h>
#include <cxxreact/JSExecutor.h>
#include <cxxreact/JSModulesUnbundle.h>
#include "Sandbox/SandboxEndpoint.h"
// OFFICEDEV: Ignore warnings
#pragma warning(push)
#pragma warning(disable : 4290)
namespace facebook {
namespace react {
class MessageQueueThread;
#if !defined(OSS_RN)
// Used by sandbox process to handle NativeModule calls from JS.
class SandboxDelegateFactory : public ExecutorDelegateFactory {
public:
SandboxDelegateFactory() = delete;
explicit SandboxDelegateFactory(SendNativeModuleCall &&sendNativeModuleCall)
: m_sendNativeModuleCall(std::move(sendNativeModuleCall)) {}
virtual std::unique_ptr<ExecutorDelegate> createExecutorDelegate(
std::shared_ptr<ModuleRegistry>,
std::shared_ptr<InstanceCallback>) override;
private:
SendNativeModuleCall m_sendNativeModuleCall;
};
// Used by SandboxExecutor in host to handle NativeModule calls from sandbox.
class SandboxExecutorDelegateFactory : public ExecutorDelegateFactory {
public:
SandboxExecutorDelegateFactory() = default;
virtual std::unique_ptr<ExecutorDelegate> createExecutorDelegate(
std::shared_ptr<ModuleRegistry>,
std::shared_ptr<InstanceCallback>) override;
};
using JSECreator = std::function<std::unique_ptr<facebook::react::JSExecutor>(
std::shared_ptr<facebook::react::ExecutorDelegate>,
std::shared_ptr<facebook::react::MessageQueueThread>)>;
class SandboxJSExecutorFactory : public JSExecutorFactory {
public:
SandboxJSExecutorFactory() = default;
SandboxJSExecutorFactory(JSECreator &&jsExecutorFactory);
virtual std::unique_ptr<JSExecutor> createJSExecutor(
std::shared_ptr<ExecutorDelegate> delegate,
std::shared_ptr<MessageQueueThread> jsQueue) override;
private:
JSECreator m_jseCreater;
};
class SandboxJSExecutor : public JSExecutor {
public:
SandboxJSExecutor(
std::shared_ptr<ExecutorDelegate> delegate,
std::shared_ptr<MessageQueueThread> messageQueueThread);
~SandboxJSExecutor() override;
virtual void loadApplicationScript(
std::unique_ptr<const JSBigString> script,
uint64_t scriptVersion,
std::string sourceURL,
std::string &&bytecodeFileName) override;
virtual void setBundleRegistry(
std::unique_ptr<RAMBundleRegistry> bundleRegistry) override;
virtual void registerBundle(uint32_t bundleId, const std::string &bundlePath)
override;
virtual void callFunction(
const std::string &moduleId,
const std::string &methodId,
const folly::dynamic &arguments) override;
virtual void invokeCallback(
const double callbackId,
const folly::dynamic &arguments) override;
virtual void setGlobalVariable(
std::string propName,
std::unique_ptr<const JSBigString> jsonValue) override;
virtual void *getJavaScriptContext() override;
virtual std::string getDescription() override;
#ifdef WITH_JSC_MEMORY_PRESSURE
virtual void handleMemoryPressure(int pressureLevel) override;
#endif
virtual void destroy() override;
Concurrency::task<bool> ConnectAsync(
std::shared_ptr<SandboxEndpoint> endpoint,
const std::function<void(std::string)> &errorCallback);
private:
Concurrency::task<bool> PrepareJavaScriptRuntimeAsync();
void Call(const std::string &methodName, folly::dynamic &arguments);
Concurrency::task<bool> SendMessageAsync(
int64_t requestId,
const std::string &methodName,
folly::dynamic &arguments);
void CompleteRequest(int64_t requestId);
void OnReplyMessage(int64_t replyId);
void OnNativeModuleCallMessage(folly::dynamic &&calls);
const int ConnectTimeoutMilliseconds = 5000;
const int ConnectRetryCount = 3;
std::shared_ptr<ExecutorDelegate> m_delegate;
std::shared_ptr<MessageQueueThread> m_messageQueueThread;
std::shared_ptr<SandboxEndpoint> m_sandboxEndpoint;
folly::dynamic m_injectedObjects = folly::dynamic::object;
std::function<void(std::string)> m_errorCallback;
std::map<int64_t, Concurrency::task_completion_event<void>> m_callbacks;
enum class State {
Disconnected,
Listening,
Connected, // Sandbox is connected, still need to prepare the runtime
Running, // Runtime is running
Disposed, // Executor has been shutdown
Error,
};
std::atomic<State> m_state{State::Disconnected};
void SetState(State state) noexcept;
bool IsListening() const noexcept;
bool IsConnected() const noexcept;
bool IsDisposed() const noexcept;
bool IsRunning() const noexcept;
bool IsInError() const noexcept;
int64_t GetNextRequestId();
std::atomic<int64_t> m_requestId{1}; // RequestId starting from 1
};
#endif // OSS_RN
} // namespace react
} // namespace facebook
#pragma warning(pop)
| 32.818182 | 80 | 0.737634 | [
"object"
] |
d8043352870850d9c83f5774e84d4ace85ba2801 | 3,963 | h | C | include/jet/bvh2.h | Whitemane/fluid-engine-dev | 93c3e942182cd73d54b74b7c2a283854e79911be | [
"MIT"
] | 1 | 2018-04-16T13:09:03.000Z | 2018-04-16T13:09:03.000Z | include/jet/bvh2.h | kentbarber/fluid-engine-dev | fb2256badb80c04702db536b63b14754699038ca | [
"MIT"
] | null | null | null | include/jet/bvh2.h | kentbarber/fluid-engine-dev | fb2256badb80c04702db536b63b14754699038ca | [
"MIT"
] | null | null | null | // Copyright (c) 2018 Doyub Kim
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.
#ifndef INCLUDE_JET_BVH2_H_
#define INCLUDE_JET_BVH2_H_
#include <jet/intersection_query_engine2.h>
#include <jet/nearest_neighbor_query_engine2.h>
#include <vector>
namespace jet {
//!
//! \brief Bounding Volume Hierarchy (BVH) in 2D
//!
//! This class implements the classic bounding volume hierarchy structure in 2D.
//! It implements IntersectionQueryEngine2 in order to support box/ray
//! intersection tests. Also, NearestNeighborQueryEngine2 is implemented to
//! provide nearest neighbor query.
//!
template <typename T>
class Bvh2 final : public IntersectionQueryEngine2<T>,
public NearestNeighborQueryEngine2<T> {
public:
typedef std::vector<T> ContainerType;
typedef typename ContainerType::iterator Iterator;
typedef typename ContainerType::const_iterator ConstIterator;
//! Default constructor.
Bvh2();
//! Builds bounding volume hierarchy.
void build(const std::vector<T>& items,
const std::vector<BoundingBox2D>& itemsBounds);
//! Clears all the contents of this instance.
void clear();
//! Returns the nearest neighbor for given point and distance measure
//! function.
NearestNeighborQueryResult2<T> nearest(
const Vector2D& pt,
const NearestNeighborDistanceFunc2<T>& distanceFunc) const override;
//! Returns true if given \p box intersects with any of the stored items.
bool intersects(const BoundingBox2D& box,
const BoxIntersectionTestFunc2<T>& testFunc) const override;
//! Returns true if given \p ray intersects with any of the stored items.
bool intersects(const Ray2D& ray,
const RayIntersectionTestFunc2<T>& testFunc) const override;
//! Invokes \p visitorFunc for every intersecting items.
void forEachIntersectingItem(
const BoundingBox2D& box, const BoxIntersectionTestFunc2<T>& testFunc,
const IntersectionVisitorFunc2<T>& visitorFunc) const override;
//! Invokes \p visitorFunc for every intersecting items.
void forEachIntersectingItem(
const Ray2D& ray, const RayIntersectionTestFunc2<T>& testFunc,
const IntersectionVisitorFunc2<T>& visitorFunc) const override;
//! Returns the closest intersection for given \p ray.
ClosestIntersectionQueryResult2<T> closestIntersection(
const Ray2D& ray,
const GetRayIntersectionFunc2<T>& testFunc) const override;
//! Returns bounding box of every items.
const BoundingBox2D& boundingBox() const;
//! Returns the begin iterator of the item.
Iterator begin();
//! Returns the end iterator of the item.
Iterator end();
//! Returns the immutable begin iterator of the item.
ConstIterator begin() const;
//! Returns the immutable end iterator of the item.
ConstIterator end() const;
//! Returns the number of items.
size_t numberOfItems() const;
//! Returns the item at \p i.
const T& item(size_t i) const;
private:
struct Node {
char flags;
union {
size_t child;
size_t item;
};
BoundingBox2D bound;
Node();
void initLeaf(size_t it, const BoundingBox2D& b);
void initInternal(uint8_t axis, size_t c, const BoundingBox2D& b);
bool isLeaf() const;
};
BoundingBox2D _bound;
ContainerType _items;
std::vector<BoundingBox2D> _itemBounds;
std::vector<Node> _nodes;
size_t build(size_t nodeIndex, size_t* itemIndices, size_t nItems,
size_t currentDepth);
size_t qsplit(size_t* itemIndices, size_t numItems, double pivot,
uint8_t axis);
};
} // namespace jet
#include "detail/bvh2-inl.h"
#endif // INCLUDE_JET_BVH2_H_
| 31.959677 | 80 | 0.692909 | [
"vector"
] |
23811cb83dc43252a4250d29872a3306f76fc16e | 3,921 | h | C | Operations/albaOpLabelizeSurface.h | IOR-BIC/ALBA | b574968b05d9a3a2756dd2ac61d015a0d20232a4 | [
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2018-11-19T10:15:29.000Z | 2021-08-30T11:52:07.000Z | Operations/albaOpLabelizeSurface.h | IOR-BIC/ALBA | b574968b05d9a3a2756dd2ac61d015a0d20232a4 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | Operations/albaOpLabelizeSurface.h | IOR-BIC/ALBA | b574968b05d9a3a2756dd2ac61d015a0d20232a4 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2018-06-10T22:56:29.000Z | 2019-12-12T06:22:56.000Z | /*=========================================================================
Program: ALBA (Agile Library for Biomedical Applications)
Module: albaOpLabelizeSurface
Authors: Matteo Giacomoni
Copyright (c) BIC
All rights reserved. See Copyright.txt or
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#ifndef __albaOpLabelizeSurface_H__
#define __albaOpLabelizeSurface_H__
//----------------------------------------------------------------------------
// Include :
//----------------------------------------------------------------------------
#include "albaDefines.h"
#include "albaOp.h"
//----------------------------------------------------------------------------
// forward references :
//----------------------------------------------------------------------------
class albaVMEGizmo;
class albaGizmoTranslate;
class albaGizmoRotate;
class albaGizmoScale;
class albaVMESurface;
class albaVMESurfaceEditor;
class albaInteractorCompositorMouse;
class albaInteractorGenericMouse;
class vtkPlane;
class vtkPlaneSource;
class vtkArrowSource;
class vtkAppendPolyData;
class vtkGlyph3D;
class vtkALBAClipSurfaceBoundingBox;
class vtkPolyData;
class vtkLookupTable;
//----------------------------------------------------------------------------
// albaOpLabelizeSurface :
//----------------------------------------------------------------------------
/** */
class ALBA_EXPORT albaOpLabelizeSurface: public albaOp
{
public:
albaOpLabelizeSurface(const wxString &label = "Labelize Surface");
~albaOpLabelizeSurface();
virtual void OnEvent(albaEventBase *alba_event);
albaTypeMacro(albaOpLabelizeSurface, albaOp);
albaOp* Copy();
bool Accept(albaVME*node);
void OpRun();
void OpDo();
void OpUndo();
enum GIZMO_TYPE
{
GIZMO_TRANSLATE = 0,
GIZMO_ROTATE,
GIZMO_SCALE,
};
void SetLutEditor(vtkLookupTable *lut);
void SetLabelValue(double val){m_LabelValue=val;};
void SetPlaneDimension(double w,double h);
void Labelize();
virtual void OpStop(int result);
protected:
/** Create the GUI */
void CreateGui();
void ShowClipPlane(bool show);
void CreateGizmos();
void AttachInteraction();
void UpdateISARefSys();
void Undo();
/** Change type of gizmo in the view */
void ChangeGizmo();
void OnEventGizmoPlane(albaEventBase *alba_event);
void OnEventThis(albaEventBase *alba_event);
void OnEventGizmoTranslate(albaEventBase *alba_event);
void OnEventGizmoRotate(albaEventBase *alba_event);
void OnEventGizmoScale(albaEventBase *alba_event);
void PostMultiplyEventMatrix(albaEventBase *alba_event);
void SetPlaneDimension();
albaInteractorCompositorMouse *m_IsaCompositorWithoutGizmo;
albaInteractorCompositorMouse *m_IsaCompositorWithGizmo;
albaInteractorGenericMouse *m_IsaTranslate;
albaInteractorGenericMouse *m_IsaRotate;
albaInteractorGenericMouse *m_IsaChangeArrowWithGizmo;
albaInteractorGenericMouse *m_IsaChangeArrowWithoutGizmo;
albaInteractorGenericMouse *m_IsaLabelizeWithGizmo;
albaInteractorGenericMouse *m_IsaLabelizeWithoutGizmo;
int m_LabelInside;
bool m_PlaneCreated;
double m_PlaneWidth;
double m_PlaneHeight;
double m_LabelValue;
int m_GizmoType;
int m_UseGizmo;
albaVMESurface *m_InputSurface;
albaVMESurfaceEditor *m_VmeEditor;
albaVMEGizmo *m_ImplicitPlaneGizmo;
vtkPlane *m_ClipperPlane;
vtkPlaneSource *m_PlaneSource;
vtkArrowSource *m_ArrowShape;
vtkAppendPolyData *m_Gizmo;
vtkGlyph3D *m_Arrow;
vtkALBAClipSurfaceBoundingBox *m_ClipperBoundingBox;
std::vector<vtkPolyData*> m_ResultPolyData;
vtkPolyData *m_OriginalPolydata;
albaGizmoTranslate *m_GizmoTranslate;
albaGizmoRotate *m_GizmoRotate;
albaGizmoScale *m_GizmoScale;
};
#endif
| 27.41958 | 78 | 0.673043 | [
"vector"
] |
23876e33046ed1519722421e89d144f4a787191a | 22,818 | c | C | src/debug/harness.c | gacha/simba | c31af7018990015784af0d84d427812db37917d8 | [
"MIT"
] | null | null | null | src/debug/harness.c | gacha/simba | c31af7018990015784af0d84d427812db37917d8 | [
"MIT"
] | null | null | null | src/debug/harness.c | gacha/simba | c31af7018990015784af0d84d427812db37917d8 | [
"MIT"
] | null | null | null | /*
* The MIT License (MIT)
*
* Copyright (c) 2014-2018, Erik Moqvist
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* This file is part of the Simba project.
*/
#include "simba.h"
#if CONFIG_HARNESS_DEBUG == 1
# define DPRINT(fmt, ...) std_printf(OSTR(fmt), ##__VA_ARGS__)
#else
# define DPRINT(fmt, ...)
#endif
/* A rough estimate of the average mock entry size, including heap
allocation overhead. */
#define MOCK_ENTRY_SIZE (sizeof(struct mock_entry_t) + 8 * sizeof(void *))
#define HEAP_SIZE (MOCK_ENTRY_SIZE * CONFIG_HARNESS_MOCK_ENTRIES_MAX)
struct mock_entry_t {
struct list_elem_t base;
struct entry_t *next_p;
const char *id_p;
#if CONFIG_HARNESS_WRITE_BACKTRACE_DEPTH_MAX > 0
struct {
void *array[CONFIG_HARNESS_WRITE_BACKTRACE_DEPTH_MAX];
int8_t depth;
} backtrace;
#endif
struct {
size_t size;
uint8_t buf[1];
} data;
};
struct mock_entry_cb_t {
struct list_elem_t base;
struct mock_entry_t *entry_p;
harness_mock_cb_t fn;
char arg[1];
};
struct module_t {
struct {
struct heap_t obj;
uint8_t buf[HEAP_SIZE];
} heap;
struct {
struct list_t list;
struct list_t cb_list;
} mock;
struct mutex_t mutex;
struct bus_t bus;
int total;
int passed;
int failed;
int skipped;
int current_testcase_result;
};
static struct module_t module;
static int print_backtrace_array(void *array[], int depth, const char *name_p)
{
int i;
std_printf(OSTR("\r\n"
"Mock %s backtrace (most recent call first):\r\n"),
name_p);
for (i = 0; i < depth; i++) {
#if defined(ARCH_LINUX)
fprintf(stderr, ": %p\r\n", array[2 * i]);
#else
std_printf(OSTR(": 0x%08x\r\n"), array[2 * i]);
#endif
}
return (0);
}
static int print_backtrace(const char *name_p)
{
void *array[2 * (CONFIG_HARNESS_BACKTRACE_DEPTH_MAX + 8)];
int depth;
depth = sys_backtrace(array, sizeof(array));
return (print_backtrace_array(array, depth, name_p));
}
static int print_assert_backtrace(void)
{
return (print_backtrace("assert"));
}
static int print_read_backtrace(void)
{
return (print_backtrace("read"));
}
static int print_write_backtrace(void)
{
return (print_backtrace("write"));
}
#if CONFIG_HARNESS_WRITE_BACKTRACE_DEPTH_MAX > 0
static int mock_entry_create_write_backtrace(struct mock_entry_t *entry_p)
{
void *array[2 * CONFIG_HARNESS_WRITE_BACKTRACE_DEPTH_MAX];
int depth;
int i;
depth = sys_backtrace(array, sizeof(array));
for (i = 0; i < depth; i++) {
entry_p->backtrace.array[i] = array[2 * i];
}
entry_p->backtrace.depth = (int8_t)depth;
return (0);
}
static int mock_entry_print_write_backtrace(struct mock_entry_t *entry_p)
{
void *array[2 * CONFIG_HARNESS_BACKTRACE_DEPTH_MAX];
int depth;
int i;
depth = (int)entry_p->backtrace.depth;
for (i = 0; i < depth; i++) {
array[2 * i] = entry_p->backtrace.array[i];
}
return (print_backtrace_array(&array[0], depth, "write"));
}
#else
static int mock_entry_create_write_backtrace(struct mock_entry_t *entry_p)
{
return (0);
}
static int mock_entry_print_write_backtrace(struct mock_entry_t *entry_p)
{
return (0);
}
#endif
static struct mock_entry_cb_t *alloc_mock_entry_cb(size_t size)
{
struct mock_entry_cb_t *entry_cb_p;
mutex_lock(&module.mutex);
entry_cb_p = heap_alloc(&module.heap.obj,
sizeof(*entry_cb_p) + size - 1);
mutex_unlock(&module.mutex);
return (entry_cb_p);
}
static int free_mock_entry_cb_no_lock(struct mock_entry_cb_t *entry_cb_p)
{
heap_free(&module.heap.obj, entry_cb_p);
return (0);
}
static struct mock_entry_cb_t *find_mock_entry_cb(struct mock_entry_t *entry_p)
{
struct list_iter_t iter;
struct mock_entry_cb_t *entry_cb_p;
list_iter_init(&iter, &module.mock.cb_list);
while (1) {
entry_cb_p = (struct mock_entry_cb_t *)list_iter_next(&iter);
if (entry_cb_p == NULL) {
break;
}
if (entry_cb_p->entry_p == entry_p) {
list_remove(&module.mock.cb_list, entry_cb_p);
break;
}
}
return (entry_cb_p);
}
static struct mock_entry_t *alloc_mock_entry_no_lock(const char *id_p,
size_t size)
{
struct mock_entry_t *entry_p;
DPRINT("Allocating mock entry for id '%s'.\r\n", id_p);
entry_p = heap_alloc(&module.heap.obj,
sizeof(*entry_p) + size - 1);
if (entry_p != NULL) {
entry_p->id_p = id_p;
}
return (entry_p);
}
static struct mock_entry_t *alloc_mock_entry(const char *id_p,
size_t size)
{
struct mock_entry_t *entry_p;
mutex_lock(&module.mutex);
entry_p = alloc_mock_entry_no_lock(id_p, size);
mutex_unlock(&module.mutex);
return (entry_p);
}
static int free_mock_entry(struct mock_entry_t *entry_p)
{
DPRINT("Freeing mock entry with id '%s'.\r\n", entry_p->id_p);
mutex_lock(&module.mutex);
heap_free(&module.heap.obj, entry_p);
mutex_unlock(&module.mutex);
return (0);
}
static struct mock_entry_t *copy_mock_entry_no_lock(struct mock_entry_t *entry_p)
{
struct mock_entry_t *copy_p;
copy_p = alloc_mock_entry_no_lock(entry_p->id_p,
entry_p->data.size);
memcpy(copy_p, entry_p, sizeof(*entry_p) + entry_p->data.size - 1);
return (copy_p);
}
/* Find the first mock entry with the given id.
 *
 * Without a registered callback the entry itself is unlinked and
 * returned; the caller takes ownership and must free it (typically via
 * read_mock_entry()). With a callback, a copy of the entry is returned
 * instead, since the callback may modify the original in place. When
 * the callback returns 1 the original entry and its callback are
 * retired; otherwise both are kept for subsequent reads.
 *
 * Returns NULL when no matching entry exists. Thread safe.
 */
static struct mock_entry_t *find_mock_entry(const char *id_p)
{
    struct list_iter_t iter;
    struct mock_entry_t *entry_p;
    struct mock_entry_t *unmodified_entry_p;
    struct mock_entry_cb_t *entry_cb_p;
    int res;
    mutex_lock(&module.mutex);
    list_iter_init(&iter, &module.mock.list);
    while (1) {
        entry_p = (struct mock_entry_t *)list_iter_next(&iter);
        if (entry_p == NULL) {
            break;
        }
        if (strcmp(entry_p->id_p, id_p) == 0) {
            entry_cb_p = find_mock_entry_cb(entry_p);
            if (entry_cb_p == NULL) {
                list_remove(&module.mock.list, entry_p);
            } else {
                /* Make a copy of the mock entry since the mock
                   callback may modify it. */
                unmodified_entry_p = copy_mock_entry_no_lock(entry_p);
                res = entry_cb_p->fn(&entry_cb_p->arg[0],
                                     &entry_p->data.buf[0]);
                if (res == 1) {
                    list_remove(&module.mock.list, entry_p);
                    free_mock_entry_cb_no_lock(entry_cb_p);
                    /* Fix: release the original entry. Only the copy
                       is handed to the caller, so the original was
                       previously leaked on this path. */
                    heap_free(&module.heap.obj, entry_p);
                } else {
                    /* Keep the callback registered for further reads. */
                    list_add_tail(&module.mock.cb_list, entry_cb_p);
                }
                entry_p = unmodified_entry_p;
            }
            break;
        }
    }
    mutex_unlock(&module.mutex);
    return (entry_p);
}
/* Copy the payload of a consumed mock entry into the caller's buffer
 * and release the entry.
 *
 * The read is all-or-nothing: 'size' must match the entry's stored
 * size exactly, and a NULL buffer is only accepted when size is
 * zero(0). On any mismatch the entry data is hexdumped, the read and
 * write backtraces are printed and the current testcase is marked as
 * failed.
 *
 * The entry is always freed, also on failure. 'function_p' is the name
 * of the public API function, used in error messages. Returns the
 * number of bytes copied, or -1 on error.
 *
 * NOTE(review): "%d" is used for size_t/ssize_t arguments below —
 * presumably fine for this platform's std_printf; verify.
 */
static int read_mock_entry(struct mock_entry_t *entry_p,
                           const char *id_p,
                           void *buf_p,
                           size_t size,
                           const char *function_p)
{
    ssize_t res;
    res = -1;
    if (size == entry_p->data.size) {
        if (size > 0) {
            if (buf_p != NULL) {
                memcpy(buf_p,
                       &entry_p->data.buf[0],
                       size);
                res = size;
            } else {
                std_printf(OSTR("\r\n%s(): Got NULL pointer with size greater "
                                "than zero(0) for mock id '%s'."),
                           function_p,
                           id_p);
            }
        } else {
            /* A zero sized entry carries no data; success. */
            res = 0;
        }
    } else {
        std_printf(OSTR("\r\n%s(): Trying to read exactly %d bytes(s) from "
                        "mock entry with id '%s' but got %d"),
                   function_p,
                   size,
                   id_p,
                   entry_p->data.size);
    }
    if (res < 0) {
        /* Diagnostics: show the stored data and where it was written. */
        std_printf(OSTR(" ::\r\n"
                        "Mock entry data:\r\n"));
        std_hexdump(sys_get_stdout(),
                    &entry_p->data.buf[0],
                    entry_p->data.size);
        print_read_backtrace();
        mock_entry_print_write_backtrace(entry_p);
        harness_set_testcase_result(-1);
    }
    free_mock_entry(entry_p);
    return (res);
}
/* Countdown callback used by harness_mock_mwrite(): the argument holds
   the number of remaining reads. Returns one(1) when the count reaches
   zero, signalling that the mock entry may be retired. */
static int mwrite_cb(void *arg_p, void *buf_p)
{
    int *remaining_p;

    remaining_p = (int *)arg_p;
    *remaining_p = (*remaining_p - 1);

    return (*remaining_p == 0 ? 1 : 0);
}
/* Allocate and initialize a mock entry holding a copy of 'size' bytes
 * from 'buf_p'. A NULL buffer is only valid with size zero(0). On
 * failure the write backtrace is printed, the testcase is marked as
 * failed and a negative errno is returned. On success the new entry is
 * stored in *entry_pp (not yet linked into any list) and 'size' is
 * returned. */
static ssize_t create_mock_entry(const char *id_p,
                                 const void *buf_p,
                                 size_t size,
                                 struct mock_entry_t **entry_pp)
{
    ASSERTN(id_p != NULL, EINVAL);
    struct mock_entry_t *entry_p;
    if ((buf_p == NULL) && (size > 0)) {
        std_printf(OSTR("create_mock_entry(): Got NULL pointer with size "
                        "greater than zero(0) for mock id '%s'\r\n"),
                   id_p);
        print_write_backtrace();
        harness_set_testcase_result(-1);
        return (-EINVAL);
    }
    entry_p = alloc_mock_entry(id_p, size);
    if (entry_p == NULL) {
        std_printf(
            OSTR("Mock entry memory allocation failed for id '%s'\r\n"),
            id_p);
        print_write_backtrace();
        harness_set_testcase_result(-1);
        return (-ENOMEM);
    }
    /* Initiate the object. */
    if (size > 0) {
        memcpy(&entry_p->data.buf[0], buf_p, size);
    }
    entry_p->data.size = size;
    mock_entry_create_write_backtrace(entry_p);
    *entry_pp = entry_p;
    return (size);
}
/* Count the entries in a testcase array, up to (but not including) the
   terminating entry with a NULL callback. */
static int number_of_testcases(struct harness_testcase_t *testcase_p)
{
    int count;

    for (count = 0; testcase_p[count].callback != NULL; count++) {
    }

    return (count);
}
/* Print the final harness summary and stop the system, using the
   number of failed testcases as the exit code. */
static int print_report_and_stop(void)
{
    int total;
    int passed;
    int failed;
    int skipped;
    total = module.total;
    passed = module.passed;
    failed = module.failed;
    skipped = module.skipped;
    /* Testcases that never executed (e.g. early exit on failure) are
       counted as skipped. */
    skipped += (total - passed - failed - skipped);
    std_printf(OSTR("\r\n"
                    "harness report: total(%d), passed(%d), "
                    "failed(%d), skipped(%d)\r\n\r\n"),
               total,
               passed,
               failed,
               skipped);
    std_printf(OSTR("=============================== TEST END (%s) =========="
                    "====================\r\n\r\n"),
               ((passed + skipped) == total ? "PASSED" : "FAILED"));
    sys_stop(module.failed);
    return (0);
}
/* Execute all testcases in the NULL-terminated array 'testcases_p',
 * printing a banner, a per-testcase verdict and a final report.
 *
 * For each testcase: the mock lists and mock heap are reinitialized, a
 * callback return value < 0 or a recorded testcase failure counts as
 * FAILED, 0/0 as PASSED and anything else (positive return) as
 * SKIPPED. Any mock entry left unread after the callback fails the
 * testcase. This function does not return normally; the final report
 * path stops the system. */
int harness_run(struct harness_testcase_t *testcases_p)
{
    int err;
    struct harness_testcase_t *testcase_p;
    struct mock_entry_t *entry_p;
    size_t sizes[HEAP_FIXED_SIZES_MAX] = {
        8, 16, 32, 32, 32, 32, 32, 32
    };
    mutex_init(&module.mutex);
    module.total = number_of_testcases(testcases_p);
    module.passed = 0;
    module.failed = 0;
    module.skipped = 0;
    testcase_p = testcases_p;
    thrd_sleep_ms(CONFIG_HARNESS_SLEEP_MS);
    /* Print a header. */
    std_printf(OSTR("\r\n"));
    std_printf(OSTR("================================== TEST BEGIN =========="
                    "========================\r\n\r\n"));
    std_printf(sys_get_info());
    std_printf(OSTR("\r\n"));
    std_printf(OSTR("mock heap size: %u bytes\r\n"), sizeof(module.heap.buf));
    while (testcase_p->callback != NULL) {
        /* Reinitialize the heap before every testcase for minimal
           memory usage. */
        list_init(&module.mock.list);
        list_init(&module.mock.cb_list);
        heap_init(&module.heap.obj,
                  &module.heap.buf[0],
                  sizeof(module.heap.buf),
                  &sizes[0]);
        /* Mark current testcase as passed before its executed. */
        harness_set_testcase_result(0);
        std_printf(OSTR("\r\nenter: %s\r\n"), testcase_p->name_p);
        err = testcase_p->callback();
        /* Drain the mock list; any entry still present was written but
           never read, which fails the testcase. */
        do {
            entry_p = (struct mock_entry_t *)list_remove_head(&module.mock.list);
            if (entry_p != NULL) {
                std_printf(OSTR("Found unread mock id '%s'. Failing test.\r\n"),
                           entry_p->id_p);
                err = -1;
            }
        } while (entry_p != NULL);
        if ((err < 0) || (harness_get_testcase_result() == -1)) {
            module.failed++;
            std_printf(OSTR("exit: %s: FAILED\r\n"),
                       testcase_p->name_p);
#if CONFIG_HARNESS_EARLY_EXIT == 1
            /* Abort the run on the first failure. */
            print_report_and_stop();
#endif
        } else if ((err == 0) && (harness_get_testcase_result() == 0)) {
            module.passed++;
            std_printf(OSTR("exit: %s: PASSED\r\n"),
                       testcase_p->name_p);
        } else {
            /* Positive callback return value: testcase skipped. */
            module.skipped++;
            std_printf(OSTR("exit: %s: SKIPPED\r\n"),
                       testcase_p->name_p);
        }
        testcase_p++;
    }
#if CONFIG_THRD_FS_COMMANDS == 1
    /* Dump the thread list as a post-run diagnostic. */
    char buf[18];
    std_printf(OSTR("\r\n"));
    std_strcpy(buf, OSTR("/kernel/thrd/list"));
    fs_call(buf, NULL, sys_get_stdout(), NULL);
#endif
    return (print_report_and_stop());
}
/* Read characters from 'chan_p' (echoing each one) until the tail of
 * the accumulated input equals 'pattern_p', the channel times out, or
 * the internal buffer fills up.
 *
 * Returns the number of characters consumed on a match, -ETIMEDOUT if
 * no character arrived within the timeout (default one(1) second when
 * 'timeout_p' is NULL), or -1 on buffer overflow without a match.
 *
 * NOTE(review): 'buf' is static, so this function is presumably meant
 * for single-threaded use only — confirm. The chan_read() return value
 * is not checked. */
int harness_expect(void *chan_p,
                   const char *pattern_p,
                   const struct time_t *timeout_p)
{
    char c;
    size_t length;
    size_t pattern_length;
    static char buf[CONFIG_HARNESS_EXPECT_BUFFER_SIZE];
    static struct time_t timeout = {
        .seconds = 1,
        .nanoseconds = 0
    };
    length = 0;
    pattern_length = strlen(pattern_p);
    if (timeout_p == NULL) {
        timeout_p = &timeout;
    }
    while (length < sizeof(buf) - 1) {
        if (chan_poll(chan_p, timeout_p) == NULL) {
            return (-ETIMEDOUT);
        }
        chan_read(chan_p, &c, sizeof(c));
        std_printf(OSTR("%c"), c);
        buf[length++] = c;
        buf[length] = '\0';
        /* Compare to pattern. */
        if (length >= pattern_length) {
            if (strcmp(&buf[length - pattern_length], pattern_p) == 0) {
                return (length);
            }
        }
    }
    return (-1);
}
/* Store 'size' bytes from 'buf_p' as a mock entry with id 'id_p'.
   Entries are consumed in FIFO order by the read functions. Returns
   'size' on success, otherwise a negative error code. */
ssize_t harness_mock_write(const char *id_p,
                           const void *buf_p,
                           size_t size)
{
    ASSERTN(id_p != NULL, EINVAL);
    struct mock_entry_t *entry_p;
    ssize_t res;

    res = create_mock_entry(id_p, buf_p, size, &entry_p);

    if (res == size) {
        /* Append so reads consume writes in order. */
        mutex_lock(&module.mutex);
        list_add_tail(&module.mock.list, entry_p);
        mutex_unlock(&module.mutex);
    }

    return (res);
}
/* Write a mock entry that may be read 'length' times before it is
   retired. Implemented on top of harness_mock_cwrite() with a
   countdown callback. */
ssize_t harness_mock_mwrite(const char *id_p,
                            const void *buf_p,
                            size_t size,
                            int length)
{
    ssize_t res;

    res = harness_mock_cwrite(id_p,
                              buf_p,
                              size,
                              mwrite_cb,
                              &length,
                              sizeof(length));

    return (res);
}
/* Write a mock entry with an attached callback. The callback is
 * invoked on every read of the entry with a copy of 'arg_p' (of
 * 'arg_size' bytes) and the entry's data buffer; when it returns 1 the
 * entry is retired. Returns 'size' on success, otherwise a negative
 * error code (in which case the testcase is marked as failed). */
ssize_t harness_mock_cwrite(const char *id_p,
                            const void *buf_p,
                            size_t size,
                            harness_mock_cb_t cb,
                            void *arg_p,
                            size_t arg_size)
{
    ssize_t res;
    struct mock_entry_t *entry_p;
    struct mock_entry_cb_t *entry_cb_p;
    res = create_mock_entry(id_p,
                            buf_p,
                            size,
                            &entry_p);
    if (res != size) {
        return (res);
    }
    /* Allocate a callback entry. */
    entry_cb_p = alloc_mock_entry_cb(arg_size);
    if (entry_cb_p == NULL) {
        std_printf(
            OSTR("harness_mock_cwrite(): Mock entry callback memory allocation "
                 "failed for id '%s'\r\n"),
            id_p);
        print_write_backtrace();
        harness_set_testcase_result(-1);
        free_mock_entry(entry_p);
        return (-ENOMEM);
    }
    /* Initiate the callback entry. */
    entry_cb_p->entry_p = entry_p;
    entry_cb_p->fn = cb;
    if (arg_p != NULL) {
        memcpy(&entry_cb_p->arg[0], arg_p, arg_size);
    }
    /* Publish the entry and its callback atomically. */
    mutex_lock(&module.mutex);
    list_add_tail(&module.mock.list, entry_p);
    list_add_tail(&module.mock.cb_list, entry_cb_p);
    mutex_unlock(&module.mutex);
    return (size);
}
/* Read and consume the mock entry with id 'id_p'. A missing id is an
   error: it is reported, the testcase is marked as failed and -1 is
   returned. Otherwise the entry data is copied into 'buf_p' (which
   must be exactly 'size' bytes). */
ssize_t harness_mock_read(const char *id_p,
                          void *buf_p,
                          size_t size)
{
    struct mock_entry_t *entry_p;

    entry_p = find_mock_entry(id_p);

    if (entry_p == NULL) {
        std_printf(OSTR("\r\nharness_mock_read(): Mock id '%s' not found.\r\n"),
                   id_p);
        print_read_backtrace();
        harness_set_testcase_result(-1);

        return (-1);
    }

    return (read_mock_entry(entry_p, id_p, buf_p, size, "harness_mock_read"));
}
/* Like harness_mock_read(), but a missing id is not an error: -ENOENT
   is returned and the testcase is not affected. */
ssize_t harness_mock_try_read(const char *id_p,
                              void *buf_p,
                              size_t size)
{
    struct mock_entry_t *entry_p;

    entry_p = find_mock_entry(id_p);

    if (entry_p == NULL) {
        return (-ENOENT);
    }

    return (read_mock_entry(entry_p,
                            id_p,
                            buf_p,
                            size,
                            "harness_mock_try_read"));
}
/* Consume the mock entry with id 'id_p' and compare its data to
 * 'buf_p' (which must be exactly 'size' bytes; NULL is only valid with
 * size zero(0)).
 *
 * Returns 0 when the data matches, otherwise -1 with diagnostics
 * printed (hexdump/diff, assert and write backtraces) and the testcase
 * marked as failed. The entry is always freed when found. */
int harness_mock_assert(const char *id_p,
                        const void *buf_p,
                        size_t size)
{
    ASSERTN(id_p != NULL, EINVAL);
    struct mock_entry_t *entry_p;
    int res;
    res = -1;
    entry_p = find_mock_entry(id_p);
    if (entry_p != NULL) {
        if (size == entry_p->data.size) {
            if (size > 0) {
                if (buf_p != NULL) {
                    res = memcmp(buf_p, &entry_p->data.buf[0], entry_p->data.size);
                    if (res != 0) {
                        std_printf(OSTR("\r\nharness_mock_assert(): Data "
                                        "mismatch for mock id '%s' "),
                                   id_p);
                        /* Normalize any non-zero memcmp() result. */
                        res = -1;
                    }
                } else {
                    std_printf(OSTR("\r\nharness_mock_assert(): Got NULL pointer "
                                    "with size greater than zero(0) for mock id "
                                    "'%s' "),
                               id_p);
                }
            } else {
                /* Zero sized entry and zero sized expectation match. */
                res = 0;
            }
        } else {
            std_printf(OSTR("\r\nharness_mock_assert(): Trying to read exactly "
                            "%d bytes(s) from mock id '%s' but got %d"),
                       size,
                       id_p,
                       entry_p->data.size);
        }
        if (res < 0) {
            /* Diagnostics: show a diff when possible, otherwise dump
               the stored entry data. */
            if ((buf_p != NULL) && (size > 0)) {
                _ASSERTHEX("actual",
                           buf_p,
                           "expected",
                           &entry_p->data.buf[0],
                           size,
                           entry_p->data.size);
            } else {
                std_printf(OSTR("::\r\nMock entry data:\r\n"));
                std_hexdump(sys_get_stdout(),
                            &entry_p->data.buf[0],
                            entry_p->data.size);
            }
            print_assert_backtrace();
            mock_entry_print_write_backtrace(entry_p);
            harness_set_testcase_result(-1);
        }
        free_mock_entry(entry_p);
    } else {
        std_printf(OSTR("\r\nharness_mock_assert(): %s: mock id not found\r\n"),
                   id_p);
        print_assert_backtrace();
        harness_set_testcase_result(-1);
    }
    return (res);
}
/* Write a mock entry and notify any thread blocked in
   harness_mock_read_wait() via the module bus. Returns 'size' on
   success, otherwise a negative error code. */
ssize_t harness_mock_write_notify(const char *id_p,
                                  const void *buf_p,
                                  size_t size)
{
    ASSERTN(id_p != NULL, EINVAL);
    ssize_t res;
    uint32_t mask;

    res = harness_mock_write(id_p, buf_p, size);

    if (res == size) {
        /* Wake waiting readers. */
        mask = 1;
        bus_write(&module.bus, 0, &mask, sizeof(mask));
    }

    return (res);
}
/* Block until a mock entry with id 'id_p' exists (written with
 * harness_mock_write_notify()), then read and consume it.
 *
 * The caller is woken through a bus listener each time an entry is
 * published. Returns the result of the read.
 *
 * NOTE(review): 'timeout_p' is accepted but never used — this waits
 * forever when no entry arrives; confirm that is intended. */
ssize_t harness_mock_read_wait(const char *id_p,
                               void *buf_p,
                               size_t size,
                               struct time_t *timeout_p)
{
    ASSERTN(id_p != NULL, EINVAL);
    struct event_t event;
    struct bus_listener_t listener;
    ssize_t res;
    uint32_t mask;
    struct mock_entry_t *entry_p;
    res = 0;
    /* Subscribe to write notifications before the first lookup so no
       notification can be missed. */
    event_init(&event);
    bus_listener_init(&listener, 0, &event);
    bus_attach(&module.bus, &listener);
    while (1) {
        entry_p = find_mock_entry(id_p);
        if (entry_p != NULL) {
            res = read_mock_entry(entry_p,
                                  id_p,
                                  buf_p,
                                  size,
                                  "harness_mock_read_wait");
            bus_detach(&module.bus, &listener);
            break;
        }
        /* Wait for the next write notification. */
        mask = 1;
        event_read(&event, &mask, sizeof(mask));
    }
    return (res);
}
/* Record the result of the current testcase: 0 passed, -1 failed. With
   early exit configured, a failure immediately prints the report and
   stops the system. */
int harness_set_testcase_result(int result)
{
    module.current_testcase_result = result;
#if CONFIG_HARNESS_EARLY_EXIT == 1
    if (result == -1) {
        module.failed++;
        print_report_and_stop();
    }
#endif
    return (0);
}
/* Return the recorded result of the current testcase (0 or -1). */
int harness_get_testcase_result(void)
{
    return (module.current_testcase_result);
}
| 25.870748 | 83 | 0.537821 | [
"object"
] |
238eadcdb8ee4787786f5dc0ccdcce94a0197a44 | 21,936 | c | C | src/router_core/agent_config_address.c | prvn002/qpid-dispatch | f277905077e164aac2d38736441be5c49d2dfaf7 | [
"Apache-2.0"
] | 59 | 2015-09-22T18:49:09.000Z | 2022-03-08T03:38:02.000Z | src/router_core/agent_config_address.c | ErnieAllen/qpid-dispatch | db1170421c051e48defd2eee62be21da583021a1 | [
"Apache-2.0"
] | 861 | 2015-10-07T14:07:09.000Z | 2022-03-25T18:24:47.000Z | src/router_core/agent_config_address.c | ErnieAllen/qpid-dispatch | db1170421c051e48defd2eee62be21da583021a1 | [
"Apache-2.0"
] | 91 | 2015-09-02T19:01:48.000Z | 2022-01-31T11:52:34.000Z | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "agent_config_address.h"
#include "qpid/dispatch/ctools.h"
#include <inttypes.h>
#include <stdio.h>
// Column indexes into qdr_config_address_columns below; the two lists
// must stay in sync.
#define QDR_CONFIG_ADDRESS_NAME 0
#define QDR_CONFIG_ADDRESS_IDENTITY 1
#define QDR_CONFIG_ADDRESS_TYPE 2
#define QDR_CONFIG_ADDRESS_PREFIX 3
#define QDR_CONFIG_ADDRESS_DISTRIBUTION 4
#define QDR_CONFIG_ADDRESS_WAYPOINT 5
#define QDR_CONFIG_ADDRESS_IN_PHASE 6
#define QDR_CONFIG_ADDRESS_OUT_PHASE 7
#define QDR_CONFIG_ADDRESS_PATTERN 8
#define QDR_CONFIG_ADDRESS_PRIORITY 9
#define QDR_CONFIG_ADDRESS_FALLBACK 10
// Management attribute names of the config address entity, terminated
// by a zero entry.
const char *qdr_config_address_columns[] =
    {"name",
     "identity",
     "type",
     "prefix",
     "distribution",
     "waypoint",
     "ingressPhase",
     "egressPhase",
     "pattern",
     "priority",
     "fallback",
     0};
// Fully qualified management entity type name.
const char *CONFIG_ADDRESS_TYPE = "org.apache.qpid.dispatch.router.config.address";
// Prefix character used to annotate config address names stored in
// core->addr_lr_al_hash (see create/delete below).
const char CONFIG_ADDRESS_PREFIX = 'C';
// Append the value of one column of 'addr' to the composed field
// 'body'. When 'as_map' is true the column name is inserted first, so
// the caller builds a map; otherwise only the value is appended (list
// form). Unset values are emitted as null.
static void qdr_config_address_insert_column_CT(qdr_address_config_t *addr, int col, qd_composed_field_t *body, bool as_map)
{
    const char *text = 0;
    if (as_map)
        qd_compose_insert_string(body, qdr_config_address_columns[col]);
    switch(col) {
    case QDR_CONFIG_ADDRESS_NAME:
        if (addr->name)
            qd_compose_insert_string(body, addr->name);
        else
            qd_compose_insert_null(body);
        break;
    case QDR_CONFIG_ADDRESS_IDENTITY: {
        // Identity is stored numerically but reported as a string.
        char id_str[100];
        snprintf(id_str, 100, "%"PRId64, addr->identity);
        qd_compose_insert_string(body, id_str);
        break;
    }
    case QDR_CONFIG_ADDRESS_TYPE:
        qd_compose_insert_string(body, CONFIG_ADDRESS_TYPE);
        break;
    case QDR_CONFIG_ADDRESS_PREFIX:
        if (addr->is_prefix && addr->pattern) {
            // Note (kgiusti): internally we prepend a '/#' to the configured
            // prefix and treat it like a pattern.  Remove trailing '/#' to put
            // it back into its original form
            const size_t len = strlen(addr->pattern);
            assert(len > 1);
            qd_compose_insert_string_n(body, addr->pattern, len - 2);
        } else
            qd_compose_insert_null(body);
        break;
    case QDR_CONFIG_ADDRESS_PATTERN:
        if (!addr->is_prefix && addr->pattern)
            qd_compose_insert_string(body, addr->pattern);
        else
            qd_compose_insert_null(body);
        break;
    case QDR_CONFIG_ADDRESS_DISTRIBUTION:
        // Map the internal treatment back to the management string.
        switch (addr->treatment) {
        case QD_TREATMENT_MULTICAST_FLOOD:
        case QD_TREATMENT_MULTICAST_ONCE:   text = "multicast"; break;
        case QD_TREATMENT_ANYCAST_CLOSEST:  text = "closest";   break;
        case QD_TREATMENT_ANYCAST_BALANCED: text = "balanced";  break;
        default:
            text = 0;
        }
        if (text)
            qd_compose_insert_string(body, text);
        else
            qd_compose_insert_null(body);
        break;
    case QDR_CONFIG_ADDRESS_WAYPOINT:
        // Waypoint is the derived phase combination 0 -> 1.
        qd_compose_insert_bool(body, addr->in_phase == 0 && addr->out_phase == 1);
        break;
    case QDR_CONFIG_ADDRESS_IN_PHASE:
        qd_compose_insert_int(body, addr->in_phase);
        break;
    case QDR_CONFIG_ADDRESS_OUT_PHASE:
        qd_compose_insert_int(body, addr->out_phase);
        break;
    case QDR_CONFIG_ADDRESS_PRIORITY:
        qd_compose_insert_int(body, addr->priority);
        break;
    case QDR_CONFIG_ADDRESS_FALLBACK:
        qd_compose_insert_bool(body, addr->fallback);
        break;
    }
}
// Append one result row for 'addr' to the query body: the requested
// columns, in query order, encoded as a flat list.
static void qdr_agent_write_config_address_CT(qdr_query_t *query, qdr_address_config_t *addr)
{
    qd_composed_field_t *body = query->body;

    qd_compose_start_list(body);

    for (int i = 0; query->columns[i] >= 0; i++) {
        qdr_config_address_insert_column_CT(addr, query->columns[i], body, false);
    }

    qd_compose_end_list(body);
}
// Advance the query cursor past 'addr'. Clears query->more when there
// is no further configured address to report.
static void qdr_manage_advance_config_address_CT(qdr_query_t *query, qdr_address_config_t *addr)
{
    if (!addr) {
        query->more = false;
        return;
    }

    addr = DEQ_NEXT(addr);
    query->more = (addr != 0);
    query->next_offset++;
}
// Start a GET query: report the configured address at 'offset' (if
// any), position the cursor for qdra_config_address_get_next_CT() and
// enqueue the response.
void qdra_config_address_get_first_CT(qdr_core_t *core, qdr_query_t *query, int offset)
{
    //
    // Queries that get this far will always succeed.
    //
    query->status = QD_AMQP_OK;
    //
    // If the offset goes beyond the set of objects, end the query now.
    //
    if (offset >= DEQ_SIZE(core->addr_config)) {
        query->more = false;
        qdr_agent_enqueue_response_CT(core, query);
        return;
    }
    //
    // Run to the object at the offset.
    //
    qdr_address_config_t *addr = DEQ_HEAD(core->addr_config);
    for (int i = 0; i < offset && addr; i++)
        addr = DEQ_NEXT(addr);
    assert(addr);
    if (addr) {
        //
        // Write the columns of the object into the response body.
        //
        qdr_agent_write_config_address_CT(query, addr);
        //
        // Advance to the next address
        //
        query->next_offset = offset;
        qdr_manage_advance_config_address_CT(query, addr);
    }
    else {
        // Defensive: list shrank between the size check and the walk.
        query->more = false;
    }
    //
    // Enqueue the response.
    //
    qdr_agent_enqueue_response_CT(core, query);
}
// Continue a GET query: report the configured address at
// query->next_offset (set by get_first/previous get_next), advance the
// cursor and enqueue the response.
void qdra_config_address_get_next_CT(qdr_core_t *core, qdr_query_t *query)
{
    qdr_address_config_t *addr = 0;
    if (query->next_offset < DEQ_SIZE(core->addr_config)) {
        addr = DEQ_HEAD(core->addr_config);
        if (!addr) {
            query->more = false;
            qdr_agent_enqueue_response_CT(core, query);
            return;
        }
        for (int i = 0; i < query->next_offset && addr; i++)
            addr = DEQ_NEXT(addr);
    }
    if (addr) {
        //
        // Write the columns of the addr entity into the response body.
        //
        qdr_agent_write_config_address_CT(query, addr);
        //
        // Advance to the next object
        //
        qdr_manage_advance_config_address_CT(query, addr);
    } else
        query->more = false;
    //
    // Enqueue the response.
    //
    qdr_agent_enqueue_response_CT(core, query);
}
// Map the textual "distribution" attribute onto a treatment value.
// An absent field or an unrecognized value falls back to balanced
// anycast.
static qd_address_treatment_t qdra_address_treatment_CT(qd_parsed_field_t *field)
{
    if (field == 0)
        return QD_TREATMENT_ANYCAST_BALANCED;

    qd_iterator_t *iter = qd_parse_raw(field);

    if (qd_iterator_equal(iter, (unsigned char*) "multicast"))
        return QD_TREATMENT_MULTICAST_ONCE;
    if (qd_iterator_equal(iter, (unsigned char*) "closest"))
        return QD_TREATMENT_ANYCAST_CLOSEST;
    if (qd_iterator_equal(iter, (unsigned char*) "balanced"))
        return QD_TREATMENT_ANYCAST_BALANCED;
    if (qd_iterator_equal(iter, (unsigned char*) "unavailable"))
        return QD_TREATMENT_UNAVAILABLE;

    return QD_TREATMENT_ANYCAST_BALANCED;
}
// Linear scan of the configured address list for the entry whose
// numeric identity, rendered as text, matches the supplied iterator.
// Returns 0 when 'identity' is 0 or no entry matches.
static qdr_address_config_t *qdr_address_config_find_by_identity_CT(qdr_core_t *core, qd_iterator_t *identity)
{
    qdr_address_config_t *addr;
    char id[100];

    if (!identity)
        return 0;

    for (addr = DEQ_HEAD(core->addr_config); addr; addr = DEQ_NEXT(addr)) {
        snprintf(id, 100, "%"PRId64, addr->identity);
        if (qd_iterator_equal(identity, (const unsigned char*) id))
            break;
    }

    return addr;
}
// Linear scan of the configured address list for the entry whose name
// matches the supplied iterator. Entries without a name are skipped.
// Returns 0 when 'name' is 0 or no entry matches.
static qdr_address_config_t *qdr_address_config_find_by_name_CT(qdr_core_t *core, qd_iterator_t *name)
{
    qdr_address_config_t *addr;

    if (!name)
        return 0;

    for (addr = DEQ_HEAD(core->addr_config); addr; addr = DEQ_NEXT(addr)) {
        if (addr->name && qd_iterator_equal(name, (const unsigned char*) addr->name))
            break;
    }

    return addr;
}
// Handle a management DELETE of a config address. The entity is looked
// up by identity when given, otherwise by name. Responds NO_CONTENT on
// success, NOT_FOUND when no entity matches and BAD_REQUEST when
// neither name nor identity was provided.
void qdra_config_address_delete_CT(qdr_core_t    *core,
                                   qdr_query_t   *query,
                                   qd_iterator_t *name,
                                   qd_iterator_t *identity)
{
    qdr_address_config_t *addr = 0;
    if (!name && !identity) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing DELETE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
    }
    else {
        // Identity takes precedence over name when both are present.
        if (identity)
            addr = qdr_address_config_find_by_identity_CT(core, identity);
        else if (name)
            addr = qdr_address_config_find_by_name_CT(core, name);
        if (addr) {
            qdr_core_remove_address_config(core, addr);
            query->status = QD_AMQP_NO_CONTENT;
        } else
            query->status = QD_AMQP_NOT_FOUND;
    }
    //
    // Enqueue the response.
    //
    qdr_agent_enqueue_response_CT(core, query);
}
// Handle a management CREATE of a config address.
//
// Validates the request (unique name, map body, exactly one of
// prefix/pattern, phase/priority/fallback constraints), inserts the
// pattern into the core parse tree and links the new entity into
// core->addr_config (and the name hash, when named). The while(true)
// loop is a single-pass error-exit idiom: every failure sets
// query->status and breaks out. When 'query->body' is 0 the request
// came from the configuration file parser and no response is sent.
void qdra_config_address_create_CT(qdr_core_t        *core,
                                   qd_iterator_t     *name,
                                   qdr_query_t       *query,
                                   qd_parsed_field_t *in_body)
{
    char *pattern = NULL;
    while (true) {
        //
        // Ensure there isn't a duplicate name
        //
        qdr_address_config_t *addr = 0;
        if (name) {
            // Names are stored in the hash with the 'C' prefix
            // annotation; restore the caller's view afterwards.
            qd_iterator_view_t iter_view = qd_iterator_get_view(name);
            qd_iterator_annotate_prefix(name, CONFIG_ADDRESS_PREFIX);
            qd_iterator_reset_view(name, ITER_VIEW_ADDRESS_HASH);
            qd_hash_retrieve(core->addr_lr_al_hash, name, (void**) &addr);
            qd_iterator_reset_view(name, iter_view);
        }
        if (!!addr) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = "Name conflicts with an existing entity";
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        // Ensure that the body is a map
        if (!qd_parse_is_map(in_body)) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = "Body of request must be a map";
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        //
        // Extract the fields from the request
        //
        qd_parsed_field_t *prefix_field    = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PREFIX]);
        qd_parsed_field_t *pattern_field   = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PATTERN]);
        qd_parsed_field_t *distrib_field   = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_DISTRIBUTION]);
        qd_parsed_field_t *waypoint_field  = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_WAYPOINT]);
        qd_parsed_field_t *in_phase_field  = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_IN_PHASE]);
        qd_parsed_field_t *out_phase_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_OUT_PHASE]);
        qd_parsed_field_t *priority_field  = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PRIORITY]);
        qd_parsed_field_t *fallback_field  = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_FALLBACK]);
        // -1 marks "not provided" for the phases and the priority.
        bool waypoint  = waypoint_field  ? qd_parse_as_bool(waypoint_field) : false;
        long in_phase  = in_phase_field  ? qd_parse_as_long(in_phase_field)  : -1;
        long out_phase = out_phase_field ? qd_parse_as_long(out_phase_field) : -1;
        long priority  = priority_field  ? qd_parse_as_long(priority_field)  : -1;
        bool fallback  = fallback_field  ? qd_parse_as_bool(fallback_field)  : false;
        //
        // Either a prefix or a pattern field is mandatory.  Prefix and pattern
        // are mutually exclusive.  Fail if either both or none are given.
        //
        const char *msg = NULL;
        if (!prefix_field && !pattern_field) {
            msg = "Either a 'prefix' or 'pattern' attribute must be provided";
        } else if (prefix_field && pattern_field) {
            msg = "Cannot specify both a 'prefix' and a 'pattern' attribute";
        }
        if (fallback && (waypoint || in_phase > 0 || out_phase > 0)) {
            msg = "Fallback cannot be specified with waypoint or non-zero ingress and egress phases";
        }
        if (msg) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = msg;
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        // validate the pattern/prefix, add "/#" if prefix
        pattern = qdra_config_address_validate_pattern_CT((prefix_field) ? prefix_field : pattern_field,
                                                          !!prefix_field,
                                                          &msg);
        if (!pattern) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = msg;
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        //
        // Handle the address-phasing logic.  If the phases are provided, use them.  Otherwise
        // use the waypoint flag to set the most common defaults.
        //
        if (in_phase == -1 && out_phase == -1) {
            in_phase  = 0;
            out_phase = waypoint ? 1 : 0;
        }
        //
        // Validate the phase values
        //
        if (in_phase < 0 || in_phase > 9 || out_phase < 0 || out_phase > 9) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = "Phase values must be between 0 and 9";
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        //
        // Validate the priority values.
        //
        if (priority > QDR_MAX_PRIORITY ) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = "Priority value, if present, must be between 0 and QDR_MAX_PRIORITY";
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        //
        // The request is valid. Attempt to insert the address pattern into
        // the parse tree, fail if there is already an entry for that pattern
        //
        addr = new_qdr_address_config_t();
        if (!addr) {
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = "Out of memory";
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        ZERO(addr);
        //
        // Insert the uninitialized address to check if it already exists in
        // the parse tree.  On success initialize it.  This is thread safe
        // since the current thread (core) is the only thread allowed to use
        // the parse tree
        //
        qd_error_t rc = qd_parse_tree_add_pattern_str(core->addr_parse_tree, pattern, addr);
        if (rc) {
            free_qdr_address_config_t(addr);
            query->status = QD_AMQP_BAD_REQUEST;
            query->status.description = qd_error_name(rc);
            qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
            break;
        }
        addr->ref_count = 1; // Represents the reference from the addr_config list
        addr->name      = name ? (char*) qd_iterator_copy(name) : 0;
        addr->identity  = qdr_identifier(core);
        addr->treatment = qdra_address_treatment_CT(distrib_field);
        addr->in_phase  = in_phase;
        addr->out_phase = out_phase;
        addr->is_prefix = !!prefix_field;
        addr->pattern   = pattern;
        addr->priority  = priority;
        addr->fallback  = fallback;
        // Ownership of the pattern string moved into 'addr'; the
        // cleanup free() at the end must not release it.
        pattern = 0;
        DEQ_INSERT_TAIL(core->addr_config, addr);
        if (name) {
            qd_iterator_view_t iter_view = qd_iterator_get_view(name);
            qd_iterator_reset_view(name, ITER_VIEW_ADDRESS_HASH);
            qd_hash_insert(core->addr_lr_al_hash, name, addr, &addr->hash_handle);
            qd_iterator_reset_view(name, iter_view);
        }
        //
        // Compose the result map for the response.
        //
        if (query->body) {
            qd_compose_start_map(query->body);
            for (int col = 0; col < QDR_CONFIG_ADDRESS_COLUMN_COUNT; col++)
                qdr_config_address_insert_column_CT(addr, col, query->body, true);
            qd_compose_end_map(query->body);
        }
        query->status = QD_AMQP_CREATED;
        break;
    }
    //
    // Enqueue the response if there is a body.  If there is no body, this is a management
    // operation created internally by the configuration file parser.
    //
    if (query->body) {
        //
        // If there was an error in processing the create, insert a NULL value into the body.
        //
        if (query->status.status / 100 > 2)
            qd_compose_insert_null(query->body);
        qdr_agent_enqueue_response_CT(core, query);
    } else
        qdr_query_free(query);
    free(pattern);
}
// Write all columns of 'addr' into 'body' as a map of
// "column name": value pairs.
static void qdr_manage_write_config_address_map_CT(qdr_core_t          *core,
                                                   qdr_address_config_t *addr,
                                                   qd_composed_field_t *body,
                                                   const char          *qdr_config_address_columns[])
{
    int col;

    qd_compose_start_map(body);

    for (col = 0; col < QDR_CONFIG_ADDRESS_COLUMN_COUNT; col++) {
        qd_compose_insert_string(body, qdr_config_address_columns[col]);
        qdr_config_address_insert_column_CT(addr, col, body, false);
    }

    qd_compose_end_map(body);
}
// Handle a management READ of a config address. The entity is looked
// up by identity when given, otherwise by name; the full attribute map
// is written into the response body on success. Responds NOT_FOUND
// when no entity matches and BAD_REQUEST when neither name nor
// identity was provided.
void qdra_config_address_get_CT(qdr_core_t    *core,
                                qd_iterator_t *name,
                                qd_iterator_t *identity,
                                qdr_query_t   *query,
                                const char    *qdr_config_address_columns[])
{
    qdr_address_config_t *addr = 0;
    if (!name && !identity) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing READ of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description);
    }
    else {
        if (identity) //If there is identity, ignore the name
            addr = qdr_address_config_find_by_identity_CT(core, identity);
        else if (name)
            addr = qdr_address_config_find_by_name_CT(core, name);
        if (addr == 0) {
            // Send back a 404
            query->status = QD_AMQP_NOT_FOUND;
        }
        else {
            //
            // Write the columns of the address entity into the response body.
            //
            qdr_manage_write_config_address_map_CT(core, addr, query->body, qdr_config_address_columns);
            query->status = QD_AMQP_OK;
        }
    }
    //
    // Enqueue the response.
    //
    qdr_agent_enqueue_response_CT(core, query);
}
// given an address pattern parsed field, validate it and convert it to a string
//
// Returns a heap allocated pattern string owned by the caller (release
// with free()), or NULL with *error pointing at a static description.
// Leading and trailing token separators ('.' and '/') are stripped;
// when is_prefix is true a "/#" suffix is appended so the prefix
// behaves like a pattern match.
char *qdra_config_address_validate_pattern_CT(qd_parsed_field_t *pattern_field,
                                              bool is_prefix,
                                              const char **error)
{
    char *buf = NULL;
    char *pattern = NULL;
    uint8_t tag = qd_parse_tag(pattern_field);
    qd_iterator_t *p_iter = qd_parse_raw(pattern_field);
    int len = qd_iterator_length(p_iter);

    *error = NULL;

    // The attribute must be a non-empty AMQP string (str8 or str32).
    if ((tag != QD_AMQP_STR8_UTF8 && tag != QD_AMQP_STR32_UTF8)
        || len == 0)
    {
        *error = ((is_prefix)
                  ? "Prefix must be a non-empty string type"
                  : "Pattern must be a non-empty string type");
        goto exit;
    }

    buf = (char *)qd_iterator_copy(p_iter);
    if (!buf) {
        // Fix: the original dereferenced the copy without checking the
        // allocation made by qd_iterator_copy().
        *error = "Out of memory";
        goto exit;
    }

    char *begin = buf;

    // strip leading token separators
    // note: see parse_tree.c for acceptable separator characters
    while (*begin && strchr("./", *begin))
        begin++;

    // strip trailing separators
    while (*begin) {
        char *end = &begin[strlen(begin) - 1];
        if (!strchr("./", *end))
            break;
        *end = 0;
    }

    if (*begin == 0) {
        *error = ((is_prefix)
                  ? "Prefix invalid - no tokens"
                  : "Pattern invalid - no tokens");
        goto exit;
    }

    if (is_prefix) {
        // convert a prefix match into a valid pattern by appending "/#"
        pattern = malloc(strlen(begin) + 3);
        if (pattern) {
            strcpy(pattern, begin);
            strcat(pattern, "/#");
        }
    } else {
        pattern = strdup(begin);
    }

    if (!pattern) {
        // Fix: report allocation failure instead of returning NULL
        // with *error left unset.
        *error = "Out of memory";
    }

exit:
    free(buf);
    return pattern;
}
| 34.115086 | 137 | 0.614925 | [
"object"
] |
239515c210916508d776cb87466309d23df396bd | 2,343 | h | C | include/linux_parser.h | christophomos/CppND-System-Monitor | d6e86909da4f97ebffdd0f518c236acd48b83ede | [
"MIT"
] | null | null | null | include/linux_parser.h | christophomos/CppND-System-Monitor | d6e86909da4f97ebffdd0f518c236acd48b83ede | [
"MIT"
] | null | null | null | include/linux_parser.h | christophomos/CppND-System-Monitor | d6e86909da4f97ebffdd0f518c236acd48b83ede | [
"MIT"
] | null | null | null | #ifndef SYSTEM_PARSER_H
#define SYSTEM_PARSER_H
#include <fstream>
#include <regex>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>

#include "proc_pid_stat.h"
#include "procstat.h"
namespace LinuxParser {
// Paths of the proc(5) and system files parsed by this module.
const std::string kProcDirectory{"/proc/"};
//
const std::string kCmdlineFilename{"/cmdline"};
const std::string kCpuinfoFilename{"/cpuinfo"};
const std::string kStatusFilename{"/status"};
const std::string kStatFilename{"/stat"};
const std::string kUptimeFilename{"/uptime"};
const std::string kMeminfoFilename{"/meminfo"};
const std::string kVersionFilename{"/version"};
const std::string kOSPath{"/etc/os-release"};
const std::string kPasswordPath{"/etc/passwd"};

// Key names
const std::string kOperatingSystemPrettyName{"PRETTY_NAME"};
constexpr char kOperatingSystemDelimiter{'='};
// For parsing a process' status file
const std::string kStatusUidKey{"Uid"};
constexpr char kStatusDelimiter{':'};
constexpr char kPasswdDelimiter{':'};
constexpr char kMeminfoDelimiter{':'};
const std::string kMeminfoMemTotal{"MemTotal"};
const std::string kMeminfoMemFree{"MemFree"};
constexpr char kProcessStatusDelimiter{':'};
const std::string kVmSizeKey{"VmSize"};
const std::string kProcsRunning{"procs_running"};

// System-wide readers.
float MemoryUtilization();
long UpTime();
std::vector<int> Pids(const std::string& path = kProcDirectory);
int TotalProcesses();
int RunningProcesses();
std::string OperatingSystem(const std::string& path = kOSPath);
std::string Kernel();

// CPU: field positions within the aggregate "cpu" line of /proc/stat.
enum CPUStates {
  kUser_ = 0,
  kNice_,
  kSystem_,
  kIdle_,
  kIOwait_,
  kIRQ_,
  kSoftIRQ_,
  kSteal_,
  kGuest_,
  kGuestNice_
};
ProcStat StatsInfo();
float CpuUtilization();
long Jiffies();
long ActiveJiffies();
long ActiveJiffies(int pid);
long IdleJiffies();

// Per-process readers.
ProcPidStat ParseProcStat(int pid);
std::string Command(int pid);
std::string Ram(int pid);
std::string Uid(int pid, const std::string& proc_dir = kProcDirectory);
std::string User(int pid);
long int UpTime(int pid);

// EXTRA: Used generic programming
// Reads the first whitespace-delimited token of the first line of the
// file at 'path' and converts it to T via stream extraction.
template<typename T>
T ReadSingleValuedFile(const std::string& path) {
  std::string line;
  // Fix: value-initialize so a missing/unreadable file returns a
  // well-defined T{} instead of an uninitialized value (UB for
  // arithmetic types).
  T value{};
  std::ifstream filestream{path};
  if (filestream.is_open()) {
    getline(filestream, line);
    std::istringstream linestream{line};
    linestream >> value;
  }
  return value;
}
};  // namespace LinuxParser
#endif | 23.908163 | 71 | 0.738796 | [
"vector"
] |
2395171acfdbb860d9e8a158c4610044bae20b57 | 21,180 | c | C | qemu/target/i386/sev.c | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | [
"MIT"
] | 55 | 2019-12-20T03:25:14.000Z | 2022-01-16T07:19:47.000Z | qemu/target/i386/sev.c | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | [
"MIT"
] | 3 | 2021-07-27T19:36:05.000Z | 2021-12-31T02:20:53.000Z | qemu/target/i386/sev.c | WUSTL-CSPL/RT-TEE | aafb3e9ff6c6e744c6bce1e42bcb198e1063efcc | [
"MIT"
] | 11 | 2020-08-06T03:59:45.000Z | 2022-02-25T02:31:59.000Z | /*
* QEMU SEV support
*
* Copyright Advanced Micro Devices 2016-2018
*
* Author:
* Brijesh Singh <brijesh.singh@amd.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <sys/ioctl.h>
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object_interfaces.h"
#include "qemu/base64.h"
#include "sysemu/kvm.h"
#include "sev_i386.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "migration/blocker.h"
#define DEFAULT_GUEST_POLICY 0x1 /* disable debug */
#define DEFAULT_SEV_DEVICE "/dev/sev"
static SEVState *sev_state;
static Error *sev_mig_blocker;
static const char *const sev_fw_errlist[] = {
"",
"Platform state is invalid",
"Guest state is invalid",
"Platform configuration is invalid",
"Buffer too small",
"Platform is already owned",
"Certificate is invalid",
"Policy is not allowed",
"Guest is not active",
"Invalid address",
"Bad signature",
"Bad measurement",
"Asid is already owned",
"Invalid ASID",
"WBINVD is required",
"DF_FLUSH is required",
"Guest handle is invalid",
"Invalid command",
"Guest is active",
"Hardware error",
"Hardware unsafe",
"Feature not supported",
"Invalid parameter"
};
#define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
/*
 * Issue an SEV guest command to the kernel via KVM_MEMORY_ENCRYPT_OP.
 * 'cmd' is the KVM_SEV_* sub-command and 'data' its command-specific
 * payload. On return, *error (if non-NULL) holds the error code the
 * kernel reported in kvm_sev_cmd.error (an SEV firmware status usable
 * with fw_error_to_str()). Returns the raw ioctl result (< 0 on failure).
 */
static int
sev_ioctl(int fd, int cmd, void *data, int *error)
{
    int r;
    struct kvm_sev_cmd input;
    memset(&input, 0x0, sizeof(input));
    input.id = cmd;
    input.sev_fd = fd;
    input.data = (__u64)(unsigned long)data;
    r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &input);
    /* propagate the kernel-filled error code even when the ioctl failed */
    if (error) {
        *error = input.error;
    }
    return r;
}
static int
sev_platform_ioctl(int fd, int cmd, void *data, int *error)
{
int r;
struct sev_issue_cmd arg;
arg.cmd = cmd;
arg.data = (unsigned long)data;
r = ioctl(fd, SEV_ISSUE_CMD, &arg);
if (error) {
*error = arg.error;
}
return r;
}
/* Translate an SEV firmware status code into a human readable string. */
static const char *
fw_error_to_str(int code)
{
    if (code >= 0 && code < SEV_FW_MAX_ERROR) {
        return sev_fw_errlist[code];
    }
    return "unknown error";
}
/* True when the guest's SEV launch state machine is currently in 'state'. */
static bool
sev_check_state(SevState state)
{
    assert(sev_state);
    return state == sev_state->state;
}
/*
 * Advance the global SEV guest state machine to 'new_state', tracing the
 * transition. Callers are responsible for only requesting valid
 * transitions; this helper does not validate ordering.
 */
static void
sev_set_guest_state(SevState new_state)
{
    assert(new_state < SEV_STATE__MAX);
    assert(sev_state);
    trace_kvm_sev_change_state(SevState_str(sev_state->state),
                               SevState_str(new_state));
    sev_state->state = new_state;
}
static void
sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
{
int r;
struct kvm_enc_region range;
range.addr = (__u64)(unsigned long)host;
range.size = size;
trace_kvm_memcrypt_register_region(host, size);
r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
if (r) {
error_report("%s: failed to register region (%p+%#zx) error '%s'",
__func__, host, size, strerror(errno));
exit(1);
}
}
static void
sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
{
int r;
struct kvm_enc_region range;
range.addr = (__u64)(unsigned long)host;
range.size = size;
trace_kvm_memcrypt_unregister_region(host, size);
r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
if (r) {
error_report("%s: failed to unregister region (%p+%#zx)",
__func__, host, size);
}
}
static struct RAMBlockNotifier sev_ram_notifier = {
.ram_block_added = sev_ram_block_added,
.ram_block_removed = sev_ram_block_removed,
};
static void
qsev_guest_finalize(Object *obj)
{
}
static char *
qsev_guest_get_session_file(Object *obj, Error **errp)
{
QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
return s->session_file ? g_strdup(s->session_file) : NULL;
}
static void
qsev_guest_set_session_file(Object *obj, const char *value, Error **errp)
{
QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
s->session_file = g_strdup(value);
}
static char *
qsev_guest_get_dh_cert_file(Object *obj, Error **errp)
{
QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
return g_strdup(s->dh_cert_file);
}
static void
qsev_guest_set_dh_cert_file(Object *obj, const char *value, Error **errp)
{
QSevGuestInfo *s = QSEV_GUEST_INFO(obj);
s->dh_cert_file = g_strdup(value);
}
static char *
qsev_guest_get_sev_device(Object *obj, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
return g_strdup(sev->sev_device);
}
static void
qsev_guest_set_sev_device(Object *obj, const char *value, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
sev->sev_device = g_strdup(value);
}
static void
qsev_guest_class_init(ObjectClass *oc, void *data)
{
object_class_property_add_str(oc, "sev-device",
qsev_guest_get_sev_device,
qsev_guest_set_sev_device,
NULL);
object_class_property_set_description(oc, "sev-device",
"SEV device to use", NULL);
object_class_property_add_str(oc, "dh-cert-file",
qsev_guest_get_dh_cert_file,
qsev_guest_set_dh_cert_file,
NULL);
object_class_property_set_description(oc, "dh-cert-file",
"guest owners DH certificate (encoded with base64)", NULL);
object_class_property_add_str(oc, "session-file",
qsev_guest_get_session_file,
qsev_guest_set_session_file,
NULL);
object_class_property_set_description(oc, "session-file",
"guest owners session parameters (encoded with base64)", NULL);
}
static void
qsev_guest_set_handle(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
uint32_t value;
visit_type_uint32(v, name, &value, errp);
sev->handle = value;
}
static void
qsev_guest_set_policy(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
uint32_t value;
visit_type_uint32(v, name, &value, errp);
sev->policy = value;
}
static void
qsev_guest_set_cbitpos(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
uint32_t value;
visit_type_uint32(v, name, &value, errp);
sev->cbitpos = value;
}
static void
qsev_guest_set_reduced_phys_bits(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
uint32_t value;
visit_type_uint32(v, name, &value, errp);
sev->reduced_phys_bits = value;
}
static void
qsev_guest_get_policy(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
uint32_t value;
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
value = sev->policy;
visit_type_uint32(v, name, &value, errp);
}
static void
qsev_guest_get_handle(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
uint32_t value;
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
value = sev->handle;
visit_type_uint32(v, name, &value, errp);
}
static void
qsev_guest_get_cbitpos(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
uint32_t value;
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
value = sev->cbitpos;
visit_type_uint32(v, name, &value, errp);
}
static void
qsev_guest_get_reduced_phys_bits(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
uint32_t value;
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
value = sev->reduced_phys_bits;
visit_type_uint32(v, name, &value, errp);
}
static void
qsev_guest_init(Object *obj)
{
QSevGuestInfo *sev = QSEV_GUEST_INFO(obj);
sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE);
sev->policy = DEFAULT_GUEST_POLICY;
object_property_add(obj, "policy", "uint32", qsev_guest_get_policy,
qsev_guest_set_policy, NULL, NULL, NULL);
object_property_add(obj, "handle", "uint32", qsev_guest_get_handle,
qsev_guest_set_handle, NULL, NULL, NULL);
object_property_add(obj, "cbitpos", "uint32", qsev_guest_get_cbitpos,
qsev_guest_set_cbitpos, NULL, NULL, NULL);
object_property_add(obj, "reduced-phys-bits", "uint32",
qsev_guest_get_reduced_phys_bits,
qsev_guest_set_reduced_phys_bits, NULL, NULL, NULL);
}
/* sev guest info */
static const TypeInfo qsev_guest_info = {
.parent = TYPE_OBJECT,
.name = TYPE_QSEV_GUEST_INFO,
.instance_size = sizeof(QSevGuestInfo),
.instance_finalize = qsev_guest_finalize,
.class_size = sizeof(QSevGuestInfoClass),
.class_init = qsev_guest_class_init,
.instance_init = qsev_guest_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
/*
 * Resolve the QOM object named 'id' under the objects root and return it
 * if (and only if) it is a TYPE_QSEV_GUEST_INFO instance; NULL otherwise.
 */
static QSevGuestInfo *
lookup_sev_guest_info(const char *id)
{
    Object *obj = object_resolve_path_component(object_get_objects_root(), id);
    if (!obj) {
        return NULL;
    }
    /* object_dynamic_cast returns NULL when the type does not match */
    return (QSevGuestInfo *)object_dynamic_cast(obj, TYPE_QSEV_GUEST_INFO);
}
bool
sev_enabled(void)
{
return sev_state ? true : false;
}
uint64_t
sev_get_me_mask(void)
{
return sev_state ? sev_state->me_mask : ~0;
}
uint32_t
sev_get_cbit_position(void)
{
return sev_state ? sev_state->cbitpos : 0;
}
uint32_t
sev_get_reduced_phys_bits(void)
{
return sev_state ? sev_state->reduced_phys_bits : 0;
}
SevInfo *
sev_get_info(void)
{
SevInfo *info;
info = g_new0(SevInfo, 1);
info->enabled = sev_state ? true : false;
if (info->enabled) {
info->api_major = sev_state->api_major;
info->api_minor = sev_state->api_minor;
info->build_id = sev_state->build_id;
info->policy = sev_state->policy;
info->state = sev_state->state;
info->handle = sev_state->handle;
}
return info;
}
static int
sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
size_t *cert_chain_len)
{
guchar *pdh_data = NULL;
guchar *cert_chain_data = NULL;
struct sev_user_data_pdh_cert_export export = {};
int err, r;
/* query the certificate length */
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
if (r < 0) {
if (err != SEV_RET_INVALID_LEN) {
error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
r, err, fw_error_to_str(err));
return 1;
}
}
pdh_data = g_new(guchar, export.pdh_cert_len);
cert_chain_data = g_new(guchar, export.cert_chain_len);
export.pdh_cert_address = (unsigned long)pdh_data;
export.cert_chain_address = (unsigned long)cert_chain_data;
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
if (r < 0) {
error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
r, err, fw_error_to_str(err));
goto e_free;
}
*pdh = pdh_data;
*pdh_len = export.pdh_cert_len;
*cert_chain = cert_chain_data;
*cert_chain_len = export.cert_chain_len;
return 0;
e_free:
g_free(pdh_data);
g_free(cert_chain_data);
return 1;
}
/*
 * Query the SEV platform capabilities (PDH certificate, certificate
 * chain, C-bit position and reduced physical address bits) for the
 * 'query-sev-capabilities' QMP command. Returns a newly allocated
 * SevCapability owned by the caller, or NULL on failure.
 */
SevCapability *
sev_get_capabilities(void)
{
    SevCapability *cap = NULL;
    guchar *pdh_data = NULL;
    guchar *cert_chain_data = NULL;
    size_t pdh_len = 0, cert_chain_len = 0;
    uint32_t ebx;
    int fd;
    fd = open(DEFAULT_SEV_DEVICE, O_RDWR);
    if (fd < 0) {
        error_report("%s: Failed to open %s '%s'", __func__,
                     DEFAULT_SEV_DEVICE, strerror(errno));
        return NULL;
    }
    if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
                         &cert_chain_data, &cert_chain_len)) {
        goto out;
    }
    cap = g_new0(SevCapability, 1);
    /* the QAPI struct carries the certs as base64 strings */
    cap->pdh = g_base64_encode(pdh_data, pdh_len);
    cap->cert_chain = g_base64_encode(cert_chain_data, cert_chain_len);
    /* CPUID 0x8000001F:EBX[5:0] reports the C-bit position */
    host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
    cap->cbitpos = ebx & 0x3f;
    /*
     * When SEV feature is enabled, we lose one bit in guest physical
     * addressing.
     */
    cap->reduced_phys_bits = 1;
out:
    g_free(pdh_data);
    g_free(cert_chain_data);
    close(fd);
    return cap;
}
/*
 * Read the file at 'filename' and base64-decode its contents into a
 * newly allocated buffer returned via *data / *len (caller frees with
 * g_free). Returns 0 on success, -1 when the file cannot be read.
 *
 * Fixes two leaks: the intermediate base64 text returned by
 * g_file_get_contents() was never freed, and neither was the GError on
 * the failure path.
 */
static int
sev_read_file_base64(const char *filename, guchar **data, gsize *len)
{
    gsize sz;
    gchar *base64;
    GError *error = NULL;
    if (!g_file_get_contents(filename, &base64, &sz, &error)) {
        error_report("failed to read '%s' (%s)", filename, error->message);
        g_error_free(error);
        return -1;
    }
    *data = g_base64_decode(base64, len);
    g_free(base64);
    return 0;
}
/*
 * Begin the SEV launch flow: issue KVM_SEV_LAUNCH_START with the guest
 * policy and, when configured, the guest owner's session parameters and
 * DH certificate (both read base64-encoded from files). On success the
 * guest handle is recorded and the state machine advances to
 * LAUNCH_UPDATE. Returns 0 on success, 1 on failure.
 *
 * Fix: the failure report previously printed 'ret' (still holding the
 * initial value 1) instead of the actual ioctl result 'rc'.
 */
static int
sev_launch_start(SEVState *s)
{
    gsize sz;
    int ret = 1;
    int fw_error, rc;
    QSevGuestInfo *sev = s->sev_info;
    struct kvm_sev_launch_start *start;
    guchar *session = NULL, *dh_cert = NULL;
    start = g_new0(struct kvm_sev_launch_start, 1);
    start->handle = object_property_get_int(OBJECT(sev), "handle",
                                            &error_abort);
    start->policy = object_property_get_int(OBJECT(sev), "policy",
                                            &error_abort);
    if (sev->session_file) {
        if (sev_read_file_base64(sev->session_file, &session, &sz) < 0) {
            goto out;
        }
        start->session_uaddr = (unsigned long)session;
        start->session_len = sz;
    }
    if (sev->dh_cert_file) {
        if (sev_read_file_base64(sev->dh_cert_file, &dh_cert, &sz) < 0) {
            goto out;
        }
        start->dh_uaddr = (unsigned long)dh_cert;
        start->dh_len = sz;
    }
    trace_kvm_sev_launch_start(start->policy, session, dh_cert);
    rc = sev_ioctl(s->sev_fd, KVM_SEV_LAUNCH_START, start, &fw_error);
    if (rc < 0) {
        error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'",
                __func__, rc, fw_error, fw_error_to_str(fw_error));
        goto out;
    }
    /* the firmware may assign a new handle; publish it on the QOM object */
    object_property_set_int(OBJECT(sev), start->handle, "handle",
                            &error_abort);
    sev_set_guest_state(SEV_STATE_LAUNCH_UPDATE);
    s->handle = start->handle;
    s->policy = start->policy;
    ret = 0;
out:
    g_free(start);
    g_free(session);
    g_free(dh_cert);
    return ret;
}
/*
 * Encrypt 'len' bytes of guest memory at 'addr' in place via
 * KVM_SEV_LAUNCH_UPDATE_DATA (the data also contributes to the launch
 * measurement). Returns 0 on success, non-zero on failure or when given
 * an empty region.
 */
static int
sev_launch_update_data(uint8_t *addr, uint64_t len)
{
    int ret, fw_error;
    struct kvm_sev_launch_update_data update;
    if (!addr || !len) {
        return 1;
    }
    update.uaddr = (__u64)(unsigned long)addr;
    update.len = len;
    trace_kvm_sev_launch_update_data(addr, len);
    ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_UPDATE_DATA,
                    &update, &fw_error);
    if (ret) {
        error_report("%s: LAUNCH_UPDATE ret=%d fw_error=%d '%s'",
                __func__, ret, fw_error, fw_error_to_str(fw_error));
    }
    return ret;
}
/*
 * Machine-init-done notifier: retrieve the SEV launch measurement.
 * KVM_SEV_LAUNCH_MEASURE is issued twice — first with a zero-length
 * buffer to learn the blob size, then with a real buffer to fetch it.
 * On success the measurement is stored base64-encoded in
 * sev_state->measurement and the state advances to LAUNCH_SECRET.
 *
 * Fix: both error reports previously stringified 'errno' instead of the
 * firmware error code returned in 'error'.
 */
static void
sev_launch_get_measure(Notifier *notifier, void *unused)
{
    int ret, error;
    guchar *data;
    SEVState *s = sev_state;
    struct kvm_sev_launch_measure *measurement;
    if (!sev_check_state(SEV_STATE_LAUNCH_UPDATE)) {
        return;
    }
    measurement = g_new0(struct kvm_sev_launch_measure, 1);
    /* query the measurement blob length */
    ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    measurement, &error);
    if (!measurement->len) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        goto free_measurement;
    }
    data = g_new0(guchar, measurement->len);
    measurement->uaddr = (unsigned long)data;
    /* get the measurement blob */
    ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_MEASURE,
                    measurement, &error);
    if (ret) {
        error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'",
                     __func__, ret, error, fw_error_to_str(error));
        goto free_data;
    }
    sev_set_guest_state(SEV_STATE_LAUNCH_SECRET);
    /* encode the measurement value and emit the event */
    s->measurement = g_base64_encode(data, measurement->len);
    trace_kvm_sev_launch_measurement(s->measurement);
free_data:
    g_free(data);
free_measurement:
    g_free(measurement);
}
/*
 * Return a copy of the base64-encoded launch measurement, or NULL when
 * SEV is disabled or the guest has not yet reached LAUNCH_SECRET.
 */
char *
sev_get_launch_measurement(void)
{
    if (!sev_state || sev_state->state < SEV_STATE_LAUNCH_SECRET) {
        return NULL;
    }
    return g_strdup(sev_state->measurement);
}
static Notifier sev_machine_done_notify = {
.notify = sev_launch_get_measure,
};
static void
sev_launch_finish(SEVState *s)
{
int ret, error;
Error *local_err = NULL;
trace_kvm_sev_launch_finish();
ret = sev_ioctl(sev_state->sev_fd, KVM_SEV_LAUNCH_FINISH, 0, &error);
if (ret) {
error_report("%s: LAUNCH_FINISH ret=%d fw_error=%d '%s'",
__func__, ret, error, fw_error_to_str(error));
exit(1);
}
sev_set_guest_state(SEV_STATE_RUNNING);
/* add migration blocker */
error_setg(&sev_mig_blocker,
"SEV: Migration is not implemented");
ret = migrate_add_blocker(sev_mig_blocker, &local_err);
if (local_err) {
error_report_err(local_err);
error_free(sev_mig_blocker);
exit(1);
}
}
/*
 * VM state-change hook: the first time the guest transitions to running
 * (i.e. while not yet in the RUNNING SEV state), finalize the launch.
 */
static void
sev_vm_state_change(void *opaque, int running, RunState state)
{
    SEVState *s = opaque;
    if (running && !sev_check_state(SEV_STATE_RUNNING)) {
        sev_launch_finish(s);
    }
}
void *
sev_guest_init(const char *id)
{
SEVState *s;
char *devname;
int ret, fw_error;
uint32_t ebx;
uint32_t host_cbitpos;
struct sev_user_data_status status = {};
sev_state = s = g_new0(SEVState, 1);
s->sev_info = lookup_sev_guest_info(id);
if (!s->sev_info) {
error_report("%s: '%s' is not a valid '%s' object",
__func__, id, TYPE_QSEV_GUEST_INFO);
goto err;
}
s->state = SEV_STATE_UNINIT;
host_cpuid(0x8000001F, 0, NULL, &ebx, NULL, NULL);
host_cbitpos = ebx & 0x3f;
s->cbitpos = object_property_get_int(OBJECT(s->sev_info), "cbitpos", NULL);
if (host_cbitpos != s->cbitpos) {
error_report("%s: cbitpos check failed, host '%d' requested '%d'",
__func__, host_cbitpos, s->cbitpos);
goto err;
}
s->reduced_phys_bits = object_property_get_int(OBJECT(s->sev_info),
"reduced-phys-bits", NULL);
if (s->reduced_phys_bits < 1) {
error_report("%s: reduced_phys_bits check failed, it should be >=1,"
"' requested '%d'", __func__, s->reduced_phys_bits);
goto err;
}
s->me_mask = ~(1UL << s->cbitpos);
devname = object_property_get_str(OBJECT(s->sev_info), "sev-device", NULL);
s->sev_fd = open(devname, O_RDWR);
if (s->sev_fd < 0) {
error_report("%s: Failed to open %s '%s'", __func__,
devname, strerror(errno));
}
g_free(devname);
if (s->sev_fd < 0) {
goto err;
}
ret = sev_platform_ioctl(s->sev_fd, SEV_PLATFORM_STATUS, &status,
&fw_error);
if (ret) {
error_report("%s: failed to get platform status ret=%d"
"fw_error='%d: %s'", __func__, ret, fw_error,
fw_error_to_str(fw_error));
goto err;
}
s->build_id = status.build;
s->api_major = status.api_major;
s->api_minor = status.api_minor;
trace_kvm_sev_init();
ret = sev_ioctl(s->sev_fd, KVM_SEV_INIT, NULL, &fw_error);
if (ret) {
error_report("%s: failed to initialize ret=%d fw_error=%d '%s'",
__func__, ret, fw_error, fw_error_to_str(fw_error));
goto err;
}
ret = sev_launch_start(s);
if (ret) {
error_report("%s: failed to create encryption context", __func__);
goto err;
}
ram_block_notifier_add(&sev_ram_notifier);
qemu_add_machine_init_done_notifier(&sev_machine_done_notify);
qemu_add_vm_change_state_handler(sev_vm_state_change, s);
return s;
err:
g_free(sev_state);
sev_state = NULL;
return NULL;
}
/*
 * Encrypt 'len' bytes at 'ptr' in place while the guest is in the
 * LAUNCH_UPDATE state; in any other state this is a no-op reporting
 * success. Returns 0 on success / no-op, non-zero on failure.
 */
int
sev_encrypt_data(void *handle, uint8_t *ptr, uint64_t len)
{
    assert(handle);
    /* only LAUNCH_UPDATE permits encrypting guest data */
    if (!sev_check_state(SEV_STATE_LAUNCH_UPDATE)) {
        return 0;
    }
    return sev_launch_update_data(ptr, len);
}
static void
sev_register_types(void)
{
type_register_static(&qsev_guest_info);
}
type_init(sev_register_types);
| 25.955882 | 79 | 0.629981 | [
"object"
] |
239c241fb4cba6cca752f1f93e97336509acc582 | 1,896 | h | C | ds/pq_cbh.h | liang2kl/2021-Fall-DSA-Utilities | 9f2cd0567ffe4c8d6be14281c42ab02860257312 | [
"MIT"
] | null | null | null | ds/pq_cbh.h | liang2kl/2021-Fall-DSA-Utilities | 9f2cd0567ffe4c8d6be14281c42ab02860257312 | [
"MIT"
] | null | null | null | ds/pq_cbh.h | liang2kl/2021-Fall-DSA-Utilities | 9f2cd0567ffe4c8d6be14281c42ab02860257312 | [
"MIT"
] | null | null | null | #ifndef __PQ_CBH_H
#define __PQ_CBH_H
#include "pq.h"
template <typename T>
class PQ_CBH : public PQ<T>, public Vector<T> {
private:
static inline int parent(int r) {
return (r - 1) / 2;
}
static inline int lc(int r) {
return 2 * r + 1;
}
static inline int rc(int r) {
return 2 * r + 2;
}
inline int inHeap(int r) {
return r >= 0 && r < this->_size;
}
inline int properParent(int p) {
int lc = this->lc(p);
int rc = this->rc(p);
int child;
if (inHeap(lc)) {
if (inHeap(rc)) {
child = this->_data[lc] > this->_data[rc] ? lc : rc;
} else {
child = lc;
}
} else {
if (inHeap(rc)) {
child = rc;
} else {
return p;
}
}
return this->_data[p] > this->_data[child] ? p : child;
}
void swap(int a, int b) {
T tmp = this->_data[a];
this->_data[a] = this->_data[b];
this->_data[b] = tmp;
}
int precolateUp(int r) {
while (r > 0) {
int p = parent(r);
if (this->_data[p] >= this->_data[r]) {
break;
}
swap(p, r);
r = p;
}
return r;
}
int percolateDown(int r) {
int parent = properParent(r);
while (parent != r) {
swap(r, parent);
r = parent;
parent = properParent(r);
}
return r;
}
public:
T getMax() {
return this->_data[0];
}
void insert(T e) {
Vector<T>::insert(e);
precolateUp(this->_size - 1);
}
T delMax() {
T tmp = getMax();
this->_size--;
this->_data[0] = this->_data[this->_size];
percolateDown(0);
return tmp;
}
};
#endif | 22.843373 | 68 | 0.429325 | [
"vector"
] |
23a58d6c53e71b685126a74d9c4f58a77e390ae0 | 45,249 | c | C | src/router_core/exchange_bindings.c | cliffjansen/qpid-dispatch | 136940b55a6c0d269726f27e3c0087deafa492e5 | [
"Apache-2.0"
] | 1 | 2019-03-17T04:08:43.000Z | 2019-03-17T04:08:43.000Z | src/router_core/exchange_bindings.c | DalavanCloud/qpid-dispatch | 933b9df940983282067033e05b46e9665a266058 | [
"Apache-2.0"
] | null | null | null | src/router_core/exchange_bindings.c | DalavanCloud/qpid-dispatch | 933b9df940983282067033e05b46e9665a266058 | [
"Apache-2.0"
] | null | null | null | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <qpid/dispatch/ctools.h>
#include <inttypes.h>
#include <stdio.h>
#include "router_core_private.h"
#include "forwarder.h"
#include "exchange_bindings.h"
// next_hop_t
// Describes the destination of a forwarded message
// May be shared by different bindings
//
typedef struct next_hop_t next_hop_t;
struct next_hop_t
{
    // per-exchange list of all next hops
    DEQ_LINKS_N(exchange_list, next_hop_t);
    // when hooked to the transmit list
    DEQ_LINKS_N(transmit_list, next_hop_t);
    int ref_count; // binding references
    int phase; // address phase applied when forwarding to this hop
    bool on_xmit_list; // true while queued on a transmit list (dedup guard)
    qdr_exchange_t *exchange; // owning exchange
    unsigned char *next_hop; // destination address string
    qdr_address_t *qdr_addr; // resolved core address for next_hop
};
ALLOC_DECLARE(next_hop_t);
ALLOC_DEFINE(next_hop_t);
DEQ_DECLARE(next_hop_t, next_hop_list_t);
// qdr_binding_t
// Represents a subject key --> next hop mapping
// A binding is uniquely identified by the tuple (pattern, nextHop, phase). No
// two bindings with the same tuple value can exist on an exchange.
// The binding is implemented using two classes: qdr_binding_t and
// next_hop_t. The qdr_binding_t holds the pattern and points to the
// next_hop_t. This allows different patterns to share the same nextHop.
// Since there is only one next_hop_t instance for each (nextHop, phase) value,
// we guarantee only 1 copy of a message is forwarded to a given nextHop+phase
// even if multiple distinct patterns are matched. Ex: a message with a
// value of "a.b" will match two distinct binding keys "+.b" and "a.+". If
// both these patterns share the same next_hop_t only 1 copy of the message
// will be forwarded.
typedef struct qdr_binding qdr_binding_t;
struct qdr_binding
{
// per-exchange list of all bindings
DEQ_LINKS_N(exchange_list, qdr_binding_t);
// parse tree node's list of bindings sharing the same pattern
DEQ_LINKS_N(tree_list, qdr_binding_t);
unsigned char *name;
uint64_t identity;
qdr_exchange_t *exchange;
unsigned char *key;
next_hop_t *next_hop;
uint64_t msgs_matched;
};
ALLOC_DECLARE(qdr_binding_t);
ALLOC_DEFINE(qdr_binding_t);
DEQ_DECLARE(qdr_binding_t, qdr_binding_list_t);
struct qdr_exchange {
DEQ_LINKS(qdr_exchange_t); // for core->exchanges
qdr_core_t *core;
uint64_t identity;
unsigned char *name;
unsigned char *address;
int phase;
qd_parse_tree_t *parse_tree;
qdr_address_t *qdr_addr;
next_hop_t *alternate;
qdr_binding_list_t bindings;
next_hop_list_t next_hops;
qdr_forwarder_t *old_forwarder;
uint64_t msgs_received;
uint64_t msgs_dropped;
uint64_t msgs_routed;
uint64_t msgs_alternate;
};
ALLOC_DECLARE(qdr_exchange_t);
ALLOC_DEFINE(qdr_exchange_t);
/* forward declarations (duplicate declaration of find_binding removed) */
static void qdr_exchange_free(qdr_exchange_t *ex);
static qdr_exchange_t *qdr_exchange(qdr_core_t *core,
                                    qd_iterator_t *name,
                                    qd_iterator_t *address,
                                    int phase,
                                    qd_iterator_t *alternate,
                                    int alt_phase,
                                    qd_parse_tree_type_t method);
static void write_config_exchange_map(qdr_exchange_t *ex,
                                      qd_composed_field_t *body);
static qdr_exchange_t *find_exchange(qdr_core_t *core,
                                     qd_iterator_t *identity,
                                     qd_iterator_t *name);
static qdr_binding_t *find_binding(qdr_core_t *core,
                                   qd_iterator_t *identity,
                                   qd_iterator_t *name);
static void write_config_exchange_list(qdr_exchange_t *ex,
                                       qdr_query_t *query);
static qdr_binding_t *qdr_binding(qdr_exchange_t *ex,
                                  qd_iterator_t *name,
                                  qd_iterator_t *key,
                                  qd_iterator_t *next_hop,
                                  int phase);
static void write_config_binding_map(qdr_binding_t *binding,
                                     qd_composed_field_t *body);
static void qdr_binding_free(qdr_binding_t *b);
static void write_config_binding_list(qdr_binding_t *binding,
                                      qdr_query_t *query);
static qdr_binding_t *get_binding_at_index(qdr_core_t *core,
                                           int index);
static next_hop_t *next_hop(qdr_exchange_t *ex,
                            qd_iterator_t *address,
                            int phase);
static void next_hop_release(next_hop_t *next_hop);
static next_hop_t *find_next_hop(qdr_exchange_t *ex,
                                 qd_iterator_t *address,
                                 int phase);
static bool gather_next_hops(void *handle,
                             const char *pattern,
                             void *payload);
static int send_message(qdr_core_t *core,
                        next_hop_t *next_hop,
                        qd_message_t *msg,
                        qdr_delivery_t *in_delivery,
                        bool exclude_inprocess,
                        bool control);
//
// The Exchange Forwarder
//
// Forward a delivery that arrived on an exchange address: match the
// message subject against the exchange's binding patterns and forward
// one copy of the message to each distinct matching next hop. When no
// binding matches, fall back to the exchange's alternate next hop (if
// configured). Returns the number of deliveries forwarded; 0 means the
// message was dropped.
int qdr_forward_exchange_CT(qdr_core_t *core,
                            qdr_address_t *addr,
                            qd_message_t *msg,
                            qdr_delivery_t *in_delivery,
                            bool exclude_inprocess,
                            bool control)
{
    int forwarded = 0;
    const bool presettled = !!in_delivery ? in_delivery->settled : true;
    qdr_exchange_t *ex = addr->exchange;
    assert(ex);
    ex->msgs_received += 1;
    // honor the disposition for the exchange address (this may not be right??)
    if (ex->old_forwarder)
        forwarded = ex->old_forwarder->forward_message(core, addr, msg, in_delivery, exclude_inprocess, control);
    // @TODO(kgiusti): de-duplicate this code (cut & paste from multicast
    // forwarder)
    //
    // If the delivery is not presettled, set the settled flag for forwarding so all
    // outgoing deliveries will be presettled.
    //
    // NOTE: This is the only multicast mode currently supported. Others will likely be
    // implemented in the future.
    //
    if (!presettled)
        in_delivery->settled = true;
    // binding keys are matched against the message's subject field
    qd_iterator_t *subject = qd_message_check(msg, QD_DEPTH_PROPERTIES)
        ? qd_message_field_iterator(msg, QD_FIELD_SUBJECT)
        : NULL;
    next_hop_list_t transmit_list;
    DEQ_INIT(transmit_list);
    if (subject) {
        // find all matching bindings and build a list of their next hops
        qd_parse_tree_search(ex->parse_tree, subject, gather_next_hops, &transmit_list);
        qd_iterator_free(subject);
    }
    // if there are valid next hops then we're routing this message based on an
    // entirely new destination address. We need to reset the origin and the
    // excluded link flags in the delivery. We also need to reset the trace
    // annotations and ingress field in the message. This is done because it is
    // possible that the next hop is reached via the same link/router this
    // message arrived from.
    // @TODO(kgiusti) - loop detection
    if (DEQ_SIZE(transmit_list) > 0 || ex->alternate) {
        if (in_delivery) {
            in_delivery->origin = 0;
            qd_bitmask_free(in_delivery->link_exclusion);
            in_delivery->link_exclusion = 0;
        }
        const char *node_id = qd_router_id(core->qd);
        qd_composed_field_t *trace_field = qd_compose_subfield(0);
        qd_compose_start_list(trace_field);
        qd_compose_insert_string(trace_field, node_id);
        qd_compose_end_list(trace_field);
        qd_message_set_trace_annotation(msg, trace_field);
        qd_composed_field_t *ingress_field = qd_compose_subfield(0);
        qd_compose_insert_string(ingress_field, node_id);
        qd_message_set_ingress_annotation(msg, ingress_field);
    }
    // drain the transmit list, sending one copy of the message per next hop
    next_hop_t *next_hop = DEQ_HEAD(transmit_list);
    while (next_hop) {
        DEQ_REMOVE_N(transmit_list, transmit_list, next_hop);
        next_hop->on_xmit_list = false;
        assert(next_hop->qdr_addr);
        // @TODO(kgiusti) - non-recursive handling of next hop if it is an exchange
        forwarded += send_message(ex->core, next_hop, msg, in_delivery, exclude_inprocess, control);
        next_hop = DEQ_HEAD(transmit_list);
    }
    // nothing matched: divert to the alternate next hop if one is configured
    if (forwarded == 0 && ex->alternate) {
        forwarded = send_message(ex->core, ex->alternate, msg, in_delivery, exclude_inprocess, control);
        if (forwarded) {
            ex->msgs_alternate += 1;
        }
    }
    // @TODO(kgiusti): de-duplicate the settlement code (cut & paste from
    // multicast forwarder)
    if (forwarded == 0) {
        ex->msgs_dropped += 1;
        if (!presettled) {
            //
            // The delivery was not originally presettled and it was not
            // forwarded to any destinations, return it to its original
            // unsettled state.
            //
            in_delivery->settled = false;
        }
    } else {
        ex->msgs_routed += 1;
        if (in_delivery && !presettled) {
            //
            // The delivery was not presettled and it was forwarded to at least
            // one destination. Accept and settle the delivery only if the
            // entire delivery has been received.
            //
            const bool receive_complete = qd_message_receive_complete(qdr_delivery_message(in_delivery));
            if (receive_complete) {
                in_delivery->disposition = PN_ACCEPTED;
                qdr_delivery_push_CT(core, in_delivery);
            }
        }
    }
    return forwarded;
}
// callback from parse tree search:
// handle = transmit_list containing all matching next_hops
// pattern = pattern that matches the search (ignored)
// payload = list of bindings configured for the pattern
static bool gather_next_hops(void *handle, const char *pattern, void *payload)
{
next_hop_list_t *transmit_list = (next_hop_list_t *)handle;
qdr_binding_list_t *bindings = (qdr_binding_list_t *)payload;
qdr_binding_t *binding = DEQ_HEAD(*bindings);
while (binding) {
binding->msgs_matched += 1;
// note - since multiple bindings may reference the next hop, it is
// possible a next hop has already been added to the transmit list.
// do not re-add. This is not thread safe but that is fine since all
// forwarding is done on the core thread.
if (!binding->next_hop->on_xmit_list) {
DEQ_INSERT_TAIL_N(transmit_list, *transmit_list, binding->next_hop);
binding->next_hop->on_xmit_list = true;
}
binding = DEQ_NEXT_N(tree_list, binding);
}
return true; // keep searching
}
// Forward a copy of the message to the to_addr address
// Deliver a copy of 'msg' to the given next hop. The copy is annotated
// with the hop's address (to-override) and phase before being handed to
// the standard message forwarder. Returns the number of deliveries made.
static int send_message(qdr_core_t *core,
                        next_hop_t *next_hop,
                        qd_message_t *msg,
                        qdr_delivery_t *in_delivery,
                        bool exclude_inprocess,
                        bool control)
{
    qd_message_t *copy = qd_message_copy(msg);
    qd_log(core->log, QD_LOG_TRACE, "Exchange '%s' forwarding message to '%s'",
           next_hop->exchange->name, next_hop->next_hop);
    // set "to override" and "phase" message annotations based on the next hop
    qd_composed_field_t *to_field = qd_compose_subfield(0);
    qd_compose_insert_string(to_field, (char *)next_hop->next_hop);
    qd_message_set_to_override_annotation(copy, to_field); // frees to_field
    qd_message_set_phase_annotation(copy, next_hop->phase);
    int count = qdr_forward_message_CT(core, next_hop->qdr_addr, copy,
                                       in_delivery, exclude_inprocess, control);
    qd_message_free(copy);
    return count;
}
// Number of bindings currently configured on the exchange.
long qdr_exchange_binding_count(const qdr_exchange_t *ex)
{
    return (long) DEQ_SIZE(ex->bindings);
}
// Resolved core address of the exchange's alternate next hop, or NULL
// when no alternate is configured.
qdr_address_t *qdr_exchange_alternate_addr(const qdr_exchange_t *ex)
{
    return ex->alternate ? ex->alternate->qdr_addr : NULL;
}
/////////////////////////////
// Exchange Management API //
/////////////////////////////
// Column indices for the configExchange management entity.  These must
// stay in sync with the attribute name table below.
#define QDR_CONFIG_EXCHANGE_NAME 0
#define QDR_CONFIG_EXCHANGE_IDENTITY 1
#define QDR_CONFIG_EXCHANGE_ADDRESS 2
#define QDR_CONFIG_EXCHANGE_PHASE 3
#define QDR_CONFIG_EXCHANGE_ALTERNATE 4
#define QDR_CONFIG_EXCHANGE_ALT_PHASE 5
#define QDR_CONFIG_EXCHANGE_MATCH_METHOD 6
#define QDR_CONFIG_EXCHANGE_BINDING_COUNT 7
#define QDR_CONFIG_EXCHANGE_RECEIVED 8
#define QDR_CONFIG_EXCHANGE_DROPPED 9
#define QDR_CONFIG_EXCHANGE_FORWARDED 10
#define QDR_CONFIG_EXCHANGE_DIVERTED 11
// Attribute names, indexed by the QDR_CONFIG_EXCHANGE_* values above and
// terminated by a null entry.
const char *qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_COLUMN_COUNT + 1] =
{"name",
"identity",
"address",
"phase",
"alternateAddress",
"alternatePhase",
"matchMethod",
"bindingCount",
"receivedCount",
"droppedCount",
"forwardedCount",
"divertedCount",
0};
// from management_agent.c
extern const unsigned char *config_exchange_entity_type;
// Column indices for the configBinding management entity.  These must
// stay in sync with the attribute name table below.
#define QDR_CONFIG_BINDING_NAME 0
#define QDR_CONFIG_BINDING_IDENTITY 1
#define QDR_CONFIG_BINDING_EXCHANGE 2
#define QDR_CONFIG_BINDING_KEY 3
#define QDR_CONFIG_BINDING_NEXTHOP 4
#define QDR_CONFIG_BINDING_NHOP_PHASE 5
#define QDR_CONFIG_BINDING_MATCHED 6
// Attribute names, indexed by the QDR_CONFIG_BINDING_* values above and
// terminated by a null entry.
const char *qdr_config_binding_columns[QDR_CONFIG_BINDING_COLUMN_COUNT + 1] =
{"name",
"identity",
"exchangeName",
"bindingKey",
"nextHopAddress",
"nextHopPhase",
"matchedCount",
0};
// from management_agent.c
extern const unsigned char *config_binding_entity_type;
// Called on core shutdown to release every configured exchange.
//
void qdr_exchange_free_all(qdr_core_t *core)
{
    for (qdr_exchange_t *ex = DEQ_HEAD(core->exchanges); ex; ) {
        qdr_exchange_t *doomed = ex;
        ex = DEQ_NEXT(ex);   // advance before the current node is freed
        qdr_exchange_free(doomed);
    }
}
// Exchange CREATE
//
// Management handler: validate the attributes in the request body and
// create a new exchange.  On success the new exchange's attribute map is
// written to the response body (when one is expected).
//
void qdra_config_exchange_create_CT(qdr_core_t *core,
qd_iterator_t *name,
qdr_query_t *query,
qd_parsed_field_t *in_body)
{
qdr_exchange_t *ex = NULL;
// assume failure until the exchange has been successfully allocated
query->status = QD_AMQP_BAD_REQUEST;
if (!qd_parse_is_map(in_body)) {
query->status.description = "Body of request must be a map";
goto exit;
}
if (!name) {
query->status.description = "exchange requires a unique name";
goto exit;
}
// the address attribute is mandatory
qd_parsed_field_t *address_field = qd_parse_value_by_key(in_body,
qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_ADDRESS]);
if (!address_field) {
query->status.description = "exchange address is mandatory";
goto exit;
}
qd_iterator_t *address = qd_parse_raw(address_field);
// check for duplicates: both the name and the address must be unique
// across all configured exchanges
{
qdr_exchange_t *eptr = 0;
for (eptr = DEQ_HEAD(core->exchanges); eptr; eptr = DEQ_NEXT(eptr)) {
if (qd_iterator_equal(address, eptr->address)) {
query->status.description = "duplicate exchange address";
goto exit;
} else if (qd_iterator_equal(name, eptr->name)) {
query->status.description = "duplicate exchange name";
goto exit;
}
}
}
// optional matchMethod selects the binding key syntax (default 'amqp')
qd_parsed_field_t *method_field = qd_parse_value_by_key(in_body,
qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_MATCH_METHOD]);
qd_parse_tree_type_t method = QD_PARSE_TREE_AMQP_0_10;
if (method_field) {
if (qd_iterator_equal(qd_parse_raw(method_field), (const unsigned char *)"mqtt")) {
method = QD_PARSE_TREE_MQTT;
} else if (!qd_iterator_equal(qd_parse_raw(method_field), (const unsigned char *)"amqp")) {
query->status.description = "Exchange matchMethod must be either 'amqp' or 'mqtt'";
goto exit;
}
}
// optional address phase, a single decimal digit
long phase = 0;
qd_parsed_field_t *phase_field = qd_parse_value_by_key(in_body,
qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_PHASE]);
if (phase_field) {
phase = qd_parse_as_long(phase_field);
if (phase < 0 || phase > 9) {
query->status.description = "phase must be in the range 0-9";
goto exit;
}
}
// optional alternate address (and phase) used when a message matches no
// binding
qd_iterator_t *alternate = NULL;
long alt_phase = 0;
qd_parsed_field_t *alternate_field = qd_parse_value_by_key(in_body,
qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_ALTERNATE]);
if (alternate_field) {
alternate = qd_parse_raw(alternate_field);
qd_parsed_field_t *alt_phase_field = qd_parse_value_by_key(in_body,
qdr_config_exchange_columns[QDR_CONFIG_EXCHANGE_ALT_PHASE]);
if (alt_phase_field) {
alt_phase = qd_parse_as_long(alt_phase_field);
if (alt_phase < 0 || alt_phase > 9) {
query->status.description = "phase must be in the range 0-9";
goto exit;
}
}
}
ex = qdr_exchange(core, name, address, phase, alternate, alt_phase, method);
if (ex) {
// @TODO(kgiusti) - for now, until the behavior is nailed down:
// one-time warning that the feature is experimental
static int warn_user;
if (!warn_user) {
warn_user = 1;
qd_log(core->agent_log, QD_LOG_WARNING,
"The Exchange/Binding feature is currently EXPERIMENTAL."
" Its functionality may change in future releases"
" of the Qpid Dispatch Router. Backward compatibility is"
" not guaranteed.");
}
query->status = QD_AMQP_CREATED;
if (query->body) {
write_config_exchange_map(ex, query->body);
}
} else {
query->status.description = "failed to allocate exchange";
}
exit:
// log the outcome; on failure a null response body is returned
if (query->status.status == QD_AMQP_CREATED.status) {
qd_log(core->agent_log, QD_LOG_DEBUG,
"Exchange %s CREATED (id=%"PRIu64")", ex->name, ex->identity);
} else {
qd_log(core->agent_log, QD_LOG_ERROR,
"Error performing CREATE of %s: %s", config_exchange_entity_type, query->status.description);
// return a NULL body:
if (query->body) qd_compose_insert_null(query->body);
}
if (query->body) {
qdr_agent_enqueue_response_CT(core, query);
} else {
// no body == create from internal config parser
qdr_query_free(query);
}
}
// Exchange DELETE:
//
// Remove the exchange identified by name or identity (identity wins).
void qdra_config_exchange_delete_CT(qdr_core_t *core,
                                    qdr_query_t *query,
                                    qd_iterator_t *name,
                                    qd_iterator_t *identity)
{
    if (!name && !identity) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing DELETE of %s: %s",
               config_exchange_entity_type, query->status.description);
    } else {
        qdr_exchange_t *doomed = find_exchange(core, identity, name);
        if (!doomed) {
            query->status = QD_AMQP_NOT_FOUND;
        } else {
            // log before the exchange's fields are freed
            qd_log(core->agent_log, QD_LOG_DEBUG,
                   "Exchange %s DELETED (id=%"PRIu64")", doomed->name, doomed->identity);
            qdr_exchange_free(doomed);
            query->status = QD_AMQP_NO_CONTENT;
        }
    }
    qdr_agent_enqueue_response_CT(core, query);
}
// Exchange GET
//
// Read a single exchange by name or identity (identity wins) and write
// its attribute map into the response body.
void qdra_config_exchange_get_CT(qdr_core_t *core,
                                 qd_iterator_t *name,
                                 qd_iterator_t *identity,
                                 qdr_query_t *query,
                                 const char *columns[])
{
    if (!name && !identity) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing READ of %s: %s",
               config_exchange_entity_type, query->status.description);
    } else {
        qdr_exchange_t *found = find_exchange(core, identity, name);
        if (!found) {
            query->status = QD_AMQP_NOT_FOUND;
        } else {
            if (query->body) write_config_exchange_map(found, query->body);
            query->status = QD_AMQP_OK;
        }
    }
    qdr_agent_enqueue_response_CT(core, query);
}
// Exchange GET first:
//
// Begin iterating the exchange list at the given offset.
void qdra_config_exchange_get_first_CT(qdr_core_t *core, qdr_query_t *query, int offset)
{
    // Queries that get this far always succeed.
    query->status = QD_AMQP_OK;

    // An offset past the end of the list ends the query immediately.
    if (offset >= DEQ_SIZE(core->exchanges)) {
        query->more = false;
        qdr_agent_enqueue_response_CT(core, query);
        return;
    }

    // Walk to the exchange at the requested offset.
    qdr_exchange_t *ex = DEQ_HEAD(core->exchanges);
    for (int skip = offset; skip > 0; skip--)
        ex = DEQ_NEXT(ex);
    assert(ex);

    // Write the selected columns into the response body.
    if (query->body) write_config_exchange_list(ex, query);

    // Remember where the next GET-NEXT should resume.
    query->next_offset = offset + 1;
    query->more = DEQ_NEXT(ex) != 0;

    qdr_agent_enqueue_response_CT(core, query);
}
// Exchange GET-NEXT
//
// Continue an exchange iteration at query->next_offset.
void qdra_config_exchange_get_next_CT(qdr_core_t *core, qdr_query_t *query)
{
    qdr_exchange_t *ex = 0;
    if (query->next_offset < DEQ_SIZE(core->exchanges)) {
        // walk to the exchange at the saved offset
        ex = DEQ_HEAD(core->exchanges);
        for (int skip = 0; skip < query->next_offset && ex; ++skip)
            ex = DEQ_NEXT(ex);
    }

    if (!ex) {
        query->more = false;
    } else {
        // write the selected columns and advance the cursor
        if (query->body) write_config_exchange_list(ex, query);
        query->next_offset++;
        query->more = DEQ_NEXT(ex) != 0;
    }

    qdr_agent_enqueue_response_CT(core, query);
}
// Binding CREATE
//
// Management handler: validate the request body and attach a new binding
// (key pattern -> next hop address/phase) to an existing exchange.
void qdra_config_binding_create_CT(qdr_core_t *core,
qd_iterator_t *name,
qdr_query_t *query,
qd_parsed_field_t *in_body)
{
qdr_binding_t *binding = NULL;
qdr_exchange_t *ex = NULL;
qd_iterator_t *key = NULL;
// assume failure until the binding has been successfully allocated
query->status = QD_AMQP_BAD_REQUEST;
if (!qd_parse_is_map(in_body)) {
query->status.description = "Body of request must be a map";
goto exit;
}
qd_parsed_field_t *exchange_field = qd_parse_value_by_key(in_body,
qdr_config_binding_columns[QDR_CONFIG_BINDING_EXCHANGE]);
if (!exchange_field) {
query->status.description = "Binding configuration requires an exchange";
goto exit;
}
// lookup the exchange by its name:
ex = find_exchange(core, NULL, qd_parse_raw(exchange_field));
if (!ex) {
query->status.description = "Named exchange does not exist";
goto exit;
}
// the next hop address is mandatory
qd_parsed_field_t *next_hop_field = qd_parse_value_by_key(in_body,
qdr_config_binding_columns[QDR_CONFIG_BINDING_NEXTHOP]);
if (!next_hop_field) {
query->status.description = "No next hop specified";
goto exit;
}
qd_iterator_t *nhop = qd_parse_raw(next_hop_field);
qd_parsed_field_t *key_field = qd_parse_value_by_key(in_body,
qdr_config_binding_columns[QDR_CONFIG_BINDING_KEY]);
// if no pattern given, assume match all "#":
key = key_field ? qd_iterator_dup(qd_parse_raw(key_field)) : qd_iterator_string("#", ITER_VIEW_ALL);
// the pattern must be valid for the exchange's match method
if (!qd_parse_tree_validate_pattern(ex->parse_tree, key)) {
query->status.description = "The binding key pattern is invalid";
goto exit;
}
// optional next hop phase, a single decimal digit (default 0)
qd_parsed_field_t *phase_field = qd_parse_value_by_key(in_body,
qdr_config_binding_columns[QDR_CONFIG_BINDING_NHOP_PHASE]);
long phase = (phase_field ? qd_parse_as_long(phase_field) : 0);
if (phase < 0 || phase > 9) {
query->status.description = "phase must be in the range 0-9";
goto exit;
}
// check for duplicates: the name and the tuple (key, next hop, phase) must
// be unique per exchange
for (qdr_binding_t *b = DEQ_HEAD(ex->bindings); b; b = DEQ_NEXT_N(exchange_list, b)) {
if (name && b->name && qd_iterator_equal(name, b->name)) {
query->status.description = "Duplicate next hop name";
goto exit;
} else if (qd_iterator_equal(key, b->key) &&
qd_iterator_equal(nhop, b->next_hop->next_hop) &&
phase == b->next_hop->phase) {
query->status.description = "Next hop for key already exists";
goto exit;
}
}
binding = qdr_binding(ex, name, key, nhop, phase);
if (binding) {
query->status = QD_AMQP_CREATED;
if (query->body) {
write_config_binding_map(binding, query->body);
}
} else {
query->status.description = "Failed to allocate next hop";
}
exit:
// log the outcome; on failure a null response body is returned
if (query->status.status == QD_AMQP_CREATED.status) {
qd_log(core->agent_log, QD_LOG_DEBUG,
"Exchange %s Binding %s -> %s CREATED (id=%"PRIu64")", ex->name,
binding->key, binding->next_hop->next_hop, binding->identity);
} else {
qd_log(core->agent_log, QD_LOG_ERROR,
"Error performing CREATE of %s: %s",
config_binding_entity_type,
query->status.description);
// return a NULL body:
if (query->body) qd_compose_insert_null(query->body);
}
if (query->body) {
qdr_agent_enqueue_response_CT(core, query);
} else {
// no body == create from internal config parser
qdr_query_free(query);
}
// the key iterator was duplicated/created above and must be released
if (key) qd_iterator_free(key);
}
// Binding DELETE
//
// Remove the binding identified by name or identity (identity wins).
void qdra_config_binding_delete_CT(qdr_core_t *core,
                                   qdr_query_t *query,
                                   qd_iterator_t *name,
                                   qd_iterator_t *identity)
{
    if (!identity && !name) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No binding name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing DELETE of %s: %s",
               config_binding_entity_type, query->status.description);
    } else {
        qdr_binding_t *doomed = find_binding(core, identity, name);
        if (!doomed) {
            query->status = QD_AMQP_NOT_FOUND;
        } else {
            // log before the binding's fields are freed
            qd_log(core->agent_log, QD_LOG_DEBUG,
                   "Binding %s -> %s on exchange %s DELETED (id=%"PRIu64")",
                   doomed->key,
                   doomed->next_hop->next_hop,
                   doomed->exchange->name,
                   doomed->identity);
            qdr_binding_free(doomed);
            query->status = QD_AMQP_NO_CONTENT;
        }
    }
    qdr_agent_enqueue_response_CT(core, query);
}
// Binding GET
//
// Read a single binding by name or identity (identity wins) and write its
// attribute map into the response body.
void qdra_config_binding_get_CT(qdr_core_t *core,
                                qd_iterator_t *name,
                                qd_iterator_t *identity,
                                qdr_query_t *query,
                                const char *columns[])
{
    if (!identity && !name) {
        query->status = QD_AMQP_BAD_REQUEST;
        query->status.description = "No binding name or identity provided";
        qd_log(core->agent_log, QD_LOG_ERROR, "Error performing READ of %s: %s",
               config_binding_entity_type, query->status.description);
    } else {
        qdr_binding_t *found = find_binding(core, identity, name);
        if (!found) {
            query->status = QD_AMQP_NOT_FOUND;
        } else {
            if (query->body) write_config_binding_map(found, query->body);
            query->status = QD_AMQP_OK;
        }
    }
    qdr_agent_enqueue_response_CT(core, query);
}
// Binding GET first
//
// Begin iterating bindings at 'offset'.  Bindings are numbered across all
// exchanges in configuration order (see get_binding_at_index).
void qdra_config_binding_get_first_CT(qdr_core_t *core, qdr_query_t *query, int offset)
{
    query->status = QD_AMQP_OK;

    qdr_binding_t *binding = get_binding_at_index(core, offset);
    if (!binding) {
        // offset is past the last binding - terminate the iteration
        query->more = false;
        qdr_agent_enqueue_response_CT(core, query);
        return;
    }

    if (query->body) write_config_binding_list(binding, query);
    query->next_offset = offset + 1;
    // more results may exist if this exchange has another binding or any
    // exchange follows this one
    query->more = (DEQ_NEXT_N(exchange_list, binding) != 0) ||
                  (DEQ_NEXT(binding->exchange) != 0);
    qdr_agent_enqueue_response_CT(core, query);
}
// Binding GET-NEXT
//
// Continue a binding iteration at query->next_offset.
void qdra_config_binding_get_next_CT(qdr_core_t *core, qdr_query_t *query)
{
    qdr_binding_t *binding = get_binding_at_index(core, query->next_offset);
    if (!binding) {
        query->more = false;
    } else {
        if (query->body) write_config_binding_list(binding, query);
        query->next_offset++;
        // more results may exist if this exchange has another binding or
        // any exchange follows this one
        query->more = (DEQ_NEXT_N(exchange_list, binding) != 0) ||
                      (DEQ_NEXT(binding->exchange) != 0);
    }
    qdr_agent_enqueue_response_CT(core, query);
}
// Exchange constructor
//
// Allocates a new exchange, resolves (or creates) the qdr_address_t for
// its address+phase, and swaps in a forwarder that routes through
// qdr_forward_exchange_CT.  Returns NULL when allocation fails.
static qdr_exchange_t *qdr_exchange(qdr_core_t *core,
qd_iterator_t *name,
qd_iterator_t *address,
int phase,
qd_iterator_t *alternate,
int alt_phase,
qd_parse_tree_type_t method)
{
assert(address);
qdr_exchange_t *ex = new_qdr_exchange_t();
if (ex) {
ZERO(ex);
DEQ_ITEM_INIT(ex);
ex->core = core;
ex->identity = qdr_identifier(core);
ex->name = qd_iterator_copy(name);
ex->address = qd_iterator_copy(address);
ex->phase = phase;
ex->parse_tree = qd_parse_tree_new(method);
DEQ_INIT(ex->bindings);
DEQ_INIT(ex->next_hops);
// look up the address record using the hash view annotated with the phase
qd_iterator_reset_view(address, ITER_VIEW_ADDRESS_HASH);
qd_iterator_annotate_phase(address, (char) phase + '0');
qd_hash_retrieve(core->addr_hash, address, (void **)&ex->qdr_addr);
if (!ex->qdr_addr) {
// no record exists yet - create one with the configured treatment
qdr_address_config_t *addr_config;
qd_address_treatment_t treatment = qdr_treatment_for_address_hash_CT(core, address, &addr_config);
ex->qdr_addr = qdr_address_CT(core, treatment, addr_config);
qd_hash_insert(core->addr_hash, address, ex->qdr_addr, &ex->qdr_addr->hash_handle);
DEQ_INSERT_TAIL(core->addrs, ex->qdr_addr);
}
// we're going to override the forwarder
qdr_forwarder_t *old = ex->qdr_addr->forwarder;
qdr_forwarder_t *new = qdr_new_forwarder(qdr_forward_exchange_CT,
old ? old->forward_attach : 0,
old ? old->bypass_valid_origins: false);
ex->old_forwarder = old;  // restored by qdr_exchange_free
ex->qdr_addr->forwarder = new;
ex->qdr_addr->ref_count += 1;  // the exchange holds an address reference
ex->qdr_addr->exchange = ex;
DEQ_INSERT_TAIL(core->exchanges, ex);
if (alternate) {
// next hop used when a message matches no binding
ex->alternate = next_hop(ex, alternate, alt_phase);
}
qdr_post_mobile_added_CT(core,
(const char*) qd_hash_key_by_handle(ex->qdr_addr->hash_handle),
ex->qdr_addr->treatment);
}
return ex;
}
// Exchange destructor: tears down all bindings, restores the address's
// original forwarder, and drops the exchange's address reference.
static void qdr_exchange_free(qdr_exchange_t *ex)
{
// un-advertise the address only while the core is running and no local
// links remain attached to it
if (ex->core->running && DEQ_SIZE(ex->qdr_addr->rlinks) == 0) {
qdr_post_mobile_removed_CT(ex->core,
(const char*) qd_hash_key_by_handle(ex->qdr_addr->hash_handle));
}
DEQ_REMOVE(ex->core->exchanges, ex);
while (DEQ_SIZE(ex->bindings) > 0) {
// freeing the binding removes it from the binding list
// and the parse tree
qdr_binding_free(DEQ_HEAD(ex->bindings));
}
if (ex->alternate) {
next_hop_release(ex->alternate);
}
// releasing the bindings and alternate should have drained all next hops
assert(DEQ_IS_EMPTY(ex->next_hops));
// discard the exchange forwarder and put back the one it displaced
free(ex->qdr_addr->forwarder);
ex->qdr_addr->forwarder = ex->old_forwarder;
assert(ex->qdr_addr->ref_count > 0);
ex->qdr_addr->ref_count -= 1;
qdr_check_addr_CT(ex->core, ex->qdr_addr);
free(ex->name);
free(ex->address);
qd_parse_tree_free(ex->parse_tree);
free_qdr_exchange_t(ex);
}
// Binding constructor
//
// Creates a binding from a key pattern to a next hop and links it into
// both the exchange's binding list and the parse tree entry for the
// pattern (creating that entry on first use).  Returns NULL when
// allocation fails.
static qdr_binding_t *qdr_binding(qdr_exchange_t *ex,
qd_iterator_t *name,
qd_iterator_t *key,
qd_iterator_t *nhop,
int phase)
{
qdr_binding_t *b = new_qdr_binding_t();
if (b) {
ZERO(b);
DEQ_ITEM_INIT_N(exchange_list, b);
DEQ_ITEM_INIT_N(tree_list, b);
b->name = qd_iterator_copy(name);
b->identity = qdr_identifier(ex->core);
b->exchange = ex;
b->key = qd_iterator_copy(key);
b->next_hop = next_hop(ex, nhop, phase);  // find-or-create, takes a reference
// each parse tree pattern stores the list of bindings that use it
qdr_binding_list_t *bindings = NULL;
if (!qd_parse_tree_get_pattern(ex->parse_tree, key, (void **)&bindings)) {
// new pattern
bindings = malloc(sizeof(*bindings));
DEQ_INIT(*bindings);
qd_parse_tree_add_pattern(ex->parse_tree, key, bindings);
}
assert(bindings);
DEQ_INSERT_TAIL_N(tree_list, *bindings, b);
DEQ_INSERT_TAIL_N(exchange_list, ex->bindings, b);
}
return b;
}
// Binding destructor: unlinks the binding from the parse tree (removing
// the pattern entirely when this was its last binding) and from the
// owning exchange, then drops its next hop reference.
static void qdr_binding_free(qdr_binding_t *b)
{
qdr_binding_list_t *bindings = NULL;
qdr_exchange_t *ex = b->exchange;
// build a temporary iterator over the stored key to search the tree
qd_iterator_t *k_iter = qd_iterator_string((char *)b->key,
ITER_VIEW_ALL);
if (qd_parse_tree_get_pattern(ex->parse_tree, k_iter, (void **)&bindings)) {
assert(bindings && !DEQ_IS_EMPTY(*bindings));
DEQ_REMOVE_N(tree_list, *bindings, b);
if (DEQ_IS_EMPTY(*bindings)) {
// last binding for this pattern - drop the tree entry and its list
qd_parse_tree_remove_pattern(ex->parse_tree, k_iter);
free(bindings);
}
}
qd_iterator_free(k_iter);
DEQ_REMOVE_N(exchange_list, b->exchange->bindings, b);
free(b->name);
free(b->key);
next_hop_release(b->next_hop);
free_qdr_binding_t(b);
}
// Next Hop constructor (find-or-create)
//
// Returns the existing next hop matching (address, phase) on the
// exchange, or creates one bound to the corresponding qdr_address_t.  In
// both cases a reference is taken on behalf of the caller (released via
// next_hop_release).
static next_hop_t *next_hop(qdr_exchange_t *ex,
qd_iterator_t *address,
int phase)
{
next_hop_t *nh = find_next_hop(ex, address, phase);
if (!nh) {
nh = new_next_hop_t();
if (!nh) return NULL;
ZERO(nh);
DEQ_ITEM_INIT_N(exchange_list, nh);
DEQ_ITEM_INIT_N(transmit_list, nh);
nh->exchange = ex;
nh->next_hop = qd_iterator_copy(address);
nh->phase = phase;
// resolve (or create) the address record for address+phase
qd_iterator_reset_view(address, ITER_VIEW_ADDRESS_HASH);
qd_iterator_annotate_phase(address, (char) phase + '0');
qd_hash_retrieve(ex->core->addr_hash, address, (void **)&nh->qdr_addr);
if (!nh->qdr_addr) {
qdr_core_t *core = ex->core;
qdr_address_config_t *addr_config;
qd_address_treatment_t treatment = qdr_treatment_for_address_hash_CT(core, address, &addr_config);
nh->qdr_addr = qdr_address_CT(core, treatment, addr_config);
qd_hash_insert(core->addr_hash, address, nh->qdr_addr, &nh->qdr_addr->hash_handle);
DEQ_INSERT_TAIL(core->addrs, nh->qdr_addr);
}
nh->qdr_addr->ref_count += 1;  // the next hop holds an address reference
DEQ_INSERT_TAIL_N(exchange_list, ex->next_hops, nh);
}
nh->ref_count += 1; // for caller's reference
return nh;
}
// Drop one reference to a next hop.  The final release also drops the
// next hop's address reference, unlinks it from its exchange and frees it.
static void next_hop_release(next_hop_t *nh)
{
    assert(nh->ref_count > 0);
    nh->ref_count -= 1;
    if (nh->ref_count > 0)
        return;

    // last reference gone - release the address and tear down
    assert(nh->qdr_addr->ref_count > 0);
    nh->qdr_addr->ref_count -= 1;
    if (nh->qdr_addr->ref_count == 0) {
        qdr_check_addr_CT(nh->exchange->core, nh->qdr_addr);
    }
    DEQ_REMOVE_N(exchange_list, nh->exchange->next_hops, nh);
    assert(!nh->on_xmit_list);
    free(nh->next_hop);
    free_next_hop_t(nh);
}
// lookup
// Locate an exchange by identity (preferred) or name; returns NULL when
// nothing matches.
static qdr_exchange_t *find_exchange(qdr_core_t *core, qd_iterator_t *identity, qd_iterator_t *name)
{
    for (qdr_exchange_t *ex = DEQ_HEAD(core->exchanges); ex; ex = DEQ_NEXT(ex)) {
        if (identity) { // ignore name - identity takes precedence
            // render the identity as a decimal string for comparison
            char id[100];
            snprintf(id, sizeof(id), "%"PRId64, ex->identity);
            if (qd_iterator_equal(identity, (const unsigned char*) id))
                return ex;
        } else if (name && qd_iterator_equal(name, ex->name)) {
            return ex;
        }
    }
    return 0;
}
// Locate a binding by identity (preferred) or name; the search spans the
// bindings of every exchange.  Returns NULL when nothing matches.
static qdr_binding_t *find_binding(qdr_core_t *core, qd_iterator_t *identity, qd_iterator_t *name)
{
    for (qdr_exchange_t *ex = DEQ_HEAD(core->exchanges); ex; ex = DEQ_NEXT(ex)) {
        qdr_binding_t *b = DEQ_HEAD(ex->bindings);
        for (; b; b = DEQ_NEXT_N(exchange_list, b)) {
            if (identity) { // ignore name - identity takes precedence
                // render the identity as a decimal string for comparison
                char id[100];
                snprintf(id, sizeof(id), "%"PRId64, b->identity);
                if (qd_iterator_equal(identity, (const unsigned char*) id))
                    return b;
            } else if (name && qd_iterator_equal(name, b->name)) {
                return b;
            }
        }
    }
    return NULL;
}
// Search the exchange's next hop list for an entry matching both the
// address and the phase.  Returns NULL when no match exists.
static next_hop_t *find_next_hop(qdr_exchange_t *ex,
qd_iterator_t *address,
int phase)
{
next_hop_t *nh = DEQ_HEAD(ex->next_hops);
// DEQ_FIND_N advances nh until the predicate is true or the list ends
DEQ_FIND_N(exchange_list, nh, (phase == nh->phase) && qd_iterator_equal(address, nh->next_hop));
return nh;
}
// Management helper routines
// Write the value of a single exchange attribute (selected by column
// index) into the composed response body.
static void exchange_insert_column(qdr_exchange_t *ex, int col, qd_composed_field_t *body)
{
switch(col) {
case QDR_CONFIG_EXCHANGE_NAME:
qd_compose_insert_string(body, (const char *)ex->name);
break;
case QDR_CONFIG_EXCHANGE_IDENTITY: {
// identity is rendered as a decimal string
char id_str[100];
snprintf(id_str, 100, "%"PRId64, ex->identity);
qd_compose_insert_string(body, id_str);
break;
}
case QDR_CONFIG_EXCHANGE_ADDRESS:
qd_compose_insert_string(body, (const char *)ex->address);
break;
case QDR_CONFIG_EXCHANGE_PHASE:
qd_compose_insert_int(body, ex->phase);
break;
case QDR_CONFIG_EXCHANGE_ALTERNATE:
// null when no alternate next hop is configured
if (ex->alternate && ex->alternate->next_hop)
qd_compose_insert_string(body, (const char *)ex->alternate->next_hop);
else
qd_compose_insert_null(body);
break;
case QDR_CONFIG_EXCHANGE_ALT_PHASE:
if (ex->alternate)
qd_compose_insert_int(body, ex->alternate->phase);
else
qd_compose_insert_null(body);
break;
case QDR_CONFIG_EXCHANGE_MATCH_METHOD:
// reported as the string that was accepted at CREATE time
switch (qd_parse_tree_type(ex->parse_tree)) {
case QD_PARSE_TREE_AMQP_0_10:
qd_compose_insert_string(body, "amqp");
break;
case QD_PARSE_TREE_MQTT:
qd_compose_insert_string(body, "mqtt");
break;
default:
break;
}
break;
case QDR_CONFIG_EXCHANGE_BINDING_COUNT:
qd_compose_insert_uint(body, DEQ_SIZE(ex->bindings));
break;
case QDR_CONFIG_EXCHANGE_RECEIVED:
qd_compose_insert_ulong(body, ex->msgs_received);
break;
case QDR_CONFIG_EXCHANGE_DROPPED:
qd_compose_insert_ulong(body, ex->msgs_dropped);
break;
case QDR_CONFIG_EXCHANGE_FORWARDED:
qd_compose_insert_ulong(body, ex->msgs_routed);
break;
case QDR_CONFIG_EXCHANGE_DIVERTED:
qd_compose_insert_ulong(body, ex->msgs_alternate);
break;
}
}
// Write the value of a single binding attribute (selected by column
// index) into the composed response body.
static void binding_insert_column(qdr_binding_t *b, int col, qd_composed_field_t *body)
{
switch(col) {
case QDR_CONFIG_BINDING_NAME:
// name is optional - null when not provided
if (b->name)
qd_compose_insert_string(body, (char *)b->name);
else
qd_compose_insert_null(body);
break;
case QDR_CONFIG_BINDING_IDENTITY: {
// identity is rendered as a decimal string
char id_str[100];
snprintf(id_str, 100, "%"PRIu64, b->identity);
qd_compose_insert_string(body, id_str);
break;
}
case QDR_CONFIG_BINDING_EXCHANGE:
qd_compose_insert_string(body, (char *)b->exchange->name);
break;
case QDR_CONFIG_BINDING_KEY:
qd_compose_insert_string(body, (char *)b->key);
break;
case QDR_CONFIG_BINDING_NEXTHOP:
assert(b->next_hop && b->next_hop->next_hop);
qd_compose_insert_string(body, (char *)b->next_hop->next_hop);
break;
case QDR_CONFIG_BINDING_NHOP_PHASE:
assert(b->next_hop);
qd_compose_insert_int(body, b->next_hop->phase);
break;
case QDR_CONFIG_BINDING_MATCHED:
qd_compose_insert_ulong(body, b->msgs_matched);
break;
}
}
// Emit every exchange attribute as a "name: value" entry in a map.
static void write_config_exchange_map(qdr_exchange_t *ex,
                                      qd_composed_field_t *body)
{
    qd_compose_start_map(body);
    for (int col = 0; col < QDR_CONFIG_EXCHANGE_COLUMN_COUNT; ++col) {
        qd_compose_insert_string(body, qdr_config_exchange_columns[col]);
        exchange_insert_column(ex, col, body);
    }
    qd_compose_end_map(body);
}
// Emit the query's requested exchange columns (terminated by a negative
// index) as a list of values.
static void write_config_exchange_list(qdr_exchange_t *ex,
                                       qdr_query_t *query)
{
    qd_compose_start_list(query->body);
    for (int i = 0; query->columns[i] >= 0; ++i)
        exchange_insert_column(ex, query->columns[i], query->body);
    qd_compose_end_list(query->body);
}
// Emit every binding attribute as a "name: value" entry in a map.
static void write_config_binding_map(qdr_binding_t *binding,
                                     qd_composed_field_t *body)
{
    qd_compose_start_map(body);
    for (int col = 0; col < QDR_CONFIG_BINDING_COLUMN_COUNT; ++col) {
        qd_compose_insert_string(body, qdr_config_binding_columns[col]);
        binding_insert_column(binding, col, body);
    }
    qd_compose_end_map(body);
}
// Emit the query's requested binding columns (terminated by a negative
// index) as a list of values.
static void write_config_binding_list(qdr_binding_t *binding,
                                      qdr_query_t *query)
{
    qd_compose_start_list(query->body);
    for (int i = 0; query->columns[i] >= 0; ++i)
        binding_insert_column(binding, query->columns[i], query->body);
    qd_compose_end_list(query->body);
}
// Return the binding at a global index, counting bindings across all
// exchanges in configuration order.  NULL when the index is out of range.
static qdr_binding_t *get_binding_at_index(qdr_core_t *core, int index)
{
    // Skip whole exchanges until the index falls inside one of them.
    qdr_exchange_t *ex = DEQ_HEAD(core->exchanges);
    while (ex && index >= DEQ_SIZE(ex->bindings)) {
        index -= DEQ_SIZE(ex->bindings);
        ex = DEQ_NEXT(ex);
    }
    if (!ex)
        return 0;

    // Then walk to the target binding within that exchange.
    assert(index < DEQ_SIZE(ex->bindings));
    qdr_binding_t *binding = DEQ_HEAD(ex->bindings);
    while (index-- > 0)
        binding = DEQ_NEXT_N(exchange_list, binding);
    return binding;
}
| 34.201814 | 127 | 0.602091 | [
"object"
] |
23ada0b977d841ca16daa9bd1217260d49167b94 | 1,973 | h | C | include/kerberos/capture/IPCamera.h | wollars/kerberos-machinery | 95c648133a7229b94b78af74283b3a8a1574922b | [
"Unlicense"
] | 1 | 2018-06-12T21:48:53.000Z | 2018-06-12T21:48:53.000Z | include/kerberos/capture/IPCamera.h | wollars/kerberos-machinery | 95c648133a7229b94b78af74283b3a8a1574922b | [
"Unlicense"
] | null | null | null | include/kerberos/capture/IPCamera.h | wollars/kerberos-machinery | 95c648133a7229b94b78af74283b3a8a1574922b | [
"Unlicense"
] | null | null | null | //
// Class: IPCamera
// Description: Class that handles an IP camera.
// Created: 23/07/2015
// Author: Cédric Verstraeten
// Mail: cedric@verstraeten.io
// Website: www.verstraeten.io
//
// The copyright to the computer program(s) herein
// is the property of Verstraeten.io, Belgium.
// The program(s) may be used and/or copied under
// the CC-NC-ND license model.
//
// https://doc.kerberos.io/license
//
/////////////////////////////////////////////////////
#ifndef __IPCamera_H_INCLUDED__ // if IPCamera.h hasn't been included yet...
#define __IPCamera_H_INCLUDED__ // #define this so the compiler knows it has been included
#include "capture/Capture.h"
#include "Executor.h"
namespace kerberos
{
// Name under which this capture type is registered with the factory.
char IPCameraName[] = "IPCamera";
// Capture implementation that reads frames from an IP camera stream via
// OpenCV's VideoCapture.
class IPCamera : public CaptureCreator<IPCameraName, IPCamera>
{
private:
cv::VideoCapture * m_camera;  // underlying OpenCV stream handle
Executor<IPCamera> tryToUpdateCapture;  // presumably retries/updates the capture periodically -- TODO confirm
std::string m_url;  // stream endpoint passed to open()
std::string m_streamType;
public:
// Allocates the OpenCV capture object; OpenCV errors are translated
// into the project's OpenCVException.
IPCamera()
{
try
{
m_camera = new cv::VideoCapture();
}
catch(cv::Exception & ex)
{
throw OpenCVException(ex.msg.c_str());
}
}
IPCamera(int width, int height);
virtual ~IPCamera(){};
// Configure the camera from key/value settings (url, size, rotation, ...).
void setup(StringMap & settings);
void setImageSize(int width, int height);
void setUrl(std::string url);
void setRotation(int angle);
void setDelay(int msec);
// Frame acquisition: grab() latches a frame; retrieve()/takeImage()
// return it as an Image.
void grab();
Image retrieve();
Image * takeImage();
// Stream lifecycle management.
void open();
void open(const char * url);
void reopen();
void close();
void update();
bool isOpened();
};
}
#endif | 28.185714 | 92 | 0.514952 | [
"model"
] |
23b8077723f5fe1289dc23726c24ab46efa5f786 | 5,362 | h | C | src/Robot.h | PEQUI-MEC/VSSS-MBED | 7489e1ffaa850403983bfdf4c8cb773c73b06c11 | [
"MIT"
] | 3 | 2018-05-22T21:45:53.000Z | 2019-03-29T13:21:44.000Z | src/Robot.h | PEQUI-MEC/VSSS-MBED | 7489e1ffaa850403983bfdf4c8cb773c73b06c11 | [
"MIT"
] | null | null | null | src/Robot.h | PEQUI-MEC/VSSS-MBED | 7489e1ffaa850403983bfdf4c8cb773c73b06c11 | [
"MIT"
] | 1 | 2018-10-16T23:43:02.000Z | 2018-10-16T23:43:02.000Z | #ifndef VSSS_ROBOT2_H
#define VSSS_ROBOT2_H
#include <mbed.h>
#include "Controller.h"
#include "Messenger.h"
#include <string>
class Messenger;
// Control modes stored in robot_state::command; they select which
// controller runs in the Robot's main control loop.
#define VECTOR_CONTROL 0
#define POSITION_CONTROL 1
#define ORIENTATION_CONTROL 2
#define NO_CONTROL 3
#define UVF_CONTROL 4
// Pose and command data shared between the controllers (used both for the
// current state and for the command target).
struct robot_state {
float x;  // position, x component
float y;  // position, y component
float theta;  // orientation
float velocity;  // velocity (desired when used as a target)
int command;  // active control mode (one of the *_CONTROL values)
float ref_x;  // UVF reference point, x component
float ref_y;  // UVF reference point, y component
};
// High-level robot motion controller.  Runs a dedicated control thread
// (control_loop) that selects and executes the controller for the active
// command (vector, position, orientation, UVF or raw wheel velocity).
class Robot {
private:
Thread control_thread;  // runs control_loop (started by start_thread)
Timer msg_timeout_timer;  // presumably measures time since the last command message -- TODO confirm
robot_state state = {};  // current pose, updated from odometry
robot_state target = {};  // setpoint configured by the start_*_control methods
float uvf_n = 2;  // UVF constant, defines curvature
float vel_acelerada = 0;  // velocity after the acceleration ramp
float orientation_Kp = 0.8;  // proportional gain for orientation control
bool previously_backwards = false;  // direction chosen on the previous control iteration
/** @brief Main control loop. Calls vector_control, position_control or orientation_control
* depending on state.command. Controller can be selected by calling start_vector_control,
* start_position_control or start_orientation_control */
void control_loop();
/** @brief Executes controller for the uvf command. Is started by start_uvf_control */
void uvf_control();
/** @brief Executes controller for the vector command. Is started by start_vector_control */
void vector_control();
/** @brief Executes controller for the position command. Is started by start_position_control */
void position_control();
/** @brief Executes controller for the orientation command. Is started by start_orientation_control */
void orientation_control();
/** @brief Sets the target velocity for each wheel using the output of the nonlinear controller
* @param theta_error Controller input. Difference between the target orientation and the current orientation.
* @param velocity Total desired velocity. Multiplies controller output for each wheel
* @param backwards True for backwards movement, false for forwards movement */
void set_wheel_velocity_nonlinear_controller(float theta_error, float velocity, bool backwards);
/** @brief Updates position and orientation stored on Robot::state, using odometry data
* computed by controller.update_wheel_velocity */
void update_odometry();
/** @brief Pauses threads control_thread and controller.control_thread and waits for the signal CONTINUE_SIGNAL.
* Signal is sent by continue_threads */
void stop_and_wait();
/** @brief Sets signal CONTINUE_SIGNAL, resuming control_thread and controller.control_thread */
void continue_threads();
/** @brief Sets position and orientation to 0. Used on relative commands */
void reset_state();
/** @brief Angle is converted to a value between -PI and PI
* @param angle Input angle in radians
* @return Equivalent angle between -PI and PI, in radians */
float round_angle(float angle);
/** @brief Saturates value, setting an upper and lower limit
* @param value Floating point number to be saturated
* @param limit Upper and lower limit
* @return Returns value if (abs(value) < limit),
* limit if (value > limit), or -limit if (value < -limit) */
float saturate(float value, float limit);
public:
Messenger* messenger;  // used to send logs (set by the constructor)
Controller controller;  // wheel-level velocity controller
float max_theta_error;  // maximum allowed theta error (see set_max_theta_error)
float acc_rate;  // acceleration ramp rate applied to vel_acelerada -- TODO confirm units
float kgz;  // controller gain -- semantics not visible in this header, TODO confirm
int msg_timeout_limit;  // threshold compared against msg_timeout_timer -- TODO confirm units
char MY_ID;  // identifier for this robot
/** @brief Constructor
* @param msgr Pointer to Messenger, can be used to send logs */
explicit Robot(Messenger *msgr);
/** @brief Executes vector command. Configures Robot::target used on the main control loop
* @param x X component of the desired position
* @param y Y component of the desired position
* @param x_ref X component of the uvf reference
* @param y_ref Y component of the uvf reference
* @param n UVF constant, defines curvature
* @param velocity Desired velocity
* @param reset Executes a relative command if true, by setting state variables to 0 */
void start_uvf_control(float x, float y, float x_ref, float y_ref, float n, float velocity, bool reset);
/** @brief Executes vector command. Configures Robot::target used on the main control loop
* @param theta Desired orientation
* @param velocity Desired velocity
* @param reset Executes a relative command if true, by setting state variables to 0 */
void start_vector_control(float theta, float velocity, bool reset = true);
/** @brief Executes orientation command. Configures Robot::target used on the main control loop
* @param theta Desired orientation
* @param velocity Desired velocity */
void start_orientation_control(float theta, float velocity, bool reset = true);
/** @brief Executes position command. Configures Robot::target used on the main control loop
* @param x X component of the desired position
* @param y Y component of the desired position
* @param velocity Desired velocity
* @param reset Executes a relative command if true, by setting state variables to 0 */
void start_position_control(float x, float y, float velocity, bool reset = true);
/** @brief Executes velocity command. Sets robot.command to NO_CONTROL
* @param vel_left Desired velocity for the left wheel
* @param vel_right Desired velocity for the right wheel */
void start_velocity_control(float vel_left, float vel_right);
/** @brief Setter for max_theta_error
* @param error Maximum allowed theta_error, in degrees */
void set_max_theta_error(float error);
/** @brief Starts main control loop thread */
void start_thread();
};
#endif //VSSS_ROBOT2_H
| 36.97931 | 114 | 0.746363 | [
"vector"
] |
23bb6ac1381e049ba8429d26a582efd1fd7ba68b | 9,536 | h | C | Gems/EMotionFX/Code/EMotionFX/Source/AnimGraphReferenceNode.h | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-12T14:13:45.000Z | 2022-03-12T14:13:45.000Z | Gems/EMotionFX/Code/EMotionFX/Source/AnimGraphReferenceNode.h | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | 2 | 2022-01-13T04:29:38.000Z | 2022-03-12T01:05:31.000Z | Gems/EMotionFX/Code/EMotionFX/Source/AnimGraphReferenceNode.h | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | null | null | null | /*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/Asset/AssetCommon.h>
#include <EMotionFX/Source/EMotionFXConfig.h>
#include <EMotionFX/Source/AnimGraphNode.h>
#include <EMotionFX/Source/AnimGraph.h>
#include <EMotionFX/Source/ObjectAffectedByParameterChanges.h>
#include <Integration/Assets/AnimGraphAsset.h>
#include <Integration/Assets/MotionSetAsset.h>
#include <Integration/System/SystemCommon.h>
namespace EMotionFX
{
class AnimGraphModelFixture;
// Anim graph node that embeds ("references") a separately authored anim graph
// asset inside the owning graph. Each AnimGraphInstance owns its own instance
// of the referenced graph (see UniqueData::m_referencedAnimGraphInstance).
// Value parameters of the referenced graph are fed either through input ports
// (the masked parameters, see m_maskedParameterNames) or from same-named
// parameters of parent anim graphs (see UniqueData::m_parameterMappingCache).
class EMFX_API AnimGraphReferenceNode
: public AnimGraphNode
, private AZ::Data::AssetBus::MultiHandler
, public ObjectAffectedByParameterChanges
{
public:
AZ_RTTI(AnimGraphReferenceNode, "{61B29119-CF2F-4CC6-8872-CC6BB5A198FD}", AnimGraphNode, ObjectAffectedByParameterChanges)
AZ_CLASS_ALLOCATOR_DECL
// Output port indices.
enum
{
OUTPUTPORT_RESULT = 0
};
// Output port ids.
enum
{
PORTID_OUTPUT_POSE = 0
};
// Per-AnimGraphInstance data: owns the instance of the referenced graph and
// caches how parent graph parameters map onto it.
class EMFX_API UniqueData
: public AnimGraphNodeData
{
EMFX_ANIMGRAPHOBJECTDATA_IMPLEMENT_LOADSAVE
public:
AZ_CLASS_ALLOCATOR_DECL
UniqueData(AnimGraphNode* node, AnimGraphInstance* parentAnimGraphInstance);
~UniqueData();
void Update() override;
void OnReferenceAnimGraphAssetChanged();
// Instance of the referenced anim graph; nullptr until the asset is ready.
AnimGraphInstance* m_referencedAnimGraphInstance = nullptr;
// Cache the mappings.
// During Update, parameter values that are not coming from an upstream connections
// are going to taken from parent anim graphs (if name and type matches). This information
// is cached with the following structure.
struct ValueParameterMappingCacheEntry
{
ValueParameterMappingCacheEntry(AnimGraphInstance* sourceAnimGraphInstance,
uint32 sourceValueParameterIndex,
uint32 targetValueParameterIndex)
: m_sourceAnimGraphInstance(sourceAnimGraphInstance)
, m_sourceValueParameterIndex(sourceValueParameterIndex)
, m_targetValueParameterIndex(targetValueParameterIndex)
{}
AnimGraphInstance* m_sourceAnimGraphInstance; // parent instance the value is read from
uint32 m_sourceValueParameterIndex; // parameter index within the source instance
uint32 m_targetValueParameterIndex; // parameter index within the referenced instance
};
AZStd::vector<ValueParameterMappingCacheEntry> m_parameterMappingCache;
bool m_parameterMappingCacheDirty = true; // when true, the cache is rebuilt (see UpdateParameterMappingCache)
};
AnimGraphReferenceNode();
~AnimGraphReferenceNode();
void Reinit();
void RecursiveReinit();
bool InitAfterLoading(AnimGraph* animGraph) override;
AnimGraphObjectData* CreateUniqueData(AnimGraphInstance* animGraphInstance) override { return aznew UniqueData(this, animGraphInstance); }
// AnimGraphNode overrides.
void RecursiveOnChangeMotionSet(AnimGraphInstance* animGraphInstance, MotionSet* newMotionSet) override;
void Rewind(AnimGraphInstance* animGraphInstance) override;
void RecursiveInvalidateUniqueDatas(AnimGraphInstance* animGraphInstance) override;
void RecursiveResetFlags(AnimGraphInstance* animGraphInstance, uint32 flagsToDisable = 0xffffffff) override;
void RecursiveSetUniqueDataFlag(AnimGraphInstance* animGraphInstance, uint32 flag, bool enabled) override;
void RecursiveCollectActiveNodes(AnimGraphInstance* animGraphInstance, AZStd::vector<AnimGraphNode*>* outNodes, const AZ::TypeId& nodeType) const override;
AnimGraphPose* GetMainOutputPose(AnimGraphInstance* animGraphInstance) const override;
void RecursiveCollectObjects(MCore::Array<AnimGraphObject*>& outObjects) const override;
void RecursiveCollectObjectsAffectedBy(AnimGraph* animGraph, AZStd::vector<AnimGraphObject*>& outObjects) const override;
bool RecursiveDetectCycles(AZStd::unordered_set<const AnimGraphNode*>& nodes) const override;
// Editor/visualization traits of this node type.
AZ::Color GetVisualColor() const override { return AZ::Color(0.64f, 0.42f, 0.58f, 1.0f); }
bool GetCanActAsState() const override { return true; }
bool GetSupportsVisualization() const override { return true; }
bool GetHasOutputPose() const override { return true; }
bool GetHasVisualOutputPorts() const override { return true; }
bool GetCanHaveOnlyOneInsideParent() const override { return false; }
bool GetHasVisualGraph() const override;
const char* GetPaletteName() const override;
AnimGraphObject::ECategory GetPaletteCategory() const override;
// Returns the cached m_hasCycles flag (see RecursiveDetectCycles).
bool GetHasCycles() const { return m_hasCycles; }
static void Reflect(AZ::ReflectContext* context);
//////////////////////////////////////////////////////////////////////////
// AZ::Data::AssetBus::MultiHandler
void OnAssetReady(AZ::Data::Asset<AZ::Data::AssetData> asset) override;
void OnAssetReloaded(AZ::Data::Asset<AZ::Data::AssetData> asset) override;
//////////////////////////////////////////////////////////////////////////
// Asset access. The motion set asset is optional; when unset the parent's
// motion set is used (see RecursiveOnChangeMotionSet) -- TODO confirm.
void SetAnimGraphAsset(AZ::Data::Asset<Integration::AnimGraphAsset> asset);
AnimGraph* GetReferencedAnimGraph() const;
MotionSet* GetMotionSet() const;
AZ::Data::Asset<Integration::AnimGraphAsset> GetReferencedAnimGraphAsset() const;
AZ::Data::Asset<Integration::MotionSetAsset> GetReferencedMotionSetAsset() const;
AnimGraphInstance* GetReferencedAnimGraphInstance(AnimGraphInstance* animGraphInstance) const;
// ObjectAffectedByParameterChanges
AZStd::vector<AZStd::string> GetParameters() const override;
AnimGraph* GetParameterAnimGraph() const override;
void ParameterMaskChanged(const AZStd::vector<AZStd::string>& newParameterMask) override;
void AddRequiredParameters(AZStd::vector<AZStd::string>& parameterNames) const override;
void ParameterAdded(const AZStd::string& newParameterName) override;
void ParameterRenamed(const AZStd::string& oldParameterName, const AZStd::string& newParameterName) override;
void ParameterOrderChanged(const ValueParameterVector& beforeChange, const ValueParameterVector& afterChange) override;
void ParameterRemoved(const AZStd::string& oldParameterName) override;
private:
// Per-frame processing delegated to the referenced graph instance.
void Output(AnimGraphInstance* animGraphInstance) override;
void Update(AnimGraphInstance* animGraphInstance, float timePassedInSeconds) override;
void TopDownUpdate(AnimGraphInstance* animGraphInstance, float timePassedInSeconds) override;
void PostUpdate(AnimGraphInstance* animGraphInstance, float timePassedInSeconds) override;
AZ::Data::Asset<Integration::MotionSetAsset>* GetMotionSetAsset() { return &m_motionSetAsset; }
bool HasMotionSetAsset() const { return static_cast<bool>(m_motionSetAsset); }
void ReleaseAnimGraphInstances();
// Callbacks from the Reflected Property Editor
void OnAnimGraphAssetChanged();
void OnMotionSetAssetChanged();
void OnMotionSetChanged();
void OnMaskedParametersChanged();
void LoadAnimGraphAsset();
void LoadMotionSetAsset();
void OnAnimGraphAssetReady();
void OnMotionSetAssetReady();
void ReinitMaskedParameters();
void ReinitInputPorts();
void UpdateParameterMappingCache(AnimGraphInstance* animGraphInstance);
AZ::Data::Asset<Integration::AnimGraphAsset> m_animGraphAsset;
AZ::Data::Asset<Integration::MotionSetAsset> m_motionSetAsset;
AZStd::string m_activeMotionSetName;
// Since changing the anim graph asset could trigger its destructor (since
// it could be the last anim graph being used) and produce anim graph instances
// to be destroyed, invalidating the data we have in unique data, we are going
// to cache a second anim graph asset and update it after we processed the
// unique data
uint32 m_lastProcessedAnimGraphId = MCORE_INVALIDINDEX32;
// When a different anim graph is set, we are going to select all
// the parameters that can't be mapped automatically. Only parameters
// not in this list are going to be tried to be mapped. If the user changes
// the parameters names, then those excluded will try to be mapped.
// Parameters in this list will have ports and will allow to be connected
// If a parameter is in this list it means that it wasn't mapped
AZStd::vector<AZStd::string> m_maskedParameterNames;
bool m_hasCycles;
bool m_reinitMaskedParameters;
// Cache the value parameter indices that are selected to be inputs
// The index in the vector is the portid. This is used during Update to get
// the upstream values faster
AZStd::vector<uint32> m_parameterIndexByPortIndex;
};
} // namespace EMotionFX
| 47.68 | 163 | 0.670617 | [
"vector",
"3d"
] |
23bd46b049f35695d9a2f4cbeed7a14e4f4229f6 | 5,812 | h | C | src/StandardTetrahedralFEMForceFieldTwoMaterials.h | thejohnhoffer/physics_simulation | e387ed475ae5ad5429f679ba8a1be50e77494be1 | [
"Apache-2.0"
] | null | null | null | src/StandardTetrahedralFEMForceFieldTwoMaterials.h | thejohnhoffer/physics_simulation | e387ed475ae5ad5429f679ba8a1be50e77494be1 | [
"Apache-2.0"
] | null | null | null | src/StandardTetrahedralFEMForceFieldTwoMaterials.h | thejohnhoffer/physics_simulation | e387ed475ae5ad5429f679ba8a1be50e77494be1 | [
"Apache-2.0"
] | 1 | 2020-08-20T21:03:21.000Z | 2020-08-20T21:03:21.000Z |
#ifndef SOFA_COMPONENT_FORCEFIELD_StandardTetrahedralFEMForceFieldTwoMaterials_H
#define SOFA_COMPONENT_FORCEFIELD_StandardTetrahedralFEMForceFieldTwoMaterials_H
// Use #pragma once on compilers known to support it (non-GCC, or GCC > 3.3).
// Fixed a typo: the original tested "_GNUC__" (single leading underscore),
// an undefined identifier that evaluates to 0 inside #if, which silently
// disabled #pragma once for GCC 3.x (the classic include guard still covers
// correctness; this only restores the intended fast-path).
#if !defined(__GNUC__) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3))
#pragma once
#endif
#include "initPlugin.h"
#include <SofaMiscFem/StandardTetrahedralFEMForceField.h>
#include <SofaMiscFem/HyperelasticMaterial.h>
#include <sofa/core/behavior/ForceField.h>
#include <SofaBaseMechanics/MechanicalObject.h>
#include <sofa/defaulttype/Vec.h>
#include <sofa/defaulttype/Mat.h>
#include <sofa/defaulttype/MatSym.h>
#include <sofa/core/topology/BaseMeshTopology.h>
#include <SofaBaseTopology/TopologyData.h>
#include <string>
#include <map>
#include <sofa/component/component.h>
#include <string>
#include <iostream>
#include <math.h>
#include <Eigen/src/Core/Matrix.h>
#include <LastFriction.h>
#include <sofa/simulation/common/Node.h>
namespace sofa
{
namespace component
{
namespace forcefield
{
using namespace std;
using namespace sofa::defaulttype;
using namespace sofa::component::topology;
//***************** Tetrahedron FEM code for several elastic models: StandardTetrahedralFEMForceField*******************************************************************
//********************************** Based on classical discretization : Fi=-Bi^T S V and Kij=Bi^T N Bj +Di^T S Dj **********************************************
//***************************************** where Bi is the strain displacement (6*3 matrix), S SPK tensor N=dS/dC, Di shape vector ************************************
//**************************** Code dependent on HyperelasticMaterialFEM and inherited classes *********************************************************************
/** Compute Finite Element forces based on tetrahedral elements.
 *
 * Variant of StandardTetrahedralFEMForceField supporting two hyperelastic
 * material parameter sets in one mesh (presumably tetrahedra from index
 * f_secondMaterialStart onward use the second set -- confirm against the
 * .inl implementation). It can also apply corrective forces intended to
 * conserve linear/angular momentum (NOTE(review): inferred from member
 * names; verify).
 */
template<class DataTypes>
class StandardTetrahedralFEMForceFieldTwoMaterials: public StandardTetrahedralFEMForceField<DataTypes>
{
public:
SOFA_CLASS(SOFA_TEMPLATE(StandardTetrahedralFEMForceFieldTwoMaterials, DataTypes), SOFA_TEMPLATE(StandardTetrahedralFEMForceField, DataTypes));
// Convenience aliases pulled from the base force field / SOFA framework.
typedef StandardTetrahedralFEMForceField<DataTypes> Inherited;
typedef typename Inherited::VecCoord VecCoord;
typedef typename Inherited::VecDeriv VecDeriv;
typedef typename Inherited::Coord Coord;
typedef typename Inherited::Deriv Deriv;
typedef typename Inherited::Real Real;
typedef defaulttype::Mat<3,3,Real> Matrix3;
typedef defaulttype::Mat<6,6,Real> Matrix6;
typedef defaulttype::Mat<6,3,Real> Matrix63;
typedef defaulttype::MatSym<3,Real> MatrixSym;
typedef core::objectmodel::Data<VecDeriv> DataVecDeriv;
typedef core::objectmodel::Data<VecCoord> DataVecCoord;
typedef helper::vector<Real> SetParameterArray;
typedef helper::vector<Coord> SetAnisotropyDirectionArray;
typedef core::topology::BaseMeshTopology::index_type Index;
typedef core::topology::BaseMeshTopology::Tetra Element;
typedef core::topology::BaseMeshTopology::SeqTetrahedra VecElement;
typedef typename Inherited::EdgeInformation EdgeInformation;
typedef typename Inherited::edgeInformationVector edgeInformationVector;
typedef typename Inherited::tetrahedronRestInfoVector tetrahedronRestInfoVector;
typedef typename Inherited::TetrahedronRestInformation TetrahedronRestInformation;
public :
// Force time series -- presumably loaded from disk via loadVector() and
// sampled with interpolate(); TODO confirm layout (m rows x n columns).
vector<vector<double> > internalForce;
vector<vector<double> > conservForces;
vector<vector<double> > forceCorrective;
int m; // Num internalForce rows
int n; // Num internalForce columns
bool firstime; // First loop
bool conservationLinearMomentum; // enable the linear momentum correction
bool conservationAngularMomentum; // enable the angular momentum correction
double timestep;
double counter;
int row; // current time-sample row -- TODO confirm
// Material parameters for the second material region.
typename sofa::component::fem::MaterialParameters<DataTypes> globalParameters2;
// First tetrahedron index belonging to the second material -- TODO confirm.
Data<unsigned> f_secondMaterialStart;
protected:
// Parameter set (e.g. material constants) for the second material.
Data<SetParameterArray> f_secondParameterSet;
public:
StandardTetrahedralFEMForceFieldTwoMaterials();
virtual ~StandardTetrahedralFEMForceFieldTwoMaterials();
void init();
// SOFA ForceField API: accumulate df = K * dx into d_df.
void addDForce(const core::MechanicalParams* mparams /* PARAMS FIRST */, DataVecDeriv& d_df, const DataVecDeriv& d_dx);
// SOFA ForceField API: accumulate the elastic force at positions d_x into d_f.
void addForce(const core::MechanicalParams* /* mparams */ /* PARAMS FIRST */, DataVecDeriv& d_f, const DataVecCoord& d_x, const DataVecDeriv& /* d_v */);
void draw(const core::visual::VisualParams* vparams);
std::string intToString(int number); // integer -> decimal string helper
// Loads a force series column/segment i from 'filename' -- TODO confirm.
int loadVector(std::string filename, int i);
// Interpolated force sample for the given row -- TODO confirm semantics.
vector<double> interpolate(int row);
// Corrective force for one node from the Lagrange multipliers
// (lambdaForce, lambdaTorque) -- NOTE(review): inferred from names.
Eigen::Vector3d createForceCorrective(Eigen::Vector3d position, Eigen::Vector3d centerOfMass, Eigen::Vector3d lambdaForce, Eigen::Vector3d lambdaTorque);
// Center of mass of the first numVert vertices of x.
Eigen::Vector3d centerMass(const VecCoord& x, int numVert);
// Computes the Lagrange multipliers enforcing the given total force/torque
// about centerOfMass -- NOTE(review): inferred from names; verify.
void multLagrange(Eigen::Vector3d &lambdaForce, Eigen::Vector3d &lambdaTorque, const VecCoord& x, Eigen::Vector3d force, Eigen::Vector3d torque, Eigen::Vector3d centerOfMass, int numVert);
// Debug/trace output toggle (Data exposed to the scene, plus cached copy).
Data<bool> f_writeTraces;
bool m_writeTraces;
//drawing variables
simulation::Node* m_gnode;
// External friction bookkeeping shared with LastFriction (not owned).
std::map<int,std::vector<int>>* m_frictionRelations;
std::vector<sofa::component::forcefield::LastFriction<Vec3dTypes>::nodeProperties>* m_nodes;
VecDeriv auxf; // scratch force buffer -- TODO confirm
};
#ifndef SOFA_FLOAT
using sofa::defaulttype::Vec3dTypes;
#endif
#ifndef SOFA_DOUBLE
using sofa::defaulttype::Vec3fTypes;
#endif
#if defined(SOFA_EXTERN_TEMPLATE) && !defined(SOFA_BUILD_SIELEGANSPLUGIN)
#ifndef SOFA_FLOAT
extern template class StandardTetrahedralFEMForceFieldTwoMaterials<Vec3dTypes>;
#endif
#ifndef SOFA_DOUBLE
extern template class StandardTetrahedralFEMForceFieldTwoMaterials<Vec3fTypes>;
#endif
#endif // defined(SOFA_EXTERN_TEMPLATE) && !defined(SOFA_BUILD_SIELEGANSPLUGIN)
} // namespace forcefield
} // namespace component
} // namespace sofa
#endif // SOFA_COMPONENT_FORCEFIELD_StandardTetrahedralFEMForceFieldTwoMaterials_H | 36.325 | 189 | 0.747247 | [
"shape",
"vector"
] |
23c0900afb53beeec683c20fbced72eb4eda8b5d | 986 | h | C | src/Transformation.h | hixio-mh/node-occ | 033e94044712750f7eab869fc74f7661746fc552 | [
"MIT"
] | 213 | 2017-02-16T08:39:54.000Z | 2022-03-22T09:38:00.000Z | src/Transformation.h | hixio-mh/node-occ | 033e94044712750f7eab869fc74f7661746fc552 | [
"MIT"
] | 28 | 2017-04-16T17:01:12.000Z | 2022-02-08T04:55:22.000Z | src/Transformation.h | hixio-mh/node-occ | 033e94044712750f7eab869fc74f7661746fc552 | [
"MIT"
] | 62 | 2017-03-07T10:49:47.000Z | 2022-03-31T16:38:56.000Z | #pragma once
#include "NodeV8.h"
#include "OCC.h"
#include "Util.h"
#include "Point3Wrap.h"
// Node.js (Nan/V8) wrapper around an OpenCASCADE gp_Trsf transformation.
// The static make* NAN_METHODs are JS-callable builders that configure the
// wrapped transformation (translation, mirror, scale, rotation).
class Transformation : public Nan::ObjectWrap {
public:
typedef class Transformation _ThisType ;
// Default-constructs the wrapper; m_trsf starts as the identity transform.
Transformation()
{}
// JS-exposed builders that set up m_trsf.
static NAN_METHOD(makeTranslation);
static NAN_METHOD(makePlaneMirror);
static NAN_METHOD(makeAxisMirror);
static NAN_METHOD(makeScale);
static NAN_METHOD(makeRotation);
// Scale factor of the wrapped transformation.
double scaleFactor() {
return m_trsf.ScaleFactor();
}
// Translation component of the wrapped transformation.
const gp_XYZ translationPart() const {
return m_trsf.TranslationPart();
}
// Exposes translationPart as a tear-off Point3Wrap property (see Point3Wrap.h).
TEAROFF_POINT(Transformation,translationPart,Point3Wrap,gp_XYZ);
// The wrapped OpenCASCADE transformation.
gp_Trsf m_trsf;
// Methods exposed to JavaScript
static void Init(v8::Local<v8::Object> target);
static NAN_METHOD(NewInstance);
static NAN_METHOD(New);
static Nan::Persistent<v8::FunctionTemplate> _template;
private:
// Declared but not defined: makes the wrapper non-copyable (pre-C++11 idiom).
Transformation(const Transformation&);
void operator=(const Transformation&);
};
"object"
] |
23c24857f464b2cda3491abbba6388bf73584108 | 3,764 | h | C | Gems/Multiplayer/Code/Source/Debug/MultiplayerDebugSystemComponent.h | psy-repos-c/o3de | 42d917e4726b1cae8c39c10834b0e621c9e8300d | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/Multiplayer/Code/Source/Debug/MultiplayerDebugSystemComponent.h | psy-repos-c/o3de | 42d917e4726b1cae8c39c10834b0e621c9e8300d | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/Multiplayer/Code/Source/Debug/MultiplayerDebugSystemComponent.h | psy-repos-c/o3de | 42d917e4726b1cae8c39c10834b0e621c9e8300d | [
"Apache-2.0",
"MIT"
] | null | null | null | /*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include "MultiplayerDebugAuditTrail.h"
#include "MultiplayerDebugHierarchyReporter.h"
#include "MultiplayerDebugPerEntityReporter.h"
#include <AzCore/Component/Component.h>
#include <AzCore/Interface/Interface.h>
#include <AzFramework/Input/Buses/Requests/InputSystemCursorRequestBus.h>
#include <Debug/MultiplayerDebugNetworkMetrics.h>
#include <Debug/MultiplayerDebugMultiplayerMetrics.h>
#include <Multiplayer/IMultiplayerDebug.h>
#ifdef IMGUI_ENABLED
# include <imgui/imgui.h>
# include <ImGuiBus.h>
# include <LYImGuiUtils/HistogramContainer.h>
#endif
namespace Multiplayer
{
class MultiplayerDebugSystemComponent final
: public AZ::Component
, public AZ::Interface<IMultiplayerDebug>::Registrar
#ifdef IMGUI_ENABLED
, public ImGui::ImGuiUpdateListenerBus::Handler
#endif
{
public:
static constexpr char HOST_BUTTON_TITLE[] = "Host";
static constexpr char LAUNCH_LOCAL_CLIENT_BUTTON_TITLE[] = "Launch Local Client";
AZ_COMPONENT(MultiplayerDebugSystemComponent, "{060BF3F1-0BFE-4FCE-9C3C-EE991F0DA581}");
static void Reflect(AZ::ReflectContext* context);
static void GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided);
static void GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType& required);
static void GetIncompatibleServices(AZ::ComponentDescriptor::DependencyArrayType& incompatible);
~MultiplayerDebugSystemComponent() override = default;
//! AZ::Component overrides
//! @{
void Activate() override;
void Deactivate() override;
//! @}
//! IMultiplayerDebug overrides
//! @{
void ShowEntityBandwidthDebugOverlay() override;
void HideEntityBandwidthDebugOverlay() override;
void AddAuditEntry(
const AuditCategory category,
const ClientInputId inputId,
const HostFrameId frameId,
const AZStd::string& name,
AZStd::vector<MultiplayerAuditingElement>&& entryDetails) override;
//! @}
#ifdef IMGUI_ENABLED
//! ImGui::ImGuiUpdateListenerBus overrides
//! @{
void OnImGuiMainMenuUpdate() override;
void OnImGuiUpdate() override;
//! @}
private:
//! Constructs a filtered version of the audit trail based on a search string
void FilterAuditTrail();
bool m_displayNetworkingStats = false;
AZStd::unique_ptr<MultiplayerDebugNetworkMetrics> m_networkMetrics;
bool m_displayMultiplayerStats = false;
AZStd::unique_ptr<MultiplayerDebugMultiplayerMetrics> m_multiplayerMetrics;
bool m_displayPerEntityStats = false;
AZStd::unique_ptr<MultiplayerDebugPerEntityReporter> m_reporter;
bool m_displayHierarchyDebugger = false;
AZStd::unique_ptr<MultiplayerDebugHierarchyReporter> m_hierarchyDebugger;
bool m_displayNetAuditTrail = false;
AZStd::unique_ptr<MultiplayerDebugAuditTrail> m_auditTrail;
AzFramework::SystemCursorState m_previousSystemCursorState = AzFramework::SystemCursorState::Unknown; //! The last system cursor state.
AZStd::string m_lastFilter;
AZStd::deque<AuditTrailInput> m_auditTrailElems;
AZStd::deque<AuditTrailInput> m_committedAuditTrail;
AZStd::deque<AuditTrailInput> m_pendingAuditTrail;
AZStd::deque<AuditTrailInput> m_filteredAuditTrail;
AZ::ApplicationTypeQuery m_applicationType;
#endif
};
}
| 35.509434 | 143 | 0.719182 | [
"vector",
"3d"
] |
23c86f39cd968f5f32e168961cf786cba5073894 | 8,571 | h | C | chrome/browser/ui/views/browser_action_view.h | hujiajie/pa-chromium | 1816ff80336a6efd1616f9e936880af460b1e105 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-05-03T06:33:56.000Z | 2021-11-14T18:39:42.000Z | chrome/browser/ui/views/browser_action_view.h | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/browser/ui/views/browser_action_view.h | devasia1000/chromium | 919a8a666862fb866a6bb7aa7f3ae8c0442b4828 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_VIEWS_BROWSER_ACTION_VIEW_H_
#define CHROME_BROWSER_UI_VIEWS_BROWSER_ACTION_VIEW_H_
#include <string>
#include "chrome/browser/extensions/extension_action_icon_factory.h"
#include "chrome/browser/extensions/extension_context_menu_model.h"
#include "content/public/browser/notification_observer.h"
#include "content/public/browser/notification_registrar.h"
#include "ui/views/context_menu_controller.h"
#include "ui/views/controls/button/menu_button.h"
#include "ui/views/controls/button/menu_button_listener.h"
#include "ui/views/drag_controller.h"
#include "ui/views/view.h"
class Browser;
class BrowserActionButton;
class ExtensionAction;
namespace extensions {
class Extension;
}
namespace gfx {
class Image;
}
namespace views {
class MenuItemView;
class MenuRunner;
}
////////////////////////////////////////////////////////////////////////////////
// BrowserActionView
// A single entry in the browser action container. This contains the actual
// BrowserActionButton, as well as the logic to paint the badge.
class BrowserActionView : public views::View {
 public:
  // Need DragController here because BrowserActionView could be
  // dragged/dropped.
  class Delegate : public views::DragController,
                   public ExtensionContextMenuModel::PopupDelegate {
   public:
    // Returns the current tab's ID, or -1 if there is no current tab.
    virtual int GetCurrentTabId() const = 0;
    // Called when the user clicks on the browser action icon.
    virtual void OnBrowserActionExecuted(BrowserActionButton* button) = 0;
    // Called when a browser action becomes visible/hidden.
    virtual void OnBrowserActionVisibilityChanged() = 0;
    // Returns relative position of a button inside BrowserActionView.
    virtual gfx::Point GetViewContentOffset() const = 0;
    // Unlike the pure-virtual methods above, these have default
    // implementations the delegate may override to customize icon-state
    // handling and tooltip display.
    virtual bool NeedToShowMultipleIconStates() const;
    virtual bool NeedToShowTooltip() const;
   protected:
    // Protected: delegates are never deleted through this interface.
    virtual ~Delegate() {}
  };
  BrowserActionView(const extensions::Extension* extension,
                    Browser* browser,
                    Delegate* delegate);
  virtual ~BrowserActionView();
  // The contained button.
  BrowserActionButton* button() { return button_; }
  // Gets browser action button icon with the badge.
  gfx::ImageSkia GetIconWithBadge();
  // Overridden from views::View:
  virtual void Layout() OVERRIDE;
  virtual void GetAccessibleState(ui::AccessibleViewState* state) OVERRIDE;
  virtual gfx::Size GetPreferredSize() OVERRIDE;
 protected:
  // Overridden from views::View to paint the badge on top of children.
  virtual void PaintChildren(gfx::Canvas* canvas) OVERRIDE;
 private:
  // The Browser object this view is associated with.
  Browser* browser_;
  // Usually a container for this view.
  Delegate* delegate_;
  // The button this view contains.
  BrowserActionButton* button_;
  // Extension this view associated with.
  const extensions::Extension* extension_;
  DISALLOW_COPY_AND_ASSIGN(BrowserActionView);
};
////////////////////////////////////////////////////////////////////////////////
// BrowserActionButton
// The BrowserActionButton is a specialization of the MenuButton class.
// It acts on a ExtensionAction, in this case a BrowserAction and handles
// loading the image for the button asynchronously on the file thread.
class BrowserActionButton : public views::MenuButton,
                            public views::ButtonListener,
                            public views::ContextMenuController,
                            public content::NotificationObserver,
                            public ExtensionActionIconFactory::Observer {
 public:
  BrowserActionButton(const extensions::Extension* extension,
                      Browser* browser_,
                      BrowserActionView::Delegate* delegate);
  // Call this instead of delete.
  void Destroy();
  // Non-owning accessors; see the corresponding member comments for ownership.
  ExtensionAction* browser_action() const { return browser_action_; }
  const extensions::Extension* extension() { return extension_; }
  // Called to update the display to match the browser action's state.
  void UpdateState();
  // Does this button's action have a popup?
  virtual bool IsPopup();
  // URL of the popup; only meaningful when IsPopup() is true -- TODO confirm.
  virtual GURL GetPopupUrl();
  // Overridden from views::View:
  virtual bool CanHandleAccelerators() const OVERRIDE;
  virtual void GetAccessibleState(ui::AccessibleViewState* state) OVERRIDE;
  // Overridden from views::ButtonListener:
  virtual void ButtonPressed(views::Button* sender,
                             const ui::Event& event) OVERRIDE;
  // Overridden from views::ContextMenuController.
  virtual void ShowContextMenuForView(View* source,
                                      const gfx::Point& point) OVERRIDE;
  // Overridden from content::NotificationObserver:
  virtual void Observe(int type,
                       const content::NotificationSource& source,
                       const content::NotificationDetails& details) OVERRIDE;
  // Overriden from ExtensionActionIconFactory::Observer.
  virtual void OnIconUpdated() OVERRIDE;
  // MenuButton behavior overrides. These methods all default to TextButton
  // behavior unless this button is a popup. In that case, it uses MenuButton
  // behavior. MenuButton has the notion of a child popup being shown where the
  // button will stay in the pushed state until the "menu" (a popup in this
  // case) is dismissed.
  virtual bool Activate() OVERRIDE;
  virtual bool OnMousePressed(const ui::MouseEvent& event) OVERRIDE;
  virtual void OnMouseReleased(const ui::MouseEvent& event) OVERRIDE;
  virtual void OnMouseExited(const ui::MouseEvent& event) OVERRIDE;
  virtual bool OnKeyReleased(const ui::KeyEvent& event) OVERRIDE;
  virtual void OnGestureEvent(ui::GestureEvent* event) OVERRIDE;
  // Overridden from ui::AcceleratorTarget.
  virtual bool AcceleratorPressed(const ui::Accelerator& accelerator) OVERRIDE;
  // Notifications when to set button state to pushed/not pushed (for when the
  // popup/context menu is hidden or shown by the container).
  void SetButtonPushed();
  void SetButtonNotPushed();
  // Whether the browser action is enabled on this tab. Note that we cannot use
  // the built-in views enabled/SetEnabled because disabled views do not
  // receive drag events.
  bool IsEnabled(int tab_id) const;
  // Returns icon factory for the button.
  ExtensionActionIconFactory& icon_factory() { return icon_factory_; }
  // Returns button icon so it can be accessed during tests.
  gfx::ImageSkia GetIconForTest();
 protected:
  // Overridden from views::View:
  virtual void ViewHierarchyChanged(
      const ViewHierarchyChangedDetails& details) OVERRIDE;
 private:
  // Private destructor: lifetime is managed through Destroy() and
  // base::DeleteHelper (friend declaration below).
  virtual ~BrowserActionButton();
  // Register an extension command if the extension has an active one.
  void MaybeRegisterExtensionCommand();
  // Unregisters an extension command, if the extension has registered one and
  // it is active.
  void MaybeUnregisterExtensionCommand(bool only_if_active);
  // The Browser object this button is associated with.
  Browser* browser_;
  // The browser action this view represents. The ExtensionAction is not owned
  // by this class.
  ExtensionAction* browser_action_;
  // The extension associated with the browser action we're displaying.
  const extensions::Extension* extension_;
  // The object that will be used to get the browser action icon for us.
  // It may load the icon asynchronously (in which case the initial icon
  // returned by the factory will be transparent), so we have to observe it for
  // updates to the icon.
  ExtensionActionIconFactory icon_factory_;
  // Delegate that usually represents a container for BrowserActionView.
  BrowserActionView::Delegate* delegate_;
  // The context menu. This member is non-NULL only when the menu is shown.
  views::MenuItemView* context_menu_;
  // Used to make sure MaybeRegisterExtensionCommand() is called only once
  // from ViewHierarchyChanged().
  bool called_registered_extension_command_;
  // Tracks the notification subscriptions delivered to Observe().
  content::NotificationRegistrar registrar_;
  // The extension key binding accelerator this browser action is listening for
  // (to show the popup).
  scoped_ptr<ui::Accelerator> keybinding_;
  // Responsible for running the menu.
  scoped_ptr<views::MenuRunner> menu_runner_;
  friend class base::DeleteHelper<BrowserActionButton>;
  DISALLOW_COPY_AND_ASSIGN(BrowserActionButton);
};
#endif // CHROME_BROWSER_UI_VIEWS_BROWSER_ACTION_VIEW_H_
| 36.012605 | 80 | 0.726403 | [
"object"
] |
d97784b9226dc015f486bb2159c5baa3479e4f29 | 1,204 | h | C | sympiler/HTree.h | kobeliu85/MatRox_RU | 31a77155229d8f28ca98a0098c5c263003b881cf | [
"MIT"
] | 2 | 2019-12-11T03:08:27.000Z | 2020-01-04T03:13:16.000Z | sympiler/HTree.h | kobeliu85/MatRox_RU | 31a77155229d8f28ca98a0098c5c263003b881cf | [
"MIT"
] | null | null | null | sympiler/HTree.h | kobeliu85/MatRox_RU | 31a77155229d8f28ca98a0098c5c263003b881cf | [
"MIT"
] | null | null | null | //
// Created by Bangtian Liu on 6/8/19.
//
#ifndef PROJECT_HTREE_H
#define PROJECT_HTREE_H
#include "ClusterTree.h"
namespace Sympiler {
namespace Internal {
// Hierarchical tree over a point set: builds a cluster tree plus the
// near/far interaction block structure used for hierarchical-matrix code
// generation. NOTE(review): most member semantics below are inferred from
// names and method usage -- confirm against ClusterTree.h and the
// generator implementation.
class HTree {
public:
// Shared configuration for tree construction (see ClusterTree.h).
Setup setup;
// Underlying cluster (partition) tree over the input points.
clustertree *tree;
//Parameters for Code Generation
// Generated-code symbol names and their offset-array counterparts.
std::string DM, BM, TVM;
std::string DM_ptr, BM_ptr, TVM_ptr;
std::string lchildren;
std::string rchildren;
// Level-by-level node schedule: levelset delimits ranges within idx.
int *levelset;
int *idx;
int *wpart; // work partition per level -- TODO confirm
// Coarsened level set / index arrays -- TODO confirm exact meaning.
int *clevelset;
int *cidx;
int *tlchildren; // left child per tree node
int *trchildren; // right child per tree node
// Near-field (dense) blocks: nblockset delimits nblocks, with block
// coordinates in (nxval, nyval) -- presumably; see StructureNearBlock().
int *nblockset;
int *nblocks;
int *nxval;
int *nyval;
// Far-field (low-rank) blocks, same layout; see StructureFarBlock().
int *fblockset;
int *fblocks;
int *fxval;
int *fyval;
int *leafmap; // leaf-node lookup table -- TODO confirm
int *Dim;
// Per-node local id lists: lids plus per-node offset/length arrays.
int *lids;
int *lidsoffset;
int *lidslen;
int nlen;
// Builds the tree for points loaded from 'path'.
// @param t problem/kernel type
// @param n number of points
// @param dim spatial dimension of each point
// @param tau tolerance -- presumably the admissibility/accuracy threshold;
// confirm with config()
HTree(Type t, std::string path, int n, int dim, double tau);
void config(int n, int dim, double tau);
void LoadPoints(std::string path);
// Appends the declarations/arguments this tree contributes to generated code.
void getDecl(std::vector<Expr>& , std::vector<Argument>&);
// Build near/far block structure (block and element granularity variants).
void StructureNearBlock();
void StructureFarBlock();
void StructureNear();
void StructureFar();
// Sampling-based compression step and its persistence -- TODO confirm.
void Sampling();
void SaveSampling();
void savetree();
void savetree2disk();
~HTree();
};
}
}
#endif //PROJECT_HTREE_H
| 14.506024 | 63 | 0.634551 | [
"vector"
] |
d97dfb2f498edf90d59a408cace896cd1bd2f4e6 | 22,388 | h | C | lib/util/include/hse_util/bonsai_tree.h | leroyjvargis/hse | 586550622c5a0cb9bb5a3de5e4b6c43ce3751906 | [
"Apache-2.0"
] | null | null | null | lib/util/include/hse_util/bonsai_tree.h | leroyjvargis/hse | 586550622c5a0cb9bb5a3de5e4b6c43ce3751906 | [
"Apache-2.0"
] | 7 | 2021-04-08T16:08:13.000Z | 2021-08-17T18:20:41.000Z | lib/util/include/hse_util/bonsai_tree.h | leroyjvargis/hse | 586550622c5a0cb9bb5a3de5e4b6c43ce3751906 | [
"Apache-2.0"
] | 2 | 2021-05-18T19:35:17.000Z | 2021-07-15T21:02:13.000Z | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2015-2021 Micron Technology, Inc. All rights reserved.
*/
/*
* This file contains the type definitions and prototypes of bonsai
* tree implementation
*/
#ifndef HSE_BONSAI_TREE_H
#define HSE_BONSAI_TREE_H
#include <hse_util/arch.h>
#include <hse_util/page.h>
#include <hse_util/atomic.h>
#include <hse_util/spinlock.h>
#include <hse_util/key_util.h>
#include <hse_util/cursor_heap.h>
#include <hse_util/hse_err.h>
#include <hse_util/list.h>
#include <hse_util/rcu.h>
/* clang-format off */
/* Bonsai tree static config params...
*/
#define HSE_BT_BALANCE_THRESHOLD (2)
#define HSE_BT_SLABSZ (PAGE_SIZE * 8)
#define HSE_BT_NODESPERSLAB \
((HSE_BT_SLABSZ - sizeof(struct bonsai_slab)) / sizeof(struct bonsai_node))
/* Bonsai node RCU generation count special values...
*
* The RCU generation count is a monotonically increasing integer which marks
* the grace period epoch. When a bonsai node is rotated out of the tree we
* set its rcugen to the current epoch so that it can be freed/reclaimed in
* the next epoch when it is no longer visible to any RCU reader.
*
* HSE_BN_RCUGEN_ACTIVE node is live and possibly visible
* HSE_BN_RCUGEN_FREE node has been reclaimed and is free for reuse
* HSE_BN_RCUGEN_MAX value at which to begin rollover mitigation
*/
#define HSE_BN_RCUGEN_ACTIVE (UINT32_MAX)
#define HSE_BN_RCUGEN_FREE (UINT32_MAX - 1)
#define HSE_BN_RCUGEN_MAX (UINT32_MAX - 1024)
/* If the caller is managing the k/v memory and can ensure
* it will outlive the bonsai tree then this flag hints
* that the bonsai tree need not copy the data.
*/
#define HSE_BTF_MANAGED (0x0001)
enum bonsai_ior_code {
B_IOR_INVALID = 0,
B_IOR_INSERTED = 1,
B_IOR_REPLACED = 2,
B_IOR_ADDED_VALUE = 3,
B_IOR_REP_OR_ADD = 4,
};
#define IS_IOR_INS(_c) ((_c) == B_IOR_INSERTED)
#define IS_IOR_REP(_c) ((_c) == B_IOR_REPLACED)
#define IS_IOR_ADD(_c) ((_c) == B_IOR_ADDED_VALUE)
#define IS_IOR_REPORADD(_c) ((_c) == B_IOR_REP_OR_ADD)
#define SET_IOR_INS(_c) ((_c) = B_IOR_INSERTED)
#define SET_IOR_REP(_c) ((_c) = B_IOR_REPLACED)
#define SET_IOR_ADD(_c) ((_c) = B_IOR_ADDED_VALUE)
#define SET_IOR_REPORADD(_c) ((_c) = B_IOR_REP_OR_ADD)
/**
* struct bonsai_skey - input key argument
* @bsk_key_imm:
* @bsk_key:
* @bsk_flags:
*/
struct bonsai_skey {
struct key_immediate bsk_key_imm;
const void *bsk_key;
u32 bsk_flags;
};
/**
* struct bonsai_val - bonsai tree value node
* @bv_seqnoref: sequence number reference
* @bv_next: ptr to next value in list
* @bv_value: ptr to value data
* @bv_xlen: opaque encoded value length
* @bv_priv: user-managed ptr
* @bv_free: ptr to next value in free list bkv_freevals
* @bv_valbuf: value data (zero length if caller managed)
*
* A bonsai_val includes the value data and may be on both the bnkv_values
* list and the free list at the same time.
*
* Note that the value length (@bv_xlen) is an opaque encoding of compressed
* and uncompressed value lengths so one must use the bonsai_val_*len()
* functions to decode it.
*/
struct bonsai_val {
uintptr_t bv_seqnoref;
struct bonsai_val *bv_next;
void *bv_value;
u64 bv_xlen;
struct bonsai_val *bv_priv;
struct bonsai_val *bv_free;
char bv_valbuf[];
};
/**
 * bonsai_val_ulen() - return uncompressed value length
 * @bv: ptr to a bonsai val
 *
 * The low 32 bits of the opaque bv_xlen encoding hold the uncompressed
 * length (in bytes) of the given bonsai value.  Note that uncompressed
 * value lengths are always greater than compressed value lengths.
 */
static HSE_ALWAYS_INLINE uint
bonsai_val_ulen(const struct bonsai_val *bv)
{
    const u64 xlen = bv->bv_xlen;

    return xlen & 0xfffffffful;
}
/**
 * bonsai_val_clen() - return compressed value length
 * @bv: ptr to a bonsai val
 *
 * The high 32 bits of the opaque bv_xlen encoding hold the compressed
 * length (in bytes) of the given bonsai value, or zero if the value is
 * not compressed.  Note that compressed value lengths are always less
 * than uncompressed value lengths.
 */
static HSE_ALWAYS_INLINE uint
bonsai_val_clen(const struct bonsai_val *bv)
{
    const u64 xlen = bv->bv_xlen;

    return xlen >> 32;
}
/**
 * bonsai_val_vlen() - return in-core value length
 * @bv: ptr to a bonsai val
 *
 * Returns the number of bytes the value occupies in memory: the
 * compressed length if the value is compressed, otherwise the
 * uncompressed length.
 */
static HSE_ALWAYS_INLINE uint
bonsai_val_vlen(const struct bonsai_val *bv)
{
    const uint clen = bonsai_val_clen(bv);

    return clen ? clen : bonsai_val_ulen(bv);
}
/**
* struct bonsai_sval - input value argument
* @bsv_val: pointer to value data
* @bsv_xlen: opaque encoded value length
* @bsv_seqnoref: sequence number reference
*
* Note that the value length (@bsv_xlen) is an opaque encoding of compressed
* and uncompressed value lengths so one must use the bonsai_sval_vlen()
* function decode it.
*/
struct bonsai_sval {
void *bsv_val;
u64 bsv_xlen;
uintptr_t bsv_seqnoref;
};
/**
 * bonsai_sval_vlen() - return in-core value length
 * @bsv: pointer to a bonsai sval
 *
 * Returns the number of bytes the svalue occupies in memory: the
 * compressed length (high 32 bits of bsv_xlen) when nonzero, otherwise
 * the uncompressed length (low 32 bits).
 */
static HSE_ALWAYS_INLINE uint
bonsai_sval_vlen(const struct bonsai_sval *bsv)
{
    const uint clen = bsv->bsv_xlen >> 32;
    const uint ulen = bsv->bsv_xlen & 0xfffffffful;

    return clen ? clen : ulen;
}
#define BKV_FLAG_PTOMB 0x01
#define BKV_FLAG_TOMB_HEAD 0x02
#define BKV_FLAG_FROM_LC 0x04
/**
* struct bonsai_kv - bonsai tree key/value node
* @bkv_key_imm: c0 skidx + first few bytes of key + key length
* @bkv_key: ptr to key (typically to bkv_keybuf[])
* @bkv_flags: BKV_FLAG_*
* @bkv_voffset: offset to embedded bonsai_val
* @bkv_valcnt: user-managed length of bkv_values list
* @bkv_values: user-managed list of values
* @bkv_prev: sorted key list linkage
* @bkv_next: sorted key list linkage
* @bkv_es: user-managed element source pointer
* @bkv_free: free list linkage
* @bkv_freevals: list of freed values (may still be visible)
* @bkv_keybuf: key data (zero length if caller-managed)
*
* A bonsai_kv includes the key and a list of bonsai_val objects.
* The bonsai_kv and initial bonsai_val are allocated in one chunk
* as recorded by %bkv_allocsz.
*/
struct bonsai_kv {
struct key_immediate bkv_key_imm;
char *bkv_key;
u16 bkv_flags;
u16 bkv_voffset;
u32 bkv_valcnt;
struct bonsai_val * bkv_values;
struct bonsai_kv * bkv_prev;
struct bonsai_kv * bkv_next;
struct bonsai_val *bkv_freevals;
struct element_source *bkv_es;
struct bonsai_kv *bkv_free;
char bkv_keybuf[];
};
/**
* There is one such structure for each node in the tree.
*
* struct bonsai_node - structure representing interal nodes of tree
* @bn_key_imm: cache of first KI_DLEN_MAX bytes of bn_kv->bkv_key[]
* @bn_height: height of the node.
* @bn_rcugen: rcu grace period gen when can be reclaimed
* @bn_left: bonsai tree child node linkage
* @bn_right: bonsai tree child node linkage
* @bn_kv: ptr to a key/value node (contains full key)
*
* The %bn_kv, and %bn_key_imm fields are set during node initialization and
* never change thoughout the lifetime of the node. Hence, they need
* no special RCU handling.
*
* This structure is arranged and packed so as to consume exactly one full
* 64-byte cache line, so as to avoid false-sharing that would otherwise
* be caused by tree update operations).
*/
struct bonsai_node {
struct key_immediate bn_key_imm;
int32_t bn_height;
uint32_t bn_rcugen;
struct bonsai_node *bn_left;
struct bonsai_node *bn_right;
struct bonsai_kv *bn_kv;
} HSE_ALIGNED(64);
_Static_assert(sizeof(struct bonsai_node) == 64, "bonsai node too large");
/* struct bonsai_slab -
* @bs_rnodes: list of reclaimed nodes
* @bs_rnodec: number of reclaimed node allocations
* @bs_entryc: next entry from bs_entryv[] to allocate
* @bs_nodec: number of node allocations from entryv
* @bs_canfree: ok to free via free() if true
* @bs_next: linkage for br_gc_waitq and br_gc_readyq
* @bs_entry: linkage for bsi_freeq and br_gc_emptyq
* @bs_slabinfo: ptr to owning slabinfo record
* @bs_rcugen: rcu gen of last reclaim attempt
* @bs_vfkeys: list of freed but possibly visible keys
* @bs_entryv: fixed size bonsai node heap
*/
struct bonsai_slab {
struct bonsai_node *bs_rnodes;
uint32_t bs_rnodec;
uint32_t bs_nodec;
uint32_t bs_entryc;
uint8_t bs_canfree;
union {
struct bonsai_slab *bs_next;
struct list_head bs_entry;
};
struct bonsai_slabinfo *bs_slabinfo;
uint64_t bs_rcugen;
struct bonsai_kv *bs_vfkeys;
struct bonsai_node bs_entryv[];
};
/* struct bonsai_slabinfo -
* @bsi_slab: current slab from which to allocate entries
* @bsi_rnodec: count of recycled node allocations
* @bsi_nodec: count of node entry allocations
* @bsi_slabc: count of slab allocations
* @bsi_slab0: initial slab (embedded)
* @bsi_lock: protects bsi_freeq
* @bsi_freeq: list of slabs that have reclaimed nodes
*/
struct bonsai_slabinfo {
struct bonsai_slab *bsi_slab HSE_ACP_ALIGNED;
ulong bsi_rnodec;
ulong bsi_nodec;
uint bsi_slabc;
spinlock_t bsi_lock;
struct list_head bsi_freeq;
struct bonsai_slab *bsi_slab0;
};
/**
* @bonsai_ior_cb: callback for insert or replace
*
* @rock: per-tree rock entity passed by the client
* @code: enum bonsai_ior_code
* @kv: bonsai_kv associated with node where the new value will be
* inserted or replaced
* @val: Allocated and initialized bonsai_val element
* @old_val: bonsai_val element replaced, code must be set to B_IOR_REPLACED
*
* This callback is invoked during insert or replace and is implemented by
* the client.
*/
typedef void bonsai_ior_cb(
void * rock,
enum bonsai_ior_code *code,
struct bonsai_kv * kv,
struct bonsai_val * val,
struct bonsai_val ** old_val,
uint height);
/**
* struct bonsai_root - bonsai tree parameters
* @br_bounds: indicates bounds are established and lcp
* @br_magic: used for sanity checking
* @br_height: tree current max height
* @br_root: pointer to the root of bonsai_tree
* @br_cheap: ptr to cheap (or nil for malloc backed tree)
* @br_iorcb: client's callback for insert or replace
* @br_iorcb_arg: opaque arg for br_iorcb()
* @br_rootslab: a slab from which to allocate nodes low in the tree
* @br_oomslab: a slab to fulfill out-of-memory node allocations
* @br_slabbase: ptr to base of slabs embedded in bonsai_root
* @br_key_alloc: total number of keys ever allocated
* @br_val_alloc: total number of values ever allocated
* @br_kv: a circular k/v list, next=head, prev=tail
* @br_gc_lock: protects gc queues between user and rcu callback
* @br_gc_waitq: list of slabs waiting to get on the ready queue
* @br_gc_readyq: list of slabs waiting on rcu callback
* @br_gc_rcugen_start: next rcu grace period generation
* @br_gc_rcugen_done: last rcu grace period generation
* @br_gc_holdq: list of mostly empty slabs undergoing gc
* @br_gc_holdqc: count of slabs on holdq
* @br_gc_sched: rcu callback list node
* @br_slabinfov: vector of per-skidx slab headers
* @br_data: storage for embedded slabs
*/
struct bonsai_root {
atomic_int br_bounds HSE_ACP_ALIGNED;
uint br_magic;
struct bonsai_node *br_root;
struct cheap *br_cheap;
bonsai_ior_cb *br_ior_cb;
void *br_ior_cbarg;
struct bonsai_slabinfo *br_rootslab;
struct bonsai_slabinfo *br_oomslab;
void *br_slabbase;
/* Everything from here to the end of the structure is bzero'd
* by bn_reset().
*/
int br_height HSE_L1D_ALIGNED;
ulong br_key_alloc;
ulong br_val_alloc;
struct bonsai_kv *br_vfkeys;
struct bonsai_kv *br_rfkeys;
spinlock_t br_gc_lock HSE_L1D_ALIGNED;
struct bonsai_slab *br_gc_waitq;
struct bonsai_slab *br_gc_readyq;
atomic_int br_gc_rcugen_start HSE_L1D_ALIGNED;
struct bonsai_kv *br_gc_vfkeys;
struct list_head br_gc_holdq;
int br_gc_holdqc;
atomic_int br_gc_rcugen_done HSE_L1D_ALIGNED;
uint64_t br_gc_latstart HSE_L1D_ALIGNED;
uint64_t br_gc_latsum_gp;
uint64_t br_gc_latsum_gc;
struct rcu_head br_gc_sched_rcu;
/* There are eight per-skidx slabs, one "rootslab", and one "OOM" slab.
* The root slab is used to satisfy node allocation requests for nodes
* low in the tree, while the "OOM" slab is used to satisfy allocation
* requests that would otherwise fail because we're unable to allocate
* a new slab.
*/
struct bonsai_slabinfo br_slabinfov[8 + 2];
/* br_kv must be last as it contains a flexible array member.
*/
struct bonsai_kv br_kv;
};
/* clang-format off */
/**
 * bn_create() - Initialize tree and client info.
 * @cheap: memory allocator (or nil for a malloc-backed tree)
 * @cb:    insert or replace callback (see bonsai_ior_cb)
 * @cbarg: opaque client argument passed back to @cb on each invocation
 * @tree:  bonsai tree instance (output parameter)
 *
 * Return: 0 upon success, error code otherwise
 */
merr_t
bn_create(
    struct cheap *cheap,
    bonsai_ior_cb *cb,
    void *cbarg,
    struct bonsai_root **tree);
/**
* bn_reset() - Resets bonsai tree.
* @tree: bonsai tree instance
*/
void
bn_reset(struct bonsai_root *tree);
/**
* bn_destroy() - Destroys bonsai tree.
* @tree: bonsai tree instance
*
* Return:
*/
void
bn_destroy(struct bonsai_root *tree);
/**
* bn_insert_or_replace() - Inserts a given key, value pair into the tree
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
* @sval: bonsai_sval instance containing the value and its related info
*
* For multiple values support, the client specified callback
* (bonsai_ior_cb) is invoked with the following:
* a. Owner private pointer
* a. The bonsai_kv instance associated with the looked-up bonsai node
* b. An allocated and initialized value node.
*
* The logic to position the new value node in the bkv_values list must be
* determined by the client. For example, the client could place the new
* value at the front of bkv_values list or at the tail or at a position
* determined by the rock values stored in the value nodes.
*
* Return : 0 upon success, error code otherwise
*/
merr_t
bn_insert_or_replace(
struct bonsai_root * tree,
const struct bonsai_skey *skey,
struct bonsai_sval *sval);
/**
* bn_delete() - remove and delete the given key from the tree
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
*/
merr_t
bn_delete(struct bonsai_root *tree, const struct bonsai_skey *skey);
/**
* bn_find() - Searches for a given key in the node
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
* @kv: bonsai_kv instance containing all the values (output parameter).
* The logic to pick an appropriate value from kv->bkv_values is
* left to the client.
*
* - Caller must hold rcu_read_lock() across this call and while looking at kv.
* - Caller must not modify kv.
*
* Return :
*/
bool
bn_find(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
/**
* bn_findGE() - Searches for a given key in the node
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
* @kv: bonsai_kv instance containing all the values (output parameter).
* The logic to pick an appropriate value from kv->bkv_values is
* left to the client.
*
* - Caller must hold rcu_read_lock() across this call and while looking at kv.
* - Caller must not modify kv.
*
* Return :
*/
bool
bn_findGE(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
/**
* bn_skiptombs_GE() - Searches for a given key in the node
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
* @kv: bonsai_kv instance containing all the values (output parameter).
* The key returned is the smallest key >= skey,
* skipping contiguous tomb spans.
* The key returned may or may not be a tombstone.
* Contiguous tombspans are not strict - if one is skipped, it is valid.
* However, not all possible contiguous tombspans are recorded.
*
* - Caller must hold rcu_read_lock() across this call and while looking at kv.
* - Caller must not modify kv.
*
* Return :
*/
bool
bn_skiptombs_GE(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
bool
bn_find_pfx_GT(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
bool
bn_find_pfx_LT(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
/**
* bn_findLE() - Searches for a given key in the node
* @tree: bonsai tree instance
* @skey: bonsai_skey instance containing the key and its related info
* @kv: bonsai_kv instance containing all the values (output parameter).
* The logic to pick an appropriate value from kv->bkv_values is
* left to the client.
*
* - Caller must hold rcu_read_lock() across this call and while looking at kv.
* - Caller must not modify kv.
*
* Return :
*/
bool
bn_findLE(struct bonsai_root *tree, const struct bonsai_skey *skey, struct bonsai_kv **kv);
/**
* bn_traverse() - In-order tree traversal for debugging purposes.
* @tree: bonsai tree instance
*
* Return :
*/
void
bn_traverse(struct bonsai_root *tree);
int
bn_summary(struct bonsai_root *tree, char *buf, size_t bufsz);
/**
* bn_finalize() - prepare a fixated cb_tree for efficient traversal
* @tree: bonsai tree instance
*
* This function performs an in-order traversal of the given bonsai tree,
* producing an ordered doubly-linked list of all the key nodes (i.e.,
* the struct cb_kv nodes).
*
* This function must only be called on a bonsai tree in the quiescent
* state for which no further updates will occur.
*
* Return :
*/
void
bn_finalize(struct bonsai_root *tree);
/**
* Accessor functions for bonsai client specific fields
*/
/**
 * bn_skey_init() - initialize a bonsai_skey instance
 * @key:   ptr to key data (stored by reference, not copied)
 * @klen:  key length in bytes
 * @flags: HSE_BTF_* flags (e.g., HSE_BTF_MANAGED)
 * @index: c0 skidx folded into the key-immediate prefix
 * @skey:  bonsai_skey instance to initialize (output parameter)
 */
static inline void
bn_skey_init(const void *key, s32 klen, u32 flags, u16 index, struct bonsai_skey *skey)
{
    key_immediate_init(key, klen, index, &skey->bsk_key_imm);
    skey->bsk_key = key;
    skey->bsk_flags = flags;
}
/**
 * bn_sval_init() - populate a bonsai_sval from its constituent parts
 * @val:      ptr to value data (stored by reference, not copied)
 * @xlen:     opaque encoded value length (see bonsai_sval_vlen())
 * @seqnoref: sequence number reference
 * @sval:     bonsai_sval instance to initialize (output parameter)
 */
static inline void
bn_sval_init(void *val, u64 xlen, uintptr_t seqnoref, struct bonsai_sval *sval)
{
    sval->bsv_seqnoref = seqnoref;
    sval->bsv_xlen = xlen;
    sval->bsv_val = val;
}
/* Three-way comparator for two bonsai_kv records (qsort-style
 * signature); delegates to the full key comparison which considers
 * both the key-immediate prefix and the full key bytes.
 */
static inline s32
bn_kv_cmp(const void *lhs, const void *rhs)
{
    const struct bonsai_kv *kv_l = lhs;
    const struct bonsai_kv *kv_r = rhs;

    return key_full_cmp(&kv_l->bkv_key_imm, kv_l->bkv_key,
                        &kv_r->bkv_key_imm, kv_r->bkv_key);
}
/*
 * Max heap comparator with a caveat: A ptomb sorts before all keys w/ matching
 * prefix.
 *
 * Returns:
 *   < 0 : lhs > rhs
 *   > 0 : lhs < rhs
 *  == 0 : lhs == rhs
 *
 * Note that the return values are inverted compared to what bn_kv_cmp()
 * returns. This way heapify can be agnostic of this logic.
 */
static inline s32
bn_kv_cmp_rev(const void *lhs, const void *rhs)
{
    const struct bonsai_kv *l = lhs;
    const struct bonsai_kv *r = rhs;
    const void *r_key = r->bkv_key;
    int r_klen = key_imm_klen(&r->bkv_key_imm);
    const void *l_key = l->bkv_key;
    int l_klen = key_imm_klen(&l->bkv_key_imm);
    bool l_ptomb = !!(l->bkv_flags & BKV_FLAG_PTOMB);
    bool r_ptomb = !!(r->bkv_flags & BKV_FLAG_PTOMB);
    uint l_skidx = key_immediate_index(&l->bkv_key_imm);
    uint r_skidx = key_immediate_index(&r->bkv_key_imm);
    int rc;

    /* Order primarily by c0 skidx (reversed, per the max-heap caveat). */
    rc = r_skidx - l_skidx;
    if (rc)
        return rc;

    /* Neither or both are ptombs: plain (reversed) key comparison. */
    if (!(l_ptomb ^ r_ptomb))
        return key_inner_cmp(r_key, r_klen, l_key, l_klen);

    /* exactly one of lhs and rhs is a ptomb */
    if (l_ptomb && l_klen <= r_klen) {
        /* Compare only the ptomb-prefix bytes; on a prefix match the
         * ptomb sorts first.
         */
        rc = key_inner_cmp(r_key, l_klen, l_key, l_klen);
        if (rc == 0)
            return -1; /* l wins */
    } else if (r_ptomb && r_klen <= l_klen) {
        rc = key_inner_cmp(r_key, r_klen, l_key, r_klen);
        if (rc == 0)
            return 1; /* r wins */
    }

    /* Prefixes differ: fall back to a full (reversed) comparison. */
    return key_inner_cmp(r_key, r_klen, l_key, l_klen);
}
/**
 * bn_val_rcufree() - free given value in next rcu epoch
 * @kv: ptr to bonsai key/value object which contains %dval
 * @dval: value to be delay freed
 *
 * In reality, dval must remain visible for the life of the kv
 * since cursors and ingest might use it long after dropping
 * the rcu read lock.
 *
 * Caller must hold the bonsai tree mutex or be operating in
 * a single threaded environment.
 */
static HSE_ALWAYS_INLINE void
bn_val_rcufree(struct bonsai_kv *kv, struct bonsai_val *dval)
{
    /* Push onto the head of the singly linked bkv_freevals list;
     * the value is not reclaimed here, merely queued.
     */
    dval->bv_free = kv->bkv_freevals;
    kv->bkv_freevals = dval;
}
#endif /* HSE_BONSAI_TREE_H */
| 33.069424 | 97 | 0.671699 | [
"object",
"vector"
] |
d981ab277bf2cd0c26771560111964f010684acf | 3,996 | h | C | src/vw/Math/Statistics.h | tkeemon/visionworkbench | df59fcb31191e1fc4fecfe1901963da1614a52b1 | [
"NASA-1.3"
] | 1 | 2020-06-02T04:06:43.000Z | 2020-06-02T04:06:43.000Z | src/vw/Math/Statistics.h | tkeemon/visionworkbench | df59fcb31191e1fc4fecfe1901963da1614a52b1 | [
"NASA-1.3"
] | null | null | null | src/vw/Math/Statistics.h | tkeemon/visionworkbench | df59fcb31191e1fc4fecfe1901963da1614a52b1 | [
"NASA-1.3"
] | null | null | null | // __BEGIN_LICENSE__
// Copyright (C) 2006-2010 United States Government as represented by
// the Administrator of the National Aeronautics and Space Administration.
// All Rights Reserved.
// __END_LICENSE__
/// \file Statistics.h
///
/// Assorted useful statistical routines and functors.
///
#ifndef __MATH_STATISTICS_H__
#define __MATH_STATISTICS_H__
#include <cmath>
#include <vector>
namespace vw {
namespace math {
/// Finds the mean of a set of points.
class MeanFunctor {
bool m_homogeneous;
public:
/// If this functor is going to be applied to points in a
/// projective space (i.e. homogeneous coordinates), you should
/// set this flag to to true. The resulting mean will be in
/// the same coordinates.
MeanFunctor(bool homogeneous_points = false)
: m_homogeneous(homogeneous_points) {}
/// This function can use points in any container that supports
/// the size() and operator[] methods. The container is usually a
/// vw::Vector<>, but you could substitute other classes here as
/// well.
template <class ContainerT>
ContainerT operator() (std::vector<ContainerT> const& points) const {
ContainerT result = points[0]; // to resize container if necessary
size_t num_points = points.size();
size_t dimensions = points[0].size();
if (m_homogeneous)
dimensions--;
for (size_t i = 0; i < dimensions; ++i)
result[i] = 0;
if (m_homogeneous)
result[dimensions] = 1;
if (m_homogeneous) {
for (size_t i = 0; i < num_points; ++i)
for (size_t j = 0; j < dimensions; ++j)
result[j] += points[i][j] / points[i][dimensions];
}
else {
for (size_t i = 0; i < num_points; ++i)
for (size_t j = 0; j < dimensions; ++j)
result[j] += points[i][j];
}
for (size_t i = 0; i < dimensions; ++i)
result[i] /= num_points;
return result;
}
};
/// Finds the standard deviation of a set of points.
class StandardDeviationFunctor {
bool m_homogeneous;
public:
/// If this functor is going to be applied to points in a
/// projective space (i.e. homogeneous coordinates), you should
/// set this flag to to true. The resulting standard deviation
/// will be in the same coordinates.
StandardDeviationFunctor(bool homogeneous_points = false)
: m_homogeneous(homogeneous_points) {}
/// This function can use points in any container that supports
/// the size() and operator[] methods. The container is usually a
/// vw::Vector<>, but you could substitute other classes here as
/// well.
template <class ContainerT>
ContainerT operator() (std::vector<ContainerT> const& points) const {
ContainerT result = points[0]; // to resize container if necessary
ContainerT temp = points[0]; // to resize container if necessary
MeanFunctor mean_func(m_homogeneous);
ContainerT mean = mean_func(points);
unsigned num_points = points.size();
unsigned dimensions = points[0].size();
if (m_homogeneous)
dimensions--;
for (unsigned int i = 0; i < dimensions; ++i)
result[i] = 0;
if (m_homogeneous)
result[dimensions] = 1;
if (m_homogeneous) {
for (unsigned i = 0; i < num_points; ++i)
for (unsigned int j = 0; j < dimensions; ++j) {
temp[j] = points[i][j] / points[i][dimensions] - mean[j];
result[j] += temp[j] * temp[j];
}
}
else {
for (unsigned i = 0; i < num_points; ++i)
for (unsigned int j = 0; j < dimensions; ++j) {
temp[j] = points[i][j] - mean[j];
result[j] += temp[j] * temp[j];
}
}
for (unsigned int i = 0; i < dimensions; ++i) {
result[i] /= num_points;
result[i] = sqrt(result[i]);
}
return result;
}
};
}} // namespace vw::math
#endif // __MATH_STATISTICS_H__
| 31.968 | 74 | 0.610611 | [
"vector"
] |
d983aebaee0b4b9d4f906bde8468bcf1d26a8796 | 10,478 | h | C | Source/ThirdParty/openvdb/8.1/openvdb/tools/LevelSetFracture.h | c0rvus-ix/unreal-vdb | 70efcac08ec02c432150f9a2bcb7e4eee936d95b | [
"Apache-2.0"
] | 55 | 2022-01-11T14:06:00.000Z | 2022-03-31T22:29:09.000Z | Source/ThirdParty/openvdb/8.1/openvdb/tools/LevelSetFracture.h | c0rvus-ix/unreal-vdb | 70efcac08ec02c432150f9a2bcb7e4eee936d95b | [
"Apache-2.0"
] | 3 | 2022-03-12T20:06:53.000Z | 2022-03-27T11:49:20.000Z | Source/ThirdParty/openvdb/8.1/openvdb/tools/LevelSetFracture.h | c0rvus-ix/unreal-vdb | 70efcac08ec02c432150f9a2bcb7e4eee936d95b | [
"Apache-2.0"
] | 4 | 2022-01-15T16:27:28.000Z | 2022-03-30T03:23:08.000Z | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
//
/// @file tools/LevelSetFracture.h
///
/// @brief Divide volumes represented by level set grids into multiple,
/// disjoint pieces by intersecting them with one or more "cutter" volumes,
/// also represented by level sets.
#ifndef OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
#define OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
#include <openvdb/Grid.h>
#include <openvdb/math/Quat.h>
#include <openvdb/util/NullInterrupter.h>
#include "Composite.h" // for csgIntersectionCopy() and csgDifferenceCopy()
#include "GridTransformer.h" // for resampleToMatch()
#include "LevelSetUtil.h" // for sdfSegmentation()
#include <algorithm> // for std::max(), std::min()
#include <limits>
#include <list>
#include <vector>
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
namespace openvdb {
OPENVDB_USE_VERSION_NAMESPACE
namespace OPENVDB_VERSION_NAME {
namespace tools {
/// @brief Level set fracturing
template<class GridType, class InterruptType = util::NullInterrupter>
class LevelSetFracture
{
public:
    using Vec3sList = std::vector<Vec3s>;
    using QuatsList = std::vector<math::Quats>;
    using GridPtrList = std::list<typename GridType::Ptr>;
    using GridPtrListIter = typename GridPtrList::iterator;

    /// @brief Default constructor
    ///
    /// @param interrupter optional interrupter object
    explicit LevelSetFracture(InterruptType* interrupter = nullptr);

    /// @brief Divide volumes represented by level set grids into multiple,
    /// disjoint pieces by intersecting them with one or more "cutter" volumes,
    /// also represented by level sets.
    /// @details If desired, the process can be applied iteratively, so that
    /// fragments created with one cutter are subdivided by other cutters.
    ///
    /// @note The incoming @a grids and the @a cutter are required to have matching
    /// transforms and narrow band widths!
    ///
    /// @param grids          list of grids to fracture. The residuals of the
    ///                       fractured grids will remain in this list
    /// @param cutter         a level set grid to use as the cutter object
    /// @param segment        toggle to split disjoint fragments into their own grids
    /// @param points         optional list of world space points at which to instance the
    ///                       cutter object (if null, use the cutter's current position only)
    /// @param rotations      optional list of custom rotations for each cutter instance
    /// @param cutterOverlap  toggle to allow consecutive cutter instances to fracture
    ///                       previously generated fragments
    void fracture(GridPtrList& grids, const GridType& cutter, bool segment = false,
        const Vec3sList* points = nullptr, const QuatsList* rotations = nullptr,
        bool cutterOverlap = true);

    /// Return a list of new fragments, not including the residuals from the input grids.
    GridPtrList& fragments() { return mFragments; }

    /// Remove all elements from the fragment list.
    void clear() { mFragments.clear(); }

private:
    // disallow copy by assignment
    void operator=(const LevelSetFracture&) {}

    // True if an interrupter is installed and it reports interruption
    // at the given completion percentage.
    bool wasInterrupted(int percent = -1) const {
        return mInterrupter && mInterrupter->wasInterrupted(percent);
    }

    bool isValidFragment(GridType&) const;
    void segmentFragments(GridPtrList&) const;
    void process(GridPtrList&, const GridType& cutter);

    InterruptType* mInterrupter; // raw pointer; never freed by this class
    GridPtrList mFragments;      // fragments produced by fracture(), excluding residuals
};
////////////////////////////////////////
/// @cond OPENVDB_DOCS_INTERNAL
// Internal utility objects and implementation details
namespace level_set_fracture_internal {
// tbb::parallel_reduce body that computes the minimum and maximum voxel
// value over a set of leaf nodes.  Note that it scans every entry of each
// leaf buffer, i.e. both active and inactive voxel values.
template<typename LeafNodeType>
struct FindMinMaxVoxelValue {

    using ValueType = typename LeafNodeType::ValueType;

    FindMinMaxVoxelValue(const std::vector<const LeafNodeType*>& nodes)
        : minValue(std::numeric_limits<ValueType>::max())
        , maxValue(-minValue)
        , mNodes(nodes.empty() ? nullptr : &nodes.front())
    {
    }

    // Splitting constructor, invoked by tbb::parallel_reduce; shares the
    // node array and restarts the min/max accumulators.
    FindMinMaxVoxelValue(FindMinMaxVoxelValue& rhs, tbb::split)
        : minValue(std::numeric_limits<ValueType>::max())
        , maxValue(-minValue)
        , mNodes(rhs.mNodes)
    {
    }

    void operator()(const tbb::blocked_range<size_t>& range) {
        for (size_t n = range.begin(), N = range.end(); n < N; ++n) {
            const ValueType* data = mNodes[n]->buffer().data();
            for (Index i = 0; i < LeafNodeType::SIZE; ++i) {
                minValue = std::min(minValue, data[i]);
                maxValue = std::max(maxValue, data[i]);
            }
        }
    }

    // Merge the results of two subranges.
    void join(FindMinMaxVoxelValue& rhs) {
        minValue = std::min(minValue, rhs.minValue);
        maxValue = std::max(maxValue, rhs.maxValue);
    }

    ValueType minValue, maxValue;

    LeafNodeType const * const * const mNodes;
}; // struct FindMinMaxVoxelValue
} // namespace level_set_fracture_internal
/// @endcond
////////////////////////////////////////
// Construct with an optional interrupter (may be nullptr); the fragment
// list starts out empty.
template<class GridType, class InterruptType>
LevelSetFracture<GridType, InterruptType>::LevelSetFracture(InterruptType* interrupter)
    : mInterrupter(interrupter)
    , mFragments()
{
}
// Fracture each grid in @a grids (and, when cutterOverlap is enabled,
// previously generated fragments) with the given cutter, instanced at the
// supplied points/rotations.  New pieces accumulate in mFragments; the
// residuals stay in @a grids.
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::fracture(GridPtrList& grids, const GridType& cutter,
    bool segmentation, const Vec3sList* points, const QuatsList* rotations, bool cutterOverlap)
{
    // We can process all incoming grids with the same cutter instance,
    // this optimization is enabled by the requirement of having matching
    // transforms between all incoming grids and the cutter object.
    if (points && points->size() != 0) {

        math::Transform::Ptr originalCutterTransform = cutter.transform().copy();
        // Shallow copy shares the cutter's voxel data; each instance only
        // needs its own transform.
        GridType cutterGrid(*const_cast<GridType*>(&cutter), ShallowCopy());

        const bool hasInstanceRotations =
            points && rotations && points->size() == rotations->size();

        // for each instance point..
        for (size_t p = 0, P = points->size(); p < P; ++p) {

            int percent = int((float(p) / float(P)) * 100.0);
            if (wasInterrupted(percent)) break;

            GridType instCutterGrid;
            instCutterGrid.setTransform(originalCutterTransform->copy());

            math::Transform::Ptr xform = originalCutterTransform->copy();

            if (hasInstanceRotations) {
                // Apply the per-instance rotation (as XYZ Euler angles)
                // before translating the cutter to the instance point.
                const Vec3s& rot = (*rotations)[p].eulerAngles(math::XYZ_ROTATION);
                xform->preRotate(rot[0], math::X_AXIS);
                xform->preRotate(rot[1], math::Y_AXIS);
                xform->preRotate(rot[2], math::Z_AXIS);
                xform->postTranslate((*points)[p]);
            } else {
                xform->postTranslate((*points)[p]);
            }

            cutterGrid.setTransform(xform);

            // Since there is no scaling, use the generic resampler instead of
            // the more expensive level set rebuild tool.  Rotated instances
            // need interpolation (BoxSampler); pure translations can use the
            // cheaper PointSampler.
            if (mInterrupter != nullptr) {

                if (hasInstanceRotations) {
                    doResampleToMatch<BoxSampler>(cutterGrid, instCutterGrid, *mInterrupter);
                } else {
                    doResampleToMatch<PointSampler>(cutterGrid, instCutterGrid, *mInterrupter);
                }
            } else {
                util::NullInterrupter interrupter;

                if (hasInstanceRotations) {
                    doResampleToMatch<BoxSampler>(cutterGrid, instCutterGrid, interrupter);
                } else {
                    doResampleToMatch<PointSampler>(cutterGrid, instCutterGrid, interrupter);
                }
            }

            if (wasInterrupted(percent)) break;

            // Optionally let this cutter instance further subdivide fragments
            // produced by earlier instances, then cut the input grids.
            if (cutterOverlap && !mFragments.empty()) process(mFragments, instCutterGrid);
            process(grids, instCutterGrid);
        }
    } else {
        // use cutter in place
        if (cutterOverlap && !mFragments.empty()) process(mFragments, cutter);
        process(grids, cutter);
    }

    // Optionally split disjoint pieces into their own grids.
    if (segmentation) {
        segmentFragments(mFragments);
        segmentFragments(grids);
    }
}
// Heuristic check that a candidate fragment is a meaningful level set.
// Small grids (fewer than 9 leaf nodes) are inspected more closely and
// rejected if they have fewer than 27 active voxels or if all of their
// voxel values share the same sign (no zero crossing, hence no surface).
// Larger grids are accepted without further checks.
template<class GridType, class InterruptType>
bool
LevelSetFracture<GridType, InterruptType>::isValidFragment(GridType& grid) const
{
    using LeafNodeType = typename GridType::TreeType::LeafNodeType;

    if (grid.tree().leafCount() < 9) {

        std::vector<const LeafNodeType*> nodes;
        grid.tree().getNodes(nodes);

        Index64 activeVoxelCount = 0;
        for (size_t n = 0, N = nodes.size(); n < N; ++n) {
            activeVoxelCount += nodes[n]->onVoxelCount();
        }

        // Too few active voxels to represent a useful fragment.
        if (activeVoxelCount < 27) return false;

        // Reject fragments whose values are uniformly negative (all
        // inside) or uniformly non-negative (all outside).
        level_set_fracture_internal::FindMinMaxVoxelValue<LeafNodeType> op(nodes);
        tbb::parallel_reduce(tbb::blocked_range<size_t>(0, nodes.size()), op);

        if ((op.minValue < 0) == (op.maxValue < 0)) return false;
    }

    return true;
}
// Replace each grid in @a grids with the disjoint level set segments
// it contains, so every connected component gets its own grid.
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::segmentFragments(GridPtrList& grids) const
{
    GridPtrList segmented;

    for (auto& grid : grids) {
        std::vector<typename GridType::Ptr> pieces;
        segmentSDF(*grid, pieces);

        for (auto& piece : pieces) {
            segmented.push_back(piece);
        }
    }

    grids.swap(segmented);
}
template<class GridType, class InterruptType>
void
LevelSetFracture<GridType, InterruptType>::process(
    GridPtrList& grids, const GridType& cutter)
{
    using GridPtr = typename GridType::Ptr;
    GridPtrList cutPieces;

    for (GridPtrListIter it = grids.begin(); it != grids.end(); ++it) {
        if (wasInterrupted()) break;

        GridPtr& grid = *it;

        // Piece carved out of the grid by the cutter.
        GridPtr fragment = csgIntersectionCopy(*grid, cutter);
        if (!isValidFragment(*fragment)) continue;

        // What remains of the grid after the cut.
        GridPtr residual = csgDifferenceCopy(*grid, cutter);
        if (!isValidFragment(*residual)) continue;

        cutPieces.push_back(fragment);

        // Replace the input grid's tree with the residual in place.
        grid->tree().clear();
        grid->tree().merge(residual->tree());
    }

    if (!cutPieces.empty()) {
        mFragments.splice(mFragments.end(), cutPieces);
    }
}
} // namespace tools
} // namespace OPENVDB_VERSION_NAME
} // namespace openvdb
#endif // OPENVDB_TOOLS_LEVELSETFRACTURE_HAS_BEEN_INCLUDED
| 32.74375 | 95 | 0.646688 | [
"object",
"vector",
"transform"
] |
d983bcab3609319b83c809c827440e1afe632c57 | 48,321 | c | C | libraries/mbed/targets/hal/TARGET_STM/TARGET_STM32F4/serial_api.c | OmarValdez/mbed | bd78f98496fab2f02162521d7e279d7bd0f0840e | [
"Apache-2.0"
] | null | null | null | libraries/mbed/targets/hal/TARGET_STM/TARGET_STM32F4/serial_api.c | OmarValdez/mbed | bd78f98496fab2f02162521d7e279d7bd0f0840e | [
"Apache-2.0"
] | null | null | null | libraries/mbed/targets/hal/TARGET_STM/TARGET_STM32F4/serial_api.c | OmarValdez/mbed | bd78f98496fab2f02162521d7e279d7bd0f0840e | [
"Apache-2.0"
] | null | null | null | /* mbed Microcontroller Library
*******************************************************************************
* Copyright (c) 2015, STMicroelectronics
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
*/
#ifdef YOTTA_CFG_MBED_OS
#include "target_config.h"
#include "uvisor-lib/uvisor-lib.h"
#include "mbed-drivers/mbed_assert.h"
#else
#include "mbed_assert.h"
#endif
#include "serial_api.h"
#if DEVICE_SERIAL
#include "cmsis.h"
#include "pinmap.h"
#include <string.h>
#include "PeripheralPins.h"
#ifdef YOTTA_CFG_MBED_OS
#include "mbed-drivers/mbed_error.h"
#else
#include "mbed_error.h"
#endif
#define DEBUG_STDIO 0
#ifndef DEBUG_STDIO
# define DEBUG_STDIO 0
#endif
#if DEBUG_STDIO
# include <stdio.h>
# define DEBUG_PRINTF(...) do { printf(__VA_ARGS__); } while(0)
#else
# define DEBUG_PRINTF(...) {}
#endif
#define UART_NUM (8)
#define UART_STATE_RX_ACTIVE 0x20
#define UART_STATE_TX_ACTIVE 0x10
#if DEVICE_SERIAL_ASYNCH_DMA
static const uint32_t DMA_UartRx_Channel[UART_NUM] = {DMA_CHANNEL_4, DMA_CHANNEL_4, DMA_CHANNEL_4, DMA_CHANNEL_4, \
DMA_CHANNEL_4, DMA_CHANNEL_5, DMA_CHANNEL_5, DMA_CHANNEL_5};
DMA_Stream_TypeDef *DMA_UartRx_Stream[UART_NUM] = {
DMA2_Stream5, DMA1_Stream5, DMA1_Stream1, \
DMA1_Stream2, DMA1_Stream0, DMA2_Stream1, \
DMA1_Stream3, DMA1_Stream6
};
static const uint32_t DMA_UartTx_Channel[UART_NUM] = {DMA_CHANNEL_4, DMA_CHANNEL_4, DMA_CHANNEL_4, DMA_CHANNEL_4, \
DMA_CHANNEL_4, DMA_CHANNEL_5, DMA_CHANNEL_5, DMA_CHANNEL_5};
DMA_Stream_TypeDef *DMA_UartTx_Stream[UART_NUM] = {
DMA2_Stream7, DMA1_Stream6, DMA1_Stream3, \
DMA1_Stream4, DMA1_Stream7, DMA2_Stream6,\
DMA1_Stream1, DMA1_Stream0
};
DMA_HandleTypeDef DmaHandle;
#endif
uint32_t serial_irq_ids[UART_NUM] = {0, 0, 0, 0, 0, 0, 0, 0};
static uart_irq_handler irq_handler;
static DMA_HandleTypeDef DmaTxHandle[UART_NUM];
static DMA_HandleTypeDef DmaRxHandle[UART_NUM];
static UART_HandleTypeDef UartHandle[UART_NUM];
int stdio_uart_inited = 0;
serial_t stdio_uart;
#if DEVICE_SERIAL_ASYNCH
#define SERIAL_OBJ(X) (obj->serial.X)
#else
#define SERIAL_OBJ(X) (obj->X)
#endif
/* Configure and (re)initialize the HAL UART handle for this serial object.
 * Baudrate/format come from the fields previously stored on the object;
 * TX/RX mode is selected from which pins are wired (NC disables a direction).
 * With DEVICE_SERIAL_ASYNCH_DMA, the matching TX/RX DMA streams are set up
 * and cross-linked with the UART handle. Calls error() if HAL_UART_Init
 * rejects the configuration. */
static void init_uart(serial_t *obj, UARTName instance)
{
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
    handle->Instance = (USART_TypeDef *)instance;
    handle->Init.BaudRate = SERIAL_OBJ(baudrate);
    handle->Init.WordLength = SERIAL_OBJ(databits);
    handle->Init.StopBits = SERIAL_OBJ(stopbits);
    handle->Init.Parity = SERIAL_OBJ(parity);
#if DEVICE_SERIAL_FC
    handle->Init.HwFlowCtl = SERIAL_OBJ(hw_flow_ctl);
#else
    handle->Init.HwFlowCtl = UART_HWCONTROL_NONE;
#endif
    handle->Init.OverSampling = UART_OVERSAMPLING_16;
    handle->TxXferCount = 0;
    handle->RxXferCount = 0;
    // Direction: a not-connected RX pin means TX-only, and vice versa.
    if (SERIAL_OBJ(pin_rx) == NC) {
        handle->Init.Mode = UART_MODE_TX;
    } else if (SERIAL_OBJ(pin_tx) == NC) {
        handle->Init.Mode = UART_MODE_RX;
    } else {
        handle->Init.Mode = UART_MODE_TX_RX;
    }
#ifdef YOTTA_CFG_MBED_OS
    // The stdio console gets the build-configured default baudrate.
    if (SERIAL_OBJ(pin_tx) == STDIO_UART_TX && SERIAL_OBJ(pin_rx) == STDIO_UART_RX) {
        handle->Init.BaudRate = YOTTA_CFG_MBED_OS_STDIO_DEFAULT_BAUD;
    }
#endif
#if DEVICE_SERIAL_ASYNCH_DMA
    if (SERIAL_OBJ(pin_tx) != NC) {
        // set DMA in the UartHandle
        DMA_HandleTypeDef *hdma_tx = &DmaTxHandle[SERIAL_OBJ(index)];
        /* Configure the DMA handler for Transmission process
           (stream/channel taken from the per-UART lookup tables) */
        hdma_tx->Instance = (DMA_Stream_TypeDef *)DMA_UartTx_Stream[SERIAL_OBJ(index)];
        hdma_tx->Init.Channel = DMA_UartTx_Channel[SERIAL_OBJ(index)];
        hdma_tx->Init.Direction = DMA_MEMORY_TO_PERIPH;
        hdma_tx->Init.PeriphInc = DMA_PINC_DISABLE;
        hdma_tx->Init.MemInc = DMA_MINC_ENABLE;
        hdma_tx->Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE;
        hdma_tx->Init.MemDataAlignment = DMA_MDATAALIGN_BYTE;
        hdma_tx->Init.Mode = DMA_NORMAL;
        hdma_tx->Init.Priority = DMA_PRIORITY_LOW;
        hdma_tx->Init.FIFOMode = DMA_FIFOMODE_DISABLE;
        hdma_tx->Init.FIFOThreshold = DMA_FIFO_THRESHOLD_FULL;
        hdma_tx->Init.MemBurst = DMA_MBURST_INC4;
        hdma_tx->Init.PeriphBurst = DMA_PBURST_INC4;
        HAL_DMA_Init(hdma_tx);
        /* Associate the initialized DMA handle to the UART handle */
        handle->hdmatx = hdma_tx;
        hdma_tx->Parent = handle;
    }
    if (SERIAL_OBJ(pin_rx) != NC) {
        /* Configure the DMA handler for reception process
           (RX gets higher priority than TX to avoid overruns --
           NOTE(review): presumed rationale, confirm) */
        DMA_HandleTypeDef *hdma_rx = &DmaRxHandle[SERIAL_OBJ(index)];
        hdma_rx->Instance = (DMA_Stream_TypeDef *)DMA_UartRx_Stream[SERIAL_OBJ(index)];
        hdma_rx->Init.Channel = DMA_UartRx_Channel[SERIAL_OBJ(index)];
        hdma_rx->Init.Direction = DMA_PERIPH_TO_MEMORY;
        hdma_rx->Init.PeriphInc = DMA_PINC_DISABLE;
        hdma_rx->Init.MemInc = DMA_MINC_ENABLE;
        hdma_rx->Init.PeriphDataAlignment = DMA_PDATAALIGN_BYTE;
        hdma_rx->Init.MemDataAlignment = DMA_MDATAALIGN_BYTE;
        hdma_rx->Init.Mode = DMA_NORMAL;
        hdma_rx->Init.Priority = DMA_PRIORITY_HIGH;
        hdma_rx->Init.FIFOMode = DMA_FIFOMODE_DISABLE;
        hdma_rx->Init.FIFOThreshold = DMA_FIFO_THRESHOLD_FULL;
        hdma_rx->Init.MemBurst = DMA_MBURST_INC4;
        hdma_rx->Init.PeriphBurst = DMA_PBURST_INC4;
        HAL_DMA_Init(hdma_rx);
        /* Associate the initialized DMA handle to the UART handle */
        handle->hdmarx = hdma_rx;
        hdma_rx->Parent = handle;
    }
#endif
    if (HAL_UART_Init(handle) != HAL_OK) {
        error("Cannot initialize UART\n");
    }
}
/** Initialize the serial peripheral attached to the given TX/RX pins.
 *
 * Resolves the U(S)ART instance from the pin map, resets the peripheral and
 * enables its clock (plus the DMA controller clock when asynchronous DMA is
 * enabled), configures the pins, and applies the default 9600-8-N-1 format.
 *
 * BUGFIX: the UART_7 case previously reset USART8, and the UART_4/UART_5/
 * UART_8 cases used __USART4/5/8_* reset macros that do not exist for these
 * peripherals; the names now match the __UARTx_* family used by serial_free().
 *
 * @param obj The serial object to initialize.
 * @param tx  Transmit pin (may be NC for an RX-only port).
 * @param rx  Receive pin (may be NC for a TX-only port).
 */
void serial_init(serial_t *obj, PinName tx, PinName rx)
{
    // Determine the UART to use (UART_1, UART_2, ...)
    UARTName uart_tx = (UARTName)pinmap_peripheral(tx, PinMap_UART_TX);
    UARTName uart_rx = (UARTName)pinmap_peripheral(rx, PinMap_UART_RX);
    // Get the peripheral name (UART_1, UART_2, ...) from the pin and assign it to the object
    UARTName instance = (UARTName)pinmap_merge(uart_tx, uart_rx);
    MBED_ASSERT(instance != (UARTName)NC);
    // Reset the peripheral, then enable its (and its DMA controller's) clock
    switch (instance) {
        case UART_1:
            __USART1_FORCE_RESET();
            __USART1_RELEASE_RESET();
            __HAL_RCC_USART1_CLK_ENABLE();
            SERIAL_OBJ(index) = 0;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA2_CLK_ENABLE();
#endif
            break;
        case UART_2:
            __USART2_FORCE_RESET();
            __USART2_RELEASE_RESET();
            __HAL_RCC_USART2_CLK_ENABLE();
            SERIAL_OBJ(index) = 1;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#if defined(USART3_BASE)
        case UART_3:
            __USART3_FORCE_RESET();
            __USART3_RELEASE_RESET();
            __HAL_RCC_USART3_CLK_ENABLE();
            SERIAL_OBJ(index) = 2;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#endif
#if defined(UART4_BASE)
        case UART_4:
            // BUGFIX: was __USART4_* (no such peripheral); UART4 macros as in serial_free()
            __UART4_FORCE_RESET();
            __UART4_RELEASE_RESET();
            __HAL_RCC_UART4_CLK_ENABLE();
            SERIAL_OBJ(index) = 3;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#endif
#if defined(UART5_BASE)
        case UART_5:
            // BUGFIX: was __USART5_* (no such peripheral)
            __UART5_FORCE_RESET();
            __UART5_RELEASE_RESET();
            __HAL_RCC_UART5_CLK_ENABLE();
            SERIAL_OBJ(index) = 4;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#endif
#if defined(USART6_BASE)
        case UART_6:
            __USART6_FORCE_RESET();
            __USART6_RELEASE_RESET();
            __HAL_RCC_USART6_CLK_ENABLE();
            SERIAL_OBJ(index) = 5;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA2_CLK_ENABLE();
#endif
            break;
#endif
#if defined(UART7_BASE)
        case UART_7:
            // BUGFIX: was __USART8_FORCE/RELEASE_RESET (copy-paste from UART_8)
            __UART7_FORCE_RESET();
            __UART7_RELEASE_RESET();
            __HAL_RCC_UART7_CLK_ENABLE();
            SERIAL_OBJ(index) = 6;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#endif
#if defined(UART8_BASE)
        case UART_8:
            // BUGFIX: was __USART8_* (no such peripheral)
            __UART8_FORCE_RESET();
            __UART8_RELEASE_RESET();
            __HAL_RCC_UART8_CLK_ENABLE();
            SERIAL_OBJ(index) = 7;
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_ENABLE();
#endif
            break;
#endif
    }
    // Configure the UART pins
    pinmap_pinout(tx, PinMap_UART_TX);
    pinmap_pinout(rx, PinMap_UART_RX);
    if (tx != NC) {
        pin_mode(tx, PullUp);
    }
    if (rx != NC) {
        pin_mode(rx, PullUp);
    }
    // Configure UART with the default 9600-8-N-1 format
    SERIAL_OBJ(baudrate) = 9600;
    SERIAL_OBJ(databits) = UART_WORDLENGTH_8B;
    SERIAL_OBJ(stopbits) = UART_STOPBITS_1;
    SERIAL_OBJ(parity) = UART_PARITY_NONE;
    SERIAL_OBJ(pin_tx) = tx;
    SERIAL_OBJ(pin_rx) = rx;
    init_uart(obj, instance);
#ifndef YOTTA_CFG_MBED_OS
    // For stdio management: remember the console UART so retarget code can reuse it
    if ((int)(UartHandle[SERIAL_OBJ(index)].Instance) == STDIO_UART) {
        stdio_uart_inited = 1;
        memcpy(&stdio_uart, obj, sizeof(serial_t));
    }
#endif
    DEBUG_PRINTF("UART%u: Init\n", obj->serial.module+1);
}
/** Release the serial peripheral: reset it, gate its clock, and return the
 * pins to high-impedance inputs. The interrupt id is cleared so a pending
 * IRQ can no longer reach the user handler.
 * NOTE(review): only some cases disable a DMA controller clock (and UART1's
 * DMA2 clock, enabled in serial_init, is never disabled here) -- confirm
 * whether this asymmetry is intentional (other peripherals may share the
 * controller). */
void serial_free(serial_t *obj)
{
    // Reset UART and disable clock
    switch (SERIAL_OBJ(index)) {
        case 0:
            __USART1_FORCE_RESET();
            __USART1_RELEASE_RESET();
            __USART1_CLK_DISABLE();
            break;
        case 1:
            __USART2_FORCE_RESET();
            __USART2_RELEASE_RESET();
            __USART2_CLK_DISABLE();
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_DISABLE();
#endif
            break;
#if defined(USART3_BASE)
        case 2:
            __USART3_FORCE_RESET();
            __USART3_RELEASE_RESET();
            __USART3_CLK_DISABLE();
            break;
#endif
#if defined(UART4_BASE)
        case 3:
            __UART4_FORCE_RESET();
            __UART4_RELEASE_RESET();
            __UART4_CLK_DISABLE();
#if DEVICE_SERIAL_ASYNCH_DMA
            __HAL_RCC_DMA1_CLK_DISABLE();
#endif
            break;
#endif
#if defined(UART5_BASE)
        case 4:
            __UART5_FORCE_RESET();
            __UART5_RELEASE_RESET();
            __UART5_CLK_DISABLE();
            break;
#endif
#if defined(USART6_BASE)
        case 5:
            __USART6_FORCE_RESET();
            __USART6_RELEASE_RESET();
            __USART6_CLK_DISABLE();
            break;
#endif
#if defined(UART7_BASE)
        case 6:
            __UART7_FORCE_RESET();
            __UART7_RELEASE_RESET();
            __UART7_CLK_DISABLE();
            break;
#endif
#if defined(UART8_BASE)
        case 7:
            __UART8_FORCE_RESET();
            __UART8_RELEASE_RESET();
            __UART8_CLK_DISABLE();
            break;
#endif
    }
    // Configure GPIOs back to plain inputs with no pull
    pin_function(SERIAL_OBJ(pin_tx), STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, 0));
    pin_function(SERIAL_OBJ(pin_rx), STM_PIN_DATA(STM_MODE_INPUT, GPIO_NOPULL, 0));
    // Detach the user interrupt id for this instance
    serial_irq_ids[SERIAL_OBJ(index)] = 0;
    DEBUG_PRINTF("UART%u: Free\n", obj->serial.module+1);
}
/** Change the baud rate of an already-initialized UART.
 * Stores the new rate on the object and reinitializes the HAL handle;
 * calls error() if the HAL rejects the configuration. */
void serial_baud(serial_t *obj, int baudrate)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];

    handle->Init.BaudRate = baudrate;
    SERIAL_OBJ(baudrate) = baudrate;

    if (HAL_UART_Init(handle) != HAL_OK) {
        error("Cannot initialize UART\n");
    }
    DEBUG_PRINTF("UART%u: Baudrate: %u\n", obj->serial.module+1, baudrate);
}
/** Change word length, parity and stop bits of an initialized UART.
 * Only 8/9 data bits and 1/2 stop bits exist on this hardware; forced-0 and
 * forced-1 parity are unsupported and map to no parity. */
void serial_format(serial_t *obj, int data_bits, SerialParity parity, int stop_bits)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];

    // Word length: 9 selects 9-bit frames, anything else 8-bit.
    const uint32_t wordlen = (data_bits == 9) ? UART_WORDLENGTH_9B : UART_WORDLENGTH_8B;
    SERIAL_OBJ(databits) = wordlen;
    handle->Init.WordLength = wordlen;

    // Parity: odd/even pass through; everything else (none, forced) -> none.
    uint32_t par;
    if (parity == ParityOdd) {
        par = UART_PARITY_ODD;
    } else if (parity == ParityEven) {
        par = UART_PARITY_EVEN;
    } else {
        par = UART_PARITY_NONE;
    }
    SERIAL_OBJ(parity) = par;
    handle->Init.Parity = par;

    // Stop bits: 2 selects two stop bits, anything else one.
    const uint32_t stop = (stop_bits == 2) ? UART_STOPBITS_2 : UART_STOPBITS_1;
    SERIAL_OBJ(stopbits) = stop;
    handle->Init.StopBits = stop;

    if (HAL_UART_Init(handle) != HAL_OK) {
        error("Cannot initialize UART\n");
    }
    DEBUG_PRINTF("UART%u: Format: %u, %u, %u\n", obj->serial.module+1, data_bits, parity, stop_bits);
}
/******************************************************************************
* INTERRUPTS HANDLING
******************************************************************************/
/* Common USART IRQ dispatcher for all instances: forwards transmit-complete
 * and receive-not-empty events to the registered user handler and clears the
 * hardware flags. An overrun error is cleared by discarding the data
 * register contents.
 * @param id zero-based UART index (see SERIAL_OBJ(index)). */
static void uart_irq(int id)
{
    UART_HandleTypeDef *handle = &UartHandle[id];
    if (serial_irq_ids[id] != 0) {
        if (__HAL_UART_GET_FLAG(handle, UART_FLAG_TC) != RESET) {
            irq_handler(serial_irq_ids[id], TxIrq);
            __HAL_UART_CLEAR_FLAG(handle, UART_FLAG_TC);
        }
        if (__HAL_UART_GET_FLAG(handle, UART_FLAG_RXNE) != RESET) {
            irq_handler(serial_irq_ids[id], RxIrq);
            __HAL_UART_CLEAR_FLAG(handle, UART_FLAG_RXNE);
        }
        if (__HAL_UART_GET_FLAG(handle, UART_FLAG_ORE) != RESET) {
            // Overrun: reading DR (after SR) clears ORE. The byte is
            // discarded on purpose; BUGFIX: the read is kept volatile and
            // explicitly voided so it is neither optimized away nor flagged
            // as an unused variable.
            volatile uint32_t dummy = handle->Instance->DR;
            (void)dummy;
        }
    }
}
#if DEVICE_SERIAL_ASYNCH_DMA
/* Common DMA stream IRQ dispatcher: forwards DMA transfer-complete events on
 * a UART's RX/TX streams to the registered user handler.
 *
 * NOTE(review): __HAL_DMA_GET_TC_FLAG_INDEX() expands to a flag *index*
 * constant derived from DmaHandle.Instance, not a flag state, so every
 * "!= RESET" comparison below looks always-true -- presumably
 * __HAL_DMA_GET_FLAG(&DmaHandle, index) was intended; confirm against the
 * STM32F4 HAL DMA macros.
 * NOTE(review): the first if-block runs before DmaHandle.Instance is
 * assigned from 'name', so it evaluates whatever Instance was left by a
 * previous call (NULL initially), and the second block may then invoke the
 * user handler a second time -- verify the intended control flow. */
static void dma_irq(DMAName name, int id, SerialIrq txrxirq)
{
    if (serial_irq_ids[id] != 0) {
        if (txrxirq == RxIrq) {
            if (__HAL_DMA_GET_TC_FLAG_INDEX(&DmaHandle) != RESET) {
                irq_handler(serial_irq_ids[id], RxIrq);
                __HAL_DMA_CLEAR_FLAG(&DmaHandle, DMA_FLAG_TCIF2_6);
            }
        } else {
            if (__HAL_DMA_GET_TC_FLAG_INDEX(&DmaHandle) != RESET) {
                irq_handler(serial_irq_ids[id], TxIrq);
                __HAL_DMA_CLEAR_FLAG(&DmaHandle, DMA_FLAG_TCIF0_4);
            }
        }
    }
    // 'name' is a DMA controller identifier cast to a stream pointer here.
    DmaHandle.Instance = (DMA_Stream_TypeDef *)name;
    if (serial_irq_ids[id] != 0) {
        if (__HAL_DMA_GET_TC_FLAG_INDEX(&DmaHandle) != RESET) {
            irq_handler(serial_irq_ids[id], TxIrq);
            __HAL_DMA_CLEAR_FLAG(&DmaHandle, DMA_FLAG_TCIF0_4);
        }
        if (__HAL_DMA_GET_TC_FLAG_INDEX(&DmaHandle) != RESET) {
            irq_handler(serial_irq_ids[id], RxIrq);
            __HAL_DMA_CLEAR_FLAG(&DmaHandle, DMA_FLAG_TCIF2_6);
        }
    }
}
#endif
/* Per-instance USART IRQ trampolines: each forwards to uart_irq() with the
 * zero-based index of the corresponding peripheral. */
static void uart1_irq(void)
{
    uart_irq(0);
}
static void uart2_irq(void)
{
    uart_irq(1);
}
#if defined(USART3_BASE)
static void uart3_irq(void)
{
    uart_irq(2);
}
#endif
#if defined(UART4_BASE)
static void uart4_irq(void)
{
    uart_irq(3);
}
#endif
#if DEVICE_SERIAL_ASYNCH_DMA
/* DMA stream IRQ trampolines: each forwards to dma_irq() with the DMA
 * controller, the zero-based UART index, and the transfer direction.
 * The stream-to-UART mapping matches the DMA_UartRx_Stream /
 * DMA_UartTx_Stream tables at the top of this file. */
#if defined(UART5_BASE)
static void dma1_stream0_irq(void)
{
    dma_irq(DMA_1, 4, RxIrq); // uart5_rx
}
#endif
#if defined(USART3_BASE)
static void dma1_stream1_irq(void)
{
    dma_irq(DMA_1, 2, RxIrq); // uart3_rx
}
#endif
#if defined(UART4_BASE)
static void dma1_stream2_irq(void)
{
    dma_irq(DMA_1, 3, RxIrq); // uart4_rx
}
#endif
#if defined(USART3_BASE)
static void dma1_stream3_irq(void)
{
    dma_irq(DMA_1, 2, TxIrq); // uart3_tx
}
#endif
#if defined(UART4_BASE)
static void dma1_stream4_irq(void)
{
    dma_irq(DMA_1, 3, TxIrq); // uart4_tx
}
#endif
static void dma1_stream5_irq(void)
{
    dma_irq(DMA_1, 1, RxIrq); // uart2_rx
}
static void dma1_stream6_irq(void)
{
    dma_irq(DMA_1, 1, TxIrq); // uart2_tx
}
#if defined(UART5_BASE)
static void dma1_stream7_irq(void)
{
    dma_irq(DMA_1, 4, TxIrq); // uart5_tx
}
#endif
#if defined(USART6_BASE)
static void dma2_stream1_irq(void)
{
    dma_irq(DMA_2, 5, RxIrq); // uart6_rx
}
#endif
static void dma2_stream5_irq(void)
{
    dma_irq(DMA_2, 0, RxIrq); // uart1_rx
}
static void dma2_stream6_irq(void)
{
    dma_irq(DMA_2, 5, TxIrq); // uart6_tx
}
static void dma2_stream7_irq(void)
{
    dma_irq(DMA_2, 0, TxIrq); // uart1_tx
}
#endif // DEVICE_SERIAL_ASYNCH_DMA
#if defined(UART5_BASE)
static void uart5_irq(void)
{
    uart_irq(4);
}
#endif
#if defined(USART6_BASE)
static void uart6_irq(void)
{
    uart_irq(5);
}
#endif
#if defined(UART7_BASE)
static void uart7_irq(void)
{
    uart_irq(6);
}
#endif
#if defined(UART8_BASE)
static void uart8_irq(void)
{
    uart_irq(7);
}
#endif
/** Register the user interrupt callback for this UART instance.
 * The handler is a single file-global shared by all instances; only the id
 * is stored per instance. The handler is assigned before the id, so an IRQ
 * that fires in between never observes a non-zero id with a stale handler
 * (uart_irq() only dispatches when the id is non-zero). */
void serial_irq_handler(serial_t *obj, uart_irq_handler handler, uint32_t id)
{
    irq_handler = handler;
    serial_irq_ids[SERIAL_OBJ(index)] = id;
}
/** Enable or disable the RX/TX interrupt of a serial object.
 *
 * Selects the NVIC line and handler for the instance (and, with asynchronous
 * DMA, the matching DMA stream IRQ), then enables/disables the UART RXNE/TXE
 * interrupt sources. The NVIC line is only disabled once both directions'
 * interrupt sources are off.
 *
 * BUGFIX: (1) in the DMA-enabled RX path the UART NVIC vector was set to the
 * DMA stream handler (vector_dma) instead of the UART handler (vector);
 * (2) the UART5 TX DMA IRQ number was DMA1_Stream4_IRQn although UART5 TX
 * uses DMA1 Stream7 (see DMA_UartTx_Stream and dma1_stream7_irq).
 *
 * @param obj    The serial object.
 * @param irq    RxIrq or TxIrq.
 * @param enable Non-zero to enable, zero to disable.
 */
void serial_irq_set(serial_t *obj, SerialIrq irq, uint32_t enable)
{
    IRQn_Type irq_n = (IRQn_Type)0;
    uint32_t vector = 0;
#if DEVICE_SERIAL_ASYNCH_DMA
    IRQn_Type irqn_dma = (IRQn_Type)0;
    uint32_t vector_dma = 0;
#endif
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
    switch (SERIAL_OBJ(index)) {
        case 0:
            irq_n = USART1_IRQn;
            vector = (uint32_t)&uart1_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA2_Stream5_IRQn;
                vector_dma = (uint32_t)&dma2_stream5_irq;
            } else {
                irqn_dma = DMA2_Stream7_IRQn;
                vector_dma = (uint32_t)&dma2_stream7_irq;
            }
#endif
            break;
        case 1:
            irq_n = USART2_IRQn;
            vector = (uint32_t)&uart2_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA1_Stream5_IRQn;
                vector_dma = (uint32_t)&dma1_stream5_irq;
            } else {
                irqn_dma = DMA1_Stream6_IRQn;
                vector_dma = (uint32_t)&dma1_stream6_irq;
            }
#endif
            break;
#if defined(USART3_BASE)
        case 2:
            irq_n = USART3_IRQn;
            vector = (uint32_t)&uart3_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA1_Stream1_IRQn;
                vector_dma = (uint32_t)&dma1_stream1_irq;
            } else {
                irqn_dma = DMA1_Stream3_IRQn;
                vector_dma = (uint32_t)&dma1_stream3_irq;
            }
#endif
            break;
#endif
#if defined(UART4_BASE)
        case 3:
            irq_n = UART4_IRQn;
            vector = (uint32_t)&uart4_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA1_Stream2_IRQn;
                vector_dma = (uint32_t)&dma1_stream2_irq;
            } else {
                irqn_dma = DMA1_Stream4_IRQn;
                vector_dma = (uint32_t)&dma1_stream4_irq;
            }
#endif
            break;
#endif
#if defined(UART5_BASE)
        case 4:
            irq_n = UART5_IRQn;
            vector = (uint32_t)&uart5_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA1_Stream0_IRQn;
                vector_dma = (uint32_t)&dma1_stream0_irq;
            } else {
                // BUGFIX: UART5 TX is on DMA1 Stream7; the IRQ number
                // previously named Stream4 while the vector was Stream7's.
                irqn_dma = DMA1_Stream7_IRQn;
                vector_dma = (uint32_t)&dma1_stream7_irq;
            }
#endif
            break;
#endif
#if defined(USART6_BASE)
        case 5:
            irq_n = USART6_IRQn;
            vector = (uint32_t)&uart6_irq;
#if DEVICE_SERIAL_ASYNCH_DMA
            if (irq == RxIrq) {
                irqn_dma = DMA2_Stream1_IRQn;
                vector_dma = (uint32_t)&dma2_stream1_irq;
            } else {
                irqn_dma = DMA2_Stream6_IRQn;
                vector_dma = (uint32_t)&dma2_stream6_irq;
            }
#endif
            break;
#endif
#if defined(UART7_BASE)
        case 6:
            irq_n = UART7_IRQn;
            vector = (uint32_t)&uart7_irq;
            break;
#endif
#if defined(UART8_BASE)
        case 7:
            irq_n = UART8_IRQn;
            vector = (uint32_t)&uart8_irq;
            break;
#endif
    }
    if (enable) {
        if (irq == RxIrq) {
            __HAL_UART_ENABLE_IT(handle, UART_IT_RXNE);
#if DEVICE_SERIAL_ASYNCH_DMA
            // BUGFIX: the UART line must dispatch to the UART handler
            // (was vector_dma, which routed UART IRQs to the DMA handler).
            NVIC_SetVector(irq_n, vector);
            NVIC_EnableIRQ(irq_n);
            NVIC_SetVector(irqn_dma, vector_dma);
            NVIC_EnableIRQ(irqn_dma);
#else
            NVIC_SetVector(irq_n, vector);
            NVIC_EnableIRQ(irq_n);
#endif
        } else { // TxIrq
            __HAL_UART_ENABLE_IT(handle, UART_IT_TXE);
            NVIC_SetVector(irq_n, vector);
            NVIC_EnableIRQ(irq_n);
#if DEVICE_SERIAL_ASYNCH_DMA
            NVIC_SetVector(irqn_dma, vector_dma);
            NVIC_EnableIRQ(irqn_dma);
#endif
        }
    } else { // disable
        int all_disabled = 0;
        if (irq == RxIrq) {
            __HAL_UART_DISABLE_IT(handle, UART_IT_RXNE);
            // Check if TxIrq is disabled too
            if ((handle->Instance->CR1 & USART_CR1_TXEIE) == 0) all_disabled = 1;
        } else { // TxIrq
            __HAL_UART_DISABLE_IT(handle, UART_IT_TXE);
            // Check if RxIrq is disabled too
            if ((handle->Instance->CR1 & USART_CR1_RXNEIE) == 0) all_disabled = 1;
        }
        if (all_disabled) {
            NVIC_DisableIRQ(irq_n);
#if DEVICE_SERIAL_ASYNCH_DMA
            NVIC_DisableIRQ(irqn_dma);
#endif
        }
    }
}
/******************************************************************************
* READ/WRITE
******************************************************************************/
/** Blocking read of one character (up to 9 data bits) from the UART. */
int serial_getc(serial_t *obj)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];
    // Busy-wait until the receive data register holds a character.
    while (serial_readable(obj) == 0) {
        /* spin */
    }
    return (int)(handle->Instance->DR & 0x1FF);
}
/** Blocking write of one character (up to 9 data bits) to the UART. */
void serial_putc(serial_t *obj, int c)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];
    // Busy-wait until the transmit data register is free.
    while (serial_writable(obj) == 0) {
        /* spin */
    }
    handle->Instance->DR = (uint32_t)(c & 0x1FF);
}
/** @return 1 when a received character is waiting (RXNE set), 0 otherwise. */
int serial_readable(serial_t *obj)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];
    return (__HAL_UART_GET_FLAG(handle, UART_FLAG_RXNE) != RESET) ? 1 : 0;
}
/** @return 1 when the transmit data register is empty (TXE set), 0 otherwise. */
int serial_writable(serial_t *obj)
{
    UART_HandleTypeDef *const handle = &UartHandle[SERIAL_OBJ(index)];
    return (__HAL_UART_GET_FLAG(handle, UART_FLAG_TXE) != RESET) ? 1 : 0;
}
/** Clear the UART TXE/RXNE status flags.
 * NOTE(review): on STM32F4 the TXE flag is normally cleared by writing DR
 * and RXNE by reading DR; whether these status-register writes have any
 * effect should be confirmed against the reference manual. */
void serial_clear(serial_t *obj)
{
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
    __HAL_UART_CLEAR_FLAG(handle, UART_FLAG_TXE);
    __HAL_UART_CLEAR_FLAG(handle, UART_FLAG_RXNE);
}
/** Route the given pin to its UART TX alternate function via the pin map. */
void serial_pinout_tx(PinName tx)
{
    pinmap_pinout(tx, PinMap_UART_TX);
}
/** Request transmission of a break condition using the HAL LIN break helper. */
void serial_break_set(serial_t *obj)
{
    UART_HandleTypeDef *uart = &UartHandle[SERIAL_OBJ(index)];
    HAL_LIN_SendBreak(uart);
}
/** Intentionally empty: there is no break state to clear on this target;
 * the parameter is voided only to avoid an unused-argument warning. */
void serial_break_clear(serial_t *obj)
{
    (void)obj;
}
//########################################################################################
#if DEVICE_SERIAL_ASYNCH
//----------------------------------------------------------------------------------------
// LOCAL HELPER FUNCTIONS
//----------------------------------------------------------------------------------------
/** Configure the TX buffer for an asynchronous write serial transaction.
 *
 * Does nothing when a transmit is already in progress.
 *
 * @param obj       The serial object.
 * @param tx        The buffer for sending.
 * @param tx_length The number of words to transmit.
 * @param width     The word width in bits (only 8 is supported).
 */
static void h_serial_tx_buffer_set(serial_t *obj, void *tx, int tx_length, uint8_t width)
{
    /* Sanity check arguments (consistency fix: mirrors h_serial_rx_buffer_set,
       which already validated obj and its buffer pointer) */
    MBED_ASSERT(obj);
    MBED_ASSERT(tx != (void*)0);
    // We only support byte buffers for now
    MBED_ASSERT(width == 8);
    // Exit if a transmit is already on-going
    if (serial_tx_active(obj)) return;
    obj->tx_buff.buffer = tx;
    obj->tx_buff.length = tx_length;
    obj->tx_buff.pos = 0;
    return;
}
/** Configure the RX buffer for an asynchronous read serial transaction.
 *
 * Does nothing when a reception is already in progress.
 *
 * @param obj       The serial object.
 * @param rx        The buffer for receiving.
 * @param rx_length The number of words to receive.
 * @param width     The word width in bits (only 8 is supported).
 */
static void h_serial_rx_buffer_set(serial_t *obj, void *rx, int rx_length, uint8_t width)
{
    /* Sanity check arguments */
    MBED_ASSERT(obj);
    MBED_ASSERT(rx != (void*)0);
    // Byte-wide buffers are the only supported format
    MBED_ASSERT(width == 8);
    // Ignore the request while a reception is in progress
    if (!serial_rx_active(obj)) {
        obj->rx_buff.buffer = rx;
        obj->rx_buff.length = rx_length;
        obj->rx_buff.pos = 0;
    }
}
/** Record which TX events are wanted for this serial object.
 *
 * Pure bookkeeping: no interrupt is touched here, only the event mask.
 *
 * @param obj    The serial object
 * @param event  The logical OR of the TX events to configure
 * @param enable Non-zero adds the events to the mask, zero removes them
 */
static void h_serial_tx_enable_event(serial_t *obj, int event, uint8_t enable)
{
    if (enable) {
        SERIAL_OBJ(events) |= event;
    } else {
        SERIAL_OBJ(events) &= ~event;
    }
}
/** Record which RX events are wanted for this serial object.
 *
 * Pure bookkeeping: no interrupt is touched here, only the event mask.
 *
 * @param obj    The serial object
 * @param event  The logical OR of the RX events to configure
 * @param enable Non-zero adds the events to the mask, zero removes them
 */
static void h_serial_rx_enable_event(serial_t *obj, int event, uint8_t enable)
{
    if (enable) {
        SERIAL_OBJ(events) |= event;
    } else {
        SERIAL_OBJ(events) &= ~event;
    }
}
/**
 * Get index of serial object TX IRQ, relating it to the physical peripheral.
 *
 * @param obj pointer to serial object
 * @return internal NVIC TX IRQ index of U(S)ART peripheral (or 0 if unknown)
 */
static IRQn_Type h_serial_get_irq_index(serial_t *obj)
{
    IRQn_Type irq_n = (IRQn_Type)0;
    switch (SERIAL_OBJ(index)) {
#if defined(USART1_BASE)
        case 0:
            irq_n = USART1_IRQn;
            break;
#endif
#if defined(USART2_BASE)
        case 1:
            irq_n = USART2_IRQn;
            break;
#endif
#if defined(USART3_BASE)
        case 2:
            irq_n = USART3_IRQn;
            break;
#endif
#if defined(UART4_BASE)
        case 3:
            irq_n = UART4_IRQn;
            break;
#endif
#if defined(UART5_BASE)
        /* BUGFIX: guard was USART5_BASE, which is never defined for this
           peripheral, so the UART5 case was silently compiled out */
        case 4:
            irq_n = UART5_IRQn;
            break;
#endif
#if defined(USART6_BASE)
        case 5:
            irq_n = USART6_IRQn;
            break;
#endif
#if defined(UART7_BASE)
        case 6:
            irq_n = UART7_IRQn;
            break;
#endif
#if defined(UART8_BASE)
        case 7:
            irq_n = UART8_IRQn;
            break;
#endif
        default:
            irq_n = (IRQn_Type)0;
    }
    return irq_n;
}
#if DEVICE_SERIAL_ASYNCH_DMA
/**
 * @brief Start the DMA Transfer with interrupt enabled.
 * @param hdma: pointer to a DMA_HandleTypeDef structure that contains
 * the configuration information for the specified DMA Stream.
 * @param SrcAddress: The source memory Buffer address
 * @param DstAddress: The destination memory Buffer address
 * @param DataLength: The length of data to be transferred from source to destination
 * @note Variant of HAL_DMA_Start_IT that does NOT enable the half-transfer
 * interrupt. NOTE(review): the handle is locked here and never unlocked in
 * this function -- presumably released by the HAL IRQ path; confirm.
 * @retval HAL status (always HAL_OK)
 */
static HAL_StatusTypeDef MBED_DMA_Start_IT(DMA_HandleTypeDef *hdma, uint32_t SrcAddress, uint32_t DstAddress, uint32_t DataLength)
{
    /* Process locked */
    __HAL_LOCK(hdma);
    /* Change DMA peripheral state */
    hdma->State = HAL_DMA_STATE_BUSY;
    /* Check the parameters */
    assert_param(IS_DMA_BUFFER_SIZE(DataLength));
    /* Disable the peripheral while it is reconfigured */
    __HAL_DMA_DISABLE(hdma);
    /* Configure the source, destination address and the data length */
    /* Clear DBM bit (no double-buffer mode) */
    hdma->Instance->CR &= (uint32_t)(~DMA_SxCR_DBM);
    /* Configure DMA Stream data length */
    hdma->Instance->NDTR = DataLength;
    /* Memory to Peripheral */
    if((hdma->Init.Direction) == DMA_MEMORY_TO_PERIPH) {
        /* Configure DMA Stream destination address */
        hdma->Instance->PAR = DstAddress;
        /* Configure DMA Stream source address */
        hdma->Instance->M0AR = SrcAddress;
    } else {
        /* Peripheral to Memory */
        /* Configure DMA Stream source address */
        hdma->Instance->PAR = SrcAddress;
        /* Configure DMA Stream destination address */
        hdma->Instance->M0AR = DstAddress;
    }
    /* Enable all interrupts EXCEPT HALF TRANSFER COMPLETE */
    hdma->Instance->CR |= DMA_IT_TC | DMA_IT_TE | DMA_IT_DME;
    hdma->Instance->FCR |= DMA_IT_FE;
    /* Enable the Peripheral */
    __HAL_DMA_ENABLE(hdma);
    return HAL_OK;
}
/**
 * @brief DMA UART receive process half complete callback.
 * @param hdma: DMA handle whose Parent is the owning UART handle
 * @retval None
 */
static void h_UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma)
{
    UART_HandleTypeDef *huart = (UART_HandleTypeDef *)hdma->Parent;
    HAL_UART_RxHalfCpltCallback(huart);
}
/**
 * @brief DMA UART receive process complete callback.
 * @param hdma: DMA handle whose Parent is the owning UART handle
 * @retval None
 */
static void h_UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma)
{
    UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;
    /* DMA Normal mode: tear down the transfer; in circular mode the stream
       keeps running and only the user callback fires below. */
    if((hdma->Instance->CR & DMA_SxCR_CIRC) == 0)
    {
        huart->RxXferCount = 0;
        /* Disable the DMA transfer for the receiver request by setting the DMAR bit
        in the UART CR3 register */
        huart->Instance->CR3 &= (uint32_t)~((uint32_t)USART_CR3_DMAR);
        /* Check if a transmit process is ongoing or not: keep the TX half of
           the state machine alive if it is. */
        if(huart->State == HAL_UART_STATE_BUSY_TX_RX)
        {
            huart->State = HAL_UART_STATE_BUSY_TX;
        }
        else
        {
            huart->State = HAL_UART_STATE_READY;
        }
    }
    HAL_UART_RxCpltCallback(huart);
}
/**
 * @brief DMA UART communication error callback.
 * @param hdma: DMA handle whose Parent is the owning UART handle
 * @retval None
 */
static void h_UART_DMAError(DMA_HandleTypeDef *hdma)
{
    UART_HandleTypeDef *huart = (UART_HandleTypeDef *)hdma->Parent;
    // Abort both directions, flag the DMA error and notify the user callback.
    huart->RxXferCount = 0;
    huart->TxXferCount = 0;
    huart->ErrorCode |= HAL_UART_ERROR_DMA;
    huart->State = HAL_UART_STATE_READY;
    HAL_UART_ErrorCallback(huart);
}
/**
 * @brief Receives an amount of data in non blocking mode.
 * @note This function differs from HAL's function as it does not enable HalfTranferComplete
 * @param huart: pointer to a UART_HandleTypeDef structure that contains
 * the configuration information for the specified UART module.
 * @param pData: Pointer to data buffer
 * @param Size: Amount of data to be received
 * @note When the UART parity is enabled (PCE = 1) the data received contain the parity bit.
 * @retval HAL status
 */
static HAL_StatusTypeDef MBED_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size)
{
    // Snapshot the state once; reception may start from READY or BUSY_TX.
    uint32_t state = huart->State;
    if((state == HAL_UART_STATE_READY) || (state == HAL_UART_STATE_BUSY_TX)) {
        if((pData == NULL ) || (Size == 0)) {
            return HAL_ERROR;
        }
        /* Process Locked */
        __HAL_LOCK(huart);
        huart->pRxBuffPtr = pData;
        huart->RxXferSize = Size;
        huart->ErrorCode = HAL_UART_ERROR_NONE;
        /* Check if a transmit process is ongoing or not */
        if(huart->State == HAL_UART_STATE_BUSY_TX) {
            huart->State = HAL_UART_STATE_BUSY_TX_RX;
        } else {
            huart->State = HAL_UART_STATE_BUSY_RX;
        }
        /* Set the UART DMA transfer complete callback */
        huart->hdmarx->XferCpltCallback = h_UART_DMAReceiveCplt;
        /* Set the UART DMA Half transfer complete callback */
        huart->hdmarx->XferHalfCpltCallback = h_UART_DMARxHalfCplt;
        /* Set the DMA error callback */
        huart->hdmarx->XferErrorCallback = h_UART_DMAError;
        /* Enable the DMA Stream: peripheral DR -> memory buffer.
           Idiom fix: the HAL template's pointer dance
           (tmp = (uint32_t*)&pData; *(uint32_t*)tmp) is replaced by a
           direct cast of the buffer address -- identical value on this
           32-bit target. */
        MBED_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->DR, (uint32_t)pData, Size);
        /* Enable the DMA transfer for the receiver request by setting the DMAR bit
        in the UART CR3 register */
        huart->Instance->CR3 |= USART_CR3_DMAR;
        /* Process Unlocked */
        __HAL_UNLOCK(huart);
        return HAL_OK;
    } else {
        return HAL_BUSY;
    }
}
/**
 * Get index of serial object TX DMA IRQ, relating it to the physical peripheral.
 *
 * @param obj pointer to serial object
 * @return internal NVIC TX DMA IRQ index of U(S)ART peripheral (0 if unmapped)
 * NOTE(review): UART7/UART8 have TX streams in DMA_UartTx_Stream but no case
 * here, so they fall through to 0 -- confirm whether that is intentional.
 */
static IRQn_Type h_serial_tx_get_irqdma_index(serial_t *obj)
{
    IRQn_Type irq_n = (IRQn_Type)0;
    switch (SERIAL_OBJ(index)) {
#if defined(USART1_BASE)
        case 0:
            irq_n = DMA2_Stream7_IRQn;
            break;
#endif
#if defined(USART2_BASE)
        case 1:
            irq_n = DMA1_Stream6_IRQn;
            break;
#endif
#if defined(USART3_BASE)
        case 2:
            irq_n = DMA1_Stream3_IRQn;
            break;
#endif
#if defined(UART4_BASE)
        case 3:
            irq_n = DMA1_Stream4_IRQn;
            break;
#endif
#if defined(UART5_BASE)
        case 4:
            irq_n = DMA1_Stream7_IRQn;
            break;
#endif
#if defined(USART6_BASE)
        case 5:
            irq_n = DMA2_Stream6_IRQn;
            break;
#endif
        default:
            irq_n = (IRQn_Type)0;
    }
    return irq_n;
}
/**
 * Get index of serial object RX DMA IRQ, relating it to the physical peripheral.
 *
 * @param obj pointer to serial object
 * @return internal NVIC RX DMA IRQ index of U(S)ART peripheral (0 if unmapped)
 * NOTE(review): UART7/UART8 have RX streams in DMA_UartRx_Stream but no case
 * here, so they fall through to 0 -- confirm whether that is intentional.
 */
static IRQn_Type h_serial_rx_get_irqdma_index(serial_t *obj)
{
    IRQn_Type irq_n = (IRQn_Type)0;
    switch (SERIAL_OBJ(index)) {
#if defined(USART1_BASE)
        case 0:
            irq_n = DMA2_Stream5_IRQn;
            break;
#endif
#if defined(USART2_BASE)
        case 1:
            irq_n = DMA1_Stream5_IRQn;
            break;
#endif
#if defined(USART3_BASE)
        case 2:
            irq_n = DMA1_Stream1_IRQn;
            break;
#endif
#if defined(UART4_BASE)
        case 3:
            irq_n = DMA1_Stream2_IRQn;
            break;
#endif
#if defined(UART5_BASE)
        case 4:
            irq_n = DMA1_Stream0_IRQn;
            break;
#endif
#if defined(USART6_BASE)
        case 5:
            irq_n = DMA2_Stream1_IRQn;
            break;
#endif
        default:
            irq_n = (IRQn_Type)0;
    }
    return irq_n;
}
#endif
//----------------------------------------------------------------------------------------
// MBED API FUNCTIONS
//----------------------------------------------------------------------------------------
/** Begin asynchronous TX transfer. The used buffer is specified in the serial object,
* tx_buff
*
* @param obj The serial object
* @param tx The buffer for sending
* @param tx_length The number of words to transmit
* @param tx_width The bit width of buffer word
* @param handler The serial handler
* @param event The logical OR of events to be registered
* @param hint A suggestion for how to use DMA with this transfer
* @return Returns number of data transfered, or 0 otherwise
*/
#ifdef YOTTA_CFG_MBED_OS
int serial_tx_asynch(serial_t *obj, void *tx, size_t tx_length, uint8_t tx_width, uint32_t handler, uint32_t event, DMAUsage hint)
#else
int serial_tx_asynch(serial_t *obj, const void *tx, size_t tx_length, uint8_t tx_width, uint32_t handler, uint32_t event, DMAUsage hint)
#endif
{
    // DMA usage is currently ignored
    (void) hint;
    // Check buffer is ok
    MBED_ASSERT(tx != (void*)0);
    MBED_ASSERT(tx_width == 8); // support only 8b width
    // Nothing to send: report 0 bytes scheduled without touching the peripheral
    if (tx_length == 0) return 0;
    // Set up buffer
    h_serial_tx_buffer_set(obj, (void *)tx, tx_length, tx_width);
    // Set up events
    h_serial_tx_enable_event(obj, SERIAL_EVENT_TX_ALL, 0); // Clear all events
    h_serial_tx_enable_event(obj, event, 1); // Set only the wanted events
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
    // Enable interrupt
    // Route the UART IRQ to the caller-supplied handler; the vector must be
    // installed before the IRQ is (re-)enabled.
    IRQn_Type irqn = h_serial_get_irq_index(obj);
    NVIC_ClearPendingIRQ(irqn);
    NVIC_DisableIRQ(irqn);
    NVIC_SetPriority(irqn, 1);
    NVIC_SetVector(irqn, (uint32_t)handler);
    NVIC_EnableIRQ(irqn);
#if DEVICE_SERIAL_ASYNCH_DMA
    // Enable DMA interrupt
    // The TX DMA stream IRQ is routed to the same handler as the UART IRQ.
    irqn = h_serial_tx_get_irqdma_index(obj);
    NVIC_ClearPendingIRQ(irqn);
    NVIC_DisableIRQ(irqn);
    NVIC_SetPriority(irqn, 1);
    NVIC_SetVector(irqn, (uint32_t)handler);
    NVIC_EnableIRQ(irqn);
    // the following function will enable program and enable the DMA transfer
    if (HAL_UART_Transmit_DMA(handle, (uint8_t*)tx, tx_length) != HAL_OK)
    {
        /* Transfer error in transmission process */
        return 0;
    }
#else
    // the following function will enable UART_IT_TXE and error interrupts
    if (HAL_UART_Transmit_IT(handle, (uint8_t*)tx, tx_length) != HAL_OK)
    {
        /* Transfer error in transmission process */
        return 0;
    }
#endif
    DEBUG_PRINTF("UART%u: Tx: 0=(%u, %u) %x\n", obj->serial.module+1, tx_length, tx_width, HAL_UART_GetState(handle));
    // Success: the whole buffer has been scheduled for transmission.
    return tx_length;
}
/** Begin asynchronous RX transfer (enable interrupt for data collecting)
* The used buffer is specified in the serial object - rx_buff
*
* @param obj The serial object
* @param rx The buffer for sending
* @param rx_length The number of words to transmit
* @param rx_width The bit width of buffer word
* @param handler The serial handler
* @param event The logical OR of events to be registered
* @param handler The serial handler
* @param char_match A character in range 0-254 to be matched
* @param hint A suggestion for how to use DMA with this transfer
*/
void serial_rx_asynch(serial_t *obj, void *rx, size_t rx_length, uint8_t rx_width, uint32_t handler, uint32_t event, uint8_t char_match, DMAUsage hint)
{
    // DMA usage is currently ignored
    (void) hint;
    /* Sanity check arguments */
    MBED_ASSERT(obj);
    MBED_ASSERT(rx != (void*)0);
    MBED_ASSERT(rx_width == 8); // support only 8b width
    // Clear all previously registered RX events, then arm only the requested ones
    h_serial_rx_enable_event(obj, SERIAL_EVENT_RX_ALL, 0);
    h_serial_rx_enable_event(obj, event, 1);
    // set CharMatch
    // SERIAL_RESERVED_CHAR_MATCH means "no character matching requested"
    if (char_match != SERIAL_RESERVED_CHAR_MATCH) {
        obj->char_match = char_match;
    }
    h_serial_rx_buffer_set(obj, rx, rx_length, rx_width);
    // NOTE(review): RX UART IRQ is configured at priority 0 while the TX path
    // uses priority 1 — presumably so RX preempts TX; confirm intent.
    IRQn_Type irqn = h_serial_get_irq_index(obj);
    NVIC_ClearPendingIRQ(irqn);
    NVIC_DisableIRQ(irqn);
    NVIC_SetPriority(irqn, 0);
    NVIC_SetVector(irqn, (uint32_t)handler);
    NVIC_EnableIRQ(irqn);
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
    // flush current data + error flags
    __HAL_UART_CLEAR_PEFLAG(handle);
#if DEVICE_SERIAL_ASYNCH_DMA
    // Enable DMA interrupt
    // RX DMA stream IRQ shares the caller-supplied handler with the UART IRQ.
    irqn = h_serial_rx_get_irqdma_index(obj);
    NVIC_ClearPendingIRQ(irqn);
    NVIC_DisableIRQ(irqn);
    NVIC_SetPriority(irqn, 1);
    NVIC_SetVector(irqn, (uint32_t)handler);
    NVIC_EnableIRQ(irqn);
    // following HAL function will program and enable the DMA transfer
    MBED_UART_Receive_DMA(handle, (uint8_t*)rx, rx_length);
#else
    // following HAL function will enable the RXNE interrupt + error interrupts
    HAL_UART_Receive_IT(handle, (uint8_t*)rx, rx_length);
#endif
    /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */
    __HAL_UART_ENABLE_IT(handle, UART_IT_ERR);
    DEBUG_PRINTF("UART%u: Rx: 0=(%u, %u, %u) %x\n", obj->serial.module+1, rx_length, rx_width, char_match, HAL_UART_GetState(handle));
    return;
}
/** Attempts to determine if the serial peripheral is already in use for TX
*
* @param obj The serial object
* @return Non-zero if the TX transaction is ongoing, 0 otherwise
*/
/** Report whether a TX transaction is currently in flight on this port. */
uint8_t serial_tx_active(serial_t *obj)
{
    MBED_ASSERT(obj);
    UART_HandleTypeDef *const uart = &UartHandle[SERIAL_OBJ(index)];
    /* Non-zero exactly when the HAL state carries the TX-active bit. */
    if (HAL_UART_GetState(uart) & UART_STATE_TX_ACTIVE) {
        return 1;
    }
    return 0;
}
/** Attempts to determine if the serial peripheral is already in use for RX
*
* @param obj The serial object
* @return Non-zero if the RX transaction is ongoing, 0 otherwise
*/
/** Report whether an RX transaction is currently in flight on this port. */
uint8_t serial_rx_active(serial_t *obj)
{
    MBED_ASSERT(obj);
    UART_HandleTypeDef *const uart = &UartHandle[SERIAL_OBJ(index)];
    /* Non-zero exactly when the HAL state carries the RX-active bit. */
    if (HAL_UART_GetState(uart) & UART_STATE_RX_ACTIVE) {
        return 1;
    }
    return 0;
}
/** The asynchronous TX and RX handler.
*
* @param obj The serial object
* @return Returns event flags if a TX/RX transfer termination condition was met or 0 otherwise
*/
int serial_irq_handler_asynch(serial_t *obj)
{
    volatile int return_event = 0;
    uint8_t *buf = (uint8_t*)obj->rx_buff.buffer;
    uint8_t i = 0;
    // Irq handler is common to Tx and Rx
    UART_HandleTypeDef *handle = &UartHandle[SERIAL_OBJ(index)];
#if DEVICE_SERIAL_ASYNCH_DMA
    // Forward to the DMA handlers first, but only for the direction(s) whose
    // DMA request is actually enabled in CR3 (DMAT = TX, DMAR = RX).
    if ((handle->Instance->CR3 & USART_CR3_DMAT) !=0) {
        // call dma tx interrupt
        HAL_DMA_IRQHandler(handle->hdmatx);
    }
    if ((handle->Instance->CR3 & USART_CR3_DMAR) !=0) {
        // call dma rx interrupt
        HAL_DMA_IRQHandler(handle->hdmarx);
    }
#endif
    HAL_UART_IRQHandler(handle);
    // TX PART:
    if (__HAL_UART_GET_FLAG(handle, UART_FLAG_TC) != RESET) {
        __HAL_UART_CLEAR_FLAG(handle, UART_FLAG_TC);
        // return event SERIAL_EVENT_TX_COMPLETE if requested
        if ((SERIAL_OBJ(events) & SERIAL_EVENT_TX_COMPLETE ) != 0){
            return_event |= SERIAL_EVENT_TX_COMPLETE & obj->serial.events;
        }
    }
    // handle error events:
    // NOTE(review): the HAL_UART_ERROR_* error-code constants are passed to
    // __HAL_UART_GET_FLAG/__HAL_UART_CLEAR_FLAG, which normally take
    // UART_FLAG_* status-register masks — verify the constants coincide on
    // this HAL version.
    if (__HAL_UART_GET_FLAG(handle, HAL_UART_ERROR_PE)) {
        __HAL_UART_CLEAR_FLAG(handle, HAL_UART_ERROR_PE);
        return_event |= SERIAL_EVENT_RX_PARITY_ERROR & obj->serial.events;
    }
    if (__HAL_UART_GET_FLAG(handle, HAL_UART_ERROR_NE)||(handle->ErrorCode & HAL_UART_ERROR_NE)!=0) {
        __HAL_UART_CLEAR_FLAG(handle, HAL_UART_ERROR_NE);
        // not supported by mbed
    }
    if (__HAL_UART_GET_FLAG(handle, HAL_UART_ERROR_FE)||(handle->ErrorCode & HAL_UART_ERROR_FE)!=0) {
        __HAL_UART_CLEAR_FLAG(handle, HAL_UART_ERROR_FE);
        return_event |= SERIAL_EVENT_RX_FRAMING_ERROR & SERIAL_OBJ(events);
    }
    if (__HAL_UART_GET_FLAG(handle, HAL_UART_ERROR_ORE)||(handle->ErrorCode & HAL_UART_ERROR_ORE)!=0) {
        __HAL_UART_CLEAR_FLAG(handle, HAL_UART_ERROR_ORE);
        return_event |= SERIAL_EVENT_RX_OVERRUN_ERROR & SERIAL_OBJ(events);
    }
    //RX PART
    // increment rx_buff.pos
    // Progress is derived from how many items the HAL still has to receive.
    if (handle->RxXferSize !=0) {
        obj->rx_buff.pos = handle->RxXferSize - handle->RxXferCount;
    }
    // Whole buffer received -> RX complete (if the event was requested).
    if ((handle->RxXferCount==0)&&(obj->rx_buff.pos >= (obj->rx_buff.length - 1))) {
        return_event |= SERIAL_EVENT_RX_COMPLETE & SERIAL_OBJ(events);
    }
    // Chek if Char_match is present
    // Linear scan of the received bytes for the configured match character.
    if (SERIAL_OBJ(events) & SERIAL_EVENT_RX_CHARACTER_MATCH) {
        if (buf != NULL){
            while((buf[i] != obj->char_match)&&(i<handle->RxXferSize)){//for (i=0;i<UartHandle.RxXferSize;i++){
                i++;//if (buf[i] == obj->char_match{
                //}
            }
            if (i<handle->RxXferSize){
                // Match found: report its position and raise the event.
                obj->rx_buff.pos = i;
                return_event |= SERIAL_EVENT_RX_CHARACTER_MATCH & SERIAL_OBJ(events);
            }
        }
    }
    return return_event;
}
/** Abort the ongoing TX transaction. It disables the enabled interupt for TX and
* flush TX hardware buffer if TX FIFO is used
*
* @param obj The serial object
*/
/**
 * Abort the ongoing asynchronous TX transaction.
 *
 * Disables the TX-related UART interrupts, clears stale status flags and
 * rolls the HAL state machine back so that only a concurrent RX (if any)
 * remains active.
 *
 * @param obj The serial object
 */
void serial_tx_abort_asynch(serial_t *obj)
{
    UART_HandleTypeDef *const uart = &UartHandle[SERIAL_OBJ(index)];
    /* Stop transfer-complete and TX-empty interrupts. */
    __HAL_UART_DISABLE_IT(uart, UART_IT_TC|UART_IT_TXE);
    /* Drop any pending error/status flags. */
    __HAL_UART_CLEAR_PEFLAG(uart);
    /* Forget the in-flight transfer. */
    uart->TxXferCount = 0;
    /* Preserve a concurrent RX transfer, otherwise go back to READY. */
    uart->State = (uart->State == HAL_UART_STATE_BUSY_TX_RX)
                      ? HAL_UART_STATE_BUSY_RX
                      : HAL_UART_STATE_READY;
}
/** Abort the ongoing RX transaction It disables the enabled interrupt for RX and
* flush RX hardware buffer if RX FIFO is used
*
* @param obj The serial object
*/
/**
 * Abort the ongoing asynchronous RX transaction.
 *
 * Disables the RX-not-empty interrupt, clears stale status flags and rolls
 * the HAL state machine back so that only a concurrent TX (if any) remains
 * active.
 *
 * @param obj The serial object
 */
void serial_rx_abort_asynch(serial_t *obj)
{
    UART_HandleTypeDef *const uart = &UartHandle[SERIAL_OBJ(index)];
    /* Stop the receive-data-register-not-empty interrupt. */
    __HAL_UART_DISABLE_IT(uart, UART_IT_RXNE);
    /* Drop any pending error/status flags. */
    __HAL_UART_CLEAR_PEFLAG(uart);
    /* Forget the in-flight transfer. */
    uart->RxXferCount = 0;
    /* Preserve a concurrent TX transfer, otherwise go back to READY. */
    uart->State = (uart->State == HAL_UART_STATE_BUSY_TX_RX)
                      ? HAL_UART_STATE_BUSY_TX
                      : HAL_UART_STATE_READY;
}
#endif
#if DEVICE_SERIAL_FC
/** Set HW Control Flow
* @param obj The serial object
* @param type The Control Flow type (FlowControlNone, FlowControlRTS, FlowControlCTS, FlowControlRTSCTS)
* @param rxflow Pin for the rxflow
* @param txflow Pin for the txflow
*/
/**
 * Configure hardware flow control for the serial port.
 *
 * Resolves the UART instance from the RTS/CTS pins, records the requested
 * flow-control mode and pins on the serial object, routes the pins to their
 * alternate functions, then re-initializes the UART.
 *
 * @param obj    The serial object
 * @param type   FlowControlNone, FlowControlRTS, FlowControlCTS or FlowControlRTSCTS
 * @param rxflow Pin used for RTS
 * @param txflow Pin used for CTS
 */
void serial_set_flow_control(serial_t *obj, FlowControl type, PinName rxflow, PinName txflow)
{
    /* Determine the UART each pin belongs to and merge into one instance. */
    UARTName uart_rts = (UARTName)pinmap_peripheral(rxflow, PinMap_UART_RTS);
    UARTName uart_cts = (UARTName)pinmap_peripheral(txflow, PinMap_UART_CTS);
    UARTName instance = (UARTName)pinmap_merge(uart_cts, uart_rts);
    MBED_ASSERT(instance != (UARTName)NC);
    switch (type) {
        case FlowControlNone:
            /* Disable hardware flow control */
            SERIAL_OBJ(hw_flow_ctl) = UART_HWCONTROL_NONE;
            break;
        case FlowControlRTS:
            /* RTS only */
            MBED_ASSERT(uart_rts != (UARTName)NC);
            SERIAL_OBJ(hw_flow_ctl) = UART_HWCONTROL_RTS;
            SERIAL_OBJ(pin_rts) = rxflow;
            pinmap_pinout(rxflow, PinMap_UART_RTS);
            break;
        case FlowControlCTS:
            /* CTS only */
            MBED_ASSERT(uart_cts != (UARTName)NC);
            SERIAL_OBJ(hw_flow_ctl) = UART_HWCONTROL_CTS;
            SERIAL_OBJ(pin_cts) = txflow;
            pinmap_pinout(txflow, PinMap_UART_CTS);
            break;
        case FlowControlRTSCTS:
            /* Both directions */
            MBED_ASSERT(uart_rts != (UARTName)NC);
            MBED_ASSERT(uart_cts != (UARTName)NC);
            SERIAL_OBJ(hw_flow_ctl) = UART_HWCONTROL_RTS_CTS;
            SERIAL_OBJ(pin_rts) = rxflow;
            SERIAL_OBJ(pin_cts) = txflow;
            pinmap_pinout(txflow, PinMap_UART_CTS);
            pinmap_pinout(rxflow, PinMap_UART_RTS);
            break;
        default:
            break;
    }
    /* Apply the new configuration to the peripheral. */
    init_uart(obj, instance);
}
#endif
#endif
| 30.106542 | 151 | 0.639225 | [
"object",
"vector"
] |
d98458376747c76f14116e5bb294ec6e2c36f632 | 15,004 | c | C | bsp/hc32/libraries/hc32f4a0_ddl/midwares/hc32/usb/usb_device_lib/device_class/hid_cdc_composite/usb_dev_hid_cdc_wrapper.c | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | 1 | 2018-11-27T02:48:44.000Z | 2018-11-27T02:48:44.000Z | bsp/hc32/libraries/hc32f4a0_ddl/midwares/hc32/usb/usb_device_lib/device_class/hid_cdc_composite/usb_dev_hid_cdc_wrapper.c | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | null | null | null | bsp/hc32/libraries/hc32f4a0_ddl/midwares/hc32/usb/usb_device_lib/device_class/hid_cdc_composite/usb_dev_hid_cdc_wrapper.c | BreederBai/rt-thread | 53ed0314982556dfa9c5db75d4f3e02485d16ab5 | [
"Apache-2.0"
] | null | null | null | /**
*******************************************************************************
* @file usb_dev_hid_cdc_wrapper.c
* @brief HID CDC composite functions.
@verbatim
Change Logs:
Date Author Notes
2022-03-31 CDT First version
@endverbatim
*******************************************************************************
* Copyright (C) 2022, Xiaohua Semiconductor Co., Ltd. All rights reserved.
*
* This software component is licensed by XHSC under BSD 3-Clause license
* (the "License"); You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*******************************************************************************
*/
/*******************************************************************************
* Include files
******************************************************************************/
#include "usb_dev_custom_hid_class.h"
#include "usb_dev_cdc_class.h"
#include "usb_dev_hid_cdc_wrapper.h"
#include "usb_dev_desc.h"
#include "usb_dev_stdreq.h"
/**
* @addtogroup LL_USB_LIB
* @{
*/
/**
* @addtogroup LL_USB_DEV_CLASS
* @{
*/
/**
* @addtogroup LL_USB_DEV_HID_CDC_COMPOSITE USB Device HID CDC Composite
* @{
*/
/*******************************************************************************
* Local type definitions ('typedef')
******************************************************************************/
/*******************************************************************************
* Local pre-processor symbols/macros ('#define')
******************************************************************************/
#define USB_COMPOSITE_CFGDESC_SIZE (USB_CUSTOM_HID_CONFIG_DESC_SIZ - 9U + USB_CDC_CONFIG_DESC_SIZ + 8U)
#define HID_INTERFACE (0x0U)
#define CDC_COM_INTERFACE (0x1U)
/*******************************************************************************
* Local function prototypes ('static')
******************************************************************************/
void usb_dev_composite_init(void *pdev);
void usb_dev_composite_deinit(void *pdev);
uint8_t usb_dev_composite_setup(void *pdev, USB_SETUP_REQ *req);
void usb_dev_composite_rxready(void *pdev);
void usb_dev_composite_datain(void *pdev, uint8_t epnum);
void usb_dev_composite_dataout(void *pdev, uint8_t epnum);
uint8_t usb_dev_composite_sof(void *pdev);
uint8_t *usb_dev_composite_getcfgdesc(uint16_t *length);
/*******************************************************************************
* Global variable definitions
******************************************************************************/
/* Callback table registered with the USB device core for the HID+CDC
   composite class.  Slot order follows the usb_dev_class_func layout;
   NULL slots are callbacks this composite does not use. */
usb_dev_class_func class_composite_cbk = {
    &usb_dev_composite_init,
    &usb_dev_composite_deinit,
    &usb_dev_composite_setup,
    NULL,
    &usb_dev_composite_rxready,
    &usb_dev_composite_getcfgdesc,
    &usb_dev_composite_sof,
    &usb_dev_composite_datain,
    &usb_dev_composite_dataout,
    NULL,
    NULL,
};
/*******************************************************************************
* Local variable definitions ('static')
******************************************************************************/
/* USB configuration descriptor for the composite device:
   interface 0 = HID (two interrupt endpoints), then an Interface
   Association Descriptor grouping interfaces 1 (CDC control) and
   2 (CDC data).  The running byte offsets are kept in the comments. */
__USB_ALIGN_BEGIN static uint8_t usb_dev_composite_cfgdesc[USB_COMPOSITE_CFGDESC_SIZE] = {
    0x09, /* bLength: Configuration Descriptor size */
    USB_CFG_DESCRIPTOR_TYPE, /* bDescriptorType: Configuration */
    USB_COMPOSITE_CFGDESC_SIZE, /* wTotalLength: Bytes returned */
    0x00,
    0x03, /* bNumInterfaces: 3 interfaces (2 for CDC, 1 for HID) */
    0x01, /* bConfigurationValue: Configuration value */
    0x00, /* iConfiguration: Index of string descriptor describing the configuration */
    0xE0, /* bmAttributes: bus powered and Support Remote Wake-up */
    0x32, /* MaxPower 100 mA: this current is used for detecting Vbus */
    /* 09 */
    /************** Descriptor of HID interface ****************/
    0x09, /* bLength: Interface Descriptor size */
    USB_INTERFACE_DESCRIPTOR_TYPE, /* bDescriptorType: Interface descriptor type */
    HID_INTERFACE, /* bInterfaceNumber: Number of Interface */
    0x00, /* bAlternateSetting: Alternate setting */
    0x02, /* bNumEndpoints */
    0x03, /* bInterfaceClass: HID */
    0x01, /* bInterfaceSubClass : 1=BOOT, 0=no boot */
    0x02, /* nInterfaceProtocol : 0=none, 1=keyboard, 2=mouse */
    0x00, /* iInterface: Index of string descriptor */
    /* 18 */
    /******************** Descriptor of HID ********************/
    0x09, /* bLength: HID Descriptor size */
    CUSTOM_HID_DESCRIPTOR_TYPE, /* bDescriptorType: HID*/
    0x11, /* bcdHID: HID Class Spec release number */
    0x01,
    0x00, /* bCountryCode: Hardware target country */
    0x01, /* bNumDescriptors: Number of HID class descriptors to follow */
    0x22, /* bDescriptorType */
    CUSTOM_HID_REPORT_DESC_SIZE, /* wItemLength: Total length of Report descriptor */
    0x00,
    /* 27 */
    /******************** Descriptor of HID endpoint ********************/
    0x07, /* bLength: Endpoint Descriptor size */
    USB_ENDPOINT_DESCRIPTOR_TYPE, /* bDescriptorType: */
    HID_IN_EP, /* bEndpointAddress: Endpoint Address (IN) */
    0x03, /* bmAttributes: Interrupt endpoint */
    HID_IN_PACKET, /* wMaxPacketSize: 64 Byte max */
    0x00,
    0x0A, /* bInterval: Polling Interval (10 ms) */
    /* 34 */
    0x07, /* bLength: Endpoint Descriptor size */
    USB_ENDPOINT_DESCRIPTOR_TYPE, /* bDescriptorType: */
    HID_OUT_EP, /* bEndpointAddress: Endpoint Address (IN) */
    0x03, /* bmAttributes: Interrupt endpoint */
    HID_OUT_PACKET, /* wMaxPacketSize: 64 Byte max */
    0x00,
    0x0A, /* bInterval: Polling Interval (10 ms) */
    /* 41 */
    /******** IAD should be positioned just before the CDC interfaces *******/
    0x08, /* bLength */
    0x0B, /* bDescriptorType */
    0x01, /* bFirstInterface */
    0x02, /* bInterfaceCount */
    0x02, /* bFunctionClass */
    0x02, /* bFunctionSubClass */
    0x01, /* bFunctionProtocol */
    0x00, /* iFunction (Index of string descriptor describing this function) */
    /*49*/
    /*Interface Descriptor */
    0x09, /* bLength: Interface Descriptor size */
    USB_INTERFACE_DESCRIPTOR_TYPE, /* bDescriptorType: Interface */
    CDC_COM_INTERFACE, /* bInterfaceNumber: Number of Interface */
    0x00, /* bAlternateSetting: Alternate setting */
    0x01, /* bNumEndpoints: One endpoints used */
    0x02, /* bInterfaceClass: Communication Interface Class */
    0x02, /* bInterfaceSubClass: Abstract Control Model */
    0x01, /* bInterfaceProtocol: Common AT commands */
    0x01, /* iInterface: */
    /*58*/
    /*Header Functional Descriptor*/
    0x05, /* bLength: Endpoint Descriptor size */
    0x24, /* bDescriptorType: CS_INTERFACE */
    0x00, /* bDescriptorSubtype: Header Func Desc */
    0x10, /* bcdCDC: spec release number */
    0x01,
    /*63*/
    /*Call Management Functional Descriptor*/
    0x05, /* bFunctionLength */
    0x24, /* bDescriptorType: CS_INTERFACE */
    0x01, /* bDescriptorSubtype: Call Management Func Desc */
    0x00, /* bmCapabilities: D0+D1 */
    0x02, /* bDataInterface: 2 */
    /*68*/
    /*ACM Functional Descriptor*/
    0x04, /* bFunctionLength */
    0x24, /* bDescriptorType: CS_INTERFACE */
    0x02, /* bDescriptorSubtype: Abstract Control Management desc */
    0x02, /* bmCapabilities */
    /*72*/
    /*Union Functional Descriptor*/
    0x05, /* bFunctionLength */
    0x24, /* bDescriptorType: CS_INTERFACE */
    0x06, /* bDescriptorSubtype: Union func desc */
    0x01, /* bMasterInterface: Communication class interface */
    0x02, /* bSlaveInterface0: Data Class Interface */
    /*77*/
    /*Endpoint 2 Descriptor*/
    0x07, /* bLength: Endpoint Descriptor size */
    USB_ENDPOINT_DESCRIPTOR_TYPE, /* bDescriptorType: Endpoint */
    CDC_CMD_EP, /* bEndpointAddress */
    0x03, /* bmAttributes: Interrupt */
    LOBYTE(CDC_CMD_PACKET_SIZE), /* wMaxPacketSize: */
    HIBYTE(CDC_CMD_PACKET_SIZE),
    0xFF, /* bInterval: */
    /*84*/
    /*---------------------------------------------------------------------------*/
    /*Data class interface descriptor*/
    0x09, /* bLength: Endpoint Descriptor size */
    USB_INTERFACE_DESCRIPTOR_TYPE, /* bDescriptorType: */
    0x02, /* bInterfaceNumber: Number of Interface */
    0x00, /* bAlternateSetting: Alternate setting */
    0x02, /* bNumEndpoints: Two endpoints used */
    0x0A, /* bInterfaceClass: CDC */
    0x00, /* bInterfaceSubClass: */
    0x00, /* bInterfaceProtocol: */
    0x00, /* iInterface: */
    /*93*/
    /*Endpoint OUT Descriptor*/
    0x07, /* bLength: Endpoint Descriptor size */
    USB_ENDPOINT_DESCRIPTOR_TYPE, /* bDescriptorType: Endpoint */
    CDC_OUT_EP, /* bEndpointAddress */
    0x02, /* bmAttributes: Bulk */
    LOBYTE(MAX_CDC_PACKET_SIZE), /* wMaxPacketSize: */
    HIBYTE(MAX_CDC_PACKET_SIZE),
    0x00, /* bInterval: ignore for Bulk transfer */
    /*100*/
    /*Endpoint IN Descriptor*/
    0x07, /* bLength: Endpoint Descriptor size */
    USB_ENDPOINT_DESCRIPTOR_TYPE, /* bDescriptorType: Endpoint */
    CDC_IN_EP, /* bEndpointAddress */
    0x02, /* bmAttributes: Bulk */
    LOBYTE(MAX_CDC_PACKET_SIZE), /* wMaxPacketSize: */
    HIBYTE(MAX_CDC_PACKET_SIZE),
    0x00, /* bInterval */
    /*107*/
} ;
/*******************************************************************************
* Function implementation - global ('extern') and local ('static')
******************************************************************************/
/**
* @brief Initialize the composite app
* @param [in] pdev Device instance
* @retval None
*/
void usb_dev_composite_init(void *pdev)
{
    /* Initialize both constituent classes on the shared device instance. */
    usb_dev_hid_init(pdev);
    usb_dev_cdc_init(pdev);
}
/**
* @brief Deinitialize the composite app
* @param [in] pdev Device instance
* @retval None
*/
void usb_dev_composite_deinit(void *pdev)
{
    /* Tear down both constituent classes on the shared device instance. */
    usb_dev_hid_deinit(pdev);
    usb_dev_cdc_deinit(pdev);
}
/**
* @brief Handle the setup requests
* @param [in] pdev Device instance
* @param [in] req usb requests
* @retval status
*/
/**
 * @brief  Dispatch a SETUP request to the HID or CDC sub-class.
 *
 * Interface-targeted requests go to HID when wIndex names the HID interface,
 * otherwise to CDC; endpoint-targeted requests go to HID when wIndex names
 * the HID IN endpoint, otherwise to CDC.  Other recipients are accepted
 * without action.
 *
 * @param [in] pdev Device instance
 * @param [in] req  usb request to dispatch
 * @retval status
 */
uint8_t usb_dev_composite_setup(void *pdev, USB_SETUP_REQ *req)
{
    uint8_t status = USB_DEV_OK;
    uint8_t recipient = (uint8_t)(req->bmRequest & USB_REQ_RECIPIENT_MASK);
    if (recipient == USB_REQ_RECIPIENT_INTERFACE) {
        status = (req->wIndex == HID_INTERFACE)
                     ? usb_dev_hid_setup(pdev, req)
                     : usb_dev_cdc_setup(pdev, req);
    } else if (recipient == USB_REQ_RECIPIENT_ENDPOINT) {
        status = (req->wIndex == HID_IN_EP)
                     ? usb_dev_hid_setup(pdev, req)
                     : usb_dev_cdc_setup(pdev, req);
    }
    return status;
}
/**
* @brief get the configuration descriptor and return the its pointer
* @param [in] length length of configuration descriptor
* @retval the pointer of configuration descriptor buffer
*/
/**
 * @brief  Return the composite configuration descriptor.
 * @param [out] length receives the descriptor length in bytes
 * @retval pointer to the configuration descriptor buffer
 */
uint8_t *usb_dev_composite_getcfgdesc(uint16_t *length)
{
    uint8_t *descriptor = usb_dev_composite_cfgdesc;
    *length = (uint16_t)sizeof(usb_dev_composite_cfgdesc);
    return descriptor;
}
/**
* @brief processing for data in
* @param [in] pdev Device instance
* @param [in] epnum endpoint index
* @retval None
*/
/**
 * @brief  IN-transfer completion dispatch.
 *
 * Routes to the CDC handler when the endpoint matches the CDC IN endpoint
 * (direction bit stripped), otherwise to the HID handler.
 *
 * @param [in] pdev  Device instance
 * @param [in] epnum endpoint index
 */
void usb_dev_composite_datain(void *pdev, uint8_t epnum)
{
    uint8_t cdc_in = (uint8_t)((uint8_t)CDC_IN_EP & 0x7FU);
    if (epnum == cdc_in) {
        usb_dev_cdc_datain(pdev, epnum);
    } else {
        usb_dev_hid_datain(pdev, epnum);
    }
}
/**
* @brief processing for data out
* @param [in] pdev Device instance
* @param [in] epnum endpoint index
* @retval None
*/
/**
 * @brief  OUT-transfer completion dispatch.
 *
 * Routes to the CDC handler when the endpoint matches the CDC OUT endpoint
 * (direction bit stripped), otherwise to the HID handler.
 *
 * @param [in] pdev  Device instance
 * @param [in] epnum endpoint index
 */
void usb_dev_composite_dataout(void *pdev, uint8_t epnum)
{
    uint8_t cdc_out = (uint8_t)((uint8_t)CDC_OUT_EP & 0x7FU);
    if (epnum == cdc_out) {
        usb_dev_cdc_dataout(pdev, epnum);
    } else {
        usb_dev_hid_dataout(pdev, epnum);
    }
}
/**
* @brief processing for sof
* @param [in] pdev Device instance
* @retval status
*/
uint8_t usb_dev_composite_sof(void *pdev)
{
    /* Only the CDC class handles start-of-frame events. */
    return (usb_dev_cdc_sof(pdev));
}
/**
* @brief processing for rxready of control endpoint
* @param [in] pdev Device instance
* @retval None
*/
void usb_dev_composite_rxready(void *pdev)
{
    /* Control-endpoint RX-ready is consumed by the CDC class only. */
    usb_dev_cdc_ctrlep_rxready(pdev);
}
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/*******************************************************************************
* EOF (not truncated)
******************************************************************************/
| 41.106849 | 116 | 0.479139 | [
"model"
] |
d986777fe9a659505e634ef64f7207fb273c94a6 | 1,168 | h | C | SODAClient/SODADataTypesMapper.h | mingsai/soda-ios-sdk | aba43df6e306492e7d44304a8d7bde4ea0e0d14b | [
"Apache-2.0"
] | 36 | 2015-01-12T03:22:18.000Z | 2021-10-06T02:58:04.000Z | SODAClient/SODADataTypesMapper.h | ZeusbaseObjectiveC/soda-ios-sdk | aba43df6e306492e7d44304a8d7bde4ea0e0d14b | [
"Apache-2.0"
] | 4 | 2015-09-07T04:56:41.000Z | 2016-08-26T00:35:34.000Z | SODAClient/SODADataTypesMapper.h | ZeusbaseObjectiveC/soda-ios-sdk | aba43df6e306492e7d44304a8d7bde4ea0e0d14b | [
"Apache-2.0"
] | 11 | 2015-02-13T15:28:13.000Z | 2018-09-20T15:41:21.000Z | //
// SODA iOS SDK - Socrata, Inc
//
// Copyright (C) 2013 Socrata, Inc
// All rights reserved.
//
// Developed for Socrata, Inc by:
// 47 Degrees, LLC
// http://47deg.com
// hello@47deg.com
//
#import <Foundation/Foundation.h>
/**
* Service that serializes and maps SODA responses to model classes based on the returned SODA fields and dataTypes headers.
* The mappings dictionary is composed by NSString keys that match the data types and blocks such as ^(id value) { return transformedValue; }
* is in charge of the mapping work transforming the automatic serialized JSON objects to the target NSObject container properties.
*/
@interface SODADataTypesMapper : NSObject {
    // Backing store for the mappings property.
    NSDictionary *_mappings;
}
#pragma mark - Properties
// Dictionary of SODA data-type name -> transformation block (see class comment).
@property(nonatomic, strong) NSDictionary *mappings;
#pragma mark - Initializers
// Designated initializer with the data-type -> block mapping dictionary.
- (id)initWithMappings:(NSDictionary *)mappings;
// Convenience factory wrapping initWithMappings:.
+ (id)mapperWithMappings:(NSDictionary *)mappings;
#pragma mark - Mapping and Deserialization
// Returns the property name on `object` corresponding to the given column key.
- (NSString *)propertyNameForKey:(NSString *)column inObject:(id)object;
// Maps the JSON container's columns (paired with their SODA dataTypes) onto
// the target object's properties.
- (void)mapColumns:(NSArray *)columns withTypes:(NSArray *)dataTypes inContainer:(id)JSON toObject:(id)object;
@end
"object",
"model"
] |
d994c07d3523d7e8a5b2214e0616aa1cfd98557d | 7,669 | h | C | include/fbgemm/FbgemmFP16.h | jorgtied/FBGEMM | 6f45243cb8ab7d7ab921af18d313ae97144618b8 | [
"BSD-3-Clause"
] | 2 | 2020-01-21T07:20:40.000Z | 2020-02-01T12:10:02.000Z | include/fbgemm/FbgemmFP16.h | jorgtied/FBGEMM | 6f45243cb8ab7d7ab921af18d313ae97144618b8 | [
"BSD-3-Clause"
] | 3 | 2020-02-25T18:26:56.000Z | 2022-03-22T09:21:38.000Z | include/fbgemm/FbgemmFP16.h | jorgtied/FBGEMM | 6f45243cb8ab7d7ab921af18d313ae97144618b8 | [
"BSD-3-Clause"
] | 2 | 2020-02-24T20:01:06.000Z | 2022-03-21T21:23:32.000Z | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
// WARNING: this is a legacy fp16 fbgemm implementation and will soon be
// upgraded to match with new fbgemm interface.
#include <cassert>
#include <cstdlib>
#include <memory>
#include <stdexcept>
#include <vector>
#include "Types.h"
#include "Utils.h"
namespace fbgemm {
/// class that performs packing of matrix in
/// row-major format into
/// internal packed blocked-row major format
class PackedGemmMatrixFP16 {
 public:
  // takes smat input mamtrix in row-major format;
  // packs it into gemm-friendly blocked format;
  // allocate space and sets up all the internal variables;
  // also premultiplies by alpha during packing.
  // brow_ contains tile size along k dimension
  // and also is # of fmas updates into int16 container
  // before flushing into fp32.
  // the smaller the brow_, the higher overhead
  // of flushing is.
  // kernel_ncol_blocks is the number of column blocks (in the size of 8 fp16,
  // or 128 bit, or 1 xmm register size) in the kernel. Because the batch size
  // can be dynamic and we need to prepack the weight matrix B, the internal
  // packing layout of the weight matrix and kernel_ncol_blocks have to be
  // fixed. We can choose kernel_ncol_blocks = 1 (with kernels of 1x1~14x1
  // register layouts), 2 (with kernels of 1x2~6x2 register layout), or 3 (with
  // kernels of 1x3~4x3 register layout).
  PackedGemmMatrixFP16(
      const matrix_op_t trans,
      const int nrow,
      const int ncol,
      const float alpha,
      const float* smat,
      const int brow = 512,
      const int kernel_ncol_blocks = 2)
      : nrow_(nrow),
        ncol_(ncol),
        brow_(brow),
        kernel_ncol_blocks_(kernel_ncol_blocks) {
    initializeParam();
    initializeMemory();
    // copy source matrix into packed matrix
    this->packFromSrc(trans, alpha, smat);
  }
  // Constructor used when the blocking parameters are already known
  // (e.g. when deserializing a previously packed matrix); allocates
  // storage but does not copy any matrix data.
  PackedGemmMatrixFP16(
      const int nrow,
      const int ncol,
      const int brow,
      const int last_brow,
      const int bcol,
      const int nbrow,
      const int nbcol,
      const uint64_t size,
      const int kernel_ncol_blocks = 2)
      : nrow_(nrow),
        ncol_(ncol),
        brow_(brow),
        last_brow_(last_brow),
        bcol_(bcol),
        nbrow_(nbrow),
        nbcol_(nbcol),
        size_(size),
        kernel_ncol_blocks_(kernel_ncol_blocks) {
    initializeMemory();
  }
  // Derives the block counts and edge-block sizes from nrow_/ncol_ and the
  // configured block sizes.
  void initializeParam() {
    bcol_ = 8 * kernelNumColBlocks();
    // set up internal packing parameters
    nbrow_ = ((numRows() % blockRowSize()) == 0)
        ? (numRows() / blockRowSize())
        : ((numRows() + blockRowSize()) / blockRowSize());
    last_brow_ = ((nrow_ % blockRowSize()) == 0) ? blockRowSize()
                                                 : (nrow_ % blockRowSize());
    nbcol_ = ((numCols() % blockColSize()) == 0)
        ? (numCols() / blockColSize())
        : ((numCols() + blockColSize()) / blockColSize());
    if (numCols() != blockColSize() * nbcol_) {
#ifdef VLOG
      VLOG(0) << "Packer warning: ncol(" << numCols()
              << ") is not a multiple of internal block size ("
              << blockColSize() << ")";
      VLOG(0)
          << "lefover is currently done via MKL: hence overhead will inccur";
#endif
    }
  }
  void setPacked(bool p) {
    packed_ = p;
  }
  // True when pmat_ currently holds blocked (packed) data rather than a
  // plain row-/column-major image.
  bool packed() const {
    return packed_;
  }
  // Allocates the 64-byte-aligned packed buffer (plus padding for the
  // software-pipelined kernels) and zero-fills it.
  void initializeMemory() {
    // allocate and initialize packed memory
    const int padding = 1024; // required by sw pipelined kernels
    size_ = (blockRowSize() * nbrow_) * (blockColSize() * nbcol_);
#ifdef _MSC_VER
    pmat_ = (float16 *)_aligned_malloc(matSize() * sizeof(float16) +
        padding, 64);
#else
    // NOTE(review): in NDEBUG builds the posix_memalign result is unchecked
    // (assert compiles out) — allocation failure would go unnoticed.
    int result = posix_memalign((void**)&pmat_, 64, matSize() * sizeof(float16) + padding);
    assert(result == 0);
#endif
    for (auto i = 0; i < matSize(); i++) {
      pmat_[i] = tconv(0.f, pmat_[i]);
    }
  }
  ~PackedGemmMatrixFP16() {
#ifdef _MSC_VER
    _aligned_free(pmat_);
#else
    free(pmat_);
#endif
  }
  // Reverses packing: writes the packed contents back into src layout
  // (row-major, or column-major when trans == Transpose) inside pmat_,
  // leaving the object in the unpacked state.
  void unpackFromSrc(const matrix_op_t trans, float16* src_mat) {
    bool tr = (trans == matrix_op_t::Transpose);
    for (int i = 0; i < numRows(); i++) {
      for (int j = 0; j < numCols(); j++) {
        pmat_[tr ? i + numRows() * j : i * numCols() + j] = src_mat[addr(i, j)];
      }
    }
    packed_ = false;
  }
  // protected:
  // blocked row-major format address arithmetic
  // Maps a logical (row, col) coordinate to its offset in the packed buffer:
  // full row-blocks are laid out consecutively, the last (possibly short)
  // row-block uses last_brow_ rows per column-block.
  uint64_t addr(const int r_, const int c_) const {
    uint64_t r = (uint64_t)r_;
    uint64_t c = (uint64_t)c_;
    uint64_t block_row_id = r / blockRowSize(),
             brow_offset =
                 (block_row_id * nbcol_) * (blockRowSize() * blockColSize());
    uint64_t block_col_id = c / blockColSize(),
             bcol_offset = block_col_id *
        ((block_row_id != nbrow_ - 1) ? (blockRowSize() * blockColSize())
                                      : (last_brow_ * blockColSize()));
    uint64_t block_offset = brow_offset + bcol_offset;
    uint64_t inblock_offset =
        r % blockRowSize() * blockColSize() + c % blockColSize();
    uint64_t index = block_offset + inblock_offset;
    assert(index < matSize());
    return index;
  }
  // Packs a float source matrix (scaled by alpha, with float->fp16
  // conversion) into the blocked layout.
  void
  packFromSrc(const matrix_op_t trans, const float alpha, const float* smat) {
    bool tr = (trans == matrix_op_t::Transpose);
    // pack
    for (int i = 0; i < numRows(); i++) {
      for (int j = 0; j < numCols(); j++) {
        pmat_[addr(i, j)] = tconv(
            alpha *
                ((tr == false) ? smat[i * numCols() + j]
                               : smat[i + numRows() * j]),
            pmat_[addr(i, j)]);
      }
    }
    packed_ = true;
  }
  // This function takes in an unpacked float16 matrix of the same size and
  // packs it. There is no floating type conversion.
  void packFromSrc(const matrix_op_t trans, const float16* smat) {
    bool tr = (trans == matrix_op_t::Transpose);
    for (int i = 0; i < numRows(); ++i) {
      for (int j = 0; j < numCols(); ++j) {
        pmat_[addr(i, j)] = smat[tr ? i + numRows() * j : i * numCols() + j];
      }
    }
    packed_ = true;
  }
  // Element access in logical (row, col) coordinates, resolved through the
  // packed-layout address arithmetic.
  const float16& operator()(const int r, const int c) const {
    uint64_t a = addr(r, c);
    assert(r < numRows());
    assert(c < numCols());
    assert(a < this->matSize());
    return pmat_[a];
  }
  // NOTE(review): size_ is uint64_t but matSize() narrows to int — large
  // matrices would overflow; confirm upstream bounds.
  int matSize() const {
    return (int)size_;
  }
  int numRows() const {
    return nrow_;
  }
  int numCols() const {
    return ncol_;
  }
  int lastBrow() const {
    return last_brow_;
  }
  int numBrow() const {
    return nbrow_;
  }
  int numBcol() const {
    return nbcol_;
  }
  float16* pmat() const {
    return pmat_;
  }
  inline int blockRowSize() const {
    return brow_;
  }
  inline int blockColSize() const {
    return bcol_;
  }
  inline int kernelNumColBlocks() const {
    return kernel_ncol_blocks_;
  }
  // Logical matrix dimensions.
  int nrow_, ncol_;
  // Blocking parameters: row-block size, rows in the final row-block,
  // column-block size.
  int brow_, last_brow_, bcol_;
  // Number of row-blocks / column-blocks.
  int nbrow_, nbcol_;
  // Total element count of the packed buffer.
  uint64_t size_;
  int kernel_ncol_blocks_;
  // 64-byte-aligned packed storage (owned; freed in the destructor).
  float16* pmat_;
  bool packed_{false};
  friend void cblas_gemm_compute(
      const matrix_op_t transa,
      const int m,
      const float* A,
      const PackedGemmMatrixFP16& Bp,
      const float beta,
      float* C,
      int thread_id,
      int num_threads);
};
/**
* restrictions: transa == CblasNoTrans
*/
extern void cblas_gemm_compute(
const matrix_op_t transa,
const int m,
const float* A,
const PackedGemmMatrixFP16& Bp,
const float beta,
float* C,
int thread_id = 0,
int num_threads = 1);
}; // namespace fbgemm
| 28.298893 | 91 | 0.609597 | [
"vector"
] |
d995a3268f29858b00f9684b9ac826213183885c | 1,723 | h | C | Treap.h | researchGit/AgreementTesting | 49356b46be812fa8afd6ba291d910fa1f8f91ff9 | [
"MIT"
] | null | null | null | Treap.h | researchGit/AgreementTesting | 49356b46be812fa8afd6ba291d910fa1f8f91ff9 | [
"MIT"
] | null | null | null | Treap.h | researchGit/AgreementTesting | 49356b46be812fa8afd6ba291d910fa1f8f91ff9 | [
"MIT"
] | null | null | null | //
// Created by Lei on 7/29/20.
//
#ifndef AGREEMENTTESTING_TREAP_H
#define AGREEMENTTESTING_TREAP_H
#include "TreapNode.h"
#include <vector>
#include "HelperDataStructure.h"
#include <unordered_set>
#include <unordered_map>
using namespace std;
class Treap {
public:
    Treap();
    ~Treap() = default;
    // Inserts a new node with the given label/key/priority and activity flags
    // under `parent`, updating `root` as needed (defined in the .cpp).
    void
    insert(shared_ptr<TreapNode> &root, string label, int key, int priority, bool active, bool activeOccur,
           shared_ptr<TreapNode> &parent);
    // Debug helper: dumps the treap rooted at `node`.
    void printTreap(shared_ptr<TreapNode> &node);
    // Number of keys inserted so far (see keyCount below).
    int getKeyCount();
    // Consistency checks over the treap rooted at `root`.
    bool checkInfoCorrectness(shared_ptr<TreapNode> &root);
    // NOTE: "Chile" is a typo in the original identifier; kept for
    // source compatibility with existing callers.
    bool checkParentChileRelationship(shared_ptr<TreapNode> &root);
    // Builds a treap from an Euler-tour sequence plus the per-label/level
    // non-tree edges; returns the new root.
    shared_ptr<TreapNode> constructTreap(vector<string> &eulerTourSequence,
            unordered_map<string, unordered_map<int, unordered_set<graphEdge, graphEdgeHash>>> &nonTreeEdges);
    // Accessors return copies of the internal maps (note: potentially large).
    unordered_map<string, unordered_map<int, unordered_map<string, unordered_set<shared_ptr<TreapNode>>>>>
    getLevelLabelReferenceMap() { return levelLabelReferenceMap; }
    unordered_map<string, shared_ptr<TreapNode>> getActiveOccurList() { return activeOccurList; }
    void printLevelLabelReferenceMap();
    void printActiveOccurList();
private:
    // Internal rotation primitives used during insertion (see implementation).
    void rotateLeft(shared_ptr<TreapNode> &root);
    void rotateRight(shared_ptr<TreapNode> &root);
    bool computeSizeRootedOfNode(shared_ptr<TreapNode> &root);
    int keyCount;
    // label -> level -> label -> nodes; populated during construction/insertion.
    unordered_map<string, unordered_map<int, unordered_map<string, unordered_set<shared_ptr<TreapNode>>>>> levelLabelReferenceMap;
    shared_ptr<TreapNode> lastVisitedPointer;
    // label -> its active occurrence node.
    unordered_map<string, shared_ptr<TreapNode>> activeOccurList;
};
#endif //AGREEMENTTESTING_TREAP_H
| 26.921875 | 139 | 0.739988 | [
"vector"
] |
d9a19a4bbab5d4e165adc1a56297c64acd743202 | 4,023 | c | C | core_blas/core_sgeqp3_larfg.c | zhuangsc/Plasma-ompss1 | bcc99c164a256bc7df7c936b9c43afd38c12aea2 | [
"BSD-3-Clause"
] | null | null | null | core_blas/core_sgeqp3_larfg.c | zhuangsc/Plasma-ompss1 | bcc99c164a256bc7df7c936b9c43afd38c12aea2 | [
"BSD-3-Clause"
] | null | null | null | core_blas/core_sgeqp3_larfg.c | zhuangsc/Plasma-ompss1 | bcc99c164a256bc7df7c936b9c43afd38c12aea2 | [
"BSD-3-Clause"
] | null | null | null | /**
*
* @file core_sgeqp3_larfg.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mark Gates
* @date 2010-11-15
* @generated s Tue Jan 7 11:44:49 2014
*
**/
#include <math.h>
#include "common.h"
#define A(m,n) BLKADDR( A, float, m, n )
/***************************************************************************//**
*
* @ingroup CORE_float
*
* CORE_sgeqp3_larfg generates a Householder elementary reflector H, such that
*
* H**T * x = [ beta ] and H**T * H = I.
* [ 0 ]
*
* where alpha and beta are scalars, with beta real, and x is an n element vector.
* H is reperested in the form
*
* H = I - tau * [ 1 ] * [ 1 v**T ],
* [ v ]
*
* where tau is a scalar and v is an (n-1) element vector.
* If x[1:] = 0 and x[0] is real, then tau = 0 and H = I.
* Otherwise, 1 <= real(tau) <= 2 and abs(tau-1) <= 1.
*
* Here, x = A[ ii*mb + i : m, jj*nb + j ].
* That is, x is j-th column of the jj-th block-column of A, starting in the
* i-th row of the ii-th block-row of A, and going to the last row.
* Note that x spans multiple tiles of A.
*
* This DIFFERS from LAPACK in that the 1.0 is stored explicitly in the top
* element of x and beta is stored separately. (Whereas in LAPACK, the
* 1.0 is implicit and beta is stored in the top element of x.)
*
*******************************************************************************
*
* @param[in,out] A
* Description of m by n matrix A.
* On entry, A[ ii*mb + i : m, jj*nb + j ] is the vector x.
* On exit, A[ ii*mb + i : m, jj*nb + j ] is overwritten with [ 1, v ].
*
* @param[in] ii
* Index of block row of A to start in.
*
* @param[in] jj
* Index of block column of A.
*
* @param[in] i
* Index of row within ii-th block row to start in.
*
* @param[in] j
* Index of column within jj-th block column.
*
* @param[out] tau
* The scalar tau.
*
* @param[out] beta
* The scalar beta.
*
**/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_sgeqp3_larfg = PCORE_sgeqp3_larfg
#define CORE_sgeqp3_larfg PCORE_sgeqp3_larfg
#endif
void CORE_sgeqp3_larfg( PLASMA_desc A, int ii, int jj, int i, int j,
                        float *tau, float *beta )
{
    int i2, kk, k, mb, lda;
    float norm2;
    float x0, scale;
    float *Akj, *Aij;
    /* TODO: it might be simpler to pass in global (i,j) indices, */
    /* from which (ii,jj) can be derived.                         */
    /* compute norm( A[i+1:m,j] )^2 */
    /* todo: use lassq algorithm */
    /* The vector x spans several tiles: walk every block-row from ii down, */
    /* starting at local row i+1 in the first tile and at row 0 afterwards. */
    i2 = i+1;
    norm2 = 0.;
    for( kk = ii; kk < A.mt; ++kk ) {
        mb = min( A.mb, A.m - kk*A.mb );  /* rows in this (possibly short) tile */
        lda = BLKLDD( A, kk );            /* leading dimension of tile (kk,jj)  */
        Akj = A(kk,jj);
        for( k = i2; k < mb; ++k ) {
            norm2 += Akj[k + j*lda] * ( Akj[k + j*lda] );
        }
        i2 = 0;  /* subsequent tiles start at their first row */
    }
    /* x0 = top element of x */
    lda = BLKLDD( A, ii );
    Aij = A(ii,jj);
    x0 = Aij[i + j*lda];
    /* NOTE: this real (s) file is generated from the complex version, so   */
    /* cimagf() is applied to plain floats here and always evaluates to 0.  */
    if ( norm2 == 0. && cimagf(x0) == 0. ) {
        /* H = I */
        *tau = 0;
        *beta = x0;
    }
    else {
        /* todo: use lapack's fancy sqrt function */
        /* todo: scale vector as in lapack */
        /* beta = -sign(x0) * ||x||_2, matching the LAPACK larfg convention */
        *beta = sqrt( (x0)*(x0) + cimagf(x0)*cimagf(x0) + norm2 );
        if ( (x0) >= 0. ) {
            *beta = -(*beta);
        }
        *tau = (*beta - x0) / (*beta);
        /* todo: use zladiv or dladiv */
        scale = 1. / (x0 - *beta);
        /* x *= scale, walking the same tiles as above. The top element is */
        /* scaled too, but it is overwritten with 1 right below.           */
        i2 = i;
        for( kk = ii; kk < A.mt; ++kk ) {
            mb = min( A.mb, A.m - kk*A.mb );
            lda = BLKLDD( A, kk );
            Akj = A(kk,jj);
            for( k = i2; k < mb; ++k ) {
                Akj[k + j*lda] *= scale;
            }
            i2 = 0;
        }
    }
    /* unlike LAPACK, we explicitly store the 1 in the vector, */
    /* and return beta separately. */
    lda = BLKLDD( A, ii );
    Aij = A(ii,jj);
    Aij[i + j*lda] = 1.;
}
| 28.735714 | 82 | 0.489436 | [
"vector"
] |
d9a38e40be6e65ed678a71d0b137f688cc10d49b | 12,113 | h | C | src/secp256k1/include/secp256k1_surjectionproof.h | romanz/libwally-core | e29408ea70fc85f9efa27e13c905eb9fd1652da0 | [
"MIT"
] | null | null | null | src/secp256k1/include/secp256k1_surjectionproof.h | romanz/libwally-core | e29408ea70fc85f9efa27e13c905eb9fd1652da0 | [
"MIT"
] | null | null | null | src/secp256k1/include/secp256k1_surjectionproof.h | romanz/libwally-core | e29408ea70fc85f9efa27e13c905eb9fd1652da0 | [
"MIT"
] | null | null | null | #ifndef _SECP256K1_SURJECTIONPROOF_
#define _SECP256K1_SURJECTIONPROOF_
#include "secp256k1.h"
#include "secp256k1_rangeproof.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Maximum number of inputs that may be given in a surjection proof */
#define SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS 256
/** Number of bytes a serialized surjection proof requires given the
* number of inputs and the number of used inputs.
*/
#define SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES(n_inputs, n_used_inputs) \
(2 + (n_inputs + 7)/8 + 32 * (1 + (n_used_inputs)))
/** Maximum number of bytes a serialized surjection proof requires. */
#define SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES_MAX \
SECP256K1_SURJECTIONPROOF_SERIALIZATION_BYTES(SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS, SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS)
/** Opaque data structure that holds a parsed surjection proof
*
* The exact representation of data inside is implementation defined and not
* guaranteed to be portable between different platforms or versions. Nor is
* it guaranteed to have any particular size, nor that identical proofs
* will have identical representation. (That is, memcmp may return nonzero
* even for identical proofs.)
*
* To obtain these properties, instead use secp256k1_surjectionproof_parse
* and secp256k1_surjectionproof_serialize to encode/decode proofs into a
* well-defined format.
*
* The representation is exposed to allow creation of these objects on the
* stack; please *do not* use these internals directly.
*/
typedef struct {
#ifdef VERIFY
  /** Mark whether this proof has gone through `secp256k1_surjectionproof_initialize` */
  int initialized;
#endif
  /** Total number of input asset tags */
  size_t n_inputs;
  /** Bitmap of which input tags are used in the surjection proof
   *  (room for one bit per possible input) */
  unsigned char used_inputs[SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS / 8];
  /** Borromean signature: 32-byte e0 followed by one 32-byte scalar slot
   *  per possible input */
  unsigned char data[32 * (1 + SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS)];
} secp256k1_surjectionproof;
/** Parse a surjection proof
*
* Returns: 1 when the proof could be parsed, 0 otherwise.
* Args: ctx: a secp256k1 context object
* Out: proof: a pointer to a proof object
* In: input: a pointer to the array to parse
* inputlen: length of the array pointed to by input
*
* The proof must consist of:
* - A 2-byte little-endian total input count `n`
* - A ceil(n/8)-byte bitmap indicating which inputs are used.
* - A big-endian 32-byte borromean signature e0 value
* - `m` big-endian 32-byte borromean signature s values, where `m`
* is the number of set bits in the bitmap
*/
SECP256K1_API int secp256k1_surjectionproof_parse(
const secp256k1_context* ctx,
secp256k1_surjectionproof *proof,
const unsigned char *input,
size_t inputlen
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
/** Serialize a surjection proof
*
* Returns: 1 if enough space was available to serialize, 0 otherwise
* Args: ctx: a secp256k1 context object
* Out: output: a pointer to an array to store the serialization
* In/Out: outputlen: a pointer to an integer which is initially set to the
* size of output, and is overwritten with the written
* size.
* In: proof: a pointer to an initialized proof object
*
* See secp256k1_surjectionproof_parse for details about the encoding.
*/
SECP256K1_API int secp256k1_surjectionproof_serialize(
const secp256k1_context* ctx,
unsigned char *output,
size_t *outputlen,
const secp256k1_surjectionproof *proof
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4);
/** Data structure that holds a fixed asset tag.
*
* This data type is *not* opaque. It will always be 32 bytes of whatever
* data the API user wants to use as an asset tag. Its contents have no
* semantic meaning to libsecp whatsoever.
*/
typedef struct {
  /** Opaque 32-byte tag chosen by the API user; libsecp assigns it no
   *  semantic meaning (see the comment above). */
  unsigned char data[32];
} secp256k1_fixed_asset_tag;
/** Returns the total number of inputs a proof expects to be over.
*
* Returns: the number of inputs for the given proof
* In: ctx: pointer to a context object
* proof: a pointer to a proof object
*/
SECP256K1_API size_t secp256k1_surjectionproof_n_total_inputs(
const secp256k1_context* ctx,
const secp256k1_surjectionproof* proof
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Returns the actual number of inputs that a proof uses
*
* Returns: the number of inputs for the given proof
* In: ctx: pointer to a context object
* proof: a pointer to a proof object
*/
SECP256K1_API size_t secp256k1_surjectionproof_n_used_inputs(
const secp256k1_context* ctx,
const secp256k1_surjectionproof* proof
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Returns the total size this proof would take, in bytes, when serialized
*
* Returns: the total size
* In: ctx: pointer to a context object
* proof: a pointer to a proof object
*/
SECP256K1_API size_t secp256k1_surjectionproof_serialized_size(
const secp256k1_context* ctx,
const secp256k1_surjectionproof* proof
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2);
/** Surjection proof initialization function; decides on inputs to use
* To be used to initialize stack-allocated secp256k1_surjectionproof struct
* Returns 0: inputs could not be selected
* n: inputs were selected after n iterations of random selection
*
* In: ctx: pointer to a context object
* fixed_input_tags: fixed input tags `A_i` for all inputs. (If the fixed tag is not known,
* e.g. in a coinjoin with others' inputs, an ephemeral tag can be given;
* this won't match the output tag but might be used in the anonymity set.)
* n_input_tags: the number of entries in the fixed_input_tags array
* n_input_tags_to_use: the number of inputs to select randomly to put in the anonymity set
* fixed_output_tag: fixed output tag
* max_n_iterations: the maximum number of iterations to do before giving up. Because the
* maximum number of inputs (SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS) is
* limited to 256 the probability of giving up is smaller than
* (255/256)^(n_input_tags_to_use*max_n_iterations).
*
* random_seed32: a random seed to be used for input selection
* Out: proof: The proof whose bitvector will be initialized. In case of failure,
* the state of the proof is undefined.
* input_index: The index of the actual input that is secretly mapped to the output
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_surjectionproof_initialize(
const secp256k1_context* ctx,
secp256k1_surjectionproof* proof,
size_t *input_index,
const secp256k1_fixed_asset_tag* fixed_input_tags,
const size_t n_input_tags,
const size_t n_input_tags_to_use,
const secp256k1_fixed_asset_tag* fixed_output_tag,
const size_t n_max_iterations,
const unsigned char *random_seed32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(7);
/** Surjection proof allocation and initialization function; decides on inputs to use
* Returns 0: inputs could not be selected, or malloc failure
* n: inputs were selected after n iterations of random selection
*
* In: ctx: pointer to a context object
* proof_out_p: a pointer to a pointer to `secp256k1_surjectionproof*`.
* the newly-allocated struct pointer will be saved here.
* fixed_input_tags: fixed input tags `A_i` for all inputs. (If the fixed tag is not known,
* e.g. in a coinjoin with others' inputs, an ephemeral tag can be given;
* this won't match the output tag but might be used in the anonymity set.)
* n_input_tags: the number of entries in the fixed_input_tags array
* n_input_tags_to_use: the number of inputs to select randomly to put in the anonymity set
* fixed_output_tag: fixed output tag
* max_n_iterations: the maximum number of iterations to do before giving up. Because the
* maximum number of inputs (SECP256K1_SURJECTIONPROOF_MAX_N_INPUTS) is
* limited to 256 the probability of giving up is smaller than
* (255/256)^(n_input_tags_to_use*max_n_iterations).
*
* random_seed32: a random seed to be used for input selection
* Out: proof_out_p: The pointer to newly-allocated proof whose bitvector will be initialized.
* In case of failure, the pointer will be NULL.
* input_index: The index of the actual input that is secretly mapped to the output
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_surjectionproof_allocate_initialized(
const secp256k1_context* ctx,
secp256k1_surjectionproof** proof_out_p,
size_t *input_index,
const secp256k1_fixed_asset_tag* fixed_input_tags,
const size_t n_input_tags,
const size_t n_input_tags_to_use,
const secp256k1_fixed_asset_tag* fixed_output_tag,
const size_t n_max_iterations,
const unsigned char *random_seed32
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(7);
/** Surjection proof destroy function
* deallocates the struct that was allocated with secp256k1_surjectionproof_allocate_initialized
*
* In: proof: pointer to secp256k1_surjectionproof struct
*/
SECP256K1_API void secp256k1_surjectionproof_destroy(
secp256k1_surjectionproof* proof
) SECP256K1_ARG_NONNULL(1);
/** Surjection proof generation function
* Returns 0: proof could not be created
* 1: proof was successfully created
*
* In: ctx: pointer to a context object, initialized for signing and verification
* ephemeral_input_tags: the ephemeral asset tag of all inputs
* n_ephemeral_input_tags: the number of entries in the ephemeral_input_tags array
* ephemeral_output_tag: the ephemeral asset tag of the output
* input_index: the index of the input that actually maps to the output
* input_blinding_key: the blinding key of the input
* output_blinding_key: the blinding key of the output
* In/Out: proof: The produced surjection proof. Must have already gone through `secp256k1_surjectionproof_initialize`
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_surjectionproof_generate(
const secp256k1_context* ctx,
secp256k1_surjectionproof* proof,
const secp256k1_generator* ephemeral_input_tags,
size_t n_ephemeral_input_tags,
const secp256k1_generator* ephemeral_output_tag,
size_t input_index,
const unsigned char *input_blinding_key,
const unsigned char *output_blinding_key
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(7) SECP256K1_ARG_NONNULL(8);
/** Surjection proof verification function
* Returns 0: proof was invalid
* 1: proof was valid
*
* In: ctx: pointer to a context object, initialized for signing and verification
* proof: proof to be verified
* ephemeral_input_tags: the ephemeral asset tag of all inputs
* n_ephemeral_input_tags: the number of entries in the ephemeral_input_tags array
* ephemeral_output_tag: the ephemeral asset tag of the output
*/
SECP256K1_API int secp256k1_surjectionproof_verify(
const secp256k1_context* ctx,
const secp256k1_surjectionproof* proof,
const secp256k1_generator* ephemeral_input_tags,
size_t n_ephemeral_input_tags,
const secp256k1_generator* ephemeral_output_tag
) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5);
#ifdef __cplusplus
}
#endif
#endif
| 46.057034 | 152 | 0.738133 | [
"object"
] |
d9a48c8418abf2c01f57f9d23ee84d2f17a928f6 | 2,028 | h | C | analytical_engine/core/utils/trivial_tensor.h | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 1,521 | 2020-10-28T03:20:24.000Z | 2022-03-31T12:42:51.000Z | analytical_engine/core/utils/trivial_tensor.h | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 850 | 2020-12-15T03:17:32.000Z | 2022-03-31T11:40:13.000Z | analytical_engine/core/utils/trivial_tensor.h | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 180 | 2020-11-10T03:43:21.000Z | 2022-03-28T11:13:31.000Z | /** Copyright 2020 Alibaba Group Holding Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANALYTICAL_ENGINE_CORE_UTILS_TRIVIAL_TENSOR_H_
#define ANALYTICAL_ENGINE_CORE_UTILS_TRIVIAL_TENSOR_H_
#include <algorithm>
#include <vector>
namespace gs {
/**
* @brief trivial_tensor_t is a naive implementation of tensor data structure.
* It seems that xtensor is too heavy to us currently, and impose a lot of
* unnecessary dependencies.
*
* @tparam T the data type to hold by the tensor
*/
template <typename T>
struct trivial_tensor_t {
 public:
  trivial_tensor_t() : size_(0), data_(nullptr) {}

  ~trivial_tensor_t() {
    if (data_) {
      delete[] data_;
      data_ = nullptr;
    }
  }

  // Raw access to the flat element buffer (nullptr before the first resize).
  T* data() { return data_; }

  const T* data() const { return data_; }

  // Assigns `value` to every element currently held.
  void fill(T const& value) { std::fill_n(data_, size_, value); }

  // Dimensions of the tensor, as passed to the last resize().
  std::vector<size_t> shape() const { return shape_; }

  // Total number of elements (product of the shape's dimensions).
  size_t size() const { return size_; }

  // Changes the tensor's shape. Elements are preserved up to the smaller of
  // the old and new flat sizes; for trivial T, newly added slots are left
  // uninitialized. An empty shape yields a zero-sized tensor.
  void resize(std::vector<size_t> const& shape) {
    size_t flat_size = shape.empty() ? 0 : 1;
    for (auto dim_size : shape) {
      flat_size *= dim_size;
    }
    this->shape_ = shape;
    if (flat_size != size_) {
      T* new_data = new T[flat_size];
      // Bug fix: copy only as many elements as fit in the new buffer. The
      // previous code always copied `size_` elements, writing past the end
      // of `new_data` whenever the tensor shrank.
      if (data_) {
        std::copy(data_, data_ + std::min(size_, flat_size), new_data);
      }
      size_ = flat_size;
      if (data_) {
        delete[] data_;
      }
      data_ = new_data;
    }
  }

 private:
  // NOTE(review): this struct owns `data_` but copy construction/assignment
  // are not disabled, so copying an instance would double-delete. Left
  // unchanged here to preserve the public interface — confirm callers never
  // copy instances.
  size_t size_;                // flat element count
  std::vector<size_t> shape_;  // per-dimension sizes
  T* data_;                    // owned buffer of size_ elements
};
} // namespace gs
#endif // ANALYTICAL_ENGINE_CORE_UTILS_TRIVIAL_TENSOR_H_
| 27.405405 | 78 | 0.682446 | [
"shape",
"vector"
] |
d9a6a7341f6019adebb457bc312b69dd50fb0f5b | 3,143 | h | C | third_party/gecko-2/win32/include/nsIIDBDatabaseException.h | akiellor/selenium | 239490f9c5f3c7e7d4082bbe53c86eb5158d70a3 | [
"Apache-2.0"
] | 1 | 2018-08-24T18:01:34.000Z | 2018-08-24T18:01:34.000Z | third_party/gecko-2/win32/include/nsIIDBDatabaseException.h | akiellor/selenium | 239490f9c5f3c7e7d4082bbe53c86eb5158d70a3 | [
"Apache-2.0"
] | 1 | 2021-10-18T12:23:37.000Z | 2021-10-18T12:23:37.000Z | third_party/gecko-2/win32/include/nsIIDBDatabaseException.h | akiellor/selenium | 239490f9c5f3c7e7d4082bbe53c86eb5158d70a3 | [
"Apache-2.0"
] | 2 | 2018-04-30T21:35:30.000Z | 2021-05-14T08:11:46.000Z | /*
* DO NOT EDIT. THIS FILE IS GENERATED FROM e:/builds/moz2_slave/rel-2.0-xr-w32-bld/build/dom/indexedDB/nsIIDBDatabaseException.idl
*/
#ifndef __gen_nsIIDBDatabaseException_h__
#define __gen_nsIIDBDatabaseException_h__
#ifndef __gen_nsISupports_h__
#include "nsISupports.h"
#endif
/* For IDL files that don't want to include root IDL files. */
#ifndef NS_NO_VTABLE
#define NS_NO_VTABLE
#endif
/* starting interface: nsIIDBDatabaseException */
#define NS_IIDBDATABASEEXCEPTION_IID_STR "2f182bf1-1542-47fe-b2f7-4b1741b5283c"
#define NS_IIDBDATABASEEXCEPTION_IID \
{0x2f182bf1, 0x1542, 0x47fe, \
{ 0xb2, 0xf7, 0x4b, 0x17, 0x41, 0xb5, 0x28, 0x3c }}
// XPCOM interface describing an IndexedDB database exception. This header is
// generated from nsIIDBDatabaseException.idl (see the file banner): change the
// IDL and regenerate rather than editing the values below.
class NS_NO_VTABLE NS_SCRIPTABLE nsIIDBDatabaseException : public nsISupports {
 public:
  NS_DECLARE_STATIC_IID_ACCESSOR(NS_IIDBDATABASEEXCEPTION_IID)
  // Numeric error codes exposed via GetCode(); values are fixed by the IDL.
  enum { UNKNOWN_ERR = 1U };
  enum { NON_TRANSIENT_ERR = 2U };
  enum { NOT_FOUND_ERR = 3U };
  enum { CONSTRAINT_ERR = 4U };
  enum { DATA_ERR = 5U };
  enum { NOT_ALLOWED_ERR = 6U };
  enum { TRANSACTION_INACTIVE_ERR = 7U };
  enum { ABORT_ERR = 8U };
  enum { READ_ONLY_ERR = 9U };
  enum { RECOVERABLE_ERR = 10U };
  enum { TRANSIENT_ERR = 11U };
  enum { TIMEOUT_ERR = 12U };
  enum { DEADLOCK_ERR = 13U };
  /* readonly attribute unsigned short code; */
  NS_SCRIPTABLE NS_IMETHOD GetCode(PRUint16 *aCode) = 0;
};
NS_DEFINE_STATIC_IID_ACCESSOR(nsIIDBDatabaseException, NS_IIDBDATABASEEXCEPTION_IID)
/* Use this macro when declaring classes that implement this interface. */
#define NS_DECL_NSIIDBDATABASEEXCEPTION \
NS_SCRIPTABLE NS_IMETHOD GetCode(PRUint16 *aCode);
/* Use this macro to declare functions that forward the behavior of this interface to another object. */
#define NS_FORWARD_NSIIDBDATABASEEXCEPTION(_to) \
NS_SCRIPTABLE NS_IMETHOD GetCode(PRUint16 *aCode) { return _to GetCode(aCode); }
/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. */
#define NS_FORWARD_SAFE_NSIIDBDATABASEEXCEPTION(_to) \
NS_SCRIPTABLE NS_IMETHOD GetCode(PRUint16 *aCode) { return !_to ? NS_ERROR_NULL_POINTER : _to->GetCode(aCode); }
#if 0
/* Use the code below as a template for the implementation class for this interface. */
/* Header file */
class nsIDBDatabaseException : public nsIIDBDatabaseException
{
public:
NS_DECL_ISUPPORTS
NS_DECL_NSIIDBDATABASEEXCEPTION
nsIDBDatabaseException();
private:
~nsIDBDatabaseException();
protected:
/* additional members */
};
/* Implementation file */
NS_IMPL_ISUPPORTS1(nsIDBDatabaseException, nsIIDBDatabaseException)
nsIDBDatabaseException::nsIDBDatabaseException()
{
/* member initializers and constructor code */
}
nsIDBDatabaseException::~nsIDBDatabaseException()
{
/* destructor code */
}
/* readonly attribute unsigned short code; */
NS_IMETHODIMP nsIDBDatabaseException::GetCode(PRUint16 *aCode)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
/* End of implementation class template. */
#endif
#endif /* __gen_nsIIDBDatabaseException_h__ */
| 26.635593 | 133 | 0.732103 | [
"object"
] |
d9b1c3156380e2c3648b0c14e7fcb56637ef04f8 | 24,047 | h | C | applications/ULFapplication/custom_utilities/pfem_gid_io.h | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/ULFapplication/custom_utilities/pfem_gid_io.h | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/ULFapplication/custom_utilities/pfem_gid_io.h | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: anonymous $
// Date: $Date: 2009-01-15 14:50:24 $
// Revision: $Revision: 1.8 $
//
//
#if !defined(KRATOS_PFEM_GID_IO_BASE_H_INCLUDED)
#define KRATOS_PFEM_GID_IO_BASE_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstddef>
// Project includes
#include <pybind11/pybind11.h>
#include "includes/define.h"
#include "includes/define_python.h"
//#include "includes/datafile_io.h"
#include "includes/gid_io.h"
namespace Kratos
{
/**
* Type definitions
*/
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef GeometryData::IntegrationMethod IntegrationMethodType;
typedef GeometryData::KratosGeometryFamily KratosGeometryFamily;
/**
* Auxiliary class to store gauss point containers and perform result printing
* on gauss points
*/
class PfemGidGaussPointsContainer
{
public:
    /// Constructor: stores the GiD gauss point set name, the Kratos geometry
    /// family / GiD element type pair this container accepts, the number of
    /// integration points per entity, and the list of gauss point indices that
    /// are actually written (in order) by the PrintResults overloads.
    PfemGidGaussPointsContainer( const char* gp_title, KratosGeometryFamily geometryFamily,
                                 GiD_ElementType gid_element_type,
                                 unsigned int number_of_integration_points,
                                 std::vector<int> index_container )
        :mGPTitle(gp_title),mKratosElementFamily(geometryFamily),
         mGidElementFamily(gid_element_type), mSize(number_of_integration_points),
         mIndexContainer(index_container) {}
bool AddElement( const ModelPart::ElementsContainerType::iterator pElemIt )
{
KRATOS_TRY
if( pElemIt->GetGeometry().GetGeometryFamily() == mKratosElementFamily
&& pElemIt->GetGeometry().IntegrationPoints(
pElemIt->GetIntegrationMethod() ).size() == mSize )
{
mMeshElements.push_back( *(pElemIt.base() ) );
return true;
}
else return false;
KRATOS_CATCH("")
}
bool AddCondition( const ModelPart::ConditionsContainerType::iterator pCondIt )
{
KRATOS_TRY
if( pCondIt->GetGeometry().GetGeometryFamily() == mKratosElementFamily
&& pCondIt->GetGeometry().IntegrationPoints().size() == mSize )
{
mMeshConditions.push_back( *(pCondIt.base() ) );
return true;
}
else return false;
KRATOS_CATCH("")
}
    /**
     * Writes the scalar variable rVariable, sampled at the integration points
     * of every stored element/condition, as a GiD "OnGaussPoints" result under
     * the time/step tag SolutionTag. Per entity, values are obtained via
     * GetValueOnIntegrationPoints and only the gauss points listed in
     * mIndexContainer are written, in that order.
     * @param ResultFile   handle of the (already opened) GiD result file
     * @param rVariable    scalar Kratos variable to print
     * @param r_model_part model part supplying the ProcessInfo for the query
     * @param SolutionTag  GiD analysis/time tag for this result block
     * @param value_index  unused in this overload
     */
    void PrintResults( GiD_FILE ResultFile, Variable<double> rVariable, ModelPart& r_model_part,
                       double SolutionTag, unsigned int value_index = 0 )
    {
        // Skip entirely when the container is empty: no gauss point header or
        // result block must be emitted in that case.
        if( mMeshElements.size() != 0 || mMeshConditions.size() != 0 )
        {
            WriteGaussPoints(ResultFile);
            GiD_fBeginResult(ResultFile, (char*)(rVariable.Name()).c_str(), "Kratos", SolutionTag,
                             GiD_Scalar, GiD_OnGaussPoints, mGPTitle, NULL, 0, NULL );
            std::vector<double> ValuesOnIntPoint(mSize);
            if( mMeshElements.size() != 0 )
            {
                for( ModelPart::ElementsContainerType::iterator it = mMeshElements.begin();
                        it != mMeshElements.end(); it++ )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    // mIndexContainer maps the GiD output order to positions in
                    // the per-entity value list.
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        GiD_fWriteScalar(ResultFile, it->Id(), ValuesOnIntPoint[index] );
                    }
                }
            }
            if( mMeshConditions.size() != 0 )
            {
                for( ModelPart::ConditionsContainerType::iterator it = mMeshConditions.begin();
                        it != mMeshConditions.end(); it++ )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        GiD_fWriteScalar(ResultFile, it->Id(), ValuesOnIntPoint[index] );
                    }
                }
            }
            // Every GiD_fBeginResult must be paired with GiD_fEndResult.
            GiD_fEndResult(ResultFile);
        }
    }
    /**
     * Writes the Vector variable rVariable at the integration points of every
     * stored element/condition as a GiD "OnGaussPoints" vector result.
     * Only 3-component vectors are written.
     * @param ResultFile   handle of the (already opened) GiD result file
     * @param rVariable    vector Kratos variable to print
     * @param r_model_part model part supplying the ProcessInfo for the query
     * @param SolutionTag  GiD analysis/time tag for this result block
     * @param value_index  unused in this overload
     */
    void PrintResults( GiD_FILE ResultFile, Variable<Vector> rVariable, ModelPart& r_model_part,
                       double SolutionTag, unsigned int value_index = 0 )
    {
        if( mMeshElements.size() != 0 || mMeshConditions.size() != 0 )
        {
            WriteGaussPoints(ResultFile);
            GiD_fBeginResult(ResultFile, (char *)(rVariable.Name()).c_str(), "Kratos", SolutionTag,
                             GiD_Vector, GiD_OnGaussPoints, mGPTitle, NULL, 0, NULL );
            std::vector<Vector> ValuesOnIntPoint(mSize);
            if( mMeshElements.size() != 0 )
            {
                for( ModelPart::ElementsContainerType::iterator it = mMeshElements.begin();
                        it != mMeshElements.end(); ++it )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        // NOTE(review): the component count is checked on entry
                        // 0 only, and vectors that are not 3-component are
                        // silently skipped — confirm this is intended (e.g. for
                        // 2D results).
                        if( ValuesOnIntPoint[0].size() == 3 )
                            GiD_fWriteVector(ResultFile, it->Id(), ValuesOnIntPoint[index][0],
                                             ValuesOnIntPoint[index][1], ValuesOnIntPoint[index][2] );
                    }
                }
            }
            if( mMeshConditions.size() != 0 )
            {
                for( ModelPart::ConditionsContainerType::iterator it = mMeshConditions.begin();
                        it != mMeshConditions.end(); it++ )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        if( ValuesOnIntPoint[0].size() == 3 )
                            GiD_fWriteVector(ResultFile, it->Id(), ValuesOnIntPoint[index][0],
                                             ValuesOnIntPoint[index][1], ValuesOnIntPoint[index][2] );
                    }
                }
            }
            GiD_fEndResult(ResultFile);
        }
    }
    /**
     * Writes the Matrix variable rVariable at the integration points of every
     * stored element/condition as a GiD "OnGaussPoints" matrix result.
     * Two layouts are handled per gauss point value:
     *  - 3x3 matrices: the six components (0,0),(1,1),(2,2),(0,1),(1,2),(0,2)
     *    are written (the layout assumes symmetry — only one triangle is read);
     *  - 1x6 row vectors: the six entries are forwarded as-is.
     * Any other size is silently skipped.
     * @param ResultFile   handle of the (already opened) GiD result file
     * @param rVariable    matrix Kratos variable to print
     * @param r_model_part model part supplying the ProcessInfo for the query
     * @param SolutionTag  GiD analysis/time tag for this result block
     * @param value_index  unused in this overload
     */
    void PrintResults( GiD_FILE ResultFile, Variable<Matrix> rVariable, ModelPart& r_model_part,
                       double SolutionTag, unsigned int value_index = 0 )
    {
        if( mMeshElements.size() != 0 || mMeshConditions.size() != 0 )
        {
            WriteGaussPoints(ResultFile);
            GiD_fBeginResult( ResultFile, (char *)(rVariable.Name()).c_str(), "Kratos", SolutionTag,
                              GiD_Matrix, GiD_OnGaussPoints, mGPTitle, NULL, 0, NULL );
            std::vector<Matrix> ValuesOnIntPoint(mSize);
            if( mMeshElements.size() != 0 )
            {
                for( ModelPart::ElementsContainerType::iterator it = mMeshElements.begin();
                        it != mMeshElements.end(); ++it )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        // Full 3x3 matrix: write the six independent components.
                        if(ValuesOnIntPoint[index].size1() ==3
                                && ValuesOnIntPoint[index].size2() ==3)
                            GiD_fWrite3DMatrix( ResultFile, it->Id(), ValuesOnIntPoint[index](0,0),
                                                ValuesOnIntPoint[index](1,1), ValuesOnIntPoint[index](2,2),
                                                ValuesOnIntPoint[index](0,1), ValuesOnIntPoint[index](1,2),
                                                ValuesOnIntPoint[index](0,2) );
                        // 1x6 row: components forwarded in stored order.
                        if(ValuesOnIntPoint[index].size1() ==1
                                && ValuesOnIntPoint[index].size2() ==6)
                            GiD_fWrite3DMatrix( ResultFile, it->Id(), ValuesOnIntPoint[index](0,0),
                                                ValuesOnIntPoint[index](0,1), ValuesOnIntPoint[index](0,2),
                                                ValuesOnIntPoint[index](0,3), ValuesOnIntPoint[index](0,4),
                                                ValuesOnIntPoint[index](0,5) );
                    }
                }
            }
            if( mMeshConditions.size() != 0 )
            {
                for( ModelPart::ConditionsContainerType::iterator it = mMeshConditions.begin();
                        it != mMeshConditions.end(); it++ )
                {
                    it->GetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint,
                                                     r_model_part.GetProcessInfo() );
                    for(unsigned int i=0; i<mIndexContainer.size(); i++)
                    {
                        int index = mIndexContainer[i];
                        if(ValuesOnIntPoint[index].size1() ==3
                                && ValuesOnIntPoint[index].size2() ==3)
                            GiD_fWrite3DMatrix( ResultFile, it->Id(), ValuesOnIntPoint[index](0,0),
                                                ValuesOnIntPoint[index](1,1), ValuesOnIntPoint[index](2,2),
                                                ValuesOnIntPoint[index](0,1), ValuesOnIntPoint[index](1,2),
                                                ValuesOnIntPoint[index](0,2) );
                        if(ValuesOnIntPoint[index].size1() ==1
                                && ValuesOnIntPoint[index].size2() ==6)
                            GiD_fWrite3DMatrix( ResultFile, it->Id(), ValuesOnIntPoint[index](0,0),
                                                ValuesOnIntPoint[index](0,1), ValuesOnIntPoint[index](0,2),
                                                ValuesOnIntPoint[index](0,3), ValuesOnIntPoint[index](0,4),
                                                ValuesOnIntPoint[index](0,5) );
                    }
                }
            }
            GiD_fEndResult(ResultFile);
        }
    }
/// Overload for 3-component (array_1d) variables. Intentionally a no-op:
/// such results are not written on gauss points by this container.
/// NOTE(review): looks deliberate — confirm no caller expects output here.
void PrintResults( GiD_FILE ResultFile, Variable<array_1d<double, 3> > rVariable, ModelPart& r_model_part,
                   double SolutionTag, int value_index = 0 )
{
}
/// Declares the gauss-point set that the PrintResults overloads reference.
/// Two tetrahedral rules get explicit natural coordinates; every other
/// element family falls back to GiD's internal default point placement.
void WriteGaussPoints(GiD_FILE ResultFile)
{
    //setting up gauss points
    if( mGidElementFamily == GiD_Tetrahedra && mSize == 5 )
    {
        // Explicit 4-point rule for linear tetrahedra.
        GiD_fBeginGaussPoint( ResultFile, mGPTitle, GiD_Tetrahedra, NULL, 4, 0, 0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/6.0, 1.0/6.0, 1.0/6.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/2.0, 1.0/6.0, 1.0/6.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/6.0, 1.0/2.0, 1.0/6.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/6.0, 1.0/6.0, 1.0/2.0 );
        GiD_fEndGaussPoint(ResultFile);
    }
    else if( mGidElementFamily == GiD_Tetrahedra && mSize == 10 )
    {
        // NOTE(review): this branch registers the set under the literal name
        // "tet10_element_gp" while the result blocks reference mGPTitle —
        // confirm mGPTitle equals "tet10_element_gp" for 10-node tetrahedra,
        // otherwise the results point at a non-existent gauss-point set.
        GiD_fBeginGaussPoint(ResultFile, "tet10_element_gp", GiD_Tetrahedra, NULL, 4, 0, 0);
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/14.0, 1.0/14.0, 1.0/14.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 11.0/14.0, 1.0/14.0, 1.0/14.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/14.0, 11.0/14.0, 1.0/14.0 );
        GiD_fWriteGaussPoint3D( ResultFile, 1.0/14.0, 1.0/14.0, 11.0/14.0 );
        GiD_fEndGaussPoint(ResultFile);
    }
    else
    {
        // Default: let GiD place mSize "natural" gauss points itself
        // (last argument 1 selects internal coordinates).
        GiD_fBeginGaussPoint(ResultFile, mGPTitle, mGidElementFamily, NULL,
                             mSize, 0, 1);
        GiD_fEndGaussPoint(ResultFile);
    }
}
/// Drops every collected entity so this container can be refilled for the
/// next output step. The two containers are independent, so the clearing
/// order is irrelevant.
void Reset()
{
    mMeshConditions.clear();
    mMeshElements.clear();
}
protected:
    ///member variables
    const char* mGPTitle;                               // name of the gauss-point set referenced by the result blocks
    KratosGeometryFamily mKratosElementFamily;          // Kratos-side geometry family handled by this container
    GiD_ElementType mGidElementFamily;                  // corresponding GiD element family
    unsigned int mSize;                                 // number of integration points per entity
    std::vector<int> mIndexContainer;                   // apparently a GiD->Kratos gauss-point index remap — TODO confirm
    ModelPart::ElementsContainerType mMeshElements;     // elements collected for this family
    ModelPart::ConditionsContainerType mMeshConditions; // conditions collected for this family
};//class PfemGidGaussPointsContainer
/**
* Auxiliary class to store meshes of different element types and to
* write these meshes to an output file
*/
class PfemGidMeshContainer
{
public:
    ///Constructor
    PfemGidMeshContainer( GeometryData::KratosGeometryType geometryType,
                          GiD_ElementType elementType, const char* mesh_title )
        :mGeometryType(geometryType), mGidElementType(elementType), mMeshTitle(mesh_title) {}

    /// Adds the element to this container if its geometry matches
    /// mGeometryType; its nodes are collected too (duplicates are removed
    /// later in FinalizeMeshCreation()).
    /// @return true if the element was accepted, false otherwise.
    bool AddElement( const ModelPart::ElementsContainerType::iterator pElemIt )
    {
        KRATOS_TRY
        if( pElemIt->GetGeometry().GetGeometryType() == mGeometryType )
        {
            // Store the shared pointer held by the container, not a copy.
            mMeshElements.push_back( *(pElemIt.base() ) );
            Geometry<Node<3> >&geom = pElemIt->GetGeometry();
            for( Element::GeometryType::iterator it = geom.begin(); it != geom.end(); it++)
            {
                mMeshNodes.push_back( *(it.base() ) );
            }
            return true;
        }
        else
            return false;
        KRATOS_CATCH("")
    }

    /// Same as AddElement(), but for conditions.
    bool AddCondition(const ModelPart::ConditionsContainerType::iterator pCondIt)
    {
        KRATOS_TRY
        if( pCondIt->GetGeometry().GetGeometryType() == mGeometryType )
        {
            mMeshConditions.push_back( *(pCondIt.base() ) );
            Geometry<Node<3> >&geom = pCondIt->GetGeometry();
            for( Condition::GeometryType::iterator it = geom.begin(); it != geom.end(); it++)
            {
                mMeshNodes.push_back( *(it.base() ) );
            }
            return true;
        }
        else
            return false;
        KRATOS_CATCH("")
    }

    /// Removes duplicate node entries collected by AddElement/AddCondition.
    /// NOTE(review): Unique() only runs when elements were added — a
    /// container filled exclusively via AddCondition() keeps duplicated
    /// nodes. Confirm this is intended.
    void FinalizeMeshCreation()
    {
        if( mMeshElements.size() != 0 )
        {
            mMeshNodes.Unique();
        }
    }

    /// Writes up to three GiD meshes: the volume mesh of the stored
    /// elements (nodes included), then two surface meshes built from the
    /// stored conditions — one for structure faces, one for fluid faces.
    /// @param deformed  when true, current coordinates X/Y/Z are written;
    ///                  otherwise the initial coordinates X0/Y0/Z0.
    void WriteMesh(GiD_FILE MeshFile, bool deformed)
    {
        KRATOS_TRY
        if( mMeshElements.size() != 0 )
        {
            if( mMeshElements.begin()->GetGeometry().WorkingSpaceDimension() == 2 )
            {
                std::cout << "writing a 2D mesh" << std::endl;
                GiD_fBeginMesh(MeshFile, "Volume mesh", GiD_2D, mGidElementType,
                               mMeshElements.begin()->GetGeometry().size() );
            }
            else if( mMeshElements.begin()->GetGeometry().WorkingSpaceDimension() == 3 )
            {
                std::cout << "writing a 3D mesh" << std::endl;
                GiD_fBeginMesh(MeshFile, "Volume mesh", GiD_3D, mGidElementType,
                               mMeshElements.begin()->GetGeometry().size() );
            }
            else
                KRATOS_THROW_ERROR(std::logic_error,"check working space dimension of model","");
            //printing nodes
            GiD_fBeginCoordinates(MeshFile);
            for( ModelPart::NodesContainerType::iterator it = mMeshNodes.begin();
                    it != mMeshNodes.end(); ++it )
            {
                if( deformed )
                    GiD_fWriteCoordinates(MeshFile, (it)->Id(), (it)->X(),
                                          (it)->Y(), (it)->Z());
                else
                    GiD_fWriteCoordinates(MeshFile, (it)->Id(), (it)->X0(),
                                          (it)->Y0(), (it)->Z0());
            }
            GiD_fEndCoordinates(MeshFile);
            //printing elements
            GiD_fBeginElements(MeshFile);
            // One extra slot: the last entry carries the material/colour id.
            int* nodes_id = new int[mMeshElements.begin()->GetGeometry().size()+1];
            for( ModelPart::ElementsContainerType::iterator it = mMeshElements.begin();
                    it != mMeshElements.end(); ++it )
            {
                for( unsigned int i=0; i<(it)->GetGeometry().size(); i++ )
                    nodes_id[i] = (it)->GetGeometry()[i].Id();
                //setting the color for either fluid or solid or contact element
                int n_fl=0;
                int n_str=0;
                int n_interf=0;
                int color=13;
                // Count per-node flags to classify the whole element.
                for ( unsigned int i=0; i<(it)->GetGeometry().size(); i++ )
                {
                    n_fl += int(it->GetGeometry()[i].FastGetSolutionStepValue(IS_FLUID));
                    n_str+= int(it->GetGeometry()[i].FastGetSolutionStepValue(IS_STRUCTURE));
                    n_interf+= int(it->GetGeometry()[i].FastGetSolutionStepValue(IS_INTERFACE));
                }
                // NOTE(review): the colour codes (13 default, 4 fluid,
                // 14 structure, 5 interface) and the magic thresholds 30/40
                // are undocumented — confirm against the GiD material table.
                if (n_fl==int((it)->GetGeometry().size()) && n_interf!=30)// && n_str!= (it)->GetGeometry().size() && n_interf!=30)
                {
                    color=4;
                }
                if (n_str==int((it)->GetGeometry().size()))// && n_fl!=(it)->GetGeometry().size())
                {
                    color=14;
                }
                if (n_interf==40)
                {
                    color=5;
                }
                nodes_id[(it)->GetGeometry().size()]= color;
                GiD_fWriteElementMat(MeshFile, (it)->Id(), nodes_id);
            }
            delete [] nodes_id;
            GiD_fEndElements(MeshFile);
            GiD_fEndMesh(MeshFile);
        }
        // First condition pass: surface mesh of structure faces.
        if( mMeshConditions.size() != 0 )
        {
            KRATOS_WATCH( mMeshConditions.begin()->GetGeometry().WorkingSpaceDimension() )
            if( mMeshConditions.begin()->GetGeometry().WorkingSpaceDimension() == 3 )
            {
                std::cout << "writing a 3D mesh of the faces" << std::endl;
                GiD_fBeginMesh(MeshFile, "Surface Structure Mesh", GiD_3D, GiD_Triangle, 3);
            }
            else
                KRATOS_THROW_ERROR(std::logic_error,"Check your space dimensions","");
            //printing nodes
            // Empty coordinates section: presumably the nodes were already
            // written with the volume mesh above — TODO confirm.
            GiD_fBeginCoordinates(MeshFile);
            GiD_fEndCoordinates(MeshFile);
            //printing elements
            GiD_fBeginElements(MeshFile);
            //for every face of tetrahedron we create a list of its nodes
            // NOTE(review): fixed size 4 (3 nodes + material) assumes
            // triangular faces; a larger condition geometry would overflow.
            int* nodes_id = new int[4];
            for( ModelPart::ConditionsContainerType::iterator it = mMeshConditions.begin();
                    it != mMeshConditions.end(); ++it )
            {
                for( unsigned int i=0; i<(it)->GetGeometry().size(); i++ )
                    nodes_id[i] = (it)->GetGeometry()[i].Id();
                int n_str=0;
                int n_free_surf=0;
                for (unsigned int i=0; i<3; i++)
                {
                    n_free_surf+=int((it)->GetGeometry()[i].FastGetSolutionStepValue(IS_FREE_SURFACE));
                    n_str+=int((it)->GetGeometry()[i].FastGetSolutionStepValue(IS_STRUCTURE));
                }
                // Only faces whose nodes are all structural are written.
                if (n_str==int(it->GetGeometry().size()))// && n_free_surf!=it->GetGeometry().size())
                {
                    nodes_id[3]=3;
                    GiD_fWriteElementMat(MeshFile, (it)->Id(), nodes_id);
                }
            }
            delete [] nodes_id;
            GiD_fEndElements(MeshFile);
            GiD_fEndMesh(MeshFile);
        }
        // Second condition pass: surface mesh of fluid faces.
        if( mMeshConditions.size() != 0 )
        {
            KRATOS_WATCH( mMeshConditions.begin()->GetGeometry().WorkingSpaceDimension() )
            if( mMeshConditions.begin()->GetGeometry().WorkingSpaceDimension() == 3 )
            {
                std::cout << "writing a 3D mesh of the faces" << std::endl;
                GiD_fBeginMesh(MeshFile, "Surface Fluid Mesh", GiD_3D, GiD_Triangle, 3);
            }
            else
                KRATOS_THROW_ERROR(std::logic_error,"Check your space dimensions","");
            //now writing the fluid surface mesh
            //printing nodes
            GiD_fBeginCoordinates(MeshFile);
            GiD_fEndCoordinates(MeshFile);
            //printing elements
            GiD_fBeginElements(MeshFile);
            //for every face of tetrahedron we create a list of its nodes
            //int* nodes_id = new int[4];
            int* nodes_id = new int[4];
            for( ModelPart::ConditionsContainerType::iterator it = mMeshConditions.begin();
                    it != mMeshConditions.end(); ++it )
            {
                for( unsigned int i=0; i<(it)->GetGeometry().size(); i++ )
                    nodes_id[i] = (it)->GetGeometry()[i].Id();
                int n_fl=0;
                int n_str=0;
                for (int i=0; i<3; i++)
                {
                    n_str+=int((it)->GetGeometry()[i].FastGetSolutionStepValue(IS_STRUCTURE));
                    n_fl+=int((it)->GetGeometry()[i].FastGetSolutionStepValue(IS_FLUID));
                }
                //if (n_free_surf==it->GetGeometry().size())
                //    nodes_id[3]=1;
                if (n_fl==int(it->GetGeometry().size()))
                {
                    //the color of water
                    nodes_id[3]=6;
                    GiD_fWriteElementMat(MeshFile, (it)->Id(), nodes_id);
                }
            }
            delete [] nodes_id;
            GiD_fEndElements(MeshFile);
            GiD_fEndMesh(MeshFile);
        }
        KRATOS_CATCH("")
    }

    /// Clears all stored nodes, elements and conditions.
    void Reset()
    {
        mMeshNodes.clear();
        mMeshElements.clear();
        mMeshConditions.clear();
    }

    /// Returns a copy of the collected node container
    /// (unique after FinalizeMeshCreation()).
    ModelPart::NodesContainerType GetMeshNodes()
    {
        return mMeshNodes;
    }

private:
    ///member variables
    GeometryData::KratosGeometryType mGeometryType;     // geometry type accepted by this container
    GiD_ElementType mGidElementType;                    // GiD element type used when writing the volume mesh
    ModelPart::NodesContainerType mMeshNodes;           // nodes of all accepted entities
    ModelPart::ElementsContainerType mMeshElements;     // accepted elements
    ModelPart::ConditionsContainerType mMeshConditions; // accepted conditions
    const char* mMeshTitle;                             // NOTE(review): stored but unused — mesh names are hard-coded in WriteMesh()
};//class PfemGidMeshContainer
}// namespace Kratos.
#endif // KRATOS_PFEM_GID_IO_BASE_H_INCLUDED defined
| 41.388985 | 131 | 0.531376 | [
"mesh",
"geometry",
"vector",
"model",
"3d",
"solid"
] |
d9b598528cd26b67a7268c60b5710462f9732976 | 4,324 | h | C | build-viewer-qml/include/buildfetcher.h | hbirchtree/coffeecutie-build-daemon | f29ce65896822d4f1358fee5b20c25e54ec1864f | [
"MIT"
] | null | null | null | build-viewer-qml/include/buildfetcher.h | hbirchtree/coffeecutie-build-daemon | f29ce65896822d4f1358fee5b20c25e54ec1864f | [
"MIT"
] | 2 | 2016-06-06T19:31:16.000Z | 2017-01-30T12:46:30.000Z | build-viewer-qml/include/buildfetcher.h | hbirchtree/coffeecutie-build-daemon | f29ce65896822d4f1358fee5b20c25e54ec1864f | [
"MIT"
] | null | null | null | #include <QObject>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonArray>
#include <QStandardItemModel>
#include <QtNetwork/QNetworkAccessManager>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork/QNetworkReply>
/**
 * Item model that fetches build logs from a build-daemon REST endpoint
 * (http://<server>:<port>/rest/v1/all) and exposes one row per build to QML.
 *
 * The "fetching" property is true while a request is in flight (and before
 * the first update() completes); rows carry the custom roles in BuildRoles.
 */
class BuildInformationFetcher : public QStandardItemModel
{
    Q_OBJECT
    Q_PROPERTY(QString server READ server WRITE setServer NOTIFY serverChanged)
    Q_PROPERTY(int serverPort READ serverPort WRITE setServerPort NOTIFY serverPortChanged)
    Q_PROPERTY(bool fetching READ fetching NOTIFY fetchingChanged)

    bool m_fetching;
    QString m_server;
    int m_serverPort;
    QNetworkAccessManager m_nman;
public:
    /// Custom roles exposed through itemRoleNames() to the QML delegates.
    enum BuildRoles
    {
        BuildId = Qt::UserRole + 1, ///< numeric build id ("bid")
        CommitId,                   ///< abbreviated commit hash (10 chars)
        HasBinary,                  ///< whether a binary artifact exists
        HostId,                     ///< build host name
        PlatformId,                 ///< target platform string
        StatusId,                   ///< numeric build status (0 == success)
        TimeId,                     ///< build timestamp as QDateTime
        ColorId,                    ///< status colour (green on success, red otherwise)
    };
    BuildInformationFetcher(QObject* parent = nullptr):
        QStandardItemModel(parent),
        m_fetching(true),
        m_serverPort(0)  // BUGFIX: was never initialized, so serverPort() returned garbage
    {
        setColumnCount(7);
        QHash<int,QByteArray> roles;
        roles[BuildId] = "bid";
        roles[CommitId] = "commit";
        roles[HasBinary] = "has_bin";
        roles[HostId] = "host";
        roles[PlatformId] = "platform";
        roles[StatusId] = "status";
        roles[TimeId] = "timest";
        roles[ColorId] = "colorCode";
        setItemRoleNames(roles);
    }
    ~BuildInformationFetcher()
    {
    }
    bool fetching() const
    {
        return m_fetching;
    }
    QString server() const
    {
        return m_server;
    }
    int serverPort() const
    {
        return m_serverPort;
    }
public slots:
    /// Starts an asynchronous fetch of all build logs from the server.
    void update()
    {
        QUrl url(QString("http://%1:%2/rest/v1/all")
                 .arg(m_server)
                 .arg(m_serverPort));
        QNetworkRequest req;
        req.setUrl(url);
        QNetworkReply* rep = m_nman.get(req);
        connect(rep,&QNetworkReply::finished,this,&BuildInformationFetcher::receiveUpdate);
        m_fetching = true;
        emit fetchingChanged(m_fetching);
    }
private slots:
    /// Invoked when the reply finishes; repopulates the model on success.
    void receiveUpdate()
    {
        // qobject_cast is the safe way to recover the reply from sender().
        QNetworkReply* rep = qobject_cast<QNetworkReply*>(sender());
        if(rep)
        {
            // BUGFIX: check the transport result before parsing; previously a
            // failed request still wiped the model via removeRows().
            if(rep->error() == QNetworkReply::NoError)
            {
                QByteArray data = rep->readAll();
                QJsonParseError err;
                QJsonDocument doc = QJsonDocument::fromJson(data,&err);
                if(err.error == QJsonParseError::NoError)
                {
                    // Only discard the old rows once we know we can replace them.
                    this->removeRows(0,rowCount());
                    QJsonArray const& elements = doc.object()["logs"].toArray();
                    for(QJsonValue const& obj : elements)
                    {
                        QStandardItem* it = new QStandardItem;
                        QJsonObject b = obj.toObject();
                        it->setData(b["bid"].toVariant().toULongLong(),BuildId);
                        it->setData(b["commit"].toString().mid(0,10),CommitId);
                        it->setData(b["has_binary"].toBool(),HasBinary);
                        it->setData(b["host"].toString(),HostId);
                        it->setData(b["platform"].toString(),PlatformId);
                        it->setData(b["status"].toInt(),StatusId);
                        // "time" arrives in seconds since epoch; convert to ms.
                        it->setData(QDateTime::fromMSecsSinceEpoch(
                                        b["time"].toVariant().toULongLong()*1000),
                                    TimeId);
                        if(b["status"].toInt() == 0)
                            it->setData(QColor("green"),ColorId);
                        else
                            it->setData(QColor("red"),ColorId);
                        appendRow(it);
                    }
                }else
                    qDebug("Parsing error: %s",err.errorString().toStdString().c_str());
            }else
                qDebug("Network error: %s",rep->errorString().toStdString().c_str());
            rep->deleteLater();
        }else{
            qDebug("Please no.");
        }
        m_fetching = false;
        emit fetchingChanged(m_fetching);
    }
public slots:
    void setServer(QString server)
    {
        if (m_server == server)
            return;
        m_server = server;
        emit serverChanged(server);
    }
    void setServerPort(int serverPort)
    {
        if (m_serverPort == serverPort)
            return;
        m_serverPort = serverPort;
        emit serverPortChanged(serverPort);
    }
signals:
    void fetchingChanged(bool fetching);
    void serverChanged(QString server);
    void serverPortChanged(int serverPort);
};
| 26.048193 | 91 | 0.558279 | [
"object"
] |
d9b609944651102bdf585c71a545acbe063682b7 | 10,189 | h | C | build/djgpp/include/libm/math.h | Cwc-Test/CpcdosOS2.1 | d52c170be7f11cc50de38ef536d4355743d21706 | [
"Apache-2.0"
] | 1 | 2021-05-05T20:42:24.000Z | 2021-05-05T20:42:24.000Z | build/djgpp/include/libm/math.h | Cwc-Test/CpcdosOS2.1 | d52c170be7f11cc50de38ef536d4355743d21706 | [
"Apache-2.0"
] | null | null | null | build/djgpp/include/libm/math.h | Cwc-Test/CpcdosOS2.1 | d52c170be7f11cc50de38ef536d4355743d21706 | [
"Apache-2.0"
] | null | null | null | /* Copyright (C) 2015 DJ Delorie, see COPYING.DJ for details */
/* Copyright (C) 2013 DJ Delorie, see COPYING.DJ for details */
/* Copyright (C) 1998 DJ Delorie, see COPYING.DJ for details */
/* Copyright (C) 1995 DJ Delorie, see COPYING.DJ for details */
/* Provided by Cygnus Support (jtc@cygnus.com) */
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/*
* from: @(#)fdlibm.h 5.1 93/09/24
* $Id: math.h,v 1.11.2.1 2015/07/19 11:29:21 juan.guerrero Exp $
*/
#ifndef _MATH_H_
#define _MATH_H_
/*
* ANSI/POSIX
*/
/* Fixed-width integer aliases used by the fdlibm bit-manipulation code. */
typedef int __int32_t;
typedef unsigned int __uint32_t;
/* Overlay giving access to the two 32-bit halves of an IEEE-754 double. */
union __dmath
{
  __uint32_t i[2];
  double d;
};
/* HUGE_VAL is a library-provided double (infinity), not a compile-time
   constant expression, in this implementation. */
extern double __dj_huge_val;
#define HUGE_VAL (__dj_huge_val)
/*
* XOPEN/SVID
*/
// #if !defined(__STRICT_ANSI__) && !defined(_POSIX_SOURCE)
#define M_E 2.7182818284590452354 /* e */
#define M_LOG2E 1.4426950408889634074 /* log 2e */
#define M_LOG10E 0.43429448190325182765 /* log 10e */
#define M_LN2 0.693147180559945309417 /* log e2 */
#define M_LN10 2.30258509299404568402 /* log e10 */
#define M_PI 3.14159265358979323846 /* pi */
#define M_TWOPI 6.28318530717958647692 /* 2*pi */
#define M_PI_2 1.57079632679489661923 /* pi/2 */
#define M_PI_4 0.78539816339744830962 /* pi/4 */
#define M_3PI_4 2.3561944901923448370 /* 3/4 * pi */
#define M_SQRTPI 1.77245385090551602792981 /* sqrt(pi) */
#define M_1_PI 0.31830988618379067154 /* 1/pi */
#define M_2_PI 0.63661977236758134308 /* 2/pi */
#define M_2_SQRTPI 1.12837916709551257390 /* 2/sqrt(pi) */
#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
#define M_LN2LO 1.9082149292705877000E-10 /* lower bits of log e2 */
#define M_LN2HI 6.9314718036912381649E-1 /* log e2 */
#define M_SQRT3 1.73205080756887719000 /* sqrt(3) */
#define M_IVLN10 0.43429448190325182765 /* 1 / log(10) */
#define M_LOG2_E 0.693147180559945309417
#define M_INVLN2 1.4426950408889633870E0 /* 1 / log e2 */
extern int signgam;
/* Selects which historical libm error-handling behaviour the library
   emulates; the active choice is published via _LIB_VERSION below. */
enum __fdlibm_version
{
  __fdlibm_ieee = -1,
  __fdlibm_svid,
  __fdlibm_xopen,
  __fdlibm_posix
};
#define _LIB_VERSION_TYPE enum __fdlibm_version
#define _LIB_VERSION __fdlib_version
/* if global variable _LIB_VERSION is not desirable, one may
* change the following to be a constant by:
* #define _LIB_VERSION_TYPE const enum version
* In that case, after one initializes the value _LIB_VERSION (see
* s_lib_version.c) during compile time, it cannot be modified
* in the middle of a program
*/
extern _LIB_VERSION_TYPE _LIB_VERSION;
#define _IEEE_ __fdlibm_ieee
#define _SVID_ __fdlibm_svid
#define _XOPEN_ __fdlibm_xopen
#define _POSIX_ __fdlibm_posix
/* The exception structure passed to the matherr routine. */
#ifndef __cplusplus
/* The exception structure passed to the matherr routine.
   ("exception" is a reserved word in C++, hence the guard.) */
struct exception
{
  int type;         /* error class: DOMAIN, SING, OVERFLOW, ...            */
  const char *name; /* name of the function that raised the error          */
  double arg1;      /* first argument of the failing call                  */
  double arg2;      /* second argument (if any)                            */
  double retval;    /* value the function will return; matherr may change it */
  int err;          /* errno value to set (when nonzero)                   */
};
#endif
/*
 * set X_TLOSS = pi*2**52, which is possibly defined in <values.h>
 * (one may replace the following line by "#include <values.h>")
 */
#define X_TLOSS 1.41484755040568800000e+16
/* Error classes reported in struct exception::type. */
#define DOMAIN 1
#define SING 2
#define OVERFLOW 3
#define UNDERFLOW 4
#define TLOSS 5
#define PLOSS 6
/* BUGFIX: the matching "#if !defined(__STRICT_ANSI__) && ..." above was
 * commented out, but this #endif was left active.  That made it close the
 * _MATH_H_ include guard instead, leaving the final "#endif" of the file
 * unmatched (a preprocessor error).  Comment it out to mirror its partner,
 * exactly as was done for the other disabled #if/#endif pair below. */
// #endif /* !__STRICT_ANSI__ && !_POSIX_SOURCE */
#include <sys/cdefs.h>
__BEGIN_DECLS
/*
* ANSI/POSIX
*/
extern double acos __P((double));
extern double asin __P((double));
extern double atan __P((double));
extern double atan2 __P((double, double));
extern double cos __P((double));
extern double sin __P((double));
extern double tan __P((double));
extern double cosh __P((double));
extern double sinh __P((double));
extern double tanh __P((double));
extern double exp __P((double));
extern double frexp __P((double, int *));
extern double ldexp __P((double, int));
extern double log __P((double));
extern double log10 __P((double));
extern double modf __P((double, double *));
extern double pow __P((double, double));
extern double sqrt __P((double));
extern double ceil __P((double));
extern double fabs __P((double));
extern double floor __P((double));
extern double fmod __P((double, double));
#if !defined(__STRICT_ANSI__) || defined(__cplusplus) || \
defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
/* ISO C99 types and macros. */
extern long long int llrintf __P((float));
extern long long int llrint __P((double));
extern long long int llrintl __P((long double));
extern long int lrintf __P((float));
extern long int lrint __P((double));
extern long int lrintl __P((long double));
extern long long int llroundf __P((float));
extern long long int llround __P((double));
extern long long int llroundl __P((long double));
extern long int lroundf __P((float));
extern long int lround __P((double));
extern long int lroundl __P((long double));
extern float roundf __P((float));
extern double round __P((double));
extern long double roundl __P((long double));
extern float truncf __P((float));
extern double trunc __P((double));
extern long double truncl __P((long double));
extern int isinfl __P((long double));
extern int isnanl __P((long double));
extern int finitel __P((long double));
#endif /* !defined (__STRICT_ANSI__) || defined(__cplusplus)
|| defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L */
// #if !defined(__STRICT_ANSI__) && !defined(_POSIX_SOURCE)
extern double erf __P((double));
extern double erfc __P((double));
extern double gamma __P((double));
extern double hypot __P((double, double));
extern double infinity __P((void));
extern int isinf __P((double));
extern int isnan __P((double));
extern int finite __P((double));
extern double j0 __P((double));
extern double j1 __P((double));
extern double jn __P((int, double));
extern double lgamma __P((double));
extern double nan __P((void));
extern double y0 __P((double));
extern double y1 __P((double));
extern double yn __P((int, double));
extern double log2 __P((double));
#if !defined(_XOPEN_SOURCE)
extern double acosh __P((double));
extern double asinh __P((double));
extern double atanh __P((double));
extern double cbrt __P((double));
extern double exp10 __P((double));
extern double exp2 __P((double));
extern double log1p __P((double));
extern double logb __P((double));
extern long double modfl __P((long double, long double *));
extern double nextafter __P((double, double));
extern double pow10 __P((double));
extern double pow2 __P((double));
extern double powi __P((double, int));
extern void sincos __P((double *, double *, double));
extern double remainder __P((double, double));
extern double scalb __P((double, double));
#ifndef __cplusplus
extern int matherr __P((struct exception *));
#endif
extern long double rintl __P((long double));
/*
* IEEE Test Vector
*/
extern double significand __P((double));
/*
* Functions callable from C, intended to support IEEE arithmetic.
*/
extern double copysign __P((double, double));
extern int ilogb __P((double));
extern double rint __P((double));
extern double scalbn __P((double, int));
/*
* BSD math library entry points
*/
extern double drem __P((double, double));
extern double expm1 __P((double));
extern double log1p __P((double));
/*
* Reentrant version of gamma & lgamma; passes signgam back by reference
* as the second argument; user must allocate space for signgam.
*/
extern double gamma_r __P((double, int *));
extern double lgamma_r __P((double, int *));
/* float versions of ANSI/POSIX functions */
extern float acosf __P((float));
extern float asinf __P((float));
extern float atanf __P((float));
extern float atan2f __P((float, float));
extern float cosf __P((float));
extern float sinf __P((float));
extern float tanf __P((float));
extern float coshf __P((float));
extern float sinhf __P((float));
extern float tanhf __P((float));
extern float expf __P((float));
extern float frexpf __P((float, int *));
extern float ldexpf __P((float, int));
extern float logf __P((float));
extern float log10f __P((float));
extern float log2f __P((float));
extern float modff __P((float, float *));
extern float powf __P((float, float));
extern float sqrtf __P((float));
extern float ceilf __P((float));
extern float fabsf __P((float));
extern float floorf __P((float));
extern float fmodf __P((float, float));
extern float erff __P((float));
extern float erfcf __P((float));
extern float gammaf __P((float));
extern float hypotf __P((float, float));
extern float infinityf __P((void));
extern int isinff __P((float));
extern int isnanf __P((float));
extern int finitef __P((float));
extern float j0f __P((float));
extern float j1f __P((float));
extern float jnf __P((int, float));
extern float lgammaf __P((float));
extern float nanf __P((void));
extern float y0f __P((float));
extern float y1f __P((float));
extern float ynf __P((int, float));
extern float acoshf __P((float));
extern float asinhf __P((float));
extern float atanhf __P((float));
extern float cbrtf __P((float));
extern float logbf __P((float));
extern float nextafterf __P((float, float));
extern float remainderf __P((float, float));
extern float scalbf __P((float, float));
/*
* float version of IEEE Test Vector
*/
extern float significandf __P((float));
/*
* Float versions of functions callable from C, intended to support
* IEEE arithmetic.
*/
extern float copysignf __P((float, float));
extern int ilogbf __P((float));
extern float rintf __P((float));
extern float scalbnf __P((float, int));
/*
* float versions of BSD math library entry points
*/
extern float dremf __P((float, float));
extern float expm1f __P((float));
extern float log1pf __P((float));
/*
* Float versions of reentrant version of gamma & lgamma; passes
* signgam back by reference as the second argument; user must
* allocate space for signgam.
*/
extern float gammaf_r __P((float, int *));
extern float lgammaf_r __P((float, int *));
#endif /* !_XOPEN_SOURCE */
// #endif /* !__STRICT_ANSI__ && !_POSIX_SOURCE */
__END_DECLS
#endif /* _MATH_H_ */
| 29.278736 | 72 | 0.715772 | [
"vector"
] |
d9b71e11ed5378a14016bdc8752693506990172c | 10,819 | h | C | components/data_reduction_proxy/core/browser/data_reduction_proxy_io_data.h | xzhan96/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2018-02-22T18:06:56.000Z | 2021-08-28T12:49:27.000Z | components/data_reduction_proxy/core/browser/data_reduction_proxy_io_data.h | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | components/data_reduction_proxy/core/browser/data_reduction_proxy_io_data.h | emilio/chromium.src | 1bd0cf3997f947746c0fc5406a2466e7b5f6159e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_DATA_REDUCTION_PROXY_CORE_BROWSER_DATA_REDUCTION_PROXY_IO_DATA_H_
#define COMPONENTS_DATA_REDUCTION_PROXY_CORE_BROWSER_DATA_REDUCTION_PROXY_IO_DATA_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "components/data_reduction_proxy/core/browser/data_reduction_proxy_delegate.h"
#include "components/data_reduction_proxy/core/browser/data_reduction_proxy_metrics.h"
#include "components/data_reduction_proxy/core/browser/data_reduction_proxy_network_delegate.h"
#include "components/data_reduction_proxy/core/browser/data_reduction_proxy_request_options.h"
#include "components/data_reduction_proxy/core/browser/data_use_group_provider.h"
#include "components/data_reduction_proxy/core/common/data_reduction_proxy_event_storage_delegate.h"
#include "components/data_reduction_proxy/core/common/data_reduction_proxy_util.h"
#include "components/data_reduction_proxy/core/common/lofi_decider.h"
#include "components/data_reduction_proxy/core/common/lofi_ui_service.h"
namespace base {
class Value;
}
namespace net {
class NetLog;
class URLRequestContextGetter;
class URLRequestInterceptor;
}
namespace data_reduction_proxy {
class DataReductionProxyBypassStats;
class DataReductionProxyConfig;
class DataReductionProxyConfigServiceClient;
class DataReductionProxyConfigurator;
class DataReductionProxyEventCreator;
class DataReductionProxyService;
// Contains and initializes all Data Reduction Proxy objects that operate on
// the IO thread.
class DataReductionProxyIOData : public DataReductionProxyEventStorageDelegate {
public:
// Constructs a DataReductionProxyIOData object. |param_flags| is used to
// set information about the DNS names used by the proxy, and allowable
// configurations. |enabled| sets the initial state of the Data Reduction
// Proxy.
DataReductionProxyIOData(
Client client,
int param_flags,
net::NetLog* net_log,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
bool enabled,
const std::string& user_agent,
const std::string& channel);
virtual ~DataReductionProxyIOData();
// Performs UI thread specific shutdown logic.
void ShutdownOnUIThread();
// Sets the Data Reduction Proxy service after it has been created.
// Virtual for testing.
virtual void SetDataReductionProxyService(
base::WeakPtr<DataReductionProxyService> data_reduction_proxy_service);
// Creates an interceptor suitable for following the Data Reduction Proxy
// bypass protocol.
std::unique_ptr<net::URLRequestInterceptor> CreateInterceptor();
// Creates a NetworkDelegate suitable for carrying out the Data Reduction
// Proxy protocol, including authenticating, establishing a handler to
// override the current proxy configuration, and
// gathering statistics for UMA.
std::unique_ptr<DataReductionProxyNetworkDelegate> CreateNetworkDelegate(
std::unique_ptr<net::NetworkDelegate> wrapped_network_delegate,
bool track_proxy_bypass_statistics);
std::unique_ptr<DataReductionProxyDelegate> CreateProxyDelegate() const;
// Sets user defined preferences for how the Data Reduction Proxy
// configuration should be set. |at_startup| is true only
// when DataReductionProxySettings is initialized.
void SetProxyPrefs(bool enabled, bool at_startup);
// Applies a serialized Data Reduction Proxy configuration.
void SetDataReductionProxyConfiguration(const std::string& serialized_config);
// Returns true when Lo-Fi mode should be activated. When Lo-Fi mode is
// active, URL requests are modified to request low fidelity versions of the
// resources, except when the user is in the Lo-Fi control group.
bool ShouldEnableLoFiMode(const net::URLRequest& request);
// Sets Lo-Fi mode off in |config_|.
void SetLoFiModeOff();
// Bridge methods to safely call to the UI thread objects.
void UpdateContentLengths(
int64_t data_used,
int64_t original_size,
bool data_reduction_proxy_enabled,
DataReductionProxyRequestType request_type,
const scoped_refptr<DataUseGroup>& data_usage_source,
const std::string& mime_type);
void SetLoFiModeActiveOnMainFrame(bool lo_fi_mode_active);
// Overrides of DataReductionProxyEventStorageDelegate. Bridges to the UI
// thread objects.
void AddEvent(std::unique_ptr<base::Value> event) override;
void AddEnabledEvent(std::unique_ptr<base::Value> event,
bool enabled) override;
void AddEventAndSecureProxyCheckState(std::unique_ptr<base::Value> event,
SecureProxyCheckState state) override;
void AddAndSetLastBypassEvent(std::unique_ptr<base::Value> event,
int64_t expiration_ticks) override;
// Returns true if the Data Reduction Proxy is enabled and false otherwise.
bool IsEnabled() const;
// Changes the reporting fraction for the pingback service to
// |pingback_reporting_fraction|. Overridden in testing.
virtual void SetPingbackReportingFraction(float pingback_reporting_fraction);
// Various accessor methods.
DataReductionProxyConfigurator* configurator() const {
return configurator_.get();
}
DataReductionProxyConfig* config() const {
return config_.get();
}
DataReductionProxyEventCreator* event_creator() const {
return event_creator_.get();
}
DataReductionProxyRequestOptions* request_options() const {
return request_options_.get();
}
DataReductionProxyConfigServiceClient* config_client() const {
return config_client_.get();
}
net::ProxyDelegate* proxy_delegate() const {
return proxy_delegate_.get();
}
net::NetLog* net_log() {
return net_log_;
}
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner() const {
return io_task_runner_;
}
// Used for testing.
DataReductionProxyBypassStats* bypass_stats() const {
return bypass_stats_.get();
}
LoFiDecider* lofi_decider() const { return lofi_decider_.get(); }
void set_lofi_decider(std::unique_ptr<LoFiDecider> lofi_decider) const {
lofi_decider_ = std::move(lofi_decider);
}
LoFiUIService* lofi_ui_service() const { return lofi_ui_service_.get(); }
// Takes ownership of |lofi_ui_service|.
void set_lofi_ui_service(
std::unique_ptr<LoFiUIService> lofi_ui_service) const {
lofi_ui_service_ = std::move(lofi_ui_service);
}
void set_data_usage_source_provider(
std::unique_ptr<DataUseGroupProvider> data_usage_source_provider) {
data_use_group_provider_ = std::move(data_usage_source_provider);
}
// The production channel of this build.
std::string channel() const { return channel_; }
// The Client type of this build.
Client client() const { return client_; }
private:
friend class TestDataReductionProxyIOData;
FRIEND_TEST_ALL_PREFIXES(DataReductionProxyIODataTest, TestConstruction);
FRIEND_TEST_ALL_PREFIXES(DataReductionProxyIODataTest,
TestResetBadProxyListOnDisableDataSaver);
// Used for testing.
DataReductionProxyIOData();
// Initializes the weak pointer to |this| on the IO thread. It must be done
// on the IO thread, since it is used for posting tasks from the UI thread
// to IO thread objects in a thread safe manner.
void InitializeOnIOThread();
// Records that the data reduction proxy is unreachable or not.
void SetUnreachable(bool unreachable);
// Stores an int64_t value in preferences storage.
void SetInt64Pref(const std::string& pref_path, int64_t value);
// Stores a string value in preferences storage.
void SetStringPref(const std::string& pref_path, const std::string& value);
// Stores a serialized Data Reduction Proxy configuration in preferences
// storage.
void StoreSerializedConfig(const std::string& serialized_config);
// The type of Data Reduction Proxy client.
const Client client_;
// Parameters including DNS names and allowable configurations.
std::unique_ptr<DataReductionProxyConfig> config_;
// Handles getting if a request is in Lo-Fi mode.
mutable std::unique_ptr<LoFiDecider> lofi_decider_;
// Handles showing Lo-Fi UI when a Lo-Fi response is received.
mutable std::unique_ptr<LoFiUIService> lofi_ui_service_;
// Creates Data Reduction Proxy-related events for logging.
std::unique_ptr<DataReductionProxyEventCreator> event_creator_;
// Setter of the Data Reduction Proxy-specific proxy configuration.
std::unique_ptr<DataReductionProxyConfigurator> configurator_;
// A proxy delegate. Used, for example, for Data Reduction Proxy request
// resolution.
std::unique_ptr<DataReductionProxyDelegate> proxy_delegate_;
// Data Reduction Proxy objects with a UI based lifetime.
base::WeakPtr<DataReductionProxyService> service_;
// Tracker of various metrics to be reported in UMA.
std::unique_ptr<DataReductionProxyBypassStats> bypass_stats_;
// Constructs credentials suitable for authenticating the client.
std::unique_ptr<DataReductionProxyRequestOptions> request_options_;
// Requests new Data Reduction Proxy configurations from a remote service.
std::unique_ptr<DataReductionProxyConfigServiceClient> config_client_;
// A net log.
net::NetLog* net_log_;
// IO and UI task runners, respectively.
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
// Manages instances of |DataUsageSource| and maps |URLRequest| instances to
// their appropriate |DataUsageSource|.
std::unique_ptr<DataUseGroupProvider> data_use_group_provider_;
// Whether the Data Reduction Proxy has been enabled or not by the user. In
// practice, this can be overridden by the command line.
bool enabled_;
// The net::URLRequestContextGetter used for making URL requests.
net::URLRequestContextGetter* url_request_context_getter_;
// A net::URLRequestContextGetter used for making secure proxy checks. It
// does not use alternate protocols.
scoped_refptr<net::URLRequestContextGetter> basic_url_request_context_getter_;
// The production channel of this build.
const std::string channel_;
base::WeakPtrFactory<DataReductionProxyIOData> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(DataReductionProxyIOData);
};
} // namespace data_reduction_proxy
#endif // COMPONENTS_DATA_REDUCTION_PROXY_CORE_BROWSER_DATA_REDUCTION_PROXY_IO_DATA_H_
| 37.565972 | 100 | 0.776966 | [
"object"
] |
8ac3acc7f491fb911939df4df79258ca816ac8de | 177 | h | C | plugins/ball_plugin.h | timrobot/ArmRL | 7de4588d7df288735a4995f216c809768ea8f59f | [
"MIT"
] | null | null | null | plugins/ball_plugin.h | timrobot/ArmRL | 7de4588d7df288735a4995f216c809768ea8f59f | [
"MIT"
] | 1 | 2017-12-15T04:25:25.000Z | 2017-12-15T04:25:25.000Z | plugins/ball_plugin.h | timrobot/ArmRL | 7de4588d7df288735a4995f216c809768ea8f59f | [
"MIT"
] | 1 | 2020-12-22T08:16:24.000Z | 2020-12-22T08:16:24.000Z | #pragma once
#include <vector>
// C linkage so the symbols can be resolved by a plugin loader (e.g. dlsym)
// without C++ name mangling.
extern "C" {
  // Initializes the ball plugin; call once before any other plugin function.
  void ball_plugin_init(void);
  // Tears down the plugin and releases whatever ball_plugin_init() acquired.
  void ball_plugin_destroy(void);
  // Sets the ball position to (x, y, z).
  // NOTE(review): units and reference frame are defined by the simulator,
  // not visible here — confirm against the plugin implementation.
  void ball_plugin_setPosition(double x, double y, double z);
}
| 14.75 | 61 | 0.728814 | [
"vector"
] |
8ac9d4e9a46612b31b07528613d2f59c488c6763 | 5,176 | h | C | msv_teleop/include/msv_teleop/msv_teleop.h | daconjurer/ros_msv | dbf70083588b869172ac74f187b81166e78c8da1 | [
"BSD-3-Clause"
] | 2 | 2021-07-13T22:02:43.000Z | 2022-03-10T15:46:58.000Z | msv_teleop/include/msv_teleop/msv_teleop.h | daconjurer/ros_msv | dbf70083588b869172ac74f187b81166e78c8da1 | [
"BSD-3-Clause"
] | null | null | null | msv_teleop/include/msv_teleop/msv_teleop.h | daconjurer/ros_msv | dbf70083588b869172ac74f187b81166e78c8da1 | [
"BSD-3-Clause"
] | null | null | null | /*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2019, Robótica de la Mixteca
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Universidad Tecnológica de la Mixteca nor
* the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/////////////////////////////////////////////////////////////////////////////////////////
/// @file ROS header for the teleop class of the MSV-01 rescue robot.
/// @author Victor Esteban Sandoval-Luna
///
/// Based on the Modbus protocol "lightmodbus" library (RTU) from Jacek Wieczorek,
/// please see https://github.com/Jacajack/liblightmodbus.
///
/// Based on the "rescue" ROS metapackage from José Armando Sánchez-Rojas.
/////////////////////////////////////////////////////////////////////////////////////////
#ifndef MSV_TELEOP_H_
#define MSV_TELEOP_H_
#include <ros/ros.h>
#include <msv_main/port_handler.h>
#include <std_msgs/String.h>
#include <std_msgs/UInt8.h>
#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <geometry_msgs/Twist.h>
#include <std_msgs/MultiArrayDimension.h>
#include <std_msgs/UInt8MultiArray.h>
#include <msv_msgs/Actuators.h>
#include <lightmodbus/lightmodbus.h>
#include <lightmodbus/master.h>
// Teleoperation node for the MSV-01 rescue robot. Subscribes to velocity
// (geometry_msgs/Twist) and mode (std_msgs/String) commands, encodes them as
// Modbus RTU master queries for the BPT controller over a serial port, and
// publishes the decoded actuator/coil feedback plus the raw Modbus frames.
class MsvTeleop
{
	private:
		// lightmodbus Master configuration struct (RTU master state)
		ModbusMaster master;
		// Master exit code returned by the last lightmodbus call
		uint8_t mec;
		// Modbus holding registers (to preset) and coils (to force) on the slave
		std::vector<uint16_t> hregs = std::vector<uint16_t> (4);
		std::vector<uint8_t> fcoils = std::vector<uint8_t> (1);
		// Modbus coils and actuators data to be read back from the slave
		std::vector<uint8_t> rcoils = std::vector<uint8_t> (2);
		std::vector<uint8_t> alarms = std::vector<uint8_t> (2);
		std::vector<uint16_t> iregs = std::vector<uint16_t> (4);
		std::vector<float> actuators = std::vector<float> (4);
		// Raw Modbus request frames, re-published for debugging/monitoring
		std_msgs::UInt8MultiArray bpt_read_iregs;
		std_msgs::UInt8MultiArray bpt_read_coils;
		std_msgs::UInt8MultiArray bpt_preset_hregs;
		std_msgs::UInt8MultiArray bpt_forced_coils;
		// Decoded actuator values to be published
		msv_msgs::Actuators actuators_msg;
		// Serial port handler for the BPT controller board
		msv::PortHandler bpt;
		// MSV-01 teleop node handle, subscribers and publishers
		ros::NodeHandle n_teleop;
		ros::Subscriber sub_vel;
		ros::Subscriber sub_mode;
		ros::Publisher pub_actuators;        // actuators data topic
		ros::Publisher pub_rcoils;           // read coils frame topic
		ros::Publisher pub_iregs;            // read input registers frame topic
		ros::Publisher pub_fcoils;           // force coils frame topic
		ros::Publisher pub_hregs;            // preset holding registers frame topic
		// Buffers for decoding the BPT responses (sizes match the fixed Modbus
		// response lengths for each function code used)
		std::vector<uint8_t> hregs_response = std::vector<uint8_t> (8);
		std::vector<uint8_t> fcoils_response = std::vector<uint8_t> (8);
		std::vector<uint8_t> iregs_response = std::vector<uint8_t> (13);
		std::vector<uint8_t> rcoils_response = std::vector<uint8_t> (6);
		// Verbosity level for serial/Modbus debug output
		int verbosity;
		// Turning mode currently selected (set by modeCallback)
		int turn_mode;
		// Last linear, angular and flipper command values
		int l, a, f;
		std::vector<uint8_t> ack_modbus = std::vector<uint8_t> (1);
		// ROS callbacks: velocity command and robot-mode command
		void twistCallback (const geometry_msgs::Twist& msg);
		void modeCallback (const std_msgs::String::ConstPtr& mode);
		// Issue Modbus read queries for coils / input registers
		void readCoils ();
		void readInputRegisters ();
		// Send a query and wait for an ack of ack_length bytes on the
		// controller port; returns a status code
		int sendreceivePacketBPT (const int& ack_length, const int& verbose);
		// Send-only variant, for debug purposes
		int sendPacketBPT (const int& verbose);
		// Print the current Modbus query frame (for Modbus/serial debugging)
		void printQuery ();
	public:
		// Opens the serial port at the given baudrate; verb sets verbosity.
		MsvTeleop (char* port, const int& baudrate, const int& verb);
		virtual ~MsvTeleop () {}
		// Poll the controller for sensor/actuator feedback and publish it
		void sense ();
		// Close the serial port
		void close ();
};// End of class MsvTeleop
#endif
| 36.195804 | 89 | 0.683346 | [
"vector"
] |
8ad4c02e32201c100bf8385dd5734a063a5e743b | 84,221 | c | C | apps/mysql-5.1.65/storage/innodb_plugin/srv/srv0srv.c | vusec/firestarter | 2048c1f731b8f3c5570a920757f9d7730d5f716a | [
"Apache-2.0"
] | 3 | 2021-04-29T07:59:16.000Z | 2021-12-10T02:23:05.000Z | apps/mysql-5.1.65/storage/innodb_plugin/srv/srv0srv.c | vusec/firestarter | 2048c1f731b8f3c5570a920757f9d7730d5f716a | [
"Apache-2.0"
] | null | null | null | apps/mysql-5.1.65/storage/innodb_plugin/srv/srv0srv.c | vusec/firestarter | 2048c1f731b8f3c5570a920757f9d7730d5f716a | [
"Apache-2.0"
] | null | null | null | /*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
gratefully acknowledged and are described briefly in the InnoDB
documentation. The contributions by Percona Inc. are incorporated with
their permission, and subject to the conditions contained in the file
COPYING.Percona.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
*****************************************************************************/
/**************************************************//**
@file srv/srv0srv.c
The database server main program
NOTE: SQL Server 7 uses something which the documentation
calls user mode scheduled threads (UMS threads). One such
thread is usually allocated per processor. Win32
documentation does not know any UMS threads, which suggests
that the concept is internal to SQL Server 7. It may mean that
SQL Server 7 does all the scheduling of threads itself, even
in i/o waits. We should maybe modify InnoDB to use the same
technique, because thread switches within NT may be too slow.
SQL Server 7 also mentions fibers, which are cooperatively
scheduled threads. They can boost performance by 5 %,
according to the Delaney and Soukup's book.
Windows 2000 will have something called thread pooling
(see msdn website), which we could possibly use.
Another possibility could be to use some very fast user space
thread library. This might confuse NT though.
Created 10/8/1995 Heikki Tuuri
*******************************************************/
/* Dummy comment */
#include "srv0srv.h"
#include "ut0mem.h"
#include "ut0ut.h"
#include "os0proc.h"
#include "mem0mem.h"
#include "mem0pool.h"
#include "sync0sync.h"
#include "thr0loc.h"
#include "que0que.h"
#include "srv0que.h"
#include "log0recv.h"
#include "pars0pars.h"
#include "usr0sess.h"
#include "lock0lock.h"
#include "trx0purge.h"
#include "ibuf0ibuf.h"
#include "buf0flu.h"
#include "buf0lru.h"
#include "btr0sea.h"
#include "dict0load.h"
#include "dict0boot.h"
#include "srv0start.h"
#include "row0mysql.h"
#include "ha_prototypes.h"
#include "trx0i_s.h"
#include "os0sync.h" /* for HAVE_ATOMIC_BUILTINS */
/* This is set to TRUE if the MySQL user has set it in MySQL; currently
affects only FOREIGN KEY definition parsing */
UNIV_INTERN ibool srv_lower_case_table_names = FALSE;
/* The following counter is incremented whenever there is some user activity
in the server */
UNIV_INTERN ulint srv_activity_count = 0;
/* The following is the maximum allowed duration of a lock wait. */
UNIV_INTERN ulint srv_fatal_semaphore_wait_threshold = 600;
/* How much data manipulation language (DML) statements need to be delayed,
in microseconds, in order to reduce the lagging of the purge thread. */
UNIV_INTERN ulint srv_dml_needed_delay = 0;
UNIV_INTERN ibool srv_lock_timeout_active = FALSE;
UNIV_INTERN ibool srv_monitor_active = FALSE;
UNIV_INTERN ibool srv_error_monitor_active = FALSE;
UNIV_INTERN const char* srv_main_thread_op_info = "";
/** Prefix used by MySQL to indicate pre-5.1 table name encoding */
UNIV_INTERN const char srv_mysql50_table_name_prefix[9] = "#mysql50#";
/* Server parameters which are read from the initfile */
/* The following three are dir paths which are catenated before file
names, where the file name itself may also contain a path */
UNIV_INTERN char* srv_data_home = NULL;
#ifdef UNIV_LOG_ARCHIVE
UNIV_INTERN char* srv_arch_dir = NULL;
#endif /* UNIV_LOG_ARCHIVE */
/** store to its own file each table created by an user; data
dictionary tables are in the system tablespace 0 */
UNIV_INTERN my_bool srv_file_per_table;
/** The file format to use on new *.ibd files. */
UNIV_INTERN ulint srv_file_format = 0;
/** Whether to check file format during startup. A value of
DICT_TF_FORMAT_MAX + 1 means no checking ie. FALSE. The default is to
set it to the highest format we support. */
UNIV_INTERN ulint srv_check_file_format_at_startup = DICT_TF_FORMAT_MAX;
#if DICT_TF_FORMAT_51
# error "DICT_TF_FORMAT_51 must be 0!"
#endif
/** Place locks to records only i.e. do not use next-key locking except
on duplicate key checking and foreign key checking */
UNIV_INTERN ibool srv_locks_unsafe_for_binlog = FALSE;
UNIV_INTERN ulint srv_n_data_files = 0;
UNIV_INTERN char** srv_data_file_names = NULL;
/* size in database pages */
UNIV_INTERN ulint* srv_data_file_sizes = NULL;
/* if TRUE, then we auto-extend the last data file */
UNIV_INTERN ibool srv_auto_extend_last_data_file = FALSE;
/* if != 0, this tells the max size auto-extending may increase the
last data file size */
UNIV_INTERN ulint srv_last_file_size_max = 0;
/* If the last data file is auto-extended, we add this
many pages to it at a time */
UNIV_INTERN ulong srv_auto_extend_increment = 8;
UNIV_INTERN ulint* srv_data_file_is_raw_partition = NULL;
/* If the following is TRUE we do not allow inserts etc. This protects
the user from forgetting the 'newraw' keyword to my.cnf */
UNIV_INTERN ibool srv_created_new_raw = FALSE;
UNIV_INTERN char** srv_log_group_home_dirs = NULL;
UNIV_INTERN ulint srv_n_log_groups = ULINT_MAX;
UNIV_INTERN ulint srv_n_log_files = ULINT_MAX;
/* size in database pages */
UNIV_INTERN ulint srv_log_file_size = ULINT_MAX;
/* size in database pages */
UNIV_INTERN ulint srv_log_buffer_size = ULINT_MAX;
UNIV_INTERN ulong srv_flush_log_at_trx_commit = 1;
/* Try to flush dirty pages so as to avoid IO bursts at
the checkpoints. */
UNIV_INTERN char srv_adaptive_flushing = TRUE;
/** Maximum number of times allowed to conditionally acquire
mutex before switching to blocking wait on the mutex */
#define MAX_MUTEX_NOWAIT 20
/** Check whether the number of failed nonblocking mutex
acquisition attempts exceeds maximum allowed value. If so,
srv_printf_innodb_monitor() will request mutex acquisition
with mutex_enter(), which will wait until it gets the mutex. */
#define MUTEX_NOWAIT(mutex_skipped) ((mutex_skipped) < MAX_MUTEX_NOWAIT)
/** The sort order table of the MySQL latin1_swedish_ci character set
collation */
UNIV_INTERN const byte* srv_latin1_ordering;
/* use os/external memory allocator */
UNIV_INTERN my_bool srv_use_sys_malloc = TRUE;
/* requested size in kilobytes */
UNIV_INTERN ulint srv_buf_pool_size = ULINT_MAX;
/* previously requested size */
UNIV_INTERN ulint srv_buf_pool_old_size;
/* current size in kilobytes */
UNIV_INTERN ulint srv_buf_pool_curr_size = 0;
/* size in bytes */
UNIV_INTERN ulint srv_mem_pool_size = ULINT_MAX;
UNIV_INTERN ulint srv_lock_table_size = ULINT_MAX;
/* This parameter is deprecated. Use srv_n_io_[read|write]_threads
instead. */
UNIV_INTERN ulint srv_n_file_io_threads = ULINT_MAX;
UNIV_INTERN ulint srv_n_read_io_threads = ULINT_MAX;
UNIV_INTERN ulint srv_n_write_io_threads = ULINT_MAX;
/* Switch to enable random read ahead. */
UNIV_INTERN my_bool srv_random_read_ahead = FALSE;
/* User settable value of the number of pages that must be present
in the buffer cache and accessed sequentially for InnoDB to trigger a
readahead request. */
UNIV_INTERN ulong srv_read_ahead_threshold = 56;
#ifdef UNIV_LOG_ARCHIVE
UNIV_INTERN ibool srv_log_archive_on = FALSE;
UNIV_INTERN ibool srv_archive_recovery = 0;
UNIV_INTERN ib_uint64_t srv_archive_recovery_limit_lsn;
#endif /* UNIV_LOG_ARCHIVE */
/* This parameter is used to throttle the number of insert buffers that are
merged in a batch. By increasing this parameter on a faster disk you can
possibly reduce the number of I/O operations performed to complete the
merge operation. The value of this parameter is used as is by the
background loop when the system is idle (low load), on a busy system
the parameter is scaled down by a factor of 4, this is to avoid putting
a heavier load on the I/O sub system. */
UNIV_INTERN ulong srv_insert_buffer_batch_size = 20;
UNIV_INTERN char* srv_file_flush_method_str = NULL;
UNIV_INTERN ulint srv_unix_file_flush_method = SRV_UNIX_FSYNC;
UNIV_INTERN ulint srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
UNIV_INTERN ulint srv_max_n_open_files = 300;
/* Number of IO operations per second the server can do */
UNIV_INTERN ulong srv_io_capacity = 200;
/* The InnoDB main thread tries to keep the ratio of modified pages
in the buffer pool to all database pages in the buffer pool smaller than
the following number. But it is not guaranteed that the value stays below
that during a time of heavy update/insert activity. */
UNIV_INTERN ulong srv_max_buf_pool_modified_pct = 75;
/* variable counts amount of data read in total (in bytes) */
UNIV_INTERN ulint srv_data_read = 0;
/* Internal setting for "innodb_stats_method". Decides how InnoDB treats
NULL value when collecting statistics. By default, it is set to
SRV_STATS_NULLS_EQUAL(0), ie. all NULL value are treated equal */
ulong srv_innodb_stats_method = SRV_STATS_NULLS_EQUAL;
/* here we count the amount of data written in total (in bytes) */
UNIV_INTERN ulint srv_data_written = 0;
/* the number of the log write requests done */
UNIV_INTERN ulint srv_log_write_requests = 0;
/* the number of physical writes to the log performed */
UNIV_INTERN ulint srv_log_writes = 0;
/* amount of data written to the log files in bytes */
UNIV_INTERN ulint srv_os_log_written = 0;
/* amount of writes being done to the log files */
UNIV_INTERN ulint srv_os_log_pending_writes = 0;
/* we increase this counter, when there we don't have enough space in the
log buffer and have to flush it */
UNIV_INTERN ulint srv_log_waits = 0;
/* this variable counts the amount of times, when the doublewrite buffer
was flushed */
UNIV_INTERN ulint srv_dblwr_writes = 0;
/* here we store the number of pages that have been flushed to the
doublewrite buffer */
UNIV_INTERN ulint srv_dblwr_pages_written = 0;
/* in this variable we store the number of write requests issued */
UNIV_INTERN ulint srv_buf_pool_write_requests = 0;
/* here we store the number of times when we had to wait for a free page
in the buffer pool. It happens when the buffer pool is full and we need
to make a flush, in order to be able to read or create a page. */
UNIV_INTERN ulint srv_buf_pool_wait_free = 0;
/* variable to count the number of pages that were written from buffer
pool to the disk */
UNIV_INTERN ulint srv_buf_pool_flushed = 0;
/** Number of buffer pool reads that led to the
reading of a disk page */
UNIV_INTERN ulint srv_buf_pool_reads = 0;
/* structure to pass status variables to MySQL */
UNIV_INTERN export_struc export_vars;
/* If the following is != 0 we do not allow inserts etc. This protects
the user from forgetting the innodb_force_recovery keyword to my.cnf */
UNIV_INTERN ulint srv_force_recovery = 0;
/*-----------------------*/
/* We are prepared for a situation that we have this many threads waiting for
a semaphore inside InnoDB. innobase_start_or_create_for_mysql() sets the
value. */
UNIV_INTERN ulint srv_max_n_threads = 0;
/* The following controls how many threads we let inside InnoDB concurrently:
threads waiting for locks are not counted into the number because otherwise
we could get a deadlock. MySQL creates a thread for each user session, and
semaphore contention and convoy problems can occur without this restriction.
Value 10 should be good if there are less than 4 processors + 4 disks in the
computer. Bigger computers need bigger values. Value 0 will disable the
concurrency check. */
UNIV_INTERN ulong srv_thread_concurrency = 0;
/* this mutex protects srv_conc data structures */
UNIV_INTERN os_fast_mutex_t srv_conc_mutex;
/* number of transactions that have declared_to_be_inside_innodb set.
It used to be a non-error for this value to drop below zero temporarily.
This is no longer true. We'll, however, keep the lint datatype to add
assertions to catch any corner cases that we may have missed. */
UNIV_INTERN lint srv_conc_n_threads = 0;
/* number of OS threads waiting in the FIFO for a permission to enter
InnoDB */
UNIV_INTERN ulint srv_conc_n_waiting_threads = 0;
/* Wait slot used by a thread queuing for permission to enter InnoDB when
srv_thread_concurrency is in effect and the limit has been reached */
typedef struct srv_conc_slot_struct	srv_conc_slot_t;
struct srv_conc_slot_struct{
	os_event_t	event;		/*!< event this thread blocks on
					while waiting to enter */
	ibool		reserved;	/*!< TRUE if slot
					reserved */
	ibool		wait_ended;	/*!< TRUE when another
					thread has already set
					the event and the
					thread in this slot is
					free to proceed; but
					reserved may still be
					TRUE at that point */
	UT_LIST_NODE_T(srv_conc_slot_t)	srv_conc_queue;	/*!< queue node */
};
/* queue of threads waiting to get in */
UNIV_INTERN UT_LIST_BASE_NODE_T(srv_conc_slot_t) srv_conc_queue;
/* array of wait slots */
UNIV_INTERN srv_conc_slot_t* srv_conc_slots;
/* Number of times a thread is allowed to enter InnoDB within the same
SQL query after it has once got the ticket at srv_conc_enter_innodb */
#define SRV_FREE_TICKETS_TO_ENTER srv_n_free_tickets_to_enter
#define SRV_THREAD_SLEEP_DELAY srv_thread_sleep_delay
/*-----------------------*/
/* If the following is set to 1 then we do not run purge and insert buffer
merge to completion before shutdown. If it is set to 2, do not even flush the
buffer pool to data files at the shutdown: we effectively 'crash'
InnoDB (but lose no committed transactions). */
UNIV_INTERN ulint srv_fast_shutdown = 0;
/* Generate a innodb_status.<pid> file */
UNIV_INTERN ibool srv_innodb_status = FALSE;
/* When estimating number of different key values in an index, sample
this many index pages */
UNIV_INTERN unsigned long long srv_stats_sample_pages = 8;
UNIV_INTERN ibool srv_use_doublewrite_buf = TRUE;
UNIV_INTERN ibool srv_use_checksums = TRUE;
UNIV_INTERN ibool srv_set_thread_priorities = TRUE;
UNIV_INTERN int srv_query_thread_priority = 0;
UNIV_INTERN ulong srv_replication_delay = 0;
/*-------------------------------------------*/
UNIV_INTERN ulong srv_n_spin_wait_rounds = 30;
UNIV_INTERN ulong srv_n_free_tickets_to_enter = 500;
UNIV_INTERN ulong srv_thread_sleep_delay = 10000;
UNIV_INTERN ulong srv_spin_wait_delay = 6;
UNIV_INTERN ibool srv_priority_boost = TRUE;
#ifdef UNIV_DEBUG
UNIV_INTERN ibool srv_print_thread_releases = FALSE;
UNIV_INTERN ibool srv_print_lock_waits = FALSE;
UNIV_INTERN ibool srv_print_buf_io = FALSE;
UNIV_INTERN ibool srv_print_log_io = FALSE;
UNIV_INTERN ibool srv_print_latch_waits = FALSE;
#endif /* UNIV_DEBUG */
UNIV_INTERN ulint srv_n_rows_inserted = 0;
UNIV_INTERN ulint srv_n_rows_updated = 0;
UNIV_INTERN ulint srv_n_rows_deleted = 0;
UNIV_INTERN ulint srv_n_rows_read = 0;
static ulint srv_n_rows_inserted_old = 0;
static ulint srv_n_rows_updated_old = 0;
static ulint srv_n_rows_deleted_old = 0;
static ulint srv_n_rows_read_old = 0;
UNIV_INTERN ulint srv_n_lock_wait_count = 0;
UNIV_INTERN ulint srv_n_lock_wait_current_count = 0;
UNIV_INTERN ib_int64_t srv_n_lock_wait_time = 0;
UNIV_INTERN ulint srv_n_lock_max_wait_time = 0;
/*
Set the following to 0 if you want InnoDB to write messages on
stderr on startup/shutdown
*/
UNIV_INTERN ibool srv_print_verbose_log = TRUE;
UNIV_INTERN ibool srv_print_innodb_monitor = FALSE;
UNIV_INTERN ibool srv_print_innodb_lock_monitor = FALSE;
UNIV_INTERN ibool srv_print_innodb_tablespace_monitor = FALSE;
UNIV_INTERN ibool srv_print_innodb_table_monitor = FALSE;
/* Array of English strings describing the current state of an
i/o handler thread */
UNIV_INTERN const char* srv_io_thread_op_info[SRV_MAX_N_IO_THREADS];
UNIV_INTERN const char* srv_io_thread_function[SRV_MAX_N_IO_THREADS];
UNIV_INTERN time_t srv_last_monitor_time;
UNIV_INTERN mutex_t srv_innodb_monitor_mutex;
/* Mutex for locking srv_monitor_file */
UNIV_INTERN mutex_t srv_monitor_file_mutex;
/* Temporary file for innodb monitor output */
UNIV_INTERN FILE* srv_monitor_file;
/* Mutex for locking srv_dict_tmpfile.
This mutex has a very high rank; threads reserving it should not
be holding any InnoDB latches. */
UNIV_INTERN mutex_t srv_dict_tmpfile_mutex;
/* Temporary file for output from the data dictionary */
UNIV_INTERN FILE* srv_dict_tmpfile;
/* Mutex for locking srv_misc_tmpfile.
This mutex has a very low rank; threads reserving it should not
acquire any further latches or sleep before releasing this one. */
UNIV_INTERN mutex_t srv_misc_tmpfile_mutex;
/* Temporary file for miscellaneous diagnostic output */
UNIV_INTERN FILE* srv_misc_tmpfile;
UNIV_INTERN ulint srv_main_thread_process_no = 0;
UNIV_INTERN ulint srv_main_thread_id = 0;
/* The following count work done by srv_master_thread. */
/* Iterations by the 'once per second' loop. */
static ulint srv_main_1_second_loops = 0;
/* Calls to sleep by the 'once per second' loop. */
static ulint srv_main_sleeps = 0;
/* Iterations by the 'once per 10 seconds' loop. */
static ulint srv_main_10_second_loops = 0;
/* Iterations of the loop bounded by the 'background_loop' label. */
static ulint srv_main_background_loops = 0;
/* Iterations of the loop bounded by the 'flush_loop' label. */
static ulint srv_main_flush_loops = 0;
/* Log writes involving flush. */
static ulint srv_log_writes_and_flush = 0;
/* This is only ever touched by the master thread. It records the
time when the last flush of log file has happened. The master
thread ensures that we flush the log files at least once per
second. */
static time_t srv_last_log_flush_time;
/* The master thread performs various tasks based on the current
state of IO activity and the level of IO utilization in past
intervals. Following macros define thresholds for these conditions. */
#define SRV_PEND_IO_THRESHOLD (PCT_IO(3))
#define SRV_RECENT_IO_ACTIVITY (PCT_IO(5))
#define SRV_PAST_IO_ACTIVITY (PCT_IO(200))
/*
IMPLEMENTATION OF THE SERVER MAIN PROGRAM
=========================================
There is the following analogue between this database
server and an operating system kernel:
DB concept equivalent OS concept
---------- ---------------------
transaction -- process;
query thread -- thread;
lock -- semaphore;
transaction set to
the rollback state -- kill signal delivered to a process;
kernel -- kernel;
query thread execution:
(a) without kernel mutex
reserved -- process executing in user mode;
(b) with kernel mutex reserved
-- process executing in kernel mode;
The server is controlled by a master thread which runs at
a priority higher than normal, that is, higher than user threads.
It sleeps most of the time, and wakes up, say, every 300 milliseconds,
to check whether there is anything happening in the server which
requires intervention of the master thread. Such situations may be,
for example, when flushing of dirty blocks is needed in the buffer
pool or old version of database rows have to be cleaned away.
The threads which we call user threads serve the queries of
the clients and input from the console of the server.
They run at normal priority. The server may have several
communications endpoints. A dedicated set of user threads waits
at each of these endpoints ready to receive a client request.
Each request is taken by a single user thread, which then starts
processing and, when the result is ready, sends it to the client
and returns to wait at the same endpoint the thread started from.
So, we do not have dedicated communication threads listening at
the endpoints and dealing the jobs to dedicated worker threads.
Our architecture saves one thread switch per request, compared
to the solution with dedicated communication threads
which amounts to 15 microseconds on 100 MHz Pentium
running NT. If the client
is communicating over a network, this saving is negligible, but
if the client resides in the same machine, maybe in an SMP machine
on a different processor from the server thread, the saving
can be important as the threads can communicate over shared
memory with an overhead of a few microseconds.
We may later implement a dedicated communication thread solution
for those endpoints which communicate over a network.
Our solution with user threads has two problems: for each endpoint
there has to be a number of listening threads. If there are many
communication endpoints, it may be difficult to set the right number
of concurrent threads in the system, as many of the threads
may always be waiting at less busy endpoints. Another problem
is queuing of the messages, as the server internally does not
offer any queue for jobs.
Another group of user threads is intended for splitting the
queries and processing them in parallel. Let us call these
parallel communication threads. These threads are waiting for
parallelized tasks, suspended on event semaphores.
A single user thread waits for input from the console,
like a command to shut the database.
Utility threads are a different group of threads which takes
care of the buffer pool flushing and other, mainly background
operations, in the server.
Some of these utility threads always run at a lower than normal
priority, so that they are always in background. Some of them
may dynamically boost their priority by the pri_adjust function,
even to higher than normal priority, if their task becomes urgent.
The running of utilities is controlled by high- and low-water marks
of urgency. The urgency may be measured by the number of dirty blocks
in the buffer pool, in the case of the flush thread, for example.
When the high-water mark is exceeded, an utility starts running, until
the urgency drops under the low-water mark. Then the utility thread
suspend itself to wait for an event. The master thread is
responsible of signaling this event when the utility thread is
again needed.
For each individual type of utility, some threads always remain
at lower than normal priority. This is because pri_adjust is implemented
so that the threads at normal or higher priority control their
share of running time by calling sleep. Thus, if the load of the
system suddenly drops, these threads cannot necessarily utilize
the system fully. The background priority threads make up for this,
starting to run when the load drops.
When there is no activity in the system, also the master thread
suspends itself to wait for an event making
the server totally silent. The responsibility to signal this
event is on the user thread which again receives a message
from a client.
There is still one complication in our server design. If a
background utility thread obtains a resource (e.g., mutex) needed by a user
thread, and there is also some other user activity in the system,
the user thread may have to wait indefinitely long for the
resource, as the OS does not schedule a background thread if
there is some other runnable user thread. This problem is called
priority inversion in real-time programming.
One solution to the priority inversion problem would be to
keep record of which thread owns which resource and
in the above case boost the priority of the background thread
so that it will be scheduled and it can release the resource.
This solution is called priority inheritance in real-time programming.
A drawback of this solution is that the overhead of acquiring a mutex
increases slightly, maybe 0.2 microseconds on a 100 MHz Pentium, because
the thread has to call os_thread_get_curr_id.
This may be compared to 0.5 microsecond overhead for a mutex lock-unlock
pair. Note that the thread
cannot store the information in the resource, say mutex, itself,
because competing threads could wipe out the information if it is
stored before acquiring the mutex, and if it stored afterwards,
the information is outdated for the time of one machine instruction,
at least. (To be precise, the information could be stored to
lock_word in mutex if the machine supports atomic swap.)
The above solution with priority inheritance may become actual in the
future, but at the moment we plan to implement a more coarse solution,
which could be called a global priority inheritance. If a thread
has to wait for a long time, say 300 milliseconds, for a resource,
we just guess that it may be waiting for a resource owned by a background
thread, and boost the priority of all runnable background threads
to the normal level. The background threads then themselves adjust
their fixed priority back to background after releasing all resources
they had (or, at some fixed points in their program code).
What is the performance of the global priority inheritance solution?
We may weigh the length of the wait time 300 milliseconds, during
which the system processes some other thread
to the cost of boosting the priority of each runnable background
thread, rescheduling it, and lowering the priority again.
On 100 MHz Pentium + NT this overhead may be of the order 100
microseconds per thread. So, if the number of runnable background
threads is not very big, say < 100, the cost is tolerable.
Utility threads probably will access resources used by
user threads not very often, so collisions of user threads
to preempted utility threads should not happen very often.
The thread table contains
information of the current status of each thread existing in the system,
and also the event semaphores used in suspending the master thread
and utility and parallel communication threads when they have nothing to do.
The thread table can be seen as an analogue to the process table
in a traditional Unix implementation.
The thread table is also used in the global priority inheritance
scheme. This brings in one additional complication: threads accessing
the thread table must have at least normal fixed priority,
because the priority inheritance solution does not work if a background
thread is preempted while possessing the mutex protecting the thread table.
So, if a thread accesses the thread table, its priority has to be
boosted at least to normal. This priority requirement can be seen similar to
the privileged mode used when processing the kernel calls in traditional
Unix.*/
/* Thread slot in the thread table. Instances live in the srv_sys->threads
array and in srv_mysql_table (both allocated in srv_init()); slots are
reserved and freed while holding kernel_mutex. */
struct srv_slot_struct{
os_thread_id_t id; /*!< thread id */
os_thread_t handle; /*!< thread handle */
unsigned type:3; /*!< thread type: user, utility etc. */
unsigned in_use:1; /*!< TRUE if this slot is in use */
unsigned suspended:1; /*!< TRUE if the thread is waiting
for the event of this slot */
ib_time_t suspend_time; /*!< time when the thread was
suspended */
os_event_t event; /*!< event used in suspending the
thread when it has nothing to do */
que_thr_t* thr; /*!< suspended query thread (only
used for MySQL threads) */
};
/* Table for MySQL threads where they will be suspended to wait for locks;
allocated in srv_init(), scanned in srv_release_mysql_thread_if_suspended() */
UNIV_INTERN srv_slot_t* srv_mysql_table = NULL;
/* Event used to wake the lock timeout monitor thread; set in
srv_suspend_mysql_thread() */
UNIV_INTERN os_event_t srv_lock_timeout_thread_event;
/* The server system control struct; owns the thread slot table */
UNIV_INTERN srv_sys_t* srv_sys = NULL;
/* padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte srv_pad1[64];
/* mutex protecting the server, trx structs, query threads, and lock table */
UNIV_INTERN mutex_t* kernel_mutex_temp;
/* padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte srv_pad2[64];
#if 0
/* The following three values measure the urgency of the jobs of
buffer, version, and insert threads. They may vary from 0 - 1000.
The server mutex protects all these variables. The low-water values
tell that the server can acquiesce the utility when the value
drops below this low-water mark. */
static ulint srv_meter[SRV_MASTER + 1];
static ulint srv_meter_low_water[SRV_MASTER + 1];
static ulint srv_meter_high_water[SRV_MASTER + 1];
static ulint srv_meter_high_water2[SRV_MASTER + 1];
static ulint srv_meter_foreground[SRV_MASTER + 1];
#endif
/* The following values give info about the activity going on in
the database. They are protected by the server mutex. The arrays
are indexed by the type of the thread. */
UNIV_INTERN ulint srv_n_threads_active[SRV_MASTER + 1];
UNIV_INTERN ulint srv_n_threads[SRV_MASTER + 1];
/***********************************************************************
Prints counters for work done by srv_master_thread. */
static
void
srv_print_master_thread_info(
/*=========================*/
	FILE *file)	/* in: output stream */
{
	/* Both report lines are emitted with a single fprintf; the
	resulting output is identical to printing them separately. */
	fprintf(file,
		"srv_master_thread loops: %lu 1_second, %lu sleeps, "
		"%lu 10_second, %lu background, %lu flush\n"
		"srv_master_thread log flush and writes: %lu\n",
		srv_main_1_second_loops, srv_main_sleeps,
		srv_main_10_second_loops, srv_main_background_loops,
		srv_main_flush_loops,
		srv_log_writes_and_flush);
}
/*********************************************************************//**
Records a constant string describing what the given i/o handler thread
is currently doing, so it can be reported later (e.g., in monitor
output). */
UNIV_INTERN
void
srv_set_io_thread_op_info(
/*======================*/
	ulint		i,	/*!< in: the 'segment' of the i/o thread */
	const char*	str)	/*!< in: constant char string describing the
				state */
{
	/* The segment index must lie within the op-info array. */
	ut_a(i < SRV_MAX_N_IO_THREADS);

	srv_io_thread_op_info[i] = str;
}
/*********************************************************************//**
Accessor function to get pointer to n'th slot in the server thread
table.
@return pointer to the slot */
static
srv_slot_t*
srv_table_get_nth_slot(
/*===================*/
	ulint	index)	/*!< in: index of the slot */
{
	/* Out-of-range access is a fatal error. */
	ut_a(index < OS_THREAD_MAX_N);

	return(&srv_sys->threads[index]);
}
/*********************************************************************//**
Gets the number of threads in the system.
@return sum of srv_n_threads[] */
UNIV_INTERN
ulint
srv_get_n_threads(void)
/*===================*/
{
	ulint	type;
	ulint	total = 0;

	/* srv_n_threads[] is protected by the kernel mutex. */
	mutex_enter(&kernel_mutex);

	for (type = SRV_COM; type <= SRV_MASTER; type++) {
		total += srv_n_threads[type];
	}

	mutex_exit(&kernel_mutex);

	return(total);
}
/*********************************************************************//**
Reserves a slot in the thread table for the current thread. Also creates the
thread local storage struct for the current thread. NOTE! The server mutex
has to be reserved by the caller!
@return reserved slot index */
static
ulint
srv_table_reserve_slot(
/*===================*/
enum srv_thread_type type) /*!< in: type of the thread */
{
srv_slot_t* slot;
ulint i;
ut_a(type > 0);
ut_a(type <= SRV_MASTER);
/* Linear scan for the first free slot. There is no explicit bound
check here: if all OS_THREAD_MAX_N slots are in use, the
ut_a(index < OS_THREAD_MAX_N) inside srv_table_get_nth_slot()
fires and terminates the server. */
i = 0;
slot = srv_table_get_nth_slot(i);
while (slot->in_use) {
i++;
slot = srv_table_get_nth_slot(i);
}
ut_a(slot->in_use == FALSE);
slot->in_use = TRUE;
slot->suspended = FALSE;
slot->type = type;
slot->id = os_thread_get_curr_id();
slot->handle = os_thread_get_curr();
/* Remember the slot number in thread local storage so that
srv_suspend_thread() and srv_get_thread_type() can find this
thread's slot later. */
thr_local_create();
thr_local_set_slot_no(os_thread_get_curr_id(), i);
return(i);
}
/*********************************************************************//**
Suspends the calling thread to wait for the event in its thread slot.
NOTE! The server mutex has to be reserved by the caller!
@return event for the calling thread to wait */
static
os_event_t
srv_suspend_thread(void)
/*====================*/
{
srv_slot_t* slot;
os_event_t event;
ulint slot_no;
enum srv_thread_type type;
ut_ad(mutex_own(&kernel_mutex));
/* The slot number was stored in thread local storage by
srv_table_reserve_slot(). */
slot_no = thr_local_get_slot_no(os_thread_get_curr_id());
if (srv_print_thread_releases) {
fprintf(stderr,
"Suspending thread %lu to slot %lu\n",
(ulong) os_thread_get_curr_id(), (ulong) slot_no);
}
slot = srv_table_get_nth_slot(slot_no);
type = slot->type;
ut_ad(type >= SRV_WORKER);
ut_ad(type <= SRV_MASTER);
event = slot->event;
slot->suspended = TRUE;
ut_ad(srv_n_threads_active[type] > 0);
srv_n_threads_active[type]--;
/* Reset the event while still holding kernel_mutex. Since
srv_release_threads() sets the event under the same mutex, a
release cannot slip in between this reset and the caller's
subsequent os_event_wait() and be lost. */
os_event_reset(event);
/* NOTE: the caller must release kernel_mutex and then wait on the
returned event; this function does not block. */
return(event);
}
/*********************************************************************//**
Releases threads of the type given from suspension in the thread table.
NOTE! The server mutex has to be reserved by the caller!
@return number of threads released: this may be less than n if not
enough threads were suspended at the moment */
UNIV_INTERN
ulint
srv_release_threads(
/*================*/
enum srv_thread_type type, /*!< in: thread type */
ulint n) /*!< in: number of threads to release */
{
srv_slot_t* slot;
ulint i;
ulint count = 0;
ut_ad(type >= SRV_WORKER);
ut_ad(type <= SRV_MASTER);
ut_ad(n > 0);
ut_ad(mutex_own(&kernel_mutex));
/* Scan the whole slot table for suspended threads of the given
type, waking at most n of them. */
for (i = 0; i < OS_THREAD_MAX_N; i++) {
slot = srv_table_get_nth_slot(i);
if (slot->in_use && slot->type == type && slot->suspended) {
/* Mark the thread active and signal its event here,
on its behalf, before it actually resumes; the woken
thread was suspended in srv_suspend_thread(), which
decremented the active count. */
slot->suspended = FALSE;
srv_n_threads_active[type]++;
os_event_set(slot->event);
if (srv_print_thread_releases) {
fprintf(stderr,
"Releasing thread %lu type %lu"
" from slot %lu\n",
(ulong) slot->id, (ulong) type,
(ulong) i);
}
count++;
if (count == n) {
break;
}
}
}
return(count);
}
/*********************************************************************//**
Returns the calling thread type.
@return SRV_COM, ... */
UNIV_INTERN
enum srv_thread_type
srv_get_thread_type(void)
/*=====================*/
{
	srv_slot_t*		slot;
	enum srv_thread_type	type;

	mutex_enter(&kernel_mutex);

	/* Find this thread's slot via the slot number kept in thread
	local storage, and read its type. */
	slot = srv_table_get_nth_slot(
		thr_local_get_slot_no(os_thread_get_curr_id()));
	type = slot->type;

	ut_ad(type >= SRV_WORKER);
	ut_ad(type <= SRV_MASTER);

	mutex_exit(&kernel_mutex);

	return(type);
}
/*********************************************************************//**
Initializes the server. */
UNIV_INTERN
void
srv_init(void)
/*==========*/
{
srv_conc_slot_t* conc_slot;
srv_slot_t* slot;
ulint i;
/* Allocate the server control struct and create the kernel mutex
before the slot tables, because srv_table_get_nth_slot() below
dereferences srv_sys. */
srv_sys = mem_alloc(sizeof(srv_sys_t));
kernel_mutex_temp = mem_alloc(sizeof(mutex_t));
mutex_create(&kernel_mutex, SYNC_KERNEL);
mutex_create(&srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK);
/* Server thread slot table: one slot per possible OS thread. */
srv_sys->threads = mem_alloc(OS_THREAD_MAX_N * sizeof(srv_slot_t));
for (i = 0; i < OS_THREAD_MAX_N; i++) {
slot = srv_table_get_nth_slot(i);
slot->in_use = FALSE;
slot->type=0; /* Avoid purify errors */
slot->event = os_event_create(NULL);
ut_a(slot->event);
}
/* Separate slot table for MySQL threads waiting for row locks. */
srv_mysql_table = mem_alloc(OS_THREAD_MAX_N * sizeof(srv_slot_t));
for (i = 0; i < OS_THREAD_MAX_N; i++) {
slot = srv_mysql_table + i;
slot->in_use = FALSE;
slot->type = 0;
slot->event = os_event_create(NULL);
ut_a(slot->event);
}
srv_lock_timeout_thread_event = os_event_create(NULL);
for (i = 0; i < SRV_MASTER + 1; i++) {
srv_n_threads_active[i] = 0;
srv_n_threads[i] = 0;
#if 0
srv_meter[i] = 30;
srv_meter_low_water[i] = 50;
srv_meter_high_water[i] = 100;
srv_meter_high_water2[i] = 200;
srv_meter_foreground[i] = 250;
#endif
}
UT_LIST_INIT(srv_sys->tasks);
/* Create dummy indexes for infimum and supremum records */
dict_ind_init();
/* Init the server concurrency restriction data structures */
os_fast_mutex_init(&srv_conc_mutex);
UT_LIST_INIT(srv_conc_queue);
srv_conc_slots = mem_alloc(OS_THREAD_MAX_N * sizeof(srv_conc_slot_t));
for (i = 0; i < OS_THREAD_MAX_N; i++) {
conc_slot = srv_conc_slots + i;
conc_slot->reserved = FALSE;
conc_slot->event = os_event_create(NULL);
ut_a(conc_slot->event);
}
/* Initialize some INFORMATION SCHEMA internal structures */
trx_i_s_cache_init(trx_i_s_cache);
}
/*********************************************************************//**
Frees the data structures created in srv_init(). */
UNIV_INTERN
void
srv_free(void)
/*==========*/
{
os_fast_mutex_free(&srv_conc_mutex);
mem_free(srv_conc_slots);
srv_conc_slots = NULL;
mem_free(srv_sys->threads);
mem_free(srv_sys);
srv_sys = NULL;
mem_free(kernel_mutex_temp);
kernel_mutex_temp = NULL;
mem_free(srv_mysql_table);
srv_mysql_table = NULL;
/* NOTE(review): the per-slot os_event objects created in srv_init()
and the kernel/monitor mutexes are not explicitly destroyed here;
presumably they are released by the global sync shutdown — confirm. */
trx_i_s_cache_free(trx_i_s_cache);
}
/*********************************************************************//**
Initializes the synchronization primitives, memory system, and the thread
local storage. NOTE: the call order below is significant — the sync
subsystem is brought up before the memory pool and thread local storage
are initialized. */
UNIV_INTERN
void
srv_general_init(void)
/*==================*/
{
ut_mem_init();
/* Reset the system variables in the recovery module. */
recv_sys_var_init();
os_sync_init();
sync_init();
mem_init(srv_mem_pool_size);
thr_local_init();
}
/*======================= InnoDB Server FIFO queue =======================*/
/* Maximum allowable purge history length. <=0 means 'infinite'.
NOTE(review): not referenced in the code visible here; presumably set
from a server configuration parameter — confirm against the handler. */
UNIV_INTERN ulong srv_max_purge_lag = 0;
/*********************************************************************//**
Puts an OS thread to wait if there are too many concurrent threads
(>= srv_thread_concurrency) inside InnoDB. The threads wait in a FIFO queue. */
UNIV_INTERN
void
srv_conc_enter_innodb(
/*==================*/
trx_t* trx) /*!< in: transaction object associated with the
thread */
{
ibool has_slept = FALSE;
srv_conc_slot_t* slot = NULL;
ulint i;
/* Replication slave threads bypass the FIFO queue: they only spin
(bounded by srv_replication_delay) until the thread count drops
below the concurrency limit. */
if (trx->mysql_thd != NULL
&& thd_is_replication_slave_thread(trx->mysql_thd)) {
UT_WAIT_FOR(srv_conc_n_threads
< (lint)srv_thread_concurrency,
srv_replication_delay * 1000);
return;
}
/* If trx has 'free tickets' to enter the engine left, then use one
such ticket */
if (trx->n_tickets_to_enter_innodb > 0) {
trx->n_tickets_to_enter_innodb--;
return;
}
os_fast_mutex_lock(&srv_conc_mutex);
retry:
if (trx->declared_to_be_inside_innodb) {
/* Double declaration is a caller bug: report it and return
without changing the counters. */
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: trying to declare trx"
" to enter InnoDB, but\n"
"InnoDB: it already is declared.\n", stderr);
trx_print(stderr, trx, 0);
putc('\n', stderr);
os_fast_mutex_unlock(&srv_conc_mutex);
return;
}
ut_ad(srv_conc_n_threads >= 0);
if (srv_conc_n_threads < (lint)srv_thread_concurrency) {
/* Room inside the engine: enter immediately and grant a
batch of free tickets for subsequent calls. */
srv_conc_n_threads++;
trx->declared_to_be_inside_innodb = TRUE;
trx->n_tickets_to_enter_innodb = SRV_FREE_TICKETS_TO_ENTER;
os_fast_mutex_unlock(&srv_conc_mutex);
return;
}
/* If the transaction is not holding resources, let it sleep
for SRV_THREAD_SLEEP_DELAY microseconds, and try again then */
if (!has_slept && !trx->has_search_latch
&& NULL == UT_LIST_GET_FIRST(trx->trx_locks)) {
has_slept = TRUE; /* We let it sleep only once to avoid
starvation */
srv_conc_n_waiting_threads++;
os_fast_mutex_unlock(&srv_conc_mutex);
trx->op_info = "sleeping before joining InnoDB queue";
/* Peter Zaitsev suggested that we take the sleep away
altogether. But the sleep may be good in pathological
situations of lots of thread switches. Simply put some
threads aside for a while to reduce the number of thread
switches. */
if (SRV_THREAD_SLEEP_DELAY > 0) {
os_thread_sleep(SRV_THREAD_SLEEP_DELAY);
}
trx->op_info = "";
os_fast_mutex_lock(&srv_conc_mutex);
srv_conc_n_waiting_threads--;
goto retry;
}
/* Too many threads inside: put the current thread to a queue */
for (i = 0; i < OS_THREAD_MAX_N; i++) {
slot = srv_conc_slots + i;
if (!slot->reserved) {
break;
}
}
if (i == OS_THREAD_MAX_N) {
/* Could not find a free wait slot, we must let the
thread enter */
srv_conc_n_threads++;
trx->declared_to_be_inside_innodb = TRUE;
trx->n_tickets_to_enter_innodb = 0;
os_fast_mutex_unlock(&srv_conc_mutex);
return;
}
/* Release possible search system latch this thread has */
if (trx->has_search_latch) {
trx_search_latch_release_if_reserved(trx);
}
/* Add to the queue */
slot->reserved = TRUE;
slot->wait_ended = FALSE;
UT_LIST_ADD_LAST(srv_conc_queue, srv_conc_queue, slot);
/* Reset the slot event while holding srv_conc_mutex, so a wakeup
from srv_conc_force_exit_innodb() cannot be lost before the
os_event_wait() below. */
os_event_reset(slot->event);
srv_conc_n_waiting_threads++;
os_fast_mutex_unlock(&srv_conc_mutex);
/* Go to wait for the event; when a thread leaves InnoDB it will
release this thread */
trx->op_info = "waiting in InnoDB queue";
os_event_wait(slot->event);
trx->op_info = "";
os_fast_mutex_lock(&srv_conc_mutex);
srv_conc_n_waiting_threads--;
/* NOTE that the thread which released this thread already
incremented the thread counter on behalf of this thread */
slot->reserved = FALSE;
UT_LIST_REMOVE(srv_conc_queue, srv_conc_queue, slot);
trx->declared_to_be_inside_innodb = TRUE;
trx->n_tickets_to_enter_innodb = SRV_FREE_TICKETS_TO_ENTER;
os_fast_mutex_unlock(&srv_conc_mutex);
}
/*********************************************************************//**
This lets a thread enter InnoDB regardless of the number of threads inside
InnoDB. This must be called when a thread ends a lock wait. */
UNIV_INTERN
void
srv_conc_force_enter_innodb(
/*========================*/
	trx_t*	trx)	/*!< in: transaction object associated with the
			thread */
{
	if (UNIV_LIKELY(!srv_thread_concurrency)) {
		/* Concurrency limiting disabled: nothing to account. */

		return;
	}

	ut_ad(srv_conc_n_threads >= 0);

	os_fast_mutex_lock(&srv_conc_mutex);

	/* Count the thread in unconditionally and grant it a single
	free ticket. */
	trx->n_tickets_to_enter_innodb = 1;
	trx->declared_to_be_inside_innodb = TRUE;
	srv_conc_n_threads++;

	os_fast_mutex_unlock(&srv_conc_mutex);
}
/*********************************************************************//**
This must be called when a thread exits InnoDB in a lock wait or at the
end of an SQL statement. */
UNIV_INTERN
void
srv_conc_force_exit_innodb(
/*=======================*/
trx_t* trx) /*!< in: transaction object associated with the
thread */
{
srv_conc_slot_t* slot = NULL;
/* Replication slave threads never went through the queue in
srv_conc_enter_innodb(), so there is nothing to undo for them. */
if (trx->mysql_thd != NULL
&& thd_is_replication_slave_thread(trx->mysql_thd)) {
return;
}
if (trx->declared_to_be_inside_innodb == FALSE) {
return;
}
os_fast_mutex_lock(&srv_conc_mutex);
ut_ad(srv_conc_n_threads > 0);
srv_conc_n_threads--;
trx->declared_to_be_inside_innodb = FALSE;
trx->n_tickets_to_enter_innodb = 0;
if (srv_conc_n_threads < (lint)srv_thread_concurrency) {
/* Look for a slot where a thread is waiting and no other
thread has yet released the thread */
slot = UT_LIST_GET_FIRST(srv_conc_queue);
while (slot && slot->wait_ended == TRUE) {
slot = UT_LIST_GET_NEXT(srv_conc_queue, slot);
}
if (slot != NULL) {
slot->wait_ended = TRUE;
/* We increment the count on behalf of the released
thread */
srv_conc_n_threads++;
}
}
os_fast_mutex_unlock(&srv_conc_mutex);
/* The event is set only after releasing srv_conc_mutex; the
wait_ended flag set above already marks the slot as claimed, so
other exiting threads will skip it. */
if (slot != NULL) {
os_event_set(slot->event);
}
}
/*********************************************************************//**
This must be called when a thread exits InnoDB. */
UNIV_INTERN
void
srv_conc_exit_innodb(
/*=================*/
	trx_t*	trx)	/*!< in: transaction object associated with the
			thread */
{
	if (trx->n_tickets_to_enter_innodb == 0) {
		/* No free tickets left: declare the thread definitely
		outside InnoDB. */
		srv_conc_force_exit_innodb(trx);

		return;
	}

	/* The trx still holds free tickets: we pretend the thread is
	still inside InnoDB although it now leaves the engine. In this
	way we save a lot of semaphore operations.
	srv_conc_force_exit_innodb() is used to declare the thread
	definitely outside InnoDB; it should be called when there is a
	lock wait or an SQL statement ends. */
}
/*========================================================================*/
/*********************************************************************//**
Normalizes init parameter values to use units we use inside InnoDB.
@return DB_SUCCESS or error code */
static
ulint
srv_normalize_init_values(void)
/*===========================*/
{
	/* Conversion factor from megabytes to database pages. */
	ulint	pages_per_mb = (1024 * 1024) / UNIV_PAGE_SIZE;
	ulint	i;

	/* Data file sizes and the autoextend limit are configured in
	megabytes; convert them to pages. */
	for (i = 0; i < srv_n_data_files; i++) {
		srv_data_file_sizes[i] *= pages_per_mb;
	}

	srv_last_file_size_max *= pages_per_mb;

	/* Log sizes are configured in bytes; convert to pages. */
	srv_log_file_size /= UNIV_PAGE_SIZE;
	srv_log_buffer_size /= UNIV_PAGE_SIZE;

	srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE);

	return(DB_SUCCESS);
}
/*********************************************************************//**
Boots the InnoDB server.
@return DB_SUCCESS or error code */
UNIV_INTERN
ulint
srv_boot(void)
/*==========*/
{
	/* Transform the init parameter values given by MySQL into the
	units used inside InnoDB. */
	ulint	err = srv_normalize_init_values();

	if (err != DB_SUCCESS) {

		return(err);
	}

	/* Bring up synchronization primitives, memory management and
	thread local storage, then this module itself. */
	srv_general_init();
	srv_init();

	return(DB_SUCCESS);
}
/*********************************************************************//**
Reserves a slot in the thread table for the current MySQL OS thread.
NOTE! The kernel mutex has to be reserved by the caller!
@return reserved slot */
static
srv_slot_t*
srv_table_reserve_slot_for_mysql(void)
/*==================================*/
{
srv_slot_t* slot;
ulint i;
ut_ad(mutex_own(&kernel_mutex));
/* Linear scan for a free slot in srv_mysql_table. */
i = 0;
slot = srv_mysql_table + i;
while (slot->in_use) {
i++;
if (i >= OS_THREAD_MAX_N) {
/* All slots taken: this should never happen. Dump the
whole table for diagnosis and then crash deliberately
via ut_error to get a stack trace. */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: There appear to be %lu MySQL"
" threads currently waiting\n"
"InnoDB: inside InnoDB, which is the"
" upper limit. Cannot continue operation.\n"
"InnoDB: We intentionally generate"
" a seg fault to print a stack trace\n"
"InnoDB: on Linux. But first we print"
" a list of waiting threads.\n", (ulong) i);
for (i = 0; i < OS_THREAD_MAX_N; i++) {
slot = srv_mysql_table + i;
fprintf(stderr,
"Slot %lu: thread id %lu, type %lu,"
" in use %lu, susp %lu, time %lu\n",
(ulong) i,
(ulong) os_thread_pf(slot->id),
(ulong) slot->type,
(ulong) slot->in_use,
(ulong) slot->suspended,
(ulong) difftime(ut_time(),
slot->suspend_time));
}
ut_error;
}
slot = srv_mysql_table + i;
}
ut_a(slot->in_use == FALSE);
slot->in_use = TRUE;
slot->id = os_thread_get_curr_id();
slot->handle = os_thread_get_curr();
return(slot);
}
/***************************************************************//**
Puts a MySQL OS thread to wait for a lock to be released. If an error
occurs during the wait trx->error_state associated with thr is
!= DB_SUCCESS when we return. DB_LOCK_WAIT_TIMEOUT and DB_DEADLOCK
are possible errors. DB_DEADLOCK is returned if selective deadlock
resolution chose this transaction as a victim. */
UNIV_INTERN
void
srv_suspend_mysql_thread(
/*=====================*/
que_thr_t* thr) /*!< in: query thread associated with the MySQL
OS thread */
{
srv_slot_t* slot;
os_event_t event;
double wait_time;
trx_t* trx;
ulint had_dict_lock;
ibool was_declared_inside_innodb = FALSE;
ib_int64_t start_time = 0;
ib_int64_t finish_time;
ulint diff_time;
ulint sec;
ulint ms;
ulong lock_wait_timeout;
ut_ad(!mutex_own(&kernel_mutex));
trx = thr_get_trx(thr);
/* Wake the lock timeout monitor thread so it starts tracking this
wait. */
os_event_set(srv_lock_timeout_thread_event);
mutex_enter(&kernel_mutex);
trx->error_state = DB_SUCCESS;
if (thr->state == QUE_THR_RUNNING) {
ut_ad(thr->is_active == TRUE);
/* The lock has already been released or this transaction
was chosen as a deadlock victim: no need to suspend */
if (trx->was_chosen_as_deadlock_victim) {
trx->error_state = DB_DEADLOCK;
trx->was_chosen_as_deadlock_victim = FALSE;
}
mutex_exit(&kernel_mutex);
return;
}
ut_ad(thr->is_active == FALSE);
/* Reserve a slot and reset its event while still holding
kernel_mutex, so a release signaled before we reach os_event_wait()
below cannot be lost. */
slot = srv_table_reserve_slot_for_mysql();
event = slot->event;
slot->thr = thr;
os_event_reset(event);
slot->suspend_time = ut_time();
if (thr->lock_state == QUE_THR_LOCK_ROW) {
/* Record the wait start for the lock wait statistics. A
failed ut_usectime() is flagged with -1 so the max-wait
statistic is not polluted (see Bug#36819 below). */
srv_n_lock_wait_count++;
srv_n_lock_wait_current_count++;
if (ut_usectime(&sec, &ms) == -1) {
start_time = -1;
} else {
start_time = (ib_int64_t) sec * 1000000 + ms;
}
}
/* Wake the lock timeout monitor thread, if it is suspended */
os_event_set(srv_lock_timeout_thread_event);
mutex_exit(&kernel_mutex);
if (trx->declared_to_be_inside_innodb) {
was_declared_inside_innodb = TRUE;
/* We must declare this OS thread to exit InnoDB, since a
possible other thread holding a lock which this thread waits
for must be allowed to enter, sooner or later */
srv_conc_force_exit_innodb(trx);
}
/* Release any data dictionary latch before blocking, and remember
which mode it was held in so it can be reacquired after the wait. */
had_dict_lock = trx->dict_operation_lock_mode;
switch (had_dict_lock) {
case RW_S_LATCH:
/* Release foreign key check latch */
row_mysql_unfreeze_data_dictionary(trx);
break;
case RW_X_LATCH:
/* Release fast index creation latch */
row_mysql_unlock_data_dictionary(trx);
break;
}
ut_a(trx->dict_operation_lock_mode == 0);
/* Suspend this thread and wait for the event. */
os_event_wait(event);
/* After resuming, reacquire the data dictionary latch if
necessary. */
switch (had_dict_lock) {
case RW_S_LATCH:
row_mysql_freeze_data_dictionary(trx);
break;
case RW_X_LATCH:
row_mysql_lock_data_dictionary(trx);
break;
}
if (was_declared_inside_innodb) {
/* Return back inside InnoDB */
srv_conc_force_enter_innodb(trx);
}
mutex_enter(&kernel_mutex);
/* Release the slot for others to use */
slot->in_use = FALSE;
wait_time = ut_difftime(ut_time(), slot->suspend_time);
if (thr->lock_state == QUE_THR_LOCK_ROW) {
if (ut_usectime(&sec, &ms) == -1) {
finish_time = -1;
} else {
finish_time = (ib_int64_t) sec * 1000000 + ms;
}
diff_time = (ulint) (finish_time - start_time);
srv_n_lock_wait_current_count--;
srv_n_lock_wait_time = srv_n_lock_wait_time + diff_time;
if (diff_time > srv_n_lock_max_wait_time &&
/* only update the variable if we successfully
retrieved the start and finish times. See Bug#36819. */
start_time != -1 && finish_time != -1) {
srv_n_lock_max_wait_time = diff_time;
}
}
if (trx->was_chosen_as_deadlock_victim) {
trx->error_state = DB_DEADLOCK;
trx->was_chosen_as_deadlock_victim = FALSE;
}
mutex_exit(&kernel_mutex);
/* InnoDB system transactions (such as the purge, and
incomplete transactions that are being rolled back after crash
recovery) will use the global value of
innodb_lock_wait_timeout, because trx->mysql_thd == NULL. */
lock_wait_timeout = thd_lock_wait_timeout(trx->mysql_thd);
if (lock_wait_timeout < 100000000
&& wait_time > (double) lock_wait_timeout) {
trx->error_state = DB_LOCK_WAIT_TIMEOUT;
}
if (trx_is_interrupted(trx)) {
trx->error_state = DB_INTERRUPTED;
}
}
/********************************************************************//**
Releases a MySQL OS thread waiting for a lock to be released, if the
thread is already suspended. */
UNIV_INTERN
void
srv_release_mysql_thread_if_suspended(
/*==================================*/
	que_thr_t*	thr)	/*!< in: query thread associated with the
				MySQL OS thread */
{
	ulint	i;

	ut_ad(mutex_own(&kernel_mutex));

	for (i = 0; i < OS_THREAD_MAX_N; i++) {
		srv_slot_t*	slot = srv_mysql_table + i;

		if (slot->in_use && slot->thr == thr) {
			/* Found the suspended thread: wake it. */
			os_event_set(slot->event);

			return;
		}
	}

	/* Not found: the thread was not suspended; nothing to do. */
}
/******************************************************************//**
Refreshes the values used to calculate per-second averages. */
static
void
srv_refresh_innodb_monitor_stats(void)
/*==================================*/
{
	mutex_enter(&srv_innodb_monitor_mutex);

	srv_last_monitor_time = time(NULL);

	/* Snapshot the row operation counters; these refreshes are
	independent of each other, so their order does not matter. */
	srv_n_rows_inserted_old = srv_n_rows_inserted;
	srv_n_rows_updated_old = srv_n_rows_updated;
	srv_n_rows_deleted_old = srv_n_rows_deleted;
	srv_n_rows_read_old = srv_n_rows_read;

	/* Snapshot the adaptive hash search counters. */
	btr_cur_n_sea_old = btr_cur_n_sea;
	btr_cur_n_non_sea_old = btr_cur_n_non_sea;

	/* Let each subsystem refresh its own statistics. */
	os_aio_refresh_stats();
	log_refresh_stats();
	buf_refresh_io_stats();

	mutex_exit(&srv_innodb_monitor_mutex);
}
/******************************************************************//**
Outputs to a file the output of the InnoDB Monitor.
@return FALSE if not all information printed
due to failure to obtain necessary mutex */
UNIV_INTERN
ibool
srv_printf_innodb_monitor(
/*======================*/
FILE* file, /*!< in: output stream */
ibool nowait, /*!< in: whether to wait for kernel mutex */
ulint* trx_start, /*!< out: file position of the start of
the list of active transactions */
ulint* trx_end) /*!< out: file position of the end of
the list of active transactions */
{
double time_elapsed;
time_t current_time;
ulint n_reserved;
ibool ret;
mutex_enter(&srv_innodb_monitor_mutex);
current_time = time(NULL);
/* We add 0.001 seconds to time_elapsed to prevent division
by zero if two users happen to call SHOW INNODB STATUS at the same
time */
time_elapsed = difftime(current_time, srv_last_monitor_time)
+ 0.001;
srv_last_monitor_time = time(NULL);
fputs("\n=====================================\n", file);
ut_print_timestamp(file);
fprintf(file,
" INNODB MONITOR OUTPUT\n"
"=====================================\n"
"Per second averages calculated from the last %lu seconds\n",
(ulong)time_elapsed);
fputs("-----------------\n"
"BACKGROUND THREAD\n"
"-----------------\n", file);
srv_print_master_thread_info(file);
fputs("----------\n"
"SEMAPHORES\n"
"----------\n", file);
sync_print(file);
/* Conceptually, srv_innodb_monitor_mutex has a very high latching
order level in sync0sync.h, while dict_foreign_err_mutex has a very
low level 135. Therefore we can reserve the latter mutex here without
a danger of a deadlock of threads. */
mutex_enter(&dict_foreign_err_mutex);
if (ftell(dict_foreign_err_file) != 0L) {
fputs("------------------------\n"
"LATEST FOREIGN KEY ERROR\n"
"------------------------\n", file);
ut_copy_file(file, dict_foreign_err_file);
}
mutex_exit(&dict_foreign_err_mutex);
/* Only if lock_print_info_summary proceeds correctly,
before we call the lock_print_info_all_transactions
to print all the lock information. */
ret = lock_print_info_summary(file, nowait);
if (ret) {
/* Record the file offsets bracketing the transaction list,
if the caller asked for them; ULINT_UNDEFINED flags an
ftell() failure. */
if (trx_start) {
long t = ftell(file);
if (t < 0) {
*trx_start = ULINT_UNDEFINED;
} else {
*trx_start = (ulint) t;
}
}
lock_print_info_all_transactions(file);
if (trx_end) {
long t = ftell(file);
if (t < 0) {
*trx_end = ULINT_UNDEFINED;
} else {
*trx_end = (ulint) t;
}
}
}
fputs("--------\n"
"FILE I/O\n"
"--------\n", file);
os_aio_print(file);
fputs("-------------------------------------\n"
"INSERT BUFFER AND ADAPTIVE HASH INDEX\n"
"-------------------------------------\n", file);
ibuf_print(file);
ha_print_info(file, btr_search_sys->hash_index);
/* Per-second rates are computed against the snapshot taken at the
previous monitor call, then the snapshot is advanced. */
fprintf(file,
"%.2f hash searches/s, %.2f non-hash searches/s\n",
(btr_cur_n_sea - btr_cur_n_sea_old)
/ time_elapsed,
(btr_cur_n_non_sea - btr_cur_n_non_sea_old)
/ time_elapsed);
btr_cur_n_sea_old = btr_cur_n_sea;
btr_cur_n_non_sea_old = btr_cur_n_non_sea;
fputs("---\n"
"LOG\n"
"---\n", file);
log_print(file);
fputs("----------------------\n"
"BUFFER POOL AND MEMORY\n"
"----------------------\n", file);
fprintf(file,
"Total memory allocated " ULINTPF
"; in additional pool allocated " ULINTPF "\n",
ut_total_allocated_memory,
mem_pool_get_reserved(mem_comm_pool));
fprintf(file, "Dictionary memory allocated " ULINTPF "\n",
dict_sys->size);
buf_print_io(file);
fputs("--------------\n"
"ROW OPERATIONS\n"
"--------------\n", file);
fprintf(file, "%ld queries inside InnoDB, %lu queries in queue\n",
(long) srv_conc_n_threads,
(ulong) srv_conc_n_waiting_threads);
fprintf(file, "%lu read views open inside InnoDB\n",
UT_LIST_GET_LEN(trx_sys->view_list));
n_reserved = fil_space_get_n_reserved_extents(0);
if (n_reserved > 0) {
fprintf(file,
"%lu tablespace extents now reserved for"
" B-tree split operations\n",
(ulong) n_reserved);
}
#ifdef UNIV_LINUX
fprintf(file, "Main thread process no. %lu, id %lu, state: %s\n",
(ulong) srv_main_thread_process_no,
(ulong) srv_main_thread_id,
srv_main_thread_op_info);
#else
fprintf(file, "Main thread id %lu, state: %s\n",
(ulong) srv_main_thread_id,
srv_main_thread_op_info);
#endif
fprintf(file,
"Number of rows inserted " ULINTPF
", updated " ULINTPF ", deleted " ULINTPF
", read " ULINTPF "\n",
srv_n_rows_inserted,
srv_n_rows_updated,
srv_n_rows_deleted,
srv_n_rows_read);
fprintf(file,
"%.2f inserts/s, %.2f updates/s,"
" %.2f deletes/s, %.2f reads/s\n",
(srv_n_rows_inserted - srv_n_rows_inserted_old)
/ time_elapsed,
(srv_n_rows_updated - srv_n_rows_updated_old)
/ time_elapsed,
(srv_n_rows_deleted - srv_n_rows_deleted_old)
/ time_elapsed,
(srv_n_rows_read - srv_n_rows_read_old)
/ time_elapsed);
/* Advance the row counter snapshots for the next call. */
srv_n_rows_inserted_old = srv_n_rows_inserted;
srv_n_rows_updated_old = srv_n_rows_updated;
srv_n_rows_deleted_old = srv_n_rows_deleted;
srv_n_rows_read_old = srv_n_rows_read;
fputs("----------------------------\n"
"END OF INNODB MONITOR OUTPUT\n"
"============================\n", file);
mutex_exit(&srv_innodb_monitor_mutex);
fflush(file);
return(ret);
}
/******************************************************************//**
Function to pass InnoDB status variables to MySQL.
Copies the current values of the internal InnoDB counters into the
global export_vars structure, from which the MySQL layer reads the
SHOW STATUS values. The whole snapshot is taken under
srv_innodb_monitor_mutex so that the exported values are mutually
consistent. */
UNIV_INTERN
void
srv_export_innodb_status(void)
/*==========================*/
{
	mutex_enter(&srv_innodb_monitor_mutex);

	/* File i/o counters */
	export_vars.innodb_data_pending_reads
		= os_n_pending_reads;
	export_vars.innodb_data_pending_writes
		= os_n_pending_writes;
	export_vars.innodb_data_pending_fsyncs
		= fil_n_pending_log_flushes
		+ fil_n_pending_tablespace_flushes;
	export_vars.innodb_data_fsyncs = os_n_fsyncs;
	export_vars.innodb_data_read = srv_data_read;
	export_vars.innodb_data_reads = os_n_file_reads;
	export_vars.innodb_data_writes = os_n_file_writes;
	export_vars.innodb_data_written = srv_data_written;

	/* Buffer pool counters */
	export_vars.innodb_buffer_pool_read_requests = buf_pool->stat.n_page_gets;
	export_vars.innodb_buffer_pool_write_requests
		= srv_buf_pool_write_requests;
	export_vars.innodb_buffer_pool_wait_free = srv_buf_pool_wait_free;
	export_vars.innodb_buffer_pool_pages_flushed = srv_buf_pool_flushed;
	export_vars.innodb_buffer_pool_reads = srv_buf_pool_reads;
	export_vars.innodb_buffer_pool_read_ahead_rnd
		= buf_pool->stat.n_ra_pages_read_rnd;
	export_vars.innodb_buffer_pool_read_ahead
		= buf_pool->stat.n_ra_pages_read;
	export_vars.innodb_buffer_pool_read_ahead_evicted
		= buf_pool->stat.n_ra_pages_evicted;
	export_vars.innodb_buffer_pool_pages_data
		= UT_LIST_GET_LEN(buf_pool->LRU);
	export_vars.innodb_buffer_pool_pages_dirty
		= UT_LIST_GET_LEN(buf_pool->flush_list);
	export_vars.innodb_buffer_pool_pages_free
		= UT_LIST_GET_LEN(buf_pool->free);
#ifdef UNIV_DEBUG
	export_vars.innodb_buffer_pool_pages_latched
		= buf_get_latched_pages_number();
#endif /* UNIV_DEBUG */
	export_vars.innodb_buffer_pool_pages_total = buf_pool->curr_size;
	/* "misc" pages are those that are neither in the LRU list nor in
	the free list (e.g. pages reserved for internal structures) */
	export_vars.innodb_buffer_pool_pages_misc = buf_pool->curr_size
		- UT_LIST_GET_LEN(buf_pool->LRU)
		- UT_LIST_GET_LEN(buf_pool->free);
#ifdef HAVE_ATOMIC_BUILTINS
	export_vars.innodb_have_atomic_builtins = 1;
#else
	export_vars.innodb_have_atomic_builtins = 0;
#endif
	export_vars.innodb_page_size = UNIV_PAGE_SIZE;

	/* Log subsystem counters */
	export_vars.innodb_log_waits = srv_log_waits;
	export_vars.innodb_os_log_written = srv_os_log_written;
	export_vars.innodb_os_log_fsyncs = fil_n_log_flushes;
	export_vars.innodb_os_log_pending_fsyncs = fil_n_pending_log_flushes;
	export_vars.innodb_os_log_pending_writes = srv_os_log_pending_writes;
	export_vars.innodb_log_write_requests = srv_log_write_requests;
	export_vars.innodb_log_writes = srv_log_writes;
	export_vars.innodb_dblwr_pages_written = srv_dblwr_pages_written;
	export_vars.innodb_dblwr_writes = srv_dblwr_writes;
	export_vars.innodb_pages_created = buf_pool->stat.n_pages_created;
	export_vars.innodb_pages_read = buf_pool->stat.n_pages_read;
	export_vars.innodb_pages_written = buf_pool->stat.n_pages_written;

	/* Row lock wait counters; internal wait times are kept in
	microseconds, the exported values are in milliseconds (/ 1000) */
	export_vars.innodb_row_lock_waits = srv_n_lock_wait_count;
	export_vars.innodb_row_lock_current_waits
		= srv_n_lock_wait_current_count;
	export_vars.innodb_row_lock_time = srv_n_lock_wait_time / 1000;
	if (srv_n_lock_wait_count > 0) {
		export_vars.innodb_row_lock_time_avg = (ulint)
			(srv_n_lock_wait_time / 1000 / srv_n_lock_wait_count);
	} else {
		/* Avoid division by zero when no waits have occurred */
		export_vars.innodb_row_lock_time_avg = 0;
	}
	export_vars.innodb_row_lock_time_max
		= srv_n_lock_max_wait_time / 1000;

	/* Row operation counters */
	export_vars.innodb_rows_read = srv_n_rows_read;
	export_vars.innodb_rows_inserted = srv_n_rows_inserted;
	export_vars.innodb_rows_updated = srv_n_rows_updated;
	export_vars.innodb_rows_deleted = srv_n_rows_deleted;

	mutex_exit(&srv_innodb_monitor_mutex);
}
/*********************************************************************//**
A thread which prints the info output by various InnoDB monitors.
Wakes up every 5 seconds; prints the monitor output at most once per
15 seconds, and the tablespace/table monitor output at most once per
minute.
@return	a dummy parameter */
UNIV_INTERN
os_thread_ret_t
srv_monitor_thread(
/*===============*/
	void*	arg __attribute__((unused)))
			/*!< in: a dummy parameter required by
			os_thread_create */
{
	double		time_elapsed;
	time_t		current_time;
	time_t		last_table_monitor_time;
	time_t		last_tablespace_monitor_time;
	time_t		last_monitor_time;
	ulint		mutex_skipped;	/* how many times in a row printing
					was skipped because kernel_mutex
					could not be acquired without wait */
	ibool		last_srv_print_monitor;

#ifdef UNIV_DEBUG_THREAD_CREATION
	/* BUG FIX: this used to print "Lock timeout thread starts", which
	was copy-pasted from srv_lock_timeout_thread() and misidentified
	this thread in the debug trace. */
	fprintf(stderr, "Monitor thread starts, id %lu\n",
		os_thread_pf(os_thread_get_curr_id()));
#endif
	UT_NOT_USED(arg);
	srv_last_monitor_time = time(NULL);
	last_table_monitor_time = time(NULL);
	last_tablespace_monitor_time = time(NULL);
	last_monitor_time = time(NULL);
	mutex_skipped = 0;
	last_srv_print_monitor = srv_print_innodb_monitor;
loop:
	srv_monitor_active = TRUE;

	/* Wake up every 5 seconds to see if we need to print
	monitor information. */

	os_thread_sleep(5000000);

	current_time = time(NULL);

	time_elapsed = difftime(current_time, last_monitor_time);

	if (time_elapsed > 15) {
		last_monitor_time = time(NULL);

		if (srv_print_innodb_monitor) {
			/* Reset mutex_skipped counter everytime
			srv_print_innodb_monitor changes. This is to
			ensure we will not be blocked by kernel_mutex
			for short duration information printing,
			such as requested by sync_array_print_long_waits() */
			if (!last_srv_print_monitor) {
				mutex_skipped = 0;
				last_srv_print_monitor = TRUE;
			}

			if (!srv_printf_innodb_monitor(stderr,
						MUTEX_NOWAIT(mutex_skipped),
						NULL, NULL)) {
				mutex_skipped++;
			} else {
				/* Reset the counter */
				mutex_skipped = 0;
			}
		} else {
			last_srv_print_monitor = FALSE;
		}

		/* If status output is enabled, rewrite the status file
		with a fresh snapshot, overwriting the old contents. */
		if (srv_innodb_status) {
			mutex_enter(&srv_monitor_file_mutex);
			rewind(srv_monitor_file);
			if (!srv_printf_innodb_monitor(srv_monitor_file,
						MUTEX_NOWAIT(mutex_skipped),
						NULL, NULL)) {
				mutex_skipped++;
			} else {
				mutex_skipped = 0;
			}
			os_file_set_eof(srv_monitor_file);
			mutex_exit(&srv_monitor_file_mutex);
		}

		/* Tablespace monitor output: at most once per minute */
		if (srv_print_innodb_tablespace_monitor
		    && difftime(current_time,
				last_tablespace_monitor_time) > 60) {
			last_tablespace_monitor_time = time(NULL);

			fputs("========================"
			      "========================\n",
			      stderr);
			ut_print_timestamp(stderr);

			fputs(" INNODB TABLESPACE MONITOR OUTPUT\n"
			      "========================"
			      "========================\n",
			      stderr);

			fsp_print(0);
			fputs("Validating tablespace\n", stderr);
			fsp_validate(0);
			fputs("Validation ok\n"
			      "---------------------------------------\n"
			      "END OF INNODB TABLESPACE MONITOR OUTPUT\n"
			      "=======================================\n",
			      stderr);
		}

		/* Table monitor output: at most once per minute */
		if (srv_print_innodb_table_monitor
		    && difftime(current_time, last_table_monitor_time) > 60) {

			last_table_monitor_time = time(NULL);

			fputs("===========================================\n",
			      stderr);

			ut_print_timestamp(stderr);

			fputs(" INNODB TABLE MONITOR OUTPUT\n"
			      "===========================================\n",
			      stderr);
			dict_print();

			fputs("-----------------------------------\n"
			      "END OF INNODB TABLE MONITOR OUTPUT\n"
			      "==================================\n",
			      stderr);
		}
	}

	if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) {
		goto exit_func;
	}

	if (srv_print_innodb_monitor
	    || srv_print_innodb_lock_monitor
	    || srv_print_innodb_tablespace_monitor
	    || srv_print_innodb_table_monitor) {
		goto loop;
	}

	srv_monitor_active = FALSE;

	goto loop;

exit_func:
	srv_monitor_active = FALSE;

	/* We count the number of threads in os_thread_exit(). A created
	thread should always use that to exit and not use return() to exit. */

	os_thread_exit(NULL);

	OS_THREAD_DUMMY_RETURN;
}
/*********************************************************************//**
A thread which wakes up threads whose lock wait may have lasted too long.
@return	a dummy parameter */
UNIV_INTERN
os_thread_ret_t
srv_lock_timeout_thread(
/*====================*/
	void*	arg __attribute__((unused)))
			/* in: a dummy parameter required by
			os_thread_create */
{
	srv_slot_t*	wait_slot;
	ibool		waiters_found;
	double		elapsed;
	ulint		slot_no;

	for (;;) {
		/* When someone is waiting for a lock, we wake up every
		second and check if a timeout has passed for a lock wait */

		os_thread_sleep(1000000);

		srv_lock_timeout_active = TRUE;

		mutex_enter(&kernel_mutex);

		waiters_found = FALSE;

		/* Scan all slots for waiting threads and check whether
		any of them has exceeded its time limit */

		for (slot_no = 0; slot_no < OS_THREAD_MAX_N; slot_no++) {
			trx_t*	trx;
			ulong	lock_wait_timeout;

			wait_slot = srv_mysql_table + slot_no;

			if (!wait_slot->in_use) {

				continue;
			}

			waiters_found = TRUE;

			elapsed = ut_difftime(ut_time(),
					      wait_slot->suspend_time);

			trx = thr_get_trx(wait_slot->thr);
			lock_wait_timeout = thd_lock_wait_timeout(
				trx->mysql_thd);

			if (trx_is_interrupted(trx)
			    || (lock_wait_timeout < 100000000
				&& (elapsed > (double) lock_wait_timeout
				    || elapsed < 0))) {

				/* Timeout exceeded or a wrap-around in
				system time counter: cancel the lock
				request queued by the transaction and
				release possible other transactions
				waiting behind; it is possible that the
				lock has already been granted: in that
				case do nothing */

				if (trx->wait_lock) {
					lock_cancel_waiting_and_release(
						trx->wait_lock);
				}
			}
		}

		os_event_reset(srv_lock_timeout_thread_event);

		mutex_exit(&kernel_mutex);

		if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) {

			break;
		}

		if (!waiters_found) {
			/* Nobody is waiting: mark the thread as idle.
			(Waiting on srv_lock_timeout_thread_event here is
			intentionally disabled, since the InnoDB monitor
			output is to be updated every 15 seconds.) */

			srv_lock_timeout_active = FALSE;
		}
	}

	srv_lock_timeout_active = FALSE;

	/* We count the number of threads in os_thread_exit(). A created
	thread should always use that to exit and not use return() to exit. */

	os_thread_exit(NULL);

	OS_THREAD_DUMMY_RETURN;
}
/*********************************************************************//**
A thread which prints warnings about semaphore waits which have lasted
too long. These can be used to track bugs which cause hangs.
Runs roughly once per second; it also refreshes the InnoDB monitor
statistics and updates the buffer pool LRU and flush rate statistics.
@return	a dummy parameter */
UNIV_INTERN
os_thread_ret_t
srv_error_monitor_thread(
/*=====================*/
	void*	arg __attribute__((unused)))
			/*!< in: a dummy parameter required by
			os_thread_create */
{
	/* number of successive fatal timeouts observed */
	ulint		fatal_cnt	= 0;
	ib_uint64_t	old_lsn;
	ib_uint64_t	new_lsn;
	/* longest waiting thread for a semaphore */
	os_thread_id_t	waiter		= os_thread_get_curr_id();
	os_thread_id_t	old_waiter	= waiter;
	/* the semaphore that is being waited for */
	const void*	sema		= NULL;
	const void*	old_sema	= NULL;

	old_lsn = srv_start_lsn;

#ifdef UNIV_DEBUG_THREAD_CREATION
	fprintf(stderr, "Error monitor thread starts, id %lu\n",
		os_thread_pf(os_thread_get_curr_id()));
#endif
loop:
	srv_error_monitor_active = TRUE;

	/* Try to track a strange bug reported by Harald Fuchs and others,
	where the lsn seems to decrease at times */

	new_lsn = log_get_lsn();

	if (new_lsn < old_lsn) {
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: Error: old log sequence number %llu"
			" was greater\n"
			"InnoDB: than the new log sequence number %llu!\n"
			"InnoDB: Please submit a bug report"
			" to http://bugs.mysql.com\n",
			old_lsn, new_lsn);
	}

	old_lsn = new_lsn;

	if (difftime(time(NULL), srv_last_monitor_time) > 60) {
		/* We refresh InnoDB Monitor values so that averages are
		printed from at most 60 last seconds */

		srv_refresh_innodb_monitor_stats();
	}

	/* Update the statistics collected for deciding LRU
	eviction policy. */
	buf_LRU_stat_update();

	/* Update the statistics collected for flush rate policy. */
	buf_flush_stat_update();

	/* In case mutex_exit is not a memory barrier, it is
	theoretically possible some threads are left waiting though
	the semaphore is already released. Wake up those threads: */

	sync_arr_wake_threads_if_sema_free();

	/* A long wait is counted as fatal only while the same thread keeps
	waiting on the same semaphore across successive checks; more than
	10 such observations in a row crash the server deliberately. */
	if (sync_array_print_long_waits(&waiter, &sema)
	    && sema == old_sema && os_thread_eq(waiter, old_waiter)) {
		fatal_cnt++;
		if (fatal_cnt > 10) {

			fprintf(stderr,
				"InnoDB: Error: semaphore wait has lasted"
				" > %lu seconds\n"
				"InnoDB: We intentionally crash the server,"
				" because it appears to be hung.\n",
				(ulong) srv_fatal_semaphore_wait_threshold);

			ut_error;
		}
	} else {
		/* Either no long wait, or a different waiter/semaphore:
		restart the fatal-wait tracking */
		fatal_cnt = 0;
		old_waiter = waiter;
		old_sema = sema;
	}

	/* Flush stderr so that a database user gets the output
	to possible MySQL error file */

	fflush(stderr);

	os_thread_sleep(1000000);

	if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) {

		goto loop;
	}

	srv_error_monitor_active = FALSE;

	/* We count the number of threads in os_thread_exit(). A created
	thread should always use that to exit and not use return() to exit. */

	os_thread_exit(NULL);

	OS_THREAD_DUMMY_RETURN;
}
/*******************************************************************//**
Tells the InnoDB server that there has been activity in the database
and wakes up the master thread if it is suspended (not sleeping). Used
in the MySQL interface. Note that there is a small chance that the
master thread stays suspended: for performance reasons this check is
done without holding the kernel mutex. */
UNIV_INTERN
void
srv_active_wake_master_thread(void)
/*===============================*/
{
	srv_activity_count++;

	if (srv_n_threads_active[SRV_MASTER] != 0) {
		/* The master thread is already active: no wakeup needed. */

		return;
	}

	mutex_enter(&kernel_mutex);

	srv_release_threads(SRV_MASTER, 1);

	mutex_exit(&kernel_mutex);
}
/*******************************************************************//**
Wakes up the master thread if it is suspended or being suspended.
Unlike srv_active_wake_master_thread(), this always acquires the
kernel mutex before releasing the master thread, so the wakeup is
not subject to the unprotected check there. */
UNIV_INTERN
void
srv_wake_master_thread(void)
/*========================*/
{
	srv_activity_count++;

	mutex_enter(&kernel_mutex);

	/* Release the master thread if it is waiting on its event */
	srv_release_threads(SRV_MASTER, 1);

	mutex_exit(&kernel_mutex);
}
/**********************************************************************
The master thread is tasked to ensure that flush of log file happens
once every second in the background. This is to ensure that not more
than one second of trxs are lost in case of crash when
innodb_flush_logs_at_trx_commit != 1 */
static
void
srv_sync_log_buffer_in_background(void)
/*===================================*/
{
	time_t	now = time(NULL);

	srv_main_thread_op_info = "flushing log";

	if (difftime(now, srv_last_log_flush_time) < 1) {
		/* The log buffer was already flushed less than a second
		ago: nothing to do yet. */

		return;
	}

	log_buffer_sync_in_background(TRUE);
	srv_last_log_flush_time = now;
	srv_log_writes_and_flush++;
}
/*********************************************************************//**
The master thread controlling the server. Cycles through roughly
one-second and ten-second rounds of background work (log flushing,
insert buffer merge, dirty page flushing, purge, checkpointing) while
there is user activity, drops into a background loop when the server
is quiet, and finally suspends itself on an event when there is no
work left at all.
@return	a dummy parameter */
UNIV_INTERN
os_thread_ret_t
srv_master_thread(
/*==============*/
	void*	arg __attribute__((unused)))
			/*!< in: a dummy parameter required by
			os_thread_create */
{
	os_event_t	event;			/* event the thread suspends
						on when there is no work */
	ulint		old_activity_count;	/* srv_activity_count sampled
						at the start of a round; used
						to detect new user activity */
	ulint		n_pages_purged	= 0;
	ulint		n_bytes_merged;
	ulint		n_pages_flushed;
	ulint		n_bytes_archived;
	ulint		n_tables_to_drop;
	ulint		n_ios;			/* i/o counters used to judge
						whether there is spare disk
						i/o capacity */
	ulint		n_ios_old;
	ulint		n_ios_very_old;
	ulint		n_pend_ios;
	ibool		skip_sleep	= FALSE; /* TRUE when the previous
						iteration did heavy flushing
						and we should not sleep */
	ulint		i;

#ifdef UNIV_DEBUG_THREAD_CREATION
	fprintf(stderr, "Master thread starts, id %lu\n",
		os_thread_pf(os_thread_get_curr_id()));
#endif
	srv_main_thread_process_no = os_proc_get_number();
	srv_main_thread_id = os_thread_pf(os_thread_get_curr_id());

	srv_table_reserve_slot(SRV_MASTER);

	mutex_enter(&kernel_mutex);

	srv_n_threads_active[SRV_MASTER]++;

	mutex_exit(&kernel_mutex);

loop:
	/*****************************************************************/
	/* ---- When there is database activity by users, we cycle in this
	loop */

	srv_main_thread_op_info = "reserving kernel mutex";

	n_ios_very_old = log_sys->n_log_ios + buf_pool->stat.n_pages_read
		+ buf_pool->stat.n_pages_written;
	mutex_enter(&kernel_mutex);

	/* Store the user activity counter at the start of this loop */
	old_activity_count = srv_activity_count;

	mutex_exit(&kernel_mutex);

	if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) {
		/* Background operations are disabled in this forced
		recovery mode: go directly to the suspend state */

		goto suspend_thread;
	}

	/* ---- We run the following loop approximately once per second
	when there is database activity */

	srv_last_log_flush_time = time(NULL);
	skip_sleep = FALSE;

	for (i = 0; i < 10; i++) {
		n_ios_old = log_sys->n_log_ios + buf_pool->stat.n_pages_read
			+ buf_pool->stat.n_pages_written;
		srv_main_thread_op_info = "sleeping";
		srv_main_1_second_loops++;

		if (!skip_sleep) {

			os_thread_sleep(1000000);
			srv_main_sleeps++;
		}

		skip_sleep = FALSE;

		/* ALTER TABLE in MySQL requires on Unix that the table handler
		can drop tables lazily after there no longer are SELECT
		queries to them. */

		srv_main_thread_op_info = "doing background drop tables";

		row_drop_tables_for_mysql_in_background();

		srv_main_thread_op_info = "";

		if (srv_fast_shutdown && srv_shutdown_state > 0) {

			goto background_loop;
		}

		/* Flush logs if needed */
		srv_sync_log_buffer_in_background();

		srv_main_thread_op_info = "making checkpoint";
		log_free_check();

		/* If i/os during one second sleep were less than 5% of
		capacity, we assume that there is free disk i/o capacity
		available, and it makes sense to do an insert buffer merge. */

		n_pend_ios = buf_get_n_pending_ios()
			+ log_sys->n_pending_writes;
		n_ios = log_sys->n_log_ios + buf_pool->stat.n_pages_read
			+ buf_pool->stat.n_pages_written;
		if (n_pend_ios < SRV_PEND_IO_THRESHOLD
		    && (n_ios - n_ios_old < SRV_RECENT_IO_ACTIVITY)) {
			srv_main_thread_op_info = "doing insert buffer merge";
			ibuf_contract_for_n_pages(FALSE, PCT_IO(5));

			/* Flush logs if needed */
			srv_sync_log_buffer_in_background();
		}

		if (UNIV_UNLIKELY(buf_get_modified_ratio_pct()
				  > srv_max_buf_pool_modified_pct)) {

			/* Try to keep the number of modified pages in the
			buffer pool under the limit wished by the user */

			srv_main_thread_op_info =
				"flushing buffer pool pages";
			n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST,
							  PCT_IO(100),
							  IB_ULONGLONG_MAX);

			/* If we had to do the flush, it may have taken
			even more than 1 second, and also, there may be more
			to flush. Do not sleep 1 second during the next
			iteration of this loop. */

			skip_sleep = TRUE;
		} else if (srv_adaptive_flushing) {

			/* Try to keep the rate of flushing of dirty
			pages such that redo log generation does not
			produce bursts of IO at checkpoint time. */
			ulint n_flush = buf_flush_get_desired_flush_rate();

			if (n_flush) {
				srv_main_thread_op_info =
					"flushing buffer pool pages";
				n_flush = ut_min(PCT_IO(100), n_flush);
				n_pages_flushed =
					buf_flush_batch(
						BUF_FLUSH_LIST,
						n_flush,
						IB_ULONGLONG_MAX);
				if (n_flush == PCT_IO(100)) {
					skip_sleep = TRUE;
				}
			}
		}

		if (srv_activity_count == old_activity_count) {

			/* There is no user activity at the moment, go to
			the background loop */

			goto background_loop;
		}
	}

	/* ---- We perform the following code approximately once per
	10 seconds when there is database activity */

#ifdef MEM_PERIODIC_CHECK
	/* Check magic numbers of every allocated mem block once in 10
	seconds */
	mem_validate_all_blocks();
#endif
	/* If i/os during the 10 second period were less than 200% of
	capacity, we assume that there is free disk i/o capacity
	available, and it makes sense to flush srv_io_capacity pages.

	Note that this is done regardless of the fraction of dirty
	pages relative to the max requested by the user. The one second
	loop above requests writes for that case. The writes done here
	are not required, and may be disabled. */

	n_pend_ios = buf_get_n_pending_ios() + log_sys->n_pending_writes;
	n_ios = log_sys->n_log_ios + buf_pool->stat.n_pages_read
		+ buf_pool->stat.n_pages_written;

	srv_main_10_second_loops++;
	if (n_pend_ios < SRV_PEND_IO_THRESHOLD
	    && (n_ios - n_ios_very_old < SRV_PAST_IO_ACTIVITY)) {

		srv_main_thread_op_info = "flushing buffer pool pages";
		buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
				IB_ULONGLONG_MAX);

		/* Flush logs if needed */
		srv_sync_log_buffer_in_background();
	}

	/* We run a batch of insert buffer merge every 10 seconds,
	even if the server were active */

	srv_main_thread_op_info = "doing insert buffer merge";
	ibuf_contract_for_n_pages(FALSE, PCT_IO(5));

	/* Flush logs if needed */
	srv_sync_log_buffer_in_background();

	/* We run a full purge every 10 seconds, even if the server
	were active */
	do {
		if (srv_fast_shutdown && srv_shutdown_state > 0) {

			goto background_loop;
		}

		srv_main_thread_op_info = "purging";
		n_pages_purged = trx_purge();

		/* Flush logs if needed */
		srv_sync_log_buffer_in_background();

	} while (n_pages_purged);

	srv_main_thread_op_info = "flushing buffer pool pages";

	/* Flush a few oldest pages to make a new checkpoint younger */

	if (buf_get_modified_ratio_pct() > 70) {

		/* If there are lots of modified pages in the buffer pool
		(> 70 %), we assume we can afford reserving the disk(s) for
		the time it requires to flush 100 pages */

		n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST,
						  PCT_IO(100),
						  IB_ULONGLONG_MAX);
	} else {
		/* Otherwise, we only flush a small number of pages so that
		we do not unnecessarily use much disk i/o capacity from
		other work */

		n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST,
						  PCT_IO(10),
						  IB_ULONGLONG_MAX);
	}

	srv_main_thread_op_info = "making checkpoint";

	/* Make a new checkpoint about once in 10 seconds */

	log_checkpoint(TRUE, FALSE);

	srv_main_thread_op_info = "reserving kernel mutex";

	mutex_enter(&kernel_mutex);

	/* ---- When there is database activity, we jump from here back to
	the start of loop */

	if (srv_activity_count != old_activity_count) {
		mutex_exit(&kernel_mutex);
		goto loop;
	}

	mutex_exit(&kernel_mutex);

	/* If the database is quiet, we enter the background loop */

	/*****************************************************************/
background_loop:
	/* ---- In this loop we run background operations when the server
	is quiet from user activity. Also in the case of a shutdown, we
	loop here, flushing the buffer pool to the data files. */

	/* The server has been quiet for a while: start running background
	operations */
	srv_main_background_loops++;
	srv_main_thread_op_info = "doing background drop tables";

	n_tables_to_drop = row_drop_tables_for_mysql_in_background();

	if (n_tables_to_drop > 0) {
		/* Do not monopolize the CPU even if there are tables waiting
		in the background drop queue. (It is essentially a bug if
		MySQL tries to drop a table while there are still open handles
		to it and we had to put it to the background drop queue.) */

		os_thread_sleep(100000);
	}

	srv_main_thread_op_info = "purging";

	/* Run a full purge */
	do {
		if (srv_fast_shutdown && srv_shutdown_state > 0) {

			break;
		}

		srv_main_thread_op_info = "purging";
		n_pages_purged = trx_purge();

		/* Flush logs if needed */
		srv_sync_log_buffer_in_background();

	} while (n_pages_purged);

	srv_main_thread_op_info = "reserving kernel mutex";

	mutex_enter(&kernel_mutex);
	if (srv_activity_count != old_activity_count) {
		mutex_exit(&kernel_mutex);
		goto loop;
	}
	mutex_exit(&kernel_mutex);

	srv_main_thread_op_info = "doing insert buffer merge";

	if (srv_fast_shutdown && srv_shutdown_state > 0) {
		n_bytes_merged = 0;
	} else {
		/* This should do an amount of IO similar to the number of
		dirty pages that will be flushed in the call to
		buf_flush_batch below. Otherwise, the system favors
		clean pages over cleanup throughput. */
		n_bytes_merged = ibuf_contract_for_n_pages(FALSE,
							   PCT_IO(100));
	}

	srv_main_thread_op_info = "reserving kernel mutex";

	mutex_enter(&kernel_mutex);
	if (srv_activity_count != old_activity_count) {
		mutex_exit(&kernel_mutex);
		goto loop;
	}
	mutex_exit(&kernel_mutex);

flush_loop:
	srv_main_thread_op_info = "flushing buffer pool pages";
	srv_main_flush_loops++;
	if (srv_fast_shutdown < 2) {
		n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST,
						  PCT_IO(100),
						  IB_ULONGLONG_MAX);
	} else {
		/* In the fastest shutdown we do not flush the buffer pool
		to data files: we set n_pages_flushed to 0 artificially. */

		n_pages_flushed = 0;
	}

	srv_main_thread_op_info = "reserving kernel mutex";

	mutex_enter(&kernel_mutex);
	if (srv_activity_count != old_activity_count) {
		mutex_exit(&kernel_mutex);
		goto loop;
	}
	mutex_exit(&kernel_mutex);

	srv_main_thread_op_info = "waiting for buffer pool flush to end";
	buf_flush_wait_batch_end(BUF_FLUSH_LIST);

	/* Flush logs if needed */
	srv_sync_log_buffer_in_background();

	srv_main_thread_op_info = "making checkpoint";

	log_checkpoint(TRUE, FALSE);

	if (buf_get_modified_ratio_pct() > srv_max_buf_pool_modified_pct) {

		/* Try to keep the number of modified pages in the
		buffer pool under the limit wished by the user */

		goto flush_loop;
	}

	srv_main_thread_op_info = "reserving kernel mutex";

	mutex_enter(&kernel_mutex);
	if (srv_activity_count != old_activity_count) {
		mutex_exit(&kernel_mutex);
		goto loop;
	}
	mutex_exit(&kernel_mutex);
	/*
	srv_main_thread_op_info = "archiving log (if log archive is on)";

	log_archive_do(FALSE, &n_bytes_archived);
	*/
	n_bytes_archived = 0;

	/* Keep looping in the background loop if still work to do */

	if (srv_fast_shutdown && srv_shutdown_state > 0) {
		if (n_tables_to_drop + n_pages_flushed
		    + n_bytes_archived != 0) {

			/* If we are doing a fast shutdown (= the default)
			we do not do purge or insert buffer merge. But we
			flush the buffer pool completely to disk.
			In a 'very fast' shutdown we do not flush the buffer
			pool to data files: we have set n_pages_flushed to
			0 artificially. */

			goto background_loop;
		}
	} else if (n_tables_to_drop
		   + n_pages_purged + n_bytes_merged + n_pages_flushed
		   + n_bytes_archived != 0) {

		/* In a 'slow' shutdown we run purge and the insert buffer
		merge to completion */

		goto background_loop;
	}

	/* There is no work for background operations either: suspend
	master thread to wait for more server activity */

suspend_thread:
	srv_main_thread_op_info = "suspending";

	mutex_enter(&kernel_mutex);

	if (row_get_background_drop_list_len_low() > 0) {
		/* Tables still queued for background drop: do not
		suspend, go back to work instead */
		mutex_exit(&kernel_mutex);

		goto loop;
	}

	event = srv_suspend_thread();

	mutex_exit(&kernel_mutex);

	/* DO NOT CHANGE THIS STRING. innobase_start_or_create_for_mysql()
	waits for database activity to die down when converting < 4.1.x
	databases, and relies on this string being exactly as it is. InnoDB
	manual also mentions this string in several places. */
	srv_main_thread_op_info = "waiting for server activity";

	os_event_wait(event);

	if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
		/* This is only extra safety, the thread should exit
		already when the event wait ends */

		os_thread_exit(NULL);
	}

	/* When there is user activity, InnoDB will set the event and the
	main thread goes back to loop. */

	goto loop;

	OS_THREAD_DUMMY_RETURN;	/* Not reached, avoid compiler warning */
}
| 29.437609 | 79 | 0.71202 | [
"object",
"transform"
] |
8adb6198357057971382a49ae98fcb55ec717d7e | 8,769 | h | C | rgbdscan/include/include.old/framebufferObject.h | ttykkala/recon | fe73b0f1023ff177e6870ed1426604e380e078b7 | [
"Apache-2.0"
] | 1 | 2017-06-20T13:52:14.000Z | 2017-06-20T13:52:14.000Z | src/tools/kinect_rgbd_to_stereo/stereogen/include/framebufferObject.h | kamarain/rgbd_dense_slam-code | 0ace518f9e9640ac2d61df868e4a6a73cdd89cf8 | [
"MIT"
] | 2 | 2017-06-21T08:49:34.000Z | 2017-06-24T15:39:53.000Z | src/tools/kinect_rgbd_to_stereo/stereogen/include/framebufferObject.h | kamarain/rgbd_dense_slam-code | 0ace518f9e9640ac2d61df868e4a6a73cdd89cf8 | [
"MIT"
] | null | null | null | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Copyright (c) 2005,
Aaron Lefohn (lefohn@cs.ucdavis.edu)
Robert Strzodka (strzodka@stanford.edu)
Adam Moerschell (atmoerschell@ucdavis.edu)
All rights reserved.
This software is licensed under the BSD open-source license. See
http://www.opensource.org/licenses/bsd-license.php for more detail.
*************************************************************
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following
conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of the University of California, Davis nor the names of
the contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
*/
#ifndef UCDAVIS_FRAMEBUFFER_OBJECT_H
#define UCDAVIS_FRAMEBUFFER_OBJECT_H
#include <GL/glew.h>
#include <iostream>
/*!
FramebufferObject Class. This class encapsulates the FramebufferObject
(FBO) OpenGL spec. See the official spec at:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/framebuffer_object.txt
for details.
A framebuffer object (FBO) is conceptually a structure containing pointers
to GPU memory. The memory pointed to is either an OpenGL texture or an
OpenGL RenderBuffer. FBOs can be used to render to one or more textures,
share depth buffers between multiple sets of color buffers/textures and
are a complete replacement for pbuffers.
Performance Notes:
1) It is more efficient (but not required) to call Bind()
on an FBO before making multiple method calls. For example:
FramebufferObject fbo;
fbo.Bind();
fbo.AttachTexture(GL_TEXTURE_2D, texId0, GL_COLOR_ATTACHMENT0_EXT);
fbo.AttachTexture(GL_TEXTURE_2D, texId1, GL_COLOR_ATTACHMENT1_EXT);
fbo.IsValid();
To provide a complete encapsulation, the following usage
pattern works correctly but is less efficient:
FramebufferObject fbo;
// NOTE : No Bind() call
fbo.AttachTexture(GL_TEXTURE_2D, texId0, GL_COLOR_ATTACHMENT0_EXT);
fbo.AttachTexture(GL_TEXTURE_2D, texId1, GL_COLOR_ATTACHMENT1_EXT);
fbo.IsValid();
The first usage pattern binds the FBO only once, whereas
the second usage binds/unbinds the FBO for each method call.
2) Use FramebufferObject::Disable() sparingly. We have intentionally
left out an "Unbind()" method because it is largely unnecessary
and encourages redundant Bind/Unbind coding. Binding an FBO is
usually much faster than enabling/disabling a pbuffer, but is
still a costly operation. When switching between multiple FBOs
and a visible OpenGL framebuffer, the following usage pattern
is recommended:
FramebufferObject fbo1, fbo2;
fbo1.Bind();
... Render ...
// NOTE : No Unbind/Disable here...
fbo2.Bind();
... Render ...
// Disable FBO rendering and return to visible window
// OpenGL framebuffer.
FramebufferObject::Disable();
*/
class FramebufferObject
{
public:
/// Ctor/Dtor
FramebufferObject();
virtual ~FramebufferObject();
/// Bind this FBO as current render target
void Bind();
/// Bind a texture to the "attachment" point of this FBO
virtual void AttachTexture(GLenum texTarget,
GLuint texId,
GLenum attachment = GL_COLOR_ATTACHMENT0_EXT,
int mipLevel = 0,
int zSlice = 0);
/// Bind an array of textures to multiple "attachment" points of this FBO
/// - By default, the first 'numTextures' attachments are used,
/// starting with GL_COLOR_ATTACHMENT0_EXT
virtual void AttachTextures(int numTextures,
GLenum texTarget[],
GLuint texId[],
GLenum attachment[] = NULL,
int mipLevel[] = NULL,
int zSlice[] = NULL);
/// Bind a render buffer to the "attachment" point of this FBO
virtual void AttachRenderBuffer(GLuint buffId,
GLenum attachment = GL_COLOR_ATTACHMENT0_EXT);
/// Bind an array of render buffers to corresponding "attachment" points
/// of this FBO.
/// - By default, the first 'numBuffers' attachments are used,
/// starting with GL_COLOR_ATTACHMENT0_EXT
virtual void AttachRenderBuffers(int numBuffers, GLuint buffId[],
GLenum attachment[] = NULL);
/// Free any resource bound to the "attachment" point of this FBO
void Unattach(GLenum attachment);
/// Free any resources bound to any attachment points of this FBO
void UnattachAll();
/// Is this FBO currently a valid render target?
/// - Sends output to std::cerr by default but can
/// be a user-defined C++ stream
///
/// NOTE : This function works correctly in debug build
/// mode but always returns "true" if NDEBUG is
/// is defined (optimized builds)
#ifndef NDEBUG
bool IsValid(std::ostream &ostr = std::cerr);
#else
bool IsValid(std::ostream &ostr = std::cerr)
{
return true;
}
#endif
/// BEGIN : Accessors
/// Is attached type GL_RENDERBUFFER_EXT or GL_TEXTURE?
GLenum GetAttachedType(GLenum attachment);
/// What is the Id of Renderbuffer/texture currently
/// attached to "attachement?"
GLuint GetAttachedId(GLenum attachment);
/// Which mipmap level is currently attached to "attachement?"
GLint GetAttachedMipLevel(GLenum attachment);
/// Which cube face is currently attached to "attachment?"
GLint GetAttachedCubeFace(GLenum attachment);
/// Which z-slice is currently attached to "attachment?"
GLint GetAttachedZSlice(GLenum attachment);
/// END : Accessors
/// BEGIN : Static methods global to all FBOs
/// Return number of color attachments permitted
static int GetMaxColorAttachments();
/// Disable all FBO rendering and return to traditional,
/// windowing-system controlled framebuffer
/// NOTE:
/// This is NOT an "unbind" for this specific FBO, but rather
/// disables all FBO rendering. This call is intentionally "static"
/// and named "Disable" instead of "Unbind" for this reason. The
/// motivation for this strange semantic is performance. Providing
/// "Unbind" would likely lead to a large number of unnecessary
/// FBO enablings/disabling.
static void Disable();
/// END : Static methods global to all FBOs
protected:
void _GuardedBind();
void _GuardedUnbind();
void _FramebufferTextureND(GLenum attachment, GLenum texTarget,
GLuint texId, int mipLevel, int zSlice);
static GLuint _GenerateFboId();
private:
GLuint m_fboId;
GLint m_savedFboId;
};
#endif
| 39.32287 | 86 | 0.661535 | [
"render",
"object"
] |
8aded71ffcbae0f2e4b69fb6c0c67c74ac795be5 | 500 | h | C | semantic/semantic.h | minghust/WYJ-Compiler | 80adb9a6c7c4547b76ddc4498f6bcde8476bf727 | [
"MIT"
] | null | null | null | semantic/semantic.h | minghust/WYJ-Compiler | 80adb9a6c7c4547b76ddc4498f6bcde8476bf727 | [
"MIT"
] | null | null | null | semantic/semantic.h | minghust/WYJ-Compiler | 80adb9a6c7c4547b76ddc4498f6bcde8476bf727 | [
"MIT"
] | null | null | null | //
// Created by MingZhang on 2018-05-31
//
#ifndef SEMANTIC_H_
#define SEMANTIC_H_
#include <sstream>
#include "symbolTable.h"
extern vector<ParameterTable> parameter_table;
extern vector<FunctionTable> function_table;
extern vector<VariableTable> variable_table;
bool checkConstant(string leftname, string rightname);
bool checkLvalue(string leftname, string rightname);
bool checkCall(string leftname, string rightname);
void OutputSemanticError();
extern vector<string>semantic_error;
#endif | 23.809524 | 54 | 0.812 | [
"vector"
] |
8adef07cfe5f0bb1b4ee1a7b6f6e6936022f9cba | 4,650 | h | C | ios/Frameworks/QCloudCOSXML.framework/Versions/Current/Headers/QCloudGetObjectRequest.h | david-woody/flutter_vod | 8005e5e2900631bc438a043ec467e04f969144c7 | [
"Apache-2.0"
] | 6 | 2020-09-26T15:15:09.000Z | 2022-01-25T07:46:20.000Z | ios/Frameworks/QCloudCOSXML.framework/Versions/Current/Headers/QCloudGetObjectRequest.h | david-woody/flutter_vod | 8005e5e2900631bc438a043ec467e04f969144c7 | [
"Apache-2.0"
] | 4 | 2020-10-16T08:37:16.000Z | 2022-01-26T07:06:34.000Z | ios/Frameworks/QCloudCOSXML.framework/Versions/Current/Headers/QCloudGetObjectRequest.h | david-woody/flutter_vod | 8005e5e2900631bc438a043ec467e04f969144c7 | [
"Apache-2.0"
] | 3 | 2021-01-27T04:25:33.000Z | 2021-10-08T02:39:19.000Z | //
// GetObject.h
// GetObject
//
// Created by tencent
// Copyright (c) 2015年 tencent. All rights reserved.
//
// ██████╗ ██████╗██╗ ██████╗ ██╗ ██╗██████╗ ████████╗███████╗██████╗ ███╗ ███╗██╗███╗ ██╗ █████╗ ██╗ ██╗ █████╗ ██████╗
// ██╔═══██╗██╔════╝██║ ██╔═══██╗██║ ██║██╔══██╗ ╚══██╔══╝██╔════╝██╔══██╗████╗ ████║██║████╗ ██║██╔══██╗██║ ██║ ██╔══██╗██╔══██╗
// ██║ ██║██║ ██║ ██║ ██║██║ ██║██║ ██║ ██║ █████╗ ██████╔╝██╔████╔██║██║██╔██╗ ██║███████║██║ ██║ ███████║██████╔╝
// ██║▄▄ ██║██║ ██║ ██║ ██║██║ ██║██║ ██║ ██║ ██╔══╝ ██╔══██╗██║╚██╔╝██║██║██║╚██╗██║██╔══██║██║ ██║ ██╔══██║██╔══██╗
// ╚██████╔╝╚██████╗███████╗╚██████╔╝╚██████╔╝██████╔╝ ██║ ███████╗██║ ██║██║ ╚═╝ ██║██║██║ ╚████║██║ ██║███████╗ ███████╗██║ ██║██████╔╝
// ╚══▀▀═╝ ╚═════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚══════╝ ╚══════╝╚═╝ ╚═╝╚═════╝
//
//
// _ __ _ _
// (_) / _| | | | |
// ___ ___ _ ____ ___ ___ ___ | |_ ___ _ __ __| | _____ _____| | ___ _ __ ___ _ __ ___
// / __|/ _ \ '__\ \ / / |/ __/ _ \ | _/ _ \| '__| / _` |/ _ \ \ / / _ \ |/ _ \| '_ \ / _ \ '__/ __|
// \__ \ __/ | \ V /| | (_| __/ | || (_) | | | (_| | __/\ V / __/ | (_) | |_) | __/ | \__
// |___/\___|_| \_/ |_|\___\___| |_| \___/|_| \__,_|\___| \_/ \___|_|\___/| .__/ \___|_| |___/
// ______ ______ ______ ______ ______ ______ ______ ______ | |
// |______|______|______|______|______|______|______|______| |_|
//
#import <Foundation/Foundation.h>
#import <QCloudCore/QCloudCore.h>
NS_ASSUME_NONNULL_BEGIN
/**
下载 COS 对象的方法.
可以直接发起 GET 请求获取 COS 中完整的对象数据, 或者在 GET 请求 中传入 Range 请求头部获取对象的部分内容. 获取COS 对象的同时,对象的元数据将会作为 HTTP 响应头部随对象内容一同返回,COS 支持GET 请求时 使用 URL 参数的方式覆盖响应的部分元数据值,例如覆盖 Content-iDisposition 的响应值.
关于获取 COS 对象的具体描述,请查看 https://cloud.tencent.com/document/product/436/14115.
关于获取 COS 对象的接口描述,请查看 https://cloud.tencent.com/document/product/436/7753.
cos iOS SDK 中获取 COS 对象请求的方法具体步骤如下:
1. 实例化 QCloudGetObjectRequest,填入需要的参数。
2. 调用 QCloudCOSXMLService 对象中的 GetObject 方法发出请求。
3. 从回调的 finishBlock 中的 outputObject 获取具体内容。
示例:
@code
QCloudGetObjectRequest* request = [[QCloudGetObjectRequest alloc] init];
request.bucket = @"bucketName"; //存储桶名称(cos v5 的 bucket格式为:xxx-appid, 如 test-1253960454)
request.object = @"objectName";;
[request setFinishBlock:^(id outputObject, NSError *error) {
//additional actions after finishing
}];
[[QCloudCOSXMLService defaultCOSXML] GetObject:request];
@endcode
*/
@interface QCloudGetObjectRequest : QCloudBizHTTPRequest
/**
设置响应头部中的 Content-Type参数
*/
@property (strong, nonatomic) NSString *responseContentType;
/**
设置响应头部中的Content-Language参数
*/
@property (strong, nonatomic) NSString *responseContentLanguage;
/**
设置响应头部中的Content-Expires参数
*/
@property (strong, nonatomic) NSString *responseContentExpires;
/**
设置响应头部中的Cache-Control参数
*/
@property (strong, nonatomic) NSString *responseCacheControl;
/**
设置响应头部中的 Content-Disposition 参数。
*/
@property (strong, nonatomic) NSString *responseContentDisposition;
/**
设置响应头部中的 Content-Encoding 参数。
*/
@property (strong, nonatomic) NSString *responseContentEncoding;
/**
RFC 2616 中定义的指定文件下载范围,以字节(bytes)为单位
*/
@property (strong, nonatomic) NSString *range;
/**
如果文件修改时间晚于指定时间,才返回文件内容。否则返回 412 (not modified)
*/
@property (strong, nonatomic) NSString *ifModifiedSince;
/**
如果文件修改时间早于或等于指定时间,才返回文件内容。否则返回 412 (precondition failed)
*/
@property (strong, nonatomic) NSString *ifUnmodifiedModifiedSince;
/**
当 ETag 与指定的内容一致,才返回文件。否则返回 412 (precondition failed)
*/
@property (strong, nonatomic) NSString *ifMatch;
/**
当 ETag 与指定的内容不一致,才返回文件。否则返回 304 (not modified)
*/
@property (strong, nonatomic) NSString *ifNoneMatch;
/**
指定 Object 的 VersionID (在开启多版本的情况下)
*/
@property (strong, nonatomic) NSString *versionID;
/**
对象名
*/
@property (strong, nonatomic) NSString *object;
/**
存储桶名
*/
@property (strong, nonatomic) NSString *bucket;
@end
NS_ASSUME_NONNULL_END
| 38.429752 | 177 | 0.473763 | [
"object"
] |
8adf66628f35d045c49830d171ffc08d30e7ad42 | 7,660 | h | C | lib/sw_services/xilskey/examples/xilskey_puf_registration.h | wincle626/Xilinx_Embedded_Driver_Example | 45c989dc5b56a0449d5102fdefbabca626860e19 | [
"BSD-3-Clause"
] | 2 | 2019-03-12T17:57:32.000Z | 2020-08-15T07:28:08.000Z | lib/sw_services/xilskey/examples/xilskey_puf_registration.h | wincle626/Xilinx_Embedded_Driver_Example | 45c989dc5b56a0449d5102fdefbabca626860e19 | [
"BSD-3-Clause"
] | null | null | null | lib/sw_services/xilskey/examples/xilskey_puf_registration.h | wincle626/Xilinx_Embedded_Driver_Example | 45c989dc5b56a0449d5102fdefbabca626860e19 | [
"BSD-3-Clause"
] | 1 | 2021-08-10T01:50:25.000Z | 2021-08-10T01:50:25.000Z | /******************************************************************************
*
* Copyright (C) 2016 - 17 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* Use of the Software is limited solely to applications:
* (a) running on a Xilinx device, or
* (b) that interact with a Xilinx device through a bus or interconnect.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of the Xilinx shall not be used
* in advertising or otherwise to promote the sale, use or other dealings in
* this Software without prior written authorization from Xilinx.
*
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xilskey_puf_registration.h
*
* This file contains header interface related information for PUF device
* and macros used in the driver
*
* @note
*
* User configurable parameters for PUF
*------------------------------------------------------------------------------
*
* #define XSK_PUF_INFO_ON_UART FALSE
* TRUE will display syndrome data on UART com port
* FALSE will display any data on UART com port.
*
* #define XSK_PUF_PROGRAM_EFUSE FALSE
* TRUE will programs the generated syndrome data, CHash and Auxilary values,
* Black key.
* FALSE will not program data into eFUSE.
*
* #define XSK_PUF_IF_CONTRACT_MANUFATURER FALSE
* This should be enabled when application is hand over to contract
* manufacturer.
* TRUE will allow only authenticated application.
* FALSE authentication is not mandatory.
*
* #define XSK_PUF_REG_MODE XSK_PUF_MODE4K
* PUF registration is performed in 4K mode. For only understanding it is
* provided in this file, but user is not supposed to modify this.
*
* #define XSK_PUF_READ_SECUREBITS FALSE
* TRUE will read status of the puf secure bits from eFUSE and will be
* displayed on UART.
* FALSE will not read secure bits.
*
* #define XSK_PUF_PROGRAM_SECUREBITS FALSE
* TRUE will program PUF secure bits based on the user input provided
* at XSK_PUF_SYN_INVALID, XSK_PUF_SYN_WRLK and XSK_PUF_REGISTER_DISABLE
* FALSE will not program any PUF secure bits.
*
* #define XSK_PUF_SYN_INVALID FALSE
* TRUE will permanently invalidates the already programmed syndrome data.
* FALSE will not modify anything
*
* #define XSK_PUF_SYN_WRLK FALSE
* TRUE will permanently disables programming syndrome data into eFUSE.
* FALSE will not modify anything.
*
* #define XSK_PUF_REGISTER_DISABLE FALSE
* TRUE permanently does not allows PUF syndrome data registration.
* FALSE will not modify anything.
*
* #define XSK_PUF_RESERVED FALSE
* TRUE programs this reserved eFUSE bit.
* FALSE will not modify anything.
*
* #define XSK_PUF_AES_KEY
* "0000000000000000000000000000000000000000000000000000000000000000"
* The value mentioned in this will be converted to hex buffer and encrypts
* this with PUF helper data and generates a black key and written
* into the ZynqMP PS eFUSE array when XSK_PUF_PROGRAM_EFUSE macro is TRUE
* This value should be given in string format. It should be 64 characters
* long, valid characters are 0-9,a-f,A-F. Any other character is
* considered as invalid string and will not burn AES Key.
* Note: Provided here should be red key and application calculates the
* black key and programs into eFUSE if XSK_PUF_PROGRAM_EFUSE macro is
* TRUE.
* To avoid programming eFUSE results can be displayed on UART com port
* by making XSK_PUF_INFO_ON_UART to TRUE.
*
* #define XSK_PUF_IV "000000000000000000000000"
* The value mentioned here will be converted to hex buffer.
* This is Initialization vector(IV) which is used to generated black key
* with provided AES key and generated PUF key.
* This value should be given in string format. It should be 24 characters
* long, valid characters are 0-9,a-f,A-F. Any other character is
* considered as invalid string.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 6.1 rp 17/10/16 First release.
* 6.2 vns 03/10/17 Added support for programming and reading one reserved
* bit
* </pre>
*
*
******************************************************************************/
#ifndef XILSKEY_PUF_REGISTRATION_H_
#define XILSKEY_PUF_REGISTRATION_H_
#ifdef __cplusplus
extern "C" {
#endif
/************************** Include Files ************************************/
#include "xsecure_aes.h"
#include "xilskey_eps_zynqmp_puf.h"
/************************** Constant Definitions ****************************/
#ifdef XSK_ZYNQ_ULTRA_MP_PLATFORM
#define XSK_CSUDMA_DEVICE_ID XPAR_XCSUDMA_0_DEVICE_ID
#endif
#define XSK_PUF_MODE4K (0U)
/**************************** Type Definitions *******************************/
/***************** Macros (Inline Functions) Definitions *********************/
/* Following parameters should be configured by user */
#define XSK_PUF_INFO_ON_UART FALSE
#define XSK_PUF_PROGRAM_EFUSE FALSE
#define XSK_PUF_IF_CONTRACT_MANUFATURER FALSE
/* For programming/reading secure bits of PUF */
#define XSK_PUF_READ_SECUREBITS FALSE
#define XSK_PUF_PROGRAM_SECUREBITS FALSE
#if (XSK_PUF_PROGRAM_SECUREBITS == TRUE)
#define XSK_PUF_SYN_INVALID FALSE
#define XSK_PUF_SYN_WRLK FALSE
#define XSK_PUF_REGISTER_DISABLE FALSE
#define XSK_PUF_RESERVED FALSE
#endif
#define XSK_PUF_AES_KEY "0000000000000000000000000000000000000000000000000000000000000000"
#define XSK_PUF_IV "000000000000000000000000"
#define XSK_PUF_REG_MODE XSK_PUF_MODE4K
/**< Registration Mode
* XPUF_MODE4K */
/***************************End of configurable parameters********************/
#if (XSK_PUF_INFO_ON_UART == TRUE)
#define XPUF_INFO_ON_UART /**< If defined, sends information
* on UART */
#define XPUF_DEBUG_GENERAL 1
#else
#define XPUF_DEBUG_GENERAL 0
#endif
#if (XSK_PUF_PROGRAM_EFUSE == TRUE)
#define XPUF_FUSE_SYN_DATA /**< If defined, writes syndrome data,
* black key, Aux and Chash
* values into eFUSE */
#endif
#if (XSK_PUF_IF_CONTRACT_MANUFATURER == TRUE)
#define XPUF_CONTRACT_MANUFACTURER /**< If defined, additional checks
* will be made to verify that app
* is authenticated before running*/
#endif
/************************** Type Definitions **********************************/
/* All the instances used for this application */
XilSKey_Puf PufInstance;
XSecure_Aes AesInstance;
XCsuDma CsuDma;
XilSKey_ZynqMpEPs EfuseInstance;
/************************** Function Prototypes ******************************/
#ifdef __cplusplus
}
#endif
#endif /* XILSKEY_PUF_REGISTRATION_H_ */
| 36.47619 | 91 | 0.684204 | [
"vector"
] |
8ae21e026ec88bebae189b96fa0d01ea795805a3 | 5,931 | h | C | ZNews/NewsBasicInfo.h | zhaowave/ZNews | dd8cb532aa7555812b62b21974ec79b7ac67ca90 | [
"Apache-2.0"
] | 4 | 2017-08-29T08:48:07.000Z | 2021-04-10T19:00:18.000Z | ZNews/NewsBasicInfo.h | zhaowave/ZNews | dd8cb532aa7555812b62b21974ec79b7ac67ca90 | [
"Apache-2.0"
] | null | null | null | ZNews/NewsBasicInfo.h | zhaowave/ZNews | dd8cb532aa7555812b62b21974ec79b7ac67ca90 | [
"Apache-2.0"
] | null | null | null | //
// NewsBasicInfo.h
// ZNews
//
// Created by zhawei on 2017/7/7.
// Copyright © 2017年 wei zhao. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <WCDB/WCDB.h>
#import "ZDatabase.h"
typedef void(^GetNewsList)(NSMutableArray *newsArray, NSError *error);
@interface NewsBasicInfo : NSObject <WCTTableCoding>
@property (strong ,nonatomic) NSString *Id;
@property (strong ,nonatomic) NSString *uinnick;
@property (strong ,nonatomic) NSString *title;
@property (strong ,nonatomic) NSString *longTitle;
@property (strong ,nonatomic) NSString *source;
@property (strong ,nonatomic) NSString *surl;
@property (strong ,nonatomic) NSString *short_url;
@property (strong ,nonatomic) NSString *url;
@property (strong ,nonatomic) NSArray *thumbnails;
@property (strong ,nonatomic) NSArray *thumbnails_big;
@property (strong ,nonatomic) NSArray *thumbnails_qqnews;
@property (strong ,nonatomic) NSArray *thumbnails_qqnews_photo;
@property (strong ,nonatomic) NSArray *bigImage;
@property (nonatomic, assign) long timestamp;
@property (nonatomic, assign) long imagecount;
@property (nonatomic, assign) long pushCommentCount;
@property (assign ,nonatomic) long show_expr;
@property (assign ,nonatomic) long openAds;
@property (assign ,nonatomic) long openAdsText;
@property (assign ,nonatomic) long openAdsComment;
@property (assign ,nonatomic) long openAdsPhotos;
@property (strong ,nonatomic) NSString *articletype;
@property (strong ,nonatomic) NSString *media_id;
@property (strong ,nonatomic) NSString *showType_video;
@property (strong ,nonatomic) NSString *qishu;
@property (strong ,nonatomic) NSString *comment;
@property (strong ,nonatomic) NSString *commentPlacementId;
@property (strong ,nonatomic) NSString *flag;
@property (strong ,nonatomic) NSString *showType;
@property (strong ,nonatomic) NSString *voteId;
@property (strong ,nonatomic) NSString *voteNum;
@property (strong ,nonatomic) NSString *weiboid;
@property (strong ,nonatomic) NSString *abstract;
@property (strong ,nonatomic) NSString *graphicLiveID;
@property (strong ,nonatomic) NSString *specialID;
@property (strong ,nonatomic) NSString *realChlName;
@property (strong ,nonatomic) NSString *seq_no;
@property (strong ,nonatomic) NSString *reasonInfo;
@property (strong ,nonatomic) NSString *a_ver;
@property (assign ,nonatomic) BOOL isshow;
WCDB_PROPERTY(Id)
WCDB_PROPERTY(uinnick)
WCDB_PROPERTY(title)
WCDB_PROPERTY(longTitle)
WCDB_PROPERTY(source)
WCDB_PROPERTY(url)
WCDB_PROPERTY(surl)
WCDB_PROPERTY(short_url)
WCDB_PROPERTY(thumbnails)
WCDB_PROPERTY(thumbnails_big)
WCDB_PROPERTY(thumbnails_qqnews)
WCDB_PROPERTY(thumbnails_qqnews_photo)
WCDB_PROPERTY(bigImage)
WCDB_PROPERTY(timestamp)
WCDB_PROPERTY(imagecount)
WCDB_PROPERTY(pushCommentCount)
WCDB_PROPERTY(openAds)
WCDB_PROPERTY(openAdsText)
WCDB_PROPERTY(openAdsComment)
WCDB_PROPERTY(openAdsPhotos)
WCDB_PROPERTY(articletype)
WCDB_PROPERTY(media_id)
WCDB_PROPERTY(showType_video)
WCDB_PROPERTY(qishu)
WCDB_PROPERTY(comment)
WCDB_PROPERTY(commentPlacementId)
WCDB_PROPERTY(flag)
WCDB_PROPERTY(showType)
WCDB_PROPERTY(voteId)
WCDB_PROPERTY(voteNum)
WCDB_PROPERTY(weiboid)
WCDB_PROPERTY(abstract)
WCDB_PROPERTY(graphicLiveID)
WCDB_PROPERTY(specialID)
WCDB_PROPERTY(realChlName)
WCDB_PROPERTY(seq_no)
WCDB_PROPERTY(reasonInfo)
WCDB_PROPERTY(a_ver)
WCDB_PROPERTY(isshow)
@end
@interface NewsService:NSObject
- (void) createTable;
- (BOOL) updateObject:(id)object;
- (bool) insertObject:(id)object into:(NSString*)name;
- (NSArray*) getNewsInfoFromDB:(int)offset;
- (NSArray*) getShildedNewsInfoFromDB;
- (void) queryNewsWithCallback:(GetNewsList) callback;
- (void) querySportsNewsWithCallback:(GetNewsList) callback;
singleton_h(NewsService);
@end
//{
//"id": "NEW2017070701916000",
//"uinnick": "腾讯新闻",
//"uinname": "news_news_top",
//"title": "文在寅在德发表演讲:愿条件成熟时会晤金正恩",
//"longtitle": "文在寅在德发表演讲:愿条件成熟时会晤金正恩",
//"surl": "https://view.inews.qq.com/a/NEW2017070701916004",
//"short_url": "https://view.inews.qq.com/a/NEW2017070701916004",
//"weiboid": "",
//"commentid": "2018572318",
//"url": "https://view.inews.qq.com/a/NEW2017070701916004",
//"time": "2017-07-07 09:55:05",
//"timestamp": 1499392915,
//"articletype": "0",
//"media_id": "",
//"showType_video": "normal",
//"thumbnails_big": [],
//"thumbnails": [
//"http://inews.gtimg.com/newsapp_ls/0/1749626775_294195/0"
//],
//"qishu": "",
//"source": "环球网",
//"imagecount": 0,
//"comment": "",
//"flag": "0",
//"tag": [],
//"thumbnails_qqnews": [
//"http://inews.gtimg.com/newsapp_ls/0/1749626775_294195/0"
//],
//"voteId": "",
//"voteNum": "",
//"abstract": "",
//"pushCommentCount": 0,
//"graphicLiveID": "",
//"specialID": "",
//"thumbnails_qqnews_photo": [
//"http://inews.gtimg.com/newsapp_ls/0/1749626775_640330/0"
//],
//"showType": "three",
//"show_expr": 1,
//"openAds": 1,
//"openAdsText": 1,
//"openAdsComment": 1,
//"openAdsPhotos": 0,
//"adTitle": "",
//"gesture": 1,
//"smallWindow": 1,
//"openBigImage": 0,
//"commentPlacementId": "8863362282584659151",
//"showBigPicStyle": "0",
//"bigPicStyleImg": "",
//"FadCid": "",
//"showType_title": 1,
//"disableDelete": 0,
//"picShowType": 0,
//"show_source": 0,
//"forbidCommentUpDown": 0,
//"disableDeclare": 1,
//"forbidExpr": 1,
//"isSensitive": 1,
//"forbidRedPacket": 1,
//"toneScore": "3",
//"qualityScore": "3",
//"bigImage": [
//"http://inews.gtimg.com/newsapp_ls/0/1749626775_870492/0"
//],
//"realChlName": "要闻",
//"a_ver": "04",
//"alg_version": 1,
//"labelList": [],
//"reasonInfo": "",
//"seq_no": "218999392963$$$3-3--要闻-0",
//"article_pos": 1
//}
| 31.380952 | 73 | 0.688923 | [
"object"
] |
8ae6663b419c53db58c1723330f82b4dc3c843db | 3,157 | h | C | components/safe_browsing_db/remote_database_manager.h | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | components/safe_browsing_db/remote_database_manager.h | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | components/safe_browsing_db/remote_database_manager.h | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Implementation of the SafeBrowsingDatabaseManager that sends URLs
// via IPC to a database that chromium doesn't manage locally.
#ifndef COMPONENTS_SAFE_BROWSING_DB_REMOTE_DATABASE_MANAGER_H_
#define COMPONENTS_SAFE_BROWSING_DB_REMOTE_DATABASE_MANAGER_H_
#include <set>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "components/safe_browsing_db/database_manager.h"
#include "url/gurl.h"
namespace net {
class URLRequestContextGetter;
}
namespace safe_browsing {
struct V4ProtocolConfig;
// An implementation that proxies requests to a service outside of Chromium.
// Does not manage a local database.
class RemoteSafeBrowsingDatabaseManager : public SafeBrowsingDatabaseManager {
public:
// Construct RemoteSafeBrowsingDatabaseManager.
// Must be initialized by calling StartOnIOThread() before using.
RemoteSafeBrowsingDatabaseManager();
//
// SafeBrowsingDatabaseManager implementation
//
void CancelCheck(Client* client) override;
bool CanCheckResourceType(content::ResourceType resource_type) const override;
bool CanCheckUrl(const GURL& url) const override;
bool ChecksAreAlwaysAsync() const override;
bool CheckBrowseUrl(const GURL& url, Client* client) override;
bool CheckDownloadUrl(const std::vector<GURL>& url_chain,
Client* client) override;
bool CheckExtensionIDs(const std::set<std::string>& extension_ids,
Client* client) override;
bool CheckResourceUrl(const GURL& url, Client* client) override;
bool MatchCsdWhitelistUrl(const GURL& url) override;
bool MatchDownloadWhitelistString(const std::string& str) override;
bool MatchDownloadWhitelistUrl(const GURL& url) override;
bool MatchMalwareIP(const std::string& ip_address) override;
bool MatchModuleWhitelistString(const std::string& str) override;
safe_browsing::ThreatSource GetThreatSource() const override;
bool IsCsdWhitelistKillSwitchOn() override;
bool IsDownloadProtectionEnabled() const override;
bool IsMalwareKillSwitchOn() override;
bool IsSupported() const override;
void StartOnIOThread(net::URLRequestContextGetter* request_context_getter,
const V4ProtocolConfig& config) override;
void StopOnIOThread(bool shutdown) override;
//
// RemoteSafeBrowsingDatabaseManager implementation
//
private:
~RemoteSafeBrowsingDatabaseManager() override;
class ClientRequest; // Per-request tracker.
// Requests currently outstanding. This owns the ptrs.
std::vector<ClientRequest*> current_requests_;
bool enabled_;
std::set<content::ResourceType> resource_types_to_check_;
friend class base::RefCountedThreadSafe<RemoteSafeBrowsingDatabaseManager>;
DISALLOW_COPY_AND_ASSIGN(RemoteSafeBrowsingDatabaseManager);
}; // class RemoteSafeBrowsingDatabaseManager
} // namespace safe_browsing
#endif // COMPONENTS_SAFE_BROWSING_DB_REMOTE_DATABASE_MANAGER_H_
| 36.709302 | 80 | 0.784289 | [
"vector"
] |
8ae895c6f2b0d8129d2226d1c2c7d880ba9cdb60 | 26,480 | h | C | src/uti_phgrm/TiepTri/TiepTri.h | kikislater/micmac | 3009dbdad62b3ad906ec882b74b85a3db86ca755 | [
"CECILL-B"
] | 451 | 2016-11-25T09:40:28.000Z | 2022-03-30T04:20:42.000Z | src/uti_phgrm/TiepTri/TiepTri.h | kikislater/micmac | 3009dbdad62b3ad906ec882b74b85a3db86ca755 | [
"CECILL-B"
] | 143 | 2016-11-25T20:35:57.000Z | 2022-03-01T11:58:02.000Z | src/uti_phgrm/TiepTri/TiepTri.h | kikislater/micmac | 3009dbdad62b3ad906ec882b74b85a3db86ca755 | [
"CECILL-B"
] | 139 | 2016-12-02T10:26:21.000Z | 2022-03-10T19:40:29.000Z | /*Header-MicMac-eLiSe-25/06/2007
MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation
eLiSe : ELements of an Image Software Environnement
www.micmac.ign.fr
Copyright : Institut Geographique National
Author : Marc Pierrot Deseilligny
Contributors : Gregoire Maillet, Didier Boldo.
[1] M. Pierrot-Deseilligny, N. Paparoditis.
"A multiresolution and optimization-based image matching approach:
An application to surface reconstruction from SPOT5-HRS stereo imagery."
In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space
(With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006.
[2] M. Pierrot-Deseilligny, "MicMac, un lociel de mise en correspondance
d'images, adapte au contexte geograhique" to appears in
Bulletin d'information de l'Institut Geographique National, 2007.
Francais :
MicMac est un logiciel de mise en correspondance d'image adapte
au contexte de recherche en information geographique. Il s'appuie sur
la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la
licences Cecill-B. Voir en bas de fichier et http://www.cecill.info.
English :
MicMac is an open source software specialized in image matching
for research in geographic information. MicMac is built on the
eLiSe image library. MicMac is governed by the "Cecill-B licence".
See below and http://www.cecill.info.
Header-MicMac-eLiSe-25/06/2007*/
// EpipolarEcart
// double EpipolarEcart(const Pt2dr & aP1,const cBasicGeomCap3D & aCam2,const Pt2dr & aP2) const;
#ifndef _TiePTri_
#define _TiePTri_
#include "StdAfx.h"
#include "../../TpMMPD/TiePByMesh/Fast.h"
#include "MultTieP.h"
// Header du header
class cHomolPackTiepTri;
class cParamAppliTieTri;
class cAppliTieTri;
class cImTieTri;
class cImMasterTieTri;
class cImSecTieTri;
class cResulRechCorrel;
class cResulMultiImRechCorrel;
class cOneTriMultiImRechCorrel;
class cIntTieTriInterest;
class cLinkImTT;
#define TT_DefCorrel -2.0
#define TT_MaxCorrel 1.0
//======= Point d'interet & Other Seuil ============//
#define TT_DIST_RECH_HOM 12.0 // Seuil de recherche des homologues lors de la premiere iteration (par les etiquettes)
#define TT_DIST_EXTREMA 3.0 // Taille du voisinage sur lequel un extrema local doit etre max
#define TT_SEUIL_SURF_TRI_PIXEL 100 // Supprime les triangles trop petits
//======= Filtrage Spatial Seuil ============//
#define TT_DefSeuilDensiteResul 50 // conserve 1 point / disque de rayon TT_DefSeuilDensiteResul
// Rayon de filtrage spatial du point apres l'appariement
// Dans FiltrageSpatialRMIRC()
#define TT_RatioFastFiltrSpatial 8 // Ratio par rapport a TT_DefSeuilDensiteResul, pour point I Fast
#define TT_RatioCorrEntFiltrSpatial 4 // Ratio par rapport a TT_DefSeuilDensiteResul, pour point apres Corr Ent
#define TT_RatioCorrSupPix 2 // Ratio par rapport a TT_DefSeuilDensiteResul, pour point apres Corr Ent
#define TT_RatioCorrLSQ 1 // Ratio par rapport a TT_DefSeuilDensiteResul, pour point apres Corr Ent
// ===========================
#define TT_FSDeltaCorrel 0.2 // Dans filtrage spatial, delta de correl/ aux max point que l'on va eliminer
#define TT_FSExpoAtten 2 // Dans filtrage spatial, module l'attenaution fontion de la distance
//(TT_DefSeuilDensiteResul/TT_RatioFastFiltrSpatial).^2 = rayon de filtrage spatial du point d'interet.
// Cet seuil est appliquer pour filtrer les point d'interet juste apres la detection de point d'interet
// Appliquer sur image maitraisse seulement
// Priorité par FAST quality
//======= AutoCorrel Critere Seuil ============//
#define TT_SEUIL_AutoCorrel 0.85 // Seuil d'elimination par auto-correlation
#define TT_SEUIL_CutAutoCorrel_INT 0.65 // Seuil d'acceptation rapide par auto correl entiere
#define TT_SEUIL_CutAutoCorrel_REEL 0.75 // Seuil d'acceptation rapide par auto correl reelle
#define TT_SZ_AUTO_COR 3
//======= Correlation Seuil ============//
#define TT_SEUIL_CORREL_1PIXSUR2 0.7 // seuil d'acceptation pour correl 1px/2
#define TT_SEUIl_DIST_Extrema_Entier 1.5 // Distance entre l'extrema init et le max de correl trouve.
//seuil d'acceptation point pour correl 1px/2 & pixel entier.
#define TT_DemiFenetreCorrel 6
// Correlation 1PIX/2 => aSzW = TT_DemiFenetreCorrel/2
// Correlation entier => aSzW = TT_DemiFenetreCorrel
//======= FAST Critere Seuil ============//
#define TT_DIST_FAST 4.0 // Critere type Fast calcul des extrema locaux
// 75% de point Non consecutive ecarte point noyeau un valeur d'intensité min = 5
#define TT_PropFastStd 0.75
#define TT_SeuilFastStd 5
// 60% de point consecutive ecarte point noyeau un valeur d'intensité min = 3
#define TT_PropFastConsec 0.6
#define TT_SeuilFastCons 3
extern bool BugAC;
extern bool USE_SCOR_CORREL;
// =====================================
// =====================================
typedef double tElTiepTri ;
typedef Im2D<tElTiepTri,tElTiepTri> tImTiepTri;
typedef TIm2D<tElTiepTri,tElTiepTri> tTImTiepTri;
typedef cInterpolateurIm2D<tElTiepTri> tInterpolTiepTri;
// Prop.x => standard , Prop.y => contingu
// extern Pt2dr TestFastQuality(TIm2D<double,double> anIm,Pt2di aP,double aRay,bool IsMax,Pt2dr aProp);
extern void TestcAutoCorrelDir(TIm2D<double,double> aTIm,const Pt2di & aP0);
#define ETAPE_CORREL_ENT 0
#define ETAPE_CORREL_BILIN 1
#define ETAPE_CORREL_DENSE 2
#define ETAPE_FINALE 3
// Plain parameter holder so the parameters can be initialized with EAM
// (command-line binding) while keeping a trivial constructor.
class cParamAppliTieTri
{
    public :
       cParamAppliTieTri() ;
       double mDistFiltr;                    // spatial filtering distance
       int    mNumInterpolDense;             // interpolator id used for the dense stage
       bool   mDoRaffImInit;
       int    mNbByPix;
       int    mSzWEnd;
       int    mNivLSQM;
       double mRandomize;
       bool   mNoTif;
       bool   mFilSpatial;                   // enable spatial filtering
       bool   mFilAC;                        // enable auto-correlation filtering
       bool   mFilFAST;                      // enable FAST filtering
       double mTT_SEUIL_SURF_TRI;
       double mTT_SEUIL_CORREL_1PIXSUR2;
       double mTT_SEUIl_DIST_Extrema_Entier;
       int    mEtapeInteract;
       int    mLastEtape;  // inclusive !!
       int    mFlagFS;     // spatial-filtering flag bitmask
       string mHomolOut;   // output homologous-points set suffix
       Pt2dr  mSurfDiffAffHomo;
       bool   mUseHomo;
       double mMaxErr;
};
// Main application object for per-triangle tie-point extraction: drives the
// detection on the master image and the correlation search on secondary images.
class cAppliTieTri : public cParamAppliTieTri
{
     public :
          cAppliTieTri
          (
              const cParamAppliTieTri &,
              cInterfChantierNameManipulateur *,
              const std::string & aDir,
              const std::string & anOri,
              const cXml_TriAngulationImMaster &
          );
          void SetSzW(Pt2di , int);
          bool CurEtapeInFlagFiltre() const;
          cInterfChantierNameManipulateur * ICNM();
          const std::string & Ori() const;
          const std::string & Dir() const;
          // Process every triangle of the triangulation.
          void DoAllTri  (const cXml_TriAngulationImMaster &);
          bool WithW() const;
          Pt2di SzW() const;
          int   ZoomW() const;
          cImMasterTieTri * Master();
          const std::vector<Pt2di> &   VoisExtr() const;
          const std::vector<Pt2di> &   VoisHom() const;
          bool &   Debug() ;
          const double & DistRechHom() const;
          const cElPlan3D & CurPlan() const;
          tInterpolTiepTri * Interpol();
          void FiltrageSpatialRMIRC(const double & aDist);
          // void FiltrageSpatialGlobRMIRC(const double & aDist);
          // Spatial filtering of multi-image correlation results: keep points
          // far enough apart, trading distance against correlation gain.
          std::vector<cResulMultiImRechCorrel *> FiltrageSpatial
                                                 (
                                                      const std::vector<cResulMultiImRechCorrel *> & aVIn,
                                                      double aSeuilDist,
                                                      double aGainCorrel
                                                 );
          void RechHomPtsDense(cResulMultiImRechCorrel &);
          void SetPtsSelect(const Pt2dr & aP);
          void SetNumSelectImage(const std::vector<int> & aNum);
          bool HasPtSelecTri() const;
          const Pt2dr &  PtsSelectTri() const;
          bool NumImageIsSelect(const int aNum) const;
          void PutInGlobCoord(cResulMultiImRechCorrel & aRMIRC,bool WithDecal,bool WithRedr);
          const std::string & KeyMasqIm() const;
          void SetMasqIm(const std::string & aKeyMasqIm);
          Pt2dr & MoyDifAffHomo() {return mMoyDifAffHomo;}
          Pt2dr & MaxDifAffHomo() {return mMaxDifAffHomo;}
          int & CountDiff() {return mCountDiff;}
          vector<int> & HistoErrAffHomoX() {return mHistoErrAffHomoX;}
          vector<int> & HistoErrAffHomoY() {return mHistoErrAffHomoY;}
          ofstream mErrLog;
     private :
          cAppliTieTri(const cAppliTieTri &); // N.I. (not implemented: non-copyable)
          void DoOneTri  (const cXml_Triangle3DForTieP & ,int aKT);
          cInterfChantierNameManipulateur * mICNM;
          std::string                       mDir;
          std::string                       mOri;
          cImMasterTieTri *                 mMasIm;
          std::vector<cImSecTieTri *>       mImSec;
          std::vector<cImSecTieTri *>       mImSecLoaded;
          Pt2di                             mSzW;
          int                               mZoomW;
          bool                              mWithW;
          double                            mDisExtrema;
          double                            mDistRechHom;
          // Neighbours used to decide whether a point is a local extremum;
          // does not contain the central point (0,0).
          std::vector<Pt2di>                mVoisExtr;
          // Neighbours used to search for homologous points within a given distance.
          std::vector<Pt2di>                mVoisHom;
          bool                              mDebug;
          // Plane of the current triangle.
          cElPlan3D                         mCurPlan;
          // Interpolators (sin cardinal, bicubic, bilinear).
          tInterpolTiepTri *                mInterpolSinC;
          tInterpolTiepTri *                mInterpolBicub;
          tInterpolTiepTri *                mInterpolBilin;
          std::vector<cResulMultiImRechCorrel*> mVCurMIRMC;
          std::vector<cResulMultiImRechCorrel*> mGlobMRIRC;
          // std::vector<cOneTriMultiImRechCorrel> mVGlobMIRMC;
          int                                   mNbTriLoaded;
          int                                   mNbPts;
          double                                mTimeCorInit;
          double                                mTimeCorDense;
          bool                                  mHasPtSelecTri;
          Pt2dr                                 mPtsSelectTri;
          bool                                  mHasNumSelectImage;
          std::vector<int>                      mNumSelectImage;
          std::string                           mKeyMasqIm;
          bool                                  mPIsInImRedr; // whether correlation points are in the resampled (redressed) image or not
          int                                   mCurEtape;
          Pt2dr                                 mMoyDifAffHomo;
          Pt2dr                                 mMaxDifAffHomo;
          int                                   mCountDiff;
          vector<int>                           mHistoErrAffHomoX;
          vector<int>                           mHistoErrAffHomoY;
};
/*
cIntTieTriInterest : point d'interet = Local + Type (Max,Min ...) + Qualite de contraste (Fast)
*/
// Type of an interest point: local maximum, local minimum, or unlabelled.
typedef enum eTypeTieTri
{
   eTTTNoLabel = 0,
   eTTTMax     = 1,
   eTTTMin     = 2
} eTypeTieTri;
// Interest point = position + type (Max, Min, ...) + contrast quality (FAST score).
class cIntTieTriInterest
{
    public :
       cIntTieTriInterest(const Pt2di & aP,eTypeTieTri aType,const double & aFastQual);
       cIntTieTriInterest(const cIntTieTriInterest &aPt);
       Pt2di        mPt;        // pixel position
       eTypeTieTri  mType;      // extremum type
       double       mFastQual;  // FAST contrast quality
       bool         mSelected;  // selection flag (used by filtering)
};
/*
class cLinkImTT
{
public :
cImTieTri * mIm1;
cImTieTri * mIm2;
bool mLnkActif;
private :
};
*/
// Base class for an image involved in tie-point extraction (master or secondary).
// Holds the image data, the triangle masks and the interest-point machinery.
class cImTieTri
{
    public :
        friend class cImMasterTieTri;
        friend class cImSecTieTri;
        cImTieTri(cAppliTieTri & ,const std::string& aNameIm,int aNum);
        Video_Win * W();
        virtual bool IsMaster() const = 0;
        // Resampled image accessor: the initial image for the master, the redressed image otherwise.
        virtual tTImTiepTri & ImRedr() = 0;
        const Pt2di & Decal() const;
        const int & Num() const;
        string NameIm() {return mNameIm;}
        // Auto-correlation test at aP (used to reject ambiguous interest points).
        bool AutoCorrel(Pt2di aP);
        Tiff_Im Tif();
        std::vector<Pt3dr> & PtTri3DHomoGrp() {return mPtTri3DHomoGrp;}
        Pt2dr & P1Glob() {return mP1Glob;}
        Pt2dr & P2Glob() {return mP2Glob;}
        Pt2dr & P3Glob() {return mP3Glob;}
    protected :
        cImTieTri(const cImTieTri &) ; // N.I. (not implemented: non-copyable)
        // Returns whether aP is a local extremum in the given image (and its type).
        int IsExtrema(const TIm2D<tElTiepTri,tElTiepTri> &,Pt2di aP);
        void  MakeInterestPoint
              (
                   std::list<cIntTieTriInterest> *,
                   TIm2D<U_INT1,INT> *,
                   const TIm2DBits<1> & aMasq,const  TIm2D<tElTiepTri,tElTiepTri> &
              );
        void  MakeInterestPointFAST
              (
                   std::list<cIntTieTriInterest> *,
                   TIm2D<U_INT1,INT> *,
                   const TIm2DBits<1> & aMasq,const  TIm2D<tElTiepTri,tElTiepTri> &
              );
        // Load the data needed for one 3D triangle; returns success.
        bool LoadTri(const cXml_Triangle3DForTieP & );
        Col_Pal ColOfType(eTypeTieTri);
        cAppliTieTri &  mAppli;
        std::string     mNameIm;
        Tiff_Im         mTif;
        cBasicGeomCap3D *  mCamGen;
        CamStenope *    mCamS;
        // Triangle vertices in global image coordinates.
        Pt2dr           mP1Glob;
        Pt2dr           mP2Glob;
        Pt2dr           mP3Glob;
        std::vector<Pt2dr> mVTriGlob;
        std::vector<Pt3dr> mPtTri3DHomoGrp;
        // Triangle vertices in local (cropped) coordinates.
        Pt2dr           mP1Loc;
        Pt2dr           mP2Loc;
        Pt2dr           mP3Loc;
        Pt2di           mDecal;   // offset of the local crop in the global image
        Pt2di           mSzIm;
        tImTiepTri      mImInit;
        tTImTiepTri     mTImInit;
        Im2D_Bits<1>    mMasqTri;   // mask of the current triangle
        TIm2DBits<1>    mTMasqTri;
        Im2D_Bits<1>    mMasqIm;    // global image mask
        TIm2DBits<1>    mTMasqIm;
        int             mRab;
        Video_Win *     mW;
        int             mNum;
        cFastCriterCompute * mFastCC;
        cCutAutoCorrelDir<tTImTiepTri> mCutACD;
        bool                 mLoaded;
};
// Master image: where interest points are detected.
class cImMasterTieTri : public cImTieTri
{
    public :
        cImMasterTieTri(cAppliTieTri & ,const std::string& aNameIm);
        bool LoadTri(const cXml_Triangle3DForTieP & );
        cIntTieTriInterest GetPtsInteret();
        cResulMultiImRechCorrel * GetRMIRC(const std::vector<cResulMultiImRechCorrel*> & aVR);
        virtual bool IsMaster() const ;
        // For the master, the "redressed" image is simply the initial image.
        virtual tTImTiepTri & ImRedr();
        const std::list<cIntTieTriInterest> & LIP() const;
    private :
        cImMasterTieTri(const cImMasterTieTri&) ; // N.I. (not implemented: non-copyable)
        std::list<cIntTieTriInterest> mLIP;  // detected interest points
};
// Secondary image: resampled onto the master geometry (via the triangle plane),
// where homologous points are searched by correlation.
class cImSecTieTri : public cImTieTri
{
    public :
        cImSecTieTri(cAppliTieTri & ,const std::string& aNameIm,int aNum);
        bool LoadTri(const cXml_Triangle3DForTieP & );
        // Whole-pixel correlation search around an interest point.
        cResulRechCorrel RechHomPtsInteretEntier(bool Interact,const cIntTieTriInterest & aP);
        // Sub-pixel (bilinear) refinement of a previous result.
        cResulRechCorrel RechHomPtsInteretBilin(bool Interact,const cResulMultiImRechCorrel &aRMIC,int aKIm);
        // Dense (final) correlation refinement.
        cResulRechCorrel RechHomPtsDense(bool Interact,const cResulMultiImRechCorrel &aRMIC,int aKIm);
        // Generic dispatcher over the numbered matching stages.
        cResulRechCorrel RechHomPtsGen(bool Interact,int aNumEtape,const cResulMultiImRechCorrel &aRMIC,int aKIm);
        // cResulRechCorrel RechHomPtsInteretBilin(bool Interact,const Pt2dr & aP0,const cResulRechCorrel & aCRC0);
        // Chains RechHomPtsInteretEntier then RechHomPtsInteretBilin
        // cResulRechCorrel RechHomPtsInteretEntierAndRefine(bool Interact,const cIntTieTriInterest & aP);
        virtual bool IsMaster() const ;
        virtual tTImTiepTri & ImRedr();
        ElPackHomologue & PackH() ;
        // Map a point from master to secondary coordinates (affine / homographic).
        Pt2dr Mas2Sec(const Pt2dr &) const;
        Pt2dr Mas2Sec_Hom(const Pt2dr &) const;
    private :
        bool InMasqReech(const Pt2dr &) const;
        bool InMasqReech(const Pt2di &) const;
        cImSecTieTri(const cImSecTieTri&); // N.I. (not implemented: non-copyable)
        void DecomposeVecHom(const Pt2dr & aPSH1,const Pt2dr & aPSH2,Pt2dr & aDirProf,Pt2dr & aNewCoord);
        tImTiepTri      mImReech;        // image resampled onto the master geometry
        tTImTiepTri     mTImReech;
        Im2D<U_INT1,INT>  mImLabelPC;
        TIm2D<U_INT1,INT> mTImLabelPC;
        Im2D_Bits<1>    mMasqReech;
        TIm2DBits<1>    mTMasqReech;
        Pt2di           mSzReech;
        // Master<->secondary mappings, affine and homographic forms.
        ElAffin2D       mAffMas2Sec;
        ElAffin2D       mAffSec2Mas;
        cElHomographie  mHomMas2Sec;
        cElHomographie  mHomSec2Mas;
        cImMasterTieTri * mMaster;
        ElPackHomologue   mPackH;
};
// ==================================== Correlation ==========================
// ==================================== Correlation ==========================
// Least-squares affine matcher: iteratively refines the affine map from image 1
// to image 2 (optionally with a radiometric model) around a centre point.
class cLSQAffineMatch
{
     public :
         cLSQAffineMatch
         (
              Pt2dr aPC1,
              const tImTiepTri & aI1,
              const tImTiepTri & aI2,
              ElAffin2D    anAf1To2
         );
         // One least-squares iteration at the given step; returns success.
         bool OneIter(tInterpolTiepTri *,int aNbW,double aStep,bool AffineGeom,bool AffineRadiom);
         const ElAffin2D & Af1To2() const;
     private :
         void CalcRect(tInterpolTiepTri *,double aStepTop);
         // Add one observation equation for the pixel PIm1.
         void AddEqq(L2SysSurResol & aSys,const Pt2dr &PIm1,const Pt2dr & aPC1);
         Pt2dr        mPC1;
         // Bounding boxes of the matching windows in each image.
         Pt2dr        mPInfIm1;
         Pt2dr        mPSupIm1;
         Pt2dr        mPInfIm2;
         Pt2dr        mPSupIm2;
         tTImTiepTri  mTI1;
         tElTiepTri** mData1;
         tTImTiepTri  mTI2;
         tElTiepTri** mData2;
         ElAffin2D    mAf1To2;
         // Radiometric model parameters (I2 ~ mA * I1 + mB).
         double       mA;
         double       mB;
         bool         mAffineGeom;
         bool         mAffineRadiom;
         double       mCoeff[10];
         // Unknown-number offsets in the least-squares system.
         int          NumAB;
         int          NumTr;
         int          NumAffGeom;
         int          NumAfRad;
         tInterpolTiepTri * mInterp;
         double       mSomDiff;
};
// inline const double & MyDeCorrel() {static double aR=-2.0; return aR;}
// Result of a correlation search for one image: matched point + correlation score.
class cResulRechCorrel
{
    public :
        cResulRechCorrel(const Pt2dr & aPt,double aCorrel) ;
        bool IsInit() const ;
        cResulRechCorrel() ;
        // Keep the better of this result and aRRC.
        void Merge(const cResulRechCorrel & aRRC);
        Pt2dr  mPt;      // matched position
        double mCorrel;  // correlation score
};
// Correlation results of one master interest point over multiple secondary images.
class cResulMultiImRechCorrel
{
    public :
       cResulMultiImRechCorrel(const cIntTieTriInterest & aPMaster)  ;
       double square_dist(const cResulMultiImRechCorrel & aR2) const;
       void AddResul(const cResulRechCorrel  aRRC,int aNumIm);
       bool AllInit() const ;
       bool IsInit() const ;
       double Score() const ;
       const std::vector<cResulRechCorrel > & VRRC() const ;
       std::vector<cResulRechCorrel > & VRRC()  ;
       const cIntTieTriInterest & PIMaster() const ;
       cIntTieTriInterest & PIMaster() ;
       Pt2di PtMast() const ;
       const std::vector<int>  & VIndex() const ;
       // Aggregate per-image scores into mScore (min, or weighted aggregation).
       void CalculScoreMin();
       void CalculScoreAgreg(double Epsilon,double pow,double aSign);
       int & HeapIndexe () ;
       const int & HeapIndexe () const ;
       // std::vector<bool> & VSelec();
       //const  std::vector<bool> & VSelec() const;
       int   NbSel() const;
       void  SetAllSel();
       void  SetSelec(int aK,bool aVal);
       // Remove the unselected per-image results (static form applies to a whole vector).
       void  SuprUnSelect();
       static void  SuprUnSelect(std::vector<cResulMultiImRechCorrel*> &);
    private :
       cResulMultiImRechCorrel(const cResulMultiImRechCorrel & ) ; // N.I. (not implemented: non-copyable)
       cIntTieTriInterest               mPMaster;
       double                           mScore;
       bool                             mAllInit;
       std::vector<cResulRechCorrel >   mVRRC;    // one result per secondary image
       std::vector<int>                 mVIndex;  // indices of the corresponding images
       std::vector<bool>                mVSelec;  // used in spatial filtering to mark points already selected
       int                              mHeapIndexe;
       int                              mNbSel;
};
/*
class cOneTriMultiImRechCorrel
{
public :
cOneTriMultiImRechCorrel(int aKT,const std::vector<cResulMultiImRechCorrel*> & aVMultiC) :
mKT (aKT),
mVMultiC (aVMultiC)
{
}
const std::vector<cResulMultiImRechCorrel*>& VMultiC() const {return mVMultiC;}
private :
// cOneTriMultiImRechCorrel(const cOneTriMultiImRechCorrel &); // N.I.
const int & KT() const {return mKT;}
int mKT;
std::vector<cResulMultiImRechCorrel*> mVMultiC;
};
*/
// Basic correlation between two windows at integer positions.
// NOTE(review): return appears to pack (score, validity?) in a Pt2dr — confirm in the .cpp.
Pt2dr TT_CorrelBasique
      (
            const tTImTiepTri & Im1,
            const Pt2di & aP1,
            const tTImTiepTri & Im2,
            const Pt2di & aP2,
            const int   aSzW,
            const int   aStep
      );
// Exhaustive integer search of the correlation maximum in a window of radius aSzRech.
cResulRechCorrel TT_RechMaxCorrelBasique
                 (
                      const tTImTiepTri & Im1,
                      const Pt2di & aP1,
                      const tTImTiepTri & Im2,
                      const Pt2di & aP2,
                      const int   aSzW,
                      const int   aStep,
                      const int   aSzRech
                 );
// Correlation with a bilinearly-interpolated (sub-pixel) position in Im2.
Pt2dr TT_CorrelBilin
      (
            const tTImTiepTri & Im1,
            const Pt2di & aP1,
            const tTImTiepTri & Im2,
            const Pt2dr & aP2,
            const int   aSzW
      );
// Local (hill-climbing) search of the correlation maximum at integer positions.
cResulRechCorrel TT_RechMaxCorrelLocale
                 (
                     const tTImTiepTri & aIm1,
                     const Pt2di & aP1,
                     const tTImTiepTri & aIm2,
                     const Pt2di & aP2,
                     const int   aSzW,
                     const int   aStep,
                     const int   aSzRechMax
                 );
// Multi-scale sub-pixel search, refining down to aStepFinal.
cResulRechCorrel TT_RechMaxCorrelMultiScaleBilin
                 (
                     const tTImTiepTri & aIm1,
                     const Pt2dr & aP1,
                     const tTImTiepTri & aIm2,
                     const Pt2dr & aP2,
                     const int   aSzW,
                     double aStepFinal
                 );
// Local maximum search with a generic interpolator and 2D mapping,
// stepping from aStep0 down to aStepEnd.
cResulRechCorrel TT_MaxLocCorrelDS1R
                 (
                     tInterpolTiepTri * anInterpol,
                     cElMap2D * aMap,
                     const tTImTiepTri & aIm1,
                     Pt2dr  aPC1,
                     const tTImTiepTri & aIm2,
                     Pt2dr  aPC2,
                     const int   aSzW,
                     const int   aNbByPix,
                     double aStep0,
                     double aStepEnd
                 );
// ==================================== cHomolPackTiepTri ==========================
// Homologous-point pack for an image pair; accumulates matches and writes them to disk.
class cHomolPackTiepTri
{
    public:
        // skipPackVide: if true, do not create the pack file when it is empty.
        cHomolPackTiepTri (std::string img1, std::string img2, int index, cInterfChantierNameManipulateur * aICNM, bool skipPackVide);
        void writeToDisk(std::string aHomolOut);
        ElPackHomologue & Pack() {return mPack;}
        std::string & Img1() {return mImg1;}
        std::string & Img2() {return mImg2;}
    private:
        std::string mImg1;
        std::string mImg2;
        int mIndex;
        cInterfChantierNameManipulateur * mICNM;
        ElPackHomologue mPack;
        bool mSkipVide;
};
// Strict-weak-ordering functor ranking interest points by decreasing FAST
// quality, so the highest-contrast points come first when sorting.
class cCmpInterOnFast
{
     public :
         bool operator () (const cIntTieTriInterest & aI1,const cIntTieTriInterest &aI2)
         {
             // "aI1 before aI2" iff aI2's quality is strictly lower.
             return aI2.mFastQual < aI1.mFastQual;
         }
};
#endif // _TiePTri_
/*Footer-MicMac-eLiSe-25/06/2007
Ce logiciel est un programme informatique servant à la mise en
correspondances d'images pour la reconstruction du relief.
Ce logiciel est régi par la licence CeCILL-B soumise au droit français et
respectant les principes de diffusion des logiciels libres. Vous pouvez
utiliser, modifier et/ou redistribuer ce programme sous les conditions
de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA
sur le site "http://www.cecill.info".
En contrepartie de l'accessibilité au code source et des droits de copie,
de modification et de redistribution accordés par cette licence, il n'est
offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons,
seule une responsabilité restreinte pèse sur l'auteur du programme, le
titulaire des droits patrimoniaux et les concédants successifs.
A cet égard l'attention de l'utilisateur est attirée sur les risques
associés au chargement, à l'utilisation, à la modification et/ou au
développement et à la reproduction du logiciel par l'utilisateur étant
donné sa spécificité de logiciel libre, qui peut le rendre complexe à
manipuler et qui le réserve donc à des développeurs et des professionnels
avertis possédant des connaissances informatiques approfondies. Les
utilisateurs sont donc invités à charger et tester l'adéquation du
logiciel à leurs besoins dans des conditions permettant d'assurer la
sécurité de leurs systèmes et ou de leurs données et, plus généralement,
à l'utiliser et l'exploiter dans les mêmes conditions de sécurité.
Le fait que vous puissiez accéder à cet en-tête signifie que vous avez
pris connaissance de la licence CeCILL-B, et que vous en avez accepté les
termes.
aooter-MicMac-eLiSe-25/06/2007*/
| 35.543624 | 134 | 0.567674 | [
"vector"
] |
8af24522148561b9a7d967cd6a1530dd53784fb7 | 2,376 | h | C | src/entity.h | adamjedlicka/yarpg | 6c8a3fde76ec340b76b046c05822e09aabb6e830 | [
"MIT"
] | 1 | 2016-06-04T12:11:53.000Z | 2016-06-04T12:11:53.000Z | src/entity.h | adamjedlicka/yarpg | 6c8a3fde76ec340b76b046c05822e09aabb6e830 | [
"MIT"
] | null | null | null | src/entity.h | adamjedlicka/yarpg | 6c8a3fde76ec340b76b046c05822e09aabb6e830 | [
"MIT"
] | null | null | null | #ifndef __ENTITY_H
#define __ENTITY_H
#include "engine.h"
// Projectile entity; destroys itself on structure impact and damages entities it hits.
class Fireball : public Entity {
  protected:
    int damage;  // damage dealt on collision

  public:
    Fireball(int, int, int, int);
    virtual ~Fireball();
    virtual void Tick(Engine *);
    virtual void Render(Buffer *) const;
    // Hitting a wall/structure removes the fireball.
    virtual void Colide(Structure *) { Destroy(); };
    virtual void Colide(Entity *);
    virtual void OnDestroy();
};
// Short-lived area effect; damages entities that collide with it.
// Presumably spawned when a Fireball is destroyed — confirm in the .cpp.
class FireballBlast : public Entity {
  protected:
    int ticks;   // remaining lifetime in ticks
    int damage;

  public:
    FireballBlast(int, int);
    virtual ~FireballBlast();
    virtual void Tick(Engine *);
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
};
// Short-lived melee attack hitbox; damages entities it collides with.
class Melee : public Entity {
  protected:
    int damage;
    int ticks;   // remaining lifetime in ticks

  public:
    Melee(int, int);
    virtual ~Melee();
    virtual void Tick(Engine *);
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
};
// Hostile solid entity with movement/attack cooldowns and a typed identity.
class Enemy : public Entity {
  protected:
    std::string enemyType;               // identifier of the enemy kind
    int damage, movSpeed, attSpeed;      // stats; speeds are cooldown lengths — confirm units in the .cpp
    char ch;                             // glyph used for rendering
    short color;
    int movTicks, attTicks;              // current cooldown counters

  public:
    Enemy(int, int, const std::string &, int, int, char, short, int, int);
    virtual ~Enemy();
    virtual void Tick(Engine *);
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
    virtual bool Attack(int);
    virtual bool IsSolid() { return true; }  // blocks movement
    virtual void OnDestroy();
};
// Non-player character: solid, shows a text bubble for a while and is tied to a quest.
class NPC : public Entity {
  protected:
    char ch;                 // glyph used for rendering
    short color;
    std::string text;        // dialogue text shown on interaction
    std::string questID;     // quest associated with this NPC
    bool showText;           // whether the dialogue is currently displayed
    int showTextTicks;       // remaining ticks to keep the text visible

  public:
    NPC(int, int, char, short, const std::string &, const std::string &);
    ~NPC();
    virtual void Tick(Engine *);
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
    virtual bool IsSolid() { return true; };
};
// Pickup entity carrying stat bonuses; presumably grants them on collision — confirm in the .cpp.
class ItemGiver : public Entity {
  protected:
    int movSpeed, armor, damage;  // stat bonuses carried by the item
    char ch;                      // glyph used for rendering
    short color;

  public:
    ItemGiver(int, int, int, int, int, char, short);
    ~ItemGiver();
    void Render(Buffer *) const;
    void Colide(Entity *);
};
// Level-transition entity; stores the name of the level to load on contact.
class Portal : public Entity {
  protected:
    std::string nextLevel;  // level to switch to

  public:
    Portal(int, int, const std::string &);
    virtual ~Portal();
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
};
// One-shot switch that toggles a rectangular door region [xFrom..xTo] x [yFrom..yTo].
class DoorSwitch : public Entity {
  protected:
    int xFrom, xTo, yFrom, yTo;  // rectangle of tiles affected by the switch
    bool switched;               // true once activated
    char ch;                     // glyph used for rendering

  public:
    DoorSwitch(int, int, int, int, int, int);
    virtual ~DoorSwitch();
    virtual void Render(Buffer *) const;
    virtual void Colide(Entity *);
};
#endif
| 18.276923 | 71 | 0.686448 | [
"render"
] |
8af2571869ef255a95441d7cdcf6fbf44375b1d6 | 5,075 | h | C | src/ui/light/drivers/aml-light/aml-light.h | dahliaOS/fuchsia-pi4 | 5b534fccefd918b5f03205393c1fe5fddf8031d0 | [
"BSD-2-Clause"
] | 3 | 2021-09-02T07:21:06.000Z | 2022-03-12T03:20:10.000Z | src/ui/light/drivers/aml-light/aml-light.h | dahliaOS/fuchsia-pi4 | 5b534fccefd918b5f03205393c1fe5fddf8031d0 | [
"BSD-2-Clause"
] | null | null | null | src/ui/light/drivers/aml-light/aml-light.h | dahliaOS/fuchsia-pi4 | 5b534fccefd918b5f03205393c1fe5fddf8031d0 | [
"BSD-2-Clause"
] | 2 | 2022-02-25T12:22:49.000Z | 2022-03-12T03:20:10.000Z | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SRC_UI_LIGHT_DRIVERS_AML_LIGHT_AML_LIGHT_H_
#define SRC_UI_LIGHT_DRIVERS_AML_LIGHT_AML_LIGHT_H_
#include <fuchsia/hardware/gpio/cpp/banjo.h>
#include <fuchsia/hardware/light/llcpp/fidl.h>
#include <fuchsia/hardware/pwm/cpp/banjo.h>
#include <lib/ddk/debug.h>
#include <threads.h>
#include <optional>
#include <string>
#include <vector>
#include <ddktl/device.h>
#include <ddktl/protocol/empty-protocol.h>
#include <fbl/array.h>
#include <soc/aml-common/aml-pwm-regs.h>
namespace aml_light {
using fuchsia_hardware_light::Light;
using fuchsia_hardware_light::wire::Capability;
using fuchsia_hardware_light::wire::LightError;
using fuchsia_hardware_light::wire::Rgb;

class AmlLight;
using AmlLightType = ddk::Device<AmlLight, ddk::Messageable<Light>::Mixin>;

// One LED: driven either as a simple on/off GPIO, or with brightness via an
// optional PWM channel.
class LightDevice {
 public:
  LightDevice(std::string name, ddk::GpioProtocolClient gpio,
              std::optional<ddk::PwmProtocolClient> pwm)
      : name_(std::move(name)), gpio_(gpio), pwm_(pwm) {}

  // Configures the hardware and applies the initial on/off state.
  zx_status_t Init(bool init_on);

  const std::string GetName() const { return name_; }
  // Brightness-capable iff a PWM channel was provided; otherwise simple on/off.
  Capability GetCapability() const {
    return pwm_.has_value() ? Capability::kBrightness : Capability::kSimple;
  }
  bool GetCurrentSimpleValue() const { return (value_ != 0); }
  zx_status_t SetSimpleValue(bool value);
  double GetCurrentBrightnessValue() const { return value_; }
  zx_status_t SetBrightnessValue(double value);

 private:
  std::string name_;
  ddk::GpioProtocolClient gpio_;
  std::optional<ddk::PwmProtocolClient> pwm_;

  // Last value set; doubles as the on/off state (non-zero == on).
  double value_ = 0;
};
// Amlogic light driver device: serves the fuchsia.hardware.light FIDL protocol
// over a set of LightDevice instances. Group operations are not supported.
class AmlLight : public AmlLightType,
                 public fidl::WireServer<Light>,
                 public ddk::EmptyProtocol<ZX_PROTOCOL_LIGHT> {
 public:
  explicit AmlLight(zx_device_t* parent) : AmlLightType(parent) {}

  static zx_status_t Create(void* ctx, zx_device_t* parent);

  // Device protocol implementation.
  void DdkRelease();

  // FIDL messages.
  void GetNumLights(GetNumLightsRequestView request,
                    GetNumLightsCompleter::Sync& completer) override;
  void GetNumLightGroups(GetNumLightGroupsRequestView request,
                         GetNumLightGroupsCompleter::Sync& completer) override;
  void GetInfo(GetInfoRequestView request, GetInfoCompleter::Sync& completer) override;
  void GetCurrentSimpleValue(GetCurrentSimpleValueRequestView request,
                             GetCurrentSimpleValueCompleter::Sync& completer) override;
  void SetSimpleValue(SetSimpleValueRequestView request,
                      SetSimpleValueCompleter::Sync& completer) override;
  void GetCurrentBrightnessValue(GetCurrentBrightnessValueRequestView request,
                                 GetCurrentBrightnessValueCompleter::Sync& completer) override;
  void SetBrightnessValue(SetBrightnessValueRequestView request,
                          SetBrightnessValueCompleter::Sync& completer) override;
  void GetCurrentRgbValue(GetCurrentRgbValueRequestView request,
                          GetCurrentRgbValueCompleter::Sync& completer) override;
  void SetRgbValue(SetRgbValueRequestView request, SetRgbValueCompleter::Sync& completer) override;

  // Group operations are unsupported: each replies kNotSupported.
  void GetGroupInfo(GetGroupInfoRequestView request,
                    GetGroupInfoCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void GetGroupCurrentSimpleValue(GetGroupCurrentSimpleValueRequestView request,
                                  GetGroupCurrentSimpleValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void SetGroupSimpleValue(SetGroupSimpleValueRequestView request,
                           SetGroupSimpleValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void GetGroupCurrentBrightnessValue(
      GetGroupCurrentBrightnessValueRequestView request,
      GetGroupCurrentBrightnessValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void SetGroupBrightnessValue(SetGroupBrightnessValueRequestView request,
                               SetGroupBrightnessValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void GetGroupCurrentRgbValue(GetGroupCurrentRgbValueRequestView request,
                               GetGroupCurrentRgbValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }
  void SetGroupRgbValue(SetGroupRgbValueRequestView request,
                        SetGroupRgbValueCompleter::Sync& completer) override {
    completer.ReplyError(LightError::kNotSupported);
  }

 private:
  DISALLOW_COPY_ASSIGN_AND_MOVE(AmlLight);
  friend class FakeAmlLight;

  zx_status_t Init();

  static constexpr size_t kNameLength = ZX_MAX_NAME_LEN;

  std::vector<LightDevice> lights_;
};
} // namespace aml_light
#endif // SRC_UI_LIGHT_DRIVERS_AML_LIGHT_AML_LIGHT_H_
| 38.740458 | 99 | 0.741084 | [
"vector"
] |
8afc4bbabaaed0342b445fb4f22dff76e9f4eda9 | 9,173 | h | C | src/autopas/utils/SoA.h | ssauermann/AutoPas | 309f9a43840101933b8d06324ea910c780954f61 | [
"BSD-2-Clause"
] | null | null | null | src/autopas/utils/SoA.h | ssauermann/AutoPas | 309f9a43840101933b8d06324ea910c780954f61 | [
"BSD-2-Clause"
] | null | null | null | src/autopas/utils/SoA.h | ssauermann/AutoPas | 309f9a43840101933b8d06324ea910c780954f61 | [
"BSD-2-Clause"
] | null | null | null | /**
* @file SoA.h
* @authors tchipevn, seckler
* @date 18.01.2018
*/
#pragma once
#include <algorithm>
#include <initializer_list>
#include <map>
#include <tuple>
#include <vector>
#include "autopas/utils/AlignedAllocator.h"
#include "autopas/utils/ExceptionHandler.h"
#include "autopas/utils/SoAStorage.h"
#include "autopas/utils/SoAType.h"
#include "autopas/utils/SoAView.h"
namespace autopas {
/**
* Structur of the array class.
* @tparam SoAArraysType The SoAArrayType to be used for storage.
*/
template <class SoAArraysType>
class SoA {
public:
/**
* @brief Default constructor.
*/
SoA() = default;
/**
* @brief Copy constructor.
* @param soa SoA to copy.
*/
SoA(const SoA &soa) = default;
/**
* @brief Resizes all Vectors to the given length.
* @param length new length.
*/
void resizeArrays(size_t length) {
soaStorage.apply([=](auto &list) { list.resize(length); });
}
/**
* @brief Pushes a given value to the desired attribute array.
* @tparam attribute Index of array to push to.
* @param value Value to push.
*/
template <std::size_t attribute>
void push(const double value) {
soaStorage.template get<attribute>().push_back(value);
}
/**
* @brief Writes / updates the value of an attribute for a specific particle.
* @tparam attribute Attribute to update.
* @tparam ValueType type of the attribute
* @param particleId Particle to update.
* @param value New value.
*/
template <int attribute, class ValueType>
void write(size_t particleId, const ValueType &value) {
soaStorage.template get<attribute>().at(particleId) = value;
}
/**
* Appends the other SoA buffer to this.
* @param other Other buffer.
*/
void append(const SoA<SoAArraysType> &other) {
if (other.getNumParticles() > 0) {
append_impl(other.soaStorage, std::make_index_sequence<std::tuple_size<SoAArraysType>::value>{});
}
}
/**
* Appends the other SoA buffer to this.
* @param other Other buffer.
*/
void append(const SoAView<SoAArraysType> &other) {
if (other.getNumParticles() > 0) {
append_impl(other, std::make_index_sequence<std::tuple_size<SoAArraysType>::value>{});
}
}
/**
* Appends the specified attributes from the other SoA buffer to this.
* @tparam attributes Attributes to append.
* @param other Other buffer.
*/
template <int... attributes>
void append(const SoA<SoAArraysType> &other) {
if (other.getNumParticles() > 0) {
const auto newSize = getNumParticles() + other.getNumParticles();
append_impl(other.soaStorage, std::index_sequence<attributes...>{});
resizeArrays(newSize);
}
}
/**
* Writes or updates values of attributes for a specific particle.
* @tparam attributes Array of attributes to update.
* @tparam ValueArrayType type of the array
* @param particleId Particle to update.
* @param values New value.
*/
template <int... attributes, class ValueArrayType>
void writeMultiple(size_t particleId, const ValueArrayType &values) {
write_impl<attributes...>(particleId, values);
}
/**
* Specialized version to pass arrays without specifying it directly.
* @tparam attributes
* @tparam N
* @param particleId
* @param values
*/
template <int... attributes, size_t N = sizeof...(attributes)>
inline void writeMultiple(size_t particleId, std::array<double, N> values) {
write_impl<attributes...>(particleId, values);
}
/**
* @brief Reads from all given attribute arrays at position `particleId`.
* @tparam ArrayLength length of the returned array. Should be equal
* attributes.size().
* @tparam attributes Attributes to read from.
* @param particleId Position to read from.
* @return Array of attributes ordered by given attribute order.
*/
template <int... attributes>
std::array<double, sizeof...(attributes)> readMultiple(size_t particleId) const {
std::array<double, sizeof...(attributes)> retArray;
if (particleId >= getNumParticles()) {
autopas::utils::ExceptionHandler::exception(
"SoA::read: requested particle id ({}) is bigger than number of particles ({})", particleId,
getNumParticles());
return retArray;
}
read_impl<attributes...>(particleId, retArray);
return retArray;
}
/**
* @brief Reads the value of a given attribute of a given particle.
* @tparam attribute Attribute to read from.
* @param particleId Position to read from.
* @return Attribute value.
*/
template <std::size_t attribute>
auto read(size_t particleId) const {
return soaStorage.template get<attribute>().at(particleId);
}
/**
* Returns a pointer to the given attribute vector.
* @tparam attribute ID of the desired attribute.
* @return Pointer to the beginning of the attribute vector
*/
template <std::size_t attribute>
auto begin() {
return soaStorage.template get<attribute>().data();
}
/**
* @brief Returns the number of particles.
*
* This function only checks the size of the first array since it is assumed
* that the user manages the arrays responsibly.
*
* @return Number of particles.
*/
inline size_t getNumParticles() const { return soaStorage.template get<0>().size(); }
/**
* delete all particles in the soa
*/
void clear() {
soaStorage.apply([](auto &list) { list.clear(); });
}
/**
* swap the position of two particles in the soa
* @param a position of the first particle
* @param b position of the second particle
*/
void swap(std::size_t a, std::size_t b) {
soaStorage.apply([=](auto &list) { std::swap(list[a], list[b]); });
}
/**
* Delete the last particle in the SoA.
*/
void pop_back() {
soaStorage.apply([](auto &list) { list.pop_back(); });
}
/**
* Constructs a SoAView for the whole SoA and returns it.
* @return the constructed SoAView on the whole SoA.
*/
SoAView<SoAArraysType> constructView() { return {this, 0, getNumParticles()}; }
/**
* Constructs a view that starts at \p startIndex (inclusive) and ends at \p endIndex (exclusive).
*
* \p startIndex and \p endIndex have to be between 0 (inclusive) and `this->getNumParticles()` (inclusive). \p
* endIndex has to be greater or equal to \p startIndex.
* @param startIndex The index of the first entry to view.
* @param endIndex The index of the entry after the last entry to view.
* @return the constructed SoAView from \p startIndex (inclusive) to \p endIndex (exclusive).
*/
SoAView<SoAArraysType> constructView(size_t startIndex, size_t endIndex) { return {this, startIndex, endIndex}; }
private:
// actual implementation of read
template <int attribute, int... attributes, class ValueArrayType>
void read_impl(size_t particleId, ValueArrayType &values, int _current = 0) const {
values[_current] = soaStorage.template get<attribute>().at(particleId);
read_impl<attributes...>(particleId, values, _current + 1);
}
// stop of recursive read call
template <class ValueArrayType>
void read_impl(size_t particleId, ValueArrayType &values, int _current = 0) const {}
// actual implementation of the write function.
// uses a recursive call.
template <int attribute, int... attributes, class ValueArrayType>
void write_impl(size_t particleId, const ValueArrayType &values, int _current = 0) {
soaStorage.template get<attribute>().at(particleId) = values[_current];
write_impl<attributes...>(particleId, values, _current + 1);
}
// Stop of the recursive write_impl call
template <class ValueArrayType>
void write_impl(size_t particleId, const ValueArrayType &values, int _current = 0) {}
// helper function to append a single array
template <std::size_t attribute>
void appendSingleArray(const utils::SoAStorage<SoAArraysType> &valArrays) {
auto ¤tVector = soaStorage.template get<attribute>();
auto &otherVector = valArrays.template get<attribute>();
currentVector.insert(currentVector.end(), otherVector.begin(), otherVector.end());
}
// helper function to append a single array
template <std::size_t attribute>
void appendSingleArray(const SoAView<SoAArraysType> &valArrays) {
auto ¤tVector = soaStorage.template get<attribute>();
auto otherVectorIterator = valArrays.template begin<attribute>();
currentVector.insert(currentVector.end(), otherVectorIterator, otherVectorIterator + valArrays.getNumParticles());
}
// actual implementation of append
template <std::size_t... Is>
void append_impl(const utils::SoAStorage<SoAArraysType> &valArrays, std::index_sequence<Is...>) {
// fold expression
(appendSingleArray<Is>(valArrays), ...);
}
// actual implementation of append
template <std::size_t... Is>
void append_impl(const SoAView<SoAArraysType> &valArrays, std::index_sequence<Is...>) {
// fold expression
(appendSingleArray<Is>(valArrays), ...);
}
// ------------- members ---------------
// storage container for the SoA's
utils::SoAStorage<SoAArraysType> soaStorage;
}; // namespace autopas
} // namespace autopas
| 32.644128 | 118 | 0.686907 | [
"vector"
] |
8afd90b2964f165d71337bf597d7bac403c9cf4b | 784 | h | C | src/controller/scan/ScanControllerGUI.h | seanngpack/swag-scanner-c- | afc905cbba40de424488d2b60bb0b506e5fee5f2 | [
"MIT"
] | 8 | 2020-09-03T21:39:27.000Z | 2022-02-21T07:18:24.000Z | src/controller/scan/ScanControllerGUI.h | seanngpack/swag-scanner-c- | afc905cbba40de424488d2b60bb0b506e5fee5f2 | [
"MIT"
] | 13 | 2020-05-05T18:33:03.000Z | 2020-10-27T06:04:00.000Z | src/controller/scan/ScanControllerGUI.h | seanngpack/swag-scanner-c- | afc905cbba40de424488d2b60bb0b506e5fee5f2 | [
"MIT"
] | 2 | 2020-11-29T18:04:03.000Z | 2020-12-20T01:34:58.000Z | #ifndef SWAG_SCANNER_SCANCONTROLLERGUI_H
#define SWAG_SCANNER_SCANCONTROLLERGUI_H
#include "ScanController.h"
#include "IControllerGUI.h"
namespace controller {
    // GUI-driven scan controller: same scanning behavior as ScanController but
    // fetches its parameters from the GUI form before running.
    class ScanControllerGUI : public ScanController, public IControllerGUI {
    public:
        ScanControllerGUI(std::shared_ptr<camera::ICamera> camera,
                          std::shared_ptr<arduino::Arduino> arduino,
                          std::shared_ptr<model::ScanModel> model,
                          std::shared_ptr<SwagGUI> gui);

        /**
         * This run method is just like the base's run method, but it fetches move info before running.
         */
        void run() override;

        // Receives the submitted form values from the GUI.
        void update(const IFormsPayload &payload) override;

    };
}
#endif //SWAG_SCANNER_SCANCONTROLLERGUI_H
| 30.153846 | 103 | 0.654337 | [
"model"
] |
c1001d6754294277baad5390f64d44b07b70d16c | 85,125 | h | C | src/ftacmp/query_plan.h | o-ran-sc/com-gs-lite | 2bc6bde491e4ae54fb54302c052f23a98482eb92 | [
"Apache-2.0"
] | null | null | null | src/ftacmp/query_plan.h | o-ran-sc/com-gs-lite | 2bc6bde491e4ae54fb54302c052f23a98482eb92 | [
"Apache-2.0"
] | null | null | null | src/ftacmp/query_plan.h | o-ran-sc/com-gs-lite | 2bc6bde491e4ae54fb54302c052f23a98482eb92 | [
"Apache-2.0"
] | null | null | null | /* ------------------------------------------------
Copyright 2014 AT&T Intellectual Property
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------- */
#ifndef __QUERY_PLAN_H__
#define __QUERY_PLAN_H__
#include<cstdio>

#include<map>
#include<string>
#include<vector>
using namespace std;
#include"analyze_fta.h"
#include"iface_q.h"
#include"parse_partn.h"
#include"generate_utils.h"
// Identify the format of the input, output streams.
#define UNKNOWNFORMAT 0
#define NETFORMAT 1
#define HOSTFORMAT 2
///////////////////////////////////////////////////
// representation of an output operator specification
//	One output-operator specification: describes how the output of a
//	query is to be materialized by an output operator.
struct ospec_str{
	string query;				// name of the query whose output this describes
	string operator_type;		// which kind of output operator to instantiate
	string operator_param;		// operator-specific parameter string
	string output_directory;	// directory the operator writes its output into
	int bucketwidth;			// bucket width; units are operator-specific -- TODO confirm
	string partitioning_flds;	// fields used to partition the output -- presumably a field list; verify
	int n_partitions;			// number of output partitions
};
////////////////////////////////////////////////////
// Input representation of a query
struct query_node{
int idx;
std::set<int> reads_from;
std::set<int> sources_to;
std::vector<std::string> refd_tbls;
std::vector<var_pair_t *> params;
std::string name;
std::string file;
std::string mangler; // for UDOPs
bool touched;
table_exp_t *parse_tree;
int n_consumers;
bool is_udop;
bool is_externally_visible;
bool inferred_visible_node;
set<int> subtree_roots;
query_node(){
idx = -1;
touched = false;
parse_tree = NULL;
n_consumers = 0;
is_externally_visible = false;
inferred_visible_node = false;
mangler="";
};
query_node(int i, std::string qnm, std::string flnm, table_exp_t *pt){
idx = i;
touched = false;
name = qnm;
file = flnm;
parse_tree = pt;
n_consumers = 0;
is_udop = false;
is_externally_visible = pt->get_visible();
inferred_visible_node = false;
mangler="";
tablevar_list_t *fm = parse_tree->get_from();
if(fm!=NULL){
refd_tbls = fm->get_table_names();
}
params = pt->query_params;
};
query_node(int ix, std::string udop_name,table_list *Schema){
idx = ix;
touched = false;
name = udop_name;
file = udop_name;
parse_tree = NULL;
n_consumers = 0;
is_udop = true;
is_externally_visible = true;
inferred_visible_node = false;
mangler="";
int sid = Schema->find_tbl(udop_name);
std::vector<subquery_spec *> subq = Schema->get_subqueryspecs(sid);
int i;
for(i=0;i<subq.size();++i){
refd_tbls.push_back(subq[i]->name);
}
};
};
//	A high-level FTA node: one or more query nodes grouped into a
//	single HFTA process, plus its parallelism bookkeeping.
struct hfta_node{
	std::string name;
	std::string source_name;
	std::vector<int> query_node_indices;
	std::set<int> reads_from;
	std::set<int> sources_to;
	bool is_udop;
	bool inferred_visible_node;
	int n_parallel;
	int parallel_idx;
	bool do_generation;	// when false, skip this node during code generation

	// Defaults: a plain (non-UDOP) node, visibility not inferred,
	// one-way parallelism in slot zero, code generation enabled.
	hfta_node()
		: is_udop(false), inferred_visible_node(false),
		  n_parallel(1), parallel_idx(0), do_generation(true) {}
};
#define SPX_QUERY 1
#define SGAH_QUERY 2
// the following selectivity estimates are used by our primitive rate estimators
#define SPX_SELECTIVITY 1.0
#define SGAH_SELECTIVITY 0.1
#define RSGAH_SELECTIVITY 0.1
#define SGAHCWCB_SELECTIVITY 0.1
#define MRG_SELECTIVITY 1.0
#define JOIN_EQ_HASH_SELECTIVITY 1.0
// the the output rate of the interface is not given we are going to use
// this default value
#define DEFAULT_INTERFACE_RATE 100
// Define query plan nodes
// These nodes are intended for query modeling
// and transformation rather than for code generation.
// Query plan node base class.
// It has an ID, can return its type,
// and can be linked into lists with the predecessors
// and successors.
// To add : serialize, unserialize?
class qp_node{
public:
	int id;							// unique id of this plan node
	std::vector<int> predecessors;	// ids of the upstream plan nodes
	std::vector<int> successors;	// ids of the downstream plan nodes
	std::string node_name;

//			For error reporting without exiting the program.
	int error_code;
	std::string err_str;

//			These should be moved to the containing stream_query object.
	std::map<std::string, std::string> definitions;
	param_table *param_tbl;

//			The value of a field in terms of protocol fields (if any).
	std::map<std::string, scalarexp_t *> protocol_map;

	qp_node(){
		error_code = 0;
		id = -1;
		param_tbl = new param_table();
	};
	qp_node(int i){
		error_code = 0;
		id = i;
		param_tbl = new param_table();
	};

//			BUGFIX: qp_node is a polymorphic base (see make_copy and the
//			many virtuals) and instances are deleted through base-class
//			pointers, so the destructor must be virtual or derived
//			destructors never run.
//			NOTE(review): param_tbl is sometimes aliased to the parse
//			summary's table (see e.g. the sgah_qpn constructor), so it is
//			deliberately NOT deleted here -- confirm ownership before
//			adding a delete.
	virtual ~qp_node(){};

	int get_id(){return(id);};
	void set_id(int i){id = i; };

	int get_error_code(){return error_code;};
	std::string get_error_str(){return err_str;};

	virtual std::string node_type() = 0;

//			For code generation, does the operator xform its input.
	virtual bool makes_transform() = 0;
//			For linking, what external libraries does the operator depend on?
	virtual std::vector<std::string> external_libs() = 0;

	void set_node_name(std::string n){node_name = n;};
	std::string get_node_name(){return node_name;};

	void set_definitions(std::map<std::string, std::string> &def){
		definitions = def;
	};
	std::map<std::string, std::string> get_definitions(){return definitions;};

//			call to create the mapping from field name to se in protocol fields.
//			Pass in qp_node of data sources, in order.
	virtual void create_protocol_se(std::vector<qp_node *> q_sources,table_list *Schema)=0;
//			get the protocol map.  the parameter is the return value.
	std::map<std::string, scalarexp_t *> *get_protocol_se(){return &protocol_map;}

//			Each qp node must be able to return a description
//			of the tuples it creates.
//			TODO: the get_output_tbls method should subsume the get_fields
//			method, but in fact it really just returns the
//			operator name.
	virtual table_def *get_fields() = 0;	// Should be vector?

//			get keys from the operator.  Currently, only works on group-by queries.
//			partial_keys set to true if there is a suspicion that the list is partial.
	virtual std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys) = 0;

//			Get the from clause
	virtual std::vector<tablevar_t *> get_input_tbls() = 0;
//			this is a confused function, it actually returns the output
//			table name.
	virtual std::vector<tablevar_t *> get_output_tbls() = 0;

//			Look up a definition; empty string if absent.
	std::string get_val_of_def(std::string def){
		if(definitions.count(def) > 0) return definitions[def];
		return("");
	};
	void set_definition(std::string def, std::string val){
		definitions[def]=val;
	}

//			Associate colrefs in SEs with tables
//			at code generation time.
	virtual void bind_to_schema(table_list *Schema) = 0;
//			Get colrefs of the operator, currently only meaningful for lfta
//			operators, and only interested in colrefs with extraction fcns
	virtual col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema)=0;

	virtual std::string to_query_string() = 0;
	virtual std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform) = 0;
	virtual std::string generate_functor_name() = 0;
	virtual std::string generate_operator(int i, std::string params) = 0;
	virtual std::string get_include_file() = 0;

	virtual cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns) = 0;
	virtual std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns) = 0;

//			Split this node into LFTA and HFTA nodes.
//			Four possible outcomes:
//			1) the qp_node reads from a protocol, but does not need to
//			   split (can be evaluated as an LFTA).
//			   The lfta node is the only element in the return vector,
//			   and hfta_returned is false.
//			2) the qp_node reads from no protocol, and therefore cannot be split.
//			   The hfta node is the only element in the return vector,
//			   and hfta_returned is true.
//			3) reads from at least one protocol, but cannot be split : failure.
//			   return vector is empty, the error conditions are written
//			   in the qp_node.
//			4) The qp_node splits into an hfta node and one or more LFTA nodes.
//			   the return vector has two or more elements, and hfta_returned
//			   is true.  The last element is the HFTA.
	virtual std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx) = 0;

//			Ensure that any refs to interface params have been split away.
	virtual int count_ifp_refs(std::set<std::string> &ifpnames)=0;

//			Tag the data sources which are views,
//			return the (optimized) source queries and
//			record the view access in opview_set
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm) = 0;

	param_table *get_param_tbl(){return param_tbl;};

//			The "where" clause is a pre-filter
	virtual std::vector<cnf_elem *> get_where_clause() = 0;
//			To be more explicit, use get_filter_preds, this is used to compute the prefilter
	virtual std::vector<cnf_elem *> get_filter_clause() = 0;

//			Add an extra predicate.  Currently only used for LFTAs.
	virtual void append_to_where(cnf_elem *c) = 0;

	void add_predecessor(int i){predecessors.push_back(i);};
//			Remove the first occurrence of i from the predecessor list.
	void remove_predecessor(int i){
		std::vector<int>::iterator vi;
		for(vi=predecessors.begin(); vi!=predecessors.end();++vi){
			if((*vi) == i){
				predecessors.erase(vi);
				return;
			}
		}
	};
	void add_successor(int i){successors.push_back(i);};

	std::vector<int> get_predecessors(){return predecessors;};
	int n_predecessors(){return predecessors.size();};
	std::vector<int> get_successors(){return successors;};
	int n_successors(){return successors.size();};
	void clear_predecessors(){predecessors.clear();};
	void clear_successors(){successors.clear();};

//			the following method is used for distributed query optimization
	double get_rate_estimate();

//			used for cloning query nodes
	virtual qp_node* make_copy(std::string suffix) = 0;
};
// Select, project, transform (xform) query plan node.
// represent the following query fragment
// select scalar_expression_1, ..., scalar_expression_k
// from S
// where predicate
//
// the predicates and the scalar expressions can reference
// attributes of S and also functions.
class spx_qpn: public qp_node{
public:
	tablevar_t *table_name;				// Source table
	std::vector<cnf_elem *> where;		// selection predicate
	std::vector<select_element *> select_list;	// Select list

	std::string node_type(){return("spx_qpn"); };
	bool makes_transform(){return true;};
//			Selection depends on no external libraries.
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}
	void append_to_where(cnf_elem *c){
		where.push_back(c);
	}

	void bind_to_schema(table_list *Schema);
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema);

	std::string to_query_string();
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);
	std::string get_include_file(){return("#include <selection_operator.h>\n");};

	std::vector<select_element *> get_select_list(){return select_list;};
//			The select list's scalar expressions, without the aliases.
	std::vector<scalarexp_t *> get_select_se_list(){
		std::vector<scalarexp_t *> ret;
		size_t i;	// size_t: select_list.size() is unsigned
		for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
		return ret;
	};
	std::vector<cnf_elem *> get_where_clause(){return where;};
	std::vector<cnf_elem *> get_filter_clause(){return where;};
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);

	table_def *get_fields();
//			Selection has no grouping, hence no keys.
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
		std::vector<string> ret;
		return ret;
	}

	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();

	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);

//			Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames);
	int resolve_if_params(ifq_t *ifdb, std::string &err);

	spx_qpn(){
	};
//			Build a selection node from an analyzed query summary.
//			The query must range over exactly one table, and that table
//			must be a stream; otherwise error_code/err_str are set.
	spx_qpn(query_summary_class *qs,table_list *Schema){
//			Get the table name.
//			NOTE the colrefs have the table ref (an int)
//			embedded in them.  Would it make sense
//			to grab the whole table list?
		tablevar_list_t *fm = qs->fta_tree->get_from();
		std::vector<tablevar_t *> tbl_vec = fm->get_table_list();
		if(tbl_vec.size() != 1){
			char tmpstr[200];
//			snprintf (was sprintf): cannot overrun tmpstr.
			snprintf(tmpstr,sizeof(tmpstr),"INTERNAL ERROR building SPX node: query defined over %lu tables.\n",tbl_vec.size() );
			err_str = tmpstr;
			error_code = 1;
		}
//			NOTE(review): tbl_vec[0] is still dereferenced if the FROM
//			list is empty -- confirm callers guarantee at least one table.
		table_name = (tbl_vec[0]);
		int t = tbl_vec[0]->get_schema_ref();
		if(! Schema->is_stream(t)){
			err_str += "ERROR in query "+node_name+", the source "+table_name->get_schema_name()+" is not a stream.\n";
			error_code = 1;
		}

//			Get the select list.
		select_list = qs->fta_tree->get_sl_vec();

//			Get the selection predicate.
		where = qs->wh_cnf;

//			Get the parameters
		param_tbl = qs->param_tbl;
	};

//			the following method is used for distributed query optimization
	double get_rate_estimate();

//			Clone this node, appending suffix to its name.  Parameters
//			are deep-copied; predicates and the select list are shared
//			(shallow copy).
	qp_node* make_copy(std::string suffix){
		spx_qpn *ret = new spx_qpn();

		ret->param_tbl = new param_table();
		std::vector<std::string> param_names = param_tbl->get_param_names();
		size_t pi;
		for(pi=0;pi<param_names.size();pi++){
			data_type *dt = param_tbl->get_data_type(param_names[pi]);
			ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
							param_tbl->handle_access(param_names[pi]));
		}
		ret->definitions = definitions;
		ret->node_name = node_name + suffix;

//			make shallow copy of all fields
		ret->where = where;
		ret->select_list = select_list;
		return ret;
	};

	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// Select, group-by, aggregate.
// Representing
// Select SE_1, ..., SE_k
// From T
// Where predicate
// Group By gb1, ..., gb_n
// Having predicate
//
//		NOTE : the sampling operator is sgahcwcb_qpn.
//
// For now, must have group-by variables and aggregates.
// The scalar expressions which are output must be a function
//		of the group-by variables and the aggregates.
//		The group-by variables can be references to columns of T,
// or they can be scalar expressions.
class sgah_qpn: public qp_node{
public:
	tablevar_t *table_name;				// source table
	std::vector<cnf_elem *> where;		// selection predicate
	std::vector<cnf_elem *> having;		// post-aggregation predicate
	std::vector<select_element *> select_list;	// se's of output
	gb_table gb_tbl;			// Table of all group-by attributes.
	aggregate_table aggr_tbl;	// Table of all referenced aggregates.
	std::vector<scalarexp_t *> gb_sources;	// pre-compute for partitioning.
	int lfta_disorder;		// maximum disorder in the stream between lfta, hfta
	int hfta_disorder;		// maximum disorder in the hfta
	int hfta_slow_flush;	// outputs per input, 0 means no slow flush

//			rollup, cube, and grouping_sets cannot be readily reconstructed by
//			analyzing the patterns, so explicitly record them here.
//			used only so that to_query_string produces something meaningful.
	std::vector<std::string> gb_entry_type;
	std::vector<int> gb_entry_count;

	std::vector<scalarexp_t *> get_gb_sources(){return gb_sources;}

	std::string node_type(){return("sgah_qpn"); };
	bool makes_transform(){return true;};
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}

	void bind_to_schema(table_list *Schema);
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema);

	std::string to_query_string();
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);
//			Pick the group-by implementation: out-of-order input needs
//			the oop variant; otherwise slow-flush or the plain operator.
	std::string get_include_file(){
		if(hfta_disorder <= 1){
			if(hfta_slow_flush>0){
				return("#include <groupby_slowflush_operator.h>\n");
			}else{
				return("#include <groupby_operator.h>\n");
			}
		}else{
			return("#include <groupby_operator_oop.h>\n");
		}
	};

	std::vector<select_element *> get_select_list(){return select_list;};
//			The select list's scalar expressions, without the aliases.
	std::vector<scalarexp_t *> get_select_se_list(){
		std::vector<scalarexp_t *> ret;
		size_t i;	// size_t: select_list.size() is unsigned
		for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
		return ret;
	};
	std::vector<cnf_elem *> get_where_clause(){return where;};
	void append_to_where(cnf_elem *c){
		where.push_back(c);
	}
	std::vector<cnf_elem *> get_filter_clause(){return where;};
	std::vector<cnf_elem *> get_having_clause(){return having;};
	gb_table *get_gb_tbl(){return &gb_tbl;};
	aggregate_table *get_aggr_tbl(){return &aggr_tbl;};
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);

//			table which represents output tuple.
	table_def *get_fields();
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys);

	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();

	sgah_qpn(){
		lfta_disorder = 1;
		hfta_disorder = 1;
		hfta_slow_flush = 0;
	};
//			Build an aggregation node from an analyzed query summary.
//			The query must range over exactly one table, and that table
//			must be a stream; otherwise error_code/err_str are set.
//			The group-by and aggregate tables are copied, not shared,
//			because they may be modified during splitting.
	sgah_qpn(query_summary_class *qs,table_list *Schema){
		lfta_disorder = 1;
		hfta_disorder = 1;
		hfta_slow_flush = 0;

//			Get the table name.
//			NOTE the colrefs have the tablevar ref (an int)
//			embedded in them.  Would it make sense
//			to grab the whole table list?
		tablevar_list_t *fm = qs->fta_tree->get_from();
		std::vector<tablevar_t *> tbl_vec = fm->get_table_list();
		if(tbl_vec.size() != 1){
			char tmpstr[200];
//			snprintf (was sprintf): cannot overrun tmpstr.
			snprintf(tmpstr,sizeof(tmpstr),"INTERNAL ERROR building SGAH node: query defined over %lu tables.\n",tbl_vec.size() );
			err_str=tmpstr;
			error_code = 1;
		}
		table_name = (tbl_vec[0]);
		int t = tbl_vec[0]->get_schema_ref();
		if(! Schema->is_stream(t)){
			err_str += "ERROR in query "+node_name+", the source "+table_name->get_schema_name()+" is not a stream.\n";
			error_code = 1;
		}

//			Get the select list.
		select_list = qs->fta_tree->get_sl_vec();

//			Get the selection and having predicates.
		where = qs->wh_cnf;
		having = qs->hav_cnf;

//			Build a new GB var table (don't share, might need to modify)
		int g;
		for(g=0;g<qs->gb_tbl->size();g++){
			gb_tbl.add_gb_var(qs->gb_tbl->get_name(g),
				qs->gb_tbl->get_tblvar_ref(g), qs->gb_tbl->get_def(g),
				qs->gb_tbl->get_reftype(g)
			);
		}
//			rollup/cube/grouping_sets pattern info travels with the table.
		gb_tbl.set_pattern_info(qs->gb_tbl);

//			Build a new aggregate table.  (don't share, might need
//			to modify).
		int a;
		for(a=0;a<qs->aggr_tbl->size();a++){
			aggr_tbl.add_aggr(
				qs->aggr_tbl->duplicate(a)
			);
		}

//			Get the parameters
		param_tbl = qs->param_tbl;
	};

	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);

//			Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames);
	int resolve_if_params(ifq_t *ifdb, std::string &err);

//			the following method is used for distributed query optimization
	double get_rate_estimate();

//			Clone this node, appending suffix to its name.  Parameters
//			are deep-copied; predicates, select list, and the gb/aggr
//			tables are shared (shallow copy).
//			NOTE(review): lfta_disorder/hfta_disorder are not copied and
//			keep the default of 1 in the clone -- confirm this is intended.
	qp_node* make_copy(std::string suffix){
		sgah_qpn *ret = new sgah_qpn();

		ret->param_tbl = new param_table();
		std::vector<std::string> param_names = param_tbl->get_param_names();
		size_t pi;
		for(pi=0;pi<param_names.size();pi++){
			data_type *dt = param_tbl->get_data_type(param_names[pi]);
			ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
							param_tbl->handle_access(param_names[pi]));
		}
		ret->definitions = definitions;
		ret->hfta_slow_flush = hfta_slow_flush;
		ret->node_name = node_name + suffix;

//			make shallow copy of all fields
		ret->where = where;
		ret->having = having;
		ret->select_list = select_list;
		ret->gb_tbl = gb_tbl;
		ret->aggr_tbl = aggr_tbl;
		return ret;
	};

//			Split aggregation into two HFTA components - sub and superaggregation
//			If unable to split the aggregates, split into selection and aggregation
//			If resulting low-level query is empty (e.g. when aggregates cannot be split and
//			where clause is empty) empty vector will be returned
	virtual std::vector<qp_node *> split_node_for_hfta(ext_fcn_list *Ext_fcns, table_list *Schema);

	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// Select, group-by, aggregate. with running aggregates
// Representing
// Select SE_1, ..., SE_k
// From T
// Where predicate
// Group By gb1, ..., gb_n
// Closing When predicate
// Having predicate
//
// NOTE : the sampling operator is sgahcwcb_qpn.
//
// For now, must have group-by variables and aggregates.
// The scalar expressions which are output must be a function
//		of the group-by variables and the aggregates.
//		The group-by variables can be references to columns of T,
// or they can be scalar expressions.
class rsgah_qpn: public qp_node{
public:
	tablevar_t *table_name;				// source table
	std::vector<cnf_elem *> where;		// selection predicate
	std::vector<cnf_elem *> having;		// post-aggregation predicate
	std::vector<cnf_elem *> closing_when;	// group closing predicate
	std::vector<select_element *> select_list;	// se's of output
	gb_table gb_tbl;			// Table of all group-by attributes.
	aggregate_table aggr_tbl;	// Table of all referenced aggregates.
	std::vector<scalarexp_t *> gb_sources;	// pre-compute for partitioning.
	int lfta_disorder;		// maximum disorder allowed in stream between lfta, hfta
	int hfta_disorder;		// maximum disorder allowed in hfta

	std::vector<scalarexp_t *> get_gb_sources(){return gb_sources;}

	std::string node_type(){return("rsgah_qpn"); };
	bool makes_transform(){return true;};
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}

	void bind_to_schema(table_list *Schema);
//			Not supported for running aggregation; abort if called.
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
		fprintf(stderr,"INTERNAL ERROR, calling rsgah_qpn::get_colrefs\n");
		exit(1);
	}

	std::string to_query_string();
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);
	std::string get_include_file(){return("#include <running_gb_operator.h>\n");};

	std::vector<select_element *> get_select_list(){return select_list;};
//			The select list's scalar expressions, without the aliases.
	std::vector<scalarexp_t *> get_select_se_list(){
		std::vector<scalarexp_t *> ret;
		size_t i;	// size_t: select_list.size() is unsigned
		for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
		return ret;
	};
	std::vector<cnf_elem *> get_where_clause(){return where;};
	void append_to_where(cnf_elem *c){
		where.push_back(c);
	}
	std::vector<cnf_elem *> get_filter_clause(){return where;};
	std::vector<cnf_elem *> get_having_clause(){return having;};
	std::vector<cnf_elem *> get_closing_when_clause(){return closing_when;};
	gb_table *get_gb_tbl(){return &gb_tbl;};
	aggregate_table *get_aggr_tbl(){return &aggr_tbl;};
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);

//			table which represents output tuple.
	table_def *get_fields();
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys);

	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();

	rsgah_qpn(){
		lfta_disorder = 1;
		hfta_disorder = 1;
	};
//			Build a running-aggregation node from an analyzed query summary.
//			The query must range over exactly one table, and that table
//			must be a stream; otherwise error_code/err_str are set.
//			The group-by and aggregate tables are copied, not shared,
//			because they may be modified during splitting.
	rsgah_qpn(query_summary_class *qs,table_list *Schema){
		lfta_disorder = 1;
		hfta_disorder = 1;

//			Get the table name.
//			NOTE the colrefs have the tablevar ref (an int)
//			embedded in them.  Would it make sense
//			to grab the whole table list?
		tablevar_list_t *fm = qs->fta_tree->get_from();
		std::vector<tablevar_t *> tbl_vec = fm->get_table_list();
		if(tbl_vec.size() != 1){
			char tmpstr[200];
//			snprintf (was sprintf): cannot overrun tmpstr.
//			BUGFIX: message typo was "buildingR SGAH".
			snprintf(tmpstr,sizeof(tmpstr),"INTERNAL ERROR building RSGAH node: query defined over %lu tables.\n",tbl_vec.size() );
			err_str=tmpstr;
			error_code = 1;
		}
		table_name = (tbl_vec[0]);
		int t = tbl_vec[0]->get_schema_ref();
		if(! Schema->is_stream(t)){
			err_str += "ERROR in query "+node_name+", the source "+table_name->get_schema_name()+" is not a stream.\n";
			error_code = 1;
		}

//			Get the select list.
		select_list = qs->fta_tree->get_sl_vec();

//			Get the selection and having predicates.
		where = qs->wh_cnf;
		having = qs->hav_cnf;
		closing_when = qs->closew_cnf;

//			Build a new GB var table (don't share, might need to modify)
		int g;
		for(g=0;g<qs->gb_tbl->size();g++){
			gb_tbl.add_gb_var(qs->gb_tbl->get_name(g),
				qs->gb_tbl->get_tblvar_ref(g), qs->gb_tbl->get_def(g),
				qs->gb_tbl->get_reftype(g)
			);
		}

//			Build a new aggregate table.  (don't share, might need
//			to modify).
		int a;
		for(a=0;a<qs->aggr_tbl->size();a++){
			aggr_tbl.add_aggr(
				qs->aggr_tbl->duplicate(a)
			);
		}

//			Get the parameters
		param_tbl = qs->param_tbl;
	};

	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	std::vector<qp_node *> split_node_for_hfta(ext_fcn_list *Ext_fcns, table_list *Schema);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);

//			Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames);
	int resolve_if_params(ifq_t *ifdb, std::string &err){ return 0;}

//			the following method is used for distributed query optimization
	double get_rate_estimate();

//			Clone this node, appending suffix to its name.  Parameters
//			are deep-copied; predicates, select list, and the gb/aggr
//			tables are shared (shallow copy).
//			NOTE(review): lfta_disorder/hfta_disorder are not copied and
//			keep the default of 1 in the clone -- confirm this is intended.
	qp_node* make_copy(std::string suffix){
		rsgah_qpn *ret = new rsgah_qpn();

		ret->param_tbl = new param_table();
		std::vector<std::string> param_names = param_tbl->get_param_names();
		size_t pi;
		for(pi=0;pi<param_names.size();pi++){
			data_type *dt = param_tbl->get_data_type(param_names[pi]);
			ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
							param_tbl->handle_access(param_names[pi]));
		}
		ret->definitions = definitions;
		ret->node_name = node_name + suffix;

//			make shallow copy of all fields
		ret->where = where;
		ret->having = having;
		ret->closing_when = closing_when;
		ret->select_list = select_list;
		ret->gb_tbl = gb_tbl;
		ret->aggr_tbl = aggr_tbl;
		return ret;
	};

	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// Watchlist - from a table read from an external source.
class watch_tbl_qpn: public qp_node{
public:
	table_def *table_layout;	// the output schema
	std::vector<std::string> key_flds;	// names of the fields marked as keys

//		Parameters related to loading the table
	std::string filename;		// external source file the table is loaded from
	int refresh_interval;		// seconds between reloads -- units assumed, TODO confirm

//		Watchlists carry no predicates, so pushing one in is a
//		programming error: report and abort.
	void append_to_where(cnf_elem *c){
		fprintf(stderr, "ERROR, append_to_where called on watch_tbl_qpn, not supported, query %s.\n", node_name.c_str());
		exit(1);
	}

	std::string node_type(){return("watch_tbl_qpn"); };
	bool makes_transform(){return false;};
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}

//		Nothing to bind: the schema is self-contained (built in the ctor).
	void bind_to_schema(table_list *Schema){}
//		No column references: return an empty set.
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
		col_id_set ret;
		return ret;
	}

	std::string to_query_string();
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);
	std::string get_include_file(){
			return("#include <watchlist_tbl.h>\n");
	};

	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);

	table_def *get_fields();
//		The key fields were extracted from the field modifiers in the ctor.
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
		return key_flds;
	}

	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();

	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);

//		Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames);

//		No predicates, return an empty clause
	std::vector<cnf_elem *> get_where_clause(){
		std::vector<cnf_elem *> t;
		return(t);
	};
	std::vector<cnf_elem *> get_filter_clause(){
		return get_where_clause();
	}

	watch_tbl_qpn(){
	};

//		Build a watchlist node from an analyzed table-watchlist definition.
//		Extracts the key fields from the field modifiers and the loading
//		parameters (filename, refresh_interval) from the DEFINE block;
//		exits with an error message if any of them is missing or invalid.
	watch_tbl_qpn(query_summary_class *qs,table_list *Schema){
		node_name=qs->query_name;
		param_tbl = qs->param_tbl;
		definitions = qs->definitions;

//			Populate the schema
		table_layout = new table_def(
			node_name.c_str(), NULL, NULL, qs->fta_tree->fel, WATCHLIST_SCHEMA
		);

//			Find the keys: any field with a key/Key/KEY modifier.
		std::vector<field_entry *> flds = qs->fta_tree->fel->get_list();
		for(int f=0;f<flds.size();++f){
			if(flds[f]->get_modifier_list()->contains_key("key") ||
					flds[f]->get_modifier_list()->contains_key("Key") ||
					flds[f]->get_modifier_list()->contains_key("KEY") ){
				key_flds.push_back(flds[f]->get_name());
			}
		}
		if(key_flds.size()==0){
			fprintf(stderr,"Error, no key fields defined for table watchlist %s\n",node_name.c_str());
			exit(1);
		}
		table_layout->set_keys(key_flds);	// communicate keys to consumers

//			Get loading parameters
		if(definitions.count("filename")>0){
			filename = definitions["filename"];
		}else{
			fprintf(stderr, "Error, no filename for source data defined for table watchlist %s\n",node_name.c_str());
			exit(1);
		}
//			atoi returns 0 on a non-numeric value, which the <= 0 check rejects.
		if(definitions.count("refresh_interval")>0){
			refresh_interval = atoi(definitions["refresh_interval"].c_str());
			if(refresh_interval <= 0){
				fprintf(stderr, "Error, the refresh_interval (%s) of table watchlist %s must be a positive non-zero integer.\n",definitions["refresh_interval"].c_str(), node_name.c_str());
				exit(1);
			}
		}else{
			fprintf(stderr, "Error, no refresh_interval defined for table watchlist %s\n",node_name.c_str());
			exit(1);
		}
	}

//		Clone this node, appending suffix to its name.  Parameters are
//		deep-copied; the table layout gets a shallow copy under the new name.
	qp_node *make_copy(std::string suffix){
		watch_tbl_qpn *ret = new watch_tbl_qpn();
		ret->filename = filename;
		ret->refresh_interval = refresh_interval;
		ret->key_flds = key_flds;

		ret->param_tbl = new param_table();
		std::vector<std::string> param_names = param_tbl->get_param_names();
		int pi;
		for(pi=0;pi<param_names.size();pi++){
			data_type *dt = param_tbl->get_data_type(param_names[pi]);
			ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
							param_tbl->handle_access(param_names[pi]));
		}
		ret->definitions = definitions;
		ret->node_name = node_name + suffix;

		ret->table_layout = table_layout->make_shallow_copy(ret->node_name);

		return ret;
	};

//		the following method is used for distributed query optimization
	double get_rate_estimate();

	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// forward reference
class filter_join_qpn;
class watch_join_qpn;
// (temporal) Merge query plan node.
// represent the following query fragment
// Merge c1:c2
// from T1 _t1, T2 _t2
//
// T1 and T2 must have compatible schemas,
// that is the same types in the same slots.
// c1 and c2 must be colrefs from T1 and T2,
// both ref'ing the same slot. Their types
// must be temporal and the same kind of temporal.
// in the output, no other field is temporal.
//		the field names of the output are drawn from T1.
class mrg_qpn: public qp_node{
public:
std::vector<tablevar_t *> fm; // Source table
std::vector<colref_t *> mvars; // the merge-by columns.
scalarexp_t *slack;
table_def *table_layout; // the output schema
int merge_fieldpos; // position of merge field,
 // convenience for manipulation.
int disorder; // max disorder seen in the input / allowed in the output
// partition definition for merges that combine streams partitioned over multiple interfaces
partn_def_t* partn_def;
// Merge nodes carry no predicates, so adding one is always an error.
void append_to_where(cnf_elem *c){
fprintf(stderr, "ERROR, append_to_where called on mrg_qpn, not supported, query %s.\n", node_name.c_str());
exit(1);
}
std::string node_type(){return("mrg_qpn"); };
bool makes_transform(){return false;};
std::vector<std::string> external_libs(){
std::vector<std::string> ret;
return ret;
}
void bind_to_schema(table_list *Schema);
col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
fprintf(stderr,"INTERNAL ERROR, calling mrg_qpn::get_colrefs\n");
exit(1);
}
std::string to_query_string();
std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
std::string generate_functor_name();
std::string generate_operator(int i, std::string params);
// Disorder > 1 requires the out-of-order-capable merge operator.
std::string get_include_file(){
if(disorder>1)
return("#include <merge_operator_oop.h>\n");
return("#include <merge_operator.h>\n");
};
cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);
table_def *get_fields();
// A merge derives no key information; always returns an empty list.
std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
std::vector<string> ret;
return ret;
}
std::vector<tablevar_t *> get_input_tbls();
std::vector<tablevar_t *> get_output_tbls();
std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);
// Ensure that any refs to interface params have been split away.
int count_ifp_refs(std::set<std::string> &ifpnames);
// No predicates, return an empty clause
std::vector<cnf_elem *> get_where_clause(){
std::vector<cnf_elem *> t;
return(t);
};
std::vector<cnf_elem *> get_filter_clause(){
return get_where_clause();
}
mrg_qpn(){
partn_def = NULL;
};
void set_disorder(int d){
disorder = d;
}
// Build a merge node directly from a parsed query summary.
// All sources must be streams with compatible schemas; the output
// schema is taken from the first source with temporal modifiers
// stripped from every field except the merge field.
mrg_qpn(query_summary_class *qs,table_list *Schema){
disorder = 1;
// Grab the elements of the query node.
fm = qs->fta_tree->get_from()->get_table_list();
mvars = qs->mvars;
slack = qs->slack;
// sanity check: one merge column per source table.
if(fm.size() != mvars.size()){
fprintf(stderr,"INTERNAL ERROR in mrg_qpn::mrg_qpn. fm.size() = %lu, mvars.size() = %lu\n",fm.size(),mvars.size());
exit(1);
}
for(int f=0;f<fm.size();++f){
int t=fm[f]->get_schema_ref();
if(! Schema->is_stream(t)){
err_str += "ERROR in query "+node_name+", the source "+fm[f]->get_schema_name()+" is not a stream.\n";
error_code = 1;
}
}
// Get the parameters
param_tbl = qs->param_tbl;
// Need to set the node name now, so that the
// schema (table_layout) can be properly named.
// TODO: Setting the name of the table might best be done
// via the set_node_name method, because presumably
// that's when the node name is really known.
// This should propagate to the table_def table_layout
node_name=qs->query_name;
/*
int ff;
printf("instantiating merge node, name = %s, %d sources.\n\t",node_name.c_str(), fm.size());
for(ff=0;ff<fm.size();++ff){
printf("%s ",fm[ff]->to_string().c_str());
}
printf("\n");
*/
// Create the output schema.
// Strip temporal properties from all fields except the merge field.
std::vector<field_entry *> flva = Schema->get_fields(fm[0]->get_schema_name());
field_entry_list *fel = new field_entry_list();
int f;
for(f=0;f<flva.size();++f){
field_entry *fe;
data_type dt(flva[f]->get_type().c_str(), flva[f]->get_modifier_list());
if(flva[f]->get_name() == mvars[0]->get_field()){
merge_fieldpos = f;
// if(slack != NULL) dt.reset_temporal();
}else{
dt.reset_temporal();
}
// Rebuild the field's type-parameter list for the new table_def.
param_list *plist = new param_list();
std::vector<std::string> param_strings = dt.get_param_keys();
int p;
for(p=0;p<param_strings.size();++p){
std::string v = dt.get_param_val(param_strings[p]);
if(v != "")
plist->append(param_strings[p].c_str(),v.c_str());
else
plist->append(param_strings[p].c_str());
}
fe=new field_entry(
dt.get_type_str().c_str(), flva[f]->get_name().c_str(),"",plist, flva[f]->get_unpack_fcns());
fel->append_field(fe);
}
table_layout = new table_def(
node_name.c_str(), NULL, NULL, fel, STREAM_SCHEMA
);
partn_def = NULL;
};
/////////////////////////////////////////////
/// Created for de-siloing. to be removed? or is it otherwise useful?
// Merge existing set of sources (de-siloing).
// Like the query-summary constructor, but the sources are given by
// name and the merge field is the first temporal field found.
mrg_qpn(std::string n_name, std::vector<std::string> &src_names,table_list *Schema){
int i,f;
disorder = 1;
// Construct the fm list
for(f=0;f<src_names.size();++f){
int tbl_ref = Schema->get_table_ref(src_names[f]);
if(tbl_ref < 0){
fprintf(stderr,"INTERNAL ERROR, can't find %s in the schema when constructing no-silo merge node %s\n",src_names[f].c_str(), n_name.c_str());
exit(1);
}
table_def *src_tbl = Schema->get_table(tbl_ref);
tablevar_t *fm_t = new tablevar_t(src_names[f].c_str());
string range_name = "_t" + int_to_string(f);
fm_t->set_range_var(range_name);
fm_t->set_schema_ref(tbl_ref);
fm.push_back(fm_t);
}
// Create the output schema.
// Strip temporal properties from all fields except the merge field.
std::vector<field_entry *> flva = Schema->get_fields(fm[0]->get_schema_name());
field_entry_list *fel = new field_entry_list();
bool temporal_found = false;
for(f=0;f<flva.size();++f){
field_entry *fe;
data_type dt(flva[f]->get_type().c_str(), flva[f]->get_modifier_list());
// The first temporal field becomes the merge field; all later
// temporal fields have their temporal property stripped.
if(dt.is_temporal() && !temporal_found){
merge_fieldpos = f;
temporal_found = true;
}else{
dt.reset_temporal();
}
param_list *plist = new param_list();
std::vector<std::string> param_strings = dt.get_param_keys();
int p;
for(p=0;p<param_strings.size();++p){
std::string v = dt.get_param_val(param_strings[p]);
if(v != "")
plist->append(param_strings[p].c_str(),v.c_str());
else
plist->append(param_strings[p].c_str());
}
fe=new field_entry(
dt.get_type_str().c_str(), flva[f]->get_name().c_str(),"",plist,
flva[f]->get_unpack_fcns()
);
fel->append_field(fe);
}
if(! temporal_found){
fprintf(stderr,"ERROR, can't find temporal field of the sources when constructing no-silo merge node %s\n",n_name.c_str());
exit(1);
}
node_name=n_name;
table_layout = new table_def(
node_name.c_str(), NULL, NULL, fel, STREAM_SCHEMA
);
partn_def = NULL;
param_tbl = new param_table();
// Construct mvars: one colref per source, all at merge_fieldpos.
for(f=0;f<fm.size();++f){
std::vector<field_entry *> flv_f = Schema->get_fields(fm[f]->get_schema_name());
data_type dt_f(flv_f[merge_fieldpos]->get_type().c_str(),
flva[merge_fieldpos]->get_modifier_list());
colref_t *mcr = new colref_t(fm[f]->get_var_name().c_str(),
flv_f[merge_fieldpos]->get_name().c_str());
mvars.push_back(mcr);
}
// literal_t *s_lit = new literal_t("5",LITERAL_INT);
// slack = new scalarexp_t(s_lit);
slack = NULL;
};
// end de-siloing
////////////////////////////////////////
void resolve_slack(scalarexp_t *t_se, std::string fname, std::vector<std::pair<std::string, std::string> > &sources,ifq_t *ifdb, gb_table *gbt);
// Merge filter_join LFTAs.
mrg_qpn(filter_join_qpn *spx, std::string n_name, std::vector<std::string> &sources, std::vector<std::pair<std::string, std::string> > &ifaces, ifq_t *ifdb);
// Merge watch_join LFTAs.
mrg_qpn(watch_join_qpn *spx, std::string n_name, std::vector<std::string> &sources, std::vector<std::pair<std::string, std::string> > &ifaces, ifq_t *ifdb);
// Merge selection LFTAs.
// Output schema comes from the selection's select list; the merge
// field is the (single) temporal select element.
mrg_qpn(spx_qpn *spx, std::string n_name, std::vector<std::string> &sources, std::vector<std::pair<std::string, std::string> > &ifaces, ifq_t *ifdb){
disorder = 1;
// NOTE(review): this assignment is overwritten by the
// "param_tbl = new param_table()" deep copy below — confirm it can go.
param_tbl = spx->param_tbl;
int i;
node_name = n_name;
field_entry_list *fel = new field_entry_list();
merge_fieldpos = -1;
for(i=0;i<spx->select_list.size();++i){
data_type *dt = spx->select_list[i]->se->get_data_type()->duplicate();
if(dt->is_temporal()){
// First temporal field wins; later ones are demoted with a warning.
if(merge_fieldpos < 0){
merge_fieldpos = i;
}else{
fprintf(stderr,"Warning: Merge subquery %s found two temporal fields (%s, %s), using %s\n", n_name.c_str(), spx->select_list[merge_fieldpos]->name.c_str(), spx->select_list[i]->name.c_str(), spx->select_list[merge_fieldpos]->name.c_str() );
dt->reset_temporal();
}
}
field_entry *fe = dt->make_field_entry(spx->select_list[i]->name);
fel->append_field(fe);
delete dt;
}
if(merge_fieldpos<0){
fprintf(stderr,"ERROR, no temporal attribute for merge subquery %s\n",n_name.c_str());
exit(1);
}
table_layout = new table_def( n_name.c_str(), NULL, NULL, fel, STREAM_SCHEMA);
// NEED TO HANDLE USER_SPECIFIED SLACK
this->resolve_slack(spx->select_list[merge_fieldpos]->se,
spx->select_list[merge_fieldpos]->name, ifaces, ifdb,NULL);
// if(this->slack == NULL)
// fprintf(stderr,"Zero slack.\n");
// else
// fprintf(stderr,"slack is %s\n",slack->to_string().c_str());
// One range variable (_m<i>) and one merge colref per source.
for(i=0;i<sources.size();i++){
std::string rvar = "_m"+int_to_string(i);
mvars.push_back(new colref_t(rvar.c_str(), spx->select_list[merge_fieldpos]->name.c_str()));
mvars[i]->set_tablevar_ref(i);
fm.push_back(new tablevar_t(sources[i].c_str()));
fm[i]->set_range_var(rvar);
}
// Deep-copy the source node's parameter table.
param_tbl = new param_table();
std::vector<std::string> param_names = spx->param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = spx->param_tbl->get_data_type(param_names[pi]);
param_tbl->add_param(param_names[pi],dt->duplicate(),
spx->param_tbl->handle_access(param_names[pi]));
}
definitions = spx->definitions;
}
// Merge aggregation LFTAs.
// Same structure as the selection-LFTA constructor, but slack
// resolution also consults the aggregation's group-by table.
mrg_qpn(sgah_qpn *sgah, std::string n_name, std::vector<std::string> &sources, std::vector<std::pair< std::string, std::string> > &ifaces, ifq_t *ifdb){
disorder = 1;
// NOTE(review): overwritten by the deep copy below, as in the
// spx_qpn constructor — confirm it can go.
param_tbl = sgah->param_tbl;
int i;
node_name = n_name;
field_entry_list *fel = new field_entry_list();
merge_fieldpos = -1;
for(i=0;i<sgah->select_list.size();++i){
data_type *dt = sgah->select_list[i]->se->get_data_type()->duplicate();
if(dt->is_temporal()){
if(merge_fieldpos < 0){
merge_fieldpos = i;
}else{
fprintf(stderr,"Warning: Merge subquery %s found two temporal fields (%s, %s), using %s\n", n_name.c_str(), sgah->select_list[merge_fieldpos]->name.c_str(), sgah->select_list[i]->name.c_str(), sgah->select_list[merge_fieldpos]->name.c_str() );
dt->reset_temporal();
}
}
field_entry *fe = dt->make_field_entry(sgah->select_list[i]->name);
fel->append_field(fe);
delete dt;
}
if(merge_fieldpos<0){
fprintf(stderr,"ERROR, no temporal attribute for merge subquery %s\n",n_name.c_str());
exit(1);
}
table_layout = new table_def( n_name.c_str(), NULL, NULL, fel, STREAM_SCHEMA);
// NEED TO HANDLE USER_SPECIFIED SLACK
this->resolve_slack(sgah->select_list[merge_fieldpos]->se,
sgah->select_list[merge_fieldpos]->name, ifaces, ifdb,
&(sgah->gb_tbl));
if(this->slack == NULL)
fprintf(stderr,"Zero slack.\n");
else
fprintf(stderr,"slack is %s\n",slack->to_string().c_str());
for(i=0;i<sources.size();i++){
std::string rvar = "_m"+int_to_string(i);
mvars.push_back(new colref_t(rvar.c_str(), sgah->select_list[merge_fieldpos]->name.c_str()));
mvars[i]->set_tablevar_ref(i);
fm.push_back(new tablevar_t(sources[i].c_str()));
fm[i]->set_range_var(rvar);
}
param_tbl = new param_table();
std::vector<std::string> param_names = sgah->param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = sgah->param_tbl->get_data_type(param_names[pi]);
param_tbl->add_param(param_names[pi],dt->duplicate(),
sgah->param_tbl->handle_access(param_names[pi]));
}
definitions = sgah->definitions;
}
// Clone this node under a new name.
// NOTE(review): fm, mvars and partn_def are NOT copied here —
// presumably the caller (e.g. split_sources) rebuilds them; verify.
qp_node *make_copy(std::string suffix){
mrg_qpn *ret = new mrg_qpn();
ret->slack = slack;
ret->disorder = disorder;
ret->param_tbl = new param_table();
std::vector<std::string> param_names = param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = param_tbl->get_data_type(param_names[pi]);
ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
param_tbl->handle_access(param_names[pi]));
}
ret->definitions = definitions;
ret->node_name = node_name + suffix;
ret->table_layout = table_layout->make_shallow_copy(ret->node_name);
ret->merge_fieldpos = merge_fieldpos;
return ret;
};
std::vector<mrg_qpn *> split_sources();
// the following method is used for distributed query optimization
double get_rate_estimate();
// get partition definition for merges that combine streams partitioned over multiple interfaces
// return NULL for regular merges
partn_def_t* get_partn_definition(map<string, int> lfta_names, vector<string> interface_names, vector<string> machine_names, ifq_t *ifaces_db, partn_def_list_t *partn_parse_result) {
// Cached from a previous call.
if (partn_def)
return partn_def;
int err;
string err_str;
string partn_name;
vector<tablevar_t *> input_tables = get_input_tbls();
for (int i = 0; i < input_tables.size(); ++i) {
tablevar_t * table = input_tables[i];
vector<string> partn_names = ifaces_db->get_iface_vals(table->get_machine(), table->get_interface(),"iface_partition",err,err_str);
if (partn_names.size() != 1) // can't have more than one value of partition attribute
return NULL;
string new_partn_name = partn_names[0];
// need to make sure that all ifaces belong to the same partition
if (!i)
partn_name = new_partn_name;
else if (new_partn_name != partn_name)
return NULL;
}
// now find partition definition corresponding to partn_name
partn_def = partn_parse_result->get_partn_def(partn_name);
return partn_def;
};
void set_partn_definition(partn_def_t* def) {
partn_def = def;
}
// True iff the inputs span at least two distinct, non-empty hosts.
bool is_multihost_merge() {
bool is_multihost = false;
// each input table must be have machine attribute be non-empty
// and there should be at least 2 different values of machine attributes
vector<tablevar_t *> input_tables = get_input_tbls();
string host = input_tables[0]->get_machine();
for (int i = 1; i < input_tables.size(); ++i) {
string new_host = input_tables[i]->get_machine();
if (new_host == "")
return false;
if (new_host != host)
is_multihost = true;
}
return is_multihost;
}
void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// eq_temporal, hash join query plan node.
// represent the following query fragment
// select scalar_expression_1, ..., scalar_expression_k
// from T0 t0, T1 t1
// where predicate
//
// the predicates and the scalar expressions can reference
// attributes of t0 and t1 and also functions.
// The predicate must contain CNF elements to enable the
// efficient evaluation of the query.
// 1) at least one predicate of the form
// (temporal se in t0) = (temporal se in t1)
// 2) at least one predicate of the form
// (non-temporal se in t0) = (non-temporal se in t1)
//
class join_eq_hash_qpn: public qp_node{
public:
std::vector<tablevar_t *> from; // Source tables
std::vector<select_element *> select_list; // Select list
std::vector<cnf_elem *> prefilter[2]; // source prefilters
std::vector<cnf_elem *> temporal_eq; // define temporal window
std::vector<cnf_elem *> hash_eq; // define hash key
std::vector<cnf_elem *> postfilter; // final filter on hash matches.
std::vector<cnf_elem *> where; // all the filters
// useful for summary analysis
std::vector<scalarexp_t *> hash_src_r, hash_src_l;
std::vector<scalarexp_t *> get_hash_r(){return hash_src_r;}
std::vector<scalarexp_t *> get_hash_l(){return hash_src_l;}
std::string node_type(){return("join_eq_hash_qpn"); };
bool makes_transform(){return true;};
std::vector<std::string> external_libs(){
std::vector<std::string> ret;
return ret;
}
void bind_to_schema(table_list *Schema);
col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
fprintf(stderr,"INTERNAL ERROR, calling join_eq_hash_qpn::get_colrefs\n");
exit(1);
}
// Predicates are classified at construction; late additions unsupported.
void append_to_where(cnf_elem *c){
fprintf(stderr, "Error, append_to_where called on join_hash_qpn, not supported, query is %s\n",node_name.c_str());
exit(1);
}
std::string to_query_string();
std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
std::string generate_functor_name();
std::string generate_operator(int i, std::string params);
std::string get_include_file(){return("#include <join_eq_hash_operator.h>\n");};
std::vector<select_element *> get_select_list(){return select_list;};
// Scalar expressions only, in select-list order.
std::vector<scalarexp_t *> get_select_se_list(){
std::vector<scalarexp_t *> ret;
int i;
for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
return ret;
};
// Used for LFTA only
std::vector<cnf_elem *> get_where_clause(){
std::vector<cnf_elem *> t;
return(t);
};
std::vector<cnf_elem *> get_filter_clause(){
return get_where_clause();
}
cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);
table_def *get_fields();
// It might be feasible to find keys in an equijoin expression.
std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
std::vector<string> ret;
return ret;
}
std::vector<tablevar_t *> get_input_tbls();
std::vector<tablevar_t *> get_output_tbls();
std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);
// Ensure that any refs to interface params have been split away.
int count_ifp_refs(std::set<std::string> &ifpnames);
join_eq_hash_qpn(){
};
// Build the node from a parsed query summary and classify each WHERE
// conjunct as prefilter / postfilter / temporal_eq / hash_eq.
join_eq_hash_qpn(query_summary_class *qs,table_list *Schema){
int w;
// Get the table name.
// NOTE the colrefs have the table ref (an int)
// embedded in them. Would it make sense
// to grab the whole table list?
from = qs->fta_tree->get_from()->get_table_list();
// Exactly two stream sources are required.
if(from.size() != 2){
char tmpstr[200];
sprintf(tmpstr,"ERROR building join_eq_hash node: query defined over %lu tables, but joins must be between two sources.\n",from.size() );
err_str = tmpstr;
error_code = 1;
}
for(int f=0;f<from.size();++f){
int t=from[f]->get_schema_ref();
if(! Schema->is_stream(t)){
err_str += "ERROR in query "+node_name+", the source "+from[f]->get_schema_name()+" is not a stream.\n";
error_code = 1;
}
}
// Get the select list.
select_list = qs->fta_tree->get_sl_vec();
// Get the selection predicate.
where = qs->wh_cnf;
for(w=0;w<where.size();++w){
analyze_cnf(where[w]);
std::vector<int> pred_tbls;
get_tablevar_ref_pr(where[w]->pr,pred_tbls);
// Prefilter if refs only one tablevar
if(pred_tbls.size()==1){
prefilter[pred_tbls[0]].push_back(where[w]);
continue;
}
// refs nothing -- might be sampling, do it as postfilter.
if(pred_tbls.size()==0){
postfilter.push_back(where[w]);
continue;
}
// See if it can be a hash or temporal predicate.
// NOTE: synchronize with the temporality checking
// done at join_eq_hash_qpn::get_fields
if(where[w]->is_atom && where[w]->eq_pred){
std::vector<int> sel_tbls, ser_tbls;
get_tablevar_ref_se(where[w]->pr->get_left_se(),sel_tbls);
get_tablevar_ref_se(where[w]->pr->get_right_se(),ser_tbls);
if(sel_tbls.size()==1 && ser_tbls.size()==1 && sel_tbls[0] != ser_tbls[0]){
// make channel 0 SE on LHS.
if(sel_tbls[0] != 0)
where[w]->pr->swap_scalar_operands();
// Both sides monotonic in the same direction => the equality
// defines the temporal join window; otherwise it is a hash key.
data_type *dtl=where[w]->pr->get_left_se()->get_data_type();
data_type *dtr=where[w]->pr->get_right_se()->get_data_type();
if( (dtl->is_increasing() && dtr->is_increasing()) ||
(dtl->is_decreasing() && dtr->is_decreasing()) )
temporal_eq.push_back(where[w]);
else
hash_eq.push_back(where[w]);
continue;
}
}
// All tests failed, fallback is postfilter.
postfilter.push_back(where[w]);
}
// At least one temporal equality is mandatory to bound the join state.
if(temporal_eq.size()==0){
err_str = "ERROR in join query: can't find temporal equality predicate to define a join window.\n";
error_code = 1;
}
// Get the parameters
param_tbl = qs->param_tbl;
};
// the following method is used for distributed query optimization
double get_rate_estimate();
// Clone under a new name; the parameter table is deep-copied, the
// predicate lists and select list are shared (shallow) copies.
qp_node* make_copy(std::string suffix){
join_eq_hash_qpn *ret = new join_eq_hash_qpn();
ret->param_tbl = new param_table();
std::vector<std::string> param_names = param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = param_tbl->get_data_type(param_names[pi]);
ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
param_tbl->handle_access(param_names[pi]));
}
ret->definitions = definitions;
ret->node_name = node_name + suffix;
// make shallow copy of all fields
ret->where = where;
ret->from = from;
ret->select_list = select_list;
ret->prefilter[0] = prefilter[0];
ret->prefilter[1] = prefilter[1];
ret->postfilter = postfilter;
ret->temporal_eq = temporal_eq;
ret->hash_eq = hash_eq;
return ret;
};
void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// ---------------------------------------------
// eq_temporal, hash join query plan node.
// represent the following query fragment
// select scalar_expression_1, ..., scalar_expression_k
// FILTER_JOIN(col, range) from T0 t0, T1 t1
// where predicate
//
// t0 is the output range variable, t1 is the filtering range
// variable. Both must alias a PROTOCOL.
// The scalar expressions in the select clause may
// reference t0 only.
// The predicates are classified as follows
// prefilter predicates:
// a cheap predicate in t0 such that there is an equivalent
// predicate in t1. Cost decisions about pushing to
// lfta prefilter made later.
// t0 predicates (other than prefilter predicates)
// -- cheap vs. expensive sorted out at generate time,
// the constructor isn't called with the function list.
// t1 predicates (other than prefilter predicates).
// equi-join predicates of the form:
// (se in t0) = (se in t1)
//
// There must be at least one equi-join predicate.
// No join predicates other than equi-join predicates
// are allowed.
// Warn on temporal equi-join predicates.
// t1 predicates should not be expensive ... warn?
//
class filter_join_qpn: public qp_node{
public:
std::vector<tablevar_t *> from; // Source tables
colref_t *temporal_var; // join window in FROM
unsigned int temporal_range; // metadata.
std::vector<select_element *> select_list; // Select list
std::vector<cnf_elem *> shared_pred; // prefilter preds
std::vector<cnf_elem *> pred_t0; // main (R) preds
std::vector<cnf_elem *> pred_t1; // filtering (S) preds
std::vector<cnf_elem *> hash_eq; // define hash key
std::vector<cnf_elem *> postfilter; // ref's no table.
std::vector<cnf_elem *> where; // all the filters
// useful for summary analysis
std::vector<scalarexp_t *> hash_src_r, hash_src_l;
std::vector<scalarexp_t *> get_hash_r(){return hash_src_r;}
std::vector<scalarexp_t *> get_hash_l(){return hash_src_l;}
bool use_bloom; // true => bloom filter, false => limited hash
std::string node_type(){return("filter_join"); };
bool makes_transform(){return true;};
std::vector<std::string> external_libs(){
std::vector<std::string> ret;
return ret;
}
void bind_to_schema(table_list *Schema);
col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema);
std::string to_query_string();
// Filter joins are generated as LFTAs; HFTA-side code generation
// must never be invoked on this node type.
std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform){
fprintf(stderr,"INTERNAL ERROR, filter_join_qpn::generate_functor called\n");
exit(1);
}
std::string generate_functor_name(){
fprintf(stderr,"INTERNAL ERROR, filter_join_qpn::generate_functor_name called\n");
exit(1);
}
std::string generate_operator(int i, std::string params){
fprintf(stderr,"INTERNAL ERROR, filter_join_qpn::generate_operator called\n");
exit(1);
}
std::string get_include_file(){return("#include <join_eq_hash_operator.h>\n");};
std::vector<select_element *> get_select_list(){return select_list;};
std::vector<scalarexp_t *> get_select_se_list(){
std::vector<scalarexp_t *> ret;
int i;
for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
return ret;
};
// Used for LFTA only
void append_to_where(cnf_elem *c){
where.push_back(c);
}
std::vector<cnf_elem *> get_where_clause(){return where;}
std::vector<cnf_elem *> get_filter_clause(){return shared_pred;}
cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);
table_def *get_fields();
// It should be feasible to find keys in a filter join
std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
std::vector<string> ret;
return ret;
}
std::vector<tablevar_t *> get_input_tbls();
std::vector<tablevar_t *> get_output_tbls();
std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
int resolve_if_params(ifq_t *ifdb, std::string &err);
virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);
// Ensure that any refs to interface params have been split away.
int count_ifp_refs(std::set<std::string> &ifpnames);
// CONSTRUCTOR
filter_join_qpn(){
};
// Build from a parsed query summary: validate the two sources, check
// that only the first range variable is referenced in the select list,
// then classify each WHERE conjunct (shared prefilter / t0-only /
// t1-only / postfilter / equi-join hash predicate).
filter_join_qpn(query_summary_class *qs,table_list *Schema){
int i,w;
// Get the table name.
// NOTE the colrefs have the table ref (an int)
// embedded in them. Would it make sense
// to grab the whole table list?
from = qs->fta_tree->get_from()->get_table_list();
temporal_var = qs->fta_tree->get_from()->get_colref();
temporal_range = qs->fta_tree->get_from()->get_temporal_range();
if(from.size() != 2){
char tmpstr[200];
sprintf(tmpstr,"ERROR building filter_join_qpn node: query defined over %lu tables, but joins must be between two sources.\n",from.size() );
err_str += tmpstr;
error_code = 1;
}
// Both range variables must read from the same interface.
if(from[0]->get_interface() != from[1]->get_interface()){
err_str += "Error building filter_join_qpn node: all range variables must be sourced from the same interface or interface set ("+from[0]->get_interface()+" vs. "+from[1]->get_interface()+")\n";
error_code = 1;
}
for(int f=0;f<from.size();++f){
int t=from[f]->get_schema_ref();
if(! Schema->is_stream(t)){
err_str += "ERROR in query "+node_name+", the source "+from[f]->get_schema_name()+" is not a stream.\n";
error_code = 1;
}
}
// Get the select list.
select_list = qs->fta_tree->get_sl_vec();
// Verify that only t0 is referenced.
bool bad_ref = false;
for(i=0;i<select_list.size();i++){
vector<int> sel_tbls;
get_tablevar_ref_se(select_list[i]->se,sel_tbls);
if((sel_tbls.size() == 2) || (sel_tbls.size()==1 && sel_tbls[0]==1))
bad_ref = true;
}
if(bad_ref){
err_str += "ERROR building filter_join_qpn node: query references range variable "+from[1]->variable_name+", but only the first range variable ("+from[0]->variable_name+" can be referenced.\n";
error_code = 1;
}
// Get the selection predicate.
where = qs->wh_cnf;
std::vector<cnf_elem *> t0_only, t1_only;
for(w=0;w<where.size();++w){
analyze_cnf(where[w]);
std::vector<int> pred_tbls;
get_tablevar_ref_pr(where[w]->pr,pred_tbls);
// Collect the list of preds by src var,
// extract the shared preds later.
if(pred_tbls.size()==1){
if(pred_tbls[0] == 0){
t0_only.push_back(where[w]);
}else{
t1_only.push_back(where[w]);
}
continue;
}
// refs nothing -- might be sampling, do it as postfilter.
if(pred_tbls.size()==0){
postfilter.push_back(where[w]);
continue;
}
// See if it can be a hash or temporal predicate.
// NOTE: synchronize with the temporality checking
// done at join_eq_hash_qpn::get_fields
if(where[w]->is_atom && where[w]->eq_pred){
std::vector<int> sel_tbls, ser_tbls;
get_tablevar_ref_se(where[w]->pr->get_left_se(),sel_tbls);
get_tablevar_ref_se(where[w]->pr->get_right_se(),ser_tbls);
if(sel_tbls.size()==1 && ser_tbls.size()==1 && sel_tbls[0] != ser_tbls[0]){
// make channel 0 SE on LHS.
if(sel_tbls[0] != 0)
where[w]->pr->swap_scalar_operands();
hash_eq.push_back(where[w]);
// Temporal equi-joins are legal but pointless in a filter
// join, so only warn.
data_type *dtl=where[w]->pr->get_left_se()->get_data_type();
data_type *dtr=where[w]->pr->get_right_se()->get_data_type();
if( (dtl->is_increasing() && dtr->is_increasing()) ||
(dtl->is_decreasing() && dtr->is_decreasing()) )
err_str += "Warning, a filter join should not have join predicates on temporal fields.\n";
continue;
}
}
// All tests failed, fallback is postfilter.
err_str += "ERROR, join predicates in a filter join should have the form (scalar expression in "+from[0]->variable_name+") = (scalar expression in "+from[1]->variable_name+").\n";
error_code = 3;
}
// Classify the t0_only and t1_only preds.
// A t0 pred with an equivalent t1 pred becomes a shared (prefilter)
// pred; the matched t1 preds are dropped from pred_t1.
set<int> matched_pred;
int v;
for(w=0;w<t0_only.size();w++){
for(v=0;v<t1_only.size();++v)
if(is_equivalent_pred_base(t0_only[w]->pr,t1_only[v]->pr,Schema))
break;
if(v<t1_only.size()){
shared_pred.push_back(t0_only[w]);
matched_pred.insert(v);
}else{
pred_t0.push_back(t0_only[w]);
}
}
for(v=0;v<t1_only.size();++v){
if(matched_pred.count(v) == 0)
pred_t1.push_back(t1_only[v]);
}
// Get the parameters
param_tbl = qs->param_tbl;
definitions = qs->definitions;
// Determine the algorithm: a "algorithm" definition of "hash"
// selects the limited hash table; anything else uses a bloom filter.
if(this->get_val_of_def("algorithm") == "hash"){
use_bloom = false;
}else{
use_bloom = true;
}
};
// the following method is used for distributed query optimization
double get_rate_estimate();
// Clone under a new name; the parameter table is deep-copied, the
// predicate lists and select list are shared (shallow) copies.
qp_node* make_copy(std::string suffix){
filter_join_qpn *ret = new filter_join_qpn();
ret->param_tbl = new param_table();
std::vector<std::string> param_names = param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = param_tbl->get_data_type(param_names[pi]);
ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
param_tbl->handle_access(param_names[pi]));
}
ret->definitions = definitions;
ret->node_name = node_name + suffix;
// make shallow copy of all fields
// NOTE(review): use_bloom is not copied here — confirm the copy's
// algorithm choice is re-derived or set by the caller.
ret->where = where;
ret->from = from;
ret->temporal_range = temporal_range;
ret->temporal_var = temporal_var;
ret->select_list = select_list;
ret->shared_pred = shared_pred;
ret->pred_t0 = pred_t0;
ret->pred_t1 = pred_t1;
ret->postfilter = postfilter;
ret->hash_eq = hash_eq;
return ret;
};
void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// TODO : put tests on other operators to ensure they don't read from a watchlist
// TODO : register with : is_partn_compatible pushdown_partn_operator is_pushdown_compatible pushdown_operator ?
class watch_join_qpn: public qp_node{
public:
std::vector<tablevar_t *> from; // Source tables
std::vector<select_element *> select_list; // Select list
std::vector<cnf_elem *> pred_t0; // main (R) preds
	// --- Predicate classification buckets (filled by the constructor) ---
	std::vector<cnf_elem *> pred_t1;	// preds referencing only the watchlist (S) side -- TODO confirm original "(?)" note
	std::map<std::string, cnf_elem *> hash_eq;	// equality predicates on S hash keys, keyed by key field name
	std::vector<cnf_elem *> join_filter;	// preds referencing both R and S, but not usable as hash predicates
	std::vector<cnf_elem *> postfilter;	// preds referencing no table (e.g. sampling) -- applied after the join
	std::vector<std::string> key_flds;	// key fields of the watchlist table (RHS)
	std::vector<cnf_elem *> where;	// all WHERE-clause predicates, unclassified
	// useful for summary analysis
	std::vector<scalarexp_t *> hash_src_r, hash_src_l;
	std::vector<scalarexp_t *> get_hash_r(){return hash_src_r;}
	std::vector<scalarexp_t *> get_hash_l(){return hash_src_l;}
	std::string node_type(){return("watch_join"); };
	bool makes_transform(){return true;};
	// No extra link libraries are needed by this node type.
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}
	void bind_to_schema(table_list *Schema);
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema);
	std::string to_query_string();
	// Code generation is not supported for this node type: the three
	// generate_* entry points below abort if they are ever reached.
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform){
		fprintf(stderr,"INTERNAL ERROR, watch_join_qpn::generate_functor called\n");
		exit(1);
	}
	std::string generate_functor_name(){
		fprintf(stderr,"INTERNAL ERROR, watch_join_qpn::generate_functor_name called\n");
		exit(1);
	}
	std::string generate_operator(int i, std::string params){
		fprintf(stderr,"INTERNAL ERROR, watch_join_qpn::generate_operator called\n");
		exit(1);
	}
	std::string get_include_file(){return("#include <watchlist_operator.h>\n");};
	std::vector<select_element *> get_select_list(){return select_list;};
	// Convenience: project the scalar expressions out of the select list.
	std::vector<scalarexp_t *> get_select_se_list(){
		std::vector<scalarexp_t *> ret;
		int i;
		for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
		return ret;
	};
	// Used for LFTA only
	void append_to_where(cnf_elem *c){
		where.push_back(c);
	}
	std::vector<cnf_elem *> get_where_clause(){return where;}
	// The "filter clause" of a watch join is the set of LHS-only predicates.
	std::vector<cnf_elem *> get_filter_clause(){return pred_t0;}
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);
	table_def *get_fields();
	// It should be feasible to find keys in a watchlist join,
	// but key inference is not implemented yet: always returns empty.
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
		std::vector<string> ret;
		return ret;
	}
	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();
	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	int resolve_if_params(ifq_t *ifdb, std::string &err);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);
	// Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames);
	// CONSTRUCTOR (default: members left empty; used by make_copy)
	watch_join_qpn(){
	};
	// Build a watch-join node from a parsed query summary.
	// Requirements enforced here: exactly two FROM sources, LHS must be a
	// PROTOCOL, RHS must be a WATCHLIST, and every key field of the
	// watchlist must be covered by an equality predicate (hash_eq).
	// On violation, err_str/error_code are set rather than throwing.
	watch_join_qpn(query_summary_class *qs,table_list *Schema){
		int i,w;	// NOTE(review): i appears unused; kept as-is.
		// Get the table name.
		// NOTE the colrefs have the table ref (an int)
		// embedded in them. Would it make sense
		// to grab the whole table list?
		from = qs->fta_tree->get_from()->get_table_list();
		if(from.size() != 2){
			char tmpstr[200];
			// NOTE(review): message says "filter_join_qpn" but this is
			// watch_join_qpn -- looks like a copy/paste slip; confirm.
			sprintf(tmpstr,"ERROR building filter_join_qpn node: query defined over %lu tables, but joins must be between two sources.\n",from.size() );
			err_str += tmpstr;
			error_code = 1;
		}
		// Validate source kinds: LHS (channel 0) protocol, RHS (channel 1) watchlist.
		int t = from[0]->get_schema_ref();
		if(Schema->get_schema_type(t) != PROTOCOL_SCHEMA){
			err_str += "ERROR in query "+node_name+", the LHS of the join must be a PROTOCOL\n";
			error_code = 1;
		}
		t = from[1]->get_schema_ref();
		if(Schema->get_schema_type(t) != WATCHLIST_SCHEMA){
			err_str += "ERROR in query "+node_name+", the RHS of the join must be a WATCHLIST\n";
			error_code = 1;
		}
		key_flds = Schema->get_table(t)->get_keys();
		// Get the select list.
		select_list = qs->fta_tree->get_sl_vec();
		// Get the selection predicate.
		where = qs->wh_cnf;
		std::vector<cnf_elem *> t0_only, t1_only;	// NOTE(review): declared but never used below.
		// Classify each WHERE predicate into one of the buckets:
		// pred_t0 / pred_t1 (single-source), postfilter (no source),
		// hash_eq (equality on a watchlist key), or join_filter (fallback).
		for(w=0;w<where.size();++w){
			analyze_cnf(where[w]);
			std::vector<int> pred_tbls;
			get_tablevar_ref_pr(where[w]->pr,pred_tbls);
			// Collect the list of preds by src var,
			// extract the shared preds later.
			if(pred_tbls.size()==1){
				if(pred_tbls[0] == 0){
					pred_t0.push_back(where[w]);
				}else{
					pred_t1.push_back(where[w]);
				}
				continue;
			}
			// refs nothing -- might be sampling, do it as postfilter.
			if(pred_tbls.size()==0){
				postfilter.push_back(where[w]);
				continue;
			}
			// Must reference both
			// See if it can be a hash predicate.
			if(where[w]->is_atom && where[w]->eq_pred){
				std::vector<int> sel_tbls, ser_tbls;
				get_tablevar_ref_se(where[w]->pr->get_left_se(),sel_tbls);
				get_tablevar_ref_se(where[w]->pr->get_right_se(),ser_tbls);
				if(sel_tbls.size()==1 && ser_tbls.size()==1 && sel_tbls[0] != ser_tbls[0]){
					// make channel 0 SE on LHS.
					if(sel_tbls[0] != 0)
						where[w]->swap_scalar_operands();
					// Must be simple (a colref) on the RHS
					if(where[w]->r_simple){
						string rcol = where[w]->pr->get_right_se()->get_colref()->get_field();
						// Only key fields of the watchlist qualify as hash preds.
						if(std::find(key_flds.begin(), key_flds.end(), rcol) != key_flds.end()){
							hash_eq[rcol] = where[w];
							// Warn on temporal join predicates: joining on
							// monotone fields is flagged, not rejected.
							data_type *dtl=where[w]->pr->get_left_se()->get_data_type();
							data_type *dtr=where[w]->pr->get_right_se()->get_data_type();
							if( (dtl->is_increasing() && dtr->is_increasing()) || (dtl->is_decreasing() && dtr->is_decreasing()) )
								err_str += "Warning, a watchlist join should not have join predicates on temporal fields, query "+node_name+".\n";
							continue;
						}
					}
				}
			}
			// All tests failed, fallback is join_filter.
			join_filter.push_back(where[w]);
		}
		// Every watchlist key field must have an equality predicate;
		// otherwise the hash lookup cannot be built.
		if(key_flds.size() > hash_eq.size()){
			err_str += "Error, in query "+node_name+" the watchlist join does not cover all fields in the watchlist with an equality predicate. Missing fields are";
			for(int k=0;k<key_flds.size();++k){
				if(hash_eq.count(key_flds[k]) < 1){
					err_str += " "+key_flds[k];
				}
			}
			err_str += ".\n";
			error_code = 5;
		}
		// Get the parameters
		param_tbl = qs->param_tbl;
		definitions = qs->definitions;
	};
// the following method is used for distributed query optimization
double get_rate_estimate();
qp_node* make_copy(std::string suffix){
watch_join_qpn *ret = new watch_join_qpn();
ret->param_tbl = new param_table();
std::vector<std::string> param_names = param_tbl->get_param_names();
int pi;
for(pi=0;pi<param_names.size();pi++){
data_type *dt = param_tbl->get_data_type(param_names[pi]);
ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
param_tbl->handle_access(param_names[pi]));
}
ret->definitions = definitions;
ret->node_name = node_name + suffix;
// make shallow copy of all fields
ret->where = where;
ret->from = from;
ret->select_list = select_list;
ret->key_flds = key_flds;
ret->pred_t0 = pred_t0;
ret->pred_t1 = pred_t1;
ret->join_filter = join_filter;
ret->postfilter = postfilter;
ret->hash_eq = hash_eq;
ret->hash_src_r = hash_src_r;
ret->hash_src_l = hash_src_l;
return ret;
};
void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
enum output_file_type_enum {regular, gzip, bzip};

// Query-plan node that materializes the stream produced by a source
// operator to a file, optionally compressed (gzip or bzip2).  The node
// performs no transformation of the data (makes_transform() == false).
class output_file_qpn: public qp_node{
public:
	std::string source_op_name;	// Source table (operator whose output is written)
	std::vector<field_entry *> fields;	// output tuple schema, copied from the source
	ospec_str *output_spec;	// output specification from the query
	vector<tablevar_t *> fm;	// single-element FROM list: the source operator
	std::string hfta_query_name;
	std::string filestream_id;
	bool eat_input;	// if true, consume the input without forwarding it
	std::vector<std::string> params;	// ':'-separated params from the output spec
	bool do_gzip;	// legacy flag: "gzip" appeared among the params
	output_file_type_enum compression_type;
	int n_streams;	// Number of output streams
	int n_hfta_clones;	// number of hfta clones
	int parallel_idx;	// which clone this produces output for.
	std::vector<int> hash_flds;	// fields used to hash the output.

	std::string node_type(){return("output_file_qpn"); };
	bool makes_transform(){return false;};

	// Extra link libraries required by the generated code; depends only
	// on the compression type selected at construction.
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		switch(compression_type){
		case gzip:
			ret.push_back("-lz");
			break;
		case bzip:
			ret.push_back("-lbz2");
			break;
		default:
			break;
		}
		return ret;
	}

	// An output node has no WHERE clause; pushing a predicate here is a
	// planner error and aborts.
	void append_to_where(cnf_elem *c){
		fprintf(stderr, "ERROR, append_to_where called on output_file_qpn, not supported, query %s.\n", node_name.c_str());
		exit(1);
	}

	void bind_to_schema(table_list *Schema){}
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
		col_id_set ret;
		return ret;
	}

	std::string to_query_string(){return "// output_file_operator \n";}
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);

	// Header required by the generated operator: the zfile variant when
	// gzip compression was requested, the plain file operator otherwise.
	// (The switch covers all paths, so no trailing return is needed.)
	std::string get_include_file(){
		switch(compression_type){
		case gzip:
			return("#include <zfile_output_operator.h>\n");
		default:
			return("#include <file_output_operator.h>\n");
		}
	};

	std::vector<cnf_elem *> get_where_clause(){std::vector<cnf_elem *> ret; return ret;};
	std::vector<cnf_elem *> get_filter_clause(){std::vector<cnf_elem *> ret; return ret;};
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns){cplx_lit_table *ret = new cplx_lit_table(); return ret;}
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns){std::vector<handle_param_tbl_entry *> ret; return ret;}

	// Output tuple schema: same fields as the source, as a STREAM table.
	table_def *get_fields(){
		field_entry_list *fel = new field_entry_list();
		int i;
		for(i=0;i<fields.size();++i)
			fel->append_field(fields[i]);
		return new table_def(node_name.c_str(), NULL, NULL, fel, STREAM_SCHEMA);
	}

	// TODO! either bypass the output operator in stream_query,
	// or propagate key information when the output operator is constructed.
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
		std::vector<string> ret;
		return ret;
	}

	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();

	// An output node is never split: it returns itself as a single hfta.
	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx){
		std::vector<qp_node *> ret; ret.push_back(this); hfta_returned = 1; return ret;
	}
	std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm){
		std::vector<table_exp_t *> ret; return ret;
	}
	// Ensure that any refs to interface params have been split away.
	int count_ifp_refs(std::set<std::string> &ifpnames){return 0;}
	int resolve_if_params(ifq_t *ifdb, std::string &err){return 0;};

	// Construct an output node for operator src_op, taking its schema from
	// src_tbl_def.  The compression type and params are parsed out of the
	// output spec (operator_type / ':'-separated operator_param).
	output_file_qpn(std::string src_op, std::string qn, std::string fs_id, table_def *src_tbl_def, ospec_str *ospec, bool ei){
		source_op_name = src_op;
		node_name = source_op_name + "_output";
		filestream_id = fs_id;
		fields = src_tbl_def->get_fields();
		output_spec = ospec;
		fm.push_back(new tablevar_t(source_op_name.c_str()));
		hfta_query_name = qn;
		eat_input = ei;
		// TODO stream checking, but it requires passing Schema to output_file_qpn
		/*
		for(int f=0;f<fm.size();++f){
			int t=fm[f]->get_schema_ref();
			if(! Schema->is_stream(t)){
				err_str += "ERROR in query "+node_name+", the source "+from[f]->get_schema_name()+" is not a stream.\n";
				error_code = 1;
			}
		}
		*/
		do_gzip = false;
		compression_type = regular;
		if(ospec->operator_type == "zfile")
			compression_type = gzip;
		n_streams = 1;
		parallel_idx = 0;
		n_hfta_clones = 1;
		// Parse the ':'-separated parameter string from the output spec.
		char buf[1000];
		strncpy(buf, output_spec->operator_param.c_str(),1000);
		buf[999] = '\0';	// strncpy does not guarantee termination
		char *words[100];
		int nwords = split_string(buf, ':', words,100);
		int i;
		for(i=0;i<nwords;i++){
			params.push_back(words[i]);
		}
		for(i=0;i<params.size();i++){
			if(params[i] == "gzip")
				do_gzip = true;
		}
	}

	// Set output splitting parameters.
	// np: number of hfta clones, ix: index of this clone, ns: number of
	// output streams, split_flds: ':'-separated field names to hash on.
	// Returns true (and appends to err_report) if a splitting field is
	// not part of the output schema.
	bool set_splitting_params(int np, int ix, int ns, std::string split_flds, std::string &err_report){
		n_streams = ns;
		n_hfta_clones = np;
		parallel_idx = ix;
		if(split_flds != ""){
			string err_flds = "";
			char *tmpstr = strdup(split_flds.c_str());
			char *words[100];
			int nwords = split_string(tmpstr,':',words,100);
			int i,j;
			for(i=0;i<nwords;++i){
				string target = words[i];	// copy out before tmpstr is freed
				for(j=0;j<fields.size();++j){
					if(fields[j]->get_name() == target){
						hash_flds.push_back(j);
						break;
					}
				}
				if(j==fields.size()){
					err_flds += " "+target;
				}
			}
			free(tmpstr);	// BUGFIX: strdup'ed buffer was leaked before
			if(err_flds != ""){
				err_report += "ERROR in "+hfta_query_name+", a file output operator needs to split the output but these splitting fields are not part of the output:"+err_flds;
				return true;
			}
		}
		return false;
	}

	// the following method is used for distributed query optimization
	double get_rate_estimate(){return 1.0;}

	// Clone via the public constructor; splitting parameters are NOT
	// copied (the clone starts with the defaults set by the constructor).
	qp_node* make_copy(std::string suffix){
		output_file_qpn *ret = new output_file_qpn(source_op_name, hfta_query_name, filestream_id, this->get_fields(), output_spec, eat_input);
		return ret;
	}
	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema){}
};
//
// ---------------------------------------------
// Select, group-by, aggregate, sampling.
// Representing
// Select SE_1, ..., SE_k
// From T
// Where predicate
// Group By gb1, ..., gb_n
// [Subgroup gb_i1, .., gb_ik]
// Cleaning_when predicate
// Cleaning_by predicate
// Having predicate
//
// For now, must have group-by variables and aggregates.
// The scalar expressions which are output must be a function
// of the group-by variables and the aggregates.
// The group-by variables can be references to columns of T,
// or they can be scalar expressions.
// Select / group-by / aggregate / having / cleaning-when / cleaning-by
// query-plan node over a single stream source.
class sgahcwcb_qpn: public qp_node{
public:
	tablevar_t *table_name;	// source table
	std::vector<cnf_elem *> where;	// selection predicate
	std::vector<cnf_elem *> having;	// post-aggregation predicate
	std::vector<select_element *> select_list;	// se's of output
	gb_table gb_tbl;	// Table of all group-by attributes.
	std::set<int> sg_tbl;	// Names of the superGB attributes
	aggregate_table aggr_tbl;	// Table of all referenced aggregates.
	std::set<std::string> states_refd;	// states ref'd by stateful fcns.
	std::vector<cnf_elem *> cleanby;
	std::vector<cnf_elem *> cleanwhen;
	std::vector<scalarexp_t *> gb_sources;	// pre-compute for partitioning.
	std::vector<scalarexp_t *> get_gb_sources(){return gb_sources;}

	std::string node_type(){return("sgahcwcb_qpn"); };
	bool makes_transform(){return true;};
	std::vector<std::string> external_libs(){
		std::vector<std::string> ret;
		return ret;
	}
	void bind_to_schema(table_list *Schema);
	// Not implemented for this node type; reaching it is a planner bug.
	col_id_set get_colrefs(bool ext_fcns_only,table_list *Schema){
		fprintf(stderr,"INTERNAL ERROR, calling sgahcwcb_qpn::get_colrefs\n");
		exit(1);
	}
	std::string to_query_string();
	std::string generate_functor(table_list *schema, ext_fcn_list *Ext_fcns, std::vector<bool> &needs_xform);
	std::string generate_functor_name();
	std::string generate_operator(int i, std::string params);
	std::string get_include_file(){return("#include <clean_operator.h>\n");};

	std::vector<select_element *> get_select_list(){return select_list;};
	// Convenience: project the scalar expressions out of the select list.
	std::vector<scalarexp_t *> get_select_se_list(){
		std::vector<scalarexp_t *> ret;
		int i;
		for(i=0;i<select_list.size();++i) ret.push_back(select_list[i]->se);
		return ret;
	};
	std::vector<cnf_elem *> get_where_clause(){return where;};
	std::vector<cnf_elem *> get_filter_clause(){return where;};
	std::vector<cnf_elem *> get_having_clause(){return having;};
	gb_table *get_gb_tbl(){return &gb_tbl;};
	aggregate_table *get_aggr_tbl(){return &aggr_tbl;};
	cplx_lit_table *get_cplx_lit_tbl(ext_fcn_list *Ext_fcns);
	std::vector<handle_param_tbl_entry *> get_handle_param_tbl(ext_fcn_list *Ext_fcns);
	// table which represents output tuple.
	table_def *get_fields();
	// TODO Key extraction should be feasible but I'll defer the issue.
	std::vector<string> get_tbl_keys(std::vector<std::string> &partial_keys){
		std::vector<string> ret;
		return ret;
	}
	std::vector<tablevar_t *> get_input_tbls();
	std::vector<tablevar_t *> get_output_tbls();
	void append_to_where(cnf_elem *c){
		where.push_back(c);
	}

	// Default constructor: members left empty; used by make_copy.
	sgahcwcb_qpn(){
	};
	// Build the node from a parsed query summary.  Requires exactly one
	// stream source; gb/aggregate tables are copied (not shared) because
	// they may be modified later.
	sgahcwcb_qpn(query_summary_class *qs,table_list *Schema){
		// Get the table name.
		// NOTE the colrefs have the tablevar ref (an int)
		// embedded in them. Would it make sense
		// to grab the whole table list?
		tablevar_list_t *fm = qs->fta_tree->get_from();
		std::vector<tablevar_t *> tbl_vec = fm->get_table_list();
		if(tbl_vec.size() != 1){
			char tmpstr[200];
			sprintf(tmpstr,"INTERNAL ERROR building SGAHCWCB node: query defined over %lu tables.\n",tbl_vec.size() );
			err_str += tmpstr;	// append, for consistency with the other node types
			error_code = 1;
		}
		table_name = (tbl_vec[0]);
		int t = tbl_vec[0]->get_schema_ref();
		if(! Schema->is_stream(t)){
			err_str += "ERROR in query "+node_name+", the source "+table_name->get_schema_name()+" is not a stream.\n";
			error_code = 1;
		}
		// Get the select list.
		select_list = qs->fta_tree->get_sl_vec();
		// Get the selection and having predicates.
		where = qs->wh_cnf;
		having = qs->hav_cnf;
		cleanby = qs->cb_cnf;
		cleanwhen = qs->cw_cnf;
		// Build a new GB var table (don't share, might need to modify)
		int g;
		for(g=0;g<qs->gb_tbl->size();g++){
			gb_tbl.add_gb_var(qs->gb_tbl->get_name(g),
				qs->gb_tbl->get_tblvar_ref(g), qs->gb_tbl->get_def(g),
				qs->gb_tbl->get_reftype(g)
			);
		}
		// Build a new aggregate table. (don't share, might need
		// to modify).
		int a;
		for(a=0;a<qs->aggr_tbl->size();a++){
			aggr_tbl.add_aggr(
				qs->aggr_tbl->duplicate(a)
			);
		}
		sg_tbl = qs->sg_tbl;
		states_refd = qs->states_refd;
		// Get the parameters
		param_tbl = qs->param_tbl;
		// BUGFIX: copy the definitions as the other node-type constructors
		// do; make_copy() below propagates them, so they must be populated.
		definitions = qs->definitions;
	};

	std::vector<qp_node *> split_node_for_fta(ext_fcn_list *Ext_fcns, table_list *Schema, int &hfta_returned, ifq_t *ifdb, int n_virtual_ifaces, int hfta_parallelism, int hfta_idx);
	virtual std::vector<table_exp_t *> extract_opview(table_list *Schema, std::vector<query_node *> &qnodes, opview_set &opviews, std::string rootnm, std::string silo_nm);
	// Ensure that any refs to interface params have been split away.
	// CURRENTLY not allowed by split_node_for_fta
	int count_ifp_refs(std::set<std::string> &ifpnames){return 0;}
	int resolve_if_params(ifq_t *ifdb, std::string &err){return 0;}

	// the following method is used for distributed query optimization
	double get_rate_estimate();

	// Clone under a new name; parameter table is deep-copied, everything
	// else is a shallow copy.
	qp_node* make_copy(std::string suffix){
		sgahcwcb_qpn *ret = new sgahcwcb_qpn();
		ret->param_tbl = new param_table();
		std::vector<std::string> param_names = param_tbl->get_param_names();
		int pi;
		for(pi=0;pi<param_names.size();pi++){
			data_type *dt = param_tbl->get_data_type(param_names[pi]);
			ret->param_tbl->add_param(param_names[pi],dt->duplicate(),
							param_tbl->handle_access(param_names[pi]));
		}
		ret->definitions = definitions;
		ret->node_name = node_name + suffix;
		// make shallow copy of all fields
		ret->where = where;
		ret->having = having;
		ret->select_list = select_list;
		ret->gb_tbl = gb_tbl;
		ret->aggr_tbl = aggr_tbl;
		ret->sg_tbl = sg_tbl;
		ret->states_refd = states_refd;
		ret->cleanby = cleanby;
		ret->cleanwhen = cleanwhen;
		return ret;
	};
	void create_protocol_se(vector<qp_node *> q_sources, table_list *Schema);
};
// Build the query-plan node(s) for a parsed query summary.
std::vector<qp_node *> create_query_nodes(query_summary_class *qs,table_list *Schema);
// Sanitize a name in place (implementation elsewhere).
void untaboo(string &s);
// Build a table definition describing the output of a select list.
table_def *create_attributes(string tname, vector<select_element *> &select_list);
#endif
| 32.502864 | 248 | 0.695836 | [
"object",
"vector",
"transform"
] |
c1053f3b895a60251f2b4ebf487c79518bb3ebde | 11,517 | h | C | libwfs/request/GetPropertyValue.h | nakkim/smartmet-plugin-wfs | 851334dd3be1a24b9708f66696f088fdc857a999 | [
"MIT"
] | null | null | null | libwfs/request/GetPropertyValue.h | nakkim/smartmet-plugin-wfs | 851334dd3be1a24b9708f66696f088fdc857a999 | [
"MIT"
] | 2 | 2018-04-17T10:02:46.000Z | 2019-10-21T08:57:55.000Z | libwfs/request/GetPropertyValue.h | nakkim/smartmet-plugin-wfs | 851334dd3be1a24b9708f66696f088fdc857a999 | [
"MIT"
] | 2 | 2017-05-10T12:03:51.000Z | 2021-07-06T07:05:25.000Z | #pragma once
#include "PluginImpl.h"
#include "QueryBase.h"
#include "RequestBase.h"
#include "StandardPresentationParameters.h"
#include "StoredQueryMap.h"
#include "XPathSnapshot.h"
#include <macgyver/TimedCache.h>
#include <xercesc/dom/DOMDocument.hpp>
namespace SmartMet
{
namespace Plugin
{
namespace WFS
{
namespace Request
{
/**
* @brief Represents GetPropertyValue request
*/
class GetPropertyValue : public RequestBase
{
private:
GetPropertyValue(const std::string& language,
PluginImpl& plugin_impl);
public:
virtual ~GetPropertyValue();
virtual RequestType get_type() const;
/**
* @brief Execute request and output result to stream
*
* Throws SmartMet::Plugin::WFS::WfsException in case of an error.
*/
virtual void execute(std::ostream& ost) const;
/**
* @brief Create request from http GET request
*
* @param language output language
* @param http_request parsed http get request
* @param plugin_impl plugin data for spp, map etc.
* @param query_cache cached stored query responses.
*/
static boost::shared_ptr<GetPropertyValue> create_from_kvp(
const std::string& language,
const SmartMet::Spine::HTTP::Request& http_request,
PluginImpl& plugin_impl);
/**
* @brief Create request from http GET request
*
* @param language output language
* @param document request parsed from http-post-request
* @param plugin_impl plugin data for spp, map etc.
* @param query_cache cached stored query responses.
*/
static boost::shared_ptr<GetPropertyValue> create_from_xml(const std::string& language,
const xercesc::DOMDocument& document,
PluginImpl& plugin_impl);
/**
* @brief Get response expiration time
*
* @retval Smallest expiration time from queries, or default from plugin data, if no expiration
* times found.
*/
virtual int get_response_expires_seconds() const;
private:
/**
* @brief Get cached responses for stored queries
*
* @retval True, if all queries have cached response, otherwise false.
*/
bool get_cached_responses();
/**
* @brief Get Start index and count from Standard Presentattion Parameters
*
* @param max_members maximum number of members in output
* @param start_index index of first member from responses to go output
*/
void initialize(boost::optional<int>& max_members, boost::optional<int>& start_index) const;
/**
* @brief Add hits and members received etc. to final output
*
* @param result result tree
* @param cumulative_num_returned number of members returned, i.e. included in result tree
* @param cumulative_num_matched number of hits in all responsens, i.e. those not included in
* result tree are included in this number
* @param is_timestamp_set value "false", timestamp has not been already set, so set it here.
* @param is_schemalocation_set value "false", schema location has not been already set, so set
* it here.
*/
void finalize(boost::shared_ptr<xercesc::DOMDocument> result,
const int cumulative_num_returned,
const int cumulative_num_matched,
const bool is_timestamp_set,
const bool is_schemalocation_set) const;
/**
* @brief Add collected responses to result tree
*
* @param result result tree
* @param query_responses collected responses from queries
* @param cumulative_num_returned keep count of number of members returned, i.e. included in
* result tree
* @param cumulative_num_matched keep count of number of hits in all responsens, i.e. those not
* included in result tree are included in this number
* @param is_timestamp_set value changes to "true", when valid timestamp has been copied from
* query response to result tree.
* @param is_schemalocation_set value changes to "true", when valid schema location has been
* copied from query response to result tree.
*
* Throws xercesc::DOMException or SmartMet::Plugin::WFS::Xml::XmlError in case of an error.
*/
void add_responses(boost::shared_ptr<xercesc::DOMDocument> result,
const std::vector<std::string>& query_responses,
int& cumulative_num_returned,
int& cumulative_num_matched,
bool& is_timestamp_set,
bool& is_schemalocation_set) const;
/**
* @brief Perform response filtering, if this is Ad Hoc-query and filter has been defined.
* Otherwise simply pass thru extract_property-method.
*
* @param result result tree
* @param response response from query
* @param cumulative_num_returned keep count of number of members returned, i.e. included in
* result tree
* @param cumulative_num_matched keep count of number of hits in all responsens, i.e. those not
* included in result tree are included in this number
* @param max_members maximum number of members still needed in output, i.e. this parameter
* decreases as output progresses.
* @param start_index current index of first member from responses to go output, i.e. this
* parameter decreases as output progresses.
* @param query is the target of filtering
* @param is_timestamp_set value changes to "true", when valid timestamp has been copied from
* query response to result tree.
* @param is_schemalocation_set value changes to "true", when valid schema location has been
* copied from query response to result tree.
*/
void filter(boost::shared_ptr<xercesc::DOMDocument> result,
const std::string& response,
int& cumulative_num_returned,
int& cumulative_num_matched,
boost::optional<int>& max_members,
boost::optional<int>& start_index,
const boost::shared_ptr<QueryBase> query,
bool& is_timestamp_set,
bool& is_schemalocation_set) const;
/**
* @brief Extract only desired property. (perform XPath-query to get only what is wanted)
*
* @param result result tree
* @param response response from query, possibly already filtered.
* @param cumulative_num_returned keep count of number of members returned, i.e. included in
* result tree
* @param cumulative_num_matched keep count of number of hits in all responsens, i.e. those not
* included in result tree are included in this number
* @param max_members maximum number of members still needed in output, i.e. this parameter
* decreases as output progresses.
* @param start_index current index of first member from responses to go output, i.e. this
* parameter decreases as output progresses.
* @param is_timestamp_set value changes to "true", when valid timestamp has been copied from
* query response to result tree.
* @param is_schemalocation_set value changes to "true", when valid schema location has been
* copied from query response to result tree.
*/
void extract_property(boost::shared_ptr<xercesc::DOMDocument> result,
const std::string& response,
int& cumulative_num_returned,
int& cumulative_num_matched,
boost::optional<int>& max_members,
boost::optional<int>& start_index,
bool& is_timestamp_set,
bool& is_schemalocation_set) const;
/**
* @brief Actually append the filtered, extracted members to result tree and keep tab of
* cumulatives
*
* @param result result tree
* @param xpath_snapshot response parsed.
* @param cumulative_num_returned keep count of number of members returned, i.e. included in
* result tree
* @param max_members maximum number of members still needed in output, i.e. this parameter
* decreases as output progresses.
* @param start_index current index of first member from responses to go output, i.e. this
* parameter decreases as output progresses.
* @param is_timestamp_set value changes to "true", when valid timestamp has been copied from
* query response to result tree.
* @param is_schemalocation_set value changes to "true", when valid schema location has been
* copied from query response to result tree.
*/
void append_members(boost::shared_ptr<xercesc::DOMDocument> result,
Xml::XPathSnapshot& xpath_snapshot,
int& cumulative_num_returned,
boost::optional<int>& max_members,
boost::optional<int>& start_index,
bool& is_timestamp_set,
bool& is_schemalocation_set) const;
/**
* @brief Calculate what to append in result tree, based on what was set in Standard
* Presentation Parameters
*
* @param max_members maximum number of members still needed in output, i.e. this parameter
* decreases as output progresses.
* @param start_index current index of first member from responses to go output, i.e. this
* parameter decreases as output progresses.
* @param num_members number of members in current response.
* @param first index of first member to include to result tree.
* @param last index of last member to include to result tree.
*/
void calculate_loop_limits(const boost::optional<int>& max_members,
const boost::optional<int>& start_index,
const int num_members,
int& first,
int& last) const;
/**
* @brief Copy schema location
*
* @param source copy source, i.e. parsed response.
* @param destination destination, i.e. result tree.
* @retval True, if source had valid schema location attribute, and it was copied, otherwise
* false.
*/
bool copy_schema_location(const xercesc::DOMElement* source,
xercesc::DOMElement* destination) const;
/**
* @brief Copy timestamp
*
* @param source copy source, i.e. parsed response.
* @param destination destination, i.e. result tree.
* @retval True, if source had valid timestamp attribute, and it was copied, otherwise false.
*/
bool copy_timestamp(const xercesc::DOMElement* source, xercesc::DOMElement* destination) const;
/**
* @brief Collects responses of all queries from the GetValueProperty request as strings
*
* @param query_responses A vector where to put the response strings
* @retval true at least one query succeeded
* @retval false none of queries succeeded
*/
bool collect_query_responses(std::vector<std::string>& query_responses) const;
/**
* @brief Substitutes apikey and pushes to query responses
*
* @param query_responses A vector where to put the response strings
* @param response Response string
*/
void add_query_responses(std::vector<std::string>& query_responses,
const std::string& response) const;
private:
std::vector<boost::shared_ptr<QueryBase> > queries;
std::string xpath_string;
StandardPresentationParameters spp;
QueryResponseCache& query_cache;
bool fast;
};
} // namespace Request
} // namespace WFS
} // namespace Plugin
} // namespace SmartMet
| 41.27957 | 99 | 0.673092 | [
"vector"
] |
c1064d309235a1434cd099f099ea666f920281e5 | 938 | h | C | Ch17/02.Kernel64/Source/InterruptHandler.h | ybjeon01/my-operating-system-programming-skills | 02b233bac30acd6bc530889250fc3cacc861e0f3 | [
"MIT"
] | null | null | null | Ch17/02.Kernel64/Source/InterruptHandler.h | ybjeon01/my-operating-system-programming-skills | 02b233bac30acd6bc530889250fc3cacc861e0f3 | [
"MIT"
] | null | null | null | Ch17/02.Kernel64/Source/InterruptHandler.h | ybjeon01/my-operating-system-programming-skills | 02b233bac30acd6bc530889250fc3cacc861e0f3 | [
"MIT"
] | null | null | null | #ifndef __INTERRUPTHANDLER_H__
#define __INTERRUPTHANDLER_H__
#include "Types.h"
// common exception handler for exceptions that do not have handler
// info:
// This handler is for reserved exception/interrupt: 0 ~ 31
// params:
// iVectorNumber: IDT gate descriptor index number
// qwErrorCode: error code for some exceptions.
void kCommonExceptionHandler(int iVectorNumber, QWORD qwErrorCode);
// common Interrupt handler for interrupts that do not have handler
// info:
// This handler is for interrupts whose vector number is over 31
// params:
// iVectorNumber: IDT gate descriptor index number
void kCommonInterruptHandler(int iVectorNumber);
// PIC keyboard Interrupt Handler. This function prints how many this
// interrupt is received from booting to the top right corner
// params:
// iVectorNumber: IDT gate descriptor index number
void kKeyboardHandler(int iVectorNumber);
#endif /* __INTERRUPTHANDLER_H__ */ | 34.740741 | 69 | 0.779318 | [
"vector"
] |
c10c168c72a4fe4380626944ec1f05057f0de943 | 27,255 | h | C | platform/mcu/msp432p4xx/ti/drivers/Power.h | ruoranluomu/AliOS-Things | d0f3431bcacac5b61645e9beb231a0a53be8078b | [
"Apache-2.0"
] | 4 | 2019-11-22T04:28:29.000Z | 2021-07-06T10:45:10.000Z | platform/mcu/msp432p4xx/ti/drivers/Power.h | ewfweftwer/AliOS-Things | 26a5c1a2d6b1771590f5d302f0b2e7fe2fcf843e | [
"Apache-2.0"
] | 2 | 2022-03-29T05:16:50.000Z | 2022-03-29T05:16:50.000Z | vendors/ti/SimpleLink_CC32xx/v2_10_00_04/source/ti/drivers/Power.h | ictk-solution-dev/amazon-freertos | cc76512292ddfb70bba3030dbcb740ef3c6ead8b | [
"MIT"
] | 6 | 2019-08-30T09:43:03.000Z | 2021-04-05T04:20:41.000Z | /*
* Copyright (c) 2015-2018, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of Texas Instruments Incorporated nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** ============================================================================
* @file Power.h
*
* @brief Power Manager interface
*
* The Power header file should be included in an application as follows:
* @code
* #include <ti/drivers/Power.h>
* @endcode
*
* # Operation #
* The Power Manager facilitates the transition of the MCU from active states
* to sleep states and vice versa. It provides other drivers the
* ability to set and release dependencies on hardware resources, and keeps
* reference counts on each resource to know when to enable or disable the
* resource. It provides drivers the ability to register callback functions
* to be invoked upon specific power events. In addition, drivers and
* applications can set or release constraints to prevent the MCU from
* transitioning into specific active or sleep states.
*
* <B>The Power Manager APIs and configuration parameters are described here.
* For a detailed description of terms and concepts, and usage by different
* types of software components (peripheral drivers, power policies,
* and applications) please see the
* <a href='../../Power_Management.pdf'>SimpleLink SDK Power Management User's Guide</a>.</B>
*
* ============================================================================
*/
#ifndef ti_drivers_Power__include
#define ti_drivers_Power__include
#include <stdbool.h>
#include <stdint.h>
#include <ti/drivers/utils/List.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Power latency types */
#define Power_TOTAL (1U) /*!< total latency */
#define Power_RESUME (2U) /*!< resume latency */
/* Power notify responses */
#define Power_NOTIFYDONE (0) /*!< OK, notify completed */
#define Power_NOTIFYERROR (-1) /*!< an error occurred during notify */
/* Power status */
#define Power_SOK (0) /*!< OK, operation succeeded */
#define Power_EFAIL (-1) /*!< general failure */
#define Power_EINVALIDINPUT (-2) /*!< invalid data value */
#define Power_EINVALIDPOINTER (-3) /*!< invalid pointer */
#define Power_ECHANGE_NOT_ALLOWED (-4) /*!< change is not allowed */
#define Power_EBUSY (-5) /*!< busy with another transition */
/* Power transition states */
#define Power_ACTIVE (1U) /*!< normal active state */
#define Power_ENTERING_SLEEP (2U) /*!< entering a sleep state */
#define Power_EXITING_SLEEP (3U) /*!< exiting a sleep state */
#define Power_ENTERING_SHUTDOWN (4U) /*!< entering a shutdown state */
#define Power_CHANGING_PERF_LEVEL (5U) /*!< moving to new performance level */
/*!
* @brief Power policy initialization function pointer
*/
typedef void (*Power_PolicyInitFxn)(void);
/*!
* @brief Power policy function pointer
*/
typedef void (*Power_PolicyFxn)(void);
/*!
* @brief Power notify function pointer
*/
typedef int_fast16_t (*Power_NotifyFxn)(uint_fast16_t eventType,
uintptr_t eventArg, uintptr_t clientArg);
/*!
* @brief Power notify object structure.
*
* This struct specification is for internal use. Notification clients must
* pre-allocate a notify object when registering for a notification;
* Power_registerNotify() will take care initializing the internal elements
* appropriately.
*/
typedef struct Power_NotifyObj_ {
List_Elem link; /*!< for placing on the notify list */
uint_fast16_t eventTypes; /*!< the event type */
Power_NotifyFxn notifyFxn; /*!< notification function */
uintptr_t clientArg; /*!< argument provided by client */
} Power_NotifyObj;
/*!
* @brief Disable the configured power policy from running when the CPU is
* idle
*
* Calling this function clears the flag that controls whether the configured
* power policy function is invoked on each pass through the Idle loop.
* This function call will override both a 'true' setting of the
* "enablePolicy" setting in the Power Manager configuration object, as well
* as a previous runtime call to the Power_enablePolicy() function.
*
* @return The old value of "enablePolicy".
*
* @sa Power_enablePolicy
*/
bool Power_disablePolicy(void);
/*!
* @brief Enable the configured power policy to run when the CPU is idle
*
* Calling this function sets a flag that will cause the configured power
* policy function to be invoked on each pass through the Idle loop. This
* function call will override both a 'false' setting of the "enablePolicy"
* setting in the Power Manager configuration object, as well as a previous
* runtime call to the Power_disablePolicy() function.
*
* For some processor families, automatic power transitions can make initial
* application development more difficult, as well as being at odds with
* basic debugger operation. This convenience function allows an application
* to be initially configured, built, and debugged, without automatic power
* transitions during idle time. When the application is found to be working,
* this function can be called (typically in main()) to enable the policy
* to run, without having to change the application configuration.
*
* @sa Power_disablePolicy
*/
void Power_enablePolicy(void);
/*!
* @brief Get the constraints that have been declared with Power
*
* This function returns a bitmask indicating the constraints that are
* currently declared to the Power Manager (via previous calls to
* Power_setConstraint()). For each constraint that is currently declared,
* the corresponding bit in the bitmask will be set. For example, if two
* clients have independently declared two different constraints, the returned
* bitmask will have two bits set.
*
* Constraint identifiers are device specific, and defined in the
* device-specific Power include file. For example, the constraints for
* MSP432 are defined in PowerMSP432.h. The corresponding bit in the
* bitmask returned by this function can be derived by a left-shift using
* the constraint identifier. For example, for MSP432, for the corresponding
* bit for the PowerMSP432_DISALLOW_SLEEP constraint, the bit position is
* determined by the operation: (1 << PowerMSP432_DISALLOW_SLEEP)
*
* @return A bitmask of the currently declared constraints.
*
* @sa Power_setConstraint
*/
uint_fast32_t Power_getConstraintMask(void);
/*!
* @brief Get the current dependency count for a resource
*
* This function returns the number of dependencies that are currently
* declared upon a resource.
*
* Resource identifiers are device specific, and defined in the
* device-specific Power include file. For example, the resources for
* CC32XX are defined in PowerCC32XX.h.
*
* @param resourceId resource id
*
* @return The number of dependencies declared for the resource.
* Power_EINVALIDINPUT if the resourceId is invalid.
*
* @sa Power_setDependency
*/
int_fast16_t Power_getDependencyCount(uint_fast16_t resourceId);
/*!
* @brief Get the current performance level
*
* This function returns the current device performance level in effect.
*
* If performance scaling is not supported for the device, this function
* will always indicate a performance level of zero.
*
* @return The current performance level.
*
* @sa Power_setPerformanceLevel
*/
uint_fast16_t Power_getPerformanceLevel(void);
/*!
* @brief Get the hardware transition latency for a sleep state
*
* This function reports the minimal hardware transition latency for a specific
* sleep state. The reported latency is that for a direct transition, and does
* not include any additional latency that might occur due to software-based
* notifications.
*
* Sleep states are device specific, and defined in the device-specific Power
* include file. For example, the sleep states for CC32XX are defined in
* PowerCC32XX.h.
*
* This function is typically called by the power policy function. The latency
* is reported in units of microseconds.
*
* @param sleepState the sleep state
*
* @param type the latency type (Power_TOTAL or Power_RESUME)
*
* @return The latency value, in units of microseconds.
*/
uint_fast32_t Power_getTransitionLatency(uint_fast16_t sleepState,
uint_fast16_t type);
/*!
* @brief Get the current transition state of the Power Manager
*
* This function returns the current transition state for the Power Manager.
* For example, when no transitions are in progress, a status of Power_ACTIVE
* is returned. Power_ENTERING_SLEEP is returned during the transition to
* sleep, before sleep has occurred. Power_EXITING_SLEEP is returned
* after wakeup, as the device is being transitioned back to Power_ACTIVE.
* And Power_CHANGING_PERF_LEVEL is returned when a change is being made
* to the performance level.
*
* @return The current Power Manager transition state.
*/
uint_fast16_t Power_getTransitionState(void);
/*!
* @brief Power function to be added to the application idle loop
*
* This function should be added to the application idle loop. (The method to
* do this depends upon the operating system being used.) This function
* will invoke the configured power policy function when appropriate. The
* specific policy function to be invoked is configured as the 'policyFxn'
* in the application-defined Power configuration object.
*
*/
void Power_idleFunc(void);
/*!
* @brief Power initialization function
*
* This function initializes Power Manager internal state. It must be called
* prior to any other Power API. This function is normally called as part
* of TI-RTOS board initialization, for example, from within the
* \<board name\>_initGeneral() function.
*
* @return Power_SOK
*/
int_fast16_t Power_init(void);
/*!
* @brief Register a function to be called upon a specific power event
*
* This function registers a function to be called when a Power event occurs.
* Registrations and the corresponding notifications are processed in
* first-in-first-out (FIFO) order. The function registered must behave as
* described later, below.
*
* The pNotifyObj parameter is a pointer to a pre-allocated, opaque object
* that will be used by Power to support the notification. This object could
* be dynamically allocated, or declared as a global object. This function
* will properly initialized the object's fields as appropriate; the caller
* just needs to provide a pointer to this pre-existing object.
*
* The eventTypes parameter identifies the type of power event(s) for which
* the notify function being registered is to be called. (Event identifiers are
* device specific, and defined in the device-specific Power include file.
* For example, the events for MSP432 are defined in PowerMSP432.h.) The
* eventTypes parameter for this function call is treated as a bitmask, so
* multiple event types can be registered at once, using a common callback
* function. For example, to call the specified notifyFxn when both
* the entering deepsleep and awake from deepsleep events occur, eventTypes
* should be specified as: PowerMSP432_ENTERING_DEEPSLEEP |
* PowerMSP432_AWAKE_DEEPSLEEP
*
* The notifyFxn parameter specifies a callback function to be called when the
* specified Power event occurs. The notifyFxn must implement the following
* signature:
* status = notifyFxn(eventType, eventArg, clientArg);
*
* Where: eventType identifies the event being signalled, eventArg is an
* optional event-specific argument, and clientArg is an abitrary argument
* specified by the client at registration. Note that multipe types of events
* can be specified when registering the notification callback function,
* but when the callback function is actually called by Power, only a
* single eventType will be specified for the callback (i.e., the current
* event). The status returned by the client notification function must
* be one of the following constants: Power_NOTIFYDONE if the client processed
* the notification successfully, or Power_NOTIFYERROR if an error occurred
* during notification.
*
* The clientArg parameter is an arbitrary, client-defined argument to be
* passed back to the client upon notification. This argument may allow one
* notify function to be used by multiple instances of a driver (that is, the
* clientArg can be used to identify the instance of the driver that is being
* notified).
*
* @param pNotifyObj notification object (preallocated by caller)
*
* @param eventTypes event type or types
*
* @param notifyFxn client's callback function
*
* @param clientArg client-specified argument to pass with notification
*
* @return Power_SOK on success.
* Power_EINVALIDPOINTER if either pNotifyObj or notifyFxn are NULL.
*
* @sa Power_unregisterNotify
*/
int_fast16_t Power_registerNotify(Power_NotifyObj *pNotifyObj,
uint_fast16_t eventTypes,
Power_NotifyFxn notifyFxn,
uintptr_t clientArg);
/*!
* @brief Release a previously declared constraint
*
* This function releases a constraint that was previously declared with
* Power_setConstraint(). For example, if a device driver is starting an I/O
* transaction and wants to prohibit activation of a sleep state during the
* transaction, it uses Power_setConstraint() to declare the constraint,
* before starting the transaction. When the transaction completes, the
* driver calls this function to release the constraint, to allow the Power
* manager to once again allow transitions to sleep.
*
* Constraint identifiers are device specific, and defined in the
* device-specific Power include file. For example, the constraints for
* MSP432 are defined in PowerMSP432.h.
*
* Only one constraint can be specified with each call to this function; to
* release multiple constraints this function must be called multiple times.
*
* It is critical that clients call Power_releaseConstraint() when operational
* constraints no longer exists. Otherwise, Power may be left unnecessarily
* restricted from activating power savings.
*
* @param constraintId constraint id
*
* @return <b>CC26XX/CC13XX only</b>: Power_SOK. To minimize code size
* asserts are used internally to check that the constraintId is valid,
* and that the constraint count is not already zero;
* the function always returns Power_SOK.
*
* @return <b>All other devices</b>: Power_SOK on success,
* Power_EINVALIDINPUT if the constraintId is invalid, and Power_EFAIL
* if the constraint count is already zero.
*
* @sa Power_setConstraint
*/
int_fast16_t Power_releaseConstraint(uint_fast16_t constraintId);
/*!
* @brief Release a previously declared dependency
*
* This function releases a dependency that had been previously declared upon
* a resource (by a call to Power_setDependency()).
*
* Resource identifiers are device specific, and defined in the
* device-specific Power include file. For example, the resources for
* CC32XX are defined in PowerCC32XX.h.
*
* @param resourceId resource id
*
* @return <b>CC26XX/CC13XX only</b>: Power_SOK. To minimize code size
* asserts are used internally to check that the resourceId is valid,
* and that the resource reference count is not already zero;
* the function always returns Power_SOK.
*
* @return <b>All other devices</b>: Power_SOK on success,
* Power_EINVALIDINPUT if the resourceId is invalid, and Power_EFAIL
* if the resource reference count is already zero.
*
* @sa Power_setDependency
*/
int_fast16_t Power_releaseDependency(uint_fast16_t resourceId);
/*!
* @brief Declare an operational constraint
*
* Before taking certain actions, the Power Manager checks to see if the
* requested action would conflict with a client-declared constraint. If the
* action does conflict, Power will not proceed with the request. This is the
* function that allows clients to declare their constraints with Power.
*
* Constraint identifiers are device specific, and defined in the
* device-specific Power include file. For example, the constraints for
* MSP432 are defined in PowerMSP432.h.
*
* Only one constraint can be specified with each call to this function; to
* declare multiple constraints this function must be called multiple times.
*
* @param constraintId constraint id
*
* @return <b>CC26XX/CC13XX only</b>: Power_SOK. To minimize code size an
* assert is used internally to check that the constraintId is valid;
* the function always returns Power_SOK.
*
* @return <b>All other devices</b>: Power_SOK on success,
* Power_EINVALIDINPUT if the constraintId is invalid.
*
* @sa Power_releaseConstraint
*/
int_fast16_t Power_setConstraint(uint_fast16_t constraintId);
/*!
* @brief Declare a dependency upon a resource
*
* This function declares a dependency upon a resource. For example, if a
* UART driver needs a specific UART peripheral, it uses this function to
* declare this to the Power Manager. If the resource had been inactive,
* then Power will activate the peripheral during this function call.
*
* What is needed to make a peripheral resource 'active' will vary by device
* family. For some devices this may be a simple enable of a clock to the
* specified peripheral. For others it may also require a power on of a
* power domain. In either case, the Power Manager will take care of these
* details, and will also implement reference counting for resources and their
* interdependencies. For example, if multiple UART peripherals reside in
* a shared serial power domain, the Power Manager will power up the serial
* domain when it is first needed, and then automatically power the domain off
* later, when all related dependencies for the relevant peripherals are
* released.
*
* Resource identifiers are device specific, and defined in the
* device-specific Power include file. For example, the resources for
* CC32XX are defined in PowerCC32XX.h.
*
* @param resourceId resource id
*
* @return <b>CC26XX/CC13XX only</b>: Power_SOK. To minimize code size an
* assert is used internally to check that the resourceId is valid;
* the function always returns Power_SOK.
*
* @return <b>All other devices</b>: Power_SOK on success,
* Power_EINVALIDINPUT if the reseourceId is invalid.
*
* @sa Power_releaseDependency
*/
int_fast16_t Power_setDependency(uint_fast16_t resourceId);
/*!
* @brief Set the MCU performance level
*
* This function manages a transition to a new device performance level.
* Before the actual transition is initiated, notifications will be sent to
* any clients who've registered (with Power_registerNotify()) for a
* 'start change performance level' notification. The event name is device
* specific, and defined in the device-specific Power include file. For
* example, for MSP432, the event is "PowerMSP432_START_CHANGE_PERF_LEVEL",
* which is defined in PowerMSP432.h. Once notifications have been completed,
* the change to the performance level is initiated. After the level change
* is completed, there is a comparable event that can be used to signal a
* client that the change has completed. For example, on MSP432 the
* "PowerMSP432_DONE_CHANGE_PERF_LEVEL" event can be used to signal
* completion.
*
* This function will not return until the new performance level is in effect.
* If performance scaling is not supported for the device, or is prohibited
* by an active constraint, or if the specified level is invalid, then an
* error status will be returned.
*
* @param level the new performance level
*
* @return Power_SOK on success.
* Power_EINVALIDINPUT if the specified performance level is out of
* range of valid levels.
* Power_EBUSY if another transition is already in progress, or if
* a single constraint is set to prohibit any change to the
* performance level.
* Power_ECHANGE_NOT_ALLOWED if a level-specific constraint prohibits
* a change to the requested level.
* Power_EFAIL if performance scaling is not supported, if an
* error occurred during initialization, or if an error occurred
* during client notifications.
*
* @sa Power_getPerformanceLevel
*/
int_fast16_t Power_setPerformanceLevel(uint_fast16_t level);
/*!
* @brief Set a new Power policy
*
* This function allows a new Power policy function to be selected at runtime.
*
* @param policy the new Power policy function
*/
void Power_setPolicy(Power_PolicyFxn policy);
/*!
* @brief Put the device into a shutdown state
*
* This function will transition the device into a shutdown state.
* Before the actual transition is initiated, notifications will be sent to
* any clients who've registered (with Power_registerNotify()) for an
* 'entering shutdown' event. The event name is device specific, and defined
* in the device-specific Power include file. For example, for CC32XX, the
* event is "PowerCC32XX_ENTERING_SHUTDOWN", which is defined in
* PowerCC32XX.h. Once notifications have been completed, the device shutdown
* will commence.
*
* If the device is successfully transitioned to shutdown, this function
* call will never return. Upon wakeup, the device and application will
* be rebooted (through a device reset). If the transition is not
* successful, one of the error codes listed below will be returned.
*
* On some devices a timed wakeup from shutdown can be specified, using
* the shutdownTime parameter. This enables an autonomous application reboot
* at a future time. For example, an application can go to shutdown, and then
* automatically reboot at a future time to do some work. And once that work
* is done, the application can shutdown again, for another timed interval.
* The time interval is specified via the shutdownTime parameter. (On devices
* that do not support this feature, any value specified for shutdownTime will
* be ignored.) If the specified shutdownTime is less than the total
* shutdown latency for the device, then shutdownTime will be ignored. The
* shutdown latency for the device can be found in the device-specific Power
* include file. For example, for the CC32XX, this latency is defined in
* PowerCC32XX.h, as "PowerCC32XX_TOTALTIMESHUTDOWN".)
*
* @param shutdownState the device-specific shutdown state
*
* @param shutdownTime the amount of time (in milliseconds) to keep the
* the device in the shutdown state; this parameter
* is not supported on all device families
*
* @return Power_ECHANGE_NOT_ALLOWED if a constraint is prohibiting shutdown.
* Power_EFAIL if an error occurred during client notifications.
* Power_EINVALIDINPUT if the shutdownState is invalid.
* Power_EBUSY if another transition is already in progress.
*/
int_fast16_t Power_shutdown(uint_fast16_t shutdownState,
uint_fast32_t shutdownTime);
/*!
* @brief Transition the device into a sleep state
*
* This function is called from the power policy when it has made a decision
* to put the device in a specific sleep state. This function returns to the
* caller (the policy function) once the device has awoken from sleep.
*
* This function must be called with interrupts disabled, and should not be
* called directly by the application, or by any drivers.
* This function does not check declared constraints; the policy function
* must check constraints before calling this function to initiate sleep.
*
* @param sleepState the sleep state
*
* @return Power_SOK on success, the device has slept and is awake again.
* Power_EFAIL if an error occurred during client notifications, or
* if a general failure occurred.
* Power_EINVALIDINPUT if the sleepState is invalid.
* Power_EBUSY if another transition is already in progress.
*/
int_fast16_t Power_sleep(uint_fast16_t sleepState);
/*!
* @brief Unregister previously registered notifications
*
* This function unregisters for event notifications that were previously
* registered with Power_registerNotify(). The caller must specify a pointer
* to the same notification object used during registration.
*
* @param pNotifyObj notify object
*
* @sa Power_registerNotify
*/
void Power_unregisterNotify(Power_NotifyObj *pNotifyObj);
#ifdef __cplusplus
}
#endif
#endif /* ti_drivers_Power__include */
| 44.827303 | 95 | 0.711649 | [
"object"
] |
c11c9f2a0c9fd6f90424d7d521f0b3508a85df1a | 714 | h | C | include/thundergbm/dataset.h | Haiga/thundergbm | e7a1cca68ad076f112c48642f05651fb1f01e43a | [
"Apache-2.0"
] | 1 | 2019-10-06T21:22:59.000Z | 2019-10-06T21:22:59.000Z | include/thundergbm/dataset.h | Haiga/gbm | d7a3f98271f15bd94d064f93b4ae3ce57f3b67da | [
"Apache-2.0"
] | null | null | null | include/thundergbm/dataset.h | Haiga/gbm | d7a3f98271f15bd94d064f93b4ae3ce57f3b67da | [
"Apache-2.0"
] | null | null | null | //
// Created by jiashuai on 18-1-17.
//
#ifndef THUNDERGBM_DATASET_H
#define THUNDERGBM_DATASET_H
#include "common.h"
#include "syncarray.h"
class DataSet {
public:
///load dataset from file
void load_from_sparse(int row_size, float* val, int* row_ptr, int* col_ptr, float* label);
void load_from_file(string file_name, GBMParam param);
void load_group_file(string file_name);
void group_label();
size_t n_features() const;
size_t n_instances() const;
vector<float_type> csr_val;
vector<int> csr_row_ptr;
vector<int> csr_col_idx;
vector<float_type> y;
size_t n_features_;
vector<int> group;
vector<float_type> label;
};
#endif //THUNDERGBM_DATASET_H
| 21.636364 | 94 | 0.711485 | [
"vector"
] |
c132bc287d6efaa7619fa0d45d834efcf7421849 | 23,563 | c | C | src/examples/media-session/default-routes.c | mikhailnov/pipewire | 2405f0942b0019a5bcc49a52e6634248c1bbd456 | [
"MIT"
] | null | null | null | src/examples/media-session/default-routes.c | mikhailnov/pipewire | 2405f0942b0019a5bcc49a52e6634248c1bbd456 | [
"MIT"
] | null | null | null | src/examples/media-session/default-routes.c | mikhailnov/pipewire | 2405f0942b0019a5bcc49a52e6634248c1bbd456 | [
"MIT"
] | null | null | null | /* PipeWire
*
* Copyright © 2020 Wim Taymans
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <math.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include "config.h"
#include <spa/utils/hook.h>
#include <spa/utils/result.h>
#include <spa/utils/json.h>
#include <spa/pod/parser.h>
#include <spa/pod/builder.h>
#include <spa/debug/pod.h>
#include "pipewire/pipewire.h"
#include "extensions/metadata.h"
#include "media-session.h"
#define NAME "default-routes"
#define SESSION_KEY "default-routes"
#define PREFIX "default.route."
#define SAVE_INTERVAL 1
struct impl {
struct timespec now;
struct sm_media_session *session;
struct spa_hook listener;
struct pw_context *context;
struct spa_source *idle_timeout;
struct spa_hook meta_listener;
struct pw_properties *to_restore;
};
struct device {
struct sm_device *obj;
uint32_t id;
struct impl *impl;
char *name;
struct spa_hook listener;
uint32_t active_profile;
uint32_t generation;
struct pw_array route_info;
};
static void remove_idle_timeout(struct impl *impl)
{
struct pw_loop *main_loop = pw_context_get_main_loop(impl->context);
int res;
if (impl->idle_timeout) {
if ((res = sm_media_session_save_state(impl->session,
SESSION_KEY, impl->to_restore)) < 0)
pw_log_error("can't save "SESSION_KEY" state: %s", spa_strerror(res));
pw_loop_destroy_source(main_loop, impl->idle_timeout);
impl->idle_timeout = NULL;
}
}
static void idle_timeout(void *data, uint64_t expirations)
{
struct impl *impl = data;
pw_log_debug(NAME " %p: idle timeout", impl);
remove_idle_timeout(impl);
}
static void add_idle_timeout(struct impl *impl)
{
struct timespec value;
struct pw_loop *main_loop = pw_context_get_main_loop(impl->context);
if (impl->idle_timeout == NULL)
impl->idle_timeout = pw_loop_add_timer(main_loop, idle_timeout, impl);
value.tv_sec = SAVE_INTERVAL;
value.tv_nsec = 0;
pw_loop_update_timer(main_loop, impl->idle_timeout, &value, NULL, false);
}
static uint32_t channel_from_name(const char *name)
{
int i;
for (i = 0; spa_type_audio_channel[i].name; i++) {
if (strcmp(name, spa_debug_type_short_name(spa_type_audio_channel[i].name)) == 0)
return spa_type_audio_channel[i].type;
}
return SPA_AUDIO_CHANNEL_UNKNOWN;
}
static const char *channel_to_name(uint32_t channel)
{
int i;
for (i = 0; spa_type_audio_channel[i].name; i++) {
if (spa_type_audio_channel[i].type == channel)
return spa_debug_type_short_name(spa_type_audio_channel[i].name);
}
return "UNK";
}
struct route_info {
uint32_t index;
uint32_t generation;
uint32_t available;
uint32_t prev_available;
enum spa_direction direction;
char name[64];
unsigned int save:1;
unsigned int prev_active:1;
unsigned int active:1;
};
struct route {
struct sm_param *p;
uint32_t index;
uint32_t device_id;
enum spa_direction direction;
const char *name;
uint32_t priority;
uint32_t available;
struct spa_pod *props;
bool save;
};
#define ROUTE_INIT(__p) (struct route) { \
.p = (__p), \
.available = SPA_PARAM_AVAILABILITY_unknown, \
}
static struct route_info *find_route_info(struct device *dev, struct route *r)
{
struct route_info *i;
pw_array_for_each(i, &dev->route_info) {
if (i->index == r->index)
return i;
}
i = pw_array_add(&dev->route_info, sizeof(*i));
if (i == NULL)
return NULL;
pw_log_info("device %d: new route %d '%s' found", dev->id, r->index, r->name);
spa_zero(*i);
i->index = r->index;
snprintf(i->name, sizeof(i->name), "%s", r->name);
i->direction = r->direction;
i->generation = dev->generation;
i->available = r->available;
i->prev_available = r->available;
return i;
}
static int parse_route(struct sm_param *p, struct route *r)
{
*r = ROUTE_INIT(p);
return spa_pod_parse_object(p->param,
SPA_TYPE_OBJECT_ParamRoute, NULL,
SPA_PARAM_ROUTE_index, SPA_POD_Int(&r->index),
SPA_PARAM_ROUTE_direction, SPA_POD_Id(&r->direction),
SPA_PARAM_ROUTE_device, SPA_POD_Int(&r->device_id),
SPA_PARAM_ROUTE_name, SPA_POD_String(&r->name),
SPA_PARAM_ROUTE_priority, SPA_POD_OPT_Int(&r->priority),
SPA_PARAM_ROUTE_available, SPA_POD_OPT_Id(&r->available),
SPA_PARAM_ROUTE_props, SPA_POD_OPT_Pod(&r->props),
SPA_PARAM_ROUTE_save, SPA_POD_OPT_Bool(&r->save));
}
static bool array_contains(struct spa_pod *pod, uint32_t val)
{
uint32_t *vals, n_vals;
uint32_t n;
if (pod == NULL)
return false;
vals = spa_pod_get_array(pod, &n_vals);
if (vals == NULL || n_vals == 0)
return false;
for (n = 0; n < n_vals; n++)
if (vals[n] == val)
return true;
return false;
}
static int parse_enum_route(struct sm_param *p, uint32_t device_id, struct route *r)
{
struct spa_pod *devices = NULL;
int res;
*r = ROUTE_INIT(p);
if ((res = spa_pod_parse_object(p->param,
SPA_TYPE_OBJECT_ParamRoute, NULL,
SPA_PARAM_ROUTE_index, SPA_POD_Int(&r->index),
SPA_PARAM_ROUTE_direction, SPA_POD_Id(&r->direction),
SPA_PARAM_ROUTE_name, SPA_POD_String(&r->name),
SPA_PARAM_ROUTE_priority, SPA_POD_OPT_Int(&r->priority),
SPA_PARAM_ROUTE_available, SPA_POD_OPT_Id(&r->available),
SPA_PARAM_ROUTE_devices, SPA_POD_OPT_Pod(&devices))) < 0)
return res;
if (device_id != SPA_ID_INVALID && !array_contains(devices, device_id))
return -ENOENT;
r->device_id = device_id;
return 0;
}
static char *serialize_props(struct device *dev, const struct spa_pod *param)
{
struct spa_pod_prop *prop;
struct spa_pod_object *obj = (struct spa_pod_object *) param;
bool comma = false;
char *ptr;
size_t size;
FILE *f;
f = open_memstream(&ptr, &size);
fprintf(f, "{");
SPA_POD_OBJECT_FOREACH(obj, prop) {
switch (prop->key) {
case SPA_PROP_volume:
{
float val;
if (spa_pod_get_float(&prop->value, &val) < 0)
continue;
fprintf(f, "%s \"volume\": %f", (comma ? "," : ""), val);
break;
}
case SPA_PROP_mute:
{
bool b;
if (spa_pod_get_bool(&prop->value, &b) < 0)
continue;
fprintf(f, "%s \"mute\": %s", (comma ? "," : ""), b ? "true" : "false");
break;
}
case SPA_PROP_channelVolumes:
{
uint32_t i, n_vals;
float vals[SPA_AUDIO_MAX_CHANNELS];
n_vals = spa_pod_copy_array(&prop->value, SPA_TYPE_Float,
vals, SPA_AUDIO_MAX_CHANNELS);
if (n_vals == 0)
continue;
fprintf(f, "%s \"volumes\": [", (comma ? "," : ""));
for (i = 0; i < n_vals; i++)
fprintf(f, "%s %f", (i == 0 ? "" : ","), vals[i]);
fprintf(f, " ]");
break;
}
case SPA_PROP_channelMap:
{
uint32_t i, n_vals;
uint32_t map[SPA_AUDIO_MAX_CHANNELS];
n_vals = spa_pod_copy_array(&prop->value, SPA_TYPE_Id,
map, SPA_AUDIO_MAX_CHANNELS);
if (n_vals == 0)
continue;
fprintf(f, "%s \"channels\": [", (comma ? "," : ""));
for (i = 0; i < n_vals; i++)
fprintf(f, "%s \"%s\"", (i == 0 ? "" : ","), channel_to_name(map[i]));
fprintf(f, " ]");
break;
}
case SPA_PROP_latencyOffsetNsec:
{
int64_t delay;
if (spa_pod_get_long(&prop->value, &delay) < 0)
continue;
fprintf(f, "%s \"latencyOffsetNsec\": %"PRIi64, (comma ? "," : ""), delay);
break;
}
default:
continue;
}
comma = true;
}
fprintf(f, " }");
fclose(f);
return ptr;
}
/*
 * Rebuild a ParamRoute pod from the saved JSON fragment 'val' (the format
 * written by serialize_props()) and apply it to route 'r' of the device
 * via pw_device_set_param(). Unknown JSON keys are skipped. A session
 * rescan is scheduled afterwards.
 *
 * Returns 0, or -EINVAL when 'val' is not a JSON object.
 */
static int restore_route_params(struct device *dev, const char *val, struct route *r)
{
struct spa_json it[3];
char buf[1024], key[128];
const char *value;
struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buf, sizeof(buf));
struct spa_pod_frame f[2];
struct spa_pod *param;
spa_json_init(&it[0], val, strlen(val));
if (spa_json_enter_object(&it[0], &it[1]) <= 0)
return -EINVAL;
/* outer Route object: index/device identify the route ... */
spa_pod_builder_push_object(&b, &f[0],
SPA_TYPE_OBJECT_ParamRoute, SPA_PARAM_Route);
spa_pod_builder_add(&b,
SPA_PARAM_ROUTE_index, SPA_POD_Int(r->index),
SPA_PARAM_ROUTE_device, SPA_POD_Int(r->device_id),
0);
/* ... then a nested Props object filled from the JSON keys */
spa_pod_builder_prop(&b, SPA_PARAM_ROUTE_props, 0);
spa_pod_builder_push_object(&b, &f[1],
SPA_TYPE_OBJECT_Props, SPA_PARAM_Route);
while (spa_json_get_string(&it[1], key, sizeof(key)-1) > 0) {
if (strcmp(key, "volume") == 0) {
float vol;
if (spa_json_get_float(&it[1], &vol) <= 0)
continue;
spa_pod_builder_prop(&b, SPA_PROP_volume, 0);
spa_pod_builder_float(&b, vol);
}
else if (strcmp(key, "mute") == 0) {
bool mute;
if (spa_json_get_bool(&it[1], &mute) <= 0)
continue;
spa_pod_builder_prop(&b, SPA_PROP_mute, 0);
spa_pod_builder_bool(&b, mute);
}
else if (strcmp(key, "volumes") == 0) {
uint32_t n_vols;
float vols[SPA_AUDIO_MAX_CHANNELS];
if (spa_json_enter_array(&it[1], &it[2]) <= 0)
continue;
for (n_vols = 0; n_vols < SPA_AUDIO_MAX_CHANNELS; n_vols++) {
if (spa_json_get_float(&it[2], &vols[n_vols]) <= 0)
break;
}
if (n_vols == 0)
continue;
spa_pod_builder_prop(&b, SPA_PROP_channelVolumes, 0);
spa_pod_builder_array(&b, sizeof(float), SPA_TYPE_Float,
n_vols, vols);
}
else if (strcmp(key, "channels") == 0) {
uint32_t n_ch;
uint32_t map[SPA_AUDIO_MAX_CHANNELS];
if (spa_json_enter_array(&it[1], &it[2]) <= 0)
continue;
for (n_ch = 0; n_ch < SPA_AUDIO_MAX_CHANNELS; n_ch++) {
char chname[16];
if (spa_json_get_string(&it[2], chname, sizeof(chname)) <= 0)
break;
/* channel names are stored symbolically, map back to ids */
map[n_ch] = channel_from_name(chname);
}
if (n_ch == 0)
continue;
spa_pod_builder_prop(&b, SPA_PROP_channelMap, 0);
spa_pod_builder_array(&b, sizeof(uint32_t), SPA_TYPE_Id,
n_ch, map);
}
else if (strcmp(key, "latencyOffsetNsec") == 0) {
float delay;
if (spa_json_get_float(&it[1], &delay) <= 0)
continue;
spa_pod_builder_prop(&b, SPA_PROP_latencyOffsetNsec, 0);
/* clamp the parsed float into the representable int64 range */
spa_pod_builder_long(&b, (int64_t)SPA_CLAMP(delay, INT64_MIN, INT64_MAX));
} else {
/* unknown key: consume and discard its value */
if (spa_json_next(&it[1], &value) <= 0)
break;
}
}
spa_pod_builder_pop(&b, &f[1]);
/* remember whether this route should keep being saved */
spa_pod_builder_prop(&b, SPA_PARAM_ROUTE_save, 0);
spa_pod_builder_bool(&b, r->save);
param = spa_pod_builder_pop(&b, &f[0]);
if (pw_log_level_enabled(SPA_LOG_LEVEL_DEBUG))
spa_debug_pod(2, NULL, param);
pw_device_set_param((struct pw_node*)dev->obj->obj.proxy,
SPA_PARAM_Route, 0, param);
sm_media_session_schedule_rescan(dev->impl->session);
return 0;
}
/* Decoded view of a ParamProfile pod; filled by parse_profile(). */
struct profile {
uint32_t index;		/* profile index on the device */
const char *name;	/* profile name (points into the pod) */
uint32_t prio;		/* optional priority, 0 when absent */
uint32_t available;	/* optional availability id, 0 when absent */
struct spa_pod *classes;	/* optional Struct describing the profile's classes */
};
/*
 * Decode a ParamProfile pod into *pr. Index and name are mandatory,
 * the remaining fields are optional and stay zeroed when absent.
 * Returns 0 on success or a negative SPA error from the parser.
 */
static int parse_profile(struct sm_param *p, struct profile *pr)
{
int res;
spa_zero(*pr);
res = spa_pod_parse_object(p->param,
SPA_TYPE_OBJECT_ParamProfile, NULL,
SPA_PARAM_PROFILE_index, SPA_POD_Int(&pr->index),
SPA_PARAM_PROFILE_name, SPA_POD_String(&pr->name),
SPA_PARAM_PROFILE_priority, SPA_POD_OPT_Int(&pr->prio),
SPA_PARAM_PROFILE_available, SPA_POD_OPT_Id(&pr->available),
SPA_PARAM_PROFILE_classes, SPA_POD_OPT_Pod(&pr->classes));
return res < 0 ? res : 0;
}
/*
 * Locate the device's active Profile param and decode it into *pr.
 * Returns 0 when found, -ENOENT when the device has no usable
 * Profile param cached.
 */
static int find_current_profile(struct device *dev, struct profile *pr)
{
struct sm_param *it;
spa_list_for_each(it, &dev->obj->param_list, link) {
if (it->id != SPA_PARAM_Profile)
continue;
if (parse_profile(it, pr) < 0)
continue;
return 0;
}
return -ENOENT;
}
/*
 * Restore the saved properties of route 'r' on the device. The value is
 * looked up in the persisted properties under "<PREFIX><dev>:<dir>:<route>";
 * when nothing was saved, a default of 40% volume, unmuted, is applied.
 * The route_info bookkeeping is updated to mark the route active.
 *
 * Returns 0, or a negative errno when no route_info exists for 'r'.
 */
static int restore_route(struct device *dev, struct route *r)
{
struct impl *impl = dev->impl;
char key[1024];
const char *val;
struct route_info *ri;
if ((ri = find_route_info(dev, r)) == NULL)
return -errno;
snprintf(key, sizeof(key), PREFIX"%s:%s:%s", dev->name,
r->direction == SPA_DIRECTION_INPUT ? "input" : "output", r->name);
val = pw_properties_get(impl->to_restore, key);
/* nothing saved yet: apply a safe default */
if (val == NULL)
val = "{ \"volumes\": [ 0.4 ], \"mute\": false }";
pw_log_info("device %d: restore route %d '%s' to %s", dev->id, r->index, key, val);
restore_route_params(dev, val, r);
ri->prev_active = true;
ri->active = true;
ri->generation = dev->generation;
ri->save = r->save;
return 0;
}
/*
 * Persist the current properties of route 'r' under the per-device,
 * per-direction key. When the serialized value differs from what is
 * stored, an idle timeout is armed so the state file gets written out.
 *
 * Returns 0, or -EINVAL when the route carries no props.
 */
static int save_route(struct device *dev, struct route *r)
{
struct impl *impl = dev->impl;
char key[1024], *val;
if (r->props == NULL)
return -EINVAL;
snprintf(key, sizeof(key), PREFIX"%s:%s:%s", dev->name,
r->direction == SPA_DIRECTION_INPUT ? "input" : "output", r->name);
val = serialize_props(dev, r->props);
/* pw_properties_set() returns nonzero when the value changed */
if (pw_properties_set(impl->to_restore, key, val)) {
pw_log_info("device %d: route properties changed %s %s", dev->id, key, val);
add_idle_timeout(impl);
}
free(val);
return 0;
}
/*
 * Serialize the names of all routes currently marked for saving into a
 * JSON array such as '[ "analog-output" ]'.
 *
 * Returns a malloc()ed string the caller must free(), or NULL when the
 * memory stream could not be created.
 */
static char *serialize_routes(struct device *dev)
{
char *ptr;
size_t size;
FILE *f;
struct route_info *ri;
int count = 0;
f = open_memstream(&ptr, &size);
/* open_memstream() can fail (ENOMEM); the original code would have
 * crashed in fprintf() on a NULL stream. */
if (f == NULL)
return NULL;
fprintf(f, "[");
pw_array_for_each(ri, &dev->route_info) {
if (ri->save) {
fprintf(f, "%s \"%s\"", count++ == 0 ? "" : ",", ri->name);
}
}
fprintf(f, " ]");
fclose(f);
return ptr;
}
/*
 * Persist the list of saved routes for profile 'pr' under the key
 * "<PREFIX><dev>:profile:<name>". A no-op when the device has no
 * route_info entries. Arms the idle save timeout on change.
 */
static int save_profile(struct device *dev, struct profile *pr)
{
struct impl *impl = dev->impl;
char key[1024], *val;
if (pw_array_get_len(&dev->route_info, struct route_info) == 0)
return 0;
snprintf(key, sizeof(key), PREFIX"%s:profile:%s", dev->name, pr->name);
val = serialize_routes(dev);
/* nonzero return means the stored value actually changed */
if (pw_properties_set(impl->to_restore, key, val)) {
pw_log_info("device %d: profile %s routes changed %s %s",
dev->id, pr->name, key, val);
add_idle_timeout(impl);
} else {
pw_log_info("device %d: profile %s unchanged (%s)",
dev->id, pr->name, val);
}
free(val);
return 0;
}
/*
 * Pick the most suitable route for 'device_id': prefer the
 * highest-priority route that is known to be available, fall back to the
 * highest-priority route with unknown availability, and never pick a
 * route that is known unavailable.
 *
 * Returns 0 with *r filled in, or -ENOENT when no candidate exists.
 */
static int find_best_route(struct device *dev, uint32_t device_id, struct route *r)
{
struct sm_param *p;
struct route best, best_avail, best_unk;
spa_zero(best_avail);
spa_zero(best_unk);
spa_list_for_each(p, &dev->obj->param_list, link) {
struct route t;
if (p->id != SPA_PARAM_EnumRoute ||
parse_enum_route(p, device_id, &t) < 0)
continue;
if (t.available == SPA_PARAM_AVAILABILITY_yes) {
/* name == NULL means the slot is still empty */
if (best_avail.name == NULL || t.priority > best_avail.priority)
best_avail = t;
}
else if (t.available != SPA_PARAM_AVAILABILITY_no) {
if (best_unk.name == NULL || t.priority > best_unk.priority)
best_unk = t;
}
}
best = best_avail;
if (best.name == NULL)
best = best_unk;
if (best.name == NULL)
return -ENOENT;
*r = best;
return 0;
}
/*
 * Look up the EnumRoute entry matching both 'device_id' and the given
 * route name. On success *r holds the decoded route and 0 is returned;
 * otherwise -ENOENT.
 */
static int find_route(struct device *dev, uint32_t device_id, const char *name, struct route *r)
{
struct sm_param *it;
spa_list_for_each(it, &dev->obj->param_list, link) {
if (it->id != SPA_PARAM_EnumRoute)
continue;
if (parse_enum_route(it, device_id, r) < 0)
continue;
if (strcmp(r->name, name) == 0)
return 0;
}
return -ENOENT;
}
/*
 * Scan the saved JSON array of route names in 'val' (see
 * serialize_routes()) and return the first entry that still exists for
 * 'device_id'. Returns 0 with *r filled, -ENOENT when nothing matches
 * or 'val' is NULL, -EINVAL when 'val' is not a JSON array.
 */
static int find_saved_route(struct device *dev, const char *val, uint32_t device_id, struct route *r)
{
struct spa_json it[2];
char key[128];
if (val == NULL)
return -ENOENT;
spa_json_init(&it[0], val, strlen(val));
if (spa_json_enter_array(&it[0], &it[1]) <= 0)
return -EINVAL;
while (spa_json_get_string(&it[1], key, sizeof(key)-1) > 0) {
if (find_route(dev, device_id, key, r) >= 0)
return 0;
}
return -ENOENT;
}
/*
 * Restore the route of a single sub-device: when 'restore' is set, try
 * the previously saved route first (skipping it when now unavailable),
 * otherwise — or when that fails — fall back to the best currently
 * available route. The chosen route is applied with restore_route().
 *
 * Returns >= 0 when a route was applied, negative when none was found.
 */
static int restore_device_route(struct device *dev, const char *val, uint32_t device_id, bool restore)
{
int res = -ENOENT;
struct route t;
pw_log_info("device %d: restoring device %u", dev->id, device_id);
if (restore) {
res = find_saved_route(dev, val, device_id, &t);
if (res >= 0) {
/* we found a saved route */
if (t.available == SPA_PARAM_AVAILABILITY_no) {
pw_log_info("device %d: saved route '%s' not available", dev->id,
t.name);
/* not available, try to find next best port */
res = -ENOENT;
} else {
pw_log_info("device %d: found saved route '%s'", dev->id,
t.name);
/* make sure we save it again */
t.save = true;
}
}
}
if (res < 0) {
/* we could not find a saved route, try to find a new best */
res = find_best_route(dev, device_id, &t);
if (res < 0) {
pw_log_info("device %d: can't find best route", dev->id);
} else {
pw_log_info("device %d: found best route '%s'", dev->id,
t.name);
}
}
if (res >= 0)
restore_route(dev, &t);
return res;
}
/*
 * Apply routes for the newly active profile: walk the profile's classes
 * Struct, extract the "card.profile.devices" array from each class, and
 * restore a route for every listed sub-device. 'restore' selects whether
 * previously saved routes are preferred over the current best route.
 */
static int reconfigure_profile(struct device *dev, struct profile *pr, bool restore)
{
struct impl *impl = dev->impl;
char key[1024];
const char *json;
pw_log_info("device %s: restore routes for profile '%s'",
dev->name, pr->name);
dev->active_profile = pr->index;
snprintf(key, sizeof(key), PREFIX"%s:profile:%s", dev->name, pr->name);
/* saved JSON array of route names for this profile, may be NULL */
json = pw_properties_get(impl->to_restore, key);
if (pr->classes != NULL) {
struct spa_pod *iter;
SPA_POD_STRUCT_FOREACH(pr->classes, iter) {
struct spa_pod_parser prs;
struct spa_pod_frame f[1];
struct spa_pod *val;
/* NOTE: this 'key' shadows the outer key[] buffer above */
char *key;
spa_pod_parser_pod(&prs, iter);
if (spa_pod_parser_push_struct(&prs, &f[0]) < 0)
continue;
/* each class is a Struct of alternating key/value entries */
while (spa_pod_parser_get(&prs,
SPA_POD_String(&key),
SPA_POD_Pod(&val),
NULL) >= 0) {
if (key == NULL || val == NULL)
break;
if (strcmp(key, "card.profile.devices") == 0) {
uint32_t *devices, n_devices, i;
devices = spa_pod_get_array(val, &n_devices);
if (devices == NULL || n_devices == 0)
continue;
for (i = 0; i < n_devices; i++)
restore_device_route(dev, json, devices[i], restore);
}
}
spa_pod_parser_pop(&prs, &f[0]);
}
}
return 0;
}
/*
 * Drop route_info entries that were not touched in the current
 * generation (i.e. routes the device no longer enumerates).
 * pw_array_remove() shifts following elements down, so the cursor is
 * only advanced when the current entry is kept.
 */
static void prune_route_info(struct device *dev)
{
struct route_info *i;
for (i = pw_array_first(&dev->route_info);
pw_array_check(&dev->route_info, i);) {
if (i->generation != dev->generation) {
pw_log_info("device %d: route '%s' unused", dev->id, i->name);
pw_array_remove(&dev->route_info, i);
} else
i++;
}
}
/*
 * Process one active Route param: mark its route_info active and, when
 * the port just became active, restore its saved properties; when it was
 * already active and carries savable props, persist them instead.
 *
 * Returns 0, or a negative errno when no route_info exists for 'r'.
 */
static int handle_route(struct device *dev, struct route *r)
{
struct route_info *ri;
pw_log_info("device %d: port '%s'", dev->id, r->name);
if ((ri = find_route_info(dev, r)) == NULL)
return -errno;
ri->active = true;
ri->save = r->save;
if (!ri->prev_active) {
/* a new port has been found, restore the volume and make sure we
* save this as a preferred port */
pw_log_info("device %d: new active port found '%s'", dev->id, r->name);
restore_route(dev, r);
} else if (r->props && r->save) {
/* just save port properties */
save_route(dev, r);
}
return 0;
}
/*
 * Re-evaluate a device after its params changed: refresh availability of
 * all enumerated routes, process the active routes, prune stale entries,
 * and — when the profile changed or availability changed — reconfigure
 * and persist the profile's routes.
 */
static int handle_device(struct device *dev)
{
struct profile pr;
struct sm_param *p;
int res;
bool route_changed = false;
/* bump the generation; entries not refreshed below get pruned */
dev->generation++;
/* first look at all routes and update */
spa_list_for_each(p, &dev->obj->param_list, link) {
struct route r;
struct route_info *ri;
if (p->id != SPA_PARAM_EnumRoute ||
parse_enum_route(p, SPA_ID_INVALID, &r) < 0)
continue;
if ((ri = find_route_info(dev, &r)) == NULL)
continue;
ri->prev_available = ri->available;
if (ri->available != r.available) {
pw_log_info("device %d: route %s available changed %d -> %d",
dev->id, r.name, ri->available, r.available);
ri->available = r.available;
route_changed = true;
}
ri->generation = dev->generation;
/* reset active/save; handle_route() below re-marks active routes */
ri->prev_active = ri->active;
ri->active = false;
ri->save = false;
}
/* then check for changes in the active ports */
spa_list_for_each(p, &dev->obj->param_list, link) {
struct route r;
if (p->id != SPA_PARAM_Route ||
parse_route(p, &r) < 0)
continue;
handle_route(dev, &r);
}
prune_route_info(dev);
if ((res = find_current_profile(dev, &pr)) >= 0) {
bool restore = dev->active_profile != pr.index;
if (restore || route_changed)
reconfigure_profile(dev, &pr, restore);
save_profile(dev, &pr);
}
return 0;
}
/* Object-update callback: re-process the device when its params changed. */
static void object_update(void *data)
{
struct device *dev = data;
struct impl *impl = dev->impl;
pw_log_debug(NAME" %p: device %p %08x/%08x", impl, dev,
dev->obj->obj.changed, dev->obj->obj.avail);
if (dev->obj->obj.changed & SM_DEVICE_CHANGE_MASK_PARAMS)
handle_device(dev);
}
/* Listener vtable installed on each tracked device object. */
static const struct sm_object_events object_events = {
SM_VERSION_OBJECT_EVENTS,
.update = object_update
};
/*
 * Session callback for new objects: attach per-device state to every
 * Device object that has a device.name, subscribe to its param changes.
 * Non-device objects and unnamed devices are ignored.
 */
static void session_create(void *data, struct sm_object *object)
{
struct impl *impl = data;
struct device *dev;
const char *name;
if (strcmp(object->type, PW_TYPE_INTERFACE_Device) != 0 ||
object->props == NULL ||
(name = pw_properties_get(object->props, PW_KEY_DEVICE_NAME)) == NULL)
return;
pw_log_debug(NAME " %p: add device '%d' %s", impl, object->id, name);
/* per-device state lives as object data keyed by SESSION_KEY */
dev = sm_object_add_data(object, SESSION_KEY, sizeof(struct device));
dev->obj = (struct sm_device*)object;
dev->id = object->id;
dev->impl = impl;
dev->name = strdup(name);
dev->active_profile = SPA_ID_INVALID;
dev->generation = 0;
pw_array_init(&dev->route_info, sizeof(struct route_info) * 16);
dev->obj->obj.mask |= SM_DEVICE_CHANGE_MASK_PARAMS;
sm_object_add_listener(&dev->obj->obj, &dev->listener, &object_events, dev);
}
/* Tear down per-device state: listener, route array, name, object data. */
static void destroy_device(struct impl *impl, struct device *dev)
{
spa_hook_remove(&dev->listener);
pw_array_clear(&dev->route_info);
free(dev->name);
sm_object_remove_data((struct sm_object*)dev->obj, SESSION_KEY);
}
/*
 * Session callback for removed objects: release the per-device state we
 * attached in session_create(), if any. Non-device objects are ignored.
 */
static void session_remove(void *data, struct sm_object *object)
{
struct impl *impl = data;
struct device *dev;
if (strcmp(object->type, PW_TYPE_INTERFACE_Device) != 0)
return;
pw_log_debug(NAME " %p: remove device '%d'", impl, object->id);
dev = sm_object_get_data(object, SESSION_KEY);
if (dev != NULL)
destroy_device(impl, dev);
}
/* Session shutdown: cancel pending save, drop listener, free state. */
static void session_destroy(void *data)
{
struct impl *impl = data;
remove_idle_timeout(impl);
spa_hook_remove(&impl->listener);
pw_properties_free(impl->to_restore);
free(impl);
}
/* Media-session listener vtable for this module. */
static const struct sm_media_session_events session_events = {
SM_VERSION_MEDIA_SESSION_EVENTS,
.create = session_create,
.remove = session_remove,
.destroy = session_destroy,
};
/*
 * Module entry point: allocate the module state, load previously saved
 * route/profile data (missing state is only logged, not fatal), and
 * register the session listener.
 *
 * Returns 0 on success or a negative errno.
 */
int sm_default_routes_start(struct sm_media_session *session)
{
struct impl *impl;
int res;
impl = calloc(1, sizeof(struct impl));
if (impl == NULL)
return -errno;
impl->session = session;
impl->context = session->context;
impl->to_restore = pw_properties_new(NULL, NULL);
if (impl->to_restore == NULL) {
res = -errno;
goto exit_free;
}
if ((res = sm_media_session_load_state(impl->session,
SESSION_KEY, impl->to_restore)) < 0)
pw_log_info("can't load "SESSION_KEY" state: %s", spa_strerror(res));
sm_media_session_add_listener(impl->session, &impl->listener, &session_events, impl);
return 0;
exit_free:
free(impl);
return res;
}
| 25.864984 | 102 | 0.66893 | [
"object"
] |
c13445859794fccc40058a5d25aed0e9dda7494b | 15,446 | c | C | clone/misc/fifa2002.c | ttwings/WuXiaAndJiangHu_Godot | a12bb9028d5625ea01de1ea9cb16fef665472275 | [
"MIT"
] | 34 | 2019-04-16T03:32:27.000Z | 2022-03-29T08:05:25.000Z | clone/misc/fifa2002.c | ttwings/WuXiaAndJiangHu_Godot | a12bb9028d5625ea01de1ea9cb16fef665472275 | [
"MIT"
] | null | null | null | clone/misc/fifa2002.c | ttwings/WuXiaAndJiangHu_Godot | a12bb9028d5625ea01de1ea9cb16fef665472275 | [
"MIT"
] | 12 | 2019-03-06T05:15:45.000Z | 2022-03-17T02:43:48.000Z | // jingcaiban.c
#include <ansi.h>
inherit ITEM;
inherit F_SAVE;
// 全局变量,即储存玩家押注的所有数据
mapping *all_biao;
// 全局变量,此处为目前数量
int num;
// 全局变量,此处为最终结杲
mapping *end_biao;
int jieguo;
// 以下七行为函数声明
int do_read(string);
int do_post(string);
int do_ya(string);
void enough_rest();
string codetoteam(string arg);
string ordercode(string arg, int arg1);
string upcase(string arg);
string *team = ({"阿根廷", "巴西", "比利时", "喀麦隆", "中国", "哥斯达黎加", "德国", "丹麦",
"厄瓜多尔", "西班牙", "法国", "克罗地亚", "爱尔兰", "意大利", "日本", "韩国",
"墨西哥", "尼日利亚", "波兰", "葡萄牙", "巴拉圭", "俄罗斯", "沙特", "瑞典",
"斯洛文尼亚", "塞内加尔", "突尼斯", "土耳其", "英格兰", "美国", "乌拉圭", "南非"});
string *code = ({"AR", "BR", "BE", "CM", "CN", "CR", "DE", "DK",
"EC", "ES", "FR", "HR", "IE", "IT", "JP", "KP",
"MX", "NG", "PL", "PT", "PY", "RU", "SA", "SE",
"SI", "SN", "TN", "TU", "UK", "US", "UY", "ZA"});
// Construct the betting board object and load persisted state.
void create()
{
set_name(HIY "世界杯竞猜版" NOR, ({"board", "ban"}));
set("long", "这是一个记录玩家的竞猜押注情况的版。
押注请read rules,查看目前的投注情况请read ban。\n");
set("unit", "张");
set("no_put", 1);
set("no_get", 1);
// huge weight so players cannot pick it up
set_weight(900000000);
seteuid(getuid());
restore();
}
// Register the board's commands for any player entering the room.
void init()
{
add_action("do_read", "read");
add_action("do_post", "post");
add_action("do_ya", "ya");
add_action("do_duijiang", "duijiang");
add_action("do_duijiang", "duixian");
}
string query_save_file()
{
// path of this object's save file
return DATA_DIR + "board/fifa2002_b";
}
// Place a bet: ya <category> <team code(s)> <gold>.
// Categories: 1 champion, 2 runner-up, 4 semifinalists, 8 quarterfinalists.
// The stake is taken directly from the player's bank balance.
int do_ya(string arg)
{
int i;
string c, t, c1, c2, c3, c4, c5, c6, c7, c8;
object ob, me = this_player();
mapping biao;
// betting closed: flag is set by a wizard via "post end" in do_post()
if (query("end_ya"))
return notify_fail("截止时间已过,下回赶早。\n");
// guard against several players betting at the same time
// NOTE(review): "busy" is checked here but never set in this function —
// the guard looks ineffective; confirm intent.
if (query_temp("busy"))
return notify_fail("稍候........\n");
// parse the player's command
if (!arg)
return notify_fail("命令格式:ya <类别> <球队> <多少两黄金>\n");
arg = upcase(arg);
if (sscanf(arg, "%s %s %d", t, c, i) != 3)
return notify_fail("命令格式:ya <类别> <球队> <多少两黄金>\n");
// reject invalid bet categories
if (t != "1" && t != "2" && t != "4" && t != "8")
return notify_fail("你总要先选定投注类别罢?\n");
// minimum stake: 1 gold
if (i < 1)
return notify_fail("你想白赚啊?\n");
// stake ceiling, tune freely
if (i > 1000)
return notify_fail("押得太多,请少于1000。\n");
if ((int)i > me->query("balance") / 10000)
// not enough gold deposited at the bank (balance is in coins)
return notify_fail("这里不收现金!到钱庄存够了钱再来!\n");
// handle each bet category
switch (t)
{
case "1":
// each id may bet each category only once
if (me->query("fifa2002/1"))
return notify_fail("你只能给冠军下一次注!\n");
if (codetoteam(c) == "未知国名")
return notify_fail("先看好国家代码再来!\n");
message_vision("$N想了半天大声喊道:“我认为" HIR + codetoteam(c) + NOR "队能拿到冠军!押 " HIY + chinese_number(i) + NOR " 两黄金!”\n", me);
// record the pick on the player
me->set("fifa2002/1", c);
// and the amount of gold staked
me->set("fifa2002/10", i);
// mapping recording this player's bet on the board
biao = ([
"id":me->query("id"),
"name":me->query("name"),
"type":1,
"code":c,
"gold":i,
]);
break;
case "2":
// each id may bet each category only once
if (me->query("fifa2002/2"))
return notify_fail("你只能给亚军下一次注!\n");
if (codetoteam(c) == "未知国名")
return notify_fail("先看好国家代码再来!\n");
message_vision("$N想了半天大声喊道:“我认为" HIR + codetoteam(c) + NOR "队能拿到亚军!押 " HIY + chinese_number(i) + NOR " 两黄金!”\n", me);
// record the pick on the player
me->set("fifa2002/2", c);
// and the amount of gold staked
me->set("fifa2002/20", i);
// mapping recording this player's bet on the board
biao = ([
"id":me->query("id"),
"name":me->query("name"),
"type":2,
"code":c,
"gold":i,
]);
break;
case "4":
// each id may bet each category only once
if (me->query("fifa2002/4"))
return notify_fail("你只能给四强下一次注!\n");
if (sscanf(c, "%s-%s-%s-%s", c1, c2, c3, c4) != 4)
return notify_fail("国家代码输入格式错!\n");
if (codetoteam(c1) == "未知国名")
return notify_fail("没有" HIR + c1 + NOR "这国家代码!\n");
if (codetoteam(c2) == "未知国名")
return notify_fail("没有" HIR + c2 + NOR "这国家代码!\n");
if (codetoteam(c3) == "未知国名")
return notify_fail("没有" HIR + c3 + NOR "这国家代码!\n");
if (codetoteam(c4) == "未知国名")
return notify_fail("没有" HIR + c4 + NOR "这国家代码!\n");
c = ordercode(c, 4);
message_vision("$N想了半天大声喊道:“我认为" HIR + codetoteam(c1) + "、" + codetoteam(c2) + "、" + codetoteam(c3) + "、" + codetoteam(c4) + NOR "队能进入四强!押 " HIY + chinese_number(i) + NOR " 两黄金!”\n", me);
// record the pick on the player
me->set("fifa2002/4", c);
// and the amount of gold staked
me->set("fifa2002/40", i);
// mapping recording this player's bet on the board
biao = ([
"id":me->query("id"),
"name":me->query("name"),
"type":4,
"code":c,
"gold":i,
]);
break;
case "8":
// each id may bet each category only once
if (me->query("fifa2002/8"))
return notify_fail("你只能给八强下一次注!\n");
if (sscanf(c, "%s-%s-%s-%s-%s-%s-%s-%s", c1, c2, c3, c4, c5, c6, c7, c8) != 8)
return notify_fail("国家代码格式输入错!\n");
if (codetoteam(c1) == "未知国名")
return notify_fail("没有" HIR + c1 + NOR "这国家代码!\n");
if (codetoteam(c2) == "未知国名")
return notify_fail("没有" HIR + c2 + NOR "这国家代码!\n");
if (codetoteam(c3) == "未知国名")
return notify_fail("没有" HIR + c3 + NOR "这国家代码!\n");
if (codetoteam(c4) == "未知国名")
return notify_fail("没有" HIR + c4 + NOR "这国家代码!\n");
if (codetoteam(c5) == "未知国名")
return notify_fail("没有" HIR + c5 + NOR "这国家代码!\n");
if (codetoteam(c6) == "未知国名")
return notify_fail("没有" HIR + c6 + NOR "这国家代码!\n");
if (codetoteam(c7) == "未知国名")
return notify_fail("没有" HIR + c7 + NOR "这国家代码!\n");
if (codetoteam(c8) == "未知国名")
return notify_fail("没有" HIR + c8 + NOR "这国家代码!\n");
c = ordercode(c, 8);
message_vision("$N想了半天大声喊道:“我认为" HIR + codetoteam(c1) + "、" + codetoteam(c2) + "、" + codetoteam(c3) + "、" + codetoteam(c4) + "、" + codetoteam(c5) + "、" + codetoteam(c6) + "、" + codetoteam(c7) + "、" + codetoteam(c8) + NOR "队能进入八强!押 " HIY + chinese_number(i) + NOR " 两黄金!”\n", me);
// record the pick on the player
me->set("fifa2002/8", c);
// and the amount of gold staked
me->set("fifa2002/80", i);
// mapping recording this player's bet on the board
biao = ([
"id":me->query("id"),
"name":me->query("name"),
"type":8,
"code":c,
"gold":i,
]);
break;
}
// withdraw the stake from the player's bank account (1 gold = 10000 coins)
me->add("balance", -i * 10000);
tell_object(me, "钱庄已经扣除了你押下的" + chinese_number(i) + "两黄金。请等候结果吧!\n");
if (!pointerp(all_biao))
all_biao = ({biao});
else
all_biao += ({biao});
// persist to this object's save file
save();
remove_call_out("enough_rest");
// clear the busy flag after one second
call_out("enough_rest", 1);
return 1;
}
// Wizard only: close betting ("post end") or publish a result
// ("post <category> <code(s)>"); results are broadcast and persisted.
int do_post(string arg)
{
int i;
string c, c1, c2, c3, c4, c5, c6, c7, c8;
mapping end;
object me = this_player();
if (!wizardp(me))
return 0;
if (arg == "end")
{
// close betting
set("end_ya", 1);
message("channel:sys", HIM "【谣言】" HIR "某人:竞猜版开始停止下注,请关注比赛结果!\n" NOR, users());
save();
return 1;
}
// parse the wizard's command
if (!arg)
return notify_fail("命令格式:post <类别> <结果>\n");
arg = upcase(arg);
if (sscanf(arg, "%d %s", i, c) != 2)
return notify_fail("命令格式:post <类别> <结果>\n");
switch (i)
{
case 1:
message("channel:sys", HIM "【谣言】" HIR "某人:竞猜版冠军结果公布,是 " HIY + codetoteam(c) + HIR " !押对的快去兑奖啊!\n" NOR, users());
break;
case 2:
message("channel:sys", HIM "【谣言】" HIR "某人:竞猜版亚军结果公布,是 " HIY + codetoteam(c) + HIR " !押对的快去兑奖啊!\n" NOR, users());
break;
case 4:
c = ordercode(c, 4);
if (sscanf(c, "%s-%s-%s-%s", c1, c2, c3, c4) != 4)
return notify_fail("国家代码输入格式错!\n");
if (codetoteam(c1) == "未知国名")
return notify_fail("没有" HIR + c1 + NOR "这国家代码!\n");
if (codetoteam(c2) == "未知国名")
return notify_fail("没有" HIR + c2 + NOR "这国家代码!\n");
if (codetoteam(c3) == "未知国名")
return notify_fail("没有" HIR + c3 + NOR "这国家代码!\n");
if (codetoteam(c4) == "未知国名")
return notify_fail("没有" HIR + c4 + NOR "这国家代码!\n");
message("channel:sys", HIM "【谣言】" HIR "某人:竞猜版四强结果公布,是 " HIY + codetoteam(c1) + "、" + codetoteam(c2) + "、" + codetoteam(c3) + "、" + codetoteam(c4) + HIR " !押对的快去兑奖啊!\n" NOR, users());
break;
case 8:
c = ordercode(c, 8);
if (sscanf(c, "%s-%s-%s-%s-%s-%s-%s-%s", c1, c2, c3, c4, c5, c6, c7, c8) != 8)
return notify_fail("国家代码格式输入错!\n");
if (codetoteam(c1) == "未知国名")
return notify_fail("没有" HIR + c1 + NOR "这国家代码!\n");
if (codetoteam(c2) == "未知国名")
return notify_fail("没有" HIR + c2 + NOR "这国家代码!\n");
if (codetoteam(c3) == "未知国名")
return notify_fail("没有" HIR + c3 + NOR "这国家代码!\n");
if (codetoteam(c4) == "未知国名")
return notify_fail("没有" HIR + c4 + NOR "这国家代码!\n");
if (codetoteam(c5) == "未知国名")
return notify_fail("没有" HIR + c5 + NOR "这国家代码!\n");
if (codetoteam(c6) == "未知国名")
return notify_fail("没有" HIR + c6 + NOR "这国家代码!\n");
if (codetoteam(c7) == "未知国名")
return notify_fail("没有" HIR + c7 + NOR "这国家代码!\n");
if (codetoteam(c8) == "未知国名")
return notify_fail("没有" HIR + c8 + NOR "这国家代码!\n");
message("channel:sys", HIM "【谣言】" HIR "某人:竞猜版八强结果公布,是 " HIY + codetoteam(c1) + "、" + codetoteam(c2) + "、" + codetoteam(c3) + "、" + codetoteam(c4) + "、" + codetoteam(c5) + "、" + codetoteam(c6) + "、" + codetoteam(c7) + "、" + codetoteam(c8) + HIR " !押对的快去兑奖啊!\n" NOR, users());
break;
default:
return notify_fail(HIR "哪有这个类别!\n" NOR);
}
// mapping recording one published result
end = ([
"type":i,
"code":c,
]);
// append to the stored results and persist
if (!pointerp(end_biao))
end_biao = ({end});
else
end_biao += ({end});
jieguo = 1;
save();
// done
return 1;
}
// Read the board: "read rules" shows the betting rules, "read ban"
// shows published results plus all bets (redeemed vs. unredeemed).
int do_read(string arg)
{
int i, j;
object me = this_player();
string name, str, str1, str2;
mapping biao;
if (arg == "rules")
{
// rules text: the wizard adjusts this per event
write("
世界杯的冠军、亚军、四强、八强分别谁属?欢迎在此押注:
押注者以黄金为单位,最高可押一千黄金,最少也要押一两黄金。现金
不收,必须先存进钱庄。押赌后钱庄直接扣钱。押赌时间到六月十日截
止。押对八强的一赔二十,四强的一赔十,冠亚军的一赔八,比赛揭晓,
押中者按倍数返还黄金,不中者则罢。
想好了后就 "HIR"ya <类别> <国名编号> <多少两黄金> "NOR"。
例如:
想压一千黄金给阿根廷为冠军,则ya 1 AR 1000。
如果认为四强是阿根廷、法国、巴西、德国,而且想押五百黄金的
话,则ya 4 AR-BR-DE-FR 500。当然,顺序是无所谓的。
当然每个ID只能押一次,押过不许后悔!\n
类 别: "HIY"1-冠军 2-亚军 4-四强 8-八强"NOR"
国家编号:"HIC"
A组 法国FR 丹麦DK 乌拉圭UY 塞内加尔SN
B组 南非ZA 西班牙ES 巴拉圭PY 斯洛文尼亚SI
C组 巴西BR 中国CN 土耳其TU 哥斯达黎加CR
D组 波兰PL 美国US 韩国KP 葡萄牙PT
E组 德国DE 沙特SA 爱尔兰IE 喀麦隆CM
F组 瑞典SE 阿根廷AR 英格兰UK 尼日利亚NG
G组 意大利IT 墨西哥MX 克罗地亚HR 厄瓜多尔EC
H组 日本JP 俄罗斯RU 比利时BE 突尼斯TN
\n"NOR);
write("开奖后请使用duixian <类别> 指令,你的奖金将自动进入你帐户。\n");
return 1;
}
if (arg == "ban")
{
// 'num' nonzero means there is score data to show a banner for
if (num)
write("★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★★\n" HIG "世界杯赛投注竞猜版 ");
write(!jieguo ? RED "还在投注中!" : HIR "开始兑奖(duijiang)了!");
// no result data published yet
if (!pointerp(end_biao) || !sizeof(end_biao))
write(HIY "\n巫师尚未设定结果。\n" NOR);
else
{
write(HIY "\n比赛结果\n" NOR);
for (i = 0; i < sizeof(end_biao); i++)
{
switch ((int)end_biao[i]["type"])
{
case 1:
write(HIR "\n冠军 ");
break;
case 2:
write(HIR "\n亚军 ");
break;
case 4:
write(HIR "\n四强 ");
break;
case 8:
write(HIR "\n八强 ");
break;
}
// print the team names contained in the result code string
for (j = 0; j < sizeof(code); j++)
if (strsrch(end_biao[i]["code"], code[j]) >= 0)
write(HIY + team[j] + " " NOR);
}
}
// no bets recorded yet
if (!pointerp(all_biao) || !sizeof(all_biao))
{
write(HIG "\n\n还没有人开始投注。\n");
return 1;
}
write(HIG "\n\n已有 " + sizeof(all_biao) + " 个玩家投注:\n" HIY "玩家名 投注类别 押黄金数 国家名\n" HIG "———————————————————————————————————\n" NOR);
str1 = HIM "已经兑过奖的玩家: \n" NOR;
str2 = HIM "还未兑过奖的玩家: \n" NOR;
// all_biao is a global holding every bet, see top of file
for (i = 0; i < sizeof(all_biao); i++)
{
// format each bettor's name and id
str = sprintf("%18s ", all_biao[i]["name"] + "(" + all_biao[i]["id"] + ")");
switch ((int)all_biao[i]["type"])
{
case 1:
str += HIR "冠军" NOR;
break;
case 2:
str += HIR "亚军" NOR;
break;
case 4:
str += HIR "四强" NOR;
break;
case 8:
str += HIR "八强" NOR;
break;
}
str += sprintf("%8s", all_biao[i]["gold"] + "两 ");
for (j = 0; j < sizeof(code); j++)
if (strsrch(all_biao[i]["code"], code[j]) >= 0)
str += team[j] + " ";
// "dui" is set when a bet has been redeemed; unredeemed go to str2
if (!(int)all_biao[i]["dui"])
str2 += str + "\n";
// redeemed bets are listed under str1
else
str1 += str + "\n";
}
// print the two lists
write(str1 + "\n" + str2 + "\n" NOR);
return 1;
}
else
write("你要看什么?押注规则请read rules,押注情况请read ban。\n");
return 1;
}
// call_out target: release the board's busy lock.
void enough_rest()
{
delete_temp("busy");
}
// Redeem winnings: duijiang/duixian <category>. Winners are paid
// stake * odds (1/2 -> x8, 4 -> x10, 8 -> x20) into their bank account;
// losers receive a consolation chicken leg.
int do_duijiang(string arg)
{
int i, j, k, item, t;
string result;
object ob, me = this_player();
mapping biao, fifa2002;
// no result published yet means redemption has not started
if (!jieguo)
return notify_fail("还未到兑奖时间!\n");
if (!query("end_ya"))
return notify_fail("还未到兑奖时间!\n");
if (query_temp("busy"))
return notify_fail("稍候........\n");
set_temp("busy", 1);
k = 0;
// parse the command
if (!arg)
return notify_fail("命令格式:duijiang <类别>\n");
switch (arg)
{
case "1":
item = 1;
t = 8;
break;
case "2":
item = 2;
t = 8;
break;
case "4":
item = 4;
t = 10;
break;
case "8":
item = 8;
t = 20;
break;
default:
return notify_fail("没有这个项目吧!\n");
}
// find this player's bet in the requested category
for (i = 0; i < sizeof(all_biao); i++)
{
if ((int)all_biao[i]["type"] == item &&
all_biao[i]["id"] == me->query("id"))
{
if (all_biao[i]["dui"])
return notify_fail("你已兑过奖啦!\n");
result = all_biao[i]["code"];
j = all_biao[i]["gold"];
// mark this bet as redeemed
all_biao[i]["dui"] = 1;
// persist
save();
}
}
// compare the bet against the published result
for (i = 0; i < sizeof(end_biao); i++)
{
if ((int)end_biao[i]["type"] == item &&
result == end_biao[i]["code"])
{
k = j * t;
}
}
if (k > 0)
// winner: pay the prize into the bank account
{
write("你押 " + j + " 两黄金,按规定得奖金 " + k + " 两黄金,已划入你的钱庄帐户!\n");
me->add("balance", k * 10000);
message("channel:sys", HIM "【谣言】" HIR "某人:" + me->name() + "兑奖得到 " + chinese_number(k) + " 两黄金!\n" NOR, users());
return 1;
}
else
// loser: hand out a consolation prize
{
ob = new ("/clone/food/jitui");
ob->move(me);
write("你没押对,但发一个安慰奖,送你一根鸡腿吧!\n");
message_vision(HIY "竞猜表上头突然油光一闪,“扑”地掉下一根油光光的鸡腿,正好掉进$N的手中。\n" NOR, me);
}
remove_call_out("enough_rest");
call_out("enough_rest", 1);
return 1;
}
// Normalize a dash-separated list of country codes into the canonical
// order of the global 'code' table, so different input orders compare
// equal. arg1 is the number of codes expected (4 or 8).
string ordercode(string arg, int arg1)
{
int i, j;
string nc, nc1, nc2, nc3, nc4, nc5, nc6, nc7, nc8;
j = 1;
// collect the codes present in 'arg', in table order
for (i = 0; i < sizeof(code); i++)
if (strsrch(arg, code[i]) >= 0)
{
switch (j)
{
case 1:
nc1 = code[i];
break;
case 2:
nc2 = code[i];
break;
case 3:
nc3 = code[i];
break;
case 4:
nc4 = code[i];
break;
case 5:
nc5 = code[i];
break;
case 6:
nc6 = code[i];
break;
case 7:
nc7 = code[i];
break;
case 8:
nc8 = code[i];
break;
}
j++;
}
// rejoin with dashes in canonical order
if (arg1 == 4)
nc = nc1 + "-" + nc2 + "-" + nc3 + "-" + nc4;
if (arg1 == 8)
nc = nc1 + "-" + nc2 + "-" + nc3 + "-" + nc4 + "-" + nc5 + "-" + nc6 + "-" + nc7 + "-" + nc8;
return nc;
}
// Uppercase the latin letters of a country-code string, one letter at a
// time, so players may type codes in lower case.
string upcase(string arg)
{
arg = replace_string(arg, "a", "A");
arg = replace_string(arg, "b", "B");
arg = replace_string(arg, "c", "C");
arg = replace_string(arg, "d", "D");
arg = replace_string(arg, "e", "E");
arg = replace_string(arg, "f", "F");
arg = replace_string(arg, "g", "G");
arg = replace_string(arg, "h", "H");
arg = replace_string(arg, "i", "I");
arg = replace_string(arg, "j", "J");
arg = replace_string(arg, "k", "K");
arg = replace_string(arg, "l", "L");
arg = replace_string(arg, "m", "M");
arg = replace_string(arg, "n", "N");
arg = replace_string(arg, "o", "O");
arg = replace_string(arg, "p", "P");
arg = replace_string(arg, "q", "Q");
arg = replace_string(arg, "r", "R");
arg = replace_string(arg, "s", "S");
arg = replace_string(arg, "t", "T");
arg = replace_string(arg, "u", "U");
arg = replace_string(arg, "v", "V");
arg = replace_string(arg, "w", "W");
arg = replace_string(arg, "x", "X");
arg = replace_string(arg, "y", "Y");
arg = replace_string(arg, "z", "Z");
return arg;
}
// Map a two-letter country code to its Chinese team name; returns
// "未知国名" (unknown country) when the code is not in the table.
string codetoteam(string arg)
{
int idx;
for (idx = sizeof(code) - 1; idx >= 0; idx--)
if (code[idx] == arg)
return team[idx];
return "未知国名";
}
| 25.700499 | 281 | 0.555484 | [
"object"
] |
c13845474c759787afa14029461efd6dccba55c4 | 3,938 | c | C | ncdump/utils.c | iridl/netcdf | 9be5d2b38242dec78679ba84e9be43f0e6e6f066 | [
"NetCDF"
] | 1 | 2021-12-01T07:24:43.000Z | 2021-12-01T07:24:43.000Z | ncdump/utils.c | iridl/netcdf | 9be5d2b38242dec78679ba84e9be43f0e6e6f066 | [
"NetCDF"
] | null | null | null | ncdump/utils.c | iridl/netcdf | 9be5d2b38242dec78679ba84e9be43f0e6e6f066 | [
"NetCDF"
] | 3 | 2019-04-29T14:57:37.000Z | 2021-12-01T07:24:44.000Z | /*********************************************************************
* Copyright 2011, University Corporation for Atmospheric Research
* See netcdf/README file for copying and redistribution conditions.
* $Id$
*********************************************************************/
#include "config.h"
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <assert.h>
#include <ctype.h>
#include "utils.h"
/*
* Print error message to stderr and exit
*/
/*
 * Print a printf-style error message, prefixed with the program name,
 * to stderr and exit with failure status. Never returns.
 */
void
error(const char *fmt, ...)
{
va_list args ;
(void) fprintf(stderr,"%s: ", progname);
va_start(args, fmt) ;
(void) vfprintf(stderr,fmt,args) ;
va_end(args) ;
(void) fprintf(stderr, "\n") ;
(void) fflush(stderr); /* to ensure log files are current */
exit(EXIT_FAILURE);
}
/*
 * malloc() wrapper that never returns NULL: allocates at least one byte
 * (malloc(0) is not portable) and aborts via error() on failure.
 */
void *
emalloc ( /* check return from malloc */
size_t size)
{
void *mem;
if (size == 0)
size = 1; /* malloc(0) not portable */
mem = malloc(size);
if (mem == NULL) {
error ("out of memory\n");
}
return mem;
}
/*
 * Report the netCDF error 'err' with its source location and exit.
 * NOTE(review): prints and exits unconditionally — presumably only
 * invoked (e.g. via a macro) when err != NC_NOERR; confirm callers.
 */
void
check(int err, const char* file, const int line)
{
fprintf(stderr,"%s\n",nc_strerror(err));
fprintf(stderr,"Location: file %s; line %d\n", file,line);
fflush(stderr); fflush(stdout);
exit(1);
}
/*
* Returns malloced name with chars special to CDL escaped.
* Caller should free result when done with it.
*/
char*
escaped_name(const char* cp) {
char *ret; /* string returned */
char *sp;
assert(cp != NULL);
/* For some reason, and on some machines (e.g. tweety)
utf8 characters such as \343 are considered control character. */
/* if(*cp && (isspace(*cp) | iscntrl(*cp)))*/
if((*cp >= 0x01 && *cp <= 0x20) || (*cp == 0x7f))
{
error("name begins with space or control-character: %c",*cp);
}
ret = emalloc(4*strlen(cp) + 1); /* max if every char escaped */
sp = ret;
*sp = 0; /* empty name OK */
/* Special case: leading number allowed, but we must escape it for CDL */
if((*cp >= '0' && *cp <= '9'))
{
*sp++ = '\\';
}
for (; *cp; cp++) {
if (isascii(*cp)) {
if(iscntrl(*cp)) { /* render control chars as two hex digits, \%xx */
snprintf(sp, 4,"\\%%%.2x", *cp);
sp += 4;
} else {
switch (*cp) {
case ' ':
case '!':
case '"':
case '#':
case '$':
case '%':
case '&':
case '\'':
case '(':
case ')':
case '*':
case ',':
case ':':
case ';':
case '<':
case '=':
case '>':
case '?':
case '[':
case ']':
case '\\':
case '^':
case '`':
case '{':
case '|':
case '}':
case '~':
*sp++ = '\\';
*sp++ = *cp;
break;
default: /* includes '/' */
*sp++ = *cp;
break;
}
}
} else { /* not ascii, assume just UTF-8 byte */
*sp++ = *cp;
}
}
*sp = 0;
return ret;
}
/*
* Print name with escapes for special characters
*/
void
print_name(const char* name) {
char *ename = escaped_name(name);
fputs(ename, stdout);
free(ename);
}
/* Missing functionality that should be in nc_inq_dimid(), to get
* dimid from a full dimension path name that may include group
* names */
int
nc_inq_dimid2(int ncid, const char *dimname, int *dimidp) {
int ret = NC_NOERR;
/* If '/' doesn't occur in dimname, just return id found by
* nc_inq_dimid() */
char *sp = strrchr(dimname, '/');
if(!sp) { /* No '/' in dimname, so return nc_inq_dimid() result */
ret = nc_inq_dimid(ncid, dimname, dimidp);
}
#ifdef USE_NETCDF4
else { /* Parse group name out and get dimid using that */
size_t grp_namelen = sp - dimname;
char *grpname = emalloc(grp_namelen + 1);
int grpid;
strncpy(grpname, dimname, grp_namelen);
grpname[grp_namelen] = '\0';
ret = nc_inq_grp_full_ncid(ncid, grpname, &grpid);
if(ret == NC_NOERR) {
ret = nc_inq_dimid(grpid, dimname, dimidp);
}
free(grpname);
}
#endif /* USE_NETCDF4 */
return ret;
}
| 22.632184 | 77 | 0.548756 | [
"render"
] |
c13cd49f006c11a75458e90d3fa737d30ccd8190 | 792 | h | C | src/core/transform.h | jjzhang166/MirageRender | a51c3dff7db6eacaf111b868ad498caa8fe5a3d2 | [
"MIT"
] | 166 | 2015-11-28T21:24:03.000Z | 2022-03-17T12:29:47.000Z | src/core/transform.h | jjzhang166/MirageRender | a51c3dff7db6eacaf111b868ad498caa8fe5a3d2 | [
"MIT"
] | 5 | 2016-09-27T06:18:32.000Z | 2017-05-26T09:31:29.000Z | src/core/transform.h | jjzhang166/MirageRender | a51c3dff7db6eacaf111b868ad498caa8fe5a3d2 | [
"MIT"
] | 16 | 2016-01-24T07:57:27.000Z | 2021-08-04T03:31:40.000Z | #ifndef TRANSFORM_H
#define TRANSFORM_H
// mirage includes
#include "../math/vec3.h"
#include "../math/quaternion.h"
#include "../math/mat4.h"
namespace mirage
{
// Position / orientation / scale triple for a scene entity.
// Declaration only -- method bodies live in the corresponding .cpp.
class Transform
{
public:
// Defaults: origin position, identity quaternion, unit scale.
Transform(vec3 position = vec3(), quaternion orientation = quaternion(), vec3 scale = vec3(1, 1, 1));
void setPosition(const vec3 &position);
void setOrientation(const quaternion &orientation);
void setScale(const vec3 &scale);
// Explicitly set/clear the changed flag (see reqStateUpdate()).
void setState(const bool state);
vec3 getPosition() const;
quaternion getOrientation() const;
vec3 getScale() const;
Transform inverse() const;
mat4 getMatrix() const; // composed transform matrix
// NOTE(review): presumably reports m_stateChanged so dependents know a
// refresh is needed -- confirm in the implementation file.
bool reqStateUpdate() const;
private:
vec3 m_position;
quaternion m_orientation;
vec3 m_scale;
bool m_stateChanged; // set when any component of the transform changes
};
}
#endif // TRANSFORM_H
| 22 | 105 | 0.699495 | [
"transform"
] |
c1446ace9cae9df87fe22a6000f02c15324fb91b | 11,739 | c | C | source/example_pair_host_and_optiga_using_pre_shared_secret.c | Infineon/mtb-example-optiga-crypto | 6748c6dfe0504a3c78c7c2332e48dc9407064b6c | [
"MIT"
] | null | null | null | source/example_pair_host_and_optiga_using_pre_shared_secret.c | Infineon/mtb-example-optiga-crypto | 6748c6dfe0504a3c78c7c2332e48dc9407064b6c | [
"MIT"
] | null | null | null | source/example_pair_host_and_optiga_using_pre_shared_secret.c | Infineon/mtb-example-optiga-crypto | 6748c6dfe0504a3c78c7c2332e48dc9407064b6c | [
"MIT"
] | null | null | null | /******************************************************************************
* File Name: example_pair_host_and_optiga_using_pre_shared_secret.c
*
* Description: This file provides an example for pairing the Host and OPTIGA
* using a preshared secret.
*
* Related Document: See README.md
*
*
* The MIT License
*
* Copyright (c) 2021 Infineon Technologies AG
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE
*******************************************************************************/
#include "optiga/optiga_util.h"
#include "optiga/optiga_crypt.h"
#include "optiga/pal/pal_os_datastore.h"
#include "optiga_example.h"
#ifdef OPTIGA_COMMS_SHIELDED_CONNECTION
#ifndef OPTIGA_INIT_DEINIT_DONE_EXCLUSIVELY
extern void example_optiga_init(void);
extern void example_optiga_deinit(void);
#endif
/* Filled by START/READ_PERFORMANCE_MEASUREMENT around the pairing flow. */
uint32_t time_taken_for_pairing = 0;
// Value of Creation state
#define LCSO_STATE_CREATION (0x01)
// Value of Operational state
#define LCSO_STATE_OPERATIONAL (0x07)
//Currently set to Creation state(default value). At the real time/customer side this needs to be LCSO_STATE_OPERATIONAL (0x07)
#define FINAL_LCSO_STATE (LCSO_STATE_CREATION)
/* Platform Binding Shared Secret (0xE140) Metadata to be updated.
 * Raw tag/length byte sequence written verbatim via
 * optiga_util_write_metadata(); see the OPTIGA solution reference for
 * the tag meanings annotated below. */
const uint8_t platform_binding_shared_secret_metadata_final [] = {
//Metadata to be updated
0x20, 0x17,
// LcsO
0xC0, 0x01,
FINAL_LCSO_STATE, // Refer Macro to see the value or some more notes
// Change/Write Access tag
0xD0, 0x07,
// This allows updating the binding secret during the runtime using shielded connection
// If not required to update the secret over the runtime, set this to NEV and
// update Metadata length accordingly
0xE1, 0xFC, LCSO_STATE_OPERATIONAL, // LcsO < Operational state
0xFE,
0x20, 0xE1, 0x40,
// Read Access tag
0xD1, 0x03,
0xE1, 0xFC, LCSO_STATE_OPERATIONAL, // LcsO < Operational state
// Execute Access tag
0xD3, 0x01,
0x00, // Always
// Data object Type
0xE8, 0x01,
0x22, // Platform binding secret type
};
/**
 * Callback when optiga_util_xxxx/optiga_crypt_xxxx operation is completed asynchronously
 */
static volatile optiga_lib_status_t optiga_lib_status;
//lint --e{818} suppress "argument "context" is not used in the sample provided"
static void optiga_lib_callback(void * context, optiga_lib_status_t return_status)
{
    if (context != NULL)
    {
        /* Hook point: forward the completion to an upper layer here. */
    }
    /* Publish the completion status for the WAIT_AND_CHECK_STATUS
     * busy-wait loops to observe. */
    optiga_lib_status = return_status;
}
/* Pair the host with the OPTIGA by provisioning a shared platform-binding
 * secret in data object 0xE140:
 *   - read 0xE140's metadata and bail out early if its LcsO byte is
 *     already >= operational (pairing done),
 *   - have the OPTIGA generate a 64-byte TRNG secret,
 *   - write it to the OPTIGA (0xE140) and to host-side storage,
 *   - finally lock down 0xE140 with the metadata defined above.
 * The do/while(FALSE) plus WAIT_AND_CHECK_STATUS macros implement
 * early-exit error handling; both instances are destroyed on all paths.
 * Returns OPTIGA_LIB_SUCCESS on success. */
optiga_lib_status_t pair_host_and_optiga_using_pre_shared_secret(void)
{
uint16_t bytes_to_read;
uint8_t platform_binding_secret[64];
uint8_t platform_binding_secret_metadata[44];
optiga_lib_status_t return_status = !OPTIGA_LIB_SUCCESS;
pal_status_t pal_return_status;
optiga_util_t * me_util = NULL;
optiga_crypt_t * me_crypt = NULL;
do
{
/**
 * 1. Create OPTIGA Util and Crypt Instances
 */
me_util = optiga_util_create(0, optiga_lib_callback, NULL);
if (NULL == me_util)
{
break;
}
me_crypt = optiga_crypt_create(0, optiga_lib_callback, NULL);
if (NULL == me_crypt)
{
break;
}
/**
 * 2. Initialize the protection level and protocol version for the instances
 */
OPTIGA_UTIL_SET_COMMS_PROTECTION_LEVEL(me_util,OPTIGA_COMMS_NO_PROTECTION);
OPTIGA_UTIL_SET_COMMS_PROTOCOL_VERSION(me_util,OPTIGA_COMMS_PROTOCOL_VERSION_PRE_SHARED_SECRET);
OPTIGA_CRYPT_SET_COMMS_PROTECTION_LEVEL(me_crypt,OPTIGA_COMMS_NO_PROTECTION);
OPTIGA_CRYPT_SET_COMMS_PROTOCOL_VERSION(me_crypt,OPTIGA_COMMS_PROTOCOL_VERSION_PRE_SHARED_SECRET);
/**
 * 3. Read Platform Binding Shared secret (0xE140) data object metadata from OPTIGA
 *    using optiga_util_read_metadata.
 */
bytes_to_read = sizeof(platform_binding_secret_metadata);
optiga_lib_status = OPTIGA_LIB_BUSY;
START_PERFORMANCE_MEASUREMENT(time_taken_for_pairing);
return_status = optiga_util_read_metadata(me_util,
0xE140,
platform_binding_secret_metadata,
&bytes_to_read);
WAIT_AND_CHECK_STATUS(return_status, optiga_lib_status);
/**
 * 4. Validate LcsO in the metadata.
 *    Skip the rest of the procedure if LcsO is greater than or equal to operational state(0x07)
 *    (offset 4 is the LcsO value byte in the returned metadata TLV)
 */
if (platform_binding_secret_metadata[4] >= LCSO_STATE_OPERATIONAL)
{
// The LcsO is already greater than or equal to operational state
break;
}
/**
 * 5. Generate Random using optiga_crypt_random
 *       - Specify the Random type as TRNG
 *    a. The maximum supported size of secret is 64 bytes.
 *       The minimum recommended is 32 bytes.
 *    b. If the host platform doesn't support random generation,
 *       use OPTIGA to generate the maximum size chosen.
 *       else choose the appropriate length of random to be generated by OPTIGA
 *
 */
optiga_lib_status = OPTIGA_LIB_BUSY;
return_status = optiga_crypt_random(me_crypt,
OPTIGA_RNG_TYPE_TRNG,
platform_binding_secret,
sizeof(platform_binding_secret));
WAIT_AND_CHECK_STATUS(return_status, optiga_lib_status);
/**
 * 6. Generate random on Host
 *    If the host platform doesn't support, skip this step
 */
/**
 * 7. Write random(secret) to OPTIGA platform Binding shared secret data object (0xE140)
 */
optiga_lib_status = OPTIGA_LIB_BUSY;
OPTIGA_UTIL_SET_COMMS_PROTECTION_LEVEL(me_util,OPTIGA_COMMS_NO_PROTECTION);
return_status = optiga_util_write_data(me_util,
0xE140,
OPTIGA_UTIL_ERASE_AND_WRITE,
0,
platform_binding_secret,
sizeof(platform_binding_secret));
WAIT_AND_CHECK_STATUS(return_status, optiga_lib_status);
/**
 * 8. Write/store the random(secret) on the Host platform
 *
 */
pal_return_status = pal_os_datastore_write(OPTIGA_PLATFORM_BINDING_SHARED_SECRET_ID,
platform_binding_secret,
sizeof(platform_binding_secret));
if (PAL_STATUS_SUCCESS != pal_return_status)
{
//Storing of Pre-shared secret on Host failed.
optiga_lib_status = pal_return_status;
break;
}
/**
 * 9. Update metadata of OPTIGA Platform Binding shared secret data object (0xE140)
 */
optiga_lib_status = OPTIGA_LIB_BUSY;
OPTIGA_UTIL_SET_COMMS_PROTECTION_LEVEL(me_util,OPTIGA_COMMS_NO_PROTECTION);
return_status = optiga_util_write_metadata(me_util,
0xE140,
platform_binding_shared_secret_metadata_final,
sizeof(platform_binding_shared_secret_metadata_final));
WAIT_AND_CHECK_STATUS(return_status, optiga_lib_status);
READ_PERFORMANCE_MEASUREMENT(time_taken_for_pairing);
return_status = OPTIGA_LIB_SUCCESS;
} while(FALSE);
OPTIGA_EXAMPLE_LOG_STATUS(return_status);
if(me_util)
{
//Destroy the instance after the completion of usecase if not required.
return_status = optiga_util_destroy(me_util);
if(OPTIGA_LIB_SUCCESS != return_status)
{
//lint --e{774} suppress This is a generic macro
OPTIGA_EXAMPLE_LOG_STATUS(return_status);
}
}
if(me_crypt)
{
//Destroy the instance after the completion of usecase if not required.
return_status = optiga_crypt_destroy(me_crypt);
if(OPTIGA_LIB_SUCCESS != return_status)
{
//lint --e{774} suppress This is a generic macro
OPTIGA_EXAMPLE_LOG_STATUS(return_status);
}
}
return return_status;
}
/**
 * The below example demonstrates pairing the Host and OPTIGA using a preshared secret for the first time.
 *
 * Note:
 * 1) If the below example is executed once, the LcsO of Platform Binding shared secret is set to Initialization.
 *    The LcsO can't be reverted to previous states.
 *
 *    Please ensure the access conditions and other required settings in the metadata must be accordingly
 *    before setting the LcsO to Initialization state.
 *
 * 2) The metadata gets written in this example is just an example. The user must update this to the respective
 *    needs including LcsO state and access conditions
 *
 * Preconditions: The optiga_util_open_application must be executed before invoking the below example.
 *
 */
/* Thin wrapper: optionally opens/closes the OPTIGA application around the
 * pairing routine above, and logs status plus elapsed time. */
void example_pair_host_and_optiga_using_pre_shared_secret(void)
{
optiga_lib_status_t return_status;
#ifndef OPTIGA_INIT_DEINIT_DONE_EXCLUSIVELY
/**
 * Open the application on OPTIGA which is a precondition to perform any other operations
 * using optiga_util_open_application
 */
example_optiga_init();
#endif //OPTIGA_INIT_DEINIT_DONE_EXCLUSIVELY
OPTIGA_EXAMPLE_LOG_MESSAGE(__FUNCTION__);
return_status = pair_host_and_optiga_using_pre_shared_secret();
#ifndef OPTIGA_INIT_DEINIT_DONE_EXCLUSIVELY
/**
 * Close the application on OPTIGA after all the operations are executed
 * using optiga_util_close_application
 */
example_optiga_deinit();
#endif //OPTIGA_INIT_DEINIT_DONE_EXCLUSIVELY
OPTIGA_EXAMPLE_LOG_PERFORMANCE_VALUE(time_taken_for_pairing,return_status);
}
#endif
| 39.79322 | 128 | 0.622881 | [
"object"
] |
c147920d9f0ba66314392d611293ed17b94037ba | 3,397 | h | C | chrome/browser/media/router/providers/cast/cast_media_controller.h | Yannic/chromium | ab32e8aacb08c9fce0dc4bf09eec456ba46e3710 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2020-09-02T03:05:41.000Z | 2022-03-30T04:40:55.000Z | chrome/browser/media/router/providers/cast/cast_media_controller.h | blueboxd/chromium-legacy | 07223bc94bd97499909c9ed3c3f5769d718fe2e0 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 45 | 2020-09-02T03:21:37.000Z | 2022-03-31T22:19:45.000Z | chrome/browser/media/router/providers/cast/cast_media_controller.h | Yannic/chromium | ab32e8aacb08c9fce0dc4bf09eec456ba46e3710 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 8 | 2020-07-22T18:49:18.000Z | 2022-02-08T10:27:16.000Z | // Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_CAST_CAST_MEDIA_CONTROLLER_H_
#define CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_CAST_CAST_MEDIA_CONTROLLER_H_
#include "base/macros.h"
#include "components/cast_channel/cast_message_util.h"
#include "components/media_router/common/mojom/media_controller.mojom.h"
#include "components/media_router/common/mojom/media_status.mojom.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "mojo/public/cpp/bindings/remote.h"
namespace base {
class Value;
}
namespace media_router {
class AppActivity;
class CastSession;
// Bitmask flags (each value is a distinct 1 << n bit, combinable with
// bitwise OR) describing which media commands a session supports.
// NOTE(review): presumably mirrors the Cast media-channel
// supportedMediaCommands field -- confirm against the Cast protocol docs.
enum SupportedMediaCommand {
kSupportedMediaCommandPause = 1 << 0,
kSupportedMediaCommandSeek = 1 << 1,
kSupportedMediaCommandStreamVolume = 1 << 2,
kSupportedMediaCommandStreamMute = 1 << 3,
kSupportedMediaCommandSkipForward = 1 << 4,
kSupportedMediaCommandSkipBackward = 1 << 5,
kSupportedMediaCommandQueueNext = 1 << 6,
kSupportedMediaCommandQueuePrev = 1 << 7,
kSupportedMediaCommandQueueShuffle = 1 << 8,
kSupportedMediaCommandSkipAd = 1 << 9,
kSupportedMediaCommandQueueRepeatAll = 1 << 10,
kSupportedMediaCommandQueueRepeatOne = 1 << 11,
kSupportedMediaCommandEditTracks = 1 << 12,
kSupportedMediaCommandPlaybackRate = 1 << 13,
kSupportedMediaCommandLike = 1 << 14,
kSupportedMediaCommandDislike = 1 << 15,
kSupportedMediaCommandFollow = 1 << 16,
kSupportedMediaCommandUnfollow = 1 << 17,
kSupportedMediaCommandStreamTransfer = 1 << 18,
};
// Per-session object for sending media control commands to a Cast receiver, and
// notifying an observer of updates on the session's media status.
// Copying is disabled; one instance per media session.
class CastMediaController : public mojom::MediaController {
public:
CastMediaController(AppActivity* activity,
mojo::PendingReceiver<mojom::MediaController> receiver,
mojo::PendingRemote<mojom::MediaStatusObserver> observer);
CastMediaController(const CastMediaController&) = delete;
CastMediaController& operator=(const CastMediaController&) = delete;
~CastMediaController() override;
// mojom::MediaController overrides:
void Play() override;
void Pause() override;
void SetMute(bool mute) override;
void SetVolume(float volume) override;
void Seek(base::TimeDelta time) override;
void NextTrack() override;
void PreviousTrack() override;
// These methods may notify the MediaStatusObserver that the status has been
// updated.
void SetSession(const CastSession& session);
void SetMediaStatus(const base::Value& media_status);
const std::string& sender_id() const { return sender_id_; }
private:
// Helpers building outgoing v2 media/volume request messages.
base::Value CreateMediaRequest(cast_channel::V2MessageType type);
base::Value CreateVolumeRequest();
// Parses an incoming media status message into media_status_.
void UpdateMediaStatus(const base::Value& message_value);
const std::string sender_id_;
AppActivity* const activity_; // not owned
mojom::MediaStatus media_status_; // last known status, pushed to observer_
std::string session_id_;
int media_session_id_;
mojo::Receiver<mojom::MediaController> receiver_;
mojo::Remote<mojom::MediaStatusObserver> observer_;
};
} // namespace media_router
#endif // CHROME_BROWSER_MEDIA_ROUTER_PROVIDERS_CAST_CAST_MEDIA_CONTROLLER_H_
| 35.385417 | 80 | 0.773918 | [
"object"
] |
c14e63a99afba11ea67858e3481d56570c5e75f8 | 8,954 | h | C | tools/analyzer/index.h | sekiguchi-nagisa/ydsh | 19b94962e076d4dffacb6b0236147d84dbe0fd53 | [
"Apache-2.0"
] | 16 | 2017-04-30T18:12:59.000Z | 2022-01-10T10:04:02.000Z | tools/analyzer/index.h | sekiguchi-nagisa/ydsh | 19b94962e076d4dffacb6b0236147d84dbe0fd53 | [
"Apache-2.0"
] | 564 | 2015-01-30T19:14:26.000Z | 2022-03-31T15:16:36.000Z | tools/analyzer/index.h | sekiguchi-nagisa/ydsh | 19b94962e076d4dffacb6b0236147d84dbe0fd53 | [
"Apache-2.0"
] | 2 | 2020-05-12T11:04:20.000Z | 2021-04-05T19:20:25.000Z | /*
* Copyright (C) 2021 Nagisa Sekiguchi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef YDSH_TOOLS_ANALYZER_INDEX_H
#define YDSH_TOOLS_ANALYZER_INDEX_H
#include <functional>
#include <vector>
#include <misc/buffer.hpp>
#include <misc/enum_util.hpp>
#include <misc/resource.hpp>
#include <misc/result.hpp>
#include <misc/string_ref.hpp>
#include <misc/token.hpp>
namespace ydsh::lsp {
/* Compact (pos, size, modId) reference to one symbol occurrence.
 * size is stored in 16 bits, which is why create() can fail. */
class SymbolRef {
private:
unsigned int pos;
unsigned short size;
unsigned short modId;
public:
/* Returns an empty Optional when token.size does not fit in 16 bits. */
static Optional<SymbolRef> create(Token token, unsigned short modId) {
if (token.size > UINT16_MAX) {
return {};
}
return SymbolRef(token.pos, static_cast<unsigned short>(token.size), modId);
}
SymbolRef(unsigned int pos, unsigned short size, unsigned short modId)
: pos(pos), size(size), modId(modId) {}
unsigned int getPos() const { return this->pos; }
Token getToken() const {
return Token{
.pos = this->pos,
.size = this->size,
};
}
unsigned short getModId() const { return this->modId; }
/* Lexicographic order on (modId, pos). */
bool operator<(SymbolRef o) const {
return this->getModId() < o.getModId() ||
(!(o.getModId() < this->getModId()) && this->getPos() < o.getPos());
}
};
/* (module id, byte position) pair identifying the symbol to look up. */
// for symbol lookup
struct SymbolRequest {
unsigned short modId;
unsigned int pos;
};
/* Common base for declaration records: source location (pos/size/modId)
 * plus the list of references that point back at this declaration. */
class DeclBase {
private:
unsigned int pos;
unsigned short size;
unsigned short modId;
FlexBuffer<SymbolRef> refs; // all known references to this declaration
protected:
DeclBase(unsigned int pos, unsigned short size, unsigned short modId)
: pos(pos), size(size), modId(modId) {}
public:
unsigned int getPos() const { return this->pos; }
unsigned short getSize() const { return this->size; }
unsigned short getModId() const { return this->modId; }
Token getToken() const {
return Token{
.pos = this->pos,
.size = this->size,
};
}
const FlexBuffer<SymbolRef> &getRefs() const { return this->refs; }
void addRef(SymbolRef ref);
SymbolRef toRef() const { return SymbolRef(this->pos, this->size, this->modId); }
/* NOTE(review): compares this token's END against the other's START --
 * a strict weak ordering only for non-overlapping ranges; presumably
 * intentional for sorted range lookup. Confirm before reuse. */
bool operator<(const DeclBase &o) const {
return this->getModId() < o.getModId() ||
(!(o.getModId() < this->getModId()) && this->getToken().endPos() < o.getPos());
}
};
/* A declaration (variable, function, command, type alias, module import...)
 * with its mangled name and hover text.  Strings are owned copies (strdup). */
class DeclSymbol : public DeclBase {
public:
enum class Kind : unsigned char {
VAR,
LET,
IMPORT_ENV,
EXPORT_ENV,
CONST,
FUNC,
BUILTIN_CMD,
CMD,
TYPE_ALIAS,
MOD, // for named import
};
enum class Attr : unsigned char { // bit flags (see allow_enum_bitop below)
GLOBAL = 1u << 0u,
PUBLIC = 1u << 1u,
};
private:
Kind kind;
Attr attr;
CStrPtr mangledName;
CStrPtr info; // hover information
public:
/* Returns an empty Optional when token.size does not fit in 16 bits.
 * A null info is replaced by the "(dummy)" placeholder. */
static Optional<DeclSymbol> create(Kind kind, Attr attr, Token token, unsigned short modId,
const std::string &name, const char *info = nullptr) {
if (token.size > UINT16_MAX) {
return {};
}
return DeclSymbol(kind, attr, token.pos, static_cast<unsigned short>(token.size), modId, name,
info != nullptr ? info : "(dummy)");
}
DeclSymbol(Kind kind, Attr attr, unsigned int pos, unsigned short size, unsigned short mod,
const std::string &name, const char *info)
: DeclBase(pos, size, mod), kind(kind), attr(attr),
mangledName(CStrPtr(strdup(name.c_str()))), info(CStrPtr(strdup(info))) {}
Kind getKind() const { return this->kind; }
Attr getAttr() const { return this->attr; }
StringRef getMangledName() const { return this->mangledName.get(); }
StringRef getInfo() const { return this->info.get(); }
/**
 * for Kind::MOD
 * @return
 * if kind is not Kind::MOD, return {0, false}
 */
std::pair<unsigned short, bool> getInfoAsModId() const;
/* Heterogeneous comparator for binary search by byte position. */
struct Compare {
bool operator()(const DeclSymbol &x, unsigned int y) const { return x.getToken().endPos() < y; }
bool operator()(unsigned int x, const DeclSymbol &y) const { return x < y.getToken().pos; }
};
static std::string mangle(Kind k, StringRef name);
static std::string demangle(Kind k, StringRef mangledName);
/* True for kinds whose name lives in the variable namespace. */
static bool isVarName(Kind k) {
return k != Kind::CMD && k != Kind::BUILTIN_CMD && k != Kind::TYPE_ALIAS;
}
};
/* A symbol occurrence that points back at its declaration via
 * (declModId, declPos). */
class Symbol {
private:
unsigned int pos;
unsigned short size;
unsigned short declModId;
unsigned int declPos;
public:
/* Returns an empty Optional when token.size does not fit in 16 bits. */
static Optional<Symbol> create(Token token, const DeclBase &decl) {
if (token.size > UINT16_MAX) {
return {};
}
return Symbol(token.pos, static_cast<unsigned short>(token.size), decl.getModId(),
decl.getPos());
}
Symbol(unsigned int pos, unsigned short size, unsigned short declModId, unsigned int declPos)
: pos(pos), size(size), declModId(declModId), declPos(declPos) {}
unsigned int getPos() const { return this->pos; }
Token getToken() const {
return Token{
.pos = this->pos,
.size = this->size,
};
}
unsigned short getDeclModId() const { return this->declModId; }
/* BUGFIX: previously returned `unsigned short`, silently truncating
 * declPos (an unsigned int) for positions above 65535.  Widening the
 * return type is backward-compatible for callers. */
unsigned int getDeclPos() const { return this->declPos; }
/* Heterogeneous comparator for binary search by byte position. */
struct Compare {
bool operator()(const Symbol &x, unsigned int y) const { return x.getToken().endPos() < y; }
bool operator()(unsigned int x, const Symbol &y) const { return x < y.getToken().pos; }
};
};
/* Location-only copy of a declaration imported from another module;
 * lookups use a (modId, pos) SymbolRequest. */
class ForeignDecl : public DeclBase {
public:
explicit ForeignDecl(const DeclSymbol &decl)
: DeclBase(decl.getPos(), decl.getSize(), decl.getModId()) {}
/* Heterogeneous comparator ordering by (modId, position) for binary
 * search against a SymbolRequest. */
struct Compare {
bool operator()(const ForeignDecl &x, const SymbolRequest &y) const {
return x.getModId() < y.modId || (!(y.modId < x.getModId()) && x.getToken().endPos() < y.pos);
}
bool operator()(const SymbolRequest &x, const ForeignDecl &y) const {
return x.modId < y.getModId() || (!(y.getModId() < x.modId) && x.pos < y.getPos());
}
};
};
/* Immutable per-module symbol index: declarations, occurrences, foreign
 * declarations and the ids of inlined modules, all for one (modId, version). */
class SymbolIndex {
private:
unsigned short modId;
int version;
std::vector<DeclSymbol> decls;
std::vector<Symbol> symbols;
std::vector<ForeignDecl> foreignDecls;
FlexBuffer<unsigned short> inlinedModIds;
public:
/* Takes ownership of all containers (moved in). */
SymbolIndex(unsigned short modId, int version, std::vector<DeclSymbol> &&decls,
std::vector<Symbol> &&symbols, std::vector<ForeignDecl> &&foreignDecls,
FlexBuffer<unsigned short> inlinedModIds)
: modId(modId), version(version), decls(std::move(decls)), symbols(std::move(symbols)),
foreignDecls(std::move(foreignDecls)), inlinedModIds(std::move(inlinedModIds)) {}
unsigned short getModId() const { return this->modId; }
int getVersion() const { return this->version; }
/* Position-based lookups; return nullptr when not found. */
const DeclSymbol *findDecl(unsigned int pos) const;
const Symbol *findSymbol(unsigned int pos) const;
const ForeignDecl *findForeignDecl(SymbolRequest request) const;
const std::vector<DeclSymbol> &getDecls() const { return this->decls; }
const std::vector<Symbol> &getSymbols() const { return this->symbols; }
const std::vector<ForeignDecl> &getForeignDecls() const { return this->foreignDecls; }
const FlexBuffer<unsigned short> &getInlinedModIds() const { return this->inlinedModIds; }
/* Heterogeneous comparator for binary search of indexes by module id. */
struct Compare {
bool operator()(const SymbolIndex &x, unsigned short id) const { return x.getModId() < id; }
bool operator()(unsigned short id, const SymbolIndex &y) const { return id < y.getModId(); }
};
};
/* Collection of per-module SymbolIndex objects, addressable by module id. */
class SymbolIndexes {
private:
std::vector<SymbolIndex> indexes;
public:
void add(SymbolIndex &&index);
/* Returns nullptr when no index exists for modId. */
const SymbolIndex *find(unsigned short modId) const;
void remove(unsigned short id);
/* Convenience: declaration lookup across modules; nullptr if absent. */
const DeclSymbol *findDecl(SymbolRequest req) const {
if (auto *index = this->find(req.modId); index) {
return index->findDecl(req.pos);
}
return nullptr;
}
auto begin() const { return this->indexes.cbegin(); }
auto end() const { return this->indexes.cend(); }
};
/* Result passed to the findDeclaration() consumer: the declaration found
 * for the requesting symbol occurrence. */
struct FindDeclResult {
const DeclSymbol &decl;
const Symbol &request;
};
/* Resolves the declaration for the symbol at `request`; invokes `consumer`
 * for each hit.  Returns whether anything was found (see implementation). */
bool findDeclaration(const SymbolIndexes &indexes, SymbolRequest request,
const std::function<void(const FindDeclResult &)> &consumer);
/* Result passed to the findAllReferences() consumer: one reference to the
 * requested declaration. */
struct FindRefsResult {
const SymbolRef &symbol;
const DeclSymbol &request;
};
/* Enumerates every reference to the declaration at `request`.
 * (typo fix: parameter renamed `cosumer` -> `consumer`; declaration-only
 * names are non-binding, so this is purely cosmetic/consistent.) */
bool findAllReferences(const SymbolIndexes &indexes, SymbolRequest request,
const std::function<void(const FindRefsResult &)> &consumer);
} // namespace ydsh::lsp
namespace ydsh {
template <>
struct allow_enum_bitop<lsp::DeclSymbol::Attr> : std::true_type {};
} // namespace ydsh
#endif // YDSH_TOOLS_ANALYZER_INDEX_H
| 27.550769 | 100 | 0.666183 | [
"vector"
] |
267c8749c81460130327b18b5c4daf7ca7ffedfe | 111,150 | c | C | develop/vagrant/rbus_sprint_2010/src/rbus.c | StefanosVorkas/Dobby | c865ce0cfb0bb45f872ab0b3c4fa03ace6811ba2 | [
"Apache-2.0"
] | null | null | null | develop/vagrant/rbus_sprint_2010/src/rbus.c | StefanosVorkas/Dobby | c865ce0cfb0bb45f872ab0b3c4fa03ace6811ba2 | [
"Apache-2.0"
] | null | null | null | develop/vagrant/rbus_sprint_2010/src/rbus.c | StefanosVorkas/Dobby | c865ce0cfb0bb45f872ab0b3c4fa03ace6811ba2 | [
"Apache-2.0"
] | null | null | null | /*
* If not stated otherwise in this file or this component's Licenses.txt file
* the following copyright and licenses apply:
*
* Copyright 2019 RDK Management
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <pthread.h>
#include <rtVector.h>
#include <rbus_core.h>
#include <rbus_marshalling.h>
#include <rbus_session_mgr.h>
#include <rbus.h>
#include "rbus_buffer.h"
#include "rbus_element.h"
#include "rbus_valuechange.h"
#include "rbus_subscriptions.h"
//******************************* MACROS *****************************************//
#define UNUSED1(a) (void)(a)
#define UNUSED2(a,b) UNUSED1(a),UNUSED1(b)
#define UNUSED3(a,b,c) UNUSED1(a),UNUSED2(b,c)
#define UNUSED4(a,b,c,d) UNUSED1(a),UNUSED3(b,c,d)
#define UNUSED5(a,b,c,d,e) UNUSED1(a),UNUSED4(b,c,d,e)
#define UNUSED6(a,b,c,d,e,f) UNUSED1(a),UNUSED5(b,c,d,e,f)
#define MAX_COMPS_PER_PROCESS 5
#define FALSE 0
#define TRUE 1
#define VALUE_CHANGE_POLLING_PERIOD 2 //seconds
//********************************************************************************//
//******************************* STRUCTURES *************************************//
/* Per-component handle backing the public rbusHandle_t: one slot of the
 * fixed comp_array below. */
struct _rbusHandle_t
{
int inUse; /* slot allocation flag in comp_array */
char* componentName;
elementNode* elementRoot; /* root of this component's registered data-element tree */
rtVector eventSubs; /* consumer side subscriptions FIXME - this needs to be an associative map instead of list/vector*/
rbusSubscriptions_t subscriptions; /*provider side subscriptions */
};
/* Saved message header so an async method invocation can be replied to later. */
struct _rbusMethodAsyncHandle_t
{
rtMessageHeader hdr;
};
/* Fixed pool of component handles for this process; rbus_open presumably
 * claims a free slot by setting inUse. */
#define comp_info struct _rbusHandle_t
comp_info comp_array[MAX_COMPS_PER_PROCESS] = {
{0,"", NULL, NULL, NULL},
{0,"", NULL, NULL, NULL},
{0,"", NULL, NULL, NULL},
{0,"", NULL, NULL, NULL},
{0,"", NULL, NULL, NULL}
};
/* Data types used by legacy (CCSP/TR-069 style) components; values in this
 * range are recognized by rbusValue_initFromMessage() and converted from
 * their string representation via _parse_rbusData_to_value(). */
typedef enum _rbus_legacy_support
{
RBUS_LEGACY_STRING = 0, /**< Null terminated string */
RBUS_LEGACY_INT, /**< Integer (2147483647 or -2147483648) as String */
RBUS_LEGACY_UNSIGNEDINT, /**< Unsigned Integer (ex: 4,294,967,295) as String */
RBUS_LEGACY_BOOLEAN, /**< Boolean as String (ex:"true", "false" */
RBUS_LEGACY_DATETIME, /**< ISO-8601 format (YYYY-MM-DDTHH:MM:SSZ) as String */
RBUS_LEGACY_BASE64, /**< Base64 representation of data as String */
RBUS_LEGACY_LONG, /**< Long (ex: 9223372036854775807 or -9223372036854775808) as String */
RBUS_LEGACY_UNSIGNEDLONG, /**< Unsigned Long (ex: 18446744073709551615) as String */
RBUS_LEGACY_FLOAT, /**< Float (ex: 1.2E-38 or 3.4E+38) as String */
RBUS_LEGACY_DOUBLE, /**< Double (ex: 2.3E-308 or 1.7E+308) as String */
RBUS_LEGACY_BYTE,
RBUS_LEGACY_NONE
} rbusLegacyDataType_t;
/* Status codes returned by legacy components (100-based, unlike rbusError_t). */
typedef enum _rbus_legacy_returns {
RBUS_LEGACY_ERR_SUCCESS = 100,
RBUS_LEGACY_ERR_MEMORY_ALLOC_FAIL = 101,
RBUS_LEGACY_ERR_FAILURE = 102,
RBUS_LEGACY_ERR_NOT_CONNECT = 190,
RBUS_LEGACY_ERR_TIMEOUT = 191,
RBUS_LEGACY_ERR_NOT_EXIST = 192,
RBUS_LEGACY_ERR_NOT_SUPPORT = 193,
RBUS_LEGACY_ERR_OTHERS
} rbusLegacyReturn_t;
//********************************************************************************//
//******************************* INTERNAL FUNCTIONS *****************************//
/* rtVector cleanup callback: release one consumer-side subscription,
 * including its owned event-name string and optional filter reference. */
static void rbusEventSubscription_free(void* p)
{
    rbusEventSubscription_t* sub = (rbusEventSubscription_t*)p;
    if(!sub) /* tolerate NULL entries so vector cleanup can't crash */
        return;
    free((void*)sub->eventName);
    if(sub->filter)
    {
        rbusFilter_Release(sub->filter);
    }
    free(sub);
}
/* Linear scan of the consumer-side subscription list for an exact
 * event-name match; logs a warning and returns NULL when absent.
 * (FIXME retained from original: an associative map would be better
 * than this O(n) scan.) */
static rbusEventSubscription_t* rbusEventSubscription_find(rtVector eventSubs, char const* eventName)
{
    size_t count = rtVector_Size(eventSubs);
    size_t idx;
    for(idx = 0; idx < count; ++idx)
    {
        rbusEventSubscription_t* entry = (rbusEventSubscription_t*)rtVector_At(eventSubs, idx);
        if(entry && strcmp(entry->eventName, eventName) == 0)
        {
            return entry;
        }
    }
    rtLog_Warn("rbusEventSubscription_find error: can't find %s", eventName);
    return NULL;
}
/* Convert a legacy component's string-encoded value (pBuff) into `value`
 * according to the declared legacy type.  No-op when pBuff or value is
 * NULL, and silently ignores unknown types (default branch). */
static void _parse_rbusData_to_value (char const* pBuff, rbusLegacyDataType_t legacyType, rbusValue_t value)
{
if (pBuff && value)
{
switch (legacyType)
{
case RBUS_LEGACY_STRING:
{
rbusValue_SetFromString(value, RBUS_STRING, pBuff);
break;
}
case RBUS_LEGACY_INT:
{
rbusValue_SetFromString(value, RBUS_INT32, pBuff);
break;
}
case RBUS_LEGACY_UNSIGNEDINT:
{
rbusValue_SetFromString(value, RBUS_UINT32, pBuff);
break;
}
case RBUS_LEGACY_BOOLEAN:
{
rbusValue_SetFromString(value, RBUS_BOOLEAN, pBuff);
break;
}
case RBUS_LEGACY_LONG:
{
rbusValue_SetFromString(value, RBUS_INT64, pBuff);
break;
}
case RBUS_LEGACY_UNSIGNEDLONG:
{
rbusValue_SetFromString(value, RBUS_UINT64, pBuff);
break;
}
case RBUS_LEGACY_FLOAT:
{
rbusValue_SetFromString(value, RBUS_SINGLE, pBuff);
break;
}
case RBUS_LEGACY_DOUBLE:
{
rbusValue_SetFromString(value, RBUS_DOUBLE, pBuff);
break;
}
case RBUS_LEGACY_BYTE:
{
/* Bytes arrive as a NUL-terminated buffer; stored without the NUL. */
rbusValue_SetBytes(value, (uint8_t*)pBuff, strlen(pBuff));
break;
}
case RBUS_LEGACY_DATETIME:
{
rbusValue_SetFromString(value, RBUS_DATETIME, pBuff);
break;
}
case RBUS_LEGACY_BASE64:
{
/* Base64 payload is stored as-is (not decoded). */
rtLog_Warn("RBUS_LEGACY_BASE64_TYPE: Base64 type was never used in CCSP so far. So, Rbus did not support it till now. Since this is the first Base64 query, please report to get it fixed.");
rbusValue_SetString(value, pBuff);
break;
}
default:
break;
}
}
}
//*************************** SERIALIZE/DERIALIZE FUNCTIONS ***************************//
#define DEBUG_SERIALIZER 0
static void rbusValue_initFromMessage(rbusValue_t* value, rtMessage msg);
static void rbusValue_appendToMessage(char const* name, rbusValue_t value, rtMessage msg);
static void rbusProperty_initFromMessage(rbusProperty_t* property, rtMessage msg);
static void rbusPropertyList_initFromMessage(rbusProperty_t* prop, rtMessage msg);
static void rbusPropertyList_appendToMessage(rbusProperty_t prop, rtMessage msg);
static void rbusObject_initFromMessage(rbusObject_t* obj, rtMessage msg);
static void rbusObject_appendToMessage(rbusObject_t obj, rtMessage msg);
static void rbusEvent_updateFromMessage(rbusEvent_t* event, rtMessage msg);
static void rbusEvent_appendToMessage(rbusEvent_t* event, rtMessage msg);
static void rbusFilter_AppendToMessage(rbusFilter_t filter, rtMessage msg);
static void rbusFilter_InitFromMessage(rbusFilter_t* filter, rtMessage msg);
/* Deserialize one rbusValue_t from the message stream.  Wire layout (must
 * mirror rbusValue_appendToMessage): an int32 type tag followed by the
 * type-specific payload.  Legacy (CCSP) type tags carry a string payload
 * that is converted via _parse_rbusData_to_value.  Pop order is
 * significant -- do not reorder the rbus_PopXXX calls. */
static void rbusValue_initFromMessage(rbusValue_t* value, rtMessage msg)
{
void const* data;
uint32_t length;
int type;
char const* pBuffer = NULL;
rbusValue_Init(value);
rbus_PopInt32(msg, (int*) &type);
#if DEBUG_SERIALIZER
rtLog_Info("> value pop type=%d", type);
#endif
/* Legacy components encode every value as a typed string. */
if(type>=RBUS_LEGACY_STRING && type<=RBUS_LEGACY_NONE)
{
rbus_PopString(msg, &pBuffer);
rtLog_Debug("Received Param Value in string : [%s]", pBuffer);
_parse_rbusData_to_value (pBuffer, type, *value);
}
else
{
if(type == RBUS_OBJECT)
{
rbusObject_t obj;
rbusObject_initFromMessage(&obj, msg);
rbusValue_SetObject(*value, obj);
rbusObject_Release(obj); /* SetObject retains; drop our local reference */
}
else
if(type == RBUS_PROPERTY)
{
rbusProperty_t prop;
rbusPropertyList_initFromMessage(&prop, msg);
rbusValue_SetProperty(*value, prop);
rbusProperty_Release(prop); /* SetProperty retains; drop our local reference */
}
else
{
int32_t ival;
double fval;
switch(type)
{
case RBUS_INT16:
rbus_PopInt32(msg, &ival);
rbusValue_SetInt16(*value, (int16_t)ival);
break;
case RBUS_UINT16:
rbus_PopInt32(msg, &ival);
rbusValue_SetUInt16(*value, (uint16_t)ival);
break;
case RBUS_INT32:
rbus_PopInt32(msg, &ival);
rbusValue_SetInt32(*value, (int32_t)ival);
break;
case RBUS_UINT32:
rbus_PopInt32(msg, &ival);
rbusValue_SetUInt32(*value, (uint32_t)ival);
break;
/* 64-bit values travel as two int32 halves, reassembled through a
 * union (sender must split them in the same order). */
case RBUS_INT64:
{
union UNION64
{
int32_t i32[2];
int64_t i64;
};
union UNION64 u;
rbus_PopInt32(msg, &u.i32[0]);
rbus_PopInt32(msg, &u.i32[1]);
rbusValue_SetInt64(*value, u.i64);
break;
}
case RBUS_UINT64:
{
union UNION64
{
int32_t i32[2];
uint64_t u64;
};
union UNION64 u;
rbus_PopInt32(msg, &u.i32[0]);
rbus_PopInt32(msg, &u.i32[1]);
rbusValue_SetUInt64(*value, u.u64);
break;
}
case RBUS_SINGLE:
rbus_PopDouble(msg, &fval);
rbusValue_SetSingle(*value, (float)fval);
break;
case RBUS_DOUBLE:
rbus_PopDouble(msg, &fval);
rbusValue_SetDouble(*value, (double)fval);
break;
case RBUS_DATETIME:
rbus_PopBinaryData(msg, &data, &length);
rbusValue_SetTLV(*value, type, length, data);
break;
default:
/* Everything else (strings, bytes, ...) is carried as raw TLV. */
rbus_PopBinaryData(msg, &data, &length);
rbusValue_SetTLV(*value, type, length, data);
break;
}
#if DEBUG_SERIALIZER
char* sv = rbusValue_ToString(*value,0,0);
rtLog_Info("> value pop data=%s", sv);
free(sv);
#endif
}
}
}
/* Deserialize one property (a name followed by a serialized value) from msg.
 * The caller owns the reference returned through *property.
 */
static void rbusProperty_initFromMessage(rbusProperty_t* property, rtMessage msg)
{
    char const* propName = NULL;
    rbusValue_t propValue = NULL;

    rbus_PopString(msg, (char const**) &propName);
#if DEBUG_SERIALIZER
    rtLog_Info("> prop pop name=%s", propName);
#endif
    rbusProperty_Init(property, propName, NULL);

    /* the value immediately follows the name in the message */
    rbusValue_initFromMessage(&propValue, msg);
    rbusProperty_SetValue(*property, propValue);
    rbusValue_Release(propValue); /* the property retains its own reference */
}
/* Populate an rbusEvent_t from a message: name, type, then the data object.
 * event->name points into the message buffer; event->data is a new object
 * the caller must release.
 */
static void rbusEvent_updateFromMessage(rbusEvent_t* event, rtMessage msg)
{
    char const* evName = NULL;
    int evType = 0;
    rbusObject_t evData = NULL;

    rbus_PopString(msg, (char const**) &evName);
    rbus_PopInt32(msg, (int*) &evType);
#if DEBUG_SERIALIZER
    rtLog_Info("> event pop name=%s type=%d", evName, evType);
#endif
    rbusObject_initFromMessage(&evData, msg);

    event->name = evName;
    event->type = evType;
    event->data = evData;/*caller must call rbusValue_Release*/
}
/* Serialize a linked list of properties: the count first, then each
 * name/value pair in list order.
 */
static void rbusPropertyList_appendToMessage(rbusProperty_t prop, rtMessage msg)
{
    rbusProperty_t cur;
    int count = 0;

    /* first pass: count the list so the reader knows how many to expect */
    for(cur = prop; cur != NULL; cur = rbusProperty_GetNext(cur))
        count++;

    rbus_AppendInt32(msg, count);/*property count*/
#if DEBUG_SERIALIZER
    rtLog_Info("> prop add numProps=%d", count);
#endif

    /* second pass: serialize each property */
    for(cur = prop; cur != NULL; cur = rbusProperty_GetNext(cur))
        rbusValue_appendToMessage(rbusProperty_GetName(cur), rbusProperty_GetValue(cur), msg);
}
/* Deserialize a counted list of properties and link them into a single
 * chain returned through *prop (the list head).
 *
 * Refcounting: rbusProperty_SetNext retains its argument, so every
 * non-head property is released right after it is linked; the head's
 * initial reference is transferred to the caller.
 */
static void rbusPropertyList_initFromMessage(rbusProperty_t* prop, rtMessage msg)
{
    rbusProperty_t previous = NULL, first = NULL;
    int numProps = 0;
    rbus_PopInt32(msg, (int*) &numProps);
#if DEBUG_SERIALIZER
    rtLog_Info("> prop pop numProps=%d", numProps);
#endif
    while(--numProps >= 0)
    {
        /* NOTE: this local intentionally shadows the out-parameter */
        rbusProperty_t prop;
        rbusProperty_initFromMessage(&prop, msg);
        if(first == NULL)
            first = prop;
        if(previous != NULL)
        {
            rbusProperty_SetNext(previous, prop);
            rbusProperty_Release(prop);
        }
        previous = prop;
    }
    /*TODO we need to release the props we inited*/
    *prop = first;
}
/* Serialize an object: name, type, its property list, then a counted list
 * of recursively serialized child objects.
 */
static void rbusObject_appendToMessage(rbusObject_t obj, rtMessage msg)
{
    rbusObject_t it;
    int childCount = 0;

    rbus_AppendString(msg, rbusObject_GetName(obj));/*object name*/
    rbus_AppendInt32(msg, rbusObject_GetType(obj));/*object type*/
#if DEBUG_SERIALIZER
    rtLog_Info("> object add name=%s type=%d", rbusObject_GetName(obj), rbusObject_GetType(obj));
#endif

    rbusPropertyList_appendToMessage(rbusObject_GetProperties(obj), msg);

    /* count the immediate children, then recurse into each one */
    for(it = rbusObject_GetChildren(obj); it != NULL; it = rbusObject_GetNext(it))
        childCount++;

    rbus_AppendInt32(msg, childCount);/*object child object count*/
#if DEBUG_SERIALIZER
    rtLog_Info("> object add numChild=%d", childCount);
#endif

    for(it = rbusObject_GetChildren(obj); it != NULL; it = rbusObject_GetNext(it))
        rbusObject_appendToMessage(it, msg);/*object child object*/
}
/* Deserialize an object: name, type, property list, then a counted list of
 * recursively serialized children.
 *
 * Children are linked as they are read; SetNext retains each child so all
 * but the first are released immediately. The head child reference and the
 * property list reference are handed to the new object (SetChildren /
 * SetProperties retain) and then released here.
 *
 * NOTE(review): rbusObject_Release(children) is also reached when the
 * object had no children (children == NULL) — presumably Release tolerates
 * NULL; confirm against the rbusObject implementation.
 */
static void rbusObject_initFromMessage(rbusObject_t* obj, rtMessage msg)
{
    char const* name;
    int type;
    int numChild = 0;
    rbusProperty_t prop;
    rbusObject_t children=NULL, previous=NULL;
    rbus_PopString(msg, &name);
    rbus_PopInt32(msg, &type);
#if DEBUG_SERIALIZER
    rtLog_Info("> object pop name=%s type=%d", name, type);
#endif
    rbusPropertyList_initFromMessage(&prop, msg);
    rbus_PopInt32(msg, &numChild);
#if DEBUG_SERIALIZER
    rtLog_Info("> object pop numChild=%d", numChild);
#endif
    while(--numChild >= 0)
    {
        rbusObject_t next;
        rbusObject_initFromMessage(&next, msg);/*object child object*/
        if(children == NULL)
            children = next;
        if(previous != NULL)
        {
            rbusObject_SetNext(previous, next);
            rbusObject_Release(next);
        }
        previous = next;
    }
    /* multi-instance objects (table rows) need a different constructor */
    if(type == RBUS_OBJECT_MULTI_INSTANCE)
        rbusObject_InitMultiInstance(obj, name);
    else
        rbusObject_Init(obj, name);
    rbusObject_SetProperties(*obj, prop);
    rbusProperty_Release(prop);
    rbusObject_SetChildren(*obj, children);
    rbusObject_Release(children);
}
/* Serialize one named value into msg.
 *
 * Wire format mirrors rbusValue_initFromMessage: name string, int32 type
 * tag, then a type-dependent payload. 16-bit integers are widened to int32;
 * 64-bit integers are split into two int32 words through a union;
 * SINGLE/DOUBLE travel as double; DATETIME and all remaining types are
 * appended as raw TLV bytes.
 */
static void rbusValue_appendToMessage(char const* name, rbusValue_t value, rtMessage msg)
{
    rbusValueType_t type = rbusValue_GetType(value);

    rbus_AppendString(msg, name);
    rbus_AppendInt32(msg, type);
#if DEBUG_SERIALIZER
    rtLog_Info("> value add name=%s type=%d", name, type);
#endif

    if(type == RBUS_OBJECT)
    {
        rbusObject_appendToMessage(rbusValue_GetObject(value), msg);
    }
    else if(type == RBUS_PROPERTY)
    {
        rbusPropertyList_appendToMessage(rbusValue_GetProperty(value), msg);
    }
    else
    {
        switch(type)
        {
            case RBUS_INT16:
                rbus_AppendInt32(msg, (int32_t)rbusValue_GetInt16(value));
                break;
            case RBUS_UINT16:
                rbus_AppendInt32(msg, (int32_t)rbusValue_GetUInt16(value));
                break;
            case RBUS_INT32:
                rbus_AppendInt32(msg, (int32_t)rbusValue_GetInt32(value));
                break;
            case RBUS_UINT32:
                rbus_AppendInt32(msg, (int32_t)rbusValue_GetUInt32(value));
                break;
            case RBUS_INT64:
            {
                union UNION64
                {
                    int32_t i32[2];
                    int64_t i64;
                };
                union UNION64 u;
                u.i64 = rbusValue_GetInt64(value);
                rbus_AppendInt32(msg, (int32_t)u.i32[0]);
                rbus_AppendInt32(msg, (int32_t)u.i32[1]);
                break;
            }
            case RBUS_UINT64:
            {
                union UNION64
                {
                    int32_t i32[2];
                    /* BUGFIX: was int64_t; now matches the unsigned getter and
                       the deserializer's union in rbusValue_initFromMessage */
                    uint64_t u64;
                };
                union UNION64 u;
                u.u64 = rbusValue_GetUInt64(value);
                rbus_AppendInt32(msg, (int32_t)u.i32[0]);
                rbus_AppendInt32(msg, (int32_t)u.i32[1]);
                break;
            }
            case RBUS_SINGLE:
                rbus_AppendDouble(msg, (double)rbusValue_GetSingle(value));
                break;
            case RBUS_DOUBLE:
                rbus_AppendDouble(msg, (double)rbusValue_GetDouble(value));
                break;
            case RBUS_DATETIME:
                rbus_AppendBinaryData(msg, rbusValue_GetV(value), rbusValue_GetL(value));
                break;
            default:
                rbus_AppendBinaryData(msg, rbusValue_GetV(value), rbusValue_GetL(value));
                break;
        }
#if DEBUG_SERIALIZER
        char* sv = rbusValue_ToString(value,0,0);
        rtLog_Info("> value add data=%s", sv);
        free(sv);
#endif
    }
}
/* Serialize an event for publishing: name, type, then the data object. */
static void rbusEvent_appendToMessage(rbusEvent_t* event, rtMessage msg)
{
    rbusEvent_t* ev = event;

    rbus_AppendString(msg, ev->name);
    rbus_AppendInt32(msg, ev->type);
#if DEBUG_SERIALIZER
    rtLog_Info("> event add name=%s type=%d", ev->name, ev->type);
#endif
    rbusObject_appendToMessage(ev->data, msg);
}
/* Return true only when name may be used in a "get" query.
 *
 * A get query must not target an event (name ends with '!') or a method
 * (name ends with ')' or contains '('). NULL and empty names are rejected.
 *
 * BUGFIX: the original computed length-1 without checking for an empty
 * string, reading name[-1] out of bounds.
 */
bool _is_valid_get_query(char const* name)
{
    if (name == NULL)
    {
        rtLog_Debug("Null Pointer sent for Query");
        return false;
    }

    int length = strlen (name);
    if (length == 0)
    {
        /* nothing to get; also guards the name[length-1] access below */
        rtLog_Debug("Empty string sent for Query");
        return false;
    }

    int temp = length - 1;
    if (('!' == name[temp]) ||
        (')' == name[temp]) ||
        (NULL != strstr (name, "(")))
    {
        rtLog_Debug("Event or Method is Queried");
        return false;
    }
    return true;
}
/* Serialize a filter expression tree (recursively for logic nodes). */
void rbusFilter_AppendToMessage(rbusFilter_t filter, rtMessage msg)
{
    int32_t ftype = rbusFilter_GetType(filter);

    rbus_AppendInt32(msg, ftype);

    if(ftype == RBUS_FILTER_EXPRESSION_RELATION)
    {
        /* relation node: operator followed by the comparison value */
        rbus_AppendInt32(msg, rbusFilter_GetRelationOperator(filter));
        rbusValue_appendToMessage("filter", rbusFilter_GetRelationValue(filter), msg);
    }
    else if(ftype == RBUS_FILTER_EXPRESSION_LOGIC)
    {
        /* logic node: operator, left operand, and (except for NOT) right operand */
        rbus_AppendInt32(msg, rbusFilter_GetLogicOperator(filter));
        rbusFilter_AppendToMessage(rbusFilter_GetLogicLeft(filter), msg);
        if(rbusFilter_GetLogicOperator(filter) != RBUS_FILTER_OPERATOR_NOT)
            rbusFilter_AppendToMessage(rbusFilter_GetLogicRight(filter), msg);
    }
}
/* Deserialize a filter expression tree (the inverse of
 * rbusFilter_AppendToMessage), recursing for logic operands.
 */
void rbusFilter_InitFromMessage(rbusFilter_t* filter, rtMessage msg)
{
    int32_t ftype = 0;
    int32_t oper = 0;

    rbus_PopInt32(msg, &ftype);

    if(ftype == RBUS_FILTER_EXPRESSION_RELATION)
    {
        char const* valName = NULL;
        rbusValue_t relValue = NULL;

        rbus_PopInt32(msg, &oper);
        /* the serialized value carries a name ("filter") we don't use */
        rbus_PopString(msg, &valName);
        rbusValue_initFromMessage(&relValue, msg);
        rbusFilter_InitRelation(filter, oper, relValue);
    }
    else if(ftype == RBUS_FILTER_EXPRESSION_LOGIC)
    {
        rbusFilter_t lhs = NULL, rhs = NULL;

        rbus_PopInt32(msg, &oper);
        rbusFilter_InitFromMessage(&lhs, msg);
        /* NOT is unary: only one operand was serialized */
        if(oper != RBUS_FILTER_OPERATOR_NOT)
            rbusFilter_InitFromMessage(&rhs, msg);
        rbusFilter_InitLogic(filter, oper, lhs, rhs);
    }
}
/* Return true when name is a wildcard query: it ends with '.' (object /
 * partial-path query) or contains '*' (multi-item query). A NULL name is
 * treated as a wildcard (original behavior preserved).
 *
 * BUGFIX: the original computed length-1 without checking for an empty
 * string, reading name[-1] out of bounds; an empty name is now simply not
 * a wildcard.
 */
bool _is_wildcard_query(char const* name)
{
    if (name == NULL)
    {
        rtLog_Debug("Null Pointer sent for Query");
        return true;
    }

    int length = strlen (name);
    if (length == 0)
        return false; /* guards the name[length-1] access below */

    if (('.' == name[length - 1]) || (NULL != strstr (name, "*")))
    {
        rtLog_Debug("The Query is having wildcard.. ");
        return true;
    }
    return false;
}
/* Return a pointer to the last dot-separated token in name, keeping any
 * trailing dot (e.g. "A.B.C." -> "C.", "A.B" -> "B"). Returns name itself
 * when it contains no separating dot, NULL for NULL input, and the empty
 * string unchanged.
 *
 * BUGFIX: for the input "." the original decremented len to -1 and read
 * name[-1]; the scan is now clamped to len >= 0.
 */
char const* getLastTokenInName(char const* name)
{
    if(name == NULL)
        return NULL;

    int len = (int)strlen(name);
    if(len == 0)
        return name;

    len--;
    /* skip a trailing dot so "A.B." yields "B." rather than "" */
    if(name[len] == '.')
        len--;

    /* walk back to the previous dot (len may be -1 for the input ".") */
    while(len > 0 && name[len] != '.')
        len--;

    if(len >= 0 && name[len] == '.')
        return &name[len+1];
    else
        return name;
}
/* Recursively register (added=true) or unregister (added=false) the
 * properties under a table row with the value-change service.
 * When adding a row, call this after subscriptions are added to the row
 * element; when removing a row, call it before subscriptions are removed.
 * Only properties that currently have auto-publish subscriptions are
 * touched.
 */
void valueChangeTableRowUpdate(rbusHandle_t handle, elementNode* rowNode, bool added)
{
    elementNode* node;

    if(!rowNode)
        return;

    for(node = rowNode->child; node != NULL; node = node->nextSibling)
    {
        /* skip ValueChange entirely for properties with no subscribers */
        if(node->type == RBUS_ELEMENT_TYPE_PROPERTY
        && elementHasAutoPubSubscriptions(node, NULL))
        {
            if(added)
                rbusValueChange_AddPropertyNode(handle, node);
            else
                rbusValueChange_RemovePropertyNode(handle, node);
        }

        /* descend, but never into a table's "{i}" row template */
        if(node->child
        && !(node->parent->type == RBUS_ELEMENT_TYPE_TABLE && strcmp(node->name, "{i}") == 0))
        {
            valueChangeTableRowUpdate(handle, node, added);
        }
    }
}
//******************************* CALLBACKS *************************************//
/* Bus callback invoked when a consumer subscribes to (added=1) or
 * unsubscribes from (added=0) one of this provider's events.
 *
 * Flow:
 *  1. locate the registered element for eventName
 *  2. pop interval/duration and an optional filter from the payload
 *  3. let the provider's eventSubHandler (if any) veto or override
 *     autoPublish
 *  4. add or look up the subscription record
 *  5. for auto-published properties, add/remove each affected instance
 *     node with the value-change service
 *  6. on unsubscribe, remove the subscription record last (after step 5,
 *     which still needs it)
 *
 * Always returns 0 (the transport ignores richer status here).
 */
static int _event_subscribe_callback_handler(char const* object, char const* eventName, char const* listener, int added, const rtMessage payload, void* userData)
{
    /* userData is the component handle; both views alias the same struct */
    rbusHandle_t handle = (rbusHandle_t)userData;
    comp_info* ci = (comp_info*)userData;
    UNUSED2(object,listener);
    rtLog_Debug("%s: event subscribe callback for [%s] event!", __FUNCTION__, eventName);
    elementNode* el = retrieveElement(ci->elementRoot, eventName);
    if(el)
    {
        rbusError_t err = RBUS_ERROR_SUCCESS;
        bool autoPublish = true;
        rbusSubscription_t* subscription = NULL;
        int32_t interval = 0;
        int32_t duration = 0;
        rbusFilter_t filter = NULL;
        /* copy the optional filter */
        if(payload)
        {
            int hasFilter;
            rbus_PopInt32(payload, &interval);
            rbus_PopInt32(payload, &duration);
            rbus_PopInt32(payload, &hasFilter);
            if(hasFilter)
            {
                rbusFilter_InitFromMessage(&filter, payload);
                rbusFilter_fwrite(filter, stdout, NULL);
            }
        }
        rtLog_Debug("%s: found element of type %d", __FUNCTION__, el->type);
        /* call the provider subHandler first to see if it overrides autoPublish */
        if(el->cbTable.eventSubHandler)
        {
            rbusEventSubAction_t action;
            if(added)
                action = RBUS_EVENT_ACTION_SUBSCRIBE;
            else
                action = RBUS_EVENT_ACTION_UNSUBSCRIBE;
            err = el->cbTable.eventSubHandler(handle, action, eventName, filter, interval, &autoPublish);
        }
        if(err == RBUS_ERROR_SUCCESS)
        {
            /*TODO do we care about error from callback ?
            return 0;
            */
        }
        if(added)
        {
            subscription = rbusSubscriptions_addSubscription(ci->subscriptions, listener, eventName, filter, interval, duration, autoPublish, el);
        }
        else
        {
            subscription = rbusSubscriptions_getSubscription(ci->subscriptions, listener, eventName, filter);
        }
        if(!subscription)
        {
            /* add failed, or unsubscribe for a subscription we never had */
            rtLog_Warn("%s: subscription null", __FUNCTION__);
            rbusFilter_Release(filter);
            return 0;
        }
        /* if autoPublish and its a property being subscribed to
           then update rbusValueChange to handle the property */
        if(el->type == RBUS_ELEMENT_TYPE_PROPERTY && subscription->autoPublish)
        {
            rtListItem item;
            rtList_GetFront(subscription->instances, &item);
            while(item)
            {
                elementNode* node;
                rtListItem_GetData(item, (void**)&node);
                /* Check if the node has other subscribers or not. If it has other
                   subs then we don't need to either add or remove it from ValueChange */
                if(!elementHasAutoPubSubscriptions(node, subscription))
                {
                    rtLog_Info("%s: ValueChange %s event=%s prop=%s", __FUNCTION__,
                        added ? "Add" : "Remove", subscription->eventName, node->fullName);
                    if(added)
                    {
                        rbusValueChange_AddPropertyNode(handle, node);
                    }
                    else
                    {
                        rbusValueChange_RemovePropertyNode(handle, node);
                    }
                }
                rtListItem_GetNext(item, &item);
            }
        }
        /*remove subscription only after handling its ValueChange properties above*/
        if(!added)
        {
            rbusSubscriptions_removeSubscription(ci->subscriptions, subscription);
        }
    }
    else
    {
        rtLog_Warn("event subscribe callback: unexpected! element not found");
    }
    return 0;
}
/* Consumer-side bus callback: deserializes an incoming event and forwards
 * it to the subscription's registered handler.
 */
static int _event_callback_handler (char const* objectName, char const* eventName, rtMessage message, void* userData)
{
    rbusEventSubscription_t* sub = (rbusEventSubscription_t*)userData;
    rbusEvent_t event;

    rtLog_Debug("Received event callback: objectName=%s eventName=%s",
        objectName, eventName);

    /* a usable subscription must carry both a handle and a handler */
    if(!sub || !sub->handle || !sub->handler)
        return RBUS_ERROR_BUS_ERROR;

    rbusEvent_updateFromMessage(&event, message);

    ((rbusEventHandler_t)sub->handler)(sub->handle, &event, sub);

    rbusObject_Release(event.data); /* created by rbusEvent_updateFromMessage */
    return 0;
}
/* Handles METHOD_SETPARAMETERVALUES requests.
 *
 * Request layout: int32 sessionId, requesting component name, value count,
 * that many serialized properties, then a "TRUE"/"FALSE" commit string.
 * Each property is dispatched to its element's setHandler; processing stops
 * at the first failure and the failed element name is returned.
 *
 * Response layout: int32 result code, then (on failure) the name of the
 * element that failed.
 *
 * NOTE(review): pIsCommit is used without a NULL check — presumably the
 * peer always sends the commit string; confirm rbus_PopString's behavior
 * on a short message.
 */
static void _set_callback_handler (rbusHandle_t handle, rtMessage request, rtMessage *response)
{
    rbusError_t rc = 0;
    int sessionId = 0;
    int numVals = 0;
    int loopCnt = 0;
    char* pCompName = NULL;
    char* pIsCommit = NULL;
    char const* pFailedElement = NULL;
    rbusProperty_t* pProperties = NULL;
    comp_info* pCompInfo = (comp_info*)handle;
    rbusSetHandlerOptions_t opts;
    memset(&opts, 0, sizeof(opts));
    rbus_PopInt32(request, &sessionId);
    rbus_PopString(request, (char const**) &pCompName);
    rbus_PopInt32(request, &numVals);
    if(numVals > 0)
    {
        /* Update the Set Handler input options */
        opts.sessionId = sessionId;
        opts.requestingComponent = pCompName;
        elementNode* el = NULL;
        /* deserialize all incoming properties before dispatching any */
        pProperties = (rbusProperty_t*)malloc(numVals*sizeof(rbusProperty_t));
        for (loopCnt = 0; loopCnt < numVals; loopCnt++)
        {
            rbusProperty_initFromMessage(&pProperties[loopCnt], request);
        }
        rbus_PopString(request, (char const**) &pIsCommit);
        /* Since we set as string, this needs to compared with string..
         * Otherwise, just the #define in the top for TRUE/FALSE should be used.
         */
        if (strncasecmp("TRUE", pIsCommit, 4) == 0)
            opts.commit = true;
        for (loopCnt = 0; loopCnt < numVals; loopCnt++)
        {
            /* Retrive the element node */
            char const* paramName = rbusProperty_GetName(pProperties[loopCnt]);
            el = retrieveElement(pCompInfo->elementRoot, paramName);
            if(el != NULL)
            {
                if(el->cbTable.setHandler)
                {
                    rc = el->cbTable.setHandler(handle, pProperties[loopCnt], &opts);
                    if (rc != RBUS_ERROR_SUCCESS)
                    {
                        rtLog_Warn("Set Failed for %s; Component Owner returned Error", paramName);
                        pFailedElement = paramName;
                        break;
                    }
                }
                else
                {
                    rtLog_Warn("Set Failed for %s; No Handler found", paramName);
                    rc = RBUS_ERROR_ACCESS_NOT_ALLOWED;
                    pFailedElement = paramName;
                    break;
                }
            }
            else
            {
                rtLog_Warn("Set Failed for %s; No Element registered", paramName);
                rc = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
                pFailedElement = paramName;
                break;
            }
        }
    }
    else
    {
        rtLog_Warn("Set Failed as %s did not send any input", pCompName);
        rc = RBUS_ERROR_INVALID_INPUT;
        pFailedElement = pCompName;
    }
    rtMessage_Create(response);
    rbus_AppendInt32(*response, (int) rc);
    if (pFailedElement)
        rbus_AppendString(*response, pFailedElement);
    /* release everything we deserialized, regardless of outcome */
    if(pProperties)
    {
        for (loopCnt = 0; loopCnt < numVals; loopCnt++)
        {
            rbusProperty_Release(pProperties[loopCnt]);
        }
        free(pProperties);
    }
    return;
}
/* Walk the element subtree under wildQueryElemNode, invoking the getHandler
 * of every readable property and appending each successful result to the
 * caller-supplied properties list. *pCount is incremented per result.
 * Table row templates ("{i}" children of tables) are not descended into.
 */
void _get_recursive_wildcard_handler(elementNode* wildQueryElemNode, rbusHandle_t handle, const char* pRequestingComp, rbusProperty_t properties, int *pCount)
{
    elementNode* node;
    rbusGetHandlerOptions_t options;

    memset(&options, 0, sizeof(options));
    /* Update the Get Handler input options */
    options.requestingComponent = pRequestingComp;

    if (wildQueryElemNode == NULL)
        return;

    for(node = wildQueryElemNode->child; node != NULL; node = node->nextSibling)
    {
        if((node->type == RBUS_ELEMENT_TYPE_PROPERTY) && (node->cbTable.getHandler))
        {
            rbusProperty_t oneProp;
            rbusProperty_Init(&oneProp, node->fullName, NULL);
            /* only successful gets are appended to the result list */
            if (node->cbTable.getHandler(handle, oneProp, &options) == RBUS_ERROR_SUCCESS)
            {
                rbusProperty_PushBack(properties, oneProp);
                *pCount += 1;
            }
            rbusProperty_Release(oneProp); /* list holds its own reference */
        }
        /* recurse into children that are not row templates */
        if( node->child && !(node->parent->type == RBUS_ELEMENT_TYPE_TABLE && strcmp(node->name, "{i}") == 0) )
        {
            _get_recursive_wildcard_handler(node, handle, pRequestingComp, properties, pCount);
        }
    }
}
/* Legacy wildcard get: asks the registry for every element this component
 * registered, filters the list by the pParameterName prefix, invokes each
 * matching element's getHandler, and serializes the collected results into
 * *response.
 *
 * Response layout: int32 result code, then on success the value count
 * followed by each serialized name/value pair.
 *
 * NOTE(review): when numOfGets is 0 no property list was created, so the
 * no-release path is correct; getHandler return codes are not checked on
 * this path — presumably intentional best-effort behavior.
 */
void _get_callback_wildcard_handler(rbusHandle_t handle, const char* pParameterName, const char* pRequestingComp, rtMessage *response)
{
    /* Lets find all the elements registered by the Component */
    rtMessage eResponse;
    const char *pElementNames= NULL;
    int numOfElements = 0;
    int numOfGets = 0;
    comp_info* ci = (comp_info*)handle;
    rbusProperty_t properties, first, last;
    int firstElement = 0;
    int i = 0, j = 0, length = 0;
    rbusGetHandlerOptions_t options;
    memset(&options, 0, sizeof(options));
    /* Update the Get Handler input options */
    options.requestingComponent = pRequestingComp;
    if (RTMESSAGE_BUS_SUCCESS == rbus_GetElementsAddedByObject(ci->componentName, &eResponse))
    {
        rbus_PopInt32(eResponse, &numOfElements);
        /* the count includes the component object itself; skip it */
        if (numOfElements > 0)
            numOfElements -= 1;
        rtLog_Debug("Number of Entries in %s component is %d", ci->componentName, numOfElements);
        length = strlen(pParameterName);
        for(j = 0; j < numOfElements; j++)
        {
            rbus_PopString(eResponse, &pElementNames);
            rtLog_Debug("The names is, %s", pElementNames);
            /* prefix match selects the elements under the wildcard path */
            if (strncmp(pElementNames, pParameterName, length) == 0)
            {
                elementNode* el = NULL;
                el = retrieveElement(ci->elementRoot, pElementNames);
                if(el != NULL)
                {
                    rtLog_Debug("Retrieved [%s]", pElementNames);
                    if(el->cbTable.getHandler)
                    {
                        numOfGets++;
                        rtLog_Debug("Table and CB exists for [%s], call the CB!", pElementNames);
                        if (0 == firstElement)
                        {
                            /* head of the result list */
                            firstElement = 1;
                            rbusProperty_Init(&properties, pElementNames, NULL);
                            el->cbTable.getHandler(handle, properties, &options);
                            last = properties;
                        }
                        else
                        {
                            /* append to the tail; SetNext retains, so the
                               local reference is released right after */
                            rbusProperty_t tmpProperties;
                            rbusProperty_Init(&tmpProperties, pElementNames, NULL);
                            el->cbTable.getHandler(handle, tmpProperties, &options);
                            rbusProperty_SetNext(last, tmpProperties);
                            rbusProperty_Release(tmpProperties);
                            last = tmpProperties;
                        }
                    }
                }
            }
        }
        rtMessage_Release(eResponse);
        rtLog_Debug("We have identified %d entries that are matching the request and got the value. Lets return it.", numOfGets);
        rtMessage_Create(response);
        if (numOfGets > 0)
        {
            first = properties;
            rbus_AppendInt32(*response, (int) RBUS_ERROR_SUCCESS);
            rbus_AppendInt32(*response, numOfGets);
            for(i = 0; i < numOfGets; i++)
            {
                rbusValue_appendToMessage(rbusProperty_GetName(first), rbusProperty_GetValue(first), *response);
                first = rbusProperty_GetNext(first);
            }
            /* Release the memory */
            rbusProperty_Release(properties);
        }
        else
        {
            rbus_AppendInt32(*response, (int) RBUS_ERROR_ELEMENT_DOES_NOT_EXIST);
        }
    }
    else
    {
        rtMessage_Create(response);
        rbus_AppendInt32(*response, (int) RBUS_ERROR_ELEMENT_DOES_NOT_EXIST);
    }
    return;
}
/* Handles METHOD_GETPARAMETERVALUES requests.
 *
 * Request layout: requesting component name, parameter count, then that
 * many parameter names. Two cases:
 *  - a name ending in '.' is a wildcard (partial path) query: all readable
 *    properties under that element are gathered recursively and the
 *    response is sent immediately;
 *  - otherwise each named property's getHandler is invoked; on success all
 *    values are returned together, otherwise only the first failure code.
 *
 * Response layout: int32 result code, then on success the value count
 * followed by each serialized name/value pair.
 *
 * BUGFIX: when the wildcard element lookup failed, no response message was
 * created at all, leaving *response uninitialized for the transport; an
 * ELEMENT_DOES_NOT_EXIST response is now always produced. Additionally the
 * placeholder head property (and its temp value) used by the wildcard path
 * was leaked when no results were found; both are now released on every
 * path. The dead "#if 1/#else" scaffolding around the wildcard path was
 * removed.
 */
static void _get_callback_handler (rbusHandle_t handle, rtMessage request, rtMessage *response)
{
    comp_info* ci = (comp_info*)handle;
    int paramSize = 1, i = 0;
    rbusError_t result = RBUS_ERROR_SUCCESS;
    char const *parameterName = NULL;
    char const *pCompName = NULL;
    rbusProperty_t* properties = NULL;
    rbusGetHandlerOptions_t options;
    memset(&options, 0, sizeof(options));

    rbus_PopString(request, &pCompName);
    rbus_PopInt32(request, &paramSize);
    rtLog_Debug("Param Size [%d]", paramSize);

    if(paramSize > 0)
    {
        /* Update the Get Handler input options */
        options.requestingComponent = pCompName;

        properties = malloc(paramSize*sizeof(rbusProperty_t));
        for(i = 0; i < paramSize; i++)
        {
            rbusProperty_Init(&properties[i], NULL, NULL);
        }

        for(i = 0; i < paramSize; i++)
        {
            elementNode* el = NULL;
            parameterName = NULL;
            rbus_PopString(request, &parameterName);
            rtLog_Debug("Param Name [%d]:[%s]", i, parameterName);
            rbusProperty_SetName(properties[i], parameterName);

            /* Check for wildcard query */
            int length = strlen(parameterName) - 1;
            if (parameterName[length] == '.')
            {
                rtLog_Debug("handle the wildcard request..");
                el = retrieveInstanceElement(ci->elementRoot, parameterName);
                if (el != NULL)
                {
                    rbusProperty_t xproperties, first;
                    rbusValue_t xtmp;
                    int count = 0;
                    int n = 0;

                    /* seed the result list with a placeholder head; real
                       results are pushed back after it */
                    rbusValue_Init(&xtmp);
                    rbusValue_SetString(xtmp, "tmpValue");
                    rbusProperty_Init(&xproperties, "tmpProp", xtmp);
                    _get_recursive_wildcard_handler(el, handle, pCompName, xproperties, &count);
                    rtLog_Debug("We have identified %d entries that are matching the request and got the value. Lets return it.", count);
                    rtMessage_Create(response);
                    if (count > 0)
                    {
                        first = rbusProperty_GetNext(xproperties); /* skip the placeholder */
                        rbus_AppendInt32(*response, (int) RBUS_ERROR_SUCCESS);
                        rbus_AppendInt32(*response, count);
                        for(n = 0; n < count; n++)
                        {
                            rbusValue_appendToMessage(rbusProperty_GetName(first), rbusProperty_GetValue(first), *response);
                            first = rbusProperty_GetNext(first);
                        }
                    }
                    else
                    {
                        rbus_AppendInt32(*response, (int) RBUS_ERROR_ELEMENT_DOES_NOT_EXIST);
                    }
                    /* Release the memory (previously leaked when count == 0) */
                    rbusProperty_Release(xproperties);
                    rbusValue_Release(xtmp);
                }
                else
                {
                    /* BUGFIX: previously no response was created on this path */
                    rtMessage_Create(response);
                    rbus_AppendInt32(*response, (int) RBUS_ERROR_ELEMENT_DOES_NOT_EXIST);
                }
                /* Free the memory, regardless of success or not.. */
                for (i = 0; i < paramSize; i++)
                {
                    rbusProperty_Release(properties[i]);
                }
                free (properties);
                return;
            }

            //Do a look up and call the corresponding method
            el = retrieveElement(ci->elementRoot, parameterName);
            if(el != NULL)
            {
                rtLog_Debug("Retrieved [%s]", parameterName);
                if(el->cbTable.getHandler)
                {
                    rtLog_Debug("Table and CB exists for [%s], call the CB!", parameterName);
                    result = el->cbTable.getHandler(handle, properties[i], &options);
                    if (result != RBUS_ERROR_SUCCESS)
                    {
                        rtLog_Warn("called CB with result [%d]", result);
                        break;
                    }
                }
                else
                {
                    rtLog_Warn("Element retrieved, but no cb installed for [%s]!", parameterName);
                    result = RBUS_ERROR_ACCESS_NOT_ALLOWED;
                    break;
                }
            }
            else
            {
                rtLog_Warn("Not able to retrieve element [%s]", parameterName);
                result = RBUS_ERROR_ACCESS_NOT_ALLOWED;
                break;
            }
        }

        rtMessage_Create(response);
        rbus_AppendInt32(*response, (int) result);
        if (result == RBUS_ERROR_SUCCESS)
        {
            rbus_AppendInt32(*response, paramSize);
            for(i = 0; i < paramSize; i++)
            {
                rbusValue_appendToMessage(rbusProperty_GetName(properties[i]), rbusProperty_GetValue(properties[i]), *response);
            }
        }
        /* Free the memory, regardless of success or not.. */
        for (i = 0; i < paramSize; i++)
        {
            rbusProperty_Release(properties[i]);
        }
        free (properties);
    }
    else
    {
        rtLog_Warn("Get Failed as %s did not send any input", pCompName);
        result = RBUS_ERROR_INVALID_INPUT;
        rtMessage_Create(response);
        rbus_AppendInt32(*response, (int) result);
    }
    return;
}
/* Handles METHOD_ADDTBLROW requests.
 *
 * Request layout: int32 sessionId, table name, and (when sent by
 * rbus_updateTable) an alias string. Dispatches to the table's
 * tableAddRowHandler; on success instantiates the new row element, wires
 * up subscriptions/value-change for it, and publishes an OBJECT_CREATED
 * event carrying rowName, instNum and alias.
 *
 * Response layout: int32 result code followed by the int32 instance number
 * (0 when the add failed).
 */
static void _table_add_row_callback_handler (rbusHandle_t handle, rtMessage request, rtMessage* response)
{
    comp_info* ci = (comp_info*)handle;
    rbusError_t result = RBUS_ERROR_BUS_ERROR;
    int sessionId;
    char const* tableName;
    char const* aliasName = NULL;
    int err;
    uint32_t instNum = 0;
    rbus_PopInt32(request, &sessionId);
    rbus_PopString(request, &tableName);
    err = rbus_PopString(request, &aliasName); /*this presumes rbus_updateTable sent the alias.
                                                 if CCSP/dmcli is calling us, then this will be NULL*/
    /* treat a missing or empty alias as "no alias" */
    if(err != RT_OK || (aliasName && strlen(aliasName)==0))
        aliasName = NULL;
    rtLog_Debug("%s table [%s] alias [%s] err [%d]", __FUNCTION__, tableName, aliasName, err);
    /* need both the registration element (for callbacks) and the
       instance element (to attach the new row to) */
    elementNode* tableRegElem = retrieveElement(ci->elementRoot, tableName);
    elementNode* tableInstElem = retrieveInstanceElement(ci->elementRoot, tableName);
    if(tableRegElem && tableInstElem)
    {
        if(tableRegElem->cbTable.tableAddRowHandler)
        {
            rtLog_Info("%s calling tableAddRowHandler table [%s] alias [%s]", __FUNCTION__, tableName, aliasName);
            result = tableRegElem->cbTable.tableAddRowHandler(handle, tableName, aliasName, &instNum);
            if (result == RBUS_ERROR_SUCCESS)
            {
                elementNode* rowElem;
                rtLog_Debug("%s tableAddRowHandler success table [%s] alias [%s]", __FUNCTION__, tableName, aliasName);
                rowElem = instantiateTableRow(tableInstElem, instNum, aliasName);
                rbusSubscriptions_onTableRowAdded(ci->subscriptions, rowElem);
                /*update ValueChange after rbusSubscriptions_onTableRowAdded */
                valueChangeTableRowUpdate(handle, rowElem, true);
                /*send OBJECT_CREATED event after we create the row*/
                {
                    rbusEvent_t event;
                    rbusError_t respub;
                    rbusObject_t data;
                    rbusValue_t instNumVal;
                    rbusValue_t aliasVal;
                    rbusValue_t rowNameVal;
                    rbusValue_Init(&rowNameVal);
                    rbusValue_Init(&instNumVal);
                    rbusValue_Init(&aliasVal);
                    rbusValue_SetString(rowNameVal, rowElem->fullName);
                    rbusValue_SetUInt32(instNumVal, instNum);
                    rbusValue_SetString(aliasVal, aliasName ? aliasName : "");
                    rbusObject_Init(&data, NULL);
                    rbusObject_SetValue(data, "rowName", rowNameVal);
                    rbusObject_SetValue(data, "instNum", instNumVal);
                    rbusObject_SetValue(data, "alias", aliasVal);
                    event.name = tableName;
                    event.type = RBUS_EVENT_OBJECT_CREATED;
                    event.data = data;
                    rtLog_Info("%s publishing ObjectCreated table=%s rowName=%s", __FUNCTION__, tableName, rowElem->fullName);
                    respub = rbusEvent_Publish(handle, &event);
                    if(respub != RBUS_ERROR_SUCCESS)
                    {
                        /* best-effort: the row itself was still added */
                        rtLog_Warn("failed to publish ObjectCreated event err:%d", respub);
                    }
                    rbusValue_Release(rowNameVal);
                    rbusValue_Release(instNumVal);
                    rbusValue_Release(aliasVal);
                    rbusObject_Release(data);
                }
            }
            else
            {
                rtLog_Warn("%s tableAddRowHandler failed table [%s] alias [%s]", __FUNCTION__, tableName, aliasName);
            }
        }
        else
        {
            rtLog_Warn("%s tableAddRowHandler not registered table [%s] alias [%s]", __FUNCTION__, tableName, aliasName);
            result = RBUS_ERROR_ACCESS_NOT_ALLOWED;
        }
    }
    else
    {
        rtLog_Warn("%s no element found table [%s] alias [%s]", __FUNCTION__, tableName, aliasName);
        result = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
    }
    rtMessage_Create(response);
    rbus_AppendInt32(*response, result);
    rbus_AppendInt32(*response, (int32_t)instNum);
}
/* Handles METHOD_DELETETBLROW requests.
 *
 * Request layout: int32 sessionId then the full row name. Dispatches to
 * the parent table's tableRemoveRowHandler; on success detaches the row's
 * value-change/subscription state, deletes the row element, and publishes
 * an OBJECT_DELETED event carrying the removed row's name.
 *
 * Response layout: a single int32 result code.
 */
static void _table_remove_row_callback_handler (rbusHandle_t handle, rtMessage request, rtMessage* response)
{
    comp_info* ci = (comp_info*)handle;
    rbusError_t result = RBUS_ERROR_BUS_ERROR;
    int sessionId;
    char const* rowName;
    rbus_PopInt32(request, &sessionId);
    rbus_PopString(request, &rowName);
    rtLog_Debug("%s row [%s]", __FUNCTION__, rowName);
    /*get the element for the row */
    elementNode* rowRegElem = retrieveElement(ci->elementRoot, rowName);
    elementNode* rowInstElem = retrieveInstanceElement(ci->elementRoot, rowName);
    if(rowRegElem && rowInstElem)
    {
        /*switch to the row's table */
        elementNode* tableRegElem = rowRegElem->parent;
        elementNode* tableInstElem = rowInstElem->parent;
        if(tableRegElem && tableInstElem)
        {
            if(tableRegElem->cbTable.tableRemoveRowHandler)
            {
                rtLog_Info("%s calling tableRemoveRowHandler row [%s]", __FUNCTION__, rowName);
                result = tableRegElem->cbTable.tableRemoveRowHandler(handle, rowName);
                if (result == RBUS_ERROR_SUCCESS)
                {
                    rtLog_Info("%s tableRemoveRowHandler success row [%s]", __FUNCTION__, rowName);
                    char* rowInstName = strdup(rowInstElem->fullName); /*must dup because we are deleting the instance*/
                    /*update ValueChange before rbusSubscriptions_onTableRowRemoved */
                    valueChangeTableRowUpdate(handle, rowInstElem, false);
                    rbusSubscriptions_onTableRowRemoved(ci->subscriptions, rowInstElem);
                    deleteTableRow(rowInstElem);
                    /*send OBJECT_DELETED event after we delete the row*/
                    {
                        rbusEvent_t event;
                        rbusError_t respub;
                        rbusValue_t rowNameVal;
                        rbusObject_t data;
                        char tableName[RBUS_MAX_NAME_LENGTH];
                        /*must end the table name with a dot(.)*/
                        snprintf(tableName, RBUS_MAX_NAME_LENGTH, "%s.", tableInstElem->fullName);
                        rbusValue_Init(&rowNameVal);
                        rbusValue_SetString(rowNameVal, rowInstName);
                        rbusObject_Init(&data, NULL);
                        rbusObject_SetValue(data, "rowName", rowNameVal);
                        event.name = tableName;
                        event.data = data;
                        event.type = RBUS_EVENT_OBJECT_DELETED;
                        rtLog_Info("%s publishing ObjectDeleted table=%s rowName=%s", __FUNCTION__, tableInstElem->fullName, rowName);
                        respub = rbusEvent_Publish(handle, &event);
                        rbusValue_Release(rowNameVal);
                        rbusObject_Release(data);
                        free(rowInstName);
                        if(respub != RBUS_ERROR_SUCCESS)
                        {
                            /* best-effort: the row itself was still removed */
                            rtLog_Warn("failed to publish ObjectDeleted event err:%d", respub);
                        }
                    }
                }
                else
                {
                    rtLog_Warn("%s tableRemoveRowHandler failed row [%s]", __FUNCTION__, rowName);
                }
            }
            else
            {
                rtLog_Info("%s tableRemoveRowHandler not registered row [%s]", __FUNCTION__, rowName);
                result = RBUS_ERROR_ACCESS_NOT_ALLOWED;
            }
        }
        else
        {
            rtLog_Warn("%s no parent element found row [%s]", __FUNCTION__, rowName);
            result = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
        }
    }
    else
    {
        rtLog_Warn("%s no element found row [%s]", __FUNCTION__, rowName);
        result = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
    }
    rtMessage_Create(response);
    rbus_AppendInt32(*response, result);
}
/* Handles METHOD_RPC requests.
 *
 * Request layout: int32 sessionId, method name, then a serialized object of
 * input parameters. Dispatches to the element's methodHandler with a
 * heap-allocated async handle capturing the message header; the handler may
 * return RBUS_ERROR_ASYNC_RESPONSE to keep the handle and reply later, in
 * which case RTMESSAGE_BUS_SUCCESS_ASYNC is returned and no response is
 * built here.
 *
 * Response layout (sync case): int32 result code, then on success the
 * serialized output object.
 *
 * NOTE(review): malloc of asyncHandle is not checked for NULL; and on the
 * ASYNC_RESPONSE path outParams is released here (result != SUCCESS) —
 * presumably the async responder builds its own output object; confirm
 * against rbusMethod_SendAsyncResponse.
 */
static int _method_callback_handler(rbusHandle_t handle, rtMessage request, rtMessage* response, const rtMessageHeader* hdr)
{
    comp_info* ci = (comp_info*)handle;
    rbusError_t result = RBUS_ERROR_BUS_ERROR;
    int sessionId;
    char const* methodName;
    rbusObject_t inParams, outParams;
    rbus_PopInt32(request, &sessionId);
    rbus_PopString(request, &methodName);
    rbusObject_initFromMessage(&inParams, request);
    rtLog_Info("%s method [%s]", __FUNCTION__, methodName);
    /*get the element for the row */
    elementNode* methRegElem = retrieveElement(ci->elementRoot, methodName);
    elementNode* methInstElem = retrieveInstanceElement(ci->elementRoot, methodName);
    if(methRegElem && methInstElem)
    {
        if(methRegElem->cbTable.methodHandler)
        {
            rtLog_Info("%s calling methodHandler method [%s]", __FUNCTION__, methodName);
            rbusObject_Init(&outParams, NULL);
            /* the async handle carries the reply-routing header; ownership
               passes to the provider only for ASYNC_RESPONSE */
            rbusMethodAsyncHandle_t asyncHandle = malloc(sizeof(struct _rbusMethodAsyncHandle_t));
            asyncHandle->hdr = *hdr;
            result = methRegElem->cbTable.methodHandler(handle, methodName, inParams, outParams, asyncHandle);
            if (result == RBUS_ERROR_ASYNC_RESPONSE)
            {
                /*outParams will be sent async*/
                rtLog_Info("%s async method in progress [%s]", __FUNCTION__, methodName);
            }
            else
            {
                free(asyncHandle);
            }
            if (result != RBUS_ERROR_SUCCESS)
            {
                rbusObject_Release(outParams);
            }
        }
        else
        {
            rtLog_Info("%s methodHandler not registered method [%s]", __FUNCTION__, methodName);
            result = RBUS_ERROR_ACCESS_NOT_ALLOWED;
        }
    }
    else
    {
        rtLog_Warn("%s no element found method [%s]", __FUNCTION__, methodName);
        result = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
    }
    rbusObject_Release(inParams);
    if(result == RBUS_ERROR_ASYNC_RESPONSE)
    {
        return RTMESSAGE_BUS_SUCCESS_ASYNC;
    }
    else
    {
        rtMessage_Create(response);
        rbus_AppendInt32(*response, result);
        if (result == RBUS_ERROR_SUCCESS)
        {
            rbusObject_appendToMessage(outParams, *response);
            rbusObject_Release(outParams);
        }
        return RTMESSAGE_BUS_SUCCESS;
    }
}
/* Top-level inbound message dispatcher registered with rbus-core.
 * Routes each request to the handler matching its method string.
 * Only METHOD_RPC propagates its handler's return code (it may answer
 * asynchronously); all other methods reply synchronously and return 0.
 */
static int _callback_handler(char const* destination, char const* method, rtMessage request, void* userData, rtMessage* response, const rtMessageHeader* hdr)
{
    rbusHandle_t busHandle = (rbusHandle_t)userData;

    rtLog_Debug("Received callback for [%s]", destination);

    if(strcmp(method, METHOD_RPC) == 0)
    {
        /* RPC handlers may defer the response, so their status must flow back */
        return _method_callback_handler(busHandle, request, response, hdr);
    }

    if(strcmp(method, METHOD_GETPARAMETERVALUES) == 0)
        _get_callback_handler(busHandle, request, response);
    else if(strcmp(method, METHOD_SETPARAMETERVALUES) == 0)
        _set_callback_handler(busHandle, request, response);
    else if(strcmp(method, METHOD_ADDTBLROW) == 0)
        _table_add_row_callback_handler(busHandle, request, response);
    else if(strcmp(method, METHOD_DELETETBLROW) == 0)
        _table_remove_row_callback_handler(busHandle, request, response);
    else
        rtLog_Warn("unhandled callback for [%s] method!", method);

    return 0;
}
//******************************* Bus Initialization *****************************//
/* Opens an rbus handle for componentName.
 * Claims a slot in the process-wide comp_array, opens the broker connection
 * (tolerating "already open"), registers the component object and its
 * subscribe handler, and starts value-change polling.
 * Per spec, re-opening with the same name first closes the previous handle.
 * Returns RBUS_ERROR_SUCCESS on success; on any failure *handle stays NULL.
 */
rbusError_t rbus_open(rbusHandle_t* handle, char *componentName)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    int foundIndex = -1;
    int i = 0;

    if((handle == NULL) || (componentName == NULL))
    {
        return RBUS_ERROR_INVALID_INPUT;
    }

    *handle = NULL;
    rtLog_SetLevel(RT_LOG_WARN);

    /*
        Per spec: If a component calls this API more than once, any previous busHandle
        and all previous data element registrations will be canceled.
    */
    for(i = 0; i < MAX_COMPS_PER_PROCESS; i++)
    {
        if(comp_array[i].inUse && strcmp(componentName, comp_array[i].componentName) == 0)
        {
            rbus_close(&comp_array[i]);
        }
    }

    /*  Find open item in array:
        TODO why can't this just be a rtVector we push/remove from? */
    for(i = 0; i < MAX_COMPS_PER_PROCESS; i++)
    {
        if(!comp_array[i].inUse)
        {
            foundIndex = i;
            break;
        }
    }

    if(foundIndex == -1)
    {
        rtLog_Error("<%s>: Exceeded the allowed number of components per process!", __FUNCTION__);
        errorcode = RBUS_ERROR_OUT_OF_RESOURCES;
    }
    else
    {
        /*
            Per spec: the first component that calls rbus_open will establishes a new socket connection to the bus broker.
            Note: rbus_openBrokerConnection returns RTMESSAGE_BUS_ERROR_INVALID_STATE if a connection is already established.
            We cannot expect our 1st call to rbus_openBrokerConnection to succeed, as another library in this process
            may have already called rbus_openBrokerConnection.  This would happen if the ccsp_message_bus is already
            running with rbus-core in the same process.  Thus we must call again and check the return code.
        */
        err = rbus_openBrokerConnection(componentName);
        if( err != RTMESSAGE_BUS_SUCCESS &&
            err != RTMESSAGE_BUS_ERROR_INVALID_STATE/*connection already opened. which is allowed*/)
        {
            rtLog_Error("<%s>: rbus_openBrokerConnection() fails with %d", __FUNCTION__, err);
            errorcode = RBUS_ERROR_BUS_ERROR;
        }
        else
        {
            rbusHandle_t tmpHandle = &comp_array[foundIndex];

            rtLog_Info("Bus registration successfull!");
            rtLog_Debug("<%s>: Try rbus_registerObj() for component base object [%s]!", __FUNCTION__, componentName);

            if((err = rbus_registerObj(componentName, _callback_handler, tmpHandle)) != RTMESSAGE_BUS_SUCCESS)
            {
                /*Note that this will fail if a previous rbus_open was made with the same componentName
                  because rbus_registerObj doesn't allow the same name to be registered twice.  This would
                  also fail if ccsp using rbus-core has registered the same object name */
                rtLog_Error("<%s>: rbus_registerObj() fails with %d", __FUNCTION__, err);
                errorcode = RBUS_ERROR_BUS_ERROR;
            }
            else
            {
                rtLog_Debug("<%s>: rbus_registerObj() Success!", __FUNCTION__);

                if((err = rbus_registerSubscribeHandler(componentName, _event_subscribe_callback_handler, tmpHandle)) != RTMESSAGE_BUS_SUCCESS)
                {
                    rtLog_Error("<%s>: rbus_registerSubscribeHandler() Failed!", __FUNCTION__);
                    errorcode = RBUS_ERROR_BUS_ERROR;
                }
                else
                {
                    rtLog_Debug("<%s>: rbus_registerSubscribeHandler() Success!", __FUNCTION__);

                    /* slot is committed only after all registrations succeeded */
                    comp_array[foundIndex].inUse = 1;
                    comp_array[foundIndex].componentName = strdup(componentName);
                    *handle = tmpHandle;
                    rtVector_Create(&comp_array[foundIndex].eventSubs);

                    /*you really only need to call once per process but it doesn't hurt to call here*/
                    rbusValueChange_SetPollingPeriod(VALUE_CHANGE_POLLING_PERIOD);
#if 0 /*my test*/
{
    rtLog_Info("Running test:");

    elementNode* root = getEmptyElementNode(), * node;
    root->name = strdup("componentA");

    rbusDataElement_t el[5] = {
        {"Device.WiFi.AccessPoint.{i}.", RBUS_ELEMENT_TYPE_TABLE, {NULL} },
        {"Device.WiFi.AccessPoint.{i}.Prop1", RBUS_ELEMENT_TYPE_PROPERTY, {NULL} },
        {"Device.WiFi.AccessPoint.{i}.OtherObject.Property2", RBUS_ELEMENT_TYPE_PROPERTY, {NULL} },
        {"Device.WiFi.AccessPoint.{i}.AssociatedDevice.{i}.", RBUS_ELEMENT_TYPE_TABLE, {NULL} },
        {"Device.WiFi.AccessPoint.{i}.AssociatedDevice.{i}.SignalStrength", RBUS_ELEMENT_TYPE_PROPERTY, {NULL} }
    };

    int i = 0;
    for(i = 0; i < 5; ++i)
        insertElement(&root, &el[i]);

    printRegisteredElements(root, 0);

    instantiateTableRow(retrieveElement(root, "Device.WiFi.AccessPoint.{i}."), 1, "doghnut");
    printRegisteredElements(root, 0);

    instantiateTableRow(retrieveElement(root, "Device.WiFi.AccessPoint.1.AssociatedDevice.{i}."), 1, "apple");
    printRegisteredElements(root, 0);

    instantiateTableRow(retrieveElement(root, "Device.WiFi.AccessPoint.1.AssociatedDevice.{i}."), 2 , "orange");
    printRegisteredElements(root, 0);

    exit(0);
}
#endif
                }
            }
        }
    }
    return errorcode;
}
/* Closes an rbus handle: unsubscribes all events, destroys subscriptions,
 * stops value-change polling, frees the element tree, unregisters the
 * component object, and — if no other component in this process remains
 * open — closes the broker connection.
 * Returns RBUS_ERROR_INVALID_HANDLE if the core unregister fails.
 */
rbusError_t rbus_close(rbusHandle_t handle)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    comp_info* ci = (comp_info*)handle;

    if(handle == NULL)
    {
        return RBUS_ERROR_INVALID_INPUT;
    }

    if(ci->eventSubs)
    {
        /* rbusEvent_Unsubscribe removes the entry from eventSubs, so keep
           draining index 0 until the vector is empty */
        while(rtVector_Size(ci->eventSubs))
        {
            rbusEventSubscription_t* sub = (rbusEventSubscription_t*)rtVector_At(ci->eventSubs, 0);
            if(sub)
            {
                rbusEvent_Unsubscribe(handle, sub->eventName);
            }
        }
        rtVector_Destroy(ci->eventSubs, NULL);
        ci->eventSubs = NULL;
    }

    if(ci->subscriptions != NULL)
    {
        rbusSubscriptions_destroy(ci->subscriptions);
        ci->subscriptions = NULL;
    }

    rbusValueChange_Close(handle);//called before freeAllElements below

    if(ci->elementRoot)
    {
        freeAllElements(&(ci->elementRoot));
        //free(ci->elementRoot); valgrind reported this and I saw that freeAllElements actually frees this . leaving comment so others won't wonder why this is gone
        ci->elementRoot = NULL;
    }

    if((err = rbus_unregisterObj(ci->componentName)) != RTMESSAGE_BUS_SUCCESS) //FIXME: shouldn't rbus_closeBrokerConnection be called even if this fails ?
    {
        /* NOTE(review): on this path componentName is not freed and inUse stays
           set, so the slot leaks — behavior kept as-is, see FIXME above */
        rtLog_Warn("<%s>: rbus_unregisterObj() for [%s] fails with %d", __FUNCTION__, ci->componentName, err);
        errorcode = RBUS_ERROR_INVALID_HANDLE;
    }
    else
    {
        int canClose = 1;
        int i;

        rtLog_Debug("<%s>: rbus_unregisterObj() for [%s] Success!!", __FUNCTION__, ci->componentName);
        free(ci->componentName);
        ci->componentName = NULL;
        ci->inUse = 0;

        /* only the last open component may tear down the shared broker connection */
        for(i = 0; i < MAX_COMPS_PER_PROCESS; i++)
        {
            if(comp_array[i].inUse)
            {
                canClose = 0;
                break;
            }
        }

        if(canClose)
        {
            if((err = rbus_closeBrokerConnection()) != RTMESSAGE_BUS_SUCCESS)
            {
                rtLog_Warn("<%s>: rbus_closeBrokerConnection() fails with %d", __FUNCTION__, err);
                errorcode = RBUS_ERROR_BUS_ERROR;
            }
            else
            {
                rtLog_Info("Bus unregistration Successfull!");
            }
        }
    }
    return errorcode;
}
/* Registers numDataElements data-model elements for this handle.
 * Lazily creates the element-tree root and the subscriptions store on first
 * use, then inserts each element locally and advertises it to rbus-core.
 * Treated as a transaction: if any element fails, all elements registered
 * earlier in this call are rolled back via rbus_unregDataElements.
 * Returns RBUS_ERROR_INVALID_INPUT for a NULL/empty name,
 * RBUS_ERROR_OUT_OF_RESOURCES on insert/advertise failure.
 */
rbusError_t rbus_regDataElements(
    rbusHandle_t handle,
    int numDataElements,
    rbusDataElement_t *elements)
{
    int i;
    rbusError_t rc = RBUS_ERROR_SUCCESS;
    comp_info* ci = (comp_info*)handle;

    for(i=0; i<numDataElements; ++i)
    {
        char* name = elements[i].name;
        rbus_error_t err = RTMESSAGE_BUS_SUCCESS;

        if((!name) || (0 == strlen(name))) {
            rc = RBUS_ERROR_INVALID_INPUT;
            break ;
        }

        /* bug fix: the debug trace previously named the wrong function
           ("rbus_getDataElements") which made logs misleading */
        rtLog_Debug("rbus_regDataElements: %s", name);

        if(ci->elementRoot == NULL)
        {
            rtLog_Debug("First Time, create the root node for [%s]!", ci->componentName);
            ci->elementRoot = getEmptyElementNode();
            ci->elementRoot->name = strdup(ci->componentName);
            rtLog_Debug("Root node created for [%s]", ci->elementRoot->name);
        }

        if(ci->subscriptions == NULL)
        {
            rbusSubscriptions_create(&ci->subscriptions, handle, ci->elementRoot);
        }

        elementNode* node;
        if((node = insertElement(&(ci->elementRoot), &elements[i])) == NULL)
        {
            rtLog_Error("<%s>: failed to insert element [%s]!!", __FUNCTION__, name);
            rc = RBUS_ERROR_OUT_OF_RESOURCES;
            break;
        }
        else
        {
            /* advertise to the core router; undo the local insert if that fails */
            if((err = rbus_addElement(ci->componentName, name)) != RTMESSAGE_BUS_SUCCESS)
            {
                rtLog_Error("<%s>: failed to add element with core [%s] err=%d!!", __FUNCTION__, name, err);
                removeElement(&(ci->elementRoot), name);
                rc = RBUS_ERROR_OUT_OF_RESOURCES;
                break;
            }
            else
            {
                rtLog_Info("%s inserted successfully!", name);
            }
        }
    }

    /*TODO: need to review if this is how we should handle any failed register.
      To avoid a provider having a half registered data model, and to avoid
      the complexity of returning a list of error codes for each element in the list,
      we treat rbus_regDataElements as a transaction.  If any element from the elements list
      fails to register, we abort the whole thing.  We do this as follows: As soon
      as 1 fail occurs above, we break out of loop and we unregister all the
      successfully registered elements that happened during this call, before we failed.
      Thus we unregisters elements 0 to i (i was when we broke from loop above).*/
    if(rc != RBUS_ERROR_SUCCESS && i > 0)
        rbus_unregDataElements(handle, i, elements);

    return rc;
}
/* Unregisters numDataElements previously-registered elements from rbus-core.
 * Failures are logged but never abort the loop; always returns
 * RBUS_ERROR_SUCCESS (best-effort cleanup).
 */
rbusError_t rbus_unregDataElements(
    rbusHandle_t handle,
    int numDataElements,
    rbusDataElement_t *elements)
{
    comp_info* ci = (comp_info*)handle;
    int idx;

    for(idx = 0; idx < numDataElements; ++idx)
    {
        char const* elemName = elements[idx].name;

        /*
        if(rbus_unregisterEvent(ci->componentName, elemName) != RTMESSAGE_BUS_SUCCESS)
            rtLog_Info("<%s>: failed to remove event [%s]!!", __FUNCTION__, elemName);
        */

        if(rbus_removeElement(ci->componentName, elemName) != RTMESSAGE_BUS_SUCCESS)
            rtLog_Warn("<%s>: failed to remove element from core [%s]!!", __FUNCTION__, elemName);

        /* TODO: we need to remove all instance elements that this registration element instantiated
        rbusValueChange_RemoveParameter(handle, NULL, elemName);
        removeElement(&(ci->elementRoot), elemName);
        */
    }

    return RBUS_ERROR_SUCCESS;
}
//************************* Discovery related Operations *******************//
/* Resolves the owning component for each of numElements element names.
 * On success *componentName receives an array (owned by the caller) and
 * *numComponents is set to numElements; on lookup failure both outputs
 * stay zeroed and the call still returns RBUS_ERROR_SUCCESS (legacy
 * behavior preserved).
 */
rbusError_t rbus_discoverComponentName (rbusHandle_t handle,
                            int numElements, char** elementNames,
                            int *numComponents, char ***componentName)
{
    char** matches = NULL;

    if(handle == NULL)
    {
        return RBUS_ERROR_INVALID_INPUT;
    }

    *numComponents = 0;
    *componentName = 0;

    if(RTMESSAGE_BUS_SUCCESS != rbus_findMatchingObjects((char const**)elementNames,numElements,&matches))
    {
        rtLog_Warn("return from rbus_findMatchingObjects is not success");
        return RBUS_ERROR_SUCCESS;
    }

    *componentName = matches;
    *numComponents = numElements;
    return RBUS_ERROR_SUCCESS;
}
/* Lists the data elements registered by component `name`.
 * On success *elementNames receives a caller-owned array of strdup'd names
 * and *numElements its length. nextLevel is currently unused.
 * NOTE: if the core query fails, outputs stay zeroed but the function still
 * returns RBUS_ERROR_SUCCESS (legacy behavior, preserved for callers).
 */
rbusError_t rbus_discoverComponentDataElements (rbusHandle_t handle,
                            char* name, bool nextLevel,
                            int *numElements, char*** elementNames)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    char** val = NULL;
    rtMessage response;

    *numElements = 0;
    *elementNames = 0;
    UNUSED1(nextLevel);

    if((handle == NULL) || (name == NULL))
    {
        return RBUS_ERROR_INVALID_INPUT;
    }

    rbus_error_t ret = RTMESSAGE_BUS_SUCCESS;
    ret = rbus_GetElementsAddedByObject(name, &response);
    if(ret == RTMESSAGE_BUS_SUCCESS)
    {
        char const *comp = NULL;

        rbus_PopInt32(response, numElements);
        if(*numElements > 0)
            *numElements = *numElements-1;  //Fix this. We need a better way to ignore the component name as element name.

        if(*numElements)
        {
            int i;
            /* bug fix: calloc already zero-fills, the old redundant memset was
               removed; also guard against allocation failure before indexing */
            val = (char**)calloc(*numElements, sizeof(char*));
            if(val == NULL)
            {
                *numElements = 0;
                rtMessage_Release(response);
                return RBUS_ERROR_OUT_OF_RESOURCES;
            }
            for(i = 0; i < *numElements; i++)
            {
                comp = NULL;
                rbus_PopString(response, &comp);
                /* bug fix: strdup(NULL) is undefined behavior if a pop fails */
                val[i] = comp ? strdup(comp) : NULL;
            }
        }
        rtMessage_Release(response);
        *elementNames = val;
    }
    return errorcode;
}
//************************* Parameters related Operations *******************//
/* Fetches a single (non-wildcard) parameter value into *value.
 * Builds a GETPARAMETERVALUES request for one name, invokes it with a 6s
 * timeout, and deserializes the response into a caller-owned rbusValue_t.
 * Returns RBUS_ERROR_ACCESS_NOT_ALLOWED for wildcard names,
 * RBUS_ERROR_INVALID_INPUT for non-parameter queries, RBUS_ERROR_BUS_ERROR
 * on transport/provider failure. *value is only set on RBUS_ERROR_SUCCESS.
 */
rbusError_t rbus_get(rbusHandle_t handle, char const* name, rbusValue_t* value)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    rtMessage request, response;
    int ret = -1;
    comp_info* pCompInfo = (comp_info*) handle;

    if (_is_wildcard_query(name))
    {
        rtLog_Warn("%s This method does not support wildcard query", __FUNCTION__);
        return RBUS_ERROR_ACCESS_NOT_ALLOWED;
    }

    /* Is it a valid Query */
    if (!_is_valid_get_query(name))
    {
        rtLog_Warn("%s This method is only to get Parameters", __FUNCTION__);
        return RBUS_ERROR_INVALID_INPUT;
    }

    rtMessage_Create(&request);
    /* Set the Component name that invokes the set */
    rbus_AppendString(request, pCompInfo->componentName);
    /* Param Size */
    rbus_AppendInt32(request, (int32_t)1);
    rbus_AppendString(request, name);

    rtLog_Debug("Calling rbus_invokeRemoteMethod for [%s]", name);

    if((err = rbus_invokeRemoteMethod(name, METHOD_GETPARAMETERVALUES, request, 6000, &response)) != RTMESSAGE_BUS_SUCCESS)
    {
        rtLog_Error("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
        errorcode = RBUS_ERROR_BUS_ERROR;
    }
    else
    {
        int valSize;
        rbusLegacyReturn_t legacyRetCode = RBUS_LEGACY_ERR_FAILURE;

        rtLog_Debug("Received response for remote method invocation!");
        rbus_PopInt32(response, &ret);
        rtLog_Debug("Response from the remote method is [%d]!",ret);

        /* the provider may be a legacy (ccsp) component returning its own codes */
        errorcode = (rbusError_t) ret;
        legacyRetCode = (rbusLegacyReturn_t) ret;

        if((errorcode == RBUS_ERROR_SUCCESS) || (legacyRetCode == RBUS_LEGACY_ERR_SUCCESS))
        {
            errorcode = RBUS_ERROR_SUCCESS;
            rtLog_Debug("Received valid response!");
            rbus_PopInt32(response, &valSize);
            {
                char const *buff = NULL;
                //Param Name
                rbus_PopString(response, &buff);
                if(buff && (strcmp(name, buff) == 0))
                {
                    rbusValue_initFromMessage(value, response);
                }
                else
                {
                    rtLog_Warn("Param mismatch!");
                    rtLog_Warn("Requested param: [%s], Received Param: [%s]", name, buff);
                    /* bug fix: previously this path returned RBUS_ERROR_SUCCESS
                       while leaving *value uninitialized, so callers would
                       dereference garbage; report the failure instead */
                    errorcode = RBUS_ERROR_BUS_ERROR;
                }
            }
        }
        else
        {
            rtLog_Warn("Response from remote method indicates the call failed!!");
            errorcode = RBUS_ERROR_BUS_ERROR;
        }
        rtMessage_Release(response);
    }
    return errorcode;
}
/* Parses a GETPARAMETERVALUES response into a linked rbusProperty_t list.
 * Pops the status code and the value count, then deserializes each property,
 * chaining them via rbusProperty_SetNext. *retProperties receives the head
 * (only assigned when numOfVals > 0). Releases `response` in all cases.
 * Returns the (possibly legacy-mapped) status from the provider.
 */
rbusError_t _getExt_response_parser(rtMessage response, int *numValues, rbusProperty_t* retProperties)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    rbusLegacyReturn_t legacyRetCode = RBUS_LEGACY_ERR_FAILURE;
    int numOfVals = 0;
    int ret = -1;
    int i = 0;
    rtLog_Debug("Received response for remote method invocation!");

    rbus_PopInt32(response, &ret);
    rtLog_Debug("Response from the remote method is [%d]!",ret);

    /* providers may be legacy (ccsp) components returning their own codes */
    errorcode = (rbusError_t) ret;
    legacyRetCode = (rbusLegacyReturn_t) ret;

    *numValues = 0;
    if((errorcode == RBUS_ERROR_SUCCESS) || (legacyRetCode == RBUS_LEGACY_ERR_SUCCESS))
    {
        errorcode = RBUS_ERROR_SUCCESS;
        rtLog_Debug("Received valid response!");
        rbus_PopInt32(response, &numOfVals);
        *numValues = numOfVals;
        rtLog_Debug("Number of return params = %d", numOfVals);

        if(numOfVals)
        {
            rbusProperty_t last;
            for(i = 0; i < numOfVals; i++)
            {
                /* For the first instance, lets use the given pointer */
                if (0 == i)
                {
                    rbusProperty_initFromMessage(retProperties, response);
                    last = *retProperties;
                }
                else
                {
                    rbusProperty_t tmpProperties;
                    rbusProperty_initFromMessage(&tmpProperties, response);
                    /* SetNext retains tmpProperties, so releasing our local
                       reference here keeps it alive via the list; `last` then
                       points at the (still-retained) tail */
                    rbusProperty_SetNext(last, tmpProperties);
                    rbusProperty_Release(tmpProperties);
                    last = tmpProperties;
                }
            }
        }
    }
    else
    {
        rtLog_Error("Response from remote method indicates the call failed!!");
    }
    rtMessage_Release(response);
    return errorcode;
}
/* Extended get: supports multiple parameter names and (for a single name)
 * wildcard queries fanned out to every matching provider.
 * Wildcard path: resolves matching destinations, queries each with a 60s
 * timeout, and splices the per-provider property lists together; *numValues
 * accumulates the total. Non-wildcard path: a single GETPARAMETERVALUES
 * request carrying all paramCount names (6s timeout).
 * *retProperties is caller-owned on success.
 */
rbusError_t rbus_getExt(rbusHandle_t handle, int paramCount, char const** pParamNames, int *numValues, rbusProperty_t* retProperties)
{
    rbusError_t errorcode = RBUS_ERROR_SUCCESS;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    int i = 0;
    comp_info* pCompInfo = (comp_info*) handle;

    if ((1 == paramCount) && (_is_wildcard_query(pParamNames[0])))
    {
        rtMessage wResponse;
        int numOfEntries = 0;
        const char* pEntryName = NULL;
        //int length = strlen(pParamNames[0]);

        /* ask the router which providers own elements matching the expression */
        err = rbus_resolveWildcardDestination(pParamNames[0], &numOfEntries, &wResponse);
        if (RTMESSAGE_BUS_SUCCESS == err)
        {
            rtLog_Debug("Query for expression %s was successful. See result below:", pParamNames[0]);
            rbusProperty_t last;
            *numValues = 0;
            if (0 == numOfEntries)
            {
                /* no matching providers: fall through to the single-request path below */
                rtLog_Debug("It is possibly a table entry from single component.");
                rtMessage_Release(wResponse);
            }
            else
            {
                for(i = 0; i < numOfEntries; i++)
                {
                    int tmpNumOfValues = 0;
                    rtMessage request, response;
                    rbus_PopString(wResponse, &pEntryName);
                    rtLog_Debug("Destination %d is %s", i, pEntryName);

                    /* Get the query sent to each component identified */
                    rtMessage_Create(&request);
                    /* Set the Component name that invokes the set */
                    rbus_AppendString(request, pCompInfo->componentName);
                    rbus_AppendInt32(request, 1);
                    rbus_AppendString(request, pParamNames[0]);

                    /* Invoke the method */
                    if((err = rbus_invokeRemoteMethod(pEntryName, METHOD_GETPARAMETERVALUES, request, 60000, &response)) != RTMESSAGE_BUS_SUCCESS)
                    {
                        rtLog_Error("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
                        errorcode = RBUS_ERROR_BUS_ERROR;
                    }
                    else
                    {
                        if (0 == i)
                        {
                            errorcode = _getExt_response_parser(response, &tmpNumOfValues, retProperties);
                            last = *retProperties;
                        }
                        else
                        {
                            /* splice this provider's list onto the accumulated one */
                            rbusProperty_t tmpProperties;
                            errorcode = _getExt_response_parser(response, &tmpNumOfValues, &tmpProperties);
                            rbusProperty_PushBack(last, tmpProperties);
                            last = tmpProperties;
                        }
                    }
                    if (errorcode != RBUS_ERROR_SUCCESS)
                    {
                        /* NOTE(review): properties accumulated from earlier
                           providers are not released on this path — possible
                           leak, left as-is; confirm against rbusProperty
                           ownership rules */
                        rtLog_Warn("Failed to get the data from %s Component", pEntryName);
                        break;
                    }
                    else
                    {
                        *numValues += tmpNumOfValues;
                    }
                }
                rtMessage_Release(wResponse);
                return errorcode;
            }
        }
        else
        {
            rtLog_Debug("Query for expression %s was not successful.", pParamNames[0]);
            return RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
        }
    }
    {
        rtMessage request, response;

        rtMessage_Create(&request);
        /* Set the Component name that invokes the set */
        rbus_AppendString(request, pCompInfo->componentName);
        rbus_AppendInt32(request, paramCount);
        for (i = 0; i < paramCount; i++)
            rbus_AppendString(request, pParamNames[i]);

        /* Invoke the method; routed by the first parameter's name */
        if((err = rbus_invokeRemoteMethod(pParamNames[0], METHOD_GETPARAMETERVALUES, request, 6000, &response)) != RTMESSAGE_BUS_SUCCESS)
        {
            rtLog_Error("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
            errorcode = RBUS_ERROR_BUS_ERROR;
        }
        else
            errorcode = _getExt_response_parser(response, numValues, retProperties);
    }
    return errorcode;
}
/* Typed convenience wrapper around rbus_get: fetches paramName and, if the
 * returned value has the expected rbus type, copies it into paramVal
 * (int / unsigned int / strdup'd string per `type`).
 * Returns RBUS_ERROR_INVALID_INPUT on NULL args, RBUS_ERROR_BUS_ERROR on a
 * type mismatch, otherwise the result of rbus_get.
 */
static rbusError_t rbus_getByType(rbusHandle_t handle, char const* paramName, void* paramVal, rbusValueType_t type)
{
    rbusError_t errorcode = RBUS_ERROR_INVALID_INPUT;

    if (paramVal && paramName)
    {
        rbusValue_t value;

        errorcode = rbus_get(handle, paramName, &value);

        if (errorcode == RBUS_ERROR_SUCCESS)
        {
            if (rbusValue_GetType(value) == type)
            {
                switch(type)
                {
                    case RBUS_INT32:
                        *((int*)paramVal) = rbusValue_GetInt32(value);
                        break;
                    case RBUS_UINT32:
                        *((unsigned int*)paramVal) = rbusValue_GetUInt32(value);
                        break;
                    case RBUS_STRING:
                        /* caller owns and must free the duplicated string */
                        *((char**)paramVal) = strdup(rbusValue_GetString(value,NULL));
                        break;
                    default:
                        rtLog_Warn("%s unexpected type param %d", __FUNCTION__, type);
                        break;
                }
            }
            else
            {
                rtLog_Error("%s rbus_get type missmatch. expected %d. got %d", __FUNCTION__, type, rbusValue_GetType(value));
                errorcode = RBUS_ERROR_BUS_ERROR;
            }
            /* bug fix: the value was previously released only on the matching-type
               branch, leaking one rbusValue_t reference per type mismatch */
            rbusValue_Release(value);
        }
    }
    return errorcode;
}
/* Convenience getter: fetches paramName as a 32-bit signed int into *paramVal. */
rbusError_t rbus_getInt(rbusHandle_t handle, char const* paramName, int* paramVal)
{
    return rbus_getByType(handle, paramName, paramVal, RBUS_INT32);
}
/* Convenience getter: fetches paramName as a 32-bit unsigned int into *paramVal. */
rbusError_t rbus_getUint (rbusHandle_t handle, char const* paramName, unsigned int* paramVal)
{
    return rbus_getByType(handle, paramName, paramVal, RBUS_UINT32);
}
/* Convenience getter: fetches paramName as a string; *paramVal receives a
   strdup'd copy the caller must free (see rbus_getByType). */
rbusError_t rbus_getStr (rbusHandle_t handle, char const* paramName, char** paramVal)
{
    return rbus_getByType(handle, paramName, paramVal, RBUS_STRING);
}
/* Sets a single parameter value on its owning provider.
 * Marshals session id (from opts, default 0), caller component name, a
 * one-element value list, and the commit flag ("TRUE"/"FALSE") into a
 * SETPARAMETERVALUES request with a 6s timeout, then maps the provider's
 * (possibly legacy ccsp) status code to an rbusError_t.
 */
rbusError_t rbus_set(rbusHandle_t handle, char const* name,rbusValue_t value, rbusSetOptions_t* opts)
{
    rbus_error_t busStatus = RTMESSAGE_BUS_SUCCESS;
    rtMessage msg;
    rtMessage reply;
    comp_info* compInfo = (comp_info*) handle;

    if (value == NULL)
        return RBUS_ERROR_INVALID_INPUT;

    rtMessage_Create(&msg);

    /* session id first; 0 means "no session" */
    rbus_AppendInt32(msg, (opts && opts->sessionId != 0) ? opts->sessionId : 0);

    /* identify the caller */
    rbus_AppendString(msg, compInfo->componentName);

    /* exactly one parameter in this request */
    rbus_AppendInt32(msg, 1);
    rbusValue_appendToMessage(name, value, msg);

    /* commit flag; FIXME: Should we use string? */
    rbus_AppendString(msg, (!opts || opts->commit) ? "TRUE" : "FALSE");

    busStatus = rbus_invokeRemoteMethod(name, METHOD_SETPARAMETERVALUES, msg, 6000, &reply);
    if (busStatus != RTMESSAGE_BUS_SUCCESS)
    {
        rtLog_Error("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, busStatus);
        return RBUS_ERROR_BUS_ERROR;
    }

    rbusError_t errorcode;
    int rawStatus = -1;
    rbus_PopInt32(reply, &rawStatus);
    rtLog_Debug("Response from the remote method is [%d]!", rawStatus);

    /* providers may be legacy ccsp components with their own status codes */
    if (((rbusError_t)rawStatus == RBUS_ERROR_SUCCESS) ||
        ((rbusLegacyReturn_t)rawStatus == RBUS_LEGACY_ERR_SUCCESS))
    {
        errorcode = RBUS_ERROR_SUCCESS;
        rtLog_Debug("Successfully Set the Value");
    }
    else
    {
        char const* failureReason = NULL;
        errorcode = (rbusError_t) rawStatus;
        rbus_PopString(reply, &failureReason);
        rtLog_Warn("Failed to Set the Value for %s", failureReason);
    }

    /* Release the reponse message */
    rtMessage_Release(reply);
    return errorcode;
}
/* Sets numProps parameters in one SETPARAMETERVALUES request.
 * `properties` is a linked rbusProperty_t list; all values are marshalled
 * into a single request (session id, caller name, count, name/value pairs,
 * commit flag) routed by the first property's name — so all properties must
 * currently belong to the same provider (see TODO below).
 */
rbusError_t rbus_setMulti(rbusHandle_t handle, int numProps, rbusProperty_t properties, rbusSetOptions_t* opts)
{
    rbusError_t errorcode = RBUS_ERROR_INVALID_INPUT;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    rtMessage setRequest, setResponse;
    comp_info* pCompInfo = (comp_info*) handle;
    rbusProperty_t current;

    if (numProps > 0 && properties != NULL)
    {
        rtMessage_Create(&setRequest);

        /* Set the Session ID first */
        if ((opts) && (opts->sessionId != 0))
            rbus_AppendInt32(setRequest, opts->sessionId);
        else
            rbus_AppendInt32(setRequest, 0);

        /* Set the Component name that invokes the set */
        rbus_AppendString(setRequest, pCompInfo->componentName);

        /* Set the Size of params */
        rbus_AppendInt32(setRequest, numProps);

        /* walk the property list, serializing each name/value pair */
        current = properties;
        while(current)
        {
            rbusValue_appendToMessage(rbusProperty_GetName(current), rbusProperty_GetValue(current), setRequest);
            current = rbusProperty_GetNext(current);
        }

        /* Set the Commit value; FIXME: Should we use string? */
        rbus_AppendString(setRequest, (!opts || opts->commit) ? "TRUE" : "FALSE");

        /* TODO: At this point in time, only given Table/Component can be updated with SET/GET..
         * So, passing the elementname as first arg is not a issue for now..
         * We must enhance the rbus in such a way that we shd be able to set across components. Lets revist this area at that time.
         */
#if 0
        /* TODO: First step towards the above comment. When we enhace to support acorss components, this following has to be looped or appropriate count will be passed */
        char const* pElementNames[] = {values[0].name, NULL};
        char** pComponentName = NULL;
        err = rbus_findMatchingObjects(pElementNames, 1, &pComponentName);
        if (err != RTMESSAGE_BUS_SUCCESS)
        {
            rtLog_Info ("Element not found");
            errorcode = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
        }
        else
        {
            rtLog_Info ("Component name is, %s", pComponentName[0]);
            free (pComponentName[0]);
        }
#endif
        if((err = rbus_invokeRemoteMethod(rbusProperty_GetName(properties), METHOD_SETPARAMETERVALUES, setRequest, 6000, &setResponse)) != RTMESSAGE_BUS_SUCCESS)
        {
            rtLog_Error("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
            errorcode = RBUS_ERROR_BUS_ERROR;
        }
        else
        {
            char const* pErrorReason = NULL;
            rbusLegacyReturn_t legacyRetCode = RBUS_LEGACY_ERR_FAILURE;
            int ret = -1;
            rbus_PopInt32(setResponse, &ret);
            rtLog_Debug("Response from the remote method is [%d]!", ret);

            /* the provider may be a legacy (ccsp) component returning its own codes */
            errorcode = (rbusError_t) ret;
            legacyRetCode = (rbusLegacyReturn_t) ret;

            if((errorcode == RBUS_ERROR_SUCCESS) || (legacyRetCode == RBUS_LEGACY_ERR_SUCCESS))
            {
                errorcode = RBUS_ERROR_SUCCESS;
                rtLog_Debug("Successfully Set the Value");
            }
            else
            {
                rbus_PopString(setResponse, &pErrorReason);
                rtLog_Warn("Failed to Set the Value for %s", pErrorReason);
            }

            /* Release the reponse message */
            rtMessage_Release(setResponse);
        }
    }
    return errorcode;
}
#if 0
/* Disabled legacy variant of rbus_setMulti that took parallel name/value
 * arrays instead of an rbusProperty_t list. Kept for reference only; the
 * active implementation is directly above. */
rbusError_t rbus_setMulti(rbusHandle_t handle, int numValues,
        char const** valueNames, rbusValue_t* values, rbusSetOptions_t* opts)
{
    rbusError_t errorcode = RBUS_ERROR_INVALID_INPUT;
    rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
    rtMessage setRequest, setResponse;
    int loopCnt = 0;
    comp_info* pCompInfo = (comp_info*) handle;

    if (values != NULL)
    {
        rtMessage_Create(&setRequest);
        /* Set the Session ID first */
        rbus_AppendInt32(setRequest, 0);

        /* Set the Component name that invokes the set */
        rbus_AppendString(setRequest, pCompInfo->componentName);

        /* Set the Size of params */
        rbus_AppendInt32(setRequest, numValues);

        /* Set the params in details */
        for (loopCnt = 0; loopCnt < numValues; loopCnt++)
        {
            rbusValue_appendToMessage(valueNames[loopCnt], values[loopCnt], setRequest);
        }

        /* Set the Commit value; FIXME: Should we use string? */
        rbus_AppendString(setRequest, (!opts || opts->commit) ? "TRUE" : "FALSE");

        /* TODO: At this point in time, only given Table/Component can be updated with SET/GET..
         * So, passing the elementname as first arg is not a issue for now..
         * We must enhance the rbus in such a way that we shd be able to set across components. Lets revist this area at that time.
         */
#if 0
        /* TODO: First step towards the above comment. When we enhace to support acorss components, this following has to be looped or appropriate count will be passed */
        char const* pElementNames[] = {values[0].name, NULL};
        char** pComponentName = NULL;
        err = rbus_findMatchingObjects(pElementNames, 1, &pComponentName);
        if (err != RTMESSAGE_BUS_SUCCESS)
        {
            rtLog_Info ("Element not found");
            errorcode = RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
        }
        else
        {
            rtLog_Info ("Component name is, %s", pComponentName[0]);
            free (pComponentName[0]);
        }
#endif
        if((err = rbus_invokeRemoteMethod(valueNames[0], METHOD_SETPARAMETERVALUES, setRequest, 6000, &setResponse)) != RTMESSAGE_BUS_SUCCESS)
        {
            rtLog_Info("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
            errorcode = RBUS_ERROR_BUS_ERROR;
        }
        else
        {
            char const* pErrorReason = NULL;
            rbusLegacyReturn_t legacyRetCode = RBUS_LEGACY_ERR_FAILURE;
            int ret = -1;
            rbus_PopInt32(setResponse, &ret);
            rtLog_Info("Response from the remote method is [%d]!", ret);
            errorcode = (rbusError_t) ret;
            legacyRetCode = (rbusLegacyReturn_t) ret;
            if((errorcode == RBUS_ERROR_SUCCESS) || (legacyRetCode == RBUS_LEGACY_ERR_SUCCESS))
            {
                errorcode = RBUS_ERROR_SUCCESS;
                rtLog_Info("Successfully Set the Value");
            }
            else
            {
                rbus_PopString(setResponse, &pErrorReason);
                rtLog_Info("Failed to Set the Value for %s", pErrorReason);
            }

            /* Release the reponse message */
            rtMessage_Release(setResponse);
        }
    }
    return errorcode;
}
#endif
/* Typed convenience wrapper around rbus_set: builds an rbusValue_t of the
 * requested type from paramVal and sets it on paramName.
 * Supports RBUS_INT32, RBUS_UINT32, and RBUS_STRING; any other type returns
 * RBUS_ERROR_INVALID_INPUT without touching the bus.
 */
rbusError_t rbus_setByType(rbusHandle_t handle, char const* paramName, void const* paramVal, rbusValueType_t type)
{
    rbusError_t errorcode = RBUS_ERROR_INVALID_INPUT;

    if (paramName != NULL)
    {
        rbusValue_t value;

        rbusValue_Init(&value);

        switch(type)
        {
            case RBUS_INT32:
                rbusValue_SetInt32(value, *((int*)paramVal));
                break;
            case RBUS_UINT32:
                rbusValue_SetUInt32(value, *((unsigned int*)paramVal));
                break;
            case RBUS_STRING:
                rbusValue_SetString(value, (char*)paramVal);
                break;
            default:
                /* bug fix: previously fell through and issued a set with an
                   unset value; now reject unsupported types up front */
                rtLog_Warn("%s unexpected type param %d", __FUNCTION__, type);
                rbusValue_Release(value);
                return RBUS_ERROR_INVALID_INPUT;
        }

        errorcode = rbus_set(handle, paramName, value, NULL);
        rbusValue_Release(value);
    }
    return errorcode;
}
/* Convenience setter: sets paramName to a 32-bit signed int. */
rbusError_t rbus_setInt(rbusHandle_t handle, char const* paramName, int paramVal)
{
    /* bug fix: the source contained the HTML-entity-garbled token "¶mVal"
       (mangled "&paramVal"), which does not compile */
    return rbus_setByType(handle, paramName, &paramVal, RBUS_INT32);
}
/* Convenience setter: sets paramName to a 32-bit unsigned int. */
rbusError_t rbus_setUint(rbusHandle_t handle, char const* paramName, unsigned int paramVal)
{
    /* bug fix: the source contained the HTML-entity-garbled token "¶mVal"
       (mangled "&paramVal"), which does not compile */
    return rbus_setByType(handle, paramName, &paramVal, RBUS_UINT32);
}
/* Convenience setter: sets paramName to a string value (paramVal is passed
   directly, not by address, since rbusValue_SetString takes the pointer). */
rbusError_t rbus_setStr(rbusHandle_t handle, char const* paramName, char const* paramVal)
{
    return rbus_setByType(handle, paramName, paramVal, RBUS_STRING);
}
/* Adds a row to table tableName (optionally with an alias name).
 * Sends an ADDTBLROW request (session id placeholder, table name, alias or
 * empty string) routed by the table name; on success *instNum (if non-NULL)
 * receives the new row's instance number.
 */
rbusError_t rbusTable_addRow(
    rbusHandle_t handle,
    char const* tableName,
    char const* aliasName,
    uint32_t* instNum)
{
    (void)handle;
    rbus_error_t err;
    int returnCode = 0;
    int32_t instanceId = 0;
    rtMessage request, response;

    rtLog_Info("%s: %s %s", __FUNCTION__, tableName, aliasName);

    rtMessage_Create(&request);
    rbus_AppendInt32(request, 0);/*TODO: this should be the session ID*/
    rbus_AppendString(request, tableName);/*TODO: do we need to append the name as well as pass the name as the 1st arg to rbus_invokeRemoteMethod ?*/
    /* an empty string stands in for "no alias" on the wire */
    if(aliasName)
        rbus_AppendString(request, aliasName);
    else
        rbus_AppendString(request, "");

    if((err = rbus_invokeRemoteMethod(
        tableName, /*as taken from ccsp_base_api.c, this was the destination component ID, but to locate the route, the table name can be used
                     because the broker simlpy looks at the top level nodes that are owned by a component route.  maybe this breaks if the broker changes*/
        METHOD_ADDTBLROW, 
        request, 
        6000, 
        &response)) != RTMESSAGE_BUS_SUCCESS)
    {
        rtLog_Info("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
        return RBUS_ERROR_BUS_ERROR;
    }

    rbus_PopInt32(response, &returnCode);
    rbus_PopInt32(response, &instanceId);
    rtMessage_Release(response);

    if(instNum)
        *instNum = (uint32_t)instanceId;/*FIXME we need an rbus_PopUInt32 to avoid loosing a bit */

    rtLog_Info("%s rbus_invokeRemoteMethod success response returnCode:%d instanceId:%d", __FUNCTION__, returnCode, instanceId);

    /* NOTE(review): returnCode is the remote provider's raw status cast to
       rbusError_t — presumably already an rbus code; confirm against providers */
    return returnCode;
}
/* Removes the table row identified by rowName.
 * Sends a DELETETBLROW request (session id placeholder plus the row name)
 * routed by the row name, and returns the provider's status code.
 */
rbusError_t rbusTable_removeRow(
    rbusHandle_t handle,
    char const* rowName)
{
    rbus_error_t busStatus;
    int providerStatus = 0;
    rtMessage msg;
    rtMessage reply;

    (void)handle;

    rtLog_Info("%s: %s", __FUNCTION__, rowName);

    rtMessage_Create(&msg);
    rbus_AppendInt32(msg, 0);/*TODO: this should be the session ID*/
    rbus_AppendString(msg, rowName);/*TODO: do we need to append the name as well as pass the name as the 1st arg to rbus_invokeRemoteMethod ?*/

    busStatus = rbus_invokeRemoteMethod(rowName, METHOD_DELETETBLROW, msg, 6000, &reply);
    if(busStatus != RTMESSAGE_BUS_SUCCESS)
    {
        rtLog_Info("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, busStatus);
        return RBUS_ERROR_BUS_ERROR;
    }

    rbus_PopInt32(reply, &providerStatus); //TODO: should we handle this ?
    rtMessage_Release(reply);

    rtLog_Info("%s rbus_invokeRemoteMethod success response returnCode:%d", __FUNCTION__, providerStatus);

    return providerStatus;
}
//************************** Events ****************************//
/* Subscribes `handler` to eventName with no filter, interval, or duration.
 * On success the subscription record is tracked in the handle's eventSubs
 * vector (released by rbusEvent_Unsubscribe or rbus_close).
 * Returns RBUS_ERROR_OUT_OF_RESOURCES on allocation failure,
 * RBUS_ERROR_BUS_ERROR if the core subscribe fails.
 */
rbusError_t rbusEvent_Subscribe(
    rbusHandle_t        handle,
    char const*         eventName,
    rbusEventHandler_t  handler,
    void*               userData)
{
    comp_info* ci = (comp_info*)handle;
    rbus_error_t err;
    rbusEventSubscription_t* sub;

    rtLog_Debug("%s: %s", __FUNCTION__, eventName);

    /* bug fix: malloc was previously unchecked, so OOM dereferenced NULL */
    sub = malloc(sizeof(rbusEventSubscription_t));
    if(!sub)
    {
        rtLog_Error("rbusEvent_Subscribe failed: out of memory");
        return RBUS_ERROR_OUT_OF_RESOURCES;
    }
    sub->handle = handle;
    sub->handler = handler;
    sub->userData = userData;
    sub->eventName = strdup(eventName);
    sub->duration = 0;
    sub->interval = 0;
    sub->filter = NULL;

    err = rbus_subscribeToEvent(NULL, eventName, _event_callback_handler, NULL, sub);

    if(err == RTMESSAGE_BUS_SUCCESS)
    {
        rtVector_PushBack(ci->eventSubs, sub);
    }
    else
    {
        rbusEventSubscription_free(sub);
        rtLog_Error("rbusEvent_Subscribe failed err=%d", err);
    }

    return err == RTMESSAGE_BUS_SUCCESS ? RBUS_ERROR_SUCCESS: RBUS_ERROR_BUS_ERROR;
}
/* Unsubscribes this handle from eventName: tells the core to stop routing
 * the event, then removes and frees the local subscription record.
 * Always returns RBUS_ERROR_SUCCESS; a missing local record is only logged.
 */
rbusError_t rbusEvent_Unsubscribe(
    rbusHandle_t        handle,
    char const*         eventName)
{
    comp_info* ci = (comp_info*)handle;
    rbusEventSubscription_t* record;

    rtLog_Debug("%s: %s", __FUNCTION__, eventName);

    rbus_unsubscribeFromEvent(NULL, eventName);

    /*the use of rtVector is inefficient here.  I have to loop through the vector to find the sub by name, 
      then call RemoveItem, which loops through again to find the item by address to destroy */
    record = rbusEventSubscription_find(ci->eventSubs, eventName);
    if(record == NULL)
    {
        rtLog_Error("rbusEvent_Unsubscribe unexpected -- we should have found a sub but didn't");
        return RBUS_ERROR_SUCCESS;
    }

    rtVector_RemoveItem(ci->eventSubs, record, rbusEventSubscription_free);
    return RBUS_ERROR_SUCCESS;
}
/* Subscribes to numSubscriptions events, each with optional filter,
 * interval, and duration (serialized into a payload message for the
 * provider). Treated as a transaction: if any subscription fails, all
 * subscriptions made earlier in this call are rolled back.
 * Returns RBUS_ERROR_BUS_ERROR on core failure,
 * RBUS_ERROR_OUT_OF_RESOURCES on allocation failure.
 */
rbusError_t rbusEvent_SubscribeEx(
    rbusHandle_t                handle,
    rbusEventSubscription_t*    subscription,
    int                         numSubscriptions)
{
    comp_info* ci = (comp_info*)handle;
    rbus_error_t err;
    rbusEventSubscription_t* sub;
    int i, j;

    for(i = 0; i < numSubscriptions; ++i)
    {
        rtMessage payload = NULL;

        rtLog_Info("%s: %s", __FUNCTION__, subscription[i].eventName);

        /* bug fix: malloc was previously unchecked */
        sub = malloc(sizeof(rbusEventSubscription_t));
        if(!sub)
        {
            for(j = 0; j < i; ++j)
                rbusEvent_Unsubscribe(handle, subscription[j].eventName);
            return RBUS_ERROR_OUT_OF_RESOURCES;
        }
        sub->handle = handle;
        sub->handler = subscription[i].handler;
        sub->userData = subscription[i].userData;
        sub->eventName = strdup(subscription[i].eventName);
        sub->duration = subscription[i].duration;
        sub->interval = subscription[i].interval;

        /* only build a payload when there is something beyond a plain subscribe */
        if(subscription[i].filter || sub->interval || sub->duration)
        {
            rtMessage_Create(&payload);
            rbus_AppendInt32(payload, sub->interval);
            rbus_AppendInt32(payload, sub->duration);

            if(subscription[i].filter)
            {
                sub->filter = subscription[i].filter;
                rbusFilter_Retain(sub->filter);
                rbus_AppendInt32(payload, 1);
                rbusFilter_AppendToMessage(sub->filter, payload);
            }
            else
            {
                rbus_AppendInt32(payload, 0);
            }
        }
        else
        {
            sub->filter = NULL;
        }

        err = rbus_subscribeToEvent(NULL, sub->eventName, _event_callback_handler, payload, sub);

        if(payload)
        {
            rtMessage_Release(payload);
        }

        if(err == RTMESSAGE_BUS_SUCCESS)
        {
            rtVector_PushBack(ci->eventSubs, sub);
        }
        else
        {
            rtLog_Warn("rbusEvent_SubscribeEx failed err=%d", err);

            rbusEventSubscription_free(sub);

            /*  So here I'm thinking its best to treat SubscribeEx like a transaction because
                if any subs fails, how will the user know which ones succeeded and which failed ?
                So, as a transaction, we just undo everything, which are all those from 0 to i-1.
            */
            for(j = 0; j < i; ++j)
            {
                /* bug fix: this previously unsubscribed subscription[i] (the one
                   that just failed) i times, leaving subscriptions 0..i-1 leaked
                   and active; it must walk the loop index j */
                rbusEvent_Unsubscribe(handle, subscription[j].eventName);
            }

            return RBUS_ERROR_BUS_ERROR;
        }
    }
    return RBUS_ERROR_SUCCESS;
}
rbusError_t rbusEvent_UnsubscribeEx(
rbusHandle_t handle,
rbusEventSubscription_t* subscription,
int numSubscriptions)
{
rbusError_t errorcode = RBUS_ERROR_SUCCESS;
comp_info* ci = (comp_info*)handle;
rbusEventSubscription_t* our_copy;
int i;
for(i = 0; i < numSubscriptions; ++i)
{
rtLog_Info("%s: %s", __FUNCTION__, subscription[i].eventName);
rbus_unsubscribeFromEvent(NULL, subscription[i].eventName);
/*the use of rtVector is inefficient here. I have to loop through the vector to find the sub by name,
then call RemoveItem, which loops through again to find the item by address to destroy */
our_copy = rbusEventSubscription_find(ci->eventSubs, subscription[i].eventName);
if(our_copy)
{
rtVector_RemoveItem(ci->eventSubs, our_copy, rbusEventSubscription_free);
}
else
{
rtLog_Warn("rbusEvent_Unsubscribe unexpected -- we should have found a sub but didn't");
}
}
return errorcode;
}
rbusError_t rbusEvent_Publish(
rbusHandle_t handle,
rbusEvent_t* eventData)
{
comp_info* ci = (comp_info*)handle;
rbus_error_t err, errOut = RTMESSAGE_BUS_SUCCESS;
rtListItem listItem;
rbusSubscription_t* subscription;
rtLog_Info("%s: %s", __FUNCTION__, eventData->name);
/*get the node and walk its subscriber list,
publishing event to each subscriber*/
elementNode* el = retrieveInstanceElement(ci->elementRoot, eventData->name);
if(!el)
{
rtLog_Warn("rbusEvent_Publish failed: retrieveElement return NULL for %s", eventData->name);
return RBUS_ERROR_ELEMENT_DOES_NOT_EXIST;
}
if(!el->subscriptions)/*nobody subscribed yet*/
{
return RTMESSAGE_BUS_SUCCESS;
}
rtList_GetFront(el->subscriptions, &listItem);
while(listItem)
{
bool publish = true;
rtListItem_GetData(listItem, (void**)&subscription);
if(!subscription || !subscription->eventName || !subscription->listener)
{
rtLog_Info("rbusEvent_Publish failed: null subscriber data");
if(errOut == RTMESSAGE_BUS_SUCCESS)
errOut = RBUS_ERROR_BUS_ERROR;
rtListItem_GetNext(listItem, &listItem);
}
/* Commented out the following experiment. Leaving it the comment for now.
The idea here was to only publish to the subscriber who either didn't have a filter,
or had a filter that was triggered. So subscribers who had a filter that wasn't
triggered, would not get an event. This would allow multiple consumers to
subscribe to the same property but with different filters, and those consumers
would only get events when their specific filter was triggered.
So currently, without this code, if one consumer's filter is triggered, all
consumer's subscribed to this same property will get the event.
*/
#if 0
/* apply filter for value change events */
if(eventData->type == RBUS_EVENT_VALUE_CHANGED)
{
/*it is a code bug to call value change for non-properties*/
assert(el->type == RBUS_ELEMENT_TYPE_PROPERTY);
/* if autoPublish then rbus_valuechange should be the only one calling us*/
if(subscription->autoPublish)
{
/* if the subscriber has a filter we check the filter to determine if we publish.
if the subscriber does not have a filter, we publish always*/
if(subscription->filter)
{
/*We publish an event only when the value crosses the filter threshold boundary.
When the value crosses into the threshold we publish a single event signally the filter started matching.
When the value crosses out of the threshold we publish a single event signally the filter stopped matching.
We do not publish continuous events while the filter continues to match. The consumer can read the 'filter'
property from the event data to determine if the filter has started or stopped matching. */
rbusValue_t valNew, valOld, valFilter;
int cNew, cOld;
int result = -1;/*-1 means don't publish*/
valNew = rbusObject_GetValue(eventData->data, "value");
valOld = rbusObject_GetValue(eventData->data, "oldValue");
valFilter = subscription->filter->filter.threshold.value;
cNew = rbusValue_Compare(valNew, valFilter);
cOld = rbusValue_Compare(valOld, valFilter);
switch(subscription->filter->filter.threshold.type)
{
case RBUS_THRESHOLD_ON_CHANGE_GREATER_THAN:
if(cNew > 0 && cOld <= 0)
result = 1;
else if(cNew <= 0 && cOld > 0)
result = 0;
break;
case RBUS_THRESHOLD_ON_CHANGE_GREATER_THAN_OR_EQUAL:
if(cNew >= 0 && cOld < 0)
result = 1;
else if(cNew < 0 && cOld >= 0)
result = 0;
break;
case RBUS_THRESHOLD_ON_CHANGE_LESS_THAN:
if(cNew < 0 && cOld >= 0)
result = 1;
else if(cNew >= 0 && cOld < 0)
result = 0;
break;
case RBUS_THRESHOLD_ON_CHANGE_LESS_THAN_OR_EQUAL:
if(cNew <= 0 && cOld > 0)
result = 1;
else if(cNew > 0 && cOld <= 0)
result = 0;
break;
case RBUS_THRESHOLD_ON_CHANGE_EQUAL:
if(cNew == 0 && cOld != 0)
result = 1;
else if(cNew != 0 && cOld == 0)
result = 0;
break;
case RBUS_THRESHOLD_ON_CHANGE_NOT_EQUAL:
if(cNew != 0 && cOld == 0)
result = 1;
else if(cNew == 0 && cOld != 0)
result = 0;
break;
default:
break;
}
if(result != -1)
{
/*set 'filter' to true/false implying that either the filter has started or stopped matching*/
rbusValue_t val;
rbusValue_Init(&val);
rbusValue_SetBoolean(val, result);
rbusObject_SetValue(eventData->data, "filter", val);
rbusValue_Release(val);
}
else
{
publish = false;
}
}
}
else
{
/* If autoPublish is false then a provider should be the only one calling us.
Its expected that the provider will apply any filter so if the provider has
set the filter then we publish to the subscriber owning that filter.
If the provider set the filter NULL then we publish to all subscribers. */
if(eventData->filter && eventData->filter != subscription->filter)
{
publish = false;
}
}
}
#endif
if(publish)
{
rtMessage msg;
rtMessage_Create(&msg);
rbusEvent_appendToMessage(eventData, msg);
rtLog_Info("rbusEvent_Publish: publising event %s", subscription->eventName);
err = rbus_publishSubscriberEvent(
ci->componentName,
subscription->eventName/*use the same eventName the consumer subscribed with; not event instance name eventData->name*/,
subscription->listener,
msg);
rtMessage_Release(msg);
if(err != RTMESSAGE_BUS_SUCCESS)
{
if(errOut == RTMESSAGE_BUS_SUCCESS)
errOut = err;
rtLog_Info("rbusEvent_Publish faild: rbus_publishSubscriberEvent return error %d", err);
}
}
rtListItem_GetNext(listItem, &listItem);
}
return errOut == RTMESSAGE_BUS_SUCCESS ? RBUS_ERROR_SUCCESS: RBUS_ERROR_BUS_ERROR;
}
rbusError_t rbusMethod_InvokeInternal(
rbusHandle_t handle,
char const* methodName,
rbusObject_t inParams,
rbusObject_t* outParams,
int timeout)
{
(void)handle;
rbus_error_t err;
int returnCode = 0;
rtMessage request, response;
rtLog_Info("%s: %s", __FUNCTION__, methodName);
rtMessage_Create(&request);
rbus_AppendInt32(request, 0);/*TODO: this should be the session ID*/
rbus_AppendString(request, methodName); /*TODO: do we need to append the name as well as pass the name as the 1st arg to rbus_invokeRemoteMethod ?*/
rbusObject_appendToMessage(inParams, request);
if((err = rbus_invokeRemoteMethod(
methodName,
METHOD_RPC,
request,
timeout,
&response)) != RTMESSAGE_BUS_SUCCESS)
{
rtLog_Info("%s rbus_invokeRemoteMethod failed with err %d", __FUNCTION__, err);
if(err == RTMESSAGE_BUS_ERROR_REMOTE_TIMED_OUT)
return RBUS_ERROR_TIMEOUT;
else
return RBUS_ERROR_BUS_ERROR;
}
rbus_PopInt32(response, &returnCode);
if(returnCode == RBUS_ERROR_SUCCESS)
{
rbusObject_initFromMessage(outParams, response);
}
rtMessage_Release(response);
rtLog_Info("%s rbus_invokeRemoteMethod success response returnCode:%d", __FUNCTION__, returnCode);
return returnCode;
}
rbusError_t rbusMethod_Invoke(
rbusHandle_t handle,
char const* methodName,
rbusObject_t inParams,
rbusObject_t* outParams)
{
return rbusMethod_InvokeInternal(handle, methodName, inParams, outParams, 6000);
}
typedef struct _rbusMethodInvokeAsyncData_t
{
rbusHandle_t handle;
char* methodName;
rbusObject_t inParams;
rbusMethodAsyncRespHandler_t callback;
int timeout;
} rbusMethodInvokeAsyncData_t;
static void* rbusMethod_InvokeAsyncThreadFunc(void *p)
{
rbusError_t err;
rbusMethodInvokeAsyncData_t* data = p;
rbusObject_t outParams = NULL;
err = rbusMethod_InvokeInternal(
data->handle,
data->methodName,
data->inParams,
&outParams,
data->timeout);
data->callback(data->handle, data->methodName, err, outParams);
rbusObject_Release(data->inParams);
if(outParams)
rbusObject_Release(outParams);
free(data->methodName);
free(data);
return NULL;
}
rbusError_t rbusMethod_InvokeAsync(
rbusHandle_t handle,
char const* methodName,
rbusObject_t inParams,
rbusMethodAsyncRespHandler_t callback,
int timeout)
{
pthread_t pid;
rbusMethodInvokeAsyncData_t* data;
int err = 0;
rbusObject_Retain(inParams);
data = malloc(sizeof(rbusMethodInvokeAsyncData_t));
data->handle = handle;
data->methodName = strdup(methodName);
data->inParams = inParams;
data->callback = callback;
data->timeout = timeout > 0 ? timeout : 6000;
if((err = pthread_create(&pid, NULL, rbusMethod_InvokeAsyncThreadFunc, data)) != 0)
{
rtLog_Error("%s pthread_create failed: err=%d", __FUNCTION__, err);
return RBUS_ERROR_BUS_ERROR;
}
if((err = pthread_detach(pid)) != 0)
{
rtLog_Error("%s pthread_detach failed: err=%d", __FUNCTION__, err);
}
return RBUS_ERROR_SUCCESS;
}
rbusError_t rbusMethod_SendAsyncResponse(
rbusMethodAsyncHandle_t asyncHandle,
rbusError_t error,
rbusObject_t outParams)
{
rtMessage response;
rtMessage_Create(&response);
rbus_AppendInt32(response, error);
if(outParams)
rbusObject_appendToMessage(outParams, response);
rbus_sendResponse(&asyncHandle->hdr, response);
free(asyncHandle);
return RBUS_ERROR_SUCCESS;
}
rbusError_t rbus_createSession(rbusHandle_t handle, uint32_t *pSessionId)
{
(void)handle;
rbusError_t rc = RBUS_ERROR_SUCCESS;
rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
rtMessage response;
if (pSessionId)
{
*pSessionId = 0;
if((err = rbus_invokeRemoteMethod(RBUS_SMGR_DESTINATION_NAME, RBUS_SMGR_METHOD_REQUEST_SESSION_ID, NULL, 6000, &response)) == RTMESSAGE_BUS_SUCCESS)
{
rbus_GetInt32(response, MESSAGE_FIELD_RESULT, (int*) &err);
if(RTMESSAGE_BUS_SUCCESS != err)
{
rtLog_Error("Session manager reports internal error %d.", err);
rc = RBUS_ERROR_SESSION_ALREADY_EXIST;
}
else
{
rbus_GetInt32(response, MESSAGE_FIELD_PAYLOAD, (int*) pSessionId);
rtLog_Info("Received new session id %u", *pSessionId);
}
}
else
{
rtLog_Error("Failed to communicated with session manager.");
rc = RBUS_ERROR_BUS_ERROR;
}
}
else
{
rtLog_Warn("Invalid Input passed..");
rc = RBUS_ERROR_INVALID_INPUT;
}
return rc;
}
rbusError_t rbus_getCurrentSession(rbusHandle_t handle, uint32_t *pSessionId)
{
(void)handle;
rbusError_t rc = RBUS_ERROR_SUCCESS;
rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
rtMessage response;
if (pSessionId)
{
*pSessionId = 0;
if((err = rbus_invokeRemoteMethod(RBUS_SMGR_DESTINATION_NAME, RBUS_SMGR_METHOD_GET_CURRENT_SESSION_ID, NULL, 6000, &response)) == RTMESSAGE_BUS_SUCCESS)
{
rbus_GetInt32(response, MESSAGE_FIELD_RESULT, (int*) &err);
if(RTMESSAGE_BUS_SUCCESS != err)
{
rtLog_Error("Session manager reports internal error %d.", err);
rc = RBUS_ERROR_SESSION_ALREADY_EXIST;
}
else
{
rbus_GetInt32(response, MESSAGE_FIELD_PAYLOAD, (int*) pSessionId);
rtLog_Info("Received new session id %u", *pSessionId);
}
}
else
{
rtLog_Error("Failed to communicated with session manager.");
rc = RBUS_ERROR_BUS_ERROR;
}
}
else
{
rtLog_Warn("Invalid Input passed..");
rc = RBUS_ERROR_INVALID_INPUT;
}
return rc;
}
rbusError_t rbus_closeSession(rbusHandle_t handle, uint32_t sessionId)
{
(void)handle;
rbusError_t rc = RBUS_ERROR_SUCCESS;
rbus_error_t err = RTMESSAGE_BUS_SUCCESS;
if (0 != sessionId)
{
rtMessage inputSession;
rtMessage response;
rtMessage_Create(&inputSession);
rbus_SetInt32(inputSession, MESSAGE_FIELD_PAYLOAD, sessionId);
if((err = rbus_invokeRemoteMethod(RBUS_SMGR_DESTINATION_NAME, RBUS_SMGR_METHOD_END_SESSION, inputSession, 6000, &response)) == RTMESSAGE_BUS_SUCCESS)
{
rbus_GetInt32(response, MESSAGE_FIELD_RESULT, (int*) &err);
if(RTMESSAGE_BUS_SUCCESS != err)
{
rtLog_Error("Session manager reports internal error %d.", err);
rc = RBUS_ERROR_SESSION_ALREADY_EXIST;
}
else
rtLog_Info("Successfully ended session %u.", sessionId);
}
else
{
rtLog_Error("Failed to communicated with session manager.");
rc = RBUS_ERROR_BUS_ERROR;
}
}
else
{
rtLog_Warn("Invalid Input passed..");
rc = RBUS_ERROR_INVALID_INPUT;
}
return rc;
}
rbusStatus_t rbus_checkStatus(void)
{
rtLog_SetLevel(RT_LOG_WARN);
rbuscore_bus_status_t busStatus = rbuscore_checkBusStatus();
return (rbusStatus_t) busStatus;
}
rbusError_t rbus_registerLogHandler(rbusLogHandler logHandler)
{
if (logHandler)
{
rtLogSetLogHandler ((rtLogHandler) logHandler);
return RBUS_ERROR_SUCCESS;
}
else
{
rtLog_Warn("Invalid Input passed..");
return RBUS_ERROR_INVALID_INPUT;
}
}
| 34.18948 | 205 | 0.58081 | [
"object",
"vector",
"model"
] |
267f77ea1c3d8bffedf51833c7600dc244d683c8 | 2,314 | h | C | include/rumba_autocar/csv_input.h | tamago117/rumba_autocar | 53c9ea56cdfc5fe8074652611938bd315693c0fc | [
"Apache-2.0"
] | 8 | 2021-07-03T22:08:57.000Z | 2021-09-26T06:44:18.000Z | include/rumba_autocar/csv_input.h | tamago117/rumba_autocar | 53c9ea56cdfc5fe8074652611938bd315693c0fc | [
"Apache-2.0"
] | null | null | null | include/rumba_autocar/csv_input.h | tamago117/rumba_autocar | 53c9ea56cdfc5fe8074652611938bd315693c0fc | [
"Apache-2.0"
] | 1 | 2021-09-10T07:41:32.000Z | 2021-09-10T07:41:32.000Z | /**
* @file csv_input.h
* @brief input csv header file
* @author Michikuni Eguchi
* @date 2021.7.28
* @details
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
namespace csv{
class csv_input
{
public:
csv_input(const std::string filePath);
double readCSV(int line, int col);
int lineNum();
int colNum();
private:
std::string line;
std::vector<std::vector<std::string>> strcon;
std::vector<std::string> split(const std::string& s, const std::string separator);
};
std::vector<std::string> csv_input::split(const std::string& s, const std::string separator) {
bool ignore_empty = 0;
bool split_empty = 0;
struct {
auto len(const std::string& s) { return s.length(); }
auto len(const std::string::value_type* p) { return p ? std::char_traits<std::string::value_type>::length(p) : 0; }
auto len(const std::string::value_type c) { return c == std::string::value_type() ? 0 : 1; /*return 1;*/ }
} util;
if (s.empty()) { /// empty string ///
if (!split_empty || util.len(separator)) return {""};
return {};
}
auto v = std::vector<std::string>();
auto n = static_cast<std::string::size_type>(util.len(separator));
if (n == 0) { /// empty separator ///
if (!split_empty) return {s};
for (auto&& c : s) v.emplace_back(1, c);
return v;
}
auto p = std::string::size_type(0);
while (1) { /// split with separator ///
auto pos = s.find(separator, p);
if (pos == std::string::npos) {
if (ignore_empty && p - n + 1 == s.size()) break;
v.emplace_back(s.begin() + p, s.end());
break;
}
if (!ignore_empty || p != pos)
v.emplace_back(s.begin() + p, s.begin() + pos);
p = pos + n;
}
return v;
}
csv_input::csv_input(const std::string filePath)
{
int i=0;
std::ifstream ifs(filePath);
// 開かなかったらエラー
if (!ifs){
std::cout << "Error! File can not be opened"<<std::endl;
}
while (getline(ifs, line)) {
strcon.push_back(split(line, ","));
i++;
}
}
double csv_input::readCSV(int line, int col)
{
return std::stod(strcon[line][col]);
}
int csv_input::lineNum()
{
return strcon.size();
}
int csv_input::colNum()
{
return strcon[0].size();
}
}//namespace csv
| 23.14 | 119 | 0.593345 | [
"vector"
] |
267f8087a915ecadb0128e77410ac1e895ed24c0 | 2,395 | h | C | libs/kinematics/include/mrpt/kinematics/CVehicleVelCmd_DiffDriven.h | feroze/mrpt-shivang | 95bf524c5e10ed2e622bd199f1b0597951b45370 | [
"BSD-3-Clause"
] | 2 | 2017-03-25T18:09:17.000Z | 2017-05-22T08:14:48.000Z | libs/kinematics/include/mrpt/kinematics/CVehicleVelCmd_DiffDriven.h | shivangag/mrpt | 95bf524c5e10ed2e622bd199f1b0597951b45370 | [
"BSD-3-Clause"
] | null | null | null | libs/kinematics/include/mrpt/kinematics/CVehicleVelCmd_DiffDriven.h | shivangag/mrpt | 95bf524c5e10ed2e622bd199f1b0597951b45370 | [
"BSD-3-Clause"
] | 1 | 2017-06-30T18:23:45.000Z | 2017-06-30T18:23:45.000Z | /* +---------------------------------------------------------------------------+
| Mobile Robot Programming Toolkit (MRPT) |
| http://www.mrpt.org/ |
| |
| Copyright (c) 2005-2017, Individual contributors, see AUTHORS file |
| See: http://www.mrpt.org/Authors - All rights reserved. |
| Released under BSD License. See details in http://www.mrpt.org/License |
+---------------------------------------------------------------------------+ */
#pragma once
#include <mrpt/kinematics/CVehicleVelCmd.h>
namespace mrpt
{
namespace kinematics
{
DEFINE_SERIALIZABLE_PRE_CUSTOM_BASE_LINKAGE(CVehicleVelCmd_DiffDriven, CVehicleVelCmd, KINEMATICS_IMPEXP)
/** Kinematic model for Ackermann-like or differential-driven vehicles.
*
* \ingroup mrpt_kinematics_grp
*/
class KINEMATICS_IMPEXP CVehicleVelCmd_DiffDriven : public CVehicleVelCmd
{
DEFINE_SERIALIZABLE(CVehicleVelCmd_DiffDriven)
public:
double lin_vel; //!< Linear velocity (m/s)
double ang_vel; //!< Angular velocity (rad/s)
CVehicleVelCmd_DiffDriven();
virtual ~CVehicleVelCmd_DiffDriven();
size_t getVelCmdLength() const MRPT_OVERRIDE;
std::string getVelCmdDescription(const int index) const MRPT_OVERRIDE;
double getVelCmdElement(const int index) const MRPT_OVERRIDE;
void setVelCmdElement(const int index, const double val) MRPT_OVERRIDE;
bool isStopCmd() const MRPT_OVERRIDE;
void setToStop() MRPT_OVERRIDE;
/** See docs of method in base class. The implementation for differential-driven robots of this method
* just multiplies all the components of vel_cmd times vel_scale, which is appropriate
* for differential-driven kinematic models (v,w).
*/
void cmdVel_scale(double vel_scale) MRPT_OVERRIDE;
/** See base class docs.
* Tecognizes these parameters: `robotMax_V_mps`, `robotMax_W_degps` */
double cmdVel_limits(const mrpt::kinematics::CVehicleVelCmd &prev_vel_cmd, const double beta, const TVelCmdParams ¶ms) MRPT_OVERRIDE;
private:
double filter_max_vw(double &v, double &w, const TVelCmdParams &p);
};
DEFINE_SERIALIZABLE_POST_CUSTOM_BASE_LINKAGE(CVehicleVelCmd_DiffDriven, CVehicleVelCmd, KINEMATICS_IMPEXP)
} // End of namespace
} // End of namespace
| 42.767857 | 140 | 0.656367 | [
"model"
] |
269732f8e53c973dccb89570b649b3bcf150e637 | 5,350 | h | C | samples/sample_fei/include/auxiliary_interfaces.h | strassek-snapshots/MediaSDK | 1d419006b9bb10d93c1ade7bd6643c6d73e63efc | [
"MIT"
] | null | null | null | samples/sample_fei/include/auxiliary_interfaces.h | strassek-snapshots/MediaSDK | 1d419006b9bb10d93c1ade7bd6643c6d73e63efc | [
"MIT"
] | null | null | null | samples/sample_fei/include/auxiliary_interfaces.h | strassek-snapshots/MediaSDK | 1d419006b9bb10d93c1ade7bd6643c6d73e63efc | [
"MIT"
] | 1 | 2018-01-04T06:53:11.000Z | 2018-01-04T06:53:11.000Z | /******************************************************************************\
Copyright (c) 2005-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This sample was distributed or derived from the Intel's Media Samples package.
The original version of this sample may be obtained from https://software.intel.com/en-us/intel-media-server-studio
or https://software.intel.com/en-us/media-client-solutions-support.
\**********************************************************************************/
#ifndef __SAMPLE_FEI__AUXILIARY_INTERFACES_H__
#define __SAMPLE_FEI__AUXILIARY_INTERFACES_H__
#include "encoding_task.h"
#ifndef MFX_VERSION
#error MFX_VERSION not defined
#endif
class MFX_VppInterface
{
private:
MFX_VppInterface(const MFX_VppInterface& other_vpp); // forbidden
MFX_VppInterface& operator= (const MFX_VppInterface& other_vpp); // forbidden
public:
MFXVideoSession* m_pmfxSession;
MFXVideoVPP* m_pmfxVPP;
mfxU32 m_allocId;
mfxVideoParam m_videoParams;
AppConfig* m_pAppConfig;
mfxSyncPoint m_SyncPoint;
std::vector<mfxExtBuffer*> m_InitExtParams;
MFX_VppInterface(MFXVideoSession* session, mfxU32 allocId, AppConfig* config);
~MFX_VppInterface();
mfxStatus Init();
mfxStatus Close();
mfxStatus Reset(mfxU16 width = 0, mfxU16 height = 0, mfxU16 crop_w = 0, mfxU16 crop_h = 0);
mfxVideoParam* GetCommonVideoParams();
mfxStatus QueryIOSurf(mfxFrameAllocRequest* request);
mfxStatus FillParameters();
mfxStatus VPPoneFrame(mfxFrameSurface1* pSurf_in, mfxFrameSurface1* pSurf_out);
};
class MFX_DecodeInterface
{
private:
MFX_DecodeInterface(const MFX_DecodeInterface& other_decode); // forbidden
MFX_DecodeInterface& operator= (const MFX_DecodeInterface& other_decode); // forbidden
public:
MFXVideoSession* m_pmfxSession;
MFXVideoDECODE* m_pmfxDECODE;
mfxU32 m_allocId;
mfxVideoParam m_videoParams;
AppConfig* m_pAppConfig;
mfxBitstream m_mfxBS;
mfxSyncPoint m_SyncPoint;
CSmplBitstreamReader m_BSReader;
ExtSurfPool* m_pSurfPool;
bool m_bEndOfFile;
FILE* m_DecStremout_out;
std::vector<mfxExtBuffer*> m_InitExtParams;
MFX_DecodeInterface(MFXVideoSession* session, mfxU32 allocId, AppConfig* config, ExtSurfPool* surf_pool);
~MFX_DecodeInterface();
mfxStatus Init();
mfxStatus Close();
mfxStatus Reset();
mfxStatus QueryIOSurf(mfxFrameAllocRequest* request);
mfxVideoParam* GetCommonVideoParams();
mfxStatus UpdateVideoParam();
mfxStatus FillParameters();
mfxStatus GetOneFrame(mfxFrameSurface1* & pSurf);
mfxStatus DecodeOneFrame(mfxFrameSurface1 * & pSurf_out);
mfxStatus DecodeLastFrame(mfxFrameSurface1 * & pSurf_out);
mfxStatus FlushOutput(mfxFrameSurface1* pSurf);
mfxStatus ResetState();
};
class YUVreader
{
public:
CSmplYUVReader m_FileReader;
AppConfig* m_pAppConfig;
ExtSurfPool* m_pSurfPool;
bool m_bExternalAlloc;
MFXFrameAllocator* m_pMFXAllocator;
YUVreader(AppConfig* cfg, ExtSurfPool* surf_pool, MFXFrameAllocator* allocator)
: m_pAppConfig(cfg)
, m_pSurfPool(surf_pool)
, m_bExternalAlloc(m_pAppConfig->bUseHWmemory)
, m_pMFXAllocator(allocator)
{}
~YUVreader()
{
Close();
}
void Close()
{
m_FileReader.Close();
}
mfxStatus Init()
{
std::list<msdk_string> tmpl;
tmpl.push_back(msdk_string(m_pAppConfig->strSrcFile));
return m_FileReader.Init(tmpl,
m_pAppConfig->ColorFormat);
}
mfxStatus ResetState()
{
return Init();
}
mfxStatus GetOneFrame(mfxFrameSurface1* & pSurf);
};
#endif // __SAMPLE_FEI__AUXILIARY_INTERFACES_H__ | 39.051095 | 755 | 0.706355 | [
"vector"
] |
2697691905f34ad680c1a344918088c1a22c9c91 | 136,467 | c | C | drivers/pcie/al_hal_pcie.c | delroth/alpine_hal | eb6b9f132c08eb3cd70a6a94586bcd689a244d0a | [
"Unlicense"
] | 1 | 2022-02-03T01:04:47.000Z | 2022-02-03T01:04:47.000Z | drivers/pcie/al_hal_pcie.c | delroth/alpine_hal | eb6b9f132c08eb3cd70a6a94586bcd689a244d0a | [
"Unlicense"
] | null | null | null | drivers/pcie/al_hal_pcie.c | delroth/alpine_hal | eb6b9f132c08eb3cd70a6a94586bcd689a244d0a | [
"Unlicense"
] | null | null | null | /*******************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.
This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.
Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html
Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "al_hal_pcie.h"
#include "al_hal_pbs_utils.h"
#include "al_hal_unit_adapter_regs.h"
#if (!defined(AL_DEV_ID)) || (AL_DEV_ID == AL_DEV_ID_ALPINE_V1) || (defined(AL_HAL_PCIE_REV_ID_ALL))
#define REV1_SUPPORTED 1
#else
#define REV1_SUPPORTED 0
#endif
#if (!defined(AL_DEV_ID)) || (AL_DEV_ID == AL_DEV_ID_ALPINE_V2) || (defined(AL_HAL_PCIE_REV_ID_ALL))
#define REV2_SUPPORTED 1
#define REV3_SUPPORTED 1
#else
#define REV2_SUPPORTED 0
#define REV3_SUPPORTED 0
#endif
#if (!defined(AL_DEV_ID)) || (AL_DEV_ID == AL_DEV_ID_ALPINE_V3) || (defined(AL_HAL_PCIE_REV_ID_ALL))
#define REV4_SUPPORTED 1
#else
#define REV4_SUPPORTED 0
#endif
#if (!defined(AL_DEV_ID)) || (AL_DEV_ID == AL_DEV_ID_ALPINE_V4) || (defined(AL_HAL_PCIE_REV_ID_ALL))
#define REV5_SUPPORTED 1
#else
#define REV5_SUPPORTED 0
#endif
#if (REV1_SUPPORTED)
#include "al_hal_pcie_reg_ptr_set_rev1.h"
#endif
#if (REV2_SUPPORTED)
#include "al_hal_pcie_reg_ptr_set_rev2.h"
#endif
#if (REV3_SUPPORTED)
#include "al_hal_pcie_reg_ptr_set_rev3.h"
#endif
#if (REV4_SUPPORTED)
#include "al_hal_pcie_reg_ptr_set_rev4.h"
#endif
#if (REV5_SUPPORTED)
#include "al_hal_pcie_reg_ptr_set_rev5.h"
#endif
#include "al_hal_pcie_regs_fields.h"
/**
* Parameter definitions
*/
#define AL_PCIE_AXI_REGS_OFFSET 0x0
#define AL_PCIE_DEVCTL_PAYLOAD_128B 0x00
#define AL_PCIE_DEVCTL_PAYLOAD_256B 0x20
#define AL_PCIE_SECBUS_DEFAULT 0x1
#define AL_PCIE_SUBBUS_DEFAULT 0x1
#define AL_PCIE_LINKUP_WAIT_INTERVAL 50 /* measured in usec */
#define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC 20
#define AL_PCIE_LINKUP_RETRIES 8
#define AL_PCIE_MAX_32_MEMORY_BAR_SIZE (0x100000000ULL)
#define AL_PCIE_MIN_MEMORY_BAR_SIZE (1 << 12)
#define AL_PCIE_MIN_IO_BAR_SIZE (1 << 8)
#define AL_PCIE_CLK_GEN2_PIPE_HZ (250 * 1000000) /* 250MHZ */
#define AL_PCIE_CLK_GEN1_PIPE_HZ (125 * 1000000) /* 125MHZ */
#define AL_PCIE_CLK_DELAY_AFTER_RESET_EN_HZ (0x50) /* 0x50HZ */
#define AL_PCIE_CLK_DELAY_BEFORE_RATE_CHANGE_HZ (0x20) /* 0x20HZ */
#define AL_PCIE_CLK_DELAY_AFTER_RATE_CHANGE_HZ (0x20) /* 0x20HZ */
#define AL_PCIE_CLK_REF_MAX (3)
#define AL_PCIE_PORT_GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA_MAX 15
#define AL_PCIE_PORT_GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA_MAX 15
#define AL_PCIE_PORT_GEN3_EQ_FMDC_N_EVALS_MAX 16
#define AL_PCIE_PORT_GEN3_EQ_FMDC_T_MIN_PHAS_MAX 24
#define AL_PCIE_INT_FWD_UNIT_ETH_IDX_MAX 6
#define AL_PCIE_ATU_ADDRESS_MATCH_EXT_MODE_THRESHOLD (1ULL << 32)
/**
* MACROS
*/
#define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \
PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
#define AL_PCIE_FLR_DONE_INTERVAL 10
/**
* Inbound posted/non-posted header credits and outstanding outbound reads
* completion header HW settings
*
* Programmers are not expected to modify these setting except for rare cases,
*/
struct al_pcie_ib_hcrd_os_ob_reads_hw_config {
/**
* This value holds the hardware limit for
* number of outstanding outbound reads
*/
uint8_t max_nof_outstanding_ob_reads;
/**
* This value holds the hardware limit for the sum of
* nof_cpl_hdr, nof_np_hdr and nof_p_hdr
*/
unsigned int total_hdr_limit;
/**
* This value state if it is advised to use hardware
* default of values of header credits
*/
al_bool ib_hcrd_use_hw_default;
/**
* This value state if current hardware support
* dynamic header credits allocation
*/
al_bool ib_hcrd_supported;
};
static const struct al_pcie_ib_hcrd_os_ob_reads_config
ib_hcrd_os_ob_reads_defaults[AL_PCIE_REV_ID_MAX + 1][PCIE_AXI_DEVICE_ID_REG_REV_ID_MAX + 1][2]
= {
/* REV1 EP */
[AL_PCIE_REV_ID_1][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 15,
.nof_cpl_hdr = 75,
.nof_np_hdr = 6,
.nof_p_hdr = 15,
},
/* REV1 RC */
[AL_PCIE_REV_ID_1][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 8,
.nof_cpl_hdr = 41,
.nof_np_hdr = 25,
.nof_p_hdr = 31,
},
/* REV2 EP */
[AL_PCIE_REV_ID_2][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 15,
.nof_cpl_hdr = 75,
.nof_np_hdr = 6,
.nof_p_hdr = 15,
},
/* REV2 RC */
[AL_PCIE_REV_ID_2][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 8,
.nof_cpl_hdr = 41,
.nof_np_hdr = 25,
.nof_p_hdr = 31,
},
/* REV3 EP */
[AL_PCIE_REV_ID_3][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 32,
.nof_cpl_hdr = 161,
.nof_np_hdr = 38,
.nof_p_hdr = 60,
},
/* REV3 RC */
[AL_PCIE_REV_ID_3][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 32,
.nof_cpl_hdr = 161,
.nof_np_hdr = 38,
.nof_p_hdr = 60,
},
/* REV4 EP X4*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X4][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 16,
.nof_cpl_hdr = 81,
.nof_np_hdr = 33,
.nof_p_hdr = 33,
},
/* REV4 RC X4*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X4][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 16,
.nof_cpl_hdr = 81,
.nof_np_hdr = 33,
.nof_p_hdr = 33,
},
/* REV4 EP X8*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X8][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 64,
.nof_cpl_hdr = 321,
.nof_np_hdr = 33,
.nof_p_hdr = 65,
},
/* REV4 RC X8*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X8][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 64,
.nof_cpl_hdr = 321,
.nof_np_hdr = 33,
.nof_p_hdr = 65,
},
/* REV4 EP X16*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X16][AL_PCIE_OPERATING_MODE_EP] = {
.nof_outstanding_ob_reads = 80,
.nof_cpl_hdr = 200,
.nof_np_hdr = 33,
.nof_p_hdr = 33,
},
/* REV4 RC X16*/
[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X16][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 80,
.nof_cpl_hdr = 200,
.nof_np_hdr = 33,
.nof_p_hdr = 33,
},
/* REV5 RC X8*/
[AL_PCIE_REV_ID_5][PCIE_AXI_DEVICE_ID_REG_REV_ID_REV5_X8][AL_PCIE_OPERATING_MODE_RC] = {
.nof_outstanding_ob_reads = 64,
.nof_cpl_hdr = 200,
.nof_np_hdr = 33,
.nof_p_hdr = 33,
},
};
/**
 * inbound header credits and outstanding outbound reads HW config
 *
 * Indexed by [revision id][sub-revision id]. Per-silicon limits used when
 * validating/applying inbound header-credit and outstanding-outbound-read
 * configuration:
 * - max_nof_outstanding_ob_reads: upper bound on outstanding OB reads
 * - total_hdr_limit: sum budget shared by P/NP/CPL header credits
 * - ib_hcrd_use_hw_default: HW-chosen credit split, SW must not override
 * - ib_hcrd_supported: SW-controlled credit allocation available at all
 */
static const struct al_pcie_ib_hcrd_os_ob_reads_hw_config
ib_hcrd_os_ob_reads_hw_config[AL_PCIE_REV_ID_MAX + 1][PCIE_AXI_DEVICE_ID_REG_REV_ID_MAX + 1]
= {
	/* REV1 */
	[AL_PCIE_REV_ID_1][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA] = {
		.max_nof_outstanding_ob_reads = 16,
		.total_hdr_limit = AL_PCIE_REV_1_2_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 0,
		.ib_hcrd_supported = 1,
	},
	/* REV2 */
	[AL_PCIE_REV_ID_2][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA] = {
		.max_nof_outstanding_ob_reads = 16,
		.total_hdr_limit = AL_PCIE_REV_1_2_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 0,
		.ib_hcrd_supported = 1,
	},
	/* REV3 */
	[AL_PCIE_REV_ID_3][PCIE_AXI_DEVICE_ID_REG_REV_ID_NA] = {
		.max_nof_outstanding_ob_reads = 32,
		.total_hdr_limit = AL_PCIE_REV3_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 0,
		.ib_hcrd_supported = 1,
	},
	/* REV4 X4*/
	[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X4] = {
		.max_nof_outstanding_ob_reads = 32,
		.total_hdr_limit = AL_PCIE_REV4_X4_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 1,
		.ib_hcrd_supported = 1,
	},
	/* REV4 X8*/
	[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X8] = {
		.max_nof_outstanding_ob_reads = 64,
		.total_hdr_limit = AL_PCIE_REV4_X8_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 1,
		.ib_hcrd_supported = 1,
	},
	/* REV4 X16 - note: SW credit allocation not supported on this variant */
	[AL_PCIE_REV_ID_4][PCIE_AXI_DEVICE_ID_REG_REV_ID_X16] = {
		.max_nof_outstanding_ob_reads = 128,
		.total_hdr_limit = AL_PCIE_REV4_X16_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 1,
		.ib_hcrd_supported = 0,
	},
	/* REV5 X8*/
	[AL_PCIE_REV_ID_5][PCIE_AXI_DEVICE_ID_REG_REV_ID_REV5_X8] = {
		.max_nof_outstanding_ob_reads = 128,
		.total_hdr_limit = AL_PCIE_REV5_X8_IB_HCRD_SUM,
		.ib_hcrd_use_hw_default = 1,
		.ib_hcrd_supported = 1,
	},
};
/* Number of PCIe lanes per port revision (used e.g. to size per-lane
 * register loops) */
static unsigned int lanes_num[AL_PCIE_REV_ID_MAX + 1] = {
	[AL_PCIE_REV_ID_1] = 4,
	[AL_PCIE_REV_ID_2] = 4,
	[AL_PCIE_REV_ID_3] = 8,
	[AL_PCIE_REV_ID_4] = 16,
	[AL_PCIE_REV_ID_5] = 8,
};
/**
* Static functions
*/
/**
 * Enable or disable writes to read-only configuration-space registers,
 * inserting the memory barriers required around the mode switch.
 */
static void
al_pcie_port_wr_to_ro_set(struct al_pcie_port *pcie_port, al_bool enable)
{
	uint32_t wr_en_val = (enable == AL_TRUE) ? 1 : 0;

	/* When disabling RO writes, commit any previous config-space
	 * writes before the override goes away */
	if (enable == AL_FALSE)
		al_local_data_memory_barrier();

	al_reg_write32(&pcie_port->regs->port_regs->rd_only_wr_en, wr_en_val);

	/* When enabling RO writes, commit the enable itself before any
	 * caller attempts an RO config-space write */
	if (enable == AL_TRUE)
		al_local_data_memory_barrier();
}
/** Helper for register writes through the DBI CS2 (shadow) window;
 *  ORs in the revision-dependent CS2 select bit before writing */
static void al_reg_write32_dbi_cs2(
	struct al_pcie_port *pcie_port,
	uint32_t *offset,
	uint32_t val)
{
	uintptr_t addr = (uintptr_t)offset;

	/* CS2 select bit moved from 0x1000 to 0x4000 starting with rev 3 */
	addr |= (pcie_port->rev_id >= AL_PCIE_REV_ID_3) ? 0x4000 : 0x1000;

	al_reg_write32((uint32_t *)addr, val);
}
/** Translate a link-speed enum value to its PCIe generation number (1-3);
 *  any other value yields 0 (callers never pass one) */
static unsigned int al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
{
	switch (speed) {
	case AL_PCIE_LINK_SPEED_GEN1:
		return 1;
	case AL_PCIE_LINK_SPEED_GEN2:
		return 2;
	case AL_PCIE_LINK_SPEED_GEN3:
		return 3;
	default:
		/* must not be reached */
		return 0;
	}
}
/**
 * Program the port's maximum link speed into the (read-only) Link
 * Capabilities register and the Link Control 2 target-speed field.
 * A value of AL_PCIE_LINK_SPEED_DEFAULT leaves the HW defaults untouched.
 */
static inline void al_pcie_port_link_speed_ctrl_set(
	struct al_pcie_port *pcie_port,
	enum al_pcie_link_speed max_speed)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	/* Speed fields live in RO config space - temporarily allow writes */
	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	if (max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
		uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(max_speed);
		/* Max Link Speed field (bits 3:0) of Link Capabilities */
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_link_cap_base),
			0xF, max_speed_val);
		/* Target Link Speed field (bits 3:0) of Link Control 2 */
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_cap_base
			+ (AL_PCI_EXP_LNKCTL2 >> 2)),
			0xF, max_speed_val);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
}
/**
 * Apply link parameters: validate the requested max payload size, program
 * the maximum link speed and the number of connected lanes.
 *
 * @return 0 on success, -EINVAL for an unsupported max payload size
 */
static int al_pcie_port_link_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_link_params *link_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint8_t max_lanes = pcie_port->max_lanes;

	switch (link_params->max_payload_size) {
	case AL_PCIE_MPS_DEFAULT:
	case AL_PCIE_MPS_128:
	case AL_PCIE_MPS_256:
		break;
	default:
		al_err("PCIe %d: unsupported Max Payload Size (%u)\n",
			pcie_port->port_id, link_params->max_payload_size);
		return -EINVAL;
	}

	al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);

	/* Change Max Payload Size, if needed.
	 * The Max Payload Size is only valid for PF0.
	 */
	if (link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)
		al_reg_write32_masked(regs->core_space[0].pcie_dev_ctrl_status,
			PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
			link_params->max_payload_size <<
			PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);

	/** Snap from PCIe core spec:
	 * Link Mode Enable. Sets the number of lanes in the link that you want
	 * to connect to the link partner. When you have unused lanes in your
	 * system, then you must change the value in this register to reflect
	 * the number of lanes. You must also change the value in the
	 * "Predetermined Number of Lanes" field of the "Link Width and Speed
	 * Change Control Register".
	 * 000001: x1
	 * 000011: x2
	 * 000111: x4
	 * 001111: x8
	 * 011111: x16
	 * 111111: x32 (not supported)
	 */
	al_reg_write32_masked(&regs->port_regs->gen2_ctrl,
		PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_MASK,
		max_lanes << PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_SHIFT);
	/* Link-capable field encoding is (2 * max_lanes) - 1, i.e. a mask of
	 * max_lanes consecutive ones for power-of-two lane counts */
	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
		PCIE_PORT_LINK_CTRL_LINK_CAPABLE_MASK,
		((2 * max_lanes) - 1)
		<< PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);

	return 0;
}
/**
 * Configure TX/RX PCIe relaxed-ordering behavior for the port.
 *
 * @param pcie_port	port handle
 * @param relaxed_ordering_params	explicit TX/RX enables, or NULL to
 *		apply the operating-mode defaults (RC: RX only, EP: TX only)
 */
static void al_pcie_port_relaxed_pcie_ordering_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_relaxed_ordering_params *relaxed_ordering_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
	/**
	 * Default:
	 * - RC: Rx relaxed ordering only
	 * - EP: TX relaxed ordering only
	 */
	al_bool tx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_FALSE : AL_TRUE);
	al_bool rx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_TRUE : AL_FALSE);

	/* Caller-supplied values override the mode-based defaults */
	if (relaxed_ordering_params) {
		tx_relaxed_ordering = relaxed_ordering_params->enable_tx_relaxed_ordering;
		rx_relaxed_ordering = relaxed_ordering_params->enable_rx_relaxed_ordering;
	}

	/** PCIe ordering:
	 * - disable outbound completion must be stalled behind outbound write
	 *   ordering rule enforcement is disabled for root-port
	 * - disables read completion on the master port push slave writes for end-point
	 *
	 * Both ordering relaxations are applied in a single masked write:
	 * TX enables the bypass/segment-buffer bits, RX enables the EP
	 * completion-after-write disable bits.
	 */
	al_reg_write32_masked(
		regs->axi.ordering.pos_cntl,
		PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS |
		PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES,
		(tx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		 PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES) : 0) |
		(rx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS) : 0));
}
static int al_pcie_dev_id_get(struct al_pcie_revx_regs *pcie_reg_base)
{
return al_reg_read32(&pcie_reg_base->axi.device_id.device_rev_id) &
PCIE_AXI_DEVICE_ID_REG_DEV_ID_MASK;
}
/* For Alpine V3+ return the PCIe sub-revision id, all other revisions return NA */
static int al_pcie_subrev_id_get(struct al_pcie_revx_regs *pcie_reg_base, uint8_t rev_id)
{
	int subrev_id;

	/* Sub-revision only exists from rev 4 onwards */
	if (rev_id < AL_PCIE_REV_ID_4)
		return PCIE_AXI_DEVICE_ID_REG_REV_ID_NA;

	subrev_id = al_reg_read32(&pcie_reg_base->axi.device_id.device_rev_id) &
		PCIE_AXI_DEVICE_ID_REG_REV_ID_MASK;

	al_assert((subrev_id >= PCIE_AXI_DEVICE_ID_REG_REV_ID_X4) &&
		(subrev_id <= PCIE_AXI_DEVICE_ID_REG_REV_ID_X16));

	return subrev_id;
}
/**
 * Determine the port revision ID.
 *
 * Addressing RMN: 4022, 10069
 *
 * RMN description:
 * The revision ID of the PCIe Core is placed in the LSB of the PCI Class
 * field, which creates two problems:
 * - The port must be enabled to be able to determine the revision ID
 * - The wrong revision ID might be placed in the field as it can be
 *   programmed by the application in EP mode
 *
 * Software flow:
 * Read the chip generation from the PBS chip ID, then disambiguate with
 * the PCIe core device ID where a generation has several core variants.
 *
 * @return the revision ID on success, -EINVAL when unknown
 */
static int al_pcie_rev_id_get(
	void __iomem *pbs_reg_base,
	void __iomem *pcie_reg_base)
{
	unsigned int chip_id_dev;
	uint32_t dev_id;

	/* get revision ID from the PBS */
	chip_id_dev = al_pbs_dev_id_get(pbs_reg_base);

	switch (chip_id_dev) {
	case PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V1:
		return AL_PCIE_REV_ID_1;
	case PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V2:
		dev_id = al_pcie_dev_id_get(pcie_reg_base);
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X4)
			return AL_PCIE_REV_ID_2;
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X8)
			return AL_PCIE_REV_ID_3;
		break;
	case PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V3:
		dev_id = al_pcie_dev_id_get(pcie_reg_base);
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X16)
			return AL_PCIE_REV_ID_4;
		break;
	case PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V4:
		dev_id = al_pcie_dev_id_get(pcie_reg_base);
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_REV5)
			return AL_PCIE_REV_ID_5;
		break;
	default:
		break;
	}

	al_warn("%s: Revision ID is unknown\n",
		__func__);
	return -EINVAL;
}
static int
al_pcie_port_lat_rply_timers_config(
struct al_pcie_port *pcie_port,
const struct al_pcie_latency_replay_timers *lat_rply_timers)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint32_t reg = 0;
AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);
al_reg_write32(®s->port_regs->ack_lat_rply_timer, reg);
return 0;
}
/**
 * Apply the per-revision default inbound header credits and outstanding
 * outbound reads configuration, taken from ib_hcrd_os_ob_reads_defaults
 * keyed by [revision][sub-revision][operating mode].
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (invalid in strict ISO C, warns with -pedantic).
 */
static void al_pcie_ib_hcrd_os_ob_reads_config_default(
	struct al_pcie_port *pcie_port)
{
	struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
	uint8_t rev_id = pcie_port->rev_id;
	int subrev_id = pcie_port->subrev_id;

	/* Defaults are only meaningful when SW-controlled credit allocation
	 * is supported on this silicon variant */
	al_assert(ib_hcrd_os_ob_reads_hw_config[rev_id][subrev_id].ib_hcrd_supported);

	ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
		ib_hcrd_os_ob_reads_defaults[rev_id][subrev_id][op_mode].nof_outstanding_ob_reads;

	ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
		ib_hcrd_os_ob_reads_defaults[rev_id][subrev_id][op_mode].nof_cpl_hdr;

	ib_hcrd_os_ob_reads_config.nof_np_hdr =
		ib_hcrd_os_ob_reads_defaults[rev_id][subrev_id][op_mode].nof_np_hdr;

	ib_hcrd_os_ob_reads_config.nof_p_hdr =
		ib_hcrd_os_ob_reads_defaults[rev_id][subrev_id][op_mode].nof_p_hdr;

	al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
}
/** return AL_TRUE if link is up, AL_FALSE otherwise; optionally report
 *  the raw LTSSM state through ltssm_ret (may be NULL) */
static al_bool al_pcie_check_link(
	struct al_pcie_port *pcie_port,
	uint8_t *ltssm_ret)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t debug_info;
	uint8_t state;

	debug_info = al_reg_read32(regs->app.debug.info_0);

	state = AL_REG_FIELD_GET(debug_info,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);

	al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state :0x%x\n",
	       pcie_port->port_id, debug_info, state);

	if (ltssm_ret)
		*ltssm_ret = state;

	/* Link is considered up in L0 or L0s */
	return ((state == AL_PCIE_LTSSM_STATE_L0) ||
		(state == AL_PCIE_LTSSM_STATE_L0S)) ? AL_TRUE : AL_FALSE;
}
static int
al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
const struct al_pcie_gen2_params *gen2_params)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint32_t gen2_ctrl;
al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n",
pcie_port->port_id,
gen2_params->tx_swing_low ? "Low" : "Full",
gen2_params->tx_compliance_receive_enable? "enable" : "disable",
gen2_params->set_deemphasis? "enable" : "disable");
gen2_ctrl = al_reg_read32(®s->port_regs->gen2_ctrl);
if (gen2_params->tx_swing_low)
AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
else
AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
if (gen2_params->tx_compliance_receive_enable)
AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
else
AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
if (gen2_params->set_deemphasis)
AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
else
AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
al_reg_write32(®s->port_regs->gen2_ctrl, gen2_ctrl);
return 0;
}
static uint16_t
gen3_lane_eq_param_to_val(const struct al_pcie_gen3_lane_eq_params *eq_params)
{
uint16_t eq_control = 0;
eq_control = eq_params->downstream_port_transmitter_preset & 0xF;
eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4;
eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8;
eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12;
return eq_control;
}
/**
 * Apply Gen3 (8 GT/s) link parameters: equalization enables, per-lane EQ
 * presets, FS/LF coefficient limits, EQ control and feedback-mode settings.
 *
 * NOTE(review): the per-lane loop advances two entries per iteration and
 * reads eq_params[i + 1]; presumably eq_params_elements is always even -
 * confirm against callers.
 */
static int
al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
				enum al_pcie_operating_mode op_mode,
				const struct al_pcie_gen3_params *gen3_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t reg = 0;
	/* Lane Equalization Control registers start at offset 0xC of the
	 * Secondary PCI Express extended capability, 16 bits per lane */
	uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(regs->core_space[0].pcie_sec_ext_cap_base + (0xC >> 2));
	unsigned int i;

	al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
	       pcie_port->port_id,
	       gen3_params->perform_eq ? "enable" : "disable",
	       gen3_params->interrupt_enable_on_link_eq_request? "enable" : "disable");

	/* Bit 0: perform EQ, bit 1: interrupt on link EQ request */
	if (gen3_params->perform_eq)
		AL_REG_BIT_SET(reg, 0);
	if (gen3_params->interrupt_enable_on_link_eq_request)
		AL_REG_BIT_SET(reg, 1);
	al_reg_write32(regs->core_space[0].pcie_sec_ext_cap_base + (4 >> 2),
		       reg);

	/* Lane EQ control registers are read-only - temporarily allow writes */
	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	/* Two 16-bit lane entries are packed into each 32-bit write */
	for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
		uint32_t eq_control =
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;

		al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
		al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);

	/* EQ master/phase-2-3/redo-bypass disables in the Gen3 control reg */
	reg = al_reg_read32(®s->port_regs->gen3_ctrl);
	if (gen3_params->eq_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);

	if (gen3_params->eq_phase2_3_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);

	if (gen3_params->eq_redo_bypass)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_REDO_BYPASS_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_REDO_BYPASS_SHIFT);

	al_reg_write32(®s->port_regs->gen3_ctrl, reg);

	/* Local full-swing (FS) / low-frequency (LF) values - port register */
	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
			 PCIE_PORT_GEN3_EQ_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
			 PCIE_PORT_GEN3_EQ_FS_SHIFT,
			 gen3_params->local_fs);
	al_reg_write32(®s->port_regs->gen3_eq_fs_lf, reg);

	/* Mirror the same FS/LF values into each lane's AXI zero-lane reg */
	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
			 gen3_params->local_fs);
	for (i = 0; i < lanes_num[pcie_port->rev_id]; i++)
		al_reg_write32(regs->axi.conf.zero_lane[i], reg);

	/*
	 * Gen3 EQ Control Register:
	 * - Behavior After 24 ms Timeout (when optimal settings are not
	 *   found): Recovery.Speed
	 * - Phase2_3 2 ms Timeout Disable
	 */
	reg = 0x00000020;

	/* Set preset request vector */
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_CTRL_PSET_REQ_VEC_MASK,
			 PCIE_PORT_GEN3_EQ_CTRL_PSET_REQ_VEC_SHIFT,
			 (op_mode == AL_PCIE_OPERATING_MODE_RC) ?
			 gen3_params->pset_req_vec_rc :
			 gen3_params->pset_req_vec_ep);

	/* Set feedback mode */
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_CTRL_FB_MODE_MASK,
			 PCIE_PORT_GEN3_EQ_CTRL_FB_MODE_SHIFT, gen3_params->fb_mode);

	al_reg_write32(®s->port_regs->gen3_eq_ctrl, reg);

	/* Set direction change feedback mode params */
	if (gen3_params->fb_mode == AL_PCIE_GEN3_FB_MODE_DIR_CHG) {
		/* All fields are mandatory and range-limited in this mode */
		al_assert(gen3_params->fmdc);
		al_assert(gen3_params->fmdc->max_post_cusror_delta <=
			  AL_PCIE_PORT_GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA_MAX);
		al_assert(gen3_params->fmdc->max_pre_cusror_delta <=
			  AL_PCIE_PORT_GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA_MAX);
		al_assert(gen3_params->fmdc->n_evals <=
			  AL_PCIE_PORT_GEN3_EQ_FMDC_N_EVALS_MAX);
		al_assert(gen3_params->fmdc->t_min_phas <=
			  AL_PCIE_PORT_GEN3_EQ_FMDC_T_MIN_PHAS_MAX);

		al_reg_write32_masked(®s->port_regs->gen3_eq_fb_mode_dir_chg,
			PCIE_PORT_GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA_MASK |
			PCIE_PORT_GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA_MASK |
			PCIE_PORT_GEN3_EQ_FMDC_N_EVALS_MASK |
			PCIE_PORT_GEN3_EQ_FMDC_T_MIN_PHAS_MASK,
			(gen3_params->fmdc->max_post_cusror_delta <<
			 PCIE_PORT_GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA_SHIFT) |
			(gen3_params->fmdc->max_pre_cusror_delta <<
			 PCIE_PORT_GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA_SHIFT) |
			(gen3_params->fmdc->n_evals <<
			 PCIE_PORT_GEN3_EQ_FMDC_N_EVALS_SHIFT) |
			(gen3_params->fmdc->t_min_phas <<
			 PCIE_PORT_GEN3_EQ_FMDC_T_MIN_PHAS_SHIFT));
	}

	return 0;
}
/**
 * Configure a single physical function: PM/FLR/ASPM capabilities, ID
 * register overrides, BARs, expansion ROM BAR, and CPU-generated
 * legacy/MSI interrupt masks.
 *
 * Writes to read-only config space are enabled for the whole function;
 * every exit path must go through 'done' so they are disabled again.
 * Fix: the 32-bit BAR size-limit check used a bare 'return -EINVAL',
 * leaking the RO-write enable; it now exits through 'done' like every
 * other error path. Also unified on the local 'pf_num' where one branch
 * used 'pcie_pf->pf_num'.
 *
 * @return 0 on success, -EINVAL on invalid BAR parameters,
 *	   -ENOSYS on unsupported revision/feature combinations
 */
static int
al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
			      const struct al_pcie_pf_config_params *pf_params)
{
	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	int bar_idx;
	int ret;

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	/* Disable D1 and D3hot capabilities */
	if (pf_params->cap_d1_d3hot_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_pm_cap_base,
			AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);

	/* Set/Clear FLR bit */
	if (pf_params->cap_flr_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_dev_cap_base,
			AL_PCI_EXP_DEVCAP_FLR, 0);
	else
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_dev_cap_base,
			AL_PCI_EXP_DEVCAP_FLR, AL_PCI_EXP_DEVCAP_FLR);

	/* Disable ASPM capability */
	if (pf_params->cap_aspm_dis) {
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
			AL_PCI_EXP_LNKCAP_ASPMS, 0);
	}

	/* Optional overrides of vendor/device/revision/class ID registers */
	if (pf_params->id_params) {
		if (pf_params->id_params->vendor_id_override)
			al_reg_write32_masked(
				(uint32_t __iomem *)(&regs->core_space[pf_num].config_header[0]),
				AL_PCI_DEV_ID_VEN_ID_VEN_ID_MASK,
				pf_params->id_params->vendor_id <<
				AL_PCI_DEV_ID_VEN_ID_VEN_ID_SHIFT);
		if (pf_params->id_params->device_id_override)
			al_reg_write32_masked(
				(uint32_t __iomem *)(&regs->core_space[pf_num].config_header[0]),
				AL_PCI_DEV_ID_VEN_ID_DEV_ID_MASK,
				pf_params->id_params->device_id <<
				AL_PCI_DEV_ID_VEN_ID_DEV_ID_SHIFT);
		if (pf_params->id_params->revision_id_override)
			al_reg_write32_masked(
				(uint32_t __iomem *)(&regs->core_space[pf_num].config_header[0]
				+ (PCI_CLASS_REVISION >> 2)),
				PCI_CLASS_REVISION_REVISION_MASK,
				pf_params->id_params->revision_id <<
				PCI_CLASS_REVISION_REVISION_SHIFT);
		if (pf_params->id_params->class_code_override)
			al_reg_write32_masked(
				(uint32_t __iomem *)(&regs->core_space[pf_num].config_header[0]
				+ (PCI_CLASS_REVISION >> 2)),
				PCI_CLASS_REVISION_CLASS_MASK,
				pf_params->id_params->class_code <<
				PCI_CLASS_REVISION_CLASS_SHIFT);
	}

	if (!pf_params->bar_params_valid) {
		ret = 0;
		goto done;
	}

	for (bar_idx = 0; bar_idx < 6;){ /* bar_idx will be incremented depending on bar type */
		const struct al_pcie_ep_bar_params *params = pf_params->bar_params + bar_idx;
		uint32_t mask = 0;
		uint32_t ctrl = 0;
		uint32_t __iomem *bar_addr = &regs->core_space[pf_num].config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];

		if (params->enable) {
			uint64_t size = params->size;

			if (params->memory_64_bit) {
				const struct al_pcie_ep_bar_params *next_params = params + 1;
				/* 64 bars start at even index (BAR0, BAR 2 or BAR 4) */
				if (bar_idx & 1) {
					ret = -EINVAL;
					goto done;
				}

				/* next BAR must be disabled */
				if (next_params->enable) {
					ret = -EINVAL;
					goto done;
				}

				/* 64 bar must be memory bar */
				if (!params->memory_space) {
					ret = -EINVAL;
					goto done;
				}
			} else {
				/* Exit through 'done' so RO-write access is
				 * restored (was an early 'return -EINVAL') */
				if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE) {
					ret = -EINVAL;
					goto done;
				}
				/* 32 bit space can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}
			}

			if (params->memory_space) {
				if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
					al_err("PCIe %d: memory BAR %d: size (0x%" PRIx64 ")"
						" less that minimal allowed value\n",
						pcie_port->port_id, bar_idx, size);
					ret = -EINVAL;
					goto done;
				}
			} else {
				/* IO can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}

				if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
					al_err("PCIe %d: IO BAR %d: size (0x%" PRIx64 ")"
						" less that minimal allowed value\n",
						pcie_port->port_id, bar_idx, size);
					ret = -EINVAL;
					goto done;
				}
			}

			/* size must be power of 2 */
			if (size & (size - 1)) {
				al_err("PCIe %d: BAR %d:size (0x%" PRIx64 ") must be "
					"power of 2\n",
					pcie_port->port_id, bar_idx, size);
				ret = -EINVAL;
				goto done;
			}

			/* If BAR is 64-bit, disable the next BAR before
			 * configuring this one
			 */
			if (params->memory_64_bit)
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, 0);

			/* Size mask goes through the DBI CS2 shadow window */
			mask = 1; /* enable bit*/
			mask |= (params->size - 1) & 0xFFFFFFFF;

			al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);

			if (params->memory_space == AL_FALSE)
				ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
			if (params->memory_64_bit)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
			if (params->memory_is_prefetchable)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
			al_reg_write32(bar_addr, ctrl);

			if (params->memory_64_bit) {
				mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, mask);
			}
		} else {
			/* Disabled BAR: clear its size mask (mask is 0 here) */
			al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);
		}

		if (params->enable && params->memory_64_bit)
			bar_idx += 2;
		else
			bar_idx += 1;
	}

	if (pf_params->exp_bar_params.enable) {
		if (pcie_port->rev_id < AL_PCIE_REV_ID_3) {
			al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
			ret = -ENOSYS;
			goto done;
		} else {
			/* Enable exp ROM */
			uint32_t __iomem *exp_rom_bar_addr =
				&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
			uint32_t mask = 1; /* enable bit*/

			mask |= (pf_params->exp_bar_params.size - 1) & 0xFFFFFFFF;
			al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , mask);
		}
	} else if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
		/* Disable exp ROM */
		uint32_t __iomem *exp_rom_bar_addr =
			&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];

		al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , 0);
	}

	/* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, AL_BIT(21));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_3, AL_BIT(18));
	} else if (pcie_port->rev_id >= AL_PCIE_REV_ID_4) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_6, AL_BIT(0));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	/**
	 * Addressing RMN: 1547
	 *
	 * RMN description:
	 * 1. Whenever writing to 0x2xx offset, the write also happens to
	 *    0x3xx address, meaning two registers are written instead of one.
	 * 2. Read and write from 0x3xx work ok.
	 *
	 * Software flow:
	 * Backup the value of the app.int_grp_a.mask_a register, because
	 * app.int_grp_a.mask_clear_a gets overwritten during the write to
	 * app.soc.mask_msi_leg_0 register.
	 * Restore the original value after the write to app.soc.mask_msi_leg_0
	 * register.
	 */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, AL_BIT(22));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_3, AL_BIT(19));
	} else if (pcie_port->rev_id >= AL_PCIE_REV_ID_4) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_6, AL_BIT(1));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	ret = 0;

done:
	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
	return ret;
}
/**
 * Configure SRIS (Separate Refclock with Independent Spread) SKP
 * ordered-set generation intervals.
 *
 * @param pcie_port	port handle
 * @param sris_params	KP counter values; when use_defaults is set, the
 *			revision-appropriate defaults are filled in first
 * @param link_speed	requested link speed (Gen3 SRIS rejected on rev 1)
 *
 * @return 0 on success, -EINVAL on unsupported revision/speed combination
 */
static int al_pcie_port_sris_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_sris_params *sris_params,
	enum al_pcie_link_speed link_speed)
{
	int rc = 0;
	struct al_pcie_regs *regs = pcie_port->regs;

	if (sris_params->use_defaults) {
		/* A Gen3 KP counter only exists from rev 2 onwards */
		sris_params->kp_counter_gen3 = (pcie_port->rev_id > AL_PCIE_REV_ID_1) ?
						PCIE_SRIS_KP_COUNTER_GEN3_DEFAULT_VAL : 0;
		sris_params->kp_counter_gen21 = PCIE_SRIS_KP_COUNTER_GEN21_DEFAULT_VAL;

		al_dbg("PCIe %d: configuring SRIS with default values kp_gen3[%d] kp_gen21[%d]\n",
			pcie_port->port_id,
			sris_params->kp_counter_gen3,
			sris_params->kp_counter_gen21);
	}

	switch (pcie_port->rev_id) {
	case AL_PCIE_REV_ID_5:
	case AL_PCIE_REV_ID_4:
	case AL_PCIE_REV_ID_3:
		/* rev3+ additionally exposes an app-level SRIS mode enable;
		 * intentional fall-through to the rev2 KP counter setup */
		al_reg_write32_masked(regs->app.cfg_func_ext.cfg,
			PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE,
			PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE);
		/* fall through */
	case AL_PCIE_REV_ID_2:
		al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN,
			(sris_params->kp_counter_gen3 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_SHIFT) |
			(sris_params->kp_counter_gen21 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_SHIFT) |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN);
		break;
	case AL_PCIE_REV_ID_1:
		/* rev1 has no Gen3 KP counter - reject Gen3 SRIS requests */
		if ((link_speed == AL_PCIE_LINK_SPEED_GEN3) && (sris_params->kp_counter_gen3)) {
			al_err("PCIe %d: cannot config Gen%d SRIS with rev_id[%d]\n",
				pcie_port->port_id, al_pcie_speed_gen_code(link_speed),
				pcie_port->rev_id);
			return -EINVAL;
		}

		al_reg_write32_masked(®s->port_regs->filter_mask_reg_1,
			PCIE_FLT_MASK_SKP_INT_VAL_MASK,
			sris_params->kp_counter_gen21);
		break;
	default:
		al_err("PCIe %d: SRIS config is not supported in rev_id[%d]\n",
			pcie_port->port_id, pcie_port->rev_id);
		al_assert(0);
		return -EINVAL;
	}

	return rc;
}
static void
al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = pcie_port->regs;
al_reg_write32_masked(
®s->port_regs->vc0_posted_rcv_q_ctrl,
RADM_PQ_HCRD_VC0_MASK,
(pcie_port->ib_hcrd_config.nof_p_hdr - 1)
<< RADM_PQ_HCRD_VC0_SHIFT);
al_reg_write32_masked(
®s->port_regs->vc0_non_posted_rcv_q_ctrl,
RADM_NPQ_HCRD_VC0_MASK,
(pcie_port->ib_hcrd_config.nof_np_hdr - 1)
<< RADM_NPQ_HCRD_VC0_SHIFT);
}
static unsigned int
al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint32_t max_func_num;
uint32_t max_num_of_pfs;
/**
* Only in REV3+, when port is already enabled, max_num_of_pfs is already
* initialized, return it. Otherwise, return default: 1 PF
*/
if ((pcie_port->rev_id >= AL_PCIE_REV_ID_3)
&& al_pcie_port_is_enabled(pcie_port)) {
max_func_num = al_reg_read32(®s->port_regs->timer_ctrl_max_func_num);
max_num_of_pfs = AL_REG_FIELD_GET(max_func_num, PCIE_PORT_GEN3_MAX_FUNC_NUM, 0) + 1;
return max_num_of_pfs;
}
return 1;
}
/** Enable ecrc generation in outbound atu (Addressing RMN: 5119)
 *
 * Walks every outbound ATU window and, for windows that are enabled and
 * belong to the given PF, sets the TD (TLP digest) bit so ECRC is
 * generated on outbound TLPs.
 */
static void al_pcie_ecrc_gen_ob_atu_enable(struct al_pcie_port *pcie_port, unsigned int pf_num)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	/* Number of outbound ATU windows depends on the port revision */
	int max_ob_atu = (pcie_port->rev_id >= AL_PCIE_REV_ID_3) ?
		AL_PCIE_REV_3_4_ATU_NUM_OUTBOUND_REGIONS : AL_PCIE_REV_1_2_ATU_NUM_OUTBOUND_REGIONS;
	int i;

	for (i = 0; i < max_ob_atu; i++) {
		al_bool enable = 0;
		uint32_t reg = 0;
		unsigned int func_num;

		/* Select outbound ATU window 'i' via the index register */
		AL_REG_FIELD_SET(reg, 0xF, 0, i);
		AL_REG_BIT_VAL_SET(reg, 31, AL_PCIE_ATU_DIR_OUTBOUND);
		al_reg_write32(®s->port_regs->iatu.index, reg);

		/* CR2 bit 31 is the window-enable bit */
		reg = al_reg_read32(®s->port_regs->iatu.cr2);
		enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;

		reg = al_reg_read32(®s->port_regs->iatu.cr1);
		func_num = AL_REG_FIELD_GET(reg,
				PCIE_IATU_CR1_FUNC_NUM_MASK,
				PCIE_IATU_CR1_FUNC_NUM_SHIFT);

		/* Only touch enabled windows owned by this PF */
		if ((enable == AL_TRUE) && (pf_num == func_num)) {
			/* Set TD bit */
			AL_REG_BIT_SET(reg, 8);
			al_reg_write32(®s->port_regs->iatu.cr1, reg);
		}
	}
}
/******************************************************************************/
/***************************** API Implementation *****************************/
/******************************************************************************/
/*************************** PCIe Initialization API **************************/
/**
* Initializes a PCIe port handle structure
* Caution: this function should not read/write to any register except for
* reading RO register (REV_ID for example)
*/
int al_pcie_port_handle_init(
	struct al_pcie_port *pcie_port,
	void __iomem *pcie_reg_base,
	void __iomem *pbs_reg_base,
	unsigned int port_id)
{
	int ret;

	pcie_port->pcie_reg_base = pcie_reg_base;
	pcie_port->regs = &pcie_port->regs_ptrs;
	pcie_port->ex_regs = NULL;
	pcie_port->pbs_regs = pbs_reg_base;
	pcie_port->port_id = port_id;
	pcie_port->max_lanes = 0;

	/* Detect the port revision (reads RO registers only) */
	ret = al_pcie_rev_id_get(pbs_reg_base, pcie_reg_base);
	if (ret < 0)
		return ret;
	pcie_port->rev_id = ret;

	/* Sub-revision distinguishes the x4/x8/x16 variants on rev 4+ */
	ret = al_pcie_subrev_id_get(pcie_reg_base, pcie_port->rev_id);
	if (ret < 0)
		return ret;
	pcie_port->subrev_id = ret;

	/* Zero all regs */
	al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));

	/* Populate the register-pointer table for the detected revision;
	 * support for each revision can be compiled out individually */
#if (REV1_SUPPORTED)
	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_pcie_port_handle_init_reg_ptr_set_rev1(pcie_port, pcie_reg_base);
	} else
#endif
#if (REV2_SUPPORTED)
	if (pcie_port->rev_id == AL_PCIE_REV_ID_2) {
		al_pcie_port_handle_init_reg_ptr_set_rev2(pcie_port, pcie_reg_base);
	} else
#endif
#if (REV3_SUPPORTED)
	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_pcie_port_handle_init_reg_ptr_set_rev3(pcie_port, pcie_reg_base);
	} else
#endif
#if (REV4_SUPPORTED)
	if (pcie_port->rev_id == AL_PCIE_REV_ID_4) {
		al_pcie_port_handle_init_reg_ptr_set_rev4(pcie_port, pcie_reg_base);
	} else
#endif
#if (REV5_SUPPORTED)
	if (pcie_port->rev_id == AL_PCIE_REV_ID_5) {
		al_pcie_port_handle_init_reg_ptr_set_rev5(pcie_port, pcie_reg_base);
	} else
#endif
	{
		al_warn("%s: Revision ID is unknown\n",
			__func__);
		return -EINVAL;
	}

	/* set maximum number of physical functions */
	pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);

	/* Clear 'nof_p_hdr' & 'nof_np_hdr' to later know if they where changed by the user */
	pcie_port->ib_hcrd_config.nof_np_hdr = 0;
	pcie_port->ib_hcrd_config.nof_p_hdr = 0;

	pcie_port->num_retrains = 0;

	al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
		port_id, pcie_port->rev_id, pcie_reg_base);
	return 0;
}
/**
 * Print the port performance-related parameters to the log:
 * link speed/width, max payload size, per-PF max read request size,
 * inbound header credits and the outstanding outbound reads limit.
 *
 * Read-only: decodes live register state via al_print(). Prints an
 * explanatory line and returns early if the port is disabled, the link
 * status read fails, or the link is down.
 *
 * @param pcie_port	pcie port handle
 */
void al_pcie_port_perf_params_print(
	struct al_pcie_port *pcie_port)
{
	struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;
	struct al_pcie_link_status status;
	enum al_pcie_max_payload_size mps;
	struct al_pcie_regs *regs;
	uint32_t reg_val;
	int err;
	int i;

	al_assert(pcie_port);
	regs = pcie_port->regs;

	/* nothing to decode while the port is held in reset */
	if (!al_pcie_port_is_enabled(pcie_port)) {
		al_print("- Port is disabled\n");
		return;
	}

	err = al_pcie_link_status(pcie_port, &status);
	if (err) {
		al_print("- al_pcie_link_status failed!\n");
		return;
	}

	if (!status.link_up) {
		al_print("- Link is down\n");
		return;
	}

	al_print("- Link is up (Gen %ux%u)\n", status.speed, status.lanes);

	/* Max payload size - decoded from PF0 Device Control/Status */
	reg_val = al_reg_read32(regs->core_space[0].pcie_dev_ctrl_status);
	mps = AL_REG_FIELD_GET(reg_val, PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
		PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);
	al_print("- Max payload size = %s\n",
		(mps == AL_PCIE_MPS_128) ? "128" :
		(mps == AL_PCIE_MPS_256) ? "256" :
		"N/A");

	/* Max read request size, per PF (single PF for revisions below 3) */
	for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
		unsigned int mrrs;

		reg_val = al_reg_read32(regs->core_space[i].pcie_dev_ctrl_status);
		mrrs = reg_val & PCIE_PORT_DEV_CTRL_STATUS_MRRS_MASK;
		al_print("- PF[%d] max read request size = %s\n", i,
			(mrrs == PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_128) ? "128" :
			(mrrs == PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_256) ? "256" :
			(mrrs == PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_512) ? "512" :
			"N/A");
		/* pre-rev-3 ports expose a single PF - stop after PF0 */
		if (pcie_port->rev_id < AL_PCIE_REV_ID_3)
			break;
	}

	/* Inbound header credits - field layout changed in revision 3 */
	reg_val = al_reg_read32(regs->axi.init_fc.cfg);
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
		ib_hcrd_os_ob_reads_config.nof_p_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_P_HDR_MASK,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_P_HDR_SHIFT);
		ib_hcrd_os_ob_reads_config.nof_np_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_NP_HDR_MASK,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_NP_HDR_SHIFT);
		ib_hcrd_os_ob_reads_config.nof_cpl_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_CPL_HDR_SHIFT);
	} else {
		ib_hcrd_os_ob_reads_config.nof_p_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT);
		ib_hcrd_os_ob_reads_config.nof_np_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT);
		ib_hcrd_os_ob_reads_config.nof_cpl_hdr = AL_REG_FIELD_GET(reg_val,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT);
	}

	/* outstanding outbound reads limit (proprietary read limiter) */
	reg_val = al_reg_read32(regs->axi.pre_configuration.pcie_core_setup);
	ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads = AL_REG_FIELD_GET(reg_val,
		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);

	al_print("- Max outstanding reads = %u\n",
		ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads);
	al_print("- Num completions headers = %u\n", ib_hcrd_os_ob_reads_config.nof_cpl_hdr);
	al_print("- Num posted headers = %u\n", ib_hcrd_os_ob_reads_config.nof_p_hdr);
	al_print("- Num non posted headers = %u\n", ib_hcrd_os_ob_reads_config.nof_np_hdr);

	/* posted-ordering control bits */
	reg_val = al_reg_read32(regs->axi.ordering.pos_cntl);
	al_print("- cfg_ep_cmpl_after_wr_dis = %u\n",
		!!(reg_val & PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS));
	al_print("- cfg_bypass_cmpl_after_write_fix = %u\n",
		!!(reg_val & PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX));
}
/**
 * Initialize the port clocks (supported on revision 4 and above only).
 *
 * Programs the Gen1/Gen2 pipe clock dividers and the rate-change delays
 * derived from @pcie_clk_params (south-bridge clock frequency and
 * reference-clock selection), then enables the main clock gate and the
 * dividers, and finally resets the internal clock logic. Every
 * programming step is followed by a short delay to let the clock settle;
 * the order of the writes is part of the hardware sequence - do not
 * reorder.
 *
 * @param pcie_port		pcie port handle
 * @param pcie_clk_params	clock parameters
 *
 * @return 0 on success, -EINVAL if the revision doesn't support clk init
 */
int al_pcie_port_clk_init(struct al_pcie_port *pcie_port,
		struct al_pcie_clk_params *pcie_clk_params)
{
	if (pcie_port->rev_id < AL_PCIE_REV_ID_4) {
		al_err("pcie port %u does not support clk init\n", pcie_port->port_id);
		return -EINVAL;
	}

	/* set gen2/gen1 pipe clocks and delay after reset */
	al_reg_write32_masked(pcie_port->regs->axi.cfg_pclk_generate.cntr_low,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_GEN2_MHZ_MASK |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_GEN1_MHZ_MASK |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_RST_DELAY_AFTER_CLK_EN_MASK,
		(pcie_clk_params->sb_clk_freq / AL_PCIE_CLK_GEN2_PIPE_HZ)
			<< PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_GEN2_MHZ_SHIFT |
		(pcie_clk_params->sb_clk_freq / AL_PCIE_CLK_GEN1_PIPE_HZ)
			<< PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_GEN1_MHZ_SHIFT |
		AL_PCIE_CLK_DELAY_AFTER_RESET_EN_HZ
			<< PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_RST_DELAY_AFTER_CLK_EN_SHIFT);

	/* set delay before/after rate-change event */
	al_reg_write32_masked(pcie_port->regs->axi.cfg_pclk_generate.cntr_high,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_HIGH_DELAY_BEFORE_RATE_CHANGE_MASK |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_HIGH_DELAY_AFTER_RATE_CHANGE_MASK,
		AL_PCIE_CLK_DELAY_BEFORE_RATE_CHANGE_HZ
			<< PCIE_AXI_CFG_PCLK_GENERATE_CNTR_HIGH_DELAY_BEFORE_RATE_CHANGE_SHIFT |
		AL_PCIE_CLK_DELAY_AFTER_RATE_CHANGE_HZ
			<< PCIE_AXI_CFG_PCLK_GENERATE_CNTR_HIGH_DELAY_AFTER_RATE_CHANGE_SHIFT);

	/* 1 usec delay for the clock to bubble */
	al_udelay(1);

	/* choose the correct clock for this pcie unit */
	al_assert(pcie_clk_params->ref_clk <= AL_PCIE_CLK_REF_MAX);
	al_reg_write32_masked(pcie_port->regs->axi.cfg_pclk_generate.cntr_low,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_UNIT_PCLK_MASK,
		pcie_clk_params->ref_clk << PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_UNIT_PCLK_SHIFT);

	/* 1 usec delay for the clock to bubble */
	al_udelay(1);

	/* enable main clock gate and gen2/gen1 dividers */
	al_reg_write32_masked(pcie_port->regs->axi.cfg_pclk_generate.cntr_low,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_MAIN_CLK_GATE_EN |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_CLK_GEN2_DIV_EN |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_CLK_GEN1_DIV_EN,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_MAIN_CLK_GATE_EN |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_CLK_GEN2_DIV_EN |
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_CLK_GEN1_DIV_EN);

	/* 1 usec delay for the clock to bubble */
	al_udelay(1);

	/* reset internal clock logic */
	al_reg_write32_masked(pcie_port->regs->axi.cfg_pclk_generate.cntr_low,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_MAIN_INTERNAL_CLK_LOGIC_RST_EN,
		PCIE_AXI_CFG_PCLK_GENERATE_CNTR_LOW_MAIN_INTERNAL_CLK_LOGIC_RST_EN);

	/* 2 usec delay for the reset to take effect */
	al_udelay(2);

	return 0;
}
/**
* Initializes a PCIe Physical function handle structure
* Caution: this function should not read/write to any register except for
* reading RO register (REV_ID for example)
*/
int al_pcie_pf_handle_init(
struct al_pcie_pf *pcie_pf,
struct al_pcie_port *pcie_port,
unsigned int pf_num)
{
enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
al_assert(pf_num < pcie_port->max_num_of_pfs);
if (op_mode != AL_PCIE_OPERATING_MODE_EP) {
al_err("PCIe %d: can't init PF handle with operating mode [%d]\n",
pcie_port->port_id, op_mode);
return -EINVAL;
}
pcie_pf->pf_num = pf_num;
pcie_pf->pcie_port = pcie_port;
al_dbg("PCIe %d: pf handle initialized. pf number: %d, rev_id %d, regs %p\n",
pcie_port->port_id, pcie_pf->pf_num, pcie_port->rev_id,
pcie_port->regs);
return 0;
}
/** Get port revision ID (as cached in the handle at init time) */
int al_pcie_port_rev_id_get(struct al_pcie_port *pcie_port)
{
	return pcie_port->rev_id;
}
/************************** Pre PCIe Port Enable API **************************/

/**
 * Configure the pcie operating mode (root complex or endpoint).
 *
 * Must be called while the port is still disabled. Reads the current
 * device type from the global conf register and, if it differs from the
 * requested mode, programs the new one. In RC mode on rev 3+ the AXI
 * PF-select overrides are also set up for a single PF.
 *
 * @param pcie_port	pcie port handle
 * @param mode		requested operating mode (EP or RC)
 *
 * @return 0 on success, -EINVAL if the port is already enabled or the
 *	   mode is unknown
 */
int al_pcie_port_operating_mode_config(
	struct al_pcie_port *pcie_port,
	enum al_pcie_operating_mode mode)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t reg, device_type, new_device_type;

	/* mode may only be changed before PCIE_EXIST is asserted */
	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot set operating mode\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	reg = al_reg_read32(regs->axi.pcie_global.conf);

	device_type = AL_REG_FIELD_GET(reg,
			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
	if (mode == AL_PCIE_OPERATING_MODE_EP) {
		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
	} else if (mode == AL_PCIE_OPERATING_MODE_RC) {
		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;

		if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
			/* config 1 PF in RC mode */
			al_reg_write32_masked(regs->axi.axi_attr_ovrd.pf_sel,
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_AXUSER |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_ADDR_OFFSET_MASK |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT0_OVRD |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_AXUSER |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_ADDR_OFFSET_MASK |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT1_OVRD,
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG);
		}
	} else {
		al_err("PCIe %d: unknown operating mode: %d\n", pcie_port->port_id, mode);
		return -EINVAL;
	}

	/* nothing to program when the hardware already matches */
	if (new_device_type == device_type) {
		al_dbg("PCIe %d: operating mode already set to %s\n",
			pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
			"EndPoint" : "Root Complex");
		return 0;
	}
	al_dbg("PCIe %d: set operating mode to %s\n",
		pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
		"EndPoint" : "Root Complex");
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
			 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
			 new_device_type);

	al_reg_write32(regs->axi.pcie_global.conf, reg);

	return 0;
}
/**
 * Set the maximum number of lanes for the port.
 *
 * Must be called while the port is still disabled. Programs the
 * active-lanes field (whose location depends on the port revision) and
 * caches the value in the handle.
 *
 * @return 0 on success, -EINVAL if the port is already enabled
 */
int al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t lanes_bitmap;
	uint32_t field_mask;

	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot set max lanes\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	/* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */
	lanes_bitmap = AL_PCIE_PARSE_LANES(lanes);

	/* the active-lanes field moved in revision 3 */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		field_mask = PCIE_REV3_4_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK;
	else
		field_mask = PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK;

	al_reg_write32_masked(regs->axi.pcie_global.conf, field_mask,
		lanes_bitmap);

	pcie_port->max_lanes = lanes;

	return 0;
}
/**
 * Read back the maximum number of lanes from the global conf register.
 *
 * Decodes the active-lanes bitmask ('b1 -> 1 lane, 'b11 -> 2 lanes, ...)
 * into a lane count. On an unrecognized bitmask *lanes is set to 0 and
 * -EINVAL is returned.
 */
int al_pcie_port_max_lanes_get(struct al_pcie_port *pcie_port, uint8_t *lanes)
{
	uint32_t act_lanes;

	act_lanes = AL_REG_FIELD_GET(al_reg_read32(pcie_port->regs->axi.pcie_global.conf),
		(pcie_port->rev_id >= AL_PCIE_REV_ID_3) ?
		PCIE_REV3_4_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
		PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
		PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);

	if (act_lanes == 0x1) {
		*lanes = 1;
	} else if (act_lanes == 0x3) {
		*lanes = 2;
	} else if (act_lanes == 0xf) {
		*lanes = 4;
	} else if (act_lanes == 0xff) {
		*lanes = 8;
	} else if (act_lanes == 0xffff) {
		*lanes = 16;
	} else {
		*lanes = 0;
		al_err("PCIe %d: invalid max lanes val (0x%x)\n", pcie_port->port_id, act_lanes);
		return -EINVAL;
	}

	return 0;
}
/**
 * Set the maximum number of physical functions (PFs) for the port.
 *
 * The allowed maximum depends on the revision (asserted below); the
 * value is cached in the handle. If the port is already enabled and the
 * revision is 3+, the hardware max-function number is also programmed
 * and the multi-function bit in the config header is updated (EP mode
 * with more than one PF).
 *
 * @return 0 (always succeeds; out-of-range values trip al_assert)
 */
int al_pcie_port_max_num_of_pfs_set(
	struct al_pcie_port *pcie_port,
	uint8_t max_num_of_pfs)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	/* per-revision PF count limits */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_5)
		al_assert(max_num_of_pfs <= REV5_MAX_NUM_OF_PFS);
	else if (pcie_port->rev_id == AL_PCIE_REV_ID_4)
		al_assert(max_num_of_pfs <= REV4_MAX_NUM_OF_PFS);
	else if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
		al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
	else
		al_assert(max_num_of_pfs == REV1_2_MAX_NUM_OF_PFS);

	pcie_port->max_num_of_pfs = max_num_of_pfs;

	if (al_pcie_port_is_enabled(pcie_port) && (pcie_port->rev_id >= AL_PCIE_REV_ID_3)) {
		enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

		al_bool is_multi_pf =
			((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1));

		/* Set maximum physical function numbers */
		al_reg_write32_masked(
			&regs->port_regs->timer_ctrl_max_func_num,
			PCIE_PORT_GEN3_MAX_FUNC_NUM,
			pcie_port->max_num_of_pfs - 1);

		/* header fields are RO - temporarily unlock them */
		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

		/**
		 * in EP mode, when we have more than 1 PF we need to assert
		 * multi-pf support so the host scan all PFs
		 */
		al_reg_write32_masked((uint32_t __iomem *)
			(&regs->core_space[0].config_header[0] +
			(PCIE_BIST_HEADER_TYPE_BASE >> 2)),
			PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
			is_multi_pf ? PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK : 0);

		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
	}

	return 0;
}
/**
 * Inbound header credits and outstanding outbound reads configuration.
 *
 * Validates that the port supports dynamic credit allocation and is not
 * yet enabled, checks the credit sum against the per-revision header
 * limit, then programs the posted/non-posted/completion header credits
 * into the init_fc configuration register (field layout differs between
 * rev 1/2 and rev 3+). When the HW defaults are not used, the
 * outstanding-outbound-reads limiter is programmed as well and the
 * posted/non-posted credits are cached for a later core update.
 *
 * Fix: the original code had two byte-identical branches for
 * (rev_id >= 4) and (rev_id == 3) using the same REV3_4 masks/shifts;
 * they are merged into a single (rev_id >= 3) branch.
 *
 * @return 0 on success, -EINVAL if unsupported or the port is enabled
 */
int al_pcie_port_ib_hcrd_os_ob_reads_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint8_t rev_id = pcie_port->rev_id;
	int subrev_id = pcie_port->subrev_id;
	unsigned int credit_sum;
	unsigned int total_hdr_limit;

	/* This version doesn't support dynamic credit allocation */
	if (ib_hcrd_os_ob_reads_hw_config[rev_id][subrev_id].ib_hcrd_supported == AL_FALSE) {
		al_err("PCIe %d: this port doesn't support dynamic header allocation\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot configure IB credits and OB OS reads\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
	al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
	al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);

	/* total credits must fit within the per-revision header limit */
	credit_sum = ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
		ib_hcrd_os_ob_reads_config->nof_np_hdr +
		ib_hcrd_os_ob_reads_config->nof_p_hdr;
	total_hdr_limit = ib_hcrd_os_ob_reads_hw_config[rev_id][subrev_id].total_hdr_limit;
	al_assert(credit_sum <= total_hdr_limit);

	/* rev 3 and rev 4+ share the same field layout; rev 1/2 differ */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
		al_reg_write32_masked(
			regs->axi.init_fc.cfg,
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_P_HDR_MASK |
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_NP_HDR_MASK |
			PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
			 PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
			 PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
			 PCIE_AXI_REV3_4_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
	} else {
		al_reg_write32_masked(
			regs->axi.init_fc.cfg,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK |
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK |
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
	}

	if (ib_hcrd_os_ob_reads_hw_config[rev_id][subrev_id].ib_hcrd_use_hw_default == AL_FALSE) {
		al_reg_write32_masked(
			regs->axi.pre_configuration.pcie_core_setup,
			PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
			ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
			PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);

		/* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
		pcie_port->ib_hcrd_config.nof_np_hdr =
			ib_hcrd_os_ob_reads_config->nof_np_hdr;
		pcie_port->ib_hcrd_config.nof_p_hdr =
			ib_hcrd_os_ob_reads_config->nof_p_hdr;
		pcie_port->ib_hcrd_config.crdt_update_required = 1;
	} else {
		/* Disable proprietary mechanism limiting number of outstanding outbound reads */
		al_reg_write32_masked(
			regs->axi.pre_configuration.pcie_core_setup,
			PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
			0);
	}

	return 0;
}
/**
 * Get the current operating mode (EP or RC) from the global conf
 * register's device-type field.
 *
 * @return the decoded mode, or AL_PCIE_OPERATING_MODE_UNKNOWN (with an
 *	   error log) for an unrecognized device type
 */
enum al_pcie_operating_mode al_pcie_operating_mode_get(
	struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs;
	uint32_t conf;
	uint32_t device_type;

	al_assert(pcie_port);
	regs = pcie_port->regs;

	conf = al_reg_read32(regs->axi.pcie_global.conf);
	device_type = AL_REG_FIELD_GET(conf,
		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
		PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);

	if (device_type == PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP)
		return AL_PCIE_OPERATING_MODE_EP;
	if (device_type == PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC)
		return AL_PCIE_OPERATING_MODE_RC;

	al_err("PCIe %d: unknown device type (%d) in global conf register.\n",
		pcie_port->port_id, device_type);

	return AL_PCIE_OPERATING_MODE_UNKNOWN;
}
/**
 * PCIe AXI quality of service configuration.
 *
 * Programs the AXI master read (ARQOS) and write (AWQOS) QoS values;
 * both must be within the hardware maximum (asserted).
 */
void al_pcie_axi_qos_config(
	struct al_pcie_port *pcie_port,
	unsigned int arqos,
	unsigned int awqos)
{
	struct al_pcie_regs *regs;

	al_assert(pcie_port);
	regs = pcie_port->regs;
	al_assert(arqos <= PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_VAL_MAX);
	al_assert(awqos <= PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_VAL_MAX);

	/* read channel QoS */
	al_reg_write32_masked(regs->axi.ctrl.master_arctl,
		PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK,
		arqos << PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT);

	/* write channel QoS */
	al_reg_write32_masked(regs->axi.ctrl.master_awctl,
		PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK,
		awqos << PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT);
}
/**************************** PCIe Port Enable API ****************************/

/**
 * Enable PCIe port (deassert core reset via the PBS PCIE_EXIST bit).
 *
 * Before releasing the reset, applies pre-enable defaults and
 * workarounds:
 * - default inbound header credits / outstanding outbound reads, if the
 *   caller didn't configure them and the port supports the mechanism
 * - rev 4 X16/X8 completion-buffer sizing workarounds (RMN 11478)
 * - ATS capability disable on rev 1/2 (must precede reset deassertion)
 *
 * @return 0 (always succeeds)
 */
int al_pcie_port_enable(struct al_pcie_port *pcie_port)
{
	struct al_pbs_regs *pbs_reg_base =
		(struct al_pbs_regs *)pcie_port->pbs_regs;
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int port_id = pcie_port->port_id;
	uint8_t rev_id = pcie_port->rev_id;
	int subrev_id = pcie_port->subrev_id;

	/* pre-port-enable default functionality should be here */

	/**
	 * Set inbound header credit and outstanding outbound reads defaults
	 * if the port initiator doesn't set it & port support it
	 * Must be called before port enable (PCIE_EXIST)
	 */
	if (((pcie_port->ib_hcrd_config.nof_np_hdr == 0) ||
			(pcie_port->ib_hcrd_config.nof_p_hdr == 0)) &&
			(ib_hcrd_os_ob_reads_hw_config[rev_id][subrev_id].ib_hcrd_supported == AL_TRUE))
		al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);

	if (rev_id == AL_PCIE_REV_ID_4) {
		if (subrev_id == PCIE_AXI_DEVICE_ID_REG_REV_ID_X16) {
			/*
			 * Addressing RMN: 11478
			 *
			 * RMN description:
			 * Completion header buffer size, apply to X16 only.
			 * This port has place for 400 headers.
			 * due to fact each read request can be split to up to 5 parts
			 * (how many parts depend of max_read_size / RCB) there is a need
			 * to set the proper max value.
			 *
			 * Software flow:
			 * set limit NOF_OUT_RD = 'd80
			 * The max setting can be slightly different,
			 * depends on system setup (max_read_size / RCB), we took the worse case
			 */
			al_reg_write32_masked(
				regs->axi.pre_configuration.pcie_core_setup,
				PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
				80 << PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);
		}
		if ((subrev_id == PCIE_AXI_DEVICE_ID_REG_REV_ID_X16) ||
				(subrev_id == PCIE_AXI_DEVICE_ID_REG_REV_ID_X8)) {
			/*
			 * Addressing RMN: 11478
			 *
			 * RMN description:
			 * Completion DATA buffer size apply to X16 and X8.
			 * This ports have a smaller completion DATA buffer size than required
			 * to accommodate all the outstanding reads data.
			 *
			 * Software flow:
			 * set the RD_LIMITER in value that will prevent it from accepting new read
			 * request when there is not sufficient place
			 */
			al_reg_write32(regs->axi.cfg_outbound_rd_len.len_shaper,
				(subrev_id == PCIE_AXI_DEVICE_ID_REG_REV_ID_X16) ?
				640 : 320);
		}
	}

	/*
	 * Disable ATS capability
	 * - must be done before core reset deasserted
	 * - rev_id 0 - no effect, but no harm
	 */
	if ((pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
			(pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
		al_reg_write32_masked(
			regs->axi.ordering.pos_cntl,
			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
	}

	/* Deassert core reset: set this port's PCIE_EXIST bit */
	al_reg_write32_masked(
		&pbs_reg_base->unit.pcie_conf_1,
		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));

	return 0;
}
/** Disable PCIe port (assert core reset via the PBS PCIE_EXIST bit) */
void al_pcie_port_disable(struct al_pcie_port *pcie_port)
{
	struct al_pbs_regs *pbs_reg_base =
		(struct al_pbs_regs *)pcie_port->pbs_regs;
	unsigned int port_id = pcie_port->port_id;

	/* warn but proceed - the register writes below are harmless */
	if (!al_pcie_port_is_enabled(pcie_port)) {
		al_warn("PCIe %d: trying to disable a non-enabled port\n",
			pcie_port->port_id);
	}

	/* Unset values given in port_enable */
	pcie_port->ib_hcrd_config.nof_np_hdr = 0;
	pcie_port->ib_hcrd_config.nof_p_hdr = 0;
	pcie_port->ib_hcrd_config.crdt_update_required = 0;

	/* Assert core reset: clear this port's PCIE_EXIST bit */
	al_reg_write32_masked(
		&pbs_reg_base->unit.pcie_conf_1,
		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
		0);
}
/**
 * Enable/disable port memory shutdown via the global conf register.
 *
 * The shutdown bit location depends on the port revision. The port must
 * be enabled for the write to be meaningful.
 *
 * @return 0 on success, -EINVAL if the port is not enabled
 */
int al_pcie_port_memory_shutdown_set(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t mask;

	/* shutdown bit moved in revision 3 */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		mask = PCIE_REV3_4_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
	else
		mask = PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;

	if (!al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: not enabled, cannot shutdown memory\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	al_reg_write32_masked(regs->axi.pcie_global.conf, mask,
		(enable == AL_TRUE) ? mask : 0);

	return 0;
}
/**
 * Check whether the port memory is shut down.
 *
 * A disabled port is reported as shut down. Otherwise tests the
 * revision-specific shutdown bit in the global conf register.
 */
al_bool al_pcie_port_memory_is_shutdown(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t mask;
	uint32_t conf;

	al_assert(pcie_port);

	/* a port held in reset is considered shut down */
	if (!al_pcie_port_is_enabled(pcie_port))
		return AL_TRUE;

	if ((pcie_port->rev_id == AL_PCIE_REV_ID_5) ||
	    (pcie_port->rev_id == AL_PCIE_REV_ID_4) ||
	    (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		mask = PCIE_REV3_4_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		   (pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
		mask = PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
	} else {
		al_err("%s: unsupported PCIe revision ID %d\n", __func__, pcie_port->rev_id);
		al_assert(0);
		return AL_FALSE;
	}

	conf = al_reg_read32(regs->axi.pcie_global.conf);

	return (conf & mask) ? AL_TRUE : AL_FALSE;
}
/**
 * Check whether a port is enabled given the raw PBS register base,
 * by testing the port's bit in the PCIE_EXIST field of pcie_conf_1.
 */
al_bool al_pcie_port_is_enabled_raw(
	void __iomem *pbs_reg_base,
	unsigned int port_id)
{
	struct al_pbs_regs *pbs_regs = (struct al_pbs_regs *)pbs_reg_base;
	uint32_t conf1;
	uint32_t enabled_ports;

	conf1 = al_reg_read32(&pbs_regs->unit.pcie_conf_1);
	enabled_ports = AL_REG_FIELD_GET(conf1,
		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK,
		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);

	/* extract this port's single bit from the PCIE_EXIST field */
	return (AL_REG_FIELD_GET(enabled_ports, AL_BIT(port_id), port_id) == 1);
}
/** Check whether the port is enabled (PCIE_EXIST bit set in PBS) */
al_bool al_pcie_port_is_enabled(struct al_pcie_port *pcie_port)
{
	return al_pcie_port_is_enabled_raw(pcie_port->pbs_regs,
		pcie_port->port_id);
}
/*************************** PCIe Configuration API ***************************/
/** configure pcie port (link params, etc..) */
int al_pcie_port_config(struct al_pcie_port *pcie_port,
const struct al_pcie_port_config_params *params)
{
struct al_pcie_port_config_live_params live_params;
struct al_pcie_regs *regs;
enum al_pcie_operating_mode op_mode;
int status = 0;
int i;
unsigned int pf;
al_assert(pcie_port);
al_assert(params);
regs = pcie_port->regs;
if (!al_pcie_port_is_enabled(pcie_port)) {
al_err("PCIe %d: port not enabled, cannot configure port\n",
pcie_port->port_id);
return -EINVAL;
}
if (al_pcie_is_link_started(pcie_port)) {
al_err("PCIe %d: link already started, cannot configure port\n",
pcie_port->port_id);
return -EINVAL;
}
al_dbg("PCIe %d: port config\n", pcie_port->port_id);
op_mode = al_pcie_operating_mode_get(pcie_port);
/* Addressing RMN: 10181
*
* RMN description:
* BAR4/5 Registers Not Properly Disabled in RC mode
* Software flow:
* Disable BAR's 4/5 in HW when in RC mode
*/
if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
for (i = 0; i < pcie_port->max_num_of_pfs; i++) {
uint32_t __iomem *bar_addr;
bar_addr =
®s->core_space[i].config_header[(AL_PCI_BASE_ADDRESS_4 >> 2)];
al_reg_write32_dbi_cs2(pcie_port, bar_addr, 0);
bar_addr =
®s->core_space[i].config_header[(AL_PCI_BASE_ADDRESS_4_HI >> 2)];
al_reg_write32_dbi_cs2(pcie_port, bar_addr, 0);
}
}
/* if max lanes not specifies, read it from register */
if (pcie_port->max_lanes == 0) {
status = al_pcie_port_max_lanes_get(pcie_port, &pcie_port->max_lanes);
if (status) {
pcie_port->max_lanes = 0;
al_err("PCIe %d: invalid max lanes val\n", pcie_port->port_id);
}
}
if (params->link_params)
status = al_pcie_port_link_config(pcie_port, params->link_params);
if (status)
goto done;
status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
if (status)
goto done;
al_pcie_port_max_num_of_pfs_set(pcie_port, pcie_port->max_num_of_pfs);
al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
al_pcie_port_relaxed_pcie_ordering_config(pcie_port, params->relaxed_ordering_params);
if (params->lat_rply_timers)
status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
if (status)
goto done;
if (params->gen2_params)
status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
if (status)
goto done;
if (params->gen3_params)
status = al_pcie_port_gen3_params_config(pcie_port, op_mode, params->gen3_params);
if (status)
goto done;
if (params->sris_params)
status = al_pcie_port_sris_config(pcie_port, params->sris_params,
params->link_params->max_speed);
if (status)
goto done;
if (pcie_port->ib_hcrd_config.crdt_update_required) {
al_pcie_port_ib_hcrd_config(pcie_port);
}
if (params->fast_link_mode) {
if (pcie_port->rev_id >= AL_PCIE_REV_ID_4)
al_reg_write32_masked(
regs->app.cfg_blockalign.cfg_ext_cntl_1,
PCIE_W_CFG_EXT_CNTL_1_FAST_MODE_CNT_SLOWDOWN_FACTOR_MASK,
PCIE_W_CFG_EXT_CNTL_1_FAST_MODE_CNT_SLOWDOWN_FACTOR_VAL_FAST <<
PCIE_W_CFG_EXT_CNTL_1_FAST_MODE_CNT_SLOWDOWN_FACTOR_SHIFT);
al_reg_write32_masked(®s->port_regs->port_link_ctrl,
1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
}
if (params->enable_axi_slave_err_resp)
al_reg_write32_masked(®s->port_regs->axi_slave_err_resp,
1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
/**
* Addressing RMN: 5477
*
* RMN description:
* address-decoder logic performs sub-target decoding even for transactions
* which undergo target enforcement. thus, in case transaction's address is
* inside any ECAM bar, the sub-target decoding will be set to ECAM, which
* causes wrong handling by PCIe unit
*
* Software flow:
* on EP mode only, turning on the iATU-enable bit (with the relevant mask
* below) allows the PCIe unit to discard the ECAM bit which was asserted
* by-mistake in the address-decoder
*/
if (op_mode == AL_PCIE_OPERATING_MODE_EP) {
al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
(0) << PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
al_reg_write32_masked(regs->axi.ob_ctrl.cfg_control,
PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN,
PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN);
}
if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
/**
* enable memory and I/O access from port when in RC mode
* in RC mode, only core_space[0] is valid.
*/
al_reg_write16_masked(
(uint16_t __iomem *)(®s->core_space[0].config_header[0] + (0x4 >> 2)),
0x7, /* Mem, MSE, IO */
0x7);
/* change the class code to match pci bridge */
al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
al_reg_write32_masked(
(uint32_t __iomem *)(®s->core_space[0].config_header[0]
+ (PCI_CLASS_REVISION >> 2)),
0xFFFFFF00,
0x06040000);
al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
/**
* Addressing RMN: 5702
*
* RMN description:
* target bus mask default value in HW is: 0xFE, this enforces
* setting the target bus for ports 1 and 3 when running on RC
* mode since bit[20] in ECAM address in these cases is set
*
* Software flow:
* on RC mode only, set target-bus value to 0xFF to prevent this
* enforcement
*/
al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK);
}
if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
/* Disable referring to PM state of non-enable functions */
for (i = pcie_port->max_num_of_pfs; i < AL_MAX_NUM_OF_PFS; i++) {
al_reg_write32_masked(regs->app.pm_state_per_func[i].pm_state_per_func,
PCIE_W_PM_STATE_PER_FUNC_PM_STATE_PER_FUNC_ASPM_PF_ENABLE_MAX_FUNC_NUMBER |
PCIE_W_PM_STATE_PER_FUNC_PM_STATE_PER_FUNC_DSATE_PF_ENABLE_MAX_FUNC_NUMBER,
0);
}
}
/* disable resetting the EIEOS count which may cause equalization to
* take longer than needed
*/
al_reg_write32_masked(®s->port_regs->gen3_ctrl,
AL_BIT(PCIE_PORT_GEN3_CTRL_EQ_EIEOS_COUNT_RESET_DISABLE_SHIFT),
AL_BIT(PCIE_PORT_GEN3_CTRL_EQ_EIEOS_COUNT_RESET_DISABLE_SHIFT));
al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
for (pf = 0; pf < pcie_port->max_num_of_pfs; pf++) {
al_reg_write32_masked(
regs->core_space[pf].pcie_link_cap_base,
AL_PCI_EXP_LNKCAP_MLW,
pcie_port->max_lanes << AL_PCI_EXP_LNKCAP_MLW_SHIFT);
/*
* in EP mode we dont want to expose that we support TPH as completer
*/
if (op_mode == AL_PCIE_OPERATING_MODE_EP)
al_reg_write32_masked(
regs->core_space[pf].pcie_cap_base + (AL_PCI_EXP_DEVCAP2 >> 2),
AL_PCI_EXP_DEVCAP2_TCS_MASK,
0);
}
al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
/* Default live configuration */
if (sizeof(live_params))
al_memset(&live_params, 0, sizeof(live_params));
al_pcie_port_config_live(pcie_port, &live_params);
done:
al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status? "failed": "done");
return status;
}
/**
 * Apply configuration that is safe to apply while the port is live.
 * Called from al_pcie_port_config() with zeroed (default) parameters.
 *
 * @param pcie_port	pcie port handle (must be enabled)
 * @param params	live configuration parameters (asserted non-NULL;
 *			no field is currently read by this function)
 */
void al_pcie_port_config_live(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_port_config_live_params *params)
{
	struct al_pcie_regs *regs;

	al_assert(pcie_port);
	al_assert(al_pcie_port_is_enabled(pcie_port));
	al_assert(params);

	regs = pcie_port->regs;

	/*
	 * Disable the mechanism that allows direct message generation by CPU
	 * or DMA addresses matching the Msg region (defined by Msg_Start and
	 * Msg_Limit); this mechanism is currently not in use.
	 */
	al_reg_write32_masked(regs->axi.ctrl.slv_wctl,
		PCIE_AXI_CTRL_SLV_WCTRL_MSG_GENERATE_MASK,
		PCIE_AXI_CTRL_SLV_WCTRL_MSG_GENERATE_MASK);

	/**
	 * Addressing RMN: 12107
	 *
	 * RMN description:
	 * When setting in-bound ATU to return bad response, such as UR,
	 * error indication might be asserted for in-bound request not hitting this ATU.
	 *
	 * Software flow:
	 * set bit rsrvd1 in pre_configuration_pcie_core_setup register.
	 * This will add the missing qualifier : cfg_qualified_atu_error_with_flt_q_hv.
	 */
	if (pcie_port->rev_id > AL_PCIE_REV_ID_1)
		al_reg_write32_masked(regs->axi.pre_configuration.pcie_core_setup,
			PCIE_AXI_CFG_QUALIFIED_ATU_ERROR_WITH_FLT_Q_HV,
			PCIE_AXI_CFG_QUALIFIED_ATU_ERROR_WITH_FLT_Q_HV);
}
/**
 * Configure a PCIe physical function (PF).
 *
 * Validates that the owning port is enabled, then applies the per-PF
 * parameters via al_pcie_port_pf_params_config().
 *
 * Fix: removed the dead 'if (status) goto done;' that immediately
 * preceded the 'done:' label - it was a no-op.
 *
 * @param pcie_pf	pcie pf handle
 * @param params	per-PF configuration parameters
 *
 * @return 0 on success, negative error code otherwise
 */
int al_pcie_pf_config(
	struct al_pcie_pf *pcie_pf,
	const struct al_pcie_pf_config_params *params)
{
	struct al_pcie_port *pcie_port;
	int status = 0;

	al_assert(pcie_pf);
	al_assert(params);

	pcie_port = pcie_pf->pcie_port;

	if (!al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: port not enabled, cannot configure port\n", pcie_port->port_id);
		return -EINVAL;
	}

	al_dbg("PCIe %d: pf %d config\n", pcie_port->port_id, pcie_pf->pf_num);

	/* 'params' is asserted non-NULL above; keep the runtime check for
	 * builds where al_assert compiles out */
	if (params)
		status = al_pcie_port_pf_params_config(pcie_pf, params);

	al_dbg("PCIe %d: pf %d config %s\n",
		pcie_port->port_id, pcie_pf->pf_num, status ? "failed" : "done");

	return status;
}
/*************************** PCIe PM Related API ***************************/

/**
 * Configure the ASPM capabilities advertised by the port.
 *
 * @param pcie_port	pcie port handle (must be enabled)
 * @param aspm_cfg	bitwise OR of AL_PCIE_PM_ASPM_CFG_L0S and/or
 *			AL_PCIE_PM_ASPM_CFG_L1 (any other bit asserts)
 */
void al_pcie_pm_aspm_cfg_set(
	struct al_pcie_port *pcie_port,
	unsigned int aspm_cfg)
{
	struct al_pcie_regs *regs;
	enum al_pcie_operating_mode op_mode;

	/* only the L0s/L1 bits are legal in aspm_cfg */
	al_assert(!(aspm_cfg &
		~(AL_PCIE_PM_ASPM_CFG_L0S |
		AL_PCIE_PM_ASPM_CFG_L1)));
	al_assert(pcie_port);
	al_assert(al_pcie_port_is_enabled(pcie_port));
	regs = pcie_port->regs;
	op_mode = al_pcie_operating_mode_get(pcie_port);

	/* advertise the requested ASPM support in PF0 Link Capabilities */
	al_reg_write32_masked(regs->core_space[0].pcie_link_cap_base,
		AL_PCI_EXP_LNKCAP_ASPMS,
		aspm_cfg << AL_PCI_EXP_LNKCAP_ASPMS_SHIFT);

	/* Enable entrance to L1 without receiver in L0s, if L0s not supported */
	if (aspm_cfg == AL_PCIE_PM_ASPM_CFG_L1)
		al_reg_write32_masked(&regs->port_regs->ack_f_aspm_ctrl,
			PCIE_PORT_ACK_F_ASPM_CTRL_ENTER_ASPM,
			PCIE_PORT_ACK_F_ASPM_CTRL_ENTER_ASPM);

	/* in RC mode, also enable the requested states in Link Control */
	if (op_mode == AL_PCIE_OPERATING_MODE_RC)
		al_reg_write32_masked(regs->core_space[0].pcie_link_ctrl_status,
			AL_PCI_EXP_LNKCTL_ASPMC,
			aspm_cfg);
}
/**
 * Transition the link to or from L2 state.
 *
 * Entering L2 (@enable true) is only legal in RC mode (sends PM Turn_Off);
 * exiting (@enable false) is only legal in EP mode (sends PM_PME) -
 * both are asserted. The field layout of the pm_control register changed
 * in revision 3.
 *
 * @param pcie_port	pcie port handle (must be enabled)
 * @param enable	AL_TRUE to enter L2, AL_FALSE to exit
 */
void al_pcie_pm_l2_trigger(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	enum al_pcie_operating_mode op_mode;
	struct al_pcie_regs *regs;

	al_assert(pcie_port);
	al_assert(al_pcie_port_is_enabled(pcie_port));
	regs = pcie_port->regs;
	op_mode = al_pcie_operating_mode_get(pcie_port);
	/* L2 entry is an RC action; L2 exit (PME) is an EP action */
	al_assert(((op_mode == AL_PCIE_OPERATING_MODE_RC) && enable) ||
		((op_mode == AL_PCIE_OPERATING_MODE_EP) && (!enable)));

	if (enable) {
		unsigned int mask = (pcie_port->rev_id >= AL_PCIE_REV_ID_3) ?
			PCIE_W_REV3_4_GLOBAL_CTRL_PM_CONTROL_PM_XMT_TURNOFF :
			PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_PM_XMT_TURNOFF;
		/*
		 * In order to create the TurnOff MSG TLP,
		 * the msg_arbitration module requires an edge on the relevant bit.
		 * Thus, we need to make sure it is 1'b0 before asserting it to 1'b1
		 */
		al_reg_write32_masked(regs->app.global_ctrl.pm_control, mask, 0);
		al_reg_write32_masked(regs->app.global_ctrl.pm_control, mask, mask);
	} else {
		unsigned int mask = (pcie_port->rev_id >= AL_PCIE_REV_ID_3) ?
			PCIE_W_REV3_4_GLOBAL_CTRL_PM_CONTROL_PM_XMT_PME_FUNC_MASK :
			PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_PM_XMT_PME;
		/*
		 * In order to create the PM_PME MSG TLP,
		 * the msg_arbitration module requires an edge on the all relevant bits.
		 * Thus, we need to make sure each one of them is 1'b0
		 * before asserting it to 1'b1
		 */
		al_reg_write32_masked(regs->app.global_ctrl.pm_control, mask, 0);
		al_reg_write32_masked(regs->app.global_ctrl.pm_control, mask, mask);
	}
}
/************************** PCIe Link Operations API **************************/
/* start pcie link */
int al_pcie_link_start(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
if (!al_pcie_port_is_enabled(pcie_port)) {
al_err("PCIe %d: port not enabled, cannot start link\n",
pcie_port->port_id);
return -EINVAL;
}
al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
al_reg_write32_masked(
regs->app.global_ctrl.port_init,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
return 0;
}
/* stop pcie link */
int al_pcie_link_stop(struct al_pcie_port *pcie_port)
{
struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
if (!al_pcie_is_link_started(pcie_port)) {
al_warn("PCIe %d: trying to stop a non-started link\n",
pcie_port->port_id);
}
al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
al_reg_write32_masked(
regs->app.global_ctrl.port_init,
PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
return 0;
}
/** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
/** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
al_bool al_pcie_is_link_started(struct al_pcie_port *pcie_port)
{
	uint32_t port_init;
	uint8_t ltssm_en;
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;

	/* a disabled port cannot have a running LTSSM */
	if (!al_pcie_port_is_enabled(pcie_port))
		return AL_FALSE;

	port_init = al_reg_read32(regs->app.global_ctrl.port_init);
	/* extract the app_ltssm_en bit set by al_pcie_link_start() */
	ltssm_en = AL_REG_FIELD_GET(port_init,
		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
	return ltssm_en;
}
/*
 * Poll the LTSSM until the link reaches L0 (or better), forcing a link
 * retrain (stop + start) whenever training lingers too long in one of a
 * known set of problematic intermediate states.
 *
 * @param pcie_port		port handle
 * @param timeout_usec		total polling budget in microseconds
 * @param time_remaining_usec	[out] budget left when the link came up
 * @param retrain_cnt		[out] number of forced retrains performed
 *
 * @return 0 when the link reached L0 in time, -ETIME on timeout
 *
 * Fix: restored '&ltssm_state' which had been corrupted into the
 * mis-encoded sequence '<ssm_state'.
 */
static int _al_pcie_link_up_wait_retrain_if_not_full_width(
	struct al_pcie_port *pcie_port,
	int timeout_usec,
	unsigned int *time_remaining_usec,
	unsigned int *retrain_cnt)
{
	unsigned int count = 0;
	int state_counted = -1;
	unsigned int retrain_count = 0;
	struct state_cnt {
		unsigned int ltssm_state;
		unsigned int timeout;
	};
	/*
	 * LTSSM states in which lingering indicates a stuck negotiation;
	 * 'timeout' is the number of consecutive 100us polls tolerated in
	 * that state before a retrain is forced.
	 */
	static const struct state_cnt state_cnt[] = {
		{
			.ltssm_state = AL_PCIE_LTSSM_STATE_POLL_ACTIVE,
			.timeout = 50,
		},
		{
			.ltssm_state = AL_PCIE_LTSSM_STATE_CFG_LINKWD_START,
			.timeout = 5,
		},
		{
			.ltssm_state = AL_PCIE_LTSSM_STATE_CFG_LINKWD_ACEPT,
			.timeout = 5,
		},
		{
			.ltssm_state = AL_PCIE_LTSSM_STATE_CFG_LANENUM_WAIT,
			.timeout = 5,
		},
		{
			.ltssm_state = AL_PCIE_LTSSM_STATE_CFG_LANENUM_ACEPT,
			.timeout = 5,
		},
	};
	static const unsigned int num_state_types = AL_ARR_SIZE(state_cnt);

	do {
		al_bool retrain_required = AL_FALSE;
		uint8_t ltssm_state;

		al_pcie_check_link(pcie_port, &ltssm_state);
		if (ltssm_state >= AL_PCIE_LTSSM_STATE_L0) {
			/* link is up - stop polling */
			break;
		} else {
			unsigned int i;

			/* count consecutive observations of a watched state */
			for (i = 0; i < num_state_types; i++) {
				if (ltssm_state == state_cnt[i].ltssm_state) {
					if (state_counted != ltssm_state)
						count = 1;
					else
						count++;
					state_counted = ltssm_state;
					if (count == state_cnt[i].timeout)
						retrain_required = AL_TRUE;
					break;
				}
			}
			/* state not watched - restart the streak counter */
			if (i == num_state_types)
				count = 0;
		}

		if (retrain_required) {
			al_pcie_link_stop(pcie_port);
			al_udelay(1000);
			timeout_usec -= 1000;
			retrain_count++;
			al_pcie_link_start(pcie_port);
		}

		al_udelay(100);
		timeout_usec -= 100;
	} while (timeout_usec > 0);

	*retrain_cnt = retrain_count;

	if (timeout_usec <= 0)
		return -ETIME;

	*time_remaining_usec = (unsigned int)timeout_usec;

	return 0;
}
/*
 * Wait for the link to come up within timeout_ms, optionally running the
 * stuck-state retrain loop first (retrain_if_not_full_width).
 *
 * @return 0 when link is up, -ETIME on timeout (directly or via the
 *	   retrain phase)
 */
static int _al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms,
		al_bool retrain_if_not_full_width)
{
	unsigned int retrain_cnt = 0;
	int wait_count;

	if (retrain_if_not_full_width) {
		unsigned int time_remaining_usec;
		int err;

		/* retrain phase consumes part of the budget; continue with the rest */
		err = _al_pcie_link_up_wait_retrain_if_not_full_width(
			pcie_port, timeout_ms * 1000, &time_remaining_usec, &retrain_cnt);
		if (err) {
			al_err("PCIE_%d: link retrain timeout (%u retrains)\n",
				pcie_port->port_id, retrain_cnt);
			return err;
		}
		al_print("PCIE_%d: link retrain ok (%u retrains, %u usec remaining)\n",
			pcie_port->port_id, retrain_cnt, time_remaining_usec);
		timeout_ms = time_remaining_usec / 1000;
	}

	/* record retrain count for later reporting via al_pcie_link_status() */
	pcie_port->num_retrains = retrain_cnt;

	/*
	 * NOTE(review): wait_count = timeout_ms * INTERVALS_PER_SEC mixes ms
	 * with a per-second macro name - confirm the macro's actual units.
	 */
	wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
	while (wait_count-- > 0) {
		if (al_pcie_check_link(pcie_port, NULL)) {
			al_dbg("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
			return 0;
		} else
			al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
				pcie_port->port_id, wait_count);
		al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
	}

	al_dbg("PCIE_%d: link is not established in time\n",
		pcie_port->port_id);

	return -ETIME;
}
/* Wait for the link to come up; never forces retrains on stuck training. */
int al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
{
	const al_bool retrain_on_stuck_training = AL_FALSE;

	return _al_pcie_link_up_wait(pcie_port, timeout_ms,
		retrain_on_stuck_training);
}
/*
 * Extended link-up wait: caller chooses whether stuck training should
 * force link retrains before the plain polling phase.
 */
int al_pcie_link_up_wait_ex(struct al_pcie_port *pcie_port, uint32_t timeout_ms,
		al_bool retrain_if_not_full_width)
{
	return _al_pcie_link_up_wait(pcie_port, timeout_ms,
		retrain_if_not_full_width);
}
/*
 * Return the negotiated link width (number of active lanes) read from the
 * Link Status register of function 0.
 */
int al_pcie_link_active_lanes_get(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	/* byte offset halved: pointer arithmetic is on uint16_t */
	uint16_t pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base
			+ (AL_PCI_EXP_LNKSTA >> 1));

	return (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
}
/** get link status */
int al_pcie_link_status(struct al_pcie_port *pcie_port,
struct al_pcie_link_status *status)
{
struct al_pcie_regs *regs = pcie_port->regs;
uint16_t pcie_lnksta;
al_assert(status);
if (!al_pcie_port_is_enabled(pcie_port)) {
al_dbg("PCIe %d: port not enabled, no link.\n", pcie_port->port_id);
status->link_up = AL_FALSE;
status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
status->lanes = 0;
status->ltssm_state = 0;
status->num_retrains = 0;
return 0;
}
status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
if (!status->link_up) {
status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
status->lanes = 0;
status->num_retrains = 0;
return 0;
}
pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
status->speed = AL_PCIE_LINK_SPEED_GEN1;
break;
case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
status->speed = AL_PCIE_LINK_SPEED_GEN2;
break;
case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
status->speed = AL_PCIE_LINK_SPEED_GEN3;
break;
default:
status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
pcie_port->port_id, pcie_lnksta);
}
status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
status->num_retrains = pcie_port->num_retrains;
al_dbg("PCIe %d: Link up. speed gen%d negotiated width %d (%u retrains)\n",
pcie_port->port_id, status->speed, status->lanes, status->num_retrains);
return 0;
}
/** get lane status */
/*
 * Read per-lane status: whether the lane is in reset and its requested
 * speed, from the AXI lane status register.
 *
 * @param lane		lane index, must be below the port revision's lane count
 * @param status	[out] decoded lane status
 */
void al_pcie_lane_status_get(
	struct al_pcie_port *pcie_port,
	unsigned int lane,
	struct al_pcie_lane_status *status)
{
	struct al_pcie_regs *regs;
	uint32_t lane_status;
	uint32_t *reg_ptr;

	al_assert(pcie_port);
	al_assert(status);
	al_assert(lane < lanes_num[pcie_port->rev_id]);

	regs = pcie_port->regs;
	reg_ptr = regs->axi.status.lane[lane];

	/* Reset field is valid only when same value is read twice */
	do {
		lane_status = al_reg_read32(reg_ptr);
		status->is_reset = !!(lane_status & PCIE_AXI_STATUS_LANE_IS_RESET);
	} while (status->is_reset != (!!(al_reg_read32(reg_ptr) & PCIE_AXI_STATUS_LANE_IS_RESET)));

	status->requested_speed =
		(lane_status & PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_MASK) >>
		PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_SHIFT;
}
/** trigger hot reset */
/*
 * Assert or de-assert hot reset on the link (RC mode only, link must be
 * started). Rejects the request when the link is already in the target
 * hot-reset state.
 *
 * @return 0 on success, -EINVAL on mode/state violations
 */
int al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t events_gen;
	al_bool app_reset_state;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
		al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
		return -EINVAL;
	}

	if (!al_pcie_is_link_started(pcie_port)) {
		al_err("PCIe %d: link not started, cannot trigger hot-reset\n", pcie_port->port_id);
		return -EINVAL;
	}

	events_gen = al_reg_read32(regs->app.global_ctrl.events_gen[0]);
	app_reset_state = events_gen & PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT;

	/* refuse no-op requests: already in the requested state */
	if (enable && app_reset_state) {
		al_err("PCIe %d: link is already in hot-reset state\n", pcie_port->port_id);
		return -EINVAL;
	}
	if ((!enable) && (!app_reset_state)) {
		al_err("PCIe %d: link is already in non-hot-reset state\n", pcie_port->port_id);
		return -EINVAL;
	}

	al_dbg("PCIe %d: %s hot-reset\n", pcie_port->port_id,
		(enable ? "enabling" : "disabling"));
	/* hot-reset functionality is implemented only for function 0 */
	al_reg_write32_masked(regs->app.global_ctrl.events_gen[0],
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT,
		(enable ? PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT
			: ~PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT));
	return 0;
}
/** disable port link */
/*
 * Set or clear the Link Disable bit in the Link Control register
 * (RC mode only, link must be started). Rejects the request when the
 * link is already in the target state.
 *
 * @return 0 on success, -EINVAL on mode/state violations
 *
 * Fix: the RC-mode error message wrongly said "hot-reset" (copy-paste
 * from al_pcie_link_hot_reset); it now names link-disable.
 */
int al_pcie_link_disable(struct al_pcie_port *pcie_port, al_bool disable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t pcie_lnkctl;
	al_bool link_disable_state;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
		al_err("PCIe %d: link-disable is applicable only for RC mode\n", pcie_port->port_id);
		return -EINVAL;
	}

	if (!al_pcie_is_link_started(pcie_port)) {
		al_err("PCIe %d: link not started, cannot disable link\n", pcie_port->port_id);
		return -EINVAL;
	}

	/* offset in uint32_t units, hence the >> 2 */
	pcie_lnkctl = al_reg_read32(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 2));
	link_disable_state = pcie_lnkctl & AL_PCI_EXP_LNKCTL_LNK_DIS;

	/* refuse no-op requests: already in the requested state */
	if (disable && link_disable_state) {
		al_err("PCIe %d: link is already in disable state\n", pcie_port->port_id);
		return -EINVAL;
	} else if ((!disable) && (!(link_disable_state))) {
		al_err("PCIe %d: link is already in enable state\n", pcie_port->port_id);
		return -EINVAL;
	}

	al_dbg("PCIe %d: %s port\n", pcie_port->port_id, (disable ? "disabling" : "enabling"));
	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 2),
		AL_PCI_EXP_LNKCTL_LNK_DIS,
		(disable ? AL_PCI_EXP_LNKCTL_LNK_DIS : ~AL_PCI_EXP_LNKCTL_LNK_DIS));

	return 0;
}
/** retrain link */
/*
 * Trigger link retraining by setting the Retrain Link bit in the Link
 * Control register (RC mode only, link must be started).
 *
 * @return 0 on success, -EINVAL on mode/state violations
 */
int al_pcie_link_retrain(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
		al_err("PCIe %d: link-retrain is applicable only for RC mode\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	if (!al_pcie_is_link_started(pcie_port)) {
		al_err("PCIe %d: link not started, cannot link-retrain\n", pcie_port->port_id);
		return -EINVAL;
	}

	/* offset in uint32_t units, hence the >> 2 */
	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 2),
		AL_PCI_EXP_LNKCTL_LNK_RTRN, AL_PCI_EXP_LNKCTL_LNK_RTRN);

	return 0;
}
/** redo equalization */
/*
 * Enable/disable redoing Gen3 equalization on the next link retrain, via
 * the Secondary PCIe Extended Capability Link Control 3 register
 * (RC mode only).
 *
 * @return 0 on success, -EINVAL if not in RC mode
 */
int al_pcie_link_retrain_eq_redo_en(struct al_pcie_port *pcie_port, al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
		al_err("PCIe %d: link-retrain redoing equalization is applicable only for RC mode\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	/* set/clear both Perform-Equalization and its interrupt enable together */
	al_reg_write32_masked(
		regs->core_space[0].pcie_sec_ext_cap_base + (AL_PCI_EXP_LNKCTL3 >> 2),
		AL_PCI_EXP_LNKCTL3_PRFRM_EQ | AL_PCI_EXP_LNKCTL3_EQ_INT_EN,
		(enable == AL_TRUE) ?
		(AL_PCI_EXP_LNKCTL3_PRFRM_EQ | AL_PCI_EXP_LNKCTL3_EQ_INT_EN) : 0);

	return 0;
}
/* trigger speed change */
int al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
enum al_pcie_link_speed new_speed)
{
struct al_pcie_regs *regs = pcie_port->regs;
if (!al_pcie_is_link_started(pcie_port)) {
al_err("PCIe %d: link not started, cannot change speed\n", pcie_port->port_id);
return -EINVAL;
}
al_dbg("PCIe %d: changing speed to %d\n", pcie_port->port_id, new_speed);
al_pcie_port_link_speed_ctrl_set(pcie_port, new_speed);
al_reg_write32_masked(®s->port_regs->gen2_ctrl,
PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE,
PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE);
return 0;
}
/* TODO: check if this function needed */
int al_pcie_link_change_width(struct al_pcie_port *pcie_port,
uint8_t width __attribute__((__unused__)))
{
al_err("PCIe %d: link change width not implemented\n",
pcie_port->port_id);
return -ENOSYS;
}
/*
 * Program LTSSM state-compare vector 0 with the given state (rev 4+
 * ports only). The state must not exceed AL_PCIE_LTSSM_RCVRY_EQ3.
 */
void al_pcie_link_ltssm_state_vec0_cmp_enable(
	struct al_pcie_port *pcie_port,
	enum al_pcie_ltssm_state state)
{
	struct al_pcie_regs *regs;

	al_assert(pcie_port);
	/* feature exists only on rev 4 and later */
	al_assert(pcie_port->rev_id >= AL_PCIE_REV_ID_4);
	al_assert(state <= AL_PCIE_LTSSM_RCVRY_EQ3);

	regs = (struct al_pcie_regs *)pcie_port->regs;

	al_reg_write32_masked(regs->app.global_ctrl.port_init,
		PCIE_W_GLOBAL_CTRL_PORT_INIT_LTSSM_STATE_CMP_0_MASK,
		state << PCIE_W_GLOBAL_CTRL_PORT_INIT_LTSSM_STATE_CMP_0_SHIFT);
}
/**************************** Post Link Start API *****************************/
/************************** Snoop Configuration API ***************************/
/*
 * Configure AXI master snoop mode for read and write channels.
 * The override bit is always set; the snoop bit follows enable_axi_snoop.
 *
 * @return always 0
 */
int
al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	const uint32_t ar_mask = PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP |
		PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP;
	const uint32_t aw_mask = PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP |
		PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP;
	uint32_t ar_val = enable_axi_snoop ?
		ar_mask : PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP;
	uint32_t aw_val = enable_axi_snoop ?
		aw_mask : PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP;

	/* Set snoop mode */
	al_dbg("PCIE_%d: snoop mode %s\n",
		pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");

	al_reg_write32_masked(regs->axi.ctrl.master_arctl, ar_mask, ar_val);
	al_reg_write32_masked(regs->axi.ctrl.master_awctl, aw_mask, aw_val);

	return 0;
}
/************************** Configuration Space API ***************************/
/** get base address of pci configuration space header */
/*
 * Return (via *addr) the base address of the PF's PCI configuration
 * space header.
 *
 * @return always 0
 *
 * Fix: restored '&regs->' which had been corrupted into a mis-encoded
 * '(R)s->' sequence.
 */
int al_pcie_config_space_get(struct al_pcie_pf *pcie_pf,
			     uint8_t __iomem **addr)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;

	*addr = (uint8_t __iomem *)&regs->core_space[pcie_pf->pf_num].config_header[0];
	return 0;
}
/* Read data from the local configuration space */
uint32_t al_pcie_local_cfg_space_read(
struct al_pcie_pf *pcie_pf,
unsigned int reg_offset)
{
struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
uint32_t data;
al_assert(reg_offset < AL_PCIE_CFG_SPACE_REGS_NUM);
data = al_reg_read32(®s->core_space[pcie_pf->pf_num].config_header[reg_offset]);
return data;
}
/* Write data to the local configuration space */
/*
 * Write one dword to the PF's local configuration space.
 *
 * @param reg_offset	dword index, must be below AL_PCIE_CFG_SPACE_REGS_NUM
 * @param cs2		AL_TRUE to write through DBI CS2 (shadow registers)
 * @param allow_ro_wr	AL_TRUE to temporarily unlock read-only fields
 *
 * Fix: restored '&regs->' which had been corrupted into a mis-encoded
 * '(R)s->' sequence.
 */
void al_pcie_local_cfg_space_write(
	struct al_pcie_pf *pcie_pf,
	unsigned int reg_offset,
	uint32_t data,
	al_bool cs2,
	al_bool allow_ro_wr)
{
	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	uint32_t *offset = &regs->core_space[pf_num].config_header[reg_offset];

	al_assert(reg_offset < AL_PCIE_CFG_SPACE_REGS_NUM);

	/* unlock RO fields only for the duration of this write */
	if (allow_ro_wr)
		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	if (cs2 == AL_FALSE)
		al_reg_write32(offset, data);
	else
		al_reg_write32_dbi_cs2(pcie_port, offset, data);

	if (allow_ro_wr)
		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
}
/** set target_bus and mask_target_bus */
/*
 * Program the outbound configuration target bus number and its mask in a
 * single read-modify-write of the cfg_target_bus register.
 *
 * @return always 0
 */
int al_pcie_target_bus_set(
	struct al_pcie_port *pcie_port,
	uint8_t target_bus,
	uint8_t mask_target_bus)
{
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
	uint32_t cfg;

	cfg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
	AL_REG_FIELD_SET(cfg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
			target_bus);
	AL_REG_FIELD_SET(cfg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
			mask_target_bus);
	al_reg_write32(regs->axi.ob_ctrl.cfg_target_bus, cfg);
	return 0;
}
/** get target_bus and mask_target_bus */
/*
 * Read back the outbound configuration target bus number and its mask.
 *
 * @param target_bus		[out] programmed target bus
 * @param mask_target_bus	[out] programmed bus mask
 * @return always 0
 */
int al_pcie_target_bus_get(
	struct al_pcie_port *pcie_port,
	uint8_t *target_bus,
	uint8_t *mask_target_bus)
{
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
	uint32_t reg;

	al_assert(target_bus);
	al_assert(mask_target_bus);

	reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
	*mask_target_bus = AL_REG_FIELD_GET(reg,
				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
	*target_bus = AL_REG_FIELD_GET(reg,
			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
	return 0;
}
/** Set secondary bus number */
/*
 * Program the secondary bus number field of the outbound cfg_control
 * register.
 *
 * @return always 0
 */
int al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32_masked(
		regs->axi.ob_ctrl.cfg_control,
		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
		((uint32_t)secbus) << PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
	return 0;
}
/** Set sub-ordinary bus number */
/*
 * Program the subordinate bus number field of the outbound cfg_control
 * register.
 *
 * @return always 0
 */
int al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32_masked(
		regs->axi.ob_ctrl.cfg_control,
		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
		((uint32_t)subbus) << PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
	return 0;
}
/* Enable/disable deferring incoming configuration requests */
/*
 * Enable/disable deferring incoming configuration requests
 * (Configuration Request Retry Status). The enable bit location differs
 * between register revisions.
 */
void al_pcie_app_req_retry_set(
	struct al_pcie_port *pcie_port,
	al_bool en)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t mask = (pcie_port->rev_id >= AL_PCIE_REV_ID_3) ?
		PCIE_W_REV3_4_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
		PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;

	al_reg_write32_masked(regs->app.global_ctrl.pm_control,
		mask, (en == AL_TRUE) ? mask : 0);
}
/* Check if deferring incoming configuration requests is enabled or not */
/*
 * Report whether deferring of incoming configuration requests is
 * currently enabled.
 */
al_bool al_pcie_app_req_retry_get_status(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t retry_en_bit;

	/* the enable bit moved between register revisions */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		retry_en_bit = PCIE_W_REV3_4_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
	else
		retry_en_bit = PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;

	if (al_reg_read32(regs->app.global_ctrl.pm_control) & retry_en_bit)
		return AL_TRUE;

	return AL_FALSE;
}
/* set configuration space reset on link down event */
/*
 * Enable/disable resetting the non-sticky configuration space when a
 * link-down event occurs (rev 3+ ports only).
 */
void al_pcie_port_cfg_rst_on_link_down_set(struct al_pcie_port *pcie_port, al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	/* the control bit exists only on rev 3 and later */
	al_assert(pcie_port->rev_id >= AL_PCIE_REV_ID_3);

	al_reg_write32_masked(regs->axi.ctrl.global,
		PCIE_REV3_4_AXI_CTRL_GLOBAL_CFG_ALLOW_NONSTICKY_RESET_WHEN_LNKDOWN_CLK_RST,
		enable ?
		PCIE_REV3_4_AXI_CTRL_GLOBAL_CFG_ALLOW_NONSTICKY_RESET_WHEN_LNKDOWN_CLK_RST :
		0);
}
/*************** Internal Address Translation Unit (ATU) API ******************/
/*
 * Return the number of iATU windows available for the given direction on
 * this port revision; 0 (after an assert) on invalid input.
 */
unsigned int al_pcie_atu_max_num_get(
	struct al_pcie_port *pcie_port,
	enum al_pcie_atu_dir direction)
{
	unsigned int rev;

	al_assert(pcie_port);

	rev = pcie_port->rev_id;

	if (direction == AL_PCIE_ATU_DIR_OUTBOUND) {
		if ((rev == AL_PCIE_REV_ID_1) || (rev == AL_PCIE_REV_ID_2))
			return AL_PCIE_REV_1_2_ATUS_OB_NUM;
		if ((rev == AL_PCIE_REV_ID_3) || (rev == AL_PCIE_REV_ID_4))
			return AL_PCIE_REV_3_4_ATUS_OB_NUM;
		al_assert_msg(0,
			"%s: invalid port rev ID [%d]\n",
			__func__,
			pcie_port->rev_id);
	} else if (direction == AL_PCIE_ATU_DIR_INBOUND) {
		if ((rev == AL_PCIE_REV_ID_1) || (rev == AL_PCIE_REV_ID_2))
			return AL_PCIE_REV_1_2_ATUS_IB_NUM;
		if (rev == AL_PCIE_REV_ID_3)
			return AL_PCIE_REV_3_ATUS_IB_NUM;
		if (rev == AL_PCIE_REV_ID_4) {
			/* rev 4 inbound count depends on the sub-revision */
			if (pcie_port->subrev_id == PCIE_AXI_DEVICE_ID_REG_REV_ID_X4)
				return AL_PCIE_REV_4_X4_ATUS_IB_NUM;
			return AL_PCIE_REV4_X8_X16_ATUS_IB_NUM;
		}
		al_assert_msg(0,
			"%s: invalid port rev ID [%d]\n",
			__func__,
			pcie_port->rev_id);
	} else {
		al_assert_msg(0,
			"%s: invalid direction [%d]\n",
			__func__,
			direction);
	}

	return 0;
}
/*
 * Validate an iATU region descriptor before programming.
 *
 * Disabled regions are always valid. For address-match mode the base,
 * limit and target must satisfy the alignment rules of either extended
 * mode (power-of-two size, size-aligned addresses) or regular mode
 * (4k alignment). For BAR-match mode only target 4k alignment is checked.
 *
 * @return AL_TRUE when the region can be programmed, AL_FALSE otherwise
 */
al_bool al_pcie_atu_region_is_valid(struct al_pcie_atu_region *atu_region)
{
	al_assert(atu_region);

	/* a disabled region needs no further validation */
	if (!atu_region->enable)
		return AL_TRUE;

	/*
	 * address match mode
	 */
	if (atu_region->match_mode == 0) {
		const uint64_t limit_sz_mask = atu_region->limit - atu_region->base_addr;

		if (atu_region->limit < atu_region->base_addr)
			return AL_FALSE;

		/* base and limit must be on the same side of the 4GB boundary */
		if ((atu_region->limit >= (1ULL << 32)) &&
				(atu_region->base_addr < (1ULL << 32))) {
			al_err("%s: limit and base must both be greater or smaller than 32b\n",
				__func__);
			return AL_FALSE;
		}

		/*
		 * if working in extended mode
		 */
		if (limit_sz_mask > AL_PCIE_ATU_ADDRESS_MATCH_EXT_MODE_THRESHOLD) {
			/**
			 * - size most be a power of 2
			 * - base address must be aligned to size
			 * - target address must be aligned to size
			 */
			if (!AL_IS_POW_OF_TWO(limit_sz_mask + 1)) {
				al_err("%s: size (0x%" PRIx64 ") "
					"must be a power of 2\n",
					__func__,
					limit_sz_mask + 1);
				return AL_FALSE;
			}

			if (atu_region->target_addr & limit_sz_mask) {
				al_err("%s: target address (0x%" PRIx64 ") "
					"not aligned to size (0x%" PRIx64 ")\n",
					__func__,
					atu_region->target_addr,
					limit_sz_mask + 1);
				return AL_FALSE;
			}

			if (atu_region->base_addr & limit_sz_mask) {
				al_err("%s: base address (0x%" PRIx64 ") "
					"not aligned to size (0x%" PRIx64 ")\n",
					__func__,
					atu_region->base_addr,
					limit_sz_mask + 1);
				return AL_FALSE;
			}
		} else {
			/**
			 * if working in regular mode:
			 * - base address most be aligned to 4k
			 * - limit address + 1 must be aligned to 4k
			 * - target address must be aligned to 4k
			 */
			if (atu_region->base_addr % 0x1000) {
				al_err("%s: base address (0x%" PRIx64 ") "
					"not aligned to 4k\n",
					__func__,
					atu_region->base_addr);
				return AL_FALSE;
			}

			if ((atu_region->limit + 1) % 0x1000) {
				al_err("%s: limit address (0x%" PRIx64 ") + 1 not aligned to 4k\n",
					__func__,
					atu_region->limit);
				return AL_FALSE;
			}

			if (atu_region->target_addr % 0x1000) {
				al_err("%s: target address (0x%" PRIx64 ") "
					"not aligned to 4k\n",
					__func__,
					atu_region->target_addr);
				return AL_FALSE;
			}
		}
	} else {
		/*
		 * bar match mode
		 */
		if (atu_region->target_addr % 0x1000) {
			al_err("%s: target address (0x%" PRIx64 ") "
				"not aligned to 4k\n",
				__func__,
				atu_region->target_addr);
			return AL_FALSE;
		}
	}

	return AL_TRUE;
}
/** program internal ATU region entry */
int al_pcie_atu_region_set(
struct al_pcie_port *pcie_port,
struct al_pcie_atu_region *atu_region)
{
struct al_pcie_regs *regs = pcie_port->regs;
enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
unsigned int max_atus = al_pcie_atu_max_num_get(pcie_port, atu_region->direction);
uint32_t reg;
/*
* assert in case index in out of bounds
* since HW will accept wrong index write
* and other ATU register writes will have no affect.
*/
al_assert(max_atus > atu_region->index);
/**
* Addressing RMN: 5384
*
* RMN description:
* From SNPS (also included in the data book) Dynamic iATU Programming
* With AHB/AXI Bridge Module When the bridge slave interface clock
* (hresetn or slv_aclk) is asynchronous to the PCIe native core clock
* (core_clk), you must not update the iATU registers while operations
* are in progress on the AHB/AXI bridge slave interface. The iATU
* registers are in the core_clk clock domain. The register outputs are
* used in the AHB/AXI bridge slave interface clock domain. There is no
* synchronization logic between these registers and the AHB/AXI bridge
* slave interface.
*
* Software flow:
* Do not allow configuring Outbound iATU after link is started
*/
if ((atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)
&& (al_pcie_is_link_started(pcie_port))) {
if (!atu_region->enforce_ob_atu_region_set) {
al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
pcie_port->port_id);
al_assert(AL_FALSE);
return -EINVAL;
} else {
al_info("PCIe %d: setting OB iATU even after link is started\n",
pcie_port->port_id);
}
}
/* set atu index and direction */
reg = 0;
AL_REG_FIELD_SET(reg,
PCIE_IATU_INDEX_REGION_INDEX_MASK,
PCIE_IATU_INDEX_REGION_INDEX_SHIFT,
atu_region->index);
AL_REG_BIT_VAL_SET(reg, PCIE_IATU_INDEX_REGION_DIR_SHIFT, atu_region->direction);
al_reg_write32(®s->port_regs->iatu.index, reg);
/* in case of disable no need validate/write to configuration/address/limit registers */
if (!atu_region->enable) {
/* enable = 0 */
al_reg_write32(®s->port_regs->iatu.cr2, 0);
return 0;
}
/**
* iATU base/target addresses must be aligned to the size if:
* - address match-mode
* - size is larger than 4GB
*/
if (atu_region->match_mode == 0) {
const uint64_t limit_sz_mask = atu_region->limit - atu_region->base_addr;
if (limit_sz_mask > (1ULL << 32) /* 4GB */) {
if (atu_region->target_addr & limit_sz_mask) {
al_err("PCIe %d: target addr (0x%" PRIx64 ") "
"not aligned to size (0x%" PRIx64 ")\n",
pcie_port->port_id, atu_region->target_addr, limit_sz_mask);
return -EINVAL;
}
if (atu_region->base_addr & limit_sz_mask) {
al_err("PCIe %d: base addr (0x%" PRIx64 ") "
"not aligned to size (0x%" PRIx64 ")\n",
pcie_port->port_id, atu_region->base_addr, limit_sz_mask);
return -EINVAL;
}
}
}
al_reg_write32(®s->port_regs->iatu.lower_base_addr,
(uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
al_reg_write32(®s->port_regs->iatu.upper_base_addr,
(uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
al_reg_write32(®s->port_regs->iatu.lower_target_addr,
(uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
al_reg_write32(®s->port_regs->iatu.upper_target_addr,
(uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
/* configure the limit, not needed when working in BAR match mode */
if (atu_region->match_mode == 0) {
uint32_t limit_reg_val;
uint32_t *limit_ext_reg =
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
®s->app.atu.out_mask_pair[atu_region->index / 2] :
®s->app.atu.in_mask_pair[atu_region->index / 2];
uint32_t limit_ext_reg_mask =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
unsigned int limit_ext_reg_shift =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
uint64_t limit_sz_msk =
atu_region->limit - atu_region->base_addr;
uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
32) & 0xFFFFFFFF);
if (limit_ext_reg_val) {
limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
al_assert(limit_reg_val == 0xFFFFFFFF);
} else {
limit_reg_val = (uint32_t)(atu_region->limit &
0xFFFFFFFF);
}
al_reg_write32_masked(
limit_ext_reg,
limit_ext_reg_mask,
limit_ext_reg_val << limit_ext_reg_shift);
al_reg_write32(®s->port_regs->iatu.limit_addr,
limit_reg_val);
}
/**
* Addressing RMN: 3186
*
* RMN description:
* Bug in SNPS IP (versions 4.21 , 4.10a-ea02)
* In CFG request created via outbound atu (shift mode) bits [27:12] go to
* [31:16] , the shifting is correct , however the ATU leaves bit [15:12]
* to their original values, this is then transmited in the tlp .
* Those bits are currently reserved ,bit might be non-resv. in future generations .
*
* Software flow:
* Enable HW fix
* rev=REV1,REV2 set bit 15 in corresponding app_reg.atu.out_mask
* rev>REV2 set corresponding bit is app_reg.atu.reg_out_mask
*/
if ((atu_region->cfg_shift_mode == AL_TRUE) &&
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)) {
if (pcie_port->rev_id > AL_PCIE_REV_ID_2) {
al_reg_write32_masked(regs->app.atu.reg_out_mask,
1 << (atu_region->index) ,
1 << (atu_region->index));
} else {
uint32_t *limit_ext_reg =
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
®s->app.atu.out_mask_pair[atu_region->index / 2] :
®s->app.atu.in_mask_pair[atu_region->index / 2];
uint32_t limit_ext_reg_mask =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
unsigned int limit_ext_reg_shift =
(atu_region->index % 2) ?
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
al_reg_write32_masked(
limit_ext_reg,
limit_ext_reg_mask,
(AL_BIT(15)) << limit_ext_reg_shift);
}
}
reg = 0;
AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
if ((pcie_port->rev_id >= AL_PCIE_REV_ID_3)
&& (op_mode == AL_PCIE_OPERATING_MODE_EP)
&& (((atu_region->function_match_bypass_mode) &&
(atu_region->direction == AL_PCIE_ATU_DIR_INBOUND)) ||
((!atu_region->function_match_bypass_mode) &&
(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)))) {
AL_REG_FIELD_SET(reg,
PCIE_IATU_CR1_FUNC_NUM_MASK,
PCIE_IATU_CR1_FUNC_NUM_SHIFT,
atu_region->function_match_bypass_mode_number);
}
al_reg_write32(®s->port_regs->iatu.cr1, reg);
/* Enable/disable the region. */
reg = 0;
AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
AL_REG_FIELD_SET(reg, 0x3 << 24, 24, atu_region->response);
AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
AL_REG_BIT_SET(reg, 31);
/* In outbound, enable function bypass
* In inbound, enable function match mode
* Note: this is the same bit, has different meanings in ob/ib ATUs
*/
if (op_mode == AL_PCIE_OPERATING_MODE_EP)
AL_REG_FIELD_SET(reg,
PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK,
PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_SHIFT,
atu_region->function_match_bypass_mode ? 0x1 : 0x0);
al_reg_write32(®s->port_regs->iatu.cr2, reg);
return 0;
}
/*
 * Read back a programmed iATU region: select the window via the index
 * register, then decode the address, cr1 and cr2 registers into
 * 'atu_region'.
 *
 * Fix: restored every '&regs->' that had been corrupted into a
 * mis-encoded '(R)s->' sequence.
 */
void al_pcie_atu_region_get(
	struct al_pcie_port *pcie_port,
	enum al_pcie_atu_dir direction,
	uint8_t index,
	struct al_pcie_atu_region *atu_region)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int max_atus = al_pcie_atu_max_num_get(pcie_port, direction);
	uint32_t reg = 0;

	al_assert(atu_region);

	/*
	 * assert in case index in out of bounds
	 * since HW will accept wrong index write
	 * and other ATU register writes will have no affect.
	 */
	al_assert(max_atus > index);

	/*
	 * set desired ATU
	 */
	AL_REG_FIELD_SET(reg,
		PCIE_IATU_INDEX_REGION_INDEX_MASK,
		PCIE_IATU_INDEX_REGION_INDEX_SHIFT,
		index);
	AL_REG_BIT_VAL_SET(reg, PCIE_IATU_INDEX_REGION_DIR_SHIFT, direction);
	al_reg_write32(&regs->port_regs->iatu.index, reg);

	reg = al_reg_read32(&regs->port_regs->iatu.upper_base_addr);
	atu_region->base_addr = (uint64_t)reg << 32;
	atu_region->base_addr |= al_reg_read32(&regs->port_regs->iatu.lower_base_addr);

	atu_region->target_addr =
		(uint64_t)al_reg_read32(&regs->port_regs->iatu.upper_target_addr) << 32;
	atu_region->target_addr |= al_reg_read32(&regs->port_regs->iatu.lower_target_addr);

	/*
	 * limit high address bits are the same as base address high bits
	 */
	atu_region->limit = (uint64_t)reg << 32;
	atu_region->limit |= al_reg_read32(&regs->port_regs->iatu.limit_addr);

	reg = al_reg_read32(&regs->port_regs->iatu.cr1);
	atu_region->function_match_bypass_mode_number =
		AL_REG_FIELD_GET(reg,
			PCIE_IATU_CR1_FUNC_NUM_MASK,
			PCIE_IATU_CR1_FUNC_NUM_SHIFT);
	atu_region->attr = AL_REG_FIELD_GET(reg,
		PCIE_IATU_CR1_ATTER_MASK,
		PCIE_IATU_CR1_ATTER_SHIFT);
	atu_region->tlp_type = AL_REG_FIELD_GET(reg,
		PCIE_IATU_CR1_TYPE_MASK,
		PCIE_IATU_CR1_TYPE_SHIFT);

	reg = al_reg_read32(&regs->port_regs->iatu.cr2);
	atu_region->enable = AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_ENABLE);
	atu_region->match_mode = AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_MATCH_MODE);
	atu_region->invert_matching = AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_INVERT_MODE);
	atu_region->msg_code = AL_REG_FIELD_GET(reg,
		PCIE_IATU_CR2_MSG_CODE_MASK,
		PCIE_IATU_CR2_MSG_CODE_SHIFT);
	atu_region->bar_number = AL_REG_FIELD_GET(reg,
		PCIE_IATU_CR2_MSG_BAR_NUM_MASK,
		PCIE_IATU_CR2_MSG_BAR_NUM_SHIFT);
	atu_region->response = AL_REG_FIELD_GET(reg,
		PCIE_IATU_CR2_RESPONSE_CODE_MASK,
		PCIE_IATU_CR2_RESPONSE_CODE_SHIFT);
	atu_region->enable_attr_match_mode = AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_ATTR_MATCH_EN);
	atu_region->enable_msg_match_mode =
		AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_MSG_CODE_MATCH_EN);
	atu_region->cfg_shift_mode = AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_CFG_SHIFT_MODE);
	atu_region->function_match_bypass_mode =
		AL_REG_MASK_IS_SET(reg, PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK);
}
/** Obtain the enable flag and base/target addresses of an internal ATU region. */
void al_pcie_atu_region_get_fields(
	struct al_pcie_port *pcie_port,
	enum al_pcie_atu_dir direction, uint8_t index,
	al_bool *enable, uint64_t *base_addr, uint64_t *target_addr)
{
	struct al_pcie_atu_region region;

	/* Fetch the full region descriptor, then expose only the
	 * requested fields to the caller */
	al_pcie_atu_region_get(pcie_port, direction, index, &region);

	*enable = region.enable;
	*base_addr = region.base_addr;
	*target_addr = region.target_addr;
}
/*
 * Configure the outbound AXI I/O address window [start, end] and enable
 * I/O BAR matching on the AXI slave interface.
 */
void al_pcie_axi_io_config(
	struct al_pcie_port *pcie_port,
	al_phys_addr_t start,
	al_phys_addr_t end)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	/* Program the 64-bit window start address (high then low dword) */
	al_reg_write32(regs->axi.ob_ctrl.io_start_h,
		(uint32_t)((start >> 32) & 0xFFFFFFFF));
	al_reg_write32(regs->axi.ob_ctrl.io_start_l,
		(uint32_t)(start & 0xFFFFFFFF));

	/* Program the 64-bit window limit address (high then low dword) */
	al_reg_write32(regs->axi.ob_ctrl.io_limit_h,
		(uint32_t)((end >> 32) & 0xFFFFFFFF));
	al_reg_write32(regs->axi.ob_ctrl.io_limit_l,
		(uint32_t)(end & 0xFFFFFFFF));

	/* Enable the I/O BAR after the window is fully programmed */
	al_reg_write32_masked(regs->axi.ctrl.slv_ctl,
		PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
		PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
}
/************** Interrupt and Event generation (Endpoint mode Only) API *****************/

/*
 * Signal Function-Level-Reset completion for this PF by pulsing the
 * FLR-done event bit: set it, hold for the required interval, clear it.
 */
int al_pcie_pf_flr_done_gen(struct al_pcie_pf *pcie_pf)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;

	al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE,
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE);
	al_udelay(AL_PCIE_FLR_DONE_INTERVAL);
	al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE, 0);

	return 0;
}
/** generate INTx Assert/DeAssert Message */
int al_pcie_legacy_int_gen(
	struct al_pcie_pf *pcie_pf,
	al_bool assert,
	enum al_pcie_legacy_int_type type)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	uint32_t reg;

	al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */

	/* Read-modify-write: bit 3 of events_gen drives the INTA level.
	 * NOTE(review): magic bit index 3 — presumably the ASSERT_INTA
	 * event bit; confirm against the events_gen register spec. */
	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
	AL_REG_BIT_VAL_SET(reg, 3, !!assert);
	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);

	return 0;
}
/** generate MSI interrupt */
int al_pcie_msi_int_gen(struct al_pcie_pf *pcie_pf, uint8_t vector)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	uint32_t reg;

	/* set msi vector and clear MSI request
	 * (bit 4 is the MSI request strobe; it is first cleared, the vector
	 * field is programmed, then the bit is raised to trigger the MSI) */
	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
	AL_REG_BIT_CLEAR(reg, 4);
	AL_REG_FIELD_SET(reg,
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
		PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
		vector);
	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
	/* set MSI request */
	AL_REG_BIT_SET(reg, 4);
	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);

	return 0;
}
/** configure MSIX capability */
int al_pcie_msix_config(
	struct al_pcie_pf *pcie_pf,
	struct al_pcie_msix_params *msix_params)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	uint32_t msix_reg0;

	/* Temporarily allow writes to read-only config space (per the
	 * wr_to_ro helper name) so the MSI-X capability can be programmed */
	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_TRUE);

	/* Table size is encoded as N-1 in the message control field */
	msix_reg0 = al_reg_read32(regs->core_space[pf_num].msix_cap_base);
	msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
	msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
		AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
	al_reg_write32(regs->core_space[pf_num].msix_cap_base, msix_reg0);

	/* Table offset & BAR */
	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
		(msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
		(msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));

	/* PBA offset & BAR */
	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
		(msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
		(msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));

	/* Restore the read-only protection */
	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_FALSE);

	return 0;
}
/** Report whether the MSI-X enable bit is set in this PF's message control. */
al_bool al_pcie_msix_enabled(struct al_pcie_pf *pcie_pf)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	uint32_t msgctrl;

	msgctrl = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);

	return (msgctrl & AL_PCI_MSIX_MSGCTRL_EN) ? AL_TRUE : AL_FALSE;
}
/** Report whether the MSI-X function-mask bit is set in this PF's message control. */
al_bool al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
{
	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
	uint32_t msgctrl;

	msgctrl = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);

	return (msgctrl & AL_PCI_MSIX_MSGCTRL_MASK) ? AL_TRUE : AL_FALSE;
}
/******************** Advanced Error Reporting (AER) API **********************/
/************************* Auxiliary functions ********************************/
/* configure AER capability */
/*
 * Validate the AER extended capability header, then program the
 * correctable/uncorrectable masks, severity, ECRC options and the
 * device-control error-reporting enables for the given PF.
 * Returns 0 on success, -EIO if the AER capability header does not match
 * the expected ID/version.
 */
static int al_pcie_aer_config_aux(
	struct al_pcie_port *pcie_port,
	unsigned int pf_num,
	struct al_pcie_aer_params *params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
	uint32_t reg_val;

	/* Sanity: the capability header must carry the AER ID and version */
	reg_val = al_reg_read32(&aer_regs->header);

	if (((reg_val & PCIE_AER_CAP_ID_MASK) >> PCIE_AER_CAP_ID_SHIFT) !=
		PCIE_AER_CAP_ID_VAL)
		return -EIO;

	if (((reg_val & PCIE_AER_CAP_VER_MASK) >> PCIE_AER_CAP_VER_SHIFT) !=
		PCIE_AER_CAP_VER_VAL)
		return -EIO;

	/* Mask registers are inverted: a set mask bit suppresses the error */
	al_reg_write32(&aer_regs->corr_err_mask, ~params->enabled_corr_err);

	al_reg_write32(&aer_regs->uncorr_err_mask,
		~((params->enabled_uncorr_non_fatal_err) |
		(params->enabled_uncorr_fatal_err)));

	/* Severity: bits set here report as fatal, clear bits as non-fatal */
	al_reg_write32(&aer_regs->uncorr_err_severity,
		params->enabled_uncorr_fatal_err);

	al_reg_write32(&aer_regs->cap_and_ctrl,
		(params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
		(params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));

	/**
	 * Addressing RMN: 5119
	 *
	 * RMN description:
	 * ECRC generation for outbound request translated by iATU is effected
	 * by iATU setting instead of ecrc_gen_bit in AER
	 *
	 * Software flow:
	 * When enabling ECRC generation, set the outbound iATU to generate ECRC
	 */
	if (params->ecrc_gen_en == AL_TRUE) {
		al_pcie_ecrc_gen_ob_atu_enable(pcie_port, pf_num);
	}

	/* Mirror the enables into the PCIe device control/status register;
	 * unsupported-request reporting follows either fatal or non-fatal
	 * selection of the UNSUPRT_REQ error bit */
	al_reg_write32_masked(
		regs->core_space[pf_num].pcie_dev_ctrl_status,
		PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
		PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
		PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
		PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN,
		(params->enabled_corr_err ?
		 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN : 0) |
		(params->enabled_uncorr_non_fatal_err ?
		 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN : 0) |
		(params->enabled_uncorr_fatal_err ?
		 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN : 0) |
		((params->enabled_uncorr_non_fatal_err &
		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0) |
		((params->enabled_uncorr_fatal_err &
		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0));

	return 0;
}
/** Read the AER uncorrectable error status and clear the reported bits. */
static unsigned int al_pcie_aer_uncorr_get_and_clear_aux(
	struct al_pcie_port *pcie_port,
	unsigned int pf_num)
{
	struct al_pcie_core_aer_regs *aer = pcie_port->regs->core_space[pf_num].aer;
	uint32_t status;

	/* Write the read value back to clear the latched errors */
	status = al_reg_read32(&aer->uncorr_err_stat);
	al_reg_write32(&aer->uncorr_err_stat, status);

	return status;
}
/** Read the AER correctable error status and clear the reported bits. */
static unsigned int al_pcie_aer_corr_get_and_clear_aux(
	struct al_pcie_port *pcie_port,
	unsigned int pf_num)
{
	struct al_pcie_core_aer_regs *aer = pcie_port->regs->core_space[pf_num].aer;
	uint32_t status;

	/* Write the read value back to clear the latched errors */
	status = al_reg_read32(&aer->corr_err_stat);
	al_reg_write32(&aer->corr_err_stat, status);

	return status;
}
#if (AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS != 4)
#error Wrong assumption!
#endif
/** Copy the AER header-log (TLP header of the detected error) into hdr[]. */
static void al_pcie_aer_err_tlp_hdr_get_aux(
	struct al_pcie_port *pcie_port,
	unsigned int pf_num,
	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
	struct al_pcie_core_aer_regs *aer = pcie_port->regs->core_space[pf_num].aer;
	int dword;

	for (dword = 0; dword < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; dword++)
		hdr[dword] = al_reg_read32(&aer->header_log[dword]);
}
/******************** EP AER functions **********************/
/** Configure the AER capability of an EP physical function. */
int al_pcie_aer_config(
	struct al_pcie_pf *pcie_pf,
	struct al_pcie_aer_params *params)
{
	struct al_pcie_port *port;

	al_assert(pcie_pf);
	al_assert(params);

	port = pcie_pf->pcie_port;
	return al_pcie_aer_config_aux(port, pcie_pf->pf_num, params);
}
/** Get-and-clear AER uncorrectable error status for an EP physical function. */
unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
{
	al_assert(pcie_pf);

	return al_pcie_aer_uncorr_get_and_clear_aux(pcie_pf->pcie_port,
						    pcie_pf->pf_num);
}
/** Get-and-clear AER correctable error status for an EP physical function. */
unsigned int al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
{
	al_assert(pcie_pf);

	return al_pcie_aer_corr_get_and_clear_aux(pcie_pf->pcie_port,
						  pcie_pf->pf_num);
}
/**
 * Retrieve, for an EP physical function, the header of the TLP that
 * corresponds to a detected AER error.
 */
void al_pcie_aer_err_tlp_hdr_get(
	struct al_pcie_pf *pcie_pf,
	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
	al_assert(pcie_pf);
	al_assert(hdr);

	al_pcie_aer_err_tlp_hdr_get_aux(pcie_pf->pcie_port, pcie_pf->pf_num,
					hdr);
}
/******************** RC AER functions **********************/
/**
 * Configure the AER capability of an RC port.
 * RC mode has no PFs (nor PF handles), so PF#0 is used internally.
 */
int al_pcie_port_aer_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_aer_params *params)
{
	al_assert(pcie_port);
	al_assert(params);

	return al_pcie_aer_config_aux(pcie_port, 0, params);
}
/**
 * Get-and-clear AER uncorrectable error status for an RC port.
 * RC mode has no PFs (nor PF handles), so PF#0 is used internally.
 */
unsigned int al_pcie_port_aer_uncorr_get_and_clear(
	struct al_pcie_port *pcie_port)
{
	al_assert(pcie_port);

	return al_pcie_aer_uncorr_get_and_clear_aux(pcie_port, 0);
}
/**
 * Get-and-clear AER correctable error status for an RC port.
 * RC mode has no PFs (nor PF handles), so PF#0 is used internally.
 */
unsigned int al_pcie_port_aer_corr_get_and_clear(
	struct al_pcie_port *pcie_port)
{
	al_assert(pcie_port);

	return al_pcie_aer_corr_get_and_clear_aux(pcie_port, 0);
}
/**
 * Retrieve, for an RC port, the header of the TLP that corresponds to a
 * detected AER error. RC mode has no PFs, so PF#0 is used internally.
 */
void al_pcie_port_aer_err_tlp_hdr_get(
	struct al_pcie_port *pcie_port,
	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
{
	al_assert(pcie_port);
	al_assert(hdr);

	al_pcie_aer_err_tlp_hdr_get_aux(pcie_port, 0, hdr);
}
/********************** Loopback mode (RC and Endpoint modes) ************/
/** enter local pipe loopback mode */
int al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode\n", pcie_port->port_id);

	/* Enable PIPE loopback, then the port-level loopback enable */
	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
		1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
		1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);

	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
		1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
		1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);

	return 0;
}
/**
 * @brief exit local pipe loopback mode
 *
 * Clears both bits that al_pcie_local_pipe_loopback_enter() set.
 *
 * @param pcie_port pcie port handle
 * @return 0 if no error found
 */
int al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode\n", pcie_port->port_id);

	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
		1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
		0);

	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
		1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
		0);

	return 0;
}
/** enter remote loopback mode */
int al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_dbg("PCIe %d: Enter REMOTE Loopback mode\n", pcie_port->port_id);

	/* NOTE(review): this writes port_link_ctrl using the
	 * PIPE_LOOPBACK_CTRL shift, while al_pcie_remote_loopback_exit()
	 * clears PCIE_PORT_LINK_CTRL_LB_EN_SHIFT in the same register.
	 * Unless the two shift values coincide, enter/exit toggle different
	 * bits — confirm against the port_link_ctrl register spec. */
	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
		1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
		1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);

	return 0;
}
/**
 * @brief exit remote loopback mode
 *
 * Clears the port-level loopback enable bit in port_link_ctrl.
 *
 * @param pcie_port pcie port handle
 * @return 0 if no error found
 */
int al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_dbg("PCIe %d: Exit REMOTE Loopback mode\n", pcie_port->port_id);

	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
		1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
		0);

	return 0;
}
/* Human-readable name of an interrupt-forwarding unit (for log messages). */
static const char *al_pcie_interrupt_forwarding_unit_to_str(enum al_pcie_int_fwd_unit unit)
{
	if (unit == AL_PCIE_INT_FWD_UNIT_ETH)
		return "Ethernet";

	return "unknown unit";
}
/*
 * Map an Ethernet unit index to the legacy-INTA mask register(s) and bit
 * mask(s) that control its interrupt forwarding. Odd-numbered units straddle
 * two mask registers, hence the (inta_a, mask_a) / (inta_b, mask_b) pair;
 * unused outputs are left untouched (pointers) or zeroed (masks).
 */
static void al_pcie_interrupt_forwarding_eth_registers(struct al_pcie_port *pcie_port,
	int pf_num, int unit_sub_index, uint32_t **inta_a, uint32_t **inta_b,
	uint32_t *mask_a, uint32_t *mask_b)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	struct al_pcie_reg_ptrs_app_soc_int *soc_int = &regs->app.soc_int[pf_num];

	al_assert(unit_sub_index < AL_PCIE_INT_FWD_UNIT_ETH_IDX_MAX);

	*mask_a = 0;
	*mask_b = 0;

	switch (unit_sub_index) {
	case 0:
		*inta_a = soc_int->mask_inta_leg_0;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_0_MASK;
		break;
	case 1:
		/* unit 1 spans mask registers 0 and 1 */
		*inta_a = soc_int->mask_inta_leg_0;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_1_MASK_A;
		*inta_b = soc_int->mask_inta_leg_1;
		*mask_b = PCIE_INTERRUPT_FORWARDING_ETH_1_MASK_B;
		break;
	case 2:
		*inta_a = soc_int->mask_inta_leg_1;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_2_MASK;
		break;
	case 3:
		/* unit 3 spans mask registers 1 and 2 */
		*inta_a = soc_int->mask_inta_leg_1;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_3_MASK_A;
		*inta_b = soc_int->mask_inta_leg_2;
		*mask_b = PCIE_INTERRUPT_FORWARDING_ETH_3_MASK_B;
		break;
	case 4:
		*inta_a = soc_int->mask_inta_leg_2;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_4_MASK;
		break;
	case 5:
		/* unit 5 spans mask registers 2 and 3 */
		*inta_a = soc_int->mask_inta_leg_2;
		*mask_a = PCIE_INTERRUPT_FORWARDING_ETH_5_MASK_A;
		*inta_b = soc_int->mask_inta_leg_3;
		*mask_b = PCIE_INTERRUPT_FORWARDING_ETH_5_MASK_B;
		break;
	}

	return;
}
/* Disable interrupt forwarding for one Ethernet unit by masking its bits. */
static void interrupt_forwarding_eth_disable(struct al_pcie_port *pcie_port,
	int pf_num, int unit_sub_index)
{
	uint32_t *inta_reg_a = NULL;
	uint32_t *inta_reg_b = NULL;
	uint32_t mask_a, mask_b;

	/**
	 * Each eth unit has 17 bit wide mask to control interrupt forwarding
	 * 4 bit per UDMA (bits 0:15) and 1 bit for the adapter
	 */
	al_pcie_interrupt_forwarding_eth_registers(pcie_port, pf_num, unit_sub_index,
		&inta_reg_a, &inta_reg_b, &mask_a, &mask_b);

	/** Mask the interrupt ack bits (set bits == forwarding disabled) */
	al_reg_write32_masked(inta_reg_a, mask_a, mask_a);
	if (inta_reg_b)	/* only odd units use a second mask register */
		al_reg_write32_masked(inta_reg_b, mask_b, mask_b);
}
/* Enable interrupt forwarding for one Ethernet unit by unmasking its bits. */
static void interrupt_forwarding_eth_enable(struct al_pcie_port *pcie_port,
	int pf_num, int unit_sub_index)
{
	uint32_t *inta_reg_a = NULL;
	uint32_t *inta_reg_b = NULL;
	uint32_t mask_a, mask_b;

	al_pcie_interrupt_forwarding_eth_registers(pcie_port, pf_num, unit_sub_index,
		&inta_reg_a, &inta_reg_b, &mask_a, &mask_b);

	/** Unmask the interrupt ack bits (cleared bits == forwarding enabled) */
	al_reg_write32_masked(inta_reg_a, mask_a, 0);
	if (inta_reg_b)	/* only odd units use a second mask register */
		al_reg_write32_masked(inta_reg_b, mask_b, 0);
}
/* Disable interrupt forwarding for a given unit; only Ethernet is supported. */
int al_pcie_interrupt_forwarding_disable(struct al_pcie_port *pcie_port, int pf_num,
	enum al_pcie_int_fwd_unit unit, int unit_sub_index)
{
	al_assert(pcie_port);
	al_assert(al_pcie_port_is_enabled(pcie_port));

	if (unit == AL_PCIE_INT_FWD_UNIT_ETH) {
		interrupt_forwarding_eth_disable(pcie_port, pf_num, unit_sub_index);
	} else {
		al_err("%s: No support for unit %s (%d)\n",
			__func__, al_pcie_interrupt_forwarding_unit_to_str(unit) , unit);
	}

	return 0;
}
/* Enable interrupt forwarding for a given unit; only Ethernet is supported. */
int al_pcie_interrupt_forwarding_enable(struct al_pcie_port *pcie_port, int pf_num,
	enum al_pcie_int_fwd_unit unit, int unit_sub_index)
{
	al_assert(pcie_port);
	al_assert(al_pcie_port_is_enabled(pcie_port));

	if (unit == AL_PCIE_INT_FWD_UNIT_ETH) {
		interrupt_forwarding_eth_enable(pcie_port, pf_num, unit_sub_index);
	} else {
		al_err("%s: No support for unit %s (%d)\n",
			__func__, al_pcie_interrupt_forwarding_unit_to_str(unit) , unit);
	}

	return 0;
}
/********************** Parity Errors ************/
/*
 * Enable/disable core RAM parity error detection and its interrupt.
 * When disabling, the group-B interrupt cause is masked as well.
 */
int al_pcie_port_ram_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	/* All-ones enables parity checking on every core RAM */
	al_reg_write32(regs->app.parity.en_core,
		(enable == AL_TRUE) ? 0xffffffff : 0x0);

	/* Mask (set) the interrupt bit when parity is disabled */
	al_reg_write32_masked(&regs->app.int_grp_b->mask,
		PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE,
		(enable != AL_TRUE) ?
		PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE : 0);

	return 0;
}
/*
 * Enable/disable AXI-domain RAM parity detection and its interrupts.
 * Revision-specific erratum workarounds exclude known-false-positive RAMs
 * and interrupt causes (see the RMN notes inline).
 */
int al_pcie_port_axi_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t parity_enable_value;
	uint32_t parity_enable_mask = 0xffffffff;

	/**
	 * Addressing RMN: 5603
	 *
	 * RMN description:
	 * u4_ram2p signal false parity error
	 *
	 * Software flow:
	 * Disable parity check for this memory
	 */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		parity_enable_mask &= ~PCIE_AXI_PARITY_EN_AXI_U4_RAM2P;

	/**
	 * Addressing RMN: 11118
	 *
	 * RMN description:
	 * u5_ram2p signal false parity error
	 *
	 * Software flow:
	 * Disable parity check for this memory
	 */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
		parity_enable_mask &= ~PCIE_AXI_PARITY_AXI_U5_RAM2P;

	al_reg_write32(regs->axi.parity.en_axi,
		(enable == AL_TRUE) ? parity_enable_mask : 0x0);

	/* Parity calculation stays on in both cases; only error reporting
	 * (RD/WR error-enable bits) follows the 'enable' argument */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_4_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	} else {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	}

	/* Interrupt mask: set bits mask the cause (disable reporting) */
	parity_enable_mask = (PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI);

	if (!enable)
		parity_enable_value = parity_enable_mask;
	else {
		parity_enable_value = 0;

		/**
		 * Addressing RMN: 11096
		 *
		 * RMN description:
		 * False indication of AXI slave data parity interrupt on V1
		 *
		 * Software flow:
		 * mask and ignore this interrupt
		 */
		if (pcie_port->rev_id == AL_PCIE_REV_ID_1)
			parity_enable_value |= PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR;
	}

	al_reg_write32_masked(&regs->axi.int_grp_a->mask,
		parity_enable_mask,
		parity_enable_value);

	return 0;
}
/*
 * Read (and thereby clear) the core RAM parity status register and
 * translate each set bit into the corresponding per-RAM flag in 'stats'.
 * The bit layout differs between rev 1/2 and rev >= 3.
 */
void al_pcie_core_parity_stats_get(struct al_pcie_port *pcie_port,
	struct al_pcie_core_parity_stats *stats)
{
	uint32_t status;

	al_assert(pcie_port);
	al_assert(stats);

	al_memset(stats, 0, sizeof(*stats));

	/* this clears all turned on bits */
	status = al_reg_read32(pcie_port->regs->app.parity.status_core);
	if (status) {
		/* only the low 13 bits carry per-RAM error indications */
		stats->num_of_errors = al_popcount(status & AL_BIT_MASK(13));
		if (pcie_port->rev_id >= AL_PCIE_REV_ID_3) {
			if (status & PCIE_W_V3_CORE_PARITY_CORE_RAM_1P_RBUF)
				stats->ram_1p_rbuf = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_RAM_2P_SOTBUF)
				stats->ram_2p_sotbuf = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U0_RAM_RADM_QBUFFER_HDR)
				stats->u0_ram_radm_qbuffer_hdr = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U3_RAM_RADM_QBUFFER_DATA_0)
				stats->u3_ram_radm_qbuffer_data_0 = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U3_RAM_RADM_QBUFFER_DATA_1)
				stats->u3_ram_radm_qbuffer_data_1 = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U10_RAM2P_0)
				stats->u10_ram2p_0 = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U10_RAM2P_1)
				stats->u10_ram2p_1 = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U8_RAM2P)
				stats->u8_ram2p = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U7_RAM2P)
				stats->u7_ram2p = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U6_RAM)
				stats->u6_ram = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U11_RAM2P)
				stats->u11_ram2p = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U1_RAM2P)
				stats->u1_ram2p = AL_TRUE;
			if (status & PCIE_W_V3_CORE_PARITY_CORE_U0_RAM2P)
				stats->u0_ram2p = AL_TRUE;
		} else {
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U_RAM_1P_SOTBUF)
				stats->u_ram_1p_sotbuf = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U0_RAM_RADM_QBUFFER)
				stats->u0_ram_radm_qbuffer = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U3_QBUFFER_0)
				stats->u3_qbuffer_0 = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U3_QBUFFER_1)
				stats->u3_qbuffer_1 = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U9_DECOMP)
				stats->u9_decomp = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U8_RAM2P)
				stats->u8_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U7_RAM2P)
				stats->u7_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U6_RAM2P)
				stats->u6_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U11_RAM2P)
				stats->u11_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U1_RAM2P)
				stats->u1_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U0_RAM2P)
				stats->u0_ram2p = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U0_RBUF)
				stats->u0_rbuf = AL_TRUE;
			if (status & PCIE_W_V1_V2_CORE_PARITY_CORE_U3_QBUFFER_2)
				stats->u3_qbuffer_2 = AL_TRUE;
		}
	}
}
/*
 * Read (and thereby clear) the AXI RAM parity status register and
 * translate each set bit into the corresponding per-RAM flag in 'stats'.
 * The valid bit set is revision dependent.
 */
void al_pcie_axi_parity_stats_get(struct al_pcie_port *pcie_port,
	struct al_pcie_axi_parity_stats *stats)
{
	uint32_t status, mask;

	al_assert(pcie_port);
	al_assert(stats);

	al_memset(stats, 0, sizeof(*stats));

	/* this clears all turned on bits */
	status = al_reg_read32(pcie_port->regs->axi.parity.status_axi);
	if (status) {
		/* bits common to all revisions */
		mask = (PCIE_AXI_PARITY_AXI_U5_RAM2P |
			PCIE_AXI_PARITY_AXI_U4_RAM2P |
			PCIE_AXI_PARITY_AXI_U3_RAM2P);
		if (status & PCIE_AXI_PARITY_AXI_U5_RAM2P)
			stats->u5_ram2p = AL_TRUE;
		if (status & PCIE_AXI_PARITY_AXI_U4_RAM2P)
			stats->u4_ram2p = AL_TRUE;
		if (status & PCIE_AXI_PARITY_AXI_U3_RAM2P)
			stats->u3_ram2p = AL_TRUE;
		switch (pcie_port->rev_id) {
		case AL_PCIE_REV_ID_3:
			mask |= PCIE_V3_AXI_PARITY_AXI_U2_RAM2P;
			stats->num_of_errors = al_popcount(status & mask);
			if (status & PCIE_V3_AXI_PARITY_AXI_U2_RAM2P)
				stats->u2_ram2p = AL_TRUE;
			break;
		case AL_PCIE_REV_ID_2:
		case AL_PCIE_REV_ID_1:
			mask |= (PCIE_V1_V2_AXI_PARITY_AXI_U2_RAM2P |
				PCIE_V1_V2_AXI_PARITY_AXI_U10_RAM2P |
				PCIE_V1_V2_AXI_PARITY_AXI_U12_RAM2P);
			stats->num_of_errors = al_popcount(status & mask);
			if (status & PCIE_V1_V2_AXI_PARITY_AXI_U2_RAM2P)
				stats->u2_ram2p = AL_TRUE;
			if (status & PCIE_V1_V2_AXI_PARITY_AXI_U10_RAM2P)
				stats->u10_ram2p = AL_TRUE;
			if (status & PCIE_V1_V2_AXI_PARITY_AXI_U12_RAM2P)
				stats->u12_ram2p = AL_TRUE;
			break;
		default:
			al_err("%s: unsupported PCIe revision ID %d\n",
				__func__,
				pcie_port->rev_id);
			al_assert(0);
		}
	}
}
/*
 * Read the current LTSSM state into *ltssm_state.
 * Unless skip_port_enabled_check is set, fails with -EINVAL when the
 * port is not enabled.
 */
int al_pcie_ltssm_state_get(
	struct al_pcie_port *pcie_port,
	uint8_t *ltssm_state,
	al_bool skip_port_enabled_check)
{
	al_bool must_check_enabled = (skip_port_enabled_check != AL_TRUE);

	if (must_check_enabled && !al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: not enabled, cannot get ltssm state\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	al_pcie_check_link(pcie_port, ltssm_state);

	return 0;
}
/*
 * Reconstruct the 64-bit address logged for an AXI POS error from the
 * two logging registers; layout is revision dependent (see below).
 * Returns 0 on an unsupported revision.
 */
uint64_t al_pcie_axi_pos_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t addr_low, addr_high;
	uint64_t addr;

	al_assert(pcie_port);

	addr_low = al_reg_read32(pcie_port->regs->axi.pos_logged.error_low);
	addr_high = al_reg_read32(pcie_port->regs->axi.pos_logged.error_high);

	/*
	 * V1 address:
	 * error_low [31:0] => addr[31:0]
	 * error_high[11:0] => addr[44:32]
	 *
	 * V2/V3 address:
	 * error_low [31:0] => addr[45:14]
	 * error_high[21:0] => addr[67:46]
	 * NOTE: we ignore [67:64] as it fabric target ID
	 */
	switch (pcie_port->rev_id) {
	case AL_PCIE_REV_ID_1:
		addr = addr_low;
		addr |= (uint64_t)(addr_high & AL_FIELD_MASK(11, 0)) << 32;
		break;
	case AL_PCIE_REV_ID_2:
	case AL_PCIE_REV_ID_3:
		addr = (uint64_t)addr_low << 14;
		/* only 18 bits of error_high fit below bit 64; bits
		 * [21:18] would map to addr[67:64], which are discarded */
		addr |= (uint64_t)(addr_high & AL_FIELD_MASK(17, 0)) << 46;
		break;
	default:
		al_err("%s: unsupported PCIE revision ID %d\n", __func__, pcie_port->rev_id);
		addr = 0;
	}

	return addr;
}
/* 64-bit address logged for an AXI read-data parity error. */
uint64_t al_pcie_axi_read_data_parity_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t lo, hi;

	al_assert(pcie_port);

	lo = al_reg_read32(pcie_port->regs->axi.rd_parity.log_low);
	hi = al_reg_read32(pcie_port->regs->axi.rd_parity.log_high);

	return ((uint64_t)hi << 32) | lo;
}
/* 64-bit address logged for an AXI read-completion error. */
uint64_t al_pcie_axi_read_compl_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t lo, hi;

	al_assert(pcie_port);

	lo = al_reg_read32(pcie_port->regs->axi.rd_cmpl.cmpl_log_low);
	hi = al_reg_read32(pcie_port->regs->axi.rd_cmpl.cmpl_log_high);

	return ((uint64_t)hi << 32) | lo;
}
/* 64-bit address logged for an AXI read-timeout error. */
uint64_t al_pcie_axi_read_to_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t lo, hi;

	al_assert(pcie_port);

	lo = al_reg_read32(pcie_port->regs->axi.rd_to.to_log_low);
	hi = al_reg_read32(pcie_port->regs->axi.rd_to.to_log_high);

	return ((uint64_t)hi << 32) | lo;
}
/* 64-bit address logged for an AXI write-completion error. */
uint64_t al_pcie_axi_write_compl_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t lo, hi;

	al_assert(pcie_port);

	lo = al_reg_read32(pcie_port->regs->axi.wr_cmpl.wr_cmpl_log_low);
	hi = al_reg_read32(pcie_port->regs->axi.wr_cmpl.wr_cmpl_log_high);

	return ((uint64_t)hi << 32) | lo;
}
/* 64-bit address logged for an AXI write-timeout error. */
uint64_t al_pcie_axi_write_to_error_addr_get(struct al_pcie_port *pcie_port)
{
	uint32_t lo, hi;

	al_assert(pcie_port);

	lo = al_reg_read32(pcie_port->regs->axi.wr_to.wr_to_log_low);
	hi = al_reg_read32(pcie_port->regs->axi.wr_to.wr_to_log_high);

	return ((uint64_t)hi << 32) | lo;
}
| 31.043449 | 112 | 0.75065 | [
"vector"
] |
26a13083f367913358df95273d975bbf6fd452d3 | 13,344 | c | C | Python/mactoolboxglue.c | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | 1 | 2018-08-21T09:19:46.000Z | 2018-08-21T09:19:46.000Z | Python/mactoolboxglue.c | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | null | null | null | Python/mactoolboxglue.c | jasonadu/Python-2.5 | 93e24b88564de120b1296165b5c55975fdcb8a3c | [
"PSF-2.0"
] | 2 | 2017-01-30T21:52:13.000Z | 2019-07-18T21:33:17.000Z | /***********************************************************
Copyright 1991-1997 by Stichting Mathematisch Centrum, Amsterdam,
The Netherlands.
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the names of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
******************************************************************/
#include "Python.h"
#include "pymactoolbox.h"
#include <arpa/inet.h> /* for ntohl, htonl */
/* Like strerror() but for Mac OS error numbers */
char *
PyMac_StrError(int err)
{
	static char buf[256];	/* shared static buffer: not thread-safe;
				 * callers must copy before the next call */
	PyObject *m;
	PyObject *rv;

	/* Delegate to MacOS.GetErrorString(err); any import or call failure
	 * is swallowed and yields an empty string */
	m = PyImport_ImportModule("MacOS");
	if (!m) {
		if (Py_VerboseFlag)
			PyErr_Print();
		PyErr_Clear();
		rv = NULL;
	}
	else {
		rv = PyObject_CallMethod(m, "GetErrorString", "i", err);
		if (!rv)
			PyErr_Clear();
	}
	if (!rv) {
		buf[0] = '\0';
	}
	else {
		char *input = PyString_AsString(rv);
		if (!input) {
			PyErr_Clear();
			buf[0] = '\0';
		} else {
			/* bounded copy with guaranteed NUL termination */
			strncpy(buf, input, sizeof(buf) - 1);
			buf[sizeof(buf) - 1] = '\0';
		}
		Py_DECREF(rv);
	}
	Py_XDECREF(m);
	return buf;
}
/* Exception object shared by all Mac specific modules for Mac OS errors */
PyObject *PyMac_OSErrException;
/* Initialize and return PyMac_OSErrException */
PyObject *
PyMac_GetOSErrException(void)
{
	/* Lazily create the shared MacOS.Error exception object */
	if (PyMac_OSErrException != NULL)
		return PyMac_OSErrException;

	PyMac_OSErrException = PyErr_NewException("MacOS.Error", NULL, NULL);
	return PyMac_OSErrException;
}
/* Set a MAC-specific error from errno, and return NULL; return None if no error */
PyObject *
PyErr_Mac(PyObject *eobj, int err)
{
char *msg;
PyObject *v;
if (err == 0 && !PyErr_Occurred()) {
Py_INCREF(Py_None);
return Py_None;
}
if (err == -1 && PyErr_Occurred())
return NULL;
msg = PyMac_StrError(err);
v = Py_BuildValue("(is)", err, msg);
PyErr_SetObject(eobj, v);
Py_DECREF(v);
return NULL;
}
/* Call PyErr_Mac with PyMac_OSErrException */
/* Convenience wrapper: PyErr_Mac with the shared MacOS.Error exception. */
PyObject *
PyMac_Error(OSErr err)
{
	PyObject *exc = PyMac_GetOSErrException();

	return PyErr_Mac(exc, err);
}
/*
 * Fill 'path' (capacity 'len') with the full pathname of the file
 * described by 'fss', by round-tripping through the Python FSSpec
 * object's as_pathname() method. Returns noErr on success; on failure
 * returns the OSErr extracted from a raised MacOS.Error, or -1.
 */
OSErr
PyMac_GetFullPathname(FSSpec *fss, char *path, int len)
{
	PyObject *fs, *exc;
	PyObject *rv = NULL;
	char *input;
	OSErr err = noErr;

	*path = '\0';

	fs = PyMac_BuildFSSpec(fss);
	if (!fs)
		goto error;

	rv = PyObject_CallMethod(fs, "as_pathname", "");
	if (!rv)
		goto error;

	input = PyString_AsString(rv);
	if (!input)
		goto error;

	/* bounded copy with guaranteed NUL termination */
	strncpy(path, input, len - 1);
	path[len - 1] = '\0';

	Py_XDECREF(rv);
	Py_XDECREF(fs);
	return err;

  error:
	/* If a MacOS.Error was raised, pull the OSErr out of its
	 * (errno, message) args tuple; otherwise report -1 */
	exc = PyErr_Occurred();
	if (exc  && PyErr_GivenExceptionMatches(exc,
						PyMac_GetOSErrException())) {
		PyObject *args = PyObject_GetAttrString(exc, "args");
		if (args) {
			char *ignore;
			PyArg_ParseTuple(args, "is", &err, &ignore);
			Py_XDECREF(args);
		}
	}
	if (err == noErr)
		err = -1;
	PyErr_Clear();
	Py_XDECREF(rv);
	Py_XDECREF(fs);
	return err;
}
/* Convert a 4-char string object argument to an OSType value */
/* Convert a 4-char string object argument to an OSType value. */
int
PyMac_GetOSType(PyObject *v, OSType *pr)
{
	uint32_t big_endian_val;

	if (PyString_Check(v) && PyString_Size(v) == 4) {
		/* the 4 bytes are big-endian on the wire; convert to host order */
		memcpy((char *)&big_endian_val, PyString_AsString(v), 4);
		*pr = (OSType)ntohl(big_endian_val);
		return 1;
	}

	PyErr_SetString(PyExc_TypeError,
			"OSType arg must be string of 4 chars");
	return 0;
}
/* Convert an OSType value to a 4-char string object */
/* Convert an OSType value to a 4-char string object. */
PyObject *
PyMac_BuildOSType(OSType t)
{
	uint32_t big_endian_val;

	big_endian_val = htonl((uint32_t)t);
	return PyString_FromStringAndSize((char *)&big_endian_val, 4);
}
/* Convert an NumVersion value to a 4-element tuple */
/* Convert a NumVersion value to a 4-element tuple of shorts. */
PyObject *
PyMac_BuildNumVersion(NumVersion t)
{
	return Py_BuildValue("(hhhh)",
			     t.majorRev,
			     t.minorAndBugRev,
			     t.stage,
			     t.nonRelRev);
}
/* Convert a Python string object to a Str255 */
/* Convert a Python string object (<= 255 chars) to a Pascal Str255. */
int
PyMac_GetStr255(PyObject *v, Str255 pbuf)
{
	int len;

	if (PyString_Check(v)) {
		len = PyString_Size(v);
		if (len <= 255) {
			/* Pascal string: length byte first, then the chars */
			pbuf[0] = len;
			memcpy((char *)(pbuf + 1), PyString_AsString(v), len);
			return 1;
		}
	}

	PyErr_SetString(PyExc_TypeError,
			"Str255 arg must be string of at most 255 chars");
	return 0;
}
/* Convert a Str255 to a Python string object */
/* Convert a Pascal Str255 to a Python string; NULL raises SystemError. */
PyObject *
PyMac_BuildStr255(Str255 s)
{
	if (s == NULL) {
		PyErr_SetString(PyExc_SystemError, "Str255 pointer is NULL");
		return NULL;
	}

	/* s[0] is the length byte; the characters start at s[1] */
	return PyString_FromStringAndSize((char *)&s[1], (int)s[0]);
}
/* Like PyMac_BuildStr255, but a NULL pointer maps to None. */
PyObject *
PyMac_BuildOptStr255(Str255 s)
{
	if (s == NULL)
		Py_RETURN_NONE;

	return PyString_FromStringAndSize((char *)&s[1], (int)s[0]);
}
/* Convert a Python object to a Rect.
The object must be a (left, top, right, bottom) tuple.
(This differs from the order in the struct but is consistent with
the arguments to SetRect(), and also with STDWIN). */
/* Parse a (left, top, right, bottom) tuple into a Rect.
   (This differs from the struct's member order but is consistent with
   the arguments to SetRect(), and also with STDWIN). */
int
PyMac_GetRect(PyObject *v, Rect *r)
{
	return PyArg_Parse(v, "(hhhh)",
			   &r->left, &r->top, &r->right, &r->bottom);
}
/* Convert a Rect to a Python object */
/* Build a (left, top, right, bottom) tuple from a Rect. */
PyObject *
PyMac_BuildRect(Rect *r)
{
	return Py_BuildValue("(hhhh)",
			     r->left, r->top, r->right, r->bottom);
}
/* Convert a Python object to a Point.
The object must be a (h, v) tuple.
(This differs from the order in the struct but is consistent with
the arguments to SetPoint(), and also with STDWIN). */
/* Parse an (h, v) tuple into a Point.
   (This differs from the struct's member order but is consistent with
   the arguments to SetPoint(), and also with STDWIN). */
int
PyMac_GetPoint(PyObject *v, Point *p)
{
	return PyArg_Parse(v, "(hh)", &p->h, &p->v);
}
/* Convert a Point to a Python object */
/* Build an (h, v) tuple from a Point. */
PyObject *
PyMac_BuildPoint(Point p)
{
	return Py_BuildValue("(hh)", p.h, p.v);
}
/* Convert a Python object to an EventRecord.
The object must be a (what, message, when, (v, h), modifiers) tuple. */
int
PyMac_GetEventRecord(PyObject *v, EventRecord *e)
{
	/* H = unsigned short (what/modifiers), k = unsigned long
	 * (message/when); the nested (hh) fills where.h then where.v.
	 * NOTE(review): the builder counterpart uses the signed format
	 * "(hll(hh)h)" — asymmetric with this parser; confirm intended. */
	return PyArg_Parse(v, "(Hkk(hh)H)",
			&e->what,
			&e->message,
			&e->when,
			&e->where.h,
			&e->where.v,
			&e->modifiers);
}
/* Convert an EventRecord to a Python tuple object */
PyObject *
PyMac_BuildEventRecord(EventRecord *e)
{
	/* (what, message, when, (h, v), modifiers) — signed formats here,
	 * unlike the unsigned formats used by PyMac_GetEventRecord */
	return Py_BuildValue("(hll(hh)h)",
			e->what,
			e->message,
			e->when,
			e->where.h,
			e->where.v,
			e->modifiers);
}
/* Convert Python object to Fixed */
/* Convert a Python number to a 16.16 Fixed value. */
int
PyMac_GetFixed(PyObject *v, Fixed *f)
{
	double value;

	if (PyArg_Parse(v, "d", &value) == 0)
		return 0;

	/* scale by 2^16 to move the fraction into the low word */
	*f = (Fixed)(value * 0x10000);
	return 1;
}
/* Convert a Fixed (16.16 fixed-point) value to a Python float. */
PyObject *
PyMac_BuildFixed(Fixed f)
{
	double d;
	d = f;
	/* Divide out the 2^16 scale factor to recover the fractional part. */
	d = d / 0x10000;
	return Py_BuildValue("d", d);
}
/* Convert wide to/from Python int or (hi, lo) tuple. XXXX Should use Python longs */
/* Accepts either a plain int (whose value is sign-extended into hi) or a
   (hi, lo) tuple parsed as two unsigned longs.
   Returns non-zero on success, 0 (with an exception set) on failure. */
int
PyMac_Getwide(PyObject *v, wide *rv)
{
	if (PyInt_Check(v)) {
		rv->hi = 0;
		rv->lo = PyInt_AsLong(v);
		/* Sign-extend: a set 32-bit sign bit means hi must be all ones. */
		if( rv->lo & 0x80000000 )
			rv->hi = -1;
		return 1;
	}
	return PyArg_Parse(v, "(kk)", &rv->hi, &rv->lo);
}
/* Convert a wide to a Python object: a plain int when the 64-bit value is
   just a sign-extended 32-bit number, otherwise a (hi, lo) tuple. */
PyObject *
PyMac_Buildwide(wide *w)
{
	if ( (w->hi == 0 && (w->lo & 0x80000000) == 0) ||
	     (w->hi == -1 && (w->lo & 0x80000000) ) )
		return PyInt_FromLong(w->lo);
	return Py_BuildValue("(ll)", w->hi, w->lo);
}
#ifdef USE_TOOLBOX_OBJECT_GLUE
/*
** Glue together the toolbox objects.
**
** Because toolbox modules interdepend on each other, they use each others
** object types, on MacOSX/MachO this leads to the situation that they
** cannot be dynamically loaded (or they would all have to be lumped into
** a single .so, but this would be bad for extensibility).
**
** This file defines wrappers for all the _New and _Convert functions,
** which are the Py_BuildValue and PyArg_ParseTuple helpers. The wrappers
** check an indirection function pointer, and if it isn't filled in yet
** they import the appropriate module, whose init routine should fill in
** the pointer.
*/
/* GLUE_NEW(object, routinename, module):
   Defines a Py_BuildValue-style wrapper that converts a toolbox object to a
   Python object.  The wrapper lazily imports 'module' (whose init routine is
   expected to fill in PyMacGluePtr_<routinename>) and then forwards every
   call through that pointer.  Returns NULL with ImportError set if the
   module loads but never provides the routine. */
#define GLUE_NEW(object, routinename, module) \
PyObject *(*PyMacGluePtr_##routinename)(object); \
\
PyObject *routinename(object cobj) { \
    if (!PyMacGluePtr_##routinename) { \
       if (!PyImport_ImportModule(module)) return NULL; \
       if (!PyMacGluePtr_##routinename) { \
           PyErr_SetString(PyExc_ImportError, "Module did not provide routine: " module ": " #routinename); \
           return NULL; \
       } \
    } \
    return (*PyMacGluePtr_##routinename)(cobj); \
}

/* GLUE_CONVERT(object, routinename, module):
   Same lazy-import scheme as GLUE_NEW, but for PyArg_ParseTuple-style
   converters (PyObject -> toolbox object).  Returns 0 on failure,
   non-zero on success. */
#define GLUE_CONVERT(object, routinename, module) \
int (*PyMacGluePtr_##routinename)(PyObject *, object *); \
\
int routinename(PyObject *pyobj, object *cobj) { \
    if (!PyMacGluePtr_##routinename) { \
       if (!PyImport_ImportModule(module)) return 0; \
       if (!PyMacGluePtr_##routinename) { \
           PyErr_SetString(PyExc_ImportError, "Module did not provide routine: " module ": " #routinename); \
           return 0; \
       } \
    } \
    return (*PyMacGluePtr_##routinename)(pyobj, cobj); \
}
/* Instantiations of the glue wrappers, grouped by the Carbon submodule
   that provides the real implementation. */

/* Carbon.File */
GLUE_NEW(FSSpec *, PyMac_BuildFSSpec, "Carbon.File")
GLUE_CONVERT(FSSpec, PyMac_GetFSSpec, "Carbon.File")
GLUE_NEW(FSRef *, PyMac_BuildFSRef, "Carbon.File")
GLUE_CONVERT(FSRef, PyMac_GetFSRef, "Carbon.File")

/* Carbon.AE */
GLUE_NEW(AppleEvent *, AEDesc_New, "Carbon.AE") /* XXXX Why by address? */
GLUE_NEW(AppleEvent *, AEDesc_NewBorrowed, "Carbon.AE")
GLUE_CONVERT(AppleEvent, AEDesc_Convert, "Carbon.AE")

/* Carbon.Cm */
GLUE_NEW(Component, CmpObj_New, "Carbon.Cm")
GLUE_CONVERT(Component, CmpObj_Convert, "Carbon.Cm")
GLUE_NEW(ComponentInstance, CmpInstObj_New, "Carbon.Cm")
GLUE_CONVERT(ComponentInstance, CmpInstObj_Convert, "Carbon.Cm")

/* Carbon.Ctl */
GLUE_NEW(ControlHandle, CtlObj_New, "Carbon.Ctl")
GLUE_CONVERT(ControlHandle, CtlObj_Convert, "Carbon.Ctl")

/* Carbon.Dlg */
GLUE_NEW(DialogPtr, DlgObj_New, "Carbon.Dlg")
GLUE_CONVERT(DialogPtr, DlgObj_Convert, "Carbon.Dlg")
GLUE_NEW(DialogPtr, DlgObj_WhichDialog, "Carbon.Dlg")

/* Carbon.Drag */
GLUE_NEW(DragReference, DragObj_New, "Carbon.Drag")
GLUE_CONVERT(DragReference, DragObj_Convert, "Carbon.Drag")

/* Carbon.List */
GLUE_NEW(ListHandle, ListObj_New, "Carbon.List")
GLUE_CONVERT(ListHandle, ListObj_Convert, "Carbon.List")

/* Carbon.Menu */
GLUE_NEW(MenuHandle, MenuObj_New, "Carbon.Menu")
GLUE_CONVERT(MenuHandle, MenuObj_Convert, "Carbon.Menu")

/* Carbon.Qd */
GLUE_NEW(GrafPtr, GrafObj_New, "Carbon.Qd")
GLUE_CONVERT(GrafPtr, GrafObj_Convert, "Carbon.Qd")
GLUE_NEW(BitMapPtr, BMObj_New, "Carbon.Qd")
GLUE_CONVERT(BitMapPtr, BMObj_Convert, "Carbon.Qd")
GLUE_NEW(RGBColor *, QdRGB_New, "Carbon.Qd") /* XXXX Why? */
GLUE_CONVERT(RGBColor, QdRGB_Convert, "Carbon.Qd")

/* Carbon.Qdoffs */
GLUE_NEW(GWorldPtr, GWorldObj_New, "Carbon.Qdoffs")
GLUE_CONVERT(GWorldPtr, GWorldObj_Convert, "Carbon.Qdoffs")

/* Carbon.Qt */
GLUE_NEW(Track, TrackObj_New, "Carbon.Qt")
GLUE_CONVERT(Track, TrackObj_Convert, "Carbon.Qt")
GLUE_NEW(Movie, MovieObj_New, "Carbon.Qt")
GLUE_CONVERT(Movie, MovieObj_Convert, "Carbon.Qt")
GLUE_NEW(MovieController, MovieCtlObj_New, "Carbon.Qt")
GLUE_CONVERT(MovieController, MovieCtlObj_Convert, "Carbon.Qt")
GLUE_NEW(TimeBase, TimeBaseObj_New, "Carbon.Qt")
GLUE_CONVERT(TimeBase, TimeBaseObj_Convert, "Carbon.Qt")
GLUE_NEW(UserData, UserDataObj_New, "Carbon.Qt")
GLUE_CONVERT(UserData, UserDataObj_Convert, "Carbon.Qt")
GLUE_NEW(Media, MediaObj_New, "Carbon.Qt")
GLUE_CONVERT(Media, MediaObj_Convert, "Carbon.Qt")

/* Carbon.Res */
GLUE_NEW(Handle, ResObj_New, "Carbon.Res")
GLUE_CONVERT(Handle, ResObj_Convert, "Carbon.Res")
GLUE_NEW(Handle, OptResObj_New, "Carbon.Res")
GLUE_CONVERT(Handle, OptResObj_Convert, "Carbon.Res")

/* Carbon.TE */
GLUE_NEW(TEHandle, TEObj_New, "Carbon.TE")
GLUE_CONVERT(TEHandle, TEObj_Convert, "Carbon.TE")

/* Carbon.Win */
GLUE_NEW(WindowPtr, WinObj_New, "Carbon.Win")
GLUE_CONVERT(WindowPtr, WinObj_Convert, "Carbon.Win")
GLUE_NEW(WindowPtr, WinObj_WhichWindow, "Carbon.Win")

/* Carbon.CF */
GLUE_CONVERT(CFTypeRef, CFObj_Convert, "Carbon.CF")
GLUE_NEW(CFTypeRef, CFObj_New, "Carbon.CF")
GLUE_CONVERT(CFTypeRef, CFTypeRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFTypeRef, CFTypeRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFStringRef, CFStringRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFStringRef, CFStringRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFMutableStringRef, CFMutableStringRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFMutableStringRef, CFMutableStringRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFArrayRef, CFArrayRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFArrayRef, CFArrayRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFMutableArrayRef, CFMutableArrayRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFMutableArrayRef, CFMutableArrayRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFDictionaryRef, CFDictionaryRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFDictionaryRef, CFDictionaryRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFMutableDictionaryRef, CFMutableDictionaryRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFMutableDictionaryRef, CFMutableDictionaryRefObj_New, "Carbon.CF")
GLUE_CONVERT(CFURLRef, CFURLRefObj_Convert, "Carbon.CF")
GLUE_CONVERT(CFURLRef, OptionalCFURLRefObj_Convert, "Carbon.CF")
GLUE_NEW(CFURLRef, CFURLRefObj_New, "Carbon.CF")
#endif /* USE_TOOLBOX_OBJECT_GLUE */
| 28.33121 | 109 | 0.698291 | [
"object"
] |
26a285bab78f18eed99dcc1c7ba639d456b67907 | 6,250 | h | C | Modules/ThirdParty/GDCM/src/gdcm/Source/MediaStorageAndFileFormat/gdcmImageHelper.h | jcfr/ITK | 6632b7f968c3827e8a3bcc9812e05b4e2064676e | [
"Apache-2.0"
] | 1 | 2019-04-22T09:09:17.000Z | 2019-04-22T09:09:17.000Z | Modules/ThirdParty/GDCM/src/gdcm/Source/MediaStorageAndFileFormat/gdcmImageHelper.h | JamesLinus/ITK | 01fb2f2a97ae7767b7835dd92b40b6cc2c82e750 | [
"Apache-2.0"
] | null | null | null | Modules/ThirdParty/GDCM/src/gdcm/Source/MediaStorageAndFileFormat/gdcmImageHelper.h | JamesLinus/ITK | 01fb2f2a97ae7767b7835dd92b40b6cc2c82e750 | [
"Apache-2.0"
] | null | null | null | /*=========================================================================
Program: GDCM (Grassroots DICOM). A DICOM library
Copyright (c) 2006-2011 Mathieu Malaterre
All rights reserved.
See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#ifndef GDCMIMAGEHELPER_H
#define GDCMIMAGEHELPER_H
#include "gdcmTypes.h"
#include "gdcmTag.h"
#include <vector>
#include "gdcmPixelFormat.h"
#include "gdcmPhotometricInterpretation.h"
#include "gdcmSmartPointer.h"
#include "gdcmLookupTable.h"
namespace gdcm
{
class MediaStorage;
class DataSet;
class File;
class Image;
class ByteValue;
/**
* \brief ImageHelper (internal class, not intended for user level)
*
* \details
* Helper for writing World images in DICOM. DICOM has a 'template' approach to image where
* MR Image Storage are distinct object from Enhanced MR Image Storage. For example the
* Pixel Spacing in one object is not at the same position (ie Tag) as in the other
* this class is the central (read: fragile) place where all the dispatching is done from
* a unified view of a world image (typically VTK or ITK point of view) down to the low
* level DICOM point of view.
*
* \warning: do not expect the API of this class to be maintained at any point, since as
* Modalities are added the API might have to be augmented or behavior changed to cope
* with new modalities.
*/
class GDCM_EXPORT ImageHelper
{
public:
  /// GDCM 1.x compatibility issue:
  /// when using ReWrite an MR Image Storage would be rewritten with a Rescale Slope/Intercept
  /// while the standard would prohibit this (Philips Medical System is still doing that)
  /// Unless explicitly set elsewhere by the standard, it will use value from 0028,1052 / 0028,1053
  /// for the Rescale Slope & Rescale Intercept values
  static void SetForceRescaleInterceptSlope(bool);
  static bool GetForceRescaleInterceptSlope();

  /// GDCM 1.x compatibility issue:
  /// When using ReWrite an MR Image Storage would be rewritten as Secondary Capture Object while
  /// still having a Pixel Spacing tag (0028,0030). If you have deal with those files, use this
  /// very special flag to handle them
  /// Unless explicitly set elsewhere by the standard, it will use value from 0028,0030 / 0018,0088
  /// for the Pixel Spacing of the Image
  static void SetForcePixelSpacing(bool);
  static bool GetForcePixelSpacing();

  /// This function checks tags (0x0028, 0x0010) and (0x0028, 0x0011) for the
  /// rows and columns of the image in pixels (as opposed to actual distances).
  /// The output is {col , row}
  static std::vector<unsigned int> GetDimensionsValue(const File& f);
  /// Counterpart setter for GetDimensionsValue.
  static void SetDimensionsValue(File& f, const Image & img);

  /// This function returns pixel information about an image from its dataset
  /// That includes samples per pixel and bit depth (in that order)
  static PixelFormat GetPixelFormatValue(const File& f);

  /// Set/Get shift/scale from/to a file
  /// \warning this function reads/sets the Slope/Intercept in appropriate
  /// class storage, but also Grid Scaling in RT Dose Storage
  /// Can't take a dataset because the mediastorage of the file must be known
  static std::vector<double> GetRescaleInterceptSlopeValue(File const & f);
  static void SetRescaleInterceptSlopeValue(File & f, const Image & img);

  /// Set/Get Origin (IPP) from/to a file
  static std::vector<double> GetOriginValue(File const & f);
  static void SetOriginValue(DataSet & ds, const Image & img);

  /// Get Direction Cosines (IOP) from/to a file
  /// Requires a file because mediastorage must be known
  static std::vector<double> GetDirectionCosinesValue(File const & f);
  /// Set Direction Cosines (IOP) from/to a file
  /// When IOD does not defines what is IOP (eg. typically Secondary Capture Image Storage)
  /// this call will simply remove the IOP attribute.
  /// Else in case of MR/CT image storage, this call will properly lookup the correct attribute
  /// to store the IOP.
  // FIXME: There is a major issue for image with multiple IOP (eg. Enhanced * Image Storage).
  static void SetDirectionCosinesValue(DataSet & ds, const std::vector<double> & dircos);

  /// Set/Get Spacing from/to a File
  static std::vector<double> GetSpacingValue(File const & f);
  static void SetSpacingValue(DataSet & ds, const std::vector<double> & spacing);

  /// DO NOT USE
  static bool ComputeSpacingFromImagePositionPatient(const std::vector<double> &imageposition, std::vector<double> & spacing);

  static bool GetDirectionCosinesFromDataSet(DataSet const & ds, std::vector<double> & dircos);

  //functions to get more information from a file
  //useful for the stream image reader, which fills in necessary image information
  //distinctly from the reader-style data input
  static PhotometricInterpretation GetPhotometricInterpretationValue(File const& f);
  //returns the configuration of colors in a plane, either RGB RGB RGB or RRR GGG BBB
  static unsigned int GetPlanarConfigurationValue(const File& f);

  //returns the lookup table of an image file
  static SmartPointer<LookupTable> GetLUT(File const& f);

  ///Moved from PixampReader to here. Generally used for photometric interpretation.
  static const ByteValue* GetPointerFromElement(Tag const &tag, File const& f);

  /// Moved from MediaStorage here, since we need extra info stored in PixelFormat & PhotometricInterpretation
  static MediaStorage ComputeMediaStorageFromModality(const char *modality,
    unsigned int dimension = 2, PixelFormat const & pf = PixelFormat(),
    PhotometricInterpretation const & pi = PhotometricInterpretation(), double rescaleintercept = 0, double rescaleslope = 1 );

protected:
  /// Media-storage-dependent lookup of the tag that holds the in-plane spacing.
  static Tag GetSpacingTagFromMediaStorage(MediaStorage const &ms);
  /// Media-storage-dependent lookup of the tag that holds the z (inter-slice) spacing.
  static Tag GetZSpacingTagFromMediaStorage(MediaStorage const &ms);

private:
  /// Backing flags for the Set/GetForce* accessors above.
  static bool ForceRescaleInterceptSlope;
  static bool ForcePixelSpacing;
};
} // end namespace gdcm
#endif // GDCMIMAGEHELPER_H
| 44.964029 | 127 | 0.73872 | [
"object",
"vector"
] |
26a65497a9d4a6d9b249ad4cc68e93d71ce62c88 | 5,268 | h | C | projects/dpg/discontinuous_scalar_reference_finite_element.h | Fytch/lehrfempp | c804b3e350aa893180f1a02ce57a93b3d7686e91 | [
"MIT"
] | 16 | 2018-08-30T19:55:43.000Z | 2022-02-16T16:38:06.000Z | projects/dpg/discontinuous_scalar_reference_finite_element.h | Fytch/lehrfempp | c804b3e350aa893180f1a02ce57a93b3d7686e91 | [
"MIT"
] | 151 | 2018-05-27T13:01:50.000Z | 2021-08-04T14:50:50.000Z | projects/dpg/discontinuous_scalar_reference_finite_element.h | Fytch/lehrfempp | c804b3e350aa893180f1a02ce57a93b3d7686e91 | [
"MIT"
] | 20 | 2018-11-13T13:46:38.000Z | 2022-02-18T17:33:52.000Z | #ifndef PROJECTS_DPG_DISCONTINUOUS_SCALAR_REFERENCE_FINITE_ELEMENT
#define PROJECTS_DPG_DISCONTINUOUS_SCALAR_REFERENCE_FINITE_ELEMENT
/**
* @file
* @brief A Decorator around a ScalarReferenceFiniteElement to represent
* discontinuous shape functions
* @author Philippe Peter
* @date June 2019
* @copyright MIT License
*/
#include <lf/fe/fe.h>
#include <lf/uscalfe/uscalfe.h>
#include <iostream>
#include <typeinfo>
#include "dpg.h"
namespace projects::dpg {
/**
* @headerfile projecte/dpg/discontinuous_scalar_reference_finite_element.h
* @brief Decorator class around a ScalarReferenceFiniteElement to represent
* discontinuous shape functions.
* @tparam SCALAR The scalar type of the shape functions e.g. 'double'
*
* The class decorates any lf::fe::ScalarReferenceFiniteElement and
* forwards most calls to the decorated instance. The exception are methods
* requesting the number of shape functions associated with certain codimensions
* or subentities. Here the class changes the underlying implementation and
* associates all shape functions to the underlying entity of codimension 0.
*
* In particular this class is used to represent \f$L^2(\Omega) \f$ conforming
* finite elements. Standard \f$ H^1(\Omega) \f$ Lagrangian finite elements
* fullfill some continuity constraints, since certain shape functions are
* associated with vertices or edges of the cells. These continuity constraints
* are broken, by considering all shape functions as interior.
*/
template <typename SCALAR>
class DiscontinuousScalarReferenceFiniteElement
    : public lf::fe::ScalarReferenceFiniteElement<SCALAR> {
 public:
  /** Default constructor: yields an uninitialized (invalid) decorator.
   *  Calling any query method on such an object trips an assertion. */
  DiscontinuousScalarReferenceFiniteElement() = default;

  /** Main constructor: wraps the given (continuous) reference finite element
   *  and re-associates all of its shape functions with the cell interior. */
  explicit DiscontinuousScalarReferenceFiniteElement(
      std::shared_ptr<const lf::fe::ScalarReferenceFiniteElement<SCALAR>> inner)
      : lf::fe::ScalarReferenceFiniteElement<SCALAR>(), inner_(std::move(inner)) {}

  // Copying is forbidden; moving is allowed.
  DiscontinuousScalarReferenceFiniteElement(
      const DiscontinuousScalarReferenceFiniteElement&) = delete;
  DiscontinuousScalarReferenceFiniteElement& operator=(
      const DiscontinuousScalarReferenceFiniteElement&) = delete;
  DiscontinuousScalarReferenceFiniteElement(
      DiscontinuousScalarReferenceFiniteElement&&) noexcept = default;
  DiscontinuousScalarReferenceFiniteElement& operator=(
      DiscontinuousScalarReferenceFiniteElement&&) noexcept = default;

  /** virtual destructor */
  ~DiscontinuousScalarReferenceFiniteElement() override = default;

  /**
   * @brief Reports initialization status of the object
   *
   * Objects built by the default constructor are undefined
   */
  [[nodiscard]] inline bool isInitialized() const { return inner_ != nullptr; }

  [[nodiscard]] lf::base::RefEl RefEl() const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized!");
    return inner_->RefEl();
  }

  [[nodiscard]] unsigned Degree() const override {
    LF_ASSERT_MSG(isInitialized(), "Not Initialized");
    return inner_->Degree();
  }

  [[nodiscard]] size_type NumRefShapeFunctions() const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->NumRefShapeFunctions();
  }

  /** Every shape function is attached to the codim-0 entity itself. */
  [[nodiscard]] size_type NumRefShapeFunctions(dim_t codim) const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    if (codim == 0) {
      return inner_->NumRefShapeFunctions();
    }
    return 0;
  }

  /** Every shape function is attached to the codim-0 entity itself. */
  [[nodiscard]] size_type NumRefShapeFunctions(
      dim_t codim, sub_idx_t /*subidx*/) const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    if (codim == 0) {
      return inner_->NumRefShapeFunctions();
    }
    return 0;
  }

  [[nodiscard]] Eigen::Matrix<SCALAR, Eigen::Dynamic, Eigen::Dynamic>
  EvalReferenceShapeFunctions(const Eigen::MatrixXd& local) const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->EvalReferenceShapeFunctions(local);
  }

  [[nodiscard]] Eigen::Matrix<SCALAR, Eigen::Dynamic, Eigen::Dynamic>
  GradientsReferenceShapeFunctions(
      const Eigen::MatrixXd& local) const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->GradientsReferenceShapeFunctions(local);
  }

  [[nodiscard]] Eigen::MatrixXd EvaluationNodes() const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->EvaluationNodes();
  }

  [[nodiscard]] size_type NumEvaluationNodes() const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->NumEvaluationNodes();
  }

  [[nodiscard]] Eigen::Matrix<SCALAR, 1, Eigen::Dynamic> NodalValuesToDofs(
      const Eigen::Matrix<SCALAR, 1, Eigen::Dynamic>& nodvals) const override {
    LF_ASSERT_MSG(isInitialized(), "Not initialized");
    return inner_->NodalValuesToDofs(nodvals);
  }

 private:
  /** The wrapped (continuous) scalar-valued parametric finite element. */
  std::shared_ptr<const lf::fe::ScalarReferenceFiniteElement<SCALAR>> inner_;
};
} // namespace projects::dpg
#endif // PROJECTS_DPG_DISCONTINUOUS_SCALAR_REFERENCE_FINITE_ELEMENT
| 37.361702 | 80 | 0.748861 | [
"object",
"shape"
] |
26a9e4c702da364a9a55d162701099a92a356882 | 2,699 | h | C | rothko/window/common/window_backend.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | 5 | 2019-05-26T00:04:06.000Z | 2020-12-28T19:20:12.000Z | rothko/window/common/window_backend.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | null | null | null | rothko/window/common/window_backend.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | 1 | 2019-06-02T19:35:59.000Z | 2019-06-02T19:35:59.000Z | // Copyright 2019, Cristián Donoso.
// This code has a BSD license. See LICENSE.
#pragma once
#include <stdint.h>
#include <utility>
#include <vector>
namespace rothko {
// WindowBackend
// =================================================================================================
//
// Abstract interface each specific implementation of a window manager has to provide in order to
// work with Rothko. Each particular window manager (SDL, GLFW, etc.) must subclass this interface
// and suscribe a factory function keyed by it's particular entry into the WindowBackendType defined
// in rothko/window/common/window.h
//
// At the moment of needing a particular backend, the code will call that factory function to obtain
// an instance of that particular WindowBackend.
//
// It is recommended that the suscription is done at initialization time, so that the backend is
// assured to be there without any further work from part of the called.
enum class WindowEvent : uint32_t;
struct Input;
struct InitWindowConfig;
struct Window;
enum class MouseCursor {
kArrow,
kIbeam, // "Writing" cursor.
kWait,
kCrosshair,
kWaitArrow,
kSizeNWSE, // Double arrow pointing northwest and southeast
kSizeNESW, // Double arrow pointing northeast and southwest
kSizeWE, // Double arrow pointing west and east
kSizeNS, // Double arrow pointing north and south
kSizeAll, // Four pointed arrow pointing north, south, east, and west
kNo, // Slashed circle or crossbones
kHand, // Hand
kLast,
};
struct WindowBackend {
virtual ~WindowBackend() = default;
virtual bool Init(Window*, InitWindowConfig*) = 0;
virtual void Shutdown() = 0;
virtual WindowEvent StartFrame(Window*, Input*) = 0;
// No-op if the window manager doesn't require it.
virtual void SwapBuffers() {};
// Changes the mouse cursor (if supported).
virtual void ShowCursor(bool) {}
virtual void SetMouseCursor(MouseCursor cursor = MouseCursor::kArrow) { (void)cursor; }
// *** VULKAN SPECIFIC ***
// These functions must be subclassed if needed. If a backend doesn't need
// them, they can choose not to do so.
// Calling them in a backend that doesn't support them will assert a failure.
// (see window_backend.cc).
// Instance extensions required by this window manager.
virtual std::vector<const char*> GetVulkanInstanceExtensions();
// |vk_instance| & |surface_khr| must be casted to the right type in the
// implementation. This is so that we don't need to forward declare vulkan
// typedefs.
virtual bool CreateVulkanSurface(void* vk_instance, void* surface_khr);
};
} // namespace rothko
| 33.7375 | 100 | 0.690997 | [
"vector"
] |
26b300fc4228a91fe5f46f230e13a88029427417 | 3,344 | h | C | common/memory_reader.h | OwlcatGames/OwlcatMonoProfiler | 1104be18e16107d13ff2082da0c3723361f0c2ac | [
"MIT"
] | 85 | 2020-12-04T09:12:35.000Z | 2022-02-08T18:55:54.000Z | common/memory_reader.h | OwlcatGames/OwlcatMonoProfiler | 1104be18e16107d13ff2082da0c3723361f0c2ac | [
"MIT"
] | 6 | 2021-01-08T13:56:22.000Z | 2022-01-07T20:56:46.000Z | common/memory_reader.h | OwlcatGames/OwlcatMonoProfiler | 1104be18e16107d13ff2082da0c3723361f0c2ac | [
"MIT"
] | 2 | 2020-12-04T12:21:56.000Z | 2021-08-19T20:11:07.000Z | #pragma once
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <type_traits>
#include <vector>
/**
 \brief A helper class for safely reading data from a std::vector of bytes.

 All read_* methods bounds-check before touching the buffer and return false
 when not enough bytes remain; the read position only advances on a
 successful read.  The reader stores a reference to the storage, so the
 vector must outlive the reader.
*/
class memory_reader
{
public:
    memory_reader(const std::vector<uint8_t>& storage)
        : m_storage(storage)
    {}

    /// Reads a trivially-copyable value of type T as stored (little-endian
    /// on little-endian hosts, since bytes are copied verbatim).
    template<typename T>
    bool read(T& value)
    {
        static_assert(std::is_trivially_copyable<T>::value,
                      "memory_reader::read requires a trivially copyable type");
        const size_t value_size = sizeof(value);
        // Compare against the remaining byte count; written this way the
        // check cannot wrap (m_pos <= m_storage.size() is an invariant).
        if (value_size > m_storage.size() - m_pos)
            return false;

        memcpy(&value, m_storage.data() + m_pos, value_size);
        m_pos += value_size;
        return true;
    }

    /// Reads a 32-bit big-endian value by byte-swapping the native read.
    bool read_uint32_be(uint32_t& value)
    {
        uint32_t value_be;
        if (!read_uint32(value_be))
            return false;

        value = (value_be & 0xFF) << 24;
        value = value | ((value_be & 0xFF00) << 8);
        value = value | ((value_be & 0xFF0000) >> 8);
        value = value | ((value_be & 0xFF000000) >> 24);
        return true;
    }

    bool read_uint8(uint8_t& value) { return read<uint8_t>(value); }
    bool read_uint16(uint16_t& value) { return read<uint16_t>(value); }
    bool read_uint32(uint32_t& value) { return read<uint32_t>(value); }
    bool read_uint64(uint64_t& value) { return read<uint64_t>(value); }

    /// Reads a Bitcoin-style variable-length integer: values < 0xFD are a
    /// single byte; prefix 0xFD/0xFE/0xFF selects a following
    /// uint16/uint32/uint64 payload.
    /// On a truncated payload the position has already advanced past the
    /// prefix byte (matches the original behavior).
    bool read_varint(uint64_t& value)
    {
        uint8_t first_byte;
        if (!read_uint8(first_byte))
            return false;

        if (first_byte < 0xFD)
        {
            value = first_byte;
        }
        else if (first_byte == 0xFD)
        {
            uint16_t value16;
            if (!read_uint16(value16))
                return false;
            value = value16;
        }
        else if (first_byte == 0xFE)
        {
            uint32_t value32;
            if (!read_uint32(value32))
                return false;
            value = value32;
        }
        else  // first_byte == 0xFF (a uint8_t can hold nothing larger)
        {
            return read_uint64(value);
        }
        return true;
    }

    /// Reads `length` bytes into `buffer`; the buffer is cleared first, so
    /// on failure it is left empty.
    bool read_buffer(std::vector<uint8_t>& buffer, size_t length)
    {
        buffer.clear();
        if (length > m_storage.size() - m_pos)
            return false;

        buffer.insert(buffer.end(), m_storage.begin() + m_pos, m_storage.begin() + m_pos + length);
        m_pos += length;
        return true;
    }

    /// Reads `length` bytes into a caller-provided buffer.  The caller must
    /// guarantee the destination holds at least `length` bytes.
    bool read_buffer(uint8_t* buffer, size_t length)
    {
        if (length > m_storage.size() - m_pos)
            return false;

        if (length > 0)
            memcpy(buffer, m_storage.data() + m_pos, length);
        m_pos += length;
        return true;
    }

    /// Reads a varint length followed by that many raw bytes.
    /// (Method name kept as-is, typo included, for interface compatibility.)
    bool read_varint_length_and_bufffer(std::vector<uint8_t>& buffer)
    {
        uint64_t length;
        if (!read_varint(length))
            return false;
        // Reject impossible lengths with 64-bit math before narrowing to
        // size_t (on 32-bit targets a huge length would otherwise truncate).
        if (length > m_storage.size() - m_pos)
        {
            buffer.clear();
            return false;
        }
        return read_buffer(buffer, (size_t)length);
    }

    /// Reads a varint length followed by that many characters.
    /// On success `str` holds exactly the bytes read; a zero-length record
    /// yields an empty string even if the caller passed in a non-empty one
    /// (the original left stale content behind in that case).
    bool read_string(std::string& str)
    {
        uint64_t length;
        if (!read_varint(length))
            return false;
        if (length > m_storage.size() - m_pos)
            return false;

        str.assign(reinterpret_cast<const char*>(m_storage.data() + m_pos), (size_t)length);
        m_pos += (size_t)length;
        return true;
    }

    /// Current read offset in bytes from the start of the storage.
    size_t get_pos() const { return m_pos; }

private:
    /// Backing byte buffer (not owned).
    const std::vector<uint8_t>& m_storage;
    /// Current read offset; invariant: m_pos <= m_storage.size().
    size_t m_pos = 0;
};
| 25.142857 | 109 | 0.545754 | [
"vector"
] |
26b320c6868b3326bad2f98be2d17602ee2845e3 | 10,129 | h | C | tesseract_command_language/include/tesseract_command_language/core/waypoint.h | jdlangs/tesseract_planning | c009664e1bb8cdd50df25c16b2e34a2070b127af | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | tesseract_command_language/include/tesseract_command_language/core/waypoint.h | jdlangs/tesseract_planning | c009664e1bb8cdd50df25c16b2e34a2070b127af | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | tesseract_command_language/include/tesseract_command_language/core/waypoint.h | jdlangs/tesseract_planning | c009664e1bb8cdd50df25c16b2e34a2070b127af | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | /**
* @file waypoint.h
* @brief
*
* @author Levi Armstrong
* @date June 15, 2020
* @version TODO
* @bug No known bugs
*
* @copyright Copyright (c) 2020, Southwest Research Institute
*
* @par License
* Software License Agreement (Apache License)
* @par
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* @par
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TESSERACT_COMMAND_LANGUAGE_WAYPOINT_H
#define TESSERACT_COMMAND_LANGUAGE_WAYPOINT_H
#include <tesseract_common/macros.h>
TESSERACT_COMMON_IGNORE_WARNINGS_PUSH
#include <memory>
#include <string>
#include <typeindex>
#include <boost/serialization/base_object.hpp>
#include <boost/serialization/export.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/unique_ptr.hpp>
#include <boost/type_traits/is_virtual_base_of.hpp>
TESSERACT_COMMON_IGNORE_WARNINGS_POP
#include <tesseract_command_language/core/serialization.h>
#include <tesseract_common/sfinae_utils.h>
#ifdef SWIG
//%template(Waypoints) std::vector<tesseract_planning::Waypoint>;
#endif // SWIG
/** @brief If shared library, this must go in the header after the class definition.
 * Registers the type-erased wrapper WaypointInner<wp> with Boost.Serialization
 * under the textual name #wp and disables object tracking for it. */
#define TESSERACT_WAYPOINT_EXPORT_KEY(wp)                                                                              \
  BOOST_CLASS_EXPORT_KEY2(tesseract_planning::detail_waypoint::WaypointInner<wp>, #wp)                                 \
  BOOST_CLASS_TRACKING(tesseract_planning::detail_waypoint::WaypointInner<wp>, boost::serialization::track_never)

/** @brief If shared library, this must go in the cpp after the implicit instantiation of the serialize function */
#define TESSERACT_WAYPOINT_EXPORT_IMPLEMENT(wp)                                                                        \
  BOOST_CLASS_EXPORT_IMPLEMENT(tesseract_planning::detail_waypoint::WaypointInner<wp>)

/**
 * @brief This should not be used within shared libraries use the two above.
 * If not in a shared library it can go in header or cpp
 */
#define TESSERACT_WAYPOINT_EXPORT(wp)                                                                                  \
  TESSERACT_WAYPOINT_EXPORT_KEY(wp)                                                                                    \
  TESSERACT_WAYPOINT_EXPORT_IMPLEMENT(wp)
namespace tesseract_planning
{
class Waypoint;
#ifndef SWIG
namespace detail_waypoint
{
// Introspection helpers: generate the has_member_print /
// has_member_func_signature_print traits used below to verify that a
// wrapped type provides `void print(std::string)`.
CREATE_MEMBER_CHECK(print);
CREATE_MEMBER_FUNC_SIGNATURE_CHECK(print, void, std::string);

/** Type-erasure interface: every concrete waypoint type is stored behind
 *  this abstract base inside tesseract_planning::Waypoint. */
struct WaypointInnerBase
{
  WaypointInnerBase() = default;
  virtual ~WaypointInnerBase() = default;
  WaypointInnerBase(const WaypointInnerBase&) = delete;
  WaypointInnerBase& operator=(const WaypointInnerBase&) = delete;
  WaypointInnerBase(WaypointInnerBase&&) = delete;
  WaypointInnerBase& operator=(WaypointInnerBase&&) = delete;

  // User-defined methods
  virtual void print(const std::string& prefix) const = 0;
  virtual bool operator==(const WaypointInnerBase& rhs) const = 0;

  // This is not required for user defined implementation
  virtual bool operator!=(const WaypointInnerBase& rhs) const = 0;
  // This is not required for user defined implementation
  virtual std::type_index getType() const = 0;
  // This is not required for user defined implementation.
  // Returns a type-erased pointer to the stored waypoint object.
  virtual void* recover() = 0;
  // This is not required for user defined implementation
  virtual const void* recover() const = 0;
  // This is not required for user defined implementation.
  // Deep-copies the stored waypoint.
  virtual std::unique_ptr<WaypointInnerBase> clone() const = 0;

private:
  friend class boost::serialization::access;

  // Intentionally empty: the base carries no state, but must still be
  // serializable so WaypointInner<T> can register it as its base class.
  template <class Archive>
  void serialize(Archive& /*ar*/, const unsigned int /*version*/)  // NOLINT
  {
  }
};

/** Concrete type-erased holder for a waypoint of type T.
 *  T must provide `void print(std::string)` and `operator==`. */
template <typename T>
struct WaypointInner final : WaypointInnerBase
{
  WaypointInner()
  {
    static_assert(has_member_print<T>::value, "Class does not have member function 'print'");
    static_assert(has_member_func_signature_print<T>::value, "Class 'print' function has incorrect signature");
  }
  ~WaypointInner() override = default;
  WaypointInner(const WaypointInner&) = delete;
  WaypointInner(WaypointInner&&) = delete;
  WaypointInner& operator=(const WaypointInner&) = delete;
  WaypointInner& operator=(WaypointInner&&) = delete;

  // Constructors from T (copy and move variants).
  explicit WaypointInner(T waypoint) : waypoint_(std::move(waypoint))
  {
    static_assert(has_member_print<T>::value, "Class does not have member function 'print'");
    static_assert(has_member_func_signature_print<T>::value, "Class 'print' function has incorrect signature");
  }
  explicit WaypointInner(T&& waypoint) : waypoint_(std::move(waypoint))
  {
    static_assert(has_member_print<T>::value, "Class does not have member function 'print'");
    static_assert(has_member_func_signature_print<T>::value, "Class 'print' function has incorrect signature");
  }

  std::unique_ptr<WaypointInnerBase> clone() const final { return std::make_unique<WaypointInner>(waypoint_); }

  std::type_index getType() const final { return std::type_index(typeid(T)); }

  void print(const std::string& prefix) const final { waypoint_.print(prefix); }

  void* recover() final { return &waypoint_; }

  const void* recover() const final { return &waypoint_; }

  bool operator==(const WaypointInnerBase& rhs) const final
  {
    // Compare class types before casting the incoming object to the T type
    if (rhs.getType() == getType())
    {
      auto waypoint = static_cast<const T*>(rhs.recover());
      return waypoint_ == *waypoint;
    }
    return false;
  }

  bool operator!=(const WaypointInnerBase& rhs) const final { return !operator==(rhs); }

private:
  friend class boost::serialization::access;

  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/)  // NOLINT
  {
    // If this line is removed a exception is thrown for unregistered cast need to too look into this.
    ar& boost::serialization::make_nvp("base", boost::serialization::base_object<WaypointInnerBase>(*this));
    ar& boost::serialization::make_nvp("impl", waypoint_);
  }

  // The wrapped user waypoint value.
  T waypoint_;
};
}  // namespace detail_waypoint
#endif // SWIG
} // namespace tesseract_planning
namespace boost
{
// Taken from pagmo to address the same issue
// NOTE: in some earlier versions of Boost (i.e., at least up to 1.67)
// the is_virtual_base_of type trait, used by the Boost serialization library, fails
// with a compile time error if a class is declared final. Thus, we provide a specialised
// implementation of this type trait to work around the issue. See:
// https://www.boost.org/doc/libs/1_52_0/libs/type_traits/doc/html/boost_typetraits/reference/is_virtual_base_of.html
// https://stackoverflow.com/questions/18982064/boost-serialization-of-base-class-of-final-subclass-error
// We never use virtual inheritance, thus the specialisation is always false.
// Specialization: WaypointInner<T> is declared final and never uses virtual
// inheritance, so report false unconditionally to sidestep the Boost
// is_virtual_base_of compile error on final classes (see links above).
template <typename T>
struct is_virtual_base_of<tesseract_planning::detail_waypoint::WaypointInnerBase,
                          tesseract_planning::detail_waypoint::WaypointInner<T>> : false_type
{
};
} // namespace boost
namespace tesseract_planning
{
/**
 * @brief Type-erased waypoint container.
 *
 * Wraps any concrete waypoint type T (which must provide a
 * print(const std::string&) member, enforced by WaypointInner) behind a
 * uniform value-semantic interface supporting copy, move, comparison,
 * printing and Boost serialization.
 */
class Waypoint
{
  template <typename T>
  using uncvref_t = std::remove_cv_t<typename std::remove_reference<T>::type>;

  // Enable the generic ctor only if ``T`` is not a Waypoint (after removing
  // const/reference qualifiers). If ``T`` is a Waypoint we disable it so the
  // copy/move constructors of this class are used instead.
  template <typename T>
  using generic_ctor_enabler = std::enable_if_t<!std::is_same<Waypoint, uncvref_t<T>>::value, int>;

public:
  /**
   * @brief Construct from any concrete waypoint type, taking it by forwarding reference.
   *
   * std::forward preserves the argument's value category so that rvalue
   * arguments are moved into the inner holder instead of copied (the plain
   * ``waypoint`` was an lvalue here and always selected the copying ctor).
   */
  template <typename T, generic_ctor_enabler<T> = 0>
  Waypoint(T&& waypoint) // NOLINT
    : waypoint_(std::make_unique<detail_waypoint::WaypointInner<uncvref_t<T>>>(std::forward<T>(waypoint)))
  {
  }

  // Destructor
  ~Waypoint() = default;

  // Copy constructor: deep-copies the wrapped concrete waypoint via clone().
  Waypoint(const Waypoint& other) { waypoint_ = other.waypoint_->clone(); }

  // Move ctor: steals the inner holder; ``other`` is left holding our (null) pointer.
  Waypoint(Waypoint&& other) noexcept { waypoint_.swap(other.waypoint_); }

  // Move assignment (swap-based, so ``other`` receives our previous contents).
  Waypoint& operator=(Waypoint&& other) noexcept
  {
    waypoint_.swap(other.waypoint_);
    return (*this);
  }

  // Copy assignment: copy-and-move for strong exception safety.
  Waypoint& operator=(const Waypoint& other)
  {
    (*this) = Waypoint(other);
    return (*this);
  }

  /** @brief Assign from any concrete waypoint type (same enabler as the generic ctor). */
  template <typename T, generic_ctor_enabler<T> = 0>
  Waypoint& operator=(T&& other)
  {
    (*this) = Waypoint(std::forward<T>(other));
    return (*this);
  }

  /** @brief Runtime type of the wrapped concrete waypoint. */
  std::type_index getType() const { return waypoint_->getType(); }

  /** @brief Print the wrapped waypoint, prefixing each line with @p prefix. */
  void print(const std::string& prefix = "") const { waypoint_->print(prefix); }

  /** @brief Equality: same wrapped type and equal wrapped values. */
  bool operator==(const Waypoint& rhs) const { return waypoint_->operator==(*rhs.waypoint_); }
  bool operator!=(const Waypoint& rhs) const { return !operator==(rhs); }

  /**
   * @brief Typed access to the wrapped waypoint.
   * @throws std::bad_cast if T does not match the wrapped type exactly.
   */
  template <typename T>
  T& as()
  {
    if (getType() != typeid(T))
      throw std::bad_cast();
    auto p = static_cast<uncvref_t<T>*>(waypoint_->recover());
    return *p;
  }

  /** @brief Const overload of as(); same exact-type requirement. */
  template <typename T>
  const T& as() const
  {
    if (getType() != typeid(T))
      throw std::bad_cast();
    auto p = static_cast<const uncvref_t<T>*>(waypoint_->recover());
    return *p;
  }

private:
  friend class boost::serialization::access;
  friend struct tesseract_planning::Serialization;

  // Null-state ctor used only by deserialization; serialize() then fills waypoint_.
  Waypoint() // NOLINT
    : waypoint_(nullptr)
  {
  }

  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) // NOLINT
  {
    ar& boost::serialization::make_nvp("waypoint", waypoint_);
  }

  // Owning pointer to the type-erased holder; null only during deserialization.
  std::unique_ptr<detail_waypoint::WaypointInnerBase> waypoint_;
};
} // namespace tesseract_planning
BOOST_CLASS_TRACKING(tesseract_planning::Waypoint, boost::serialization::track_never);
#endif // TESSERACT_COMMAND_LANGUAGE_WAYPOINT_H
| 34.688356 | 120 | 0.708658 | [
"object",
"vector"
] |
26b7dc8e7c8595e6ce7d1a2a4d73a37d45052dba | 11,818 | h | C | src/policy/fees.h | discoltk/bitcoin-abc | ffc7102e6230384bfd9d500ba9c86c3b5d60c7b1 | [
"MIT"
] | 10 | 2018-05-01T19:24:54.000Z | 2019-11-18T09:24:19.000Z | src/policy/fees.h | discoltk/bitcoin-abc | ffc7102e6230384bfd9d500ba9c86c3b5d60c7b1 | [
"MIT"
] | 2 | 2017-08-02T15:48:07.000Z | 2017-08-04T16:54:18.000Z | src/policy/fees.h | discoltk/bitcoin-abc | ffc7102e6230384bfd9d500ba9c86c3b5d60c7b1 | [
"MIT"
] | 18 | 2017-07-28T20:12:05.000Z | 2019-05-21T16:31:14.000Z | // Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_POLICYESTIMATOR_H
#define BITCOIN_POLICYESTIMATOR_H
#include "amount.h"
#include "random.h"
#include "uint256.h"
#include <map>
#include <string>
#include <vector>
class CAutoFile;
class CFeeRate;
class CTxMemPoolEntry;
class CTxMemPool;
/** \class CBlockPolicyEstimator
* The BlockPolicyEstimator is used for estimating the feerate needed for a
* transaction to be included in a block within a certain number of blocks.
*
* At a high level the algorithm works by grouping transactions into buckets
* based on having similar feerates and then tracking how long it takes
* transactions in the various buckets to be mined. It operates under the
* assumption that in general transactions of higher feerate will be included in
* blocks before transactions of lower feerate. So for example if you wanted to
* know what feerate you should put on a transaction to be included in a block
* within the next 5 blocks, you would start by looking at the bucket with the
* highest feerate transactions and verifying that a sufficiently high
* percentage of them were confirmed within 5 blocks and then you would look at
* the next highest feerate bucket, and so on, stopping at the last bucket to
* pass the test. The average feerate of transactions in this bucket will give
* you an indication of the lowest feerate you can put on a transaction and
* still have a sufficiently high chance of being confirmed within your desired
* 5 blocks.
*
* Here is a brief description of the implementation:
* When a transaction enters the mempool, we track the height of the block chain
* at entry. Whenever a block comes in, we count the number of transactions in
* each bucket and the total amount of feerate paid in each bucket. Then we
* calculate how many blocks Y it took each transaction to be mined and we track
* an array of counters in each bucket for how long it to took transactions to
* get confirmed from 1 to a max of 25 and we increment all the counters from Y
* up to 25. This is because for any number Z>=Y the transaction was
* successfully mined within Z blocks. We want to save a history of this
* information, so at any time we have a counter of the total number of
* transactions that happened in a given feerate bucket and the total number
* that were confirmed in each number 1-25 blocks or less for any bucket. We
* save this history by keeping an exponentially decaying moving average of each
* one of these stats. Furthermore we also keep track of the number unmined (in
* mempool) transactions in each bucket and for how many blocks they have been
* outstanding and use that to increase the number of transactions we've seen in
* that feerate bucket when calculating an estimate for any number of
* confirmations below the number of blocks they've been outstanding.
*/
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block. We will lump transactions into a bucket according to
* their approximate feerate and then track how long it took for those txs to be
* included in a block.
*
* The tracking of unconfirmed (mempool) transactions is completely independent
* of the historical tracking of transactions that have been confirmed in a
* block.
*/
/**
 * We will instantiate an instance of this class to track transactions that were
 * included in a block. We will lump transactions into a bucket according to
 * their approximate feerate and then track how long it took for those txs to be
 * included in a block.
 *
 * The tracking of unconfirmed (mempool) transactions is completely independent
 * of the historical tracking of transactions that have been confirmed in a
 * block.
 */
class TxConfirmStats {
private:
    // Define the buckets we will group transactions into
    // The upper-bound of the range for the bucket (inclusive)
    std::vector<double> buckets;
    // Map of bucket upper-bound to index into all vectors by bucket
    std::map<double, unsigned int> bucketMap;

    // For each bucket X:
    // Count the total # of txs in each bucket
    // Track the historical moving average of this total over blocks
    std::vector<double> txCtAvg;
    // and calculate the total for the current block to update the moving
    // average
    std::vector<int> curBlockTxCt;

    // Count the total # of txs confirmed within Y blocks in each bucket
    // Track the historical moving average of theses totals over blocks
    // confAvg[Y][X]
    std::vector<std::vector<double>> confAvg;
    // and calculate the totals for the current block to update the moving
    // averages
    // curBlockConf[Y][X]
    std::vector<std::vector<int>> curBlockConf;

    // Sum the total feerate of all tx's in each bucket
    // Track the historical moving average of this total over blocks
    std::vector<double> avg;
    // and calculate the total for the current block to update the moving
    // average
    std::vector<double> curBlockVal;

    // Combine the conf counts with tx counts to calculate the confirmation %
    // for each Y,X. Combine the total value with the tx counts to calculate the
    // avg feerate per bucket
    double decay;

    // Mempool counts of outstanding transactions
    // For each bucket X, track the number of transactions in the mempool that
    // are unconfirmed for each possible confirmation value Y
    // unconfTxs[Y][X]
    std::vector<std::vector<int>> unconfTxs;
    // transactions still unconfirmed after MAX_CONFIRMS for each bucket
    std::vector<int> oldUnconfTxs;

public:
    /**
     * Initialize the data structures. This is called by BlockPolicyEstimator's
     * constructor with default values.
     * @param defaultBuckets contains the upper limits for the bucket boundaries
     * @param maxConfirms max number of confirms to track
     * @param decay how much to decay the historical moving average per block
     */
    void Initialize(std::vector<double> &defaultBuckets,
                    unsigned int maxConfirms, double decay);

    /**
     * Clear the state of the curBlock variables to start counting for the new
     * block.
     */
    void ClearCurrent(unsigned int nBlockHeight);

    /**
     * Record a new transaction data point in the current block stats
     * @param blocksToConfirm the number of blocks it took this transaction to
     * confirm
     * @param val the feerate of the transaction
     * @warning blocksToConfirm is 1-based and has to be >= 1
     */
    void Record(int blocksToConfirm, double val);

    /** Record a new transaction entering the mempool*/
    unsigned int NewTx(unsigned int nBlockHeight, double val);

    /** Remove a transaction from mempool tracking stats*/
    void removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight,
                  unsigned int bucketIndex);

    /**
     * Update our estimates by decaying our historical moving average and
     * updating with the data gathered from the current block.
     */
    void UpdateMovingAverages();

    /**
     * Calculate a feerate estimate. Find the lowest value bucket (or range of
     * buckets to make sure we have enough data points) whose transactions still
     * have sufficient likelihood of being confirmed within the target number of
     * confirmations
     * @param confTarget target number of confirmations
     * @param sufficientTxVal required average number of transactions per block
     * in a bucket range
     * @param minSuccess the success probability we require
     * @param requireGreater return the lowest feerate such that all higher
     * values pass minSuccess OR
     *        return the highest feerate such that all lower values fail
     * minSuccess
     * @param nBlockHeight the current block height
     */
    double EstimateMedianVal(int confTarget, double sufficientTxVal,
                             double minSuccess, bool requireGreater,
                             unsigned int nBlockHeight);

    /**
     * Return the max number of confirms we're tracking.
     * Made const so it can be called on const instances; the cast makes the
     * size_t -> unsigned int narrowing explicit (confAvg is sized by
     * maxConfirms, which is itself an unsigned int, so this cannot truncate).
     */
    unsigned int GetMaxConfirms() const {
        return static_cast<unsigned int>(confAvg.size());
    }

    /** Write state of estimation data to a file*/
    void Write(CAutoFile &fileout);

    /**
     * Read saved state of estimation data from a file and replace all internal
     * data structures and variables with this state.
     */
    void Read(CAutoFile &filein);
};
/** Track confirm delays up to 25 blocks, can't estimate beyond that */
static const unsigned int MAX_BLOCK_CONFIRMS = 25;

/** Decay of .998 is a half-life of 346 blocks or about 2.4 days */
static const double DEFAULT_DECAY = .998;

/** Require greater than 95% of X feerate transactions to be confirmed within Y
 * blocks for X to be big enough */
static const double MIN_SUCCESS_PCT = .95;

/** Require an avg of 1 tx in the combined feerate bucket per block to have stat
 * significance */
static const double SUFFICIENT_FEETXS = 1;

// Minimum and Maximum values for tracking feerates (in satoshis); estimates
// outside this range are clamped into it before bucketing.
static constexpr Amount MIN_FEERATE(10 * SATOSHI);
static const Amount MAX_FEERATE(int64_t(1e7) * SATOSHI);
// Sentinel "infinite" values used when no finite estimate can be given.
static const Amount INF_FEERATE(MAX_MONEY);
static const Amount INF_PRIORITY(int64_t(1e9) * MAX_MONEY);

// We have to lump transactions into buckets based on feerate, but we want to be
// able to give accurate estimates over a large range of potential feerates.
// Therefore it makes sense to exponentially space the buckets
/** Spacing of FeeRate buckets (each bucket's upper bound is 1.1x the previous) */
static const double FEE_SPACING = 1.1;
/**
* We want to be able to estimate feerates that are needed on tx's to be
* included in a certain number of blocks. Every time a block is added to the
* best chain, this class records stats on the transactions included in that
* block
*/
/**
 * We want to be able to estimate feerates that are needed on tx's to be
 * included in a certain number of blocks. Every time a block is added to the
 * best chain, this class records stats on the transactions included in that
 * block
 */
class CBlockPolicyEstimator {
public:
    /**
     * Create new BlockPolicyEstimator and initialize stats tracking classes
     * with default values.
     */
    CBlockPolicyEstimator();

    /** Process all the transactions that have been included in a block */
    void processBlock(unsigned int nBlockHeight,
                      std::vector<const CTxMemPoolEntry *> &entries);

    /** Process a transaction confirmed in a block*/
    bool processBlockTx(unsigned int nBlockHeight,
                        const CTxMemPoolEntry *entry);

    /** Process a transaction accepted to the mempool*/
    void processTransaction(const CTxMemPoolEntry &entry,
                            bool validFeeEstimate);

    /** Remove a transaction from the mempool tracking stats*/
    bool removeTx(uint256 hash);

    /** Return a feerate estimate */
    CFeeRate estimateFee(int confTarget);

    /** Estimate feerate needed to get be included in a block within
     * confTarget blocks. If no answer can be given at confTarget, return an
     * estimate at the lowest target where one can be given.
     */
    CFeeRate estimateSmartFee(int confTarget, int *answerFoundAtTarget,
                              const CTxMemPool &pool);

    /** Write estimation data to a file */
    void Write(CAutoFile &fileout);

    /** Read estimation data from a file */
    void Read(CAutoFile &filein, int nFileVersion);

private:
    //! Best-chain height seen so far. Passed to constructor to avoid
    //! dependency on main.
    unsigned int nBestSeenHeight;

    // Per-transaction tracking data: the height at which the tx entered the
    // mempool and the feerate bucket it was assigned to.
    struct TxStatsInfo {
        unsigned int blockHeight;
        unsigned int bucketIndex;
        TxStatsInfo() : blockHeight(0), bucketIndex(0) {}
    };

    // map of txids to information about that transaction
    std::map<uint256, TxStatsInfo> mapMemPoolTxs;

    /** Classes to track historical data on transaction confirmations */
    TxConfirmStats feeStats;

    // Counters (per block) of txs that did / did not qualify for fee tracking.
    unsigned int trackedTxs;
    unsigned int untrackedTxs;
};
/**
 * Rounds fee amounts down to a fixed set of representative values so that the
 * exact minimum fee a node broadcasts (via feefilter) does not leak precise
 * mempool state.
 */
class FeeFilterRounder {
public:
    /** Create new FeeFilterRounder */
    FeeFilterRounder(const CFeeRate &minIncrementalFee);

    /** Quantize a minimum fee for privacy purpose before broadcast **/
    Amount round(const Amount currentMinFee);

private:
    // Precomputed set of allowed fee values; round() snaps to one of these.
    std::set<Amount> feeset;
    // Randomness source used to perturb rounding (non-cryptographic).
    FastRandomContext insecure_rand;
};
#endif /*BITCOIN_POLICYESTIMATOR_H */
| 40.892734 | 80 | 0.720934 | [
"vector"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.